Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/3c59x.c | 6
-rw-r--r-- drivers/net/8139cp.c | 10
-rw-r--r-- drivers/net/Kconfig | 48
-rw-r--r-- drivers/net/Makefile | 2
-rw-r--r-- drivers/net/atarilance.c | 2
-rw-r--r-- drivers/net/atl1c/atl1c.h | 2
-rw-r--r-- drivers/net/atl1c/atl1c_hw.c | 2
-rw-r--r-- drivers/net/atl1c/atl1c_main.c | 6
-rw-r--r-- drivers/net/atlx/atl1.c | 13
-rw-r--r-- drivers/net/atlx/atl1.h | 9
-rw-r--r-- drivers/net/atlx/atlx.c | 4
-rw-r--r-- drivers/net/au1000_eth.c | 10
-rw-r--r-- drivers/net/b44.c | 11
-rw-r--r-- drivers/net/benet/be_cmds.c | 36
-rw-r--r-- drivers/net/benet/be_cmds.h | 2
-rw-r--r-- drivers/net/benet/be_main.c | 55
-rw-r--r-- drivers/net/bfin_mac.c | 145
-rw-r--r-- drivers/net/bfin_mac.h | 2
-rw-r--r-- drivers/net/bnx2x/bnx2x.h | 9
-rw-r--r-- drivers/net/bnx2x/bnx2x_cmn.c | 5
-rw-r--r-- drivers/net/bnx2x/bnx2x_cmn.h | 55
-rw-r--r-- drivers/net/bnx2x/bnx2x_hsi.h | 9
-rw-r--r-- drivers/net/bnx2x/bnx2x_init_ops.h | 34
-rw-r--r-- drivers/net/bnx2x/bnx2x_link.c | 194
-rw-r--r-- drivers/net/bnx2x/bnx2x_link.h | 15
-rw-r--r-- drivers/net/bnx2x/bnx2x_main.c | 57
-rw-r--r-- drivers/net/bonding/bond_main.c | 8
-rw-r--r-- drivers/net/caif/Kconfig | 7
-rw-r--r-- drivers/net/caif/Makefile | 4
-rw-r--r-- drivers/net/caif/caif_shm_u5500.c | 129
-rw-r--r-- drivers/net/caif/caif_shmcore.c | 744
-rw-r--r-- drivers/net/caif/caif_spi.c | 61
-rw-r--r-- drivers/net/caif/caif_spi_slave.c | 13
-rw-r--r-- drivers/net/can/Kconfig | 8
-rw-r--r-- drivers/net/can/Makefile | 1
-rw-r--r-- drivers/net/can/at91_can.c | 97
-rw-r--r-- drivers/net/can/flexcan.c | 3
-rw-r--r-- drivers/net/can/mcp251x.c | 3
-rw-r--r-- drivers/net/can/pch_can.c | 1463
-rw-r--r-- drivers/net/can/sja1000/Kconfig | 12
-rw-r--r-- drivers/net/can/sja1000/Makefile | 1
-rw-r--r-- drivers/net/can/sja1000/tscan1.c | 216
-rw-r--r-- drivers/net/cxgb3/cxgb3_main.c | 9
-rw-r--r-- drivers/net/cxgb3/sge.c | 4
-rw-r--r-- drivers/net/cxgb4/cxgb4.h | 1
-rw-r--r-- drivers/net/cxgb4/cxgb4_main.c | 32
-rw-r--r-- drivers/net/cxgb4/sge.c | 23
-rw-r--r-- drivers/net/cxgb4vf/cxgb4vf_main.c | 116
-rw-r--r-- drivers/net/cxgb4vf/sge.c | 122
-rw-r--r-- drivers/net/cxgb4vf/t4vf_common.h | 1
-rw-r--r-- drivers/net/cxgb4vf/t4vf_hw.c | 113
-rw-r--r-- drivers/net/davinci_cpdma.c | 965
-rw-r--r-- drivers/net/davinci_cpdma.h | 108
-rw-r--r-- drivers/net/davinci_emac.c | 1338
-rw-r--r-- drivers/net/davinci_mdio.c | 475
-rw-r--r-- drivers/net/e1000/e1000_main.c | 14
-rw-r--r-- drivers/net/e1000e/82571.c | 38
-rw-r--r-- drivers/net/e1000e/e1000.h | 3
-rw-r--r-- drivers/net/e1000e/netdev.c | 29
-rw-r--r-- drivers/net/ehea/ehea.h | 2
-rw-r--r-- drivers/net/ehea/ehea_main.c | 60
-rw-r--r-- drivers/net/gianfar.c | 13
-rw-r--r-- drivers/net/gianfar_ethtool.c | 5
-rw-r--r-- drivers/net/ibm_newemac/core.c | 1
-rw-r--r-- drivers/net/igb/igb_main.c | 1
-rw-r--r-- drivers/net/igbvf/netdev.c | 8
-rw-r--r-- drivers/net/ipg.c | 6
-rw-r--r-- drivers/net/irda/sh_sir.c | 2
-rw-r--r-- drivers/net/ixgb/ixgb_main.c | 1
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb.c | 39
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb.h | 5
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb_82599.c | 5
-rw-r--r-- drivers/net/ixgbe/ixgbe_dcb_82599.h | 3
-rw-r--r-- drivers/net/ixgbe/ixgbe_main.c | 75
-rw-r--r-- drivers/net/jme.c | 49
-rw-r--r-- drivers/net/lib8390.c | 1
-rw-r--r-- drivers/net/macb.c | 27
-rw-r--r-- drivers/net/mlx4/en_main.c | 15
-rw-r--r-- drivers/net/mlx4/en_netdev.c | 10
-rw-r--r-- drivers/net/mlx4/en_port.c | 4
-rw-r--r-- drivers/net/mlx4/en_port.h | 3
-rw-r--r-- drivers/net/mlx4/fw.c | 3
-rw-r--r-- drivers/net/mlx4/icm.c | 28
-rw-r--r-- drivers/net/mlx4/icm.h | 2
-rw-r--r-- drivers/net/mlx4/intf.c | 21
-rw-r--r-- drivers/net/mlx4/main.c | 4
-rw-r--r-- drivers/net/mlx4/mlx4_en.h | 1
-rw-r--r-- drivers/net/mlx4/port.c | 30
-rw-r--r-- drivers/net/netxen/netxen_nic_ctx.c | 15
-rw-r--r-- drivers/net/netxen/netxen_nic_main.c | 11
-rw-r--r-- drivers/net/pch_gbe/pch_gbe_main.c | 6
-rw-r--r-- drivers/net/pch_gbe/pch_gbe_param.c | 8
-rw-r--r-- drivers/net/pcmcia/axnet_cs.c | 30
-rw-r--r-- drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r-- drivers/net/phy/marvell.c | 182
-rw-r--r-- drivers/net/phy/phy.c | 13
-rw-r--r-- drivers/net/phy/phy_device.c | 19
-rw-r--r-- drivers/net/ppp_generic.c | 43
-rw-r--r-- drivers/net/qlcnic/qlcnic.h | 7
-rw-r--r-- drivers/net/qlcnic/qlcnic_ethtool.c | 23
-rw-r--r-- drivers/net/qlcnic/qlcnic_main.c | 20
-rw-r--r-- drivers/net/qlge/qlge.h | 12
-rw-r--r-- drivers/net/qlge/qlge_main.c | 30
-rw-r--r-- drivers/net/qlge/qlge_mpi.c | 6
-rw-r--r-- drivers/net/r8169.c | 12
-rw-r--r-- drivers/net/sb1000.c | 6
-rw-r--r-- drivers/net/sgiseeq.c | 2
-rw-r--r-- drivers/net/skge.c | 1
-rw-r--r-- drivers/net/slhc.c | 15
-rw-r--r-- drivers/net/smsc911x.c | 3
-rw-r--r-- drivers/net/smsc911x.h | 13
-rw-r--r-- drivers/net/stmmac/stmmac_main.c | 44
-rw-r--r-- drivers/net/tg3.c | 10
-rw-r--r-- drivers/net/tokenring/tms380tr.c | 2
-rw-r--r-- drivers/net/tulip/de2104x.c | 1
-rw-r--r-- drivers/net/tulip/dmfe.c | 6
-rw-r--r-- drivers/net/typhoon.c | 92
-rw-r--r-- drivers/net/ucc_geth.c | 25
-rw-r--r-- drivers/net/ucc_geth.h | 3
-rw-r--r-- drivers/net/usb/hso.c | 10
-rw-r--r-- drivers/net/usb/usbnet.c | 11
-rw-r--r-- drivers/net/virtio_net.c | 12
-rw-r--r-- drivers/net/vmxnet3/upt1_defs.h | 8
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_defs.h | 6
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 24
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_ethtool.c | 14
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 27
-rw-r--r-- drivers/net/vxge/vxge-config.c | 332
-rw-r--r-- drivers/net/vxge/vxge-config.h | 227
-rw-r--r-- drivers/net/vxge/vxge-ethtool.c | 2
-rw-r--r-- drivers/net/vxge/vxge-main.c | 64
-rw-r--r-- drivers/net/vxge/vxge-main.h | 59
-rw-r--r-- drivers/net/vxge/vxge-traffic.c | 101
-rw-r--r-- drivers/net/vxge/vxge-traffic.h | 134
-rw-r--r-- drivers/net/wan/x25_asy.c | 11
-rw-r--r-- drivers/net/wireless/rt2x00/Kconfig | 3
-rw-r--r-- drivers/net/xen-netfront.c | 2
-rw-r--r-- drivers/net/xilinx_emaclite.c | 8
138 files changed, 6125 insertions, 2919 deletions
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1da258..0a92436f 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_PCI(dev) NULL
#endif
-#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
+#define VORTEX_PCI(vp) \
+ ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
@@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32)
#define DEVICE_EISA(dev) NULL
#endif
-#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)
+#define VORTEX_EISA(vp) \
+ ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
/* The action to take with a media selection timer tick.
Note that we deviate from the 3Com order by checking 10base2 before AUI.
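[Annotation: the casts above matter because DEVICE_PCI()/DEVICE_EISA() expand to a bare NULL on builds without that bus, so the ternary would otherwise lose its pointer type at call sites. A minimal userspace sketch of the pattern; struct vortex_private is reduced to a toy stand-in here:]

#include <stdio.h>

struct pci_dev { int devfn; };

/* On a non-PCI configuration DEVICE_PCI() degenerates to a bare NULL,
 * so without a cast the ternary below would not yield a typed pointer. */
#define DEVICE_PCI(dev) NULL

/* Toy stand-in for the driver's private struct; only gendev matters. */
struct vortex_private { void *gendev; };

#define VORTEX_PCI(vp) \
	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))

int main(void)
{
	struct vortex_private vp = { .gendev = 0 };
	struct pci_dev *pdev = VORTEX_PCI(&vp); /* assigns cleanly, typed */
	printf("pdev = %p\n", (void *)pdev);
	return 0;
}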
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ac422cd..dd16e83 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status)
{
unsigned int protocol = (status >> 16) & 0x3;
- if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
+ if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+ ((protocol == RxProtoUDP) && !(status & UDPFail)))
return 1;
- else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
- return 1;
- else if ((protocol == RxProtoIP) && (!(status & IPFail)))
- return 1;
- return 0;
+ else
+ return 0;
}
static int cp_rx_poll(struct napi_struct *napi, int budget)
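[Annotation: the rewrite folds the TCP and UDP checks into one predicate and drops the RxProtoIP case, so an IP-header-checksum-only pass no longer claims the L4 checksum was verified. A runnable truth-table sketch of the new logic; the bit encodings below are illustrative, not the real RTL8139C+ status layout:]

#include <stdio.h>

/* Illustrative values only -- not the actual 8139cp status encoding. */
enum { RxProtoIP = 1, RxProtoTCP = 2, RxProtoUDP = 3 };
#define TCPFail (1u << 0)
#define UDPFail (1u << 1)

static unsigned int cp_rx_csum_ok(unsigned int protocol, unsigned int status)
{
	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
		return 1;
	return 0;
}

int main(void)
{
	printf("TCP, no fail bit : %u\n", cp_rx_csum_ok(RxProtoTCP, 0));
	printf("TCP, TCPFail set : %u\n", cp_rx_csum_ok(RxProtoTCP, TCPFail));
	printf("UDP, no fail bit : %u\n", cp_rx_csum_ok(RxProtoUDP, 0));
	printf("IP-only checksum : %u\n", cp_rx_csum_ok(RxProtoIP, 0));
	return 0;
}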
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 77c1fab..a445a04 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -883,14 +883,6 @@ config BFIN_RX_DESC_NUM
help
Set the number of buffer packets used in driver.
-config BFIN_MAC_RMII
- bool "RMII PHY Interface"
- depends on BFIN_MAC
- default y if BFIN527_EZKIT
- default n if BFIN537_STAMP
- help
- Use Reduced PHY MII Interface
-
config BFIN_MAC_USE_HWSTAMP
bool "Use IEEE 1588 hwstamp"
depends on BFIN_MAC && BF518
@@ -954,6 +946,8 @@ config NET_NETX
config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select TI_DAVINCI_MDIO
+ select TI_DAVINCI_CPDMA
select PHYLIB
help
This driver supports TI's DaVinci Ethernet .
@@ -961,6 +955,25 @@ config TI_DAVINCI_EMAC
To compile this driver as a module, choose M here: the module
will be called davinci_emac_driver. This is recommended.
+config TI_DAVINCI_MDIO
+ tristate "TI DaVinci MDIO Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ select PHYLIB
+ help
+ This driver supports TI's DaVinci MDIO module.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_mdio. This is recommended.
+
+config TI_DAVINCI_CPDMA
+ tristate "TI DaVinci CPDMA Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+ help
+ This driver supports TI's DaVinci CPDMA dma engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_cpdma. This is recommended.
+
config DM9000
tristate "DM9000 support"
depends on ARM || BLACKFIN || MIPS
@@ -1028,7 +1041,7 @@ config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
select MII
- depends on ARM || SUPERH
+ depends on ARM || SUPERH || MN10300
help
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1042,7 +1055,7 @@ config SMC911X
config SMSC911X
tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
- depends on ARM || SUPERH || BLACKFIN || MIPS
+ depends on ARM || SUPERH || BLACKFIN || MIPS || MN10300
select CRC32
select MII
select PHYLIB
@@ -1054,6 +1067,14 @@ config SMSC911X
<file:Documentation/networking/net-modules.txt>. The module
will be called smsc911x.
+config SMSC911X_ARCH_HOOKS
+ def_bool n
+ depends on SMSC911X
+ help
+ If the arch enables this, it allows the arch to implement various
+ hooks for more comprehensive interrupt control and also to override
+ the source of the MAC address.
+
config NET_VENDOR_RACAL
bool "Racal-Interlan (Micom) NI cards"
depends on ISA
@@ -2520,11 +2541,12 @@ source "drivers/net/stmmac/Kconfig"
config PCH_GBE
tristate "PCH Gigabit Ethernet"
depends on PCI
+ select MII
---help---
- This is a gigabit ethernet driver for Topcliff PCH.
- Topcliff PCH is the platform controller hub that is used in Intel's
+ This is a gigabit ethernet driver for EG20T PCH.
+ EG20T PCH is the platform controller hub that is used in Intel's
general embedded platform.
- Topcliff PCH has Gigabit Ethernet interface.
+ EG20T PCH has Gigabit Ethernet interface.
Using this interface, it is able to access system devices connected
to Gigabit Ethernet.
This driver enables Gigabit Ethernet function.
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b8bf93d..652fc6b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 3134e53..8cb27cb 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -407,7 +407,7 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag,
int writeflag)
{
int ret;
- long flags;
+ unsigned long flags;
long *vbr, save_berr;
local_irq_save(flags);
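[Annotation: the fix matters because local_irq_save() is a macro that stores the architecture's saved interrupt state into its argument, and the API requires that argument to be an unsigned long. A sketch of the canonical idiom; kernel-only API, not buildable standalone:]

/* Canonical interrupt-save idiom; 'flags' must be unsigned long. */
unsigned long flags;

local_irq_save(flags);		/* macro writes saved state into flags */
/* ... critical section that must not be interrupted ... */
local_irq_restore(flags);	/* restores exactly what was saved */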
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index ef4115b..9ab5809 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -631,8 +631,6 @@ struct atl1c_adapter {
extern char atl1c_driver_name[];
extern char atl1c_driver_version[];
-extern int atl1c_up(struct atl1c_adapter *adapter);
-extern void atl1c_down(struct atl1c_adapter *adapter);
extern void atl1c_reinit_locked(struct atl1c_adapter *adapter);
extern s32 atl1c_reset_hw(struct atl1c_hw *hw);
extern void atl1c_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 919080b..1bf6720 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
addr[0] = addr[1] = 0;
AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
if (atl1c_check_eeprom_exist(hw)) {
- if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
/* Enable OTP CLK */
if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
otp_ctrl_data |= OTP_CTRL_CLK_EN;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 99ffcf6..09b099b 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -66,6 +66,8 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup);
static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter);
static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
static const u16 atl1c_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
@@ -2309,7 +2311,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
return err;
}
-int atl1c_up(struct atl1c_adapter *adapter)
+static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
@@ -2351,7 +2353,7 @@ err_alloc_rx:
return err;
}
-void atl1c_down(struct atl1c_adapter *adapter)
+static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index dbd27b8..5336310 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -91,6 +91,8 @@ MODULE_VERSION(ATLX_DRIVER_VERSION);
/* Temporary hack for merging atl1 and atl2 */
#include "atlx.c"
+static const struct ethtool_ops atl1_ethtool_ops;
+
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
@@ -353,7 +355,7 @@ static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
*/
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
u32 val;
int i;
@@ -553,7 +555,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
* 1. calcu 32bit CRC for multicast address
* 2. reverse crc with MSB to LSB
*/
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
u32 crc32, value = 0;
int i;
@@ -570,7 +572,7 @@ u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*/
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
@@ -914,7 +916,7 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
return 0;
}
-void atl1_set_mac_addr(struct atl1_hw *hw)
+static void atl1_set_mac_addr(struct atl1_hw *hw)
{
u32 value;
/*
@@ -3041,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
atl1_pcie_patch(adapter);
/* assume we have no link for now */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
setup_timer(&adapter->phy_config_timer, atl1_phy_config,
(unsigned long)adapter);
@@ -3658,7 +3659,7 @@ static int atl1_nway_reset(struct net_device *netdev)
return 0;
}
-const struct ethtool_ops atl1_ethtool_ops = {
+static const struct ethtool_ops atl1_ethtool_ops = {
.get_settings = atl1_get_settings,
.set_settings = atl1_set_settings,
.get_drvinfo = atl1_get_drvinfo,
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index 9c0ddb2..68de8cb 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -56,16 +56,13 @@ struct atl1_adapter;
struct atl1_hw;
/* function prototypes needed by multiple files */
-u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
-void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
-s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
-void atl1_set_mac_addr(struct atl1_hw *hw);
+static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+static void atl1_set_mac_addr(struct atl1_hw *hw);
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static u32 atl1_check_link(struct atl1_adapter *adapter);
-extern const struct ethtool_ops atl1_ethtool_ops;
-
/* hardware definitions specific to L1 */
/* Block IDLE Status Register */
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index f979ea2..afb7f7d 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -41,6 +41,10 @@
#include "atlx.h"
+static s32 atlx_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+static u32 atlx_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+static void atlx_set_mac_addr(struct atl1_hw *hw);
+
static struct atlx_spi_flash_dev flash_table[] = {
/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */
{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 43489f8..53eff9b 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -155,10 +155,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
spin_lock_irqsave(&aup->lock, flags);
if (force_reset || (!aup->mac_enabled)) {
- writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
au_sync_delay(2);
writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
- | MAC_EN_CLOCK_ENABLE), &aup->enable);
+ | MAC_EN_CLOCK_ENABLE), aup->enable);
au_sync_delay(2);
aup->mac_enabled = 1;
@@ -503,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
au1000_hard_stop(dev);
- writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+ writel(MAC_EN_CLOCK_ENABLE, aup->enable);
au_sync_delay(2);
- writel(0, &aup->enable);
+ writel(0, aup->enable);
au_sync_delay(2);
aup->tx_full = 0;
@@ -1119,7 +1119,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
/* set a random MAC now in case platform_data doesn't provide one */
random_ether_addr(dev->dev_addr);
- writel(0, &aup->enable);
+ writel(0, aup->enable);
aup->mac_enabled = 0;
pd = pdev->dev.platform_data;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c6e8631..2e2b762 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -381,11 +381,11 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
__b44_set_flow_ctrl(bp, pause_enab);
}
-#ifdef SSB_DRIVER_MIPS
-extern char *nvram_get(char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
- const char *str;
+ char buf[20];
u32 val;
int err;
@@ -394,10 +394,9 @@ static void b44_wap54g10_workaround(struct b44 *bp)
* see https://dev.openwrt.org/ticket/146
* check and reset bit "isolate"
*/
- str = nvram_get("boardnum");
- if (!str)
+ if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
return;
- if (simple_strtoul(str, NULL, 0) == 2) {
+ if (simple_strtoul(buf, NULL, 0) == 2) {
err = __b44_readphy(bp, 0, MII_BMCR, &val);
if (err)
goto error;
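[Annotation: the workaround switches from nvram_get(), which returned a pointer into NVRAM, to nvram_getenv(), which copies the value into a caller-supplied buffer and returns a negative value on failure; the signature is inferred from the patch context. The calling pattern, sketched; helper names other than nvram_getenv()/simple_strtoul() are hypothetical:]

char buf[20];

if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
	return;				/* variable missing: nothing to do */
if (simple_strtoul(buf, NULL, 0) == 2)
	apply_wap54g10_phy_fixup();	/* hypothetical fixup helper */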
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 1e7f305..36eca1c 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1471,42 +1471,6 @@ err:
return status;
}
-/* Uses sync mcc */
-int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
- u8 *connector)
-{
- struct be_mcc_wrb *wrb;
- struct be_cmd_req_port_type *req;
- int status;
-
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
- be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
- OPCODE_COMMON_READ_TRANSRECV_DATA);
-
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
-
- req->port = cpu_to_le32(port);
- req->page_num = cpu_to_le32(TR_PAGE_A0);
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
- *connector = resp->data.connector;
- }
-
-err:
- spin_unlock_bh(&adapter->mcc_lock);
- return status;
-}
-
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index c7f6cdf..8469ff0 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1022,8 +1022,6 @@ extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
u8 port_num, u32 *state);
-extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
- u8 *connector);
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 45b1f66..93354ee 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -849,20 +849,16 @@ static void be_rx_stats_update(struct be_rx_obj *rxo,
stats->rx_mcast_pkts++;
}
-static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
+static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
- u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
+ u8 l4_cksm, ipv6, ipcksm;
l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
- ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
- if (ip_version) {
- tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
- udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
- }
- ipv6_chk = (ip_version && (tcpf || udpf));
+ ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
- return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
+ /* Ignore ipcksm for ipv6 pkts */
+ return l4_cksm && (ipcksm || ipv6);
}
static struct be_rx_page_info *
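[Annotation: csum_passed() now reports success when the L4 checksum verified and either the IP checksum also verified or the packet is IPv6, which carries no IP header checksum to validate. A runnable sketch of the predicate; flag extraction is reduced to plain parameters here, where the real driver uses the AMAP_GET_BITS descriptor accessor:]

#include <stdio.h>

/* l4_cksm: L4 checksum verified; ipcksm: IP checksum verified;
 * ipv6: the ip_version field flagged the packet as IPv6. */
static int csum_passed(int l4_cksm, int ipcksm, int ipv6)
{
	/* Ignore ipcksm for ipv6 pkts -- IPv6 has no header checksum */
	return l4_cksm && (ipcksm || ipv6);
}

int main(void)
{
	printf("v4, both ok : %d\n", csum_passed(1, 1, 0));
	printf("v4, bad ip  : %d\n", csum_passed(1, 0, 0));
	printf("v6, l4 ok   : %d\n", csum_passed(1, 0, 1));
	printf("v6, bad l4  : %d\n", csum_passed(0, 0, 1));
	return 0;
}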
@@ -1017,10 +1013,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);
- if (do_pkt_csum(rxcp, adapter->rx_csum))
- skb_checksum_none_assert(skb);
- else
+ if (likely(adapter->rx_csum && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
skb->truesize = skb->len + sizeof(struct sk_buff);
skb->protocol = eth_type_trans(skb, adapter->netdev);
@@ -1674,7 +1670,7 @@ static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
return (tcp_frame && !err) ? true : false;
}
-int be_poll_rx(struct napi_struct *napi, int budget)
+static int be_poll_rx(struct napi_struct *napi, int budget)
{
struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
@@ -1806,6 +1802,20 @@ static void be_worker(struct work_struct *work)
struct be_rx_obj *rxo;
int i;
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+ goto reschedule;
+ }
+
if (!adapter->stats_ioctl_sent)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
@@ -1824,6 +1834,7 @@ static void be_worker(struct work_struct *work)
if (!adapter->ue_detected)
be_detect_dump_ue(adapter);
+reschedule:
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
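[Annotation: with this patch the worker runs for the whole lifetime of the adapter (probe schedules it, remove cancels it), so it must tolerate running while the interface is down; in that state it only drains pending MCC completions and rearms itself. The self-rescheduling delayed-work shape, sketched; kernel API, logic abridged, not buildable standalone:]

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	if (!netif_running(adapter->netdev)) {
		/* interrupts off: just reap pending MCC completions */
		goto reschedule;
	}

	/* ... normal periodic work: stats, rx refill, UE detection ... */

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}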
@@ -2019,8 +2030,6 @@ static int be_close(struct net_device *netdev)
struct be_eq_obj *tx_eq = &adapter->tx_eq;
int vec, i;
- cancel_delayed_work_sync(&adapter->work);
-
be_async_mcc_disable(adapter);
netif_stop_queue(netdev);
@@ -2085,8 +2094,6 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
-
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
@@ -2299,9 +2306,6 @@ static int be_clear(struct be_adapter *adapter)
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-char flash_cookie[2][16] = {"*** SE FLAS",
- "H DIRECTORY *** "};
-
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
int hdr_size)
@@ -2454,6 +2458,12 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
int status, i = 0, num_imgs = 0;
const u8 *p;
+ if (!netif_running(adapter->netdev)) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware load not allowed (interface is down)\n");
+ return -EPERM;
+ }
+
strcpy(fw_file, func);
status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
@@ -2559,7 +2569,6 @@ static void be_netdev_init(struct net_device *netdev)
netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
BE_NAPI_WEIGHT);
- netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
@@ -2715,6 +2724,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
if (!adapter)
return;
+ cancel_delayed_work_sync(&adapter->work);
+
unregister_netdev(adapter->netdev);
be_clear(adapter);
@@ -2868,8 +2879,10 @@ static int __devinit be_probe(struct pci_dev *pdev,
status = register_netdev(netdev);
if (status != 0)
goto unsetup;
+ netif_carrier_off(netdev);
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
return 0;
unsetup:
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index f723319..ce1e5e9 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -1,7 +1,7 @@
/*
* Blackfin On-Chip MAC Driver
*
- * Copyright 2004-2007 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
*
* Enter bugs at http://blackfin.uclinux.org/
*
@@ -23,7 +23,6 @@
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
-#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -76,12 +75,6 @@ static struct net_dma_desc_tx *current_tx_ptr;
static struct net_dma_desc_tx *tx_desc;
static struct net_dma_desc_rx *rx_desc;
-#if defined(CONFIG_BFIN_MAC_RMII)
-static u16 pin_req[] = P_RMII0;
-#else
-static u16 pin_req[] = P_MII0;
-#endif
-
static void desc_list_free(void)
{
struct net_dma_desc_rx *r;
@@ -347,23 +340,23 @@ static void bfin_mac_adjust_link(struct net_device *dev)
}
if (phydev->speed != lp->old_speed) {
-#if defined(CONFIG_BFIN_MAC_RMII)
- u32 opmode = bfin_read_EMAC_OPMODE();
- switch (phydev->speed) {
- case 10:
- opmode |= RMII_10;
- break;
- case 100:
- opmode &= ~(RMII_10);
- break;
- default:
- printk(KERN_WARNING
- "%s: Ack! Speed (%d) is not 10/100!\n",
- DRV_NAME, phydev->speed);
- break;
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ u32 opmode = bfin_read_EMAC_OPMODE();
+ switch (phydev->speed) {
+ case 10:
+ opmode |= RMII_10;
+ break;
+ case 100:
+ opmode &= ~RMII_10;
+ break;
+ default:
+ printk(KERN_WARNING
+ "%s: Ack! Speed (%d) is not 10/100!\n",
+ DRV_NAME, phydev->speed);
+ break;
+ }
+ bfin_write_EMAC_OPMODE(opmode);
}
- bfin_write_EMAC_OPMODE(opmode);
-#endif
new_state = 1;
lp->old_speed = phydev->speed;
@@ -392,7 +385,7 @@ static void bfin_mac_adjust_link(struct net_device *dev)
/* MDC = 2.5 MHz */
#define MDC_CLK 2500000
-static int mii_probe(struct net_device *dev)
+static int mii_probe(struct net_device *dev, int phy_mode)
{
struct bfin_mac_local *lp = netdev_priv(dev);
struct phy_device *phydev = NULL;
@@ -411,8 +404,8 @@ static int mii_probe(struct net_device *dev)
sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
bfin_write_EMAC_SYSCTL(sysctl);
- /* search for connect PHY device */
- for (i = 0; i < PHY_MAX_ADDR; i++) {
+ /* search for connected PHY device */
+ for (i = 0; i < PHY_MAX_ADDR; ++i) {
struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];
if (!tmp_phydev)
@@ -429,13 +422,14 @@ static int mii_probe(struct net_device *dev)
return -ENODEV;
}
-#if defined(CONFIG_BFIN_MAC_RMII)
- phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
- 0, PHY_INTERFACE_MODE_RMII);
-#else
+ if (phy_mode != PHY_INTERFACE_MODE_RMII &&
+ phy_mode != PHY_INTERFACE_MODE_MII) {
+ printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+ return -EINVAL;
+ }
+
phydev = phy_connect(dev, dev_name(&phydev->dev), &bfin_mac_adjust_link,
- 0, PHY_INTERFACE_MODE_MII);
-#endif
+ 0, phy_mode);
if (IS_ERR(phydev)) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
@@ -570,6 +564,8 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
/**************************************************************************/
void setup_system_regs(struct net_device *dev)
{
+ struct bfin_mac_local *lp = netdev_priv(dev);
+ int i;
unsigned short sysctl;
/*
@@ -577,6 +573,15 @@ void setup_system_regs(struct net_device *dev)
* Configure checksum support and rcve frame word alignment
*/
sysctl = bfin_read_EMAC_SYSCTL();
+ /*
+ * check if interrupt is requested for any PHY,
+ * enable PHY interrupt only if needed
+ */
+ for (i = 0; i < PHY_MAX_ADDR; ++i)
+ if (lp->mii_bus->irq[i] != PHY_POLL)
+ break;
+ if (i < PHY_MAX_ADDR)
+ sysctl |= PHYIE;
sysctl |= RXDWA;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
sysctl |= RXCKS;
@@ -1203,7 +1208,7 @@ static void bfin_mac_disable(void)
/*
* Enable Interrupts, Receive, and Transmit
*/
-static int bfin_mac_enable(void)
+static int bfin_mac_enable(struct phy_device *phydev)
{
int ret;
u32 opmode;
@@ -1233,12 +1238,13 @@ static int bfin_mac_enable(void)
opmode |= DRO | DC | PSF;
opmode |= RE;
-#if defined(CONFIG_BFIN_MAC_RMII)
- opmode |= RMII; /* For Now only 100MBit are supported */
+ if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
+ opmode |= RMII; /* For Now only 100MBit are supported */
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) && CONFIG_BF_REV_0_2
- opmode |= TE;
-#endif
+ opmode |= TE;
#endif
+ }
+
/* Turn on the EMAC rx */
bfin_write_EMAC_OPMODE(opmode);
@@ -1270,7 +1276,7 @@ static void bfin_mac_timeout(struct net_device *dev)
if (netif_queue_stopped(lp->ndev))
netif_wake_queue(lp->ndev);
- bfin_mac_enable();
+ bfin_mac_enable(lp->phydev);
/* We can accept TX packets again */
dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1342,11 +1348,19 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
+ struct bfin_mac_local *lp = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
switch (cmd) {
case SIOCSHWTSTAMP:
return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
default:
- return -EOPNOTSUPP;
+ if (lp->phydev)
+ return phy_mii_ioctl(lp->phydev, ifr, cmd);
+ else
+ return -EOPNOTSUPP;
}
}
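[Annotation: the ioctl handler now rejects requests while the interface is down and, for anything other than the hardware-timestamp command, hands the request to the attached PHY instead of failing outright. The delegation shape, sketched; kernel API, not buildable standalone, and 'struct priv'/handle_hwtstamp() are stand-ins:]

static int example_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct priv *lp = netdev_priv(dev);	/* 'struct priv' is a stand-in */

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return handle_hwtstamp(dev, ifr, cmd);	/* hypothetical */
	default:
		/* phy_mii_ioctl() is the generic PHY-layer handler */
		return lp->phydev ? phy_mii_ioctl(lp->phydev, ifr, cmd)
				  : -EOPNOTSUPP;
	}
}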
@@ -1394,7 +1408,7 @@ static int bfin_mac_open(struct net_device *dev)
setup_mac_addr(dev->dev_addr);
bfin_mac_disable();
- ret = bfin_mac_enable();
+ ret = bfin_mac_enable(lp->phydev);
if (ret)
return ret;
pr_debug("hardware init finished\n");
@@ -1450,6 +1464,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
struct net_device *ndev;
struct bfin_mac_local *lp;
struct platform_device *pd;
+ struct bfin_mii_bus_platform_data *mii_bus_data;
int rc;
ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
@@ -1501,11 +1516,12 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
if (!lp->mii_bus) {
dev_err(&pdev->dev, "Cannot get mii_bus!\n");
rc = -ENODEV;
- goto out_err_mii_bus_probe;
+ goto out_err_probe_mac;
}
lp->mii_bus->priv = ndev;
+ mii_bus_data = pd->dev.platform_data;
- rc = mii_probe(ndev);
+ rc = mii_probe(ndev, mii_bus_data->phy_mode);
if (rc) {
dev_err(&pdev->dev, "MII Probe failed!\n");
goto out_err_mii_probe;
@@ -1552,8 +1568,6 @@ out_err_request_irq:
out_err_mii_probe:
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
-out_err_mii_bus_probe:
- peripheral_free_list(pin_req);
out_err_probe_mac:
platform_set_drvdata(pdev, NULL);
free_netdev(ndev);
@@ -1576,8 +1590,6 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
free_netdev(ndev);
- peripheral_free_list(pin_req);
-
return 0;
}
@@ -1623,12 +1635,21 @@ static int bfin_mac_resume(struct platform_device *pdev)
static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
{
struct mii_bus *miibus;
+ struct bfin_mii_bus_platform_data *mii_bus_pd;
+ const unsigned short *pin_req;
int rc, i;
+ mii_bus_pd = dev_get_platdata(&pdev->dev);
+ if (!mii_bus_pd) {
+ dev_err(&pdev->dev, "No peripherals in platform data!\n");
+ return -EINVAL;
+ }
+
/*
* We are setting up a network card,
* so set the GPIO pins to Ethernet mode
*/
+ pin_req = mii_bus_pd->mac_peripherals;
rc = peripheral_request_list(pin_req, DRV_NAME);
if (rc) {
dev_err(&pdev->dev, "Requesting peripherals failed!\n");
@@ -1645,13 +1666,30 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
miibus->parent = &pdev->dev;
miibus->name = "bfin_mii_bus";
+ miibus->phy_mask = mii_bus_pd->phy_mask;
+
snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
- if (miibus->irq == NULL)
- goto out_err_alloc;
- for (i = 0; i < PHY_MAX_ADDR; ++i)
+ if (!miibus->irq)
+ goto out_err_irq_alloc;
+
+ for (i = rc; i < PHY_MAX_ADDR; ++i)
miibus->irq[i] = PHY_POLL;
+ rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
+ if (rc != mii_bus_pd->phydev_number)
+ dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
+ mii_bus_pd->phydev_number);
+ for (i = 0; i < rc; ++i) {
+ unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
+ if (phyaddr < PHY_MAX_ADDR)
+ miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
+ else
+ dev_err(&pdev->dev,
+ "Invalid PHY address %i for phydev %i\n",
+ phyaddr, i);
+ }
+
rc = mdiobus_register(miibus);
if (rc) {
dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
@@ -1663,6 +1701,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
out_err_mdiobus_register:
kfree(miibus->irq);
+out_err_irq_alloc:
mdiobus_free(miibus);
out_err_alloc:
peripheral_free_list(pin_req);
@@ -1673,11 +1712,15 @@ out_err_alloc:
static int __devexit bfin_mii_bus_remove(struct platform_device *pdev)
{
struct mii_bus *miibus = platform_get_drvdata(pdev);
+ struct bfin_mii_bus_platform_data *mii_bus_pd =
+ dev_get_platdata(&pdev->dev);
+
platform_set_drvdata(pdev, NULL);
mdiobus_unregister(miibus);
kfree(miibus->irq);
mdiobus_free(miibus);
- peripheral_free_list(pin_req);
+ peripheral_free_list(mii_bus_pd->mac_peripherals);
+
return 0;
}
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 04e4050..aed68be 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -14,6 +14,8 @@
#include <linux/clocksource.h>
#include <linux/timecompare.h>
#include <linux/timer.h>
+#include <linux/etherdevice.h>
+#include <linux/bfin_mac.h>
#define BFIN_MAC_CSUM_OFFLOAD
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9571ecf..863e73a 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.60.00-3"
-#define DRV_MODULE_RELDATE "2010/10/19"
+#define DRV_MODULE_VERSION "1.60.00-4"
+#define DRV_MODULE_RELDATE "2010/11/01"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@@ -1288,15 +1288,11 @@ struct bnx2x_func_init_params {
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags);
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
u32 len32);
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len);
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
@@ -1307,7 +1303,6 @@ int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index bc58375..94d5f59 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -25,6 +25,7 @@
#include "bnx2x_init.h"
+static int bnx2x_setup_irqs(struct bnx2x *bp);
/* free skb in the packet ring at pos idx
* return idx of last bd freed
@@ -1679,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
rc = XMIT_PLAIN;
else {
- if (skb->protocol == htons(ETH_P_IPV6)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
rc = XMIT_CSUM_V6;
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
rc |= XMIT_CSUM_TCP;
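[Annotation: skb->protocol holds ETH_P_8021Q for VLAN-tagged frames, so testing it directly misclassifies tagged IPv6 traffic; vlan_get_protocol() returns the encapsulated EtherType instead. A runnable userspace sketch of the distinction, parsing raw frame bytes; a simplified model of what the helper does, with constants mirroring if_ether.h/if_vlan.h:]

#include <stdio.h>
#include <stdint.h>

#define ETH_P_8021Q 0x8100
#define ETH_P_IPV6  0x86DD

/* Return the payload EtherType, looking through one 802.1Q tag if present. */
static uint16_t frame_protocol(const uint8_t *eth_hdr)
{
	uint16_t proto = (uint16_t)((eth_hdr[12] << 8) | eth_hdr[13]);

	if (proto == ETH_P_8021Q)	/* skip the 4-byte VLAN tag */
		proto = (uint16_t)((eth_hdr[16] << 8) | eth_hdr[17]);
	return proto;
}

int main(void)
{
	/* dst(6) + src(6) + 0x8100 tag + TCI(2) + inner type 0x86DD */
	uint8_t tagged[18] = { [12] = 0x81, [13] = 0x00,
			       [16] = 0x86, [17] = 0xDD };

	printf("inner EtherType: 0x%04X\n", frame_protocol(tagged));
	return 0;
}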
@@ -2187,7 +2188,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
}
-int bnx2x_setup_irqs(struct bnx2x *bp)
+static int bnx2x_setup_irqs(struct bnx2x *bp)
{
int rc = 0;
if (bp->flags & USING_MSIX_FLAG) {
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 5bfe0ab..6b28739 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -117,13 +117,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
void bnx2x_int_enable(struct bnx2x *bp);
/**
- * Disable HW interrupts.
- *
- * @param bp
- */
-void bnx2x_int_disable(struct bnx2x *bp);
-
-/**
* Disable interrupts. This function ensures that there are no
* ISRs or SP DPCs (sp_task) are running after it returns.
*
@@ -192,17 +185,6 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int is_leading);
/**
- * Bring down an eth client.
- *
- * @param bp
- * @param p
- *
- * @return int
- */
-int bnx2x_stop_fw_client(struct bnx2x *bp,
- struct bnx2x_client_ramrod_params *p);
-
-/**
* Set number of queues according to mode
*
* @param bp
@@ -250,34 +232,6 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
-#ifdef BCM_CNIC
-/**
- * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). The function will wait until the ramrod completion
- * returns.
- *
- * @param bp driver handle
- * @param set set or clear the CAM entry
- *
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
- */
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);
-#endif
-
-/**
- * Initialize status block in FW and HW
- *
- * @param bp driver handle
- * @param dma_addr_t mapping
- * @param int sb_id
- * @param int vfid
- * @param u8 vf_valid
- * @param int fw_sb_id
- * @param int igu_sb_id
- */
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
- u8 vf_valid, int fw_sb_id, int igu_sb_id);
-
/**
* Set MAC filtering configurations.
*
@@ -326,7 +280,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
* @return int
*/
int bnx2x_func_start(struct bnx2x *bp);
-int bnx2x_func_stop(struct bnx2x *bp);
/**
* Prepare ILT configurations according to current driver
@@ -396,14 +349,6 @@ int bnx2x_enable_msix(struct bnx2x *bp);
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * Request IRQ vectors from OS.
- *
- * @param bp
- *
- * @return int
- */
-int bnx2x_setup_irqs(struct bnx2x *bp);
-/**
* NAPI callback
*
* @param napi
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23..4cfd4e9 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
u16 xgxs_config_tx[4]; /* 0x1A0 */
- u32 Reserved1[57]; /* 0x1A8 */
+ u32 Reserved1[56]; /* 0x1A8 */
+ u32 default_cfg; /* 0x288 */
+ /* Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000
+
u32 speed_capability_mask2; /* 0x28C */
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF
#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0
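[Annotation: the new default_cfg word is carved out of Reserved1, which shrinks from 57 to 56 dwords so every later offset stays fixed, and the BAM-on-KR knob is exposed as a mask/shift pair. A runnable sketch of the usual mask/shift field extraction:]

#include <stdio.h>
#include <stdint.h>

#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK	0x00100000
#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT	20

int main(void)
{
	uint32_t default_cfg = 0x00100000;	/* e.g. value read from shmem */
	uint32_t bam_on_kr = (default_cfg & PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK)
				>> PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT;

	printf("BAM on KR %s\n", bam_on_kr ? "enabled" : "disabled");
	return 0;
}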
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index e65de78..a306b0e 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -16,7 +16,9 @@
#define BNX2X_INIT_OPS_H
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
-
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len);
static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
@@ -589,7 +591,7 @@ static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
return rc;
}
-int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
{
int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
if (!rc)
@@ -635,7 +637,7 @@ static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
}
}
-void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
struct ilt_client_info *ilt_cli,
u32 ilt_start, u8 initop)
{
@@ -688,8 +690,10 @@ void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
}
}
-void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
- struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+ struct bnx2x_ilt *ilt,
+ struct ilt_client_info *ilt_cli,
+ u8 initop)
{
int i;
@@ -703,8 +707,8 @@ void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp, struct bnx2x_ilt *ilt,
bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
}
-void bnx2x_ilt_client_init_op(struct bnx2x *bp,
- struct ilt_client_info *ilt_cli, u8 initop)
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+ struct ilt_client_info *ilt_cli, u8 initop)
{
struct bnx2x_ilt *ilt = BP_ILT(bp);
@@ -720,7 +724,7 @@ static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
}
-void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
@@ -752,7 +756,7 @@ static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
* called during init common stage, ilt clients should be initialized
* prioir to calling this function
*/
-void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
{
bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
PXP2_REG_RQ_CDU_P_SIZE, initop);
@@ -772,8 +776,8 @@ void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
#define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
/* called during init port stage */
-void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
- u8 initop)
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
{
int port = BP_PORT(bp);
@@ -814,8 +818,8 @@ static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
}
/* called during init common stage */
-void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
- u8 initop)
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u8 initop)
{
if (!QM_INIT(qm_cid_count))
return;
@@ -836,8 +840,8 @@ void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
****************************************************************************/
/* called during init func stage */
-void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
- dma_addr_t t2_mapping, int src_cid_count)
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+ dma_addr_t t2_mapping, int src_cid_count)
{
int i;
int port = BP_PORT(bp);
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 3e99bf9..5809196 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -181,6 +181,12 @@
(_bank + (_addr & 0xf)), \
_val)
+static u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 *ret_val);
+
+static u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+ u8 devad, u16 reg, u16 val);
+
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
u32 val = REG_RD(bp, reg);
@@ -594,7 +600,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
return 0;
}
-u8 bnx2x_bmac_enable(struct link_params *params,
+static u8 bnx2x_bmac_enable(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
{
@@ -604,7 +610,7 @@ u8 bnx2x_bmac_enable(struct link_params *params,
/* reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- udelay(10);
+ msleep(1);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -2537,122 +2543,6 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
}
}
-/*
- *------------------------------------------------------------------------
- * bnx2x_override_led_value -
- *
- * Override the led value of the requested led
- *
- *------------------------------------------------------------------------
- */
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
- u32 led_idx, u32 value)
-{
- u32 reg_val;
-
- /* If port 0 then use EMAC0, else use EMAC1*/
- u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
-
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() port %x led_idx %d value %d\n",
- port, led_idx, value);
-
- switch (led_idx) {
- case 0: /* 10MB led */
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 10M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_10MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_10MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 1: /*100MB led */
- /*Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 100M_OVERRIDE bit,
- otherwise reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_100MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_100MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 2: /* 1000MB led */
- /* Read the current value of the LED register in the
- EMAC block */
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 1000M_OVERRIDE bit, otherwise
- reset it. */
- reg_val = (value == 1) ? (reg_val | EMAC_LED_1000MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_1000MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 3: /* 2500MB led */
- /* Read the current value of the LED register in the
- EMAC block*/
- reg_val = REG_RD(bp, emac_base + EMAC_REG_EMAC_LED);
- /* Set the OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the 2500M_OVERRIDE bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_2500MB_OVERRIDE) :
- (reg_val & ~EMAC_LED_2500MB_OVERRIDE);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- break;
- case 4: /*10G led */
- if (port == 0) {
- REG_WR(bp, NIG_REG_LED_10G_P0,
- value);
- } else {
- REG_WR(bp, NIG_REG_LED_10G_P1,
- value);
- }
- break;
- case 5: /* TRAFFIC led */
- /* Find if the traffic control is via BMAC or EMAC */
- if (port == 0)
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC0_EN);
- else
- reg_val = REG_RD(bp, NIG_REG_NIG_EMAC1_EN);
-
- /* Override the traffic led in the EMAC:*/
- if (reg_val == 1) {
- /* Read the current value of the LED register in
- the EMAC block */
- reg_val = REG_RD(bp, emac_base +
- EMAC_REG_EMAC_LED);
- /* Set the TRAFFIC_OVERRIDE bit to 1 */
- reg_val |= EMAC_LED_OVERRIDE;
- /* If value is 1, set the TRAFFIC bit, otherwise
- reset it.*/
- reg_val = (value == 1) ? (reg_val | EMAC_LED_TRAFFIC) :
- (reg_val & ~EMAC_LED_TRAFFIC);
- REG_WR(bp, emac_base + EMAC_REG_EMAC_LED, reg_val);
- } else { /* Override the traffic led in the BMAC: */
- REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
- + port*4, 1);
- REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4,
- value);
- }
- break;
- default:
- DP(NETIF_MSG_LINK,
- "bnx2x_override_led_value() unknown led index %d "
- "(should be 0-5)\n", led_idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
u8 bnx2x_set_led(struct link_params *params,
struct link_vars *vars, u8 mode, u32 speed)
{
@@ -3635,13 +3525,19 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
/* Enable CL37 BAM */
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, &val);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_8073_BAM, val | 1);
+ if (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg)) &
+ PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, &val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8073_BAM, val | 1);
+ DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+ }
if (params->loopback_mode == LOOPBACK_EXT) {
bnx2x_807x_force_10G(bp, phy);
DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -4099,9 +3995,9 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
return -EINVAL;
}
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf)
+static u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+ struct link_params *params, u16 addr,
+ u8 byte_cnt, u8 *o_buf)
{
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726)
return bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -5412,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val;
- bnx2x_wait_reset_complete(bp, phy);
+
bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
1 << NIG_LATCH_BC_ENABLE_MI_INT);
@@ -5541,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy,
/* HW reset */
bnx2x_ext_phy_hw_reset(bp, params->port);
+ bnx2x_wait_reset_complete(bp, phy);
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5551,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port, initialize = 1;
+ u8 port, initialize = 1;
u16 val;
u16 temp;
u32 actual_phy_selection;
@@ -5560,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
msleep(1);
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(200); /* 100 is not enough */
-
+ bnx2x_wait_reset_complete(bp, phy);
+ /* Wait for GPHY to come out of reset */
+ msleep(50);
/* BCM84823 requires that XGXS links up first @ 10G for normal
behavior */
temp = vars->line_speed;
@@ -5735,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
struct bnx2x *bp = params->bp;
- u8 port = params->port;
+ u8 port;
+ if (CHIP_IS_E2(bp))
+ port = BP_PATH(bp);
+ else
+ port = params->port;
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
@@ -6819,13 +6725,6 @@ u8 bnx2x_phy_probe(struct link_params *params)
return 0;
}
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx)
-{
- if (phy_idx < params->num_phys)
- return params->phy[phy_idx].supported;
- return 0;
-}
-
static void set_phy_vars(struct link_params *params)
{
struct bnx2x *bp = params->bp;
@@ -7045,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
u8 reset_ext_phy)
{
struct bnx2x *bp = params->bp;
- u8 phy_index, port = params->port;
+ u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
/* disable attentions */
vars->link_status = 0;
@@ -7083,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[phy_index].link_reset(
&params->phy[phy_index],
params);
+ if (params->phy[phy_index].flags &
+ FLAGS_REARM_LATCH_SIGNAL)
+ clear_latch_ind = 1;
}
}
+ if (clear_latch_ind) {
+ /* Clear latching indication */
+ bnx2x_rearm_latch_signal(bp, port, 0);
+ bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+ 1 << NIG_LATCH_BC_ENABLE_MI_INT);
+ }
if (params->phy[INT_PHY].link_reset)
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
@@ -7116,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
s8 port;
s8 port_of_path = 0;
+ bnx2x_ext_phy_hw_reset(bp, 0);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
u32 shmem_base, shmem2_base;
@@ -7138,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
return -EINVAL;
}
/* disable attentions */
- bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+ port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
@@ -7249,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp,
(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
- bnx2x_ext_phy_hw_reset(bp, 1);
+ bnx2x_ext_phy_hw_reset(bp, 0);
msleep(5);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 58a4c71..171abf8 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -279,12 +279,6 @@ u8 bnx2x_phy_read(struct link_params *params, u8 phy_addr,
u8 bnx2x_phy_write(struct link_params *params, u8 phy_addr,
u8 devad, u16 reg, u16 val);
-
-u8 bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 *ret_val);
-
-u8 bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
- u8 devad, u16 reg, u16 val);
/* Reads the link_status from the shmem,
and update the link vars accordingly */
void bnx2x_link_status_update(struct link_params *input,
@@ -304,8 +298,6 @@ u8 bnx2x_set_led(struct link_params *params, struct link_vars *vars,
#define LED_MODE_OPER 2
#define LED_MODE_FRONT_PANEL_OFF 3
-u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port, u32 led_idx, u32 value);
-
/* bnx2x_handle_module_detect_int should be called upon module detection
interrupt */
void bnx2x_handle_module_detect_int(struct link_params *params);
@@ -325,19 +317,12 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
/* Reset the external of SFX7101 */
void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
-u8 bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
- struct link_params *params, u16 addr,
- u8 byte_cnt, u8 *o_buf);
-
void bnx2x_hw_reset_phy(struct link_params *params);
/* Checks if HW lock is required for this phy/board type */
u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
u32 shmem2_base);
-/* Returns the aggregative supported attributes of the phys on board */
-u32 bnx2x_supported_attr(struct link_params *params, u8 phy_idx);
-
/* Check swap bit and adjust PHY order */
u32 bnx2x_phy_selection(struct link_params *params);
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index ff99a2f..9709b85 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -403,7 +403,7 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
/* used only at init
* locking is done by mcp
*/
-void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
@@ -429,7 +429,8 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE "dst_addr [none]"
-void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+ int msglvl)
{
u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
@@ -551,8 +552,9 @@ u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
return opcode;
}
-void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
- u8 src_type, u8 dst_type)
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae,
+ u8 src_type, u8 dst_type)
{
memset(dmae, 0, sizeof(struct dmae_command));
@@ -567,7 +569,8 @@ void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
}
/* issue a dmae command over the init-channel and wait for completion */
-int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+ struct dmae_command *dmae)
{
u32 *wb_comp = bnx2x_sp(bp, wb_comp);
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
@@ -674,8 +677,8 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
bnx2x_issue_dmae_with_comp(bp, &dmae);
}
-void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
- u32 addr, u32 len)
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+ u32 addr, u32 len)
{
int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
int offset = 0;
@@ -1267,7 +1270,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
-void bnx2x_int_disable(struct bnx2x *bp)
+static void bnx2x_int_disable(struct bnx2x *bp)
{
if (bp->common.int_block == INT_BLOCK_HC)
bnx2x_hc_int_disable(bp);
@@ -2236,7 +2239,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
}
/* must be called under rtnl_lock */
-void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
+static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
u32 mask = (1 << cl_id);
@@ -2303,7 +2306,7 @@ void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
bp->mac_filters.unmatched_unicast & ~mask;
}
-void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
struct tstorm_eth_function_common_config tcfg = {0};
u16 rss_flgs;
@@ -2460,7 +2463,7 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
-void bnx2x_pf_init(struct bnx2x *bp)
+static void bnx2x_pf_init(struct bnx2x *bp)
{
struct bnx2x_func_init_params func_init = {0};
struct bnx2x_rss_params rss = {0};
@@ -3928,7 +3931,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
hc_sm->time_to_expire = 0xFFFFFFFF;
}
-void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
int igu_seg_id;
@@ -6021,6 +6024,9 @@ alloc_mem_err:
/*
* Init service functions
*/
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags);
+
int bnx2x_func_start(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
@@ -6030,7 +6036,7 @@ int bnx2x_func_start(struct bnx2x *bp)
WAIT_RAMROD_COMMON);
}
-int bnx2x_func_stop(struct bnx2x *bp)
+static int bnx2x_func_stop(struct bnx2x *bp)
{
bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
@@ -6103,8 +6109,8 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
-int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
- int *state_p, int flags)
+static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
+ int *state_p, int flags)
{
/* can take a while if any port is running */
int cnt = 5000;
@@ -6154,7 +6160,7 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
return -EBUSY;
}
-u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
+static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
if (CHIP_IS_E1H(bp))
return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
@@ -6273,7 +6279,7 @@ static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
*
* @return 0 if success, -ENODEV if ramrod doesn't return.
*/
-int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
+static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
@@ -6383,11 +6389,11 @@ static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
ETH_CONNECTION_TYPE);
}
-int bnx2x_setup_fw_client(struct bnx2x *bp,
- struct bnx2x_client_init_params *params,
- u8 activate,
- struct client_init_ramrod_data *data,
- dma_addr_t data_mapping)
+static int bnx2x_setup_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_init_params *params,
+ u8 activate,
+ struct client_init_ramrod_data *data,
+ dma_addr_t data_mapping)
{
u16 hc_usec;
int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
@@ -6633,7 +6639,8 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
-int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
+static int bnx2x_stop_fw_client(struct bnx2x *bp,
+ struct bnx2x_client_ramrod_params *p)
{
int rc;
@@ -7440,7 +7447,7 @@ reset_task_exit:
* Init service functions
*/
-u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
{
u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
@@ -9057,7 +9064,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
default:
pr_err("Unknown board_type (%ld), aborting\n",
ent->driver_data);
- return ENODEV;
+ return -ENODEV;
}
cid_count += CNIC_CONTEXT_USE;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index beb3b7c..2fee00a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -493,9 +493,9 @@ static void bond_vlan_rx_register(struct net_device *bond_dev,
struct slave *slave;
int i;
- write_lock(&bond->lock);
+ write_lock_bh(&bond->lock);
bond->vlgrp = grp;
- write_unlock(&bond->lock);
+ write_unlock_bh(&bond->lock);
bond_for_each_slave(bond, slave, i) {
struct net_device *slave_dev = slave->dev;
@@ -878,8 +878,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
+ read_lock(&in_dev->mc_list_lock);
for (im = in_dev->mc_list; im; im = im->next)
ip_mc_rejoin_group(im);
+ read_unlock(&in_dev->mc_list_lock);
}
rcu_read_unlock();
@@ -1574,7 +1576,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (bond->slave_cnt == 0)
+ if (is_zero_ether_addr(bond->dev->dev_addr))
memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
slave_dev->addr_len);
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 75bfc3a..09ed3f4 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -31,3 +31,10 @@ config CAIF_SPI_SYNC
Putting the next command and length in the start of the frame can
help to synchronize to the next transfer in case of over or under-runs.
This option also needs to be enabled on the modem.
+
+config CAIF_SHM
+ tristate "CAIF shared memory protocol driver"
+ depends on CAIF && U5500_MBOX
+ default n
+ ---help---
+ The CAIF shared memory protocol driver for the STE UX5500 platform.
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 3a11d61..b38d987 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -8,3 +8,7 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
# SPI slave physical interfaces module
cfspi_slave-objs := caif_spi.o caif_spi_slave.o
obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
+
+# Shared memory
+caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
+obj-$(CONFIG_CAIF_SHM) += caif_shm.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
new file mode 100644
index 0000000..1cd90da
--- /dev/null
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <mach/mbox.h>
+#include <net/caif/caif_shm.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
+
+#define MAX_SHM_INSTANCES 1
+
+enum {
+ MBX_ACC0,
+ MBX_ACC1,
+ MBX_DSP
+};
+
+static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
+
+static unsigned int shm_start;
+static unsigned int shm_size;
+
+module_param(shm_size, uint, 0440);
+MODULE_PARM_DESC(shm_size, "Total size of SHM shared memory");
+
+module_param(shm_start, uint, 0440);
+MODULE_PARM_DESC(shm_start, "Start address of SHM shared memory");
+
+static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
+{
+ /* Always block until msg is written successfully */
+ mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
+ return 0;
+}
+
+static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
+ void *pshm_drv)
+{
+ /*
+ * For UX5500, we have only 1 SHM instance which uses MBX0
+ * for communication with the peer modem
+ */
+ pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
+
+ if (!pshm_dev->hmbx)
+ return -ENODEV;
+ else
+ return 0;
+}
+
+static int __init caif_shmdev_init(void)
+{
+ int i, result;
+
+ /* Loop is currently overkill, there is only one instance */
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+
+ shmdev_lyr[i].shm_base_addr = shm_start;
+ shmdev_lyr[i].shm_total_sz = shm_size;
+
+ if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
+ || (shmdev_lyr[i].shm_total_sz <= 0)) {
+ pr_warn("ERROR,"
+ "Shared memory Address and/or Size incorrect"
+ ", Bailing out ...\n");
+ result = -EINVAL;
+ goto clean;
+ }
+
+ pr_info("SHM AREA (instance %d) STARTS"
+ " AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
+
+ shmdev_lyr[i].shm_id = i;
+ shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
+ shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
+
+ /*
+ * Finally, CAIF core module is called with details in place:
+ * 1. SHM base address
+ * 2. SHM size
+ * 3. MBX handle
+ */
+ result = caif_shmcore_probe(&shmdev_lyr[i]);
+ if (result) {
+ pr_warn("ERROR[%d],"
+ "Could not probe SHM core (instance %d)"
+ " Bailing out ...\n", result, i);
+ goto clean;
+ }
+ }
+
+ return 0;
+
+clean:
+ /*
+ * For now, we assume that even if one instance of SHM fails, we bail
+ * out of the driver support completely. For this, we need to release
+ * any memory allocated and unregister any instance of SHM net device.
+ */
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+ if (shmdev_lyr[i].pshm_netdev)
+ unregister_netdev(shmdev_lyr[i].pshm_netdev);
+ }
+ return result;
+}
+
+static void __exit caif_shmdev_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_SHM_INSTANCES; i++) {
+ caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
+ kfree((void *)shmdev_lyr[i].shm_base_addr);
+ }
+
+}
+
+module_init(caif_shmdev_init);
+module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
new file mode 100644
index 0000000..19f9c06
--- /dev/null
+++ b/drivers/net/caif/caif_shmcore.c
@@ -0,0 +1,744 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
+ * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
+ * Daniel Martensson / daniel.martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_shm.h>
+
+#define NR_TX_BUF 6
+#define NR_RX_BUF 6
+#define TX_BUF_SZ 0x2000
+#define RX_BUF_SZ 0x2000
+
+#define CAIF_NEEDED_HEADROOM 32
+
+#define CAIF_FLOW_ON 1
+#define CAIF_FLOW_OFF 0
+
+#define LOW_WATERMARK 3
+#define HIGH_WATERMARK 4
+
+/* Maximum number of CAIF buffers per shared memory buffer. */
+#define SHM_MAX_FRMS_PER_BUF 10
+
+/*
+ * Size in bytes of the descriptor area
+ * (With end of descriptor signalling)
+ */
+#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
+ sizeof(struct shm_pck_desc))
+
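+/*
+ * Worked example (illustrative arithmetic only): struct shm_pck_desc
+ * below holds two u32 fields, i.e. 8 bytes, so with
+ * SHM_MAX_FRMS_PER_BUF == 10 the descriptor area spans
+ * (10 + 1) * 8 == 88 bytes; the extra entry carries the zero frm_ofs
+ * end-of-descriptor marker.
+ */
+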
+/*
+ * Offset to the first CAIF frame within a shared memory buffer.
+ * Aligned on 32 bytes.
+ */
+#define SHM_CAIF_FRM_OFS (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
+
+/* Number of bytes for CAIF shared memory header. */
+#define SHM_HDR_LEN 1
+
+/* Number of padding bytes for the complete CAIF frame. */
+#define SHM_FRM_PAD_LEN 4
+
+#define CAIF_MAX_MTU 4096
+
+#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
+#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)
+
+#define SHM_SET_EMPTY(x) (((x+1) & 0x0F) << 4)
+#define SHM_GET_EMPTY(x) (((x >> 4) & 0x0F) - 1)
+
+#define SHM_FULL_MASK (0x0F << 0)
+#define SHM_EMPTY_MASK (0x0F << 4)
+
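+/*
+ * Illustrative example of the mailbox encoding above: for buffer
+ * index 2, SHM_SET_FULL(2) == 0x03 and SHM_SET_EMPTY(2) == 0x30,
+ * while SHM_GET_FULL(0x03) == 2 and SHM_GET_EMPTY(0x30) == 2.
+ * The +1/-1 bias lets a zero nibble mean "no buffer signalled".
+ */
+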
+struct shm_pck_desc {
+ /*
+ * Offset from start of shared memory area to start of
+ * shared memory CAIF frame.
+ */
+ u32 frm_ofs;
+ u32 frm_len;
+};
+
+struct buf_list {
+ unsigned char *desc_vptr;
+ u32 phy_addr;
+ u32 index;
+ u32 len;
+ u32 frames;
+ u32 frm_ofs;
+ struct list_head list;
+};
+
+struct shm_caif_frm {
+ /* Number of bytes of padding before the CAIF frame. */
+ u8 hdr_ofs;
+};
+
+struct shmdrv_layer {
+ /* caif_dev_common must always be first in the structure*/
+ struct caif_dev_common cfdev;
+
+ u32 shm_tx_addr;
+ u32 shm_rx_addr;
+ u32 shm_base_addr;
+ u32 tx_empty_available;
+ spinlock_t lock;
+
+ struct list_head tx_empty_list;
+ struct list_head tx_pend_list;
+ struct list_head tx_full_list;
+ struct list_head rx_empty_list;
+ struct list_head rx_pend_list;
+ struct list_head rx_full_list;
+
+ struct workqueue_struct *pshm_tx_workqueue;
+ struct workqueue_struct *pshm_rx_workqueue;
+
+ struct work_struct shm_tx_work;
+ struct work_struct shm_rx_work;
+
+ struct sk_buff_head sk_qhead;
+ struct shmdev_layer *pshm_dev;
+};
+
+static int shm_netdev_open(struct net_device *shm_netdev)
+{
+ netif_wake_queue(shm_netdev);
+ return 0;
+}
+
+static int shm_netdev_close(struct net_device *shm_netdev)
+{
+ netif_stop_queue(shm_netdev);
+ return 0;
+}
+
+int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
+{
+ struct buf_list *pbuf;
+ struct shmdrv_layer *pshm_drv;
+ struct list_head *pos;
+ u32 avail_emptybuff = 0;
+ unsigned long flags = 0;
+
+ pshm_drv = (struct shmdrv_layer *)priv;
+
+ /* Check for received buffers. */
+ if (mbx_msg & SHM_FULL_MASK) {
+ int idx;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check whether we have any outstanding buffers. */
+ if (list_empty(&pshm_drv->rx_empty_list)) {
+
+ /* Release spin lock. */
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* We print even in IRQ context... */
+ pr_warn("No empty Rx buffers to fill: "
+ "mbx_msg:%x\n", mbx_msg);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->rx_empty_list.next,
+ struct buf_list, list);
+ idx = pbuf->index;
+
+ /* Check buffer synchronization. */
+ if (idx != SHM_GET_FULL(mbx_msg)) {
+
+ /* We print even in IRQ context... */
+ pr_warn("RX full out of sync:"
+ " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
+ idx, mbx_msg, SHM_GET_FULL(mbx_msg));
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ list_del_init(&pbuf->list);
+ list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule RX work queue. */
+ if (!work_pending(&pshm_drv->shm_rx_work))
+ queue_work(pshm_drv->pshm_rx_workqueue,
+ &pshm_drv->shm_rx_work);
+ }
+
+ /* Check for emptied buffers. */
+ if (mbx_msg & SHM_EMPTY_MASK) {
+ int idx;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check whether we have any outstanding buffers. */
+ if (list_empty(&pshm_drv->tx_full_list)) {
+
+ /* We print even in IRQ context... */
+ pr_warn("No TX to empty: msg:%x\n", mbx_msg);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->tx_full_list.next,
+ struct buf_list, list);
+ idx = pbuf->index;
+
+ /* Check buffer synchronization. */
+ if (idx != SHM_GET_EMPTY(mbx_msg)) {
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* We print even in IRQ context... */
+ pr_warn("TX empty "
+ "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
+
+ /* Bail out. */
+ goto err_sync;
+ }
+ list_del_init(&pbuf->list);
+
+ /* Reset buffer parameters. */
+ pbuf->frames = 0;
+ pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+ list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
+
+ /* Check the available no. of buffers in the empty list */
+ list_for_each(pos, &pshm_drv->tx_empty_list)
+ avail_emptybuff++;
+
+ /* Check whether we have to wake up the transmitter. */
+ if ((avail_emptybuff > HIGH_WATERMARK) &&
+ (!pshm_drv->tx_empty_available)) {
+ pshm_drv->tx_empty_available = 1;
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_ON);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule the work queue, if required. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue,
+ &pshm_drv->shm_tx_work);
+ } else
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+ }
+
+ return 0;
+
+err_sync:
+ return -EIO;
+}
+
+static void shm_rx_work_func(struct work_struct *rx_work)
+{
+ struct shmdrv_layer *pshm_drv;
+ struct buf_list *pbuf;
+ unsigned long flags = 0;
+ struct sk_buff *skb;
+ char *p;
+ int ret;
+
+ pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
+
+ while (1) {
+
+ struct shm_pck_desc *pck_desc;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check for received buffers. */
+ if (list_empty(&pshm_drv->rx_full_list)) {
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+ break;
+ }
+
+ pbuf =
+ list_entry(pshm_drv->rx_full_list.next, struct buf_list,
+ list);
+ list_del_init(&pbuf->list);
+
+ /* Retrieve pointer to start of the packet descriptor area. */
+ pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
+
+ /*
+ * Check whether descriptor contains a CAIF shared memory
+ * frame.
+ */
+ while (pck_desc->frm_ofs) {
+ unsigned int frm_buf_ofs;
+ unsigned int frm_pck_ofs;
+ unsigned int frm_pck_len;
+ /*
+ * Check whether offset is within buffer limits
+ * (lower).
+ */
+ if (pck_desc->frm_ofs <
+ (pbuf->phy_addr - pshm_drv->shm_base_addr))
+ break;
+ /*
+ * Check whether offset is within buffer limits
+ * (higher).
+ */
+ if (pck_desc->frm_ofs >
+ ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
+ pbuf->len))
+ break;
+
+ /* Calculate offset from start of buffer. */
+ frm_buf_ofs =
+ pck_desc->frm_ofs - (pbuf->phy_addr -
+ pshm_drv->shm_base_addr);
+
+ /*
+ * Calculate offset and length of CAIF packet while
+ * taking care of the shared memory header.
+ */
+ frm_pck_ofs =
+ frm_buf_ofs + SHM_HDR_LEN +
+ (*(pbuf->desc_vptr + frm_buf_ofs));
+ frm_pck_len =
+ (pck_desc->frm_len - SHM_HDR_LEN -
+ (*(pbuf->desc_vptr + frm_buf_ofs)));
+
+ /* Check whether CAIF packet is within buffer limits */
+ if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
+ break;
+
+ /* Get a suitable CAIF packet and copy in data. */
+ skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
+ frm_pck_len + 1);
+ BUG_ON(skb == NULL);
+
+ p = skb_put(skb, frm_pck_len);
+ memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
+
+ skb->protocol = htons(ETH_P_CAIF);
+ skb_reset_mac_header(skb);
+ skb->dev = pshm_drv->pshm_dev->pshm_netdev;
+
+ /* Push received packet up the stack. */
+ ret = netif_rx_ni(skb);
+
+ if (!ret) {
+ pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_packets++;
+ pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_bytes += pck_desc->frm_len;
+ } else
+ ++pshm_drv->pshm_dev->pshm_netdev->stats.
+ rx_dropped;
+ /* Move to next packet descriptor. */
+ pck_desc++;
+ }
+
+ list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ }
+
+ /* Schedule the work queue, if required. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+}
+
+static void shm_tx_work_func(struct work_struct *tx_work)
+{
+ u32 mbox_msg;
+ unsigned int frmlen, avail_emptybuff, append = 0;
+ unsigned long flags = 0;
+ struct buf_list *pbuf = NULL;
+ struct shmdrv_layer *pshm_drv;
+ struct shm_caif_frm *frm;
+ struct sk_buff *skb;
+ struct shm_pck_desc *pck_desc;
+ struct list_head *pos;
+
+ pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
+
+ do {
+ /* Initialize mailbox message. */
+ mbox_msg = 0x00;
+ avail_emptybuff = 0;
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ /* Check for pending receive buffers. */
+ if (!list_empty(&pshm_drv->rx_pend_list)) {
+
+ pbuf = list_entry(pshm_drv->rx_pend_list.next,
+ struct buf_list, list);
+
+ list_del_init(&pbuf->list);
+ list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
+ /*
+ * Value index is never changed,
+ * so read access should be safe.
+ */
+ mbox_msg |= SHM_SET_EMPTY(pbuf->index);
+ }
+
+ skb = skb_peek(&pshm_drv->sk_qhead);
+
+ if (skb == NULL)
+ goto send_msg;
+
+ /* Check the available no. of buffers in the empty list */
+ list_for_each(pos, &pshm_drv->tx_empty_list)
+ avail_emptybuff++;
+
+ if ((avail_emptybuff < LOW_WATERMARK) &&
+ pshm_drv->tx_empty_available) {
+ /* Update blocking condition. */
+ pshm_drv->tx_empty_available = 0;
+ pshm_drv->cfdev.flowctrl
+ (pshm_drv->pshm_dev->pshm_netdev,
+ CAIF_FLOW_OFF);
+ }
+ /*
+ * We simply return to the caller if we do not have space in
+ * either the Tx pending list or the Tx empty list. In this
+ * case, we hold the received skb on the skb list, waiting for
+ * it to be transmitted once Tx buffers become available.
+ */
+ if (list_empty(&pshm_drv->tx_empty_list))
+ goto send_msg;
+
+ /* Get the first free Tx buffer. */
+ pbuf = list_entry(pshm_drv->tx_empty_list.next,
+ struct buf_list, list);
+ do {
+ if (append) {
+ skb = skb_peek(&pshm_drv->sk_qhead);
+ if (skb == NULL)
+ break;
+ }
+
+ frm = (struct shm_caif_frm *)
+ (pbuf->desc_vptr + pbuf->frm_ofs);
+
+ frm->hdr_ofs = 0;
+ frmlen = 0;
+ frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
+
+ /* Add tail padding if needed. */
+ if (frmlen % SHM_FRM_PAD_LEN)
+ frmlen += SHM_FRM_PAD_LEN -
+ (frmlen % SHM_FRM_PAD_LEN);
+
+ /*
+ * Verify that packet, header and additional padding
+ * can fit within the buffer frame area.
+ */
+ if (frmlen >= (pbuf->len - pbuf->frm_ofs))
+ break;
+
+ if (!append) {
+ list_del_init(&pbuf->list);
+ append = 1;
+ }
+
+ skb = skb_dequeue(&pshm_drv->sk_qhead);
+ /* Copy in CAIF frame. */
+ skb_copy_bits(skb, 0, pbuf->desc_vptr +
+ pbuf->frm_ofs + SHM_HDR_LEN +
+ frm->hdr_ofs, skb->len);
+
+ pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
+ pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
+ frmlen;
+ dev_kfree_skb(skb);
+
+ /* Fill in the shared memory packet descriptor area. */
+ pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
+ /* Forward to current frame. */
+ pck_desc += pbuf->frames;
+ pck_desc->frm_ofs = (pbuf->phy_addr -
+ pshm_drv->shm_base_addr) +
+ pbuf->frm_ofs;
+ pck_desc->frm_len = frmlen;
+ /* Terminate packet descriptor area. */
+ pck_desc++;
+ pck_desc->frm_ofs = 0;
+ /* Update buffer parameters. */
+ pbuf->frames++;
+ pbuf->frm_ofs += frmlen + (frmlen % 32);
+
+ } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
+
+ /* Assign buffer as full. */
+ list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
+ append = 0;
+ mbox_msg |= SHM_SET_FULL(pbuf->index);
+send_msg:
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ if (mbox_msg)
+ pshm_drv->pshm_dev->pshmdev_mbxsend
+ (pshm_drv->pshm_dev->shm_id, mbox_msg);
+ } while (mbox_msg);
+}
+
+static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
+{
+ struct shmdrv_layer *pshm_drv;
+ unsigned long flags = 0;
+
+ pshm_drv = netdev_priv(shm_netdev);
+
+ spin_lock_irqsave(&pshm_drv->lock, flags);
+
+ skb_queue_tail(&pshm_drv->sk_qhead, skb);
+
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
+
+ /* Schedule Tx work queue for deferred processing of skbs. */
+ if (!work_pending(&pshm_drv->shm_tx_work))
+ queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
+
+ return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = shm_netdev_open,
+ .ndo_stop = shm_netdev_close,
+ .ndo_start_xmit = shm_netdev_tx,
+};
+
+static void shm_netdev_setup(struct net_device *pshm_netdev)
+{
+ struct shmdrv_layer *pshm_drv;
+ pshm_netdev->netdev_ops = &netdev_ops;
+
+ pshm_netdev->mtu = CAIF_MAX_MTU;
+ pshm_netdev->type = ARPHRD_CAIF;
+ pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
+ pshm_netdev->tx_queue_len = 0;
+ pshm_netdev->destructor = free_netdev;
+
+ pshm_drv = netdev_priv(pshm_netdev);
+
+ /* Initialize structures in a clean state. */
+ memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
+
+ pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
+}
+
+int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
+{
+ int result, j;
+ struct shmdrv_layer *pshm_drv = NULL;
+
+ pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
+ "cfshm%d", shm_netdev_setup);
+ if (!pshm_dev->pshm_netdev)
+ return -ENOMEM;
+
+ pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
+ pshm_drv->pshm_dev = pshm_dev;
+
+ /*
+ * Initialization starts by verifying that the MBX driver is
+ * available, by calling its setup function. The MBX driver
+ * must be available at this point for the SHM driver to
+ * function properly.
+ */
+ if ((pshm_dev->pshmdev_mbxsetup
+ (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
+ pr_warn("Could not config. SHM Mailbox,"
+ " Bailing out.....\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENODEV;
+ }
+
+ skb_queue_head_init(&pshm_drv->sk_qhead);
+
+ pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
+ " INSTANCE AT pshm_drv =0x%p\n",
+ pshm_drv->pshm_dev->shm_id, pshm_drv);
+
+ if (pshm_dev->shm_total_sz <
+ (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
+
+ pr_warn("ERROR, Amount of available"
+ " Phys. SHM cannot accomodate current SHM "
+ "driver configuration, Bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+
+ pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
+ pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
+
+ if (pshm_dev->shm_loopback)
+ pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
+ else
+ pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
+ (NR_TX_BUF * TX_BUF_SZ);
+
+ INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
+ INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
+ INIT_LIST_HEAD(&pshm_drv->tx_full_list);
+
+ INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
+ INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
+ INIT_LIST_HEAD(&pshm_drv->rx_full_list);
+
+ INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
+ INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
+
+ pshm_drv->pshm_tx_workqueue =
+ create_singlethread_workqueue("shm_tx_work");
+ pshm_drv->pshm_rx_workqueue =
+ create_singlethread_workqueue("shm_rx_work");
+
+ for (j = 0; j < NR_TX_BUF; j++) {
+ struct buf_list *tx_buf =
+ kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+ if (tx_buf == NULL) {
+ pr_warn("ERROR, Could not"
+ " allocate dynamic mem. for tx_buf,"
+ " Bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+ tx_buf->index = j;
+ tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
+ tx_buf->len = TX_BUF_SZ;
+ tx_buf->frames = 0;
+ tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
+
+ if (pshm_dev->shm_loopback)
+ tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+ else
+ tx_buf->desc_vptr =
+ ioremap(tx_buf->phy_addr, TX_BUF_SZ);
+
+ list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
+ }
+
+ for (j = 0; j < NR_RX_BUF; j++) {
+ struct buf_list *rx_buf =
+ kmalloc(sizeof(struct buf_list), GFP_KERNEL);
+
+ if (rx_buf == NULL) {
+ pr_warn("ERROR, Could not"
+ " allocate dynamic mem.for rx_buf,"
+ " Bailing out ...\n");
+ free_netdev(pshm_dev->pshm_netdev);
+ return -ENOMEM;
+ }
+ rx_buf->index = j;
+ rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
+ rx_buf->len = RX_BUF_SZ;
+
+ if (pshm_dev->shm_loopback)
+ rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+ else
+ rx_buf->desc_vptr =
+ ioremap(rx_buf->phy_addr, RX_BUF_SZ);
+ list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
+ }
+
+ pshm_drv->tx_empty_available = 1;
+ result = register_netdev(pshm_dev->pshm_netdev);
+ if (result)
+ pr_warn("ERROR[%d], SHM could not, "
+ "register with NW FRMWK Bailing out ...\n", result);
+
+ return result;
+}
+
+void caif_shmcore_remove(struct net_device *pshm_netdev)
+{
+ struct buf_list *pbuf;
+ struct shmdrv_layer *pshm_drv = NULL;
+
+ pshm_drv = netdev_priv(pshm_netdev);
+
+ while (!(list_empty(&pshm_drv->tx_pend_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_pend_list.next,
+ struct buf_list, list);
+
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->tx_full_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_full_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->tx_empty_list))) {
+ pbuf =
+ list_entry(pshm_drv->tx_empty_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_full_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_full_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_pend_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_pend_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ while (!(list_empty(&pshm_drv->rx_empty_list))) {
+ pbuf =
+ list_entry(pshm_drv->rx_empty_list.next,
+ struct buf_list, list);
+ list_del(&pbuf->list);
+ kfree(pbuf);
+ }
+
+ /* Destroy work queues. */
+ destroy_workqueue(pshm_drv->pshm_tx_workqueue);
+ destroy_workqueue(pshm_drv->pshm_rx_workqueue);
+
+ unregister_netdev(pshm_netdev);
+}
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533..20da199 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF SPI driver");
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
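+/* E.g. (illustrative): PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0. */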
+
static int spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
module_param(spi_frm_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
+ */
module_param(spi_up_head_align, int, S_IRUGO);
MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
@@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
static const struct file_operations dbgfs_state_fops = {
.open = dbgfs_open,
.read = dbgfs_state,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static const struct file_operations dbgfs_frame_fops = {
.open = dbgfs_open,
.read = dbgfs_frame,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
+ .owner = THIS_MODULE
};
static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
u8 *dst = buf;
caif_assert(buf);
+ if (cfspi->slave && !cfspi->slave_talked)
+ cfspi->slave_talked = true;
+
do {
struct sk_buff *skb;
struct caif_payload_info *info;
@@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align) {
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1) {
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
*dst = (u8)(spad - 1);
dst += spad;
}
@@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
dst += epad;
dev_kfree_skb(skb);
@@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi)
* Compute head offset i.e. number of bytes to add to
* get the start of the payload aligned.
*/
- if (spi_up_head_align)
- spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+ if (spi_up_head_align > 1)
+ spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
/*
* Compute tail offset i.e. number of bytes to add to
* get the complete CAIF frame aligned.
*/
- epad = (skb->len + spad) & spi_up_tail_align;
+ epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
} else {
/* Put back packet. */
skb_queue_head(&cfspi->qhead, skb);
+ break;
}
} while (pkts <= CAIF_MAX_SPI_PKTS);
@@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
{
struct cfspi *cfspi = (struct cfspi *)ifc->priv;
+ /*
+ * The slave device is the master on the link. Interrupts before the
+ * slave has transmitted are considered spurious.
+ */
+ if (cfspi->slave && !cfspi->slave_talked) {
+ printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+ return;
+ }
+
if (!in_interrupt())
spin_lock(&cfspi->lock);
if (assert) {
@@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
spin_unlock(&cfspi->lock);
/* Wake up the xfer thread. */
- wake_up_interruptible(&cfspi->wait);
+ if (assert)
+ wake_up_interruptible(&cfspi->wait);
}
static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute head offset i.e. number of bytes added to
* get the start of the payload aligned.
*/
- if (spi_down_head_align) {
+ if (spi_down_head_align > 1) {
spad = 1 + *src;
src += spad;
}
@@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
* Compute tail offset i.e. number of bytes added to
* get the complete CAIF frame aligned.
*/
- epad = (pkt_len + spad) & spi_down_tail_align;
+ epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
src += epad;
} while ((src - buf) < len);
@@ -617,19 +635,28 @@ int cfspi_spi_probe(struct platform_device *pdev)
ndev = alloc_netdev(sizeof(struct cfspi),
"cfspi%d", cfspi_setup);
- if (!dev)
- return -ENODEV;
+ if (!ndev)
+ return -ENOMEM;
cfspi = netdev_priv(ndev);
netif_stop_queue(ndev);
cfspi->ndev = ndev;
cfspi->pdev = pdev;
- /* Set flow info */
+ /* Set flow info. */
cfspi->flow_off_sent = 0;
cfspi->qd_low_mark = LOW_WATER_MARK;
cfspi->qd_high_mark = HIGH_WATER_MARK;
+ /* Set slave info. */
+ if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+ cfspi->slave = true;
+ cfspi->slave_talked = false;
+ } else {
+ cfspi->slave = false;
+ cfspi->slave_talked = false;
+ }
+
/* Assign the SPI device. */
cfspi->dev = dev;
/* Assign the device ifc to this SPI interface. */
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbf..1b9943a 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi)
#endif
int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (the & operation is used) and cannot be zero!
+ */
+int spi_up_head_align = 1 << 1;
+int spi_up_tail_align = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
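+
+/*
+ * Illustrative effect of the defaults above: with spi_up_head_align
+ * set to 1 << 1, caif_spi.c pads the start of the uplink payload to a
+ * 2-byte boundary via PAD_POW2(); an alignment of 1 << 0 yields no
+ * padding at all.
+ */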
#ifdef CONFIG_DEBUG_FS
static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9d9e453..080574b 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -82,6 +82,14 @@ config CAN_FLEXCAN
---help---
Say Y here if you want to support for Freescale FlexCAN.
+config PCH_CAN
+ tristate "PCH CAN"
+ depends on CAN_DEV && PCI
+ ---help---
+ This driver is for the PCH CAN controller of Topcliff, an IOH
+ (I/O Hub) for x86 embedded processors, and provides access to
+ the CAN bus.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0057537..90af15a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -17,5 +17,6 @@ obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
obj-$(CONFIG_CAN_BFIN) += bfin_can.o
obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
+obj-$(CONFIG_PCH_CAN) += pch_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 2d8bd86..7ef83d0 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1,8 +1,8 @@
/*
* at91_can.c - CAN network driver for AT91 SoC CAN controller
*
- * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
- * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * (C) 2008, 2009, 2010 by Marc Kleine-Budde <kernel@pengutronix.de>
*
* This software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2 as distributed in the 'COPYING'
@@ -40,7 +40,6 @@
#include <mach/board.h>
-#define DRV_NAME "at91_can"
#define AT91_NAPI_WEIGHT 12
/*
@@ -172,6 +171,7 @@ struct at91_priv {
};
static struct can_bittiming_const at91_bittiming_const = {
+ .name = KBUILD_MODNAME,
.tseg1_min = 4,
.tseg1_max = 16,
.tseg2_min = 2,
@@ -199,13 +199,13 @@ static inline int get_tx_echo_mb(const struct at91_priv *priv)
static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
{
- return readl(priv->reg_base + reg);
+ return __raw_readl(priv->reg_base + reg);
}
static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
u32 value)
{
- writel(value, priv->reg_base + reg);
+ __raw_writel(value, priv->reg_base + reg);
}
static inline void set_mb_mode_prio(const struct at91_priv *priv,
@@ -243,6 +243,12 @@ static void at91_setup_mailboxes(struct net_device *dev)
set_mb_mode(priv, i, AT91_MB_MODE_RX);
set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+ /* reset acceptance mask and id register */
+ for (i = AT91_MB_RX_FIRST; i <= AT91_MB_RX_LAST; i++) {
+ at91_write(priv, AT91_MAM(i), 0x0);
+ at91_write(priv, AT91_MID(i), AT91_MID_MIDE);
+ }
+
/* The last 4 mailboxes are used for transmitting. */
for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
@@ -257,18 +263,30 @@ static int at91_set_bittiming(struct net_device *dev)
const struct can_bittiming *bt = &priv->can.bittiming;
u32 reg_br;
- reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
- ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
+ reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 << 24 : 0) |
+ ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) |
((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
((bt->phase_seg2 - 1) << 0);
- dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+ netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br);
at91_write(priv, AT91_BR, reg_br);
return 0;
}
+static int at91_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ const struct at91_priv *priv = netdev_priv(dev);
+ u32 reg_ecr = at91_read(priv, AT91_ECR);
+
+ bec->rxerr = reg_ecr & 0xff;
+ bec->txerr = reg_ecr >> 16;
+
+ return 0;
+}
+
static void at91_chip_start(struct net_device *dev)
{
struct at91_priv *priv = netdev_priv(dev);
@@ -281,6 +299,7 @@ static void at91_chip_start(struct net_device *dev)
reg_mr = at91_read(priv, AT91_MR);
at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+ at91_set_bittiming(dev);
at91_setup_mailboxes(dev);
at91_transceiver_switch(priv, 1);
@@ -350,8 +369,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
netif_stop_queue(dev);
- dev_err(dev->dev.parent,
- "BUG! TX buffer full when queue awake!\n");
+ netdev_err(dev, "BUG! TX buffer full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -435,7 +453,7 @@ static void at91_rx_overflow_err(struct net_device *dev)
struct sk_buff *skb;
struct can_frame *cf;
- dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+ netdev_dbg(dev, "RX buffer overflow\n");
stats->rx_over_errors++;
stats->rx_errors++;
@@ -480,6 +498,9 @@ static void at91_read_mb(struct net_device *dev, unsigned int mb,
*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+ /* allow RX of extended frames */
+ at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);
+
if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
at91_rx_overflow_err(dev);
}
@@ -565,8 +586,8 @@ static int at91_poll_rx(struct net_device *dev, int quota)
if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
reg_sr & AT91_MB_RX_LOW_MASK)
- dev_info(dev->dev.parent,
- "order of incoming frames cannot be guaranteed\n");
+ netdev_info(dev,
+ "order of incoming frames cannot be guaranteed\n");
again:
for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
@@ -604,7 +625,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* CRC error */
if (reg_sr & AT91_IRQ_CERR) {
- dev_dbg(dev->dev.parent, "CERR irq\n");
+ netdev_dbg(dev, "CERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -612,7 +633,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Stuffing Error */
if (reg_sr & AT91_IRQ_SERR) {
- dev_dbg(dev->dev.parent, "SERR irq\n");
+ netdev_dbg(dev, "SERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -621,14 +642,14 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Acknowledgement Error */
if (reg_sr & AT91_IRQ_AERR) {
- dev_dbg(dev->dev.parent, "AERR irq\n");
+ netdev_dbg(dev, "AERR irq\n");
dev->stats.tx_errors++;
cf->can_id |= CAN_ERR_ACK;
}
/* Form error */
if (reg_sr & AT91_IRQ_FERR) {
- dev_dbg(dev->dev.parent, "FERR irq\n");
+ netdev_dbg(dev, "FERR irq\n");
dev->stats.rx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -637,7 +658,7 @@ static void at91_poll_err_frame(struct net_device *dev,
/* Bit Error */
if (reg_sr & AT91_IRQ_BERR) {
- dev_dbg(dev->dev.parent, "BERR irq\n");
+ netdev_dbg(dev, "BERR irq\n");
dev->stats.tx_errors++;
priv->can.can_stats.bus_error++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
@@ -755,12 +776,10 @@ static void at91_irq_err_state(struct net_device *dev,
struct can_frame *cf, enum can_state new_state)
{
struct at91_priv *priv = netdev_priv(dev);
- u32 reg_idr, reg_ier, reg_ecr;
- u8 tec, rec;
+ u32 reg_idr = 0, reg_ier = 0;
+ struct can_berr_counter bec;
- reg_ecr = at91_read(priv, AT91_ECR);
- rec = reg_ecr & 0xff;
- tec = reg_ecr >> 16;
+ at91_get_berr_counter(dev, &bec);
switch (priv->can.state) {
case CAN_STATE_ERROR_ACTIVE:
@@ -771,11 +790,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_WARNING &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+ netdev_dbg(dev, "Error Warning IRQ\n");
priv->can.can_stats.error_warning++;
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (tec > rec) ?
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_WARNING :
CAN_ERR_CRTL_RX_WARNING;
}
@@ -787,11 +806,11 @@ static void at91_irq_err_state(struct net_device *dev,
*/
if (new_state >= CAN_STATE_ERROR_PASSIVE &&
new_state <= CAN_STATE_BUS_OFF) {
- dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+ netdev_dbg(dev, "Error Passive IRQ\n");
priv->can.can_stats.error_passive++;
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (tec > rec) ?
+ cf->data[1] = (bec.txerr > bec.rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
}
@@ -804,7 +823,7 @@ static void at91_irq_err_state(struct net_device *dev,
if (new_state <= CAN_STATE_ERROR_PASSIVE) {
cf->can_id |= CAN_ERR_RESTARTED;
- dev_dbg(dev->dev.parent, "restarted\n");
+ netdev_dbg(dev, "restarted\n");
priv->can.can_stats.restarts++;
netif_carrier_on(dev);
@@ -825,7 +844,7 @@ static void at91_irq_err_state(struct net_device *dev,
* circumstances. so just enable AT91_IRQ_ERRP, thus
* the "fallthrough"
*/
- dev_dbg(dev->dev.parent, "Error Active\n");
+ netdev_dbg(dev, "Error Active\n");
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
case CAN_STATE_ERROR_WARNING: /* fallthrough */
@@ -843,7 +862,7 @@ static void at91_irq_err_state(struct net_device *dev,
cf->can_id |= CAN_ERR_BUSOFF;
- dev_dbg(dev->dev.parent, "bus-off\n");
+ netdev_dbg(dev, "bus-off\n");
netif_carrier_off(dev);
priv->can.can_stats.bus_off++;
@@ -881,7 +900,7 @@ static void at91_irq_err(struct net_device *dev)
else if (likely(reg_sr & AT91_IRQ_ERRA))
new_state = CAN_STATE_ERROR_ACTIVE;
else {
- dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+ netdev_err(dev, "BUG! hardware in undefined state\n");
return;
}
@@ -1018,7 +1037,7 @@ static const struct net_device_ops at91_netdev_ops = {
.ndo_start_xmit = at91_start_xmit,
};
-static int __init at91_can_probe(struct platform_device *pdev)
+static int __devinit at91_can_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct at91_priv *priv;
@@ -1067,8 +1086,8 @@ static int __init at91_can_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->can.clock.freq = clk_get_rate(clk);
priv->can.bittiming_const = &at91_bittiming_const;
- priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
+ priv->can.do_get_berr_counter = at91_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
@@ -1092,7 +1111,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
return 0;
exit_free:
- free_netdev(dev);
+ free_candev(dev);
exit_iounmap:
iounmap(addr);
exit_release:
@@ -1113,8 +1132,6 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- free_netdev(dev);
-
iounmap(priv->reg_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1122,6 +1139,8 @@ static int __devexit at91_can_remove(struct platform_device *pdev)
clk_put(priv->clk);
+ free_candev(dev);
+
return 0;
}
@@ -1129,21 +1148,19 @@ static struct platform_driver at91_can_driver = {
.probe = at91_can_probe,
.remove = __devexit_p(at91_can_remove),
.driver = {
- .name = DRV_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
};
static int __init at91_can_module_init(void)
{
- printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
return platform_driver_register(&at91_can_driver);
}
static void __exit at91_can_module_exit(void)
{
platform_driver_unregister(&at91_can_driver);
- printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
}
module_init(at91_can_module_init);
@@ -1151,4 +1168,4 @@ module_exit(at91_can_module_exit);
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
+MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index ef443a0..d499056 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -992,7 +992,6 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
unregister_flexcandev(dev);
platform_set_drvdata(pdev, NULL);
- free_candev(dev);
iounmap(priv->base);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1000,6 +999,8 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
clk_put(priv->clk);
+ free_candev(dev);
+
return 0;
}
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 6aadc3e..7ab534a 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -169,6 +169,7 @@
# define RXBSIDH_SHIFT 3
#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
# define RXBSIDL_IDE 0x08
+# define RXBSIDL_SRR 0x10
# define RXBSIDL_EID 3
# define RXBSIDL_SHIFT 5
#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
@@ -475,6 +476,8 @@ static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
frame->can_id =
(buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
(buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
+ if (buf[RXBSIDL_OFF] & RXBSIDL_SRR)
+ frame->can_id |= CAN_RTR_FLAG;
}
/* Data length */
frame->can_dlc = get_can_dlc(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
new file mode 100644
index 0000000..6727182
--- /dev/null
+++ b/drivers/net/can/pch_can.c
@@ -0,0 +1,1463 @@
+/*
+ * Copyright (C) 1999 - 2010 Intel Corporation.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#define MAX_MSG_OBJ 32
+#define MSG_OBJ_RX 0 /* The receive message object flag. */
+#define MSG_OBJ_TX 1 /* The transmit message object flag. */
+
+#define ENABLE 1 /* The enable flag */
+#define DISABLE 0 /* The disable flag */
+#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */
+#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */
+#define CAN_CTRL_IE_SIE_EIE 0x000e
+#define CAN_CTRL_CCE 0x0040
+#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */
+#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */
+#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */
+#define CAN_CMASK_RX_TX_SET 0x00f3
+#define CAN_CMASK_RX_TX_GET 0x0073
+#define CAN_CMASK_ALL 0xff
+#define CAN_CMASK_RDWR 0x80
+#define CAN_CMASK_ARB 0x20
+#define CAN_CMASK_CTRL 0x10
+#define CAN_CMASK_MASK 0x40
+#define CAN_CMASK_NEWDAT 0x04
+#define CAN_CMASK_CLRINTPND 0x08
+
+#define CAN_IF_MCONT_NEWDAT 0x8000
+#define CAN_IF_MCONT_INTPND 0x2000
+#define CAN_IF_MCONT_UMASK 0x1000
+#define CAN_IF_MCONT_TXIE 0x0800
+#define CAN_IF_MCONT_RXIE 0x0400
+#define CAN_IF_MCONT_RMTEN 0x0200
+#define CAN_IF_MCONT_TXRQXT 0x0100
+#define CAN_IF_MCONT_EOB 0x0080
+#define CAN_IF_MCONT_DLC 0x000f
+#define CAN_IF_MCONT_MSGLOST 0x4000
+#define CAN_MASK2_MDIR_MXTD 0xc000
+#define CAN_ID2_DIR 0x2000
+#define CAN_ID_MSGVAL 0x8000
+
+#define CAN_STATUS_INT 0x8000
+#define CAN_IF_CREQ_BUSY 0x8000
+#define CAN_ID2_XTD 0x4000
+
+#define CAN_REC 0x00007f00
+#define CAN_TEC 0x000000ff
+
+#define PCH_RX_OK 0x00000010
+#define PCH_TX_OK 0x00000008
+#define PCH_BUS_OFF 0x00000080
+#define PCH_EWARN 0x00000040
+#define PCH_EPASSIV 0x00000020
+#define PCH_LEC0 0x00000001
+#define PCH_LEC1 0x00000002
+#define PCH_LEC2 0x00000004
+#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
+#define PCH_STUF_ERR PCH_LEC0
+#define PCH_FORM_ERR PCH_LEC1
+#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
+#define PCH_BIT1_ERR PCH_LEC2
+#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
+#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
+
+/* bit position of certain controller bits. */
+#define BIT_BITT_BRP 0
+#define BIT_BITT_SJW 6
+#define BIT_BITT_TSEG1 8
+#define BIT_BITT_TSEG2 12
+#define BIT_IF1_MCONT_RXIE 10
+#define BIT_IF2_MCONT_TXIE 11
+#define BIT_BRPE_BRPE 6
+#define BIT_ES_TXERRCNT 0
+#define BIT_ES_RXERRCNT 8
+#define MSK_BITT_BRP 0x3f
+#define MSK_BITT_SJW 0xc0
+#define MSK_BITT_TSEG1 0xf00
+#define MSK_BITT_TSEG2 0x7000
+#define MSK_BRPE_BRPE 0x3c0
+#define MSK_BRPE_GET 0x0f
+#define MSK_CTRL_IE_SIE_EIE 0x07
+#define MSK_MCONT_TXIE 0x08
+#define MSK_MCONT_RXIE 0x10
+#define PCH_CAN_NO_TX_BUFF 1
+#define COUNTER_LIMIT 10
+
+#define PCH_CAN_CLK 50000000 /* 50MHz */
+
+/*
+ * Define the number of message objects.
+ * PCH CAN communications are done via Message RAM.
+ * The Message RAM consists of 32 message objects.
+ */
+#define PCH_RX_OBJ_NUM 26 /* objects 1 ~ PCH_RX_OBJ_NUM are Rx */
+#define PCH_TX_OBJ_NUM 6 /* objects (PCH_RX_OBJ_NUM + 1) ~ PCH_OBJ_NUM are Tx */
+#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
+
+#define PCH_FIFO_THRESH 16
+
+enum pch_can_mode {
+ PCH_CAN_ENABLE,
+ PCH_CAN_DISABLE,
+ PCH_CAN_ALL,
+ PCH_CAN_NONE,
+ PCH_CAN_STOP,
+ PCH_CAN_RUN
+};
+
+struct pch_can_regs {
+ u32 cont;
+ u32 stat;
+ u32 errc;
+ u32 bitt;
+ u32 intr;
+ u32 opt;
+ u32 brpe;
+ u32 reserve1;
+ u32 if1_creq;
+ u32 if1_cmask;
+ u32 if1_mask1;
+ u32 if1_mask2;
+ u32 if1_id1;
+ u32 if1_id2;
+ u32 if1_mcont;
+ u32 if1_dataa1;
+ u32 if1_dataa2;
+ u32 if1_datab1;
+ u32 if1_datab2;
+ u32 reserve2;
+ u32 reserve3[12];
+ u32 if2_creq;
+ u32 if2_cmask;
+ u32 if2_mask1;
+ u32 if2_mask2;
+ u32 if2_id1;
+ u32 if2_id2;
+ u32 if2_mcont;
+ u32 if2_dataa1;
+ u32 if2_dataa2;
+ u32 if2_datab1;
+ u32 if2_datab2;
+ u32 reserve4;
+ u32 reserve5[20];
+ u32 treq1;
+ u32 treq2;
+ u32 reserve6[2];
+ u32 reserve7[56];
+ u32 reserve8[3];
+ u32 srst;
+};
+
+struct pch_can_priv {
+ struct can_priv can;
+ unsigned int can_num;
+ struct pci_dev *dev;
+ unsigned int tx_enable[MAX_MSG_OBJ];
+ unsigned int rx_enable[MAX_MSG_OBJ];
+ unsigned int rx_link[MAX_MSG_OBJ];
+ unsigned int int_enables;
+ unsigned int int_stat;
+ struct net_device *ndev;
+ spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
+ unsigned int msg_obj[MAX_MSG_OBJ];
+ struct pch_can_regs __iomem *regs;
+ struct napi_struct napi;
+ unsigned int tx_obj; /* Point next Tx Obj index */
+ unsigned int use_msi;
+};
+
+static struct can_bittiming_const pch_can_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024, /* 6bit + extended 4bit */
+ .brp_inc = 1,
+};
+
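+/*
+ * Illustrative bit-timing arithmetic (not specific to this driver):
+ * a CAN bit takes (1 + tseg1 + tseg2) time quanta, each quantum
+ * lasting brp / PCH_CAN_CLK seconds; e.g. brp = 5, tseg1 = 7,
+ * tseg2 = 2 gives 50 MHz / (5 * (1 + 7 + 2)) = 1 Mbit/s.
+ */
+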
+static DEFINE_PCI_DEVICE_TABLE(pch_pci_tbl) = {
+ {PCI_VENDOR_ID_INTEL, 0x8818, PCI_ANY_ID, PCI_ANY_ID,},
+ {0,}
+};
+MODULE_DEVICE_TABLE(pci, pch_pci_tbl);
+
+static inline void pch_can_bit_set(void __iomem *addr, u32 mask)
+{
+ iowrite32(ioread32(addr) | mask, addr);
+}
+
+static inline void pch_can_bit_clear(void __iomem *addr, u32 mask)
+{
+ iowrite32(ioread32(addr) & ~mask, addr);
+}
+
+static void pch_can_set_run_mode(struct pch_can_priv *priv,
+ enum pch_can_mode mode)
+{
+ switch (mode) {
+ case PCH_CAN_RUN:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+ break;
+
+ case PCH_CAN_STOP:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+ break;
+
+ default:
+ dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__);
+ break;
+ }
+}
+
+static void pch_can_set_optmode(struct pch_can_priv *priv)
+{
+ u32 reg_val = ioread32(&priv->regs->opt);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ reg_val |= CAN_OPT_SILENT;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ reg_val |= CAN_OPT_LBACK;
+
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+ iowrite32(reg_val, &priv->regs->opt);
+}
+
+static void pch_can_set_int_custom(struct pch_can_priv *priv)
+{
+ /* Clearing the IE, SIE and EIE bits of Can control register. */
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+
+ /* Appropriately setting them. */
+ pch_can_bit_set(&priv->regs->cont,
+ ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
+}
+
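+/*
+ * E.g. (illustrative): int_enables == MSK_CTRL_IE_SIE_EIE (0x07) maps
+ * to CAN_CTRL_IE_SIE_EIE (0x0e) after the << 1 shift in
+ * pch_can_set_int_custom() above.
+ */
+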
+/* This function retrieves the enabled interrupts for the CAN device. */
+static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
+{
+ /* Obtaining the status of IE, SIE and EIE interrupt bits. */
+ *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+}
+
+static void pch_can_set_int_enables(struct pch_can_priv *priv,
+ enum pch_can_mode interrupt_no)
+{
+ switch (interrupt_no) {
+ case PCH_CAN_ENABLE:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
+ break;
+
+ case PCH_CAN_DISABLE:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+ break;
+
+ case PCH_CAN_ALL:
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ break;
+
+ case PCH_CAN_NONE:
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+ break;
+
+ default:
+ dev_err(&priv->ndev->dev, "Invalid interrupt number.\n");
+ break;
+ }
+}
+
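+/* Kick off an IFx command by writing the message object number to the
+ * command request register, then poll until the hardware clears the
+ * BUSY flag (for at most COUNTER_LIMIT microseconds). */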
+static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
+{
+ u32 counter = COUNTER_LIMIT;
+ u32 ifx_creq;
+
+ iowrite32(num, creq_addr);
+ while (counter) {
+ ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
+ if (!ifx_creq)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+ pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
+}
+
+static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ /* Reading the receive buffer data from RAM to Interface1 registers */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+ /* Setting the IF1 CMASK register to access the MsgVal and RxIE bits */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+
+ if (set == ENABLE) {
+ /* Setting the MsgVal and RxIE bits */
+ pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+ pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+
+ } else if (set == DISABLE) {
+ /* Resetting the MsgVal and RxIE bits */
+ pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+ }
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_rx_enable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Walk the message objects and enable those configured as receivers. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, ENABLE);
+ }
+}
+
+static void pch_can_rx_disable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Walk the message objects and disable those configured as receivers. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX)
+ pch_can_set_rx_enable(priv, i + 1, DISABLE);
+ }
+}
+
+static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ /* Reading the Msg buffer from Message RAM to Interface2 registers. */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+ /* Setting the IF2 CMASK register for accessing the
+  * MsgVal and TxIE bits */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+
+ if (set == ENABLE) {
+ /* Setting the MsgVal and TxIE bits */
+ pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+ pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ } else if (set == DISABLE) {
+ /* Resetting the MsgVal and TxIE bits. */
+ pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
+ pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
+ }
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_tx_enable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Walk the message objects and enable those configured as transmitters. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, ENABLE);
+ }
+}
+
+static void pch_can_tx_disable_all(struct pch_can_priv *priv)
+{
+ int i;
+
+ /* Walk the message objects and disable those configured as transmitters. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_set_tx_enable(priv, i + 1, DISABLE);
+ }
+}
+
+static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 *enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+
+ if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->if1_mcont)) &
+ CAN_IF_MCONT_RXIE))
+ *enable = ENABLE;
+ else
+ *enable = DISABLE;
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
+ u32 *enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
+
+ if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
+ ((ioread32(&priv->regs->if2_mcont)) &
+ CAN_IF_MCONT_TXIE)) {
+ *enable = ENABLE;
+ } else {
+ *enable = DISABLE;
+ }
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
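+/* Returns the pending interrupt source: a message object number,
+ * CAN_STATUS_INT for a status interrupt, or 0 if nothing is pending. */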
+static int pch_can_int_pending(struct pch_can_priv *priv)
+{
+ return ioread32(&priv->regs->intr) & 0xffff;
+}
+
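+/* Link an Rx object into the FIFO chain by clearing its EoB bit (ENABLE),
+ * or terminate the chain at that object by setting EoB (DISABLE). */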
+static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
+ if (set == ENABLE)
+ pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+ else
+ pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, u32 *link)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
+
+ if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
+ *link = DISABLE;
+ else
+ *link = ENABLE;
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
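+/* Reset every message object to a clean, zeroed state, using the IF1
+ * register set for the Rx objects and IF2 for the Tx objects. */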
+static void pch_can_clear_buffers(struct pch_can_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
+ iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
+ iowrite32(0xffff, &priv->regs->if1_mask1);
+ iowrite32(0xffff, &priv->regs->if1_mask2);
+ iowrite32(0x0, &priv->regs->if1_id1);
+ iowrite32(0x0, &priv->regs->if1_id2);
+ iowrite32(0x0, &priv->regs->if1_mcont);
+ iowrite32(0x0, &priv->regs->if1_dataa1);
+ iowrite32(0x0, &priv->regs->if1_dataa2);
+ iowrite32(0x0, &priv->regs->if1_datab1);
+ iowrite32(0x0, &priv->regs->if1_datab2);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ }
+
+ for (; i < PCH_OBJ_NUM; i++) {
+ iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
+ iowrite32(0xffff, &priv->regs->if2_mask1);
+ iowrite32(0xffff, &priv->regs->if2_mask2);
+ iowrite32(0x0, &priv->regs->if2_id1);
+ iowrite32(0x0, &priv->regs->if2_id2);
+ iowrite32(0x0, &priv->regs->if2_mcont);
+ iowrite32(0x0, &priv->regs->if2_dataa1);
+ iowrite32(0x0, &priv->regs->if2_dataa2);
+ iowrite32(0x0, &priv->regs->if2_datab1);
+ iowrite32(0x0, &priv->regs->if2_datab2);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ }
+}
+
+static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ iowrite32(CAN_CMASK_RX_TX_GET,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+
+ iowrite32(0x0, &priv->regs->if1_id1);
+ iowrite32(0x0, &priv->regs->if1_id2);
+
+ pch_can_bit_set(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_UMASK);
+
+ /* Clear EoB (FIFO mode) for all but the last Rx object */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_EOB);
+ /* In FIFO mode, the EoB bit of the last Rx object must be set */
+ if (i == (PCH_RX_OBJ_NUM - 1))
+ pch_can_bit_set(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_EOB);
+
+ iowrite32(0, &priv->regs->if1_mask1);
+ pch_can_bit_clear(&priv->regs->if1_mask2,
+ 0x1fff | CAN_MASK2_MDIR_MXTD);
+
+ /* Setting CMASK for writing */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+ } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
+ iowrite32(CAN_CMASK_RX_TX_GET,
+ &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+
+ /* Setting the DIR bit for transmission */
+ iowrite32(0x0, &priv->regs->if2_id1);
+ iowrite32(0x0, &priv->regs->if2_id2);
+ pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+ /* Setting EOB bit for transmitter */
+ iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
+
+ pch_can_bit_set(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_UMASK);
+
+ iowrite32(0, &priv->regs->if2_mask1);
+ pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
+
+ /* Setting CMASK for writing */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
+ CAN_CMASK_ARB | CAN_CMASK_CTRL,
+ &priv->regs->if2_cmask);
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+ }
+ }
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+}
+
+static void pch_can_init(struct pch_can_priv *priv)
+{
+ /* Stopping the CAN device. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Clearing all the message object buffers. */
+ pch_can_clear_buffers(priv);
+
+ /* Configuring each message object as either an Rx or a Tx object. */
+ pch_can_config_rx_tx_buffers(priv);
+
+ /* Enabling the interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_ALL);
+}
+
+static void pch_can_release(struct pch_can_priv *priv)
+{
+ /* Stopping the CAN device. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Disabling the interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+ /* Disabling all the receive objects. */
+ pch_can_rx_disable_all(priv);
+
+ /* Disabling all the transmit objects. */
+ pch_can_tx_disable_all(priv);
+}
+
+/* This function clears interrupt(s) from the CAN device. */
+static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
+{
+ if (mask == CAN_STATUS_INT) {
+ ioread32(&priv->regs->stat);
+ return;
+ }
+
+ /* Clear interrupt for transmit object */
+ if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
+ /* Setting CMASK for clearing interrupts for
+  * frame transmission. */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ &priv->regs->if2_cmask);
+
+ /* Resetting the ID registers. */
+ pch_can_bit_set(&priv->regs->if2_id2,
+ CAN_ID2_DIR | (0x7ff << 2));
+ iowrite32(0x0, &priv->regs->if2_id1);
+
+ /* Clearing NewDat, TxRqst & IntPnd */
+ pch_can_bit_clear(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+ CAN_IF_MCONT_TXRQXT);
+ pch_can_check_if_busy(&priv->regs->if2_creq, mask);
+ } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+ /* Setting CMASK for clearing the reception interrupts. */
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
+ &priv->regs->if1_cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+
+ pch_can_check_if_busy(&priv->regs->if1_creq, mask);
+ }
+}
+
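+/* Returns a bitmask of the message objects with a pending transmission
+ * request (TREQ1 covers objects 1-16, TREQ2 objects 17-32). */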
+static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+{
+ return (ioread32(&priv->regs->treq1) & 0xffff) |
+ ((ioread32(&priv->regs->treq2) & 0xffff) << 16);
+}
+
+static void pch_can_reset(struct pch_can_priv *priv)
+{
+ /* write to sw reset register */
+ iowrite32(1, &priv->regs->srst);
+ iowrite32(0, &priv->regs->srst);
+}
+
+static void pch_can_error(struct net_device *ndev, u32 status)
+{
+ struct sk_buff *skb;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf;
+ u32 errc;
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ enum can_state state = priv->can.state;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return;
+
+ if (status & PCH_BUS_OFF) {
+ pch_can_tx_disable_all(priv);
+ pch_can_rx_disable_all(priv);
+ state = CAN_STATE_BUS_OFF;
+ cf->can_id |= CAN_ERR_BUSOFF;
+ can_bus_off(ndev);
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+ dev_err(&ndev->dev, "%s -> Bus Off occurres.\n", __func__);
+ }
+
+ /* Warning interrupt. */
+ if (status & PCH_EWARN) {
+ state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ cf->can_id |= CAN_ERR_CRTL;
+ errc = ioread32(&priv->regs->errc);
+ if (((errc & CAN_REC) >> 8) > 96)
+ cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
+ if ((errc & CAN_TEC) > 96)
+ cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
+ dev_warn(&ndev->dev,
+ "%s -> Error Counter is more than 96.\n", __func__);
+ }
+ /* Error passive interrupt. */
+ if (status & PCH_EPASSIV) {
+ priv->can.can_stats.error_passive++;
+ state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ errc = ioread32(&priv->regs->errc);
+ if (((errc & CAN_REC) >> 8) > 127)
+ cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
+ if ((errc & CAN_TEC) > 127)
+ cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
+ dev_err(&ndev->dev,
+ "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
+ }
+
+ if (status & PCH_LEC_ALL) {
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+ switch (status & PCH_LEC_ALL) {
+ case PCH_STUF_ERR:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case PCH_FORM_ERR:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case PCH_ACK_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
+ CAN_ERR_PROT_LOC_ACK_DEL;
+ break;
+ case PCH_BIT1_ERR:
+ case PCH_BIT0_ERR:
+ cf->data[2] |= CAN_ERR_PROT_BIT;
+ break;
+ case PCH_CRC_ERR:
+ cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+ CAN_ERR_PROT_LOC_CRC_DEL;
+ break;
+ default:
+ iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
+ break;
+ }
+
+ }
+
+ priv->can.state = state;
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
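+/* Top half: mask all CAN interrupts and defer the work to NAPI. */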
+static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ pch_can_set_int_enables(priv, PCH_CAN_NONE);
+
+ napi_schedule(&priv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat)
+{
+ u32 reg;
+ canid_t id;
+ u32 ide;
+ u32 rtr;
+ int i, j, k;
+ int rcv_pkts = 0;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+
+ /* Reading the message object from the Message RAM */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
+
+ /* Reading the MCONT register. */
+ reg = ioread32(&priv->regs->if1_mcont);
+ reg &= 0xffff;
+
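+ /* Walk the Rx FIFO from the interrupting object until EoB is seen. */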
+ for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
+ /* If MsgLost bit set. */
+ if (reg & CAN_IF_MCONT_MSGLOST) {
+ dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_MSGLOST);
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
+ &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k);
+
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ priv->can.can_stats.error_passive++;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ netif_receive_skb(skb);
+ rcv_pkts++;
+ goto RX_NEXT;
+ }
+ if (!(reg & CAN_IF_MCONT_NEWDAT))
+ goto RX_NEXT;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Get Received data */
+ ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
+ if (ide) {
+ id = (ioread32(&priv->regs->if1_id1) & 0xffff);
+ id |= (((ioread32(&priv->regs->if1_id2)) &
+ 0x1fff) << 16);
+ cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ } else {
+ id = (((ioread32(&priv->regs->if1_id2)) &
+ (CAN_SFF_MASK << 2)) >> 2);
+ cf->can_id = (id & CAN_SFF_MASK);
+ }
+
+ rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR);
+ if (rtr) {
+ cf->can_dlc = 0;
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
+ 0x0f);
+ }
+
+ for (i = 0, j = 0; i < cf->can_dlc; j++) {
+ reg = ioread32(&priv->regs->if1_dataa1 + j*4);
+ cf->data[i++] = cpu_to_le32(reg & 0xff);
+ if (i == cf->can_dlc)
+ break;
+ cf->data[i++] = cpu_to_le32((reg >> 8) & 0xff);
+ }
+
+ netif_receive_skb(skb);
+ rcv_pkts++;
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ if (k < PCH_FIFO_THRESH) {
+ iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
+ CAN_CMASK_ARB, &priv->regs->if1_cmask);
+
+ /* Clearing the Dir bit. */
+ pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+
+ /* Clearing NewDat & IntPnd */
+ pch_can_bit_clear(&priv->regs->if1_mcont,
+ CAN_IF_MCONT_INTPND);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k);
+ } else if (k > PCH_FIFO_THRESH) {
+ pch_can_int_clr(priv, k);
+ } else if (k == PCH_FIFO_THRESH) {
+ int cnt;
+ for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
+ pch_can_int_clr(priv, cnt+1);
+ }
+RX_NEXT:
+ /* Reading the message object from the Message RAM */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
+ pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
+ reg = ioread32(&priv->regs->if1_mcont);
+ }
+
+ return rcv_pkts;
+}
+
+static int pch_can_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct net_device *ndev = napi->dev;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &(priv->ndev->stats);
+ u32 dlc;
+ u32 int_stat;
+ int rcv_pkts = 0;
+ u32 reg_stat;
+ unsigned long flags;
+
+ int_stat = pch_can_int_pending(priv);
+ if (!int_stat)
+ return 0;
+
+INT_STAT:
+ if (int_stat == CAN_STATUS_INT) {
+ reg_stat = ioread32(&priv->regs->stat);
+ if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
+ if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
+ pch_can_error(ndev, reg_stat);
+ }
+
+ if (reg_stat & PCH_TX_OK) {
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq,
+ ioread32(&priv->regs->intr));
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
+ }
+
+ if (reg_stat & PCH_RX_OK)
+ pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
+
+ int_stat = pch_can_int_pending(priv);
+ if (int_stat == CAN_STATUS_INT)
+ goto INT_STAT;
+ }
+
+MSG_OBJ:
+ if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ rcv_pkts = pch_can_rx_normal(ndev, int_stat);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ if (rcv_pkts < 0)
+ return 0;
+ } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
+ if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
+ /* Handle transmission interrupt */
+ can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+ iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
+ &priv->regs->if2_cmask);
+ dlc = ioread32(&priv->regs->if2_mcont) &
+ CAN_IF_MCONT_DLC;
+ pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+ if (dlc > 8)
+ dlc = 8;
+ stats->tx_bytes += dlc;
+ stats->tx_packets++;
+ }
+ }
+
+ int_stat = pch_can_int_pending(priv);
+ if (int_stat == CAN_STATUS_INT)
+ goto INT_STAT;
+ else if (int_stat >= 1 && int_stat <= 32)
+ goto MSG_OBJ;
+
+ napi_complete(napi);
+ pch_can_set_int_enables(priv, PCH_CAN_ALL);
+
+ return rcv_pkts;
+}
+
+static int pch_set_bittiming(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ u32 canbit;
+ u32 bepe;
+ u32 brp;
+
+ /* Setting the CCE bit for accessing the Can Timing register. */
+ pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
+
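+ /* bt->tq is in ns; one CAN clock tick lasts 10^9 / PCH_CAN_CLK ns. */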
+ brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
+ canbit = brp & MSK_BITT_BRP;
+ canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
+ canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
+ canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
+ bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+ iowrite32(canbit, &priv->regs->bitt);
+ iowrite32(bepe, &priv->regs->brpe);
+ pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+
+ return 0;
+}
+
+static void pch_can_start(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ if (priv->can.state != CAN_STATE_STOPPED)
+ pch_can_reset(priv);
+
+ pch_set_bittiming(ndev);
+ pch_can_set_optmode(priv);
+
+ pch_can_tx_enable_all(priv);
+ pch_can_rx_enable_all(priv);
+
+ /* Setting the CAN to run mode. */
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ return;
+}
+
+static int pch_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ int ret = 0;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ pch_can_start(ndev);
+ netif_wake_queue(ndev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int pch_can_open(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ int retval;
+
+ retval = pci_enable_msi(priv->dev);
+ if (retval) {
+ dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
+ priv->use_msi = 0;
+ } else {
+ dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
+ priv->use_msi = 1;
+ }
+
+ /* Registering the interrupt handler. */
+ retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
+ ndev->name, ndev);
+ if (retval) {
+ dev_err(&ndev->dev, "request_irq failed.\n");
+ goto req_irq_err;
+ }
+
+ /* Open common can device */
+ retval = open_candev(ndev);
+ if (retval) {
+ dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval);
+ goto err_open_candev;
+ }
+
+ pch_can_init(priv);
+ pch_can_start(ndev);
+ napi_enable(&priv->napi);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_open_candev:
+ free_irq(priv->dev->irq, ndev);
+req_irq_err:
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
+
+ pch_can_release(priv);
+
+ return retval;
+}
+
+static int pch_close(struct net_device *ndev)
+{
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+ pch_can_release(priv);
+ free_irq(priv->dev->irq, ndev);
+ if (priv->use_msi)
+ pci_disable_msi(priv->dev);
+ close_candev(ndev);
+ priv->can.state = CAN_STATE_STOPPED;
+ return 0;
+}
+
+static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
+{
+ u32 buffer_status = 0;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ /* Getting the message object status. */
+ buffer_status = (u32) pch_can_get_buffer_status(priv);
+
+ return buffer_status & obj_id;
+}
+
+
+static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ int i, j;
+ unsigned long flags;
+ struct pch_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ int tx_buffer_avail = 0;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* All Tx objects in use */
+ while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
+ PCH_RX_OBJ_NUM)))
+ udelay(500);
+
+ priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Wrap back to the first Tx object */
+ tx_buffer_avail = priv->tx_obj;
+ } else {
+ tx_buffer_avail = priv->tx_obj;
+ }
+ priv->tx_obj++;
+
+ /* Acquiring the lock. */
+ spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+
+ /* Reading the Msg Obj from the Msg RAM to the Interface register. */
+ iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
+ pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+ /* Setting the CMASK register. */
+ pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+
+ /* Setting the frame ID (extended or standard). */
+ pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
+ pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
+ if (cf->can_id & CAN_EFF_FLAG) {
+ pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
+ pch_can_bit_set(&priv->regs->if2_id2,
+ ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+ } else {
+ pch_can_bit_set(&priv->regs->if2_id1, 0);
+ pch_can_bit_set(&priv->regs->if2_id2,
+ (cf->can_id & CAN_SFF_MASK) << 2);
+ }
+
+ /* If a remote frame is to be transmitted, clear the DIR bit. */
+ if (cf->can_id & CAN_RTR_FLAG)
+ pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+
+ for (i = 0, j = 0; i < cf->can_dlc; j++) {
+ iowrite32(le32_to_cpu(cf->data[i++]),
+ (&priv->regs->if2_dataa1) + j*4);
+ if (i == cf->can_dlc)
+ break;
+ iowrite32(le32_to_cpu(cf->data[i++] << 8),
+ (&priv->regs->if2_dataa1) + j*4);
+ }
+
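+ /* The echo skb slot is the object's 0-based offset in the Tx window. */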
+ can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
+
+ /* Updating the size of the data. */
+ pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
+ pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
+
+ /* Clearing IntPend, NewDat & TxRqst */
+ pch_can_bit_clear(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
+ CAN_IF_MCONT_TXRQXT);
+
+ /* Setting NewDat, TxRqst bits */
+ pch_can_bit_set(&priv->regs->if2_mcont,
+ CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+
+ pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+
+ spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops pch_can_netdev_ops = {
+ .ndo_open = pch_can_open,
+ .ndo_stop = pch_close,
+ .ndo_start_xmit = pch_xmit,
+};
+
+static void __devexit pch_can_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(priv->ndev);
+ free_candev(priv->ndev);
+ pci_iounmap(pdev, priv->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ pch_can_reset(priv);
+}
+
+#ifdef CONFIG_PM
+static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i; /* Counter variable. */
+ int retval; /* Return value. */
+ u32 buf_stat; /* Variable for reading the transmit buffer status. */
+ u32 counter = 0xFFFFFF;
+
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ /* Stop the CAN controller */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Indicate that we are about to enter/are in suspend */
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ /* Wait for all pending transmissions to complete. */
+ while (counter) {
+ buf_stat = pch_can_get_buffer_status(priv);
+ if (!buf_stat)
+ break;
+ counter--;
+ udelay(1);
+ }
+ if (!counter)
+ dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
+
+ /* Save interrupt configuration and then disable them */
+ pch_can_get_int_enables(priv, &(priv->int_enables));
+ pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+ /* Save Tx buffer enable state */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX)
+ pch_can_get_tx_enable(priv, i + 1,
+ &(priv->tx_enable[i]));
+ }
+
+ /* Disable all Transmit buffers */
+ pch_can_tx_disable_all(priv);
+
+ /* Save Rx buffer enable state */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ pch_can_get_rx_enable(priv, i + 1,
+ &(priv->rx_enable[i]));
+ pch_can_get_rx_buffer_link(priv, i + 1,
+ &(priv->rx_link[i]));
+ }
+ }
+
+ /* Disable all Receive buffers */
+ pch_can_rx_disable_all(priv);
+ retval = pci_save_state(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "pci_save_state failed.\n");
+ } else {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ }
+
+ return retval;
+}
+
+static int pch_can_resume(struct pci_dev *pdev)
+{
+ int i; /* Counter variable. */
+ int retval; /* Return variable. */
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "pci_enable_device failed.\n");
+ return retval;
+ }
+
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Disabling all interrupts. */
+ pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
+
+ /* Setting the CAN device in Stop Mode. */
+ pch_can_set_run_mode(priv, PCH_CAN_STOP);
+
+ /* Configuring the transmit and receive buffers. */
+ pch_can_config_rx_tx_buffers(priv);
+
+ /* Restore the CAN state */
+ pch_set_bittiming(dev);
+
+ /* Listen/Active */
+ pch_can_set_optmode(priv);
+
+ /* Enabling the transmit buffer. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_TX) {
+ pch_can_set_tx_enable(priv, i + 1,
+ priv->tx_enable[i]);
+ }
+ }
+
+ /* Configuring the receive buffer and enabling them. */
+ for (i = 0; i < PCH_OBJ_NUM; i++) {
+ if (priv->msg_obj[i] == MSG_OBJ_RX) {
+ /* Restore buffer link */
+ pch_can_set_rx_buffer_link(priv, i + 1,
+ priv->rx_link[i]);
+
+ /* Restore buffer enables */
+ pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
+ }
+ }
+
+ /* Enable CAN Interrupts */
+ pch_can_set_int_custom(priv);
+
+ /* Restore Run Mode */
+ pch_can_set_run_mode(priv, PCH_CAN_RUN);
+
+ return retval;
+}
+#else
+#define pch_can_suspend NULL
+#define pch_can_resume NULL
+#endif
+
+static int pch_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct pch_can_priv *priv = netdev_priv(dev);
+
+ bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
+ bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+
+ return 0;
+}
+
+static int __devinit pch_can_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *ndev;
+ struct pch_can_priv *priv;
+ int rc;
+ int index;
+ void __iomem *addr;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed pci_enable_device %d\n", rc);
+ goto probe_exit_endev;
+ }
+
+ rc = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed pci_request_regions %d\n", rc);
+ goto probe_exit_pcireq;
+ }
+
+ addr = pci_iomap(pdev, 1, 0);
+ if (!addr) {
+ rc = -EIO;
+ dev_err(&pdev->dev, "Failed pci_iomap\n");
+ goto probe_exit_ipmap;
+ }
+
+ ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
+ if (!ndev) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "Failed alloc_candev\n");
+ goto probe_exit_alloc_candev;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->regs = addr;
+ priv->dev = pdev;
+ priv->can.bittiming_const = &pch_can_bittiming_const;
+ priv->can.do_set_mode = pch_can_do_set_mode;
+ priv->can.do_get_berr_counter = pch_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_LOOPBACK;
+ priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
+
+ ndev->irq = pdev->irq;
+ ndev->flags |= IFF_ECHO;
+
+ pci_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ ndev->netdev_ops = &pch_can_netdev_ops;
+
+ priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
+ for (index = 0; index < PCH_RX_OBJ_NUM;)
+ priv->msg_obj[index++] = MSG_OBJ_RX;
+
+ for (; index < PCH_OBJ_NUM;)
+ priv->msg_obj[index++] = MSG_OBJ_TX;
+
+ netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
+
+ rc = register_candev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed register_candev %d\n", rc);
+ goto probe_exit_reg_candev;
+ }
+
+ return 0;
+
+probe_exit_reg_candev:
+ free_candev(ndev);
+probe_exit_alloc_candev:
+ pci_iounmap(pdev, addr);
+probe_exit_ipmap:
+ pci_release_regions(pdev);
+probe_exit_pcireq:
+ pci_disable_device(pdev);
+probe_exit_endev:
+ return rc;
+}
+
+static struct pci_driver pch_can_pci_driver = {
+ .name = "pch_can",
+ .id_table = pch_pci_tbl,
+ .probe = pch_can_probe,
+ .remove = __devexit_p(pch_can_remove),
+ .suspend = pch_can_suspend,
+ .resume = pch_can_resume,
+};
+
+static int __init pch_can_pci_init(void)
+{
+ return pci_register_driver(&pch_can_pci_driver);
+}
+module_init(pch_can_pci_init);
+
+static void __exit pch_can_pci_exit(void)
+{
+ pci_unregister_driver(&pch_can_pci_driver);
+}
+module_exit(pch_can_pci_exit);
+
+MODULE_DESCRIPTION("Controller Area Network Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.94");
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ae3505a..6fdc031 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -58,4 +58,16 @@ config CAN_PLX_PCI
- esd CAN-PCIe/2000
- Marathon CAN-bus-PCI card (http://www.marathon.ru/)
- TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config CAN_TSCAN1
+ tristate "TS-CAN1 PC104 boards"
+ depends on ISA
+ help
+ This driver is for Technologic Systems' TS-CAN1 PC104 boards.
+ http://www.embeddedarm.com/products/board-detail.php?product=TS-CAN1
+ The driver supports multiple boards and automatically configures them:
+ PLD IO base addresses are read from jumpers JP1 and JP2,
+ IRQ numbers are read from jumpers JP4 and JP5,
+ SJA1000 IO base addresses are chosen heuristically (first that works).
+
endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index ce92455..2c591eb 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -9,5 +9,6 @@ obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
+obj-$(CONFIG_CAN_TSCAN1) += tscan1.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/tscan1.c b/drivers/net/can/sja1000/tscan1.c
new file mode 100644
index 0000000..9756099
--- /dev/null
+++ b/drivers/net/can/sja1000/tscan1.c
@@ -0,0 +1,216 @@
+/*
+ * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards
+ *
+ * Copyright 2010 Andre B. Oliveira
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * References:
+ * - Getting started with TS-CAN1, Technologic Systems, Jun 2009
+ * http://www.embeddedarm.com/documentation/ts-can1-manual.pdf
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/isa.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "sja1000.h"
+
+MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards");
+MODULE_AUTHOR("Andre B. Oliveira <anbadeol@gmail.com>");
+MODULE_LICENSE("GPL");
+
+/* Maximum number of boards (one in each JP1:JP2 setting of IO address) */
+#define TSCAN1_MAXDEV 4
+
+/* PLD registers address offsets */
+#define TSCAN1_ID1 0
+#define TSCAN1_ID2 1
+#define TSCAN1_VERSION 2
+#define TSCAN1_LED 3
+#define TSCAN1_PAGE 4
+#define TSCAN1_MODE 5
+#define TSCAN1_JUMPERS 6
+
+/* PLD board identifier registers magic values */
+#define TSCAN1_ID1_VALUE 0xf6
+#define TSCAN1_ID2_VALUE 0xb9
+
+/* PLD mode register SJA1000 IO enable bit */
+#define TSCAN1_MODE_ENABLE 0x40
+
+/* PLD jumpers register bits */
+#define TSCAN1_JP4 0x10
+#define TSCAN1_JP5 0x20
+
+/* PLD IO base addresses start */
+#define TSCAN1_PLD_ADDRESS 0x150
+
+/* PLD register space size */
+#define TSCAN1_PLD_SIZE 8
+
+/* SJA1000 register space size */
+#define TSCAN1_SJA1000_SIZE 32
+
+/* SJA1000 crystal frequency (16MHz) */
+#define TSCAN1_SJA1000_XTAL 16000000
+
+/* SJA1000 IO base addresses */
+static const unsigned short tscan1_sja1000_addresses[] __devinitconst = {
+ 0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320
+};
+
+/* Read SJA1000 register */
+static u8 tscan1_read(const struct sja1000_priv *priv, int reg)
+{
+ return inb((unsigned long)priv->reg_base + reg);
+}
+
+/* Write SJA1000 register */
+static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val)
+{
+ outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+/* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */
+static int __devinit tscan1_probe(struct device *dev, unsigned id)
+{
+ struct net_device *netdev;
+ struct sja1000_priv *priv;
+ unsigned long pld_base, sja1000_base;
+ int irq, i;
+
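+ /* Each JP2:JP1 jumper setting selects one TSCAN1_PLD_SIZE-byte PLD
+  * window starting at TSCAN1_PLD_ADDRESS. */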
+ pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE;
+ if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev)))
+ return -EBUSY;
+
+ if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE ||
+ inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) {
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENODEV;
+ }
+
+ switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) {
+ case TSCAN1_JP4:
+ irq = 6;
+ break;
+ case TSCAN1_JP5:
+ irq = 7;
+ break;
+ case TSCAN1_JP4 | TSCAN1_JP5:
+ irq = 5;
+ break;
+ default:
+ dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n");
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -EINVAL;
+ }
+
+ netdev = alloc_sja1000dev(0);
+ if (!netdev) {
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENOMEM;
+ }
+
+ dev_set_drvdata(dev, netdev);
+ SET_NETDEV_DEV(netdev, dev);
+
+ netdev->base_addr = pld_base;
+ netdev->irq = irq;
+
+ priv = netdev_priv(netdev);
+ priv->read_reg = tscan1_read;
+ priv->write_reg = tscan1_write;
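+ /* The SJA1000 feeds its CAN core with half the crystal frequency. */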
+ priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2;
+ priv->cdr = CDR_CBP | CDR_CLK_OFF;
+ priv->ocr = OCR_TX0_PUSHPULL;
+
+ /* Select the first SJA1000 IO address that is free and that works */
+ for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) {
+ sja1000_base = tscan1_sja1000_addresses[i];
+ if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE,
+ dev_name(dev)))
+ continue;
+
+ /* Set SJA1000 IO base address and enable it */
+ outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE);
+
+ priv->reg_base = (void __iomem *)sja1000_base;
+ if (!register_sja1000dev(netdev)) {
+ /* SJA1000 probe succeeded; turn LED off and return */
+ outb(0, pld_base + TSCAN1_LED);
+ netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n",
+ pld_base, sja1000_base, irq);
+ return 0;
+ }
+
+ /* SJA1000 probe failed; release and try next address */
+ outb(0, pld_base + TSCAN1_MODE);
+ release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+ }
+
+ dev_err(dev, "failed to assign SJA1000 IO address\n");
+ dev_set_drvdata(dev, NULL);
+ free_sja1000dev(netdev);
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+ return -ENXIO;
+}
+
+static int __devexit tscan1_remove(struct device *dev, unsigned id /*unused*/)
+{
+ struct net_device *netdev;
+ struct sja1000_priv *priv;
+ unsigned long pld_base, sja1000_base;
+
+ netdev = dev_get_drvdata(dev);
+ unregister_sja1000dev(netdev);
+ dev_set_drvdata(dev, NULL);
+
+ priv = netdev_priv(netdev);
+ pld_base = netdev->base_addr;
+ sja1000_base = (unsigned long)priv->reg_base;
+
+ outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */
+
+ release_region(sja1000_base, TSCAN1_SJA1000_SIZE);
+ release_region(pld_base, TSCAN1_PLD_SIZE);
+
+ free_sja1000dev(netdev);
+
+ return 0;
+}
+
+static struct isa_driver tscan1_isa_driver = {
+ .probe = tscan1_probe,
+ .remove = __devexit_p(tscan1_remove),
+ .driver = {
+ .name = "tscan1",
+ },
+};
+
+static int __init tscan1_init(void)
+{
+ return isa_register_driver(&tscan1_isa_driver, TSCAN1_MAXDEV);
+}
+module_init(tscan1_init);
+
+static void __exit tscan1_exit(void)
+{
+ isa_unregister_driver(&tscan1_isa_driver);
+}
+module_exit(tscan1_exit);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index a04ce6a..046d846 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1266,11 +1266,13 @@ static int cxgb_up(struct adapter *adap)
}
if (!(adap->flags & QUEUES_BOUND)) {
- err = bind_qsets(adap);
- if (err) {
- CH_ERR(adap, "failed to bind qsets, err %d\n", err);
+ int ret = bind_qsets(adap);
+
+ if (ret < 0) {
+ CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
t3_intr_disable(adap);
free_irq_resources(adap);
+ err = ret;
goto out;
}
adap->flags |= QUEUES_BOUND;
@@ -3299,7 +3301,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->rx_offload = T3_RX_CSUM | T3_LRO;
pi->port_id = i;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 5d72bda..f9f6645 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -296,8 +296,10 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
if (d->skb) { /* an SGL is present */
if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev);
- if (d->eop)
+ if (d->eop) {
kfree_skb(d->skb);
+ d->skb = NULL;
+ }
}
++d;
if (++cidx == q->size) {
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index eaa49e4..3d4253d3 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -281,7 +281,6 @@ struct sge_rspq;
struct port_info {
struct adapter *adapter;
- struct vlan_group *vlan_grp;
u16 viid;
s16 xact_addr_filt; /* index of exact MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 87054e0..f50bc98 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -403,7 +403,7 @@ static int link_start(struct net_device *dev)
* that step explicitly.
*/
ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
- pi->vlan_grp != NULL, true);
+ !!(dev->features & NETIF_F_HW_VLAN_RX), true);
if (ret == 0) {
ret = t4_change_mac(pi->adapter, mb, pi->viid,
pi->xact_addr_filt, dev->dev_addr, true,
@@ -1881,7 +1881,24 @@ static int set_tso(struct net_device *dev, u32 value)
static int set_flags(struct net_device *dev, u32 flags)
{
- return ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH);
+ int err;
+ unsigned long old_feat = dev->features;
+
+ err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
+ ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+ if (err)
+ return err;
+
+ if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
+ const struct port_info *pi = netdev_priv(dev);
+
+ err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
+ true);
+ if (err)
+ dev->features = old_feat;
+ }
+ return err;
}
static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
@@ -2842,15 +2859,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct port_info *pi = netdev_priv(dev);
-
- pi->vlan_grp = grp;
- t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, -1, -1, -1,
- grp != NULL, true);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
@@ -2878,7 +2886,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
- .ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = cxgb_netpoll,
#endif
@@ -3658,7 +3665,6 @@ static int __devinit init_one(struct pci_dev *pdev,
pi->rx_offload = RX_CSO;
pi->port_id = i;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features |= NETIF_F_SG | TSO_FLAGS;
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 9967f3d..17022258 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1530,18 +1530,11 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
if (unlikely(pkt->vlan_ex)) {
- struct port_info *pi = netdev_priv(rxq->rspq.netdev);
- struct vlan_group *grp = pi->vlan_grp;
-
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
- if (likely(grp)) {
- ret = vlan_gro_frags(&rxq->rspq.napi, grp,
- ntohs(pkt->vlan));
- goto stats;
- }
}
ret = napi_gro_frags(&rxq->rspq.napi);
-stats: if (ret == GRO_HELD)
+ if (ret == GRO_HELD)
rxq->stats.lro_pkts++;
else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
rxq->stats.lro_merged++;
@@ -1608,16 +1601,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
skb_checksum_none_assert(skb);
if (unlikely(pkt->vlan_ex)) {
- struct vlan_group *grp = pi->vlan_grp;
-
+ __vlan_hwaccel_put_tag(skb, ntohs(pkt->vlan));
rxq->stats.vlan_ex++;
- if (likely(grp))
- vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
- else
- dev_kfree_skb_any(skb);
- } else
- netif_receive_skb(skb);
-
+ }
+ netif_receive_skb(skb);
return 0;
}
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5..d887a76 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev)
if (err)
return err;
set_bit(pi->port_id, &adapter->open_device_map);
- link_start(dev);
+ err = link_start(dev);
+ if (err)
+ return err;
netif_tx_start_all_queues(dev);
return 0;
}
@@ -814,40 +816,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
}
/*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
*/
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
{
+ unsigned int index = 0;
unsigned int naddr = 0;
const struct netdev_hw_addr *ha;
- for_each_dev_addr(dev, ha) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
+ for_each_dev_addr(dev, ha)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
return naddr;
}
/*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
*/
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
- const u8 **addr,
- unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+ const u8 **addr,
+ unsigned int offset,
+ unsigned int maxaddrs)
{
+ unsigned int index = 0;
unsigned int naddr = 0;
const struct netdev_hw_addr *ha;
- netdev_for_each_mc_addr(ha, dev) {
- addr[naddr++] = ha->addr;
- if (naddr >= maxaddrs)
- break;
- }
+ netdev_for_each_mc_addr(ha, dev)
+ if (index++ >= offset) {
+ addr[naddr++] = ha->addr;
+ if (naddr >= maxaddrs)
+ break;
+ }
return naddr;
}
@@ -860,16 +870,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
u64 mhash = 0;
u64 uhash = 0;
bool free = true;
- u16 filt_idx[7];
+ unsigned int offset, naddr;
const u8 *addr[7];
- int ret, naddr = 0;
+ int ret;
const struct port_info *pi = netdev_priv(dev);
/* first do the secondary unicast addresses */
- naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
- if (naddr > 0) {
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, filt_idx, &uhash, sleep);
+ naddr, addr, NULL, &uhash, sleep);
if (ret < 0)
return ret;
@@ -877,12 +891,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
}
/* next set up the multicast addresses */
- naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
- if (naddr > 0) {
+ for (offset = 0; ; offset += naddr) {
+ naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+ ARRAY_SIZE(addr));
+ if (naddr == 0)
+ break;
+
ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
- naddr, addr, filt_idx, &mhash, sleep);
+ naddr, addr, NULL, &mhash, sleep);
if (ret < 0)
return ret;
+ free = false;
}
return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@ -1103,18 +1122,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
return 0;
}
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- /*
- * XXX For now just use the default hash but we probably want to
- * XXX look at other possibilities ...
- */
- return skb_tx_hash(dev, skb);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Poll all of our receive queues. This is called outside of normal interrupt
@@ -2075,6 +2082,22 @@ static int adap_init0(struct adapter *adapter)
}
/*
+ * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+ * 2.6.31 and later we can't call pci_reset_function() in order to
+ * issue an FLR because of a self-deadlock on the device semaphore.
+ * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+ * cases where they're needed -- for instance, some versions of KVM
+ * fail to reset "Assigned Devices" when the VM reboots. Therefore we
+ * use the firmware based reset in order to reset any per function
+ * state.
+ */
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
* Grab basic operational parameters. These will predominantly have
* been set up by the Physical Function Driver or will be hard coded
* into the adapter. We just have to live with them ... Note that
@@ -2417,7 +2440,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = {
.ndo_get_stats = cxgb4vf_get_stats,
.ndo_set_rx_mode = cxgb4vf_set_rxmode,
.ndo_set_mac_address = cxgb4vf_set_mac_addr,
- .ndo_select_queue = cxgb4vf_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb4vf_do_ioctl,
.ndo_change_mtu = cxgb4vf_change_mtu,
@@ -2600,7 +2622,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
pi->xact_addr_filt = -1;
pi->rx_offload = RX_CSO;
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
netdev->irq = pdev->irq;
netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
@@ -2625,7 +2646,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->do_ioctl = cxgb4vf_do_ioctl;
netdev->change_mtu = cxgb4vf_change_mtu;
netdev->set_mac_address = cxgb4vf_set_mac_addr;
- netdev->select_queue = cxgb4vf_select_queue;
#ifdef CONFIG_NET_POLL_CONTROLLER
netdev->poll_controller = cxgb4vf_poll_controller;
#endif
@@ -2844,6 +2864,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x4800, 0), /* T440-dbg */
CH_DEVICE(0x4801, 0), /* T420-cr */
CH_DEVICE(0x4802, 0), /* T422-cr */
+ CH_DEVICE(0x4803, 0), /* T440-cr */
+ CH_DEVICE(0x4804, 0), /* T420-bch */
+ CH_DEVICE(0x4805, 0), /* T440-bch */
+ CH_DEVICE(0x4806, 0), /* T460-ch */
+ CH_DEVICE(0x4807, 0), /* T420-so */
+ CH_DEVICE(0x4808, 0), /* T420-cx */
+ CH_DEVICE(0x4809, 0), /* T420-bt */
+ CH_DEVICE(0x480a, 0), /* T404-bt */
{ 0, }
};
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864d..ecf0770 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@ enum {
*/
RX_COPY_THRES = 256,
RX_PULL_LEN = 128,
-};
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+ /*
+ * Main body length for sk_buffs used for RX Ethernet packets with
+ * fragments. Should be >= RX_PULL_LEN but possibly bigger to give
+ * pskb_may_pull() some room.
+ */
+ RX_SKB_LEN = 512,
+};
/*
* Software state per TX descriptor.
@@ -1355,6 +1356,67 @@ out_free:
}
/**
+ * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ * @gl: the gather list
+ * @skb_len: size of sk_buff main body if it carries fragments
+ * @pull_len: amount of data to move to the sk_buff's main body
+ *
+ * Builds an sk_buff from the given packet gather list. Returns the
+ * sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+ unsigned int skb_len, unsigned int pull_len)
+{
+ struct sk_buff *skb;
+ struct skb_shared_info *ssi;
+
+ /*
+ * If the ingress packet is small enough, allocate an skb large enough
+ * for all of the data and copy it inline. Otherwise, allocate an skb
+ * with enough room to pull in the header and reference the rest of
+ * the data via the skb fragment list.
+ *
+ * Below we rely on RX_COPY_THRES being less than the smallest Rx
+ * buff! size, which is expected since buffers are at least
+ * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one
+ * fragment.
+ */
+ if (gl->tot_len <= RX_COPY_THRES) {
+ /* small packets have only one fragment */
+ skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, gl->tot_len);
+ skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+ } else {
+ skb = alloc_skb(skb_len, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto out;
+ __skb_put(skb, pull_len);
+ skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+ ssi = skb_shinfo(skb);
+ ssi->frags[0].page = gl->frags[0].page;
+ ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+ ssi->frags[0].size = gl->frags[0].size - pull_len;
+ if (gl->nfrags > 1)
+ memcpy(&ssi->frags[1], &gl->frags[1],
+ (gl->nfrags-1) * sizeof(skb_frag_t));
+ ssi->nr_frags = gl->nfrags;
+
+ skb->len = gl->tot_len;
+ skb->data_len = skb->len - pull_len;
+ skb->truesize += skb->data_len;
+
+ /* Get a reference for the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+ }
+
+out:
+ return skb;
+}
+
+/**
* t4vf_pktgl_free - free a packet gather list
* @gl: the gather list
*
@@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
struct port_info *pi;
- struct skb_shared_info *ssi;
const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
bool csum_ok = pkt->csum_calc && !pkt->err_vec;
- unsigned int len = be16_to_cpu(pkt->len);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
}
/*
- * If the ingress packet is small enough, allocate an skb large enough
- * for all of the data and copy it inline. Otherwise, allocate an skb
- * with enough room to pull in the header and reference the rest of
- * the data via the skb fragment list.
+ * Convert the Packet Gather List into an skb.
*/
- if (len <= RX_COPY_THRES) {
- /* small packets have only one fragment */
- skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, gl->frags[0].size);
- skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
- } else {
- skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
- if (!skb)
- goto nomem;
- __skb_put(skb, RX_PKT_PULL_LEN);
- skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
- ssi = skb_shinfo(skb);
- ssi->frags[0].page = gl->frags[0].page;
- ssi->frags[0].page_offset = (gl->frags[0].page_offset +
- RX_PKT_PULL_LEN);
- ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
- if (gl->nfrags > 1)
- memcpy(&ssi->frags[1], &gl->frags[1],
- (gl->nfrags-1) * sizeof(skb_frag_t));
- ssi->nr_frags = gl->nfrags;
- skb->len = len + PKTSHIFT;
- skb->data_len = skb->len - RX_PKT_PULL_LEN;
- skb->truesize += skb->data_len;
-
- /* Get a reference for the last page, we don't own it */
- get_page(gl->frags[gl->nfrags - 1].page);
+ skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+ if (unlikely(!skb)) {
+ t4vf_pktgl_free(gl);
+ rxq->stats.rx_drops++;
+ return 0;
}
-
__skb_pull(skb, PKTSHIFT);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(skb, rspq->idx);
@@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
netif_receive_skb(skb);
return 0;
-
-nomem:
- t4vf_pktgl_free(gl);
- rxq->stats.rx_drops++;
- return 0;
}
/**
@@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget)
}
len = RSPD_LEN(len);
}
+ gl.tot_len = len;
/*
* Gather packet fragments.
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d..a65c80ae 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
int __devinit t4vf_wait_dev_ready(struct adapter *);
int __devinit t4vf_port_init(struct adapter *, int);
+int t4vf_fw_reset(struct adapter *);
int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123..19520af 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx)
}
/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+ FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
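+
+/*
+ * Usage sketch for the new reset hook (the call site below is illustrative
+ * and not part of this patch):
+ *
+ *	err = t4vf_fw_reset(adapter);
+ *	if (err)
+ *		dev_err(adapter->pdev_dev,
+ *			"FW reset failed, err=%d\n", err);
+ */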
+
+/**
* t4vf_query_params - query FW or device parameters
* @adapter: the adapter
* @nparams: the number of parameters
@@ -995,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned int naddr, const u8 **addr, u16 *idx,
u64 *hash, bool sleep_ok)
{
- int i, ret;
+ int offset, ret = 0;
+ unsigned int nfilters = 0;
+ unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- struct fw_vi_mac_exact *p;
- size_t len16;
- if (naddr > ARRAY_SIZE(cmd.u.exact))
+ if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
return -EINVAL;
- len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
- u.exact[naddr]), 16);
- memset(&cmd, 0, sizeof(cmd));
- cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST |
- FW_CMD_WRITE |
- (free ? FW_CMD_EXEC : 0) |
- FW_VI_MAC_CMD_VIID(viid));
- cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
- FW_CMD_LEN16(len16));
+ for (offset = 0; offset < naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+ ? rem
+ : ARRAY_SIZE(cmd.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ (free ? FW_CMD_EXEC : 0) |
+ FW_VI_MAC_CMD_VIID(viid));
+ cmd.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+ FW_CMD_LEN16(len16));
+
+ for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx = cpu_to_be16(
+ FW_VI_MAC_CMD_VALID |
+ FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+ }
- for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx =
- cpu_to_be16(FW_VI_MAC_CMD_VALID |
- FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
- memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
- }
- ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
- if (ret)
- return ret;
-
- for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-
- if (idx)
- idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
- ? 0xffff
- : index);
- if (index < FW_CLS_TCAM_NUM_ENTRIES)
- ret++;
- else if (hash)
- *hash |= (1 << hash_mac_addr(addr[i]));
+ ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+ sleep_ok);
+ if (ret && ret != -ENOMEM)
+ break;
+
+ for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_GET(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[offset+i] =
+ (index >= FW_CLS_TCAM_NUM_ENTRIES
+ ? 0xffff
+ : index);
+ if (index < FW_CLS_TCAM_NUM_ENTRIES)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+ }
+
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
}
+
+ /*
+ * If there were no errors or we merely ran out of room in our MAC
+ * address arena, return the number of filters actually written.
+ */
+ if (ret == 0 || ret == -ENOMEM)
+ ret = nfilters;
return ret;
}
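+
+/*
+ * Consumption sketch: the return value is now the number of exact-match
+ * filters actually written, with any overflow folded into *hash.  An
+ * illustrative set_rx_mode-style caller (names are assumptions):
+ *
+ *	u64 mhash = 0;
+ *	int nfilt = t4vf_alloc_mac_filt(adapter, viid, true, naddr,
+ *					addrs, NULL, &mhash, true);
+ *	if (nfilt < 0)
+ *		return nfilt;		(hard mailbox failure)
+ *	if (nfilt < naddr)
+ *		program mhash into the VI's hash filter
+ */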
diff --git a/drivers/net/davinci_cpdma.c b/drivers/net/davinci_cpdma.c
new file mode 100644
index 0000000..e92b2b6
--- /dev/null
+++ b/drivers/net/davinci_cpdma.c
@@ -0,0 +1,965 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+
+#include "davinci_cpdma.h"
+
+/* DMA Registers */
+#define CPDMA_TXIDVER 0x00
+#define CPDMA_TXCONTROL 0x04
+#define CPDMA_TXTEARDOWN 0x08
+#define CPDMA_RXIDVER 0x10
+#define CPDMA_RXCONTROL 0x14
+#define CPDMA_SOFTRESET 0x1c
+#define CPDMA_RXTEARDOWN 0x18
+#define CPDMA_TXINTSTATRAW 0x80
+#define CPDMA_TXINTSTATMASKED 0x84
+#define CPDMA_TXINTMASKSET 0x88
+#define CPDMA_TXINTMASKCLEAR 0x8c
+#define CPDMA_MACINVECTOR 0x90
+#define CPDMA_MACEOIVECTOR 0x94
+#define CPDMA_RXINTSTATRAW 0xa0
+#define CPDMA_RXINTSTATMASKED 0xa4
+#define CPDMA_RXINTMASKSET 0xa8
+#define CPDMA_RXINTMASKCLEAR 0xac
+#define CPDMA_DMAINTSTATRAW 0xb0
+#define CPDMA_DMAINTSTATMASKED 0xb4
+#define CPDMA_DMAINTMASKSET 0xb8
+#define CPDMA_DMAINTMASKCLEAR 0xbc
+#define CPDMA_DMAINT_HOSTERR BIT(1)
+
+/* the following exist only if has_ext_regs is set */
+#define CPDMA_DMACONTROL 0x20
+#define CPDMA_DMASTATUS 0x24
+#define CPDMA_RXBUFFOFS 0x28
+#define CPDMA_EM_CONTROL 0x2c
+
+/* Descriptor mode bits */
+#define CPDMA_DESC_SOP BIT(31)
+#define CPDMA_DESC_EOP BIT(30)
+#define CPDMA_DESC_OWNER BIT(29)
+#define CPDMA_DESC_EOQ BIT(28)
+#define CPDMA_DESC_TD_COMPLETE BIT(27)
+#define CPDMA_DESC_PASS_CRC BIT(26)
+
+#define CPDMA_TEARDOWN_VALUE 0xfffffffc
+
+struct cpdma_desc {
+ /* hardware fields */
+ u32 hw_next;
+ u32 hw_buffer;
+ u32 hw_len;
+ u32 hw_mode;
+ /* software fields */
+ void *sw_token;
+ u32 sw_buffer;
+ u32 sw_len;
+};
+
+struct cpdma_desc_pool {
+ u32 phys;
+ void __iomem *iomap; /* ioremap map */
+ void *cpumap; /* dma_alloc map */
+ int desc_size, mem_size;
+ int num_desc, used_desc;
+ unsigned long *bitmap;
+ struct device *dev;
+ spinlock_t lock;
+};
+
+enum cpdma_state {
+ CPDMA_STATE_IDLE,
+ CPDMA_STATE_ACTIVE,
+ CPDMA_STATE_TEARDOWN,
+};
+
+static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
+
+struct cpdma_ctlr {
+ enum cpdma_state state;
+ struct cpdma_params params;
+ struct device *dev;
+ struct cpdma_desc_pool *pool;
+ spinlock_t lock;
+ struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
+};
+
+struct cpdma_chan {
+ enum cpdma_state state;
+ struct cpdma_ctlr *ctlr;
+ int chan_num;
+ spinlock_t lock;
+ struct cpdma_desc __iomem *head, *tail;
+ int count;
+ void __iomem *hdp, *cp, *rxfree;
+ u32 mask;
+ cpdma_handler_fn handler;
+ enum dma_data_direction dir;
+ struct cpdma_chan_stats stats;
+ /* offsets into dmaregs */
+ int int_set, int_clear, td;
+};
+
+/* The following make access to common cpdma_ctlr params more readable */
+#define dmaregs params.dmaregs
+#define num_chan params.num_chan
+
+/* various accessors */
+#define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld) __raw_readl((chan)->fld)
+#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld)
+#define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld)
+
+/*
+ * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
+ * emac) have dedicated on-chip memory for these descriptors. Some other
+ * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
+ * abstract out these details.
+ */
+static struct cpdma_desc_pool *
+cpdma_desc_pool_create(struct device *dev, u32 phys, int size, int align)
+{
+ int bitmap_size;
+ struct cpdma_desc_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return NULL;
+
+ spin_lock_init(&pool->lock);
+
+ pool->dev = dev;
+ pool->mem_size = size;
+ pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
+ pool->num_desc = size / pool->desc_size;
+
+ /* round up so a partial trailing word is still covered */
+ bitmap_size = DIV_ROUND_UP(pool->num_desc, BITS_PER_LONG) *
+ sizeof(long);
+ pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!pool->bitmap)
+ goto fail;
+
+ if (phys) {
+ pool->phys = phys;
+ pool->iomap = ioremap(phys, size);
+ } else {
+ pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
+ GFP_KERNEL);
+ pool->iomap = (void __force __iomem *)pool->cpumap;
+ }
+
+ if (pool->iomap)
+ return pool;
+
+fail:
+ kfree(pool->bitmap);
+ kfree(pool);
+ return NULL;
+}
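+
+/*
+ * Illustrative configurations (sizes and addresses are assumptions, not
+ * taken from any board file): an emac-style user hands the pool its
+ * dedicated descriptor RAM, while a DDR-backed user passes phys == 0 so
+ * the pool falls back to dma_alloc_coherent():
+ *
+ *	pool = cpdma_desc_pool_create(dev, iram_phys, SZ_8K, 16);
+ *	pool = cpdma_desc_pool_create(dev, 0, SZ_8K, 16);
+ */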
+
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+ unsigned long flags;
+
+ if (!pool)
+ return;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ WARN_ON(pool->used_desc);
+ kfree(pool->bitmap);
+ if (pool->cpumap) {
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+ pool->phys);
+ } else {
+ iounmap(pool->iomap);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+ kfree(pool);
+}
+
+static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc)
+{
+ if (!desc)
+ return 0;
+ return pool->phys + (__force dma_addr_t)desc -
+ (__force dma_addr_t)pool->iomap;
+}
+
+static inline struct cpdma_desc __iomem *
+desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
+{
+ return dma ? pool->iomap + dma - pool->phys : NULL;
+}
+
+static struct cpdma_desc __iomem *
+cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc)
+{
+ unsigned long flags;
+ int index;
+ struct cpdma_desc __iomem *desc = NULL;
+
+ spin_lock_irqsave(&pool->lock, flags);
+
+ index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0,
+ num_desc, 0);
+ if (index < pool->num_desc) {
+ bitmap_set(pool->bitmap, index, num_desc);
+ desc = pool->iomap + pool->desc_size * index;
+ pool->used_desc++;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, flags);
+ return desc;
+}
+
+static void cpdma_desc_free(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc, int num_desc)
+{
+ unsigned long flags, index;
+
+ index = ((unsigned long)desc - (unsigned long)pool->iomap) /
+ pool->desc_size;
+ spin_lock_irqsave(&pool->lock, flags);
+ bitmap_clear(pool->bitmap, index, num_desc);
+ pool->used_desc--;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
+{
+ struct cpdma_ctlr *ctlr;
+
+ ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr)
+ return NULL;
+
+ ctlr->state = CPDMA_STATE_IDLE;
+ ctlr->params = *params;
+ ctlr->dev = params->dev;
+ spin_lock_init(&ctlr->lock);
+
+ ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
+ ctlr->params.desc_mem_phys,
+ ctlr->params.desc_mem_size,
+ ctlr->params.desc_align);
+ if (!ctlr->pool) {
+ kfree(ctlr);
+ return NULL;
+ }
+
+ if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
+ ctlr->num_chan = CPDMA_MAX_CHANNELS;
+ return ctlr;
+}
+
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EBUSY;
+ }
+
+ if (ctlr->params.has_soft_reset) {
+ unsigned long timeout = jiffies + HZ/10;
+
+ dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
+ while (time_before(jiffies, timeout)) {
+ if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
+ break;
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ }
+
+ for (i = 0; i < ctlr->num_chan; i++) {
+ __raw_writel(0, ctlr->params.txhdp + 4 * i);
+ __raw_writel(0, ctlr->params.rxhdp + 4 * i);
+ __raw_writel(0, ctlr->params.txcp + 4 * i);
+ __raw_writel(0, ctlr->params.rxcp + 4 * i);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
+
+ ctlr->state = CPDMA_STATE_ACTIVE;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_start(ctlr->channels[i]);
+ }
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ ctlr->state = CPDMA_STATE_TEARDOWN;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_stop(ctlr->channels[i]);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
+
+ ctlr->state = CPDMA_STATE_IDLE;
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
+{
+ struct device *dev = ctlr->dev;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
+
+ dev_info(dev, "CPDMA: txidver: %x",
+ dma_reg_read(ctlr, CPDMA_TXIDVER));
+ dev_info(dev, "CPDMA: txcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_TXCONTROL));
+ dev_info(dev, "CPDMA: txteardown: %x",
+ dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
+ dev_info(dev, "CPDMA: rxidver: %x",
+ dma_reg_read(ctlr, CPDMA_RXIDVER));
+ dev_info(dev, "CPDMA: rxcontrol: %x",
+ dma_reg_read(ctlr, CPDMA_RXCONTROL));
+ dev_info(dev, "CPDMA: softreset: %x",
+ dma_reg_read(ctlr, CPDMA_SOFTRESET));
+ dev_info(dev, "CPDMA: rxteardown: %x",
+ dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
+ dev_info(dev, "CPDMA: txintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
+ dev_info(dev, "CPDMA: txintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
+ dev_info(dev, "CPDMA: txintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
+ dev_info(dev, "CPDMA: txintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: macinvector: %x",
+ dma_reg_read(ctlr, CPDMA_MACINVECTOR));
+ dev_info(dev, "CPDMA: maceoivector: %x",
+ dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
+ dev_info(dev, "CPDMA: rxintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
+ dev_info(dev, "CPDMA: rxintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
+ dev_info(dev, "CPDMA: rxintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
+ dev_info(dev, "CPDMA: rxintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
+ dev_info(dev, "CPDMA: dmaintstatraw: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
+ dev_info(dev, "CPDMA: dmaintstatmasked: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
+ dev_info(dev, "CPDMA: dmaintmaskset: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
+ dev_info(dev, "CPDMA: dmaintmaskclear: %x",
+ dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
+
+ if (ctlr->params.has_ext_regs) {
+ dev_info(dev, "CPDMA: dmacontrol: %x",
+ dma_reg_read(ctlr, CPDMA_DMACONTROL));
+ dev_info(dev, "CPDMA: dmastatus: %x",
+ dma_reg_read(ctlr, CPDMA_DMASTATUS));
+ dev_info(dev, "CPDMA: rxbuffofs: %x",
+ dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ if (ctlr->channels[i])
+ cpdma_chan_dump(ctlr->channels[i]);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int ret = 0, i;
+
+ if (!ctlr)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE)
+ cpdma_ctlr_stop(ctlr);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_destroy(ctlr->channels[i]);
+ }
+
+ cpdma_desc_pool_destroy(ctlr->pool);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(ctlr);
+ return ret;
+}
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+{
+ unsigned long flags;
+ int i, reg;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
+ dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_int_ctrl(ctlr->channels[i], enable);
+ }
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr)
+{
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0);
+}
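+
+/*
+ * Interrupt-path sketch (driver, handler and field names are hypothetical):
+ * mask the controller, acknowledge via the EOI vector, and defer the real
+ * work to NAPI:
+ *
+ *	static irqreturn_t foo_irq(int irq, void *dev_id)
+ *	{
+ *		struct foo_priv *priv = dev_id;
+ *
+ *		cpdma_ctlr_int_ctrl(priv->dma, false);
+ *		cpdma_ctlr_eoi(priv->dma);
+ *		napi_schedule(&priv->napi);
+ *		return IRQ_HANDLED;
+ *	}
+ */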
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler)
+{
+ struct cpdma_chan *chan;
+ int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+ unsigned long flags;
+
+ if (__chan_linear(chan_num) >= ctlr->num_chan)
+ return NULL;
+
+ ret = -ENOMEM;
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ goto err_chan_alloc;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ ret = -EBUSY;
+ if (ctlr->channels[chan_num])
+ goto err_chan_busy;
+
+ chan->ctlr = ctlr;
+ chan->state = CPDMA_STATE_IDLE;
+ chan->chan_num = chan_num;
+ chan->handler = handler;
+
+ if (is_rx_chan(chan)) {
+ chan->hdp = ctlr->params.rxhdp + offset;
+ chan->cp = ctlr->params.rxcp + offset;
+ chan->rxfree = ctlr->params.rxfree + offset;
+ chan->int_set = CPDMA_RXINTMASKSET;
+ chan->int_clear = CPDMA_RXINTMASKCLEAR;
+ chan->td = CPDMA_RXTEARDOWN;
+ chan->dir = DMA_FROM_DEVICE;
+ } else {
+ chan->hdp = ctlr->params.txhdp + offset;
+ chan->cp = ctlr->params.txcp + offset;
+ chan->int_set = CPDMA_TXINTMASKSET;
+ chan->int_clear = CPDMA_TXINTMASKCLEAR;
+ chan->td = CPDMA_TXTEARDOWN;
+ chan->dir = DMA_TO_DEVICE;
+ }
+ chan->mask = BIT(chan_linear(chan));
+
+ spin_lock_init(&chan->lock);
+
+ ctlr->channels[chan_num] = chan;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return chan;
+
+err_chan_busy:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+err_chan_alloc:
+ return ERR_PTR(ret);
+}
+
+int cpdma_chan_destroy(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr;
+ unsigned long flags;
+
+ if (!chan)
+ return -EINVAL;
+ ctlr = chan->ctlr;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE)
+ cpdma_chan_stop(chan);
+ ctlr->channels[chan->chan_num] = NULL;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ kfree(chan);
+ return 0;
+}
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats)
+{
+ unsigned long flags;
+ if (!chan)
+ return -EINVAL;
+ spin_lock_irqsave(&chan->lock, flags);
+ memcpy(stats, &chan->stats, sizeof(*stats));
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_dump(struct cpdma_chan *chan)
+{
+ unsigned long flags;
+ struct device *dev = chan->ctlr->dev;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ dev_info(dev, "channel %d (%s %d) state %s",
+ chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
+ chan_linear(chan), cpdma_state_str[chan->state]);
+ dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
+ dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
+ if (chan->rxfree) {
+ dev_info(dev, "\trxfree: %x\n",
+ chan_read(chan, rxfree));
+ }
+
+ dev_info(dev, "\tstats head_enqueue: %d\n",
+ chan->stats.head_enqueue);
+ dev_info(dev, "\tstats tail_enqueue: %d\n",
+ chan->stats.tail_enqueue);
+ dev_info(dev, "\tstats pad_enqueue: %d\n",
+ chan->stats.pad_enqueue);
+ dev_info(dev, "\tstats misqueued: %d\n",
+ chan->stats.misqueued);
+ dev_info(dev, "\tstats desc_alloc_fail: %d\n",
+ chan->stats.desc_alloc_fail);
+ dev_info(dev, "\tstats pad_alloc_fail: %d\n",
+ chan->stats.pad_alloc_fail);
+ dev_info(dev, "\tstats runt_receive_buff: %d\n",
+ chan->stats.runt_receive_buff);
+ dev_info(dev, "\tstats runt_transmit_buff: %d\n",
+ chan->stats.runt_transmit_buff);
+ dev_info(dev, "\tstats empty_dequeue: %d\n",
+ chan->stats.empty_dequeue);
+ dev_info(dev, "\tstats busy_dequeue: %d\n",
+ chan->stats.busy_dequeue);
+ dev_info(dev, "\tstats good_dequeue: %d\n",
+ chan->stats.good_dequeue);
+ dev_info(dev, "\tstats requeue: %d\n",
+ chan->stats.requeue);
+ dev_info(dev, "\tstats teardown_dequeue: %d\n",
+ chan->stats.teardown_dequeue);
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+static void __cpdma_chan_submit(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *prev = chan->tail;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ u32 mode;
+
+ desc_dma = desc_phys(pool, desc);
+
+ /* simple case - idle channel */
+ if (!chan->head) {
+ chan->stats.head_enqueue++;
+ chan->head = desc;
+ chan->tail = desc;
+ if (chan->state == CPDMA_STATE_ACTIVE)
+ chan_write(chan, hdp, desc_dma);
+ return;
+ }
+
+ /* first chain the descriptor at the tail of the list */
+ desc_write(prev, hw_next, desc_dma);
+ chan->tail = desc;
+ chan->stats.tail_enqueue++;
+
+ /* next check if EOQ has been triggered already */
+ mode = desc_read(prev, hw_mode);
+ if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
+ (chan->state == CPDMA_STATE_ACTIVE)) {
+ desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
+ chan_write(chan, hdp, desc_dma);
+ chan->stats.misqueued++;
+ }
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ dma_addr_t buffer;
+ unsigned long flags;
+ u32 mode;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ ret = -EINVAL;
+ goto unlock_ret;
+ }
+
+ desc = cpdma_desc_alloc(ctlr->pool, 1);
+ if (!desc) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+ goto unlock_ret;
+ }
+
+ if (len < ctlr->params.min_packet_size) {
+ len = ctlr->params.min_packet_size;
+ chan->stats.runt_transmit_buff++;
+ }
+
+ buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+ mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+
+ desc_write(desc, hw_next, 0);
+ desc_write(desc, hw_buffer, buffer);
+ desc_write(desc, hw_len, len);
+ desc_write(desc, hw_mode, mode | len);
+ desc_write(desc, sw_token, token);
+ desc_write(desc, sw_buffer, buffer);
+ desc_write(desc, sw_len, len);
+
+ __cpdma_chan_submit(chan, desc);
+
+ if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
+ chan_write(chan, rxfree, 1);
+
+ chan->count++;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
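+
+/*
+ * RX-priming sketch: a driver typically pre-posts one buffer per Rx
+ * descriptor at open time (the descriptor count and skb helper below are
+ * illustrative):
+ *
+ *	for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
+ *		skb = emac_rx_alloc(priv);
+ *		if (!skb)
+ *			break;
+ *		ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ *					skb_tailroom(skb), GFP_KERNEL);
+ *		if (ret < 0)
+ *			break;
+ *	}
+ */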
+
+static void __cpdma_chan_free(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc,
+ int outlen, int status)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t buff_dma;
+ int origlen;
+ void *token;
+
+ token = (void *)desc_read(desc, sw_token);
+ buff_dma = desc_read(desc, sw_buffer);
+ origlen = desc_read(desc, sw_len);
+
+ dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ cpdma_desc_free(pool, desc, 1);
+ (*chan->handler)(token, outlen, status);
+}
+
+static int __cpdma_chan_process(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ int status, outlen;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = chan->head;
+ if (!desc) {
+ chan->stats.empty_dequeue++;
+ status = -ENOENT;
+ goto unlock_ret;
+ }
+ desc_dma = desc_phys(pool, desc);
+
+ status = desc_read(desc, hw_mode);
+ outlen = status & 0x7ff;
+ if (status & CPDMA_DESC_OWNER) {
+ chan->stats.busy_dequeue++;
+ status = -EBUSY;
+ goto unlock_ret;
+ }
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE);
+
+ chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
+ chan_write(chan, cp, desc_dma);
+ chan->count--;
+ chan->stats.good_dequeue++;
+
+ if (status & CPDMA_DESC_EOQ) {
+ chan->stats.requeue++;
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ __cpdma_chan_free(chan, desc, outlen, status);
+ return status;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return status;
+}
+
+int cpdma_chan_process(struct cpdma_chan *chan, int quota)
+{
+ int used = 0, ret = 0;
+
+ if (chan->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ while (used < quota) {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ used++;
+ }
+ return used;
+}
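+
+/*
+ * NAPI poll sketch (function names and the TX service quota are
+ * assumptions): completions are delivered through the cpdma_handler_fn
+ * registered at cpdma_chan_create() time, so poll just drives the channels:
+ *
+ *	static int foo_poll(struct napi_struct *napi, int budget)
+ *	{
+ *		struct foo_priv *priv = napi_priv(napi);
+ *		int rx_done;
+ *
+ *		cpdma_chan_process(priv->txchan, TX_SERVICE_MAX);
+ *		rx_done = cpdma_chan_process(priv->rxchan, budget);
+ *		if (rx_done < budget) {
+ *			napi_complete(napi);
+ *			cpdma_ctlr_int_ctrl(priv->dma, true);
+ *		}
+ *		return rx_done;
+ *	}
+ */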
+
+int cpdma_chan_start(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EBUSY;
+ }
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+ dma_reg_write(ctlr, chan->int_set, chan->mask);
+ chan->state = CPDMA_STATE_ACTIVE;
+ if (chan->head) {
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ if (chan->rxfree)
+ chan_write(chan, rxfree, chan->count);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_stop(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+ int ret;
+ unsigned long timeout;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ chan->state = CPDMA_STATE_TEARDOWN;
+ dma_reg_write(ctlr, chan->int_clear, chan->mask);
+
+ /* trigger teardown */
+ dma_reg_write(ctlr, chan->td, chan->chan_num);
+
+ /* wait for teardown complete */
+ timeout = jiffies + HZ/10; /* 100 msec */
+ while (time_before(jiffies, timeout)) {
+ u32 cp = chan_read(chan, cp);
+ if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
+ break;
+ cpu_relax();
+ }
+ WARN_ON(!time_before(jiffies, timeout));
+ chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
+
+ /* handle completed packets */
+ do {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+
+ /* remaining packets haven't been tx/rx'ed, clean them up */
+ while (chan->head) {
+ struct cpdma_desc __iomem *desc = chan->head;
+ dma_addr_t next_dma;
+
+ next_dma = desc_read(desc, hw_next);
+ chan->head = desc_from_phys(pool, next_dma);
+ chan->stats.teardown_dequeue++;
+
+ /* issue callback without locks held */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ __cpdma_chan_free(chan, desc, 0, -ENOSYS);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ chan->state = CPDMA_STATE_IDLE;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
+ chan->mask);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+struct cpdma_control_info {
+ u32 reg;
+ u32 shift, mask;
+ int access;
+#define ACCESS_RO BIT(0)
+#define ACCESS_WO BIT(1)
+#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
+};
+
+static struct cpdma_control_info controls[] = {
+ [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
+ [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
+ [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
+ [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
+ [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
+ [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
+ [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
+ [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
+ [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
+};
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info;
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+ info = &controls[control];
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_RO) != ACCESS_RO)
+ goto unlock_ret;
+
+ ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
+
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+ unsigned long flags;
+ struct cpdma_control_info *info;
+ int ret;
+ u32 val;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ ret = -ENOTSUPP;
+ if (!ctlr->params.has_ext_regs)
+ goto unlock_ret;
+
+ ret = -EINVAL;
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ goto unlock_ret;
+
+ ret = -ENOENT;
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ goto unlock_ret;
+ info = &controls[control];
+
+ ret = -EPERM;
+ if ((info->access & ACCESS_WO) != ACCESS_WO)
+ goto unlock_ret;
+
+ val = dma_reg_read(ctlr, info->reg);
+ val &= ~(info->mask << info->shift);
+ val |= (value & info->mask) << info->shift;
+ dma_reg_write(ctlr, info->reg, val);
+ ret = 0;
+
+unlock_ret:
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
diff --git a/drivers/net/davinci_cpdma.h b/drivers/net/davinci_cpdma.h
new file mode 100644
index 0000000..868e50e
--- /dev/null
+++ b/drivers/net/davinci_cpdma.h
@@ -0,0 +1,108 @@
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DAVINCI_CPDMA_H__
+#define __DAVINCI_CPDMA_H__
+
+#define CPDMA_MAX_CHANNELS BITS_PER_LONG
+
+#define tx_chan_num(chan) (chan)
+#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan) (!is_rx_chan(chan))
+#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan) __chan_linear((chan)->chan_num)
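+
+/*
+ * The linear channel space is split in two: TX channels map to
+ * [0, CPDMA_MAX_CHANNELS) and RX channels to the mirror range above it.
+ * A minimal sketch of creating one of each (handler names are hypothetical):
+ *
+ *	txch = cpdma_chan_create(ctlr, tx_chan_num(0), foo_tx_handler);
+ *	rxch = cpdma_chan_create(ctlr, rx_chan_num(0), foo_rx_handler);
+ */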
+
+struct cpdma_params {
+ struct device *dev;
+ void __iomem *dmaregs;
+ void __iomem *txhdp, *rxhdp, *txcp, *rxcp;
+ void __iomem *rxthresh, *rxfree;
+ int num_chan;
+ bool has_soft_reset;
+ int min_packet_size;
+ u32 desc_mem_phys;
+ int desc_mem_size;
+ int desc_align;
+
+ /*
+ * Some instances of embedded cpdma controllers have extra control and
+ * status registers. The following flag enables access to these
+ * "extended" registers.
+ */
+ bool has_ext_regs;
+};
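+
+/*
+ * Illustrative initialization; the offsets below mirror the EMAC HDP and
+ * completion register defines removed elsewhere in this patch and are not
+ * authoritative for other integrations:
+ *
+ *	struct cpdma_params dma_params = {
+ *		.dev			= &pdev->dev,
+ *		.dmaregs		= base,
+ *		.txhdp			= base + 0x600,
+ *		.rxhdp			= base + 0x620,
+ *		.txcp			= base + 0x640,
+ *		.rxcp			= base + 0x660,
+ *		.num_chan		= 8,
+ *		.has_soft_reset		= true,
+ *		.min_packet_size	= 60,
+ *		.desc_mem_phys		= iram_phys,	(0 selects DDR)
+ *		.desc_mem_size		= SZ_8K,
+ *		.desc_align		= 16,
+ *	};
+ *	ctlr = cpdma_ctlr_create(&dma_params);
+ */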
+
+struct cpdma_chan_stats {
+ u32 head_enqueue;
+ u32 tail_enqueue;
+ u32 pad_enqueue;
+ u32 misqueued;
+ u32 desc_alloc_fail;
+ u32 pad_alloc_fail;
+ u32 runt_receive_buff;
+ u32 runt_transmit_buff;
+ u32 empty_dequeue;
+ u32 busy_dequeue;
+ u32 good_dequeue;
+ u32 requeue;
+ u32 teardown_dequeue;
+};
+
+struct cpdma_ctlr;
+struct cpdma_chan;
+
+typedef void (*cpdma_handler_fn)(void *token, int len, int status);
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler);
+int cpdma_chan_destroy(struct cpdma_chan *chan);
+int cpdma_chan_start(struct cpdma_chan *chan);
+int cpdma_chan_stop(struct cpdma_chan *chan);
+int cpdma_chan_dump(struct cpdma_chan *chan);
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats);
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, gfp_t gfp_mask);
+int cpdma_chan_process(struct cpdma_chan *chan, int quota);
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr);
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+
+enum cpdma_control {
+ CPDMA_CMD_IDLE, /* write-only */
+ CPDMA_COPY_ERROR_FRAMES, /* read-write */
+ CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
+ CPDMA_RX_OWNERSHIP_FLIP, /* read-write */
+ CPDMA_TX_PRIO_FIXED, /* read-write */
+ CPDMA_STAT_IDLE, /* read-only */
+ CPDMA_STAT_TX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_TX_ERR_CODE, /* read-only */
+ CPDMA_STAT_RX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_RX_ERR_CODE, /* read-only */
+ CPDMA_RX_BUFFER_OFFSET, /* read-write */
+};
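+
+/*
+ * Example: on controllers with has_ext_regs set, the RX buffer offset knob
+ * can shift incoming payloads for IP header alignment (a sketch, not a
+ * requirement of this API):
+ *
+ *	ret = cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, NET_IP_ALIGN);
+ */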
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
+
+#endif
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 7fbd052..2a628d1 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -63,6 +63,8 @@
#include <asm/irq.h>
#include <asm/page.h>
+#include "davinci_cpdma.h"
+
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
@@ -113,7 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
-#define EMAC_DEF_MDIO_TICK_MS (10) /* typically 1 tick=1 ms) */
+#define EMAC_DEF_RX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -125,7 +127,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC register related defines */
#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF)
#define EMAC_NUM_MULTICAST_BITS (64)
-#define EMAC_TEARDOWN_VALUE (0xFFFFFFFC)
#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1)
#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1)
#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2)
@@ -212,24 +213,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
/* EMAC Peripheral Device Register Memory Layout structure */
-#define EMAC_TXIDVER 0x0
-#define EMAC_TXCONTROL 0x4
-#define EMAC_TXTEARDOWN 0x8
-#define EMAC_RXIDVER 0x10
-#define EMAC_RXCONTROL 0x14
-#define EMAC_RXTEARDOWN 0x18
-#define EMAC_TXINTSTATRAW 0x80
-#define EMAC_TXINTSTATMASKED 0x84
-#define EMAC_TXINTMASKSET 0x88
-#define EMAC_TXINTMASKCLEAR 0x8C
#define EMAC_MACINVECTOR 0x90
#define EMAC_DM646X_MACEOIVECTOR 0x94
-#define EMAC_RXINTSTATRAW 0xA0
-#define EMAC_RXINTSTATMASKED 0xA4
-#define EMAC_RXINTMASKSET 0xA8
-#define EMAC_RXINTMASKCLEAR 0xAC
#define EMAC_MACINTSTATRAW 0xB0
#define EMAC_MACINTSTATMASKED 0xB4
#define EMAC_MACINTMASKSET 0xB8
@@ -256,12 +243,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_MACADDRHI 0x504
#define EMAC_MACINDEX 0x508
-/* EMAC HDP and Completion registors */
-#define EMAC_TXHDP(ch) (0x600 + (ch * 4))
-#define EMAC_RXHDP(ch) (0x620 + (ch * 4))
-#define EMAC_TXCP(ch) (0x640 + (ch * 4))
-#define EMAC_RXCP(ch) (0x660 + (ch * 4))
-
/* EMAC statistics registers */
#define EMAC_RXGOODFRAMES 0x200
#define EMAC_RXBCASTFRAMES 0x204
@@ -303,25 +284,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DM644X_INTMIN_INTVL 0x1
#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK)
-/* EMAC MDIO related */
-/* Mask & Control defines */
-#define MDIO_CONTROL_CLKDIV (0xFF)
-#define MDIO_CONTROL_ENABLE BIT(30)
-#define MDIO_USERACCESS_GO BIT(31)
-#define MDIO_USERACCESS_WRITE BIT(30)
-#define MDIO_USERACCESS_READ (0)
-#define MDIO_USERACCESS_REGADR (0x1F << 21)
-#define MDIO_USERACCESS_PHYADR (0x1F << 16)
-#define MDIO_USERACCESS_DATA (0xFFFF)
-#define MDIO_USERPHYSEL_LINKSEL BIT(7)
-#define MDIO_VER_MODID (0xFFFF << 16)
-#define MDIO_VER_REVMAJ (0xFF << 8)
-#define MDIO_VER_REVMIN (0xFF)
-
-#define MDIO_USERACCESS(inst) (0x80 + (inst * 8))
-#define MDIO_USERPHYSEL(inst) (0x84 + (inst * 8))
-#define MDIO_CONTROL (0x04)
-
/* EMAC DM646X control module registers */
#define EMAC_DM646X_CMINTCTRL 0x0C
#define EMAC_DM646X_CMRXINTEN 0x14
@@ -345,120 +307,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* EMAC Stats Clear Mask */
#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
-/** net_buf_obj: EMAC network bufferdata structure
- *
- * EMAC network buffer data structure
- */
-struct emac_netbufobj {
- void *buf_token;
- char *data_ptr;
- int length;
-};
-
-/** net_pkt_obj: EMAC network packet data structure
- *
- * EMAC network packet data structure - supports buffer list (for future)
- */
-struct emac_netpktobj {
- void *pkt_token; /* data token may hold tx/rx chan id */
- struct emac_netbufobj *buf_list; /* array of network buffer objects */
- int num_bufs;
- int pkt_length;
-};
-
-/** emac_tx_bd: EMAC TX Buffer descriptor data structure
- *
- * EMAC TX Buffer descriptor data structure
- */
-struct emac_tx_bd {
- int h_next;
- int buff_ptr;
- int off_b_len;
- int mode; /* SOP, EOP, ownership, EOQ, teardown,Qstarv, length */
- struct emac_tx_bd __iomem *next;
- void *buf_token;
-};
-
-/** emac_txch: EMAC TX Channel data structure
- *
- * EMAC TX Channel data structure
- */
-struct emac_txch {
- /* Config related */
- u32 num_bd;
- u32 service_max;
-
- /* CPPI specific */
- u32 alloc_size;
- void __iomem *bd_mem;
- struct emac_tx_bd __iomem *bd_pool_head;
- struct emac_tx_bd __iomem *active_queue_head;
- struct emac_tx_bd __iomem *active_queue_tail;
- struct emac_tx_bd __iomem *last_hw_bdprocessed;
- u32 queue_active;
- u32 teardown_pending;
- u32 *tx_complete;
-
- /** statistics */
- u32 proc_count; /* TX: # of times emac_tx_bdproc is called */
- u32 mis_queued_packets;
- u32 queue_reinit;
- u32 end_of_queue_add;
- u32 out_of_tx_bd;
- u32 no_active_pkts; /* IRQ when there were no packets to process */
- u32 active_queue_count;
-};
-
-/** emac_rx_bd: EMAC RX Buffer descriptor data structure
- *
- * EMAC RX Buffer descriptor data structure
- */
-struct emac_rx_bd {
- int h_next;
- int buff_ptr;
- int off_b_len;
- int mode;
- struct emac_rx_bd __iomem *next;
- void *data_ptr;
- void *buf_token;
-};
-
-/** emac_rxch: EMAC RX Channel data structure
- *
- * EMAC RX Channel data structure
- */
-struct emac_rxch {
- /* configuration info */
- u32 num_bd;
- u32 service_max;
- u32 buf_size;
- char mac_addr[6];
-
- /** CPPI specific */
- u32 alloc_size;
- void __iomem *bd_mem;
- struct emac_rx_bd __iomem *bd_pool_head;
- struct emac_rx_bd __iomem *active_queue_head;
- struct emac_rx_bd __iomem *active_queue_tail;
- u32 queue_active;
- u32 teardown_pending;
-
- /* packet and buffer objects */
- struct emac_netpktobj pkt_queue;
- struct emac_netbufobj buf_queue;
-
- /** statistics */
- u32 proc_count; /* number of times emac_rx_bdproc is called */
- u32 processed_bd;
- u32 recycled_bd;
- u32 out_of_rx_bd;
- u32 out_of_rx_buffers;
- u32 queue_reinit;
- u32 end_of_queue_add;
- u32 end_of_queue;
- u32 mis_queued_packets;
-};
-
/* emac_priv: EMAC private data structure
*
* EMAC adapter private data structure
@@ -469,17 +317,13 @@ struct emac_priv {
struct platform_device *pdev;
struct napi_struct napi;
char mac_addr[6];
- spinlock_t tx_lock;
- spinlock_t rx_lock;
void __iomem *remap_addr;
u32 emac_base_phys;
void __iomem *emac_base;
void __iomem *ctrl_base;
- void __iomem *emac_ctrl_ram;
- u32 ctrl_ram_size;
- u32 hw_ram_addr;
- struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
- struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
+ struct cpdma_ctlr *dma;
+ struct cpdma_chan *txchan;
+ struct cpdma_chan *rxchan;
u32 link; /* 1=link on, 0=link off */
u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
u32 duplex; /* Link duplex: 0=Half, 1=Full */
@@ -493,13 +337,7 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
- /* periodic timer required for MDIO polling */
- struct timer_list periodic_timer;
- u32 periodic_ticks;
- u32 timer_active;
- u32 phy_mask;
- /* mii_bus,phy members */
- struct mii_bus *mii_bus;
+ const char *phy_id;
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -510,19 +348,6 @@ struct emac_priv {
/* clock frequency for EMAC */
static struct clk *emac_clk;
static unsigned long emac_bus_frequency;
-static unsigned long mdio_max_freq;
-
-#define emac_virt_to_phys(addr, priv) \
- (((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \
- + priv->hw_ram_addr)
-
-/* Cache macros - Packet buffers would be from skb pool which is cached */
-#define EMAC_VIRT_NOCACHE(addr) (addr)
-
-/* DM644x does not have BD's in cached memory - so no cache functions */
-#define BD_CACHE_INVALIDATE(addr, size)
-#define BD_CACHE_WRITEBACK(addr, size)
-#define BD_CACHE_WRITEBACK_INVALIDATE(addr, size)
/* EMAC TX Host Error description strings */
static char *emac_txhost_errcodes[16] = {
@@ -548,9 +373,6 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
-#define emac_mdio_read(reg) ioread32(bus->priv + (reg))
-#define emac_mdio_write(reg, val) iowrite32(val, (bus->priv + (reg)))
-
/**
* emac_dump_regs: Dump important EMAC registers to debug terminal
* @priv: The DaVinci EMAC private adapter structure
@@ -569,20 +391,6 @@ static void emac_dump_regs(struct emac_priv *priv)
emac_ctrl_read(EMAC_CTRL_EWCTL),
emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
}
- dev_info(emac_dev, "EMAC: TXID: %08X %s, RXID: %08X %s\n",
- emac_read(EMAC_TXIDVER),
- ((emac_read(EMAC_TXCONTROL)) ? "enabled" : "disabled"),
- emac_read(EMAC_RXIDVER),
- ((emac_read(EMAC_RXCONTROL)) ? "enabled" : "disabled"));
- dev_info(emac_dev, "EMAC: TXIntRaw:%08X, TxIntMasked: %08X, "\
- "TxIntMasSet: %08X\n", emac_read(EMAC_TXINTSTATRAW),
- emac_read(EMAC_TXINTSTATMASKED), emac_read(EMAC_TXINTMASKSET));
- dev_info(emac_dev, "EMAC: RXIntRaw:%08X, RxIntMasked: %08X, "\
- "RxIntMasSet: %08X\n", emac_read(EMAC_RXINTSTATRAW),
- emac_read(EMAC_RXINTSTATMASKED), emac_read(EMAC_RXINTMASKSET));
- dev_info(emac_dev, "EMAC: MacIntRaw:%08X, MacIntMasked: %08X, "\
- "MacInVector=%08X\n", emac_read(EMAC_MACINTSTATRAW),
- emac_read(EMAC_MACINTSTATMASKED), emac_read(EMAC_MACINVECTOR));
dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
@@ -591,8 +399,6 @@ static void emac_dump_regs(struct emac_priv *priv)
dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
"MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
- dev_info(emac_dev, "EMAC: TXHDP[0]:%08X, RXHDP[0]: %08X\n",
- emac_read(EMAC_TXHDP(0)), emac_read(EMAC_RXHDP(0)));
dev_info(emac_dev, "EMAC Statistics\n");
dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
emac_read(EMAC_RXGOODFRAMES));
@@ -654,11 +460,10 @@ static void emac_dump_regs(struct emac_priv *priv)
emac_read(EMAC_RXMOFOVERRUNS));
dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
emac_read(EMAC_RXDMAOVERRUNS));
+
+ cpdma_ctlr_dump(priv->dma);
}
-/*************************************************************************
- * EMAC MDIO/Phy Functionality
- *************************************************************************/
/**
* emac_get_drvinfo: Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
@@ -686,7 +491,7 @@ static int emac_get_settings(struct net_device *ndev,
struct ethtool_cmd *ecmd)
{
struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phy_mask)
+ if (priv->phydev)
return phy_ethtool_gset(priv->phydev, ecmd);
else
return -EOPNOTSUPP;
@@ -704,7 +509,7 @@ static int emac_get_settings(struct net_device *ndev,
static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phy_mask)
+ if (priv->phydev)
return phy_ethtool_sset(priv->phydev, ecmd);
else
return -EOPNOTSUPP;
@@ -841,7 +646,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
mac_control = emac_read(EMAC_MACCONTROL);
cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
DUPLEX_FULL : DUPLEX_HALF;
- if (priv->phy_mask)
+ if (priv->phydev)
new_duplex = priv->phydev->duplex;
else
new_duplex = DUPLEX_FULL;
@@ -1184,371 +989,68 @@ static irqreturn_t emac_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/** EMAC on-chip buffer descriptor memory
- *
- * WARNING: Please note that the on chip memory is used for both TX and RX
- * buffer descriptor queues and is equally divided between TX and RX desc's
- * If the number of TX or RX descriptors change this memory pointers need
- * to be adjusted. If external memory is allocated then these pointers can
- * pointer to the memory
- *
- */
-#define EMAC_TX_BD_MEM(priv) ((priv)->emac_ctrl_ram)
-#define EMAC_RX_BD_MEM(priv) ((priv)->emac_ctrl_ram + \
- (((priv)->ctrl_ram_size) >> 1))
-
-/**
- * emac_init_txch: TX channel initialization
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device init to setup a TX channel (allocate buffer desc
- * create free pool and keep ready for transmission
- *
- * Returns success(0) or mem alloc failures error code
- */
-static int emac_init_txch(struct emac_priv *priv, u32 ch)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 cnt, bd_size;
- void __iomem *mem;
- struct emac_tx_bd __iomem *curr_bd;
- struct emac_txch *txch = NULL;
-
- txch = kzalloc(sizeof(struct emac_txch), GFP_KERNEL);
- if (NULL == txch) {
- dev_err(emac_dev, "DaVinci EMAC: TX Ch mem alloc failed");
- return -ENOMEM;
- }
- priv->txch[ch] = txch;
- txch->service_max = EMAC_DEF_TX_MAX_SERVICE;
- txch->active_queue_head = NULL;
- txch->active_queue_tail = NULL;
- txch->queue_active = 0;
- txch->teardown_pending = 0;
-
- /* allocate memory for TX CPPI channel on a 4 byte boundry */
- txch->tx_complete = kzalloc(txch->service_max * sizeof(u32),
- GFP_KERNEL);
- if (NULL == txch->tx_complete) {
- dev_err(emac_dev, "DaVinci EMAC: Tx service mem alloc failed");
- kfree(txch);
- return -ENOMEM;
- }
-
- /* allocate buffer descriptor pool align every BD on four word
- * boundry for future requirements */
- bd_size = (sizeof(struct emac_tx_bd) + 0xF) & ~0xF;
- txch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
- txch->alloc_size = (((bd_size * txch->num_bd) + 0xF) & ~0xF);
-
- /* alloc TX BD memory */
- txch->bd_mem = EMAC_TX_BD_MEM(priv);
- __memzero((void __force *)txch->bd_mem, txch->alloc_size);
-
- /* initialize the BD linked list */
- mem = (void __force __iomem *)
- (((u32 __force) txch->bd_mem + 0xF) & ~0xF);
- txch->bd_pool_head = NULL;
- for (cnt = 0; cnt < txch->num_bd; cnt++) {
- curr_bd = mem + (cnt * bd_size);
- curr_bd->next = txch->bd_pool_head;
- txch->bd_pool_head = curr_bd;
- }
-
- /* reset statistics counters */
- txch->out_of_tx_bd = 0;
- txch->no_active_pkts = 0;
- txch->active_queue_count = 0;
-
- return 0;
-}
-
-/**
- * emac_cleanup_txch: Book-keep function to clean TX channel resources
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number
- *
- * Called to clean up TX channel resources
- *
- */
-static void emac_cleanup_txch(struct emac_priv *priv, u32 ch)
+static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
{
- struct emac_txch *txch = priv->txch[ch];
-
- if (txch) {
- if (txch->bd_mem)
- txch->bd_mem = NULL;
- kfree(txch->tx_complete);
- kfree(txch);
- priv->txch[ch] = NULL;
- }
+ struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size);
+ if (WARN_ON(!skb))
+ return NULL;
+ skb->dev = priv->ndev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ return skb;
}
-/**
- * emac_net_tx_complete: TX packet completion function
- * @priv: The DaVinci EMAC private adapter structure
- * @net_data_tokens: packet token - skb pointer
- * @num_tokens: number of skb's to free
- * @ch: TX channel number
- *
- * Frees the skb once packet is transmitted
- *
- */
-static int emac_net_tx_complete(struct emac_priv *priv,
- void **net_data_tokens,
- int num_tokens, u32 ch)
+static void emac_rx_handler(void *token, int len, int status)
{
- struct net_device *ndev = priv->ndev;
- u32 cnt;
-
- if (unlikely(num_tokens && netif_queue_stopped(ndev)))
- netif_start_queue(ndev);
- for (cnt = 0; cnt < num_tokens; cnt++) {
- struct sk_buff *skb = (struct sk_buff *)net_data_tokens[cnt];
- if (skb == NULL)
- continue;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+ int ret;
+
+ /* free and bail if we are shutting down */
+ if (unlikely(!netif_running(ndev))) {
dev_kfree_skb_any(skb);
+ return;
}
- return 0;
-}
-
-/**
- * emac_txch_teardown: TX channel teardown
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number
- *
- * Called to teardown TX channel
- *
- */
-static void emac_txch_teardown(struct emac_priv *priv, u32 ch)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
- struct emac_txch *txch = priv->txch[ch];
- struct emac_tx_bd __iomem *curr_bd;
-
- while ((emac_read(EMAC_TXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
- EMAC_TEARDOWN_VALUE) {
- /* wait till tx teardown complete */
- cpu_relax(); /* TODO: check if this helps ... */
- --teardown_cnt;
- if (0 == teardown_cnt) {
- dev_err(emac_dev, "EMAC: TX teardown aborted\n");
- break;
- }
- }
- emac_write(EMAC_TXCP(ch), EMAC_TEARDOWN_VALUE);
-
- /* process sent packets and return skb's to upper layer */
- if (1 == txch->queue_active) {
- curr_bd = txch->active_queue_head;
- while (curr_bd != NULL) {
- dma_unmap_single(emac_dev, curr_bd->buff_ptr,
- curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
- DMA_TO_DEVICE);
-
- emac_net_tx_complete(priv, (void __force *)
- &curr_bd->buf_token, 1, ch);
- if (curr_bd != txch->active_queue_tail)
- curr_bd = curr_bd->next;
- else
- break;
- }
- txch->bd_pool_head = txch->active_queue_head;
- txch->active_queue_head =
- txch->active_queue_tail = NULL;
- }
-}
-/**
- * emac_stop_txch: Stop TX channel operation
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number
- *
- * Called to stop TX channel operation
- *
- */
-static void emac_stop_txch(struct emac_priv *priv, u32 ch)
-{
- struct emac_txch *txch = priv->txch[ch];
-
- if (txch) {
- txch->teardown_pending = 1;
- emac_write(EMAC_TXTEARDOWN, 0);
- emac_txch_teardown(priv, ch);
- txch->teardown_pending = 0;
- emac_write(EMAC_TXINTMASKCLEAR, BIT(ch));
+ /* recycle on receive error */
+ if (status < 0) {
+ ndev->stats.rx_errors++;
+ goto recycle;
}
-}
-/**
- * emac_tx_bdproc: TX buffer descriptor (packet) processing
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: TX channel number to process buffer descriptors for
- * @budget: number of packets allowed to process
- * @pending: indication to caller that packets are pending to process
- *
- * Processes TX buffer descriptors after packets are transmitted - checks
- * ownership bit on the TX * descriptor and requeues it to free pool & frees
- * the SKB buffer. Only "budget" number of packets are processed and
- * indication of pending packets provided to the caller
- *
- * Returns number of packets processed
- */
-static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
-{
- struct device *emac_dev = &priv->ndev->dev;
- unsigned long flags;
- u32 frame_status;
- u32 pkts_processed = 0;
- u32 tx_complete_cnt = 0;
- struct emac_tx_bd __iomem *curr_bd;
- struct emac_txch *txch = priv->txch[ch];
- u32 *tx_complete_ptr = txch->tx_complete;
-
- if (unlikely(1 == txch->teardown_pending)) {
- if (netif_msg_tx_err(priv) && net_ratelimit()) {
- dev_err(emac_dev, "DaVinci EMAC:emac_tx_bdproc: "\
- "teardown pending\n");
- }
- return 0; /* dont handle any pkt completions */
- }
+ /* feed received packet up the stack */
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
- ++txch->proc_count;
- spin_lock_irqsave(&priv->tx_lock, flags);
- curr_bd = txch->active_queue_head;
- if (NULL == curr_bd) {
- emac_write(EMAC_TXCP(ch),
- emac_virt_to_phys(txch->last_hw_bdprocessed, priv));
- txch->no_active_pkts++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return 0;
+ /* alloc a new packet for receive */
+ skb = emac_rx_alloc(priv);
+ if (!skb) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "failed rx buffer alloc\n");
+ return;
}
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
- while ((curr_bd) &&
- ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
- (pkts_processed < budget)) {
- emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv));
- txch->active_queue_head = curr_bd->next;
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- if (curr_bd->next) { /* misqueued packet */
- emac_write(EMAC_TXHDP(ch), curr_bd->h_next);
- ++txch->mis_queued_packets;
- } else {
- txch->queue_active = 0; /* end of queue */
- }
- }
- dma_unmap_single(emac_dev, curr_bd->buff_ptr,
- curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
- DMA_TO_DEVICE);
-
- *tx_complete_ptr = (u32) curr_bd->buf_token;
- ++tx_complete_ptr;
- ++tx_complete_cnt;
- curr_bd->next = txch->bd_pool_head;
- txch->bd_pool_head = curr_bd;
- --txch->active_queue_count;
- pkts_processed++;
- txch->last_hw_bdprocessed = curr_bd;
- curr_bd = txch->active_queue_head;
- if (curr_bd) {
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
- }
- } /* end of pkt processing loop */
-
- emac_net_tx_complete(priv,
- (void *)&txch->tx_complete[0],
- tx_complete_cnt, ch);
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return pkts_processed;
+recycle:
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_ATOMIC);
+ if (WARN_ON(ret < 0))
+ dev_kfree_skb_any(skb);
}
-#define EMAC_ERR_TX_OUT_OF_BD -1
-
-/**
- * emac_send: EMAC Transmit function (internal)
- * @priv: The DaVinci EMAC private adapter structure
- * @pkt: packet pointer (contains skb ptr)
- * @ch: TX channel number
- *
- * Called by the transmit function to queue the packet in EMAC hardware queue
- *
- * Returns success(0) or error code (typically out of desc's)
- */
-static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
+static void emac_tx_handler(void *token, int len, int status)
{
- unsigned long flags;
- struct emac_tx_bd __iomem *curr_bd;
- struct emac_txch *txch;
- struct emac_netbufobj *buf_list;
-
- txch = priv->txch[ch];
- buf_list = pkt->buf_list; /* get handle to the buffer array */
-
- /* check packet size and pad if short */
- if (pkt->pkt_length < EMAC_DEF_MIN_ETHPKTSIZE) {
- buf_list->length += (EMAC_DEF_MIN_ETHPKTSIZE - pkt->pkt_length);
- pkt->pkt_length = EMAC_DEF_MIN_ETHPKTSIZE;
- }
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
- spin_lock_irqsave(&priv->tx_lock, flags);
- curr_bd = txch->bd_pool_head;
- if (curr_bd == NULL) {
- txch->out_of_tx_bd++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return EMAC_ERR_TX_OUT_OF_BD;
- }
-
- txch->bd_pool_head = curr_bd->next;
- curr_bd->buf_token = buf_list->buf_token;
- curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buf_list->data_ptr,
- buf_list->length, DMA_TO_DEVICE);
- curr_bd->off_b_len = buf_list->length;
- curr_bd->h_next = 0;
- curr_bd->next = NULL;
- curr_bd->mode = (EMAC_CPPI_SOP_BIT | EMAC_CPPI_OWNERSHIP_BIT |
- EMAC_CPPI_EOP_BIT | pkt->pkt_length);
-
- /* flush the packet from cache if write back cache is present */
- BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
-
- /* send the packet */
- if (txch->active_queue_head == NULL) {
- txch->active_queue_head = curr_bd;
- txch->active_queue_tail = curr_bd;
- if (1 != txch->queue_active) {
- emac_write(EMAC_TXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- txch->queue_active = 1;
- }
- ++txch->queue_reinit;
- } else {
- register struct emac_tx_bd __iomem *tail_bd;
- register u32 frame_status;
-
- tail_bd = txch->active_queue_tail;
- tail_bd->next = curr_bd;
- txch->active_queue_tail = curr_bd;
- tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv);
- frame_status = tail_bd->mode;
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- emac_write(EMAC_TXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- frame_status &= ~(EMAC_CPPI_EOQ_BIT);
- tail_bd->mode = frame_status;
- ++txch->end_of_queue_add;
- }
- }
- txch->active_queue_count++;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
- return 0;
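+	/* xmit stops the queue when a submit fails; restart it now that the
+	 * channel has completed (and freed) a descriptor */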
+ if (unlikely(netif_queue_stopped(ndev)))
+ netif_start_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
}
/**
@@ -1565,42 +1067,36 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
int ret_code;
- struct emac_netbufobj tx_buf; /* buffer obj-only single frame support */
- struct emac_netpktobj tx_packet; /* packet object */
struct emac_priv *priv = netdev_priv(ndev);
/* If no link, return */
if (unlikely(!priv->link)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
- return NETDEV_TX_BUSY;
+ goto fail_tx;
+ }
+
+ ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
+ if (unlikely(ret_code < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
+ goto fail_tx;
}
- /* Build the buffer and packet objects - Since only single fragment is
- * supported, need not set length and token in both packet & object.
- * Doing so for completeness sake & to show that this needs to be done
- * in multifragment case
- */
- tx_packet.buf_list = &tx_buf;
- tx_packet.num_bufs = 1; /* only single fragment supported */
- tx_packet.pkt_length = skb->len;
- tx_packet.pkt_token = (void *)skb;
- tx_buf.length = skb->len;
- tx_buf.buf_token = (void *)skb;
- tx_buf.data_ptr = skb->data;
- ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
+ ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
+ GFP_KERNEL);
if (unlikely(ret_code != 0)) {
- if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- dev_err(emac_dev, "DaVinci EMAC: xmit() fatal"\
- " err. Out of TX BD's");
- netif_stop_queue(priv->ndev);
- }
- ndev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
+ goto fail_tx;
}
return NETDEV_TX_OK;
+
+fail_tx:
+ ndev->stats.tx_dropped++;
+ netif_stop_queue(ndev);
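+	/* emac_tx_handler restarts the queue once a descriptor completes */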
+ return NETDEV_TX_BUSY;
}
/**
@@ -1621,218 +1117,16 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
if (netif_msg_tx_err(priv))
dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
+ emac_dump_regs(priv);
+
ndev->stats.tx_errors++;
emac_int_disable(priv);
- emac_stop_txch(priv, EMAC_DEF_TX_CH);
- emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
- emac_init_txch(priv, EMAC_DEF_TX_CH);
- emac_write(EMAC_TXHDP(0), 0);
- emac_write(EMAC_TXINTMASKSET, BIT(EMAC_DEF_TX_CH));
+ cpdma_chan_stop(priv->txchan);
+ cpdma_chan_start(priv->txchan);
emac_int_enable(priv);
}
/**
- * emac_net_alloc_rx_buf: Allocate a skb for RX
- * @priv: The DaVinci EMAC private adapter structure
- * @buf_size: size of SKB data buffer to allocate
- * @data_token: data token returned (skb handle for storing in buffer desc)
- * @ch: RX channel number
- *
- * Called during RX channel setup - allocates skb buffer of required size
- * and provides the skb handle and allocated buffer data pointer to caller
- *
- * Returns skb data pointer or 0 on failure to alloc skb
- */
-static void *emac_net_alloc_rx_buf(struct emac_priv *priv, int buf_size,
- void **data_token, u32 ch)
-{
- struct net_device *ndev = priv->ndev;
- struct device *emac_dev = &ndev->dev;
- struct sk_buff *p_skb;
-
- p_skb = dev_alloc_skb(buf_size);
- if (unlikely(NULL == p_skb)) {
- if (netif_msg_rx_err(priv) && net_ratelimit())
- dev_err(emac_dev, "DaVinci EMAC: failed to alloc skb");
- return NULL;
- }
-
- /* set device pointer in skb and reserve space for extra bytes */
- p_skb->dev = ndev;
- skb_reserve(p_skb, NET_IP_ALIGN);
- *data_token = (void *) p_skb;
- return p_skb->data;
-}
-
-/**
- * emac_init_rxch: RX channel initialization
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- * @param: mac address for RX channel
- *
- * Called during device init to setup a RX channel (allocate buffers and
- * buffer descriptors, create queue and keep ready for reception
- *
- * Returns success(0) or mem alloc failures error code
- */
-static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 cnt, bd_size;
- void __iomem *mem;
- struct emac_rx_bd __iomem *curr_bd;
- struct emac_rxch *rxch = NULL;
-
- rxch = kzalloc(sizeof(struct emac_rxch), GFP_KERNEL);
- if (NULL == rxch) {
- dev_err(emac_dev, "DaVinci EMAC: RX Ch mem alloc failed");
- return -ENOMEM;
- }
- priv->rxch[ch] = rxch;
- rxch->buf_size = priv->rx_buf_size;
- rxch->service_max = EMAC_DEF_RX_MAX_SERVICE;
- rxch->queue_active = 0;
- rxch->teardown_pending = 0;
-
- /* save mac address */
- for (cnt = 0; cnt < 6; cnt++)
- rxch->mac_addr[cnt] = param[cnt];
-
- /* allocate buffer descriptor pool align every BD on four word
- * boundry for future requirements */
- bd_size = (sizeof(struct emac_rx_bd) + 0xF) & ~0xF;
- rxch->num_bd = (priv->ctrl_ram_size >> 1) / bd_size;
- rxch->alloc_size = (((bd_size * rxch->num_bd) + 0xF) & ~0xF);
- rxch->bd_mem = EMAC_RX_BD_MEM(priv);
- __memzero((void __force *)rxch->bd_mem, rxch->alloc_size);
- rxch->pkt_queue.buf_list = &rxch->buf_queue;
-
- /* allocate RX buffer and initialize the BD linked list */
- mem = (void __force __iomem *)
- (((u32 __force) rxch->bd_mem + 0xF) & ~0xF);
- rxch->active_queue_head = NULL;
- rxch->active_queue_tail = mem;
- for (cnt = 0; cnt < rxch->num_bd; cnt++) {
- curr_bd = mem + (cnt * bd_size);
- /* for future use the last parameter contains the BD ptr */
- curr_bd->data_ptr = emac_net_alloc_rx_buf(priv,
- rxch->buf_size,
- (void __force **)&curr_bd->buf_token,
- EMAC_DEF_RX_CH);
- if (curr_bd->data_ptr == NULL) {
- dev_err(emac_dev, "DaVinci EMAC: RX buf mem alloc " \
- "failed for ch %d\n", ch);
- kfree(rxch);
- return -ENOMEM;
- }
-
- /* populate the hardware descriptor */
- curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
- priv);
- curr_bd->buff_ptr = dma_map_single(emac_dev, curr_bd->data_ptr,
- rxch->buf_size, DMA_FROM_DEVICE);
- curr_bd->off_b_len = rxch->buf_size;
- curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
-
- /* write back to hardware memory */
- BD_CACHE_WRITEBACK_INVALIDATE((u32) curr_bd,
- EMAC_BD_LENGTH_FOR_CACHE);
- curr_bd->next = rxch->active_queue_head;
- rxch->active_queue_head = curr_bd;
- }
-
- /* At this point rxCppi->activeQueueHead points to the first
- RX BD ready to be given to RX HDP and rxch->active_queue_tail
- points to the last RX BD
- */
- return 0;
-}
-
-/**
- * emac_rxch_teardown: RX channel teardown
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device stop to teardown RX channel
- *
- */
-static void emac_rxch_teardown(struct emac_priv *priv, u32 ch)
-{
- struct device *emac_dev = &priv->ndev->dev;
- u32 teardown_cnt = 0xFFFFFFF0; /* Some high value */
-
- while ((emac_read(EMAC_RXCP(ch)) & EMAC_TEARDOWN_VALUE) !=
- EMAC_TEARDOWN_VALUE) {
- /* wait till tx teardown complete */
- cpu_relax(); /* TODO: check if this helps ... */
- --teardown_cnt;
- if (0 == teardown_cnt) {
- dev_err(emac_dev, "EMAC: RX teardown aborted\n");
- break;
- }
- }
- emac_write(EMAC_RXCP(ch), EMAC_TEARDOWN_VALUE);
-}
-
-/**
- * emac_stop_rxch: Stop RX channel operation
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device stop to stop RX channel operation
- *
- */
-static void emac_stop_rxch(struct emac_priv *priv, u32 ch)
-{
- struct emac_rxch *rxch = priv->rxch[ch];
-
- if (rxch) {
- rxch->teardown_pending = 1;
- emac_write(EMAC_RXTEARDOWN, ch);
- /* wait for teardown complete */
- emac_rxch_teardown(priv, ch);
- rxch->teardown_pending = 0;
- emac_write(EMAC_RXINTMASKCLEAR, BIT(ch));
- }
-}
-
-/**
- * emac_cleanup_rxch: Book-keep function to clean RX channel resources
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number
- *
- * Called during device stop to clean up RX channel resources
- *
- */
-static void emac_cleanup_rxch(struct emac_priv *priv, u32 ch)
-{
- struct emac_rxch *rxch = priv->rxch[ch];
- struct emac_rx_bd __iomem *curr_bd;
-
- if (rxch) {
- /* free the receive buffers previously allocated */
- curr_bd = rxch->active_queue_head;
- while (curr_bd) {
- if (curr_bd->buf_token) {
- dma_unmap_single(&priv->ndev->dev,
- curr_bd->buff_ptr,
- curr_bd->off_b_len
- & EMAC_RX_BD_BUF_SIZE,
- DMA_FROM_DEVICE);
-
- dev_kfree_skb_any((struct sk_buff *)\
- curr_bd->buf_token);
- }
- curr_bd = curr_bd->next;
- }
- if (rxch->bd_mem)
- rxch->bd_mem = NULL;
- kfree(rxch);
- priv->rxch[ch] = NULL;
- }
-}
-
-/**
* emac_set_type0addr: Set EMAC Type0 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
@@ -1948,7 +1242,6 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
{
struct emac_priv *priv = netdev_priv(ndev);
- struct emac_rxch *rxch = priv->rxch[EMAC_DEF_RX_CH];
struct device *emac_dev = &priv->ndev->dev;
struct sockaddr *sa = addr;
@@ -1959,11 +1252,10 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
- /* If the interface is down - rxch is NULL. */
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
- memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
- emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
+ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
}
if (netif_msg_drv(priv))
@@ -1974,194 +1266,6 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
}
/**
- * emac_addbd_to_rx_queue: Recycle RX buffer descriptor
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number to process buffer descriptors for
- * @curr_bd: current buffer descriptor
- * @buffer: buffer pointer for descriptor
- * @buf_token: buffer token (stores skb information)
- *
- * Prepares the recycled buffer descriptor and addes it to hardware
- * receive queue - if queue empty this descriptor becomes the head
- * else addes the descriptor to end of queue
- *
- */
-static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
- struct emac_rx_bd __iomem *curr_bd,
- char *buffer, void *buf_token)
-{
- struct emac_rxch *rxch = priv->rxch[ch];
-
- /* populate the hardware descriptor */
- curr_bd->h_next = 0;
- curr_bd->buff_ptr = dma_map_single(&priv->ndev->dev, buffer,
- rxch->buf_size, DMA_FROM_DEVICE);
- curr_bd->off_b_len = rxch->buf_size;
- curr_bd->mode = EMAC_CPPI_OWNERSHIP_BIT;
- curr_bd->next = NULL;
- curr_bd->data_ptr = buffer;
- curr_bd->buf_token = buf_token;
-
- /* write back */
- BD_CACHE_WRITEBACK_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- if (rxch->active_queue_head == NULL) {
- rxch->active_queue_head = curr_bd;
- rxch->active_queue_tail = curr_bd;
- if (0 != rxch->queue_active) {
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head, priv));
- rxch->queue_active = 1;
- }
- } else {
- struct emac_rx_bd __iomem *tail_bd;
- u32 frame_status;
-
- tail_bd = rxch->active_queue_tail;
- rxch->active_queue_tail = curr_bd;
- tail_bd->next = curr_bd;
- tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = emac_virt_to_phys(curr_bd, priv);
- frame_status = tail_bd->mode;
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- frame_status &= ~(EMAC_CPPI_EOQ_BIT);
- tail_bd->mode = frame_status;
- ++rxch->end_of_queue_add;
- }
- }
- ++rxch->recycled_bd;
-}
-
-/**
- * emac_net_rx_cb: Prepares packet and sends to upper layer
- * @priv: The DaVinci EMAC private adapter structure
- * @net_pkt_list: Network packet list (received packets)
- *
- * Invalidates packet buffer memory and sends the received packet to upper
- * layer
- *
- * Returns success or appropriate error code (none as of now)
- */
-static int emac_net_rx_cb(struct emac_priv *priv,
- struct emac_netpktobj *net_pkt_list)
-{
- struct net_device *ndev = priv->ndev;
- struct sk_buff *p_skb = net_pkt_list->pkt_token;
- /* set length of packet */
- skb_put(p_skb, net_pkt_list->pkt_length);
- p_skb->protocol = eth_type_trans(p_skb, priv->ndev);
- netif_receive_skb(p_skb);
- ndev->stats.rx_bytes += net_pkt_list->pkt_length;
- ndev->stats.rx_packets++;
- return 0;
-}
-
-/**
- * emac_rx_bdproc: RX buffer descriptor (packet) processing
- * @priv: The DaVinci EMAC private adapter structure
- * @ch: RX channel number to process buffer descriptors for
- * @budget: number of packets allowed to process
- * @pending: indication to caller that packets are pending to process
- *
- * Processes RX buffer descriptors - checks ownership bit on the RX buffer
- * descriptor, sends the receive packet to upper layer, allocates a new SKB
- * and recycles the buffer descriptor (requeues it in hardware RX queue).
- * Only "budget" number of packets are processed and indication of pending
- * packets provided to the caller.
- *
- * Returns number of packets processed (and indication of pending packets)
- */
-static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
-{
- unsigned long flags;
- u32 frame_status;
- u32 pkts_processed = 0;
- char *new_buffer;
- struct emac_rx_bd __iomem *curr_bd;
- struct emac_rx_bd __iomem *last_bd;
- struct emac_netpktobj *curr_pkt, pkt_obj;
- struct emac_netbufobj buf_obj;
- struct emac_netbufobj *rx_buf_obj;
- void *new_buf_token;
- struct emac_rxch *rxch = priv->rxch[ch];
-
- if (unlikely(1 == rxch->teardown_pending))
- return 0;
- ++rxch->proc_count;
- spin_lock_irqsave(&priv->rx_lock, flags);
- pkt_obj.buf_list = &buf_obj;
- curr_pkt = &pkt_obj;
- curr_bd = rxch->active_queue_head;
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
-
- while ((curr_bd) &&
- ((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
- (pkts_processed < budget)) {
-
- new_buffer = emac_net_alloc_rx_buf(priv, rxch->buf_size,
- &new_buf_token, EMAC_DEF_RX_CH);
- if (unlikely(NULL == new_buffer)) {
- ++rxch->out_of_rx_buffers;
- goto end_emac_rx_bdproc;
- }
-
- /* populate received packet data structure */
- rx_buf_obj = &curr_pkt->buf_list[0];
- rx_buf_obj->data_ptr = (char *)curr_bd->data_ptr;
- rx_buf_obj->length = curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE;
- rx_buf_obj->buf_token = curr_bd->buf_token;
-
- dma_unmap_single(&priv->ndev->dev, curr_bd->buff_ptr,
- curr_bd->off_b_len & EMAC_RX_BD_BUF_SIZE,
- DMA_FROM_DEVICE);
-
- curr_pkt->pkt_token = curr_pkt->buf_list->buf_token;
- curr_pkt->num_bufs = 1;
- curr_pkt->pkt_length =
- (frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
- emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv));
- ++rxch->processed_bd;
- last_bd = curr_bd;
- curr_bd = last_bd->next;
- rxch->active_queue_head = curr_bd;
-
- /* check if end of RX queue ? */
- if (frame_status & EMAC_CPPI_EOQ_BIT) {
- if (curr_bd) {
- ++rxch->mis_queued_packets;
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd, priv));
- } else {
- ++rxch->end_of_queue;
- rxch->queue_active = 0;
- }
- }
-
- /* recycle BD */
- emac_addbd_to_rx_queue(priv, ch, last_bd, new_buffer,
- new_buf_token);
-
- /* return the packet to the user - BD ptr passed in
- * last parameter for potential *future* use */
- spin_unlock_irqrestore(&priv->rx_lock, flags);
- emac_net_rx_cb(priv, curr_pkt);
- spin_lock_irqsave(&priv->rx_lock, flags);
- curr_bd = rxch->active_queue_head;
- if (curr_bd) {
- BD_CACHE_INVALIDATE(curr_bd, EMAC_BD_LENGTH_FOR_CACHE);
- frame_status = curr_bd->mode;
- }
- ++pkts_processed;
- }
-
-end_emac_rx_bdproc:
- spin_unlock_irqrestore(&priv->rx_lock, flags);
- return pkts_processed;
-}
-
-/**
* emac_hw_enable: Enable EMAC hardware for packet transmission/reception
* @priv: The DaVinci EMAC private adapter structure
*
@@ -2172,7 +1276,7 @@ end_emac_rx_bdproc:
*/
static int emac_hw_enable(struct emac_priv *priv)
{
- u32 ch, val, mbp_enable, mac_control;
+ u32 val, mbp_enable, mac_control;
/* Soft reset */
emac_write(EMAC_SOFTRESET, 1);
@@ -2215,26 +1319,9 @@ static int emac_hw_enable(struct emac_priv *priv)
emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
- val = emac_read(EMAC_TXCONTROL);
- val |= EMAC_TX_CONTROL_TX_ENABLE_VAL;
- emac_write(EMAC_TXCONTROL, val);
- val = emac_read(EMAC_RXCONTROL);
- val |= EMAC_RX_CONTROL_RX_ENABLE_VAL;
- emac_write(EMAC_RXCONTROL, val);
emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
- for (ch = 0; ch < EMAC_DEF_MAX_TX_CH; ch++) {
- emac_write(EMAC_TXHDP(ch), 0);
- emac_write(EMAC_TXINTMASKSET, BIT(ch));
- }
- for (ch = 0; ch < EMAC_DEF_MAX_RX_CH; ch++) {
- struct emac_rxch *rxch = priv->rxch[ch];
- emac_setmac(priv, ch, rxch->mac_addr);
- emac_write(EMAC_RXINTMASKSET, BIT(ch));
- rxch->queue_active = 1;
- emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head, priv));
- }
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
/* Enable MII */
val = emac_read(EMAC_MACCONTROL);
@@ -2279,8 +1366,8 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
if (status & mask) {
- num_tx_pkts = emac_tx_bdproc(priv, EMAC_DEF_TX_CH,
- EMAC_DEF_TX_MAX_SERVICE);
+ num_tx_pkts = cpdma_chan_process(priv->txchan,
+ EMAC_DEF_TX_MAX_SERVICE);
} /* TX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
@@ -2289,7 +1376,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
if (status & mask) {
- num_rx_pkts = emac_rx_bdproc(priv, EMAC_DEF_RX_CH, budget);
+ num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
} /* RX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
@@ -2348,79 +1435,6 @@ void emac_poll_controller(struct net_device *ndev)
}
#endif
-/* PHY/MII bus related */
-
-/* Wait until mdio is ready for next command */
-#define MDIO_WAIT_FOR_USER_ACCESS\
- while ((emac_mdio_read((MDIO_USERACCESS(0))) &\
- MDIO_USERACCESS_GO) != 0)
-
-static int emac_mii_read(struct mii_bus *bus, int phy_id, int phy_reg)
-{
- unsigned int phy_data = 0;
- unsigned int phy_control;
-
- /* Wait until mdio is ready for next command */
- MDIO_WAIT_FOR_USER_ACCESS;
-
- phy_control = (MDIO_USERACCESS_GO |
- MDIO_USERACCESS_READ |
- ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
- ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
- (phy_data & MDIO_USERACCESS_DATA));
- emac_mdio_write(MDIO_USERACCESS(0), phy_control);
-
- /* Wait until mdio is ready for next command */
- MDIO_WAIT_FOR_USER_ACCESS;
-
- return emac_mdio_read(MDIO_USERACCESS(0)) & MDIO_USERACCESS_DATA;
-
-}
-
-static int emac_mii_write(struct mii_bus *bus, int phy_id,
- int phy_reg, u16 phy_data)
-{
-
- unsigned int control;
-
- /* until mdio is ready for next command */
- MDIO_WAIT_FOR_USER_ACCESS;
-
- control = (MDIO_USERACCESS_GO |
- MDIO_USERACCESS_WRITE |
- ((phy_reg << 21) & MDIO_USERACCESS_REGADR) |
- ((phy_id << 16) & MDIO_USERACCESS_PHYADR) |
- (phy_data & MDIO_USERACCESS_DATA));
- emac_mdio_write(MDIO_USERACCESS(0), control);
-
- return 0;
-}
-
-static int emac_mii_reset(struct mii_bus *bus)
-{
- unsigned int clk_div;
- int mdio_bus_freq = emac_bus_frequency;
-
- if (mdio_max_freq && mdio_bus_freq)
- clk_div = ((mdio_bus_freq / mdio_max_freq) - 1);
- else
- clk_div = 0xFF;
-
- clk_div &= MDIO_CONTROL_CLKDIV;
-
- /* Set enable and clock divider in MDIOControl */
- emac_mdio_write(MDIO_CONTROL, (clk_div | MDIO_CONTROL_ENABLE));
-
- return 0;
-
-}
-
-static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, PHY_POLL };
-
-/* emac_driver: EMAC MII bus structure */
-
-static struct mii_bus *emac_mii;
-
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
@@ -2485,6 +1499,11 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
return -EOPNOTSUPP;
}
+static int match_first_device(struct device *dev, void *data)
+{
+ return 1;
+}
+
/**
* emac_dev_open: EMAC device open
* @ndev: The DaVinci EMAC network adapter
@@ -2498,10 +1517,9 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
static int emac_dev_open(struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
- u32 rc, cnt, ch;
- int phy_addr;
+ u32 cnt;
struct resource *res;
- int q, m;
+ int q, m, ret;
int i = 0;
int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
@@ -2513,29 +1531,21 @@ static int emac_dev_open(struct net_device *ndev)
/* Configuration items */
priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
- /* Clear basic hardware */
- for (ch = 0; ch < EMAC_MAX_TXRX_CHANNELS; ch++) {
- emac_write(EMAC_TXHDP(ch), 0);
- emac_write(EMAC_RXHDP(ch), 0);
- emac_write(EMAC_RXHDP(ch), 0);
- emac_write(EMAC_RXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
- emac_write(EMAC_TXINTMASKCLEAR, EMAC_INT_MASK_CLEAR);
- }
priv->mac_hash1 = 0;
priv->mac_hash2 = 0;
emac_write(EMAC_MACHASH1, 0);
emac_write(EMAC_MACHASH2, 0);
- /* multi ch not supported - open 1 TX, 1RX ch by default */
- rc = emac_init_txch(priv, EMAC_DEF_TX_CH);
- if (0 != rc) {
- dev_err(emac_dev, "DaVinci EMAC: emac_init_txch() failed");
- return rc;
- }
- rc = emac_init_rxch(priv, EMAC_DEF_RX_CH, priv->mac_addr);
- if (0 != rc) {
- dev_err(emac_dev, "DaVinci EMAC: emac_init_rxch() failed");
- return rc;
+ for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
+ struct sk_buff *skb = emac_rx_alloc(priv);
+
+ if (!skb)
+ break;
+
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), GFP_KERNEL);
+ if (WARN_ON(ret < 0))
+ break;
}
/* Request IRQ */
@@ -2560,28 +1570,28 @@ static int emac_dev_open(struct net_device *ndev)
emac_set_coalesce(ndev, &coal);
}
- /* find the first phy */
+ cpdma_ctlr_start(priv->dma);
+
priv->phydev = NULL;
- if (priv->phy_mask) {
- emac_mii_reset(priv->mii_bus);
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (priv->mii_bus->phy_map[phy_addr]) {
- priv->phydev = priv->mii_bus->phy_map[phy_addr];
- break;
- }
- }
+ /* use the first phy on the bus if pdata did not give us a phy id */
+ if (!priv->phy_id) {
+ struct device *phy;
- if (!priv->phydev) {
- printk(KERN_ERR "%s: no PHY found\n", ndev->name);
- return -1;
- }
+ phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+ match_first_device);
+ if (phy)
+ priv->phy_id = dev_name(phy);
+ }
- priv->phydev = phy_connect(ndev, dev_name(&priv->phydev->dev),
- &emac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
+ if (priv->phy_id && *priv->phy_id) {
+ priv->phydev = phy_connect(ndev, priv->phy_id,
+ &emac_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
if (IS_ERR(priv->phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n",
- ndev->name);
+ dev_err(emac_dev, "could not connect to phy %s\n",
+ priv->phy_id);
+ priv->phydev = NULL;
return PTR_ERR(priv->phydev);
}
@@ -2589,12 +1599,13 @@ static int emac_dev_open(struct net_device *ndev)
priv->speed = 0;
priv->duplex = ~0;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, id=%x)\n", ndev->name,
+ dev_info(emac_dev, "attached PHY driver [%s] "
+ "(mii_bus:phy_addr=%s, id=%x)\n",
priv->phydev->drv->name, dev_name(&priv->phydev->dev),
priv->phydev->phy_id);
- } else{
+ } else {
/* No PHY , fix the link, speed and duplex settings */
+ dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
priv->speed = SPEED_100;
priv->duplex = DUPLEX_FULL;
@@ -2607,7 +1618,7 @@ static int emac_dev_open(struct net_device *ndev)
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
- if (priv->phy_mask)
+ if (priv->phydev)
phy_start(priv->phydev);
return 0;
@@ -2648,10 +1659,7 @@ static int emac_dev_stop(struct net_device *ndev)
netif_carrier_off(ndev);
emac_int_disable(priv);
- emac_stop_txch(priv, EMAC_DEF_TX_CH);
- emac_stop_rxch(priv, EMAC_DEF_RX_CH);
- emac_cleanup_txch(priv, EMAC_DEF_TX_CH);
- emac_cleanup_rxch(priv, EMAC_DEF_RX_CH);
+ cpdma_ctlr_stop(priv->dma);
emac_write(EMAC_SOFTRESET, 1);
if (priv->phydev)
@@ -2756,9 +1764,10 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev;
struct emac_priv *priv;
- unsigned long size;
+ unsigned long size, hw_ram_addr;
struct emac_platform_data *pdata;
struct device *emac_dev;
+ struct cpdma_params dma_params;
/* obtain emac clock from kernel */
emac_clk = clk_get(&pdev->dev, NULL);
@@ -2782,8 +1791,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
- spin_lock_init(&priv->tx_lock);
- spin_lock_init(&priv->rx_lock);
spin_lock_init(&priv->lock);
pdata = pdev->dev.platform_data;
@@ -2794,7 +1801,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
/* MAC addr and PHY mask , RMII enable info from platform_data */
memcpy(priv->mac_addr, pdata->mac_addr, 6);
- priv->phy_mask = pdata->phy_mask;
+ priv->phy_id = pdata->phy_id;
priv->rmii_en = pdata->rmii_en;
priv->version = pdata->version;
priv->int_enable = pdata->interrupt_enable;
@@ -2831,14 +1838,41 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
ndev->base_addr = (unsigned long)priv->remap_addr;
priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
- priv->ctrl_ram_size = pdata->ctrl_ram_size;
- priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
- if (pdata->hw_ram_addr)
- priv->hw_ram_addr = pdata->hw_ram_addr;
- else
- priv->hw_ram_addr = (u32 __force)res->start +
- pdata->ctrl_ram_offset;
+ hw_ram_addr = pdata->hw_ram_addr;
+ if (!hw_ram_addr)
+ hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ dma_params.dev = emac_dev;
+ dma_params.dmaregs = priv->emac_base;
+ dma_params.rxthresh = priv->emac_base + 0x120;
+ dma_params.rxfree = priv->emac_base + 0x140;
+ dma_params.txhdp = priv->emac_base + 0x600;
+ dma_params.rxhdp = priv->emac_base + 0x620;
+ dma_params.txcp = priv->emac_base + 0x640;
+ dma_params.rxcp = priv->emac_base + 0x660;
+ dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
+ dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
+ dma_params.desc_mem_phys = hw_ram_addr;
+ dma_params.desc_mem_size = pdata->ctrl_ram_size;
+ dma_params.desc_align = 16;
+
+ priv->dma = cpdma_ctlr_create(&dma_params);
+ if (!priv->dma) {
+ dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
+ rc = -ENOMEM;
+ goto no_dma;
+ }
+
+ priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
+ emac_tx_handler);
+ priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
+ emac_rx_handler);
+ if (WARN_ON(!priv->txchan || !priv->rxchan)) {
+ rc = -ENOMEM;
+ goto no_irq_res;
+ }
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
@@ -2871,32 +1905,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
}
- /* MII/Phy intialisation, mdio bus registration */
- emac_mii = mdiobus_alloc();
- if (emac_mii == NULL) {
- dev_err(emac_dev, "DaVinci EMAC: Error allocating mii_bus\n");
- rc = -ENOMEM;
- goto mdio_alloc_err;
- }
-
- priv->mii_bus = emac_mii;
- emac_mii->name = "emac-mii",
- emac_mii->read = emac_mii_read,
- emac_mii->write = emac_mii_write,
- emac_mii->reset = emac_mii_reset,
- emac_mii->irq = mii_irqs,
- emac_mii->phy_mask = ~(priv->phy_mask);
- emac_mii->parent = &pdev->dev;
- emac_mii->priv = priv->remap_addr + pdata->mdio_reg_offset;
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", priv->pdev->id);
- mdio_max_freq = pdata->mdio_max_freq;
- emac_mii->reset(emac_mii);
-
- /* Register the MII bus */
- rc = mdiobus_register(emac_mii);
- if (rc)
- goto mdiobus_quit;
-
if (netif_msg_probe(priv)) {
dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
"(regs: %p, irq: %d)\n",
@@ -2904,13 +1912,15 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
}
return 0;
-mdiobus_quit:
- mdiobus_free(emac_mii);
-
netdev_reg_err:
-mdio_alloc_err:
clk_disable(emac_clk);
no_irq_res:
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
+no_dma:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, res->end - res->start + 1);
iounmap(priv->remap_addr);
@@ -2938,8 +1948,12 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
+
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
release_mem_region(res->start, res->end - res->start + 1);
diff --git a/drivers/net/davinci_mdio.c b/drivers/net/davinci_mdio.c
new file mode 100644
index 0000000..7615040
--- /dev/null
+++ b/drivers/net/davinci_mdio.c
@@ -0,0 +1,475 @@
+/*
+ * DaVinci MDIO Module driver
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * ---------------------------------------------------------------------------
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/davinci_emac.h>
+
+/*
+ * This timeout definition is a worst-case ultra defensive measure against
+ * unexpected controller lock ups. Ideally, we should never ever hit this
+ * scenario in practice.
+ */
+#define MDIO_TIMEOUT 100 /* msecs */
+
+#define PHY_REG_MASK 0x1f
+#define PHY_ID_MASK 0x1f
+
+#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
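+
+/*
+ * A board file registers a platform device named "davinci_mdio" with a
+ * register-space resource and, optionally, an mdio_platform_data giving
+ * bus_freq. A minimal sketch (the base address below is a placeholder,
+ * not taken from any real board):
+ *
+ *	static struct resource mdio_res = {
+ *		.start	= 0x01e24000,
+ *		.end	= 0x01e24000 + SZ_4K - 1,
+ *		.flags	= IORESOURCE_MEM,
+ *	};
+ *
+ *	static struct mdio_platform_data mdio_pdata = {
+ *		.bus_freq = 2200000,
+ *	};
+ *
+ *	platform_device_register_resndata(NULL, "davinci_mdio", 0,
+ *			&mdio_res, 1, &mdio_pdata, sizeof(mdio_pdata));
+ */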
+
+struct davinci_mdio_regs {
+ u32 version;
+ u32 control;
+#define CONTROL_IDLE BIT(31)
+#define CONTROL_ENABLE BIT(30)
+#define CONTROL_MAX_DIV (0xff)
+
+ u32 alive;
+ u32 link;
+ u32 linkintraw;
+ u32 linkintmasked;
+ u32 __reserved_0[2];
+ u32 userintraw;
+ u32 userintmasked;
+ u32 userintmaskset;
+ u32 userintmaskclr;
+ u32 __reserved_1[20];
+
+ struct {
+ u32 access;
+#define USERACCESS_GO BIT(31)
+#define USERACCESS_WRITE BIT(30)
+#define USERACCESS_ACK BIT(29)
+#define USERACCESS_READ (0)
+#define USERACCESS_DATA (0xffff)
+
+ u32 physel;
+ } user[0];
+};
+
+static struct mdio_platform_data default_pdata = {
+ .bus_freq = DEF_OUT_FREQ,
+};
+
+struct davinci_mdio_data {
+ struct mdio_platform_data pdata;
+ struct davinci_mdio_regs __iomem *regs;
+ spinlock_t lock;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *bus;
+ bool suspended;
+ unsigned long access_time; /* jiffies */
+};
+
+static void __davinci_mdio_reset(struct davinci_mdio_data *data)
+{
+ u32 mdio_in, div, mdio_out_khz, access_time;
+
+ mdio_in = clk_get_rate(data->clk);
+ div = (mdio_in / data->pdata.bus_freq) - 1;
+ if (div > CONTROL_MAX_DIV)
+ div = CONTROL_MAX_DIV;
+
+ /* set enable and clock divider */
+ __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
+
+ /*
+ * One mdio transaction consists of:
+ * 32 bits of preamble
+ * 32 bits of transferred data
+ * 24 bits of bus yield (not needed unless shared?)
+ */
+ mdio_out_khz = mdio_in / (1000 * (div + 1));
+ access_time = (88 * 1000) / mdio_out_khz;
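+
+	/*
+	 * Worked example (clock rate assumed for illustration): with a
+	 * 76 MHz input clock and the default 2.2 MHz bus_freq, div = 33,
+	 * so mdio_out_khz = 76000 / 34 ~= 2235 and one 88-bit transaction
+	 * takes about (88 * 1000) / 2235 ~= 39 usecs.
+	 */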
+
+ /*
+ * In the worst case, we could be kicking off a user-access immediately
+ * after the mdio bus scan state-machine triggered its own read. If
+ * so, our request could get deferred by one access cycle. We
+ * defensively allow for 4 access cycles.
+ */
+ data->access_time = usecs_to_jiffies(access_time * 4);
+ if (!data->access_time)
+ data->access_time = 1;
+}
+
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 phy_mask, ver;
+
+ __davinci_mdio_reset(data);
+
+ /* wait for scan logic to settle */
+ msleep(PHY_MAX_ADDR * data->access_time);
+
+ /* dump hardware version info */
+ ver = __raw_readl(&data->regs->version);
+ dev_info(data->dev, "davinci mdio revision %d.%d\n",
+ (ver >> 8) & 0xff, ver & 0xff);
+
+ /* get phy mask from the alive register */
+ phy_mask = __raw_readl(&data->regs->alive);
+ if (phy_mask) {
+ /* restrict mdio bus to live phys only */
+ dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
+ phy_mask = ~phy_mask;
+ } else {
+ /* desperately scan all phys */
+ dev_warn(data->dev, "no live phy, scanning all\n");
+ phy_mask = 0;
+ }
+ data->bus->phy_mask = phy_mask;
+
+ return 0;
+}
+
+/* wait until hardware is ready for another user access */
+static inline int wait_for_user_access(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+ u32 reg;
+
+ while (time_after(timeout, jiffies)) {
+ reg = __raw_readl(&regs->user[0].access);
+ if ((reg & USERACCESS_GO) == 0)
+ return 0;
+
+ reg = __raw_readl(&regs->control);
+ if ((reg & CONTROL_IDLE) == 0)
+ continue;
+
+ /*
+ * An emac soft_reset may have clobbered the mdio controller's
+ * state machine. We need to reset and retry the current
+ * operation
+ */
+ dev_warn(data->dev, "resetting idled controller\n");
+ __davinci_mdio_reset(data);
+ return -EAGAIN;
+ }
+ dev_err(data->dev, "timed out waiting for user access\n");
+ return -ETIMEDOUT;
+}
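+/*
+ * Note on the return contract above: 0 means the GO bit is clear and a
+ * new transaction may be issued; -EAGAIN means the controller was found
+ * idled and has been reset, so callers re-issue the whole transaction
+ * (see the retry loops in davinci_mdio_read/write below); -ETIMEDOUT
+ * aborts the access.
+ */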
+
+/* wait until hardware state machine is idle */
+static inline int wait_for_idle(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+
+ while (time_after(timeout, jiffies)) {
+ if (__raw_readl(&regs->control) & CONTROL_IDLE)
+ return 0;
+ }
+ dev_err(data->dev, "timed out waiting for idle\n");
+ return -ETIMEDOUT;
+}
+
+static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
+ reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
+ (phy_id << 16));
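+	/*
+	 * USERACCESS layout, per the defines above: GO is bit 31, WRITE bit
+	 * 30, ACK bit 29; the 5-bit register address sits in bits 25:21, the
+	 * 5-bit phy address in bits 20:16, and the data in bits 15:0.
+	 */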
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ reg = __raw_readl(&data->regs->user[0].access);
+ ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+ return ret;
+}
+
+static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
+ int phy_reg, u16 phy_data)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ spin_lock(&data->lock);
+
+ if (data->suspended) {
+ spin_unlock(&data->lock);
+ return -ENODEV;
+ }
+
+ reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
+ (phy_id << 16) | (phy_data & USERACCESS_DATA));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ __raw_writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ break;
+ }
+
+ spin_unlock(&data->lock);
+
+	return ret;
+}
+
+static int __devinit davinci_mdio_probe(struct platform_device *pdev)
+{
+ struct mdio_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data;
+ struct resource *res;
+ struct phy_device *phy;
+ int ret, addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(dev, "failed to alloc device data\n");
+ return -ENOMEM;
+ }
+
+ data->pdata = pdata ? (*pdata) : default_pdata;
+
+ data->bus = mdiobus_alloc();
+ if (!data->bus) {
+ dev_err(dev, "failed to alloc mii bus\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ data->bus->name = dev_name(dev);
+	data->bus->read = davinci_mdio_read;
+	data->bus->write = davinci_mdio_write;
+	data->bus->reset = davinci_mdio_reset;
+ data->bus->parent = dev;
+ data->bus->priv = data;
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+
+ data->clk = clk_get(dev, NULL);
+ if (IS_ERR(data->clk)) {
+ data->clk = NULL;
+ dev_err(dev, "failed to get device clock\n");
+ ret = PTR_ERR(data->clk);
+ goto bail_out;
+ }
+
+ clk_enable(data->clk);
+
+ dev_set_drvdata(dev, data);
+ data->dev = dev;
+ spin_lock_init(&data->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "could not find register map resource\n");
+ ret = -ENOENT;
+ goto bail_out;
+ }
+
+ res = devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(dev));
+ if (!res) {
+ dev_err(dev, "could not allocate register map resource\n");
+ ret = -ENXIO;
+ goto bail_out;
+ }
+
+ data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+ if (!data->regs) {
+ dev_err(dev, "could not map mdio registers\n");
+ ret = -ENOMEM;
+ goto bail_out;
+ }
+
+ /* register the mii bus */
+ ret = mdiobus_register(data->bus);
+ if (ret)
+ goto bail_out;
+
+ /* scan and dump the bus */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phy = data->bus->phy_map[addr];
+ if (phy) {
+ dev_info(dev, "phy[%d]: device %s, driver %s\n",
+ phy->addr, dev_name(&phy->dev),
+ phy->drv ? phy->drv->name : "unknown");
+ }
+ }
+
+ return 0;
+
+bail_out:
+ if (data->bus)
+ mdiobus_free(data->bus);
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ kfree(data);
+
+ return ret;
+}
+
+static int __devexit davinci_mdio_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+ if (data->bus)
+ mdiobus_free(data->bus);
+
+ if (data->clk) {
+ clk_disable(data->clk);
+ clk_put(data->clk);
+ }
+
+ dev_set_drvdata(dev, NULL);
+
+ kfree(data);
+
+ return 0;
+}
+
+static int davinci_mdio_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+
+ /* shutdown the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl &= ~CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+ wait_for_idle(data);
+
+ if (data->clk)
+ clk_disable(data->clk);
+
+ data->suspended = true;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
+
+static int davinci_mdio_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ spin_lock(&data->lock);
+ if (data->clk)
+ clk_enable(data->clk);
+
+ /* restart the scan state machine */
+ ctrl = __raw_readl(&data->regs->control);
+ ctrl |= CONTROL_ENABLE;
+ __raw_writel(ctrl, &data->regs->control);
+
+ data->suspended = false;
+ spin_unlock(&data->lock);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_mdio_pm_ops = {
+ .suspend = davinci_mdio_suspend,
+ .resume = davinci_mdio_resume,
+};
+
+static struct platform_driver davinci_mdio_driver = {
+ .driver = {
+ .name = "davinci_mdio",
+ .owner = THIS_MODULE,
+ .pm = &davinci_mdio_pm_ops,
+ },
+ .probe = davinci_mdio_probe,
+ .remove = __devexit_p(davinci_mdio_remove),
+};
+
+static int __init davinci_mdio_init(void)
+{
+ return platform_driver_register(&davinci_mdio_driver);
+}
+device_initcall(davinci_mdio_init);
+
+static void __exit davinci_mdio_exit(void)
+{
+ platform_driver_unregister(&davinci_mdio_driver);
+}
+module_exit(davinci_mdio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci MDIO driver");
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index a117f2a..4d62f7b 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k6-NAPI"
+#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
- /* signal that we're down so the interrupt handler does not
- * reschedule our watchdog timer */
- set_bit(__E1000_DOWN, &adapter->flags);
/* disable receives in the hardware */
rctl = er32(RCTL);
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
+ /*
+ * Setting DOWN must be after irq_disable to prevent
+ * a screaming interrupt. Setting DOWN also prevents
+ * timers and tasks from rescheduling.
+ */
+ set_bit(__E1000_DOWN, &adapter->flags);
+
del_timer_sync(&adapter->tx_fifo_stall_timer);
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
@@ -521,7 +525,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
-void e1000_reinit_safe(struct e1000_adapter *adapter)
+static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index ca663f1..7236f1a 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -52,6 +52,10 @@
(ID_LED_DEF1_DEF2))
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define E1000_BASE1000T_STATUS 10
+#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
+#define E1000_RECEIVE_ERROR_COUNTER 21
+#define E1000_RECEIVE_ERROR_MAX 0xFFFF
#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
@@ -1243,6 +1247,39 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
}
/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+ u16 status_1kbt = 0;
+ u16 receive_errors = 0;
+ bool phy_hung = false;
+ s32 ret_val = 0;
+
+ /*
+	 * Read the PHY Receive Error counter first; if it is at its max (all
+	 * F's), read the Base1000T status register. If both are at max, the
+	 * PHY is hung.
+ */
+ ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+
+ if (ret_val)
+ goto out;
+ if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+ ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+ if (ret_val)
+ goto out;
+ if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+ E1000_IDLE_ERROR_COUNT_MASK)
+ phy_hung = true;
+ }
+out:
+ return phy_hung;
+}
+
+/**
* e1000_setup_link_82571 - Setup flow control and link settings
* @hw: pointer to the HW structure
*
@@ -1859,6 +1896,7 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
+ .flags2 = FLAG2_CHECK_PHY_HANG,
.pba = 36,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index cee882d..fdc67fe 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -397,6 +397,7 @@ struct e1000_adapter {
struct work_struct print_hang_task;
bool idle_check;
+ int phy_hang_count;
};
struct e1000_info {
@@ -454,6 +455,7 @@ struct e1000_info {
#define FLAG2_HAS_EEE (1 << 5)
#define FLAG2_DMA_BURST (1 << 6)
#define FLAG2_DISABLE_AIM (1 << 8)
+#define FLAG2_CHECK_PHY_HANG (1 << 9)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -631,6 +633,7 @@ extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+extern bool e1000_check_phy_82574(struct e1000_hw *hw);
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index ec8cf3f..c4ca162 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4098,6 +4098,25 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
}
}
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ /*
+	 * With 82574 controllers, the PHY needs to be checked periodically
+	 * for a hung state, and reset if two successive checks report a hang.
+ */
+ if (e1000_check_phy_82574(hw))
+ adapter->phy_hang_count++;
+ else
+ adapter->phy_hang_count = 0;
+
+ if (adapter->phy_hang_count > 1) {
+ adapter->phy_hang_count = 0;
+ schedule_work(&adapter->reset_task);
+ }
+}
+
/**
* e1000_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
@@ -4333,6 +4352,9 @@ link_up:
if (e1000e_get_laa_state_82571(hw))
e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+ if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+ e1000e_check_82574_phy_workaround(adapter);
+
/* Reset the timer */
if (!test_bit(__E1000_DOWN, &adapter->state))
mod_timer(&adapter->watchdog_timer,
@@ -4860,8 +4882,11 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter;
adapter = container_of(work, struct e1000_adapter, reset_task);
- e1000e_dump(adapter);
- e_err("Reset adapter\n");
+ if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+ (adapter->flags & FLAG_RX_RESTART_NOW))) {
+ e1000e_dump(adapter);
+ e_err("Reset adapter\n");
+ }
e1000e_reinit_locked(adapter);
}
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1321cb6..8e745e7 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -396,7 +396,9 @@ struct ehea_port_res {
int swqe_ll_count;
u32 swqe_id_counter;
u64 tx_packets;
+ u64 tx_bytes;
u64 rx_packets;
+ u64 rx_bytes;
u32 poll_counter;
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index bb7d306..3d0af08 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
struct ehea_port *port = netdev_priv(dev);
struct net_device_stats *stats = &port->stats;
struct hcp_ehea_port_cb2 *cb2;
- u64 hret, rx_packets, tx_packets;
+ u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
int i;
memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
rx_packets = 0;
- for (i = 0; i < port->num_def_qps; i++)
+ for (i = 0; i < port->num_def_qps; i++) {
rx_packets += port->port_res[i].rx_packets;
+ rx_bytes += port->port_res[i].rx_bytes;
+ }
tx_packets = 0;
- for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+ for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
tx_packets += port->port_res[i].tx_packets;
+ tx_bytes += port->port_res[i].tx_bytes;
+ }
stats->tx_packets = tx_packets;
stats->multicast = cb2->rxmcp;
stats->rx_errors = cb2->rxuerr;
- stats->rx_bytes = cb2->rxo;
- stats->tx_bytes = cb2->txo;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
out_herr:
@@ -396,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
skb_arr_rq1[index] = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
+			ehea_info("Unable to allocate enough skbs for the array\n");
pr->rq1_skba.os_skbs = fill_wqes - i;
break;
}
@@ -418,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
struct net_device *dev = pr->port->netdev;
int i;
- for (i = 0; i < pr->rq1_skba.len; i++) {
+ if (nr_rq1a > pr->rq1_skba.len) {
+ ehea_error("NR_RQ1A bigger than skb array len\n");
+ return;
+ }
+
+ for (i = 0; i < nr_rq1a; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i])
+ if (!skb_arr_rq1[i]) {
+			ehea_info("Not enough memory to allocate skb array\n");
break;
+ }
}
/* Ring doorbell */
- ehea_update_rq1a(pr->qp, nr_rq1a);
+ ehea_update_rq1a(pr->qp, i);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -703,6 +715,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
int skb_arr_rq2_len = pr->rq2_skba.len;
int skb_arr_rq3_len = pr->rq3_skba.len;
int processed, processed_rq1, processed_rq2, processed_rq3;
+ u64 processed_bytes = 0;
int wqe_index, last_wqe_index, rq, port_reset;
processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -730,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev,
skb = netdev_alloc_skb(dev,
EHEA_L_PKT_SIZE);
- if (!skb)
+ if (!skb) {
+ ehea_info("Not enough memory to allocate skb\n");
break;
+ }
}
skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe->num_bytes_transfered - 4);
@@ -760,6 +775,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
processed_rq3++;
}
+ processed_bytes += skb->len;
ehea_proc_skb(pr, cqe, skb);
} else {
pr->p_stats.poll_receive_errors++;
@@ -775,6 +791,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
lro_flush_all(&pr->lro_mgr);
pr->rx_packets += processed;
+ pr->rx_bytes += processed_bytes;
ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1526,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
enum ehea_eq_type eq_type = EHEA_EQ;
struct ehea_qp_init_attr *init_attr = NULL;
int ret = -EIO;
+ u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
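+	/* snapshot the counters; the memset below wipes the whole port_res */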
+ tx_bytes = pr->tx_bytes;
+ tx_packets = pr->tx_packets;
+ rx_bytes = pr->rx_bytes;
+ rx_packets = pr->rx_packets;
memset(pr, 0, sizeof(struct ehea_port_res));
+	pr->tx_bytes = tx_bytes;
+ pr->tx_packets = tx_packets;
+ pr->rx_bytes = rx_bytes;
+ pr->rx_packets = rx_packets;
+
pr->port = port;
spin_lock_init(&pr->xmit_lock);
spin_lock_init(&pr->netif_queue);
@@ -2249,6 +2277,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(swqe, 0, SWQE_HEADER_SIZE);
atomic_dec(&pr->swqe_avail);
+ if (vlan_tx_tag_present(skb)) {
+ swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+ swqe->vlan_tag = vlan_tx_tag_get(skb);
+ }
+
+ pr->tx_packets++;
+ pr->tx_bytes += skb->len;
+
if (skb->len <= SWQE3_MAX_IMM) {
u32 sig_iv = port->sig_comp_iv;
u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2315,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
pr->swqe_id_counter += 1;
- if (vlan_tx_tag_present(skb)) {
- swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
- swqe->vlan_tag = vlan_tx_tag_get(skb);
- }
-
if (netif_msg_tx_queued(port)) {
ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2326,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
ehea_post_swqe(pr->qp, swqe);
- pr->tx_packets++;
if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
spin_lock_irqsave(&pr->netif_queue, flags);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4c4cc80..d1bec62 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -577,11 +577,10 @@ static int gfar_parse_group(struct device_node *np,
irq_of_parse_and_map(np, 1);
priv->gfargrp[priv->num_grps].interruptError =
irq_of_parse_and_map(np,2);
- if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
- priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
- priv->gfargrp[priv->num_grps].interruptError < 0) {
+ if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ ||
+ priv->gfargrp[priv->num_grps].interruptError == NO_IRQ)
return -EINVAL;
- }
}
priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
@@ -2511,7 +2510,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
skb_recycle_check(skb, priv->rx_buffer_size +
RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
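+			/* locked variant: rx_recycle is shared with the rx path */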
} else
dev_kfree_skb_any(skb);
@@ -2594,7 +2593,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
- skb = __skb_dequeue(&priv->rx_recycle);
+ skb = skb_dequeue(&priv->rx_recycle);
if (!skb)
skb = gfar_alloc_skb(dev);
@@ -2750,7 +2749,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
if (unlikely(!newskb))
newskb = skb;
else if (skb)
- __skb_queue_head(&priv->rx_recycle, skb);
+ skb_queue_head(&priv->rx_recycle, skb);
} else {
/* Increment the number of packets */
rx_queue->stats.rx_packets++;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566eb..3bc8e27 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
+ device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
spin_lock_irqsave(&priv->bflock, flags);
- priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
- device_set_wakeup_enable(&dev->dev, priv->wol_en);
+ priv->wol_en = !!device_may_wakeup(&dev->dev);
spin_unlock_irqrestore(&priv->bflock, flags);
return 0;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc32..06bb9b7 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev,
SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
netif_carrier_off(ndev);
- netif_stop_queue(ndev);
err = register_netdev(ndev);
if (err) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 14db09e..892d196 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -4107,7 +4107,6 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
- struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
int tso = 0, count;
u32 tx_flags = 0;
u16 first;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index ebfaa68..28af019 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2783,15 +2783,15 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
/* reset the hardware with the new settings */
igbvf_reset(adapter);
- /* tell the stack to leave us alone until igbvf_open() is called */
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
goto err_hw_init;
+ /* tell the stack to leave us alone until igbvf_open() is called */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
igbvf_print_device_info(adapter);
igbvf_initialize_last_counter_stats(adapter);
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index dc01980..aa93655 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = {
"IC PLUS IP1000 1000/100/10 based NIC",
"Sundance Technology ST2021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
- "Tamarack Microelectronics TC9020/9021 based NIC",
"D-Link NIC IP1000A"
};
static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
- { PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
- { PCI_VDEVICE(DLINK, 0x9021), 3 },
- { PCI_VDEVICE(DLINK, 0x4020), 4 },
+ { PCI_VDEVICE(DLINK, 0x9021), 2 },
+ { PCI_VDEVICE(DLINK, 0x4020), 3 },
{ 0, }
};
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 00b38bc..52a7c86 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
/* Baud Rate Error Correction x 10000 */
u32 rate_err_array[] = {
- 0000, 0625, 1250, 1875,
+ 0, 625, 1250, 1875,
2500, 3125, 3750, 4375,
5000, 5625, 6250, 6875,
7500, 8125, 8750, 9375,
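The old first row mixed in octal literals: a leading zero makes a C integer constant octal, so 0625 was actually 405 and would have corrupted the error-correction table. A standalone demonstration of the gotcha:

	#include <stdio.h>

	int main(void)
	{
		/* A leading zero makes the literal octal in C. */
		int wrong = 0625;	/* octal 625 == decimal 405 */
		int right = 625;	/* decimal, as the table intends */

		printf("%d %d\n", wrong, right);	/* prints: 405 625 */
		return 0;
	}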
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 666207a..caa8192 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -533,6 +533,7 @@ ixgb_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
free_netdev(netdev);
+ pci_disable_device(pdev);
}
/**
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 8bb9ddb..0d44c64 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -43,9 +43,12 @@
* ixgbe_dcb_check_config().
*/
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
- u8 direction)
+ int max_frame, u8 direction)
{
struct tc_bw_alloc *p;
+ int min_credit;
+ int min_multiplier;
+ int min_percent = 100;
s32 ret_val = 0;
/* Initialization values default for Tx settings */
u32 credit_refill = 0;
@@ -59,6 +62,31 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
goto out;
}
+ min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
+ DCB_CREDIT_QUANTUM;
+
+ /* Find smallest link percentage */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ p = &dcb_config->tc_config[i].path[direction];
+ bw_percent = dcb_config->bw_percentage[direction][p->bwg_id];
+ link_percentage = p->bwg_percent;
+
+ link_percentage = (link_percentage * bw_percent) / 100;
+
+ if (link_percentage && link_percentage < min_percent)
+ min_percent = link_percentage;
+ }
+
+ /*
+ * The ratio between traffic classes will control the bandwidth
+ * percentages seen on the wire. To calculate this ratio we use
+ * a multiplier. The refill credits must be larger than the
+ * max frame size, so here we find the smallest multiplier
+ * that keeps every class's refill credits above the max
+ * frame size.
+ */
+ min_multiplier = (min_credit / min_percent) + 1;
+
/* Find out the link percentage for each TC first */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
p = &dcb_config->tc_config[i].path[direction];
@@ -73,8 +101,9 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
/* Save link_percentage for reference */
p->link_percent = (u8)link_percentage;
- /* Calculate credit refill and save it */
- credit_refill = link_percentage * MINIMUM_CREDIT_REFILL;
+ /* Calculate credit refill ratio using multiplier */
+ credit_refill = min(link_percentage * min_multiplier,
+ MAX_CREDIT_REFILL);
p->data_credits_refill = (u16)credit_refill;
/* Calculate maximum credit for the TC */
@@ -85,8 +114,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
* of a TC is too small, the maximum credit may not be
* enough to send out a jumbo frame in data plane arbitration.
*/
- if (credit_max && (credit_max < MINIMUM_CREDIT_FOR_JUMBO))
- credit_max = MINIMUM_CREDIT_FOR_JUMBO;
+ if (credit_max && (credit_max < min_credit))
+ credit_max = min_credit;
if (direction == DCB_TX_CONFIG) {
/*
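Worked numbers for the new credit math (values illustrative, not from the patch): with a 9018-byte FCoE jumbo frame, min_credit = ceil(4509/64) = 71 quanta; if the smallest non-zero traffic-class link percentage is 5, min_multiplier = 71/5 + 1 = 15, and that class's refill becomes min(5 * 15, 511) = 75 credits, safely above min_credit. As a compilable check:

	#include <stdio.h>

	#define DCB_CREDIT_QUANTUM	64
	#define MAX_CREDIT_REFILL	511

	int main(void)
	{
		int max_frame = 9018;	/* illustrative jumbo frame */
		int min_percent = 5;	/* smallest non-zero TC percentage */

		int min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
				 DCB_CREDIT_QUANTUM;
		int min_multiplier = (min_credit / min_percent) + 1;
		int refill = min_percent * min_multiplier;

		if (refill > MAX_CREDIT_REFILL)
			refill = MAX_CREDIT_REFILL;

		/* prints: min_credit=71 multiplier=15 refill=75 */
		printf("min_credit=%d multiplier=%d refill=%d\n",
		       min_credit, min_multiplier, refill);
		return 0;
	}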
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index eb1059f..0208a87 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,15 +150,14 @@ struct ixgbe_dcb_config {
/* DCB driver APIs */
/* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, u8);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
/* DCB hw initialization */
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
/* DCB definitions for credit calculation */
+#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */
#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
-#define MINIMUM_CREDIT_REFILL 5 /* 5*64B = 320B */
-#define MINIMUM_CREDIT_FOR_JUMBO 145 /* 145= UpperBound((9*1024+54)/64B) for 9KB jumbo frame */
#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */
#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */
#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 67c219f..05f2247 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -397,6 +397,11 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
reg &= ~IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
+ /* Enable Security TX Buffer IFG for DCB */
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+
return 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 18d7fbf..3841649 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -95,6 +95,9 @@
#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
+/* SECTXMINIFG DCB */
+#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
+
/* DCB hardware-specific driver APIs */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index f856312..eee0b29 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
#ifdef IXGBE_FCOE
/* adjust for FCoE Sequence Offload */
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- && (skb->protocol == htons(ETH_P_FCOE)) &&
- skb_is_gso(skb)) {
+ && skb_is_gso(skb)
+ && vlan_get_protocol(skb) ==
+ htons(ETH_P_FCOE)) {
hlen = skb_transport_offset(skb) +
sizeof(struct fc_frame_header) +
sizeof(struct fcoe_crc_eof);
@@ -3347,6 +3348,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
u32 txdctl;
int i, j;
@@ -3359,8 +3361,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 32768);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
+#ifdef CONFIG_FCOE
+ if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+ max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+ ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ DCB_TX_CONFIG);
+ ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ DCB_RX_CONFIG);
/* reconfigure the hardware */
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
@@ -4762,6 +4771,9 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
adapter->rx_ring[i] = NULL;
}
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
}
@@ -5815,7 +5827,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
static int ixgbe_tso(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len)
+ u32 tx_flags, u8 *hdr_len, __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5833,7 +5845,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
l4len = tcp_hdrlen(skb);
*hdr_len += l4len;
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
@@ -5872,7 +5884,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
IXGBE_ADVTXD_DTYP_CTXT);
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5898,16 +5910,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
return false;
}
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+ __be16 protocol)
{
u32 rtn = 0;
- __be16 protocol;
-
- if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
- protocol = ((const struct vlan_ethhdr *)skb->data)->
- h_vlan_encapsulated_proto;
- else
- protocol = skb->protocol;
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
@@ -5935,7 +5941,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
default:
if (unlikely(net_ratelimit()))
e_warn(probe, "partial checksum but proto=%x!\n",
- skb->protocol);
+ protocol);
break;
}
@@ -5944,7 +5950,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags)
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
{
struct ixgbe_adv_tx_context_desc *context_desc;
unsigned int i;
@@ -5973,7 +5980,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
IXGBE_ADVTXD_DTYP_CTXT);
if (skb->ip_summed == CHECKSUM_PARTIAL)
- type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+ type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
/* use index zero for tx checksum offload */
@@ -6171,7 +6178,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
}
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags)
+ int queue, u32 tx_flags, __be16 protocol)
{
struct ixgbe_atr_input atr_input;
struct tcphdr *th;
@@ -6182,7 +6189,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
u8 l4type = 0;
/* Right now, we support IPv4 only */
- if (skb->protocol != htons(ETH_P_IP))
+ if (protocol != htons(ETH_P_IP))
return;
/* check if we're UDP or TCP */
if (iph->protocol == IPPROTO_TCP) {
@@ -6249,10 +6256,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
int txq = smp_processor_id();
-
#ifdef IXGBE_FCOE
- if ((skb->protocol == htons(ETH_P_FCOE)) ||
- (skb->protocol == htons(ETH_P_FIP))) {
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
+
+ if ((protocol == htons(ETH_P_FCOE)) ||
+ (protocol == htons(ETH_P_FIP))) {
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6295,6 +6305,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
int tso;
int count = 0;
unsigned int f;
+ __be16 protocol;
+
+ protocol = vlan_get_protocol(skb);
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
@@ -6315,8 +6328,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
/* for FCoE with DCB, we force the priority to what
* was specified by the switch */
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
- (skb->protocol == htons(ETH_P_FCOE) ||
- skb->protocol == htons(ETH_P_FIP))) {
+ (protocol == htons(ETH_P_FCOE) ||
+ protocol == htons(ETH_P_FIP))) {
#ifdef CONFIG_IXGBE_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6326,7 +6339,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
}
#endif
/* flag for FCoE offloads */
- if (skb->protocol == htons(ETH_P_FCOE))
+ if (protocol == htons(ETH_P_FCOE))
tx_flags |= IXGBE_TX_FLAGS_FCOE;
}
#endif
@@ -6360,9 +6373,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
} else {
- if (skb->protocol == htons(ETH_P_IP))
+ if (protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+ protocol);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -6370,7 +6384,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
if (tso)
tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+ protocol) &&
(skb->ip_summed == CHECKSUM_PARTIAL))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
@@ -6384,7 +6399,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
test_bit(__IXGBE_FDIR_INIT_DONE,
&tx_ring->reinit_state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
- tx_flags);
+ tx_flags, protocol);
tx_ring->atr_count = 0;
}
}
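The ixgbe changes thread one resolved protocol value through the Tx path instead of re-reading skb->protocol at each site: on a VLAN-tagged frame skb->protocol is ETH_P_8021Q and the EtherType of interest sits in the encapsulated header, which is exactly what vlan_get_protocol() unwraps. A sketch of the idea (essentially the logic the old ixgbe_psum() open-coded):

	__be16 proto = skb->protocol;

	if (proto == htons(ETH_P_8021Q))	/* tagged frame? */
		proto = ((struct vlan_ethhdr *)skb->data)
				->h_vlan_encapsulated_proto;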
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d7a975e..c57d9a4 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -1623,12 +1623,12 @@ err_out:
return rc;
}
-#ifdef CONFIG_PM
static void
jme_set_100m_half(struct jme_adapter *jme)
{
u32 bmcr, tmp;
+ jme_phy_on(jme);
bmcr = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_BMCR);
tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
BMCR_SPEED1000 | BMCR_FULLDPLX);
@@ -1656,7 +1656,6 @@ jme_wait_link(struct jme_adapter *jme)
phylink = jme_linkstat_from_phy(jme);
}
}
-#endif
static inline void
jme_phy_off(struct jme_adapter *jme)
@@ -1664,6 +1663,21 @@ jme_phy_off(struct jme_adapter *jme)
jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_BMCR, BMCR_PDOWN);
}
+static void
+jme_powersave_phy(struct jme_adapter *jme)
+{
+ if (jme->reg_pmcs) {
+ jme_set_100m_half(jme);
+
+ if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+ jme_wait_link(jme);
+
+ jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+ } else {
+ jme_phy_off(jme);
+ }
+}
+
static int
jme_close(struct net_device *netdev)
{
@@ -2941,11 +2955,7 @@ jme_init_one(struct pci_dev *pdev,
* Tell stack that we are not ready to work until open()
*/
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- /*
- * Register netdev
- */
rc = register_netdev(netdev);
if (rc) {
pr_err("Cannot register net device\n");
@@ -2991,6 +3001,16 @@ jme_remove_one(struct pci_dev *pdev)
}
+static void
+jme_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct jme_adapter *jme = netdev_priv(netdev);
+
+ jme_powersave_phy(jme);
+ pci_pme_active(pdev, true);
+}
+
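jme_powersave_phy() factors the PHY wake preparation out of jme_suspend() so the new .shutdown hook can share it, which is also why jme_set_100m_half()/jme_wait_link() lose their CONFIG_PM guard: a pci_driver .shutdown callback runs on reboot and poweroff even in non-PM kernels. A sketch of the hook's shape (restating the addition above):

	/* Runs on reboot/poweroff regardless of CONFIG_PM. */
	static void example_shutdown(struct pci_dev *pdev)
	{
		struct net_device *netdev = pci_get_drvdata(pdev);
		struct jme_adapter *jme = netdev_priv(netdev);

		jme_powersave_phy(jme);		/* shared with suspend */
		pci_pme_active(pdev, true);	/* keep PME armed for WoL */
	}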
#ifdef CONFIG_PM
static int
jme_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -3028,19 +3048,9 @@ jme_suspend(struct pci_dev *pdev, pm_message_t state)
tasklet_hi_enable(&jme->rxempty_task);
pci_save_state(pdev);
- if (jme->reg_pmcs) {
- jme_set_100m_half(jme);
-
- if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
- jme_wait_link(jme);
-
- jwrite32(jme, JME_PMCS, jme->reg_pmcs);
-
- pci_enable_wake(pdev, PCI_D3cold, true);
- } else {
- jme_phy_off(jme);
- }
- pci_set_power_state(pdev, PCI_D3cold);
+ jme_powersave_phy(jme);
+ pci_enable_wake(jme->pdev, PCI_D3hot, true);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@@ -3087,6 +3097,7 @@ static struct pci_driver jme_driver = {
.suspend = jme_suspend,
.resume = jme_resume,
#endif /* CONFIG_PM */
+ .shutdown = jme_shutdown,
};
static int __init
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 316bb70..e7030ce 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -1077,7 +1077,6 @@ static void __NS8390_init(struct net_device *dev, int startp)
ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG);
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD);
- netif_start_queue(dev);
ei_local->tx1 = ei_local->tx2 = 0;
ei_local->txing = 0;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 4297f6e..f69e73e 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -515,14 +515,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
(unsigned long)status, budget);
work_done = macb_rx(bp, budget);
- if (work_done < budget)
+ if (work_done < budget) {
napi_complete(napi);
- /*
- * We've done what we can to clean the buffers. Make sure we
- * get notified when new packets arrive.
- */
- macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ /*
+ * We've done what we can to clean the buffers. Make sure we
+ * get notified when new packets arrive.
+ */
+ macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+ }
/* TODO: Handle errors */
@@ -550,12 +551,16 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_RX_INT_FLAGS) {
+ /*
+ * There's no point taking any more interrupts
+ * until we have processed the buffers. The
+ * scheduling call may fail if the poll routine
+ * is already scheduled, so disable interrupts
+ * now.
+ */
+ macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
+
if (napi_schedule_prep(&bp->napi)) {
- /*
- * There's no point taking any more interrupts
- * until we have processed the buffers
- */
- macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
dev_dbg(&bp->pdev->dev,
"scheduling RX softirq\n");
__napi_schedule(&bp->napi);
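After this reordering the interrupt handler always masks RX interrupts before trying to schedule NAPI, so even when napi_schedule_prep() fails (poll already pending) the device stays quiet until macb_poll() completes under budget and re-enables the sources via IER; previously a failed prep left RX interrupts enabled. Condensed, the pattern is:

	/* ISR side: mask first, then schedule. */
	macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
	if (napi_schedule_prep(&bp->napi))
		__napi_schedule(&bp->napi);

	/* poll side: when done early, complete and unmask. */
	if (work_done < budget) {
		napi_complete(napi);
		macb_writel(bp, IER, MACB_RX_INT_FLAGS);
	}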
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 1439064..f6e0d40 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -124,6 +124,13 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
return 0;
}
+static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
+{
+ struct mlx4_en_dev *endev = ctx;
+
+ return endev->pndev[port];
+}
+
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
enum mlx4_dev_event event, int port)
{
@@ -282,9 +289,11 @@ err_free_res:
}
static struct mlx4_interface mlx4_en_interface = {
- .add = mlx4_en_add,
- .remove = mlx4_en_remove,
- .event = mlx4_en_event,
+ .add = mlx4_en_add,
+ .remove = mlx4_en_remove,
+ .event = mlx4_en_event,
+ .get_dev = mlx4_en_get_netdev,
+ .protocol = MLX4_PROTOCOL_EN,
};
static int __init mlx4_en_init(void)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 79478bd..6d6806b 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
+ int idx;
if (!priv->vlgrp)
return;
@@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
if (err)
en_err(priv, "Failed configuring VLAN filter\n");
}
+ if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
+ en_err(priv, "failed adding vlan %d\n", vid);
mutex_unlock(&mdev->state_lock);
+
}
static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int err;
+ int idx;
if (!priv->vlgrp)
return;
@@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
/* Remove VID from port VLAN filter */
mutex_lock(&mdev->state_lock);
+ if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
+ mlx4_unregister_vlan(mdev->dev, priv->port, idx);
+ else
+ en_err(priv, "could not find vid %d in cache\n", vid);
+
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
if (err)
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index aa3ef2a..7f5a322 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -127,8 +127,8 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
memset(context, 0, sizeof *context);
context->base_qpn = cpu_to_be32(base_qpn);
- context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
- context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_EN_SHIFT | base_qpn);
+ context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_MODE_SHIFT | base_qpn);
context->intra_no_vlan = 0;
context->no_vlan = MLX4_NO_VLAN_IDX;
context->intra_vlan_miss = 0;
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
index f6511aa..092e814 100644
--- a/drivers/net/mlx4/en_port.h
+++ b/drivers/net/mlx4/en_port.h
@@ -36,7 +36,8 @@
#define SET_PORT_GEN_ALL_VALID 0x7
-#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_PROMISC_EN_SHIFT 31
+#define SET_PORT_PROMISC_MODE_SHIFT 30
enum {
MLX4_CMD_SET_VLAN_FLTR = 0x47,
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index b716e1a..b68eee2 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -98,7 +98,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[20] = "Address vector port checking support",
[21] = "UD multicast support",
[24] = "Demand paging support",
- [25] = "Router support"
+ [25] = "Router support",
+ [30] = "IBoE support"
};
int i;
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index b07e4de..02393fd 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -210,38 +210,12 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
+static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
MLX4_CMD_TIME_CLASS_B);
}
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
-{
- struct mlx4_cmd_mailbox *mailbox;
- __be64 *inbox;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- inbox = mailbox->buf;
-
- inbox[0] = cpu_to_be64(virt);
- inbox[1] = cpu_to_be64(dma_addr);
-
- err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
-
- if (!err)
- mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
- (unsigned long long) dma_addr, (unsigned long long) virt);
-
- return err;
-}
-
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
diff --git a/drivers/net/mlx4/icm.h b/drivers/net/mlx4/icm.h
index ab56a2f..b10c07a 100644
--- a/drivers/net/mlx4/icm.h
+++ b/drivers/net/mlx4/icm.h
@@ -128,8 +128,6 @@ static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
}
-int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count);
-int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt);
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 5550678..73c94fc 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -161,3 +161,24 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
mutex_unlock(&intf_mutex);
}
+
+void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_device_context *dev_ctx;
+ unsigned long flags;
+ void *result = NULL;
+
+ spin_lock_irqsave(&priv->ctx_lock, flags);
+
+ list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+ if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
+ result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
+ break;
+ }
+
+ spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);
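mlx4_get_protocol_dev() lets a sibling driver stacked on mlx4_core resolve another protocol's per-port handle: it walks the registered interfaces under ctx_lock and, for the one whose .protocol matches, calls its .get_dev — for MLX4_PROTOCOL_EN that is mlx4_en_get_netdev(), returning the port's net_device. A usage sketch from a hypothetical consumer (caller and error handling are illustrative, not from this patch):

	struct net_device *ndev;

	ndev = mlx4_get_protocol_dev(dev, MLX4_PROTOCOL_EN, port);
	if (!ndev)
		return -ENODEV;	/* mlx4_en not bound, or no such port */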
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 569fa3d..782f11d 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -103,7 +103,7 @@ MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
-MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
@@ -1310,7 +1310,7 @@ static int __init mlx4_verify_params(void)
return -1;
}
- if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+ if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
return -1;
}
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 1fc16ab..dfed6a0 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -475,6 +475,7 @@ struct mlx4_en_priv {
char *mc_addrs;
int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
+ int vids[128];
};
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 606aa58..4513395 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -111,6 +111,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
goto out;
}
}
+
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@@ -182,6 +188,25 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
return err;
}
+int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int i;
+
+ for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
+ if (table->refs[i] &&
+ (vid == (MLX4_VLAN_MASK &
+ be32_to_cpu(table->entries[i])))) {
+ /* VLAN already registered, return its index */
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
+
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -205,6 +230,11 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
}
}
+ if (free < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
if (table->total == table->max) {
/* No free vlan entries */
err = -ENOSPC;
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 1261212..f7d06cb 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -255,19 +255,6 @@ out_free_rq:
}
static void
-nx_fw_cmd_reset_ctx(struct netxen_adapter *adapter)
-{
-
- netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
- adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
- NX_CDRP_CMD_DESTROY_RX_CTX);
-
- netxen_issue_cmd(adapter, adapter->ahw.pci_func, NXHAL_VERSION,
- adapter->ahw.pci_func, NX_DESTROY_CTX_RESET, 0,
- NX_CDRP_CMD_DESTROY_TX_CTX);
-}
-
-static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -698,8 +685,6 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
goto done;
- if (reset_devices)
- nx_fw_cmd_reset_ctx(adapter);
err = nx_fw_cmd_create_rx_ctx(adapter);
if (err)
goto err_out_free;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 50820bea..e1d30d7 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
char netxen_nic_driver_name[] = "netxen_nic";
@@ -1240,7 +1237,6 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
dev_warn(&pdev->dev, "failed to read mac addr\n");
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
@@ -1356,6 +1352,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
+ if (reset_devices) {
+ if (adapter->portnum == 0) {
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
+ adapter->need_fw_reset = 1;
+ }
+ }
+
err = netxen_start_firmware(adapter);
if (err)
goto err_out_decr_ref;
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 472056b..03a1d28 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
*
* This code was derived from the Intel e1000e Linux driver.
*
@@ -2464,8 +2464,8 @@ static void __exit pch_gbe_exit_module(void)
module_init(pch_gbe_init_module);
module_exit(pch_gbe_exit_module);
-MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver");
-MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>");
+MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index 2510146..ef0996a 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "using default of "
__MODULE_STRING(PCH_GBE_DEFAULT_TXD),
.def = PCH_GBE_DEFAULT_TXD,
- .arg = { .r = { .min = PCH_GBE_MIN_TXD } },
- .arg = { .r = { .max = PCH_GBE_MAX_TXD } }
+ .arg = { .r = { .min = PCH_GBE_MIN_TXD,
+ .max = PCH_GBE_MAX_TXD } }
};
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
tx_ring->count = TxDescriptors;
@@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "using default of "
__MODULE_STRING(PCH_GBE_DEFAULT_RXD),
.def = PCH_GBE_DEFAULT_RXD,
- .arg = { .r = { .min = PCH_GBE_MIN_RXD } },
- .arg = { .r = { .max = PCH_GBE_MAX_RXD } }
+ .arg = { .r = { .min = PCH_GBE_MIN_RXD,
+ .max = PCH_GBE_MAX_RXD } }
};
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
rx_ring->count = RxDescriptors;
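The underlying bug is a designated-initializer subtlety: naming the same member twice in one initializer list makes the last assignment win (GCC warns with -Woverride-init), so the earlier `.arg = { .r = { .min = ... } }` was silently discarded and the option range lost its lower bound. A standalone demonstration:

	#include <stdio.h>

	struct range { int min, max; };
	struct opt { struct range r; };

	int main(void)
	{
		/* Duplicate designators: the last one wins, zeroing .min. */
		struct opt broken = {
			.r = { .min = 64 },
			.r = { .max = 4096 },
		};
		struct opt fixed = {
			.r = { .min = 64, .max = 4096 },
		};

		printf("broken: %d..%d\n", broken.r.min, broken.r.max); /* 0..4096 */
		printf("fixed:  %d..%d\n", fixed.r.min, fixed.r.max);   /* 64..4096 */
		return 0;
	}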
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e..8a4d19e 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
typedef struct axnet_dev_t {
struct pcmcia_device *p_dev;
- caddr_t base;
- struct timer_list watchdog;
- int stale, fast_poll;
- u_short link_status;
- u_char duplex_flag;
- int phy_id;
- int flags;
+ caddr_t base;
+ struct timer_list watchdog;
+ int stale, fast_poll;
+ u_short link_status;
+ u_char duplex_flag;
+ int phy_id;
+ int flags;
+ int active_low;
} axnet_dev_t;
static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link)
if (info->flags & IS_AX88790)
outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */
+ info->active_low = 0;
+
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link)
if ((j != 0) && (j != 0xffff)) break;
}
- /* Maybe PHY is in power down mode. (PPD_SET = 1)
- Bit 2 of CCSR is active low. */
if (i == 32) {
+ /* Maybe PHY is in power down mode. (PPD_SET = 1)
+ Bit 2 of CCSR is active low. */
pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
for (i = 0; i < 32; i++) {
j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
if (j == j2) continue;
- if ((j != 0) && (j != 0xffff)) break;
+ if ((j != 0) && (j != 0xffff)) {
+ info->active_low = 1;
+ break;
+ }
}
}
@@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link)
static int axnet_resume(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ axnet_dev_t *info = PRIV(dev);
if (link->open) {
+ if (info->active_low == 1)
+ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
axnet_reset_8390(dev);
AX88190_init(dev, 1);
netif_device_attach(dev);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 03096c8..d05c446 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("COMPU-SHACK", "FASTline PCMCIA 10/100 Fast-Ethernet", 0xfa2e424d, 0x3953d9b9),
PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
+ PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e2afdce..e8b9c53 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -30,11 +30,14 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/marvell_phy.h>
+#include <linux/of.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
+#define MII_MARVELL_PHY_PAGE 22
+
#define MII_M1011_IEVENT 0x13
#define MII_M1011_IEVENT_CLEAR 0x0000
@@ -74,13 +77,12 @@
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4))
-#define MII_88EC048_PHY_MSCR1_REG 16
-#define MII_88EC048_PHY_MSCR1_PAD_ODD BIT(6)
+#define MII_88E1318S_PHY_MSCR1_REG 16
+#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
#define MII_88E1121_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_PAGE 3
#define MII_88E1121_PHY_LED_DEF 0x0030
-#define MII_88E1121_PHY_PAGE 22
#define MII_M1011_PHY_STATUS 0x11
#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
+#ifdef CONFIG_OF_MDIO
+/*
+ * Set and/or override some configuration registers based on the
+ * marvell,reg-init property stored in the of_node for the phydev.
+ *
+ * marvell,reg-init = <reg-page reg mask value>,...;
+ *
+ * There may be one or more sets of <reg-page reg mask value>:
+ *
+ * reg-page: which register bank to use.
+ * reg: the register.
+ * mask: if non-zero, ANDed with existing register value.
+ * value: ORed with the masked value and written to the register.
+ *
+ */
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+ const __be32 *paddr;
+ int len, i, saved_page, current_page, page_changed, ret;
+
+ if (!phydev->dev.of_node)
+ return 0;
+
+ paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len);
+ if (!paddr || len < (4 * sizeof(*paddr)))
+ return 0;
+
+ saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+ if (saved_page < 0)
+ return saved_page;
+ page_changed = 0;
+ current_page = saved_page;
+
+ ret = 0;
+ len /= sizeof(*paddr);
+ for (i = 0; i < len - 3; i += 4) {
+ u16 reg_page = be32_to_cpup(paddr + i);
+ u16 reg = be32_to_cpup(paddr + i + 1);
+ u16 mask = be32_to_cpup(paddr + i + 2);
+ u16 val_bits = be32_to_cpup(paddr + i + 3);
+ int val;
+
+ if (reg_page != current_page) {
+ current_page = reg_page;
+ page_changed = 1;
+ ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
+ if (ret < 0)
+ goto err;
+ }
+
+ val = 0;
+ if (mask) {
+ val = phy_read(phydev, reg);
+ if (val < 0) {
+ ret = val;
+ goto err;
+ }
+ val &= mask;
+ }
+ val |= val_bits;
+
+ ret = phy_write(phydev, reg, val);
+ if (ret < 0)
+ goto err;
+
+ }
+err:
+ if (page_changed) {
+ i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
+ if (ret == 0)
+ ret = i;
+ }
+ return ret;
+}
+#else
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
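Each marvell,reg-init entry is four cells — page, register, mask, value — and the loop above reads the register only when mask is non-zero, ANDs, ORs in the value bits, and writes the result, restoring the original page afterwards. An illustrative expansion (the property values here are made up, not from this patch):

	/* A node carrying
	 *   marvell,reg-init = <3 0x10 0xff00 0x001e>;
	 * would make marvell_of_reg_init() perform, in effect: */
	phy_write(phydev, MII_MARVELL_PHY_PAGE, 3);	/* select page 3 */
	val = phy_read(phydev, 0x10) & 0xff00;		/* mask != 0: RMW */
	phy_write(phydev, 0x10, val | 0x001e);		/* OR in value bits */
	phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page); /* restore */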
static int m88e1121_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
MII_88E1121_PHY_MSCR_PAGE);
if (err < 0)
return err;
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
- phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -229,36 +312,36 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
- phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
err = genphy_config_aneg(phydev);
return err;
}
-static int m88ec048_config_aneg(struct phy_device *phydev)
+static int m88e1318_config_aneg(struct phy_device *phydev)
{
int err, oldpage, mscr;
- oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
- err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
MII_88E1121_PHY_MSCR_PAGE);
if (err < 0)
return err;
- mscr = phy_read(phydev, MII_88EC048_PHY_MSCR1_REG);
- mscr |= MII_88EC048_PHY_MSCR1_PAD_ODD;
+ mscr = phy_read(phydev, MII_88E1318S_PHY_MSCR1_REG);
+ mscr |= MII_88E1318S_PHY_MSCR1_PAD_ODD;
- err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr);
+ err = phy_write(phydev, MII_88E1318S_PHY_MSCR1_REG, mscr);
if (err < 0)
return err;
- err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
if (err < 0)
return err;
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
int err;
/* Change address */
- err = phy_write(phydev, 0x16, 0x0002);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
if (err < 0)
return err;
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
return err;
/* Change address */
- err = phy_write(phydev, 0x16, 0x0003);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
if (err < 0)
return err;
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev)
if (err < 0)
return err;
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
/* Reset address */
- err = phy_write(phydev, 0x16, 0x0);
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int m88e1149_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Change address */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+ if (err < 0)
+ return err;
+
+ /* Enable 1000 Mbit */
+ err = phy_write(phydev, 0x15, 0x1048);
+ if (err < 0)
+ return err;
+
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
+ /* Reset address */
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
if (err < 0)
return err;
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev)
}
}
+ err = marvell_of_reg_init(phydev);
+ if (err < 0)
+ return err;
+
return 0;
}
@@ -659,12 +783,12 @@ static struct phy_driver marvell_drivers[] = {
.driver = { .owner = THIS_MODULE },
},
{
- .phy_id = MARVELL_PHY_ID_88EC048,
+ .phy_id = MARVELL_PHY_ID_88E1318S,
.phy_id_mask = MARVELL_PHY_ID_MASK,
- .name = "Marvell 88EC048",
+ .name = "Marvell 88E1318S",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &m88ec048_config_aneg,
+ .config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
@@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = {
.driver = { .owner = THIS_MODULE },
},
{
+ .phy_id = MARVELL_PHY_ID_88E1149R,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E1149R",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &m88e1149_config_init,
+ .config_aneg = &m88e1118_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .driver = { .owner = THIS_MODULE },
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E1240,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1240",
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ 0x01410e10, 0xfffffff0 },
{ 0x01410cb0, 0xfffffff0 },
{ 0x01410cd0, 0xfffffff0 },
+ { 0x01410e50, 0xfffffff0 },
{ 0x01410e30, 0xfffffff0 },
{ 0x01410e90, 0xfffffff0 },
{ }
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1bb16cb7..7670aac 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(phy_print_status);
*
* Returns 0 on success on < 0 on error.
*/
-int phy_clear_interrupt(struct phy_device *phydev)
+static int phy_clear_interrupt(struct phy_device *phydev)
{
int err = 0;
@@ -82,7 +82,7 @@ int phy_clear_interrupt(struct phy_device *phydev)
*
* Returns 0 on success on < 0 on error.
*/
-int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
int err = 0;
@@ -208,7 +208,7 @@ static inline int phy_find_valid(int idx, u32 features)
* duplexes. Drop down by one in this order: 1000/FULL,
* 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
*/
-void phy_sanitize_settings(struct phy_device *phydev)
+static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
int idx;
@@ -223,7 +223,6 @@ void phy_sanitize_settings(struct phy_device *phydev)
phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex;
}
-EXPORT_SYMBOL(phy_sanitize_settings);
/**
* phy_ethtool_sset - generic ethtool sset function, handles all the details
@@ -532,7 +531,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
* phy_enable_interrupts - Enable the interrupts from the PHY side
* @phydev: target phy_device struct
*/
-int phy_enable_interrupts(struct phy_device *phydev)
+static int phy_enable_interrupts(struct phy_device *phydev)
{
int err;
@@ -545,13 +544,12 @@ int phy_enable_interrupts(struct phy_device *phydev)
return err;
}
-EXPORT_SYMBOL(phy_enable_interrupts);
/**
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
* @phydev: target phy_device struct
*/
-int phy_disable_interrupts(struct phy_device *phydev)
+static int phy_disable_interrupts(struct phy_device *phydev)
{
int err;
@@ -574,7 +572,6 @@ phy_err:
return err;
}
-EXPORT_SYMBOL(phy_disable_interrupts);
/**
* phy_start_interrupts - request and enable interrupts for a PHY device
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 16ddc77..993c52c 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -57,6 +57,9 @@ extern void mdio_bus_exit(void);
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface);
+
/*
* Creates a new phy_fixup and adds it to the list
* @bus_id: A string which matches phydev->dev.bus_id (or PHY_ANY_ID)
@@ -146,7 +149,8 @@ int phy_scan_fixups(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_scan_fixups);
-struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
+static struct phy_device* phy_device_create(struct mii_bus *bus,
+ int addr, int phy_id)
{
struct phy_device *dev;
@@ -193,7 +197,6 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
return dev;
}
-EXPORT_SYMBOL(phy_device_create);
/**
* get_phy_id - reads the specified addr for its ID.
@@ -316,7 +319,7 @@ EXPORT_SYMBOL(phy_find_first);
* If you want to monitor your own link state, don't call
* this function.
*/
-void phy_prepare_link(struct phy_device *phydev,
+static void phy_prepare_link(struct phy_device *phydev,
void (*handler)(struct net_device *))
{
phydev->adjust_link = handler;
@@ -435,8 +438,8 @@ int phy_init_hw(struct phy_device *phydev)
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
*/
-int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
- u32 flags, phy_interface_t interface)
+static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
@@ -473,7 +476,6 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* (dev_flags and interface) */
return phy_init_hw(phydev);
}
-EXPORT_SYMBOL(phy_attach_direct);
/**
* phy_attach - attach a network device to a particular PHY device
@@ -540,7 +542,7 @@ EXPORT_SYMBOL(phy_detach);
* what is supported. Returns < 0 on error, 0 if the PHY's advertisement
* hasn't changed, and > 0 if it has changed.
*/
-int genphy_config_advert(struct phy_device *phydev)
+static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
int oldadv, adv;
@@ -605,7 +607,6 @@ int genphy_config_advert(struct phy_device *phydev)
return changed;
}
-EXPORT_SYMBOL(genphy_config_advert);
/**
* genphy_setup_forced - configures/forces speed/duplex from @phydev
@@ -615,7 +616,7 @@ EXPORT_SYMBOL(genphy_config_advert);
* to the values in phydev. Assumes that the values are valid.
* Please see phy_sanitize_settings().
*/
-int genphy_setup_forced(struct phy_device *phydev)
+static int genphy_setup_forced(struct phy_device *phydev)
{
int err;
int ctl = 0;
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 09cf56d..3965997 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2584,16 +2584,16 @@ ppp_create_interface(struct net *net, int unit, int *retp)
*/
dev_net_set(dev, net);
- ret = -EEXIST;
mutex_lock(&pn->all_ppp_mutex);
if (unit < 0) {
unit = unit_get(&pn->units_idr, ppp);
if (unit < 0) {
- *retp = unit;
+ ret = unit;
goto out2;
}
} else {
+ ret = -EEXIST;
if (unit_find(&pn->units_idr, unit))
goto out2; /* unit already exists */
/*
@@ -2668,10 +2668,10 @@ static void ppp_shutdown_interface(struct ppp *ppp)
ppp->closing = 1;
ppp_unlock(ppp);
unregister_netdev(ppp->dev);
+ unit_put(&pn->units_idr, ppp->file.index);
} else
ppp_unlock(ppp);
- unit_put(&pn->units_idr, ppp->file.index);
ppp->file.dead = 1;
ppp->owner = NULL;
wake_up_interruptible(&ppp->file.rwait);
@@ -2859,8 +2859,7 @@ static void __exit ppp_cleanup(void)
* by holding all_ppp_mutex
*/
-/* associate pointer with specified number */
-static int unit_set(struct idr *p, void *ptr, int n)
+static int __unit_alloc(struct idr *p, void *ptr, int n)
{
int unit, err;
@@ -2871,10 +2870,24 @@ again:
}
err = idr_get_new_above(p, ptr, n, &unit);
- if (err == -EAGAIN)
- goto again;
+ if (err < 0) {
+ if (err == -EAGAIN)
+ goto again;
+ return err;
+ }
+
+ return unit;
+}
+
+/* associate pointer with specified number */
+static int unit_set(struct idr *p, void *ptr, int n)
+{
+ int unit;
- if (unit != n) {
+ unit = __unit_alloc(p, ptr, n);
+ if (unit < 0)
+ return unit;
+ else if (unit != n) {
idr_remove(p, unit);
return -EINVAL;
}
@@ -2885,19 +2898,7 @@ again:
/* get new free unit number and associate pointer with it */
static int unit_get(struct idr *p, void *ptr)
{
- int unit, err;
-
-again:
- if (!idr_pre_get(p, GFP_KERNEL)) {
- printk(KERN_ERR "PPP: No free memory for idr\n");
- return -ENOMEM;
- }
-
- err = idr_get_new_above(p, ptr, 0, &unit);
- if (err == -EAGAIN)
- goto again;
-
- return unit;
+ return __unit_alloc(p, ptr, 0);
}
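__unit_alloc() centralizes the classic two-step idr idiom — preload, then insert at or above n, retrying only on -EAGAIN — and, unlike the old unit_get(), it propagates real failures instead of returning whatever idr_get_new_above() left in unit. unit_set() then keeps its extra check that the allocator actually granted the requested number. Condensed, the wrapped idiom is:

	again:
		if (!idr_pre_get(p, GFP_KERNEL))
			return -ENOMEM;		/* preload failed */
		err = idr_get_new_above(p, ptr, n, &unit);
		if (err == -EAGAIN)
			goto again;		/* raced: preload and retry */
		if (err < 0)
			return err;		/* hard failure */
		return unit;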
/* put unit number back to a pool */
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 26c37d3..8ecc170 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -146,11 +146,13 @@
#define MAX_CMD_DESCRIPTORS 1024
#define MAX_RCV_DESCRIPTORS_1G 4096
#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_RCV_DESCRIPTORS_VF 2048
#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
#define DEFAULT_RCV_DESCRIPTORS_1G 2048
#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define DEFAULT_RCV_DESCRIPTORS_VF 1024
#define MAX_RDS_RINGS 2
#define get_next_index(index, length) \
@@ -942,6 +944,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_LOOPBACK_TEST 2
#define QLCNIC_FILTER_AGE 80
+#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
struct qlcnic_filter {
@@ -970,6 +973,8 @@ struct qlcnic_adapter {
u16 num_txd;
u16 num_rxd;
u16 num_jumbo_rxd;
+ u16 max_rxd;
+ u16 max_jumbo_rxd;
u8 max_rds_rings;
u8 max_sds_rings;
@@ -1129,7 +1134,7 @@ struct qlcnic_eswitch {
#define MAX_RX_QUEUES 4
#define DEFAULT_MAC_LEARN 1
-#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan <= MAX_VLAN_ID)
+#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW)
#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 25e93a5..ec21d24 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -437,14 +437,8 @@ qlcnic_get_ringparam(struct net_device *dev,
ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
ring->tx_pending = adapter->num_txd;
- if (adapter->ahw.port_type == QLCNIC_GBE) {
- ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
- ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
- } else {
- ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
- ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- }
-
+ ring->rx_max_pending = adapter->max_rxd;
+ ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
ring->rx_mini_max_pending = 0;
@@ -472,24 +466,17 @@ qlcnic_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
- u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
u16 num_rxd, num_jumbo_rxd, num_txd;
-
if (ring->rx_mini_pending)
return -EOPNOTSUPP;
- if (adapter->ahw.port_type == QLCNIC_GBE) {
- max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
- max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- }
-
num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
- MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+ MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
- MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+ MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+ "rx jumbo");
num_txd = qlcnic_validate_ringparam(ring->tx_pending,
MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index f047c7c..a3dcd04 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -656,13 +656,23 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
-
if (adapter->ahw.port_type == QLCNIC_XGBE) {
- adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ } else {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ }
+
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
} else if (adapter->ahw.port_type == QLCNIC_GBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
}
adapter->msix_supported = !!use_msi_x;
@@ -1440,7 +1450,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
netdev->irq = adapter->msix_entries[0].vector;
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
err = register_netdev(netdev);
if (err) {
@@ -1860,6 +1869,11 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
tmp_fil->vlan_id == vlan_id) {
+
+ if (jiffies >
+ (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ qlcnic_change_filter(adapter, src_addr, vlan_id,
+ tx_ring);
tmp_fil->ftime = jiffies;
return;
}
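The added check re-programs a filter that the firmware may have aged out: if a matching entry has not been refreshed within QLCNIC_READD_AGE (20 s of jiffies), it is pushed again before its timestamp is updated. For reference, the wraparound-safe spelling of that comparison is time_after(); the raw '>' in the hunk behaves identically away from the jiffies wrap:

	if (time_after(jiffies, tmp_fil->ftime + QLCNIC_READD_AGE * HZ))
		qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
	tmp_fil->ftime = jiffies;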
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index a478786..2282139 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2226,7 +2226,6 @@ int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
int ql_core_dump(struct ql_adapter *qdev,
struct ql_mpi_coredump *mpi_coredump);
int ql_mb_about_fw(struct ql_adapter *qdev);
-int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
@@ -2243,16 +2242,13 @@ netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
-void qlge_set_multicast_list(struct net_device *ndev);
-#if 1
-#define QL_ALL_DUMP
-#define QL_REG_DUMP
-#define QL_DEV_DUMP
-#define QL_CB_DUMP
+/* #define QL_ALL_DUMP */
+/* #define QL_REG_DUMP */
+/* #define QL_DEV_DUMP */
+/* #define QL_CB_DUMP */
/* #define QL_IB_DUMP */
/* #define QL_OB_DUMP */
-#endif
#ifdef QL_REG_DUMP
extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index ba0053d..528eaef 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -62,15 +62,15 @@ static const u32 default_msg =
/* NETIF_MSG_PKTDATA | */
NETIF_MSG_HW | NETIF_MSG_WOL | 0;
-static int debug = 0x00007fff; /* defaults above */
-module_param(debug, int, 0);
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, MSIX_IRQ);
+module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
static int qlge_mpi_coredump;
@@ -94,6 +94,9 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
+static int ql_wol(struct ql_adapter *qdev);
+static void qlge_set_multicast_list(struct net_device *ndev);
+
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
* FCOE firmware and the FC driver.
@@ -2382,6 +2385,20 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
}
+static void qlge_restore_vlan(struct ql_adapter *qdev)
+{
+ qlge_vlan_rx_register(qdev->ndev, qdev->vlgrp);
+
+ if (qdev->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
+ if (!vlan_group_get_device(qdev->vlgrp, vid))
+ continue;
+ qlge_vlan_rx_add_vid(qdev->ndev, vid);
+ }
+ }
+}
+
/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
@@ -3842,7 +3859,7 @@ static void ql_display_dev_info(struct net_device *ndev)
"MAC address %pM\n", ndev->dev_addr);
}
-int ql_wol(struct ql_adapter *qdev)
+static int ql_wol(struct ql_adapter *qdev)
{
int status = 0;
u32 wol = MB_WOL_DISABLE;
@@ -3957,6 +3974,9 @@ static int ql_adapter_up(struct ql_adapter *qdev)
clear_bit(QL_PROMISCUOUS, &qdev->flags);
qlge_set_multicast_list(qdev->ndev);
+ /* Restore vlan setting. */
+ qlge_restore_vlan(qdev);
+
ql_enable_interrupts(qdev);
ql_enable_all_completion_interrupts(qdev);
netif_tx_start_all_queues(qdev->ndev);
@@ -4242,7 +4262,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
return &ndev->stats;
}
-void qlge_set_multicast_list(struct net_device *ndev)
+static void qlge_set_multicast_list(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
struct netdev_hw_addr *ha;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index f84e857..0e7c7c7 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
return status;
}
-int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
{
int status;
status = ql_write_mpi_reg(qdev, 0x00001010, 1);
@@ -681,7 +681,7 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
/* Send and ACK mailbox command to the firmware to
* let it continue with the change.
*/
-int ql_mb_idc_ack(struct ql_adapter *qdev)
+static int ql_mb_idc_ack(struct ql_adapter *qdev)
{
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
@@ -744,7 +744,7 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
}
-int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
u32 size)
{
int status = 0;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9f..7d33ef4 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -846,10 +846,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else
tp->features &= ~RTL_FEATURE_WOL;
__rtl8169_set_wol(tp, wol->wolopts);
- device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
spin_unlock_irq(&tp->lock);
+ device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
return 0;
}
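
Moving device_set_wakeup_enable() below the unlock is a locking fix, not a cosmetic one: with wakeup sources the call may sleep (it can allocate), and sleeping inside spin_lock_irq() is forbidden. The tg3 hunk later in this patch makes the same move, calling it before taking the lock. The ordering rule, sketched with the r8169 names:

    spin_lock_irq(&tp->lock);
    __rtl8169_set_wol(tp, wol->wolopts);    /* register writes only */
    spin_unlock_irq(&tp->lock);

    /* Potentially sleeping call, now outside the spinlock. */
    device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
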
@@ -2931,7 +2931,7 @@ static const struct rtl_cfg_info {
.hw_start = rtl_hw_start_8168,
.region = 2,
.align = 8,
- .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+ .intr_event = SYSErr | LinkChg | RxOverflow |
TxErr | TxOK | RxOK | RxErr,
.napi_event = TxErr | TxOK | RxOK | RxOverflow,
.features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4440,8 +4440,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
u32 status = opts1 & RxProtoMask;
if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
- ((status == RxProtoIP) && !(opts1 & IPFail)))
+ ((status == RxProtoUDP) && !(opts1 & UDPFail)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -4588,7 +4587,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
}
/* Work around for rx fifo overflow */
- if (unlikely(status & RxFIFOOver)) {
+ if (unlikely(status & RxFIFOOver) &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
break;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index a9ae505..66c2f1a 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -961,9 +961,9 @@ sb1000_open(struct net_device *dev)
lp->rx_error_count = 0;
lp->rx_error_dpc_count = 0;
lp->rx_session_id[0] = 0x50;
- lp->rx_session_id[0] = 0x48;
- lp->rx_session_id[0] = 0x44;
- lp->rx_session_id[0] = 0x42;
+ lp->rx_session_id[1] = 0x48;
+ lp->rx_session_id[2] = 0x44;
+ lp->rx_session_id[3] = 0x42;
lp->rx_frame_id[0] = 0;
lp->rx_frame_id[1] = 0;
lp->rx_frame_id[2] = 0;
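
The sb1000 fix above repairs a copy-and-paste slip that wrote rx_session_id[0] four times and left elements 1-3 uninitialized. A table-driven form makes that class of bug harder to reintroduce; a sketch with the same values:

    /* Each element written exactly once; a sketch, not the driver's code. */
    static const unsigned char sb1000_session_ids[] = { 0x50, 0x48, 0x44, 0x42 };

    memcpy(lp->rx_session_id, sb1000_session_ids, sizeof(sb1000_session_ids));
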
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 9265315..3a0cc63 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -531,7 +531,7 @@ static int sgiseeq_open(struct net_device *dev)
if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
- err = -EAGAIN;
+ return -EAGAIN;
}
err = init_seeq(dev, sp, sregs);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0..220e039 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
/* device is off until link detection */
netif_carrier_off(dev);
- netif_stop_queue(dev);
return dev;
}
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index ac279fa..ab9e3b7 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -688,18 +688,8 @@ slhc_toss(struct slcompress *comp)
return 0;
}
-
-/* VJ header compression */
-EXPORT_SYMBOL(slhc_init);
-EXPORT_SYMBOL(slhc_free);
-EXPORT_SYMBOL(slhc_remember);
-EXPORT_SYMBOL(slhc_compress);
-EXPORT_SYMBOL(slhc_uncompress);
-EXPORT_SYMBOL(slhc_toss);
-
#else /* CONFIG_INET */
-
int
slhc_toss(struct slcompress *comp)
{
@@ -738,6 +728,10 @@ slhc_init(int rslots, int tslots)
printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
return NULL;
}
+
+#endif /* CONFIG_INET */
+
+/* VJ header compression */
EXPORT_SYMBOL(slhc_init);
EXPORT_SYMBOL(slhc_free);
EXPORT_SYMBOL(slhc_remember);
@@ -745,5 +739,4 @@ EXPORT_SYMBOL(slhc_compress);
EXPORT_SYMBOL(slhc_uncompress);
EXPORT_SYMBOL(slhc_toss);
-#endif /* CONFIG_INET */
MODULE_LICENSE("Dual BSD/GPL");
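
The slhc rearrangement deduplicates the export list: the EXPORT_SYMBOL() lines now sit after the #endif, so they cover both the real CONFIG_INET implementations and the !CONFIG_INET stubs. The resulting shape, sketched:

    #ifdef CONFIG_INET
    /* real VJ header compression implementation */
    #else
    struct slcompress *slhc_init(int rslots, int tslots)
    {
            printk(KERN_DEBUG "Called IP function on non IP-system: slhc_init");
            return NULL;
    }
    #endif /* CONFIG_INET */

    /* One export list now serves both configurations. */
    EXPORT_SYMBOL(slhc_init);
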
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index a8e5856..64bfdae 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2075,7 +2075,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
} else {
/* Try reading mac address from device. If EEPROM is present
* it will already have been set */
- smsc911x_read_mac_address(dev);
+ smsc_get_mac(dev);
if (is_valid_ether_addr(dev->dev_addr)) {
/* eeprom values are valid so use them */
@@ -2176,6 +2176,7 @@ static struct platform_driver smsc911x_driver = {
/* Entry point for loading the module */
static int __init smsc911x_init_module(void)
{
+ SMSC_INITIALIZE();
return platform_driver_register(&smsc911x_driver);
}
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 016360c..50f712e 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
#define __SMSC911X_H__
#define TX_FIFO_LOW_THRESHOLD ((u32)1600)
-#define SMSC911X_EEPROM_SIZE ((u32)7)
+#define SMSC911X_EEPROM_SIZE ((u32)128)
#define USE_DEBUG 0
/* This is the maximum number of packets to be received every
@@ -394,4 +394,15 @@
#define LPA_PAUSE_ALL (LPA_PAUSE_CAP | \
LPA_PAUSE_ASYM)
+/*
+ * Provide hooks to let the arch add to the initialisation procedure
+ * and to override the source of the MAC address.
+ */
+#define SMSC_INITIALIZE() do {} while (0)
+#define smsc_get_mac(dev) smsc911x_read_mac_address((dev))
+
+#ifdef CONFIG_SMSC911X_ARCH_HOOKS
+#include <asm/smsc911x.h>
+#endif
+
#endif /* __SMSC911X_H__ */
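
The two macros above are deliberate override points: SMSC_INITIALIZE() defaults to a no-op and smsc_get_mac() to the driver's own EEPROM read, but a platform that selects CONFIG_SMSC911X_ARCH_HOOKS can redefine them in <asm/smsc911x.h>, which is included after the defaults. A hypothetical arch header, as a sketch (the my_board_*() helpers are invented for illustration):

    /* <asm/smsc911x.h>, hypothetical */
    #undef SMSC_INITIALIZE
    #define SMSC_INITIALIZE()       my_board_smsc911x_bus_setup()

    #undef smsc_get_mac
    #define smsc_get_mac(dev)       my_board_mac_from_fuses(dev)
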
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 823b9e6..2114837 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -337,33 +337,19 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static inline void stmmac_mac_enable_rx(void __iomem *ioaddr)
+static inline void stmmac_enable_mac(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
- value |= MAC_RNABLE_RX;
- /* Set the RE (receive enable bit into the MAC CTRL register). */
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-static inline void stmmac_mac_enable_tx(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
- value |= MAC_ENABLE_TX;
- /* Set the TE (transmit enable bit into the MAC CTRL register). */
+ value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
writel(value, ioaddr + MAC_CTRL_REG);
}
-static inline void stmmac_mac_disable_rx(void __iomem *ioaddr)
+static inline void stmmac_disable_mac(void __iomem *ioaddr)
{
u32 value = readl(ioaddr + MAC_CTRL_REG);
- value &= ~MAC_RNABLE_RX;
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-static inline void stmmac_mac_disable_tx(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
- value &= ~MAC_ENABLE_TX;
+ value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
writel(value, ioaddr + MAC_CTRL_REG);
}
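
Folding the four per-direction helpers into stmmac_enable_mac()/stmmac_disable_mac() halves the register traffic: one readl()/writel() pair now toggles both the TE and RE bits. (MAC_RNABLE_RX really is spelled that way in the driver's headers.) The read-modify-write idiom, as a generic sketch:

    /* Set bits in a 32-bit MMIO register with one read and one write. */
    static inline void mac_ctrl_set(void __iomem *ioaddr, u32 bits)
    {
            u32 value = readl(ioaddr + MAC_CTRL_REG);
            writel(value | bits, ioaddr + MAC_CTRL_REG);
    }
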
@@ -857,8 +843,7 @@ static int stmmac_open(struct net_device *dev)
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
/* Enable the MAC Rx/Tx */
- stmmac_mac_enable_rx(priv->ioaddr);
- stmmac_mac_enable_tx(priv->ioaddr);
+ stmmac_enable_mac(priv->ioaddr);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -928,9 +913,8 @@ static int stmmac_release(struct net_device *dev)
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
- /* Disable the MAC core */
- stmmac_mac_disable_tx(priv->ioaddr);
- stmmac_mac_disable_rx(priv->ioaddr);
+ /* Disable the MAC Rx/Tx */
+ stmmac_disable_mac(priv->ioaddr);
netif_carrier_off(dev);
@@ -1525,6 +1509,8 @@ static int stmmac_probe(struct net_device *dev)
pr_warning("\tno valid MAC address;"
"please, use ifconfig or nwhwconfig!\n");
+ spin_lock_init(&priv->lock);
+
ret = register_netdev(dev);
if (ret) {
pr_err("%s: ERROR %i registering the device\n",
@@ -1536,8 +1522,6 @@ static int stmmac_probe(struct net_device *dev)
dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
(dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
- spin_lock_init(&priv->lock);
-
return ret;
}
@@ -1787,8 +1771,7 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
- stmmac_mac_disable_rx(priv->ioaddr);
- stmmac_mac_disable_tx(priv->ioaddr);
+ stmmac_disable_mac(priv->ioaddr);
netif_carrier_off(ndev);
@@ -1839,13 +1822,11 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
dis_ic);
priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
- stmmac_mac_disable_tx(priv->ioaddr);
-
/* Enable Power down mode by programming the PMT regs */
if (device_can_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
else
- stmmac_mac_disable_rx(priv->ioaddr);
+ stmmac_disable_mac(priv->ioaddr);
} else {
priv->shutdown = 1;
/* Although this can appear slightly redundant it actually
@@ -1886,8 +1867,7 @@ static int stmmac_resume(struct platform_device *pdev)
netif_device_attach(dev);
/* Enable the MAC and DMA */
- stmmac_mac_enable_rx(priv->ioaddr);
- stmmac_mac_enable_tx(priv->ioaddr);
+ stmmac_enable_mac(priv->ioaddr);
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 852e917..30ccbb6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -9948,16 +9948,16 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
!((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
return -EINVAL;
+ device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
spin_lock_bh(&tp->lock);
- if (wol->wolopts & WAKE_MAGIC) {
+ if (device_may_wakeup(dp))
tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, true);
- } else {
+ else
tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
- device_set_wakeup_enable(dp, false);
- }
spin_unlock_bh(&tp->lock);
+
return 0;
}
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 663b886..7930203 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1220,7 +1220,7 @@ void tms380tr_wait(unsigned long time)
tmp = schedule_timeout_interruptible(tmp);
} while(time_after(tmp, jiffies));
#else
- udelay(time);
+ mdelay(time / 1000);
#endif
}
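
udelay() is a busy-wait intended for microsecond-scale pauses; very large arguments can overflow the delay-loop calculation on some architectures, which is why a delay that is really milliseconds long belongs in mdelay() (or msleep() where sleeping is allowed, which this #else branch cannot assume). A dispatch sketch:

    #include <linux/delay.h>

    /* Pick the delay primitive by magnitude; a sketch only. */
    static void busy_wait_us(unsigned long us)
    {
            if (us < 1000)
                    udelay(us);
            else
                    mdelay(us / 1000);      /* mdelay chunks into 1 ms steps */
    }
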
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb..c78a505 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
de->media_timer.data = (unsigned long) de;
netif_carrier_off(dev);
- netif_stop_queue(dev);
/* wake up device, assign resources */
rc = pci_enable_device(pdev);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index a9f7d5d..7064e03 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -688,9 +688,6 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
DMFE_DBUG(0, "dmfe_start_xmit", 0);
- /* Resource flag check */
- netif_stop_queue(dev);
-
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
pr_err("big packet = %d\n", (u16)skb->len);
@@ -698,6 +695,9 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ /* Resource flag check */
+ netif_stop_queue(dev);
+
spin_lock_irqsave(&db->lock, flags);
/* No Tx resource check, it never happens normally */
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 1cc6713..5b83c3f 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -24,10 +24,6 @@
3XP Processor. It has been tested on x86 and sparc64.
KNOWN ISSUES:
- *) The current firmware always strips the VLAN tag off, even if
- we tell it not to. You should filter VLANs at the switch
- as a workaround (good practice in any event) until we can
- get this fixed.
*) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
issue. Hopefully 3Com will fix it.
*) Waiting for a command response takes 8ms due to non-preemptable
@@ -280,8 +276,6 @@ struct typhoon {
struct pci_dev * pdev;
struct net_device * dev;
struct napi_struct napi;
- spinlock_t state_lock;
- struct vlan_group * vlgrp;
struct basic_ring rxHiRing;
struct basic_ring rxBuffRing;
struct rxbuff_ent rxbuffers[RXENT_ENTRIES];
@@ -695,44 +689,6 @@ out:
return err;
}
-static void
-typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
-{
- struct typhoon *tp = netdev_priv(dev);
- struct cmd_desc xp_cmd;
- int err;
-
- spin_lock_bh(&tp->state_lock);
- if(!tp->vlgrp != !grp) {
- /* We've either been turned on for the first time, or we've
- * been turned off. Update the 3XP.
- */
- if(grp)
- tp->offload |= TYPHOON_OFFLOAD_VLAN;
- else
- tp->offload &= ~TYPHOON_OFFLOAD_VLAN;
-
- /* If the interface is up, the runtime is running -- and we
- * must be up for the vlan core to call us.
- *
- * Do the command outside of the spin lock, as it is slow.
- */
- INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
- TYPHOON_CMD_SET_OFFLOAD_TASKS);
- xp_cmd.parm2 = tp->offload;
- xp_cmd.parm3 = tp->offload;
- spin_unlock_bh(&tp->state_lock);
- err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- if(err < 0)
- netdev_err(tp->dev, "vlan offload error %d\n", -err);
- spin_lock_bh(&tp->state_lock);
- }
-
- /* now make the change visible */
- tp->vlgrp = grp;
- spin_unlock_bh(&tp->state_lock);
-}
-
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
u32 ring_dma)
@@ -818,7 +774,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
first_txd->processFlags |=
TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
first_txd->processFlags |=
- cpu_to_le32(ntohs(vlan_tx_tag_get(skb)) <<
+ cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
TYPHOON_TX_PF_VLAN_TAG_SHIFT);
}
@@ -936,7 +892,7 @@ typhoon_set_rx_mode(struct net_device *dev)
filter |= TYPHOON_RX_FILTER_MCAST_HASH;
}
- INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
+ INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = filter;
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
@@ -1198,6 +1154,20 @@ typhoon_get_rx_csum(struct net_device *dev)
return 1;
}
+static int
+typhoon_set_flags(struct net_device *dev, u32 data)
+{
+ /* There's no way to turn off the RX VLAN offloading and stripping
+ * on the current 3XP firmware -- it does not respect the offload
+ * settings -- so we only allow the user to toggle the TX processing.
+ */
+ if (!(data & ETH_FLAG_RXVLAN))
+ return -EINVAL;
+
+ return ethtool_op_set_flags(dev, data,
+ ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+}
+
static void
typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
@@ -1224,6 +1194,8 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
.set_sg = ethtool_op_set_sg,
.set_tso = ethtool_op_set_tso,
.get_ringparam = typhoon_get_ringparam,
+ .set_flags = typhoon_set_flags,
+ .get_flags = ethtool_op_get_flags,
};
static int
@@ -1309,9 +1281,9 @@ typhoon_init_interface(struct typhoon *tp)
tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
+ tp->offload |= TYPHOON_OFFLOAD_VLAN;
spin_lock_init(&tp->command_lock);
- spin_lock_init(&tp->state_lock);
/* Force the writes to the shared memory area out before continuing. */
wmb();
@@ -1328,7 +1300,7 @@ typhoon_init_rings(struct typhoon *tp)
tp->rxHiRing.lastWrite = 0;
tp->rxBuffRing.lastWrite = 0;
tp->cmdRing.lastWrite = 0;
- tp->cmdRing.lastWrite = 0;
+ tp->respRing.lastWrite = 0;
tp->txLoRing.lastRead = 0;
tp->txHiRing.lastRead = 0;
@@ -1762,13 +1734,10 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
} else
skb_checksum_none_assert(new_skb);
- spin_lock(&tp->state_lock);
- if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
- vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
- ntohl(rx->vlanTag) & 0xffff);
- else
- netif_receive_skb(new_skb);
- spin_unlock(&tp->state_lock);
+ if (rx->rxStatus & TYPHOON_RX_VLAN)
+ __vlan_hwaccel_put_tag(new_skb,
+ ntohl(rx->vlanTag) & 0xffff);
+ netif_receive_skb(new_skb);
received++;
budget--;
@@ -1989,11 +1958,9 @@ typhoon_start_runtime(struct typhoon *tp)
goto error_out;
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
- spin_lock_bh(&tp->state_lock);
xp_cmd.parm2 = tp->offload;
xp_cmd.parm3 = tp->offload;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
- spin_unlock_bh(&tp->state_lock);
if(err < 0)
goto error_out;
@@ -2231,13 +2198,9 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
if(!netif_running(dev))
return 0;
- spin_lock_bh(&tp->state_lock);
- if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
- spin_unlock_bh(&tp->state_lock);
- netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
- return -EBUSY;
- }
- spin_unlock_bh(&tp->state_lock);
+ /* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
+ if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
+ netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");
netif_device_detach(dev);
@@ -2338,7 +2301,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = typhoon_set_mac_address,
.ndo_change_mtu = eth_change_mtu,
- .ndo_vlan_rx_register = typhoon_vlan_rx_register,
};
static int __devinit
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f57..acbdab3 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
ugeth_vdbg("%s: IN", __func__);
+ /*
+ * Tell the kernel the link is down.
+ * Must be done before disabling the controller
+ * or deadlock may happen.
+ */
+ phy_stop(phydev);
+
/* Disable the controller */
ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
- /* Tell the kernel the link is down */
- phy_stop(phydev);
-
/* Mask all interrupts */
out_be32(ugeth->uccf->p_uccm, 0x00000000);
@@ -2065,9 +2069,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth)
/* Disable Rx and Tx */
clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
- phy_disconnect(ugeth->phydev);
- ugeth->phydev = NULL;
-
ucc_geth_memclean(ugeth);
}
@@ -3550,7 +3551,10 @@ static int ucc_geth_close(struct net_device *dev)
napi_disable(&ugeth->napi);
+ cancel_work_sync(&ugeth->timeout_work);
ucc_geth_stop(ugeth);
+ phy_disconnect(ugeth->phydev);
+ ugeth->phydev = NULL;
free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
@@ -3579,8 +3583,12 @@ static void ucc_geth_timeout_work(struct work_struct *work)
* Must reset MAC *and* PHY. This is done by reopening
* the device.
*/
- ucc_geth_close(dev);
- ucc_geth_open(dev);
+ netif_tx_stop_all_queues(dev);
+ ucc_geth_stop(ugeth);
+ ucc_geth_init_mac(ugeth);
+ /* Must start PHY here */
+ phy_start(ugeth->phydev);
+ netif_tx_start_all_queues(dev);
}
netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@ static void ucc_geth_timeout(struct net_device *dev)
{
struct ucc_geth_private *ugeth = netdev_priv(dev);
- netif_carrier_off(dev);
schedule_work(&ugeth->timeout_work);
}
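
Three related ucc_geth fixes land here: phy_stop() now precedes ugeth_disable() (per the new comment, stopping the controller first can deadlock), phy_disconnect() moves from ucc_geth_stop() to ucc_geth_close() so the PHY survives a recovery, and the timeout worker, now cancelled on close, restarts the MAC in place instead of bouncing the whole device. The recovery path, with the reasoning as comments:

    netif_tx_stop_all_queues(dev);
    ucc_geth_stop(ugeth);           /* stops the PHY first, then the MAC  */
    ucc_geth_init_mac(ugeth);       /* reprogram the MAC, no close/open   */
    phy_start(ugeth->phydev);       /* PHY stayed connected throughout    */
    netif_tx_start_all_queues(dev);
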
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 05a9558..055b87a 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
*/
#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT 512
+#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs
+ due to errata */
/* Gigabit Ethernet (1000 Mbps) */
#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
FIFO size */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index b154a94..62e9e8d 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2994,12 +2994,14 @@ static int hso_probe(struct usb_interface *interface,
case HSO_INTF_BULK:
/* It's a regular bulk interface */
- if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
- !disable_net)
- hso_dev = hso_create_net_device(interface, port_spec);
- else
+ if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
+ if (!disable_net)
+ hso_dev =
+ hso_create_net_device(interface, port_spec);
+ } else {
hso_dev =
hso_create_bulk_serial_device(interface, port_spec);
+ }
if (!hso_dev)
goto exit;
break;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9d..c04d49e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/kernel.h>
+#include <linux/pm_runtime.h>
#define DRIVER_VERSION "22-Aug-2005"
@@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
struct usb_device *xdev;
int status;
const char *name;
+ struct usb_driver *driver = to_usb_driver(udev->dev.driver);
+
+	/* usbnet already holds USB runtime PM references, so the feature
+	 * must be enabled on the USB interface as well; otherwise
+	 * usb_autopm_get_interface may fail when USB_SUSPEND (runtime PM)
+	 * is enabled.
+	 */
+ if (!driver->supports_autosuspend) {
+ driver->supports_autosuspend = 1;
+ pm_runtime_enable(&udev->dev);
+ }
name = udev->dev.driver->name;
info = (struct driver_info *) prod->driver_info;
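
The probe-time block above works around USB runtime PM: usbnet's own get/put calls fail under CONFIG_USB_SUSPEND unless the underlying usb_driver is marked autosuspend-capable and the interface has runtime PM enabled. Extracted as a helper, a sketch:

    #include <linux/pm_runtime.h>
    #include <linux/usb.h>

    /* Mark the owning usb_driver autosuspend-capable (once) and enable
     * runtime PM on this interface, so a later usb_autopm_get_interface()
     * succeeds.  A sketch of the logic added above.
     */
    static void usbnet_enable_runtime_pm(struct usb_interface *intf)
    {
            struct usb_driver *driver = to_usb_driver(intf->dev.driver);

            if (!driver->supports_autosuspend) {
                    driver->supports_autosuspend = 1;
                    pm_runtime_enable(&intf->dev);
            }
    }
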
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f..b6d4028 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev)
goto unregister;
}
- vi->status = VIRTIO_NET_S_LINK_UP;
- virtnet_update_status(vi);
- netif_carrier_on(dev);
+ /* Assume link up if device can't report link status,
+ otherwise get link status from config. */
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+ netif_carrier_off(dev);
+ virtnet_update_status(vi);
+ } else {
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ netif_carrier_on(dev);
+ }
pr_debug("virtnet: registered device %s\n", dev->name);
return 0;
diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h
index 37108fb..969c751 100644
--- a/drivers/net/vmxnet3/upt1_defs.h
+++ b/drivers/net/vmxnet3/upt1_defs.h
@@ -88,9 +88,9 @@ struct UPT1_RSSConf {
/* features */
enum {
- UPT1_F_RXCSUM = 0x0001, /* rx csum verification */
- UPT1_F_RSS = 0x0002,
- UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */
- UPT1_F_LRO = 0x0008,
+ UPT1_F_RXCSUM = cpu_to_le64(0x0001), /* rx csum verification */
+ UPT1_F_RSS = cpu_to_le64(0x0002),
+ UPT1_F_RXVLAN = cpu_to_le64(0x0004), /* VLAN tag stripping */
+ UPT1_F_LRO = cpu_to_le64(0x0008),
};
#endif
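
Typing the feature flags as little-endian constants is what lets the rest of this patch delete the set_flag_le64()/reset_flag_le64() helpers: the constants can be OR-ed into and masked out of the device-shared __le64 word directly, and cpu_to_le64() compiles to nothing on little-endian hosts. Before and after, sketched:

    __le64 features;

    /* old helper style (removed later in this patch):
     *   *data = cpu_to_le64(le64_to_cpu(*data) | flag);
     */

    features |= UPT1_F_RXCSUM;      /* set a flag   */
    features &= ~UPT1_F_LRO;        /* clear a flag */
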
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index ca7727b..4d84912 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -523,9 +523,9 @@ struct Vmxnet3_RxFilterConf {
#define VMXNET3_PM_MAX_PATTERN_SIZE 128
#define VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8)
-#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */
-#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching
- * filters */
+#define VMXNET3_PM_WAKEUP_MAGIC cpu_to_le16(0x01) /* wake up on magic pkts */
+#define VMXNET3_PM_WAKEUP_FILTER cpu_to_le16(0x02) /* wake up on pkts matching
+ * filters */
struct Vmxnet3_PM_PktFilter {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f60e0e..21314e0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -873,7 +873,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
skb_shinfo(skb)->nr_frags + 1;
- ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP));
+ ctx.ipv4 = (skb->protocol == cpu_to_be16(ETH_P_IP));
ctx.mss = skb_shinfo(skb)->gso_size;
if (ctx.mss) {
@@ -1563,8 +1563,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
adapter->vlan_grp = grp;
/* update FEATURES to device */
- set_flag_le64(&devRead->misc.uptFeatures,
- UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
/*
@@ -1587,7 +1586,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
adapter->vlan_grp = NULL;
- if (le64_to_cpu(devRead->misc.uptFeatures) & UPT1_F_RXVLAN) {
+ if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) {
int i;
for (i = 0; i < VMXNET3_VFT_SIZE; i++) {
@@ -1600,8 +1599,7 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
VMXNET3_CMD_UPDATE_VLAN_FILTERS);
/* update FEATURES to device */
- reset_flag_le64(&devRead->misc.uptFeatures,
- UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
@@ -1762,15 +1760,15 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
/* set up feature flags */
if (adapter->rxcsum)
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXCSUM);
+ devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
if (adapter->lro) {
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_LRO);
+ devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
adapter->vlan_grp) {
- set_flag_le64(&devRead->misc.uptFeatures, UPT1_F_RXVLAN);
+ devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
}
devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
@@ -2577,7 +2575,7 @@ vmxnet3_suspend(struct device *device)
memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
@@ -2619,13 +2617,13 @@ vmxnet3_suspend(struct device *device)
pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
in_dev_put(in_dev);
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_FILTER);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
i++;
}
skip_arp:
if (adapter->wol & WAKE_MAGIC)
- set_flag_le16(&pmConf->wakeUpEvents, VMXNET3_PM_WAKEUP_MAGIC);
+ pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
pmConf->numFilters = i;
@@ -2667,7 +2665,7 @@ vmxnet3_resume(struct device *device)
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
- adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le32(virt_to_phys(
+ adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
pmConf));
netif_device_attach(netdev);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 7e4b5a8..b79070b 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -50,13 +50,11 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
adapter->rxcsum = val;
if (netif_running(netdev)) {
if (val)
- set_flag_le64(
- &adapter->shared->devRead.misc.uptFeatures,
- UPT1_F_RXCSUM);
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXCSUM;
else
- reset_flag_le64(
- &adapter->shared->devRead.misc.uptFeatures,
- UPT1_F_RXCSUM);
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXCSUM;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
@@ -292,10 +290,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
/* update hardware LRO capability accordingly */
if (lro_requested)
adapter->shared->devRead.misc.uptFeatures |=
- cpu_to_le64(UPT1_F_LRO);
+ UPT1_F_LRO;
else
adapter->shared->devRead.misc.uptFeatures &=
- cpu_to_le64(~UPT1_F_LRO);
+ ~UPT1_F_LRO;
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c88ea5c..edf2288 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -301,8 +301,8 @@ struct vmxnet3_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- u8 *hw_addr0; /* for BAR 0 */
- u8 *hw_addr1; /* for BAR 1 */
+ u8 __iomem *hw_addr0; /* for BAR 0 */
+ u8 __iomem *hw_addr1; /* for BAR 1 */
/* feature control */
bool rxcsum;
@@ -330,14 +330,14 @@ struct vmxnet3_adapter {
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
- writel(cpu_to_le32(val), (adapter)->hw_addr0 + (reg))
+ writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg) \
- le32_to_cpu(readl((adapter)->hw_addr0 + (reg)))
+ readl((adapter)->hw_addr0 + (reg))
#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \
- writel(cpu_to_le32(val), (adapter)->hw_addr1 + (reg))
+ writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg) \
- le32_to_cpu(readl((adapter)->hw_addr1 + (reg)))
+ readl((adapter)->hw_addr1 + (reg))
#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5)
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
@@ -353,21 +353,6 @@ struct vmxnet3_adapter {
#define VMXNET3_MAX_ETH_HDR_SIZE 22
#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024)
-static inline void set_flag_le16(__le16 *data, u16 flag)
-{
- *data = cpu_to_le16(le16_to_cpu(*data) | flag);
-}
-
-static inline void set_flag_le64(__le64 *data, u64 flag)
-{
- *data = cpu_to_le64(le64_to_cpu(*data) | flag);
-}
-
-static inline void reset_flag_le64(__le64 *data, u64 flag)
-{
- *data = cpu_to_le64(le64_to_cpu(*data) & ~flag);
-}
-
int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);
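
The BAR accessor change above removes a double byte-swap: readl() and writel() are defined to access device registers in little-endian order and already convert as needed, so wrapping them in le32_to_cpu()/cpu_to_le32(), as the removed macros did, swapped twice and corrupted values on big-endian CPUs. The corrected accessor is just the bare MMIO call:

    #define VMXNET3_READ_BAR1_REG(adapter, reg) \
            readl((adapter)->hw_addr1 + (reg))
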
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 0e6db59..906a3ca3 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -20,6 +20,179 @@
#include "vxge-traffic.h"
#include "vxge-config.h"
+static enum vxge_hw_status
+__vxge_hw_fifo_create(
+ struct __vxge_hw_vpath_handle *vpath_handle,
+ struct vxge_hw_fifo_attr *attr);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifoh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_reset(struct __vxge_hw_fifo *ringh);
+
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vpath_handle);
+
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
+ u32 size);
+
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool_entry *entry);
+
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle);
+
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+ struct __vxge_hw_blockpool *blockpool,
+ u32 pool_size,
+ u32 pool_max);
+
+static void
+__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
+
+static void *
+__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
+ u32 size,
+ struct vxge_hw_mempool_dma *dma_object);
+
+static void
+__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
+ void *memblock,
+ u32 size,
+ struct vxge_hw_mempool_dma *dma_object);
+
+
+static struct __vxge_hw_channel*
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+ enum __vxge_hw_channel_type type, u32 length,
+ u32 per_dtr_space, void *userdata);
+
+static void
+__vxge_hw_channel_free(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_initialize(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status
+__vxge_hw_channel_reset(
+ struct __vxge_hw_channel *channel);
+
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
+
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
+
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
+
+static void
+__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(
+ u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
+
+static void
+__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
+
+static enum vxge_hw_status
+__vxge_hw_device_register_poll(
+ void __iomem *reg,
+ u64 mask, u32 max_millis);
+
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+ u64 mask, u32 max_millis)
+{
+ __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+ wmb();
+
+ __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+ wmb();
+
+ return __vxge_hw_device_register_poll(addr, mask, max_millis);
+}
+
+static struct vxge_hw_mempool*
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
+ u32 item_size, u32 private_size, u32 items_initial,
+ u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
+ void *userdata);
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats);
+
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
+
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
+
+static u64
+__vxge_hw_vpath_pci_func_mode_get(u32 vp_id,
+ struct vxge_hw_vpath_reg __iomem *vpath_reg);
+
+static u32
+__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
+
+
+static enum vxge_hw_status
+__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
+ struct vxge_hw_device_hw_info *hw_info);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
+
+static void
+__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+ u32 operation, u32 offset, u64 *stat);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
+
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
+
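
The block of forward declarations above supports a visibility cleanup that runs through the rest of this file: functions that were exported via vxge-config.h become static, and declaring them up front lets their definitions keep their original order. The pattern, sketched on one function:

    /* forward declaration near the top of the file */
    static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo);

    /* ... call sites may appear here ... */

    /* definition later in the file, now also static */
    static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
    {
            /* body unchanged by this patch */
            return VXGE_HW_OK;
    }
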
/*
* __vxge_hw_channel_allocate - Allocate memory for channel
* This function allocates required memory for the channel and various arrays
@@ -190,7 +363,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
* Will poll certain register for specified amount of time.
* Will poll until masked bit is not cleared.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
u64 val64;
@@ -221,7 +394,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
* in progress
* This routine checks the vpath reset in progress register is turned zero
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
{
enum vxge_hw_status status;
@@ -236,7 +409,7 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
* This routine sets the swapper and reads the toc pointer and returns the
* memory mapped address of the toc
*/
-struct vxge_hw_toc_reg __iomem *
+static struct vxge_hw_toc_reg __iomem *
__vxge_hw_device_toc_get(void __iomem *bar0)
{
u64 val64;
@@ -779,7 +952,7 @@ exit:
* vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
* Get the Statistics on aggregate port
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_aggr_stats *aggr_stats)
{
@@ -814,7 +987,7 @@ exit:
* vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
* Get the Statistics on port
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
struct vxge_hw_xmac_port_stats *port_stats)
{
@@ -952,20 +1125,6 @@ u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
return 0;
#endif
}
-/*
- * vxge_hw_device_debug_mask_get - Get the debug mask
- * This routine returns the current debug mask set
- */
-u32 vxge_hw_device_debug_mask_get(struct __vxge_hw_device *hldev)
-{
-#if defined(VXGE_DEBUG_TRACE_MASK) || defined(VXGE_DEBUG_ERR_MASK)
- if (hldev == NULL)
- return 0;
- return hldev->debug_module_mask;
-#else
- return 0;
-#endif
-}
/*
* vxge_hw_getpause_data - Pause frame generation and reception.
@@ -1090,7 +1249,7 @@ __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
* first block
* Returns the dma address of the first RxD block
*/
-u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
+static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
{
struct vxge_hw_mempool_dma *dma_object;
@@ -1252,7 +1411,7 @@ exit:
* This function creates Ring and initializes it.
*
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
struct vxge_hw_ring_attr *attr)
{
@@ -1363,7 +1522,7 @@ exit:
* __vxge_hw_ring_abort - Returns the RxD
* This function terminates the RxDs of ring
*/
-enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
{
void *rxdh;
struct __vxge_hw_channel *channel;
@@ -1392,7 +1551,7 @@ enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
* __vxge_hw_ring_reset - Resets the ring
* This function resets the ring during vpath reset operation
*/
-enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
+static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_channel *channel;
@@ -1419,7 +1578,7 @@ exit:
* __vxge_hw_ring_delete - Removes the ring
* This function freeup the memory pool and removes the ring
*/
-enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
{
struct __vxge_hw_ring *ring = vp->vpath->ringh;
@@ -1438,7 +1597,7 @@ enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
* __vxge_hw_mempool_grow
* Will resize mempool up to %num_allocate value.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
u32 *num_allocated)
{
@@ -1527,7 +1686,7 @@ exit:
* with size enough to hold %items_initial number of items. Memory is
* DMA-able but client must map/unmap before interoperating with the device.
*/
-struct vxge_hw_mempool*
+static struct vxge_hw_mempool*
__vxge_hw_mempool_create(
struct __vxge_hw_device *devh,
u32 memblock_size,
@@ -1644,7 +1803,7 @@ exit:
/*
* vxge_hw_mempool_destroy
*/
-void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
{
u32 i, j;
struct __vxge_hw_device *devh = mempool->devh;
@@ -1700,7 +1859,7 @@ __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
* __vxge_hw_device_vpath_config_check - Check vpath configuration.
* Check the vpath configuration
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
{
enum vxge_hw_status status;
@@ -1922,7 +2081,7 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
* __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
* Set the swapper bits appropriately for the legacy section.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
{
u64 val64;
@@ -1977,7 +2136,7 @@ __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
* __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
* Set the swapper bits appropriately for the vpath.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
{
#ifndef __BIG_ENDIAN
@@ -1996,7 +2155,7 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
* __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
* Set the swapper bits appropriately for the vpath.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_kdfc_swapper_set(
struct vxge_hw_legacy_reg __iomem *legacy_reg,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2021,28 +2180,6 @@ __vxge_hw_kdfc_swapper_set(
}
/*
- * vxge_hw_mgmt_device_config - Retrieve device configuration.
- * Get device configuration. Permits to retrieve at run-time configuration
- * values that were used to initialize and configure the device.
- */
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *hldev,
- struct vxge_hw_device_config *dev_config, int size)
-{
-
- if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC))
- return VXGE_HW_ERR_INVALID_DEVICE;
-
- if (size != sizeof(struct vxge_hw_device_config))
- return VXGE_HW_ERR_VERSION_CONFLICT;
-
- memcpy(dev_config, &hldev->config,
- sizeof(struct vxge_hw_device_config));
-
- return VXGE_HW_OK;
-}
-
-/*
* vxge_hw_mgmt_reg_read - Read Titan register.
*/
enum vxge_hw_status
@@ -2438,7 +2575,7 @@ exit:
* __vxge_hw_fifo_abort - Returns the TxD
* This function terminates the TxDs of fifo
*/
-enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
{
void *txdlh;
@@ -2466,7 +2603,7 @@ enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
* __vxge_hw_fifo_reset - Resets the fifo
* This function resets the fifo during vpath reset operation
*/
-enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -2501,7 +2638,7 @@ enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
* in pci config space.
* Read from the vpath pci config space.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
u32 phy_func_0, u32 offset, u32 *val)
{
@@ -2542,7 +2679,7 @@ exit:
* __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
* Returns the function number of the vpath.
*/
-u32
+static u32
__vxge_hw_vpath_func_id_get(u32 vp_id,
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
{
@@ -2573,7 +2710,7 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
* __vxge_hw_vpath_card_info_get - Get the serial numbers,
* part number and product description.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_card_info_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2695,7 +2832,7 @@ __vxge_hw_vpath_card_info_get(
* __vxge_hw_vpath_fw_ver_get - Get the fw version
* Returns FW Version
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_fw_ver_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg,
@@ -2789,7 +2926,7 @@ exit:
* __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
* Returns pci function mode
*/
-u64
+static u64
__vxge_hw_vpath_pci_func_mode_get(
u32 vp_id,
struct vxge_hw_vpath_reg __iomem *vpath_reg)
@@ -2995,7 +3132,7 @@ exit:
* __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
* from MAC address table.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_addr_get(
u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
@@ -3347,7 +3484,7 @@ __vxge_hw_vpath_mgmt_read(
* This routine checks the vpath_rst_in_prog register to see if
* adapter completed the reset process for the vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
{
enum vxge_hw_status status;
@@ -3365,7 +3502,7 @@ __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
* __vxge_hw_vpath_reset
* This routine resets the vpath on the device
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3383,7 +3520,7 @@ __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
* __vxge_hw_vpath_sw_reset
* This routine resets the vpath structures
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3408,7 +3545,7 @@ exit:
* This routine configures the prc registers of virtual path using the config
* passed
*/
-void
+static void
__vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3480,7 +3617,7 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine configures the kdfc registers of virtual path using the
* config passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3553,7 +3690,7 @@ exit:
* __vxge_hw_vpath_mac_configure
* This routine configures the mac of virtual path using the config passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3621,7 +3758,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine configures the tim registers of virtual path using the config
* passed
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3897,7 +4034,7 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
* This routine is the final phase of init which initializes the
* registers of the vpath using the configuration passed.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
@@ -3966,7 +4103,7 @@ exit:
* This routine is the initial phase of init which resets the vpath and
* initializes the software support structures.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
struct vxge_hw_vp_config *config)
{
@@ -4022,7 +4159,7 @@ exit:
* __vxge_hw_vp_terminate - Terminate Virtual Path structure
* This routine closes all channels it opened and freeup memory
*/
-void
+static void
__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
{
struct __vxge_hw_virtualpath *vpath;
@@ -4384,7 +4521,7 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
* Enable the DMA vpath statistics. The function is to be called to re-enable
* the adapter to update stats into the host memory
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
{
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4409,7 +4546,7 @@ exit:
* __vxge_hw_vpath_stats_access - Get the statistics from the given location
* and offset and perform an operation
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
u32 operation, u32 offset, u64 *stat)
{
@@ -4445,7 +4582,7 @@ vpath_stats_access_exit:
/*
* __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_xmac_tx_stats_get(
struct __vxge_hw_virtualpath *vpath,
struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
@@ -4478,9 +4615,9 @@ exit:
/*
* __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+ struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
{
u64 *val64;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4509,9 +4646,9 @@ exit:
/*
* __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
*/
-enum vxge_hw_status __vxge_hw_vpath_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats)
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+ struct vxge_hw_vpath_stats_hw_info *hw_stats)
{
u64 val64;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -4643,6 +4780,32 @@ exit:
return status;
}
+
+static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
+ unsigned long size)
+{
+ gfp_t flags;
+ void *vaddr;
+
+ if (in_interrupt())
+ flags = GFP_ATOMIC | GFP_DMA;
+ else
+ flags = GFP_KERNEL | GFP_DMA;
+
+ vaddr = kmalloc((size), flags);
+
+ vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
+
+static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+ struct pci_dev **p_dma_acch)
+{
+ unsigned long misaligned = *(unsigned long *)p_dma_acch;
+ u8 *tmp = (u8 *)vaddr;
+ tmp -= misaligned;
+ kfree((void *)tmp);
+}
+
/*
* __vxge_hw_blockpool_create - Create block pool
*/
@@ -4845,12 +5008,11 @@ void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
* vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
* Adds a block to block pool
*/
-void vxge_hw_blockpool_block_add(
- struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle)
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+ void *block_addr,
+ u32 length,
+ struct pci_dev *dma_h,
+ struct pci_dev *acc_handle)
{
struct __vxge_hw_blockpool *blockpool;
struct __vxge_hw_blockpool_entry *entry = NULL;
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 1a94343..5c00861 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -183,11 +183,6 @@ struct vxge_hw_device_version {
char version[VXGE_HW_FW_STRLEN];
};
-u64
-__vxge_hw_vpath_pci_func_mode_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
/**
* struct vxge_hw_fifo_config - Configuration of fifo.
* @enable: Is this fifo to be commissioned
@@ -1426,9 +1421,6 @@ struct vxge_hw_rth_hash_types {
u8 hash_type_ipv6ex_en;
};
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
void vxge_hw_device_debug_set(
struct __vxge_hw_device *devh,
enum vxge_debug_level level,
@@ -1440,9 +1432,6 @@ vxge_hw_device_error_level_get(struct __vxge_hw_device *devh);
u32
vxge_hw_device_trace_level_get(struct __vxge_hw_device *devh);
-u32
-vxge_hw_device_debug_mask_get(struct __vxge_hw_device *devh);
-
/**
* vxge_hw_ring_rxd_size_get - Get the size of ring descriptor.
* @buf_mode: Buffer mode (1, 3 or 5)
@@ -1817,60 +1806,10 @@ struct vxge_hw_vpath_attr {
struct vxge_hw_fifo_attr fifo_attr;
};
-enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool *blockpool,
- u32 pool_size,
- u32 pool_max);
-
-void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool);
-
-struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
- u32 size);
-
-void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
- struct __vxge_hw_blockpool_entry *entry);
-
-void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
-
-void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
- void *memblock,
- u32 size,
- struct vxge_hw_mempool_dma *dma_object);
-
-enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
-enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
-
-enum vxge_hw_status
-vxge_hw_mgmt_device_config(struct __vxge_hw_device *devh,
- struct vxge_hw_device_config *dev_config, int size);
-
enum vxge_hw_status __devinit vxge_hw_device_hw_info_get(
void __iomem *bar0,
struct vxge_hw_device_hw_info *hw_info);
-enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
-
-enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- struct vxge_hw_device_hw_info *hw_info);
-
enum vxge_hw_status __devinit vxge_hw_device_config_default_get(
struct vxge_hw_device_config *device_config);
@@ -1954,38 +1893,6 @@ out:
return vaddr;
}
-extern void vxge_hw_blockpool_block_add(
- struct __vxge_hw_device *devh,
- void *block_addr,
- u32 length,
- struct pci_dev *dma_h,
- struct pci_dev *acc_handle);
-
-static inline void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
- unsigned long size)
-{
- gfp_t flags;
- void *vaddr;
-
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
-
- vaddr = kmalloc((size), flags);
-
- vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
- struct pci_dev **p_dma_acch)
-{
- unsigned long misaligned = *(unsigned long *)p_dma_acch;
- u8 *tmp = (u8 *)vaddr;
- tmp -= misaligned;
- kfree((void *)tmp);
-}
-
/*
* __vxge_hw_mempool_item_priv - will return pointer on per item private space
*/
@@ -2010,40 +1917,6 @@ __vxge_hw_mempool_item_priv(
(*memblock_item_idx) * mempool->items_priv_size;
}
-enum vxge_hw_status
-__vxge_hw_mempool_grow(
- struct vxge_hw_mempool *mempool,
- u32 num_allocate,
- u32 *num_allocated);
-
-struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
- struct __vxge_hw_device *devh,
- u32 memblock_size,
- u32 item_size,
- u32 private_size,
- u32 items_initial,
- u32 items_max,
- struct vxge_hw_mempool_cbs *mp_callback,
- void *userdata);
-
-struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
- enum __vxge_hw_channel_type type, u32 length,
- u32 per_dtr_space, void *userdata);
-
-void
-__vxge_hw_channel_free(
- struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_initialize(
- struct __vxge_hw_channel *channel);
-
-enum vxge_hw_status
-__vxge_hw_channel_reset(
- struct __vxge_hw_channel *channel);
-
/*
* __vxge_hw_fifo_txdl_priv - Return the max fragments allocated
* for the fifo.
@@ -2065,9 +1938,6 @@ enum vxge_hw_status vxge_hw_vpath_open(
struct vxge_hw_vpath_attr *attr,
struct __vxge_hw_vpath_handle **vpath_handle);
-enum vxge_hw_status
-__vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog);
-
enum vxge_hw_status vxge_hw_vpath_close(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2089,54 +1959,9 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
struct __vxge_hw_vpath_handle *vpath_handle,
u32 new_mtu);
-enum vxge_hw_status vxge_hw_vpath_stats_enable(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_access(
- struct __vxge_hw_virtualpath *vpath,
- u32 operation,
- u32 offset,
- u64 *stat);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
-
-enum vxge_hw_status
-__vxge_hw_vpath_stats_get(
- struct __vxge_hw_virtualpath *vpath,
- struct vxge_hw_vpath_stats_hw_info *hw_stats);
-
void
vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
-enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config);
-
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
- struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-enum vxge_hw_status
-__vxge_hw_device_register_poll(
- void __iomem *reg,
- u64 mask, u32 max_millis);
#ifndef readq
static inline u64 readq(void __iomem *addr)
@@ -2168,62 +1993,12 @@ static inline void __vxge_hw_pio_mem_write32_lower(u32 val, void __iomem *addr)
writel(val, addr);
}
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
- u64 mask, u32 max_millis)
-{
- enum vxge_hw_status status = VXGE_HW_OK;
-
- __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
- wmb();
- __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
- wmb();
-
- status = __vxge_hw_device_register_poll(addr, mask, max_millis);
- return status;
-}
-
-struct vxge_hw_toc_reg __iomem *
-__vxge_hw_device_toc_get(void __iomem *bar0);
-
-enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
enum vxge_hw_status
vxge_hw_device_flick_link_led(struct __vxge_hw_device *devh, u64 on_off);
enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_vpath_pci_read(
- struct __vxge_hw_virtualpath *vpath,
- u32 phy_func_0,
- u32 offset,
- u32 *val);
-
-enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
- u32 vp_id,
- struct vxge_hw_vpath_reg __iomem *vpath_reg,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN]);
-
-u32
-__vxge_hw_vpath_func_id_get(
- u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
-
-enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
-
-enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
+
/**
* vxge_debug
* @level: level of debug verbosity.
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index 05679e3..b67746e 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -1142,7 +1142,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_ethtool_stats = vxge_get_ethtool_stats,
};
-void initialize_ethtool_ops(struct net_device *ndev)
+void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index a69542e..813829f 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -82,6 +82,16 @@ module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac);
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac);
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -138,7 +148,7 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
* This function is called during interrupt context to notify link up state
* change.
*/
-void
+static void
vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -162,7 +172,7 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
* This function is called during interrupt context to notify link down state
* change.
*/
-void
+static void
vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
struct net_device *dev = hldev->ndev;
@@ -354,7 +364,7 @@ static inline void vxge_post(int *dtr_cnt, void **first_dtr,
* If the interrupt is because of a received frame or if the receive ring
* contains fresh as yet un-processed frames, this function is called.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
u8 t_code, void *userdata)
{
@@ -531,7 +541,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
* freed and frees all skbs whose data have already DMA'ed into the NICs
* internal memory.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
enum vxge_hw_fifo_tcode t_code, void *userdata,
struct sk_buff ***skb_ptr, int nr_skb, int *more)
@@ -1246,7 +1256,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
*
* Enables the interrupts for the vpath
*/
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id = 0;
@@ -1279,7 +1289,7 @@ void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
*
* Disables the interrupts for the vpath
*/
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
+static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
int msix_id;
@@ -1553,7 +1563,7 @@ out:
*
* driver may reset the chip on events of serr, eccerr, etc
*/
-int vxge_reset(struct vxgedev *vdev)
+static int vxge_reset(struct vxgedev *vdev)
{
return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
}
@@ -1724,7 +1734,7 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
return status;
}
-int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct vxge_mac_addrs *new_mac_entry;
u8 *mac_address = NULL;
@@ -1757,7 +1767,8 @@ int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
}
/* Add a mac address to DA table */
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1782,7 +1793,7 @@ enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
return status;
}
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
struct list_head *entry, *next;
u64 del_mac = 0;
@@ -1807,7 +1818,8 @@ int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
return FALSE;
}
/* delete a mac address from DA table */
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
+ struct macInfo *mac)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1854,7 +1866,7 @@ static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
}
/* Store all vlan ids from the list to the vid table */
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxgedev *vdev = vpath->vdev;
@@ -1874,7 +1886,7 @@ enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
}
/* Store all mac addresses from the list to the DA table */
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct macInfo mac_info;
@@ -1916,7 +1928,7 @@ enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
}
/* reset vpaths */
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
{
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
@@ -1948,7 +1960,7 @@ enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
}
/* close vpaths */
-void vxge_close_vpaths(struct vxgedev *vdev, int index)
+static void vxge_close_vpaths(struct vxgedev *vdev, int index)
{
struct vxge_vpath *vpath;
int i;
@@ -1966,7 +1978,7 @@ void vxge_close_vpaths(struct vxgedev *vdev, int index)
}
/* open vpaths */
-int vxge_open_vpaths(struct vxgedev *vdev)
+static int vxge_open_vpaths(struct vxgedev *vdev)
{
struct vxge_hw_vpath_attr attr;
enum vxge_hw_status status;
@@ -2517,7 +2529,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-int
+static int
vxge_open(struct net_device *dev)
{
enum vxge_hw_status status;
@@ -2721,7 +2733,7 @@ out0:
}
/* Loop throught the mac address list and delete all the entries */
-void vxge_free_mac_add_list(struct vxge_vpath *vpath)
+static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
struct list_head *entry, *next;
@@ -2745,7 +2757,7 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
}
}
-int do_vxge_close(struct net_device *dev, int do_io)
+static int do_vxge_close(struct net_device *dev, int do_io)
{
enum vxge_hw_status status;
struct vxgedev *vdev;
@@ -2856,7 +2868,7 @@ int do_vxge_close(struct net_device *dev, int do_io)
* Return value: '0' on success and an appropriate (-)ve integer as
* defined in errno.h file on failure.
*/
-int
+static int
vxge_close(struct net_device *dev)
{
do_vxge_close(dev, 1);
@@ -3113,10 +3125,10 @@ static const struct net_device_ops vxge_netdev_ops = {
#endif
};
-int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
- struct vxge_config *config,
- int high_dma, int no_of_vpath,
- struct vxgedev **vdev_out)
+static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
+ struct vxge_config *config,
+ int high_dma, int no_of_vpath,
+ struct vxgedev **vdev_out)
{
struct net_device *ndev;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -3164,7 +3176,7 @@ int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
- initialize_ethtool_ops(ndev);
+ vxge_initialize_ethtool_ops(ndev);
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
@@ -3249,7 +3261,7 @@ _out0:
*
* This function will unregister and free network device
*/
-void
+static void
vxge_device_unregister(struct __vxge_hw_device *hldev)
{
struct vxgedev *vdev;
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index d4be07e..de64536 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -396,64 +396,7 @@ struct vxge_tx_priv {
mod_timer(&timer, (jiffies + exp)); \
} while (0);
-int __devinit vxge_device_register(struct __vxge_hw_device *devh,
- struct vxge_config *config,
- int high_dma, int no_of_vpath,
- struct vxgedev **vdev);
-
-void vxge_device_unregister(struct __vxge_hw_device *devh);
-
-void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);
-
-void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);
-
-void vxge_callback_link_up(struct __vxge_hw_device *devh);
-
-void vxge_callback_link_down(struct __vxge_hw_device *devh);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-
-int vxge_reset(struct vxgedev *vdev);
-
-enum vxge_hw_status
-vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
- u8 t_code, void *userdata);
-
-enum vxge_hw_status
-vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
- enum vxge_hw_fifo_tcode t_code, void *userdata,
- struct sk_buff ***skb_ptr, int nr_skbs, int *more);
-
-int vxge_close(struct net_device *dev);
-
-int vxge_open(struct net_device *dev);
-
-void vxge_close_vpaths(struct vxgedev *vdev, int index);
-
-int vxge_open_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
-enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
- struct macInfo *mac);
-
-int vxge_mac_list_add(struct vxge_vpath *vpath,
- struct macInfo *mac);
-
-void vxge_free_mac_add_list(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-
-enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-
-int do_vxge_close(struct net_device *dev, int do_io);
-extern void initialize_ethtool_ops(struct net_device *ndev);
+extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
/**
* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index cedf08f..4bdb611 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,6 +17,13 @@
#include "vxge-config.h"
#include "vxge-main.h"
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
+ u32 vp_id, enum vxge_hw_event type);
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms);
+
/*
* vxge_hw_vpath_intr_enable - Enable vpath interrupts.
* @vp: Virtual Path handle.
@@ -513,7 +520,7 @@ exit:
* Link up indication handler. The function is invoked by HW when
* Titan indicates that the link is up for programmable amount of time.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -538,7 +545,7 @@ exit:
* Link down indication handler. The function is invoked by HW when
* Titan indicates that the link is down.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
{
/*
@@ -564,7 +571,7 @@ exit:
*
* Handle error.
*/
-enum vxge_hw_status
+static enum vxge_hw_status
__vxge_hw_device_handle_error(
struct __vxge_hw_device *hldev,
u32 vp_id,
@@ -646,7 +653,7 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
* it swaps the reserve and free arrays.
*
*/
-enum vxge_hw_status
+static enum vxge_hw_status
vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
{
void **tmp_arr;
@@ -692,7 +699,8 @@ _alloc_after_swap:
* Posts a dtr to work array.
*
*/
-void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
+ void *dtrh)
{
vxge_assert(channel->work_arr[channel->post_index] == NULL);
@@ -1658,37 +1666,6 @@ exit:
}
/**
- * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
- * from vlan id table.
- * @vp: Vpath handle.
- * @vid: Buffer to return vlan id
- *
- * Returns the next vlan id in the list for this vpath.
- * see also: vxge_hw_vpath_vid_get
- *
- */
-enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
-{
- u64 data;
- enum vxge_hw_status status = VXGE_HW_OK;
-
- if (vp == NULL) {
- status = VXGE_HW_ERR_INVALID_HANDLE;
- goto exit;
- }
-
- status = __vxge_hw_vpath_rts_table_get(vp,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
- VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
- 0, vid, &data);
-
- *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
-exit:
- return status;
-}
-
-/**
* vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
* to vlan id table.
* @vp: Vpath handle.
@@ -1898,9 +1875,9 @@ exit:
* Process vpath alarms.
*
*/
-enum vxge_hw_status __vxge_hw_vpath_alarm_process(
- struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms)
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+ u32 skip_alarms)
{
u64 val64;
u64 alarm_status;
@@ -2265,36 +2242,6 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
- * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
- * @vp: Virtual Path handle.
- * @msix_id: MSI ID
- *
- * The function clears the msix interrupt for the given msix_id
- *
- * Returns: 0,
- * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
- * status.
- * See also:
- */
-void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
-{
- struct __vxge_hw_device *hldev = vp->vpath->hldev;
- if (hldev->config.intr_mode ==
- VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->
- clr_msix_one_shot_vec[msix_id%4]);
- } else {
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
- &hldev->common_reg->
- clear_msix_mask_vect[msix_id%4]);
- }
-}
-
-/**
* vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
* @vp: Virtual Path handle.
* @msix_id: MSI ID
@@ -2316,22 +2263,6 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
}
/**
- * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
- * @vp: Virtual Path handle.
- *
- * The function masks all msix interrupt for the given vpath
- *
- */
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
-{
-
- __vxge_hw_pio_mem_write32_upper(
- (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
- &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
-}
-
-/**
* vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
* @vp: Virtual Path handle.
*
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 6fa07d1..9890d4d 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1749,14 +1749,6 @@ vxge_hw_mrpcim_stats_access(
u64 *stat);
enum vxge_hw_status
-vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
- struct vxge_hw_xmac_aggr_stats *aggr_stats);
-
-enum vxge_hw_status
-vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
- struct vxge_hw_xmac_port_stats *port_stats);
-
-enum vxge_hw_status
vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
struct vxge_hw_xmac_stats *xmac_stats);
@@ -2117,49 +2109,10 @@ struct __vxge_hw_ring_rxd_priv {
#endif
};
-/* ========================= RING PRIVATE API ============================= */
-u64
-__vxge_hw_ring_first_block_address_get(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_ring_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_ring_abort(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_reset(
- struct __vxge_hw_ring *ringh);
-
-enum vxge_hw_status
-__vxge_hw_ring_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
/* ========================= FIFO PRIVATE API ============================= */
struct vxge_hw_fifo_attr;
-enum vxge_hw_status
-__vxge_hw_fifo_create(
- struct __vxge_hw_vpath_handle *vpath_handle,
- struct vxge_hw_fifo_attr *attr);
-
-enum vxge_hw_status
-__vxge_hw_fifo_abort(
- struct __vxge_hw_fifo *fifoh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_reset(
- struct __vxge_hw_fifo *ringh);
-
-enum vxge_hw_status
-__vxge_hw_fifo_delete(
- struct __vxge_hw_vpath_handle *vpath_handle);
-
struct vxge_hw_mempool_cbs {
void (*item_func_alloc)(
struct vxge_hw_mempool *mempoolh,
@@ -2169,10 +2122,6 @@ struct vxge_hw_mempool_cbs {
u32 is_last);
};
-void
-__vxge_hw_mempool_destroy(
- struct vxge_hw_mempool *mempool);
-
#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
@@ -2195,61 +2144,10 @@ __vxge_hw_vpath_rts_table_set(
u64 data2);
enum vxge_hw_status
-__vxge_hw_vpath_reset(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
__vxge_hw_vpath_enable(
struct __vxge_hw_device *devh,
u32 vp_id);
-void
-__vxge_hw_vpath_prc_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_kdfc_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_tim_configure(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_initialize(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vp_initialize(
- struct __vxge_hw_device *devh,
- u32 vp_id,
- struct vxge_hw_vp_config *config);
-
-void
-__vxge_hw_vp_terminate(
- struct __vxge_hw_device *devh,
- u32 vp_id);
-
-enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(
- struct __vxge_hw_virtualpath *vpath,
- u32 skip_alarms);
-
void vxge_hw_device_intr_enable(
struct __vxge_hw_device *devh);
@@ -2321,11 +2219,6 @@ vxge_hw_vpath_vid_get(
u64 *vid);
enum vxge_hw_status
-vxge_hw_vpath_vid_get_next(
- struct __vxge_hw_vpath_handle *vpath_handle,
- u64 *vid);
-
-enum vxge_hw_status
vxge_hw_vpath_vid_delete(
struct __vxge_hw_vpath_handle *vpath_handle,
u64 vid);
@@ -2387,16 +2280,9 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
void
-vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
- int msix_id);
-
-void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
int msix_id);
-void
-vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
-
enum vxge_hw_status vxge_hw_vpath_intr_enable(
struct __vxge_hw_vpath_handle *vpath_handle);
@@ -2415,12 +2301,6 @@ vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
-enum vxge_hw_status
-vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
-
-void
-vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
-
void
vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
void **dtrh);
@@ -2436,18 +2316,4 @@ vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
void
vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
-/* ========================== PRIVATE API ================================= */
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
-
-enum vxge_hw_status
-__vxge_hw_device_handle_error(
- struct __vxge_hw_device *hldev,
- u32 vp_id,
- enum vxge_hw_event type);
-
#endif
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d81ad83..cf05504 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -498,7 +498,6 @@ norbuff:
static int x25_asy_close(struct net_device *dev)
{
struct x25_asy *sl = netdev_priv(dev);
- int err;
spin_lock(&sl->lock);
if (sl->tty)
@@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev)
netif_stop_queue(dev);
sl->rcount = 0;
sl->xleft = 0;
- err = lapb_unregister(dev);
- if (err != LAPB_OK)
- printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
- err);
spin_unlock(&sl->lock);
return 0;
}
@@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
static void x25_asy_close_tty(struct tty_struct *tty)
{
struct x25_asy *sl = tty->disc_data;
+ int err;
/* First make sure we're connected. */
if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty)
dev_close(sl->dev);
rtnl_unlock();
+ err = lapb_unregister(sl->dev);
+ if (err != LAPB_OK)
+ printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
+ err);
+
tty->disc_data = NULL;
sl->tty = NULL;
x25_asy_free(sl);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index eea1ef2..4396d4b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -221,9 +221,6 @@ config RT2X00_LIB_LEDS
boolean
default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
- depends on RT2X00_LIB=y && LEDS_CLASS=m
-
config RT2X00_LIB_DEBUGFS
bool "Ralink debugfs support"
depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 630fb86..458bb57 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1610,6 +1610,8 @@ static void netback_changed(struct xenbus_device *dev,
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateInitialised:
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
case XenbusStateConnected:
case XenbusStateUnknown:
case XenbusStateClosed:
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index f3f8be5..14f0955 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -430,8 +430,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
}
/* Get the protocol type of the ethernet frame that arrived */
- proto_type = ((in_be32(addr + XEL_HEADER_OFFSET +
- XEL_RXBUFF_OFFSET) >> XEL_HEADER_SHIFT) &
+ proto_type = ((ntohl(in_be32(addr + XEL_HEADER_OFFSET +
+ XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
@@ -439,9 +439,9 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
if (proto_type == ETH_P_IP) {
- length = ((in_be32(addr +
+ length = ((ntohl(in_be32(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
- XEL_RXBUFF_OFFSET) >>
+ XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
length += ETH_HLEN + ETH_FCS_LEN;