Diffstat (limited to 'drivers/net')
69 files changed, 2542 insertions, 600 deletions
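The largest functional change in this series is the bonding fail_over_mac option: with it enabled in active-backup mode, slaves keep their own MAC addresses and the bond instead adopts the MAC of whichever slave is currently active, sending a (possibly deferred) gratuitous ARP afterwards. As a rough standalone illustration of that policy only (simplified stand-in types, not the kernel's net_device/bonding structures):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* simplified stand-ins for the kernel's net_device/bonding structs */
struct fake_dev {
	unsigned char dev_addr[ETH_ALEN];
};

struct fake_bond {
	struct fake_dev dev;   /* the bond master */
	int fail_over_mac;     /* 0: force one MAC on all slaves, 1: follow the active slave */
};

/* With fail_over_mac set, failover copies the new active slave's MAC to the
 * bond instead of having rewritten every slave's MAC at enslave time. */
static void change_active_slave(struct fake_bond *bond, struct fake_dev *new_active)
{
	if (new_active && bond->fail_over_mac)
		memcpy(bond->dev.dev_addr, new_active->dev_addr, ETH_ALEN);
}

int main(void)
{
	struct fake_bond bond = { .fail_over_mac = 1 };
	struct fake_dev slave = { .dev_addr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } };

	change_active_slave(&bond, &slave);
	printf("bond MAC now %02x:%02x:%02x:%02x:%02x:%02x\n",
	       bond.dev.dev_addr[0], bond.dev.dev_addr[1], bond.dev.dev_addr[2],
	       bond.dev.dev_addr[3], bond.dev.dev_addr[4], bond.dev.dev_addr[5]);
	return 0;
}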
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 8d3893d..862f472 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -3118,7 +3118,13 @@ static void acpi_set_WOL(struct net_device *dev) iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); iowrite16(RxEnable, ioaddr + EL3_CMD); - pci_enable_wake(VORTEX_PCI(vp), 0, 1); + if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) { + printk(KERN_INFO "%s: WOL not supported.\n", + pci_name(VORTEX_PCI(vp))); + + vp->enable_wol = 0; + return; + } /* Change the power state to D3; RxEnable doesn't take effect. */ pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9c635a2..83d52c8 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1780,6 +1780,15 @@ config SC92031 To compile this driver as a module, choose M here: the module will be called sc92031. This is recommended. +config CPMAC + tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)" + depends on NET_ETHERNET && EXPERIMENTAL && AR7 + select PHYLIB + select FIXED_PHY + select FIXED_MII_100_FDX + help + TI AR7 CPMAC Ethernet support + config NET_POCKET bool "Pocket and portable adapters" depends on PARPORT @@ -2164,6 +2173,16 @@ config SKGE To compile this driver as a module, choose M here: the module will be called skge. This is recommended. +config SKGE_DEBUG + bool "Debugging interface" + depends on SKGE && DEBUG_FS + help + This option adds the ability to dump driver state for debugging. + The file debugfs/skge/ethX displays the state of the internal + transmit and receive rings. + + If unsure, say N. + config SKY2 tristate "SysKonnect Yukon2 support" depends on PCI diff --git a/drivers/net/Makefile b/drivers/net/Makefile index d2e0f35..22f78cb 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -159,6 +159,7 @@ obj-$(CONFIG_8139CP) += 8139cp.o obj-$(CONFIG_8139TOO) += 8139too.o obj-$(CONFIG_ZNET) += znet.o obj-$(CONFIG_LAN_SAA9730) += saa9730.o +obj-$(CONFIG_CPMAC) += cpmac.o obj-$(CONFIG_DEPCA) += depca.o obj-$(CONFIG_EWRK3) += ewrk3.o obj-$(CONFIG_ATP) += atp.o diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index ebf1a3a..b74dbee 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c @@ -1023,7 +1023,7 @@ static int lance_rx( struct net_device *dev ) DECLARE_MAC_BUF(mac); DECLARE_MAC_BUF(mac2); - printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %s to %s ", + printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %s to %s " "data %02x %02x %02x %02x %02x %02x %02x %02x " "len %d\n", dev->name, ((u_short *)data)[6], diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index b46c5d8..185f98e 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -54,13 +54,16 @@ #include <linux/delay.h> #include <linux/crc32.h> #include <linux/phy.h> + +#include <asm/cpu.h> #include <asm/mipsregs.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/processor.h> -#include <asm/mach-au1x00/au1000.h> -#include <asm/cpu.h> +#include <au1000.h> +#include <prom.h> + #include "au1000_eth.h" #ifdef AU1000_ETH_DEBUG @@ -96,11 +99,6 @@ static void mdio_write(struct net_device *, int, int, u16); static void au1000_adjust_link(struct net_device *); static void enable_mac(struct net_device *, int); -// externs -extern int get_ethernet_addr(char *ethernet_addr); -extern void str2eaddr(unsigned char *ea, unsigned char *str); -extern char * prom_getcmdline(void); - /* * Theory of operation * @@ -619,7 +617,6 @@ static struct net_device * 
au1000_probe(int port_num) struct au1000_private *aup = NULL; struct net_device *dev = NULL; db_dest_t *pDB, *pDBfree; - char *pmac, *argptr; char ethaddr[6]; int irq, i, err; u32 base, macen; @@ -677,21 +674,12 @@ static struct net_device * au1000_probe(int port_num) au_macs[port_num] = aup; if (port_num == 0) { - /* Check the environment variables first */ - if (get_ethernet_addr(ethaddr) == 0) + if (prom_get_ethernet_addr(ethaddr) == 0) memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); else { - /* Check command line */ - argptr = prom_getcmdline(); - if ((pmac = strstr(argptr, "ethaddr=")) == NULL) - printk(KERN_INFO "%s: No MAC address found\n", - dev->name); + printk(KERN_INFO "%s: No MAC address found\n", + dev->name); /* Use the hard coded MAC addresses */ - else { - str2eaddr(ethaddr, pmac + strlen("ethaddr=")); - memcpy(au1000_mac_addr, ethaddr, - sizeof(au1000_mac_addr)); - } } setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 64bfec3..6f85cc3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -98,6 +98,7 @@ static char *xmit_hash_policy = NULL; static int arp_interval = BOND_LINK_ARP_INTERV; static char *arp_ip_target[BOND_MAX_ARP_TARGETS] = { NULL, }; static char *arp_validate = NULL; +static int fail_over_mac = 0; struct bond_params bonding_defaults; module_param(max_bonds, int, 0); @@ -131,6 +132,8 @@ module_param_array(arp_ip_target, charp, NULL, 0); MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); module_param(arp_validate, charp, 0); MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); +module_param(fail_over_mac, int, 0); +MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. 0 of off (default), 1 for on."); /*----------------------------- Global variables ----------------------------*/ @@ -1096,7 +1099,21 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) if (new_active) { bond_set_slave_active_flags(new_active); } - bond_send_gratuitous_arp(bond); + + /* when bonding does not set the slave MAC address, the bond MAC + * address is the one of the active slave. 
+ */ + if (new_active && bond->params.fail_over_mac) + memcpy(bond->dev->dev_addr, new_active->dev->dev_addr, + new_active->dev->addr_len); + if (bond->curr_active_slave && + test_bit(__LINK_STATE_LINKWATCH_PENDING, + &bond->curr_active_slave->dev->state)) { + dprintk("delaying gratuitous arp on %s\n", + bond->curr_active_slave->dev->name); + bond->send_grat_arp = 1; + } else + bond_send_gratuitous_arp(bond); } } @@ -1217,7 +1234,8 @@ static int bond_compute_features(struct bonding *bond) struct slave *slave; struct net_device *bond_dev = bond->dev; unsigned long features = bond_dev->features; - unsigned short max_hard_header_len = ETH_HLEN; + unsigned short max_hard_header_len = max((u16)ETH_HLEN, + bond_dev->hard_header_len); int i; features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES); @@ -1238,6 +1256,24 @@ static int bond_compute_features(struct bonding *bond) return 0; } + +static void bond_setup_by_slave(struct net_device *bond_dev, + struct net_device *slave_dev) +{ + struct bonding *bond = bond_dev->priv; + + bond_dev->neigh_setup = slave_dev->neigh_setup; + bond_dev->header_ops = slave_dev->header_ops; + + bond_dev->type = slave_dev->type; + bond_dev->hard_header_len = slave_dev->hard_header_len; + bond_dev->addr_len = slave_dev->addr_len; + + memcpy(bond_dev->broadcast, slave_dev->broadcast, + slave_dev->addr_len); + bond->setup_by_slave = 1; +} + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -1258,8 +1294,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* bond must be initialized by bond_open() before enslaving */ if (!(bond_dev->flags & IFF_UP)) { - dprintk("Error, master_dev is not up\n"); - return -EPERM; + printk(KERN_WARNING DRV_NAME + " %s: master_dev is not up in bond_enslave\n", + bond_dev->name); } /* already enslaved */ @@ -1312,14 +1349,42 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_undo_flags; } + /* set bonding device ether type by slave - bonding netdevices are + * created with ether_setup, so when the slave type is not ARPHRD_ETHER + * there is a need to override some of the type dependent attribs/funcs. + * + * bond ether type mutual exclusion - don't allow slaves of dissimilar + * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond + */ + if (bond->slave_cnt == 0) { + if (slave_dev->type != ARPHRD_ETHER) + bond_setup_by_slave(bond_dev, slave_dev); + } else if (bond_dev->type != slave_dev->type) { + printk(KERN_ERR DRV_NAME ": %s ether type (%d) is different " + "from other slaves (%d), can not enslave it.\n", + slave_dev->name, + slave_dev->type, bond_dev->type); + res = -EINVAL; + goto err_undo_flags; + } + if (slave_dev->set_mac_address == NULL) { - printk(KERN_ERR DRV_NAME - ": %s: Error: The slave device you specified does " - "not support setting the MAC address. " - "Your kernel likely does not support slave " - "devices.\n", bond_dev->name); - res = -EOPNOTSUPP; - goto err_undo_flags; + if (bond->slave_cnt == 0) { + printk(KERN_WARNING DRV_NAME + ": %s: Warning: The first slave device " + "specified does not support setting the MAC " + "address. 
Enabling the fail_over_mac option.", + bond_dev->name); + bond->params.fail_over_mac = 1; + } else if (!bond->params.fail_over_mac) { + printk(KERN_ERR DRV_NAME + ": %s: Error: The slave device specified " + "does not support setting the MAC address, " + "but fail_over_mac is not enabled.\n" + , bond_dev->name); + res = -EOPNOTSUPP; + goto err_undo_flags; + } } new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL); @@ -1340,16 +1405,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) */ memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN); - /* - * Set slave to master's mac address. The application already - * set the master's mac address to that of the first slave - */ - memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); - addr.sa_family = slave_dev->type; - res = dev_set_mac_address(slave_dev, &addr); - if (res) { - dprintk("Error %d calling set_mac_address\n", res); - goto err_free; + if (!bond->params.fail_over_mac) { + /* + * Set slave to master's mac address. The application already + * set the master's mac address to that of the first slave + */ + memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len); + addr.sa_family = slave_dev->type; + res = dev_set_mac_address(slave_dev, &addr); + if (res) { + dprintk("Error %d calling set_mac_address\n", res); + goto err_free; + } } res = netdev_set_master(slave_dev, bond_dev); @@ -1574,9 +1641,11 @@ err_close: dev_close(slave_dev); err_restore_mac: - memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN); - addr.sa_family = slave_dev->type; - dev_set_mac_address(slave_dev, &addr); + if (!bond->params.fail_over_mac) { + memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN); + addr.sa_family = slave_dev->type; + dev_set_mac_address(slave_dev, &addr); + } err_free: kfree(new_slave); @@ -1749,10 +1818,12 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) /* close slave before restoring its mac address */ dev_close(slave_dev); - /* restore original ("permanent") mac address */ - memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); - addr.sa_family = slave_dev->type; - dev_set_mac_address(slave_dev, &addr); + if (!bond->params.fail_over_mac) { + /* restore original ("permanent") mac address */ + memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); + addr.sa_family = slave_dev->type; + dev_set_mac_address(slave_dev, &addr); + } slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | IFF_SLAVE_INACTIVE | IFF_BONDING | @@ -1764,6 +1835,35 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) } /* +* Destroy a bonding device. +* Must be under rtnl_lock when this function is called. +*/ +void bond_destroy(struct bonding *bond) +{ + bond_deinit(bond->dev); + bond_destroy_sysfs_entry(bond); + unregister_netdevice(bond->dev); +} + +/* +* First release a slave and than destroy the bond if no more slaves iare left. +* Must be under rtnl_lock when this function is called. +*/ +int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev) +{ + struct bonding *bond = bond_dev->priv; + int ret; + + ret = bond_release(bond_dev, slave_dev); + if ((ret == 0) && (bond->slave_cnt == 0)) { + printk(KERN_INFO DRV_NAME ": %s: destroying bond %s.\n", + bond_dev->name, bond_dev->name); + bond_destroy(bond); + } + return ret; +} + +/* * This function releases all slaves. 
*/ static int bond_release_all(struct net_device *bond_dev) @@ -1839,10 +1939,12 @@ static int bond_release_all(struct net_device *bond_dev) /* close slave before restoring its mac address */ dev_close(slave_dev); - /* restore original ("permanent") mac address*/ - memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); - addr.sa_family = slave_dev->type; - dev_set_mac_address(slave_dev, &addr); + if (!bond->params.fail_over_mac) { + /* restore original ("permanent") mac address*/ + memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN); + addr.sa_family = slave_dev->type; + dev_set_mac_address(slave_dev, &addr); + } slave_dev->priv_flags &= ~(IFF_MASTER_8023AD | IFF_MASTER_ALB | IFF_SLAVE_INACTIVE); @@ -2013,6 +2115,17 @@ void bond_mii_monitor(struct net_device *bond_dev) * program could monitor the link itself if needed. */ + if (bond->send_grat_arp) { + if (bond->curr_active_slave && test_bit(__LINK_STATE_LINKWATCH_PENDING, + &bond->curr_active_slave->dev->state)) + dprintk("Needs to send gratuitous arp but not yet\n"); + else { + dprintk("sending delayed gratuitous arp on on %s\n", + bond->curr_active_slave->dev->name); + bond_send_gratuitous_arp(bond); + bond->send_grat_arp = 0; + } + } read_lock(&bond->curr_slave_lock); oldcurrent = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); @@ -2414,7 +2527,7 @@ static void bond_send_gratuitous_arp(struct bonding *bond) if (bond->master_ip) { bond_arp_send(slave->dev, ARPOP_REPLY, bond->master_ip, - bond->master_ip, 0); + bond->master_ip, 0); } list_for_each_entry(vlan, &bond->vlan_list, vlan_list) { @@ -2951,9 +3064,15 @@ static void bond_info_show_master(struct seq_file *seq) curr = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); - seq_printf(seq, "Bonding Mode: %s\n", + seq_printf(seq, "Bonding Mode: %s", bond_mode_name(bond->params.mode)); + if (bond->params.mode == BOND_MODE_ACTIVEBACKUP && + bond->params.fail_over_mac) + seq_printf(seq, " (fail_over_mac)"); + + seq_printf(seq, "\n"); + if (bond->params.mode == BOND_MODE_XOR || bond->params.mode == BOND_MODE_8023AD) { seq_printf(seq, "Transmit Hash Policy: %s (%d)\n", @@ -3233,7 +3352,10 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave switch (event) { case NETDEV_UNREGISTER: if (bond_dev) { - bond_release(bond_dev, slave_dev); + if (bond->setup_by_slave) + bond_release_and_destroy(bond_dev, slave_dev); + else + bond_release(bond_dev, slave_dev); } break; case NETDEV_CHANGE: @@ -3880,6 +4002,13 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr) dprintk("bond=%p, name=%s\n", bond, (bond_dev ? bond_dev->name : "None")); + /* + * If fail_over_mac is enabled, do nothing and return success. + * Returning an error causes ifenslave to fail. 
+ */ + if (bond->params.fail_over_mac) + return 0; + if (!is_valid_ether_addr(sa->sa_data)) { return -EADDRNOTAVAIL; } @@ -4217,6 +4346,8 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params) bond->current_arp_slave = NULL; bond->primary_slave = NULL; bond->dev = bond_dev; + bond->send_grat_arp = 0; + bond->setup_by_slave = 0; INIT_LIST_HEAD(&bond->vlan_list); /* Initialize the device entry points */ @@ -4265,7 +4396,6 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params) #ifdef CONFIG_PROC_FS bond_create_proc_entry(bond); #endif - list_add_tail(&bond->bond_list, &bond_dev_list); return 0; @@ -4599,6 +4729,11 @@ static int bond_check_params(struct bond_params *params) primary = NULL; } + if (fail_over_mac && (bond_mode != BOND_MODE_ACTIVEBACKUP)) + printk(KERN_WARNING DRV_NAME + ": Warning: fail_over_mac only affects " + "active-backup mode.\n"); + /* fill params struct with the proper values */ params->mode = bond_mode; params->xmit_policy = xmit_hashtype; @@ -4610,6 +4745,7 @@ static int bond_check_params(struct bond_params *params) params->use_carrier = use_carrier; params->lacp_fast = lacp_fast; params->primary[0] = 0; + params->fail_over_mac = fail_over_mac; if (primary) { strncpy(params->primary, primary, IFNAMSIZ); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 6f49ca7..80c0c8c 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -164,9 +164,7 @@ static ssize_t bonding_store_bonds(struct class *cls, const char *buffer, size_t printk(KERN_INFO DRV_NAME ": %s is being deleted...\n", bond->dev->name); - bond_deinit(bond->dev); - bond_destroy_sysfs_entry(bond); - unregister_netdevice(bond->dev); + bond_destroy(bond); rtnl_unlock(); goto out; } @@ -260,17 +258,16 @@ static ssize_t bonding_store_slaves(struct device *d, char command[IFNAMSIZ + 1] = { 0, }; char *ifname; int i, res, found, ret = count; + u32 original_mtu; struct slave *slave; struct net_device *dev = NULL; struct bonding *bond = to_bond(d); /* Quick sanity check -- is the bond interface up? */ if (!(bond->dev->flags & IFF_UP)) { - printk(KERN_ERR DRV_NAME - ": %s: Unable to update slaves because interface is down.\n", + printk(KERN_WARNING DRV_NAME + ": %s: doing slave updates when interface is down.\n", bond->dev->name); - ret = -EPERM; - goto out; } /* Note: We can't hold bond->lock here, as bond_create grabs it. 
*/ @@ -327,6 +324,7 @@ static ssize_t bonding_store_slaves(struct device *d, } /* Set the slave's MTU to match the bond */ + original_mtu = dev->mtu; if (dev->mtu != bond->dev->mtu) { if (dev->change_mtu) { res = dev->change_mtu(dev, @@ -341,6 +339,9 @@ static ssize_t bonding_store_slaves(struct device *d, } rtnl_lock(); res = bond_enslave(bond->dev, dev); + bond_for_each_slave(bond, slave, i) + if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) + slave->original_mtu = original_mtu; rtnl_unlock(); if (res) { ret = res; @@ -353,13 +354,17 @@ static ssize_t bonding_store_slaves(struct device *d, bond_for_each_slave(bond, slave, i) if (strnicmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { dev = slave->dev; + original_mtu = slave->original_mtu; break; } if (dev) { printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n", bond->dev->name, dev->name); rtnl_lock(); - res = bond_release(bond->dev, dev); + if (bond->setup_by_slave) + res = bond_release_and_destroy(bond->dev, dev); + else + res = bond_release(bond->dev, dev); rtnl_unlock(); if (res) { ret = res; @@ -367,9 +372,9 @@ static ssize_t bonding_store_slaves(struct device *d, } /* set the slave MTU to the default */ if (dev->change_mtu) { - dev->change_mtu(dev, 1500); + dev->change_mtu(dev, original_mtu); } else { - dev->mtu = 1500; + dev->mtu = original_mtu; } } else { @@ -563,6 +568,54 @@ static ssize_t bonding_store_arp_validate(struct device *d, static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, bonding_store_arp_validate); /* + * Show and store fail_over_mac. User only allowed to change the + * value when there are no slaves. + */ +static ssize_t bonding_show_fail_over_mac(struct device *d, struct device_attribute *attr, char *buf) +{ + struct bonding *bond = to_bond(d); + + return sprintf(buf, "%d\n", bond->params.fail_over_mac) + 1; +} + +static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attribute *attr, const char *buf, size_t count) +{ + int new_value; + int ret = count; + struct bonding *bond = to_bond(d); + + if (bond->slave_cnt != 0) { + printk(KERN_ERR DRV_NAME + ": %s: Can't alter fail_over_mac with slaves in bond.\n", + bond->dev->name); + ret = -EPERM; + goto out; + } + + if (sscanf(buf, "%d", &new_value) != 1) { + printk(KERN_ERR DRV_NAME + ": %s: no fail_over_mac value specified.\n", + bond->dev->name); + ret = -EINVAL; + goto out; + } + + if ((new_value == 0) || (new_value == 1)) { + bond->params.fail_over_mac = new_value; + printk(KERN_INFO DRV_NAME ": %s: Setting fail_over_mac to %d.\n", + bond->dev->name, new_value); + } else { + printk(KERN_INFO DRV_NAME + ": %s: Ignoring invalid fail_over_mac value %d.\n", + bond->dev->name, new_value); + } +out: + return ret; +} + +static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, bonding_show_fail_over_mac, bonding_store_fail_over_mac); + +/* * Show and set the arp timer interval. There are two tricky bits * here. First, if ARP monitoring is activated, then we must disable * MII monitoring. 
Second, if the ARP timer isn't running, we must @@ -1383,6 +1436,7 @@ static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); static struct attribute *per_bond_attrs[] = { &dev_attr_slaves.attr, &dev_attr_mode.attr, + &dev_attr_fail_over_mac.attr, &dev_attr_arp_validate.attr, &dev_attr_arp_interval.attr, &dev_attr_arp_ip_target.attr, diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 2a6af7d..b818060 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -22,8 +22,8 @@ #include "bond_3ad.h" #include "bond_alb.h" -#define DRV_VERSION "3.1.3" -#define DRV_RELDATE "June 13, 2007" +#define DRV_VERSION "3.2.1" +#define DRV_RELDATE "October 15, 2007" #define DRV_NAME "bonding" #define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" @@ -128,6 +128,7 @@ struct bond_params { int arp_interval; int arp_validate; int use_carrier; + int fail_over_mac; int updelay; int downdelay; int lacp_fast; @@ -156,6 +157,7 @@ struct slave { s8 link; /* one of BOND_LINK_XXXX */ s8 state; /* one of BOND_STATE_XXXX */ u32 original_flags; + u32 original_mtu; u32 link_failure_count; u16 speed; u8 duplex; @@ -185,6 +187,8 @@ struct bonding { struct timer_list mii_timer; struct timer_list arp_timer; s8 kill_timers; + s8 send_grat_arp; + s8 setup_by_slave; struct net_device_stats stats; #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_entry; @@ -292,6 +296,8 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond) struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); int bond_create(char *name, struct bond_params *params, struct bonding **newbond); +void bond_destroy(struct bonding *bond); +int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); void bond_deinit(struct net_device *bond_dev); int bond_create_sysfs(void); void bond_destroy_sysfs(void); diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 563bf5f..7df31b5 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c @@ -4443,7 +4443,7 @@ static struct { {REG_MAC_COLL_EXCESS}, {REG_MAC_COLL_LATE} }; -#define CAS_REG_LEN (sizeof(ethtool_register_table)/sizeof(int)) +#define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table) #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) static void cas_read_regs(struct cas *cp, u8 *ptr, int len) diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c new file mode 100644 index 0000000..ed53aaa --- /dev/null +++ b/drivers/net/cpmac.c @@ -0,0 +1,1174 @@ +/* + * Copyright (C) 2006, 2007 Eugene Konev + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/moduleparam.h> + +#include <linux/sched.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/version.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/skbuff.h> +#include <linux/mii.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <asm/gpio.h> + +MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); +MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); +MODULE_LICENSE("GPL"); + +static int debug_level = 8; +static int dumb_switch; + +/* Next 2 are only used in cpmac_probe, so it's pointless to change them */ +module_param(debug_level, int, 0444); +module_param(dumb_switch, int, 0444); + +MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); +MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); + +#define CPMAC_VERSION "0.5.0" +/* stolen from net/ieee80211.h */ +#ifndef MAC_FMT +#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" +#define MAC_ARG(x) ((u8*)(x))[0], ((u8*)(x))[1], ((u8*)(x))[2], \ + ((u8*)(x))[3], ((u8*)(x))[4], ((u8*)(x))[5] +#endif +/* frame size + 802.1q tag */ +#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4) +#define CPMAC_QUEUES 8 + +/* Ethernet registers */ +#define CPMAC_TX_CONTROL 0x0004 +#define CPMAC_TX_TEARDOWN 0x0008 +#define CPMAC_RX_CONTROL 0x0014 +#define CPMAC_RX_TEARDOWN 0x0018 +#define CPMAC_MBP 0x0100 +# define MBP_RXPASSCRC 0x40000000 +# define MBP_RXQOS 0x20000000 +# define MBP_RXNOCHAIN 0x10000000 +# define MBP_RXCMF 0x01000000 +# define MBP_RXSHORT 0x00800000 +# define MBP_RXCEF 0x00400000 +# define MBP_RXPROMISC 0x00200000 +# define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) +# define MBP_RXBCAST 0x00002000 +# define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) +# define MBP_RXMCAST 0x00000020 +# define MBP_MCASTCHAN(channel) ((channel) & 0x7) +#define CPMAC_UNICAST_ENABLE 0x0104 +#define CPMAC_UNICAST_CLEAR 0x0108 +#define CPMAC_MAX_LENGTH 0x010c +#define CPMAC_BUFFER_OFFSET 0x0110 +#define CPMAC_MAC_CONTROL 0x0160 +# define MAC_TXPTYPE 0x00000200 +# define MAC_TXPACE 0x00000040 +# define MAC_MII 0x00000020 +# define MAC_TXFLOW 0x00000010 +# define MAC_RXFLOW 0x00000008 +# define MAC_MTEST 0x00000004 +# define MAC_LOOPBACK 0x00000002 +# define MAC_FDX 0x00000001 +#define CPMAC_MAC_STATUS 0x0164 +# define MAC_STATUS_QOS 0x00000004 +# define MAC_STATUS_RXFLOW 0x00000002 +# define MAC_STATUS_TXFLOW 0x00000001 +#define CPMAC_TX_INT_ENABLE 0x0178 +#define CPMAC_TX_INT_CLEAR 0x017c +#define CPMAC_MAC_INT_VECTOR 0x0180 +# define MAC_INT_STATUS 0x00080000 +# define MAC_INT_HOST 0x00040000 +# define MAC_INT_RX 0x00020000 +# define MAC_INT_TX 0x00010000 +#define CPMAC_MAC_EOI_VECTOR 0x0184 +#define CPMAC_RX_INT_ENABLE 0x0198 +#define CPMAC_RX_INT_CLEAR 0x019c +#define CPMAC_MAC_INT_ENABLE 0x01a8 +#define CPMAC_MAC_INT_CLEAR 0x01ac +#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) +#define CPMAC_MAC_ADDR_MID 0x01d0 +#define CPMAC_MAC_ADDR_HI 0x01d4 +#define CPMAC_MAC_HASH_LO 0x01d8 +#define CPMAC_MAC_HASH_HI 0x01dc +#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4) +#define CPMAC_RX_PTR(channel) (0x0620 + 
(channel) * 4) +#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4) +#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4) +#define CPMAC_REG_END 0x0680 +/* + * Rx/Tx statistics + * TODO: use some of them to fill stats in cpmac_stats() + */ +#define CPMAC_STATS_RX_GOOD 0x0200 +#define CPMAC_STATS_RX_BCAST 0x0204 +#define CPMAC_STATS_RX_MCAST 0x0208 +#define CPMAC_STATS_RX_PAUSE 0x020c +#define CPMAC_STATS_RX_CRC 0x0210 +#define CPMAC_STATS_RX_ALIGN 0x0214 +#define CPMAC_STATS_RX_OVER 0x0218 +#define CPMAC_STATS_RX_JABBER 0x021c +#define CPMAC_STATS_RX_UNDER 0x0220 +#define CPMAC_STATS_RX_FRAG 0x0224 +#define CPMAC_STATS_RX_FILTER 0x0228 +#define CPMAC_STATS_RX_QOSFILTER 0x022c +#define CPMAC_STATS_RX_OCTETS 0x0230 + +#define CPMAC_STATS_TX_GOOD 0x0234 +#define CPMAC_STATS_TX_BCAST 0x0238 +#define CPMAC_STATS_TX_MCAST 0x023c +#define CPMAC_STATS_TX_PAUSE 0x0240 +#define CPMAC_STATS_TX_DEFER 0x0244 +#define CPMAC_STATS_TX_COLLISION 0x0248 +#define CPMAC_STATS_TX_SINGLECOLL 0x024c +#define CPMAC_STATS_TX_MULTICOLL 0x0250 +#define CPMAC_STATS_TX_EXCESSCOLL 0x0254 +#define CPMAC_STATS_TX_LATECOLL 0x0258 +#define CPMAC_STATS_TX_UNDERRUN 0x025c +#define CPMAC_STATS_TX_CARRIERSENSE 0x0260 +#define CPMAC_STATS_TX_OCTETS 0x0264 + +#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg))) +#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \ + (reg))) + +/* MDIO bus */ +#define CPMAC_MDIO_VERSION 0x0000 +#define CPMAC_MDIO_CONTROL 0x0004 +# define MDIOC_IDLE 0x80000000 +# define MDIOC_ENABLE 0x40000000 +# define MDIOC_PREAMBLE 0x00100000 +# define MDIOC_FAULT 0x00080000 +# define MDIOC_FAULTDETECT 0x00040000 +# define MDIOC_INTTEST 0x00020000 +# define MDIOC_CLKDIV(div) ((div) & 0xff) +#define CPMAC_MDIO_ALIVE 0x0008 +#define CPMAC_MDIO_LINK 0x000c +#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8) +# define MDIO_BUSY 0x80000000 +# define MDIO_WRITE 0x40000000 +# define MDIO_REG(reg) (((reg) & 0x1f) << 21) +# define MDIO_PHY(phy) (((phy) & 0x1f) << 16) +# define MDIO_DATA(data) ((data) & 0xffff) +#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8) +# define PHYSEL_LINKSEL 0x00000040 +# define PHYSEL_LINKINT 0x00000020 + +struct cpmac_desc { + u32 hw_next; + u32 hw_data; + u16 buflen; + u16 bufflags; + u16 datalen; + u16 dataflags; +#define CPMAC_SOP 0x8000 +#define CPMAC_EOP 0x4000 +#define CPMAC_OWN 0x2000 +#define CPMAC_EOQ 0x1000 + struct sk_buff *skb; + struct cpmac_desc *next; + dma_addr_t mapping; + dma_addr_t data_mapping; +}; + +struct cpmac_priv { + spinlock_t lock; + spinlock_t rx_lock; + struct cpmac_desc *rx_head; + int ring_size; + struct cpmac_desc *desc_ring; + dma_addr_t dma_ring; + void __iomem *regs; + struct mii_bus *mii_bus; + struct phy_device *phy; + char phy_name[BUS_ID_SIZE]; + int oldlink, oldspeed, oldduplex; + u32 msg_enable; + struct net_device *dev; + struct work_struct reset_work; + struct platform_device *pdev; +}; + +static irqreturn_t cpmac_irq(int, void *); +static void cpmac_hw_start(struct net_device *dev); +static void cpmac_hw_stop(struct net_device *dev); +static int cpmac_stop(struct net_device *dev); +static int cpmac_open(struct net_device *dev); + +static void cpmac_dump_regs(struct net_device *dev) +{ + int i; + struct cpmac_priv *priv = netdev_priv(dev); + for (i = 0; i < CPMAC_REG_END; i += 4) { + if (i % 16 == 0) { + if (i) + printk("\n"); + printk(KERN_DEBUG "%s: reg[%p]:", dev->name, + priv->regs + i); + } + printk(" %08x", cpmac_read(priv->regs, i)); + } + printk("\n"); +} + +static void 
cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) +{ + int i; + printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc); + for (i = 0; i < sizeof(*desc) / 4; i++) + printk(" %08x", ((u32 *)desc)[i]); + printk("\n"); +} + +static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) +{ + int i; + printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len); + for (i = 0; i < skb->len; i++) { + if (i % 16 == 0) { + if (i) + printk("\n"); + printk(KERN_DEBUG "%s: data[%p]:", dev->name, + skb->data + i); + } + printk(" %02x", ((u8 *)skb->data)[i]); + } + printk("\n"); +} + +static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) +{ + u32 val; + + while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) + cpu_relax(); + cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | + MDIO_PHY(phy_id)); + while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) + cpu_relax(); + return MDIO_DATA(val); +} + +static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, + int reg, u16 val) +{ + while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) + cpu_relax(); + cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | + MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); + return 0; +} + +static int cpmac_mdio_reset(struct mii_bus *bus) +{ + ar7_device_reset(AR7_RESET_BIT_MDIO); + cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | + MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1)); + return 0; +} + +static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; + +static struct mii_bus cpmac_mii = { + .name = "cpmac-mii", + .read = cpmac_mdio_read, + .write = cpmac_mdio_write, + .reset = cpmac_mdio_reset, + .irq = mii_irqs, +}; + +static int cpmac_config(struct net_device *dev, struct ifmap *map) +{ + if (dev->flags & IFF_UP) + return -EBUSY; + + /* Don't allow changing the I/O address */ + if (map->base_addr != dev->base_addr) + return -EOPNOTSUPP; + + /* ignore other fields */ + return 0; +} + +static void cpmac_set_multicast_list(struct net_device *dev) +{ + struct dev_mc_list *iter; + int i; + u8 tmp; + u32 mbp, bit, hash[2] = { 0, }; + struct cpmac_priv *priv = netdev_priv(dev); + + mbp = cpmac_read(priv->regs, CPMAC_MBP); + if (dev->flags & IFF_PROMISC) { + cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | + MBP_RXPROMISC); + } else { + cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); + if (dev->flags & IFF_ALLMULTI) { + /* enable all multicast mode */ + cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); + cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); + } else { + /* + * cpmac uses some strange mac address hashing + * (not crc32) + */ + for (i = 0, iter = dev->mc_list; i < dev->mc_count; + i++, iter = iter->next) { + bit = 0; + tmp = iter->dmi_addr[0]; + bit ^= (tmp >> 2) ^ (tmp << 4); + tmp = iter->dmi_addr[1]; + bit ^= (tmp >> 4) ^ (tmp << 2); + tmp = iter->dmi_addr[2]; + bit ^= (tmp >> 6) ^ tmp; + tmp = iter->dmi_addr[3]; + bit ^= (tmp >> 2) ^ (tmp << 4); + tmp = iter->dmi_addr[4]; + bit ^= (tmp >> 4) ^ (tmp << 2); + tmp = iter->dmi_addr[5]; + bit ^= (tmp >> 6) ^ tmp; + bit &= 0x3f; + hash[bit / 32] |= 1 << (bit % 32); + } + + cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); + cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); + } + } +} + +static struct sk_buff *cpmac_rx_one(struct net_device *dev, + struct cpmac_priv *priv, + struct cpmac_desc *desc) +{ + struct sk_buff *skb, *result = NULL; + + if (unlikely(netif_msg_hw(priv))) + cpmac_dump_desc(dev, 
desc); + cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); + if (unlikely(!desc->datalen)) { + if (netif_msg_rx_err(priv) && net_ratelimit()) + printk(KERN_WARNING "%s: rx: spurious interrupt\n", + dev->name); + return NULL; + } + + skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE); + if (likely(skb)) { + skb_reserve(skb, 2); + skb_put(desc->skb, desc->datalen); + desc->skb->protocol = eth_type_trans(desc->skb, dev); + desc->skb->ip_summed = CHECKSUM_NONE; + dev->stats.rx_packets++; + dev->stats.rx_bytes += desc->datalen; + result = desc->skb; + dma_unmap_single(&dev->dev, desc->data_mapping, CPMAC_SKB_SIZE, + DMA_FROM_DEVICE); + desc->skb = skb; + desc->data_mapping = dma_map_single(&dev->dev, skb->data, + CPMAC_SKB_SIZE, + DMA_FROM_DEVICE); + desc->hw_data = (u32)desc->data_mapping; + if (unlikely(netif_msg_pktdata(priv))) { + printk(KERN_DEBUG "%s: received packet:\n", dev->name); + cpmac_dump_skb(dev, result); + } + } else { + if (netif_msg_rx_err(priv) && net_ratelimit()) + printk(KERN_WARNING + "%s: low on skbs, dropping packet\n", dev->name); + dev->stats.rx_dropped++; + } + + desc->buflen = CPMAC_SKB_SIZE; + desc->dataflags = CPMAC_OWN; + + return result; +} + +static int cpmac_poll(struct net_device *dev, int *budget) +{ + struct sk_buff *skb; + struct cpmac_desc *desc; + int received = 0, quota = min(dev->quota, *budget); + struct cpmac_priv *priv = netdev_priv(dev); + + spin_lock(&priv->rx_lock); + if (unlikely(!priv->rx_head)) { + if (netif_msg_rx_err(priv) && net_ratelimit()) + printk(KERN_WARNING "%s: rx: polling, but no queue\n", + dev->name); + netif_rx_complete(dev); + return 0; + } + + desc = priv->rx_head; + while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) { + skb = cpmac_rx_one(dev, priv, desc); + if (likely(skb)) { + netif_receive_skb(skb); + received++; + } + desc = desc->next; + } + + priv->rx_head = desc; + spin_unlock(&priv->rx_lock); + *budget -= received; + dev->quota -= received; + if (unlikely(netif_msg_rx_status(priv))) + printk(KERN_DEBUG "%s: poll processed %d packets\n", dev->name, + received); + if (desc->dataflags & CPMAC_OWN) { + netif_rx_complete(dev); + cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); + cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); + return 0; + } + + return 1; +} + +static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + int queue, len; + struct cpmac_desc *desc; + struct cpmac_priv *priv = netdev_priv(dev); + + if (unlikely(skb_padto(skb, ETH_ZLEN))) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + printk(KERN_WARNING + "%s: tx: padding failed, dropping\n", dev->name); + spin_lock(&priv->lock); + dev->stats.tx_dropped++; + spin_unlock(&priv->lock); + return -ENOMEM; + } + + len = max(skb->len, ETH_ZLEN); + queue = skb->queue_mapping; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + netif_stop_subqueue(dev, queue); +#else + netif_stop_queue(dev); +#endif + + desc = &priv->desc_ring[queue]; + if (unlikely(desc->dataflags & CPMAC_OWN)) { + if (netif_msg_tx_err(priv) && net_ratelimit()) + printk(KERN_WARNING "%s: tx dma ring full, dropping\n", + dev->name); + spin_lock(&priv->lock); + dev->stats.tx_dropped++; + spin_unlock(&priv->lock); + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + spin_lock(&priv->lock); + dev->trans_start = jiffies; + spin_unlock(&priv->lock); + desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; + desc->skb = skb; + desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, + DMA_TO_DEVICE); + desc->hw_data = (u32)desc->data_mapping; + desc->datalen = 
len; + desc->buflen = len; + if (unlikely(netif_msg_tx_queued(priv))) + printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb, + skb->len); + if (unlikely(netif_msg_hw(priv))) + cpmac_dump_desc(dev, desc); + if (unlikely(netif_msg_pktdata(priv))) + cpmac_dump_skb(dev, skb); + cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); + + return 0; +} + +static void cpmac_end_xmit(struct net_device *dev, int queue) +{ + struct cpmac_desc *desc; + struct cpmac_priv *priv = netdev_priv(dev); + + desc = &priv->desc_ring[queue]; + cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); + if (likely(desc->skb)) { + spin_lock(&priv->lock); + dev->stats.tx_packets++; + dev->stats.tx_bytes += desc->skb->len; + spin_unlock(&priv->lock); + dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, + DMA_TO_DEVICE); + + if (unlikely(netif_msg_tx_done(priv))) + printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name, + desc->skb, desc->skb->len); + + dev_kfree_skb_irq(desc->skb); + desc->skb = NULL; +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (netif_subqueue_stopped(dev, queue)) + netif_wake_subqueue(dev, queue); +#else + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); +#endif + } else { + if (netif_msg_tx_err(priv) && net_ratelimit()) + printk(KERN_WARNING + "%s: end_xmit: spurious interrupt\n", dev->name); +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + if (netif_subqueue_stopped(dev, queue)) + netif_wake_subqueue(dev, queue); +#else + if (netif_queue_stopped(dev)) + netif_wake_queue(dev); +#endif + } +} + +static void cpmac_hw_stop(struct net_device *dev) +{ + int i; + struct cpmac_priv *priv = netdev_priv(dev); + struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; + + ar7_device_reset(pdata->reset_bit); + cpmac_write(priv->regs, CPMAC_RX_CONTROL, + cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); + cpmac_write(priv->regs, CPMAC_TX_CONTROL, + cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); + for (i = 0; i < 8; i++) { + cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); + cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); + } + cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_MAC_CONTROL, + cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); +} + +static void cpmac_hw_start(struct net_device *dev) +{ + int i; + struct cpmac_priv *priv = netdev_priv(dev); + struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; + + ar7_device_reset(pdata->reset_bit); + for (i = 0; i < 8; i++) { + cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); + cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); + } + cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); + + cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | + MBP_RXMCAST); + cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); + for (i = 0; i < 8; i++) + cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); + cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); + cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | + (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) | + (dev->dev_addr[3] << 24)); + cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); + cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); + cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); + cpmac_write(priv->regs, 
CPMAC_UNICAST_ENABLE, 1); + cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); + cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); + cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); + + cpmac_write(priv->regs, CPMAC_RX_CONTROL, + cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); + cpmac_write(priv->regs, CPMAC_TX_CONTROL, + cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); + cpmac_write(priv->regs, CPMAC_MAC_CONTROL, + cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | + MAC_FDX); +} + +static void cpmac_clear_rx(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + struct cpmac_desc *desc; + int i; + if (unlikely(!priv->rx_head)) + return; + desc = priv->rx_head; + for (i = 0; i < priv->ring_size; i++) { + if ((desc->dataflags & CPMAC_OWN) == 0) { + if (netif_msg_rx_err(priv) && net_ratelimit()) + printk(KERN_WARNING "%s: packet dropped\n", + dev->name); + if (unlikely(netif_msg_hw(priv))) + cpmac_dump_desc(dev, desc); + desc->dataflags = CPMAC_OWN; + dev->stats.rx_dropped++; + } + desc = desc->next; + } +} + +static void cpmac_clear_tx(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + int i; + if (unlikely(!priv->desc_ring)) + return; + for (i = 0; i < CPMAC_QUEUES; i++) + if (priv->desc_ring[i].skb) { + dev_kfree_skb_any(priv->desc_ring[i].skb); + if (netif_subqueue_stopped(dev, i)) + netif_wake_subqueue(dev, i); + } +} + +static void cpmac_hw_error(struct work_struct *work) +{ + struct cpmac_priv *priv = + container_of(work, struct cpmac_priv, reset_work); + + spin_lock(&priv->rx_lock); + cpmac_clear_rx(priv->dev); + spin_unlock(&priv->rx_lock); + cpmac_clear_tx(priv->dev); + cpmac_hw_start(priv->dev); + netif_start_queue(priv->dev); +} + +static irqreturn_t cpmac_irq(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct cpmac_priv *priv; + int queue; + u32 status; + + if (!dev) + return IRQ_NONE; + + priv = netdev_priv(dev); + + status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); + + if (unlikely(netif_msg_intr(priv))) + printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name, + status); + + if (status & MAC_INT_TX) + cpmac_end_xmit(dev, (status & 7)); + + if (status & MAC_INT_RX) { + queue = (status >> 8) & 7; + netif_rx_schedule(dev); + cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); + } + + cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); + + if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) { + if (netif_msg_drv(priv) && net_ratelimit()) + printk(KERN_ERR "%s: hw error, resetting...\n", + dev->name); + netif_stop_queue(dev); + cpmac_hw_stop(dev); + schedule_work(&priv->reset_work); + if (unlikely(netif_msg_hw(priv))) + cpmac_dump_regs(dev); + } + + return IRQ_HANDLED; +} + +static void cpmac_tx_timeout(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + int i; + + spin_lock(&priv->lock); + dev->stats.tx_errors++; + spin_unlock(&priv->lock); + if (netif_msg_tx_err(priv) && net_ratelimit()) + printk(KERN_WARNING "%s: transmit timeout\n", dev->name); + /* + * FIXME: waking up random queue is not the best thing to + * do... on the other hand why we got here at all? 
+ */ +#ifdef CONFIG_NETDEVICES_MULTIQUEUE + for (i = 0; i < CPMAC_QUEUES; i++) + if (priv->desc_ring[i].skb) { + dev_kfree_skb_any(priv->desc_ring[i].skb); + netif_wake_subqueue(dev, i); + break; + } +#else + if (priv->desc_ring[0].skb) + dev_kfree_skb_any(priv->desc_ring[0].skb); + netif_wake_queue(dev); +#endif +} + +static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct cpmac_priv *priv = netdev_priv(dev); + if (!(netif_running(dev))) + return -EINVAL; + if (!priv->phy) + return -EINVAL; + if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) || + (cmd == SIOCSMIIREG)) + return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd); + + return -EOPNOTSUPP; +} + +static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + if (priv->phy) + return phy_ethtool_gset(priv->phy, cmd); + + return -EINVAL; +} + +static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (priv->phy) + return phy_ethtool_sset(priv->phy, cmd); + + return -EINVAL; +} + +static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + ring->rx_max_pending = 1024; + ring->rx_mini_max_pending = 1; + ring->rx_jumbo_max_pending = 1; + ring->tx_max_pending = 1; + + ring->rx_pending = priv->ring_size; + ring->rx_mini_pending = 1; + ring->rx_jumbo_pending = 1; + ring->tx_pending = 1; +} + +static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring) +{ + struct cpmac_priv *priv = netdev_priv(dev); + + if (dev->flags && IFF_UP) + return -EBUSY; + priv->ring_size = ring->rx_pending; + return 0; +} + +static void cpmac_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strcpy(info->driver, "cpmac"); + strcpy(info->version, CPMAC_VERSION); + info->fw_version[0] = '\0'; + sprintf(info->bus_info, "%s", "cpmac"); + info->regdump_len = 0; +} + +static const struct ethtool_ops cpmac_ethtool_ops = { + .get_settings = cpmac_get_settings, + .set_settings = cpmac_set_settings, + .get_drvinfo = cpmac_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = cpmac_get_ringparam, + .set_ringparam = cpmac_set_ringparam, +}; + +static void cpmac_adjust_link(struct net_device *dev) +{ + struct cpmac_priv *priv = netdev_priv(dev); + int new_state = 0; + + spin_lock(&priv->lock); + if (priv->phy->link) { + netif_start_queue(dev); + if (priv->phy->duplex != priv->oldduplex) { + new_state = 1; + priv->oldduplex = priv->phy->duplex; + } + + if (priv->phy->speed != priv->oldspeed) { + new_state = 1; + priv->oldspeed = priv->phy->speed; + } + + if (!priv->oldlink) { + new_state = 1; + priv->oldlink = 1; + netif_schedule(dev); + } + } else if (priv->oldlink) { + netif_stop_queue(dev); + new_state = 1; + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (new_state && netif_msg_link(priv) && net_ratelimit()) + phy_print_status(priv->phy); + + spin_unlock(&priv->lock); +} + +static int cpmac_open(struct net_device *dev) +{ + int i, size, res; + struct cpmac_priv *priv = netdev_priv(dev); + struct resource *mem; + struct cpmac_desc *desc; + struct sk_buff *skb; + + priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, + 0, PHY_INTERFACE_MODE_MII); + if (IS_ERR(priv->phy)) { + if (netif_msg_drv(priv)) + printk(KERN_ERR "%s: Could not attach to PHY\n", + dev->name); + return 
PTR_ERR(priv->phy); + } + + mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); + if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) { + if (netif_msg_drv(priv)) + printk(KERN_ERR "%s: failed to request registers\n", + dev->name); + res = -ENXIO; + goto fail_reserve; + } + + priv->regs = ioremap(mem->start, mem->end - mem->start); + if (!priv->regs) { + if (netif_msg_drv(priv)) + printk(KERN_ERR "%s: failed to remap registers\n", + dev->name); + res = -ENXIO; + goto fail_remap; + } + + size = priv->ring_size + CPMAC_QUEUES; + priv->desc_ring = dma_alloc_coherent(&dev->dev, + sizeof(struct cpmac_desc) * size, + &priv->dma_ring, + GFP_KERNEL); + if (!priv->desc_ring) { + res = -ENOMEM; + goto fail_alloc; + } + + for (i = 0; i < size; i++) + priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; + + priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; + for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { + skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE); + if (unlikely(!skb)) { + res = -ENOMEM; + goto fail_desc; + } + skb_reserve(skb, 2); + desc->skb = skb; + desc->data_mapping = dma_map_single(&dev->dev, skb->data, + CPMAC_SKB_SIZE, + DMA_FROM_DEVICE); + desc->hw_data = (u32)desc->data_mapping; + desc->buflen = CPMAC_SKB_SIZE; + desc->dataflags = CPMAC_OWN; + desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; + desc->hw_next = (u32)desc->next->mapping; + } + + if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, + dev->name, dev))) { + if (netif_msg_drv(priv)) + printk(KERN_ERR "%s: failed to obtain irq\n", + dev->name); + goto fail_irq; + } + + INIT_WORK(&priv->reset_work, cpmac_hw_error); + cpmac_hw_start(dev); + + priv->phy->state = PHY_CHANGELINK; + phy_start(priv->phy); + + return 0; + +fail_irq: +fail_desc: + for (i = 0; i < priv->ring_size; i++) { + if (priv->rx_head[i].skb) { + dma_unmap_single(&dev->dev, + priv->rx_head[i].data_mapping, + CPMAC_SKB_SIZE, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_head[i].skb); + } + } +fail_alloc: + kfree(priv->desc_ring); + iounmap(priv->regs); + +fail_remap: + release_mem_region(mem->start, mem->end - mem->start); + +fail_reserve: + phy_disconnect(priv->phy); + + return res; +} + +static int cpmac_stop(struct net_device *dev) +{ + int i; + struct cpmac_priv *priv = netdev_priv(dev); + struct resource *mem; + + netif_stop_queue(dev); + + cancel_work_sync(&priv->reset_work); + phy_stop(priv->phy); + phy_disconnect(priv->phy); + priv->phy = NULL; + + cpmac_hw_stop(dev); + + for (i = 0; i < 8; i++) + cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); + cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); + cpmac_write(priv->regs, CPMAC_MBP, 0); + + free_irq(dev->irq, dev); + iounmap(priv->regs); + mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); + release_mem_region(mem->start, mem->end - mem->start); + priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; + for (i = 0; i < priv->ring_size; i++) { + if (priv->rx_head[i].skb) { + dma_unmap_single(&dev->dev, + priv->rx_head[i].data_mapping, + CPMAC_SKB_SIZE, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_head[i].skb); + } + } + + dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * + (CPMAC_QUEUES + priv->ring_size), + priv->desc_ring, priv->dma_ring); + return 0; +} + +static int external_switch; + +static int __devinit cpmac_probe(struct platform_device *pdev) +{ + int rc, phy_id; + struct resource *mem; + struct cpmac_priv *priv; + struct net_device *dev; + struct plat_cpmac_data *pdata; + + pdata = pdev->dev.platform_data; + + for 
(phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { + if (!(pdata->phy_mask & (1 << phy_id))) + continue; + if (!cpmac_mii.phy_map[phy_id]) + continue; + break; + } + + if (phy_id == PHY_MAX_ADDR) { + if (external_switch || dumb_switch) + phy_id = 0; + else { + printk(KERN_ERR "cpmac: no PHY present\n"); + return -ENODEV; + } + } + + dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); + + if (!dev) { + printk(KERN_ERR "cpmac: Unable to allocate net_device\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, dev); + priv = netdev_priv(dev); + + priv->pdev = pdev; + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); + if (!mem) { + rc = -ENODEV; + goto fail; + } + + dev->irq = platform_get_irq_byname(pdev, "irq"); + + dev->open = cpmac_open; + dev->stop = cpmac_stop; + dev->set_config = cpmac_config; + dev->hard_start_xmit = cpmac_start_xmit; + dev->do_ioctl = cpmac_ioctl; + dev->set_multicast_list = cpmac_set_multicast_list; + dev->tx_timeout = cpmac_tx_timeout; + dev->ethtool_ops = &cpmac_ethtool_ops; + dev->poll = cpmac_poll; + dev->weight = 64; + dev->features |= NETIF_F_MULTI_QUEUE; + + spin_lock_init(&priv->lock); + spin_lock_init(&priv->rx_lock); + priv->dev = dev; + priv->ring_size = 64; + priv->msg_enable = netif_msg_init(debug_level, 0xff); + memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); + if (phy_id == 31) { + snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, + cpmac_mii.id, phy_id); + } else + snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1); + + if ((rc = register_netdev(dev))) { + printk(KERN_ERR "cpmac: error %i registering device %s\n", rc, + dev->name); + goto fail; + } + + if (netif_msg_probe(priv)) { + printk(KERN_INFO + "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: " + MAC_FMT ")\n", dev->name, (void *)mem->start, dev->irq, + priv->phy_name, MAC_ARG(dev->dev_addr)); + } + return 0; + +fail: + free_netdev(dev); + return rc; +} + +static int __devexit cpmac_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + unregister_netdev(dev); + free_netdev(dev); + return 0; +} + +static struct platform_driver cpmac_driver = { + .driver.name = "cpmac", + .probe = cpmac_probe, + .remove = __devexit_p(cpmac_remove), +}; + +int __devinit cpmac_init(void) +{ + u32 mask; + int i, res; + + cpmac_mii.priv = ioremap(AR7_REGS_MDIO, 256); + + if (!cpmac_mii.priv) { + printk(KERN_ERR "Can't ioremap mdio registers\n"); + return -ENXIO; + } + +#warning FIXME: unhardcode gpio&reset bits + ar7_gpio_disable(26); + ar7_gpio_disable(27); + ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); + ar7_device_reset(AR7_RESET_BIT_CPMAC_HI); + ar7_device_reset(AR7_RESET_BIT_EPHY); + + cpmac_mii.reset(&cpmac_mii); + + for (i = 0; i < 300000; i++) + if ((mask = cpmac_read(cpmac_mii.priv, CPMAC_MDIO_ALIVE))) + break; + else + cpu_relax(); + + mask &= 0x7fffffff; + if (mask & (mask - 1)) { + external_switch = 1; + mask = 0; + } + + cpmac_mii.phy_mask = ~(mask | 0x80000000); + + res = mdiobus_register(&cpmac_mii); + if (res) + goto fail_mii; + + res = platform_driver_register(&cpmac_driver); + if (res) + goto fail_cpmac; + + return 0; + +fail_cpmac: + mdiobus_unregister(&cpmac_mii); + +fail_mii: + iounmap(cpmac_mii.priv); + + return res; +} + +void __devexit cpmac_exit(void) +{ + platform_driver_unregister(&cpmac_driver); + mdiobus_unregister(&cpmac_mii); + iounmap(cpmac_mii.priv); +} + +module_init(cpmac_init); +module_exit(cpmac_exit); diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 
b7a7e2a..0666e62 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -110,6 +110,7 @@ static int e1000_get_settings(struct net_device *netdev, { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + u32 status; if (hw->media_type == e1000_media_type_copper) { @@ -147,16 +148,16 @@ static int e1000_get_settings(struct net_device *netdev, ecmd->transceiver = XCVR_EXTERNAL; } - if (er32(STATUS) & E1000_STATUS_LU) { - - adapter->hw.mac.ops.get_link_up_info(hw, &adapter->link_speed, - &adapter->link_duplex); - ecmd->speed = adapter->link_speed; - - /* unfortunately FULL_DUPLEX != DUPLEX_FULL - * and HALF_DUPLEX != DUPLEX_HALF */ + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + ecmd->speed = 1000; + else if (status & E1000_STATUS_SPEED_100) + ecmd->speed = 100; + else + ecmd->speed = 10; - if (adapter->link_duplex == FULL_DUPLEX) + if (status & E1000_STATUS_FD) ecmd->duplex = DUPLEX_FULL; else ecmd->duplex = DUPLEX_HALF; @@ -170,6 +171,16 @@ static int e1000_get_settings(struct net_device *netdev, return 0; } +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status; + + status = er32(STATUS); + return (status & E1000_STATUS_LU); +} + static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) { struct e1000_mac_info *mac = &adapter->hw.mac; @@ -1451,11 +1462,11 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) } *data = e1000_setup_desc_rings(adapter); - if (data) + if (*data) goto out; *data = e1000_setup_loopback_test(adapter); - if (data) + if (*data) goto err_loopback; *data = e1000_run_loopback_test(adapter); @@ -1751,7 +1762,7 @@ static const struct ethtool_ops e1000_ethtool_ops = { .get_msglevel = e1000_get_msglevel, .set_msglevel = e1000_set_msglevel, .nway_reset = e1000_nway_reset, - .get_link = ethtool_op_get_link, + .get_link = e1000_get_link, .get_eeprom_len = e1000_get_eeprom_len, .get_eeprom = e1000_get_eeprom, .set_eeprom = e1000_set_eeprom, diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index aa82f1a..6451578 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -852,7 +852,7 @@ struct e1000_hw { #ifdef DEBUG #define hw_dbg(hw, format, arg...) \ - printk(KERN_DEBUG, "%s: " format, e1000e_get_hw_dev_name(hw), ##arg); + printk(KERN_DEBUG "%s: " format, e1000e_get_hw_dev_name(hw), ##arg) #else static inline int __attribute__ ((format (printf, 2, 3))) hw_dbg(struct e1000_hw *hw, const char *format, ...) 
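One subtle fix in the e1000e hunks above deserves a note: e1000_loopback_test() reports errors through a u64 *data out-parameter, so the old checks `if (data)` tested the pointer (which is never NULL) rather than the stored result, and the early bail-out on setup failure never triggered; the patch changes them to `if (*data)`. A minimal sketch of the corrected pattern (hypothetical stub names, not driver code):

#include <stdio.h>

/* stand-in for a setup step that reports failure via its return value */
static unsigned long long setup_desc_rings_stub(void)
{
	return 1;	/* non-zero means "setup failed" */
}

static int loopback_test_stub(unsigned long long *data)
{
	*data = setup_desc_rings_stub();
	if (*data)		/* correct: check the reported value ... */
		return -1;	/* ... not the pointer itself, which is always non-NULL */
	return 0;
}

int main(void)
{
	unsigned long long result;

	printf("loopback test: %s\n",
	       loopback_test_stub(&result) ? "aborted on setup error" : "ran");
	return 0;
}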
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h index ac21526..b557bb4 100644 --- a/drivers/net/ehea/ehea.h +++ b/drivers/net/ehea/ehea.h @@ -388,7 +388,7 @@ struct ehea_port_res { #define EHEA_MAX_PORTS 16 struct ehea_adapter { u64 handle; - struct ibmebus_dev *ebus_dev; + struct of_device *ofdev; struct ehea_port *port[EHEA_MAX_PORTS]; struct ehea_eq *neq; /* notification event queue */ struct tasklet_struct neq_tasklet; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 2ba57e6..fe5ffac 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -98,10 +98,10 @@ struct work_struct ehea_rereg_mr_task; struct semaphore dlpar_mem_lock; -static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, +static int __devinit ehea_probe_adapter(struct of_device *dev, const struct of_device_id *id); -static int __devexit ehea_remove(struct ibmebus_dev *dev); +static int __devexit ehea_remove(struct of_device *dev); static struct of_device_id ehea_device_table[] = { { @@ -111,9 +111,9 @@ static struct of_device_id ehea_device_table[] = { {}, }; -static struct ibmebus_driver ehea_driver = { +static struct of_platform_driver ehea_driver = { .name = "ehea", - .id_table = ehea_device_table, + .match_table = ehea_device_table, .probe = ehea_probe_adapter, .remove = ehea_remove, }; @@ -1044,7 +1044,7 @@ static int ehea_reg_interrupts(struct net_device *dev) snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff", dev->name); - ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1, + ret = ibmebus_request_irq(port->qp_eq->attr.ist1, ehea_qp_aff_irq_handler, IRQF_DISABLED, port->int_aff_name, port); if (ret) { @@ -1062,7 +1062,7 @@ static int ehea_reg_interrupts(struct net_device *dev) pr = &port->port_res[i]; snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1, "%s-queue%d", dev->name, i); - ret = ibmebus_request_irq(NULL, pr->eq->attr.ist1, + ret = ibmebus_request_irq(pr->eq->attr.ist1, ehea_recv_irq_handler, IRQF_DISABLED, pr->int_send_name, pr); @@ -1083,11 +1083,11 @@ out: out_free_req: while (--i >= 0) { u32 ist = port->port_res[i].eq->attr.ist1; - ibmebus_free_irq(NULL, ist, &port->port_res[i]); + ibmebus_free_irq(ist, &port->port_res[i]); } out_free_qpeq: - ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port); + ibmebus_free_irq(port->qp_eq->attr.ist1, port); i = port->num_def_qps; goto out; @@ -1104,14 +1104,14 @@ static void ehea_free_interrupts(struct net_device *dev) for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { pr = &port->port_res[i]; - ibmebus_free_irq(NULL, pr->eq->attr.ist1, pr); + ibmebus_free_irq(pr->eq->attr.ist1, pr); if (netif_msg_intr(port)) ehea_info("free send irq for res %d with handle 0x%X", i, pr->eq->attr.ist1); } /* associated events */ - ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port); + ibmebus_free_irq(port->qp_eq->attr.ist1, port); if (netif_msg_intr(port)) ehea_info("associated event interrupt for handle 0x%X freed", port->qp_eq->attr.ist1); @@ -2832,7 +2832,7 @@ static struct device *ehea_register_port(struct ehea_port *port, int ret; port->ofdev.node = of_node_get(dn); - port->ofdev.dev.parent = &port->adapter->ebus_dev->ofdev.dev; + port->ofdev.dev.parent = &port->adapter->ofdev->dev; port->ofdev.dev.bus = &ibmebus_bus_type; sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++); @@ -3011,7 +3011,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter) const u32 *dn_log_port_id; int i = 0; - lhea_dn = adapter->ebus_dev->ofdev.node; + lhea_dn = adapter->ofdev->node; 
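/*
 * Editorial sketch, not part of the patch: the ehea hunks above and below
 * repeat one mechanical pattern, namely struct ibmebus_dev accesses via
 * ->ofdev becoming direct struct of_device accesses.  A minimal fragment
 * of that conversion, assuming the of_get_property() and dev_err()
 * interfaces the file already uses; example_get_handle() is a made-up name.
 */
static u64 example_get_handle(struct of_device *dev)
{
	const u64 *prop;

	/* was: of_get_property(dev->ofdev.node, "ibm,hea-handle", NULL) */
	prop = of_get_property(dev->node, "ibm,hea-handle", NULL);
	if (!prop) {
		/* was: dev_err(&dev->ofdev.dev, ...) */
		dev_err(&dev->dev, "missing ibm,hea-handle property\n");
		return 0;
	}

	return *prop;
}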
while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", @@ -3051,7 +3051,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter, struct device_node *eth_dn = NULL; const u32 *dn_log_port_id; - lhea_dn = adapter->ebus_dev->ofdev.node; + lhea_dn = adapter->ofdev->node; while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", @@ -3157,31 +3157,31 @@ static ssize_t ehea_remove_port(struct device *dev, static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port); static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port); -int ehea_create_device_sysfs(struct ibmebus_dev *dev) +int ehea_create_device_sysfs(struct of_device *dev) { - int ret = device_create_file(&dev->ofdev.dev, &dev_attr_probe_port); + int ret = device_create_file(&dev->dev, &dev_attr_probe_port); if (ret) goto out; - ret = device_create_file(&dev->ofdev.dev, &dev_attr_remove_port); + ret = device_create_file(&dev->dev, &dev_attr_remove_port); out: return ret; } -void ehea_remove_device_sysfs(struct ibmebus_dev *dev) +void ehea_remove_device_sysfs(struct of_device *dev) { - device_remove_file(&dev->ofdev.dev, &dev_attr_probe_port); - device_remove_file(&dev->ofdev.dev, &dev_attr_remove_port); + device_remove_file(&dev->dev, &dev_attr_probe_port); + device_remove_file(&dev->dev, &dev_attr_remove_port); } -static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, +static int __devinit ehea_probe_adapter(struct of_device *dev, const struct of_device_id *id) { struct ehea_adapter *adapter; const u64 *adapter_handle; int ret; - if (!dev || !dev->ofdev.node) { + if (!dev || !dev->node) { ehea_error("Invalid ibmebus device probed"); return -EINVAL; } @@ -3189,36 +3189,36 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); if (!adapter) { ret = -ENOMEM; - dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n"); + dev_err(&dev->dev, "no mem for ehea_adapter\n"); goto out; } list_add(&adapter->list, &adapter_list); - adapter->ebus_dev = dev; + adapter->ofdev = dev; - adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle", + adapter_handle = of_get_property(dev->node, "ibm,hea-handle", NULL); if (adapter_handle) adapter->handle = *adapter_handle; if (!adapter->handle) { - dev_err(&dev->ofdev.dev, "failed getting handle for adapter" - " '%s'\n", dev->ofdev.node->full_name); + dev_err(&dev->dev, "failed getting handle for adapter" + " '%s'\n", dev->node->full_name); ret = -ENODEV; goto out_free_ad; } adapter->pd = EHEA_PD_ID; - dev->ofdev.dev.driver_data = adapter; + dev->dev.driver_data = adapter; /* initialize adapter and ports */ /* get adapter properties */ ret = ehea_sense_adapter_attr(adapter); if (ret) { - dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret); + dev_err(&dev->dev, "sense_adapter_attr failed: %d", ret); goto out_free_ad; } @@ -3226,18 +3226,18 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1); if (!adapter->neq) { ret = -EIO; - dev_err(&dev->ofdev.dev, "NEQ creation failed"); + dev_err(&dev->dev, "NEQ creation failed"); goto out_free_ad; } tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, (unsigned long)adapter); - ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1, + ret = ibmebus_request_irq(adapter->neq->attr.ist1, ehea_interrupt_neq, IRQF_DISABLED, "ehea_neq", adapter); if (ret) { - dev_err(&dev->ofdev.dev, "requesting 
NEQ IRQ failed"); + dev_err(&dev->dev, "requesting NEQ IRQ failed"); goto out_kill_eq; } @@ -3247,7 +3247,7 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, ret = ehea_setup_ports(adapter); if (ret) { - dev_err(&dev->ofdev.dev, "setup_ports failed"); + dev_err(&dev->dev, "setup_ports failed"); goto out_rem_dev_sysfs; } @@ -3258,7 +3258,7 @@ out_rem_dev_sysfs: ehea_remove_device_sysfs(dev); out_free_irq: - ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); + ibmebus_free_irq(adapter->neq->attr.ist1, adapter); out_kill_eq: ehea_destroy_eq(adapter->neq); @@ -3269,9 +3269,9 @@ out: return ret; } -static int __devexit ehea_remove(struct ibmebus_dev *dev) +static int __devexit ehea_remove(struct of_device *dev) { - struct ehea_adapter *adapter = dev->ofdev.dev.driver_data; + struct ehea_adapter *adapter = dev->dev.driver_data; int i; for (i = 0; i < EHEA_MAX_PORTS; i++) @@ -3284,7 +3284,7 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev) flush_scheduled_work(); - ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter); + ibmebus_free_irq(adapter->neq->attr.ist1, adapter); tasklet_kill(&adapter->neq_tasklet); ehea_destroy_eq(adapter->neq); diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index dae30b7..cfbb7aa 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -128,7 +128,7 @@ #else #define DRIVERNAPI #endif -#define FORCEDETH_VERSION "0.60" +#define FORCEDETH_VERSION "0.61" #define DRV_NAME "forcedeth" #include <linux/module.h> @@ -752,7 +752,6 @@ struct fe_priv { /* General data: * Locking: spin_lock(&np->lock); */ - struct net_device_stats stats; struct nv_ethtool_stats estats; int in_shutdown; u32 linkspeed; @@ -1505,15 +1504,16 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev) nv_get_hw_stats(dev); /* copy to net_device stats */ - np->stats.tx_bytes = np->estats.tx_bytes; - np->stats.tx_fifo_errors = np->estats.tx_fifo_errors; - np->stats.tx_carrier_errors = np->estats.tx_carrier_errors; - np->stats.rx_crc_errors = np->estats.rx_crc_errors; - np->stats.rx_over_errors = np->estats.rx_over_errors; - np->stats.rx_errors = np->estats.rx_errors_total; - np->stats.tx_errors = np->estats.tx_errors_total; - } - return &np->stats; + dev->stats.tx_bytes = np->estats.tx_bytes; + dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors; + dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors; + dev->stats.rx_crc_errors = np->estats.rx_crc_errors; + dev->stats.rx_over_errors = np->estats.rx_over_errors; + dev->stats.rx_errors = np->estats.rx_errors_total; + dev->stats.tx_errors = np->estats.tx_errors_total; + } + + return &dev->stats; } /* @@ -1733,7 +1733,7 @@ static void nv_drain_tx(struct net_device *dev) np->tx_ring.ex[i].buflow = 0; } if (nv_release_txskb(dev, &np->tx_skb[i])) - np->stats.tx_dropped++; + dev->stats.tx_dropped++; } } @@ -2049,13 +2049,13 @@ static void nv_tx_done(struct net_device *dev) if (flags & NV_TX_LASTPACKET) { if (flags & NV_TX_ERROR) { if (flags & NV_TX_UNDERFLOW) - np->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (flags & NV_TX_CARRIERLOST) - np->stats.tx_carrier_errors++; - np->stats.tx_errors++; + dev->stats.tx_carrier_errors++; + dev->stats.tx_errors++; } else { - np->stats.tx_packets++; - np->stats.tx_bytes += np->get_tx_ctx->skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; } dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; @@ -2064,13 +2064,13 @@ static void nv_tx_done(struct net_device *dev) if (flags & 
NV_TX2_LASTPACKET) { if (flags & NV_TX2_ERROR) { if (flags & NV_TX2_UNDERFLOW) - np->stats.tx_fifo_errors++; + dev->stats.tx_fifo_errors++; if (flags & NV_TX2_CARRIERLOST) - np->stats.tx_carrier_errors++; - np->stats.tx_errors++; + dev->stats.tx_carrier_errors++; + dev->stats.tx_errors++; } else { - np->stats.tx_packets++; - np->stats.tx_bytes += np->get_tx_ctx->skb->len; + dev->stats.tx_packets++; + dev->stats.tx_bytes += np->get_tx_ctx->skb->len; } dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; @@ -2107,7 +2107,7 @@ static void nv_tx_done_optimized(struct net_device *dev, int limit) if (flags & NV_TX2_LASTPACKET) { if (!(flags & NV_TX2_ERROR)) - np->stats.tx_packets++; + dev->stats.tx_packets++; dev_kfree_skb_any(np->get_tx_ctx->skb); np->get_tx_ctx->skb = NULL; } @@ -2268,13 +2268,13 @@ static int nv_rx_process(struct net_device *dev, int limit) { struct fe_priv *np = netdev_priv(dev); u32 flags; - u32 rx_processed_cnt = 0; + int rx_work = 0; struct sk_buff *skb; int len; while((np->get_rx.orig != np->put_rx.orig) && !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && - (rx_processed_cnt++ < limit)) { + (rx_work < limit)) { dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n", dev->name, flags); @@ -2308,7 +2308,7 @@ static int nv_rx_process(struct net_device *dev, int limit) if (flags & NV_RX_ERROR4) { len = nv_getlen(dev, skb->data, len); if (len < 0) { - np->stats.rx_errors++; + dev->stats.rx_errors++; dev_kfree_skb(skb); goto next_pkt; } @@ -2322,12 +2322,12 @@ static int nv_rx_process(struct net_device *dev, int limit) /* the rest are hard errors */ else { if (flags & NV_RX_MISSEDFRAME) - np->stats.rx_missed_errors++; + dev->stats.rx_missed_errors++; if (flags & NV_RX_CRCERR) - np->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (flags & NV_RX_OVERFLOW) - np->stats.rx_over_errors++; - np->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; dev_kfree_skb(skb); goto next_pkt; } @@ -2343,7 +2343,7 @@ static int nv_rx_process(struct net_device *dev, int limit) if (flags & NV_RX2_ERROR4) { len = nv_getlen(dev, skb->data, len); if (len < 0) { - np->stats.rx_errors++; + dev->stats.rx_errors++; dev_kfree_skb(skb); goto next_pkt; } @@ -2357,10 +2357,10 @@ static int nv_rx_process(struct net_device *dev, int limit) /* the rest are hard errors */ else { if (flags & NV_RX2_CRCERR) - np->stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; if (flags & NV_RX2_OVERFLOW) - np->stats.rx_over_errors++; - np->stats.rx_errors++; + dev->stats.rx_over_errors++; + dev->stats.rx_errors++; dev_kfree_skb(skb); goto next_pkt; } @@ -2389,16 +2389,18 @@ static int nv_rx_process(struct net_device *dev, int limit) netif_rx(skb); #endif dev->last_rx = jiffies; - np->stats.rx_packets++; - np->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; next_pkt: if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) np->get_rx.orig = np->first_rx.orig; if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx)) np->get_rx_ctx = np->first_rx_ctx; + + rx_work++; } - return rx_processed_cnt; + return rx_work; } static int nv_rx_process_optimized(struct net_device *dev, int limit) @@ -2505,8 +2507,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) } dev->last_rx = jiffies; - np->stats.rx_packets++; - np->stats.rx_bytes += len; + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; } else { dev_kfree_skb(skb); } @@ -3727,7 +3729,7 @@ static void nv_do_stats_poll(unsigned long data) static void nv_get_drvinfo(struct 
net_device *dev, struct ethtool_drvinfo *info) { struct fe_priv *np = netdev_priv(dev); - strcpy(info->driver, "forcedeth"); + strcpy(info->driver, DRV_NAME); strcpy(info->version, FORCEDETH_VERSION); strcpy(info->bus_info, pci_name(np->pci_dev)); } @@ -4991,6 +4993,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i u32 phystate_orig = 0, phystate; int phyinitialized = 0; DECLARE_MAC_BUF(mac); + static int printed_version; + + if (!printed_version++) + printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" + " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); dev = alloc_etherdev(sizeof(struct fe_priv)); err = -ENOMEM; @@ -5014,11 +5021,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i np->stats_poll.function = &nv_do_stats_poll; /* timer handler */ err = pci_enable_device(pci_dev); - if (err) { - printk(KERN_INFO "forcedeth: pci_enable_dev failed (%d) for device %s\n", - err, pci_name(pci_dev)); + if (err) goto out_free; - } pci_set_master(pci_dev); @@ -5047,8 +5051,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i } } if (i == DEVICE_COUNT_RESOURCE) { - printk(KERN_INFO "forcedeth: Couldn't find register window for device %s.\n", - pci_name(pci_dev)); + dev_printk(KERN_INFO, &pci_dev->dev, + "Couldn't find register window\n"); goto out_relreg; } @@ -5061,16 +5065,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i np->desc_ver = DESC_VER_3; np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; if (dma_64bit) { - if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { - printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", - pci_name(pci_dev)); - } else { + if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) + dev_printk(KERN_INFO, &pci_dev->dev, + "64-bit DMA failed, using 32-bit addressing\n"); + else dev->features |= NETIF_F_HIGHDMA; - printk(KERN_INFO "forcedeth: using HIGHDMA\n"); - } if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) { - printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed, using 32-bit ring buffers for device %s.\n", - pci_name(pci_dev)); + dev_printk(KERN_INFO, &pci_dev->dev, + "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); } } } else if (id->driver_data & DEV_HAS_LARGEDESC) { @@ -5205,9 +5207,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i * Bad mac address. At least one bios sets the mac address * to 01:23:45:67:89:ab */ - printk(KERN_ERR "%s: Invalid Mac address detected: %s\n", - pci_name(pci_dev), print_mac(mac, dev->dev_addr)); - printk(KERN_ERR "Please complain to your hardware vendor. Switching to a random MAC.\n"); + dev_printk(KERN_ERR, &pci_dev->dev, + "Invalid Mac address detected: %s\n", + print_mac(mac, dev->dev_addr)); + dev_printk(KERN_ERR, &pci_dev->dev, + "Please complain to your hardware vendor. 
Switching to a random MAC.\n"); dev->dev_addr[0] = 0x00; dev->dev_addr[1] = 0x00; dev->dev_addr[2] = 0x6c; @@ -5321,8 +5325,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i break; } if (i == 33) { - printk(KERN_INFO "%s: open: Could not find a valid PHY.\n", - pci_name(pci_dev)); + dev_printk(KERN_INFO, &pci_dev->dev, + "open: Could not find a valid PHY.\n"); goto out_error; } @@ -5344,12 +5348,37 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i err = register_netdev(dev); if (err) { - printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err); + dev_printk(KERN_INFO, &pci_dev->dev, + "unable to register netdev: %d\n", err); goto out_error; } - printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n", - dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device, - pci_name(pci_dev)); + + dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, " + "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", + dev->name, + np->phy_oui, + np->phyaddr, + dev->dev_addr[0], + dev->dev_addr[1], + dev->dev_addr[2], + dev->dev_addr[3], + dev->dev_addr[4], + dev->dev_addr[5]); + + dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", + dev->features & NETIF_F_HIGHDMA ? "highdma " : "", + dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ? + "csum " : "", + dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? + "vlan " : "", + id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "", + id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "", + id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "", + np->gigabit == PHY_GIGABIT ? "gbit " : "", + np->need_linktimer ? "lnktim " : "", + np->msi_flags & NV_MSI_CAPABLE ? "msi " : "", + np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "", + np->desc_ver); return 0; @@ -5567,17 +5596,16 @@ static struct pci_device_id pci_tbl[] = { }; static struct pci_driver driver = { - .name = "forcedeth", - .id_table = pci_tbl, - .probe = nv_probe, - .remove = __devexit_p(nv_remove), - .suspend = nv_suspend, - .resume = nv_resume, + .name = DRV_NAME, + .id_table = pci_tbl, + .probe = nv_probe, + .remove = __devexit_p(nv_remove), + .suspend = nv_suspend, + .resume = nv_resume, }; static int __init init_nic(void) { - printk(KERN_INFO "forcedeth.c: Reverse Engineered nForce ethernet driver. Version %s.\n", FORCEDETH_VERSION); return pci_register_driver(&driver); } diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 0db5e6f..cc288d8 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -168,7 +168,6 @@ static int gfar_probe(struct platform_device *pdev) struct gfar_private *priv = NULL; struct gianfar_platform_data *einfo; struct resource *r; - int idx; int err = 0; DECLARE_MAC_BUF(mac); @@ -261,7 +260,9 @@ static int gfar_probe(struct platform_device *pdev) dev->hard_start_xmit = gfar_start_xmit; dev->tx_timeout = gfar_timeout; dev->watchdog_timeo = TX_TIMEOUT; +#ifdef CONFIG_GFAR_NAPI netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT); +#endif #ifdef CONFIG_NET_POLL_CONTROLLER dev->poll_controller = gfar_netpoll; #endif @@ -931,9 +932,14 @@ tx_skb_fail: /* Returns 0 for success. 
*/ static int gfar_enet_open(struct net_device *dev) { +#ifdef CONFIG_GFAR_NAPI + struct gfar_private *priv = netdev_priv(dev); +#endif int err; +#ifdef CONFIG_GFAR_NAPI napi_enable(&priv->napi); +#endif /* Initialize a bunch of registers */ init_registers(dev); @@ -943,13 +949,17 @@ static int gfar_enet_open(struct net_device *dev) err = init_phy(dev); if(err) { +#ifdef CONFIG_GFAR_NAPI napi_disable(&priv->napi); +#endif return err; } err = startup_gfar(dev); if (err) +#ifdef CONFIG_GFAR_NAPI napi_disable(&priv->napi); +#endif netif_start_queue(dev); @@ -1103,7 +1113,9 @@ static int gfar_close(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); +#ifdef CONFIG_GFAR_NAPI napi_disable(&priv->napi); +#endif stop_gfar(dev); @@ -1225,8 +1237,6 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu) * starting over will fix the problem. */ static void gfar_timeout(struct net_device *dev) { - struct gfar_private *priv = netdev_priv(dev); - dev->stats.tx_errors++; if (dev->flags & IFF_UP) { @@ -1332,8 +1342,9 @@ struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp) return skb; } -static inline void count_errors(unsigned short status, struct gfar_private *priv) +static inline void count_errors(unsigned short status, struct net_device *dev) { + struct gfar_private *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct gfar_extra_stats *estats = &priv->extra_stats; @@ -1527,7 +1538,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) dev->stats.rx_bytes += pkt_len; } else { - count_errors(bdp->status, priv); + count_errors(bdp->status, dev); if (skb) dev_kfree_skb_any(skb); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index ecd156d..ad9e327 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -292,7 +292,7 @@ static int sp_header(struct sk_buff *skb, struct net_device *dev, const void *saddr, unsigned len) { #ifdef CONFIG_INET - if (type != htons(ETH_P_AX25)) + if (type != ETH_P_AX25) return ax25_hard_header(skb, dev, type, daddr, saddr, len); #endif return 0; diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 9e43c47..803a3bd 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -583,7 +583,7 @@ static int ax_header(struct sk_buff *skb, struct net_device *dev, const void *saddr, unsigned len) { #ifdef CONFIG_INET - if (type != htons(ETH_P_AX25)) + if (type != ETH_P_AX25) return ax25_hard_header(skb, dev, type, daddr, saddr, len); #endif return 0; diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c index e4fde17..49421d1 100644 --- a/drivers/net/hp100.c +++ b/drivers/net/hp100.c @@ -8,7 +8,7 @@ ** Extended for new busmaster capable chipsets by ** Siegfried "Frieder" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de> ** -** Maintained by: Jaroslav Kysela <perex@suse.cz> +** Maintained by: Jaroslav Kysela <perex@perex.cz> ** ** This driver has only been tested with ** -- HP J2585B 10/100 Mbit/s PCI Busmaster @@ -2951,7 +2951,7 @@ static struct pci_driver hp100_pci_driver = { */ MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>, " +MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, " "Siegfried \"Frieder\" Loeffler (dg1sek) <floeff@mathematik.uni-stuttgart.de>"); MODULE_DESCRIPTION("HP CASCADE Architecture Driver for 100VG-AnyLan Network Adapters"); diff --git a/drivers/net/ibm_emac/ibm_emac_mal.c b/drivers/net/ibm_emac/ibm_emac_mal.c index 4e49e8c..dcd8826 100644 --- 
a/drivers/net/ibm_emac/ibm_emac_mal.c +++ b/drivers/net/ibm_emac/ibm_emac_mal.c @@ -413,7 +413,10 @@ static int __init mal_probe(struct ocp_device *ocpdev) ocpdev->def->index); return -ENOMEM; } - mal->dcrbase = maldata->dcr_base; + + /* XXX This only works for native dcr for now */ + mal->dcrhost = dcr_map(NULL, maldata->dcr_base, 0); + mal->def = ocpdev->def; INIT_LIST_HEAD(&mal->poll_list); diff --git a/drivers/net/ibm_emac/ibm_emac_mal.h b/drivers/net/ibm_emac/ibm_emac_mal.h index 8f54d62..b8adbe6 100644 --- a/drivers/net/ibm_emac/ibm_emac_mal.h +++ b/drivers/net/ibm_emac/ibm_emac_mal.h @@ -191,7 +191,6 @@ struct mal_commac { }; struct ibm_ocp_mal { - int dcrbase; dcr_host_t dcrhost; struct list_head poll_list; @@ -209,12 +208,12 @@ struct ibm_ocp_mal { static inline u32 get_mal_dcrn(struct ibm_ocp_mal *mal, int reg) { - return dcr_read(mal->dcrhost, mal->dcrbase + reg); + return dcr_read(mal->dcrhost, reg); } static inline void set_mal_dcrn(struct ibm_ocp_mal *mal, int reg, u32 val) { - dcr_write(mal->dcrhost, mal->dcrbase + reg, val); + dcr_write(mal->dcrhost, reg, val); } /* Register MAL devices */ diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 8ea5009..0de3aa2 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -1534,7 +1534,7 @@ static inline int emac_rx_sg_append(struct emac_instance *dev, int slot) dev_kfree_skb(dev->rx_sg_skb); dev->rx_sg_skb = NULL; } else { - cacheable_memcpy(dev->rx_sg_skb->tail, + cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb), dev->rx_skb[slot]->data, len); skb_put(dev->rx_sg_skb, len); emac_recycle_rx_skb(dev, slot, len); @@ -1950,7 +1950,7 @@ static u32 emac_ethtool_get_rx_csum(struct net_device *ndev) { struct emac_instance *dev = netdev_priv(ndev); - return dev->tah_dev != 0; + return dev->tah_dev != NULL; } static int emac_get_regs_len(struct emac_instance *dev) diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c index 5885411..a680eb0 100644 --- a/drivers/net/ibm_newemac/mal.c +++ b/drivers/net/ibm_newemac/mal.c @@ -45,6 +45,8 @@ int __devinit mal_register_commac(struct mal_instance *mal, return -EBUSY; } + if (list_empty(&mal->list)) + napi_enable(&mal->napi); mal->tx_chan_mask |= commac->tx_chan_mask; mal->rx_chan_mask |= commac->rx_chan_mask; list_add(&commac->list, &mal->list); @@ -67,6 +69,8 @@ void __devexit mal_unregister_commac(struct mal_instance *mal, mal->tx_chan_mask &= ~commac->tx_chan_mask; mal->rx_chan_mask &= ~commac->rx_chan_mask; list_del_init(&commac->list); + if (list_empty(&mal->list)) + napi_disable(&mal->napi); spin_unlock_irqrestore(&mal->lock, flags); } @@ -182,7 +186,7 @@ static inline void mal_enable_eob_irq(struct mal_instance *mal) set_mal_dcrn(mal, MAL_CFG, get_mal_dcrn(mal, MAL_CFG) | MAL_CFG_EOPIE); } -/* synchronized by __LINK_STATE_RX_SCHED bit in ndev->state */ +/* synchronized by NAPI state */ static inline void mal_disable_eob_irq(struct mal_instance *mal) { // XXX might want to cache MAL_CFG as the DCR read can be slooooow @@ -317,8 +321,8 @@ void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac) while (test_and_set_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags)) msleep(1); - /* Synchronize with the MAL NAPI poller. 
*/ - napi_disable(&mal->napi); + /* Synchronize with the MAL NAPI poller */ + __napi_synchronize(&mal->napi); } void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) @@ -326,7 +330,12 @@ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac) smp_wmb(); clear_bit(MAL_COMMAC_POLL_DISABLED, &commac->flags); - // XXX might want to kick a poll now... + /* Feels better to trigger a poll here to catch up with events that + * may have happened on this channel while disabled. It will most + * probably be delayed until the next interrupt but that's mostly a + * non-issue in the context where this is called. + */ + napi_schedule(&mal->napi); } static int mal_poll(struct napi_struct *napi, int budget) @@ -336,8 +345,7 @@ static int mal_poll(struct napi_struct *napi, int budget) int received = 0; unsigned long flags; - MAL_DBG2(mal, "poll(%d) %d ->" NL, *budget, - rx_work_limit); + MAL_DBG2(mal, "poll(%d)" NL, budget); again: /* Process TX skbs */ list_for_each(l, &mal->poll_list) { @@ -461,6 +469,7 @@ static int __devinit mal_probe(struct of_device *ofdev, struct mal_instance *mal; int err = 0, i, bd_size; int index = mal_count++; + unsigned int dcr_base; const u32 *prop; u32 cfg; @@ -497,14 +506,14 @@ static int __devinit mal_probe(struct of_device *ofdev, } mal->num_rx_chans = prop[0]; - mal->dcr_base = dcr_resource_start(ofdev->node, 0); - if (mal->dcr_base == 0) { + dcr_base = dcr_resource_start(ofdev->node, 0); + if (dcr_base == 0) { printk(KERN_ERR "mal%d: can't find DCR resource!\n", index); err = -ENODEV; goto fail; } - mal->dcr_host = dcr_map(ofdev->node, mal->dcr_base, 0x100); + mal->dcr_host = dcr_map(ofdev->node, dcr_base, 0x100); if (!DCR_MAP_OK(mal->dcr_host)) { printk(KERN_ERR "mal%d: failed to map DCRs !\n", index); @@ -527,11 +536,12 @@ static int __devinit mal_probe(struct of_device *ofdev, } INIT_LIST_HEAD(&mal->poll_list); - mal->napi.weight = CONFIG_IBM_NEW_EMAC_POLL_WEIGHT; - mal->napi.poll = mal_poll; INIT_LIST_HEAD(&mal->list); spin_lock_init(&mal->lock); + netif_napi_add(NULL, &mal->napi, mal_poll, + CONFIG_IBM_NEW_EMAC_POLL_WEIGHT); + /* Load power-on reset defaults */ mal_reset(mal); @@ -626,7 +636,7 @@ static int __devinit mal_probe(struct of_device *ofdev, fail2: dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma); fail_unmap: - dcr_unmap(mal->dcr_host, mal->dcr_base, 0x100); + dcr_unmap(mal->dcr_host, 0x100); fail: kfree(mal); diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h index cb1a16d..784edb8 100644 --- a/drivers/net/ibm_newemac/mal.h +++ b/drivers/net/ibm_newemac/mal.h @@ -185,7 +185,6 @@ struct mal_commac { struct mal_instance { int version; - int dcr_base; dcr_host_t dcr_host; int num_tx_chans; /* Number of TX channels */ @@ -213,12 +212,12 @@ struct mal_instance { static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg) { - return dcr_read(mal->dcr_host, mal->dcr_base + reg); + return dcr_read(mal->dcr_host, reg); } static inline void set_mal_dcrn(struct mal_instance *mal, int reg, u32 val) { - dcr_write(mal->dcr_host, mal->dcr_base + reg, val); + dcr_write(mal->dcr_host, reg, val); } /* Register MAL devices */ diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c index bcd7fc6..de41695 100644 --- a/drivers/net/ibm_newemac/rgmii.c +++ b/drivers/net/ibm_newemac/rgmii.c @@ -84,7 +84,7 @@ static inline u32 rgmii_mode_mask(int mode, int input) int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode) { struct rgmii_instance *dev = 
dev_get_drvdata(&ofdev->dev); - struct rgmii_regs *p = dev->base; + struct rgmii_regs __iomem *p = dev->base; RGMII_DBG(dev, "attach(%d)" NL, input); @@ -113,7 +113,7 @@ int __devinit rgmii_attach(struct of_device *ofdev, int input, int mode) void rgmii_set_speed(struct of_device *ofdev, int input, int speed) { struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); - struct rgmii_regs *p = dev->base; + struct rgmii_regs __iomem *p = dev->base; u32 ssr; mutex_lock(&dev->lock); @@ -135,7 +135,7 @@ void rgmii_set_speed(struct of_device *ofdev, int input, int speed) void rgmii_get_mdio(struct of_device *ofdev, int input) { struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); - struct rgmii_regs *p = dev->base; + struct rgmii_regs __iomem *p = dev->base; u32 fer; RGMII_DBG2(dev, "get_mdio(%d)" NL, input); @@ -156,7 +156,7 @@ void rgmii_get_mdio(struct of_device *ofdev, int input) void rgmii_put_mdio(struct of_device *ofdev, int input) { struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); - struct rgmii_regs *p = dev->base; + struct rgmii_regs __iomem *p = dev->base; u32 fer; RGMII_DBG2(dev, "put_mdio(%d)" NL, input); @@ -177,7 +177,7 @@ void rgmii_put_mdio(struct of_device *ofdev, int input) void __devexit rgmii_detach(struct of_device *ofdev, int input) { struct rgmii_instance *dev = dev_get_drvdata(&ofdev->dev); - struct rgmii_regs *p = dev->base; + struct rgmii_regs __iomem *p = dev->base; mutex_lock(&dev->lock); @@ -242,7 +242,7 @@ static int __devinit rgmii_probe(struct of_device *ofdev, } rc = -ENOMEM; - dev->base = (struct rgmii_regs *)ioremap(regs.start, + dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start, sizeof(struct rgmii_regs)); if (dev->base == NULL) { printk(KERN_ERR "%s: Can't map device registers!\n", @@ -251,7 +251,7 @@ static int __devinit rgmii_probe(struct of_device *ofdev, } /* Check for RGMII type */ - if (device_is_compatible(ofdev->node, "ibm,rgmii-axon")) + if (of_device_is_compatible(ofdev->node, "ibm,rgmii-axon")) dev->type = RGMII_AXON; else dev->type = RGMII_STANDARD; diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c index e05c7e8..f161fb1 100644 --- a/drivers/net/ibm_newemac/tah.c +++ b/drivers/net/ibm_newemac/tah.c @@ -42,7 +42,7 @@ void __devexit tah_detach(struct of_device *ofdev, int channel) void tah_reset(struct of_device *ofdev) { struct tah_instance *dev = dev_get_drvdata(&ofdev->dev); - struct tah_regs *p = dev->base; + struct tah_regs __iomem *p = dev->base; int n; /* Reset TAH */ @@ -108,7 +108,7 @@ static int __devinit tah_probe(struct of_device *ofdev, } rc = -ENOMEM; - dev->base = (struct tah_regs *)ioremap(regs.start, + dev->base = (struct tah_regs __iomem *)ioremap(regs.start, sizeof(struct tah_regs)); if (dev->base == NULL) { printk(KERN_ERR "%s: Can't map device registers!\n", diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c index d063129..2219ec2 100644 --- a/drivers/net/ibm_newemac/zmii.c +++ b/drivers/net/ibm_newemac/zmii.c @@ -79,7 +79,7 @@ static inline u32 zmii_mode_mask(int mode, int input) int __devinit zmii_attach(struct of_device *ofdev, int input, int *mode) { struct zmii_instance *dev = dev_get_drvdata(&ofdev->dev); - struct zmii_regs *p = dev->base; + struct zmii_regs __iomem *p = dev->base; ZMII_DBG(dev, "init(%d, %d)" NL, input, *mode); @@ -250,7 +250,7 @@ static int __devinit zmii_probe(struct of_device *ofdev, } rc = -ENOMEM; - dev->base = (struct zmii_regs *)ioremap(regs.start, + dev->base = (struct zmii_regs __iomem *)ioremap(regs.start, 
sizeof(struct zmii_regs)); if (dev->base == NULL) { printk(KERN_ERR "%s: Can't map device registers!\n", diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index 59898ce..6888723 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c @@ -754,7 +754,7 @@ static int init_rfdlist(struct net_device *dev) if (sp->RxBuff[i]) { pci_unmap_single(sp->pdev, - le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, sp->rx_buf_sz, PCI_DMA_FROMDEVICE); IPG_DEV_KFREE_SKB(sp->RxBuff[i]); sp->RxBuff[i] = NULL; @@ -871,7 +871,7 @@ static void ipg_nic_txfree(struct net_device *dev) /* Free the transmit buffer. */ if (skb) { pci_unmap_single(sp->pdev, - le64_to_cpu(txfd->frag_info & ~IPG_TFI_FRAGLEN), + le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, skb->len, PCI_DMA_TODEVICE); IPG_DEV_KFREE_SKB(skb); @@ -1413,10 +1413,10 @@ static int ipg_nic_rx(struct net_device *dev) framelen = IPG_RXFRAG_SIZE; } - if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs & + if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | - IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))))) { + IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) { IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n", (unsigned long int) rxfd->rfs); @@ -1425,27 +1425,27 @@ static int ipg_nic_rx(struct net_device *dev) sp->stats.rx_errors++; /* Increment detailed receive error statistics. */ - if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXFIFOOVERRUN)) { + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { IPG_DEBUG_MSG("RX FIFO overrun occured.\n"); sp->stats.rx_fifo_errors++; } - if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXRUNTFRAME)) { + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { IPG_DEBUG_MSG("RX runt occured.\n"); sp->stats.rx_length_errors++; } - if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXOVERSIZEDFRAME)) ; + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ; /* Do nothing, error count handled by a IPG * statistic register. */ - if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXALIGNMENTERROR)) { + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { IPG_DEBUG_MSG("RX alignment error occured.\n"); sp->stats.rx_frame_errors++; } - if (le64_to_cpu(rxfd->rfs & IPG_RFS_RXFCSERROR)) ; + if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ; /* Do nothing, error count handled by a IPG * statistic register. */ @@ -1455,10 +1455,10 @@ static int ipg_nic_rx(struct net_device *dev) * not pass it to higher layer processes. 
*/ if (skb) { - u64 info = rxfd->frag_info; + __le64 info = rxfd->frag_info; pci_unmap_single(sp->pdev, - le64_to_cpu(info & ~IPG_RFI_FRAGLEN), + le64_to_cpu(info) & ~IPG_RFI_FRAGLEN, sp->rx_buf_sz, PCI_DMA_FROMDEVICE); IPG_DEV_KFREE_SKB(skb); @@ -1532,9 +1532,9 @@ static int ipg_nic_rx(struct net_device *dev) if (!i) sp->EmptyRFDListCount++; #endif - while ((le64_to_cpu(rxfd->rfs & IPG_RFS_RFDDONE)) && - !((le64_to_cpu(rxfd->rfs & IPG_RFS_FRAMESTART)) && - (le64_to_cpu(rxfd->rfs & IPG_RFS_FRAMEEND)))) { + while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) && + !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) && + (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) { unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; rxfd = sp->rxd + entry; @@ -1552,7 +1552,7 @@ static int ipg_nic_rx(struct net_device *dev) */ if (sp->RxBuff[entry]) { pci_unmap_single(sp->pdev, - le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, sp->rx_buf_sz, PCI_DMA_FROMDEVICE); IPG_DEV_KFREE_SKB(sp->RxBuff[entry]); } @@ -1730,7 +1730,7 @@ static void ipg_rx_clear(struct ipg_nic_private *sp) IPG_DEV_KFREE_SKB(sp->RxBuff[i]); sp->RxBuff[i] = NULL; pci_unmap_single(sp->pdev, - le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN), + le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, sp->rx_buf_sz, PCI_DMA_FROMDEVICE); } } @@ -1745,7 +1745,7 @@ static void ipg_tx_clear(struct ipg_nic_private *sp) struct ipg_tx *txfd = sp->txd + i; pci_unmap_single(sp->pdev, - le64_to_cpu(txfd->frag_info & ~IPG_TFI_FRAGLEN), + le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, sp->TxBuff[i]->len, PCI_DMA_TODEVICE); IPG_DEV_KFREE_SKB(sp->TxBuff[i]); diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h index 1952d0d..e418b90 100644 --- a/drivers/net/ipg.h +++ b/drivers/net/ipg.h @@ -776,17 +776,17 @@ enum ipg_regs { * TFD field is 64 bits wide. */ struct ipg_tx { - u64 next_desc; - u64 tfc; - u64 frag_info; + __le64 next_desc; + __le64 tfc; + __le64 frag_info; }; /* Receive Frame Descriptor. Note, each RFD field is 64 bits wide. 
*/ struct ipg_rx { - u64 next_desc; - u64 rfs; - u64 frag_info; + __le64 next_desc; + __le64 rfs; + __le64 frag_info; }; struct SJumbo { diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 3e5eca1..a82d8f9 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c @@ -840,7 +840,7 @@ toshoboe_probe (struct toshoboe_cb *self) /* test 1: SIR filter and back to back */ - for (j = 0; j < (sizeof (bauds) / sizeof (int)); ++j) + for (j = 0; j < ARRAY_SIZE(bauds); ++j) { int fir = (j > 1); toshoboe_stopchip (self); diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c index 55ff0fb..8c09344 100644 --- a/drivers/net/irda/pxaficp_ir.c +++ b/drivers/net/irda/pxaficp_ir.c @@ -23,6 +23,7 @@ #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/pm.h> +#include <linux/clk.h> #include <net/irda/irda.h> #include <net/irda/irmod.h> @@ -87,8 +88,30 @@ struct pxa_irda { struct device *dev; struct pxaficp_platform_data *pdata; + struct clk *fir_clk; + struct clk *sir_clk; + struct clk *cur_clk; }; +static inline void pxa_irda_disable_clk(struct pxa_irda *si) +{ + if (si->cur_clk) + clk_disable(si->cur_clk); + si->cur_clk = NULL; +} + +static inline void pxa_irda_enable_firclk(struct pxa_irda *si) +{ + si->cur_clk = si->fir_clk; + clk_enable(si->fir_clk); +} + +static inline void pxa_irda_enable_sirclk(struct pxa_irda *si) +{ + si->cur_clk = si->sir_clk; + clk_enable(si->sir_clk); +} + #define IS_FIR(si) ((si)->speed >= 4000000) #define IRDA_FRAME_SIZE_LIMIT 2047 @@ -134,7 +157,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed) DCSR(si->rxdma) &= ~DCSR_RUN; /* disable FICP */ ICCR0 = 0; - pxa_set_cken(CKEN_FICP, 0); + pxa_irda_disable_clk(si); /* set board transceiver to SIR mode */ si->pdata->transceiver_mode(si->dev, IR_SIRMODE); @@ -144,7 +167,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed) pxa_gpio_mode(GPIO47_STTXD_MD); /* enable the STUART clock */ - pxa_set_cken(CKEN_STUART, 1); + pxa_irda_enable_sirclk(si); } /* disable STUART first */ @@ -169,7 +192,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed) /* disable STUART */ STIER = 0; STISR = 0; - pxa_set_cken(CKEN_STUART, 0); + pxa_irda_disable_clk(si); /* disable FICP first */ ICCR0 = 0; @@ -182,7 +205,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed) pxa_gpio_mode(GPIO47_ICPTXD_MD); /* enable the FICP clock */ - pxa_set_cken(CKEN_FICP, 1); + pxa_irda_enable_firclk(si); si->speed = speed; pxa_irda_fir_dma_rx_start(si); @@ -592,16 +615,15 @@ static void pxa_irda_shutdown(struct pxa_irda *si) STIER = 0; /* disable STUART SIR mode */ STISR = 0; - /* disable the STUART clock */ - pxa_set_cken(CKEN_STUART, 0); /* disable DMA */ DCSR(si->txdma) &= ~DCSR_RUN; DCSR(si->rxdma) &= ~DCSR_RUN; /* disable FICP */ ICCR0 = 0; - /* disable the FICP clock */ - pxa_set_cken(CKEN_FICP, 0); + + /* disable the STUART or FICP clocks */ + pxa_irda_disable_clk(si); DRCMR17 = 0; DRCMR18 = 0; @@ -792,6 +814,13 @@ static int pxa_irda_probe(struct platform_device *pdev) si->dev = &pdev->dev; si->pdata = pdev->dev.platform_data; + si->sir_clk = clk_get(&pdev->dev, "UARTCLK"); + si->fir_clk = clk_get(&pdev->dev, "FICPCLK"); + if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) { + err = PTR_ERR(IS_ERR(si->sir_clk) ? 
si->sir_clk : si->fir_clk); + goto err_mem_4; + } + /* * Initialise the SIR buffers */ @@ -831,6 +860,10 @@ static int pxa_irda_probe(struct platform_device *pdev) err_mem_5: kfree(si->rx_buff.head); err_mem_4: + if (si->sir_clk && !IS_ERR(si->sir_clk)) + clk_put(si->sir_clk); + if (si->fir_clk && !IS_ERR(si->fir_clk)) + clk_put(si->fir_clk); free_netdev(dev); err_mem_3: release_mem_region(__PREG(FICP), 0x1c); @@ -850,6 +883,8 @@ static int pxa_irda_remove(struct platform_device *_dev) unregister_netdev(dev); kfree(si->tx_buff.head); kfree(si->rx_buff.head); + clk_put(si->fir_clk); + clk_put(si->sir_clk); free_netdev(dev); } diff --git a/drivers/net/jazzsonic.c b/drivers/net/jazzsonic.c index d3825c8..5c154fe 100644 --- a/drivers/net/jazzsonic.c +++ b/drivers/net/jazzsonic.c @@ -208,7 +208,6 @@ static int __init jazz_sonic_probe(struct platform_device *pdev) struct sonic_local *lp; struct resource *res; int err = 0; - int i; DECLARE_MAC_BUF(mac); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index be25aa3..662b8d1 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -265,17 +265,16 @@ static __net_init int loopback_net_init(struct net *net) if (err) goto out_free_netdev; - err = 0; net->loopback_dev = dev; + return 0; -out: - if (err) - panic("loopback: Failed to register netdevice: %d\n", err); - return err; out_free_netdev: free_netdev(dev); - goto out; +out: + if (net == &init_net) + panic("loopback: Failed to register netdevice: %d\n", err); + return err; } static __net_exit void loopback_net_exit(struct net *net) diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c index 6589239..1877052 100644 --- a/drivers/net/macmace.c +++ b/drivers/net/macmace.c @@ -538,8 +538,9 @@ static void mace_set_multicast(struct net_device *dev) local_irq_restore(flags); } -static void mace_handle_misc_intrs(struct mace_data *mp, int intr) +static void mace_handle_misc_intrs(struct net_device *dev, int intr) { + struct mace_data *mp = netdev_priv(dev); volatile struct mace *mb = mp->mace; static int mace_babbles, mace_jabbers; @@ -571,7 +572,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) local_irq_save(flags); intr = mb->ir; /* read interrupt register */ - mace_handle_misc_intrs(mp, intr); + mace_handle_misc_intrs(dev, intr); if (intr & XMTINT) { fs = mb->xmtfs; @@ -645,7 +646,6 @@ static void mace_tx_timeout(struct net_device *dev) static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf) { - struct mace_data *mp = netdev_priv(dev); struct sk_buff *skb; unsigned int frame_status = mf->rcvsts; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index b7c81c8..2e4bcd5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -178,7 +178,6 @@ static const struct header_ops macvlan_hard_header_ops = { .create = macvlan_hard_header, .rebuild = eth_rebuild_header, .parse = eth_header_parse, - .rebuild = eth_rebuild_header, .cache = eth_header_cache, .cache_update = eth_header_cache_update, }; diff --git a/drivers/net/mipsnet.c b/drivers/net/mipsnet.c index d593175..37707a0 100644 --- a/drivers/net/mipsnet.c +++ b/drivers/net/mipsnet.c @@ -7,12 +7,12 @@ #define DEBUG #include <linux/init.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> -#include <asm/io.h> #include <asm/mips-boards/simint.h> #include "mipsnet.h" /* actual device IO mapping */ @@ -33,9 
+33,8 @@ static int ioiocpy_frommipsnet(struct net_device *dev, unsigned char *kdata, if (available_len < len) return -EFAULT; - for (; len > 0; len--, kdata++) { + for (; len > 0; len--, kdata++) *kdata = inb(mipsnet_reg_address(dev, rxDataBuffer)); - } return inl(mipsnet_reg_address(dev, rxDataCount)); } @@ -47,16 +46,15 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, char *buf_ptr = skb->data; pr_debug("%s: %s(): telling MIPSNET txDataCount(%d)\n", - dev->name, __FUNCTION__, skb->len); + dev->name, __FUNCTION__, skb->len); outl(skb->len, mipsnet_reg_address(dev, txDataCount)); pr_debug("%s: %s(): sending data to MIPSNET txDataBuffer(%d)\n", - dev->name, __FUNCTION__, skb->len); + dev->name, __FUNCTION__, skb->len); - for (; count_to_go; buf_ptr++, count_to_go--) { + for (; count_to_go; buf_ptr++, count_to_go--) outb(*buf_ptr, mipsnet_reg_address(dev, txDataBuffer)); - } dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; @@ -67,7 +65,7 @@ static inline ssize_t mipsnet_put_todevice(struct net_device *dev, static int mipsnet_xmit(struct sk_buff *skb, struct net_device *dev) { pr_debug("%s:%s(): transmitting %d bytes\n", - dev->name, __FUNCTION__, skb->len); + dev->name, __FUNCTION__, skb->len); /* Only one packet at a time. Once TXDONE interrupt is serviced, the * queue will be restarted. @@ -83,7 +81,8 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) struct sk_buff *skb; size_t len = count; - if (!(skb = alloc_skb(len + 2, GFP_KERNEL))) { + skb = alloc_skb(len + 2, GFP_KERNEL); + if (!skb) { dev->stats.rx_dropped++; return -ENOMEM; } @@ -96,7 +95,7 @@ static inline ssize_t mipsnet_get_fromdev(struct net_device *dev, size_t count) skb->ip_summed = CHECKSUM_UNNECESSARY; pr_debug("%s:%s(): pushing RXed data to kernel\n", - dev->name, __FUNCTION__); + dev->name, __FUNCTION__); netif_rx(skb); dev->stats.rx_packets++; @@ -114,42 +113,44 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id) if (irq == dev->irq) { pr_debug("%s:%s(): irq %d for device\n", - dev->name, __FUNCTION__, irq); + dev->name, __FUNCTION__, irq); retval = IRQ_HANDLED; interruptFlags = inl(mipsnet_reg_address(dev, interruptControl)); pr_debug("%s:%s(): intCtl=0x%016llx\n", dev->name, - __FUNCTION__, interruptFlags); + __FUNCTION__, interruptFlags); if (interruptFlags & MIPSNET_INTCTL_TXDONE) { pr_debug("%s:%s(): got TXDone\n", - dev->name, __FUNCTION__); + dev->name, __FUNCTION__); outl(MIPSNET_INTCTL_TXDONE, mipsnet_reg_address(dev, interruptControl)); - // only one packet at a time, we are done. + /* only one packet at a time, we are done. */ netif_wake_queue(dev); } else if (interruptFlags & MIPSNET_INTCTL_RXDONE) { pr_debug("%s:%s(): got RX data\n", - dev->name, __FUNCTION__); + dev->name, __FUNCTION__); mipsnet_get_fromdev(dev, - inl(mipsnet_reg_address(dev, rxDataCount))); + inl(mipsnet_reg_address(dev, rxDataCount))); pr_debug("%s:%s(): clearing RX int\n", - dev->name, __FUNCTION__); + dev->name, __FUNCTION__); outl(MIPSNET_INTCTL_RXDONE, mipsnet_reg_address(dev, interruptControl)); } else if (interruptFlags & MIPSNET_INTCTL_TESTBIT) { pr_debug("%s:%s(): got test interrupt\n", - dev->name, __FUNCTION__); - // TESTBIT is cleared on read. - // And takes effect after a write with 0 + dev->name, __FUNCTION__); + /* + * TESTBIT is cleared on read. 
+ * And takes effect after a write with 0 + */ outl(0, mipsnet_reg_address(dev, interruptControl)); } else { pr_debug("%s:%s(): no valid fags 0x%016llx\n", - dev->name, __FUNCTION__, interruptFlags); - // Maybe shared IRQ, just ignore, no clearing. + dev->name, __FUNCTION__, interruptFlags); + /* Maybe shared IRQ, just ignore, no clearing. */ retval = IRQ_NONE; } @@ -159,7 +160,7 @@ static irqreturn_t mipsnet_interrupt(int irq, void *dev_id) retval = IRQ_NONE; } return retval; -} //mipsnet_interrupt() +} static int mipsnet_open(struct net_device *dev) { @@ -171,18 +172,18 @@ static int mipsnet_open(struct net_device *dev) if (err) { pr_debug("%s: %s(): can't get irq %d\n", - dev->name, __FUNCTION__, dev->irq); + dev->name, __FUNCTION__, dev->irq); release_region(dev->base_addr, MIPSNET_IO_EXTENT); return err; } pr_debug("%s: %s(): got IO region at 0x%04lx and irq %d for dev.\n", - dev->name, __FUNCTION__, dev->base_addr, dev->irq); + dev->name, __FUNCTION__, dev->base_addr, dev->irq); netif_start_queue(dev); - // test interrupt handler + /* test interrupt handler */ outl(MIPSNET_INTCTL_TESTBIT, mipsnet_reg_address(dev, interruptControl)); @@ -199,8 +200,6 @@ static int mipsnet_close(struct net_device *dev) static void mipsnet_set_mclist(struct net_device *dev) { - // we don't do anything - return; } static int __init mipsnet_probe(struct device *dev) @@ -226,13 +225,13 @@ static int __init mipsnet_probe(struct device *dev) */ netdev->base_addr = 0x4200; netdev->irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB0 + - inl(mipsnet_reg_address(netdev, interruptInfo)); + inl(mipsnet_reg_address(netdev, interruptInfo)); - // Get the io region now, get irq on open() + /* Get the io region now, get irq on open() */ if (!request_region(netdev->base_addr, MIPSNET_IO_EXTENT, "mipsnet")) { pr_debug("%s: %s(): IO region {start: 0x%04lux, len: %d} " - "for dev is not availble.\n", netdev->name, - __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT); + "for dev is not availble.\n", netdev->name, + __FUNCTION__, netdev->base_addr, MIPSNET_IO_EXTENT); err = -EBUSY; goto out_free_netdev; } diff --git a/drivers/net/mipsnet.h b/drivers/net/mipsnet.h index 026c732..0132c67 100644 --- a/drivers/net/mipsnet.h +++ b/drivers/net/mipsnet.h @@ -9,32 +9,34 @@ /* * Id of this Net device, as seen by the core. */ -#define MIPS_NET_DEV_ID ((uint64_t) \ - ((uint64_t)'M'<< 0)| \ - ((uint64_t)'I'<< 8)| \ - ((uint64_t)'P'<<16)| \ - ((uint64_t)'S'<<24)| \ - ((uint64_t)'N'<<32)| \ - ((uint64_t)'E'<<40)| \ - ((uint64_t)'T'<<48)| \ - ((uint64_t)'0'<<56)) +#define MIPS_NET_DEV_ID ((uint64_t) \ + ((uint64_t) 'M' << 0)| \ + ((uint64_t) 'I' << 8)| \ + ((uint64_t) 'P' << 16)| \ + ((uint64_t) 'S' << 24)| \ + ((uint64_t) 'N' << 32)| \ + ((uint64_t) 'E' << 40)| \ + ((uint64_t) 'T' << 48)| \ + ((uint64_t) '0' << 56)) /* * Net status/control block as seen by sw in the core. * (Why not use bit fields? can't be bothered with cross-platform struct * packing.) */ -typedef struct _net_control_block { - /// dev info for probing - /// reads as MIPSNET%d where %d is some form of version - uint64_t devId; /*0x00 */ +struct net_control_block { + /* + * dev info for probing + * reads as MIPSNET%d where %d is some form of version + */ + uint64_t devId; /* 0x00 */ /* * read only busy flag. * Set and cleared by the Net Device to indicate that an rx or a tx * is in progress. */ - uint32_t busy; /*0x08 */ + uint32_t busy; /* 0x08 */ /* * Set by the Net Device. @@ -43,16 +45,16 @@ typedef struct _net_control_block { * rxDataBuffer. 
The value will decrease till 0 until all the data * from rxDataBuffer has been read. */ - uint32_t rxDataCount; /*0x0c */ + uint32_t rxDataCount; /* 0x0c */ #define MIPSNET_MAX_RXTX_DATACOUNT (1<<16) /* - * Settable from the MIPS core, cleared by the Net Device. - * The core should set the number of bytes it wants to send, - * then it should write those bytes of data to txDataBuffer. - * The device will clear txDataCount has been processed (not necessarily sent). + * Settable from the MIPS core, cleared by the Net Device. The core + * should set the number of bytes it wants to send, then it should + * write those bytes of data to txDataBuffer. The device will clear + * txDataCount has been processed (not necessarily sent). */ - uint32_t txDataCount; /*0x10 */ + uint32_t txDataCount; /* 0x10 */ /* * Interrupt control @@ -69,39 +71,42 @@ typedef struct _net_control_block { * To clear the test interrupt, write 0 to this register. */ uint32_t interruptControl; /*0x14 */ -#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1<< 0)) -#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1<< 1)) -#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1<<31)) -#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE|MIPSNET_INTCTL_RXDONE|MIPSNET_INTCTL_TESTBIT) +#define MIPSNET_INTCTL_TXDONE ((uint32_t)(1 << 0)) +#define MIPSNET_INTCTL_RXDONE ((uint32_t)(1 << 1)) +#define MIPSNET_INTCTL_TESTBIT ((uint32_t)(1 << 31)) +#define MIPSNET_INTCTL_ALLSOURCES (MIPSNET_INTCTL_TXDONE | \ + MIPSNET_INTCTL_RXDONE | \ + MIPSNET_INTCTL_TESTBIT) /* - * Readonly core-specific interrupt info for the device to signal the core. - * The meaning of the contents of this field might change. - */ - /*###\todo: the whole memIntf interrupt scheme is messy: the device should have - * no control what so ever of what VPE/register set is being used. - * The MemIntf should only expose interrupt lines, and something in the - * config should be responsible for the line<->core/vpe bindings. + * Readonly core-specific interrupt info for the device to signal the + * core. The meaning of the contents of this field might change. + * + * TODO: the whole memIntf interrupt scheme is messy: the device should + * have no control what so ever of what VPE/register set is being + * used. The MemIntf should only expose interrupt lines, and + * something in the config should be responsible for the + * line<->core/vpe bindings. */ - uint32_t interruptInfo; /*0x18 */ + uint32_t interruptInfo; /* 0x18 */ /* * This is where the received data is read out. * There is more data to read until rxDataReady is 0. * Only 1 byte at this regs offset is used. */ - uint32_t rxDataBuffer; /*0x1c */ + uint32_t rxDataBuffer; /* 0x1c */ /* - * This is where the data to transmit is written. - * Data should be written for the amount specified in the txDataCount register. - * Only 1 byte at this regs offset is used. + * This is where the data to transmit is written. Data should be + * written for the amount specified in the txDataCount register. Only + * 1 byte at this regs offset is used. 
*/ - uint32_t txDataBuffer; /*0x20 */ -} MIPS_T_NetControl; + uint32_t txDataBuffer; /* 0x20 */ +}; #define MIPSNET_IO_EXTENT 0x40 /* being generous */ -#define field_offset(field) ((int)&((MIPS_T_NetControl*)(0))->field) +#define field_offset(field) (offsetof(struct net_control_block, field)) #endif /* __MIPSNET_H */ diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index e029b8a..89b3f0b 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -884,7 +884,7 @@ static int __devinit mlx4_init_one(struct pci_dev *pdev, ++mlx4_version_printed; } - return mlx4_init_one(pdev, id); + return __mlx4_init_one(pdev, id); } static void mlx4_remove_one(struct pci_dev *pdev) diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index b33d21f..84f2d63 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -784,7 +784,6 @@ static int mv643xx_eth_open(struct net_device *dev) unsigned int port_num = mp->port_num; unsigned int size; int err; - DECLARE_MAC_BUF(mac); /* Clear any pending ethernet port interrupts */ mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); @@ -1296,6 +1295,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) struct ethtool_cmd cmd; int duplex = DUPLEX_HALF; int speed = 0; /* default to auto-negotiation */ + DECLARE_MAC_BUF(mac); pd = pdev->dev.platform_data; if (pd == NULL) { diff --git a/drivers/net/mvme147.c b/drivers/net/mvme147.c index 86c9c06..06ca425 100644 --- a/drivers/net/mvme147.c +++ b/drivers/net/mvme147.c @@ -85,7 +85,6 @@ struct net_device * __init mvme147lance_probe(int unit) dev->open = &m147lance_open; dev->stop = &m147lance_close; dev->hard_start_xmit = &lance_start_xmit; - dev->get_stats = &lance_get_stats; dev->set_multicast_list = &lance_set_multicast; dev->tx_timeout = &lance_tx_timeout; dev->dma = 0; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index e8afa10..64c8151 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -75,7 +75,7 @@ #include "myri10ge_mcp.h" #include "myri10ge_mcp_gen_header.h" -#define MYRI10GE_VERSION_STR "1.3.2-1.269" +#define MYRI10GE_VERSION_STR "1.3.2-1.287" MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); MODULE_AUTHOR("Maintainer: help@myri.com"); @@ -214,6 +214,8 @@ struct myri10ge_priv { unsigned long serial_number; int vendor_specific_offset; int fw_multicast_support; + unsigned long features; + u32 max_tso6; u32 read_dma; u32 write_dma; u32 read_write_dma; @@ -311,6 +313,7 @@ MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n"); #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8) static void myri10ge_set_multicast_list(struct net_device *dev); +static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev); static inline void put_be32(__be32 val, __be32 __iomem * p) { @@ -612,6 +615,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) __be32 buf[16]; u32 dma_low, dma_high, size; int status, i; + struct myri10ge_cmd cmd; size = 0; status = myri10ge_load_hotplug_firmware(mgp, &size); @@ -688,6 +692,14 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp) dev_info(&mgp->pdev->dev, "handoff confirmed\n"); myri10ge_dummy_rdma(mgp, 1); + /* probe for IPv6 TSO support */ + mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; + status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, + &cmd, 0); + if (status == 0) { + mgp->max_tso6 = cmd.data0; + mgp->features |= NETIF_F_TSO6; + } return 0; } @@ -1047,7 +1059,8 
@@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN; - /* allocate an skb to attach the page(s) to. */ + /* allocate an skb to attach the page(s) to. This is done + * after trying LRO, so as to avoid skb allocation overheads */ skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16); if (unlikely(skb == NULL)) { @@ -1217,7 +1230,8 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp) static int myri10ge_poll(struct napi_struct *napi, int budget) { - struct myri10ge_priv *mgp = container_of(napi, struct myri10ge_priv, napi); + struct myri10ge_priv *mgp = + container_of(napi, struct myri10ge_priv, napi); struct net_device *netdev = mgp->dev; struct myri10ge_rx_done *rx_done = &mgp->rx_done; int work_done; @@ -1382,6 +1396,18 @@ static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled) return 0; } +static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled) +{ + struct myri10ge_priv *mgp = netdev_priv(netdev); + unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO); + + if (tso_enabled) + netdev->features |= flags; + else + netdev->features &= ~flags; + return 0; +} + static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = { "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors", "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions", @@ -1506,7 +1532,7 @@ static const struct ethtool_ops myri10ge_ethtool_ops = { .set_rx_csum = myri10ge_set_rx_csum, .set_tx_csum = ethtool_op_set_tx_hw_csum, .set_sg = ethtool_op_set_sg, - .set_tso = ethtool_op_set_tso, + .set_tso = myri10ge_set_tso, .get_link = ethtool_op_get_link, .get_strings = myri10ge_get_strings, .get_sset_count = myri10ge_get_sset_count, @@ -2164,7 +2190,8 @@ again: pseudo_hdr_offset = cksum_offset + skb->csum_offset; /* If the headers are excessively large, then we must * fall back to a software checksum */ - if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) { + if (unlikely(!mss && (cksum_offset > 255 || + pseudo_hdr_offset > 127))) { if (skb_checksum_help(skb)) goto drop; cksum_offset = 0; @@ -2184,9 +2211,18 @@ again: /* negative cum_len signifies to the * send loop that we are still in the * header portion of the TSO packet. - * TSO header must be at most 134 bytes long */ + * TSO header can be at most 1KB long */ cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb)); + /* for IPv6 TSO, the checksum offset stores the + * TCP header length, to save the firmware from + * the need to parse the headers */ + if (skb_is_gso_v6(skb)) { + cksum_offset = tcp_hdrlen(skb); + /* Can only handle headers <= max_tso6 long */ + if (unlikely(-cum_len > mgp->max_tso6)) + return myri10ge_sw_tso(skb, dev); + } /* for TSO, pseudo_hdr_offset holds mss. * The firmware figures out where to put * the checksum by parsing the header. 
*/ @@ -2301,10 +2337,12 @@ again: req++; count++; rdma_count++; - if (unlikely(cksum_offset > seglen)) - cksum_offset -= seglen; - else - cksum_offset = 0; + if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) { + if (unlikely(cksum_offset > seglen)) + cksum_offset -= seglen; + else + cksum_offset = 0; + } } if (frag_idx == frag_cnt) break; @@ -2387,6 +2425,41 @@ drop: } +static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) +{ + struct sk_buff *segs, *curr; + struct myri10ge_priv *mgp = dev->priv; + int status; + + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); + if (unlikely(IS_ERR(segs))) + goto drop; + + while (segs) { + curr = segs; + segs = segs->next; + curr->next = NULL; + status = myri10ge_xmit(curr, dev); + if (status != 0) { + dev_kfree_skb_any(curr); + if (segs != NULL) { + curr = segs; + segs = segs->next; + curr->next = NULL; + dev_kfree_skb_any(segs); + } + goto drop; + } + } + dev_kfree_skb_any(skb); + return 0; + +drop: + dev_kfree_skb_any(skb); + mgp->stats.tx_dropped += 1; + return 0; +} + static struct net_device_stats *myri10ge_get_stats(struct net_device *dev) { struct myri10ge_priv *mgp = netdev_priv(dev); @@ -2706,7 +2779,6 @@ static void myri10ge_select_firmware(struct myri10ge_priv *mgp) } #ifdef CONFIG_PM - static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state) { struct myri10ge_priv *mgp; @@ -2787,7 +2859,6 @@ abort_with_enabled: return -EIO; } - #endif /* CONFIG_PM */ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp) @@ -2954,8 +3025,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mgp = netdev_priv(netdev); mgp->dev = netdev; - netif_napi_add(netdev, &mgp->napi, - myri10ge_poll, myri10ge_napi_weight); + netif_napi_add(netdev, &mgp->napi, myri10ge_poll, myri10ge_napi_weight); mgp->pdev = pdev; mgp->csum_flag = MXGEFW_FLAGS_CKSUM; mgp->pause = myri10ge_flow_control; @@ -3077,7 +3147,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->change_mtu = myri10ge_change_mtu; netdev->set_multicast_list = myri10ge_set_multicast_list; netdev->set_mac_address = myri10ge_set_mac_address; - netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO; + netdev->features = mgp->features; if (dac_enabled) netdev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h index a1d2a22..58e5717 100644 --- a/drivers/net/myri10ge/myri10ge_mcp.h +++ b/drivers/net/myri10ge/myri10ge_mcp.h @@ -10,7 +10,7 @@ struct mcp_dma_addr { __be32 low; }; -/* 4 Bytes */ +/* 4 Bytes. 8 Bytes for NDIS drivers. */ struct mcp_slot { __sum16 checksum; __be16 length; @@ -205,8 +205,87 @@ enum myri10ge_mcp_cmd_type { /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned * chipset */ - MXGEFW_CMD_UNALIGNED_STATUS - /* return data = boolean, true if the chipset is known to be unaligned */ + MXGEFW_CMD_UNALIGNED_STATUS, + /* return data = boolean, true if the chipset is known to be unaligned */ + + MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS, + /* data0 = number of big buffers to use. It must be 0 or a power of 2. + * 0 indicates that the NIC consumes as many buffers as they are required + * for packet. This is the default behavior. + * A power of 2 number indicates that the NIC always uses the specified + * number of buffers for each big receive packet. + * It is up to the driver to ensure that this value is big enough for + * the NIC to be able to receive maximum-sized packets. 
+ */ + + MXGEFW_CMD_GET_MAX_RSS_QUEUES, + MXGEFW_CMD_ENABLE_RSS_QUEUES, + /* data0 = number of slices n (0, 1, ..., n-1) to enable + * data1 = interrupt mode. 0=share one INTx/MSI, 1=use one MSI-X per queue. + * If all queues share one interrupt, the driver must have set + * RSS_SHARED_INTERRUPT_DMA before enabling queues. + */ + MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, + MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, + /* data0, data1 = bus address lsw, msw */ + MXGEFW_CMD_GET_RSS_TABLE_OFFSET, + /* get the offset of the indirection table */ + MXGEFW_CMD_SET_RSS_TABLE_SIZE, + /* set the size of the indirection table */ + MXGEFW_CMD_GET_RSS_KEY_OFFSET, + /* get the offset of the secret key */ + MXGEFW_CMD_RSS_KEY_UPDATED, + /* tell nic that the secret key's been updated */ + MXGEFW_CMD_SET_RSS_ENABLE, + /* data0 = enable/disable rss + * 0: disable rss. nic does not distribute receive packets. + * 1: enable rss. nic distributes receive packets among queues. + * data1 = hash type + * 1: IPV4 + * 2: TCP_IPV4 + * 3: IPV4 | TCP_IPV4 + */ + + MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, + /* Return data = the max. size of the entire headers of a IPv6 TSO packet. + * If the header size of a IPv6 TSO packet is larger than the specified + * value, then the driver must not use TSO. + * This size restriction only applies to IPv6 TSO. + * For IPv4 TSO, the maximum size of the headers is fixed, and the NIC + * always has enough header buffer to store maximum-sized headers. + */ + + MXGEFW_CMD_SET_TSO_MODE, + /* data0 = TSO mode. + * 0: Linux/FreeBSD style (NIC default) + * 1: NDIS/NetBSD style + */ + + MXGEFW_CMD_MDIO_READ, + /* data0 = dev_addr (PMA/PMD or PCS ...), data1 = register/addr */ + MXGEFW_CMD_MDIO_WRITE, + /* data0 = dev_addr, data1 = register/addr, data2 = value */ + + MXGEFW_CMD_XFP_I2C_READ, + /* Starts to get a fresh copy of one byte or of the whole xfp i2c table, the + * obtained data is cached inside the xaui-xfi chip : + * data0 : "all" flag : 0 => get one byte, 1=> get 256 bytes, + * data1 : if (data0 == 0): index of byte to refresh [ not used otherwise ] + * The operation might take ~1ms for a single byte or ~65ms when refreshing all 256 bytes + * During the i2c operation, MXGEFW_CMD_XFP_I2C_READ or MXGEFW_CMD_XFP_BYTE attempts + * will return MXGEFW_CMD_ERROR_BUSY + */ + MXGEFW_CMD_XFP_BYTE, + /* Return the last obtained copy of a given byte in the xfp i2c table + * (copy cached during the last relevant MXGEFW_CMD_XFP_I2C_READ) + * data0 : index of the desired table entry + * Return data = the byte stored at the requested index in the table + */ + + MXGEFW_CMD_GET_VPUMP_OFFSET, + /* Return data = NIC memory offset of mcp_vpump_public_global */ + MXGEFW_CMD_RESET_VPUMP, + /* Resets the VPUMP state */ }; enum myri10ge_mcp_cmd_status { @@ -220,7 +299,10 @@ enum myri10ge_mcp_cmd_status { MXGEFW_CMD_ERROR_BAD_PORT, MXGEFW_CMD_ERROR_RESOURCES, MXGEFW_CMD_ERROR_MULTICAST, - MXGEFW_CMD_ERROR_UNALIGNED + MXGEFW_CMD_ERROR_UNALIGNED, + MXGEFW_CMD_ERROR_NO_MDIO, + MXGEFW_CMD_ERROR_XFP_FAILURE, + MXGEFW_CMD_ERROR_XFP_ABSENT }; #define MXGEFW_OLD_IRQ_DATA_LEN 40 diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c index 527f9dc..50e1ec6 100644 --- a/drivers/net/natsemi.c +++ b/drivers/net/natsemi.c @@ -1576,7 +1576,7 @@ static int netdev_open(struct net_device *dev) /* Set the timer to check for link beat. 
*/ init_timer(&np->timer); - np->timer.expires = jiffies + NATSEMI_TIMER_FREQ; + np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); np->timer.data = (unsigned long)dev; np->timer.function = &netdev_timer; /* timer handler */ add_timer(&np->timer); @@ -1856,7 +1856,11 @@ static void netdev_timer(unsigned long data) next_tick = 1; } } - mod_timer(&np->timer, jiffies + next_tick); + + if (next_tick > 1) + mod_timer(&np->timer, round_jiffies(jiffies + next_tick)); + else + mod_timer(&np->timer, jiffies + next_tick); } static void dump_ring(struct net_device *dev) @@ -3310,13 +3314,19 @@ static int natsemi_resume (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata (pdev); struct netdev_private *np = netdev_priv(dev); + int ret = 0; rtnl_lock(); if (netif_device_present(dev)) goto out; if (netif_running(dev)) { BUG_ON(!np->hands_off); - pci_enable_device(pdev); + ret = pci_enable_device(pdev); + if (ret < 0) { + dev_err(&pdev->dev, + "pci_enable_device() failed: %d\n", ret); + goto out; + } /* pci_power_on(pdev); */ napi_enable(&np->napi); @@ -3331,12 +3341,12 @@ static int natsemi_resume (struct pci_dev *pdev) spin_unlock_irq(&np->lock); enable_irq(dev->irq); - mod_timer(&np->timer, jiffies + 1*HZ); + mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); } netif_device_attach(dev); out: rtnl_unlock(); - return 0; + return ret; } #endif /* CONFIG_PM */ diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 368f256..fbc7531 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c @@ -93,7 +93,7 @@ static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr bus_width = *(volatile unsigned char *)ABWCR; bus_width &= 1 << ((base_addr >> 21) & 7); - for (i = 0; i < sizeof(reg_offset) / sizeof(u32); i++) + for (i = 0; i < ARRAY_SIZE(reg_offset); i++) if (bus_width == 0) reg_offset[i] = i * 2 + 1; else @@ -115,7 +115,7 @@ static int h8300_ne_irq[] = {EXT_IRQ5}; static inline int init_dev(struct net_device *dev) { - if (h8300_ne_count < (sizeof(h8300_ne_base) / sizeof(unsigned long))) { + if (h8300_ne_count < ARRAY_SIZE(h8300_ne_base)) { dev->base_addr = h8300_ne_base[h8300_ne_count]; dev->irq = h8300_ne_irq[h8300_ne_count]; h8300_ne_count++; diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c index 0976852..3edc971 100644 --- a/drivers/net/ni65.c +++ b/drivers/net/ni65.c @@ -183,7 +183,7 @@ static struct card { short addr_offset; unsigned char *vendor_id; char *cardname; - long config; + unsigned long config; } cards[] = { { .id0 = NI65_ID0, diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 43bfe7e..ed1f9bb 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -6123,19 +6123,19 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) val = nr64(ESPC_PHY_TYPE); switch (np->port) { case 0: - val = (val & ESPC_PHY_TYPE_PORT0) >> + val8 = (val & ESPC_PHY_TYPE_PORT0) >> ESPC_PHY_TYPE_PORT0_SHIFT; break; case 1: - val = (val & ESPC_PHY_TYPE_PORT1) >> + val8 = (val & ESPC_PHY_TYPE_PORT1) >> ESPC_PHY_TYPE_PORT1_SHIFT; break; case 2: - val = (val & ESPC_PHY_TYPE_PORT2) >> + val8 = (val & ESPC_PHY_TYPE_PORT2) >> ESPC_PHY_TYPE_PORT2_SHIFT; break; case 3: - val = (val & ESPC_PHY_TYPE_PORT3) >> + val8 = (val & ESPC_PHY_TYPE_PORT3) >> ESPC_PHY_TYPE_PORT3_SHIFT; break; default: @@ -6143,9 +6143,9 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) np->port); return -EINVAL; } - niudbg(PROBE, "SPROM: PHY type %llx\n", (unsigned long long) val); + niudbg(PROBE, "SPROM: PHY type %x\n", val8); - switch (val) { + switch (val8) { 
case ESPC_PHY_TYPE_1G_COPPER: /* 1G copper, MII */ np->flags &= ~(NIU_FLAGS_FIBER | @@ -6175,8 +6175,7 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) break; default: - dev_err(np->device, PFX "Bogus SPROM phy type %llu\n", - (unsigned long long) val); + dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8); return -EINVAL; } @@ -6213,7 +6212,7 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) val = nr64(ESPC_MOD_STR_LEN); niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long) val); - if (val > 8 * 4) + if (val >= 8 * 4) return -EINVAL; for (i = 0; i < val; i += 4) { @@ -6229,7 +6228,7 @@ static int __devinit niu_pci_probe_sprom(struct niu *np) val = nr64(ESPC_BD_MOD_STR_LEN); niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long) val); - if (val > 4 * 4) + if (val >= 4 * 4) return -EINVAL; for (i = 0; i < val; i += 4) { diff --git a/drivers/net/saa9730.c b/drivers/net/saa9730.c index 14361e8..c65199d 100644 --- a/drivers/net/saa9730.c +++ b/drivers/net/saa9730.c @@ -97,13 +97,16 @@ static void evm_saa9730_unblock_lan_int(struct lan_saa9730_private *lp) &lp->evm_saa9730_regs->InterruptBlock1); } -static void __attribute_used__ show_saa9730_regs(struct lan_saa9730_private *lp) +static void __used show_saa9730_regs(struct net_device *dev) { + struct lan_saa9730_private *lp = netdev_priv(dev); int i, j; + printk("TxmBufferA = %p\n", lp->TxmBuffer[0][0]); printk("TxmBufferB = %p\n", lp->TxmBuffer[1][0]); printk("RcvBufferA = %p\n", lp->RcvBuffer[0][0]); printk("RcvBufferB = %p\n", lp->RcvBuffer[1][0]); + for (i = 0; i < LAN_SAA9730_BUFFERS; i++) { for (j = 0; j < LAN_SAA9730_TXM_Q_SIZE; j++) { printk("TxmBuffer[%d][%d] = %x\n", i, j, @@ -146,11 +149,13 @@ static void __attribute_used__ show_saa9730_regs(struct lan_saa9730_private *lp) readl(&lp->lan_saa9730_regs->RxCtl)); printk("lp->lan_saa9730_regs->RxStatus = %x\n", readl(&lp->lan_saa9730_regs->RxStatus)); + for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) { writel(i, &lp->lan_saa9730_regs->CamAddress); printk("lp->lan_saa9730_regs->CamData = %x\n", readl(&lp->lan_saa9730_regs->CamData)); } + printk("dev->stats.tx_packets = %lx\n", dev->stats.tx_packets); printk("dev->stats.tx_errors = %lx\n", dev->stats.tx_errors); printk("dev->stats.tx_aborted_errors = %lx\n", @@ -855,7 +860,7 @@ static void lan_saa9730_tx_timeout(struct net_device *dev) /* Transmitter timeout, serious problems */ dev->stats.tx_errors++; printk("%s: transmit timed out, reset\n", dev->name); - /*show_saa9730_regs(lp); */ + /*show_saa9730_regs(dev); */ lan_saa9730_restart(lp); dev->trans_start = jiffies; diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 2aae9fe..b9961dc 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -36,13 +36,15 @@ #include <linux/delay.h> #include <linux/crc32.h> #include <linux/dma-mapping.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> #include <linux/mii.h> #include <asm/irq.h> #include "skge.h" #define DRV_NAME "skge" -#define DRV_VERSION "1.11" +#define DRV_VERSION "1.12" #define PFX DRV_NAME " " #define DEFAULT_TX_RING_SIZE 128 @@ -57,7 +59,10 @@ #define TX_WATCHDOG (5 * HZ) #define NAPI_WEIGHT 64 #define BLINK_MS 250 -#define LINK_HZ (HZ/2) +#define LINK_HZ HZ + +#define SKGE_EEPROM_MAGIC 0x9933aabb + MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); @@ -445,15 +450,15 @@ static struct net_device_stats *skge_get_stats(struct net_device *dev) else yukon_get_stats(skge, data); - 
skge->net_stats.tx_bytes = data[0]; - skge->net_stats.rx_bytes = data[1]; - skge->net_stats.tx_packets = data[2] + data[4] + data[6]; - skge->net_stats.rx_packets = data[3] + data[5] + data[7]; - skge->net_stats.multicast = data[3] + data[5]; - skge->net_stats.collisions = data[10]; - skge->net_stats.tx_aborted_errors = data[12]; + dev->stats.tx_bytes = data[0]; + dev->stats.rx_bytes = data[1]; + dev->stats.tx_packets = data[2] + data[4] + data[6]; + dev->stats.rx_packets = data[3] + data[5] + data[7]; + dev->stats.multicast = data[3] + data[5]; + dev->stats.collisions = data[10]; + dev->stats.tx_aborted_errors = data[12]; - return &skge->net_stats; + return &dev->stats; } static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) @@ -798,6 +803,98 @@ static int skge_phys_id(struct net_device *dev, u32 data) return 0; } +static int skge_get_eeprom_len(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u32 reg2; + + pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, ®2); + return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); +} + +static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) +{ + u32 val; + + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (!(offset & PCI_VPD_ADDR_F)); + + pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + return val; +} + +static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) +{ + pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, + offset | PCI_VPD_ADDR_F); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (offset & PCI_VPD_ADDR_F); +} + +static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + eeprom->magic = SKGE_EEPROM_MAGIC; + + while (length > 0) { + u32 val = skge_vpd_read(pdev, cap, offset); + int n = min_t(int, length, sizeof(val)); + + memcpy(data, &val, n); + length -= n; + data += n; + offset += n; + } + return 0; +} + +static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + if (eeprom->magic != SKGE_EEPROM_MAGIC) + return -EINVAL; + + while (length > 0) { + u32 val; + int n = min_t(int, length, sizeof(val)); + + if (n < sizeof(val)) + val = skge_vpd_read(pdev, cap, offset); + memcpy(&val, data, n); + + skge_vpd_write(pdev, cap, offset, val); + + length -= n; + data += n; + offset += n; + } + return 0; +} + static const struct ethtool_ops skge_ethtool_ops = { .get_settings = skge_get_settings, .set_settings = skge_set_settings, @@ -810,6 +907,9 @@ static const struct ethtool_ops skge_ethtool_ops = { .set_msglevel = skge_set_msglevel, .nway_reset = skge_nway_reset, .get_link = ethtool_op_get_link, + .get_eeprom_len = skge_get_eeprom_len, + .get_eeprom = skge_get_eeprom, + .set_eeprom = skge_set_eeprom, .get_ringparam = skge_get_ring_param, .set_ringparam = skge_set_ring_param, .get_pauseparam = skge_get_pauseparam, @@ -995,19 +1095,15 @@ static void 
xm_link_down(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; struct skge_port *skge = netdev_priv(dev); - u16 cmd, msk; + u16 cmd = xm_read16(hw, port, XM_MMU_CMD); - if (hw->phy_type == SK_PHY_XMAC) { - msk = xm_read16(hw, port, XM_IMSK); - msk |= XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND; - xm_write16(hw, port, XM_IMSK, msk); - } + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); - cmd = xm_read16(hw, port, XM_MMU_CMD); cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); xm_write16(hw, port, XM_MMU_CMD, cmd); + /* dummy read to ensure writing */ - (void) xm_read16(hw, port, XM_MMU_CMD); + xm_read16(hw, port, XM_MMU_CMD); if (netif_carrier_ok(dev)) skge_link_down(skge); @@ -1103,7 +1199,7 @@ static void genesis_reset(struct skge_hw *hw, int port) /* reset the statistics module */ xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); - xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ @@ -1141,7 +1237,7 @@ static void bcom_check_link(struct skge_hw *hw, int port) u16 status; /* read twice because of latch */ - (void) xm_phy_read(hw, port, PHY_BCOM_STAT); + xm_phy_read(hw, port, PHY_BCOM_STAT); status = xm_phy_read(hw, port, PHY_BCOM_STAT); if ((status & PHY_ST_LSYNC) == 0) { @@ -1342,7 +1438,7 @@ static void xm_phy_init(struct skge_port *skge) mod_timer(&skge->link_timer, jiffies + LINK_HZ); } -static void xm_check_link(struct net_device *dev) +static int xm_check_link(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; @@ -1350,25 +1446,25 @@ static void xm_check_link(struct net_device *dev) u16 status; /* read twice because of latch */ - (void) xm_phy_read(hw, port, PHY_XMAC_STAT); + xm_phy_read(hw, port, PHY_XMAC_STAT); status = xm_phy_read(hw, port, PHY_XMAC_STAT); if ((status & PHY_ST_LSYNC) == 0) { xm_link_down(hw, port); - return; + return 0; } if (skge->autoneg == AUTONEG_ENABLE) { u16 lpa, res; if (!(status & PHY_ST_AN_OVER)) - return; + return 0; lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); if (lpa & PHY_B_AN_RF) { printk(KERN_NOTICE PFX "%s: remote fault\n", dev->name); - return; + return 0; } res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); @@ -1384,7 +1480,7 @@ static void xm_check_link(struct net_device *dev) default: printk(KERN_NOTICE PFX "%s: duplex mismatch\n", dev->name); - return; + return 0; } /* We are using IEEE 802.3z/D5.0 Table 37-4 */ @@ -1408,11 +1504,14 @@ static void xm_check_link(struct net_device *dev) if (!netif_carrier_ok(dev)) genesis_link_up(skge); + return 1; } /* Poll to check for link coming up. + * * Since internal PHY is wired to a level triggered pin, can't - * get an interrupt when carrier is detected. + * get an interrupt when carrier is detected, need to poll for + * link coming up. */ static void xm_link_timer(unsigned long arg) { @@ -1420,29 +1519,35 @@ static void xm_link_timer(unsigned long arg) struct net_device *dev = skge->netdev; struct skge_hw *hw = skge->hw; int port = skge->port; + int i; + unsigned long flags; if (!netif_running(dev)) return; - if (netif_carrier_ok(dev)) { + spin_lock_irqsave(&hw->phy_lock, flags); + + /* + * Verify that the link by checking GPIO register three times. + * This pin has the signal from the link_sync pin connected to it. 
+ */ + for (i = 0; i < 3; i++) { + if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) + goto link_down; + } + + /* Re-enable interrupt to detect link down */ + if (xm_check_link(dev)) { + u16 msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_INP_ASS; + xm_write16(hw, port, XM_IMSK, msk); xm_read16(hw, port, XM_ISRC); - if (!(xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS)) - goto nochange; } else { - if (xm_read32(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) - goto nochange; - xm_read16(hw, port, XM_ISRC); - if (xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS) - goto nochange; +link_down: + mod_timer(&skge->link_timer, + round_jiffies(jiffies + LINK_HZ)); } - - spin_lock(&hw->phy_lock); - xm_check_link(dev); - spin_unlock(&hw->phy_lock); - -nochange: - if (netif_running(dev)) - mod_timer(&skge->link_timer, jiffies + LINK_HZ); + spin_unlock_irqrestore(&hw->phy_lock, flags); } static void genesis_mac_init(struct skge_hw *hw, int port) @@ -1679,24 +1784,27 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data) static void genesis_mac_intr(struct skge_hw *hw, int port) { - struct skge_port *skge = netdev_priv(hw->dev[port]); + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); u16 status = xm_read16(hw, port, XM_ISRC); if (netif_msg_intr(skge)) printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n", - skge->netdev->name, status); + dev->name, status); - if (hw->phy_type == SK_PHY_XMAC && - (status & (XM_IS_INP_ASS | XM_IS_LIPA_RC))) - xm_link_down(hw, port); + if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { + xm_link_down(hw, port); + mod_timer(&skge->link_timer, jiffies + 1); + } if (status & XM_IS_TXF_UR) { xm_write32(hw, port, XM_MODE, XM_MD_FTF); - ++skge->net_stats.tx_fifo_errors; + ++dev->stats.tx_fifo_errors; } + if (status & XM_IS_RXF_OV) { xm_write32(hw, port, XM_MODE, XM_MD_FRF); - ++skge->net_stats.rx_fifo_errors; + ++dev->stats.rx_fifo_errors; } } @@ -1753,11 +1861,12 @@ static void genesis_link_up(struct skge_port *skge) } xm_write32(hw, port, XM_MODE, mode); - msk = XM_DEF_MSK; - if (hw->phy_type != SK_PHY_XMAC) - msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */ + /* Turn on detection of Tx underrun, Rx overrun */ + msk = xm_read16(hw, port, XM_IMSK); + msk &= ~(XM_IS_RXF_OV | XM_IS_TXF_UR); xm_write16(hw, port, XM_IMSK, msk); + xm_read16(hw, port, XM_ISRC); /* get MMU Command Reg. 
*/ @@ -2192,12 +2301,12 @@ static void yukon_mac_intr(struct skge_hw *hw, int port) dev->name, status); if (status & GM_IS_RX_FF_OR) { - ++skge->net_stats.rx_fifo_errors; + ++dev->stats.rx_fifo_errors; skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); } if (status & GM_IS_TX_FF_UR) { - ++skge->net_stats.tx_fifo_errors; + ++dev->stats.tx_fifo_errors; skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); } @@ -2403,32 +2512,31 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return err; } -static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) +/* Assign Ram Buffer allocation to queue */ +static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, u32 space) { u32 end; - start /= 8; - len /= 8; - end = start + len - 1; + /* convert from K bytes to qwords used for hw register */ + start *= 1024/8; + space *= 1024/8; + end = start + space - 1; skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); skge_write32(hw, RB_ADDR(q, RB_START), start); + skge_write32(hw, RB_ADDR(q, RB_END), end); skge_write32(hw, RB_ADDR(q, RB_WP), start); skge_write32(hw, RB_ADDR(q, RB_RP), start); - skge_write32(hw, RB_ADDR(q, RB_END), end); if (q == Q_R1 || q == Q_R2) { + u32 tp = space - space/4; + /* Set thresholds on receive queue's */ - skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), - start + (2*len)/3); - skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), - start + (len/3)); - } else { - /* Enable store & forward on Tx queue's because - * Tx FIFO is only 4K on Genesis and 1K on Yukon - */ + skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp); + skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4); + } else if (hw->chip_id != CHIP_ID_GENESIS) + /* Genesis Tx Fifo is too small for normal store/forward */ skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); - } skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); } @@ -2456,7 +2564,7 @@ static int skge_up(struct net_device *dev) struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; int port = skge->port; - u32 chunk, ram_addr; + u32 ramaddr, ramsize, rxspace; size_t rx_size, tx_size; int err; @@ -2511,14 +2619,15 @@ static int skge_up(struct net_device *dev) spin_unlock_bh(&hw->phy_lock); /* Configure RAMbuffers */ - chunk = hw->ram_size / ((hw->ports + 1)*2); - ram_addr = hw->ram_offset + 2 * chunk * port; + ramsize = (hw->ram_size - hw->ram_offset) / hw->ports; + ramaddr = hw->ram_offset + port * ramsize; + rxspace = 8 + (2*(ramsize - 16))/3; - skge_ramset(hw, rxqaddr[port], ram_addr, chunk); - skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); + skge_ramset(hw, rxqaddr[port], ramaddr, rxspace); + skge_ramset(hw, txqaddr[port], ramaddr + rxspace, ramsize - rxspace); + skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); - skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); /* Start receiver BMU */ @@ -2544,6 +2653,15 @@ static int skge_up(struct net_device *dev) return err; } +/* stop receiver */ +static void skge_rx_stop(struct skge_hw *hw, int port) +{ + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(port ? 
Q_R2 : Q_R1, RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); +} + static int skge_down(struct net_device *dev) { struct skge_port *skge = netdev_priv(dev); @@ -2595,11 +2713,8 @@ static int skge_down(struct net_device *dev) /* Reset the RAM Buffer async Tx queue */ skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); - /* stop receiver */ - skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); - skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), - RB_RST_SET|RB_DIS_OP_MD); - skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); + + skge_rx_stop(hw, port); if (hw->chip_id == CHIP_ID_GENESIS) { skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); @@ -2782,7 +2897,11 @@ static void skge_tx_timeout(struct net_device *dev) static int skge_change_mtu(struct net_device *dev, int new_mtu) { + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; int err; + u16 ctl, reg; if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) return -EINVAL; @@ -2792,13 +2911,40 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu) return 0; } - skge_down(dev); + skge_write32(hw, B0_IMSK, 0); + dev->trans_start = jiffies; /* prevent tx timeout */ + netif_stop_queue(dev); + napi_disable(&skge->napi); + + ctl = gma_read16(hw, port, GM_GP_CTRL); + gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); + + skge_rx_clean(skge); + skge_rx_stop(hw, port); dev->mtu = new_mtu; - err = skge_up(dev); + reg = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF); + if (new_mtu > 1500) + reg |= GM_SMOD_JUMBO_ENA; + gma_write16(hw, port, GM_SERIAL_MODE, reg); + + skge_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD); + + err = skge_rx_fill(dev); + wmb(); + if (!err) + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); + skge_write32(hw, B0_IMSK, hw->intr_mask); + if (err) dev_close(dev); + else { + gma_write16(hw, port, GM_GP_CTRL, ctl); + + napi_enable(&skge->napi); + netif_wake_queue(dev); + } return err; } @@ -2994,18 +3140,18 @@ error: if (skge->hw->chip_id == CHIP_ID_GENESIS) { if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) - skge->net_stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (status & XMR_FS_FRA_ERR) - skge->net_stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & XMR_FS_FCS_ERR) - skge->net_stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } else { if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) - skge->net_stats.rx_length_errors++; + dev->stats.rx_length_errors++; if (status & GMR_FS_FRAGMENT) - skge->net_stats.rx_frame_errors++; + dev->stats.rx_frame_errors++; if (status & GMR_FS_CRC_ERR) - skge->net_stats.rx_crc_errors++; + dev->stats.rx_crc_errors++; } resubmit: @@ -3103,10 +3249,7 @@ static void skge_mac_parity(struct skge_hw *hw, int port) { struct net_device *dev = hw->dev[port]; - if (dev) { - struct skge_port *skge = netdev_priv(dev); - ++skge->net_stats.tx_heartbeat_errors; - } + ++dev->stats.tx_heartbeat_errors; if (hw->chip_id == CHIP_ID_GENESIS) skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), @@ -3259,9 +3402,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id) skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); if (status & IS_PA_TO_RX1) { - struct skge_port *skge = netdev_priv(hw->dev[0]); - - ++skge->net_stats.rx_over_errors; + ++hw->dev[0]->stats.rx_over_errors; skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); } @@ -3278,7 +3419,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id) } if (status & 
IS_PA_TO_RX2) { - ++skge->net_stats.rx_over_errors; + ++hw->dev[1]->stats.rx_over_errors; skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); } @@ -3450,15 +3591,12 @@ static int skge_reset(struct skge_hw *hw) if (hw->chip_id == CHIP_ID_GENESIS) { if (t8 == 3) { /* special case: 4 x 64k x 36, offset = 0x80000 */ - hw->ram_size = 0x100000; - hw->ram_offset = 0x80000; + hw->ram_size = 1024; + hw->ram_offset = 512; } else hw->ram_size = t8 * 512; - } - else if (t8 == 0) - hw->ram_size = 0x20000; - else - hw->ram_size = t8 * 4096; + } else /* Yukon */ + hw->ram_size = t8 ? t8 * 4 : 128; hw->intr_mask = IS_HW_ERR; @@ -3540,6 +3678,145 @@ static int skge_reset(struct skge_hw *hw) return 0; } + +#ifdef CONFIG_SKGE_DEBUG + +static struct dentry *skge_debug; + +static int skge_debug_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + const struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + const struct skge_element *e; + + if (!netif_running(dev)) + return -ENETDOWN; + + seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), + skge_read32(hw, B0_IMSK)); + + seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + const struct skge_tx_desc *t = e->desc; + seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", + t->control, t->dma_hi, t->dma_lo, t->status, + t->csum_offs, t->csum_write, t->csum_start); + } + + seq_printf(seq, "\nRx Ring: \n"); + for (e = skge->rx_ring.to_clean; ; e = e->next) { + const struct skge_rx_desc *r = e->desc; + + if (r->control & BMU_OWN) + break; + + seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", + r->control, r->dma_hi, r->dma_lo, r->status, + r->timestamp, r->csum1, r->csum1_start); + } + + return 0; +} + +static int skge_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, skge_debug_show, inode->i_private); +} + +static const struct file_operations skge_debug_fops = { + .owner = THIS_MODULE, + .open = skge_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Use network device events to create/remove/rename + * debugfs file entries + */ +static int skge_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = ptr; + struct skge_port *skge; + struct dentry *d; + + if (dev->open != &skge_up || !skge_debug) + goto done; + + skge = netdev_priv(dev); + switch(event) { + case NETDEV_CHANGENAME: + if (skge->debugfs) { + d = debugfs_rename(skge_debug, skge->debugfs, + skge_debug, dev->name); + if (d) + skge->debugfs = d; + else { + pr_info(PFX "%s: rename failed\n", dev->name); + debugfs_remove(skge->debugfs); + } + } + break; + + case NETDEV_GOING_DOWN: + if (skge->debugfs) { + debugfs_remove(skge->debugfs); + skge->debugfs = NULL; + } + break; + + case NETDEV_UP: + d = debugfs_create_file(dev->name, S_IRUGO, + skge_debug, dev, + &skge_debug_fops); + if (!d || IS_ERR(d)) + pr_info(PFX "%s: debugfs create failed\n", + dev->name); + else + skge->debugfs = d; + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block skge_notifier = { + .notifier_call = skge_device_event, +}; + + +static __init void skge_debug_init(void) +{ + struct dentry *ent; + + ent = debugfs_create_dir("skge", NULL); + if (!ent || IS_ERR(ent)) { + pr_info(PFX "debugfs create directory failed\n"); + return; + } + + skge_debug = ent; + register_netdevice_notifier(&skge_notifier); +} + +static __exit void 
skge_debug_cleanup(void) +{ + if (skge_debug) { + unregister_netdevice_notifier(&skge_notifier); + debugfs_remove(skge_debug); + skge_debug = NULL; + } +} + +#else +#define skge_debug_init() +#define skge_debug_cleanup() +#endif + /* Initialize network device */ static struct net_device *skge_devinit(struct skge_hw *hw, int port, int highmem) @@ -3904,12 +4181,14 @@ static struct pci_driver skge_driver = { static int __init skge_init_module(void) { + skge_debug_init(); return pci_register_driver(&skge_driver); } static void __exit skge_cleanup_module(void) { pci_unregister_driver(&skge_driver); + skge_debug_cleanup(); } module_init(skge_init_module); diff --git a/drivers/net/skge.h b/drivers/net/skge.h index 1a57bdd..17caccb 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h @@ -1,5 +1,5 @@ /* - * Definitions for the new Marvell Yukon / SysKonenct driver. + * Definitions for the new Marvell Yukon / SysKonnect driver. */ #ifndef _SKGE_H #define _SKGE_H @@ -8,8 +8,10 @@ #define PCI_DEV_REG1 0x40 #define PCI_PHY_COMA 0x8000000 #define PCI_VIO 0x2000000 + #define PCI_DEV_REG2 0x44 -#define PCI_REV_DESC 0x4 +#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... */ +#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */ #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ PCI_STATUS_SIG_SYSTEM_ERROR | \ @@ -2191,11 +2193,9 @@ enum { XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */ XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */ XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ -}; - -#define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | \ - XM_IS_RXF_OV | XM_IS_TXF_UR)) + XM_IMSK_DISABLE = 0xffff, +}; /* XM_HW_CFG 16 bit r/w Hardware Config Register */ enum { @@ -2469,8 +2469,9 @@ struct skge_port { void *mem; /* PCI memory for rings */ dma_addr_t dma; unsigned long mem_size; - - struct net_device_stats net_stats; +#ifdef CONFIG_SKGE_DEBUG + struct dentry *debugfs; +#endif }; diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 68f728f..7967240 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -4396,7 +4396,7 @@ static void sky2_shutdown(struct pci_dev *pdev) if (!hw) return; - napi_disable(&hw->napi); + del_timer_sync(&hw->watchdog_timer); for (i = 0; i < hw->ports; i++) { struct net_device *dev = hw->dev[i]; diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 24e610e..7da7589 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -173,49 +173,6 @@ MODULE_LICENSE("GPL"); */ #define MII_DELAY 1 -/* store this information for the driver.. */ -struct smc_local { - /* - * If I have to wait until memory is available to send a - * packet, I will store the skbuff here, until I get the - * desired memory. Then, I'll send it out and free it. - */ - struct sk_buff *pending_tx_skb; - struct tasklet_struct tx_task; - - /* version/revision of the SMC91x chip */ - int version; - - /* Contains the current active transmission mode */ - int tcr_cur_mode; - - /* Contains the current active receive mode */ - int rcr_cur_mode; - - /* Contains the current active receive/phy mode */ - int rpc_cur_mode; - int ctl_rfduplx; - int ctl_rspeed; - - u32 msg_enable; - u32 phy_type; - struct mii_if_info mii; - - /* work queue */ - struct work_struct phy_configure; - struct net_device *dev; - int work_pending; - - spinlock_t lock; - -#ifdef SMC_USE_PXA_DMA - /* DMA needs the physical address of the chip */ - u_long physaddr; -#endif - void __iomem *base; - void __iomem *datacs; -}; - #if SMC_DEBUG > 0 #define DBG(n, args...) 
\ do { \ @@ -2215,17 +2172,19 @@ static int smc_drv_probe(struct platform_device *pdev) goto out_release_attrib; } - platform_set_drvdata(pdev, ndev); - ret = smc_probe(ndev, addr); - if (ret != 0) - goto out_iounmap; #ifdef SMC_USE_PXA_DMA - else { + { struct smc_local *lp = netdev_priv(ndev); + lp->device = &pdev->dev; lp->physaddr = res->start; } #endif + platform_set_drvdata(pdev, ndev); + ret = smc_probe(ndev, addr); + if (ret != 0) + goto out_iounmap; + smc_request_datacs(pdev, ndev); return 0; diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index af9e6bf..729fd28 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -462,6 +462,52 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, #endif + +/* store this information for the driver.. */ +struct smc_local { + /* + * If I have to wait until memory is available to send a + * packet, I will store the skbuff here, until I get the + * desired memory. Then, I'll send it out and free it. + */ + struct sk_buff *pending_tx_skb; + struct tasklet_struct tx_task; + + /* version/revision of the SMC91x chip */ + int version; + + /* Contains the current active transmission mode */ + int tcr_cur_mode; + + /* Contains the current active receive mode */ + int rcr_cur_mode; + + /* Contains the current active receive/phy mode */ + int rpc_cur_mode; + int ctl_rfduplx; + int ctl_rspeed; + + u32 msg_enable; + u32 phy_type; + struct mii_if_info mii; + + /* work queue */ + struct work_struct phy_configure; + struct net_device *dev; + int work_pending; + + spinlock_t lock; + +#ifdef SMC_USE_PXA_DMA + /* DMA needs the physical address of the chip */ + u_long physaddr; + struct device *device; +#endif + void __iomem *base; + void __iomem *datacs; +}; + + #ifdef SMC_USE_PXA_DMA /* * Let's use the DMA engine on the XScale PXA2xx for RX packets. 
This is @@ -476,11 +522,12 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, #ifdef SMC_insl #undef SMC_insl #define SMC_insl(a, r, p, l) \ - smc_pxa_dma_insl(a, lp->physaddr, r, dev->dma, p, l) + smc_pxa_dma_insl(a, lp, r, dev->dma, p, l) static inline void -smc_pxa_dma_insl(void __iomem *ioaddr, u_long physaddr, int reg, int dma, +smc_pxa_dma_insl(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, u_char *buf, int len) { + u_long physaddr = lp->physaddr; dma_addr_t dmabuf; /* fallback if no DMA available */ @@ -497,7 +544,7 @@ smc_pxa_dma_insl(void __iomem *ioaddr, u_long physaddr, int reg, int dma, } len *= 4; - dmabuf = dma_map_single(NULL, buf, len, DMA_FROM_DEVICE); + dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE); DCSR(dma) = DCSR_NODESC; DTADR(dma) = dmabuf; DSADR(dma) = physaddr + reg; @@ -507,18 +554,19 @@ smc_pxa_dma_insl(void __iomem *ioaddr, u_long physaddr, int reg, int dma, while (!(DCSR(dma) & DCSR_STOPSTATE)) cpu_relax(); DCSR(dma) = 0; - dma_unmap_single(NULL, dmabuf, len, DMA_FROM_DEVICE); + dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE); } #endif #ifdef SMC_insw #undef SMC_insw #define SMC_insw(a, r, p, l) \ - smc_pxa_dma_insw(a, lp->physaddr, r, dev->dma, p, l) + smc_pxa_dma_insw(a, lp, r, dev->dma, p, l) static inline void -smc_pxa_dma_insw(void __iomem *ioaddr, u_long physaddr, int reg, int dma, +smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma, u_char *buf, int len) { + u_long physaddr = lp->physaddr; dma_addr_t dmabuf; /* fallback if no DMA available */ @@ -535,7 +583,7 @@ smc_pxa_dma_insw(void __iomem *ioaddr, u_long physaddr, int reg, int dma, } len *= 2; - dmabuf = dma_map_single(NULL, buf, len, DMA_FROM_DEVICE); + dmabuf = dma_map_single(lp->device, buf, len, DMA_FROM_DEVICE); DCSR(dma) = DCSR_NODESC; DTADR(dma) = dmabuf; DSADR(dma) = physaddr + reg; @@ -545,7 +593,7 @@ smc_pxa_dma_insw(void __iomem *ioaddr, u_long physaddr, int reg, int dma, while (!(DCSR(dma) & DCSR_STOPSTATE)) cpu_relax(); DCSR(dma) = 0; - dma_unmap_single(NULL, dmabuf, len, DMA_FROM_DEVICE); + dma_unmap_single(lp->device, dmabuf, len, DMA_FROM_DEVICE); } #endif diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h index a897bee..e1d05c0 100644 --- a/drivers/net/spider_net.h +++ b/drivers/net/spider_net.h @@ -494,7 +494,4 @@ struct spider_net_card { struct spider_net_descr darray[0]; }; -#define pr_err(fmt,arg...) \ - printk(KERN_ERR fmt ,##arg) - #endif diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index a679f43..8038f28 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -1461,7 +1461,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id) } return IRQ_NONE; #else - struct tc35815_local *lp = dev->priv; int handled; u32 status; diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c index 8d04654..4e1b84e 100644 --- a/drivers/net/tehuti.c +++ b/drivers/net/tehuti.c @@ -1906,7 +1906,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /************** pci *****************/ if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. 
*/ - RET(err); /* it's not a problem though */ + goto err_pci; /* it's not a problem though */ if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) && !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) { @@ -2076,6 +2076,7 @@ err_out_res: pci_release_regions(pdev); err_dma: pci_disable_device(pdev); +err_pci: vfree(nic); RET(err); diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 30b1cca..014dc2c 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -64,8 +64,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.83" -#define DRV_MODULE_RELDATE "October 10, 2007" +#define DRV_MODULE_VERSION "3.84" +#define DRV_MODULE_RELDATE "October 12, 2007" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -5056,6 +5056,12 @@ static void tg3_restore_pci_state(struct tg3 *tp) pci_write_config_dword(tp->pdev, TG3PCI_COMMAND, tp->pci_cmd); + if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) { + pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, + tp->pci_cacheline_sz); + pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER, + tp->pci_lat_timer); + } /* Make sure PCI-X relaxed ordering bit is clear. */ if (tp->pcix_cap) { u16 pcix_cmd; @@ -9034,7 +9040,7 @@ static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len) int i; u32 j; - for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) { + for (i = 0; i < ARRAY_SIZE(test_pattern); i++) { for (j = 0; j < len; j += 4) { u32 val; diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c index 7224d36..5d31519 100644 --- a/drivers/net/tokenring/3c359.c +++ b/drivers/net/tokenring/3c359.c @@ -760,7 +760,7 @@ static int xl_open_hw(struct net_device *dev) if (xl_priv->xl_laa[0]) { /* If using a LAA address */ for (i=10;i<16;i++) { writel( (MEM_BYTE_WRITE | 0xD0000 | xl_priv->srb) + i, xl_mmio + MMIO_MAC_ACCESS_CMD) ; - writeb(xl_priv->xl_laa[i],xl_mmio + MMIO_MACDATA) ; + writeb(xl_priv->xl_laa[i-10],xl_mmio + MMIO_MACDATA) ; } memcpy(dev->dev_addr,xl_priv->xl_laa,dev->addr_len) ; } else { /* Regular hardware address */ diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 9b9cd83..41f34bb 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c @@ -1041,7 +1041,7 @@ static struct InfoLeaf infoleaf_array[] = { {DC21142, dc21142_infoleaf}, {DC21143, dc21143_infoleaf} }; -#define INFOLEAF_SIZE (sizeof(infoleaf_array)/(sizeof(int)+sizeof(int *))) +#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array) /* ** List the SROM info block functions @@ -1056,7 +1056,7 @@ static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = { compact_infoblock }; -#define COMPACT (sizeof(dc_infoblock)/sizeof(int *) - 1) +#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1) /* ** Miscellaneous defines... 
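The ne-h8300, tg3, sdla and de4x5 hunks above all replace open-coded sizeof(array)/sizeof(element) bounds with the kernel's ARRAY_SIZE() macro, which derives the element count from the array itself. A minimal standalone sketch of that idiom follows; the local macro definition, the table name and its values are illustrative assumptions rather than anything taken from these drivers (in the kernel the macro comes from <linux/kernel.h>):

/* ARRAY_SIZE idiom: the loop bound tracks the array instead of assuming the
 * element type, which is what sizeof(valid_port)/sizeof(int) silently did.
 * Everything below is a hypothetical stand-alone example. */
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const unsigned int valid_port[] = { 0x250, 0x270, 0x300, 0x350 };

int main(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(valid_port); i++)
		printf("valid_port[%zu] = 0x%x\n", i, valid_port[i]);

	return 0;
}

If the element type is later widened (say, to unsigned long), the bound stays correct without any further edits, which is the point of these conversions.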
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h index 12af0cc..9fb8d7f 100644 --- a/drivers/net/tulip/de4x5.h +++ b/drivers/net/tulip/de4x5.h @@ -1017,4 +1017,4 @@ struct de4x5_ioctl { #define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */ #define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */ -#define MOTO_SROM_BUG ((lp->active == 8) && (((le32_to_cpu(get_unaligned(((s32 *)dev->dev_addr))))&0x00ffffff)==0x3e0008)) +#define MOTO_SROM_BUG ((lp->active == 8) && (((le32_to_cpu(get_unaligned(((__le32 *)dev->dev_addr))))&0x00ffffff)==0x3e0008)) diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index ee08292..e5e2c9c 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c @@ -292,6 +292,7 @@ static void tulip_up(struct net_device *dev) struct tulip_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->base_addr; int next_tick = 3*HZ; + u32 reg; int i; #ifdef CONFIG_TULIP_NAPI @@ -307,14 +308,14 @@ static void tulip_up(struct net_device *dev) /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */ iowrite32(0x00000001, ioaddr + CSR0); - pci_read_config_dword(tp->pdev, PCI_COMMAND, &i); /* flush write */ + pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */ udelay(100); /* Deassert reset. Wait the specified 50 PCI cycles after a reset by initializing Tx and Rx queues and the address filter list. */ iowrite32(tp->csr0, ioaddr + CSR0); - pci_read_config_dword(tp->pdev, PCI_COMMAND, &i); /* flush write */ + pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */ udelay(100); if (tulip_debug > 1) diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index d00e7d4..bec413b 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -63,7 +63,7 @@ #define UGETH_MSG_DEFAULT (NETIF_MSG_IFUP << 1 ) - 1 void uec_set_ethtool_ops(struct net_device *netdev); - + static DEFINE_SPINLOCK(ugeth_lock); static struct { @@ -3454,9 +3454,12 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit u16 length, howmany = 0; u32 bd_status; u8 *bdBuffer; + struct net_device * dev; ugeth_vdbg("%s: IN", __FUNCTION__); + dev = ugeth->dev; + /* collect received buffers */ bd = ugeth->rxBd[rxQ]; diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 26058b4..ff37bf4 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -154,8 +154,8 @@ struct cosa_data { int nchannels; /* # of channels on this card */ int driver_status; /* For communicating with firmware */ int firmware_status; /* Downloaded, reseted, etc. */ - long int rxbitmap, txbitmap; /* Bitmap of channels who are willing to send/receive data */ - long int rxtx; /* RX or TX in progress? */ + unsigned long rxbitmap, txbitmap;/* Bitmap of channels who are willing to send/receive data */ + unsigned long rxtx; /* RX or TX in progress? 
*/ int enabled; int usage; /* usage count */ int txchan, txsize, rxsize; diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c index b39a541..05df0a3 100644 --- a/drivers/net/wan/sdla.c +++ b/drivers/net/wan/sdla.c @@ -1342,11 +1342,11 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map) if (flp->initialized) return(-EINVAL); - for(i=0;i < sizeof(valid_port) / sizeof (int) ; i++) + for(i=0; i < ARRAY_SIZE(valid_port); i++) if (valid_port[i] == map->base_addr) break; - if (i == sizeof(valid_port) / sizeof(int)) + if (i == ARRAY_SIZE(valid_port)) return(-EINVAL); if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ @@ -1487,12 +1487,12 @@ got_type: } } - for(i=0;i < sizeof(valid_mem) / sizeof (int) ; i++) + for(i=0; i < ARRAY_SIZE(valid_mem); i++) if (valid_mem[i] == map->mem_start) break; err = -EINVAL; - if (i == sizeof(valid_mem) / sizeof(int)) + if (i == ARRAY_SIZE(valid_mem)) goto fail2; if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E) diff --git a/drivers/net/wireless/b43/phy.c b/drivers/net/wireless/b43/phy.c index 5f7ffa0..3d4ed64 100644 --- a/drivers/net/wireless/b43/phy.c +++ b/drivers/net/wireless/b43/phy.c @@ -26,6 +26,7 @@ */ #include <linux/delay.h> +#include <linux/io.h> #include <linux/types.h> #include "b43.h" diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h index 34a44c1..3488f24 100644 --- a/drivers/net/wireless/b43/pio.h +++ b/drivers/net/wireless/b43/pio.h @@ -4,6 +4,7 @@ #include "b43.h" #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/list.h> #include <linux/skbuff.h> diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c index fcb7773..f4faff6 100644 --- a/drivers/net/wireless/b43/sysfs.c +++ b/drivers/net/wireless/b43/sysfs.c @@ -23,13 +23,14 @@ */ +#include <linux/capability.h> +#include <linux/io.h> + #include "b43.h" #include "sysfs.h" #include "main.h" #include "phy.h" -#include <linux/capability.h> - #define GENERIC_FILESIZE 64 static int get_integer(const char *buf, size_t count) diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h index c27b2c1..e6516a1 100644 --- a/drivers/net/wireless/hostap/hostap_wlan.h +++ b/drivers/net/wireless/hostap/hostap_wlan.h @@ -661,7 +661,7 @@ struct local_info { #define HOSTAP_BITS_TRANSMIT 0 #define HOSTAP_BITS_BAP_TASKLET 1 #define HOSTAP_BITS_BAP_TASKLET2 2 - long bits; + unsigned long bits; struct ap_data *ap; diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h index bd73ebf..1e23b7f 100644 --- a/drivers/net/wireless/ray_cs.h +++ b/drivers/net/wireless/ray_cs.h @@ -33,8 +33,8 @@ typedef struct ray_dev_t { void __iomem *rmem; /* pointer to receive buffer window */ struct pcmcia_device *finder; /* pointer back to struct pcmcia_device for card */ struct timer_list timer; - long tx_ccs_lock; - long ccs_lock; + unsigned long tx_ccs_lock; + unsigned long ccs_lock; int dl_param_ccs; union { struct b4_startup_params b4; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index f464b82..7fd505c 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -74,22 +74,12 @@ struct netfront_info { struct napi_struct napi; - struct xen_netif_tx_front_ring tx; - struct xen_netif_rx_front_ring rx; - - spinlock_t tx_lock; - spinlock_t rx_lock; - unsigned int evtchn; + struct xenbus_device *xbdev; - /* Receive-ring batched refills. 
*/ -#define RX_MIN_TARGET 8 -#define RX_DFL_MIN_TARGET 64 -#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) - unsigned rx_min_target, rx_max_target, rx_target; - struct sk_buff_head rx_batch; - - struct timer_list rx_refill_timer; + spinlock_t tx_lock; + struct xen_netif_tx_front_ring tx; + int tx_ring_ref; /* * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries @@ -108,14 +98,23 @@ struct netfront_info { grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; unsigned tx_skb_freelist; + spinlock_t rx_lock ____cacheline_aligned_in_smp; + struct xen_netif_rx_front_ring rx; + int rx_ring_ref; + + /* Receive-ring batched refills. */ +#define RX_MIN_TARGET 8 +#define RX_DFL_MIN_TARGET 64 +#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) + unsigned rx_min_target, rx_max_target, rx_target; + struct sk_buff_head rx_batch; + + struct timer_list rx_refill_timer; + struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; grant_ref_t gref_rx_head; grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; - struct xenbus_device *xbdev; - int tx_ring_ref; - int rx_ring_ref; - unsigned long rx_pfn_array[NET_RX_RING_SIZE]; struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1]; struct mmu_update rx_mmu[NET_RX_RING_SIZE];
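The xen-netfront hunk above regroups struct netfront_info so that the transmit fields sit together behind tx_lock and the receive fields sit together behind rx_lock, with rx_lock marked ____cacheline_aligned_in_smp so the RX group starts on its own cache line and the two paths do not false-share. A minimal user-space sketch of the same layout technique; the structure, field names, the mutexes standing in for spinlocks, and the 64-byte line size are all assumptions for illustration:

/* Per-lock cache-line grouping, in the spirit of the netfront change above.
 * Hypothetical example only; the kernel spells the alignment as
 * ____cacheline_aligned_in_smp and picks the line size per architecture. */
#include <stdio.h>
#include <stddef.h>
#include <pthread.h>

#define CACHE_LINE 64

struct fake_front {
	/* Transmit state, guarded by tx_lock */
	pthread_mutex_t tx_lock;
	unsigned int tx_head;
	unsigned int tx_tail;

	/* Receive state, guarded by rx_lock; the alignment starts this group
	 * on a fresh cache line, so TX-side stores never dirty the line that
	 * holds rx_lock. */
	pthread_mutex_t rx_lock __attribute__((aligned(CACHE_LINE)));
	unsigned int rx_head;
	unsigned int rx_tail;
};

int main(void)
{
	printf("tx_lock offset %zu, rx_lock offset %zu\n",
	       offsetof(struct fake_front, tx_lock),
	       offsetof(struct fake_front, rx_lock));
	return 0;
}

On SMP builds the kernel macro expands to an equivalent alignment attribute; on uniprocessor builds it expands to nothing, so the padding is only paid where false sharing can actually occur.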