157 files changed, 34304 insertions, 3463 deletions
diff --git a/Documentation/DocBook/z8530book.tmpl b/Documentation/DocBook/z8530book.tmpl index 42c75ba..a42a8a4 100644 --- a/Documentation/DocBook/z8530book.tmpl +++ b/Documentation/DocBook/z8530book.tmpl @@ -69,12 +69,6 @@ device to be used as both a tty interface and as a synchronous controller is a project for Linux post the 2.4 release </para> - <para> - The support code handles most common card configurations and - supports running both Cisco HDLC and Synchronous PPP. With extra - glue the frame relay and X.25 protocols can also be used with this - driver. - </para> </chapter> <chapter id="Driver_Modes"> @@ -179,35 +173,27 @@ <para> If you wish to use the network interface facilities of the driver, then you need to attach a network device to each channel that is - present and in use. In addition to use the SyncPPP and Cisco HDLC + present and in use. In addition to use the generic HDLC you need to follow some additional plumbing rules. They may seem complex but a look at the example hostess_sv11 driver should reassure you. </para> <para> The network device used for each channel should be pointed to by - the netdevice field of each channel. The dev-> priv field of the + the netdevice field of each channel. The hdlc-> priv field of the network device points to your private data - you will need to be - able to find your ppp device from this. In addition to use the - sync ppp layer the private data must start with a void * pointer - to the syncppp structures. + able to find your private data from this. </para> <para> The way most drivers approach this particular problem is to create a structure holding the Z8530 device definition and - put that and the syncppp pointer into the private field of - the network device. The network device fields of the channels - then point back to the network devices. The ppp_device can also - be put in the private structure conveniently. + put that into the private field of the network device. The + network device fields of the channels then point back to the + network devices. </para> <para> - If you wish to use the synchronous ppp then you need to attach - the syncppp layer to the network device. You should do this before - you register the network device. The - <function>sppp_attach</function> requires that the first void * - pointer in your private data is pointing to an empty struct - ppp_device. The function fills in the initial data for the - ppp/hdlc layer. + If you wish to use the generic HDLC then you need to register + the HDLC device. </para> <para> Before you register your network device you will also need to @@ -314,10 +300,10 @@ buffer in sk_buff format and queues it for transmission. The caller must provide the entire packet with the exception of the bitstuffing and CRC. This is normally done by the caller via - the syncppp interface layer. It returns 0 if the buffer has been - queued and non zero values for queue full. If the function accepts - the buffer it becomes property of the Z8530 layer and the caller - should not free it. + the generic HDLC interface layer. It returns 0 if the buffer has been + queued and non zero values for queue full. If the function accepts + the buffer it becomes property of the Z8530 layer and the caller + should not free it. 
</para> <para> The function <function>z8530_get_stats</function> returns a pointer diff --git a/MAINTAINERS b/MAINTAINERS index 8223a52..c67a402 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -720,6 +720,15 @@ L: linux-wireless@vger.kernel.org L: ath5k-devel@lists.ath5k.org S: Maintained +ATHEROS ATH9K WIRELESS DRIVER +P: Luis R. Rodriguez +M: lrodriguez@atheros.com +P: Jouni Malinen +M: jmalinen@atheros.com +L: linux-wireless@vger.kernel.org +L: ath9k-devel@lists.ath9k.org +S: Supported + ATI_REMOTE2 DRIVER P: Ville Syrjala M: syrjala@sci.fi diff --git a/arch/sh/include/asm/sh_eth.h b/arch/sh/include/asm/sh_eth.h new file mode 100644 index 0000000..bb83258 --- /dev/null +++ b/arch/sh/include/asm/sh_eth.h @@ -0,0 +1,11 @@ +#ifndef __ASM_SH_ETH_H__ +#define __ASM_SH_ETH_H__ + +enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; + +struct sh_eth_plat_data { + int phy; + int edmac_endian; +}; + +#endif diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index d1fceab..c240562 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -232,7 +232,6 @@ typedef struct _mgslpc_info { /* SPPP/Cisco HDLC device parts */ int netcount; - int dosyncppp; spinlock_t netlock; #if SYNCLINK_GENERIC_HDLC @@ -459,13 +458,11 @@ static int ttymajor=0; static int debug_level = 0; static int maxframe[MAX_DEVICE_COUNT] = {0,}; -static int dosyncppp[MAX_DEVICE_COUNT] = {1,1,1,1}; module_param(break_on_load, bool, 0); module_param(ttymajor, int, 0); module_param(debug_level, int, 0); module_param_array(maxframe, int, NULL, 0); -module_param_array(dosyncppp, int, NULL, 0); MODULE_LICENSE("GPL"); @@ -2915,7 +2912,6 @@ static void mgslpc_add_device(MGSLPC_INFO *info) if (info->line < MAX_DEVICE_COUNT) { if (maxframe[info->line]) info->max_frame_size = maxframe[info->line]; - info->dosyncppp = dosyncppp[info->line]; } mgslpc_device_count++; diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index ef6706f..500f517 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c @@ -304,7 +304,6 @@ struct mgsl_struct { /* generic HDLC device parts */ int netcount; - int dosyncppp; spinlock_t netlock; #if SYNCLINK_GENERIC_HDLC @@ -868,7 +867,6 @@ static int irq[MAX_ISA_DEVICES]; static int dma[MAX_ISA_DEVICES]; static int debug_level; static int maxframe[MAX_TOTAL_DEVICES]; -static int dosyncppp[MAX_TOTAL_DEVICES]; static int txdmabufs[MAX_TOTAL_DEVICES]; static int txholdbufs[MAX_TOTAL_DEVICES]; @@ -879,7 +877,6 @@ module_param_array(irq, int, NULL, 0); module_param_array(dma, int, NULL, 0); module_param(debug_level, int, 0); module_param_array(maxframe, int, NULL, 0); -module_param_array(dosyncppp, int, NULL, 0); module_param_array(txdmabufs, int, NULL, 0); module_param_array(txholdbufs, int, NULL, 0); @@ -4258,7 +4255,6 @@ static void mgsl_add_device( struct mgsl_struct *info ) if (info->line < MAX_TOTAL_DEVICES) { if (maxframe[info->line]) info->max_frame_size = maxframe[info->line]; - info->dosyncppp = dosyncppp[info->line]; if (txdmabufs[info->line]) { info->num_tx_dma_buffers = txdmabufs[info->line]; diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 3e90589..509c89a 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c @@ -128,17 +128,14 @@ static int slgt_device_count; static int ttymajor; static int debug_level; static int maxframe[MAX_DEVICES]; -static int dosyncppp[MAX_DEVICES]; module_param(ttymajor, int, 0); module_param(debug_level, int, 0); module_param_array(maxframe, int, NULL, 0); 
-module_param_array(dosyncppp, int, NULL, 0); MODULE_PARM_DESC(ttymajor, "TTY major device number override: 0=auto assigned"); MODULE_PARM_DESC(debug_level, "Debug syslog output: 0=disabled, 1 to 5=increasing detail"); MODULE_PARM_DESC(maxframe, "Maximum frame size used by device (4096 to 65535)"); -MODULE_PARM_DESC(dosyncppp, "Enable synchronous net device, 0=disable 1=enable"); /* * tty support and callbacks @@ -349,7 +346,6 @@ struct slgt_info { /* SPPP/Cisco HDLC device parts */ int netcount; - int dosyncppp; spinlock_t netlock; #if SYNCLINK_GENERIC_HDLC struct net_device *netdev; @@ -3405,7 +3401,6 @@ static void add_device(struct slgt_info *info) if (info->line < MAX_DEVICES) { if (maxframe[info->line]) info->max_frame_size = maxframe[info->line]; - info->dosyncppp = dosyncppp[info->line]; } slgt_device_count++; diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index c0490cb..6bdb44f 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c @@ -270,7 +270,6 @@ typedef struct _synclinkmp_info { /* SPPP/Cisco HDLC device parts */ int netcount; - int dosyncppp; spinlock_t netlock; #if SYNCLINK_GENERIC_HDLC @@ -469,13 +468,11 @@ static int ttymajor = 0; */ static int debug_level = 0; static int maxframe[MAX_DEVICES] = {0,}; -static int dosyncppp[MAX_DEVICES] = {0,}; module_param(break_on_load, bool, 0); module_param(ttymajor, int, 0); module_param(debug_level, int, 0); module_param_array(maxframe, int, NULL, 0); -module_param_array(dosyncppp, int, NULL, 0); static char *driver_name = "SyncLink MultiPort driver"; static char *driver_version = "$Revision: 4.38 $"; @@ -3752,7 +3749,6 @@ static void add_device(SLMP_INFO *info) if (info->line < MAX_DEVICES) { if (maxframe[info->line]) info->max_frame_size = maxframe[info->line]; - info->dosyncppp = dosyncppp[info->line]; } synclinkmp_device_count++; diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index a52156e..bc8c6e3 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -551,7 +551,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) /* write address into NextDescriptor field of last desc in chain */ to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = first->async_tx.phys; - __list_splice(&new_chain, ioat_chan->used_desc.prev); + list_splice_tail(&new_chain, &ioat_chan->used_desc); ioat_chan->dmacount += desc_count; ioat_chan->pending += desc_count; diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c index dc6e474..e2ce41d 100644 --- a/drivers/net/3c523.c +++ b/drivers/net/3c523.c @@ -640,10 +640,8 @@ static int init586(struct net_device *dev) cfg_cmd->time_low = 0x00; cfg_cmd->time_high = 0xf2; cfg_cmd->promisc = 0; - if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) { + if (dev->flags & (IFF_ALLMULTI | IFF_PROMISC)) cfg_cmd->promisc = 1; - dev->flags |= IFF_PROMISC; - } cfg_cmd->carr_coll = 0x00; p->scb->cbl_offset = make16(cfg_cmd); diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index 6aca0c6..abc84f7 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c @@ -1521,14 +1521,11 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry) struct mc32_local *lp = netdev_priv(dev); u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */ - if (dev->flags&IFF_PROMISC) + if ((dev->flags&IFF_PROMISC) || + (dev->flags&IFF_ALLMULTI) || + dev->mc_count > 10) /* Enable promiscuous mode */ filt |= 1; - else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > 10) - { - dev->flags|=IFF_PROMISC; - filt |= 1; - } else if(dev->mc_count) { unsigned 
char block[62]; diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 8db4e6b..491ee16 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -1692,12 +1692,14 @@ vortex_open(struct net_device *dev) vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); vp->rx_ring[i].status = 0; /* Clear complete bit. */ vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); - skb = dev_alloc_skb(PKT_BUF_SZ); + + skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN, + GFP_KERNEL); vp->rx_skbuff[i] = skb; if (skb == NULL) break; /* Bad news! */ - skb->dev = dev; /* Mark as being used by this device. */ - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + + skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); } if (i != RX_RING_SIZE) { @@ -2538,7 +2540,7 @@ boomerang_rx(struct net_device *dev) struct sk_buff *skb; entry = vp->dirty_rx % RX_RING_SIZE; if (vp->rx_skbuff[entry] == NULL) { - skb = dev_alloc_skb(PKT_BUF_SZ); + skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); if (skb == NULL) { static unsigned long last_jif; if (time_after(jiffies, last_jif + 10 * HZ)) { @@ -2549,8 +2551,8 @@ boomerang_rx(struct net_device *dev) mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1)); break; /* Bad news! */ } - skb->dev = dev; /* Mark as being used by this device. */ - skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ + + skb_reserve(skb, NET_IP_ALIGN); vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); vp->rx_skbuff[entry] = skb; } diff --git a/drivers/net/8390.c b/drivers/net/8390.c index dc5d258..f72a2e8 100644 --- a/drivers/net/8390.c +++ b/drivers/net/8390.c @@ -9,42 +9,39 @@ int ei_open(struct net_device *dev) { return __ei_open(dev); } +EXPORT_SYMBOL(ei_open); int ei_close(struct net_device *dev) { return __ei_close(dev); } +EXPORT_SYMBOL(ei_close); irqreturn_t ei_interrupt(int irq, void *dev_id) { return __ei_interrupt(irq, dev_id); } +EXPORT_SYMBOL(ei_interrupt); #ifdef CONFIG_NET_POLL_CONTROLLER void ei_poll(struct net_device *dev) { __ei_poll(dev); } +EXPORT_SYMBOL(ei_poll); #endif struct net_device *__alloc_ei_netdev(int size) { return ____alloc_ei_netdev(size); } +EXPORT_SYMBOL(__alloc_ei_netdev); void NS8390_init(struct net_device *dev, int startp) { __NS8390_init(dev, startp); } - -EXPORT_SYMBOL(ei_open); -EXPORT_SYMBOL(ei_close); -EXPORT_SYMBOL(ei_interrupt); -#ifdef CONFIG_NET_POLL_CONTROLLER -EXPORT_SYMBOL(ei_poll); -#endif EXPORT_SYMBOL(NS8390_init); -EXPORT_SYMBOL(__alloc_ei_netdev); #if defined(MODULE) diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c index 71f1988..4c6eea4 100644 --- a/drivers/net/8390p.c +++ b/drivers/net/8390p.c @@ -4,9 +4,9 @@ static const char version[] = "8390p.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n"; #define ei_inb(_p) inb(_p) -#define ei_outb(_v,_p) outb(_v,_p) +#define ei_outb(_v, _p) outb(_v, _p) #define ei_inb_p(_p) inb_p(_p) -#define ei_outb_p(_v,_p) outb_p(_v,_p) +#define ei_outb_p(_v, _p) outb_p(_v, _p) #include "lib8390.c" @@ -14,42 +14,39 @@ int eip_open(struct net_device *dev) { return __ei_open(dev); } +EXPORT_SYMBOL(eip_open); int eip_close(struct net_device *dev) { return __ei_close(dev); } +EXPORT_SYMBOL(eip_close); irqreturn_t eip_interrupt(int irq, void *dev_id) { return __ei_interrupt(irq, dev_id); } +EXPORT_SYMBOL(eip_interrupt); #ifdef CONFIG_NET_POLL_CONTROLLER 
void eip_poll(struct net_device *dev) { __ei_poll(dev); } +EXPORT_SYMBOL(eip_poll); #endif struct net_device *__alloc_eip_netdev(int size) { return ____alloc_ei_netdev(size); } +EXPORT_SYMBOL(__alloc_eip_netdev); void NS8390p_init(struct net_device *dev, int startp) { - return __NS8390_init(dev, startp); + __NS8390_init(dev, startp); } - -EXPORT_SYMBOL(eip_open); -EXPORT_SYMBOL(eip_close); -EXPORT_SYMBOL(eip_interrupt); -#ifdef CONFIG_NET_POLL_CONTROLLER -EXPORT_SYMBOL(eip_poll); -#endif EXPORT_SYMBOL(NS8390p_init); -EXPORT_SYMBOL(__alloc_eip_netdev); #if defined(MODULE) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 8a03875..4b4cb2b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -510,14 +510,15 @@ config STNIC config SH_ETH tristate "Renesas SuperH Ethernet support" depends on SUPERH && \ - (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763) + (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || CPU_SUBTYPE_SH7763 || \ + CPU_SUBTYPE_SH7619) select CRC32 select MII select MDIO_BITBANG select PHYLIB help Renesas SuperH Ethernet device driver. - This driver support SH7710, SH7712 and SH7763. + This driver support SH7710, SH7712, SH7763 and SH7619. config SUNLANCE tristate "Sun LANCE support" diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index f12e3d1..e6a7bb7 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -1790,6 +1790,17 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter, { struct pci_dev *pdev = adapter->pdev; + /* + * The L1 hardware contains a bug that erroneously sets the + * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a + * fragmented IP packet is received, even though the packet + * is perfectly valid and its checksum is correct. There's + * no way to distinguish between one of these good packets + * and a packet that actually contains a TCP/UDP checksum + * error, so all we can do is allow it to be handed up to + * the higher layers and let it be sorted out there. + */ + skb->ip_summed = CHECKSUM_NONE; if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { @@ -1816,14 +1827,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter, return; } - /* IPv4, but hardware thinks its checksum is wrong */ - if (netif_msg_rx_err(adapter)) - dev_printk(KERN_DEBUG, &pdev->dev, - "hw csum wrong, pkt_flag:%x, err_flag:%x\n", - rrd->pkt_flg, rrd->err_flg); - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); - adapter->hw_csum_err++; return; } diff --git a/drivers/net/atp.c b/drivers/net/atp.c index 3d44333..c10cd80 100644 --- a/drivers/net/atp.c +++ b/drivers/net/atp.c @@ -854,14 +854,9 @@ static void set_rx_mode_8002(struct net_device *dev) struct net_local *lp = netdev_priv(dev); long ioaddr = dev->base_addr; - if ( dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) { - /* We must make the kernel realise we had to move - * into promisc mode or we start all out war on - * the cable. 
- AC - */ - dev->flags|=IFF_PROMISC; + if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC))) lp->addr_mode = CMR2h_PROMISC; - } else + else lp->addr_mode = CMR2h_Normal; write_reg_high(ioaddr, CMR2, lp->addr_mode); } diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index ebb539e..6106660 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2107,6 +2107,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) aggregator = __get_first_agg(port); ad_agg_selection_logic(aggregator); } + bond_3ad_set_carrier(bond); } // for each port run the state machines diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a641eea..c792138 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2223,272 +2223,217 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in /*-------------------------------- Monitoring -------------------------------*/ -/* - * if !have_locks, return nonzero if a failover is necessary. if - * have_locks, do whatever failover activities are needed. - * - * This is to separate the inspection and failover steps for locking - * purposes; failover requires rtnl, but acquiring it for every - * inspection is undesirable, so a wrapper first does inspection, and - * the acquires the necessary locks and calls again to perform - * failover if needed. Since all locks are dropped, a complete - * restart is needed between calls. - */ -static int __bond_mii_monitor(struct bonding *bond, int have_locks) -{ - struct slave *slave, *oldcurrent; - int do_failover = 0; - int i; - - if (bond->slave_cnt == 0) - goto out; - /* we will try to read the link status of each of our slaves, and - * set their IFF_RUNNING flag appropriately. For each slave not - * supporting MII status, we won't do anything so that a user-space - * program could monitor the link itself if needed. - */ - - read_lock(&bond->curr_slave_lock); - oldcurrent = bond->curr_active_slave; - read_unlock(&bond->curr_slave_lock); +static int bond_miimon_inspect(struct bonding *bond) +{ + struct slave *slave; + int i, link_state, commit = 0; bond_for_each_slave(bond, slave, i) { - struct net_device *slave_dev = slave->dev; - int link_state; - u16 old_speed = slave->speed; - u8 old_duplex = slave->duplex; + slave->new_link = BOND_LINK_NOCHANGE; - link_state = bond_check_dev_link(bond, slave_dev, 0); + link_state = bond_check_dev_link(bond, slave->dev, 0); switch (slave->link) { - case BOND_LINK_UP: /* the link was up */ - if (link_state == BMSR_LSTATUS) { - if (!oldcurrent) { - if (!have_locks) - return 1; - do_failover = 1; - } - break; - } else { /* link going down */ - slave->link = BOND_LINK_FAIL; - slave->delay = bond->params.downdelay; - - if (slave->link_failure_count < UINT_MAX) { - slave->link_failure_count++; - } + case BOND_LINK_UP: + if (link_state) + continue; - if (bond->params.downdelay) { - printk(KERN_INFO DRV_NAME - ": %s: link status down for %s " - "interface %s, disabling it in " - "%d ms.\n", - bond->dev->name, - IS_UP(slave_dev) - ? ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) - ? ((slave == oldcurrent) - ? 
"active " : "backup ") - : "") - : "idle ", - slave_dev->name, - bond->params.downdelay * bond->params.miimon); - } + slave->link = BOND_LINK_FAIL; + slave->delay = bond->params.downdelay; + if (slave->delay) { + printk(KERN_INFO DRV_NAME + ": %s: link status down for %s" + "interface %s, disabling it in %d ms.\n", + bond->dev->name, + (bond->params.mode == + BOND_MODE_ACTIVEBACKUP) ? + ((slave->state == BOND_STATE_ACTIVE) ? + "active " : "backup ") : "", + slave->dev->name, + bond->params.downdelay * bond->params.miimon); } - /* no break ! fall through the BOND_LINK_FAIL test to - ensure proper action to be taken - */ - case BOND_LINK_FAIL: /* the link has just gone down */ - if (link_state != BMSR_LSTATUS) { - /* link stays down */ - if (slave->delay <= 0) { - if (!have_locks) - return 1; - - /* link down for too long time */ - slave->link = BOND_LINK_DOWN; - - /* in active/backup mode, we must - * completely disable this interface - */ - if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) || - (bond->params.mode == BOND_MODE_8023AD)) { - bond_set_slave_inactive_flags(slave); - } - - printk(KERN_INFO DRV_NAME - ": %s: link status definitely " - "down for interface %s, " - "disabling it\n", - bond->dev->name, - slave_dev->name); - - /* notify ad that the link status has changed */ - if (bond->params.mode == BOND_MODE_8023AD) { - bond_3ad_handle_link_change(slave, BOND_LINK_DOWN); - } - - if ((bond->params.mode == BOND_MODE_TLB) || - (bond->params.mode == BOND_MODE_ALB)) { - bond_alb_handle_link_change(bond, slave, BOND_LINK_DOWN); - } - - if (slave == oldcurrent) { - do_failover = 1; - } - } else { - slave->delay--; - } - } else { - /* link up again */ - slave->link = BOND_LINK_UP; + /*FALLTHRU*/ + case BOND_LINK_FAIL: + if (link_state) { + /* + * recovered before downdelay expired + */ + slave->link = BOND_LINK_UP; slave->jiffies = jiffies; printk(KERN_INFO DRV_NAME ": %s: link status up again after %d " "ms for interface %s.\n", bond->dev->name, - (bond->params.downdelay - slave->delay) * bond->params.miimon, - slave_dev->name); + (bond->params.downdelay - slave->delay) * + bond->params.miimon, + slave->dev->name); + continue; } - break; - case BOND_LINK_DOWN: /* the link was down */ - if (link_state != BMSR_LSTATUS) { - /* the link stays down, nothing more to do */ - break; - } else { /* link going up */ - slave->link = BOND_LINK_BACK; - slave->delay = bond->params.updelay; - if (bond->params.updelay) { - /* if updelay == 0, no need to - advertise about a 0 ms delay */ - printk(KERN_INFO DRV_NAME - ": %s: link status up for " - "interface %s, enabling it " - "in %d ms.\n", - bond->dev->name, - slave_dev->name, - bond->params.updelay * bond->params.miimon); - } + if (slave->delay <= 0) { + slave->new_link = BOND_LINK_DOWN; + commit++; + continue; } - /* no break ! fall through the BOND_LINK_BACK state in - case there's something to do. 
- */ - case BOND_LINK_BACK: /* the link has just come back */ - if (link_state != BMSR_LSTATUS) { - /* link down again */ - slave->link = BOND_LINK_DOWN; + slave->delay--; + break; + + case BOND_LINK_DOWN: + if (!link_state) + continue; + + slave->link = BOND_LINK_BACK; + slave->delay = bond->params.updelay; + + if (slave->delay) { + printk(KERN_INFO DRV_NAME + ": %s: link status up for " + "interface %s, enabling it in %d ms.\n", + bond->dev->name, slave->dev->name, + bond->params.updelay * + bond->params.miimon); + } + /*FALLTHRU*/ + case BOND_LINK_BACK: + if (!link_state) { + slave->link = BOND_LINK_DOWN; printk(KERN_INFO DRV_NAME ": %s: link status down again after %d " "ms for interface %s.\n", bond->dev->name, - (bond->params.updelay - slave->delay) * bond->params.miimon, - slave_dev->name); - } else { - /* link stays up */ - if (slave->delay == 0) { - if (!have_locks) - return 1; - - /* now the link has been up for long time enough */ - slave->link = BOND_LINK_UP; - slave->jiffies = jiffies; - - if (bond->params.mode == BOND_MODE_8023AD) { - /* prevent it from being the active one */ - slave->state = BOND_STATE_BACKUP; - } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { - /* make it immediately active */ - slave->state = BOND_STATE_ACTIVE; - } else if (slave != bond->primary_slave) { - /* prevent it from being the active one */ - slave->state = BOND_STATE_BACKUP; - } + (bond->params.updelay - slave->delay) * + bond->params.miimon, + slave->dev->name); - printk(KERN_INFO DRV_NAME - ": %s: link status definitely " - "up for interface %s.\n", - bond->dev->name, - slave_dev->name); - - /* notify ad that the link status has changed */ - if (bond->params.mode == BOND_MODE_8023AD) { - bond_3ad_handle_link_change(slave, BOND_LINK_UP); - } - - if ((bond->params.mode == BOND_MODE_TLB) || - (bond->params.mode == BOND_MODE_ALB)) { - bond_alb_handle_link_change(bond, slave, BOND_LINK_UP); - } - - if ((!oldcurrent) || - (slave == bond->primary_slave)) { - do_failover = 1; - } - } else { - slave->delay--; - } + continue; } + + if (slave->delay <= 0) { + slave->new_link = BOND_LINK_UP; + commit++; + continue; + } + + slave->delay--; break; - default: - /* Should not happen */ - printk(KERN_ERR DRV_NAME - ": %s: Error: %s Illegal value (link=%d)\n", - bond->dev->name, - slave->dev->name, - slave->link); - goto out; - } /* end of switch (slave->link) */ + } + } - bond_update_speed_duplex(slave); + return commit; +} - if (bond->params.mode == BOND_MODE_8023AD) { - if (old_speed != slave->speed) { - bond_3ad_adapter_speed_changed(slave); - } +static void bond_miimon_commit(struct bonding *bond) +{ + struct slave *slave; + int i; + + bond_for_each_slave(bond, slave, i) { + switch (slave->new_link) { + case BOND_LINK_NOCHANGE: + continue; + + case BOND_LINK_UP: + slave->link = BOND_LINK_UP; + slave->jiffies = jiffies; - if (old_duplex != slave->duplex) { - bond_3ad_adapter_duplex_changed(slave); + if (bond->params.mode == BOND_MODE_8023AD) { + /* prevent it from being the active one */ + slave->state = BOND_STATE_BACKUP; + } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) { + /* make it immediately active */ + slave->state = BOND_STATE_ACTIVE; + } else if (slave != bond->primary_slave) { + /* prevent it from being the active one */ + slave->state = BOND_STATE_BACKUP; } - } - } /* end of for */ + printk(KERN_INFO DRV_NAME + ": %s: link status definitely " + "up for interface %s.\n", + bond->dev->name, slave->dev->name); - if (do_failover) { - ASSERT_RTNL(); + /* notify ad that the link 
status has changed */ + if (bond->params.mode == BOND_MODE_8023AD) + bond_3ad_handle_link_change(slave, BOND_LINK_UP); - write_lock_bh(&bond->curr_slave_lock); + if ((bond->params.mode == BOND_MODE_TLB) || + (bond->params.mode == BOND_MODE_ALB)) + bond_alb_handle_link_change(bond, slave, + BOND_LINK_UP); - bond_select_active_slave(bond); + if (!bond->curr_active_slave || + (slave == bond->primary_slave)) + goto do_failover; - write_unlock_bh(&bond->curr_slave_lock); + continue; - } else - bond_set_carrier(bond); + case BOND_LINK_DOWN: + slave->link = BOND_LINK_DOWN; -out: - return 0; + if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || + bond->params.mode == BOND_MODE_8023AD) + bond_set_slave_inactive_flags(slave); + + printk(KERN_INFO DRV_NAME + ": %s: link status definitely down for " + "interface %s, disabling it\n", + bond->dev->name, slave->dev->name); + + if (bond->params.mode == BOND_MODE_8023AD) + bond_3ad_handle_link_change(slave, + BOND_LINK_DOWN); + + if (bond->params.mode == BOND_MODE_TLB || + bond->params.mode == BOND_MODE_ALB) + bond_alb_handle_link_change(bond, slave, + BOND_LINK_DOWN); + + if (slave == bond->curr_active_slave) + goto do_failover; + + continue; + + default: + printk(KERN_ERR DRV_NAME + ": %s: invalid new link %d on slave %s\n", + bond->dev->name, slave->new_link, + slave->dev->name); + slave->new_link = BOND_LINK_NOCHANGE; + + continue; + } + +do_failover: + ASSERT_RTNL(); + write_lock_bh(&bond->curr_slave_lock); + bond_select_active_slave(bond); + write_unlock_bh(&bond->curr_slave_lock); + } + + bond_set_carrier(bond); } /* * bond_mii_monitor * * Really a wrapper that splits the mii monitor into two phases: an - * inspection, then (if inspection indicates something needs to be - * done) an acquisition of appropriate locks followed by another pass - * to implement whatever link state changes are indicated. + * inspection, then (if inspection indicates something needs to be done) + * an acquisition of appropriate locks followed by a commit phase to + * implement whatever link state changes are indicated. 
*/ void bond_mii_monitor(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, mii_work.work); - unsigned long delay; read_lock(&bond->lock); - if (bond->kill_timers) { - read_unlock(&bond->lock); - return; - } + if (bond->kill_timers) + goto out; + + if (bond->slave_cnt == 0) + goto re_arm; if (bond->send_grat_arp) { read_lock(&bond->curr_slave_lock); @@ -2496,19 +2441,24 @@ void bond_mii_monitor(struct work_struct *work) read_unlock(&bond->curr_slave_lock); } - if (__bond_mii_monitor(bond, 0)) { + if (bond_miimon_inspect(bond)) { read_unlock(&bond->lock); rtnl_lock(); read_lock(&bond->lock); - __bond_mii_monitor(bond, 1); + + bond_miimon_commit(bond); + read_unlock(&bond->lock); rtnl_unlock(); /* might sleep, hold no other locks */ read_lock(&bond->lock); } - delay = msecs_to_jiffies(bond->params.miimon); +re_arm: + if (bond->params.miimon) + queue_delayed_work(bond->wq, &bond->mii_work, + msecs_to_jiffies(bond->params.miimon)); +out: read_unlock(&bond->lock); - queue_delayed_work(bond->wq, &bond->mii_work, delay); } static __be32 bond_glean_dev_ip(struct net_device *dev) diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 6caac0f..3bdb473 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -350,9 +350,6 @@ static ssize_t bonding_store_slaves(struct device *d, if (dev) { printk(KERN_INFO DRV_NAME ": %s: Removing slave %s\n", bond->dev->name, dev->name); - if (bond->setup_by_slave) - res = bond_release_and_destroy(bond->dev, dev); - else res = bond_release(bond->dev, dev); if (res) { ret = res; diff --git a/drivers/net/de620.c b/drivers/net/de620.c index 3f5190c..d454e14 100644 --- a/drivers/net/de620.c +++ b/drivers/net/de620.c @@ -488,13 +488,6 @@ static void de620_set_multicast_list(struct net_device *dev) { if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) { /* Enable promiscuous mode */ - /* - * We must make the kernel realise we had to move - * into promisc mode or we start all out war on - * the cable. - AC - */ - dev->flags|=IFF_PROMISC; - de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL); } else diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 0b0f1c4..f42c23f 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c @@ -1374,6 +1374,11 @@ dm9000_probe(struct platform_device *pdev) for (i = 0; i < 6; i += 2) dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); + if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) { + mac_src = "platform data"; + memcpy(ndev->dev_addr, pdata->dev_addr, 6); + } + if (!is_valid_ether_addr(ndev->dev_addr)) { /* try reading from mac */ diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 4a4f62e..cf57050 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -41,24 +41,25 @@ struct e1000_info; -#define ndev_printk(level, netdev, format, arg...) \ - printk(level "%s: " format, (netdev)->name, ## arg) +#define e_printk(level, adapter, format, arg...) \ + printk(level "%s: %s: " format, pci_name(adapter->pdev), \ + adapter->netdev->name, ## arg) #ifdef DEBUG -#define ndev_dbg(netdev, format, arg...) \ - ndev_printk(KERN_DEBUG , netdev, format, ## arg) +#define e_dbg(format, arg...) \ + e_printk(KERN_DEBUG , adapter, format, ## arg) #else -#define ndev_dbg(netdev, format, arg...) do { (void)(netdev); } while (0) +#define e_dbg(format, arg...) do { (void)(adapter); } while (0) #endif -#define ndev_err(netdev, format, arg...) 
\ - ndev_printk(KERN_ERR , netdev, format, ## arg) -#define ndev_info(netdev, format, arg...) \ - ndev_printk(KERN_INFO , netdev, format, ## arg) -#define ndev_warn(netdev, format, arg...) \ - ndev_printk(KERN_WARNING , netdev, format, ## arg) -#define ndev_notice(netdev, format, arg...) \ - ndev_printk(KERN_NOTICE , netdev, format, ## arg) +#define e_err(format, arg...) \ + e_printk(KERN_ERR, adapter, format, ## arg) +#define e_info(format, arg...) \ + e_printk(KERN_INFO, adapter, format, ## arg) +#define e_warn(format, arg...) \ + e_printk(KERN_WARNING, adapter, format, ## arg) +#define e_notice(format, arg...) \ + e_printk(KERN_NOTICE, adapter, format, ## arg) /* Tx/Rx descriptor defines */ @@ -283,10 +284,6 @@ struct e1000_adapter { unsigned long led_status; unsigned int flags; - - /* for ioport free */ - int bars; - int need_ioport; }; struct e1000_info { diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c index 9350564..cf9679f 100644 --- a/drivers/net/e1000e/ethtool.c +++ b/drivers/net/e1000e/ethtool.c @@ -189,8 +189,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) /* Fiber NICs only allow 1000 gbps Full duplex */ if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && spddplx != (SPEED_1000 + DUPLEX_FULL)) { - ndev_err(adapter->netdev, "Unsupported Speed/Duplex " - "configuration\n"); + e_err("Unsupported Speed/Duplex configuration\n"); return -EINVAL; } @@ -213,8 +212,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) break; case SPEED_1000 + DUPLEX_HALF: /* not supported */ default: - ndev_err(adapter->netdev, "Unsupported Speed/Duplex " - "configuration\n"); + e_err("Unsupported Speed/Duplex configuration\n"); return -EINVAL; } return 0; @@ -231,8 +229,8 @@ static int e1000_set_settings(struct net_device *netdev, * cannot be changed */ if (e1000_check_reset_block(hw)) { - ndev_err(netdev, "Cannot change link " - "characteristics when SoL/IDER is active.\n"); + e_err("Cannot change link characteristics when SoL/IDER is " + "active.\n"); return -EINVAL; } @@ -380,8 +378,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data) netdev->features &= ~NETIF_F_TSO6; } - ndev_info(netdev, "TSO is %s\n", - data ? "Enabled" : "Disabled"); + e_info("TSO is %s\n", data ? 
"Enabled" : "Disabled"); adapter->flags |= FLAG_TSO_FORCE; return 0; } @@ -722,10 +719,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, (test[pat] & write)); val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); if (val != (test[pat] & write & mask)) { - ndev_err(adapter->netdev, "pattern test reg %04X " - "failed: got 0x%08X expected 0x%08X\n", - reg + offset, - val, (test[pat] & write & mask)); + e_err("pattern test reg %04X failed: got 0x%08X " + "expected 0x%08X\n", reg + offset, val, + (test[pat] & write & mask)); *data = reg; return 1; } @@ -740,9 +736,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, __ew32(&adapter->hw, reg, write & mask); val = __er32(&adapter->hw, reg); if ((write & mask) != (val & mask)) { - ndev_err(adapter->netdev, "set/check reg %04X test failed: " - "got 0x%08X expected 0x%08X\n", reg, (val & mask), - (write & mask)); + e_err("set/check reg %04X test failed: got 0x%08X " + "expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; return 1; } @@ -766,7 +761,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) { struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &adapter->hw.mac; - struct net_device *netdev = adapter->netdev; u32 value; u32 before; u32 after; @@ -799,8 +793,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) ew32(STATUS, toggle); after = er32(STATUS) & toggle; if (value != after) { - ndev_err(netdev, "failed STATUS register test got: " - "0x%08X expected: 0x%08X\n", after, value); + e_err("failed STATUS register test got: 0x%08X expected: " + "0x%08X\n", after, value); *data = 1; return 1; } @@ -903,8 +897,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) *data = 1; return -1; } - ndev_info(netdev, "testing %s interrupt\n", - (shared_int ? "shared" : "unshared")); + e_info("testing %s interrupt\n", (shared_int ? 
"shared" : "unshared")); /* Disable all the interrupts */ ew32(IMC, 0xFFFFFFFF); @@ -1526,8 +1519,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) * sessions are active */ if (e1000_check_reset_block(&adapter->hw)) { - ndev_err(adapter->netdev, "Cannot do PHY loopback test " - "when SoL/IDER is active.\n"); + e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); *data = 0; goto out; } @@ -1612,7 +1604,7 @@ static void e1000_diag_test(struct net_device *netdev, forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; autoneg = adapter->hw.mac.autoneg; - ndev_info(netdev, "offline testing starting\n"); + e_info("offline testing starting\n"); /* * Link test performed before hardware reset so autoneg doesn't @@ -1658,7 +1650,7 @@ static void e1000_diag_test(struct net_device *netdev, if (if_running) dev_open(netdev); } else { - ndev_info(netdev, "online testing starting\n"); + e_info("online testing starting\n"); /* Online tests */ if (e1000_link_test(adapter, &data[4])) eth_test->flags |= ETH_TEST_FL_FAILED; @@ -1694,8 +1686,8 @@ static void e1000_get_wol(struct net_device *netdev, wol->supported &= ~WAKE_UCAST; if (adapter->wol & E1000_WUFC_EX) - ndev_err(netdev, "Interface does not support " - "directed (unicast) frame wake-up packets\n"); + e_err("Interface does not support directed (unicast) " + "frame wake-up packets\n"); } if (adapter->wol & E1000_WUFC_EX) diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index d136778..05b0b2f 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -484,8 +484,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, * packet, also make sure the frame isn't just CRC only */ if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) { /* All receives must fit into a single buffer */ - ndev_dbg(netdev, "%s: Receive packet consumed " - "multiple buffers\n", netdev->name); + e_dbg("%s: Receive packet consumed multiple buffers\n", + netdev->name); /* recycle */ buffer_info->skb = skb; goto next_desc; @@ -576,28 +576,26 @@ static void e1000_print_tx_hang(struct e1000_adapter *adapter) unsigned int i = tx_ring->next_to_clean; unsigned int eop = tx_ring->buffer_info[i].next_to_watch; struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop); - struct net_device *netdev = adapter->netdev; /* detected Tx unit hang */ - ndev_err(netdev, - "Detected Tx Unit Hang:\n" - " TDH <%x>\n" - " TDT <%x>\n" - " next_to_use <%x>\n" - " next_to_clean <%x>\n" - "buffer_info[next_to_clean]:\n" - " time_stamp <%lx>\n" - " next_to_watch <%x>\n" - " jiffies <%lx>\n" - " next_to_watch.status <%x>\n", - readl(adapter->hw.hw_addr + tx_ring->head), - readl(adapter->hw.hw_addr + tx_ring->tail), - tx_ring->next_to_use, - tx_ring->next_to_clean, - tx_ring->buffer_info[eop].time_stamp, - eop, - jiffies, - eop_desc->upper.fields.status); + e_err("Detected Tx Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + readl(adapter->hw.hw_addr + tx_ring->head), + readl(adapter->hw.hw_addr + tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); } /** @@ -747,8 +745,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, buffer_info->dma = 0; if (!(staterr & E1000_RXD_STAT_EOP)) { - ndev_dbg(netdev, "%s: Packet 
Split buffers didn't pick " - "up the full packet\n", netdev->name); + e_dbg("%s: Packet Split buffers didn't pick up the " + "full packet\n", netdev->name); dev_kfree_skb_irq(skb); goto next_desc; } @@ -761,8 +759,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, length = le16_to_cpu(rx_desc->wb.middle.length0); if (!length) { - ndev_dbg(netdev, "%s: Last part of the packet spanning" - " multiple descriptors\n", netdev->name); + e_dbg("%s: Last part of the packet spanning multiple " + "descriptors\n", netdev->name); dev_kfree_skb_irq(skb); goto next_desc; } @@ -1011,7 +1009,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, /* eth type trans needs skb->data to point to something */ if (!pskb_may_pull(skb, ETH_HLEN)) { - ndev_err(netdev, "pskb_may_pull failed.\n"); + e_err("pskb_may_pull failed.\n"); dev_kfree_skb(skb); goto next_desc; } @@ -1251,10 +1249,8 @@ static int e1000_request_irq(struct e1000_adapter *adapter) err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, netdev); if (err) { - ndev_err(netdev, - "Unable to allocate %s interrupt (return: %d)\n", - adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", - err); + e_err("Unable to allocate %s interrupt (return: %d)\n", + adapter->flags & FLAG_MSI_ENABLED ? "MSI":"INTx", err); if (adapter->flags & FLAG_MSI_ENABLED) pci_disable_msi(adapter->pdev); } @@ -1395,8 +1391,7 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter) return 0; err: vfree(tx_ring->buffer_info); - ndev_err(adapter->netdev, - "Unable to allocate memory for the transmit descriptor ring\n"); + e_err("Unable to allocate memory for the transmit descriptor ring\n"); return err; } @@ -1450,8 +1445,7 @@ err_pages: } err: vfree(rx_ring->buffer_info); - ndev_err(adapter->netdev, - "Unable to allocate memory for the transmit descriptor ring\n"); + e_err("Unable to allocate memory for the transmit descriptor ring\n"); return err; } @@ -2450,13 +2444,13 @@ void e1000e_reset(struct e1000_adapter *adapter) * For parts with AMT enabled, let the firmware know * that the network interface is in control */ - if ((adapter->flags & FLAG_HAS_AMT) && e1000e_check_mng_mode(hw)) + if (adapter->flags & FLAG_HAS_AMT) e1000_get_hw_control(adapter); ew32(WUC, 0); if (mac->ops.init_hw(hw)) - ndev_err(adapter->netdev, "Hardware Error\n"); + e_err("Hardware Error\n"); e1000_update_mng_vlan(adapter); @@ -2591,7 +2585,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter) return 0; err: - ndev_err(netdev, "Unable to allocate memory for queues\n"); + e_err("Unable to allocate memory for queues\n"); kfree(adapter->rx_ring); kfree(adapter->tx_ring); return -ENOMEM; @@ -2640,8 +2634,7 @@ static int e1000_open(struct net_device *netdev) * If AMT is enabled, let the firmware know that the network * interface is now open */ - if ((adapter->flags & FLAG_HAS_AMT) && - e1000e_check_mng_mode(&adapter->hw)) + if (adapter->flags & FLAG_HAS_AMT) e1000_get_hw_control(adapter); /* @@ -2719,8 +2712,7 @@ static int e1000_close(struct net_device *netdev) * If AMT is enabled, let the firmware know that the network * interface is now closed */ - if ((adapter->flags & FLAG_HAS_AMT) && - e1000e_check_mng_mode(&adapter->hw)) + if (adapter->flags & FLAG_HAS_AMT) e1000_release_hw_control(adapter); return 0; @@ -2917,8 +2909,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000); ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus); if (ret_val) - 
ndev_warn(adapter->netdev, - "Error reading PHY register\n"); + e_warn("Error reading PHY register\n"); } else { /* * Do not read PHY registers if link is not up @@ -2943,18 +2934,16 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter) static void e1000_print_link_info(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; u32 ctrl = er32(CTRL); - ndev_info(netdev, - "Link is Up %d Mbps %s, Flow Control: %s\n", - adapter->link_speed, - (adapter->link_duplex == FULL_DUPLEX) ? - "Full Duplex" : "Half Duplex", - ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? - "RX/TX" : - ((ctrl & E1000_CTRL_RFCE) ? "RX" : - ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); + e_info("Link is Up %d Mbps %s, Flow Control: %s\n", + adapter->link_speed, + (adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ? + "RX/TX" : + ((ctrl & E1000_CTRL_RFCE) ? "RX" : + ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); } static bool e1000_has_link(struct e1000_adapter *adapter) @@ -2994,8 +2983,7 @@ static bool e1000_has_link(struct e1000_adapter *adapter) if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ - ndev_info(adapter->netdev, - "Gigabit has been disabled, downgrading speed\n"); + e_info("Gigabit has been disabled, downgrading speed\n"); } return link_active; @@ -3096,8 +3084,7 @@ static void e1000_watchdog_task(struct work_struct *work) switch (adapter->link_speed) { case SPEED_10: case SPEED_100: - ndev_info(netdev, - "10/100 speed: disabling TSO\n"); + e_info("10/100 speed: disabling TSO\n"); netdev->features &= ~NETIF_F_TSO; netdev->features &= ~NETIF_F_TSO6; break; @@ -3130,7 +3117,7 @@ static void e1000_watchdog_task(struct work_struct *work) if (netif_carrier_ok(netdev)) { adapter->link_speed = 0; adapter->link_duplex = 0; - ndev_info(netdev, "Link is Down\n"); + e_info("Link is Down\n"); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); if (!test_bit(__E1000_DOWN, &adapter->state)) @@ -3604,8 +3591,7 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) pull_size = min((unsigned int)4, skb->data_len); if (!__pskb_pull_tail(skb, pull_size)) { - ndev_err(netdev, - "__pskb_pull_tail failed.\n"); + e_err("__pskb_pull_tail failed.\n"); dev_kfree_skb_any(skb); return NETDEV_TX_OK; } @@ -3737,25 +3723,25 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { - ndev_err(netdev, "Invalid MTU setting\n"); + e_err("Invalid MTU setting\n"); return -EINVAL; } /* Jumbo frame size limits */ if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { - ndev_err(netdev, "Jumbo Frames not supported.\n"); + e_err("Jumbo Frames not supported.\n"); return -EINVAL; } if (adapter->hw.phy.type == e1000_phy_ife) { - ndev_err(netdev, "Jumbo Frames not supported.\n"); + e_err("Jumbo Frames not supported.\n"); return -EINVAL; } } #define MAX_STD_JUMBO_FRAME_SIZE 9234 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { - ndev_err(netdev, "MTU > 9216 not supported.\n"); + e_err("MTU > 9216 not supported.\n"); return -EINVAL; } @@ -3792,8 +3778,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; - ndev_info(netdev, "changing 
MTU from %d to %d\n", - netdev->mtu, new_mtu); + e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); netdev->mtu = new_mtu; if (netif_running(netdev)) @@ -4006,10 +3991,7 @@ static int e1000_resume(struct pci_dev *pdev) pci_restore_state(pdev); e1000e_disable_l1aspm(pdev); - if (adapter->need_ioport) - err = pci_enable_device(pdev); - else - err = pci_enable_device_mem(pdev); + err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); @@ -4043,7 +4025,7 @@ static int e1000_resume(struct pci_dev *pdev) * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. */ - if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) + if (!(adapter->flags & FLAG_HAS_AMT)) e1000_get_hw_control(adapter); return 0; @@ -4111,10 +4093,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) int err; e1000e_disable_l1aspm(pdev); - if (adapter->need_ioport) - err = pci_enable_device(pdev); - else - err = pci_enable_device_mem(pdev); + err = pci_enable_device_mem(pdev); if (err) { dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); @@ -4162,8 +4141,7 @@ static void e1000_io_resume(struct pci_dev *pdev) * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. */ - if (!(adapter->flags & FLAG_HAS_AMT) || - !e1000e_check_mng_mode(&adapter->hw)) + if (!(adapter->flags & FLAG_HAS_AMT)) e1000_get_hw_control(adapter); } @@ -4175,36 +4153,40 @@ static void e1000_print_device_info(struct e1000_adapter *adapter) u32 pba_num; /* print bus type/speed/width info */ - ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " - "%02x:%02x:%02x:%02x:%02x:%02x\n", - /* bus width */ - ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : - "Width x1"), - /* MAC address */ - netdev->dev_addr[0], netdev->dev_addr[1], - netdev->dev_addr[2], netdev->dev_addr[3], - netdev->dev_addr[4], netdev->dev_addr[5]); - ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", - (hw->phy.type == e1000_phy_ife) - ? "10/100" : "1000"); + e_info("(PCI Express:2.5GB/s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n", + /* bus width */ + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : + "Width x1"), + /* MAC address */ + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + e_info("Intel(R) PRO/%s Network Connection\n", + (hw->phy.type == e1000_phy_ife) ? 
"10/100" : "1000"); e1000e_read_pba_num(hw, &pba_num); - ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", - hw->mac.type, hw->phy.type, - (pba_num >> 8), (pba_num & 0xff)); + e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", + hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff)); } -/** - * e1000e_is_need_ioport - determine if an adapter needs ioport resources or not - * @pdev: PCI device information struct - * - * Returns true if an adapters needs ioport resources - **/ -static int e1000e_is_need_ioport(struct pci_dev *pdev) +static void e1000_eeprom_checks(struct e1000_adapter *adapter) { - switch (pdev->device) { - /* Currently there are no adapters that need ioport resources */ - default: - return false; + struct e1000_hw *hw = &adapter->hw; + int ret_val; + u16 buf = 0; + + if (hw->mac.type != e1000_82573) + return; + + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); + if (!(le16_to_cpu(buf) & (1 << 0))) { + /* Deep Smart Power Down (DSPD) */ + e_warn("Warning: detected DSPD enabled in EEPROM\n"); + } + + ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf); + if (le16_to_cpu(buf) & (3 << 2)) { + /* ASPM enable */ + e_warn("Warning: detected ASPM enabled in EEPROM\n"); } } @@ -4233,19 +4215,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev, int i, err, pci_using_dac; u16 eeprom_data = 0; u16 eeprom_apme_mask = E1000_EEPROM_APME; - int bars, need_ioport; e1000e_disable_l1aspm(pdev); - /* do not allocate ioport bars when not needed */ - need_ioport = e1000e_is_need_ioport(pdev); - if (need_ioport) { - bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); - err = pci_enable_device(pdev); - } else { - bars = pci_select_bars(pdev, IORESOURCE_MEM); - err = pci_enable_device_mem(pdev); - } + err = pci_enable_device_mem(pdev); if (err) return err; @@ -4268,7 +4241,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev, } } - err = pci_request_selected_regions(pdev, bars, e1000e_driver_name); + err = pci_request_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM), + e1000e_driver_name); if (err) goto err_pci_reg; @@ -4293,8 +4268,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->hw.adapter = adapter; adapter->hw.mac.type = ei->mac; adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; - adapter->bars = bars; - adapter->need_ioport = need_ioport; mmio_start = pci_resource_start(pdev, 0); mmio_len = pci_resource_len(pdev, 0); @@ -4366,8 +4339,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, } if (e1000_check_reset_block(&adapter->hw)) - ndev_info(netdev, - "PHY reset is blocked due to SOL/IDER session.\n"); + e_info("PHY reset is blocked due to SOL/IDER session.\n"); netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | @@ -4411,25 +4383,26 @@ static int __devinit e1000_probe(struct pci_dev *pdev, if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) break; if (i == 2) { - ndev_err(netdev, "The NVM Checksum Is Not Valid\n"); + e_err("The NVM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; } } + e1000_eeprom_checks(adapter); + /* copy the MAC address out of the NVM */ if (e1000e_read_mac_addr(&adapter->hw)) - ndev_err(netdev, "NVM Read Error while reading MAC address\n"); + e_err("NVM Read Error while reading MAC address\n"); memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); if (!is_valid_ether_addr(netdev->perm_addr)) { - ndev_err(netdev, "Invalid MAC Address: " - "%02x:%02x:%02x:%02x:%02x:%02x\n", - 
netdev->perm_addr[0], netdev->perm_addr[1], - netdev->perm_addr[2], netdev->perm_addr[3], - netdev->perm_addr[4], netdev->perm_addr[5]); + e_err("Invalid MAC Address: %02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->perm_addr[0], netdev->perm_addr[1], + netdev->perm_addr[2], netdev->perm_addr[3], + netdev->perm_addr[4], netdev->perm_addr[5]); err = -EIO; goto err_eeprom; } @@ -4499,8 +4472,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev, * is up. For all other cases, let the f/w know that the h/w is now * under the control of the driver. */ - if (!(adapter->flags & FLAG_HAS_AMT) || - !e1000e_check_mng_mode(&adapter->hw)) + if (!(adapter->flags & FLAG_HAS_AMT)) e1000_get_hw_control(adapter); /* tell the stack to leave us alone until e1000_open() is called */ @@ -4517,24 +4489,25 @@ static int __devinit e1000_probe(struct pci_dev *pdev, return 0; err_register: -err_hw_init: - e1000_release_hw_control(adapter); + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000_release_hw_control(adapter); err_eeprom: if (!e1000_check_reset_block(&adapter->hw)) e1000_phy_hw_reset(&adapter->hw); +err_hw_init: - if (adapter->hw.flash_address) - iounmap(adapter->hw.flash_address); - -err_flashmap: kfree(adapter->tx_ring); kfree(adapter->rx_ring); err_sw_init: + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); +err_flashmap: iounmap(adapter->hw.hw_addr); err_ioremap: free_netdev(netdev); err_alloc_etherdev: - pci_release_selected_regions(pdev, bars); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); err_pci_reg: err_dma: pci_disable_device(pdev); @@ -4582,7 +4555,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev) iounmap(adapter->hw.hw_addr); if (adapter->hw.flash_address) iounmap(adapter->hw.flash_address); - pci_release_selected_regions(pdev, adapter->bars); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); free_netdev(netdev); diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c index a66b92e..8effc31 100644 --- a/drivers/net/e1000e/param.c +++ b/drivers/net/e1000e/param.c @@ -27,6 +27,7 @@ *******************************************************************************/ #include <linux/netdevice.h> +#include <linux/pci.h> #include "e1000.h" @@ -162,17 +163,16 @@ static int __devinit e1000_validate_option(unsigned int *value, case enable_option: switch (*value) { case OPTION_ENABLED: - ndev_info(adapter->netdev, "%s Enabled\n", opt->name); + e_info("%s Enabled\n", opt->name); return 0; case OPTION_DISABLED: - ndev_info(adapter->netdev, "%s Disabled\n", opt->name); + e_info("%s Disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { - ndev_info(adapter->netdev, - "%s set to %i\n", opt->name, *value); + e_info("%s set to %i\n", opt->name, *value); return 0; } break; @@ -184,8 +184,7 @@ static int __devinit e1000_validate_option(unsigned int *value, ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') - ndev_info(adapter->netdev, "%s\n", - ent->str); + e_info("%s\n", ent->str); return 0; } } @@ -195,8 +194,8 @@ static int __devinit e1000_validate_option(unsigned int *value, BUG(); } - ndev_info(adapter->netdev, "Invalid %s value specified (%i) %s\n", - opt->name, *value, opt->err); + e_info("Invalid %s value specified (%i) %s\n", opt->name, *value, + opt->err); *value = opt->def; return -1; } @@ -213,13 +212,11 @@ static int __devinit e1000_validate_option(unsigned int *value, void __devinit e1000e_check_options(struct 
e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; - struct net_device *netdev = adapter->netdev; int bd = adapter->bd_number; if (bd >= E1000_MAX_NIC) { - ndev_notice(netdev, - "Warning: no configuration for board #%i\n", bd); - ndev_notice(netdev, "Using defaults for all values\n"); + e_notice("Warning: no configuration for board #%i\n", bd); + e_notice("Using defaults for all values\n"); } { /* Transmit Interrupt Delay */ @@ -313,19 +310,15 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) adapter->itr = InterruptThrottleRate[bd]; switch (adapter->itr) { case 0: - ndev_info(netdev, "%s turned off\n", - opt.name); + e_info("%s turned off\n", opt.name); break; case 1: - ndev_info(netdev, - "%s set to dynamic mode\n", - opt.name); + e_info("%s set to dynamic mode\n", opt.name); adapter->itr_setting = adapter->itr; adapter->itr = 20000; break; case 3: - ndev_info(netdev, - "%s set to dynamic conservative mode\n", + e_info("%s set to dynamic conservative mode\n", opt.name); adapter->itr_setting = adapter->itr; adapter->itr = 20000; diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 56f5049..1f11350 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c @@ -1283,14 +1283,6 @@ set_multicast_list(struct net_device *dev) if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63) { - /* - * We must make the kernel realise we had to move - * into promisc mode or we start all out war on - * the cable. If it was a promisc request the - * flag is already set. If not we assert it. - */ - dev->flags|=IFF_PROMISC; - eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */ mode = inb(ioaddr + REG2); outb(mode | PRMSC_Mode, ioaddr + REG2); diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c index e3dd8b1..bee8b3f 100644 --- a/drivers/net/eth16i.c +++ b/drivers/net/eth16i.c @@ -1356,7 +1356,6 @@ static void eth16i_multicast(struct net_device *dev) if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC)) { - dev->flags|=IFF_PROMISC; /* Must do this */ outb(3, ioaddr + RECEIVE_MODE_REG); } else { outb(2, ioaddr + RECEIVE_MODE_REG); diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 01b38b0..053971e 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -77,26 +77,27 @@ * Hardware access: */ -#define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */ -#define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. 
Relies on the timer irq */ -#define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */ -#define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */ -#define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */ -#define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */ -#define DEV_HAS_MSI 0x00040 /* device supports MSI */ -#define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */ -#define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */ -#define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */ -#define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */ -#define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */ -#define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */ -#define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */ -#define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */ -#define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */ -#define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ -#define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ -#define DEV_NEED_TX_LIMIT 0x40000 /* device needs to limit tx */ -#define DEV_HAS_GEAR_MODE 0x80000 /* device supports gear mode */ +#define DEV_NEED_TIMERIRQ 0x000001 /* set the timer irq flag in the irq mask */ +#define DEV_NEED_LINKTIMER 0x000002 /* poll link settings. Relies on the timer irq */ +#define DEV_HAS_LARGEDESC 0x000004 /* device supports jumbo frames and needs packet format 2 */ +#define DEV_HAS_HIGH_DMA 0x000008 /* device supports 64bit dma */ +#define DEV_HAS_CHECKSUM 0x000010 /* device supports tx and rx checksum offloads */ +#define DEV_HAS_VLAN 0x000020 /* device supports vlan tagging and striping */ +#define DEV_HAS_MSI 0x000040 /* device supports MSI */ +#define DEV_HAS_MSI_X 0x000080 /* device supports MSI-X */ +#define DEV_HAS_POWER_CNTRL 0x000100 /* device supports power savings */ +#define DEV_HAS_STATISTICS_V1 0x000200 /* device supports hw statistics version 1 */ +#define DEV_HAS_STATISTICS_V2 0x000400 /* device supports hw statistics version 2 */ +#define DEV_HAS_STATISTICS_V3 0x000800 /* device supports hw statistics version 3 */ +#define DEV_HAS_TEST_EXTENDED 0x001000 /* device supports extended diagnostic test */ +#define DEV_HAS_MGMT_UNIT 0x002000 /* device supports management unit */ +#define DEV_HAS_CORRECT_MACADDR 0x004000 /* device supports correct mac address order */ +#define DEV_HAS_COLLISION_FIX 0x008000 /* device supports tx collision fix */ +#define DEV_HAS_PAUSEFRAME_TX_V1 0x010000 /* device supports tx pause frames version 1 */ +#define DEV_HAS_PAUSEFRAME_TX_V2 0x020000 /* device supports tx pause frames version 2 */ +#define DEV_HAS_PAUSEFRAME_TX_V3 0x040000 /* device supports tx pause frames version 3 */ +#define DEV_NEED_TX_LIMIT 0x080000 /* device needs to limit tx */ +#define DEV_HAS_GEAR_MODE 0x100000 /* device supports gear mode */ enum { NvRegIrqStatus = 0x000, @@ -248,6 +249,8 @@ enum { #define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 #define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 #define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 + NvRegTxPauseFrameLimit = 0x174, +#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000 NvRegMIIStatus = 0x180, #define NVREG_MIISTAT_ERROR 0x0001 #define NVREG_MIISTAT_LINKCHANGE 0x0008 @@ -270,6 +273,9 @@ enum { #define 
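/*
 * Editor's note (illustrative, not part of the patch): the forcedeth
 * driver_data flags above are widened from five to six hex digits so the
 * new capabilities (DEV_HAS_STATISTICS_V3, and DEV_HAS_GEAR_MODE at
 * 0x100000) still fit as distinct single bits.  They remain powers of two,
 * so a PCI table entry is an OR of capabilities and each one is tested
 * with a simple mask, e.g.:
 */
#include <stdbool.h>
#include <stdint.h>

#define HAS_STATISTICS_V3 0x000800u   /* same values as the defines above */
#define HAS_GEAR_MODE     0x100000u

static bool device_supports(uint32_t driver_data, uint32_t capability)
{
        return (driver_data & capability) != 0;
}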
NVREG_MIICTL_WRITE 0x00400 #define NVREG_MIICTL_ADDRSHIFT 5 NvRegMIIData = 0x194, + NvRegTxUnicast = 0x1a0, + NvRegTxMulticast = 0x1a4, + NvRegTxBroadcast = 0x1a8, NvRegWakeUpFlags = 0x200, #define NVREG_WAKEUPFLAGS_VAL 0x7770 #define NVREG_WAKEUPFLAGS_BUSYSHIFT 24 @@ -402,6 +408,7 @@ union ring_type { #define NV_RX_FRAMINGERR (1<<29) #define NV_RX_ERROR (1<<30) #define NV_RX_AVAIL (1<<31) +#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR) #define NV_RX2_CHECKSUMMASK (0x1C000000) #define NV_RX2_CHECKSUM_IP (0x10000000) @@ -419,6 +426,7 @@ union ring_type { /* error and avail are the same for both */ #define NV_RX2_ERROR (1<<30) #define NV_RX2_AVAIL (1<<31) +#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR) #define NV_RX3_VLAN_TAG_PRESENT (1<<16) #define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) @@ -616,7 +624,12 @@ static const struct nv_ethtool_str nv_estats_str[] = { { "rx_bytes" }, { "tx_pause" }, { "rx_pause" }, - { "rx_drop_frame" } + { "rx_drop_frame" }, + + /* version 3 stats */ + { "tx_unicast" }, + { "tx_multicast" }, + { "tx_broadcast" } }; struct nv_ethtool_stats { @@ -652,9 +665,15 @@ struct nv_ethtool_stats { u64 tx_pause; u64 rx_pause; u64 rx_drop_frame; + + /* version 3 stats */ + u64 tx_unicast; + u64 tx_multicast; + u64 tx_broadcast; }; -#define NV_DEV_STATISTICS_V2_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) +#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64)) +#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3) #define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6) /* diagnostics */ @@ -1628,6 +1647,12 @@ static void nv_get_hw_stats(struct net_device *dev) np->estats.rx_pause += readl(base + NvRegRxPause); np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame); } + + if (np->driver_data & DEV_HAS_STATISTICS_V3) { + np->estats.tx_unicast += readl(base + NvRegTxUnicast); + np->estats.tx_multicast += readl(base + NvRegTxMulticast); + np->estats.tx_broadcast += readl(base + NvRegTxBroadcast); + } } /* @@ -1641,7 +1666,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev) struct fe_priv *np = netdev_priv(dev); /* If the nic supports hw counters then retrieve latest values */ - if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) { + if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) { nv_get_hw_stats(dev); /* copy to net_device stats */ @@ -2632,7 +2657,7 @@ static int nv_rx_process(struct net_device *dev, int limit) if (likely(flags & NV_RX_DESCRIPTORVALID)) { len = flags & LEN_MASK_V1; if (unlikely(flags & NV_RX_ERROR)) { - if (flags & NV_RX_ERROR4) { + if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) { len = nv_getlen(dev, skb->data, len); if (len < 0) { dev->stats.rx_errors++; @@ -2641,7 +2666,7 @@ static int nv_rx_process(struct net_device *dev, int limit) } } /* framing errors are soft errors */ - else if (flags & NV_RX_FRAMINGERR) { + else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { if (flags & NV_RX_SUBSTRACT1) { len--; } @@ -2667,7 +2692,7 @@ static int nv_rx_process(struct net_device *dev, int limit) if (likely(flags & NV_RX2_DESCRIPTORVALID)) { len = flags & LEN_MASK_V2; if (unlikely(flags & NV_RX2_ERROR)) { - if (flags & NV_RX2_ERROR4) { + if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { len = nv_getlen(dev, skb->data, len); if (len < 0) { dev->stats.rx_errors++; 
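/*
 * Editor's note (standalone sketch, not driver code): the nv_rx_process()
 * hunks above and below change "is this error bit set?" into "is this the
 * only error bit set?".  A length or framing problem is now treated as
 * recoverable only when no other error (CRC, overflow, ...) is flagged on
 * the same descriptor.  The bit values here are stand-ins, not the real
 * NV_RX_* constants:
 */
#include <stdbool.h>
#include <stdint.h>

#define ERR_LEN     (1u << 23)                /* stands in for NV_RX_ERROR4     */
#define ERR_FRAMING (1u << 29)                /* stands in for NV_RX_FRAMINGERR */
#define ERR_MASK    (ERR_LEN | ERR_FRAMING)   /* real mask ORs in all error bits */

static bool only_length_error(uint32_t flags)
{
        /* old test: (flags & ERR_LEN) -- true even if a CRC error is also set */
        return (flags & ERR_MASK) == ERR_LEN;
}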
@@ -2676,7 +2701,7 @@ static int nv_rx_process(struct net_device *dev, int limit) } } /* framing errors are soft errors */ - else if (flags & NV_RX2_FRAMINGERR) { + else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { if (flags & NV_RX2_SUBSTRACT1) { len--; } @@ -2766,7 +2791,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) if (likely(flags & NV_RX2_DESCRIPTORVALID)) { len = flags & LEN_MASK_V2; if (unlikely(flags & NV_RX2_ERROR)) { - if (flags & NV_RX2_ERROR4) { + if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) { len = nv_getlen(dev, skb->data, len); if (len < 0) { dev_kfree_skb(skb); @@ -2774,7 +2799,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) } } /* framing errors are soft errors */ - else if (flags & NV_RX2_FRAMINGERR) { + else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { if (flags & NV_RX2_SUBSTRACT1) { len--; } @@ -3053,8 +3078,11 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags) u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; - if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) + if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) { pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; + /* limit the number of tx pause frames to a default of 8 */ + writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit); + } writel(pause_enable, base + NvRegTxPauseFrame); writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; @@ -4740,6 +4768,8 @@ static int nv_get_sset_count(struct net_device *dev, int sset) return NV_DEV_STATISTICS_V1_COUNT; else if (np->driver_data & DEV_HAS_STATISTICS_V2) return NV_DEV_STATISTICS_V2_COUNT; + else if (np->driver_data & DEV_HAS_STATISTICS_V3) + return NV_DEV_STATISTICS_V3_COUNT; else return 0; default: @@ -5324,7 +5354,7 @@ static int nv_open(struct net_device *dev) mod_timer(&np->oom_kick, jiffies + OOM_REFILL); /* start statistics timer */ - if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) + if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) mod_timer(&np->stats_poll, round_jiffies(jiffies + STATS_INTERVAL)); @@ -5428,7 +5458,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i if (err < 0) goto out_disable; - if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2)) + if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) np->register_size = NV_PCI_REGSZ_VER3; else if (id->driver_data & DEV_HAS_STATISTICS_V1) np->register_size = NV_PCI_REGSZ_VER2; @@ -6083,35 +6113,35 @@ static struct pci_device_id pci_tbl[] = { }, { /* MCP77 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, }, { /* MCP77 
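/*
 * Editor's note (runs standalone, not driver code): the statistics hunks
 * above append three version-3 counters at the end of the ethtool stats
 * structure and re-derive the per-version counts from the structure size,
 * so the v1/v2 counts are unaffected by the addition.  The same size
 * arithmetic in miniature:
 */
#include <stdint.h>
#include <stdio.h>

struct stats {
        uint64_t a, b, c, d, e, f;   /* pretend these are the v2 counters ... */
        uint64_t x, y, z;            /* ... and v3 appends three more         */
};

#define V3_COUNT (sizeof(struct stats) / sizeof(uint64_t))
#define V2_COUNT (V3_COUNT - 3)

int main(void)
{
        printf("v3=%zu v2=%zu\n", V3_COUNT, V2_COUNT);   /* prints: v3=9 v2=6 */
        return 0;
}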
Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, }, { /* MCP77 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, }, { /* MCP77 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, }, { /* MCP79 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, }, { /* MCP79 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, }, { /* MCP79 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), - .driver_data 
= DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, }, { /* MCP79 Ethernet Controller */ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE, }, {0,}, }; diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index 0a97fc2..1c7ef81 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c @@ -126,7 +126,7 @@ out: #define FCC_NAPI_RX_EVENT_MSK (FCC_ENET_RXF | FCC_ENET_RXB) #define FCC_RX_EVENT (FCC_ENET_RXF) #define FCC_TX_EVENT (FCC_ENET_TXB) -#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE | FCC_ENET_BSY) +#define FCC_ERR_EVENT_MSK (FCC_ENET_TXE) static int setup_data(struct net_device *dev) { diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index b8394cf..ca6cf6e 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -414,9 +414,7 @@ static int gfar_suspend(struct platform_device *pdev, pm_message_t state) spin_unlock(&priv->rxlock); spin_unlock_irqrestore(&priv->txlock, flags); -#ifdef CONFIG_GFAR_NAPI napi_disable(&priv->napi); -#endif if (magic_packet) { /* Enable interrupt on Magic Packet */ @@ -469,9 +467,7 @@ static int gfar_resume(struct platform_device *pdev) netif_device_attach(dev); -#ifdef CONFIG_GFAR_NAPI napi_enable(&priv->napi); -#endif return 0; } diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 3249df5..b8e25c4 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -548,7 +548,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev) } printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, - (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ? + (tty_chars_in_buffer(ax->tty) || ax->xleft) ? 
"bad line quality" : "driver error"); ax->xleft = 0; diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index e098f23..bb823ac 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -850,7 +850,7 @@ void igb_update_mc_addr_list_82575(struct e1000_hw *hw, for (; mc_addr_count > 0; mc_addr_count--) { hash_value = igb_hash_mc_addr(hw, mc_addr_list); hw_dbg("Hash value = 0x%03X\n", hash_value); - hw->mac.ops.mta_set(hw, hash_value); + igb_mta_set(hw, hash_value); mc_addr_list += ETH_ALEN; } } @@ -1136,6 +1136,12 @@ static s32 igb_setup_fiber_serdes_link_82575(struct e1000_hw *hw) E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); } + + if (hw->mac.type == e1000_82576) { + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + igb_force_mac_fc(hw); + } + wr32(E1000_PCS_LCTL, reg); return 0; @@ -1232,70 +1238,6 @@ out: } /** - * igb_translate_register_82576 - Translate the proper register offset - * @reg: e1000 register to be read - * - * Registers in 82576 are located in different offsets than other adapters - * even though they function in the same manner. This function takes in - * the name of the register to read and returns the correct offset for - * 82576 silicon. - **/ -u32 igb_translate_register_82576(u32 reg) -{ - /* - * Some of the Kawela registers are located at different - * offsets than they are in older adapters. - * Despite the difference in location, the registers - * function in the same manner. - */ - switch (reg) { - case E1000_TDBAL(0): - reg = 0x0E000; - break; - case E1000_TDBAH(0): - reg = 0x0E004; - break; - case E1000_TDLEN(0): - reg = 0x0E008; - break; - case E1000_TDH(0): - reg = 0x0E010; - break; - case E1000_TDT(0): - reg = 0x0E018; - break; - case E1000_TXDCTL(0): - reg = 0x0E028; - break; - case E1000_RDBAL(0): - reg = 0x0C000; - break; - case E1000_RDBAH(0): - reg = 0x0C004; - break; - case E1000_RDLEN(0): - reg = 0x0C008; - break; - case E1000_RDH(0): - reg = 0x0C010; - break; - case E1000_RDT(0): - reg = 0x0C018; - break; - case E1000_RXDCTL(0): - reg = 0x0C028; - break; - case E1000_SRRCTL(0): - reg = 0x0C00C; - break; - default: - break; - } - - return reg; -} - -/** * igb_reset_init_script_82575 - Inits HW defaults after reset * @hw: pointer to the HW structure * diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index 2f848e5..c1928b5 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h @@ -28,7 +28,6 @@ #ifndef _E1000_82575_H_ #define _E1000_82575_H_ -u32 igb_translate_register_82576(u32 reg); void igb_update_mc_addr_list_82575(struct e1000_hw*, u8*, u32, u32, u32); extern void igb_shutdown_fiber_serdes_link_82575(struct e1000_hw *hw); extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw); diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index afdba3c..ce70068 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -257,6 +257,7 @@ #define E1000_PCS_LCTL_FDV_FULL 8 #define E1000_PCS_LCTL_FSD 0x10 #define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 #define E1000_PCS_LCTL_AN_ENABLE 0x10000 #define E1000_PCS_LCTL_AN_RESTART 0x20000 #define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index 19fa4ee..a65ccc3 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h @@ -420,7 +420,6 @@ struct e1000_mac_operations { void (*rar_set)(struct e1000_hw *, u8 *, u32); s32 
(*read_mac_addr)(struct e1000_hw *); s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *); - void (*mta_set)(struct e1000_hw *, u32); }; struct e1000_phy_operations { diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index 20408aa..e18747c 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c @@ -144,34 +144,6 @@ void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) } /** - * igb_init_rx_addrs - Initialize receive address's - * @hw: pointer to the HW structure - * @rar_count: receive address registers - * - * Setups the receive address registers by setting the base receive address - * register to the devices MAC address and clearing all the other receive - * address registers to 0. - **/ -void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) -{ - u32 i; - - /* Setup the receive address */ - hw_dbg("Programming MAC Address into RAR[0]\n"); - - hw->mac.ops.rar_set(hw, hw->mac.addr, 0); - - /* Zero out the other (rar_entry_count - 1) receive addresses */ - hw_dbg("Clearing RAR[1-%u]\n", rar_count-1); - for (i = 1; i < rar_count; i++) { - array_wr32(E1000_RA, (i << 1), 0); - wrfl(); - array_wr32(E1000_RA, ((i << 1) + 1), 0); - wrfl(); - } -} - -/** * igb_check_alt_mac_addr - Check for alternate MAC addr * @hw: pointer to the HW structure * @@ -271,7 +243,7 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) * current value is read, the new bit is OR'd in and the new value is * written back into the register. **/ -static void igb_mta_set(struct e1000_hw *hw, u32 hash_value) +void igb_mta_set(struct e1000_hw *hw, u32 hash_value) { u32 hash_bit, hash_reg, mta; @@ -297,60 +269,6 @@ static void igb_mta_set(struct e1000_hw *hw, u32 hash_value) } /** - * igb_update_mc_addr_list - Update Multicast addresses - * @hw: pointer to the HW structure - * @mc_addr_list: array of multicast addresses to program - * @mc_addr_count: number of multicast addresses to program - * @rar_used_count: the first RAR register free to program - * @rar_count: total number of supported Receive Address Registers - * - * Updates the Receive Address Registers and Multicast Table Array. - * The caller must have a packed mc_addr_list of multicast addresses. - * The parameter rar_count will usually be hw->mac.rar_entry_count - * unless there are workarounds that change this. - **/ -void igb_update_mc_addr_list(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 rar_used_count, u32 rar_count) -{ - u32 hash_value; - u32 i; - - /* - * Load the first set of multicast addresses into the exact - * filters (RAR). If there are not enough to fill the RAR - * array, clear the filters. - */ - for (i = rar_used_count; i < rar_count; i++) { - if (mc_addr_count) { - hw->mac.ops.rar_set(hw, mc_addr_list, i); - mc_addr_count--; - mc_addr_list += ETH_ALEN; - } else { - array_wr32(E1000_RA, i << 1, 0); - wrfl(); - array_wr32(E1000_RA, (i << 1) + 1, 0); - wrfl(); - } - } - - /* Clear the old settings from the MTA */ - hw_dbg("Clearing MTA\n"); - for (i = 0; i < hw->mac.mta_reg_count; i++) { - array_wr32(E1000_MTA, i, 0); - wrfl(); - } - - /* Load any remaining multicast addresses into the hash table. 
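/*
 * Editor's note (general pattern, not copied from this patch): with
 * igb_update_mc_addr_list() removed from e1000_mac.c, the 82575-specific
 * walk shown earlier hashes each multicast address and programs the
 * Multicast Table Array directly through igb_mta_set().  Intel MTAs are
 * usually addressed by splitting the hash into a register index and a bit
 * index, roughly like this (register count and layout are illustrative):
 */
#include <stdint.h>

#define MTA_REG_COUNT 128   /* 128 x 32-bit registers = 4096 hash buckets */

struct fake_mta { uint32_t reg[MTA_REG_COUNT]; };

static void mta_set(struct fake_mta *mta, uint32_t hash_value)
{
        uint32_t hash_reg = (hash_value >> 5) & (MTA_REG_COUNT - 1); /* which register   */
        uint32_t hash_bit = hash_value & 0x1f;                       /* which bit inside */

        mta->reg[hash_reg] |= 1u << hash_bit;
}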
*/ - for (; mc_addr_count > 0; mc_addr_count--) { - hash_value = igb_hash_mc_addr(hw, mc_addr_list); - hw_dbg("Hash value = 0x%03X\n", hash_value); - igb_mta_set(hw, hash_value); - mc_addr_list += ETH_ALEN; - } -} - -/** * igb_hash_mc_addr - Generate a multicast hash value * @hw: pointer to the HW structure * @mc_addr: pointer to a multicast address diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h index dc2f8cc..cbee6af 100644 --- a/drivers/net/igb/e1000_mac.h +++ b/drivers/net/igb/e1000_mac.h @@ -51,9 +51,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex); s32 igb_id_led_init(struct e1000_hw *hw); s32 igb_led_off(struct e1000_hw *hw); -void igb_update_mc_addr_list(struct e1000_hw *hw, - u8 *mc_addr_list, u32 mc_addr_count, - u32 rar_used_count, u32 rar_count); s32 igb_setup_link(struct e1000_hw *hw); s32 igb_validate_mdi_setting(struct e1000_hw *hw); s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, @@ -62,7 +59,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, void igb_clear_hw_cntrs_base(struct e1000_hw *hw); void igb_clear_vfta(struct e1000_hw *hw); void igb_config_collision_dist(struct e1000_hw *hw); -void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void igb_mta_set(struct e1000_hw *hw, u32 hash_value); void igb_put_hw_semaphore(struct e1000_hw *hw); void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); s32 igb_check_alt_mac_addr(struct e1000_hw *hw); diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index b95093d..95523af 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h @@ -262,9 +262,6 @@ #define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) #define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ -#define E1000_REGISTER(a, reg) (((a)->mac.type < e1000_82576) \ - ? reg : e1000_translate_register_82576(reg)) - #define wr32(reg, value) (writel(value, hw->hw_addr + reg)) #define rd32(reg) (readl(hw->hw_addr + reg)) #define wrfl() ((void)rd32(E1000_STATUS)) diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index b602c4d..8f66e15 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -311,7 +311,7 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); break; case e1000_82576: - /* Kawela uses a table-based method for assigning vectors. + /* The 82576 uses a table-based method for assigning vectors. Each queue has a single entry in the table to which we write a vector number along with a "valid" bit. Sadly, the layout of the table is somewhat counterintuitive. 
*/ @@ -720,28 +720,6 @@ static void igb_get_hw_control(struct igb_adapter *adapter) ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } -static void igb_init_manageability(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - - if (adapter->en_mng_pt) { - u32 manc2h = rd32(E1000_MANC2H); - u32 manc = rd32(E1000_MANC); - - /* enable receiving management packets to the host */ - /* this will probably generate destination unreachable messages - * from the host OS, but the packets will be handled on SMBUS */ - manc |= E1000_MANC_EN_MNG2HOST; -#define E1000_MNG2HOST_PORT_623 (1 << 5) -#define E1000_MNG2HOST_PORT_664 (1 << 6) - manc2h |= E1000_MNG2HOST_PORT_623; - manc2h |= E1000_MNG2HOST_PORT_664; - wr32(E1000_MANC2H, manc2h); - - wr32(E1000_MANC, manc); - } -} - /** * igb_configure - configure the hardware for RX and TX * @adapter: private board structure @@ -755,7 +733,6 @@ static void igb_configure(struct igb_adapter *adapter) igb_set_multi(netdev); igb_restore_vlan(adapter); - igb_init_manageability(adapter); igb_configure_tx(adapter); igb_setup_rctl(adapter); @@ -1372,7 +1349,8 @@ static void __devexit igb_remove(struct pci_dev *pdev) unregister_netdev(netdev); - if (!igb_check_reset_block(&adapter->hw)) + if (adapter->hw.phy.ops.reset_phy && + !igb_check_reset_block(&adapter->hw)) adapter->hw.phy.ops.reset_phy(&adapter->hw); igb_remove_device(&adapter->hw); @@ -4523,8 +4501,6 @@ static void igb_io_resume(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct igb_adapter *adapter = netdev_priv(netdev); - igb_init_manageability(adapter); - if (netif_running(netdev)) { if (igb_up(adapter)) { dev_err(&pdev->dev, "igb_up failed after reset\n"); diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c index 591a7e4..83fa9d8 100644 --- a/drivers/net/lp486e.c +++ b/drivers/net/lp486e.c @@ -1272,8 +1272,6 @@ static void set_multicast_list(struct net_device *dev) { return; } if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) { - if (dev->flags & IFF_ALLMULTI) - dev->flags |= IFF_PROMISC; lp->i596_config[8] &= ~0x01; } else { lp->i596_config[8] |= 0x01; diff --git a/drivers/net/meth.c b/drivers/net/meth.c index 4cb364e..0a97c26 100644 --- a/drivers/net/meth.c +++ b/drivers/net/meth.c @@ -100,7 +100,7 @@ static inline void load_eaddr(struct net_device *dev) DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); macaddr = 0; for (i = 0; i < 6; i++) - macaddr |= dev->dev_addr[i] << ((5 - i) * 8); + macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); mace->eth.mac_addr = macaddr; } diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 3ab0e52..f1de38f 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -3699,6 +3699,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); goto abort_with_netdev; } + (void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), &mgp->cmd_bus, GFP_KERNEL); if (mgp->cmd == NULL) diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h index fdbeeee..9937210 100644 --- a/drivers/net/myri10ge/myri10ge_mcp.h +++ b/drivers/net/myri10ge/myri10ge_mcp.h @@ -101,6 +101,8 @@ struct mcp_kreq_ether_recv { #define MXGEFW_ETH_SEND_3 0x2c0000 #define MXGEFW_ETH_RECV_SMALL 0x300000 #define MXGEFW_ETH_RECV_BIG 0x340000 +#define MXGEFW_ETH_SEND_GO 0x380000 +#define MXGEFW_ETH_SEND_STOP 
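/*
 * Editor's note (standalone demonstration): the one-line meth.c change
 * above is an integer-promotion fix.  dev_addr[i] promotes to a 32-bit int,
 * so shifting it left by 40 or 32 bits (i = 0 or 1) is undefined and drops
 * the top bytes of the MAC address; widening to 64 bits before the shift is
 * exactly what the (u64) cast buys.  The MAC address below is made up:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t addr[6] = { 0x00, 0x0a, 0x1b, 0x2c, 0x3d, 0x4e };
        uint64_t mac = 0;
        int i;

        for (i = 0; i < 6; i++)
                mac |= (uint64_t)addr[i] << ((5 - i) * 8);   /* cast first, then shift */

        printf("%012llx\n", (unsigned long long)mac);        /* 000a1b2c3d4e */
        return 0;
}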
0x3C0000 #define MXGEFW_ETH_SEND(n) (0x200000 + (((n) & 0x03) * 0x40000)) #define MXGEFW_ETH_SEND_OFFSET(n) (MXGEFW_ETH_SEND(n) - MXGEFW_ETH_SEND_4) @@ -120,6 +122,11 @@ enum myri10ge_mcp_cmd_type { * MXGEFW_CMD_RESET is issued */ MXGEFW_CMD_SET_INTRQ_DMA, + /* data0 = LSW of the host address + * data1 = MSW of the host address + * data2 = slice number if multiple slices are used + */ + MXGEFW_CMD_SET_BIG_BUFFER_SIZE, /* in bytes, power of 2 */ MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, /* in bytes */ @@ -129,6 +136,8 @@ enum myri10ge_mcp_cmd_type { MXGEFW_CMD_GET_SEND_OFFSET, MXGEFW_CMD_GET_SMALL_RX_OFFSET, MXGEFW_CMD_GET_BIG_RX_OFFSET, + /* data0 = slice number if multiple slices are used */ + MXGEFW_CMD_GET_IRQ_ACK_OFFSET, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, @@ -200,7 +209,12 @@ enum myri10ge_mcp_cmd_type { MXGEFW_CMD_SET_STATS_DMA_V2, /* data0, data1 = bus addr, * data2 = sizeof(struct mcp_irq_data) from driver point of view, allows - * adding new stuff to mcp_irq_data without changing the ABI */ + * adding new stuff to mcp_irq_data without changing the ABI + * + * If multiple slices are used, data2 contains both the size of the + * structure (in the lower 16 bits) and the slice number + * (in the upper 16 bits). + */ MXGEFW_CMD_UNALIGNED_TEST, /* same than DMA_TEST (same args) but abort with UNALIGNED on unaligned @@ -222,13 +236,18 @@ enum myri10ge_mcp_cmd_type { MXGEFW_CMD_GET_MAX_RSS_QUEUES, MXGEFW_CMD_ENABLE_RSS_QUEUES, /* data0 = number of slices n (0, 1, ..., n-1) to enable - * data1 = interrupt mode. - * 0=share one INTx/MSI, 1=use one MSI-X per queue. + * data1 = interrupt mode | use of multiple transmit queues. + * 0=share one INTx/MSI. + * 1=use one MSI-X per queue. * If all queues share one interrupt, the driver must have set * RSS_SHARED_INTERRUPT_DMA before enabling queues. + * 2=enable both receive and send queues. + * Without this bit set, only one send queue (slice 0's send queue) + * is enabled. The receive queues are always enabled. */ -#define MXGEFW_SLICE_INTR_MODE_SHARED 0 -#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 1 +#define MXGEFW_SLICE_INTR_MODE_SHARED 0x0 +#define MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE 0x1 +#define MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2 MXGEFW_CMD_GET_RSS_SHARED_INTERRUPT_MASK_OFFSET, MXGEFW_CMD_SET_RSS_SHARED_INTERRUPT_DMA, @@ -250,10 +269,13 @@ enum myri10ge_mcp_cmd_type { * 2: TCP_IPV4 (required by RSS) * 3: IPV4 | TCP_IPV4 (required by RSS) * 4: source port + * 5: source port + destination port */ #define MXGEFW_RSS_HASH_TYPE_IPV4 0x1 #define MXGEFW_RSS_HASH_TYPE_TCP_IPV4 0x2 #define MXGEFW_RSS_HASH_TYPE_SRC_PORT 0x4 +#define MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT 0x5 +#define MXGEFW_RSS_HASH_TYPE_MAX 0x5 MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, /* Return data = the max. size of the entire headers of a IPv6 TSO packet. 
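/*
 * Editor's note (one reading of the comment above, not firmware code):
 * MXGEFW_CMD_ENABLE_RSS_QUEUES now describes data1 as the interrupt mode
 * OR-ed with the new "enable multiple transmit queues" bit; without that
 * bit only slice 0's send queue is enabled.  A driver might compose the
 * word like this (function and parameter names are hypothetical):
 */
#include <stdint.h>

#define SLICE_INTR_MODE_SHARED          0x0
#define SLICE_INTR_MODE_ONE_PER_SLICE   0x1
#define SLICE_ENABLE_MULTIPLE_TX_QUEUES 0x2

static uint32_t rss_enable_data1(int msix_per_slice, int tx_queue_per_slice)
{
        uint32_t data1 = msix_per_slice ? SLICE_INTR_MODE_ONE_PER_SLICE
                                        : SLICE_INTR_MODE_SHARED;

        if (tx_queue_per_slice)
                data1 |= SLICE_ENABLE_MULTIPLE_TX_QUEUES;
        return data1;
}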
@@ -329,6 +351,20 @@ enum myri10ge_mcp_cmd_type { MXGEFW_CMD_GET_DCA_OFFSET, /* offset of dca control for WDMAs */ + + /* VMWare NetQueue commands */ + MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE, + MXGEFW_CMD_NETQ_ADD_FILTER, + /* data0 = filter_id << 16 | queue << 8 | type */ + /* data1 = MS4 of MAC Addr */ + /* data2 = LS2_MAC << 16 | VLAN_tag */ + MXGEFW_CMD_NETQ_DEL_FILTER, + /* data0 = filter_id */ + MXGEFW_CMD_NETQ_QUERY1, + MXGEFW_CMD_NETQ_QUERY2, + MXGEFW_CMD_NETQ_QUERY3, + MXGEFW_CMD_NETQ_QUERY4, + }; enum myri10ge_mcp_cmd_status { @@ -381,4 +417,10 @@ struct mcp_irq_data { u8 valid; }; +/* definitions for NETQ filter type */ +#define MXGEFW_NETQ_FILTERTYPE_NONE 0 +#define MXGEFW_NETQ_FILTERTYPE_MACADDR 1 +#define MXGEFW_NETQ_FILTERTYPE_VLAN 2 +#define MXGEFW_NETQ_FILTERTYPE_VLANMACADDR 3 + #endif /* __MYRI10GE_MCP_H__ */ diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h index 07d65c2..a8662ea 100644 --- a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h +++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h @@ -35,7 +35,7 @@ struct mcp_gen_header { unsigned char mcp_index; unsigned char disable_rabbit; unsigned char unaligned_tlp; - unsigned char pad1; + unsigned char pcie_link_algo; unsigned counters_addr; unsigned copy_block_info; /* for small mcps loaded with "lload -d" */ unsigned short handoff_id_major; /* must be equal */ diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h index 8e73661..93a7b9b 100644 --- a/drivers/net/netxen/netxen_nic.h +++ b/drivers/net/netxen/netxen_nic.h @@ -508,6 +508,8 @@ typedef enum { NETXEN_BRDTYPE_P3_10000_BASE_T = 0x0027, NETXEN_BRDTYPE_P3_XG_LOM = 0x0028, NETXEN_BRDTYPE_P3_4_GB_MM = 0x0029, + NETXEN_BRDTYPE_P3_10G_SFP_CT = 0x002a, + NETXEN_BRDTYPE_P3_10G_SFP_QT = 0x002b, NETXEN_BRDTYPE_P3_10G_CX4 = 0x0031, NETXEN_BRDTYPE_P3_10G_XFP = 0x0032 @@ -1170,6 +1172,36 @@ typedef struct { nx_nic_intr_coalesce_data_t irq; } nx_nic_intr_coalesce_t; +#define NX_HOST_REQUEST 0x13 +#define NX_NIC_REQUEST 0x14 + +#define NX_MAC_EVENT 0x1 + +enum { + NX_NIC_H2C_OPCODE_START = 0, + NX_NIC_H2C_OPCODE_CONFIG_RSS, + NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL, + NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE, + NX_NIC_H2C_OPCODE_CONFIG_LED, + NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS, + NX_NIC_H2C_OPCODE_CONFIG_L2_MAC, + NX_NIC_H2C_OPCODE_LRO_REQUEST, + NX_NIC_H2C_OPCODE_GET_SNMP_STATS, + NX_NIC_H2C_OPCODE_PROXY_START_REQUEST, + NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST, + NX_NIC_H2C_OPCODE_PROXY_SET_MTU, + NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE, + NX_H2P_OPCODE_GET_FINGER_PRINT_REQUEST, + NX_H2P_OPCODE_INSTALL_LICENSE_REQUEST, + NX_H2P_OPCODE_GET_LICENSE_CAPABILITY_REQUEST, + NX_NIC_H2C_OPCODE_GET_NET_STATS, + NX_NIC_H2C_OPCODE_LAST +}; + +#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */ +#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */ +#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */ + typedef struct { u64 qhdr; u64 req_hdr; @@ -1288,7 +1320,7 @@ struct netxen_adapter { int (*disable_phy_interrupts) (struct netxen_adapter *); int (*macaddr_set) (struct netxen_adapter *, netxen_ethernet_macaddr_t); int (*set_mtu) (struct netxen_adapter *, int); - int (*set_promisc) (struct netxen_adapter *, netxen_niu_prom_mode_t); + int (*set_promisc) (struct netxen_adapter *, u32); int (*phy_read) (struct netxen_adapter *, long reg, u32 *); int (*phy_write) (struct netxen_adapter *, long reg, u32 val); int (*init_port) (struct netxen_adapter *, int); @@ -1465,9 +1497,10 @@ int 
netxen_process_cmd_ring(struct netxen_adapter *adapter); u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); void netxen_p2_nic_set_multi(struct net_device *netdev); void netxen_p3_nic_set_multi(struct net_device *netdev); +int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32); int netxen_config_intr_coalesce(struct netxen_adapter *adapter); -u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu); +int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); int netxen_nic_set_mac(struct net_device *netdev, void *p); @@ -1502,7 +1535,9 @@ static const struct netxen_brdinfo netxen_boards[] = { {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"}, {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"}, {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"}, - {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "Quad GB - March Madness"}, + {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"}, + {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"}, + {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"}, {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"}, {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"} }; diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c index 64babc5..64b5164 100644 --- a/drivers/net/netxen/netxen_nic_ctx.c +++ b/drivers/net/netxen/netxen_nic_ctx.c @@ -145,8 +145,8 @@ netxen_issue_cmd(struct netxen_adapter *adapter, return rcode; } -u32 -nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu) +int +nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) { u32 rcode = NX_RCODE_SUCCESS; struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0]; @@ -160,7 +160,10 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu) 0, NX_CDRP_CMD_SET_MTU); - return rcode; + if (rcode != NX_RCODE_SUCCESS) + return -EIO; + + return 0; } static int diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c index 48ee06b..4ad3e08 100644 --- a/drivers/net/netxen/netxen_nic_ethtool.c +++ b/drivers/net/netxen/netxen_nic_ethtool.c @@ -140,18 +140,33 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) if (netif_running(dev)) { ecmd->speed = adapter->link_speed; ecmd->duplex = adapter->link_duplex; - } else - return -EIO; /* link absent */ + ecmd->autoneg = adapter->link_autoneg; + } + } else if (adapter->ahw.board_type == NETXEN_NIC_XGBE) { - ecmd->supported = (SUPPORTED_TP | - SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full); - ecmd->advertising = (ADVERTISED_TP | - ADVERTISED_1000baseT_Full | - ADVERTISED_10000baseT_Full); + u32 val; + + adapter->hw_read_wx(adapter, NETXEN_PORT_MODE_ADDR, &val, 4); + if (val == NETXEN_PORT_MODE_802_3_AP) { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } else { + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->advertising = ADVERTISED_10000baseT_Full; + } + ecmd->port = PORT_TP; - ecmd->speed = SPEED_10000; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + u16 pcifn = adapter->ahw.pci_func; + + adapter->hw_read_wx(adapter, + P3_LINK_SPEED_REG(pcifn), &val, 4); + ecmd->speed = P3_LINK_SPEED_MHZ * + P3_LINK_SPEED_VAL(pcifn, val); + } else + ecmd->speed = SPEED_10000; + ecmd->duplex = DUPLEX_FULL; ecmd->autoneg = AUTONEG_DISABLE; } else @@ -192,6 +207,8 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) break; case 
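/*
 * Editor's note (pattern in miniature, names hypothetical): the
 * nx_fw_cmd_set_mtu() hunk above converts a raw firmware response into a
 * normal kernel error code, and netxen_nic_change_mtu() (further down in
 * this patch) only commits the new MTU to the net_device when that call
 * succeeds.  Zero stands in for NX_RCODE_SUCCESS here:
 */
#include <errno.h>

static int fw_status_to_errno(int fw_rcode)
{
        return fw_rcode == 0 ? 0 : -EIO;
}

static int change_mtu(int *stored_mtu, int new_mtu, int fw_rcode)
{
        int rc = fw_status_to_errno(fw_rcode);

        if (rc == 0)
                *stored_mtu = new_mtu;   /* commit only after the firmware accepted it */
        return rc;
}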
NETXEN_BRDTYPE_P2_SB31_10G: case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: + case NETXEN_BRDTYPE_P3_10G_SFP_CT: + case NETXEN_BRDTYPE_P3_10G_SFP_QT: case NETXEN_BRDTYPE_P3_10G_XFP: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h index 3ce13e4..e8e8d73 100644 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ b/drivers/net/netxen/netxen_nic_hdr.h @@ -724,6 +724,13 @@ enum { #define XG_LINK_STATE_P3(pcifn,val) \ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK) +#define P3_LINK_SPEED_MHZ 100 +#define P3_LINK_SPEED_MASK 0xff +#define P3_LINK_SPEED_REG(pcifn) \ + (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4)) +#define P3_LINK_SPEED_VAL(pcifn, reg) \ + (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK) + #define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000) #define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg)) #define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150)) @@ -836,9 +843,11 @@ enum { #define PCIE_SETUP_FUNCTION (0x12040) #define PCIE_SETUP_FUNCTION2 (0x12048) +#define PCIE_MISCCFG_RC (0x1206c) #define PCIE_TGT_SPLIT_CHICKEN (0x12080) #define PCIE_CHICKEN3 (0x120c8) +#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC)) #define PCIE_MAX_MASTER_SPLIT (0x14048) #define NETXEN_PORT_MODE_NONE 0 @@ -854,6 +863,7 @@ enum { #define NETXEN_CAM_RAM_DMA_WATCHDOG_CTRL (0x14) #define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC))) +#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) /* * PCI Interrupt Vector Values. diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 96a3bc6..9aa20f9 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -285,14 +285,7 @@ static unsigned crb_hub_agt[64] = #define ADDR_IN_RANGE(addr, low, high) \ (((addr) <= (high)) && ((addr) >= (low))) -#define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE -#define NETXEN_MIN_MTU 64 -#define NETXEN_ETH_FCS_SIZE 4 -#define NETXEN_ENET_HEADER_SIZE 14 #define NETXEN_WINDOW_ONE 0x2000000 /*CRB Window: bit 25 of CRB address */ -#define NETXEN_FIRMWARE_LEN ((16 * 1024) / 4) -#define NETXEN_NIU_HDRSIZE (0x1 << 6) -#define NETXEN_NIU_TLRSIZE (0x1 << 5) #define NETXEN_NIC_ZERO_PAUSE_ADDR 0ULL #define NETXEN_NIC_UNIT_PAUSE_ADDR 0x200ULL @@ -541,9 +534,6 @@ netxen_send_cmd_descs(struct netxen_adapter *adapter, return 0; } -#define NIC_REQUEST 0x14 -#define NETXEN_MAC_EVENT 0x1 - static int nx_p3_sre_macaddr_change(struct net_device *dev, u8 *addr, unsigned op) { @@ -553,8 +543,8 @@ static int nx_p3_sre_macaddr_change(struct net_device *dev, int rv; memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr |= (NIC_REQUEST << 23); - req.req_hdr |= NETXEN_MAC_EVENT; + req.qhdr |= (NX_NIC_REQUEST << 23); + req.req_hdr |= NX_MAC_EVENT; req.req_hdr |= ((u64)adapter->portnum << 16); mac_req.op = op; memcpy(&mac_req.mac_addr, addr, 6); @@ -575,31 +565,35 @@ void netxen_p3_nic_set_multi(struct net_device *netdev) nx_mac_list_t *cur, *next, *del_list, *add_list = NULL; struct dev_mc_list *mc_ptr; u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - - adapter->set_promisc(adapter, NETXEN_NIU_PROMISC_MODE); - - /* - * Programming mac addresses will automaticly enabling L2 filtering. - * HW will replace timestamp with L2 conid when L2 filtering is - * enabled. This causes problem for LSA. Do not enabling L2 filtering - * until that problem is fixed. 
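/*
 * Editor's note (restates the macros above): the new P3_LINK_SPEED_*
 * definitions pack one 8-bit value per PCI function, four functions to a
 * 32-bit register; the speed reported to ethtool is that value times
 * P3_LINK_SPEED_MHZ (100).  Reading a function's speed is a shift, a mask
 * and a scale:
 */
#include <stdint.h>

#define LINK_SPEED_SCALE 100    /* mirrors P3_LINK_SPEED_MHZ */
#define LINK_SPEED_MASK  0xffu

static unsigned int link_speed_from_reg(uint32_t reg, unsigned int pcifn)
{
        return LINK_SPEED_SCALE * ((reg >> (8 * (pcifn & 0x3))) & LINK_SPEED_MASK);
}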
- */ - if ((netdev->flags & IFF_PROMISC) || - (netdev->mc_count > adapter->max_mc_count)) - return; + u32 mode = VPORT_MISS_MODE_DROP; del_list = adapter->mac_list; adapter->mac_list = NULL; nx_p3_nic_add_mac(adapter, netdev->dev_addr, &add_list, &del_list); + nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list); + + if (netdev->flags & IFF_PROMISC) { + mode = VPORT_MISS_MODE_ACCEPT_ALL; + goto send_fw_cmd; + } + + if ((netdev->flags & IFF_ALLMULTI) || + (netdev->mc_count > adapter->max_mc_count)) { + mode = VPORT_MISS_MODE_ACCEPT_MULTI; + goto send_fw_cmd; + } + if (netdev->mc_count > 0) { - nx_p3_nic_add_mac(adapter, bcast_addr, &add_list, &del_list); for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &add_list, &del_list); } } + +send_fw_cmd: + adapter->set_promisc(adapter, mode); for (cur = del_list; cur;) { nx_p3_sre_macaddr_change(netdev, cur->mac_addr, NETXEN_MAC_DEL); next = cur->next; @@ -615,6 +609,21 @@ void netxen_p3_nic_set_multi(struct net_device *netdev) } } +int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode) +{ + nx_nic_req_t req; + + memset(&req, 0, sizeof(nx_nic_req_t)); + + req.qhdr |= (NX_HOST_REQUEST << 23); + req.req_hdr |= NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE; + req.req_hdr |= ((u64)adapter->portnum << 16); + req.words[0] = cpu_to_le64(mode); + + return netxen_send_cmd_descs(adapter, + (struct cmd_desc_type0 *)&req, 1); +} + #define NETXEN_CONFIG_INTR_COALESCE 3 /* @@ -627,7 +636,7 @@ int netxen_config_intr_coalesce(struct netxen_adapter *adapter) memset(&req, 0, sizeof(nx_nic_req_t)); - req.qhdr |= (NIC_REQUEST << 23); + req.qhdr |= (NX_NIC_REQUEST << 23); req.req_hdr |= NETXEN_CONFIG_INTR_COALESCE; req.req_hdr |= ((u64)adapter->portnum << 16); @@ -653,6 +662,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu) { struct netxen_adapter *adapter = netdev_priv(netdev); int max_mtu; + int rc = 0; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) max_mtu = P3_MAX_MTU; @@ -666,16 +676,12 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu) } if (adapter->set_mtu) - adapter->set_mtu(adapter, mtu); - netdev->mtu = mtu; + rc = adapter->set_mtu(adapter, mtu); - mtu += MTU_FUDGE_FACTOR; - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - nx_fw_cmd_set_mtu(adapter, mtu); - else if (adapter->set_mtu) - adapter->set_mtu(adapter, mtu); + if (!rc) + netdev->mtu = mtu; - return 0; + return rc; } int netxen_is_flash_supported(struct netxen_adapter *adapter) @@ -1411,7 +1417,8 @@ static int netxen_nic_pci_mem_read_direct(struct netxen_adapter *adapter, (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { write_unlock_irqrestore(&adapter->adapter_lock, flags); printk(KERN_ERR "%s out of bound pci memory access. " - "offset is 0x%llx\n", netxen_nic_driver_name, off); + "offset is 0x%llx\n", netxen_nic_driver_name, + (unsigned long long)off); return -1; } @@ -1484,7 +1491,8 @@ netxen_nic_pci_mem_write_direct(struct netxen_adapter *adapter, u64 off, (netxen_nic_pci_is_same_window(adapter, off+size-1) == 0)) { write_unlock_irqrestore(&adapter->adapter_lock, flags); printk(KERN_ERR "%s out of bound pci memory access. 
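/*
 * Editor's note (logic restated, standalone): the rewritten
 * netxen_p3_nic_set_multi() above no longer forces promiscuous mode.  It
 * programs exact MAC filters for the addresses it knows about and then
 * tells the firmware what to do with frames that match none of them, using
 * the VPORT_MISS_MODE_* values defined earlier in this patch:
 */
#include <stdbool.h>

#define MISS_MODE_DROP         0   /* drop all unmatched frames    */
#define MISS_MODE_ACCEPT_ALL   1   /* accept every frame           */
#define MISS_MODE_ACCEPT_MULTI 2   /* accept unmatched multicast   */

static int pick_miss_mode(bool promisc, bool allmulti, int mc_count, int max_mc)
{
        if (promisc)
                return MISS_MODE_ACCEPT_ALL;
        if (allmulti || mc_count > max_mc)
                return MISS_MODE_ACCEPT_MULTI;
        return MISS_MODE_DROP;   /* everything else hits an exact filter */
}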
" - "offset is 0x%llx\n", netxen_nic_driver_name, off); + "offset is 0x%llx\n", netxen_nic_driver_name, + (unsigned long long)off); return -1; } @@ -2016,6 +2024,8 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) case NETXEN_BRDTYPE_P3_10G_CX4_LP: case NETXEN_BRDTYPE_P3_IMEZ: case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: + case NETXEN_BRDTYPE_P3_10G_SFP_CT: + case NETXEN_BRDTYPE_P3_10G_SFP_QT: case NETXEN_BRDTYPE_P3_10G_XFP: case NETXEN_BRDTYPE_P3_10000_BASE_T: @@ -2034,6 +2044,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) default: printk("%s: Unknown(%x)\n", netxen_nic_driver_name, boardinfo->board_type); + rv = -ENODEV; break; } @@ -2044,6 +2055,7 @@ int netxen_nic_get_board_info(struct netxen_adapter *adapter) int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) { + new_mtu += MTU_FUDGE_FACTOR; netxen_nic_write_w0(adapter, NETXEN_NIU_GB_MAX_FRAME_SIZE(adapter->physical_port), new_mtu); @@ -2052,7 +2064,7 @@ int netxen_nic_set_mtu_gb(struct netxen_adapter *adapter, int new_mtu) int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu) { - new_mtu += NETXEN_NIU_HDRSIZE + NETXEN_NIU_TLRSIZE; + new_mtu += MTU_FUDGE_FACTOR; if (adapter->physical_port == 0) netxen_nic_write_w0(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu); @@ -2074,12 +2086,22 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) __u32 status; __u32 autoneg; __u32 mode; + __u32 port_mode; netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); if (netxen_get_niu_enable_ge(mode)) { /* Gb 10/100/1000 Mbps mode */ + + adapter->hw_read_wx(adapter, + NETXEN_PORT_MODE_ADDR, &port_mode, 4); + if (port_mode == NETXEN_PORT_MODE_802_3_AP) { + adapter->link_speed = SPEED_1000; + adapter->link_duplex = DUPLEX_FULL; + adapter->link_autoneg = AUTONEG_DISABLE; + return; + } + if (adapter->phy_read - && adapter-> - phy_read(adapter, + && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, &status) == 0) { if (netxen_get_phy_link(status)) { @@ -2109,8 +2131,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter) break; } if (adapter->phy_read - && adapter-> - phy_read(adapter, + && adapter->phy_read(adapter, NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, &autoneg) != 0) adapter->link_autoneg = autoneg; diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h index b8e0030..aae737d 100644 --- a/drivers/net/netxen/netxen_nic_hw.h +++ b/drivers/net/netxen/netxen_nic_hw.h @@ -419,12 +419,9 @@ typedef enum { #define netxen_get_niu_enable_ge(config_word) \ _netxen_crb_get_bit(config_word, 1) -/* Promiscous mode options (GbE mode only) */ -typedef enum { - NETXEN_NIU_PROMISC_MODE = 0, - NETXEN_NIU_NON_PROMISC_MODE, - NETXEN_NIU_ALLMULTI_MODE -} netxen_niu_prom_mode_t; +#define NETXEN_NIU_NON_PROMISC_MODE 0 +#define NETXEN_NIU_PROMISC_MODE 1 +#define NETXEN_NIU_ALLMULTI_MODE 2 /* * NIU GB Drop CRC Register @@ -471,9 +468,9 @@ typedef enum { /* Set promiscuous mode for a GbE interface */ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, - netxen_niu_prom_mode_t mode); + u32 mode); int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, - netxen_niu_prom_mode_t mode); + u32 mode); /* set the MAC address for a given MAC */ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 01ab31b3..519fc86 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -364,6 
+364,11 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter) default: break; } + + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + adapter->set_mtu = nx_fw_cmd_set_mtu; + adapter->set_promisc = netxen_p3_nic_set_promisc; + } } /* diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 91d209a..7615c71 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -166,7 +166,8 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter) if (!NETXEN_IS_MSI_FAMILY(adapter)) { do { adapter->pci_write_immediate(adapter, - ISR_INT_TARGET_STATUS, 0xffffffff); + adapter->legacy_intr.tgt_status_reg, + 0xffffffff); mask = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); if (!(mask & 0x80)) @@ -175,7 +176,7 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter) } while (--retries); if (!retries) { - printk(KERN_NOTICE "%s: Failed to disable interrupt completely\n", + printk(KERN_NOTICE "%s: Failed to disable interrupt\n", netxen_nic_driver_name); } } else { @@ -190,8 +191,6 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter) { u32 mask; - DPRINTK(1, INFO, "Entered ISR Enable \n"); - if (adapter->intr_scheme != -1 && adapter->intr_scheme != INTR_SCHEME_PERPORT) { switch (adapter->ahw.board_type) { @@ -213,16 +212,13 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter) if (!NETXEN_IS_MSI_FAMILY(adapter)) { mask = 0xbff; - if (adapter->intr_scheme != -1 && - adapter->intr_scheme != INTR_SCHEME_PERPORT) { + if (adapter->intr_scheme == INTR_SCHEME_PERPORT) + adapter->pci_write_immediate(adapter, + adapter->legacy_intr.tgt_mask_reg, mask); + else adapter->pci_write_normalize(adapter, CRB_INT_VECTOR, 0); - } - adapter->pci_write_immediate(adapter, - ISR_INT_TARGET_MASK, mask); } - - DPRINTK(1, INFO, "Done with enable Int\n"); } static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) @@ -284,6 +280,8 @@ static void netxen_check_options(struct netxen_adapter *adapter) case NETXEN_BRDTYPE_P3_10G_CX4_LP: case NETXEN_BRDTYPE_P3_IMEZ: case NETXEN_BRDTYPE_P3_10G_SFP_PLUS: + case NETXEN_BRDTYPE_P3_10G_SFP_QT: + case NETXEN_BRDTYPE_P3_10G_SFP_CT: case NETXEN_BRDTYPE_P3_10G_XFP: case NETXEN_BRDTYPE_P3_10000_BASE_T: adapter->msix_supported = !!use_msi_x; @@ -301,6 +299,10 @@ static void netxen_check_options(struct netxen_adapter *adapter) case NETXEN_BRDTYPE_P3_REF_QG: case NETXEN_BRDTYPE_P3_4_GB: case NETXEN_BRDTYPE_P3_4_GB_MM: + adapter->msix_supported = 0; + adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; + break; + case NETXEN_BRDTYPE_P2_SB35_4G: case NETXEN_BRDTYPE_P2_SB31_2G: adapter->msix_supported = 0; @@ -700,13 +702,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->status &= ~NETXEN_NETDEV_STATUS; adapter->rx_csum = 1; adapter->mc_enabled = 0; - if (NX_IS_REVISION_P3(revision_id)) { + if (NX_IS_REVISION_P3(revision_id)) adapter->max_mc_count = 38; - adapter->max_rds_rings = 2; - } else { + else adapter->max_mc_count = 16; - adapter->max_rds_rings = 3; - } netdev->open = netxen_nic_open; netdev->stop = netxen_nic_close; @@ -779,10 +778,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (adapter->portnum == 0) first_driver = 1; } - adapter->crb_addr_cmd_producer = crb_cmd_producer[adapter->portnum]; - adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum]; - netxen_nic_update_cmd_producer(adapter, 0); - netxen_nic_update_cmd_consumer(adapter, 0); if 
(first_driver) { first_boot = adapter->pci_read_normalize(adapter, @@ -1053,6 +1048,11 @@ static int netxen_nic_open(struct net_device *netdev) return -EIO; } + if (adapter->fw_major < 4) + adapter->max_rds_rings = 3; + else + adapter->max_rds_rings = 2; + err = netxen_alloc_sw_resources(adapter); if (err) { printk(KERN_ERR "%s: Error in setting sw resources\n", @@ -1074,10 +1074,10 @@ static int netxen_nic_open(struct net_device *netdev) crb_cmd_producer[adapter->portnum]; adapter->crb_addr_cmd_consumer = crb_cmd_consumer[adapter->portnum]; - } - netxen_nic_update_cmd_producer(adapter, 0); - netxen_nic_update_cmd_consumer(adapter, 0); + netxen_nic_update_cmd_producer(adapter, 0); + netxen_nic_update_cmd_consumer(adapter, 0); + } for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { for (ring = 0; ring < adapter->max_rds_rings; ring++) @@ -1113,9 +1113,7 @@ static int netxen_nic_open(struct net_device *netdev) netxen_nic_set_link_parameters(adapter); netdev->set_multicast_list(netdev); - if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) - nx_fw_cmd_set_mtu(adapter, netdev->mtu); - else + if (adapter->set_mtu) adapter->set_mtu(adapter, netdev->mtu); mod_timer(&adapter->watchdog_timer, jiffies); @@ -1410,20 +1408,17 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter) port = adapter->physical_port; - if (adapter->ahw.board_type == NETXEN_NIC_GBE) { - val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); - linkup = (val >> port) & 1; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + val = adapter->pci_read_normalize(adapter, CRB_XG_STATE_P3); + val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); + linkup = (val == XG_LINK_UP_P3); } else { - if (adapter->fw_major < 4) { - val = adapter->pci_read_normalize(adapter, - CRB_XG_STATE); + val = adapter->pci_read_normalize(adapter, CRB_XG_STATE); + if (adapter->ahw.board_type == NETXEN_NIC_GBE) + linkup = (val >> port) & 1; + else { val = (val >> port*8) & 0xff; linkup = (val == XG_LINK_UP); - } else { - val = adapter->pci_read_normalize(adapter, - CRB_XG_STATE_P3); - val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); - linkup = (val == XG_LINK_UP_P3); } } @@ -1535,15 +1530,33 @@ static irqreturn_t netxen_intr(int irq, void *data) struct netxen_adapter *adapter = data; u32 our_int = 0; - our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); - /* not our interrupt */ - if ((our_int & (0x80 << adapter->portnum)) == 0) + u32 status = 0; + + status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); + + if (!(status & adapter->legacy_intr.int_vec_bit)) return IRQ_NONE; - if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { - /* claim interrupt */ - adapter->pci_write_normalize(adapter, CRB_INT_VECTOR, + if (adapter->ahw.revision_id >= NX_P3_B1) { + /* check interrupt state machine, to be sure */ + status = adapter->pci_read_immediate(adapter, + ISR_INT_STATE_REG); + if (!ISR_LEGACY_INT_TRIGGERED(status)) + return IRQ_NONE; + + } else if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { + + our_int = adapter->pci_read_normalize(adapter, CRB_INT_VECTOR); + /* not our interrupt */ + if ((our_int & (0x80 << adapter->portnum)) == 0) + return IRQ_NONE; + + if (adapter->intr_scheme == INTR_SCHEME_PERPORT) { + /* claim interrupt */ + adapter->pci_write_normalize(adapter, + CRB_INT_VECTOR, our_int & ~((u32)(0x80 << adapter->portnum))); + } } netxen_handle_int(adapter); diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c index 4cb8f4a..27f07f6 100644 --- a/drivers/net/netxen/netxen_nic_niu.c +++ 
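/*
 * Editor's note (condensed model, not the driver function): the reworked
 * netxen_intr() above first checks the legacy interrupt vector bit for this
 * function and, on P3 B1 and later silicon, also consults the interrupt
 * state machine (ISR_LEGACY_INT_TRIGGERED) before claiming the shared
 * line, so interrupts raised for another function are left alone:
 */
#include <stdbool.h>
#include <stdint.h>

#define LEGACY_INT_TRIGGERED(val) (((val) & 0x300) == 0x200)

static bool irq_is_ours(uint32_t vector_reg, uint32_t my_vec_bit,
                        uint32_t state_reg, bool has_state_machine)
{
        if (!(vector_reg & my_vec_bit))
                return false;                    /* not raised for this function  */
        if (has_state_machine && !LEGACY_INT_TRIGGERED(state_reg))
                return false;                    /* state machine says not active */
        return true;
}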
b/drivers/net/netxen/netxen_nic_niu.c @@ -610,6 +610,9 @@ int netxen_niu_macaddr_set(struct netxen_adapter *adapter, int i; DECLARE_MAC_BUF(mac); + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + for (i = 0; i < 10; i++) { temp[0] = temp[1] = 0; memcpy(temp + 2, addr, 2); @@ -727,6 +730,9 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter) __u32 mac_cfg0; u32 port = adapter->physical_port; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + if (port > NETXEN_NIU_MAX_GBE_PORTS) return -EINVAL; mac_cfg0 = 0; @@ -743,6 +749,9 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) __u32 mac_cfg; u32 port = adapter->physical_port; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + if (port > NETXEN_NIU_MAX_XG_PORTS) return -EINVAL; @@ -755,7 +764,7 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter) /* Set promiscuous mode for a GbE interface */ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter, - netxen_niu_prom_mode_t mode) + u32 mode) { __u32 reg; u32 port = adapter->physical_port; @@ -819,6 +828,9 @@ int netxen_niu_xg_macaddr_set(struct netxen_adapter *adapter, u8 temp[4]; u32 val; + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) + return 0; + if ((phy < 0) || (phy > NETXEN_NIU_MAX_XG_PORTS)) return -EIO; @@ -894,7 +906,7 @@ int netxen_niu_xg_macaddr_get(struct netxen_adapter *adapter, #endif /* 0 */ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter, - netxen_niu_prom_mode_t mode) + u32 mode) { __u32 reg; u32 port = adapter->physical_port; diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h index 3bfa51b..83e5ee5 100644 --- a/drivers/net/netxen/netxen_nic_phan_reg.h +++ b/drivers/net/netxen/netxen_nic_phan_reg.h @@ -95,8 +95,8 @@ #define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc) #define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0) #define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4) -#define CRB_PEG_CMD_CONS NETXEN_NIC_REG(0xe8) -#define CRB_HOST_BUFFER_PROD NETXEN_NIC_REG(0xec) +#define CRB_PF_LINK_SPEED_1 NETXEN_NIC_REG(0xe8) +#define CRB_PF_LINK_SPEED_2 NETXEN_NIC_REG(0xec) #define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0) #define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4) #define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8) diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c index a20005c..8e0ca9f 100644 --- a/drivers/net/ni5010.c +++ b/drivers/net/ni5010.c @@ -648,7 +648,6 @@ static void ni5010_set_multicast_list(struct net_device *dev) PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name)); if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) { - dev->flags |= IFF_PROMISC; outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */ PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name)); } else { diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c index a316dcc..b9a882d 100644 --- a/drivers/net/ni52.c +++ b/drivers/net/ni52.c @@ -621,7 +621,7 @@ static int init586(struct net_device *dev) if (num_addrs > len) { printk(KERN_ERR "%s: switching to promisc. 
mode\n", dev->name); - dev->flags |= IFF_PROMISC; + writeb(0x01, &cfg_cmd->promisc); } } if (dev->flags & IFF_PROMISC) diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c index e82b37b..3cdd07c 100644 --- a/drivers/net/qla3xxx.c +++ b/drivers/net/qla3xxx.c @@ -38,7 +38,7 @@ #define DRV_NAME "qla3xxx" #define DRV_STRING "QLogic ISP3XXX Network Driver" -#define DRV_VERSION "v2.03.00-k4" +#define DRV_VERSION "v2.03.00-k5" #define PFX DRV_NAME " " static const char ql3xxx_driver_name[] = DRV_NAME; @@ -3495,8 +3495,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) case ISP_CONTROL_FN0_NET: qdev->mac_index = 0; qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; - qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number; - qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number; qdev->mb_bit_mask = FN0_MA_BITS_MASK; qdev->PHYAddr = PORT0_PHY_ADDRESS; if (port_status & PORT_STATUS_SM0) @@ -3508,8 +3506,6 @@ static void ql_set_mac_info(struct ql3_adapter *qdev) case ISP_CONTROL_FN1_NET: qdev->mac_index = 1; qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number; - qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number; - qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number; qdev->mb_bit_mask = FN1_MA_BITS_MASK; qdev->PHYAddr = PORT1_PHY_ADDRESS; if (port_status & PORT_STATUS_SM1) @@ -3730,14 +3726,6 @@ static int ql3xxx_open(struct net_device *ndev) return (ql_adapter_up(qdev)); } -static void ql3xxx_set_multicast_list(struct net_device *ndev) -{ - /* - * We are manually parsing the list in the net_device structure. - */ - return; -} - static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) { struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); @@ -4007,7 +3995,11 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, ndev->open = ql3xxx_open; ndev->hard_start_xmit = ql3xxx_send; ndev->stop = ql3xxx_close; - ndev->set_multicast_list = ql3xxx_set_multicast_list; + /* ndev->set_multicast_list + * This device is one side of a two-function adapter + * (NIC and iSCSI). Promiscuous mode setting/clearing is + * not allowed from the NIC side. + */ SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops); ndev->set_mac_address = ql3xxx_set_mac_address; ndev->tx_timeout = ql3xxx_tx_timeout; @@ -4040,9 +4032,6 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev, ndev->tx_queue_len = NUM_REQ_Q_ENTRIES; - /* Turn off support for multicasting */ - ndev->flags &= ~IFF_MULTICAST; - /* Record PCI bus information. 
*/ ql_get_board_info(qdev); diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h index 58a086f..7113e71 100644 --- a/drivers/net/qla3xxx.h +++ b/drivers/net/qla3xxx.h @@ -14,24 +14,14 @@ #define OPCODE_OB_MAC_IOCB_FN0 0x01 #define OPCODE_OB_MAC_IOCB_FN2 0x21 -#define OPCODE_OB_TCP_IOCB_FN0 0x03 -#define OPCODE_OB_TCP_IOCB_FN2 0x23 -#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00 -#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20 -#define OPCODE_UPDATE_NCB_IOCB 0xF0 #define OPCODE_IB_MAC_IOCB 0xF9 #define OPCODE_IB_3032_MAC_IOCB 0x09 #define OPCODE_IB_IP_IOCB 0xFA #define OPCODE_IB_3032_IP_IOCB 0x0A -#define OPCODE_IB_TCP_IOCB 0xFB -#define OPCODE_DUMP_PROTO_IOCB 0xFE -#define OPCODE_BUFFER_ALERT_IOCB 0xFB #define OPCODE_FUNC_ID_MASK 0x30 #define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */ -#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */ -#define UPDATE_NCB_IOCB 0x00 /* plus function bits */ #define FN0_MA_BITS_MASK 0x00 #define FN1_MA_BITS_MASK 0x80 @@ -159,75 +149,6 @@ struct ob_ip_iocb_rsp { __le32 reserved2; }; -struct ob_tcp_iocb_req { - u8 opcode; - - u8 flags0; -#define OB_TCP_IOCB_REQ_P 0x80 -#define OB_TCP_IOCB_REQ_CI 0x20 -#define OB_TCP_IOCB_REQ_H 0x10 -#define OB_TCP_IOCB_REQ_LN 0x08 -#define OB_TCP_IOCB_REQ_K 0x04 -#define OB_TCP_IOCB_REQ_D 0x02 -#define OB_TCP_IOCB_REQ_I 0x01 - - u8 flags1; -#define OB_TCP_IOCB_REQ_OSM 0x40 -#define OB_TCP_IOCB_REQ_URG 0x20 -#define OB_TCP_IOCB_REQ_ACK 0x10 -#define OB_TCP_IOCB_REQ_PSH 0x08 -#define OB_TCP_IOCB_REQ_RST 0x04 -#define OB_TCP_IOCB_REQ_SYN 0x02 -#define OB_TCP_IOCB_REQ_FIN 0x01 - - u8 options_len; -#define OB_TCP_IOCB_REQ_OMASK 0xF0 -#define OB_TCP_IOCB_REQ_SHIFT 4 - - __le32 transaction_id; - __le32 data_len; - __le32 hncb_ptr_low; - __le32 hncb_ptr_high; - __le32 buf_addr0_low; - __le32 buf_addr0_high; - __le32 buf_0_len; - __le32 buf_addr1_low; - __le32 buf_addr1_high; - __le32 buf_1_len; - __le32 buf_addr2_low; - __le32 buf_addr2_high; - __le32 buf_2_len; - __le32 time_stamp; - __le32 reserved1; -}; - -struct ob_tcp_iocb_rsp { - u8 opcode; - - u8 flags0; -#define OB_TCP_IOCB_RSP_C 0x20 -#define OB_TCP_IOCB_RSP_H 0x10 -#define OB_TCP_IOCB_RSP_LN 0x08 -#define OB_TCP_IOCB_RSP_K 0x04 -#define OB_TCP_IOCB_RSP_D 0x02 -#define OB_TCP_IOCB_RSP_I 0x01 - - u8 flags1; -#define OB_TCP_IOCB_RSP_E 0x10 -#define OB_TCP_IOCB_RSP_W 0x08 -#define OB_TCP_IOCB_RSP_P 0x04 -#define OB_TCP_IOCB_RSP_T 0x02 -#define OB_TCP_IOCB_RSP_F 0x01 - - u8 state; -#define OB_TCP_IOCB_RSP_SMASK 0xF0 -#define OB_TCP_IOCB_RSP_SHIFT 4 - - __le32 transaction_id; - __le32 local_ncb_ptr; - __le32 reserved0; -}; - struct ib_ip_iocb_rsp { u8 opcode; #define IB_IP_IOCB_RSP_3032_V 0x80 @@ -256,25 +177,6 @@ struct ib_ip_iocb_rsp { __le32 ial_high; }; -struct ib_tcp_iocb_rsp { - u8 opcode; - u8 flags; -#define IB_TCP_IOCB_RSP_P 0x80 -#define IB_TCP_IOCB_RSP_T 0x40 -#define IB_TCP_IOCB_RSP_D 0x20 -#define IB_TCP_IOCB_RSP_N 0x10 -#define IB_TCP_IOCB_RSP_IP 0x03 -#define IB_TCP_FLAG_MASK 0xf0 -#define IB_TCP_FLAG_IOCB_SYN 0x00 - -#define TCP_IB_RSP_FLAGS(x) (x->flags & ~IB_TCP_FLAG_MASK) - - __le16 length; - __le32 hncb_ref_num; - __le32 ial_low; - __le32 ial_high; -}; - struct net_rsp_iocb { u8 opcode; u8 flags; @@ -1266,20 +1168,13 @@ struct ql3_adapter { u32 small_buf_release_cnt; u32 small_buf_total_size; - /* ISR related, saves status for DPC. 
*/ - u32 control_status; - struct eeprom_data nvram_data; - struct timer_list ioctl_timer; u32 port_link_state; - u32 last_rsp_offset; /* 4022 specific */ u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */ u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */ u32 mac_ob_opcode; /* Opcode to use on mac transmission */ - u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */ - u32 update_ob_opcode; /* Opcode to use for updating NCB */ u32 mb_bit_mask; /* MA Bits mask to use on transmission */ u32 numPorts; struct workqueue_struct *workqueue; diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 6a06b95..25e62cf 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -34,6 +34,29 @@ #include "sh_eth.h" +/* CPU <-> EDMAC endian convert */ +static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x) +{ + switch (mdp->edmac_endian) { + case EDMAC_LITTLE_ENDIAN: + return cpu_to_le32(x); + case EDMAC_BIG_ENDIAN: + return cpu_to_be32(x); + } + return x; +} + +static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) +{ + switch (mdp->edmac_endian) { + case EDMAC_LITTLE_ENDIAN: + return le32_to_cpu(x); + case EDMAC_BIG_ENDIAN: + return be32_to_cpu(x); + } + return x; +} + /* * Program the hardware MAC address from dev->dev_addr. */ @@ -240,7 +263,7 @@ static void sh_eth_ring_format(struct net_device *ndev) /* RX descriptor */ rxdesc = &mdp->rx_ring[i]; rxdesc->addr = (u32)skb->data & ~0x3UL; - rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); + rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); /* The size of the buffer is 16 byte boundary. */ rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; @@ -262,7 +285,7 @@ static void sh_eth_ring_format(struct net_device *ndev) mdp->dirty_rx = (u32) (i - RX_RING_SIZE); /* Mark the last entry as wrapping the ring. */ - rxdesc->status |= cpu_to_le32(RD_RDEL); + rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL); memset(mdp->tx_ring, 0, tx_ringsize); @@ -270,10 +293,10 @@ static void sh_eth_ring_format(struct net_device *ndev) for (i = 0; i < TX_RING_SIZE; i++) { mdp->tx_skbuff[i] = NULL; txdesc = &mdp->tx_ring[i]; - txdesc->status = cpu_to_le32(TD_TFP); + txdesc->status = cpu_to_edmac(mdp, TD_TFP); txdesc->buffer_length = 0; if (i == 0) { - /* Rx descriptor address set */ + /* Tx descriptor address set */ ctrl_outl((u32)txdesc, ioaddr + TDLAR); #if defined(CONFIG_CPU_SUBTYPE_SH7763) ctrl_outl((u32)txdesc, ioaddr + TDFAR); @@ -281,13 +304,13 @@ static void sh_eth_ring_format(struct net_device *ndev) } } - /* Rx descriptor address set */ + /* Tx descriptor address set */ #if defined(CONFIG_CPU_SUBTYPE_SH7763) ctrl_outl((u32)txdesc, ioaddr + TDFXR); ctrl_outl(0x1, ioaddr + TDFFR); #endif - txdesc->status |= cpu_to_le32(TD_TDLE); + txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); } /* Get skb and descriptor buffer */ @@ -455,7 +478,7 @@ static int sh_eth_txfree(struct net_device *ndev) for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { entry = mdp->dirty_tx % TX_RING_SIZE; txdesc = &mdp->tx_ring[entry]; - if (txdesc->status & cpu_to_le32(TD_TACT)) + if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) break; /* Free the original skb. 
*/ if (mdp->tx_skbuff[entry]) { @@ -463,9 +486,9 @@ static int sh_eth_txfree(struct net_device *ndev) mdp->tx_skbuff[entry] = NULL; freeNum++; } - txdesc->status = cpu_to_le32(TD_TFP); + txdesc->status = cpu_to_edmac(mdp, TD_TFP); if (entry >= TX_RING_SIZE - 1) - txdesc->status |= cpu_to_le32(TD_TDLE); + txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); mdp->stats.tx_packets++; mdp->stats.tx_bytes += txdesc->buffer_length; @@ -486,8 +509,8 @@ static int sh_eth_rx(struct net_device *ndev) u32 desc_status, reserve = 0; rxdesc = &mdp->rx_ring[entry]; - while (!(rxdesc->status & cpu_to_le32(RD_RACT))) { - desc_status = le32_to_cpu(rxdesc->status); + while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { + desc_status = edmac_to_cpu(mdp, rxdesc->status); pkt_len = rxdesc->frame_length; if (--boguscnt < 0) @@ -522,7 +545,7 @@ static int sh_eth_rx(struct net_device *ndev) mdp->stats.rx_packets++; mdp->stats.rx_bytes += pkt_len; } - rxdesc->status |= cpu_to_le32(RD_RACT); + rxdesc->status |= cpu_to_edmac(mdp, RD_RACT); entry = (++mdp->cur_rx) % RX_RING_SIZE; } @@ -552,10 +575,10 @@ static int sh_eth_rx(struct net_device *ndev) } if (entry >= RX_RING_SIZE - 1) rxdesc->status |= - cpu_to_le32(RD_RACT | RD_RFP | RD_RDEL); + cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); else rxdesc->status |= - cpu_to_le32(RD_RACT | RD_RFP); + cpu_to_edmac(mdp, RD_RACT | RD_RFP); } /* Restart Rx engine if stopped. */ @@ -931,9 +954,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) txdesc->buffer_length = skb->len; if (entry >= TX_RING_SIZE - 1) - txdesc->status |= cpu_to_le32(TD_TACT | TD_TDLE); + txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); else - txdesc->status |= cpu_to_le32(TD_TACT); + txdesc->status |= cpu_to_edmac(mdp, TD_TACT); mdp->cur_tx++; @@ -1159,6 +1182,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) struct resource *res; struct net_device *ndev = NULL; struct sh_eth_private *mdp; + struct sh_eth_plat_data *pd; /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1196,8 +1220,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp = netdev_priv(ndev); spin_lock_init(&mdp->lock); + pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); /* get PHY ID */ - mdp->phy_id = (int)pdev->dev.platform_data; + mdp->phy_id = pd->phy; + /* EDMAC endian */ + mdp->edmac_endian = pd->edmac_endian; /* set function */ ndev->open = sh_eth_open; @@ -1217,12 +1244,16 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* First device only init */ if (!devno) { +#if defined(ARSTR) /* reset device */ ctrl_outl(ARSTR_ARSTR, ARSTR); mdelay(1); +#endif +#if defined(SH_TSU_ADDR) /* TSU init (Init only)*/ sh_eth_tsu_init(SH_TSU_ADDR); +#endif } /* network device register */ @@ -1240,8 +1271,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) ndev->name, CARDNAME, (u32) ndev->base_addr); for (i = 0; i < 5; i++) - printk(KERN_INFO "%02X:", ndev->dev_addr[i]); - printk(KERN_INFO "%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq); + printk("%02X:", ndev->dev_addr[i]); + printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq); platform_set_drvdata(pdev, ndev); diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h index 45ad1b0..73bc718 100644 --- a/drivers/net/sh_eth.h +++ b/drivers/net/sh_eth.h @@ -30,6 +30,8 @@ #include <linux/netdevice.h> #include <linux/phy.h> +#include <asm/sh_eth.h> + #define CARDNAME "sh-eth" #define TX_TIMEOUT (5*HZ) #define TX_RING_SIZE 64 /* Tx ring size */ @@ -143,10 +145,11 @@ #else /* 
CONFIG_CPU_SUBTYPE_SH7763 */ # define RX_OFFSET 2 /* skb offset */ +#ifndef CONFIG_CPU_SUBTYPE_SH7619 /* Chip base address */ # define SH_TSU_ADDR 0xA7000804 # define ARSTR 0xA7000800 - +#endif /* Chip Registers */ /* E-DMAC */ # define EDMR 0x0000 @@ -384,7 +387,11 @@ enum FCFTR_BIT { FCFTR_RFD1 = 0x00000002, FCFTR_RFD0 = 0x00000001, }; #define FIFO_F_D_RFF (FCFTR_RFF2|FCFTR_RFF1|FCFTR_RFF0) +#ifndef CONFIG_CPU_SUBTYPE_SH7619 #define FIFO_F_D_RFD (FCFTR_RFD2|FCFTR_RFD1|FCFTR_RFD0) +#else +#define FIFO_F_D_RFD (FCFTR_RFD0) +#endif /* Transfer descriptor bit */ enum TD_STS_BIT { @@ -414,8 +421,10 @@ enum FELIC_MODE_BIT { #ifdef CONFIG_CPU_SUBTYPE_SH7763 #define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_ZPF |\ ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT) +#elif CONFIG_CPU_SUBTYPE_SH7619 +#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF) #else -#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR ECMR_RXF | ECMR_TXF | ECMR_MCT) +#define ECMR_CHG_DM (ECMR_ZPF | ECMR_PFR | ECMR_RXF | ECMR_TXF | ECMR_MCT) #endif /* ECSR */ @@ -485,7 +494,11 @@ enum RPADIR_BIT { /* FDR */ enum FIFO_SIZE_BIT { +#ifndef CONFIG_CPU_SUBTYPE_SH7619 FIFO_SIZE_T = 0x00000700, FIFO_SIZE_R = 0x00000007, +#else + FIFO_SIZE_T = 0x00000100, FIFO_SIZE_R = 0x00000001, +#endif }; enum phy_offsets { PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3, @@ -601,7 +614,7 @@ struct sh_eth_txdesc { #endif u32 addr; /* TD2 */ u32 pad1; /* padding data */ -}; +} __attribute__((aligned(2), packed)); /* * The sh ether Rx buffer descriptors. @@ -618,7 +631,7 @@ struct sh_eth_rxdesc { #endif u32 addr; /* RD2 */ u32 pad0; /* padding data */ -}; +} __attribute__((aligned(2), packed)); struct sh_eth_private { dma_addr_t rx_desc_dma; @@ -633,6 +646,7 @@ struct sh_eth_private { u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */ u32 cur_tx, dirty_tx; u32 rx_buf_sz; /* Based on MTU+slack. */ + int edmac_endian; /* MII transceiver section. 
*/ u32 phy_id; /* PHY ID */ struct mii_bus *mii_bus; /* MDIO bus control */ diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 5257cf4..7d29edc 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -275,86 +275,6 @@ static void sky2_power_aux(struct sky2_hw *hw) PC_VAUX_ON | PC_VCC_OFF)); } -static void sky2_power_state(struct sky2_hw *hw, pci_power_t state) -{ - u16 power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL); - int pex = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP); - u32 reg; - - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - - switch (state) { - case PCI_D0: - break; - - case PCI_D1: - power_control |= 1; - break; - - case PCI_D2: - power_control |= 2; - break; - - case PCI_D3hot: - case PCI_D3cold: - power_control |= 3; - if (hw->flags & SKY2_HW_ADV_POWER_CTL) { - /* additional power saving measurements */ - reg = sky2_pci_read32(hw, PCI_DEV_REG4); - - /* set gating core clock for LTSSM in L1 state */ - reg |= P_PEX_LTSSM_STAT(P_PEX_LTSSM_L1_STAT) | - /* auto clock gated scheme controlled by CLKREQ */ - P_ASPM_A1_MODE_SELECT | - /* enable Gate Root Core Clock */ - P_CLK_GATE_ROOT_COR_ENA; - - if (pex && (hw->flags & SKY2_HW_CLK_POWER)) { - /* enable Clock Power Management (CLKREQ) */ - u16 ctrl = sky2_pci_read16(hw, pex + PCI_EXP_DEVCTL); - - ctrl |= PCI_EXP_DEVCTL_AUX_PME; - sky2_pci_write16(hw, pex + PCI_EXP_DEVCTL, ctrl); - } else - /* force CLKREQ Enable in Our4 (A1b only) */ - reg |= P_ASPM_FORCE_CLKREQ_ENA; - - /* set Mask Register for Release/Gate Clock */ - sky2_pci_write32(hw, PCI_DEV_REG5, - P_REL_PCIE_EXIT_L1_ST | P_GAT_PCIE_ENTER_L1_ST | - P_REL_PCIE_RX_EX_IDLE | P_GAT_PCIE_RX_EL_IDLE | - P_REL_GPHY_LINK_UP | P_GAT_GPHY_LINK_DOWN); - } else - sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_CLK_HALT); - - /* put CPU into reset state */ - sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_RESET); - if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev == CHIP_REV_YU_SU_A0) - /* put CPU into halt state */ - sky2_write8(hw, B28_Y2_ASF_STAT_CMD, HCU_CCSR_ASF_HALTED); - - if (pex && !(hw->flags & SKY2_HW_RAM_BUFFER)) { - reg = sky2_pci_read32(hw, PCI_DEV_REG1); - /* force to PCIe L1 */ - reg |= PCI_FORCE_PEX_L1; - sky2_pci_write32(hw, PCI_DEV_REG1, reg); - } - break; - - default: - dev_warn(&hw->pdev->dev, PFX "Invalid power state (%d) ", - state); - return; - } - - power_control |= PCI_PM_CTRL_PME_ENABLE; - /* Finally, set the new power state. */ - sky2_pci_write32(hw, hw->pm_cap + PCI_PM_CTRL, power_control); - - sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); - sky2_pci_read32(hw, B0_CTST); -} - static void sky2_gmac_reset(struct sky2_hw *hw, unsigned port) { u16 reg; @@ -709,6 +629,11 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port) sky2_pci_write32(hw, PCI_DEV_REG1, reg1); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); sky2_pci_read32(hw, PCI_DEV_REG1); + + if (hw->chip_id == CHIP_ID_YUKON_FE) + gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_ANE); + else if (hw->flags & SKY2_HW_ADV_POWER_CTL) + sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR); } static void sky2_phy_power_down(struct sky2_hw *hw, unsigned port) @@ -2855,10 +2780,6 @@ static int __devinit sky2_init(struct sky2_hw *hw) hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY | SKY2_HW_ADV_POWER_CTL; - - /* check for Rev. 
A1 dev 4200 */ - if (sky2_read16(hw, Q_ADDR(Q_XA1, Q_WM)) == 0) - hw->flags |= SKY2_HW_CLK_POWER; break; case CHIP_ID_YUKON_EX: @@ -2914,12 +2835,6 @@ static int __devinit sky2_init(struct sky2_hw *hw) if (hw->pmd_type == 'L' || hw->pmd_type == 'S' || hw->pmd_type == 'P') hw->flags |= SKY2_HW_FIBRE_PHY; - hw->pm_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PM); - if (hw->pm_cap == 0) { - dev_err(&hw->pdev->dev, "cannot find PowerManagement capability\n"); - return -EIO; - } - hw->ports = 1; t8 = sky2_read8(hw, B2_Y2_HW_RES); if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) { @@ -4512,7 +4427,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) pci_save_state(pdev); pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); - sky2_power_state(hw, pci_choose_state(pdev, state)); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); return 0; } @@ -4525,7 +4440,9 @@ static int sky2_resume(struct pci_dev *pdev) if (!hw) return 0; - sky2_power_state(hw, PCI_D0); + err = pci_set_power_state(pdev, PCI_D0); + if (err) + goto out; err = pci_restore_state(pdev); if (err) @@ -4595,7 +4512,7 @@ static void sky2_shutdown(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3cold, wol); pci_disable_device(pdev); - sky2_power_state(hw, PCI_D3hot); + pci_set_power_state(pdev, PCI_D3hot); } static struct pci_driver sky2_driver = { diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 4d9c4a19..92fb24b 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -2072,9 +2072,7 @@ struct sky2_hw { #define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ #define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ #define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ -#define SKY2_HW_CLK_POWER 0x00000100 /* clock power management */ - int pm_cap; u8 chip_id; u8 chip_rev; u8 pmd_type; diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index 9b2a7f7..e531302 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -425,14 +425,11 @@ static int init586(struct net_device *dev) int len = ((char *) p->iscp - (char *) ptr - 8) / 6; if(num_addrs > len) { printk("%s: switching to promisc. mode\n",dev->name); - dev->flags|=IFF_PROMISC; + cfg_cmd->promisc = 1; } } if(dev->flags&IFF_PROMISC) - { - cfg_cmd->promisc=1; - dev->flags|=IFF_PROMISC; - } + cfg_cmd->promisc = 1; cfg_cmd->carr_coll = 0x00; p->scb->cbl_offset = make16(cfg_cmd); diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index b588c89..a84ba48 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -1285,6 +1285,21 @@ static void check_carrier(struct work_struct *work) } } +static int pegasus_blacklisted(struct usb_device *udev) +{ + struct usb_device_descriptor *udd = &udev->descriptor; + + /* Special quirk to keep the driver from handling the Belkin Bluetooth + * dongle which happens to have the same ID. 
+ */ + if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) && + (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) && + (udd->bDeviceProtocol == 1)) + return 1; + + return 0; +} + static int pegasus_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -1296,6 +1311,12 @@ static int pegasus_probe(struct usb_interface *intf, DECLARE_MAC_BUF(mac); usb_get_dev(dev); + + if (pegasus_blacklisted(dev)) { + res = -ENODEV; + goto out; + } + net = alloc_etherdev(sizeof(struct pegasus)); if (!net) { dev_err(&intf->dev, "can't allocate %s\n", "device"); diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 370ce30..007c129 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -662,6 +662,10 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid spin_unlock_irq(&vptr->lock); } +static void velocity_init_rx_ring_indexes(struct velocity_info *vptr) +{ + vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0; +} /** * velocity_rx_reset - handle a receive reset @@ -677,16 +681,16 @@ static void velocity_rx_reset(struct velocity_info *vptr) struct mac_regs __iomem * regs = vptr->mac_regs; int i; - vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0; + velocity_init_rx_ring_indexes(vptr); /* * Init state, all RD entries belong to the NIC */ for (i = 0; i < vptr->options.numrx; ++i) - vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC; + vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC; writew(vptr->options.numrx, &regs->RBRDU); - writel(vptr->rd_pool_dma, &regs->RDBaseLo); + writel(vptr->rx.pool_dma, &regs->RDBaseLo); writew(0, &regs->RDIdx); writew(vptr->options.numrx - 1, &regs->RDCSize); } @@ -779,15 +783,15 @@ static void velocity_init_registers(struct velocity_info *vptr, vptr->int_mask = INT_MASK_DEF; - writel(vptr->rd_pool_dma, &regs->RDBaseLo); + writel(vptr->rx.pool_dma, &regs->RDBaseLo); writew(vptr->options.numrx - 1, &regs->RDCSize); mac_rx_queue_run(regs); mac_rx_queue_wake(regs); writew(vptr->options.numtx - 1, &regs->TDCSize); - for (i = 0; i < vptr->num_txq; i++) { - writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]); + for (i = 0; i < vptr->tx.numq; i++) { + writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]); mac_tx_queue_run(regs, i); } @@ -1047,7 +1051,7 @@ static void __devinit velocity_init_info(struct pci_dev *pdev, vptr->pdev = pdev; vptr->chip_id = info->chip_id; - vptr->num_txq = info->txqueue; + vptr->tx.numq = info->txqueue; vptr->multicast_limit = MCAM_SIZE; spin_lock_init(&vptr->lock); INIT_LIST_HEAD(&vptr->list); @@ -1093,14 +1097,14 @@ static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pc } /** - * velocity_init_rings - set up DMA rings + * velocity_init_dma_rings - set up DMA rings * @vptr: Velocity to set up * * Allocate PCI mapped DMA rings for the receive and transmit layer * to use. 
*/ -static int velocity_init_rings(struct velocity_info *vptr) +static int velocity_init_dma_rings(struct velocity_info *vptr) { struct velocity_opt *opt = &vptr->options; const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); @@ -1116,7 +1120,7 @@ static int velocity_init_rings(struct velocity_info *vptr) * pci_alloc_consistent() fulfills the requirement for 64 bytes * alignment */ - pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq + + pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + rx_ring_size, &pool_dma); if (!pool) { dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", @@ -1124,15 +1128,15 @@ static int velocity_init_rings(struct velocity_info *vptr) return -ENOMEM; } - vptr->rd_ring = pool; - vptr->rd_pool_dma = pool_dma; + vptr->rx.ring = pool; + vptr->rx.pool_dma = pool_dma; pool += rx_ring_size; pool_dma += rx_ring_size; - for (i = 0; i < vptr->num_txq; i++) { - vptr->td_rings[i] = pool; - vptr->td_pool_dma[i] = pool_dma; + for (i = 0; i < vptr->tx.numq; i++) { + vptr->tx.rings[i] = pool; + vptr->tx.pool_dma[i] = pool_dma; pool += tx_ring_size; pool_dma += tx_ring_size; } @@ -1141,18 +1145,18 @@ static int velocity_init_rings(struct velocity_info *vptr) } /** - * velocity_free_rings - free PCI ring pointers + * velocity_free_dma_rings - free PCI ring pointers * @vptr: Velocity to free from * * Clean up the PCI ring buffers allocated to this velocity. */ -static void velocity_free_rings(struct velocity_info *vptr) +static void velocity_free_dma_rings(struct velocity_info *vptr) { const int size = vptr->options.numrx * sizeof(struct rx_desc) + - vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq; + vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; - pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma); + pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); } static void velocity_give_many_rx_descs(struct velocity_info *vptr) @@ -1164,44 +1168,44 @@ static void velocity_give_many_rx_descs(struct velocity_info *vptr) * RD number must be equal to 4X per hardware spec * (programming guide rev 1.20, p.13) */ - if (vptr->rd_filled < 4) + if (vptr->rx.filled < 4) return; wmb(); - unusable = vptr->rd_filled & 0x0003; - dirty = vptr->rd_dirty - unusable; - for (avail = vptr->rd_filled & 0xfffc; avail; avail--) { + unusable = vptr->rx.filled & 0x0003; + dirty = vptr->rx.dirty - unusable; + for (avail = vptr->rx.filled & 0xfffc; avail; avail--) { dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1; - vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC; + vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC; } - writew(vptr->rd_filled & 0xfffc, &regs->RBRDU); - vptr->rd_filled = unusable; + writew(vptr->rx.filled & 0xfffc, &regs->RBRDU); + vptr->rx.filled = unusable; } static int velocity_rx_refill(struct velocity_info *vptr) { - int dirty = vptr->rd_dirty, done = 0; + int dirty = vptr->rx.dirty, done = 0; do { - struct rx_desc *rd = vptr->rd_ring + dirty; + struct rx_desc *rd = vptr->rx.ring + dirty; /* Fine for an all zero Rx desc at init time as well */ if (rd->rdesc0.len & OWNED_BY_NIC) break; - if (!vptr->rd_info[dirty].skb) { + if (!vptr->rx.info[dirty].skb) { if (velocity_alloc_rx_buf(vptr, dirty) < 0) break; } done++; dirty = (dirty < vptr->options.numrx - 1) ? 
dirty + 1 : 0; - } while (dirty != vptr->rd_curr); + } while (dirty != vptr->rx.curr); if (done) { - vptr->rd_dirty = dirty; - vptr->rd_filled += done; + vptr->rx.dirty = dirty; + vptr->rx.filled += done; } return done; @@ -1209,7 +1213,7 @@ static int velocity_rx_refill(struct velocity_info *vptr) static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu) { - vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; + vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32; } /** @@ -1224,12 +1228,12 @@ static int velocity_init_rd_ring(struct velocity_info *vptr) { int ret = -ENOMEM; - vptr->rd_info = kcalloc(vptr->options.numrx, + vptr->rx.info = kcalloc(vptr->options.numrx, sizeof(struct velocity_rd_info), GFP_KERNEL); - if (!vptr->rd_info) + if (!vptr->rx.info) goto out; - vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0; + velocity_init_rx_ring_indexes(vptr); if (velocity_rx_refill(vptr) != vptr->options.numrx) { VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR @@ -1255,18 +1259,18 @@ static void velocity_free_rd_ring(struct velocity_info *vptr) { int i; - if (vptr->rd_info == NULL) + if (vptr->rx.info == NULL) return; for (i = 0; i < vptr->options.numrx; i++) { - struct velocity_rd_info *rd_info = &(vptr->rd_info[i]); - struct rx_desc *rd = vptr->rd_ring + i; + struct velocity_rd_info *rd_info = &(vptr->rx.info[i]); + struct rx_desc *rd = vptr->rx.ring + i; memset(rd, 0, sizeof(*rd)); if (!rd_info->skb) continue; - pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, + pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); rd_info->skb_dma = (dma_addr_t) NULL; @@ -1274,8 +1278,8 @@ static void velocity_free_rd_ring(struct velocity_info *vptr) rd_info->skb = NULL; } - kfree(vptr->rd_info); - vptr->rd_info = NULL; + kfree(vptr->rx.info); + vptr->rx.info = NULL; } /** @@ -1293,19 +1297,19 @@ static int velocity_init_td_ring(struct velocity_info *vptr) unsigned int j; /* Init the TD ring entries */ - for (j = 0; j < vptr->num_txq; j++) { - curr = vptr->td_pool_dma[j]; + for (j = 0; j < vptr->tx.numq; j++) { + curr = vptr->tx.pool_dma[j]; - vptr->td_infos[j] = kcalloc(vptr->options.numtx, + vptr->tx.infos[j] = kcalloc(vptr->options.numtx, sizeof(struct velocity_td_info), GFP_KERNEL); - if (!vptr->td_infos[j]) { + if (!vptr->tx.infos[j]) { while(--j >= 0) - kfree(vptr->td_infos[j]); + kfree(vptr->tx.infos[j]); return -ENOMEM; } - vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0; + vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0; } return 0; } @@ -1317,7 +1321,7 @@ static int velocity_init_td_ring(struct velocity_info *vptr) static void velocity_free_td_ring_entry(struct velocity_info *vptr, int q, int n) { - struct velocity_td_info * td_info = &(vptr->td_infos[q][n]); + struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]); int i; if (td_info == NULL) @@ -1349,15 +1353,15 @@ static void velocity_free_td_ring(struct velocity_info *vptr) { int i, j; - for (j = 0; j < vptr->num_txq; j++) { - if (vptr->td_infos[j] == NULL) + for (j = 0; j < vptr->tx.numq; j++) { + if (vptr->tx.infos[j] == NULL) continue; for (i = 0; i < vptr->options.numtx; i++) { velocity_free_td_ring_entry(vptr, j, i); } - kfree(vptr->td_infos[j]); - vptr->td_infos[j] = NULL; + kfree(vptr->tx.infos[j]); + vptr->tx.infos[j] = NULL; } } @@ -1374,13 +1378,13 @@ static void velocity_free_td_ring(struct velocity_info *vptr) static int velocity_rx_srv(struct velocity_info *vptr, int status) { struct net_device_stats *stats = &vptr->stats; - int 
rd_curr = vptr->rd_curr; + int rd_curr = vptr->rx.curr; int works = 0; do { - struct rx_desc *rd = vptr->rd_ring + rd_curr; + struct rx_desc *rd = vptr->rx.ring + rd_curr; - if (!vptr->rd_info[rd_curr].skb) + if (!vptr->rx.info[rd_curr].skb) break; if (rd->rdesc0.len & OWNED_BY_NIC) @@ -1412,7 +1416,7 @@ static int velocity_rx_srv(struct velocity_info *vptr, int status) rd_curr = 0; } while (++works <= 15); - vptr->rd_curr = rd_curr; + vptr->rx.curr = rd_curr; if ((works > 0) && (velocity_rx_refill(vptr) > 0)) velocity_give_many_rx_descs(vptr); @@ -1510,8 +1514,8 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) { void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); struct net_device_stats *stats = &vptr->stats; - struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); - struct rx_desc *rd = &(vptr->rd_ring[idx]); + struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); + struct rx_desc *rd = &(vptr->rx.ring[idx]); int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; struct sk_buff *skb; @@ -1527,7 +1531,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) skb = rd_info->skb; pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, - vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); + vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); /* * Drop frame not meeting IEEE 802.3 @@ -1550,7 +1554,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) rd_info->skb = NULL; } - pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz, + pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); skb_put(skb, pkt_len - 4); @@ -1580,10 +1584,10 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) { - struct rx_desc *rd = &(vptr->rd_ring[idx]); - struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]); + struct rx_desc *rd = &(vptr->rx.ring[idx]); + struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); - rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64); + rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64); if (rd_info->skb == NULL) return -ENOMEM; @@ -1592,14 +1596,15 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx) * 64byte alignment. 
*/ skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63); - rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); + rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, + vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); /* * Fill in the descriptor to match - */ + */ *((u32 *) & (rd->rdesc0)) = 0; - rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN; + rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN; rd->pa_low = cpu_to_le32(rd_info->skb_dma); rd->pa_high = 0; return 0; @@ -1625,15 +1630,15 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status) struct velocity_td_info *tdinfo; struct net_device_stats *stats = &vptr->stats; - for (qnum = 0; qnum < vptr->num_txq; qnum++) { - for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0; + for (qnum = 0; qnum < vptr->tx.numq; qnum++) { + for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; idx = (idx + 1) % vptr->options.numtx) { /* * Get Tx Descriptor */ - td = &(vptr->td_rings[qnum][idx]); - tdinfo = &(vptr->td_infos[qnum][idx]); + td = &(vptr->tx.rings[qnum][idx]); + tdinfo = &(vptr->tx.infos[qnum][idx]); if (td->tdesc0.len & OWNED_BY_NIC) break; @@ -1657,9 +1662,9 @@ static int velocity_tx_srv(struct velocity_info *vptr, u32 status) stats->tx_bytes += tdinfo->skb->len; } velocity_free_tx_buf(vptr, tdinfo); - vptr->td_used[qnum]--; + vptr->tx.used[qnum]--; } - vptr->td_tail[qnum] = idx; + vptr->tx.tail[qnum] = idx; if (AVAIL_TD(vptr, qnum) < 1) { full = 1; @@ -1846,6 +1851,40 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_ tdinfo->skb = NULL; } +static int velocity_init_rings(struct velocity_info *vptr, int mtu) +{ + int ret; + + velocity_set_rxbufsize(vptr, mtu); + + ret = velocity_init_dma_rings(vptr); + if (ret < 0) + goto out; + + ret = velocity_init_rd_ring(vptr); + if (ret < 0) + goto err_free_dma_rings_0; + + ret = velocity_init_td_ring(vptr); + if (ret < 0) + goto err_free_rd_ring_1; +out: + return ret; + +err_free_rd_ring_1: + velocity_free_rd_ring(vptr); +err_free_dma_rings_0: + velocity_free_dma_rings(vptr); + goto out; +} + +static void velocity_free_rings(struct velocity_info *vptr) +{ + velocity_free_td_ring(vptr); + velocity_free_rd_ring(vptr); + velocity_free_dma_rings(vptr); +} + /** * velocity_open - interface activation callback * @dev: network layer device to open @@ -1862,20 +1901,10 @@ static int velocity_open(struct net_device *dev) struct velocity_info *vptr = netdev_priv(dev); int ret; - velocity_set_rxbufsize(vptr, dev->mtu); - - ret = velocity_init_rings(vptr); + ret = velocity_init_rings(vptr, dev->mtu); if (ret < 0) goto out; - ret = velocity_init_rd_ring(vptr); - if (ret < 0) - goto err_free_desc_rings; - - ret = velocity_init_td_ring(vptr); - if (ret < 0) - goto err_free_rd_ring; - /* Ensure chip is running */ pci_set_power_state(vptr->pdev, PCI_D0); @@ -1888,7 +1917,8 @@ static int velocity_open(struct net_device *dev) if (ret < 0) { /* Power down the chip */ pci_set_power_state(vptr->pdev, PCI_D3hot); - goto err_free_td_ring; + velocity_free_rings(vptr); + goto out; } mac_enable_int(vptr->mac_regs); @@ -1896,14 +1926,6 @@ static int velocity_open(struct net_device *dev) vptr->flags |= VELOCITY_FLAGS_OPENED; out: return ret; - -err_free_td_ring: - velocity_free_td_ring(vptr); -err_free_rd_ring: - velocity_free_rd_ring(vptr); -err_free_desc_rings: - velocity_free_rings(vptr); - goto out; } /** @@ -1919,50 +1941,72 @@ err_free_desc_rings: static int velocity_change_mtu(struct net_device *dev, int 
new_mtu) { struct velocity_info *vptr = netdev_priv(dev); - unsigned long flags; - int oldmtu = dev->mtu; int ret = 0; if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", vptr->dev->name); - return -EINVAL; + ret = -EINVAL; + goto out_0; } if (!netif_running(dev)) { dev->mtu = new_mtu; - return 0; + goto out_0; } - if (new_mtu != oldmtu) { + if (dev->mtu != new_mtu) { + struct velocity_info *tmp_vptr; + unsigned long flags; + struct rx_info rx; + struct tx_info tx; + + tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL); + if (!tmp_vptr) { + ret = -ENOMEM; + goto out_0; + } + + tmp_vptr->dev = dev; + tmp_vptr->pdev = vptr->pdev; + tmp_vptr->options = vptr->options; + tmp_vptr->tx.numq = vptr->tx.numq; + + ret = velocity_init_rings(tmp_vptr, new_mtu); + if (ret < 0) + goto out_free_tmp_vptr_1; + spin_lock_irqsave(&vptr->lock, flags); netif_stop_queue(dev); velocity_shutdown(vptr); - velocity_free_td_ring(vptr); - velocity_free_rd_ring(vptr); + rx = vptr->rx; + tx = vptr->tx; - dev->mtu = new_mtu; + vptr->rx = tmp_vptr->rx; + vptr->tx = tmp_vptr->tx; - velocity_set_rxbufsize(vptr, new_mtu); + tmp_vptr->rx = rx; + tmp_vptr->tx = tx; - ret = velocity_init_rd_ring(vptr); - if (ret < 0) - goto out_unlock; + dev->mtu = new_mtu; - ret = velocity_init_td_ring(vptr); - if (ret < 0) - goto out_unlock; + velocity_give_many_rx_descs(vptr); velocity_init_registers(vptr, VELOCITY_INIT_COLD); mac_enable_int(vptr->mac_regs); netif_start_queue(dev); -out_unlock: + spin_unlock_irqrestore(&vptr->lock, flags); - } + velocity_free_rings(tmp_vptr); + +out_free_tmp_vptr_1: + kfree(tmp_vptr); + } +out_0: return ret; } @@ -2008,9 +2052,6 @@ static int velocity_close(struct net_device *dev) /* Power down the chip */ pci_set_power_state(vptr->pdev, PCI_D3hot); - /* Free the resources */ - velocity_free_td_ring(vptr); - velocity_free_rd_ring(vptr); velocity_free_rings(vptr); vptr->flags &= (~VELOCITY_FLAGS_OPENED); @@ -2056,9 +2097,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) spin_lock_irqsave(&vptr->lock, flags); - index = vptr->td_curr[qnum]; - td_ptr = &(vptr->td_rings[qnum][index]); - tdinfo = &(vptr->td_infos[qnum][index]); + index = vptr->tx.curr[qnum]; + td_ptr = &(vptr->tx.rings[qnum][index]); + tdinfo = &(vptr->tx.infos[qnum][index]); td_ptr->tdesc1.TCR = TCR0_TIC; td_ptr->td_buf[0].size &= ~TD_QUEUE; @@ -2071,9 +2112,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) skb_copy_from_linear_data(skb, tdinfo->buf, skb->len); tdinfo->skb_dma[0] = tdinfo->buf_dma; td_ptr->tdesc0.len = len; - td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); - td_ptr->td_buf[0].pa_high = 0; - td_ptr->td_buf[0].size = len; /* queue is 0 anyway */ + td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); + td_ptr->tx.buf[0].pa_high = 0; + td_ptr->tx.buf[0].size = len; /* queue is 0 anyway */ tdinfo->nskb_dma = 1; } else { int i = 0; @@ -2084,9 +2125,9 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) td_ptr->tdesc0.len = len; /* FIXME: support 48bit DMA later */ - td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); - td_ptr->td_buf[i].pa_high = 0; - td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb)); + td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma); + td_ptr->tx.buf[i].pa_high = 0; + td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb)); for (i = 0; i < nfrags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -2094,9 +2135,9 @@ static int 
velocity_xmit(struct sk_buff *skb, struct net_device *dev) tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE); - td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); - td_ptr->td_buf[i + 1].pa_high = 0; - td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); + td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); + td_ptr->tx.buf[i + 1].pa_high = 0; + td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size); } tdinfo->nskb_dma = i - 1; } @@ -2142,13 +2183,13 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev) if (prev < 0) prev = vptr->options.numtx - 1; td_ptr->tdesc0.len |= OWNED_BY_NIC; - vptr->td_used[qnum]++; - vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx; + vptr->tx.used[qnum]++; + vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx; if (AVAIL_TD(vptr, qnum) < 1) netif_stop_queue(dev); - td_ptr = &(vptr->td_rings[qnum][prev]); + td_ptr = &(vptr->tx.rings[qnum][prev]); td_ptr->td_buf[0].size |= TD_QUEUE; mac_tx_queue_wake(vptr->mac_regs, qnum); } @@ -3405,8 +3446,8 @@ static int velocity_resume(struct pci_dev *pdev) velocity_tx_srv(vptr, 0); - for (i = 0; i < vptr->num_txq; i++) { - if (vptr->td_used[i]) { + for (i = 0; i < vptr->tx.numq; i++) { + if (vptr->tx.used[i]) { mac_tx_queue_wake(vptr->mac_regs, i); } } diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index 8644614..1b95b04 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -1494,6 +1494,10 @@ struct velocity_opt { u32 flags; }; +#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)])) + +#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) + struct velocity_info { struct list_head list; @@ -1501,9 +1505,6 @@ struct velocity_info { struct net_device *dev; struct net_device_stats stats; - dma_addr_t rd_pool_dma; - dma_addr_t td_pool_dma[TX_QUEUE_NO]; - struct vlan_group *vlgrp; u8 ip_addr[4]; enum chip_type chip_id; @@ -1512,25 +1513,29 @@ struct velocity_info { unsigned long memaddr; unsigned long ioaddr; - u8 rev_id; - -#define AVAIL_TD(p,q) ((p)->options.numtx-((p)->td_used[(q)])) + struct tx_info { + int numq; + + /* FIXME: the locality of the data seems rather poor. 
*/ + int used[TX_QUEUE_NO]; + int curr[TX_QUEUE_NO]; + int tail[TX_QUEUE_NO]; + struct tx_desc *rings[TX_QUEUE_NO]; + struct velocity_td_info *infos[TX_QUEUE_NO]; + dma_addr_t pool_dma[TX_QUEUE_NO]; + } tx; + + struct rx_info { + int buf_sz; + + int dirty; + int curr; + u32 filled; + struct rx_desc *ring; + struct velocity_rd_info *info; /* It's an array */ + dma_addr_t pool_dma; + } rx; - int num_txq; - - volatile int td_used[TX_QUEUE_NO]; - int td_curr[TX_QUEUE_NO]; - int td_tail[TX_QUEUE_NO]; - struct tx_desc *td_rings[TX_QUEUE_NO]; - struct velocity_td_info *td_infos[TX_QUEUE_NO]; - - int rd_curr; - int rd_dirty; - u32 rd_filled; - struct rx_desc *rd_ring; - struct velocity_rd_info *rd_info; /* It's an array */ - -#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) u32 mib_counter[MAX_HW_MIB_COUNTER]; struct velocity_opt options; @@ -1538,7 +1543,6 @@ struct velocity_info { u32 flags; - int rx_buf_sz; u32 mii_status; u32 phy_id; int multicast_limit; @@ -1554,8 +1558,8 @@ struct velocity_info { struct velocity_context context; u32 ticks; - u32 rx_bytes; + u8 rev_id; }; /** diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig index 846be60..2ae2ec4 100644 --- a/drivers/net/wan/Kconfig +++ b/drivers/net/wan/Kconfig @@ -25,7 +25,7 @@ if WAN # There is no way to detect a comtrol sv11 - force it modular for now. config HOSTESS_SV11 tristate "Comtrol Hostess SV-11 support" - depends on ISA && m && ISA_DMA_API && INET + depends on ISA && m && ISA_DMA_API && INET && HDLC help Driver for Comtrol Hostess SV-11 network card which operates on low speed synchronous serial links at up to @@ -37,7 +37,7 @@ config HOSTESS_SV11 # The COSA/SRP driver has not been tested as non-modular yet. config COSA tristate "COSA/SRP sync serial boards support" - depends on ISA && m && ISA_DMA_API + depends on ISA && m && ISA_DMA_API && HDLC ---help--- Driver for COSA and SRP synchronous serial boards. @@ -61,7 +61,7 @@ config COSA # config LANMEDIA tristate "LanMedia Corp. SSI/V.35, T1/E1, HSSI, T3 boards" - depends on PCI && VIRT_TO_BUS + depends on PCI && VIRT_TO_BUS && HDLC ---help--- Driver for the following Lan Media family of serial boards: @@ -78,9 +78,8 @@ config LANMEDIA - LMC 5245 board connects directly to a T3 circuit saving the additional external hardware. - To change setting such as syncPPP vs Cisco HDLC or clock source you - will need lmcctl. It is available at <ftp://ftp.lanmedia.com/> - (broken link). + To change setting such as clock source you will need lmcctl. + It is available at <ftp://ftp.lanmedia.com/> (broken link). To compile this driver as a module, choose M here: the module will be called lmc. @@ -88,7 +87,7 @@ config LANMEDIA # There is no way to detect a Sealevel board. Force it modular config SEALEVEL_4021 tristate "Sealevel Systems 4021 support" - depends on ISA && m && ISA_DMA_API && INET + depends on ISA && m && ISA_DMA_API && INET && HDLC help This is a driver for the Sealevel Systems ACB 56 serial I/O adapter. @@ -154,8 +153,6 @@ config HDLC_PPP help Generic HDLC driver supporting PPP over WAN connections. - It will be replaced by new PPP implementation in Linux 2.6.26. - If unsure, say N. 
config HDLC_X25 diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile index d61fef3..1025496 100644 --- a/drivers/net/wan/Makefile +++ b/drivers/net/wan/Makefile @@ -21,12 +21,11 @@ pc300-y := pc300_drv.o pc300-$(CONFIG_PC300_MLPPP) += pc300_tty.o pc300-objs := $(pc300-y) -obj-$(CONFIG_HOSTESS_SV11) += z85230.o syncppp.o hostess_sv11.o -obj-$(CONFIG_SEALEVEL_4021) += z85230.o syncppp.o sealevel.o -obj-$(CONFIG_COSA) += syncppp.o cosa.o -obj-$(CONFIG_FARSYNC) += syncppp.o farsync.o -obj-$(CONFIG_DSCC4) += dscc4.o -obj-$(CONFIG_LANMEDIA) += syncppp.o +obj-$(CONFIG_HOSTESS_SV11) += z85230.o hostess_sv11.o +obj-$(CONFIG_SEALEVEL_4021) += z85230.o sealevel.o +obj-$(CONFIG_COSA) += cosa.o +obj-$(CONFIG_FARSYNC) += farsync.o +obj-$(CONFIG_DSCC4) += dscc4.o obj-$(CONFIG_X25_ASY) += x25_asy.o obj-$(CONFIG_LANMEDIA) += lmc/ diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index f7d3349..f140515 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -2,6 +2,7 @@ /* * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz> + * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -54,7 +55,7 @@ * * The Linux driver (unlike the present *BSD drivers :-) can work even * for the COSA and SRP in one computer and allows each channel to work - * in one of the three modes (character device, Cisco HDLC, Sync PPP). + * in one of the two modes (character or network device). * * AUTHOR * @@ -72,12 +73,6 @@ * The Comtrol Hostess SV11 driver by Alan Cox * The Sync PPP/Cisco HDLC layer (syncppp.c) ported to Linux by Alan Cox */ -/* - * 5/25/1999 : Marcelo Tosatti <marcelo@conectiva.com.br> - * fixed a deadlock in cosa_sppp_open - */ - -/* ---------- Headers, macros, data structures ---------- */ #include <linux/module.h> #include <linux/kernel.h> @@ -86,6 +81,7 @@ #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/delay.h> +#include <linux/hdlc.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/netdevice.h> @@ -93,14 +89,12 @@ #include <linux/mutex.h> #include <linux/device.h> #include <linux/smp_lock.h> - -#undef COSA_SLOW_IO /* for testing purposes only */ - #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> -#include <net/syncppp.h> +#undef COSA_SLOW_IO /* for testing purposes only */ + #include "cosa.h" /* Maximum length of the identification string. 
*/ @@ -112,7 +106,6 @@ /* Per-channel data structure */ struct channel_data { - void *if_ptr; /* General purpose pointer (used by SPPP) */ int usage; /* Usage count; >0 for chrdev, -1 for netdev */ int num; /* Number of the channel */ struct cosa_data *cosa; /* Pointer to the per-card structure */ @@ -136,10 +129,9 @@ struct channel_data { wait_queue_head_t txwaitq, rxwaitq; int tx_status, rx_status; - /* SPPP/HDLC device parts */ - struct ppp_device pppdev; + /* generic HDLC device parts */ + struct net_device *netdev; struct sk_buff *rx_skb, *tx_skb; - struct net_device_stats stats; }; /* cosa->firmware_status bits */ @@ -281,21 +273,19 @@ static int cosa_start_tx(struct channel_data *channel, char *buf, int size); static void cosa_kick(struct cosa_data *cosa); static int cosa_dma_able(struct channel_data *chan, char *buf, int data); -/* SPPP/HDLC stuff */ -static void sppp_channel_init(struct channel_data *chan); -static void sppp_channel_delete(struct channel_data *chan); -static int cosa_sppp_open(struct net_device *d); -static int cosa_sppp_close(struct net_device *d); -static void cosa_sppp_timeout(struct net_device *d); -static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *d); -static char *sppp_setup_rx(struct channel_data *channel, int size); -static int sppp_rx_done(struct channel_data *channel); -static int sppp_tx_done(struct channel_data *channel, int size); -static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); -static struct net_device_stats *cosa_net_stats(struct net_device *dev); +/* Network device stuff */ +static int cosa_net_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity); +static int cosa_net_open(struct net_device *d); +static int cosa_net_close(struct net_device *d); +static void cosa_net_timeout(struct net_device *d); +static int cosa_net_tx(struct sk_buff *skb, struct net_device *d); +static char *cosa_net_setup_rx(struct channel_data *channel, int size); +static int cosa_net_rx_done(struct channel_data *channel); +static int cosa_net_tx_done(struct channel_data *channel, int size); +static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); /* Character device */ -static void chardev_channel_init(struct channel_data *chan); static char *chrdev_setup_rx(struct channel_data *channel, int size); static int chrdev_rx_done(struct channel_data *channel); static int chrdev_tx_done(struct channel_data *channel, int size); @@ -357,17 +347,17 @@ static void debug_status_in(struct cosa_data *cosa, int status); static void debug_status_out(struct cosa_data *cosa, int status); #endif - +static inline struct channel_data* dev_to_chan(struct net_device *dev) +{ + return (struct channel_data *)dev_to_hdlc(dev)->priv; +} + /* ---------- Initialization stuff ---------- */ static int __init cosa_init(void) { int i, err = 0; - printk(KERN_INFO "cosa v1.08 (c) 1997-2000 Jan Kasprzak <kas@fi.muni.cz>\n"); -#ifdef CONFIG_SMP - printk(KERN_INFO "cosa: SMP found. 
Please mail any success/failure reports to the author.\n"); -#endif if (cosa_major > 0) { if (register_chrdev(cosa_major, "cosa", &cosa_fops)) { printk(KERN_WARNING "cosa: unable to get major %d\n", @@ -402,7 +392,7 @@ static int __init cosa_init(void) NULL, "cosa%d", i); err = 0; goto out; - + out_chrdev: unregister_chrdev(cosa_major, "cosa"); out: @@ -414,43 +404,29 @@ static void __exit cosa_exit(void) { struct cosa_data *cosa; int i; - printk(KERN_INFO "Unloading the cosa module\n"); - for (i=0; i<nr_cards; i++) + for (i = 0; i < nr_cards; i++) device_destroy(cosa_class, MKDEV(cosa_major, i)); class_destroy(cosa_class); - for (cosa=cosa_cards; nr_cards--; cosa++) { + + for (cosa = cosa_cards; nr_cards--; cosa++) { /* Clean up the per-channel data */ - for (i=0; i<cosa->nchannels; i++) { + for (i = 0; i < cosa->nchannels; i++) { /* Chardev driver has no alloc'd per-channel data */ - sppp_channel_delete(cosa->chan+i); + unregister_hdlc_device(cosa->chan[i].netdev); + free_netdev(cosa->chan[i].netdev); } /* Clean up the per-card data */ kfree(cosa->chan); kfree(cosa->bouncebuf); free_irq(cosa->irq, cosa); free_dma(cosa->dma); - release_region(cosa->datareg,is_8bit(cosa)?2:4); + release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4); } unregister_chrdev(cosa_major, "cosa"); } module_exit(cosa_exit); -/* - * This function should register all the net devices needed for the - * single channel. - */ -static __inline__ void channel_init(struct channel_data *chan) -{ - sprintf(chan->name, "cosa%dc%d", chan->cosa->num, chan->num); - - /* Initialize the chardev data structures */ - chardev_channel_init(chan); - - /* Register the sppp interface */ - sppp_channel_init(chan); -} - static int cosa_probe(int base, int irq, int dma) { struct cosa_data *cosa = cosa_cards+nr_cards; @@ -576,13 +552,43 @@ static int cosa_probe(int base, int irq, int dma) /* Initialize the per-channel data */ cosa->chan = kcalloc(cosa->nchannels, sizeof(struct channel_data), GFP_KERNEL); if (!cosa->chan) { - err = -ENOMEM; + err = -ENOMEM; goto err_out3; } - for (i=0; i<cosa->nchannels; i++) { - cosa->chan[i].cosa = cosa; - cosa->chan[i].num = i; - channel_init(cosa->chan+i); + + for (i = 0; i < cosa->nchannels; i++) { + struct channel_data *chan = &cosa->chan[i]; + + chan->cosa = cosa; + chan->num = i; + sprintf(chan->name, "cosa%dc%d", chan->cosa->num, i); + + /* Initialize the chardev data structures */ + mutex_init(&chan->rlock); + init_MUTEX(&chan->wsem); + + /* Register the network interface */ + if (!(chan->netdev = alloc_hdlcdev(chan))) { + printk(KERN_WARNING "%s: alloc_hdlcdev failed.\n", + chan->name); + goto err_hdlcdev; + } + dev_to_hdlc(chan->netdev)->attach = cosa_net_attach; + dev_to_hdlc(chan->netdev)->xmit = cosa_net_tx; + chan->netdev->open = cosa_net_open; + chan->netdev->stop = cosa_net_close; + chan->netdev->do_ioctl = cosa_net_ioctl; + chan->netdev->tx_timeout = cosa_net_timeout; + chan->netdev->watchdog_timeo = TX_TIMEOUT; + chan->netdev->base_addr = chan->cosa->datareg; + chan->netdev->irq = chan->cosa->irq; + chan->netdev->dma = chan->cosa->dma; + if (register_hdlc_device(chan->netdev)) { + printk(KERN_WARNING "%s: register_hdlc_device()" + " failed.\n", chan->netdev->name); + free_netdev(chan->netdev); + goto err_hdlcdev; + } } printk (KERN_INFO "cosa%d: %s (%s at 0x%x irq %d dma %d), %d channels\n", @@ -590,13 +596,20 @@ static int cosa_probe(int base, int irq, int dma) cosa->datareg, cosa->irq, cosa->dma, cosa->nchannels); return nr_cards++; + +err_hdlcdev: + while (i-- > 0) { + 
unregister_hdlc_device(cosa->chan[i].netdev); + free_netdev(cosa->chan[i].netdev); + } + kfree(cosa->chan); err_out3: kfree(cosa->bouncebuf); err_out2: free_dma(cosa->dma); err_out1: free_irq(cosa->irq, cosa); -err_out: +err_out: release_region(cosa->datareg,is_8bit(cosa)?2:4); printk(KERN_NOTICE "cosa%d: allocating resources failed\n", cosa->num); @@ -604,54 +617,19 @@ err_out: } -/*---------- SPPP/HDLC netdevice ---------- */ +/*---------- network device ---------- */ -static void cosa_setup(struct net_device *d) +static int cosa_net_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) { - d->open = cosa_sppp_open; - d->stop = cosa_sppp_close; - d->hard_start_xmit = cosa_sppp_tx; - d->do_ioctl = cosa_sppp_ioctl; - d->get_stats = cosa_net_stats; - d->tx_timeout = cosa_sppp_timeout; - d->watchdog_timeo = TX_TIMEOUT; -} - -static void sppp_channel_init(struct channel_data *chan) -{ - struct net_device *d; - chan->if_ptr = &chan->pppdev; - d = alloc_netdev(0, chan->name, cosa_setup); - if (!d) { - printk(KERN_WARNING "%s: alloc_netdev failed.\n", chan->name); - return; - } - chan->pppdev.dev = d; - d->base_addr = chan->cosa->datareg; - d->irq = chan->cosa->irq; - d->dma = chan->cosa->dma; - d->ml_priv = chan; - sppp_attach(&chan->pppdev); - if (register_netdev(d)) { - printk(KERN_WARNING "%s: register_netdev failed.\n", d->name); - sppp_detach(d); - free_netdev(d); - chan->pppdev.dev = NULL; - return; - } -} - -static void sppp_channel_delete(struct channel_data *chan) -{ - unregister_netdev(chan->pppdev.dev); - sppp_detach(chan->pppdev.dev); - free_netdev(chan->pppdev.dev); - chan->pppdev.dev = NULL; + if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) + return 0; + return -EINVAL; } -static int cosa_sppp_open(struct net_device *d) +static int cosa_net_open(struct net_device *dev) { - struct channel_data *chan = d->ml_priv; + struct channel_data *chan = dev_to_chan(dev); int err; unsigned long flags; @@ -662,36 +640,35 @@ static int cosa_sppp_open(struct net_device *d) } spin_lock_irqsave(&chan->cosa->lock, flags); if (chan->usage != 0) { - printk(KERN_WARNING "%s: sppp_open called with usage count %d\n", - chan->name, chan->usage); + printk(KERN_WARNING "%s: cosa_net_open called with usage count" + " %d\n", chan->name, chan->usage); spin_unlock_irqrestore(&chan->cosa->lock, flags); return -EBUSY; } - chan->setup_rx = sppp_setup_rx; - chan->tx_done = sppp_tx_done; - chan->rx_done = sppp_rx_done; - chan->usage=-1; + chan->setup_rx = cosa_net_setup_rx; + chan->tx_done = cosa_net_tx_done; + chan->rx_done = cosa_net_rx_done; + chan->usage = -1; chan->cosa->usage++; spin_unlock_irqrestore(&chan->cosa->lock, flags); - err = sppp_open(d); + err = hdlc_open(dev); if (err) { spin_lock_irqsave(&chan->cosa->lock, flags); - chan->usage=0; + chan->usage = 0; chan->cosa->usage--; - spin_unlock_irqrestore(&chan->cosa->lock, flags); return err; } - netif_start_queue(d); + netif_start_queue(dev); cosa_enable_rx(chan); return 0; } -static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) +static int cosa_net_tx(struct sk_buff *skb, struct net_device *dev) { - struct channel_data *chan = dev->ml_priv; + struct channel_data *chan = dev_to_chan(dev); netif_stop_queue(dev); @@ -700,16 +677,16 @@ static int cosa_sppp_tx(struct sk_buff *skb, struct net_device *dev) return 0; } -static void cosa_sppp_timeout(struct net_device *dev) +static void cosa_net_timeout(struct net_device *dev) { - struct channel_data *chan = dev->ml_priv; + struct channel_data *chan = 
dev_to_chan(dev); if (test_bit(RXBIT, &chan->cosa->rxtx)) { - chan->stats.rx_errors++; - chan->stats.rx_missed_errors++; + chan->netdev->stats.rx_errors++; + chan->netdev->stats.rx_missed_errors++; } else { - chan->stats.tx_errors++; - chan->stats.tx_aborted_errors++; + chan->netdev->stats.tx_errors++; + chan->netdev->stats.tx_aborted_errors++; } cosa_kick(chan->cosa); if (chan->tx_skb) { @@ -719,13 +696,13 @@ static void cosa_sppp_timeout(struct net_device *dev) netif_wake_queue(dev); } -static int cosa_sppp_close(struct net_device *d) +static int cosa_net_close(struct net_device *dev) { - struct channel_data *chan = d->ml_priv; + struct channel_data *chan = dev_to_chan(dev); unsigned long flags; - netif_stop_queue(d); - sppp_close(d); + netif_stop_queue(dev); + hdlc_close(dev); cosa_disable_rx(chan); spin_lock_irqsave(&chan->cosa->lock, flags); if (chan->rx_skb) { @@ -736,13 +713,13 @@ static int cosa_sppp_close(struct net_device *d) kfree_skb(chan->tx_skb); chan->tx_skb = NULL; } - chan->usage=0; + chan->usage = 0; chan->cosa->usage--; spin_unlock_irqrestore(&chan->cosa->lock, flags); return 0; } -static char *sppp_setup_rx(struct channel_data *chan, int size) +static char *cosa_net_setup_rx(struct channel_data *chan, int size) { /* * We can safely fall back to non-dma-able memory, because we have @@ -754,66 +731,53 @@ static char *sppp_setup_rx(struct channel_data *chan, int size) if (chan->rx_skb == NULL) { printk(KERN_NOTICE "%s: Memory squeeze, dropping packet\n", chan->name); - chan->stats.rx_dropped++; + chan->netdev->stats.rx_dropped++; return NULL; } - chan->pppdev.dev->trans_start = jiffies; + chan->netdev->trans_start = jiffies; return skb_put(chan->rx_skb, size); } -static int sppp_rx_done(struct channel_data *chan) +static int cosa_net_rx_done(struct channel_data *chan) { if (!chan->rx_skb) { printk(KERN_WARNING "%s: rx_done with empty skb!\n", chan->name); - chan->stats.rx_errors++; - chan->stats.rx_frame_errors++; + chan->netdev->stats.rx_errors++; + chan->netdev->stats.rx_frame_errors++; return 0; } - chan->rx_skb->protocol = htons(ETH_P_WAN_PPP); - chan->rx_skb->dev = chan->pppdev.dev; + chan->rx_skb->protocol = hdlc_type_trans(chan->rx_skb, chan->netdev); + chan->rx_skb->dev = chan->netdev; skb_reset_mac_header(chan->rx_skb); - chan->stats.rx_packets++; - chan->stats.rx_bytes += chan->cosa->rxsize; + chan->netdev->stats.rx_packets++; + chan->netdev->stats.rx_bytes += chan->cosa->rxsize; netif_rx(chan->rx_skb); chan->rx_skb = NULL; - chan->pppdev.dev->last_rx = jiffies; + chan->netdev->last_rx = jiffies; return 0; } /* ARGSUSED */ -static int sppp_tx_done(struct channel_data *chan, int size) +static int cosa_net_tx_done(struct channel_data *chan, int size) { if (!chan->tx_skb) { printk(KERN_WARNING "%s: tx_done with empty skb!\n", chan->name); - chan->stats.tx_errors++; - chan->stats.tx_aborted_errors++; + chan->netdev->stats.tx_errors++; + chan->netdev->stats.tx_aborted_errors++; return 1; } dev_kfree_skb_irq(chan->tx_skb); chan->tx_skb = NULL; - chan->stats.tx_packets++; - chan->stats.tx_bytes += size; - netif_wake_queue(chan->pppdev.dev); + chan->netdev->stats.tx_packets++; + chan->netdev->stats.tx_bytes += size; + netif_wake_queue(chan->netdev); return 1; } -static struct net_device_stats *cosa_net_stats(struct net_device *dev) -{ - struct channel_data *chan = dev->ml_priv; - return &chan->stats; -} - - /*---------- Character device ---------- */ -static void chardev_channel_init(struct channel_data *chan) -{ - mutex_init(&chan->rlock); - init_MUTEX(&chan->wsem); -} 
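The receive-completion shape above is the one every driver in this series converges on: the frame's protocol comes from hdlc_type_trans() instead of a hard-coded ETH_P_WAN_PPP, and the counters live in the net_device's own stats. A minimal sketch of that pattern; example_rx_complete() is a placeholder name and the CRC is assumed to be stripped already:

        #include <linux/hdlc.h>
        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        /* Hand one received HDLC frame to the stack from a converted driver. */
        static void example_rx_complete(struct net_device *dev, struct sk_buff *skb)
        {
                skb->protocol = hdlc_type_trans(skb, dev); /* ask attached proto */
                skb_reset_mac_header(skb);
                dev->stats.rx_packets++;
                dev->stats.rx_bytes += skb->len;
                netif_rx(skb);                     /* defer real work to softirq */
        }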
- static ssize_t cosa_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { @@ -1223,16 +1187,15 @@ static int cosa_ioctl_common(struct cosa_data *cosa, return -ENOIOCTLCMD; } -static int cosa_sppp_ioctl(struct net_device *dev, struct ifreq *ifr, - int cmd) +static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { int rv; - struct channel_data *chan = dev->ml_priv; - rv = cosa_ioctl_common(chan->cosa, chan, cmd, (unsigned long)ifr->ifr_data); - if (rv == -ENOIOCTLCMD) { - return sppp_do_ioctl(dev, ifr, cmd); - } - return rv; + struct channel_data *chan = dev_to_chan(dev); + rv = cosa_ioctl_common(chan->cosa, chan, cmd, + (unsigned long)ifr->ifr_data); + if (rv != -ENOIOCTLCMD) + return rv; + return hdlc_ioctl(dev, ifr, cmd); } static int cosa_chardev_ioctl(struct inode *inode, struct file *file, diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 50ef5b4..f5d55ad 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -103,7 +103,6 @@ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/delay.h> -#include <net/syncppp.h> #include <linux/hdlc.h> #include <linux/mutex.h> diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 754f008..9557ad0 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -47,10 +47,7 @@ MODULE_LICENSE("GPL"); /* Default parameters for the link */ #define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is - * useful, the syncppp module forces - * this down assuming a slower line I - * guess. - */ + * useful */ #define FST_TXQ_DEPTH 16 /* This one is for the buffering * of frames on the way down to the card * so that we can keep the card busy diff --git a/drivers/net/wan/farsync.h b/drivers/net/wan/farsync.h index d871daf..6b27e7c 100644 --- a/drivers/net/wan/farsync.h +++ b/drivers/net/wan/farsync.h @@ -54,9 +54,6 @@ /* Ioctl call command values - * - * The first three private ioctls are used by the sync-PPP module, - * allowing a little room for expansion we start our numbering at 10. */ #define FSTWRITE (SIOCDEVPRIVATE+10) #define FSTCPURESET (SIOCDEVPRIVATE+11) @@ -202,9 +199,6 @@ struct fstioc_info { #define J1 7 /* "proto" */ -#define FST_HDLC 1 /* Cisco compatible HDLC */ -#define FST_PPP 2 /* Sync PPP */ -#define FST_MONITOR 3 /* Monitor only (raw packet reception) */ #define FST_RAW 4 /* Two way raw packets */ #define FST_GEN_HDLC 5 /* Using "Generic HDLC" module */ diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index e3a5364..1f2a140 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c @@ -22,20 +22,19 @@ * - proto->start() and stop() are called with spin_lock_irq held. 
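cosa_net_ioctl() above is the canonical way a converted driver keeps its private ioctls: handle what it recognises, and pass everything else to hdlc_ioctl(), which dispatches the IF_GET_PROTO/IF_PROTO_* requests to the attached protocol. Roughly, with example_private_ioctl() standing in for the driver-specific helper:

        static int example_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        {
                int rv = example_private_ioctl(dev, ifr, cmd); /* placeholder */

                if (rv != -ENOIOCTLCMD)
                        return rv;
                return hdlc_ioctl(dev, ifr, cmd);
        }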
*/ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/poll.h> #include <linux/errno.h> +#include <linux/hdlc.h> #include <linux/if_arp.h> +#include <linux/inetdevice.h> #include <linux/init.h> -#include <linux/skbuff.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/notifier.h> #include <linux/pkt_sched.h> -#include <linux/inetdevice.h> -#include <linux/lapb.h> +#include <linux/poll.h> #include <linux/rtnetlink.h> -#include <linux/notifier.h> -#include <linux/hdlc.h> +#include <linux/skbuff.h> +#include <linux/slab.h> #include <net/net_namespace.h> @@ -109,7 +108,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event, if (dev->get_stats != hdlc_get_stats) return NOTIFY_DONE; /* not an HDLC device */ - + if (event != NETDEV_CHANGE) return NOTIFY_DONE; /* Only interrested in carrier changes */ @@ -357,7 +356,7 @@ static struct packet_type hdlc_packet_type = { static struct notifier_block hdlc_notifier = { - .notifier_call = hdlc_device_event, + .notifier_call = hdlc_device_event, }; @@ -367,8 +366,8 @@ static int __init hdlc_module_init(void) printk(KERN_INFO "%s\n", version); if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0) - return result; - dev_add_pack(&hdlc_packet_type); + return result; + dev_add_pack(&hdlc_packet_type); return 0; } diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 849819c..44e64b15 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -9,19 +9,18 @@ * as published by the Free Software Foundation. */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/poll.h> #include <linux/errno.h> +#include <linux/hdlc.h> #include <linux/if_arp.h> +#include <linux/inetdevice.h> #include <linux/init.h> -#include <linux/skbuff.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/pkt_sched.h> -#include <linux/inetdevice.h> -#include <linux/lapb.h> +#include <linux/poll.h> #include <linux/rtnetlink.h> -#include <linux/hdlc.h> +#include <linux/skbuff.h> +#include <linux/slab.h> #undef DEBUG_HARD_HEADER @@ -68,9 +67,9 @@ struct cisco_state { static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr); -static inline struct cisco_state * state(hdlc_device *hdlc) +static inline struct cisco_state* state(hdlc_device *hdlc) { - return(struct cisco_state *)(hdlc->state); + return (struct cisco_state *)hdlc->state; } @@ -172,7 +171,7 @@ static int cisco_rx(struct sk_buff *skb) data->address != CISCO_UNICAST) goto rx_error; - switch(ntohs(data->protocol)) { + switch (ntohs(data->protocol)) { case CISCO_SYS_INFO: /* Packet is not needed, drop it. 
*/ dev_kfree_skb_any(skb); @@ -336,7 +335,7 @@ static struct hdlc_proto proto = { static const struct header_ops cisco_header_ops = { .create = cisco_hard_header, }; - + static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) { cisco_proto __user *cisco_s = ifr->ifr_settings.ifs_ifsu.cisco; @@ -359,10 +358,10 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) return 0; case IF_PROTO_CISCO: - if(!capable(CAP_NET_ADMIN)) + if (!capable(CAP_NET_ADMIN)) return -EPERM; - if(dev->flags & IFF_UP) + if (dev->flags & IFF_UP) return -EBUSY; if (copy_from_user(&new_settings, cisco_s, size)) @@ -372,7 +371,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) new_settings.timeout < 2) return -EINVAL; - result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); + result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT); if (result) return result; diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 62e93da..d3d5055 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -33,20 +33,19 @@ */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/poll.h> #include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/hdlc.h> #include <linux/if_arp.h> +#include <linux/inetdevice.h> #include <linux/init.h> -#include <linux/skbuff.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/pkt_sched.h> -#include <linux/inetdevice.h> -#include <linux/lapb.h> +#include <linux/poll.h> #include <linux/rtnetlink.h> -#include <linux/etherdevice.h> -#include <linux/hdlc.h> +#include <linux/skbuff.h> +#include <linux/slab.h> #undef DEBUG_PKT #undef DEBUG_ECN @@ -96,7 +95,7 @@ typedef struct { unsigned ea1: 1; unsigned cr: 1; unsigned dlcih: 6; - + unsigned ea2: 1; unsigned de: 1; unsigned becn: 1; diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 0030833..4efe9e6 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -9,19 +9,18 @@ * as published by the Free Software Foundation. */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/poll.h> #include <linux/errno.h> +#include <linux/hdlc.h> #include <linux/if_arp.h> +#include <linux/inetdevice.h> #include <linux/init.h> -#include <linux/skbuff.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/pkt_sched.h> -#include <linux/inetdevice.h> -#include <linux/lapb.h> +#include <linux/poll.h> #include <linux/rtnetlink.h> -#include <linux/hdlc.h> +#include <linux/skbuff.h> +#include <linux/slab.h> #include <net/syncppp.h> struct ppp_state { diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index bbbb819..8612311 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c @@ -9,19 +9,18 @@ * as published by the Free Software Foundation. 
*/ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/poll.h> #include <linux/errno.h> +#include <linux/hdlc.h> #include <linux/if_arp.h> +#include <linux/inetdevice.h> #include <linux/init.h> -#include <linux/skbuff.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/pkt_sched.h> -#include <linux/inetdevice.h> -#include <linux/lapb.h> +#include <linux/poll.h> #include <linux/rtnetlink.h> -#include <linux/hdlc.h> +#include <linux/skbuff.h> +#include <linux/slab.h> static int raw_ioctl(struct net_device *dev, struct ifreq *ifr); diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index 26dee60..a13fc32 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -9,20 +9,19 @@ * as published by the Free Software Foundation. */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/poll.h> #include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/hdlc.h> #include <linux/if_arp.h> +#include <linux/inetdevice.h> #include <linux/init.h> -#include <linux/skbuff.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/pkt_sched.h> -#include <linux/inetdevice.h> -#include <linux/lapb.h> +#include <linux/poll.h> #include <linux/rtnetlink.h> -#include <linux/etherdevice.h> -#include <linux/hdlc.h> +#include <linux/skbuff.h> +#include <linux/slab.h> static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index e808720..8b7e5d2 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -9,20 +9,19 @@ * as published by the Free Software Foundation. */ -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/poll.h> #include <linux/errno.h> +#include <linux/hdlc.h> #include <linux/if_arp.h> -#include <linux/init.h> -#include <linux/skbuff.h> -#include <linux/pkt_sched.h> #include <linux/inetdevice.h> +#include <linux/init.h> +#include <linux/kernel.h> #include <linux/lapb.h> +#include <linux/module.h> +#include <linux/pkt_sched.h> +#include <linux/poll.h> #include <linux/rtnetlink.h> -#include <linux/hdlc.h> - +#include <linux/skbuff.h> +#include <linux/slab.h> #include <net/x25device.h> static int x25_ioctl(struct net_device *dev, struct ifreq *ifr); diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c index f3065d3..e299313 100644 --- a/drivers/net/wan/hostess_sv11.c +++ b/drivers/net/wan/hostess_sv11.c @@ -16,6 +16,8 @@ * touching control registers. * * Port B isnt wired (why - beats me) + * + * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> */ #include <linux/module.h> @@ -26,6 +28,7 @@ #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/delay.h> +#include <linux/hdlc.h> #include <linux/ioport.h> #include <net/arp.h> @@ -33,34 +36,31 @@ #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> -#include <net/syncppp.h> #include "z85230.h" static int dma; -struct sv11_device -{ - void *if_ptr; /* General purpose pointer (used by SPPP) */ - struct z8530_dev sync; - struct ppp_device netdev; -}; - /* * Network driver support routines */ +static inline struct z8530_dev* dev_to_sv(struct net_device *dev) +{ + return (struct z8530_dev *)dev_to_hdlc(dev)->priv; +} + /* - * Frame receive. Simple for our card as we do sync ppp and there + * Frame receive. 
Simple for our card as we do HDLC and there * is no funny garbage involved */ - + static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) { /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ - skb_trim(skb, skb->len-2); - skb->protocol=__constant_htons(ETH_P_WAN_PPP); + skb_trim(skb, skb->len - 2); + skb->protocol = hdlc_type_trans(skb, c->netdevice); skb_reset_mac_header(skb); - skb->dev=c->netdevice; + skb->dev = c->netdevice; /* * Send it to the PPP layer. We don't have time to process * it right now. @@ -68,56 +68,51 @@ static void hostess_input(struct z8530_channel *c, struct sk_buff *skb) netif_rx(skb); c->netdevice->last_rx = jiffies; } - + /* * We've been placed in the UP state - */ - + */ + static int hostess_open(struct net_device *d) { - struct sv11_device *sv11=d->ml_priv; + struct z8530_dev *sv11 = dev_to_sv(d); int err = -1; - + /* * Link layer up */ - switch(dma) - { + switch (dma) { case 0: - err=z8530_sync_open(d, &sv11->sync.chanA); + err = z8530_sync_open(d, &sv11->chanA); break; case 1: - err=z8530_sync_dma_open(d, &sv11->sync.chanA); + err = z8530_sync_dma_open(d, &sv11->chanA); break; case 2: - err=z8530_sync_txdma_open(d, &sv11->sync.chanA); + err = z8530_sync_txdma_open(d, &sv11->chanA); break; } - - if(err) + + if (err) return err; - /* - * Begin PPP - */ - err=sppp_open(d); - if(err) - { - switch(dma) - { + + err = hdlc_open(d); + if (err) { + switch (dma) { case 0: - z8530_sync_close(d, &sv11->sync.chanA); + z8530_sync_close(d, &sv11->chanA); break; case 1: - z8530_sync_dma_close(d, &sv11->sync.chanA); + z8530_sync_dma_close(d, &sv11->chanA); break; case 2: - z8530_sync_txdma_close(d, &sv11->sync.chanA); + z8530_sync_txdma_close(d, &sv11->chanA); break; - } + } return err; } - sv11->sync.chanA.rx_function=hostess_input; - + sv11->chanA.rx_function = hostess_input; + /* * Go go go */ @@ -128,30 +123,24 @@ static int hostess_open(struct net_device *d) static int hostess_close(struct net_device *d) { - struct sv11_device *sv11=d->ml_priv; + struct z8530_dev *sv11 = dev_to_sv(d); /* * Discard new frames */ - sv11->sync.chanA.rx_function=z8530_null_rx; - /* - * PPP off - */ - sppp_close(d); - /* - * Link layer down - */ + sv11->chanA.rx_function = z8530_null_rx; + + hdlc_close(d); netif_stop_queue(d); - - switch(dma) - { + + switch (dma) { case 0: - z8530_sync_close(d, &sv11->sync.chanA); + z8530_sync_close(d, &sv11->chanA); break; case 1: - z8530_sync_dma_close(d, &sv11->sync.chanA); + z8530_sync_dma_close(d, &sv11->chanA); break; case 2: - z8530_sync_txdma_close(d, &sv11->sync.chanA); + z8530_sync_txdma_close(d, &sv11->chanA); break; } return 0; @@ -159,232 +148,174 @@ static int hostess_close(struct net_device *d) static int hostess_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) { - /* struct sv11_device *sv11=d->ml_priv; - z8530_ioctl(d,&sv11->sync.chanA,ifr,cmd) */ - return sppp_do_ioctl(d, ifr,cmd); -} - -static struct net_device_stats *hostess_get_stats(struct net_device *d) -{ - struct sv11_device *sv11=d->ml_priv; - if(sv11) - return z8530_get_stats(&sv11->sync.chanA); - else - return NULL; + /* struct z8530_dev *sv11=dev_to_sv(d); + z8530_ioctl(d,&sv11->chanA,ifr,cmd) */ + return hdlc_ioctl(d, ifr, cmd); } /* - * Passed PPP frames, fire them downwind. + * Passed network frames, fire them downwind. 
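hostess_open() and hostess_close() above show the ordering every converted Z8530 driver uses: bring the hardware channel up first, then let the protocol come up with hdlc_open(), and tear the channel back down if that fails. Schematically, with example_hw_open()/example_hw_close() as placeholders for the z8530_sync_*_open/close calls:

        static int example_open(struct net_device *dev)
        {
                int err = example_hw_open(dev);     /* e.g. z8530_sync_open() */

                if (err)
                        return err;
                err = hdlc_open(dev);
                if (err) {
                        example_hw_close(dev);      /* unwind the hardware */
                        return err;
                }
                netif_start_queue(dev);
                return 0;
        }

The close path runs the same steps in reverse: netif_stop_queue(), hdlc_close(), then the hardware shutdown.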
*/ - + static int hostess_queue_xmit(struct sk_buff *skb, struct net_device *d) { - struct sv11_device *sv11=d->ml_priv; - return z8530_queue_xmit(&sv11->sync.chanA, skb); + return z8530_queue_xmit(&dev_to_sv(d)->chanA, skb); } -static int hostess_neigh_setup(struct neighbour *n) +static int hostess_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) { - if (n->nud_state == NUD_NONE) { - n->ops = &arp_broken_ops; - n->output = n->ops->output; - } - return 0; -} - -static int hostess_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) -{ - if (p->tbl->family == AF_INET) { - p->neigh_setup = hostess_neigh_setup; - p->ucast_probes = 0; - p->mcast_probes = 0; - } - return 0; -} - -static void sv11_setup(struct net_device *dev) -{ - dev->open = hostess_open; - dev->stop = hostess_close; - dev->hard_start_xmit = hostess_queue_xmit; - dev->get_stats = hostess_get_stats; - dev->do_ioctl = hostess_ioctl; - dev->neigh_setup = hostess_neigh_setup_dev; + if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) + return 0; + return -EINVAL; } /* * Description block for a Comtrol Hostess SV11 card */ - -static struct sv11_device *sv11_init(int iobase, int irq) + +static struct z8530_dev *sv11_init(int iobase, int irq) { - struct z8530_dev *dev; - struct sv11_device *sv; - + struct z8530_dev *sv; + struct net_device *netdev; /* * Get the needed I/O space */ - - if(!request_region(iobase, 8, "Comtrol SV11")) - { - printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", iobase); + + if (!request_region(iobase, 8, "Comtrol SV11")) { + printk(KERN_WARNING "hostess: I/O 0x%X already in use.\n", + iobase); return NULL; } - - sv = kzalloc(sizeof(struct sv11_device), GFP_KERNEL); - if(!sv) - goto fail3; - - sv->if_ptr=&sv->netdev; - - sv->netdev.dev = alloc_netdev(0, "hdlc%d", sv11_setup); - if(!sv->netdev.dev) - goto fail2; - - dev=&sv->sync; - + + sv = kzalloc(sizeof(struct z8530_dev), GFP_KERNEL); + if (!sv) + goto err_kzalloc; + /* * Stuff in the I/O addressing */ - - dev->active = 0; - - dev->chanA.ctrlio=iobase+1; - dev->chanA.dataio=iobase+3; - dev->chanB.ctrlio=-1; - dev->chanB.dataio=-1; - dev->chanA.irqs=&z8530_nop; - dev->chanB.irqs=&z8530_nop; - - outb(0, iobase+4); /* DMA off */ - + + sv->active = 0; + + sv->chanA.ctrlio = iobase + 1; + sv->chanA.dataio = iobase + 3; + sv->chanB.ctrlio = -1; + sv->chanB.dataio = -1; + sv->chanA.irqs = &z8530_nop; + sv->chanB.irqs = &z8530_nop; + + outb(0, iobase + 4); /* DMA off */ + /* We want a fast IRQ for this device. Actually we'd like an even faster IRQ ;) - This is one driver RtLinux is made for */ - - if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "Hostess SV11", dev)<0) - { + + if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED, + "Hostess SV11", sv) < 0) { printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq); - goto fail1; + goto err_irq; } - - dev->irq=irq; - dev->chanA.private=sv; - dev->chanA.netdevice=sv->netdev.dev; - dev->chanA.dev=dev; - dev->chanB.dev=dev; - - if(dma) - { + + sv->irq = irq; + sv->chanA.private = sv; + sv->chanA.dev = sv; + sv->chanB.dev = sv; + + if (dma) { /* * You can have DMA off or 1 and 3 thats the lot * on the Comtrol. 
*/ - dev->chanA.txdma=3; - dev->chanA.rxdma=1; - outb(0x03|0x08, iobase+4); /* DMA on */ - if(request_dma(dev->chanA.txdma, "Hostess SV/11 (TX)")!=0) - goto fail; - - if(dma==1) - { - if(request_dma(dev->chanA.rxdma, "Hostess SV/11 (RX)")!=0) - goto dmafail; - } + sv->chanA.txdma = 3; + sv->chanA.rxdma = 1; + outb(0x03 | 0x08, iobase + 4); /* DMA on */ + if (request_dma(sv->chanA.txdma, "Hostess SV/11 (TX)")) + goto err_txdma; + + if (dma == 1) + if (request_dma(sv->chanA.rxdma, "Hostess SV/11 (RX)")) + goto err_rxdma; } /* Kill our private IRQ line the hostess can end up chattering until the configuration is set */ disable_irq(irq); - + /* * Begin normal initialise */ - - if(z8530_init(dev)!=0) - { + + if (z8530_init(sv)) { printk(KERN_ERR "Z8530 series device not found.\n"); enable_irq(irq); - goto dmafail2; + goto free_dma; } - z8530_channel_load(&dev->chanB, z8530_dead_port); - if(dev->type==Z85C30) - z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); + z8530_channel_load(&sv->chanB, z8530_dead_port); + if (sv->type == Z85C30) + z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream); else - z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); - + z8530_channel_load(&sv->chanA, z8530_hdlc_kilostream_85230); + enable_irq(irq); - /* * Now we can take the IRQ */ - if(dev_alloc_name(dev->chanA.netdevice,"hdlc%d")>=0) - { - struct net_device *d=dev->chanA.netdevice; - /* - * Initialise the PPP components - */ - d->ml_priv = sv; - sppp_attach(&sv->netdev); - - /* - * Local fields - */ - - d->base_addr = iobase; - d->irq = irq; - - if(register_netdev(d)) - { - printk(KERN_ERR "%s: unable to register device.\n", - d->name); - sppp_detach(d); - goto dmafail2; - } + sv->chanA.netdevice = netdev = alloc_hdlcdev(sv); + if (!netdev) + goto free_dma; - z8530_describe(dev, "I/O", iobase); - dev->active=1; - return sv; + dev_to_hdlc(netdev)->attach = hostess_attach; + dev_to_hdlc(netdev)->xmit = hostess_queue_xmit; + netdev->open = hostess_open; + netdev->stop = hostess_close; + netdev->do_ioctl = hostess_ioctl; + netdev->base_addr = iobase; + netdev->irq = irq; + + if (register_hdlc_device(netdev)) { + printk(KERN_ERR "hostess: unable to register HDLC device.\n"); + free_netdev(netdev); + goto free_dma; } -dmafail2: - if(dma==1) - free_dma(dev->chanA.rxdma); -dmafail: - if(dma) - free_dma(dev->chanA.txdma); -fail: - free_irq(irq, dev); -fail1: - free_netdev(sv->netdev.dev); -fail2: + + z8530_describe(sv, "I/O", iobase); + sv->active = 1; + return sv; + +free_dma: + if (dma == 1) + free_dma(sv->chanA.rxdma); +err_rxdma: + if (dma) + free_dma(sv->chanA.txdma); +err_txdma: + free_irq(irq, sv); +err_irq: kfree(sv); -fail3: - release_region(iobase,8); +err_kzalloc: + release_region(iobase, 8); return NULL; } -static void sv11_shutdown(struct sv11_device *dev) +static void sv11_shutdown(struct z8530_dev *dev) { - sppp_detach(dev->netdev.dev); - unregister_netdev(dev->netdev.dev); - z8530_shutdown(&dev->sync); - free_irq(dev->sync.irq, dev); - if(dma) - { - if(dma==1) - free_dma(dev->sync.chanA.rxdma); - free_dma(dev->sync.chanA.txdma); + unregister_hdlc_device(dev->chanA.netdevice); + z8530_shutdown(dev); + free_irq(dev->irq, dev); + if (dma) { + if (dma == 1) + free_dma(dev->chanA.rxdma); + free_dma(dev->chanA.txdma); } - release_region(dev->sync.chanA.ctrlio-1, 8); - free_netdev(dev->netdev.dev); + release_region(dev->chanA.ctrlio - 1, 8); + free_netdev(dev->chanA.netdevice); kfree(dev); } -#ifdef MODULE - -static int io=0x200; -static int irq=9; +static int io = 0x200; +static int irq = 9; 
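sv11_init() above and cosa_probe() earlier register their ports the same way: the net_device is allocated with alloc_hdlcdev() so that dev_to_hdlc(dev)->priv points at the driver's per-port state, the attach and xmit hooks are filled in, and the device goes live via register_hdlc_device(). A condensed sketch of that sequence; the example_* names and the struct example_port type are illustrative only:

        static int example_attach(struct net_device *dev, unsigned short encoding,
                                  unsigned short parity)
        {
                /* All three drivers here accept NRZ with 16-bit CCITT CRC only. */
                if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
                        return 0;
                return -EINVAL;
        }

        static int example_register_port(struct example_port *port)
        {
                struct net_device *dev = alloc_hdlcdev(port);

                if (!dev)
                        return -ENOMEM;
                dev_to_hdlc(dev)->attach = example_attach; /* encoding check */
                dev_to_hdlc(dev)->xmit = example_xmit;     /* queue frame to HW */
                dev->open = example_open;
                dev->stop = example_close;
                dev->do_ioctl = example_ioctl;
                if (register_hdlc_device(dev)) {
                        free_netdev(dev);
                        return -EIO;
                }
                port->netdev = dev;
                return 0;
        }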
module_param(io, int, 0); MODULE_PARM_DESC(io, "The I/O base of the Comtrol Hostess SV11 card"); @@ -397,22 +328,17 @@ MODULE_AUTHOR("Alan Cox"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Modular driver for the Comtrol Hostess SV11"); -static struct sv11_device *sv11_unit; +static struct z8530_dev *sv11_unit; int init_module(void) { - printk(KERN_INFO "SV-11 Z85230 Synchronous Driver v 0.03.\n"); - printk(KERN_INFO "(c) Copyright 2001, Red Hat Inc.\n"); - if((sv11_unit=sv11_init(io,irq))==NULL) + if ((sv11_unit = sv11_init(io, irq)) == NULL) return -ENODEV; return 0; } void cleanup_module(void) { - if(sv11_unit) + if (sv11_unit) sv11_shutdown(sv11_unit); } - -#endif - diff --git a/drivers/net/wan/lmc/lmc.h b/drivers/net/wan/lmc/lmc.h index 882e58c..4ced7ac 100644 --- a/drivers/net/wan/lmc/lmc.h +++ b/drivers/net/wan/lmc/lmc.h @@ -11,12 +11,12 @@ unsigned lmc_mii_readreg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno); void lmc_mii_writereg(lmc_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data); -void lmc_led_on(lmc_softc_t * const, u_int32_t); -void lmc_led_off(lmc_softc_t * const, u_int32_t); +void lmc_led_on(lmc_softc_t * const, u32); +void lmc_led_off(lmc_softc_t * const, u32); unsigned lmc_mii_readreg(lmc_softc_t * const, unsigned, unsigned); void lmc_mii_writereg(lmc_softc_t * const, unsigned, unsigned, unsigned); -void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits); -void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits); +void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits); +void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits); int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); @@ -26,8 +26,7 @@ extern lmc_media_t lmc_t1_media; extern lmc_media_t lmc_hssi_media; #ifdef _DBG_EVENTLOG -static void lmcEventLog( u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3 ); +static void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); #endif #endif - diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c index 3b94352..15049d7 100644 --- a/drivers/net/wan/lmc/lmc_debug.c +++ b/drivers/net/wan/lmc/lmc_debug.c @@ -1,4 +1,3 @@ - #include <linux/types.h> #include <linux/netdevice.h> #include <linux/interrupt.h> @@ -48,10 +47,10 @@ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen) #endif #ifdef DEBUG -u_int32_t lmcEventLogIndex = 0; -u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; +u32 lmcEventLogIndex; +u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; -void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3) +void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3) { lmcEventLogBuf[lmcEventLogIndex++] = EventNum; lmcEventLogBuf[lmcEventLogIndex++] = arg2; diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h index cf35638..2d46f12 100644 --- a/drivers/net/wan/lmc/lmc_debug.h +++ b/drivers/net/wan/lmc/lmc_debug.h @@ -38,15 +38,15 @@ #ifdef DEBUG -extern u_int32_t lmcEventLogIndex; -extern u_int32_t lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; +extern u32 lmcEventLogIndex; +extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS]; #define LMC_EVENT_LOG(x, y, z) lmcEventLog((x), (y), (z)) #else #define LMC_EVENT_LOG(x,y,z) #endif /* end ifdef _DBG_EVENTLOG */ void lmcConsoleLog(char *type, unsigned char *ucData, int iLen); -void lmcEventLog (u_int32_t EventNum, u_int32_t arg2, u_int32_t arg3); +void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3); void lmc_trace(struct net_device *dev, char *msg); #endif diff --git 
a/drivers/net/wan/lmc/lmc_ioctl.h b/drivers/net/wan/lmc/lmc_ioctl.h index 57dd861..72fb113 100644 --- a/drivers/net/wan/lmc/lmc_ioctl.h +++ b/drivers/net/wan/lmc/lmc_ioctl.h @@ -61,7 +61,7 @@ /* * IFTYPE defines */ -#define LMC_PPP 1 /* use sppp interface */ +#define LMC_PPP 1 /* use generic HDLC interface */ #define LMC_NET 2 /* use direct net interface */ #define LMC_RAW 3 /* use direct net interface */ diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 62133ce..f80640f 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -1,6 +1,7 @@ /* * Copyright (c) 1997-2000 LAN Media Corporation (LMC) * All rights reserved. www.lanmedia.com + * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> * * This code is written by: * Andrew Stanley-Jones (asj@cban.com) @@ -36,8 +37,6 @@ * */ -/* $Id: lmc_main.c,v 1.36 2000/04/11 05:25:25 asj Exp $ */ - #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> @@ -49,6 +48,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/delay.h> +#include <linux/hdlc.h> #include <linux/init.h> #include <linux/in.h> #include <linux/if_arp.h> @@ -57,9 +57,6 @@ #include <linux/skbuff.h> #include <linux/inet.h> #include <linux/bitops.h> - -#include <net/syncppp.h> - #include <asm/processor.h> /* Processor type for cache alignment. */ #include <asm/io.h> #include <asm/dma.h> @@ -78,8 +75,6 @@ #include "lmc_debug.h" #include "lmc_proto.h" -static int lmc_first_load = 0; - static int LMC_PKT_BUF_SZ = 1542; static struct pci_device_id lmc_pci_tbl[] = { @@ -91,11 +86,10 @@ static struct pci_device_id lmc_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, lmc_pci_tbl); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); -static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev); static int lmc_rx (struct net_device *dev); static int lmc_open(struct net_device *dev); static int lmc_close(struct net_device *dev); @@ -114,20 +108,14 @@ static void lmc_driver_timeout(struct net_device *dev); * linux reserves 16 device specific IOCTLs. We call them * LMCIOC* to control various bits of our world. 
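As the LMCIOC* comment below notes, these commands sit in the SIOCDEVPRIVATE range, and the hunks that follow keep the existing rule that anything changing card state is gated on CAP_NET_ADMIN and copies its argument in from user space before acting on it. The general shape is roughly the following; the command number and the example_ctl type are hypothetical:

        static int example_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        {
                struct example_ctl ctl;            /* hypothetical settings block */

                switch (cmd) {
                case SIOCDEVPRIVATE + 3:           /* hypothetical "set info" cmd */
                        if (!capable(CAP_NET_ADMIN))
                                return -EPERM;
                        if (copy_from_user(&ctl, ifr->ifr_data, sizeof(ctl)))
                                return -EFAULT;
                        /* ... apply ctl to the hardware ... */
                        return 0;
                default:
                        return -EOPNOTSUPP;
                }
        }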
*/ -int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ +int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ { - lmc_softc_t *sc; + lmc_softc_t *sc = dev_to_sc(dev); lmc_ctl_t ctl; - int ret; - u_int16_t regVal; + int ret = -EOPNOTSUPP; + u16 regVal; unsigned long flags; - struct sppp *sp; - - ret = -EOPNOTSUPP; - - sc = dev->priv; - lmc_trace(dev, "lmc_ioctl in"); /* @@ -149,7 +137,6 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ break; case LMCIOCSINFO: /*fold01*/ - sp = &((struct ppp_device *) dev)->sppp; if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; @@ -175,25 +162,20 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ sc->TxDescriptControlInit &= ~LMC_TDES_ADD_CRC_DISABLE; } - if (ctl.keepalive_onoff == LMC_CTL_OFF) - sp->pp_flags &= ~PP_KEEPALIVE; /* Turn off */ - else - sp->pp_flags |= PP_KEEPALIVE; /* Turn on */ - ret = 0; break; case LMCIOCIFTYPE: /*fold01*/ { - u_int16_t old_type = sc->if_type; - u_int16_t new_type; + u16 old_type = sc->if_type; + u16 new_type; if (!capable(CAP_NET_ADMIN)) { ret = -EPERM; break; } - if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u_int16_t))) { + if (copy_from_user(&new_type, ifr->ifr_data, sizeof(u16))) { ret = -EFAULT; break; } @@ -206,15 +188,11 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ } lmc_proto_close(sc); - lmc_proto_detach(sc); sc->if_type = new_type; -// lmc_proto_init(sc); lmc_proto_attach(sc); - lmc_proto_open(sc); - - ret = 0 ; - break ; + ret = lmc_proto_open(sc); + break; } case LMCIOCGETXINFO: /*fold01*/ @@ -241,51 +219,53 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ break; - case LMCIOCGETLMCSTATS: /*fold01*/ - if (sc->lmc_cardtype == LMC_CARDTYPE_T1){ - lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_LSB); - sc->stats.framingBitErrorCount += - lmc_mii_readreg (sc, 0, 18) & 0xff; - lmc_mii_writereg (sc, 0, 17, T1FRAMER_FERR_MSB); - sc->stats.framingBitErrorCount += - (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; - lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_LSB); - sc->stats.lineCodeViolationCount += - lmc_mii_readreg (sc, 0, 18) & 0xff; - lmc_mii_writereg (sc, 0, 17, T1FRAMER_LCV_MSB); - sc->stats.lineCodeViolationCount += - (lmc_mii_readreg (sc, 0, 18) & 0xff) << 8; - lmc_mii_writereg (sc, 0, 17, T1FRAMER_AERR); - regVal = lmc_mii_readreg (sc, 0, 18) & 0xff; - - sc->stats.lossOfFrameCount += - (regVal & T1FRAMER_LOF_MASK) >> 4; - sc->stats.changeOfFrameAlignmentCount += - (regVal & T1FRAMER_COFA_MASK) >> 2; - sc->stats.severelyErroredFrameCount += - regVal & T1FRAMER_SEF_MASK; - } - - if (copy_to_user(ifr->ifr_data, &sc->stats, - sizeof (struct lmc_statistics))) - ret = -EFAULT; - else - ret = 0; - break; + case LMCIOCGETLMCSTATS: + if (sc->lmc_cardtype == LMC_CARDTYPE_T1) { + lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_LSB); + sc->extra_stats.framingBitErrorCount += + lmc_mii_readreg(sc, 0, 18) & 0xff; + lmc_mii_writereg(sc, 0, 17, T1FRAMER_FERR_MSB); + sc->extra_stats.framingBitErrorCount += + (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; + lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_LSB); + sc->extra_stats.lineCodeViolationCount += + lmc_mii_readreg(sc, 0, 18) & 0xff; + lmc_mii_writereg(sc, 0, 17, T1FRAMER_LCV_MSB); + sc->extra_stats.lineCodeViolationCount += + (lmc_mii_readreg(sc, 0, 18) & 0xff) << 8; + lmc_mii_writereg(sc, 0, 17, T1FRAMER_AERR); + regVal = lmc_mii_readreg(sc, 0, 18) & 0xff; + + sc->extra_stats.lossOfFrameCount += + (regVal & 
T1FRAMER_LOF_MASK) >> 4; + sc->extra_stats.changeOfFrameAlignmentCount += + (regVal & T1FRAMER_COFA_MASK) >> 2; + sc->extra_stats.severelyErroredFrameCount += + regVal & T1FRAMER_SEF_MASK; + } + if (copy_to_user(ifr->ifr_data, &sc->lmc_device->stats, + sizeof(sc->lmc_device->stats)) || + copy_to_user(ifr->ifr_data + sizeof(sc->lmc_device->stats), + &sc->extra_stats, sizeof(sc->extra_stats))) + ret = -EFAULT; + else + ret = 0; + break; - case LMCIOCCLEARLMCSTATS: /*fold01*/ - if (!capable(CAP_NET_ADMIN)){ - ret = -EPERM; - break; - } + case LMCIOCCLEARLMCSTATS: + if (!capable(CAP_NET_ADMIN)) { + ret = -EPERM; + break; + } - memset (&sc->stats, 0, sizeof (struct lmc_statistics)); - sc->stats.check = STATCHECK; - sc->stats.version_size = (DRIVER_VERSION << 16) + - sizeof (struct lmc_statistics); - sc->stats.lmc_cardtype = sc->lmc_cardtype; - ret = 0; - break; + memset(&sc->lmc_device->stats, 0, sizeof(sc->lmc_device->stats)); + memset(&sc->extra_stats, 0, sizeof(sc->extra_stats)); + sc->extra_stats.check = STATCHECK; + sc->extra_stats.version_size = (DRIVER_VERSION << 16) + + sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); + sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; + ret = 0; + break; case LMCIOCSETCIRCUIT: /*fold01*/ if (!capable(CAP_NET_ADMIN)){ @@ -330,7 +310,8 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ ret = -EFAULT; break; } - if (copy_to_user(ifr->ifr_data + sizeof (u32), lmcEventLogBuf, sizeof (lmcEventLogBuf))) + if (copy_to_user(ifr->ifr_data + sizeof(u32), lmcEventLogBuf, + sizeof(lmcEventLogBuf))) ret = -EFAULT; else ret = 0; @@ -641,14 +622,12 @@ int lmc_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/ /* the watchdog process that cruises around */ static void lmc_watchdog (unsigned long data) /*fold00*/ { - struct net_device *dev = (struct net_device *) data; - lmc_softc_t *sc; + struct net_device *dev = (struct net_device *)data; + lmc_softc_t *sc = dev_to_sc(dev); int link_status; - u_int32_t ticks; + u32 ticks; unsigned long flags; - sc = dev->priv; - lmc_trace(dev, "lmc_watchdog in"); spin_lock_irqsave(&sc->lmc_lock, flags); @@ -677,22 +656,22 @@ static void lmc_watchdog (unsigned long data) /*fold00*/ * check for a transmit interrupt timeout * Has the packet xmt vs xmt serviced threshold been exceeded */ if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && - sc->stats.tx_packets > sc->lasttx_packets && - sc->tx_TimeoutInd == 0) + sc->lmc_device->stats.tx_packets > sc->lasttx_packets && + sc->tx_TimeoutInd == 0) { /* wait for the watchdog to come around again */ sc->tx_TimeoutInd = 1; } else if (sc->lmc_taint_tx == sc->lastlmc_taint_tx && - sc->stats.tx_packets > sc->lasttx_packets && - sc->tx_TimeoutInd) + sc->lmc_device->stats.tx_packets > sc->lasttx_packets && + sc->tx_TimeoutInd) { LMC_EVENT_LOG(LMC_EVENT_XMTINTTMO, LMC_CSR_READ (sc, csr_status), 0); sc->tx_TimeoutDisplay = 1; - sc->stats.tx_TimeoutCnt++; + sc->extra_stats.tx_TimeoutCnt++; /* DEC chip is stuck, hit it with a RESET!!!! 
*/ lmc_running_reset (dev); @@ -712,13 +691,11 @@ static void lmc_watchdog (unsigned long data) /*fold00*/ /* reset the transmit timeout detection flag */ sc->tx_TimeoutInd = 0; sc->lastlmc_taint_tx = sc->lmc_taint_tx; - sc->lasttx_packets = sc->stats.tx_packets; - } - else - { + sc->lasttx_packets = sc->lmc_device->stats.tx_packets; + } else { sc->tx_TimeoutInd = 0; sc->lastlmc_taint_tx = sc->lmc_taint_tx; - sc->lasttx_packets = sc->stats.tx_packets; + sc->lasttx_packets = sc->lmc_device->stats.tx_packets; } /* --- end time out check ----------------------------------- */ @@ -748,19 +725,7 @@ static void lmc_watchdog (unsigned long data) /*fold00*/ sc->last_link_status = 1; /* lmc_reset (sc); Again why reset??? */ - /* Inform the world that link protocol is back up. */ netif_carrier_on(dev); - - /* Now we have to tell the syncppp that we had an outage - * and that it should deal. Calling sppp_reopen here - * should do the trick, but we may have to call sppp_close - * when the link goes down, and call sppp_open here. - * Subject to more testing. - * --bbraun - */ - - lmc_proto_reopen(sc); - } /* Call media specific watchdog functions */ @@ -816,114 +781,93 @@ kick_timer: } -static void lmc_setup(struct net_device * const dev) /*fold00*/ +static int lmc_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) { - lmc_trace(dev, "lmc_setup in"); - - dev->type = ARPHRD_HDLC; - dev->hard_start_xmit = lmc_start_xmit; - dev->open = lmc_open; - dev->stop = lmc_close; - dev->get_stats = lmc_get_stats; - dev->do_ioctl = lmc_ioctl; - dev->tx_timeout = lmc_driver_timeout; - dev->watchdog_timeo = (HZ); /* 1 second */ - - lmc_trace(dev, "lmc_setup out"); + if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) + return 0; + return -EINVAL; } - static int __devinit lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct net_device *dev; - lmc_softc_t *sc; - u16 subdevice; - u_int16_t AdapModelNum; - int err = -ENOMEM; - static int cards_found; -#ifndef GCOM - /* We name by type not by vendor */ - static const char lmcname[] = "hdlc%d"; -#else - /* - * GCOM uses LMC vendor name so that clients can know which card - * to attach to. 
- */ - static const char lmcname[] = "lmc%d"; -#endif - - - /* - * Allocate our own device structure - */ - dev = alloc_netdev(sizeof(lmc_softc_t), lmcname, lmc_setup); - if (!dev) { - printk (KERN_ERR "lmc:alloc_netdev for device failed\n"); - goto out1; - } - - lmc_trace(dev, "lmc_init_one in"); - - err = pci_enable_device(pdev); - if (err) { - printk(KERN_ERR "lmc: pci enable failed:%d\n", err); - goto out2; - } - - if (pci_request_regions(pdev, "lmc")) { - printk(KERN_ERR "lmc: pci_request_region failed\n"); - err = -EIO; - goto out3; - } - - pci_set_drvdata(pdev, dev); - - if(lmc_first_load == 0){ - printk(KERN_INFO "Lan Media Corporation WAN Driver Version %d.%d.%d\n", - DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION,DRIVER_SUB_VERSION); - lmc_first_load = 1; - } - - sc = dev->priv; - sc->lmc_device = dev; - sc->name = dev->name; - - /* Initialize the sppp layer */ - /* An ioctl can cause a subsequent detach for raw frame interface */ - dev->ml_priv = sc; - sc->if_type = LMC_PPP; - sc->check = 0xBEAFCAFE; - dev->base_addr = pci_resource_start(pdev, 0); - dev->irq = pdev->irq; - - SET_NETDEV_DEV(dev, &pdev->dev); - - /* - * This will get the protocol layer ready and do any 1 time init's - * Must have a valid sc and dev structure - */ - lmc_proto_init(sc); - - lmc_proto_attach(sc); + lmc_softc_t *sc; + struct net_device *dev; + u16 subdevice; + u16 AdapModelNum; + int err; + static int cards_found; + + /* lmc_trace(dev, "lmc_init_one in"); */ + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "lmc: pci enable failed: %d\n", err); + return err; + } - /* - * Why were we changing this??? - dev->tx_queue_len = 100; - */ + err = pci_request_regions(pdev, "lmc"); + if (err) { + printk(KERN_ERR "lmc: pci_request_region failed\n"); + goto err_req_io; + } - /* Init the spin lock so can call it latter */ + /* + * Allocate our own device structure + */ + sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL); + if (!sc) { + err = -ENOMEM; + goto err_kzalloc; + } - spin_lock_init(&sc->lmc_lock); - pci_set_master(pdev); + dev = alloc_hdlcdev(sc); + if (!dev) { + printk(KERN_ERR "lmc:alloc_netdev for device failed\n"); + goto err_hdlcdev; + } - printk ("%s: detected at %lx, irq %d\n", dev->name, - dev->base_addr, dev->irq); - if (register_netdev (dev) != 0) { - printk (KERN_ERR "%s: register_netdev failed.\n", dev->name); - goto out4; - } + dev->type = ARPHRD_HDLC; + dev_to_hdlc(dev)->xmit = lmc_start_xmit; + dev_to_hdlc(dev)->attach = lmc_attach; + dev->open = lmc_open; + dev->stop = lmc_close; + dev->get_stats = lmc_get_stats; + dev->do_ioctl = lmc_ioctl; + dev->tx_timeout = lmc_driver_timeout; + dev->watchdog_timeo = HZ; /* 1 second */ + dev->tx_queue_len = 100; + sc->lmc_device = dev; + sc->name = dev->name; + sc->if_type = LMC_PPP; + sc->check = 0xBEAFCAFE; + dev->base_addr = pci_resource_start(pdev, 0); + dev->irq = pdev->irq; + pci_set_drvdata(pdev, dev); + SET_NETDEV_DEV(dev, &pdev->dev); + + /* + * This will get the protocol layer ready and do any 1 time init's + * Must have a valid sc and dev structure + */ + lmc_proto_attach(sc); + + /* Init the spin lock so can call it latter */ + + spin_lock_init(&sc->lmc_lock); + pci_set_master(pdev); + + printk(KERN_INFO "%s: detected at %lx, irq %d\n", dev->name, + dev->base_addr, dev->irq); + + err = register_hdlc_device(dev); + if (err) { + printk(KERN_ERR "%s: register_netdev failed.\n", dev->name); + free_netdev(dev); + goto err_hdlcdev; + } sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; sc->lmc_timing = LMC_CTL_CLOCK_SOURCE_EXT; @@ -939,27 +883,27 
@@ static int __devinit lmc_init_one(struct pci_dev *pdev, switch (subdevice) { case PCI_DEVICE_ID_LMC_HSSI: - printk ("%s: LMC HSSI\n", dev->name); + printk(KERN_INFO "%s: LMC HSSI\n", dev->name); sc->lmc_cardtype = LMC_CARDTYPE_HSSI; sc->lmc_media = &lmc_hssi_media; break; case PCI_DEVICE_ID_LMC_DS3: - printk ("%s: LMC DS3\n", dev->name); + printk(KERN_INFO "%s: LMC DS3\n", dev->name); sc->lmc_cardtype = LMC_CARDTYPE_DS3; sc->lmc_media = &lmc_ds3_media; break; case PCI_DEVICE_ID_LMC_SSI: - printk ("%s: LMC SSI\n", dev->name); + printk(KERN_INFO "%s: LMC SSI\n", dev->name); sc->lmc_cardtype = LMC_CARDTYPE_SSI; sc->lmc_media = &lmc_ssi_media; break; case PCI_DEVICE_ID_LMC_T1: - printk ("%s: LMC T1\n", dev->name); + printk(KERN_INFO "%s: LMC T1\n", dev->name); sc->lmc_cardtype = LMC_CARDTYPE_T1; sc->lmc_media = &lmc_t1_media; break; default: - printk (KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name); + printk(KERN_WARNING "%s: LMC UNKOWN CARD!\n", dev->name); break; } @@ -977,32 +921,28 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, */ AdapModelNum = (lmc_mii_readreg (sc, 0, 3) & 0x3f0) >> 4; - if ((AdapModelNum == LMC_ADAP_T1 - && subdevice == PCI_DEVICE_ID_LMC_T1) || /* detect LMC1200 */ - (AdapModelNum == LMC_ADAP_SSI - && subdevice == PCI_DEVICE_ID_LMC_SSI) || /* detect LMC1000 */ - (AdapModelNum == LMC_ADAP_DS3 - && subdevice == PCI_DEVICE_ID_LMC_DS3) || /* detect LMC5245 */ - (AdapModelNum == LMC_ADAP_HSSI - && subdevice == PCI_DEVICE_ID_LMC_HSSI)) - { /* detect LMC5200 */ + if ((AdapModelNum != LMC_ADAP_T1 || /* detect LMC1200 */ + subdevice != PCI_DEVICE_ID_LMC_T1) && + (AdapModelNum != LMC_ADAP_SSI || /* detect LMC1000 */ + subdevice != PCI_DEVICE_ID_LMC_SSI) && + (AdapModelNum != LMC_ADAP_DS3 || /* detect LMC5245 */ + subdevice != PCI_DEVICE_ID_LMC_DS3) && + (AdapModelNum != LMC_ADAP_HSSI || /* detect LMC5200 */ + subdevice != PCI_DEVICE_ID_LMC_HSSI)) + printk(KERN_WARNING "%s: Model number (%d) miscompare for PCI" + " Subsystem ID = 0x%04x\n", + dev->name, AdapModelNum, subdevice); - } - else { - printk ("%s: Model number (%d) miscompare for PCI Subsystem ID = 0x%04x\n", - dev->name, AdapModelNum, subdevice); -// return (NULL); - } /* * reset clock */ LMC_CSR_WRITE (sc, csr_gp_timer, 0xFFFFFFFFUL); sc->board_idx = cards_found++; - sc->stats.check = STATCHECK; - sc->stats.version_size = (DRIVER_VERSION << 16) + - sizeof (struct lmc_statistics); - sc->stats.lmc_cardtype = sc->lmc_cardtype; + sc->extra_stats.check = STATCHECK; + sc->extra_stats.version_size = (DRIVER_VERSION << 16) + + sizeof(sc->lmc_device->stats) + sizeof(sc->extra_stats); + sc->extra_stats.lmc_cardtype = sc->lmc_cardtype; sc->lmc_ok = 0; sc->last_link_status = 0; @@ -1010,58 +950,51 @@ static int __devinit lmc_init_one(struct pci_dev *pdev, lmc_trace(dev, "lmc_init_one out"); return 0; - out4: - lmc_proto_detach(sc); - out3: - if (pdev) { - pci_release_regions(pdev); - pci_set_drvdata(pdev, NULL); - } - out2: - free_netdev(dev); - out1: - return err; +err_hdlcdev: + pci_set_drvdata(pdev, NULL); + kfree(sc); +err_kzalloc: + pci_release_regions(pdev); +err_req_io: + pci_disable_device(pdev); + return err; } /* * Called from pci when removing module. 
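lmc_remove_one() below, like cosa_exit() and sv11_shutdown() earlier, tears things down in the reverse order of the probe: unregister the HDLC device before freeing the net_device, and only then give back the PCI resources. In outline (a sketch, not the literal lmc code):

        static void example_remove(struct pci_dev *pdev)
        {
                struct net_device *dev = pci_get_drvdata(pdev);

                if (!dev)
                        return;
                unregister_hdlc_device(dev);  /* stops proto, unregisters netdev */
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }

The probe's error labels above (err_hdlcdev, err_kzalloc, err_req_io) encode the same ordering for the partial-failure cases.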
*/ -static void __devexit lmc_remove_one (struct pci_dev *pdev) +static void __devexit lmc_remove_one(struct pci_dev *pdev) { - struct net_device *dev = pci_get_drvdata(pdev); - - if (dev) { - lmc_softc_t *sc = dev->priv; - - printk("%s: removing...\n", dev->name); - lmc_proto_detach(sc); - unregister_netdev(dev); - free_netdev(dev); - pci_release_regions(pdev); - pci_disable_device(pdev); - pci_set_drvdata(pdev, NULL); - } + struct net_device *dev = pci_get_drvdata(pdev); + + if (dev) { + printk(KERN_DEBUG "%s: removing...\n", dev->name); + unregister_hdlc_device(dev); + free_netdev(dev); + pci_release_regions(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + } } /* After this is called, packets can be sent. * Does not initialize the addresses */ -static int lmc_open (struct net_device *dev) /*fold00*/ +static int lmc_open(struct net_device *dev) { - lmc_softc_t *sc = dev->priv; + lmc_softc_t *sc = dev_to_sc(dev); + int err; lmc_trace(dev, "lmc_open in"); lmc_led_on(sc, LMC_DS3_LED0); - lmc_dec_reset (sc); - lmc_reset (sc); - - LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ (sc, csr_status), 0); - LMC_EVENT_LOG(LMC_EVENT_RESET2, - lmc_mii_readreg (sc, 0, 16), - lmc_mii_readreg (sc, 0, 17)); + lmc_dec_reset(sc); + lmc_reset(sc); + LMC_EVENT_LOG(LMC_EVENT_RESET1, LMC_CSR_READ(sc, csr_status), 0); + LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16), + lmc_mii_readreg(sc, 0, 17)); if (sc->lmc_ok){ lmc_trace(dev, "lmc_open lmc_ok out"); @@ -1106,14 +1039,14 @@ static int lmc_open (struct net_device *dev) /*fold00*/ /* dev->flags |= IFF_UP; */ - lmc_proto_open(sc); + if ((err = lmc_proto_open(sc)) != 0) + return err; dev->do_ioctl = lmc_ioctl; netif_start_queue(dev); - - sc->stats.tx_tbusy0++ ; + sc->extra_stats.tx_tbusy0++; /* * select what interrupts we want to get @@ -1165,8 +1098,7 @@ static int lmc_open (struct net_device *dev) /*fold00*/ static void lmc_running_reset (struct net_device *dev) /*fold00*/ { - - lmc_softc_t *sc = (lmc_softc_t *) dev->priv; + lmc_softc_t *sc = dev_to_sc(dev); lmc_trace(dev, "lmc_runnig_reset in"); @@ -1184,7 +1116,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/ netif_wake_queue(dev); sc->lmc_txfull = 0; - sc->stats.tx_tbusy0++ ; + sc->extra_stats.tx_tbusy0++; sc->lmc_intrmask = TULIP_DEFAULT_INTR_MASK; LMC_CSR_WRITE (sc, csr_intr, sc->lmc_intrmask); @@ -1200,14 +1132,13 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/ * This disables the timer for the watchdog and keepalives, * and disables the irq for dev. 
*/ -static int lmc_close (struct net_device *dev) /*fold00*/ +static int lmc_close(struct net_device *dev) { /* not calling release_region() as we should */ - lmc_softc_t *sc; + lmc_softc_t *sc = dev_to_sc(dev); lmc_trace(dev, "lmc_close in"); - - sc = dev->priv; + sc->lmc_ok = 0; sc->lmc_media->set_link_status (sc, 0); del_timer (&sc->timer); @@ -1215,7 +1146,7 @@ static int lmc_close (struct net_device *dev) /*fold00*/ lmc_ifdown (dev); lmc_trace(dev, "lmc_close out"); - + return 0; } @@ -1223,16 +1154,16 @@ static int lmc_close (struct net_device *dev) /*fold00*/ /* When the interface goes down, this is called */ static int lmc_ifdown (struct net_device *dev) /*fold00*/ { - lmc_softc_t *sc = dev->priv; + lmc_softc_t *sc = dev_to_sc(dev); u32 csr6; int i; lmc_trace(dev, "lmc_ifdown in"); - + /* Don't let anything else go on right now */ // dev->start = 0; netif_stop_queue(dev); - sc->stats.tx_tbusy1++ ; + sc->extra_stats.tx_tbusy1++; /* stop interrupts */ /* Clear the interrupt mask */ @@ -1244,8 +1175,8 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/ csr6 &= ~LMC_DEC_SR; /* Turn off the Receive bit */ LMC_CSR_WRITE (sc, csr_command, csr6); - sc->stats.rx_missed_errors += - LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; + sc->lmc_device->stats.rx_missed_errors += + LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; /* release the interrupt */ if(sc->got_irq == 1){ @@ -1276,7 +1207,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/ lmc_led_off (sc, LMC_MII16_LED_ALL); netif_wake_queue(dev); - sc->stats.tx_tbusy0++ ; + sc->extra_stats.tx_tbusy0++; lmc_trace(dev, "lmc_ifdown out"); @@ -1289,7 +1220,7 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ { struct net_device *dev = (struct net_device *) dev_instance; - lmc_softc_t *sc; + lmc_softc_t *sc = dev_to_sc(dev); u32 csr; int i; s32 stat; @@ -1300,8 +1231,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ lmc_trace(dev, "lmc_interrupt in"); - sc = dev->priv; - spin_lock(&sc->lmc_lock); /* @@ -1354,7 +1283,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ int n_compl = 0 ; /* reset the transmit timeout detection flag -baz */ - sc->stats.tx_NoCompleteCnt = 0; + sc->extra_stats.tx_NoCompleteCnt = 0; badtx = sc->lmc_taint_tx; i = badtx % LMC_TXDESCS; @@ -1378,27 +1307,25 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ if (sc->lmc_txq[i] == NULL) continue; - /* - * Check the total error summary to look for any errors - */ - if (stat & 0x8000) { - sc->stats.tx_errors++; - if (stat & 0x4104) - sc->stats.tx_aborted_errors++; - if (stat & 0x0C00) - sc->stats.tx_carrier_errors++; - if (stat & 0x0200) - sc->stats.tx_window_errors++; - if (stat & 0x0002) - sc->stats.tx_fifo_errors++; - } - else { - - sc->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; - - sc->stats.tx_packets++; + /* + * Check the total error summary to look for any errors + */ + if (stat & 0x8000) { + sc->lmc_device->stats.tx_errors++; + if (stat & 0x4104) + sc->lmc_device->stats.tx_aborted_errors++; + if (stat & 0x0C00) + sc->lmc_device->stats.tx_carrier_errors++; + if (stat & 0x0200) + sc->lmc_device->stats.tx_window_errors++; + if (stat & 0x0002) + sc->lmc_device->stats.tx_fifo_errors++; + } else { + sc->lmc_device->stats.tx_bytes += sc->lmc_txring[i].length & 0x7ff; + + sc->lmc_device->stats.tx_packets++; } - + // dev_kfree_skb(sc->lmc_txq[i]); dev_kfree_skb_irq(sc->lmc_txq[i]); 
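Transmit completion in lmc_interrupt() now feeds the generic net_device counters directly, while the driver-only counters (tx_tbusy*, tx_NoCompleteCnt and friends) move into the separate extra_stats block. The per-descriptor bookkeeping is roughly the following; the 0x8000 error-summary bit is chip-specific and the function name is a placeholder:

        /* Called from the interrupt handler for each completed TX descriptor. */
        static void example_tx_complete(struct net_device *dev, struct sk_buff *skb,
                                        u32 status, unsigned int len)
        {
                if (status & 0x8000) {             /* error summary (chip-specific) */
                        dev->stats.tx_errors++;
                } else {
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += len;
                }
                dev_kfree_skb_irq(skb);            /* IRQ context: not dev_kfree_skb() */
        }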
sc->lmc_txq[i] = NULL; @@ -1415,13 +1342,13 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/ LMC_EVENT_LOG(LMC_EVENT_TBUSY0, n_compl, 0); sc->lmc_txfull = 0; netif_wake_queue(dev); - sc->stats.tx_tbusy0++ ; + sc->extra_stats.tx_tbusy0++; #ifdef DEBUG - sc->stats.dirtyTx = badtx; - sc->stats.lmc_next_tx = sc->lmc_next_tx; - sc->stats.lmc_txfull = sc->lmc_txfull; + sc->extra_stats.dirtyTx = badtx; + sc->extra_stats.lmc_next_tx = sc->lmc_next_tx; + sc->extra_stats.lmc_txfull = sc->lmc_txfull; #endif sc->lmc_taint_tx = badtx; @@ -1476,9 +1403,9 @@ lmc_int_fail_out: return IRQ_RETVAL(handled); } -static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00*/ +static int lmc_start_xmit(struct sk_buff *skb, struct net_device *dev) { - lmc_softc_t *sc; + lmc_softc_t *sc = dev_to_sc(dev); u32 flag; int entry; int ret = 0; @@ -1486,8 +1413,6 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00 lmc_trace(dev, "lmc_start_xmit in"); - sc = dev->priv; - spin_lock_irqsave(&sc->lmc_lock, flags); /* normal path, tbusy known to be zero */ @@ -1532,8 +1457,8 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00 if (sc->lmc_next_tx - sc->lmc_taint_tx >= LMC_TXDESCS - 1) { /* ring full, go busy */ sc->lmc_txfull = 1; - netif_stop_queue(dev); - sc->stats.tx_tbusy1++ ; + netif_stop_queue(dev); + sc->extra_stats.tx_tbusy1++; LMC_EVENT_LOG(LMC_EVENT_TBUSY1, entry, 0); } #endif @@ -1550,7 +1475,7 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00 * the watchdog timer handler. -baz */ - sc->stats.tx_NoCompleteCnt++; + sc->extra_stats.tx_NoCompleteCnt++; sc->lmc_next_tx++; /* give ownership to the chip */ @@ -1569,9 +1494,9 @@ static int lmc_start_xmit (struct sk_buff *skb, struct net_device *dev) /*fold00 } -static int lmc_rx (struct net_device *dev) /*fold00*/ +static int lmc_rx(struct net_device *dev) { - lmc_softc_t *sc; + lmc_softc_t *sc = dev_to_sc(dev); int i; int rx_work_limit = LMC_RXDESCS; unsigned int next_rx; @@ -1583,8 +1508,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ lmc_trace(dev, "lmc_rx in"); - sc = dev->priv; - lmc_led_on(sc, LMC_DS3_LED3); rxIntLoopCnt = 0; /* debug -baz */ @@ -1597,39 +1520,38 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ rxIntLoopCnt++; /* debug -baz */ len = ((stat & LMC_RDES_FRAME_LENGTH) >> RDES_FRAME_LENGTH_BIT_NUMBER); if ((stat & 0x0300) != 0x0300) { /* Check first segment and last segment */ - if ((stat & 0x0000ffff) != 0x7fff) { - /* Oversized frame */ - sc->stats.rx_length_errors++; - goto skip_packet; - } - } - - if(stat & 0x00000008){ /* Catch a dribbling bit error */ - sc->stats.rx_errors++; - sc->stats.rx_frame_errors++; - goto skip_packet; - } + if ((stat & 0x0000ffff) != 0x7fff) { + /* Oversized frame */ + sc->lmc_device->stats.rx_length_errors++; + goto skip_packet; + } + } + if (stat & 0x00000008) { /* Catch a dribbling bit error */ + sc->lmc_device->stats.rx_errors++; + sc->lmc_device->stats.rx_frame_errors++; + goto skip_packet; + } - if(stat & 0x00000004){ /* Catch a CRC error by the Xilinx */ - sc->stats.rx_errors++; - sc->stats.rx_crc_errors++; - goto skip_packet; - } + if (stat & 0x00000004) { /* Catch a CRC error by the Xilinx */ + sc->lmc_device->stats.rx_errors++; + sc->lmc_device->stats.rx_crc_errors++; + goto skip_packet; + } - if (len > LMC_PKT_BUF_SZ){ - sc->stats.rx_length_errors++; - localLengthErrCnt++; - goto skip_packet; - } + if (len > LMC_PKT_BUF_SZ) { + 
sc->lmc_device->stats.rx_length_errors++; + localLengthErrCnt++; + goto skip_packet; + } - if (len < sc->lmc_crcSize + 2) { - sc->stats.rx_length_errors++; - sc->stats.rx_SmallPktCnt++; - localLengthErrCnt++; - goto skip_packet; - } + if (len < sc->lmc_crcSize + 2) { + sc->lmc_device->stats.rx_length_errors++; + sc->extra_stats.rx_SmallPktCnt++; + localLengthErrCnt++; + goto skip_packet; + } if(stat & 0x00004000){ printk(KERN_WARNING "%s: Receiver descriptor error, receiver out of sync?\n", dev->name); @@ -1656,8 +1578,8 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ } dev->last_rx = jiffies; - sc->stats.rx_packets++; - sc->stats.rx_bytes += len; + sc->lmc_device->stats.rx_packets++; + sc->lmc_device->stats.rx_bytes += len; LMC_CONSOLE_LOG("recv", skb->data, len); @@ -1679,7 +1601,6 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ skb_put (skb, len); skb->protocol = lmc_proto_type(sc, skb); - skb->protocol = htons(ETH_P_WAN_PPP); skb_reset_mac_header(skb); /* skb_reset_network_header(skb); */ skb->dev = dev; @@ -1704,7 +1625,7 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ * in which care we'll try to allocate the buffer * again. (once a second) */ - sc->stats.rx_BuffAllocErr++; + sc->extra_stats.rx_BuffAllocErr++; LMC_EVENT_LOG(LMC_EVENT_RCVINT, stat, len); sc->failed_recv_alloc = 1; goto skip_out_of_mem; @@ -1739,16 +1660,14 @@ static int lmc_rx (struct net_device *dev) /*fold00*/ * descriptors with bogus packets * if (localLengthErrCnt > LMC_RXDESCS - 3) { - sc->stats.rx_BadPktSurgeCnt++; - LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, - localLengthErrCnt, - sc->stats.rx_BadPktSurgeCnt); + sc->extra_stats.rx_BadPktSurgeCnt++; + LMC_EVENT_LOG(LMC_EVENT_BADPKTSURGE, localLengthErrCnt, + sc->extra_stats.rx_BadPktSurgeCnt); } */ /* save max count of receive descriptors serviced */ - if (rxIntLoopCnt > sc->stats.rxIntLoopCnt) { - sc->stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ - } + if (rxIntLoopCnt > sc->extra_stats.rxIntLoopCnt) + sc->extra_stats.rxIntLoopCnt = rxIntLoopCnt; /* debug -baz */ #ifdef DEBUG if (rxIntLoopCnt == 0) @@ -1775,23 +1694,22 @@ skip_out_of_mem: return 0; } -static struct net_device_stats *lmc_get_stats (struct net_device *dev) /*fold00*/ +static struct net_device_stats *lmc_get_stats(struct net_device *dev) { - lmc_softc_t *sc = dev->priv; + lmc_softc_t *sc = dev_to_sc(dev); unsigned long flags; lmc_trace(dev, "lmc_get_stats in"); - spin_lock_irqsave(&sc->lmc_lock, flags); - sc->stats.rx_missed_errors += LMC_CSR_READ (sc, csr_missed_frames) & 0xffff; + sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff; spin_unlock_irqrestore(&sc->lmc_lock, flags); lmc_trace(dev, "lmc_get_stats out"); - return (struct net_device_stats *) &sc->stats; + return &sc->lmc_device->stats; } static struct pci_driver lmc_driver = { @@ -1970,7 +1888,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ { if (sc->lmc_txq[i] != NULL){ /* have buffer */ dev_kfree_skb(sc->lmc_txq[i]); /* free it */ - sc->stats.tx_dropped++; /* We just dropped a packet */ + sc->lmc_device->stats.tx_dropped++; /* We just dropped a packet */ } sc->lmc_txq[i] = NULL; sc->lmc_txring[i].status = 0x00000000; @@ -1982,7 +1900,7 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/ lmc_trace(sc->lmc_device, "lmc_softreset out"); } -void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ +void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/ { lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in"); sc->lmc_gpio_io 
&= ~bits; @@ -1990,7 +1908,7 @@ void lmc_gpio_mkinput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out"); } -void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ +void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/ { lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in"); sc->lmc_gpio_io |= bits; @@ -1998,7 +1916,7 @@ void lmc_gpio_mkoutput(lmc_softc_t * const sc, u_int32_t bits) /*fold00*/ lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out"); } -void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ +void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/ { lmc_trace(sc->lmc_device, "lmc_led_on in"); if((~sc->lmc_miireg16) & led){ /* Already on! */ @@ -2011,7 +1929,7 @@ void lmc_led_on(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ lmc_trace(sc->lmc_device, "lmc_led_on out"); } -void lmc_led_off(lmc_softc_t * const sc, u_int32_t led) /*fold00*/ +void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/ { lmc_trace(sc->lmc_device, "lmc_led_off in"); if(sc->lmc_miireg16 & led){ /* Already set don't do anything */ @@ -2061,13 +1979,13 @@ static void lmc_reset(lmc_softc_t * const sc) /*fold00*/ */ sc->lmc_media->init(sc); - sc->stats.resetCount++; + sc->extra_stats.resetCount++; lmc_trace(sc->lmc_device, "lmc_reset out"); } static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/ { - u_int32_t val; + u32 val; lmc_trace(sc->lmc_device, "lmc_dec_reset in"); /* @@ -2151,23 +2069,21 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00 lmc_trace(sc->lmc_device, "lmc_initcsrs out"); } -static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ - lmc_softc_t *sc; +static void lmc_driver_timeout(struct net_device *dev) +{ + lmc_softc_t *sc = dev_to_sc(dev); u32 csr6; unsigned long flags; lmc_trace(dev, "lmc_driver_timeout in"); - sc = dev->priv; - spin_lock_irqsave(&sc->lmc_lock, flags); printk("%s: Xmitter busy|\n", dev->name); - sc->stats.tx_tbusy_calls++ ; - if (jiffies - dev->trans_start < TX_TIMEOUT) { - goto bug_out; - } + sc->extra_stats.tx_tbusy_calls++; + if (jiffies - dev->trans_start < TX_TIMEOUT) + goto bug_out; /* * Chip seems to have locked up @@ -2178,7 +2094,7 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ LMC_EVENT_LOG(LMC_EVENT_XMTPRCTMO, LMC_CSR_READ (sc, csr_status), - sc->stats.tx_ProcTimeout); + sc->extra_stats.tx_ProcTimeout); lmc_running_reset (dev); @@ -2195,8 +2111,8 @@ static void lmc_driver_timeout(struct net_device *dev) { /*fold00*/ /* immediate transmit */ LMC_CSR_WRITE (sc, csr_txpoll, 0); - sc->stats.tx_errors++; - sc->stats.tx_ProcTimeout++; /* -baz */ + sc->lmc_device->stats.tx_errors++; + sc->extra_stats.tx_ProcTimeout++; /* -baz */ dev->trans_start = jiffies; diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c index 8aa461c..f327674 100644 --- a/drivers/net/wan/lmc/lmc_media.c +++ b/drivers/net/wan/lmc/lmc_media.c @@ -16,8 +16,6 @@ #include <linux/inet.h> #include <linux/bitops.h> -#include <net/syncppp.h> - #include <asm/processor.h> /* Processor type for cache alignment. 
*/ #include <asm/io.h> #include <asm/dma.h> @@ -95,8 +93,7 @@ static void lmc_dummy_set_1 (lmc_softc_t * const, int); static void lmc_dummy_set2_1 (lmc_softc_t * const, lmc_ctl_t *); static inline void write_av9110_bit (lmc_softc_t *, int); -static void write_av9110 (lmc_softc_t *, u_int32_t, u_int32_t, u_int32_t, - u_int32_t, u_int32_t); +static void write_av9110(lmc_softc_t *, u32, u32, u32, u32, u32); lmc_media_t lmc_ds3_media = { lmc_ds3_init, /* special media init stuff */ @@ -427,7 +424,7 @@ lmc_ds3_set_scram (lmc_softc_t * const sc, int ie) static int lmc_ds3_get_link_status (lmc_softc_t * const sc) { - u_int16_t link_status, link_status_11; + u16 link_status, link_status_11; int ret = 1; lmc_mii_writereg (sc, 0, 17, 7); @@ -449,7 +446,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc) (link_status & LMC_FRAMER_REG0_OOFS)){ ret = 0; if(sc->last_led_err[3] != 1){ - u16 r1; + u16 r1; lmc_mii_writereg (sc, 0, 17, 01); /* Turn on Xbit error as our cisco does */ r1 = lmc_mii_readreg (sc, 0, 18); r1 &= 0xfe; @@ -462,7 +459,7 @@ lmc_ds3_get_link_status (lmc_softc_t * const sc) else { lmc_led_off(sc, LMC_DS3_LED3); /* turn on red LED */ if(sc->last_led_err[3] == 1){ - u16 r1; + u16 r1; lmc_mii_writereg (sc, 0, 17, 01); /* Turn off Xbit error */ r1 = lmc_mii_readreg (sc, 0, 18); r1 |= 0x01; @@ -540,20 +537,19 @@ lmc_ds3_watchdog (lmc_softc_t * const sc) * SSI methods */ -static void -lmc_ssi_init (lmc_softc_t * const sc) +static void lmc_ssi_init(lmc_softc_t * const sc) { - u_int16_t mii17; - int cable; + u16 mii17; + int cable; - sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; + sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1000; - mii17 = lmc_mii_readreg (sc, 0, 17); + mii17 = lmc_mii_readreg(sc, 0, 17); - cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; - sc->ictl.cable_type = cable; + cable = (mii17 & LMC_MII17_SSI_CABLE_MASK) >> LMC_MII17_SSI_CABLE_SHIFT; + sc->ictl.cable_type = cable; - lmc_gpio_mkoutput (sc, LMC_GEP_SSI_TXCLOCK); + lmc_gpio_mkoutput(sc, LMC_GEP_SSI_TXCLOCK); } static void @@ -681,11 +677,11 @@ lmc_ssi_set_speed (lmc_softc_t * const sc, lmc_ctl_t * ctl) static int lmc_ssi_get_link_status (lmc_softc_t * const sc) { - u_int16_t link_status; - u_int32_t ticks; + u16 link_status; + u32 ticks; int ret = 1; int hw_hdsk = 1; - + /* * missing CTS? Hmm. If we require CTS on, we may never get the * link to come up, so omit it in this test. @@ -720,9 +716,9 @@ lmc_ssi_get_link_status (lmc_softc_t * const sc) } else if (ticks == 0 ) { /* no clock found ? 
*/ ret = 0; - if(sc->last_led_err[3] != 1){ - sc->stats.tx_lossOfClockCnt++; - printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); + if (sc->last_led_err[3] != 1) { + sc->extra_stats.tx_lossOfClockCnt++; + printk(KERN_WARNING "%s: Lost Clock, Link Down\n", sc->name); } sc->last_led_err[3] = 1; lmc_led_on (sc, LMC_MII16_LED3); /* turn ON red LED */ @@ -838,9 +834,7 @@ write_av9110_bit (lmc_softc_t * sc, int c) LMC_CSR_WRITE (sc, csr_gp, sc->lmc_gpio); } -static void -write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v, - u_int32_t x, u_int32_t r) +static void write_av9110(lmc_softc_t *sc, u32 n, u32 m, u32 v, u32 x, u32 r) { int i; @@ -887,19 +881,13 @@ write_av9110 (lmc_softc_t * sc, u_int32_t n, u_int32_t m, u_int32_t v, | LMC_GEP_SSI_GENERATOR)); } -static void -lmc_ssi_watchdog (lmc_softc_t * const sc) +static void lmc_ssi_watchdog(lmc_softc_t * const sc) { - u_int16_t mii17 = lmc_mii_readreg (sc, 0, 17); - if (((mii17 >> 3) & 7) == 7) - { - lmc_led_off (sc, LMC_MII16_LED2); - } - else - { - lmc_led_on (sc, LMC_MII16_LED2); - } - + u16 mii17 = lmc_mii_readreg(sc, 0, 17); + if (((mii17 >> 3) & 7) == 7) + lmc_led_off(sc, LMC_MII16_LED2); + else + lmc_led_on(sc, LMC_MII16_LED2); } /* @@ -929,7 +917,7 @@ lmc_t1_read (lmc_softc_t * const sc, int a) static void lmc_t1_init (lmc_softc_t * const sc) { - u_int16_t mii16; + u16 mii16; int i; sc->ictl.cardtype = LMC_CTL_CARDTYPE_LMC1200; @@ -1028,7 +1016,7 @@ lmc_t1_set_status (lmc_softc_t * const sc, lmc_ctl_t * ctl) */ static int lmc_t1_get_link_status (lmc_softc_t * const sc) { - u_int16_t link_status; + u16 link_status; int ret = 1; /* LMC5245 (DS3) & LMC1200 (DS1) LED definitions diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c index 8531575..be9877f 100644 --- a/drivers/net/wan/lmc/lmc_proto.c +++ b/drivers/net/wan/lmc/lmc_proto.c @@ -36,9 +36,6 @@ #include <linux/workqueue.h> #include <linux/proc_fs.h> #include <linux/bitops.h> - -#include <net/syncppp.h> - #include <asm/processor.h> /* Processor type for cache alignment. */ #include <asm/io.h> #include <asm/dma.h> @@ -50,48 +47,6 @@ #include "lmc_ioctl.h" #include "lmc_proto.h" -/* - * The compile-time variable SPPPSTUP causes the module to be - * compiled without referencing any of the sync ppp routines. 
- */
-#ifdef SPPPSTUB
-#define SPPP_detach(d) (void)0
-#define SPPP_open(d) 0
-#define SPPP_reopen(d) (void)0
-#define SPPP_close(d) (void)0
-#define SPPP_attach(d) (void)0
-#define SPPP_do_ioctl(d,i,c) -EOPNOTSUPP
-#else
-#define SPPP_attach(x) sppp_attach((x)->pd)
-#define SPPP_detach(x) sppp_detach((x)->pd->dev)
-#define SPPP_open(x) sppp_open((x)->pd->dev)
-#define SPPP_reopen(x) sppp_reopen((x)->pd->dev)
-#define SPPP_close(x) sppp_close((x)->pd->dev)
-#define SPPP_do_ioctl(x, y, z) sppp_do_ioctl((x)->pd->dev, (y), (z))
-#endif
-
-// init
-void lmc_proto_init(lmc_softc_t *sc) /*FOLD00*/
-{
- lmc_trace(sc->lmc_device, "lmc_proto_init in");
- switch(sc->if_type){
- case LMC_PPP:
- sc->pd = kmalloc(sizeof(struct ppp_device), GFP_KERNEL);
- if (!sc->pd) {
- printk("lmc_proto_init(): kmalloc failure!\n");
- return;
- }
- sc->pd->dev = sc->lmc_device;
- sc->if_ptr = sc->pd;
- break;
- case LMC_RAW:
- break;
- default:
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_init out");
-}
-
 // attach
 void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
 {
@@ -100,7 +55,6 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
 case LMC_PPP:
 {
 struct net_device *dev = sc->lmc_device;
- SPPP_attach(sc);
 dev->do_ioctl = lmc_ioctl;
 }
 break;
@@ -108,7 +62,7 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
 {
 struct net_device *dev = sc->lmc_device;
 /*
- * They set a few basics because they don't use sync_ppp
+ * They set a few basics because they don't use HDLC
 */
 dev->flags |= IFF_POINTOPOINT;
@@ -124,88 +78,39 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
 lmc_trace(sc->lmc_device, "lmc_proto_attach out");
 }
-// detach
-void lmc_proto_detach(lmc_softc_t *sc) /*FOLD00*/
+int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
 {
- switch(sc->if_type){
- case LMC_PPP:
- SPPP_detach(sc);
- break;
- case LMC_RAW: /* Tell someone we're detaching? */
- break;
- default:
- break;
- }
-
+ lmc_trace(sc->lmc_device, "lmc_proto_ioctl");
+ if (sc->if_type == LMC_PPP)
+ return hdlc_ioctl(sc->lmc_device, ifr, cmd);
+ return -EOPNOTSUPP;
 }
-// reopen
-void lmc_proto_reopen(lmc_softc_t *sc) /*FOLD00*/
+int lmc_proto_open(lmc_softc_t *sc)
 {
- lmc_trace(sc->lmc_device, "lmc_proto_reopen in");
- switch(sc->if_type){
- case LMC_PPP:
- SPPP_reopen(sc);
- break;
- case LMC_RAW: /* Reset the interface after being down, prerape to receive packets again */
- break;
- default:
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_reopen out");
-}
+ int ret = 0;
+ lmc_trace(sc->lmc_device, "lmc_proto_open in");
-// ioctl
-int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd) /*FOLD00*/
-{
- lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");
- switch(sc->if_type){
- case LMC_PPP:
- return SPPP_do_ioctl (sc, ifr, cmd);
- break;
- default:
- return -EOPNOTSUPP;
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_ioctl out");
+ if (sc->if_type == LMC_PPP) {
+ ret = hdlc_open(sc->lmc_device);
+ if (ret < 0)
+ printk(KERN_WARNING "%s: HDLC open failed: %d\n",
+ sc->name, ret);
+ }
+
+ lmc_trace(sc->lmc_device, "lmc_proto_open out");
+ return ret;
 }
-// open
-void lmc_proto_open(lmc_softc_t *sc) /*FOLD00*/
+void lmc_proto_close(lmc_softc_t *sc)
 {
- int ret;
+ lmc_trace(sc->lmc_device, "lmc_proto_close in");
- lmc_trace(sc->lmc_device, "lmc_proto_open in");
- switch(sc->if_type){
- case LMC_PPP:
- ret = SPPP_open(sc);
- if(ret < 0)
- printk("%s: syncPPP open failed: %d\n", sc->name, ret);
- break;
- case LMC_RAW: /* We're about to start getting packets! */
- break;
- default:
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_open out");
-}
-
-// close
+ if (sc->if_type == LMC_PPP)
+ hdlc_close(sc->lmc_device);
-void lmc_proto_close(lmc_softc_t *sc) /*FOLD00*/
-{
- lmc_trace(sc->lmc_device, "lmc_proto_close in");
- switch(sc->if_type){
- case LMC_PPP:
- SPPP_close(sc);
- break;
- case LMC_RAW: /* Interface going down */
- break;
- default:
- break;
- }
- lmc_trace(sc->lmc_device, "lmc_proto_close out");
+ lmc_trace(sc->lmc_device, "lmc_proto_close out");
 }
 __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
@@ -213,8 +118,8 @@ __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
 lmc_trace(sc->lmc_device, "lmc_proto_type in");
 switch(sc->if_type){
 case LMC_PPP:
- return htons(ETH_P_WAN_PPP);
- break;
+ return hdlc_type_trans(skb, sc->lmc_device);
+ break;
 case LMC_NET:
 return htons(ETH_P_802_2);
 break;
@@ -245,4 +150,3 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
 }
 lmc_trace(sc->lmc_device, "lmc_proto_netif out");
 }
-
diff --git a/drivers/net/wan/lmc/lmc_proto.h b/drivers/net/wan/lmc/lmc_proto.h
index ccaa69e8..662148c 100644
--- a/drivers/net/wan/lmc/lmc_proto.h
+++ b/drivers/net/wan/lmc/lmc_proto.h
@@ -1,16 +1,18 @@
 #ifndef _LMC_PROTO_H_
 #define _LMC_PROTO_H_
-void lmc_proto_init(lmc_softc_t *sc);
+#include <linux/hdlc.h>
+
 void lmc_proto_attach(lmc_softc_t *sc);
-void lmc_proto_detach(lmc_softc_t *sc);
-void lmc_proto_reopen(lmc_softc_t *sc);
 int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd);
-void lmc_proto_open(lmc_softc_t *sc);
+int lmc_proto_open(lmc_softc_t *sc);
 void lmc_proto_close(lmc_softc_t *sc);
 __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb);
 void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb);
-int lmc_skb_rawpackets(char *buf, char **start, off_t offset, int len, int unused);
-#endif
+static inline lmc_softc_t* dev_to_sc(struct net_device *dev)
+{
+ return (lmc_softc_t *)dev_to_hdlc(dev)->priv;
+}
+#endif
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
index 6d003a3..65d0197 100644
--- a/drivers/net/wan/lmc/lmc_var.h
+++ b/drivers/net/wan/lmc/lmc_var.h
@@ -1,8 +1,6 @@
 #ifndef _LMC_VAR_H_
 #define _LMC_VAR_H_
-/* $Id: lmc_var.h,v 1.17 2000/04/06 12:16:47 asj Exp $ */
-
 /*
 * Copyright (c) 1997-2000 LAN Media Corporation (LMC)
 * All rights reserved.
www.lanmedia.com @@ -19,23 +17,6 @@ #include <linux/timer.h> -#ifndef __KERNEL__ -typedef signed char s8; -typedef unsigned char u8; - -typedef signed short s16; -typedef unsigned short u16; - -typedef signed int s32; -typedef unsigned int u32; - -typedef signed long long s64; -typedef unsigned long long u64; - -#define BITS_PER_LONG 32 - -#endif - /* * basic definitions used in lmc include files */ @@ -45,9 +26,6 @@ typedef struct lmc___media lmc_media_t; typedef struct lmc___ctl lmc_ctl_t; #define lmc_csrptr_t unsigned long -#define u_int16_t u16 -#define u_int8_t u8 -#define tulip_uint32_t u32 #define LMC_REG_RANGE 0x80 @@ -122,45 +100,45 @@ struct lmc_regfile_t { * used to define bits in the second tulip_desc_t field (length) * for the transmit descriptor -baz */ -#define LMC_TDES_FIRST_BUFFER_SIZE ((u_int32_t)(0x000007FF)) -#define LMC_TDES_SECOND_BUFFER_SIZE ((u_int32_t)(0x003FF800)) -#define LMC_TDES_HASH_FILTERING ((u_int32_t)(0x00400000)) -#define LMC_TDES_DISABLE_PADDING ((u_int32_t)(0x00800000)) -#define LMC_TDES_SECOND_ADDR_CHAINED ((u_int32_t)(0x01000000)) -#define LMC_TDES_END_OF_RING ((u_int32_t)(0x02000000)) -#define LMC_TDES_ADD_CRC_DISABLE ((u_int32_t)(0x04000000)) -#define LMC_TDES_SETUP_PACKET ((u_int32_t)(0x08000000)) -#define LMC_TDES_INVERSE_FILTERING ((u_int32_t)(0x10000000)) -#define LMC_TDES_FIRST_SEGMENT ((u_int32_t)(0x20000000)) -#define LMC_TDES_LAST_SEGMENT ((u_int32_t)(0x40000000)) -#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u_int32_t)(0x80000000)) +#define LMC_TDES_FIRST_BUFFER_SIZE ((u32)(0x000007FF)) +#define LMC_TDES_SECOND_BUFFER_SIZE ((u32)(0x003FF800)) +#define LMC_TDES_HASH_FILTERING ((u32)(0x00400000)) +#define LMC_TDES_DISABLE_PADDING ((u32)(0x00800000)) +#define LMC_TDES_SECOND_ADDR_CHAINED ((u32)(0x01000000)) +#define LMC_TDES_END_OF_RING ((u32)(0x02000000)) +#define LMC_TDES_ADD_CRC_DISABLE ((u32)(0x04000000)) +#define LMC_TDES_SETUP_PACKET ((u32)(0x08000000)) +#define LMC_TDES_INVERSE_FILTERING ((u32)(0x10000000)) +#define LMC_TDES_FIRST_SEGMENT ((u32)(0x20000000)) +#define LMC_TDES_LAST_SEGMENT ((u32)(0x40000000)) +#define LMC_TDES_INTERRUPT_ON_COMPLETION ((u32)(0x80000000)) #define TDES_SECOND_BUFFER_SIZE_BIT_NUMBER 11 #define TDES_COLLISION_COUNT_BIT_NUMBER 3 /* Constants for the RCV descriptor RDES */ -#define LMC_RDES_OVERFLOW ((u_int32_t)(0x00000001)) -#define LMC_RDES_CRC_ERROR ((u_int32_t)(0x00000002)) -#define LMC_RDES_DRIBBLING_BIT ((u_int32_t)(0x00000004)) -#define LMC_RDES_REPORT_ON_MII_ERR ((u_int32_t)(0x00000008)) -#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u_int32_t)(0x00000010)) -#define LMC_RDES_FRAME_TYPE ((u_int32_t)(0x00000020)) -#define LMC_RDES_COLLISION_SEEN ((u_int32_t)(0x00000040)) -#define LMC_RDES_FRAME_TOO_LONG ((u_int32_t)(0x00000080)) -#define LMC_RDES_LAST_DESCRIPTOR ((u_int32_t)(0x00000100)) -#define LMC_RDES_FIRST_DESCRIPTOR ((u_int32_t)(0x00000200)) -#define LMC_RDES_MULTICAST_FRAME ((u_int32_t)(0x00000400)) -#define LMC_RDES_RUNT_FRAME ((u_int32_t)(0x00000800)) -#define LMC_RDES_DATA_TYPE ((u_int32_t)(0x00003000)) -#define LMC_RDES_LENGTH_ERROR ((u_int32_t)(0x00004000)) -#define LMC_RDES_ERROR_SUMMARY ((u_int32_t)(0x00008000)) -#define LMC_RDES_FRAME_LENGTH ((u_int32_t)(0x3FFF0000)) -#define LMC_RDES_OWN_BIT ((u_int32_t)(0x80000000)) +#define LMC_RDES_OVERFLOW ((u32)(0x00000001)) +#define LMC_RDES_CRC_ERROR ((u32)(0x00000002)) +#define LMC_RDES_DRIBBLING_BIT ((u32)(0x00000004)) +#define LMC_RDES_REPORT_ON_MII_ERR ((u32)(0x00000008)) +#define LMC_RDES_RCV_WATCHDOG_TIMEOUT ((u32)(0x00000010)) +#define 
LMC_RDES_FRAME_TYPE ((u32)(0x00000020)) +#define LMC_RDES_COLLISION_SEEN ((u32)(0x00000040)) +#define LMC_RDES_FRAME_TOO_LONG ((u32)(0x00000080)) +#define LMC_RDES_LAST_DESCRIPTOR ((u32)(0x00000100)) +#define LMC_RDES_FIRST_DESCRIPTOR ((u32)(0x00000200)) +#define LMC_RDES_MULTICAST_FRAME ((u32)(0x00000400)) +#define LMC_RDES_RUNT_FRAME ((u32)(0x00000800)) +#define LMC_RDES_DATA_TYPE ((u32)(0x00003000)) +#define LMC_RDES_LENGTH_ERROR ((u32)(0x00004000)) +#define LMC_RDES_ERROR_SUMMARY ((u32)(0x00008000)) +#define LMC_RDES_FRAME_LENGTH ((u32)(0x3FFF0000)) +#define LMC_RDES_OWN_BIT ((u32)(0x80000000)) #define RDES_FRAME_LENGTH_BIT_NUMBER 16 -#define LMC_RDES_ERROR_MASK ( (u_int32_t)( \ +#define LMC_RDES_ERROR_MASK ( (u32)( \ LMC_RDES_OVERFLOW \ | LMC_RDES_DRIBBLING_BIT \ | LMC_RDES_REPORT_ON_MII_ERR \ @@ -172,32 +150,32 @@ struct lmc_regfile_t { */ typedef struct { - u_int32_t n; - u_int32_t m; - u_int32_t v; - u_int32_t x; - u_int32_t r; - u_int32_t f; - u_int32_t exact; + u32 n; + u32 m; + u32 v; + u32 x; + u32 r; + u32 f; + u32 exact; } lmc_av9110_t; /* * Common structure passed to the ioctl code. */ struct lmc___ctl { - u_int32_t cardtype; - u_int32_t clock_source; /* HSSI, T1 */ - u_int32_t clock_rate; /* T1 */ - u_int32_t crc_length; - u_int32_t cable_length; /* DS3 */ - u_int32_t scrambler_onoff; /* DS3 */ - u_int32_t cable_type; /* T1 */ - u_int32_t keepalive_onoff; /* protocol */ - u_int32_t ticks; /* ticks/sec */ + u32 cardtype; + u32 clock_source; /* HSSI, T1 */ + u32 clock_rate; /* T1 */ + u32 crc_length; + u32 cable_length; /* DS3 */ + u32 scrambler_onoff; /* DS3 */ + u32 cable_type; /* T1 */ + u32 keepalive_onoff; /* protocol */ + u32 ticks; /* ticks/sec */ union { lmc_av9110_t ssi; } cardspec; - u_int32_t circuit_type; /* T1 or E1 */ + u32 circuit_type; /* T1 or E1 */ }; @@ -244,108 +222,69 @@ struct lmc___media { #define STATCHECK 0xBEEFCAFE -/* Included in this structure are first - * - standard net_device_stats - * - some other counters used for debug and driver performance - * evaluation -baz - */ -struct lmc_statistics +struct lmc_extra_statistics { - unsigned long rx_packets; /* total packets received */ - unsigned long tx_packets; /* total packets transmitted */ - unsigned long rx_bytes; - unsigned long tx_bytes; - - unsigned long rx_errors; /* bad packets received */ - unsigned long tx_errors; /* packet transmit problems */ - unsigned long rx_dropped; /* no space in linux buffers */ - unsigned long tx_dropped; /* no space available in linux */ - unsigned long multicast; /* multicast packets received */ - unsigned long collisions; - - /* detailed rx_errors: */ - unsigned long rx_length_errors; - unsigned long rx_over_errors; /* receiver ring buff overflow */ - unsigned long rx_crc_errors; /* recved pkt with crc error */ - unsigned long rx_frame_errors; /* recv'd frame alignment error */ - unsigned long rx_fifo_errors; /* recv'r fifo overrun */ - unsigned long rx_missed_errors; /* receiver missed packet */ - - /* detailed tx_errors */ - unsigned long tx_aborted_errors; - unsigned long tx_carrier_errors; - unsigned long tx_fifo_errors; - unsigned long tx_heartbeat_errors; - unsigned long tx_window_errors; - - /* for cslip etc */ - unsigned long rx_compressed; - unsigned long tx_compressed; - - /* ------------------------------------- - * Custom stats & counters follow -baz */ - u_int32_t version_size; - u_int32_t lmc_cardtype; - - u_int32_t tx_ProcTimeout; - u_int32_t tx_IntTimeout; - u_int32_t tx_NoCompleteCnt; - u_int32_t tx_MaxXmtsB4Int; - u_int32_t tx_TimeoutCnt; - 
u_int32_t tx_OutOfSyncPtr; - u_int32_t tx_tbusy0; - u_int32_t tx_tbusy1; - u_int32_t tx_tbusy_calls; - u_int32_t resetCount; - u_int32_t lmc_txfull; - u_int32_t tbusy; - u_int32_t dirtyTx; - u_int32_t lmc_next_tx; - u_int32_t otherTypeCnt; - u_int32_t lastType; - u_int32_t lastTypeOK; - u_int32_t txLoopCnt; - u_int32_t usedXmtDescripCnt; - u_int32_t txIndexCnt; - u_int32_t rxIntLoopCnt; - - u_int32_t rx_SmallPktCnt; - u_int32_t rx_BadPktSurgeCnt; - u_int32_t rx_BuffAllocErr; - u_int32_t tx_lossOfClockCnt; - - /* T1 error counters */ - u_int32_t framingBitErrorCount; - u_int32_t lineCodeViolationCount; - - u_int32_t lossOfFrameCount; - u_int32_t changeOfFrameAlignmentCount; - u_int32_t severelyErroredFrameCount; - - u_int32_t check; + u32 version_size; + u32 lmc_cardtype; + + u32 tx_ProcTimeout; + u32 tx_IntTimeout; + u32 tx_NoCompleteCnt; + u32 tx_MaxXmtsB4Int; + u32 tx_TimeoutCnt; + u32 tx_OutOfSyncPtr; + u32 tx_tbusy0; + u32 tx_tbusy1; + u32 tx_tbusy_calls; + u32 resetCount; + u32 lmc_txfull; + u32 tbusy; + u32 dirtyTx; + u32 lmc_next_tx; + u32 otherTypeCnt; + u32 lastType; + u32 lastTypeOK; + u32 txLoopCnt; + u32 usedXmtDescripCnt; + u32 txIndexCnt; + u32 rxIntLoopCnt; + + u32 rx_SmallPktCnt; + u32 rx_BadPktSurgeCnt; + u32 rx_BuffAllocErr; + u32 tx_lossOfClockCnt; + + /* T1 error counters */ + u32 framingBitErrorCount; + u32 lineCodeViolationCount; + + u32 lossOfFrameCount; + u32 changeOfFrameAlignmentCount; + u32 severelyErroredFrameCount; + + u32 check; }; - typedef struct lmc_xinfo { - u_int32_t Magic0; /* BEEFCAFE */ + u32 Magic0; /* BEEFCAFE */ - u_int32_t PciCardType; - u_int32_t PciSlotNumber; /* PCI slot number */ + u32 PciCardType; + u32 PciSlotNumber; /* PCI slot number */ - u_int16_t DriverMajorVersion; - u_int16_t DriverMinorVersion; - u_int16_t DriverSubVersion; + u16 DriverMajorVersion; + u16 DriverMinorVersion; + u16 DriverSubVersion; - u_int16_t XilinxRevisionNumber; - u_int16_t MaxFrameSize; + u16 XilinxRevisionNumber; + u16 MaxFrameSize; - u_int16_t t1_alarm1_status; - u_int16_t t1_alarm2_status; + u16 t1_alarm1_status; + u16 t1_alarm2_status; - int link_status; - u_int32_t mii_reg16; + int link_status; + u32 mii_reg16; - u_int32_t Magic1; /* DEADBEEF */ + u32 Magic1; /* DEADBEEF */ } LMC_XINFO; @@ -353,23 +292,22 @@ typedef struct lmc_xinfo { * forward decl */ struct lmc___softc { - void *if_ptr; /* General purpose pointer (used by SPPP) */ char *name; u8 board_idx; - struct lmc_statistics stats; - struct net_device *lmc_device; + struct lmc_extra_statistics extra_stats; + struct net_device *lmc_device; int hang, rxdesc, bad_packet, some_counter; - u_int32_t txgo; + u32 txgo; struct lmc_regfile_t lmc_csrs; - volatile u_int32_t lmc_txtick; - volatile u_int32_t lmc_rxtick; - u_int32_t lmc_flags; - u_int32_t lmc_intrmask; /* our copy of csr_intr */ - u_int32_t lmc_cmdmode; /* our copy of csr_cmdmode */ - u_int32_t lmc_busmode; /* our copy of csr_busmode */ - u_int32_t lmc_gpio_io; /* state of in/out settings */ - u_int32_t lmc_gpio; /* state of outputs */ + volatile u32 lmc_txtick; + volatile u32 lmc_rxtick; + u32 lmc_flags; + u32 lmc_intrmask; /* our copy of csr_intr */ + u32 lmc_cmdmode; /* our copy of csr_cmdmode */ + u32 lmc_busmode; /* our copy of csr_busmode */ + u32 lmc_gpio_io; /* state of in/out settings */ + u32 lmc_gpio; /* state of outputs */ struct sk_buff* lmc_txq[LMC_TXDESCS]; struct sk_buff* lmc_rxq[LMC_RXDESCS]; volatile @@ -381,42 +319,41 @@ struct lmc___softc { unsigned int lmc_taint_tx, lmc_taint_rx; int lmc_tx_start, lmc_txfull; int lmc_txbusy; - 
u_int16_t lmc_miireg16; + u16 lmc_miireg16; int lmc_ok; int last_link_status; int lmc_cardtype; - u_int32_t last_frameerr; + u32 last_frameerr; lmc_media_t *lmc_media; struct timer_list timer; lmc_ctl_t ictl; - u_int32_t TxDescriptControlInit; + u32 TxDescriptControlInit; int tx_TimeoutInd; /* additional driver state */ int tx_TimeoutDisplay; unsigned int lastlmc_taint_tx; int lasttx_packets; - u_int32_t tx_clockState; - u_int32_t lmc_crcSize; - LMC_XINFO lmc_xinfo; + u32 tx_clockState; + u32 lmc_crcSize; + LMC_XINFO lmc_xinfo; char lmc_yel, lmc_blue, lmc_red; /* for T1 and DS3 */ - char lmc_timing; /* for HSSI and SSI */ - int got_irq; + char lmc_timing; /* for HSSI and SSI */ + int got_irq; - char last_led_err[4]; + char last_led_err[4]; - u32 last_int; - u32 num_int; + u32 last_int; + u32 num_int; spinlock_t lmc_lock; - u_int16_t if_type; /* PPP or NET */ - struct ppp_device *pd; + u16 if_type; /* HDLC/PPP or NET */ - /* Failure cases */ - u8 failed_ring; - u8 failed_recv_alloc; + /* Failure cases */ + u8 failed_ring; + u8 failed_recv_alloc; - /* Structure check */ - u32 check; + /* Structure check */ + u32 check; }; #define LMC_PCI_TIME 1 @@ -512,8 +449,8 @@ struct lmc___softc { | TULIP_STS_TXUNDERFLOW\ | TULIP_STS_RXSTOPPED ) -#define DESC_OWNED_BY_SYSTEM ((u_int32_t)(0x00000000)) -#define DESC_OWNED_BY_DC21X4 ((u_int32_t)(0x80000000)) +#define DESC_OWNED_BY_SYSTEM ((u32)(0x00000000)) +#define DESC_OWNED_BY_DC21X4 ((u32)(0x80000000)) #ifndef TULIP_CMD_RECEIVEALL #define TULIP_CMD_RECEIVEALL 0x40000000L @@ -525,46 +462,9 @@ struct lmc___softc { #define LMC_ADAP_SSI 4 #define LMC_ADAP_T1 5 -#define HDLC_HDR_LEN 4 -#define HDLC_ADDR_LEN 1 -#define HDLC_SLARP 0x8035 #define LMC_MTU 1500 -#define SLARP_LINECHECK 2 #define LMC_CRC_LEN_16 2 /* 16-bit CRC */ #define LMC_CRC_LEN_32 4 -#ifdef LMC_HDLC -/* definition of an hdlc header. */ -struct hdlc_hdr -{ - u8 address; - u8 control; - u16 type; -}; - -/* definition of a slarp header. */ -struct slarp -{ - long code; - union sl - { - struct - { - ulong address; - ulong mask; - ushort unused; - } add; - struct - { - ulong mysequence; - ulong yoursequence; - ushort reliability; - ulong time; - } chk; - } t; -}; -#endif /* LMC_HDLC */ - - #endif /* _LMC_VAR_H_ */ diff --git a/drivers/net/wan/pc300.h b/drivers/net/wan/pc300.h index 63e9fcf..2e4f84f 100644 --- a/drivers/net/wan/pc300.h +++ b/drivers/net/wan/pc300.h @@ -100,31 +100,14 @@ #define _PC300_H #include <linux/hdlc.h> -#include <net/syncppp.h> #include "hd64572.h" #include "pc300-falc-lh.h" -#ifndef CY_TYPES -#define CY_TYPES -typedef __u64 ucdouble; /* 64 bits, unsigned */ -typedef __u32 uclong; /* 32 bits, unsigned */ -typedef __u16 ucshort; /* 16 bits, unsigned */ -typedef __u8 ucchar; /* 8 bits, unsigned */ -#endif /* CY_TYPES */ +#define PC300_PROTO_MLPPP 1 -#define PC300_PROTO_MLPPP 1 - -#define PC300_KERNEL "2.4.x" /* Kernel supported by this driver */ - -#define PC300_DEVNAME "hdlc" /* Dev. name base (for hdlc0, hdlc1, etc.) */ -#define PC300_MAXINDEX 100 /* Max dev. 
name index (the '0' in hdlc0) */ - -#define PC300_MAXCARDS 4 /* Max number of cards per system */ #define PC300_MAXCHAN 2 /* Number of channels per card */ -#define PC300_PLX_WIN 0x80 /* PLX control window size (128b) */ #define PC300_RAMSIZE 0x40000 /* RAM window size (256Kb) */ -#define PC300_SCASIZE 0x400 /* SCA window size (1Kb) */ #define PC300_FALCSIZE 0x400 /* FALC window size (1Kb) */ #define PC300_OSC_CLOCK 24576000 @@ -160,26 +143,14 @@ typedef __u8 ucchar; /* 8 bits, unsigned */ * Memory access functions/macros * * (required to support Alpha systems) * ***************************************/ -#ifdef __KERNEL__ -#define cpc_writeb(port,val) {writeb((ucchar)(val),(port)); mb();} +#define cpc_writeb(port,val) {writeb((u8)(val),(port)); mb();} #define cpc_writew(port,val) {writew((ushort)(val),(port)); mb();} -#define cpc_writel(port,val) {writel((uclong)(val),(port)); mb();} +#define cpc_writel(port,val) {writel((u32)(val),(port)); mb();} #define cpc_readb(port) readb(port) #define cpc_readw(port) readw(port) #define cpc_readl(port) readl(port) -#else /* __KERNEL__ */ -#define cpc_writeb(port,val) (*(volatile ucchar *)(port) = (ucchar)(val)) -#define cpc_writew(port,val) (*(volatile ucshort *)(port) = (ucshort)(val)) -#define cpc_writel(port,val) (*(volatile uclong *)(port) = (uclong)(val)) - -#define cpc_readb(port) (*(volatile ucchar *)(port)) -#define cpc_readw(port) (*(volatile ucshort *)(port)) -#define cpc_readl(port) (*(volatile uclong *)(port)) - -#endif /* __KERNEL__ */ - /****** Data Structures *****************************************************/ /* @@ -188,15 +159,15 @@ typedef __u8 ucchar; /* 8 bits, unsigned */ * (memory mapped). */ struct RUNTIME_9050 { - uclong loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ - uclong loc_rom_range; /* 10h : Local ROM Range */ - uclong loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ - uclong loc_rom_base; /* 24h : Local ROM Base */ - uclong loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ - uclong rom_bus_descr; /* 38h : ROM Bus Descriptor */ - uclong cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ - uclong intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ - uclong init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ + u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */ + u32 loc_rom_range; /* 10h : Local ROM Range */ + u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */ + u32 loc_rom_base; /* 24h : Local ROM Base */ + u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */ + u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */ + u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */ + u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */ + u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */ }; #define PLX_9050_LINT1_ENABLE 0x01 @@ -240,66 +211,66 @@ struct RUNTIME_9050 { #define PC300_FALC_MAXLOOP 0x0000ffff /* for falc_issue_cmd() */ typedef struct falc { - ucchar sync; /* If true FALC is synchronized */ - ucchar active; /* if TRUE then already active */ - ucchar loop_active; /* if TRUE a line loopback UP was received */ - ucchar loop_gen; /* if TRUE a line loopback UP was issued */ + u8 sync; /* If true FALC is synchronized */ + u8 active; /* if TRUE then already active */ + u8 loop_active; /* if TRUE a line loopback UP was received */ + u8 loop_gen; /* if TRUE a line loopback UP was issued */ - ucchar num_channels; - ucchar offset; /* 1 for T1, 0 for E1 */ - ucchar full_bandwidth; + u8 num_channels; + u8 offset; /* 1 for T1, 0 for E1 */ + u8 full_bandwidth; - ucchar xmb_cause; - 
ucchar multiframe_mode; + u8 xmb_cause; + u8 multiframe_mode; /* Statistics */ - ucshort pden; /* Pulse Density violation count */ - ucshort los; /* Loss of Signal count */ - ucshort losr; /* Loss of Signal recovery count */ - ucshort lfa; /* Loss of frame alignment count */ - ucshort farec; /* Frame Alignment Recovery count */ - ucshort lmfa; /* Loss of multiframe alignment count */ - ucshort ais; /* Remote Alarm indication Signal count */ - ucshort sec; /* One-second timer */ - ucshort es; /* Errored second */ - ucshort rai; /* remote alarm received */ - ucshort bec; - ucshort fec; - ucshort cvc; - ucshort cec; - ucshort ebc; + u16 pden; /* Pulse Density violation count */ + u16 los; /* Loss of Signal count */ + u16 losr; /* Loss of Signal recovery count */ + u16 lfa; /* Loss of frame alignment count */ + u16 farec; /* Frame Alignment Recovery count */ + u16 lmfa; /* Loss of multiframe alignment count */ + u16 ais; /* Remote Alarm indication Signal count */ + u16 sec; /* One-second timer */ + u16 es; /* Errored second */ + u16 rai; /* remote alarm received */ + u16 bec; + u16 fec; + u16 cvc; + u16 cec; + u16 ebc; /* Status */ - ucchar red_alarm; - ucchar blue_alarm; - ucchar loss_fa; - ucchar yellow_alarm; - ucchar loss_mfa; - ucchar prbs; + u8 red_alarm; + u8 blue_alarm; + u8 loss_fa; + u8 yellow_alarm; + u8 loss_mfa; + u8 prbs; } falc_t; typedef struct falc_status { - ucchar sync; /* If true FALC is synchronized */ - ucchar red_alarm; - ucchar blue_alarm; - ucchar loss_fa; - ucchar yellow_alarm; - ucchar loss_mfa; - ucchar prbs; + u8 sync; /* If true FALC is synchronized */ + u8 red_alarm; + u8 blue_alarm; + u8 loss_fa; + u8 yellow_alarm; + u8 loss_mfa; + u8 prbs; } falc_status_t; typedef struct rsv_x21_status { - ucchar dcd; - ucchar dsr; - ucchar cts; - ucchar rts; - ucchar dtr; + u8 dcd; + u8 dsr; + u8 cts; + u8 rts; + u8 dtr; } rsv_x21_status_t; typedef struct pc300stats { int hw_type; - uclong line_on; - uclong line_off; + u32 line_on; + u32 line_off; struct net_device_stats gen_stats; falc_t te_stats; } pc300stats_t; @@ -317,28 +288,19 @@ typedef struct pc300loopback { typedef struct pc300patterntst { char patrntst_on; /* 0 - off; 1 - on; 2 - read num_errors */ - ucshort num_errors; + u16 num_errors; } pc300patterntst_t; typedef struct pc300dev { - void *if_ptr; /* General purpose pointer */ struct pc300ch *chan; - ucchar trace_on; - uclong line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ - uclong line_off; -#ifdef __KERNEL__ + u8 trace_on; + u32 line_on; /* DCD(X.21, RSV) / sync(TE) change counters */ + u32 line_off; char name[16]; struct net_device *dev; - - void *private; - struct sk_buff *tx_skb; - union { /* This union has all the protocol-specific structures */ - struct ppp_device pppdev; - }ifu; #ifdef CONFIG_PC300_MLPPP void *cpc_tty; /* information to PC300 TTY driver */ #endif -#endif /* __KERNEL__ */ }pc300dev_t; typedef struct pc300hw { @@ -346,43 +308,42 @@ typedef struct pc300hw { int bus; /* Bus (PCI, PMC, etc.) 
*/ int nchan; /* number of channels */ int irq; /* interrupt request level */ - uclong clock; /* Board clock */ - ucchar cpld_id; /* CPLD ID (TE only) */ - ucshort cpld_reg1; /* CPLD reg 1 (TE only) */ - ucshort cpld_reg2; /* CPLD reg 2 (TE only) */ - ucshort gpioc_reg; /* PLX GPIOC reg */ - ucshort intctl_reg; /* PLX Int Ctrl/Status reg */ - uclong iophys; /* PLX registers I/O base */ - uclong iosize; /* PLX registers I/O size */ - uclong plxphys; /* PLX registers MMIO base (physical) */ + u32 clock; /* Board clock */ + u8 cpld_id; /* CPLD ID (TE only) */ + u16 cpld_reg1; /* CPLD reg 1 (TE only) */ + u16 cpld_reg2; /* CPLD reg 2 (TE only) */ + u16 gpioc_reg; /* PLX GPIOC reg */ + u16 intctl_reg; /* PLX Int Ctrl/Status reg */ + u32 iophys; /* PLX registers I/O base */ + u32 iosize; /* PLX registers I/O size */ + u32 plxphys; /* PLX registers MMIO base (physical) */ void __iomem * plxbase; /* PLX registers MMIO base (virtual) */ - uclong plxsize; /* PLX registers MMIO size */ - uclong scaphys; /* SCA registers MMIO base (physical) */ + u32 plxsize; /* PLX registers MMIO size */ + u32 scaphys; /* SCA registers MMIO base (physical) */ void __iomem * scabase; /* SCA registers MMIO base (virtual) */ - uclong scasize; /* SCA registers MMIO size */ - uclong ramphys; /* On-board RAM MMIO base (physical) */ + u32 scasize; /* SCA registers MMIO size */ + u32 ramphys; /* On-board RAM MMIO base (physical) */ void __iomem * rambase; /* On-board RAM MMIO base (virtual) */ - uclong alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ - uclong ramsize; /* On-board RAM MMIO size */ - uclong falcphys; /* FALC registers MMIO base (physical) */ + u32 alloc_ramsize; /* RAM MMIO size allocated by the PCI bridge */ + u32 ramsize; /* On-board RAM MMIO size */ + u32 falcphys; /* FALC registers MMIO base (physical) */ void __iomem * falcbase;/* FALC registers MMIO base (virtual) */ - uclong falcsize; /* FALC registers MMIO size */ + u32 falcsize; /* FALC registers MMIO size */ } pc300hw_t; typedef struct pc300chconf { - sync_serial_settings phys_settings; /* Clock type/rate (in bps), + sync_serial_settings phys_settings; /* Clock type/rate (in bps), loopback mode */ raw_hdlc_proto proto_settings; /* Encoding, parity (CRC) */ - uclong media; /* HW media (RS232, V.35, etc.) */ - uclong proto; /* Protocol (PPP, X.25, etc.) */ - ucchar monitor; /* Monitor mode (0 = off, !0 = on) */ + u32 media; /* HW media (RS232, V.35, etc.) */ + u32 proto; /* Protocol (PPP, X.25, etc.) */ /* TE-specific parameters */ - ucchar lcode; /* Line Code (AMI, B8ZS, etc.) */ - ucchar fr_mode; /* Frame Mode (ESF, D4, etc.) */ - ucchar lbo; /* Line Build Out */ - ucchar rx_sens; /* Rx Sensitivity (long- or short-haul) */ - uclong tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ + u8 lcode; /* Line Code (AMI, B8ZS, etc.) */ + u8 fr_mode; /* Frame Mode (ESF, D4, etc.) */ + u8 lbo; /* Line Build Out */ + u8 rx_sens; /* Rx Sensitivity (long- or short-haul) */ + u32 tslot_bitmap; /* bit[i]=1 => timeslot _i_ is active */ } pc300chconf_t; typedef struct pc300ch { @@ -390,20 +351,18 @@ typedef struct pc300ch { int channel; pc300dev_t d; pc300chconf_t conf; - ucchar tx_first_bd; /* First TX DMA block descr. 
w/ data */ - ucchar tx_next_bd; /* Next free TX DMA block descriptor */ - ucchar rx_first_bd; /* First free RX DMA block descriptor */ - ucchar rx_last_bd; /* Last free RX DMA block descriptor */ - ucchar nfree_tx_bd; /* Number of free TX DMA block descriptors */ - falc_t falc; /* FALC structure (TE only) */ + u8 tx_first_bd; /* First TX DMA block descr. w/ data */ + u8 tx_next_bd; /* Next free TX DMA block descriptor */ + u8 rx_first_bd; /* First free RX DMA block descriptor */ + u8 rx_last_bd; /* Last free RX DMA block descriptor */ + u8 nfree_tx_bd; /* Number of free TX DMA block descriptors */ + falc_t falc; /* FALC structure (TE only) */ } pc300ch_t; typedef struct pc300 { pc300hw_t hw; /* hardware config. */ pc300ch_t chan[PC300_MAXCHAN]; -#ifdef __KERNEL__ spinlock_t card_lock; -#endif /* __KERNEL__ */ } pc300_t; typedef struct pc300conf { @@ -471,12 +430,7 @@ enum pc300_loopback_cmds { #define PC300_TX_QUEUE_LEN 100 #define PC300_DEF_MTU 1600 -#ifdef __KERNEL__ /* Function Prototypes */ -void tx_dma_start(pc300_t *, int); int cpc_open(struct net_device *dev); -int cpc_set_media(hdlc_device *, int); -#endif /* __KERNEL__ */ #endif /* _PC300_H */ - diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 3341705..d0a8d1e 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c @@ -227,8 +227,6 @@ static char rcsid[] = #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/if.h> - -#include <net/syncppp.h> #include <net/arp.h> #include <asm/io.h> @@ -285,8 +283,8 @@ static void rx_dma_buf_init(pc300_t *, int); static void tx_dma_buf_check(pc300_t *, int); static void rx_dma_buf_check(pc300_t *, int); static irqreturn_t cpc_intr(int, void *); -static int clock_rate_calc(uclong, uclong, int *); -static uclong detect_ram(pc300_t *); +static int clock_rate_calc(u32, u32, int *); +static u32 detect_ram(pc300_t *); static void plx_init(pc300_t *); static void cpc_trace(struct net_device *, struct sk_buff *, char); static int cpc_attach(struct net_device *, unsigned short, unsigned short); @@ -311,10 +309,10 @@ static void tx_dma_buf_pt_init(pc300_t * card, int ch) + DMA_TX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); for (i = 0; i < N_DMA_TX_BUF; i++, ptdescr++) { - cpc_writel(&ptdescr->next, (uclong) (DMA_TX_BD_BASE + + cpc_writel(&ptdescr->next, (u32)(DMA_TX_BD_BASE + (ch_factor + ((i + 1) & (N_DMA_TX_BUF - 1))) * sizeof(pcsca_bd_t))); - cpc_writel(&ptdescr->ptbuf, - (uclong) (DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); + cpc_writel(&ptdescr->ptbuf, + (u32)(DMA_TX_BASE + (ch_factor + i) * BD_DEF_LEN)); } } @@ -341,10 +339,10 @@ static void rx_dma_buf_pt_init(pc300_t * card, int ch) + DMA_RX_BD_BASE + ch_factor * sizeof(pcsca_bd_t)); for (i = 0; i < N_DMA_RX_BUF; i++, ptdescr++) { - cpc_writel(&ptdescr->next, (uclong) (DMA_RX_BD_BASE + - (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); + cpc_writel(&ptdescr->next, (u32)(DMA_RX_BD_BASE + + (ch_factor + ((i + 1) & (N_DMA_RX_BUF - 1))) * sizeof(pcsca_bd_t))); cpc_writel(&ptdescr->ptbuf, - (uclong) (DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); + (u32)(DMA_RX_BASE + (ch_factor + i) * BD_DEF_LEN)); } } @@ -367,8 +365,8 @@ static void tx_dma_buf_check(pc300_t * card, int ch) { volatile pcsca_bd_t __iomem *ptdescr; int i; - ucshort first_bd = card->chan[ch].tx_first_bd; - ucshort next_bd = card->chan[ch].tx_next_bd; + u16 first_bd = card->chan[ch].tx_first_bd; + u16 next_bd = card->chan[ch].tx_next_bd; printk("#CH%d: f_bd = %d(0x%08zx), n_bd = %d(0x%08zx)\n", ch, 
first_bd, TX_BD_ADDR(ch, first_bd), @@ -392,9 +390,9 @@ static void tx1_dma_buf_check(pc300_t * card, int ch) { volatile pcsca_bd_t __iomem *ptdescr; int i; - ucshort first_bd = card->chan[ch].tx_first_bd; - ucshort next_bd = card->chan[ch].tx_next_bd; - uclong scabase = card->hw.scabase; + u16 first_bd = card->chan[ch].tx_first_bd; + u16 next_bd = card->chan[ch].tx_next_bd; + u32 scabase = card->hw.scabase; printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, @@ -413,13 +411,13 @@ static void tx1_dma_buf_check(pc300_t * card, int ch) printk("\n"); } #endif - + static void rx_dma_buf_check(pc300_t * card, int ch) { volatile pcsca_bd_t __iomem *ptdescr; int i; - ucshort first_bd = card->chan[ch].rx_first_bd; - ucshort last_bd = card->chan[ch].rx_last_bd; + u16 first_bd = card->chan[ch].rx_first_bd; + u16 last_bd = card->chan[ch].rx_last_bd; int ch_factor; ch_factor = ch * N_DMA_RX_BUF; @@ -440,9 +438,9 @@ static void rx_dma_buf_check(pc300_t * card, int ch) static int dma_get_rx_frame_size(pc300_t * card, int ch) { volatile pcsca_bd_t __iomem *ptdescr; - ucshort first_bd = card->chan[ch].rx_first_bd; + u16 first_bd = card->chan[ch].rx_first_bd; int rcvd = 0; - volatile ucchar status; + volatile u8 status; ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, first_bd)); while ((status = cpc_readb(&ptdescr->status)) & DST_OSB) { @@ -462,12 +460,12 @@ static int dma_get_rx_frame_size(pc300_t * card, int ch) * dma_buf_write: writes a frame to the Tx DMA buffers * NOTE: this function writes one frame at a time. */ -static int dma_buf_write(pc300_t * card, int ch, ucchar * ptdata, int len) +static int dma_buf_write(pc300_t *card, int ch, u8 *ptdata, int len) { int i, nchar; volatile pcsca_bd_t __iomem *ptdescr; int tosend = len; - ucchar nbuf = ((len - 1) / BD_DEF_LEN) + 1; + u8 nbuf = ((len - 1) / BD_DEF_LEN) + 1; if (nbuf >= card->chan[ch].nfree_tx_bd) { return -ENOMEM; @@ -509,7 +507,7 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; volatile pcsca_bd_t __iomem *ptdescr; int rcvd = 0; - volatile ucchar status; + volatile u8 status; ptdescr = (card->hw.rambase + RX_BD_ADDR(ch, chan->rx_first_bd)); @@ -563,8 +561,8 @@ static int dma_buf_read(pc300_t * card, int ch, struct sk_buff *skb) static void tx_dma_stop(pc300_t * card, int ch) { void __iomem *scabase = card->hw.scabase; - ucchar drr_ena_bit = 1 << (5 + 2 * ch); - ucchar drr_rst_bit = 1 << (1 + 2 * ch); + u8 drr_ena_bit = 1 << (5 + 2 * ch); + u8 drr_rst_bit = 1 << (1 + 2 * ch); /* Disable DMA */ cpc_writeb(scabase + DRR, drr_ena_bit); @@ -574,8 +572,8 @@ static void tx_dma_stop(pc300_t * card, int ch) static void rx_dma_stop(pc300_t * card, int ch) { void __iomem *scabase = card->hw.scabase; - ucchar drr_ena_bit = 1 << (4 + 2 * ch); - ucchar drr_rst_bit = 1 << (2 * ch); + u8 drr_ena_bit = 1 << (4 + 2 * ch); + u8 drr_rst_bit = 1 << (2 * ch); /* Disable DMA */ cpc_writeb(scabase + DRR, drr_ena_bit); @@ -607,7 +605,7 @@ static void rx_dma_start(pc300_t * card, int ch) /*************************/ /*** FALC Routines ***/ /*************************/ -static void falc_issue_cmd(pc300_t * card, int ch, ucchar cmd) +static void falc_issue_cmd(pc300_t *card, int ch, u8 cmd) { void __iomem *falcbase = card->hw.falcbase; unsigned long i = 0; @@ -675,7 +673,7 @@ static void falc_intr_enable(pc300_t * card, int ch) static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) { void __iomem *falcbase = 
card->hw.falcbase; - ucchar tshf = card->chan[ch].falc.offset; + u8 tshf = card->chan[ch].falc.offset; cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) & @@ -691,7 +689,7 @@ static void falc_open_timeslot(pc300_t * card, int ch, int timeslot) static void falc_close_timeslot(pc300_t * card, int ch, int timeslot) { void __iomem *falcbase = card->hw.falcbase; - ucchar tshf = card->chan[ch].falc.offset; + u8 tshf = card->chan[ch].falc.offset; cpc_writeb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch), cpc_readb(falcbase + F_REG((ICB1 + (timeslot - tshf) / 8), ch)) | @@ -812,7 +810,7 @@ static void falc_init_t1(pc300_t * card, int ch) pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; falc_t *pfalc = (falc_t *) & chan->falc; void __iomem *falcbase = card->hw.falcbase; - ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); + u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); /* Switch to T1 mode (PCM 24) */ cpc_writeb(falcbase + F_REG(FMR1, ch), FMR1_PMOD); @@ -981,7 +979,7 @@ static void falc_init_e1(pc300_t * card, int ch) pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; falc_t *pfalc = (falc_t *) & chan->falc; void __iomem *falcbase = card->hw.falcbase; - ucchar dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); + u8 dja = (ch ? (LIM2_DJA2 | LIM2_DJA1) : 0); /* Switch to E1 mode (PCM 30) */ cpc_writeb(falcbase + F_REG(FMR1, ch), @@ -1187,7 +1185,7 @@ static void te_config(pc300_t * card, int ch) pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; falc_t *pfalc = (falc_t *) & chan->falc; void __iomem *falcbase = card->hw.falcbase; - ucchar dummy; + u8 dummy; unsigned long flags; memset(pfalc, 0, sizeof(falc_t)); @@ -1403,7 +1401,7 @@ static void falc_update_stats(pc300_t * card, int ch) pc300chconf_t *conf = (pc300chconf_t *) & chan->conf; falc_t *pfalc = (falc_t *) & chan->falc; void __iomem *falcbase = card->hw.falcbase; - ucshort counter; + u16 counter; counter = cpc_readb(falcbase + F_REG(FECL, ch)); counter |= cpc_readb(falcbase + F_REG(FECH, ch)) << 8; @@ -1729,7 +1727,7 @@ static void falc_pattern_test(pc300_t * card, int ch, unsigned int activate) * Description: This routine returns the bit error counter value *---------------------------------------------------------------------------- */ -static ucshort falc_pattern_test_error(pc300_t * card, int ch) +static u16 falc_pattern_test_error(pc300_t * card, int ch) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; @@ -1776,7 +1774,7 @@ static void cpc_tx_timeout(struct net_device *dev) pc300_t *card = (pc300_t *) chan->card; int ch = chan->channel; unsigned long flags; - ucchar ilar; + u8 ilar; dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; @@ -1807,11 +1805,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) int i; #endif - if (chan->conf.monitor) { - /* In monitor mode no Tx is done: ignore packet */ - dev_kfree_skb(skb); - return 0; - } else if (!netif_carrier_ok(dev)) { + if (!netif_carrier_ok(dev)) { /* DCD must be OFF: drop packet */ dev_kfree_skb(skb); dev->stats.tx_errors++; @@ -1836,7 +1830,7 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev) } /* Write buffer to DMA buffers */ - if (dma_buf_write(card, ch, (ucchar *) skb->data, skb->len) != 0) { + if (dma_buf_write(card, ch, (u8 *)skb->data, skb->len) != 0) { // printk("%s: write error. 
Dropping TX packet.\n", dev->name); netif_stop_queue(dev); dev_kfree_skb(skb); @@ -2001,7 +1995,7 @@ static void sca_tx_intr(pc300dev_t *dev) static void sca_intr(pc300_t * card) { void __iomem *scabase = card->hw.scabase; - volatile uclong status; + volatile u32 status; int ch; int intr_count = 0; unsigned char dsr_rx; @@ -2016,7 +2010,7 @@ static void sca_intr(pc300_t * card) /**** Reception ****/ if (status & IR0_DRX((IR0_DMIA | IR0_DMIB), ch)) { - ucchar drx_stat = cpc_readb(scabase + DSR_RX(ch)); + u8 drx_stat = cpc_readb(scabase + DSR_RX(ch)); /* Clear RX interrupts */ cpc_writeb(scabase + DSR_RX(ch), drx_stat | DSR_DWE); @@ -2090,7 +2084,7 @@ static void sca_intr(pc300_t * card) /**** Transmission ****/ if (status & IR0_DTX((IR0_EFT | IR0_DMIA | IR0_DMIB), ch)) { - ucchar dtx_stat = cpc_readb(scabase + DSR_TX(ch)); + u8 dtx_stat = cpc_readb(scabase + DSR_TX(ch)); /* Clear TX interrupts */ cpc_writeb(scabase + DSR_TX(ch), dtx_stat | DSR_DWE); @@ -2134,7 +2128,7 @@ static void sca_intr(pc300_t * card) /**** MSCI ****/ if (status & IR0_M(IR0_RXINTA, ch)) { - ucchar st1 = cpc_readb(scabase + M_REG(ST1, ch)); + u8 st1 = cpc_readb(scabase + M_REG(ST1, ch)); /* Clear MSCI interrupts */ cpc_writeb(scabase + M_REG(ST1, ch), st1); @@ -2176,7 +2170,7 @@ static void sca_intr(pc300_t * card) } } -static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) +static void falc_t1_loop_detection(pc300_t *card, int ch, u8 frs1) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; @@ -2201,7 +2195,7 @@ static void falc_t1_loop_detection(pc300_t * card, int ch, ucchar frs1) } } -static void falc_e1_loop_detection(pc300_t * card, int ch, ucchar rsp) +static void falc_e1_loop_detection(pc300_t *card, int ch, u8 rsp) { pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; @@ -2231,8 +2225,8 @@ static void falc_t1_intr(pc300_t * card, int ch) pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; void __iomem *falcbase = card->hw.falcbase; - ucchar isr0, isr3, gis; - ucchar dummy; + u8 isr0, isr3, gis; + u8 dummy; while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { if (gis & GIS_ISR0) { @@ -2278,8 +2272,8 @@ static void falc_e1_intr(pc300_t * card, int ch) pc300ch_t *chan = (pc300ch_t *) & card->chan[ch]; falc_t *pfalc = (falc_t *) & chan->falc; void __iomem *falcbase = card->hw.falcbase; - ucchar isr1, isr2, isr3, gis, rsp; - ucchar dummy; + u8 isr1, isr2, isr3, gis, rsp; + u8 dummy; while ((gis = cpc_readb(falcbase + F_REG(GIS, ch))) != 0) { rsp = cpc_readb(falcbase + F_REG(RSP, ch)); @@ -2361,7 +2355,7 @@ static void falc_intr(pc300_t * card) static irqreturn_t cpc_intr(int irq, void *dev_id) { pc300_t *card = dev_id; - volatile ucchar plx_status; + volatile u8 plx_status; if (!card) { #ifdef PC300_DEBUG_INTR @@ -2400,7 +2394,7 @@ static irqreturn_t cpc_intr(int irq, void *dev_id) static void cpc_sca_status(pc300_t * card, int ch) { - ucchar ilar; + u8 ilar; void __iomem *scabase = card->hw.scabase; unsigned long flags; @@ -2818,7 +2812,7 @@ static int cpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } -static int clock_rate_calc(uclong rate, uclong clock, int *br_io) +static int clock_rate_calc(u32 rate, u32 clock, int *br_io) { int br, tc; int br_pwr, error; @@ -2855,12 +2849,12 @@ static int ch_config(pc300dev_t * d) void __iomem *scabase = card->hw.scabase; void __iomem *plxbase = card->hw.plxbase; int ch = chan->channel; - uclong clkrate = 
chan->conf.phys_settings.clock_rate; - uclong clktype = chan->conf.phys_settings.clock_type; - ucshort encoding = chan->conf.proto_settings.encoding; - ucshort parity = chan->conf.proto_settings.parity; - ucchar md0, md2; - + u32 clkrate = chan->conf.phys_settings.clock_rate; + u32 clktype = chan->conf.phys_settings.clock_type; + u16 encoding = chan->conf.proto_settings.encoding; + u16 parity = chan->conf.proto_settings.parity; + u8 md0, md2; + /* Reset the channel */ cpc_writeb(scabase + M_REG(CMD, ch), CMD_CH_RST); @@ -3152,19 +3146,10 @@ int cpc_open(struct net_device *dev) printk("pc300: cpc_open"); #endif -#ifdef FIXME - if (hdlc->proto.id == IF_PROTO_PPP) { - d->if_ptr = &hdlc->state.ppp.pppdev; - } -#endif - result = hdlc_open(dev); - if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { - dev->priv = d; - } - if (result) { + + if (result) return result; - } sprintf(ifr.ifr_name, "%s", dev->name); result = cpc_opench(d); @@ -3197,9 +3182,7 @@ static int cpc_close(struct net_device *dev) CPC_UNLOCK(card, flags); hdlc_close(dev); - if (/* FIXME hdlc->proto.id == IF_PROTO_PPP*/ 0) { - d->if_ptr = NULL; - } + #ifdef CONFIG_PC300_MLPPP if (chan->conf.proto == PC300_PROTO_MLPPP) { cpc_tty_unregister_service(d); @@ -3210,16 +3193,16 @@ static int cpc_close(struct net_device *dev) return 0; } -static uclong detect_ram(pc300_t * card) +static u32 detect_ram(pc300_t * card) { - uclong i; - ucchar data; + u32 i; + u8 data; void __iomem *rambase = card->hw.rambase; card->hw.ramsize = PC300_RAMSIZE; /* Let's find out how much RAM is present on this board */ for (i = 0; i < card->hw.ramsize; i++) { - data = (ucchar) (i & 0xff); + data = (u8)(i & 0xff); cpc_writeb(rambase + i, data); if (cpc_readb(rambase + i) != data) { break; @@ -3296,7 +3279,7 @@ static void cpc_init_card(pc300_t * card) cpc_writeb(card->hw.scabase + DMER, 0x80); if (card->hw.type == PC300_TE) { - ucchar reg1; + u8 reg1; /* Check CPLD version */ reg1 = cpc_readb(card->hw.falcbase + CPLD_REG1); @@ -3360,7 +3343,6 @@ static void cpc_init_card(pc300_t * card) chan->nfree_tx_bd = N_DMA_TX_BUF; d->chan = chan; - d->tx_skb = NULL; d->trace_on = 0; d->line_on = 0; d->line_off = 0; @@ -3431,7 +3413,7 @@ cpc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int first_time = 1; int err, eeprom_outdated = 0; - ucshort device_id; + u16 device_id; pc300_t *card; if (first_time) { diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c index 44a89df..c023584 100644 --- a/drivers/net/wan/sealevel.c +++ b/drivers/net/wan/sealevel.c @@ -8,6 +8,7 @@ * * (c) Copyright 1999, 2001 Alan Cox * (c) Copyright 2001 Red Hat Inc. + * Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl> * */ @@ -19,6 +20,7 @@ #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/delay.h> +#include <linux/hdlc.h> #include <linux/ioport.h> #include <linux/init.h> #include <net/arp.h> @@ -27,22 +29,19 @@ #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> -#include <net/syncppp.h> #include "z85230.h" struct slvl_device { - void *if_ptr; /* General purpose pointer (used by SPPP) */ struct z8530_channel *chan; - struct ppp_device pppdev; int channel; }; struct slvl_board { - struct slvl_device *dev[2]; + struct slvl_device dev[2]; struct z8530_dev board; int iobase; }; @@ -51,72 +50,69 @@ struct slvl_board * Network driver support routines */ +static inline struct slvl_device* dev_to_chan(struct net_device *dev) +{ + return (struct slvl_device *)dev_to_hdlc(dev)->priv; +} + /* - * Frame receive. 
Simple for our card as we do sync ppp and there + * Frame receive. Simple for our card as we do HDLC and there * is no funny garbage involved */ - + static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb) { /* Drop the CRC - it's not a good idea to try and negotiate it ;) */ - skb_trim(skb, skb->len-2); - skb->protocol=htons(ETH_P_WAN_PPP); + skb_trim(skb, skb->len - 2); + skb->protocol = hdlc_type_trans(skb, c->netdevice); skb_reset_mac_header(skb); - skb->dev=c->netdevice; - /* - * Send it to the PPP layer. We don't have time to process - * it right now. - */ + skb->dev = c->netdevice; netif_rx(skb); c->netdevice->last_rx = jiffies; } - + /* * We've been placed in the UP state - */ - + */ + static int sealevel_open(struct net_device *d) { - struct slvl_device *slvl=d->priv; + struct slvl_device *slvl = dev_to_chan(d); int err = -1; int unit = slvl->channel; - + /* - * Link layer up. + * Link layer up. */ - switch(unit) + switch (unit) { case 0: - err=z8530_sync_dma_open(d, slvl->chan); + err = z8530_sync_dma_open(d, slvl->chan); break; case 1: - err=z8530_sync_open(d, slvl->chan); + err = z8530_sync_open(d, slvl->chan); break; } - - if(err) + + if (err) return err; - /* - * Begin PPP - */ - err=sppp_open(d); - if(err) - { - switch(unit) - { + + err = hdlc_open(d); + if (err) { + switch (unit) { case 0: z8530_sync_dma_close(d, slvl->chan); break; case 1: z8530_sync_close(d, slvl->chan); break; - } + } return err; } - - slvl->chan->rx_function=sealevel_input; - + + slvl->chan->rx_function = sealevel_input; + /* * Go go go */ @@ -126,26 +122,19 @@ static int sealevel_open(struct net_device *d) static int sealevel_close(struct net_device *d) { - struct slvl_device *slvl=d->priv; + struct slvl_device *slvl = dev_to_chan(d); int unit = slvl->channel; - + /* * Discard new frames */ - - slvl->chan->rx_function=z8530_null_rx; - - /* - * PPP off - */ - sppp_close(d); - /* - * Link layer down - */ + slvl->chan->rx_function = z8530_null_rx; + + hdlc_close(d); netif_stop_queue(d); - - switch(unit) + + switch (unit) { case 0: z8530_sync_dma_close(d, slvl->chan); @@ -159,210 +148,153 @@ static int sealevel_close(struct net_device *d) static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd) { - /* struct slvl_device *slvl=d->priv; + /* struct slvl_device *slvl=dev_to_chan(d); z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */ - return sppp_do_ioctl(d, ifr,cmd); -} - -static struct net_device_stats *sealevel_get_stats(struct net_device *d) -{ - struct slvl_device *slvl=d->priv; - if(slvl) - return z8530_get_stats(slvl->chan); - else - return NULL; + return hdlc_ioctl(d, ifr, cmd); } /* - * Passed PPP frames, fire them downwind. + * Passed network frames, fire them downwind. 
*/ - + static int sealevel_queue_xmit(struct sk_buff *skb, struct net_device *d) { - struct slvl_device *slvl=d->priv; - return z8530_queue_xmit(slvl->chan, skb); + return z8530_queue_xmit(dev_to_chan(d)->chan, skb); } -static int sealevel_neigh_setup(struct neighbour *n) +static int sealevel_attach(struct net_device *dev, unsigned short encoding, + unsigned short parity) { - if (n->nud_state == NUD_NONE) { - n->ops = &arp_broken_ops; - n->output = n->ops->output; - } - return 0; + if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT) + return 0; + return -EINVAL; } -static int sealevel_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p) +static int slvl_setup(struct slvl_device *sv, int iobase, int irq) { - if (p->tbl->family == AF_INET) { - p->neigh_setup = sealevel_neigh_setup; - p->ucast_probes = 0; - p->mcast_probes = 0; + struct net_device *dev = alloc_hdlcdev(sv); + if (!dev) + return -1; + + dev_to_hdlc(dev)->attach = sealevel_attach; + dev_to_hdlc(dev)->xmit = sealevel_queue_xmit; + dev->open = sealevel_open; + dev->stop = sealevel_close; + dev->do_ioctl = sealevel_ioctl; + dev->base_addr = iobase; + dev->irq = irq; + + if (register_hdlc_device(dev)) { + printk(KERN_ERR "sealevel: unable to register HDLC device\n"); + free_netdev(dev); + return -1; } - return 0; -} -static int sealevel_attach(struct net_device *dev) -{ - struct slvl_device *sv = dev->priv; - sppp_attach(&sv->pppdev); + sv->chan->netdevice = dev; return 0; } -static void sealevel_detach(struct net_device *dev) -{ - sppp_detach(dev); -} - -static void slvl_setup(struct net_device *d) -{ - d->open = sealevel_open; - d->stop = sealevel_close; - d->init = sealevel_attach; - d->uninit = sealevel_detach; - d->hard_start_xmit = sealevel_queue_xmit; - d->get_stats = sealevel_get_stats; - d->set_multicast_list = NULL; - d->do_ioctl = sealevel_ioctl; - d->neigh_setup = sealevel_neigh_setup_dev; - d->set_mac_address = NULL; - -} - -static inline struct slvl_device *slvl_alloc(int iobase, int irq) -{ - struct net_device *d; - struct slvl_device *sv; - - d = alloc_netdev(sizeof(struct slvl_device), "hdlc%d", - slvl_setup); - - if (!d) - return NULL; - - sv = d->priv; - d->ml_priv = sv; - sv->if_ptr = &sv->pppdev; - sv->pppdev.dev = d; - d->base_addr = iobase; - d->irq = irq; - - return sv; -} - /* * Allocate and setup Sealevel board. 
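The slvl_setup() hunk above is essentially the whole recipe for hooking a Z85230-style port into the generic HDLC layer. A condensed sketch of that recipe follows; the names here (my_port, my_register_port, my_attach, my_xmit, my_open, my_stop, my_ioctl) are hypothetical stand-ins rather than code from this patch, and the sketch assumes the pre-netdev_ops net_device hook layout used throughout this diff.

#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/netdevice.h>

struct my_port {			/* stands in for struct slvl_device */
	int channel;
};

int my_open(struct net_device *dev);	/* bring the channel up */
int my_stop(struct net_device *dev);	/* and down again */
int my_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
int my_xmit(struct sk_buff *skb, struct net_device *dev);

/* Only NRZ with 16-bit CCITT CRC is supported by hardware like this,
 * so reject any other line coding when a protocol is attached. */
static int my_attach(struct net_device *dev, unsigned short encoding,
		     unsigned short parity)
{
	if (encoding == ENCODING_NRZ && parity == PARITY_CRC16_PR1_CCITT)
		return 0;
	return -EINVAL;
}

static int my_register_port(struct my_port *port, int iobase, int irq)
{
	/* alloc_hdlcdev() allocates the hdlcN net_device and stores our
	 * private pointer where dev_to_hdlc(dev)->priv finds it later. */
	struct net_device *dev = alloc_hdlcdev(port);

	if (!dev)
		return -ENOMEM;

	dev_to_hdlc(dev)->attach = my_attach;	/* line-coding checks */
	dev_to_hdlc(dev)->xmit = my_xmit;	/* hand frames to the chip */
	dev->open = my_open;
	dev->stop = my_stop;
	dev->do_ioctl = my_ioctl;		/* usually ends in hdlc_ioctl() */
	dev->base_addr = iobase;
	dev->irq = irq;

	/* Replaces the old register_netdev()/sppp_attach() pairing; the
	 * protocol itself (raw HDLC, Cisco, PPP, ...) is selected later
	 * through the hdlc_ioctl() path. */
	if (register_hdlc_device(dev)) {
		free_netdev(dev);
		return -ENODEV;
	}
	return 0;
}

On teardown the same sketch would unwind with unregister_hdlc_device() followed by free_netdev(), matching what slvl_shutdown() does further down in this patch.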
*/ - -static __init struct slvl_board *slvl_init(int iobase, int irq, + +static __init struct slvl_board *slvl_init(int iobase, int irq, int txdma, int rxdma, int slow) { struct z8530_dev *dev; struct slvl_board *b; - + /* * Get the needed I/O space */ - if(!request_region(iobase, 8, "Sealevel 4021")) - { - printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", iobase); + if (!request_region(iobase, 8, "Sealevel 4021")) { + printk(KERN_WARNING "sealevel: I/O 0x%X already in use.\n", + iobase); return NULL; } - - b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); - if(!b) - goto fail3; - if (!(b->dev[0]= slvl_alloc(iobase, irq))) - goto fail2; + b = kzalloc(sizeof(struct slvl_board), GFP_KERNEL); + if (!b) + goto err_kzalloc; - b->dev[0]->chan = &b->board.chanA; - b->dev[0]->channel = 0; - - if (!(b->dev[1] = slvl_alloc(iobase, irq))) - goto fail1_0; + b->dev[0].chan = &b->board.chanA; + b->dev[0].channel = 0; - b->dev[1]->chan = &b->board.chanB; - b->dev[1]->channel = 1; + b->dev[1].chan = &b->board.chanB; + b->dev[1].channel = 1; dev = &b->board; - + /* * Stuff in the I/O addressing */ - + dev->active = 0; b->iobase = iobase; - + /* * Select 8530 delays for the old board */ - - if(slow) + + if (slow) iobase |= Z8530_PORT_SLEEP; - - dev->chanA.ctrlio=iobase+1; - dev->chanA.dataio=iobase; - dev->chanB.ctrlio=iobase+3; - dev->chanB.dataio=iobase+2; - - dev->chanA.irqs=&z8530_nop; - dev->chanB.irqs=&z8530_nop; - + + dev->chanA.ctrlio = iobase + 1; + dev->chanA.dataio = iobase; + dev->chanB.ctrlio = iobase + 3; + dev->chanB.dataio = iobase + 2; + + dev->chanA.irqs = &z8530_nop; + dev->chanB.irqs = &z8530_nop; + /* * Assert DTR enable DMA */ - - outb(3|(1<<7), b->iobase+4); - + + outb(3 | (1 << 7), b->iobase + 4); + /* We want a fast IRQ for this device. 
Actually we'd like an even faster IRQ ;) - This is one driver RtLinux is made for */ - - if(request_irq(irq, &z8530_interrupt, IRQF_DISABLED, "SeaLevel", dev)<0) - { + + if (request_irq(irq, &z8530_interrupt, IRQF_DISABLED, + "SeaLevel", dev) < 0) { printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq); - goto fail1_1; + goto err_request_irq; } - - dev->irq=irq; - dev->chanA.private=&b->dev[0]; - dev->chanB.private=&b->dev[1]; - dev->chanA.netdevice=b->dev[0]->pppdev.dev; - dev->chanB.netdevice=b->dev[1]->pppdev.dev; - dev->chanA.dev=dev; - dev->chanB.dev=dev; - - dev->chanA.txdma=3; - dev->chanA.rxdma=1; - if(request_dma(dev->chanA.txdma, "SeaLevel (TX)")!=0) - goto fail; - - if(request_dma(dev->chanA.rxdma, "SeaLevel (RX)")!=0) - goto dmafail; - + + dev->irq = irq; + dev->chanA.private = &b->dev[0]; + dev->chanB.private = &b->dev[1]; + dev->chanA.dev = dev; + dev->chanB.dev = dev; + + dev->chanA.txdma = 3; + dev->chanA.rxdma = 1; + if (request_dma(dev->chanA.txdma, "SeaLevel (TX)")) + goto err_dma_tx; + + if (request_dma(dev->chanA.rxdma, "SeaLevel (RX)")) + goto err_dma_rx; + disable_irq(irq); - + /* * Begin normal initialise */ - - if(z8530_init(dev)!=0) - { + + if (z8530_init(dev) != 0) { printk(KERN_ERR "Z8530 series device not found.\n"); enable_irq(irq); - goto dmafail2; + goto free_hw; } - if(dev->type==Z85C30) - { + if (dev->type == Z85C30) { z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream); z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream); - } - else - { + } else { z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream_85230); z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230); } @@ -370,36 +302,31 @@ static __init struct slvl_board *slvl_init(int iobase, int irq, /* * Now we can take the IRQ */ - + enable_irq(irq); - if (register_netdev(b->dev[0]->pppdev.dev)) - goto dmafail2; - - if (register_netdev(b->dev[1]->pppdev.dev)) - goto fail_unit; + if (slvl_setup(&b->dev[0], iobase, irq)) + goto free_hw; + if (slvl_setup(&b->dev[1], iobase, irq)) + goto free_netdev0; z8530_describe(dev, "I/O", iobase); - dev->active=1; + dev->active = 1; return b; -fail_unit: - unregister_netdev(b->dev[0]->pppdev.dev); - -dmafail2: +free_netdev0: + unregister_hdlc_device(b->dev[0].chan->netdevice); + free_netdev(b->dev[0].chan->netdevice); +free_hw: free_dma(dev->chanA.rxdma); -dmafail: +err_dma_rx: free_dma(dev->chanA.txdma); -fail: +err_dma_tx: free_irq(irq, dev); -fail1_1: - free_netdev(b->dev[1]->pppdev.dev); -fail1_0: - free_netdev(b->dev[0]->pppdev.dev); -fail2: +err_request_irq: kfree(b); -fail3: - release_region(iobase,8); +err_kzalloc: + release_region(iobase, 8); return NULL; } @@ -408,14 +335,14 @@ static void __exit slvl_shutdown(struct slvl_board *b) int u; z8530_shutdown(&b->board); - - for(u=0; u<2; u++) + + for (u = 0; u < 2; u++) { - struct net_device *d = b->dev[u]->pppdev.dev; - unregister_netdev(d); + struct net_device *d = b->dev[u].chan->netdevice; + unregister_hdlc_device(d); free_netdev(d); } - + free_irq(b->board.irq, &b->board); free_dma(b->board.chanA.rxdma); free_dma(b->board.chanA.txdma); @@ -451,10 +378,6 @@ static struct slvl_board *slvl_unit; static int __init slvl_init_module(void) { -#ifdef MODULE - printk(KERN_INFO "SeaLevel Z85230 Synchronous Driver v 0.02.\n"); - printk(KERN_INFO "(c) Copyright 1998, Building Number Three Ltd.\n"); -#endif slvl_unit = slvl_init(io, irq, txdma, rxdma, slow); return slvl_unit ? 
0 : -ENODEV; diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c index 29b4b94..327d585 100644 --- a/drivers/net/wan/syncppp.c +++ b/drivers/net/wan/syncppp.c @@ -230,13 +230,6 @@ static void sppp_input (struct net_device *dev, struct sk_buff *skb) skb->dev=dev; skb_reset_mac_header(skb); - if (dev->flags & IFF_RUNNING) - { - /* Count received bytes, add FCS and one flag */ - sp->ibytes+= skb->len + 3; - sp->ipkts++; - } - if (!pskb_may_pull(skb, PPP_HEADER_LEN)) { /* Too small packet, drop it. */ if (sp->pp_flags & PP_DEBUG) @@ -832,7 +825,6 @@ static void sppp_cp_send (struct sppp *sp, u16 proto, u8 type, sppp_print_bytes ((u8*) (lh+1), len); printk (">\n"); } - sp->obytes += skb->len; /* Control is high priority so it doesn't get queued behind data */ skb->priority=TC_PRIO_CONTROL; skb->dev = dev; @@ -875,7 +867,6 @@ static void sppp_cisco_send (struct sppp *sp, int type, u32 par1, u32 par2) printk (KERN_WARNING "%s: cisco output: <%xh %xh %xh %xh %xh-%xh>\n", dev->name, ntohl (ch->type), ch->par1, ch->par2, ch->rel, ch->time0, ch->time1); - sp->obytes += skb->len; skb->priority=TC_PRIO_CONTROL; skb->dev = dev; skb_queue_tail(&tx_queue, skb); diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c index 98ef400..243bd8d 100644 --- a/drivers/net/wan/z85230.c +++ b/drivers/net/wan/z85230.c @@ -43,6 +43,7 @@ #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/delay.h> +#include <linux/hdlc.h> #include <linux/ioport.h> #include <linux/init.h> #include <asm/dma.h> @@ -51,7 +52,6 @@ #define RT_UNLOCK #include <linux/spinlock.h> -#include <net/syncppp.h> #include "z85230.h" @@ -440,51 +440,46 @@ static void z8530_tx(struct z8530_channel *c) * A status event occurred in PIO synchronous mode. There are several * reasons the chip will bother us here. A transmit underrun means we * failed to feed the chip fast enough and just broke a packet. A DCD - * change is a line up or down. We communicate that back to the protocol - * layer for synchronous PPP to renegotiate. + * change is a line up or down. 
*/ static void z8530_status(struct z8530_channel *chan) { u8 status, altered; - status=read_zsreg(chan, R0); - altered=chan->status^status; - - chan->status=status; - - if(status&TxEOM) - { + status = read_zsreg(chan, R0); + altered = chan->status ^ status; + + chan->status = status; + + if (status & TxEOM) { /* printk("%s: Tx underrun.\n", chan->dev->name); */ - chan->stats.tx_fifo_errors++; + chan->netdevice->stats.tx_fifo_errors++; write_zsctrl(chan, ERR_RES); z8530_tx_done(chan); } - - if(altered&chan->dcdcheck) + + if (altered & chan->dcdcheck) { - if(status&chan->dcdcheck) - { + if (status & chan->dcdcheck) { printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3]|RxENABLE); - if(chan->netdevice && - ((chan->netdevice->type == ARPHRD_HDLC) || - (chan->netdevice->type == ARPHRD_PPP))) - sppp_reopen(chan->netdevice); - } - else - { + write_zsreg(chan, R3, chan->regs[3] | RxENABLE); + if (chan->netdevice) + netif_carrier_on(chan->netdevice); + } else { printk(KERN_INFO "%s: DCD lost\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); + write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); z8530_flush_fifo(chan); + if (chan->netdevice) + netif_carrier_off(chan->netdevice); } - - } + + } write_zsctrl(chan, RES_EXT_INT); write_zsctrl(chan, RES_H_IUS); } -struct z8530_irqhandler z8530_sync= +struct z8530_irqhandler z8530_sync = { z8530_rx, z8530_tx, @@ -556,8 +551,7 @@ static void z8530_dma_tx(struct z8530_channel *chan) * * A status event occurred on the Z8530. We receive these for two reasons * when in DMA mode. Firstly if we finished a packet transfer we get one - * and kick the next packet out. Secondly we may see a DCD change and - * have to poke the protocol layer. + * and kick the next packet out. Secondly we may see a DCD change. * */ @@ -586,24 +580,21 @@ static void z8530_dma_status(struct z8530_channel *chan) } } - if(altered&chan->dcdcheck) + if (altered & chan->dcdcheck) { - if(status&chan->dcdcheck) - { + if (status & chan->dcdcheck) { printk(KERN_INFO "%s: DCD raised\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3]|RxENABLE); - if(chan->netdevice && - ((chan->netdevice->type == ARPHRD_HDLC) || - (chan->netdevice->type == ARPHRD_PPP))) - sppp_reopen(chan->netdevice); - } - else - { + write_zsreg(chan, R3, chan->regs[3] | RxENABLE); + if (chan->netdevice) + netif_carrier_on(chan->netdevice); + } else { printk(KERN_INFO "%s:DCD lost\n", chan->dev->name); - write_zsreg(chan, R3, chan->regs[3]&~RxENABLE); + write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE); z8530_flush_fifo(chan); + if (chan->netdevice) + netif_carrier_off(chan->netdevice); } - } + } write_zsctrl(chan, RES_EXT_INT); write_zsctrl(chan, RES_H_IUS); @@ -1459,10 +1450,10 @@ static void z8530_tx_begin(struct z8530_channel *c) /* * Check if we crapped out. */ - if(get_dma_residue(c->txdma)) + if (get_dma_residue(c->txdma)) { - c->stats.tx_dropped++; - c->stats.tx_fifo_errors++; + c->netdevice->stats.tx_dropped++; + c->netdevice->stats.tx_fifo_errors++; } release_dma_lock(flags); } @@ -1534,21 +1525,21 @@ static void z8530_tx_begin(struct z8530_channel *c) * packet. This code is fairly timing sensitive. * * Called with the register lock held. 
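Both status handlers above now reduce DCD handling to a carrier notification. As I understand the generic HDLC core of this era, that is all a hardware driver has to provide: the core sees the carrier transition and starts or stops whatever protocol is attached, which is the work the deleted sppp_reopen() call used to do by hand. A hypothetical helper (my_report_dcd is an invented name, and dcd_up would come from the chip's DCD status bit) showing just that contract:

#include <linux/netdevice.h>

static void my_report_dcd(struct net_device *dev, int dcd_up)
{
	if (!dev)
		return;

	if (dcd_up)
		netif_carrier_on(dev);	/* line up: protocol may (re)start */
	else
		netif_carrier_off(dev);	/* line down: traffic stops flowing */
}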
- */ - + */ + static void z8530_tx_done(struct z8530_channel *c) { struct sk_buff *skb; /* Actually this can happen.*/ - if(c->tx_skb==NULL) + if (c->tx_skb == NULL) return; - skb=c->tx_skb; - c->tx_skb=NULL; + skb = c->tx_skb; + c->tx_skb = NULL; z8530_tx_begin(c); - c->stats.tx_packets++; - c->stats.tx_bytes+=skb->len; + c->netdevice->stats.tx_packets++; + c->netdevice->stats.tx_bytes += skb->len; dev_kfree_skb_irq(skb); } @@ -1558,7 +1549,7 @@ static void z8530_tx_done(struct z8530_channel *c) * @skb: The buffer * * We point the receive handler at this function when idle. Instead - * of syncppp processing the frames we get to throw them away. + * of processing the frames we get to throw them away. */ void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb) @@ -1635,10 +1626,11 @@ static void z8530_rx_done(struct z8530_channel *c) else /* Can't occur as we dont reenable the DMA irq until after the flip is done */ - printk(KERN_WARNING "%s: DMA flip overrun!\n", c->netdevice->name); - + printk(KERN_WARNING "%s: DMA flip overrun!\n", + c->netdevice->name); + release_dma_lock(flags); - + /* * Shove the old buffer into an sk_buff. We can't DMA * directly into one on a PC - it might be above the 16Mb @@ -1646,27 +1638,23 @@ static void z8530_rx_done(struct z8530_channel *c) * can avoid the copy. Optimisation 2 - make the memcpy * a copychecksum. */ - - skb=dev_alloc_skb(ct); - if(skb==NULL) - { - c->stats.rx_dropped++; - printk(KERN_WARNING "%s: Memory squeeze.\n", c->netdevice->name); - } - else - { + + skb = dev_alloc_skb(ct); + if (skb == NULL) { + c->netdevice->stats.rx_dropped++; + printk(KERN_WARNING "%s: Memory squeeze.\n", + c->netdevice->name); + } else { skb_put(skb, ct); skb_copy_to_linear_data(skb, rxb, ct); - c->stats.rx_packets++; - c->stats.rx_bytes+=ct; + c->netdevice->stats.rx_packets++; + c->netdevice->stats.rx_bytes += ct; } - c->dma_ready=1; - } - else - { - RT_LOCK; - skb=c->skb; - + c->dma_ready = 1; + } else { + RT_LOCK; + skb = c->skb; + /* * The game we play for non DMA is similar. We want to * get the controller set up for the next packet as fast @@ -1677,48 +1665,39 @@ static void z8530_rx_done(struct z8530_channel *c) * if you build a system where the sync irq isnt blocked * by the kernel IRQ disable then you need only block the * sync IRQ for the RT_LOCK area. - * + * */ ct=c->count; - + c->skb = c->skb2; c->count = 0; c->max = c->mtu; - if(c->skb) - { + if (c->skb) { c->dptr = c->skb->data; c->max = c->mtu; - } - else - { - c->count= 0; + } else { + c->count = 0; c->max = 0; } RT_UNLOCK; c->skb2 = dev_alloc_skb(c->mtu); - if(c->skb2==NULL) + if (c->skb2 == NULL) printk(KERN_WARNING "%s: memory squeeze.\n", - c->netdevice->name); + c->netdevice->name); else - { - skb_put(c->skb2,c->mtu); - } - c->stats.rx_packets++; - c->stats.rx_bytes+=ct; - + skb_put(c->skb2, c->mtu); + c->netdevice->stats.rx_packets++; + c->netdevice->stats.rx_bytes += ct; } /* * If we received a frame we must now process it. */ - if(skb) - { + if (skb) { skb_trim(skb, ct); - c->rx_function(c,skb); - } - else - { - c->stats.rx_dropped++; + c->rx_function(c, skb); + } else { + c->netdevice->stats.rx_dropped++; printk(KERN_ERR "%s: Lost a frame\n", c->netdevice->name); } } @@ -1730,7 +1709,7 @@ static void z8530_rx_done(struct z8530_channel *c) * Returns true if the buffer cross a DMA boundary on a PC. The poor * thing can only DMA within a 64K block not across the edges of it. 
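The completion paths above also show where the per-channel statistics went: with the software stats block dropped from struct z8530_channel, the counters live in the net_device itself, so the z8530_get_stats() accessor and the drivers' get_stats hooks become unnecessary and are deleted elsewhere in this patch. A minimal sketch of the resulting pattern, with my_count_tx as an invented name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Counters are bumped straight in dev->stats; as I read the core of
 * this era, that embedded block is what gets reported when a driver
 * supplies no get_stats method of its own. */
static void my_count_tx(struct net_device *dev, const struct sk_buff *skb)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
}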
*/ - + static inline int spans_boundary(struct sk_buff *skb) { unsigned long a=(unsigned long)skb->data; @@ -1799,24 +1778,6 @@ int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb) EXPORT_SYMBOL(z8530_queue_xmit); -/** - * z8530_get_stats - Get network statistics - * @c: The channel to use - * - * Get the statistics block. We keep the statistics in software as - * the chip doesn't do it for us. - * - * Locking is ignored here - we could lock for a copy but its - * not likely to be that big an issue - */ - -struct net_device_stats *z8530_get_stats(struct z8530_channel *c) -{ - return &c->stats; -} - -EXPORT_SYMBOL(z8530_get_stats); - /* * Module support */ diff --git a/drivers/net/wan/z85230.h b/drivers/net/wan/z85230.h index 158aea7..4f37239 100644 --- a/drivers/net/wan/z85230.h +++ b/drivers/net/wan/z85230.h @@ -325,7 +325,6 @@ struct z8530_channel void *private; /* For our owner */ struct net_device *netdevice; /* Network layer device */ - struct net_device_stats stats; /* Network layer statistics */ /* * Async features @@ -366,13 +365,13 @@ struct z8530_channel unsigned char tx_active; /* character is being xmitted */ unsigned char tx_stopped; /* output is suspended */ - spinlock_t *lock; /* Devicr lock */ -}; + spinlock_t *lock; /* Device lock */ +}; /* * Each Z853x0 device. - */ - + */ + struct z8530_dev { char *name; /* Device instance name */ @@ -408,7 +407,6 @@ extern int z8530_sync_txdma_open(struct net_device *, struct z8530_channel *); extern int z8530_sync_txdma_close(struct net_device *, struct z8530_channel *); extern int z8530_channel_load(struct z8530_channel *, u8 *); extern int z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb); -extern struct net_device_stats *z8530_get_stats(struct z8530_channel *c); extern void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb); diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 4c7ff61..9931b5a 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -695,6 +695,7 @@ config MAC80211_HWSIM source "drivers/net/wireless/p54/Kconfig" source "drivers/net/wireless/ath5k/Kconfig" +source "drivers/net/wireless/ath9k/Kconfig" source "drivers/net/wireless/iwlwifi/Kconfig" source "drivers/net/wireless/hostap/Kconfig" source "drivers/net/wireless/b43/Kconfig" diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 54a4f6f..59aa89e 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -62,5 +62,6 @@ obj-$(CONFIG_RT2X00) += rt2x00/ obj-$(CONFIG_P54_COMMON) += p54/ obj-$(CONFIG_ATH5K) += ath5k/ +obj-$(CONFIG_ATH9K) += ath9k/ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index ebf19bc..2028866 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c @@ -95,8 +95,6 @@ static struct pci_device_id ath5k_pci_id_table[] __devinitdata = { { PCI_VDEVICE(ATHEROS, 0x001a), .driver_data = AR5K_AR5212 }, /* 2413 Griffin-lite */ { PCI_VDEVICE(ATHEROS, 0x001b), .driver_data = AR5K_AR5212 }, /* 5413 Eagle */ { PCI_VDEVICE(ATHEROS, 0x001c), .driver_data = AR5K_AR5212 }, /* 5424 Condor (PCI-E)*/ - { PCI_VDEVICE(ATHEROS, 0x0023), .driver_data = AR5K_AR5212 }, /* 5416 */ - { PCI_VDEVICE(ATHEROS, 0x0024), .driver_data = AR5K_AR5212 }, /* 5418 */ { 0 } }; MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); diff --git a/drivers/net/wireless/ath9k/Kconfig b/drivers/net/wireless/ath9k/Kconfig new file mode 100644 index 
0000000..9e19dcc --- /dev/null +++ b/drivers/net/wireless/ath9k/Kconfig @@ -0,0 +1,8 @@ +config ATH9K + tristate "Atheros 802.11n wireless cards support" + depends on PCI && MAC80211 && WLAN_80211 + ---help--- + This module adds support for wireless adapters based on + Atheros IEEE 802.11n AR5008 and AR9001 family of chipsets. + + If you choose to build a module, it'll be called ath9k. diff --git a/drivers/net/wireless/ath9k/Makefile b/drivers/net/wireless/ath9k/Makefile new file mode 100644 index 0000000..a641151 --- /dev/null +++ b/drivers/net/wireless/ath9k/Makefile @@ -0,0 +1,11 @@ +ath9k-y += hw.o \ + phy.o \ + regd.o \ + beacon.o \ + main.o \ + recv.o \ + xmit.o \ + rc.o \ + core.o + +obj-$(CONFIG_ATH9K) += ath9k.o diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h new file mode 100644 index 0000000..d1b0fba --- /dev/null +++ b/drivers/net/wireless/ath9k/ath9k.h @@ -0,0 +1,1021 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef ATH9K_H +#define ATH9K_H + +#include <linux/io.h> + +#define ATHEROS_VENDOR_ID 0x168c + +#define AR5416_DEVID_PCI 0x0023 +#define AR5416_DEVID_PCIE 0x0024 +#define AR9160_DEVID_PCI 0x0027 +#define AR9280_DEVID_PCI 0x0029 +#define AR9280_DEVID_PCIE 0x002a + +#define AR5416_AR9100_DEVID 0x000b + +#define AR_SUBVENDOR_ID_NOG 0x0e11 +#define AR_SUBVENDOR_ID_NEW_A 0x7065 + +#define ATH9K_TXERR_XRETRY 0x01 +#define ATH9K_TXERR_FILT 0x02 +#define ATH9K_TXERR_FIFO 0x04 +#define ATH9K_TXERR_XTXOP 0x08 +#define ATH9K_TXERR_TIMER_EXPIRED 0x10 + +#define ATH9K_TX_BA 0x01 +#define ATH9K_TX_PWRMGMT 0x02 +#define ATH9K_TX_DESC_CFG_ERR 0x04 +#define ATH9K_TX_DATA_UNDERRUN 0x08 +#define ATH9K_TX_DELIM_UNDERRUN 0x10 +#define ATH9K_TX_SW_ABORTED 0x40 +#define ATH9K_TX_SW_FILTERED 0x80 + +#define NBBY 8 + +struct ath_tx_status { + u32 ts_tstamp; + u16 ts_seqnum; + u8 ts_status; + u8 ts_ratecode; + u8 ts_rateindex; + int8_t ts_rssi; + u8 ts_shortretry; + u8 ts_longretry; + u8 ts_virtcol; + u8 ts_antenna; + u8 ts_flags; + int8_t ts_rssi_ctl0; + int8_t ts_rssi_ctl1; + int8_t ts_rssi_ctl2; + int8_t ts_rssi_ext0; + int8_t ts_rssi_ext1; + int8_t ts_rssi_ext2; + u8 pad[3]; + u32 ba_low; + u32 ba_high; + u32 evm0; + u32 evm1; + u32 evm2; +}; + +struct ath_rx_status { + u32 rs_tstamp; + u16 rs_datalen; + u8 rs_status; + u8 rs_phyerr; + int8_t rs_rssi; + u8 rs_keyix; + u8 rs_rate; + u8 rs_antenna; + u8 rs_more; + int8_t rs_rssi_ctl0; + int8_t rs_rssi_ctl1; + int8_t rs_rssi_ctl2; + int8_t rs_rssi_ext0; + int8_t rs_rssi_ext1; + int8_t rs_rssi_ext2; + u8 rs_isaggr; + u8 rs_moreaggr; + u8 rs_num_delims; + u8 rs_flags; + u32 evm0; + u32 evm1; + u32 evm2; +}; + +#define ATH9K_RXERR_CRC 0x01 +#define ATH9K_RXERR_PHY 0x02 +#define ATH9K_RXERR_FIFO 0x04 +#define ATH9K_RXERR_DECRYPT 0x08 +#define ATH9K_RXERR_MIC 
0x10 + +#define ATH9K_RX_MORE 0x01 +#define ATH9K_RX_MORE_AGGR 0x02 +#define ATH9K_RX_GI 0x04 +#define ATH9K_RX_2040 0x08 +#define ATH9K_RX_DELIM_CRC_PRE 0x10 +#define ATH9K_RX_DELIM_CRC_POST 0x20 +#define ATH9K_RX_DECRYPT_BUSY 0x40 + +#define ATH9K_RXKEYIX_INVALID ((u8)-1) +#define ATH9K_TXKEYIX_INVALID ((u32)-1) + +struct ath_desc { + u32 ds_link; + u32 ds_data; + u32 ds_ctl0; + u32 ds_ctl1; + u32 ds_hw[20]; + union { + struct ath_tx_status tx; + struct ath_rx_status rx; + void *stats; + } ds_us; + void *ds_vdata; +} __packed; + +#define ds_txstat ds_us.tx +#define ds_rxstat ds_us.rx +#define ds_stat ds_us.stats + +#define ATH9K_TXDESC_CLRDMASK 0x0001 +#define ATH9K_TXDESC_NOACK 0x0002 +#define ATH9K_TXDESC_RTSENA 0x0004 +#define ATH9K_TXDESC_CTSENA 0x0008 +#define ATH9K_TXDESC_INTREQ 0x0010 +#define ATH9K_TXDESC_VEOL 0x0020 +#define ATH9K_TXDESC_EXT_ONLY 0x0040 +#define ATH9K_TXDESC_EXT_AND_CTL 0x0080 +#define ATH9K_TXDESC_VMF 0x0100 +#define ATH9K_TXDESC_FRAG_IS_ON 0x0200 + +#define ATH9K_RXDESC_INTREQ 0x0020 + +enum wireless_mode { + ATH9K_MODE_11A = 0, + ATH9K_MODE_11B = 2, + ATH9K_MODE_11G = 3, + ATH9K_MODE_11NA_HT20 = 6, + ATH9K_MODE_11NG_HT20 = 7, + ATH9K_MODE_11NA_HT40PLUS = 8, + ATH9K_MODE_11NA_HT40MINUS = 9, + ATH9K_MODE_11NG_HT40PLUS = 10, + ATH9K_MODE_11NG_HT40MINUS = 11, + ATH9K_MODE_MAX +}; + +enum ath9k_hw_caps { + ATH9K_HW_CAP_CHAN_SPREAD = BIT(0), + ATH9K_HW_CAP_MIC_AESCCM = BIT(1), + ATH9K_HW_CAP_MIC_CKIP = BIT(2), + ATH9K_HW_CAP_MIC_TKIP = BIT(3), + ATH9K_HW_CAP_CIPHER_AESCCM = BIT(4), + ATH9K_HW_CAP_CIPHER_CKIP = BIT(5), + ATH9K_HW_CAP_CIPHER_TKIP = BIT(6), + ATH9K_HW_CAP_VEOL = BIT(7), + ATH9K_HW_CAP_BSSIDMASK = BIT(8), + ATH9K_HW_CAP_MCAST_KEYSEARCH = BIT(9), + ATH9K_HW_CAP_CHAN_HALFRATE = BIT(10), + ATH9K_HW_CAP_CHAN_QUARTERRATE = BIT(11), + ATH9K_HW_CAP_HT = BIT(12), + ATH9K_HW_CAP_GTT = BIT(13), + ATH9K_HW_CAP_FASTCC = BIT(14), + ATH9K_HW_CAP_RFSILENT = BIT(15), + ATH9K_HW_CAP_WOW = BIT(16), + ATH9K_HW_CAP_CST = BIT(17), + ATH9K_HW_CAP_ENHANCEDPM = BIT(18), + ATH9K_HW_CAP_AUTOSLEEP = BIT(19), + ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(20), + ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT = BIT(21), +}; + +enum ath9k_capability_type { + ATH9K_CAP_CIPHER = 0, + ATH9K_CAP_TKIP_MIC, + ATH9K_CAP_TKIP_SPLIT, + ATH9K_CAP_PHYCOUNTERS, + ATH9K_CAP_DIVERSITY, + ATH9K_CAP_TXPOW, + ATH9K_CAP_PHYDIAG, + ATH9K_CAP_MCAST_KEYSRCH, + ATH9K_CAP_TSF_ADJUST, + ATH9K_CAP_WME_TKIPMIC, + ATH9K_CAP_RFSILENT, + ATH9K_CAP_ANT_CFG_2GHZ, + ATH9K_CAP_ANT_CFG_5GHZ +}; + +struct ath9k_hw_capabilities { + u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */ + DECLARE_BITMAP(wireless_modes, ATH9K_MODE_MAX); /* ATH9K_MODE_* */ + u16 total_queues; + u16 keycache_size; + u16 low_5ghz_chan, high_5ghz_chan; + u16 low_2ghz_chan, high_2ghz_chan; + u16 num_mr_retries; + u16 rts_aggr_limit; + u8 tx_chainmask; + u8 rx_chainmask; + u16 tx_triglevel_max; + u16 reg_cap; + u8 num_gpio_pins; + u8 num_antcfg_2ghz; + u8 num_antcfg_5ghz; +}; + +struct ath9k_ops_config { + int dma_beacon_response_time; + int sw_beacon_response_time; + int additional_swba_backoff; + int ack_6mb; + int cwm_ignore_extcca; + u8 pcie_powersave_enable; + u8 pcie_l1skp_enable; + u8 pcie_clock_req; + u32 pcie_waen; + int pcie_power_reset; + u8 pcie_restore; + u8 analog_shiftreg; + u8 ht_enable; + u32 ofdm_trig_low; + u32 ofdm_trig_high; + u32 cck_trig_high; + u32 cck_trig_low; + u32 enable_ani; + u8 noise_immunity_level; + u32 ofdm_weaksignal_det; + u32 cck_weaksignal_thr; + u8 spur_immunity_level; + u8 firstep_level; + int8_t rssi_thr_high; + int8_t 
rssi_thr_low; + u16 diversity_control; + u16 antenna_switch_swap; + int serialize_regmode; + int intr_mitigation; +#define SPUR_DISABLE 0 +#define SPUR_ENABLE_IOCTL 1 +#define SPUR_ENABLE_EEPROM 2 +#define AR_EEPROM_MODAL_SPURS 5 +#define AR_SPUR_5413_1 1640 +#define AR_SPUR_5413_2 1200 +#define AR_NO_SPUR 0x8000 +#define AR_BASE_FREQ_2GHZ 2300 +#define AR_BASE_FREQ_5GHZ 4900 +#define AR_SPUR_FEEQ_BOUND_HT40 19 +#define AR_SPUR_FEEQ_BOUND_HT20 10 + int spurmode; + u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; +}; + +enum ath9k_tx_queue { + ATH9K_TX_QUEUE_INACTIVE = 0, + ATH9K_TX_QUEUE_DATA, + ATH9K_TX_QUEUE_BEACON, + ATH9K_TX_QUEUE_CAB, + ATH9K_TX_QUEUE_UAPSD, + ATH9K_TX_QUEUE_PSPOLL +}; + +#define ATH9K_NUM_TX_QUEUES 10 + +enum ath9k_tx_queue_subtype { + ATH9K_WME_AC_BK = 0, + ATH9K_WME_AC_BE, + ATH9K_WME_AC_VI, + ATH9K_WME_AC_VO, + ATH9K_WME_UPSD +}; + +enum ath9k_tx_queue_flags { + TXQ_FLAG_TXOKINT_ENABLE = 0x0001, + TXQ_FLAG_TXERRINT_ENABLE = 0x0001, + TXQ_FLAG_TXDESCINT_ENABLE = 0x0002, + TXQ_FLAG_TXEOLINT_ENABLE = 0x0004, + TXQ_FLAG_TXURNINT_ENABLE = 0x0008, + TXQ_FLAG_BACKOFF_DISABLE = 0x0010, + TXQ_FLAG_COMPRESSION_ENABLE = 0x0020, + TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE = 0x0040, + TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE = 0x0080, +}; + +#define ATH9K_TXQ_USEDEFAULT ((u32) -1) + +#define ATH9K_DECOMP_MASK_SIZE 128 +#define ATH9K_READY_TIME_LO_BOUND 50 +#define ATH9K_READY_TIME_HI_BOUND 96 + +enum ath9k_pkt_type { + ATH9K_PKT_TYPE_NORMAL = 0, + ATH9K_PKT_TYPE_ATIM, + ATH9K_PKT_TYPE_PSPOLL, + ATH9K_PKT_TYPE_BEACON, + ATH9K_PKT_TYPE_PROBE_RESP, + ATH9K_PKT_TYPE_CHIRP, + ATH9K_PKT_TYPE_GRP_POLL, +}; + +struct ath9k_tx_queue_info { + u32 tqi_ver; + enum ath9k_tx_queue tqi_type; + enum ath9k_tx_queue_subtype tqi_subtype; + enum ath9k_tx_queue_flags tqi_qflags; + u32 tqi_priority; + u32 tqi_aifs; + u32 tqi_cwmin; + u32 tqi_cwmax; + u16 tqi_shretry; + u16 tqi_lgretry; + u32 tqi_cbrPeriod; + u32 tqi_cbrOverflowLimit; + u32 tqi_burstTime; + u32 tqi_readyTime; + u32 tqi_physCompBuf; + u32 tqi_intFlags; +}; + +enum ath9k_rx_filter { + ATH9K_RX_FILTER_UCAST = 0x00000001, + ATH9K_RX_FILTER_MCAST = 0x00000002, + ATH9K_RX_FILTER_BCAST = 0x00000004, + ATH9K_RX_FILTER_CONTROL = 0x00000008, + ATH9K_RX_FILTER_BEACON = 0x00000010, + ATH9K_RX_FILTER_PROM = 0x00000020, + ATH9K_RX_FILTER_PROBEREQ = 0x00000080, + ATH9K_RX_FILTER_PSPOLL = 0x00004000, + ATH9K_RX_FILTER_PHYERR = 0x00000100, + ATH9K_RX_FILTER_PHYRADAR = 0x00002000, +}; + +enum ath9k_int { + ATH9K_INT_RX = 0x00000001, + ATH9K_INT_RXDESC = 0x00000002, + ATH9K_INT_RXNOFRM = 0x00000008, + ATH9K_INT_RXEOL = 0x00000010, + ATH9K_INT_RXORN = 0x00000020, + ATH9K_INT_TX = 0x00000040, + ATH9K_INT_TXDESC = 0x00000080, + ATH9K_INT_TIM_TIMER = 0x00000100, + ATH9K_INT_TXURN = 0x00000800, + ATH9K_INT_MIB = 0x00001000, + ATH9K_INT_RXPHY = 0x00004000, + ATH9K_INT_RXKCM = 0x00008000, + ATH9K_INT_SWBA = 0x00010000, + ATH9K_INT_BMISS = 0x00040000, + ATH9K_INT_BNR = 0x00100000, + ATH9K_INT_TIM = 0x00200000, + ATH9K_INT_DTIM = 0x00400000, + ATH9K_INT_DTIMSYNC = 0x00800000, + ATH9K_INT_GPIO = 0x01000000, + ATH9K_INT_CABEND = 0x02000000, + ATH9K_INT_CST = 0x10000000, + ATH9K_INT_GTT = 0x20000000, + ATH9K_INT_FATAL = 0x40000000, + ATH9K_INT_GLOBAL = 0x80000000, + ATH9K_INT_BMISC = ATH9K_INT_TIM | + ATH9K_INT_DTIM | + ATH9K_INT_DTIMSYNC | + ATH9K_INT_CABEND, + ATH9K_INT_COMMON = ATH9K_INT_RXNOFRM | + ATH9K_INT_RXDESC | + ATH9K_INT_RXEOL | + ATH9K_INT_RXORN | + ATH9K_INT_TXURN | + ATH9K_INT_TXDESC | + ATH9K_INT_MIB | + ATH9K_INT_RXPHY | + ATH9K_INT_RXKCM | + ATH9K_INT_SWBA | + 
ATH9K_INT_BMISS | + ATH9K_INT_GPIO, + ATH9K_INT_NOCARD = 0xffffffff +}; + +struct ath9k_rate_table { + int rateCount; + u8 rateCodeToIndex[256]; + struct { + u8 valid; + u8 phy; + u32 rateKbps; + u8 rateCode; + u8 shortPreamble; + u8 dot11Rate; + u8 controlRate; + u16 lpAckDuration; + u16 spAckDuration; + } info[32]; +}; + +#define ATH9K_RATESERIES_RTS_CTS 0x0001 +#define ATH9K_RATESERIES_2040 0x0002 +#define ATH9K_RATESERIES_HALFGI 0x0004 + +struct ath9k_11n_rate_series { + u32 Tries; + u32 Rate; + u32 PktDuration; + u32 ChSel; + u32 RateFlags; +}; + +#define CHANNEL_CW_INT 0x00002 +#define CHANNEL_CCK 0x00020 +#define CHANNEL_OFDM 0x00040 +#define CHANNEL_2GHZ 0x00080 +#define CHANNEL_5GHZ 0x00100 +#define CHANNEL_PASSIVE 0x00200 +#define CHANNEL_DYN 0x00400 +#define CHANNEL_HALF 0x04000 +#define CHANNEL_QUARTER 0x08000 +#define CHANNEL_HT20 0x10000 +#define CHANNEL_HT40PLUS 0x20000 +#define CHANNEL_HT40MINUS 0x40000 + +#define CHANNEL_INTERFERENCE 0x01 +#define CHANNEL_DFS 0x02 +#define CHANNEL_4MS_LIMIT 0x04 +#define CHANNEL_DFS_CLEAR 0x08 +#define CHANNEL_DISALLOW_ADHOC 0x10 +#define CHANNEL_PER_11D_ADHOC 0x20 + +#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM) +#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK) +#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM) +#define CHANNEL_G_HT20 (CHANNEL_2GHZ|CHANNEL_HT20) +#define CHANNEL_A_HT20 (CHANNEL_5GHZ|CHANNEL_HT20) +#define CHANNEL_G_HT40PLUS (CHANNEL_2GHZ|CHANNEL_HT40PLUS) +#define CHANNEL_G_HT40MINUS (CHANNEL_2GHZ|CHANNEL_HT40MINUS) +#define CHANNEL_A_HT40PLUS (CHANNEL_5GHZ|CHANNEL_HT40PLUS) +#define CHANNEL_A_HT40MINUS (CHANNEL_5GHZ|CHANNEL_HT40MINUS) +#define CHANNEL_ALL \ + (CHANNEL_OFDM| \ + CHANNEL_CCK| \ + CHANNEL_2GHZ | \ + CHANNEL_5GHZ | \ + CHANNEL_HT20 | \ + CHANNEL_HT40PLUS | \ + CHANNEL_HT40MINUS) + +struct ath9k_channel { + u16 channel; + u32 channelFlags; + u8 privFlags; + int8_t maxRegTxPower; + int8_t maxTxPower; + int8_t minTxPower; + u32 chanmode; + int32_t CalValid; + bool oneTimeCalsDone; + int8_t iCoff; + int8_t qCoff; + int16_t rawNoiseFloor; + int8_t antennaMax; + u32 regDmnFlags; + u32 conformanceTestLimit[3]; /* 0:11a, 1: 11b, 2:11g */ +#ifdef ATH_NF_PER_CHAN + struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; +#endif +}; + +#define IS_CHAN_A(_c) ((((_c)->channelFlags & CHANNEL_A) == CHANNEL_A) || \ + (((_c)->channelFlags & CHANNEL_A_HT20) == CHANNEL_A_HT20) || \ + (((_c)->channelFlags & CHANNEL_A_HT40PLUS) == CHANNEL_A_HT40PLUS) || \ + (((_c)->channelFlags & CHANNEL_A_HT40MINUS) == CHANNEL_A_HT40MINUS)) +#define IS_CHAN_B(_c) (((_c)->channelFlags & CHANNEL_B) == CHANNEL_B) +#define IS_CHAN_G(_c) ((((_c)->channelFlags & (CHANNEL_G)) == CHANNEL_G) || \ + (((_c)->channelFlags & CHANNEL_G_HT20) == CHANNEL_G_HT20) || \ + (((_c)->channelFlags & CHANNEL_G_HT40PLUS) == CHANNEL_G_HT40PLUS) || \ + (((_c)->channelFlags & CHANNEL_G_HT40MINUS) == CHANNEL_G_HT40MINUS)) +#define IS_CHAN_CCK(_c) (((_c)->channelFlags & CHANNEL_CCK) != 0) +#define IS_CHAN_OFDM(_c) (((_c)->channelFlags & CHANNEL_OFDM) != 0) +#define IS_CHAN_5GHZ(_c) (((_c)->channelFlags & CHANNEL_5GHZ) != 0) +#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0) +#define IS_CHAN_PASSIVE(_c) (((_c)->channelFlags & CHANNEL_PASSIVE) != 0) +#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0) +#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0) + +/* These macros check chanmode and not channelFlags */ +#define IS_CHAN_HT20(_c) (((_c)->chanmode == CHANNEL_A_HT20) || \ + ((_c)->chanmode == CHANNEL_G_HT20)) +#define 
IS_CHAN_HT40(_c) (((_c)->chanmode == CHANNEL_A_HT40PLUS) || \ + ((_c)->chanmode == CHANNEL_A_HT40MINUS) || \ + ((_c)->chanmode == CHANNEL_G_HT40PLUS) || \ + ((_c)->chanmode == CHANNEL_G_HT40MINUS)) +#define IS_CHAN_HT(_c) (IS_CHAN_HT20((_c)) || IS_CHAN_HT40((_c))) + +#define IS_CHAN_IN_PUBLIC_SAFETY_BAND(_c) ((_c) > 4940 && (_c) < 4990) +#define IS_CHAN_A_5MHZ_SPACED(_c) \ + ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \ + (((_c)->channel % 20) != 0) && \ + (((_c)->channel % 10) != 0)) + +struct ath9k_keyval { + u8 kv_type; + u8 kv_pad; + u16 kv_len; + u8 kv_val[16]; + u8 kv_mic[8]; + u8 kv_txmic[8]; +}; + +enum ath9k_key_type { + ATH9K_KEY_TYPE_CLEAR, + ATH9K_KEY_TYPE_WEP, + ATH9K_KEY_TYPE_AES, + ATH9K_KEY_TYPE_TKIP, +}; + +enum ath9k_cipher { + ATH9K_CIPHER_WEP = 0, + ATH9K_CIPHER_AES_OCB = 1, + ATH9K_CIPHER_AES_CCM = 2, + ATH9K_CIPHER_CKIP = 3, + ATH9K_CIPHER_TKIP = 4, + ATH9K_CIPHER_CLR = 5, + ATH9K_CIPHER_MIC = 127 +}; + +#define AR_EEPROM_EEPCAP_COMPRESS_DIS 0x0001 +#define AR_EEPROM_EEPCAP_AES_DIS 0x0002 +#define AR_EEPROM_EEPCAP_FASTFRAME_DIS 0x0004 +#define AR_EEPROM_EEPCAP_BURST_DIS 0x0008 +#define AR_EEPROM_EEPCAP_MAXQCU 0x01F0 +#define AR_EEPROM_EEPCAP_MAXQCU_S 4 +#define AR_EEPROM_EEPCAP_HEAVY_CLIP_EN 0x0200 +#define AR_EEPROM_EEPCAP_KC_ENTRIES 0xF000 +#define AR_EEPROM_EEPCAP_KC_ENTRIES_S 12 + +#define AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040 +#define AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080 +#define AR_EEPROM_EEREGCAP_EN_KK_U2 0x0100 +#define AR_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200 +#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400 +#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800 + +#define AR_EEPROM_EEREGCAP_EN_KK_U1_ODD_PRE4_0 0x4000 +#define AR_EEPROM_EEREGCAP_EN_KK_NEW_11A_PRE4_0 0x8000 + +#define SD_NO_CTL 0xE0 +#define NO_CTL 0xff +#define CTL_MODE_M 7 +#define CTL_11A 0 +#define CTL_11B 1 +#define CTL_11G 2 +#define CTL_2GHT20 5 +#define CTL_5GHT20 6 +#define CTL_2GHT40 7 +#define CTL_5GHT40 8 + +#define AR_EEPROM_MAC(i) (0x1d+(i)) +#define EEP_SCALE 100 +#define EEP_DELTA 10 + +#define AR_EEPROM_RFSILENT_GPIO_SEL 0x001c +#define AR_EEPROM_RFSILENT_GPIO_SEL_S 2 +#define AR_EEPROM_RFSILENT_POLARITY 0x0002 +#define AR_EEPROM_RFSILENT_POLARITY_S 1 + +#define CTRY_DEBUG 0x1ff +#define CTRY_DEFAULT 0 + +enum reg_ext_bitmap { + REG_EXT_JAPAN_MIDBAND = 1, + REG_EXT_FCC_DFS_HT40 = 2, + REG_EXT_JAPAN_NONDFS_HT40 = 3, + REG_EXT_JAPAN_DFS_HT40 = 4 +}; + +struct ath9k_country_entry { + u16 countryCode; + u16 regDmnEnum; + u16 regDmn5G; + u16 regDmn2G; + u8 isMultidomain; + u8 iso[3]; +}; + +#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg) +#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg) + +#define SM(_v, _f) (((_v) << _f##_S) & _f) +#define MS(_v, _f) (((_v) & _f) >> _f##_S) +#define REG_RMW(_a, _r, _set, _clr) \ + REG_WRITE(_a, _r, (REG_READ(_a, _r) & ~(_clr)) | (_set)) +#define REG_RMW_FIELD(_a, _r, _f, _v) \ + REG_WRITE(_a, _r, \ + (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f)) +#define REG_SET_BIT(_a, _r, _f) \ + REG_WRITE(_a, _r, REG_READ(_a, _r) | _f) +#define REG_CLR_BIT(_a, _r, _f) \ + REG_WRITE(_a, _r, REG_READ(_a, _r) & ~_f) + +#define ATH9K_COMP_BUF_MAX_SIZE 9216 +#define ATH9K_COMP_BUF_ALIGN_SIZE 512 + +#define ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS 0x00000001 + +#define INIT_AIFS 2 +#define INIT_CWMIN 15 +#define INIT_CWMIN_11B 31 +#define INIT_CWMAX 1023 +#define INIT_SH_RETRY 10 +#define INIT_LG_RETRY 10 +#define INIT_SSH_RETRY 32 +#define INIT_SLG_RETRY 32 + +#define WLAN_CTRL_FRAME_SIZE (2+2+6+4) + +#define ATH_AMPDU_LIMIT_MAX (64 * 1024 
- 1) +#define ATH_AMPDU_LIMIT_DEFAULT ATH_AMPDU_LIMIT_MAX + +#define IEEE80211_WEP_IVLEN 3 +#define IEEE80211_WEP_KIDLEN 1 +#define IEEE80211_WEP_CRCLEN 4 +#define IEEE80211_MAX_MPDU_LEN (3840 + FCS_LEN + \ + (IEEE80211_WEP_IVLEN + \ + IEEE80211_WEP_KIDLEN + \ + IEEE80211_WEP_CRCLEN)) +#define IEEE80211_MAX_LEN (2300 + FCS_LEN + \ + (IEEE80211_WEP_IVLEN + \ + IEEE80211_WEP_KIDLEN + \ + IEEE80211_WEP_CRCLEN)) + +#define MAX_REG_ADD_COUNT 129 +#define MAX_RATE_POWER 63 + +enum ath9k_power_mode { + ATH9K_PM_AWAKE = 0, + ATH9K_PM_FULL_SLEEP, + ATH9K_PM_NETWORK_SLEEP, + ATH9K_PM_UNDEFINED +}; + +struct ath9k_mib_stats { + u32 ackrcv_bad; + u32 rts_bad; + u32 rts_good; + u32 fcs_bad; + u32 beacons; +}; + +enum ath9k_ant_setting { + ATH9K_ANT_VARIABLE = 0, + ATH9K_ANT_FIXED_A, + ATH9K_ANT_FIXED_B +}; + +enum ath9k_opmode { + ATH9K_M_STA = 1, + ATH9K_M_IBSS = 0, + ATH9K_M_HOSTAP = 6, + ATH9K_M_MONITOR = 8 +}; + +#define ATH9K_SLOT_TIME_6 6 +#define ATH9K_SLOT_TIME_9 9 +#define ATH9K_SLOT_TIME_20 20 + +enum ath9k_ht_macmode { + ATH9K_HT_MACMODE_20 = 0, + ATH9K_HT_MACMODE_2040 = 1, +}; + +enum ath9k_ht_extprotspacing { + ATH9K_HT_EXTPROTSPACING_20 = 0, + ATH9K_HT_EXTPROTSPACING_25 = 1, +}; + +struct ath9k_ht_cwm { + enum ath9k_ht_macmode ht_macmode; + enum ath9k_ht_extprotspacing ht_extprotspacing; +}; + +enum ath9k_ani_cmd { + ATH9K_ANI_PRESENT = 0x1, + ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4, + ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8, + ATH9K_ANI_FIRSTEP_LEVEL = 0x10, + ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20, + ATH9K_ANI_MODE = 0x40, + ATH9K_ANI_PHYERR_RESET = 0x80, + ATH9K_ANI_ALL = 0xff +}; + +enum phytype { + PHY_DS, + PHY_FH, + PHY_OFDM, + PHY_HT, +}; +#define PHY_CCK PHY_DS + +enum start_adhoc_option { + START_ADHOC_NO_11A, + START_ADHOC_PER_11D, + START_ADHOC_IN_11A, + START_ADHOC_IN_11B, +}; + +enum ath9k_tp_scale { + ATH9K_TP_SCALE_MAX = 0, + ATH9K_TP_SCALE_50, + ATH9K_TP_SCALE_25, + ATH9K_TP_SCALE_12, + ATH9K_TP_SCALE_MIN +}; + +enum ser_reg_mode { + SER_REG_MODE_OFF = 0, + SER_REG_MODE_ON = 1, + SER_REG_MODE_AUTO = 2, +}; + +#define AR_PHY_CCA_MAX_GOOD_VALUE -85 +#define AR_PHY_CCA_MAX_HIGH_VALUE -62 +#define AR_PHY_CCA_MIN_BAD_VALUE -121 +#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3 +#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5 + +#define ATH9K_NF_CAL_HIST_MAX 5 +#define NUM_NF_READINGS 6 + +struct ath9k_nfcal_hist { + int16_t nfCalBuffer[ATH9K_NF_CAL_HIST_MAX]; + u8 currIndex; + int16_t privNF; + u8 invalidNFcount; +}; + +struct ath9k_beacon_state { + u32 bs_nexttbtt; + u32 bs_nextdtim; + u32 bs_intval; +#define ATH9K_BEACON_PERIOD 0x0000ffff +#define ATH9K_BEACON_ENA 0x00800000 +#define ATH9K_BEACON_RESET_TSF 0x01000000 + u32 bs_dtimperiod; + u16 bs_cfpperiod; + u16 bs_cfpmaxduration; + u32 bs_cfpnext; + u16 bs_timoffset; + u16 bs_bmissthreshold; + u32 bs_sleepduration; +}; + +struct ath9k_node_stats { + u32 ns_avgbrssi; + u32 ns_avgrssi; + u32 ns_avgtxrssi; + u32 ns_avgtxrate; +}; + +#define ATH9K_RSSI_EP_MULTIPLIER (1<<7) + +enum ath9k_gpio_output_mux_type { + ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT, + ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED, + ATH9K_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED, + ATH9K_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED, + ATH9K_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED, + ATH9K_GPIO_OUTPUT_MUX_NUM_ENTRIES +}; + +enum { + ATH9K_RESET_POWER_ON, + ATH9K_RESET_WARM, + ATH9K_RESET_COLD, +}; + +#define AH_USE_EEPROM 0x1 + +struct ath_hal { + u32 ah_magic; + u16 ah_devid; + u16 ah_subvendorid; + struct ath_softc *ah_sc; + void __iomem *ah_sh; + u16 
ah_countryCode; + u32 ah_macVersion; + u16 ah_macRev; + u16 ah_phyRev; + u16 ah_analog5GhzRev; + u16 ah_analog2GhzRev; + u8 ah_decompMask[ATH9K_DECOMP_MASK_SIZE]; + u32 ah_flags; + enum ath9k_opmode ah_opmode; + struct ath9k_ops_config ah_config; + struct ath9k_hw_capabilities ah_caps; + int16_t ah_powerLimit; + u16 ah_maxPowerLevel; + u32 ah_tpScale; + u16 ah_currentRD; + u16 ah_currentRDExt; + u16 ah_currentRDInUse; + u16 ah_currentRD5G; + u16 ah_currentRD2G; + char ah_iso[4]; + enum start_adhoc_option ah_adHocMode; + bool ah_commonMode; + struct ath9k_channel ah_channels[150]; + u32 ah_nchan; + struct ath9k_channel *ah_curchan; + u16 ah_rfsilent; + bool ah_rfkillEnabled; + bool ah_isPciExpress; + u16 ah_txTrigLevel; +#ifndef ATH_NF_PER_CHAN + struct ath9k_nfcal_hist nfCalHist[NUM_NF_READINGS]; +#endif +}; + +struct chan_centers { + u16 synth_center; + u16 ctl_center; + u16 ext_center; +}; + +int ath_hal_getcapability(struct ath_hal *ah, + enum ath9k_capability_type type, + u32 capability, + u32 *result); +const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah, + u32 mode); +void ath9k_hw_detach(struct ath_hal *ah); +struct ath_hal *ath9k_hw_attach(u16 devid, + struct ath_softc *sc, + void __iomem *mem, + int *error); +bool ath9k_regd_init_channels(struct ath_hal *ah, + u32 maxchans, u32 *nchans, + u8 *regclassids, + u32 maxregids, u32 *nregids, + u16 cc, + bool enableOutdoor, + bool enableExtendedChannels); +u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags); +enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, + enum ath9k_int ints); +bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode, + struct ath9k_channel *chan, + enum ath9k_ht_macmode macmode, + u8 txchainmask, u8 rxchainmask, + enum ath9k_ht_extprotspacing extprotspacing, + bool bChannelChange, + int *status); +bool ath9k_hw_phy_disable(struct ath_hal *ah); +void ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan, + bool *isCalDone); +void ath9k_hw_ani_monitor(struct ath_hal *ah, + const struct ath9k_node_stats *stats, + struct ath9k_channel *chan); +bool ath9k_hw_calibrate(struct ath_hal *ah, + struct ath9k_channel *chan, + u8 rxchainmask, + bool longcal, + bool *isCalDone); +int16_t ath9k_hw_getchan_noise(struct ath_hal *ah, + struct ath9k_channel *chan); +void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, + u16 assocId); +void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits); +void ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, + u16 assocId); +bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q); +void ath9k_hw_reset_tsf(struct ath_hal *ah); +bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry); +bool ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry, + const u8 *mac); +bool ath9k_hw_set_keycache_entry(struct ath_hal *ah, + u16 entry, + const struct ath9k_keyval *k, + const u8 *mac, + int xorKey); +bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, + u32 setting); +void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore); +bool ath9k_hw_intrpend(struct ath_hal *ah); +bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked); +bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah, + bool bIncTrigLevel); +void ath9k_hw_procmibevent(struct ath_hal *ah, + const struct ath9k_node_stats *stats); +bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set); +void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode); +bool ath9k_hw_phycounters(struct ath_hal *ah); +bool ath9k_hw_keyreset(struct ath_hal *ah, u16 
entry); +bool ath9k_hw_getcapability(struct ath_hal *ah, + enum ath9k_capability_type type, + u32 capability, + u32 *result); +bool ath9k_hw_setcapability(struct ath_hal *ah, + enum ath9k_capability_type type, + u32 capability, + u32 setting, + int *status); +u32 ath9k_hw_getdefantenna(struct ath_hal *ah); +void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac); +void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask); +bool ath9k_hw_setbssidmask(struct ath_hal *ah, + const u8 *mask); +bool ath9k_hw_setpower(struct ath_hal *ah, + enum ath9k_power_mode mode); +enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah); +u64 ath9k_hw_gettsf64(struct ath_hal *ah); +u32 ath9k_hw_getdefantenna(struct ath_hal *ah); +bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us); +bool ath9k_hw_setantennaswitch(struct ath_hal *ah, + enum ath9k_ant_setting settings, + struct ath9k_channel *chan, + u8 *tx_chainmask, + u8 *rx_chainmask, + u8 *antenna_cfgd); +void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna); +int ath9k_hw_select_antconfig(struct ath_hal *ah, + u32 cfg); +bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, + u32 txdp); +bool ath9k_hw_txstart(struct ath_hal *ah, u32 q); +u16 ath9k_hw_computetxtime(struct ath_hal *ah, + const struct ath9k_rate_table *rates, + u32 frameLen, u16 rateix, + bool shortPreamble); +void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds, + struct ath_desc *lastds, + u32 durUpdateEn, u32 rtsctsRate, + u32 rtsctsDuration, + struct ath9k_11n_rate_series series[], + u32 nseries, u32 flags); +void ath9k_hw_set11n_burstduration(struct ath_hal *ah, + struct ath_desc *ds, + u32 burstDuration); +void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds); +u32 ath9k_hw_reverse_bits(u32 val, u32 n); +bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q); +u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan); +u32 ath9k_regd_get_antenna_allowed(struct ath_hal *ah, + struct ath9k_channel *chan); +u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags); +bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q, + struct ath9k_tx_queue_info *qinfo); +bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q, + const struct ath9k_tx_queue_info *qinfo); +struct ath9k_channel *ath9k_regd_check_channel(struct ath_hal *ah, + const struct ath9k_channel *c); +void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds, + u32 pktLen, enum ath9k_pkt_type type, + u32 txPower, u32 keyIx, + enum ath9k_key_type keyType, u32 flags); +bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds, + u32 segLen, bool firstSeg, + bool lastSeg, + const struct ath_desc *ds0); +u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah, + u32 *rxc_pcnt, + u32 *rxf_pcnt, + u32 *txf_pcnt); +void ath9k_hw_dmaRegDump(struct ath_hal *ah); +void ath9k_hw_beaconinit(struct ath_hal *ah, + u32 next_beacon, u32 beacon_period); +void ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah, + const struct ath9k_beacon_state *bs); +bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds, + u32 size, u32 flags); +void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp); +void ath9k_hw_rxena(struct ath_hal *ah); +void ath9k_hw_setopmode(struct ath_hal *ah); +bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac); +void ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0, + u32 filter1); +u32 ath9k_hw_getrxfilter(struct ath_hal *ah); +void ath9k_hw_startpcureceive(struct ath_hal *ah); +void ath9k_hw_stoppcurecv(struct ath_hal *ah); +bool ath9k_hw_stopdmarecv(struct 
ath_hal *ah); +int ath9k_hw_rxprocdesc(struct ath_hal *ah, + struct ath_desc *ds, u32 pa, + struct ath_desc *nds, u64 tsf); +u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q); +int ath9k_hw_txprocdesc(struct ath_hal *ah, + struct ath_desc *ds); +void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds, + u32 numDelims); +void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds, + u32 aggrLen); +void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds); +bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q); +void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs); +void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds); +void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, + struct ath_desc *ds, u32 vmf); +bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit); +bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah); +int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type, + const struct ath9k_tx_queue_info *qinfo); +u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q); +const char *ath9k_hw_probe(u16 vendorid, u16 devid); +bool ath9k_hw_disable(struct ath_hal *ah); +void ath9k_hw_rfdetach(struct ath_hal *ah); +void ath9k_hw_get_channel_centers(struct ath_hal *ah, + struct ath9k_channel *chan, + struct chan_centers *centers); +bool ath9k_get_channel_edges(struct ath_hal *ah, + u16 flags, u16 *low, + u16 *high); +#endif diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c new file mode 100644 index 0000000..caf5694 --- /dev/null +++ b/drivers/net/wireless/ath9k/beacon.c @@ -0,0 +1,979 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + /* Implementation of beacon processing. */ + +#include <asm/unaligned.h> +#include "core.h" + +/* + * Configure parameters for the beacon queue + * + * This function will modify certain transmit queue properties depending on + * the operating mode of the station (AP or AdHoc). Parameters are AIFS + * settings and channel width min/max +*/ + +static int ath_beaconq_config(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath9k_tx_queue_info qi; + + ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi); + if (sc->sc_opmode == ATH9K_M_HOSTAP) { + /* Always burst out beacon and CAB traffic. */ + qi.tqi_aifs = 1; + qi.tqi_cwmin = 0; + qi.tqi_cwmax = 0; + } else { + /* Adhoc mode; important thing is to use 2x cwmin. 
*/ + qi.tqi_aifs = sc->sc_beacon_qi.tqi_aifs; + qi.tqi_cwmin = 2*sc->sc_beacon_qi.tqi_cwmin; + qi.tqi_cwmax = sc->sc_beacon_qi.tqi_cwmax; + } + + if (!ath9k_hw_set_txq_props(ah, sc->sc_bhalq, &qi)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to update h/w beacon queue parameters\n", + __func__); + return 0; + } else { + ath9k_hw_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ + return 1; + } +} + +/* + * Setup the beacon frame for transmit. + * + * Associates the beacon frame buffer with a transmit descriptor. Will set + * up all required antenna switch parameters, rate codes, and channel flags. + * Beacons are always sent out at the lowest rate, and are not retried. +*/ + +static void ath_beacon_setup(struct ath_softc *sc, + struct ath_vap *avp, struct ath_buf *bf) +{ + struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; + struct ath_hal *ah = sc->sc_ah; + struct ath_desc *ds; + int flags, antenna; + const struct ath9k_rate_table *rt; + u8 rix, rate; + int ctsrate = 0; + int ctsduration = 0; + struct ath9k_11n_rate_series series[4]; + + DPRINTF(sc, ATH_DBG_BEACON, "%s: m %p len %u\n", + __func__, skb, skb->len); + + /* setup descriptors */ + ds = bf->bf_desc; + + flags = ATH9K_TXDESC_NOACK; + + if (sc->sc_opmode == ATH9K_M_IBSS && + (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { + ds->ds_link = bf->bf_daddr; /* self-linked */ + flags |= ATH9K_TXDESC_VEOL; + /* Let hardware handle antenna switching. */ + antenna = 0; + } else { + ds->ds_link = 0; + /* + * Switch antenna every beacon. + * Should only switch every beacon period, not for every + * SWBA's + * XXX assumes two antenna + */ + antenna = ((sc->ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1); + } + + ds->ds_data = bf->bf_buf_addr; + + /* + * Calculate rate code. + * XXX everything at min xmit rate + */ + rix = 0; + rt = sc->sc_currates; + rate = rt->info[rix].rateCode; + if (sc->sc_flags & ATH_PREAMBLE_SHORT) + rate |= rt->info[rix].shortPreamble; + + ath9k_hw_set11n_txdesc(ah, ds + , skb->len + FCS_LEN /* frame length */ + , ATH9K_PKT_TYPE_BEACON /* Atheros packet type */ + , avp->av_btxctl.txpower /* txpower XXX */ + , ATH9K_TXKEYIX_INVALID /* no encryption */ + , ATH9K_KEY_TYPE_CLEAR /* no encryption */ + , flags /* no ack, veol for beacons */ + ); + + /* NB: beacon's BufLen must be a multiple of 4 bytes */ + ath9k_hw_filltxdesc(ah, ds + , roundup(skb->len, 4) /* buffer length */ + , true /* first segment */ + , true /* last segment */ + , ds /* first descriptor */ + ); + + memzero(series, sizeof(struct ath9k_11n_rate_series) * 4); + series[0].Tries = 1; + series[0].Rate = rate; + series[0].ChSel = sc->sc_tx_chainmask; + series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0; + ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, + ctsrate, ctsduration, series, 4, 0); +} + +/* Move everything from the vap's mcast queue to the hardware cab queue. + * Caller must hold mcasq lock and cabq lock + * XXX MORE_DATA bit? 
+ */ +static void empty_mcastq_into_cabq(struct ath_hal *ah, + struct ath_txq *mcastq, struct ath_txq *cabq) +{ + struct ath_buf *bfmcast; + + BUG_ON(list_empty(&mcastq->axq_q)); + + bfmcast = list_first_entry(&mcastq->axq_q, struct ath_buf, list); + + /* link the descriptors */ + if (!cabq->axq_link) + ath9k_hw_puttxbuf(ah, cabq->axq_qnum, bfmcast->bf_daddr); + else + *cabq->axq_link = bfmcast->bf_daddr; + + /* append the private vap mcast list to the cabq */ + + cabq->axq_depth += mcastq->axq_depth; + cabq->axq_totalqueued += mcastq->axq_totalqueued; + cabq->axq_linkbuf = mcastq->axq_linkbuf; + cabq->axq_link = mcastq->axq_link; + list_splice_tail_init(&mcastq->axq_q, &cabq->axq_q); + mcastq->axq_depth = 0; + mcastq->axq_totalqueued = 0; + mcastq->axq_linkbuf = NULL; + mcastq->axq_link = NULL; +} + +/* This is only run at DTIM. We move everything from the vap's mcast queue + * to the hardware cab queue. Caller must hold the mcastq lock. */ +static void trigger_mcastq(struct ath_hal *ah, + struct ath_txq *mcastq, struct ath_txq *cabq) +{ + spin_lock_bh(&cabq->axq_lock); + + if (!list_empty(&mcastq->axq_q)) + empty_mcastq_into_cabq(ah, mcastq, cabq); + + /* cabq is gated by beacon so it is safe to start here */ + if (!list_empty(&cabq->axq_q)) + ath9k_hw_txstart(ah, cabq->axq_qnum); + + spin_unlock_bh(&cabq->axq_lock); +} + +/* + * Generate beacon frame and queue cab data for a vap. + * + * Updates the contents of the beacon frame. It is assumed that the buffer for + * the beacon frame has been allocated in the ATH object, and simply needs to + * be filled for this cycle. Also, any CAB (crap after beacon?) traffic will + * be added to the beacon frame at this point. +*/ +static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath_buf *bf; + struct ath_vap *avp; + struct sk_buff *skb; + int cabq_depth; + int mcastq_depth; + int is_beacon_dtim = 0; + unsigned int curlen; + struct ath_txq *cabq; + struct ath_txq *mcastq; + avp = sc->sc_vaps[if_id]; + + mcastq = &avp->av_mcastq; + cabq = sc->sc_cabq; + + ASSERT(avp); + + if (avp->av_bcbuf == NULL) { + DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n", + __func__, avp, avp->av_bcbuf); + return NULL; + } + bf = avp->av_bcbuf; + skb = (struct sk_buff *) bf->bf_mpdu; + + /* + * Update dynamic beacon contents. If this returns + * non-zero then we need to remap the memory because + * the beacon frame changed size (probably because + * of the TIM bitmap). + */ + curlen = skb->len; + + /* XXX: spin_lock_bh should not be used here, but sparse bitches + * otherwise. We should fix sparse :) */ + spin_lock_bh(&mcastq->axq_lock); + mcastq_depth = avp->av_mcastq.axq_depth; + + if (ath_update_beacon(sc, if_id, &avp->av_boff, skb, mcastq_depth) == + 1) { + ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE, + get_dma_mem_context(bf, bf_dmacontext)); + bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE, + get_dma_mem_context(bf, bf_dmacontext)); + } else { + pci_dma_sync_single_for_cpu(sc->pdev, + bf->bf_buf_addr, + skb_tailroom(skb), + PCI_DMA_TODEVICE); + } + + /* + * if the CABQ traffic from previous DTIM is pending and the current + * beacon is also a DTIM. + * 1) if there is only one vap let the cab traffic continue. + * 2) if there are more than one vap and we are using staggered + * beacons, then drain the cabq by dropping all the frames in + * the cabq so that the current vaps cab traffic can be scheduled. 
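 * For example (illustrative): with two vaps on staggered beacons, if
 * vap 0's buffered multicast frames from the previous DTIM are still
 * sitting on the cabq when vap 1's DTIM beacon is generated, the drain
 * below discards them so vap 1's CAB traffic is not queued behind stale
 * frames belonging to the other BSS.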
+ */ + spin_lock_bh(&cabq->axq_lock); + cabq_depth = cabq->axq_depth; + spin_unlock_bh(&cabq->axq_lock); + + is_beacon_dtim = avp->av_boff.bo_tim[4] & 1; + + if (mcastq_depth && is_beacon_dtim && cabq_depth) { + /* + * Unlock the cabq lock as ath_tx_draintxq acquires + * the lock again which is a common function and that + * acquires txq lock inside. + */ + if (sc->sc_nvaps > 1) { + ath_tx_draintxq(sc, cabq, false); + DPRINTF(sc, ATH_DBG_BEACON, + "%s: flush previous cabq traffic\n", __func__); + } + } + + /* Construct tx descriptor. */ + ath_beacon_setup(sc, avp, bf); + + /* + * Enable the CAB queue before the beacon queue to + * insure cab frames are triggered by this beacon. + */ + if (is_beacon_dtim) + trigger_mcastq(ah, mcastq, cabq); + + spin_unlock_bh(&mcastq->axq_lock); + return bf; +} + +/* + * Startup beacon transmission for adhoc mode when they are sent entirely + * by the hardware using the self-linked descriptor + veol trick. +*/ + +static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath_buf *bf; + struct ath_vap *avp; + struct sk_buff *skb; + + avp = sc->sc_vaps[if_id]; + ASSERT(avp); + + if (avp->av_bcbuf == NULL) { + DPRINTF(sc, ATH_DBG_BEACON, "%s: avp=%p av_bcbuf=%p\n", + __func__, avp, avp != NULL ? avp->av_bcbuf : NULL); + return; + } + bf = avp->av_bcbuf; + skb = (struct sk_buff *) bf->bf_mpdu; + + /* Construct tx descriptor. */ + ath_beacon_setup(sc, avp, bf); + + /* NB: caller is known to have already stopped tx dma */ + ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); + ath9k_hw_txstart(ah, sc->sc_bhalq); + DPRINTF(sc, ATH_DBG_BEACON, "%s: TXDP%u = %llx (%p)\n", __func__, + sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc); +} + +/* + * Setup a h/w transmit queue for beacons. + * + * This function allocates an information structure (struct ath9k_txq_info) + * on the stack, sets some specific parameters (zero out channel width + * min/max, and enable aifs). The info structure does not need to be + * persistant. +*/ + +int ath_beaconq_setup(struct ath_hal *ah) +{ + struct ath9k_tx_queue_info qi; + + memzero(&qi, sizeof(qi)); + qi.tqi_aifs = 1; + qi.tqi_cwmin = 0; + qi.tqi_cwmax = 0; + /* NB: don't enable any interrupts */ + return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi); +} + + +/* + * Allocate and setup an initial beacon frame. + * + * Allocate a beacon state variable for a specific VAP instance created on + * the ATH interface. This routine also calculates the beacon "slot" for + * staggared beacons in the mBSSID case. +*/ + +int ath_beacon_alloc(struct ath_softc *sc, int if_id) +{ + struct ath_vap *avp; + struct ieee80211_hdr *wh; + struct ath_buf *bf; + struct sk_buff *skb; + + avp = sc->sc_vaps[if_id]; + ASSERT(avp); + + /* Allocate a beacon descriptor if we haven't done so. */ + if (!avp->av_bcbuf) { + /* + * Allocate beacon state for hostap/ibss. We know + * a buffer is available. + */ + + avp->av_bcbuf = list_first_entry(&sc->sc_bbuf, + struct ath_buf, list); + list_del(&avp->av_bcbuf->list); + + if (sc->sc_opmode == ATH9K_M_HOSTAP || + !(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) { + int slot; + /* + * Assign the vap to a beacon xmit slot. As + * above, this cannot fail to find one. 
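 * Worked example (illustrative, assuming ATH_BCBUF is 4, a value not
 * shown in this hunk): with all slots free, the scan below sees slot 0
 * and its neighbour slot 1 both empty, so the first vap takes slot 1;
 * a second vap then skips the occupied slot and settles on slot 3,
 * leaving the two beacons spaced half a beacon interval apart.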
+ */ + avp->av_bslot = 0; + for (slot = 0; slot < ATH_BCBUF; slot++) + if (sc->sc_bslot[slot] == ATH_IF_ID_ANY) { + /* + * XXX hack, space out slots to better + * deal with misses + */ + if (slot+1 < ATH_BCBUF && + sc->sc_bslot[slot+1] == + ATH_IF_ID_ANY) { + avp->av_bslot = slot+1; + break; + } + avp->av_bslot = slot; + /* NB: keep looking for a double slot */ + } + BUG_ON(sc->sc_bslot[avp->av_bslot] != ATH_IF_ID_ANY); + sc->sc_bslot[avp->av_bslot] = if_id; + sc->sc_nbcnvaps++; + } + } + + /* release the previous beacon frame , if it already exists. */ + bf = avp->av_bcbuf; + if (bf->bf_mpdu != NULL) { + skb = (struct sk_buff *)bf->bf_mpdu; + ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE, + get_dma_mem_context(bf, bf_dmacontext)); + dev_kfree_skb_any(skb); + bf->bf_mpdu = NULL; + } + + /* + * NB: the beacon data buffer must be 32-bit aligned; + * we assume the wbuf routines will return us something + * with this alignment (perhaps should assert). + * FIXME: Fill avp->av_boff.bo_tim,avp->av_btxctl.txpower and + * avp->av_btxctl.shortPreamble + */ + skb = ieee80211_beacon_get(sc->hw, avp->av_if_data); + if (skb == NULL) { + DPRINTF(sc, ATH_DBG_BEACON, "%s: cannot get skb\n", + __func__); + return -ENOMEM; + } + + /* + * Calculate a TSF adjustment factor required for + * staggered beacons. Note that we assume the format + * of the beacon frame leaves the tstamp field immediately + * following the header. + */ + if (avp->av_bslot > 0) { + u64 tsfadjust; + __le64 val; + int intval; + + /* FIXME: Use default value for now: Sujith */ + + intval = ATH_DEFAULT_BINTVAL; + + /* + * The beacon interval is in TU's; the TSF in usecs. + * We figure out how many TU's to add to align the + * timestamp then convert to TSF units and handle + * byte swapping before writing it in the frame. + * The hardware will then add this each time a beacon + * frame is sent. Note that we align vap's 1..N + * and leave vap 0 untouched. This means vap 0 + * has a timestamp in one beacon interval while the + * others get a timestamp aligned to the next interval. + */ + tsfadjust = (intval * (ATH_BCBUF - avp->av_bslot)) / ATH_BCBUF; + val = cpu_to_le64(tsfadjust << 10); /* TU->TSF */ + + DPRINTF(sc, ATH_DBG_BEACON, + "%s: %s beacons, bslot %d intval %u tsfadjust %llu\n", + __func__, "stagger", + avp->av_bslot, intval, (unsigned long long)tsfadjust); + + wh = (struct ieee80211_hdr *)skb->data; + memcpy(&wh[1], &val, sizeof(val)); + } + + bf->bf_buf_addr = ath_skb_map_single(sc, skb, PCI_DMA_TODEVICE, + get_dma_mem_context(bf, bf_dmacontext)); + bf->bf_mpdu = skb; + + return 0; +} + +/* + * Reclaim beacon resources and return buffer to the pool. + * + * Checks the VAP to put the beacon frame buffer back to the ATH object + * queue, and de-allocates any wbuf frames that were sent as CAB traffic. +*/ + +void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp) +{ + if (avp->av_bcbuf != NULL) { + struct ath_buf *bf; + + if (avp->av_bslot != -1) { + sc->sc_bslot[avp->av_bslot] = ATH_IF_ID_ANY; + sc->sc_nbcnvaps--; + } + + bf = avp->av_bcbuf; + if (bf->bf_mpdu != NULL) { + struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; + ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE, + get_dma_mem_context(bf, bf_dmacontext)); + dev_kfree_skb_any(skb); + bf->bf_mpdu = NULL; + } + list_add_tail(&bf->list, &sc->sc_bbuf); + + avp->av_bcbuf = NULL; + } +} + +/* + * Reclaim beacon resources and return buffer to the pool. + * + * This function will free any wbuf frames that are still attached to the + * beacon buffers in the ATH object. 
Note that this does not de-allocate + * any wbuf objects that are in the transmit queue and have not yet returned + * to the ATH object. +*/ + +void ath_beacon_free(struct ath_softc *sc) +{ + struct ath_buf *bf; + + list_for_each_entry(bf, &sc->sc_bbuf, list) { + if (bf->bf_mpdu != NULL) { + struct sk_buff *skb = (struct sk_buff *) bf->bf_mpdu; + ath_skb_unmap_single(sc, skb, PCI_DMA_TODEVICE, + get_dma_mem_context(bf, bf_dmacontext)); + dev_kfree_skb_any(skb); + bf->bf_mpdu = NULL; + } + } +} + +/* + * Tasklet for Sending Beacons + * + * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame + * contents are done as needed and the slot time is also adjusted based on + * current state. + * + * This tasklet is not scheduled, it's called in ISR context. +*/ + +void ath9k_beacon_tasklet(unsigned long data) +{ +#define TSF_TO_TU(_h,_l) \ + ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) + + struct ath_softc *sc = (struct ath_softc *)data; + struct ath_hal *ah = sc->sc_ah; + struct ath_buf *bf = NULL; + int slot, if_id; + u32 bfaddr; + u32 rx_clear = 0, rx_frame = 0, tx_frame = 0; + u32 show_cycles = 0; + u32 bc = 0; /* beacon count */ + u64 tsf; + u32 tsftu; + u16 intval; + + if (sc->sc_noreset) { + show_cycles = ath9k_hw_GetMibCycleCountsPct(ah, + &rx_clear, + &rx_frame, + &tx_frame); + } + + /* + * Check if the previous beacon has gone out. If + * not don't try to post another, skip this period + * and wait for the next. Missed beacons indicate + * a problem and should not occur. If we miss too + * many consecutive beacons reset the device. + */ + if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) { + sc->sc_bmisscount++; + /* XXX: doth needs the chanchange IE countdown decremented. + * We should consider adding a mac80211 call to indicate + * a beacon miss so appropriate action could be taken + * (in that layer). + */ + if (sc->sc_bmisscount < BSTUCK_THRESH) { + if (sc->sc_noreset) { + DPRINTF(sc, ATH_DBG_BEACON, + "%s: missed %u consecutive beacons\n", + __func__, sc->sc_bmisscount); + if (show_cycles) { + /* + * Display cycle counter stats + * from HW to aide in debug of + * stickiness. + */ + DPRINTF(sc, + ATH_DBG_BEACON, + "%s: busy times: rx_clear=%d, " + "rx_frame=%d, tx_frame=%d\n", + __func__, rx_clear, rx_frame, + tx_frame); + } else { + DPRINTF(sc, + ATH_DBG_BEACON, + "%s: unable to obtain " + "busy times\n", __func__); + } + } else { + DPRINTF(sc, ATH_DBG_BEACON, + "%s: missed %u consecutive beacons\n", + __func__, sc->sc_bmisscount); + } + } else if (sc->sc_bmisscount >= BSTUCK_THRESH) { + if (sc->sc_noreset) { + if (sc->sc_bmisscount == BSTUCK_THRESH) { + DPRINTF(sc, + ATH_DBG_BEACON, + "%s: beacon is officially " + "stuck\n", __func__); + ath9k_hw_dmaRegDump(ah); + } + } else { + DPRINTF(sc, ATH_DBG_BEACON, + "%s: beacon is officially stuck\n", + __func__); + ath_bstuck_process(sc); + } + } + + return; + } + if (sc->sc_bmisscount != 0) { + if (sc->sc_noreset) { + DPRINTF(sc, + ATH_DBG_BEACON, + "%s: resume beacon xmit after %u misses\n", + __func__, sc->sc_bmisscount); + } else { + DPRINTF(sc, ATH_DBG_BEACON, + "%s: resume beacon xmit after %u misses\n", + __func__, sc->sc_bmisscount); + } + sc->sc_bmisscount = 0; + } + + /* + * Generate beacon frames. we are sending frames + * staggered so calculate the slot for this frame based + * on the tsf to safeguard against missing an swba. 
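 * TSF_TO_TU effectively shifts the 64-bit TSF (microseconds) right by
 * 10, i.e. converts it to 1024-microsecond TUs. Worked example
 * (illustrative, assuming the default beacon interval of 100 TU and
 * ATH_BCBUF of 4, neither visible here): if tsftu % intval works out to
 * 60, the slot below is (60 * 4) / 100 = 2 and the beacon is generated
 * for the vap registered in sc_bslot[(2 + 1) % 4].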
+ */ + + /* FIXME: Use default value for now - Sujith */ + intval = ATH_DEFAULT_BINTVAL; + + tsf = ath9k_hw_gettsf64(ah); + tsftu = TSF_TO_TU(tsf>>32, tsf); + slot = ((tsftu % intval) * ATH_BCBUF) / intval; + if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF]; + DPRINTF(sc, ATH_DBG_BEACON, + "%s: slot %d [tsf %llu tsftu %u intval %u] if_id %d\n", + __func__, slot, (unsigned long long) tsf, tsftu, + intval, if_id); + bfaddr = 0; + if (if_id != ATH_IF_ID_ANY) { + bf = ath_beacon_generate(sc, if_id); + if (bf != NULL) { + bfaddr = bf->bf_daddr; + bc = 1; + } + } + /* + * Handle slot time change when a non-ERP station joins/leaves + * an 11g network. The 802.11 layer notifies us via callback, + * we mark updateslot, then wait one beacon before effecting + * the change. This gives associated stations at least one + * beacon interval to note the state change. + * + * NB: The slot time change state machine is clocked according + * to whether we are bursting or staggering beacons. We + * recognize the request to update and record the current + * slot then don't transition until that slot is reached + * again. If we miss a beacon for that slot then we'll be + * slow to transition but we'll be sure at least one beacon + * interval has passed. When bursting slot is always left + * set to ATH_BCBUF so this check is a noop. + */ + /* XXX locking */ + if (sc->sc_updateslot == UPDATE) { + sc->sc_updateslot = COMMIT; /* commit next beacon */ + sc->sc_slotupdate = slot; + } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) + ath_setslottime(sc); /* commit change to hardware */ + + if (bfaddr != 0) { + /* + * Stop any current dma and put the new frame(s) on the queue. + * This should never fail since we check above that no frames + * are still pending on the queue. + */ + if (!ath9k_hw_stoptxdma(ah, sc->sc_bhalq)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: beacon queue %u did not stop?\n", + __func__, sc->sc_bhalq); + /* NB: the HAL still stops DMA, so proceed */ + } + + /* NB: cabq traffic should already be queued and primed */ + ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bfaddr); + ath9k_hw_txstart(ah, sc->sc_bhalq); + + sc->ast_be_xmit += bc; /* XXX per-vap? */ + } +#undef TSF_TO_TU +} + +/* + * Tasklet for Beacon Stuck processing + * + * Processing for Beacon Stuck. + * Basically calls the ath_internal_reset function to reset the chip. +*/ + +void ath_bstuck_process(struct ath_softc *sc) +{ + DPRINTF(sc, ATH_DBG_BEACON, + "%s: stuck beacon; resetting (bmiss count %u)\n", + __func__, sc->sc_bmisscount); + ath_internal_reset(sc); +} + +/* + * Configure the beacon and sleep timers. + * + * When operating as an AP this resets the TSF and sets + * up the hardware to notify us when we need to issue beacons. + * + * When operating in station mode this sets up the beacon + * timers according to the timestamp of the last received + * beacon and the current TSF, configures PCF and DTIM + * handling, programs the sleep registers so the hardware + * will wakeup in time to receive beacons, and configures + * the beacon miss handling so we'll receive a BMISS + * interrupt when we stop seeing beacons from the AP + * we've associated with. 
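 * Worked example for the TBTT calculation done below (illustrative,
 * assuming the default 100 TU beacon interval): if the last received
 * beacon's timestamp converts to 12345 TU, roundup(12345, 100) gives
 * nexttbtt = 12400; if no timestamp is available yet (nexttbtt == 0,
 * e.g. AP mode), the first TBTT is simply one interval away.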
+ */ + +void ath_beacon_config(struct ath_softc *sc, int if_id) +{ +#define TSF_TO_TU(_h,_l) \ + ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) + struct ath_hal *ah = sc->sc_ah; + u32 nexttbtt, intval; + struct ath_beacon_config conf; + enum ath9k_opmode av_opmode; + + if (if_id != ATH_IF_ID_ANY) + av_opmode = sc->sc_vaps[if_id]->av_opmode; + else + av_opmode = sc->sc_opmode; + + memzero(&conf, sizeof(struct ath_beacon_config)); + + /* FIXME: Use default values for now - Sujith */ + /* Query beacon configuration first */ + /* + * Protocol stack doesn't support dynamic beacon configuration, + * use default configurations. + */ + conf.beacon_interval = ATH_DEFAULT_BINTVAL; + conf.listen_interval = 1; + conf.dtim_period = conf.beacon_interval; + conf.dtim_count = 1; + conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval; + + /* extract tstamp from last beacon and convert to TU */ + nexttbtt = TSF_TO_TU(get_unaligned_le32(conf.u.last_tstamp + 4), + get_unaligned_le32(conf.u.last_tstamp)); + /* XXX conditionalize multi-bss support? */ + if (sc->sc_opmode == ATH9K_M_HOSTAP) { + /* + * For multi-bss ap support beacons are either staggered + * evenly over N slots or burst together. For the former + * arrange for the SWBA to be delivered for each slot. + * Slots that are not occupied will generate nothing. + */ + /* NB: the beacon interval is kept internally in TU's */ + intval = conf.beacon_interval & ATH9K_BEACON_PERIOD; + intval /= ATH_BCBUF; /* for staggered beacons */ + } else { + intval = conf.beacon_interval & ATH9K_BEACON_PERIOD; + } + + if (nexttbtt == 0) /* e.g. for ap mode */ + nexttbtt = intval; + else if (intval) /* NB: can be 0 for monitor mode */ + nexttbtt = roundup(nexttbtt, intval); + DPRINTF(sc, ATH_DBG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", + __func__, nexttbtt, intval, conf.beacon_interval); + /* Check for ATH9K_M_HOSTAP and sc_nostabeacons for WDS client */ + if (sc->sc_opmode == ATH9K_M_STA) { + struct ath9k_beacon_state bs; + u64 tsf; + u32 tsftu; + int dtimperiod, dtimcount, sleepduration; + int cfpperiod, cfpcount; + + /* + * Setup dtim and cfp parameters according to + * last beacon we received (which may be none). + */ + dtimperiod = conf.dtim_period; + if (dtimperiod <= 0) /* NB: 0 if not known */ + dtimperiod = 1; + dtimcount = conf.dtim_count; + if (dtimcount >= dtimperiod) /* NB: sanity check */ + dtimcount = 0; /* XXX? */ + cfpperiod = 1; /* NB: no PCF support yet */ + cfpcount = 0; + + sleepduration = conf.listen_interval * intval; + if (sleepduration <= 0) + sleepduration = intval; + +#define FUDGE 2 + /* + * Pull nexttbtt forward to reflect the current + * TSF and calculate dtim+cfp state for the result. + */ + tsf = ath9k_hw_gettsf64(ah); + tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; + do { + nexttbtt += intval; + if (--dtimcount < 0) { + dtimcount = dtimperiod - 1; + if (--cfpcount < 0) + cfpcount = cfpperiod - 1; + } + } while (nexttbtt < tsftu); +#undef FUDGE + memzero(&bs, sizeof(bs)); + bs.bs_intval = intval; + bs.bs_nexttbtt = nexttbtt; + bs.bs_dtimperiod = dtimperiod*intval; + bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; + bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; + bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; + bs.bs_cfpmaxduration = 0; + /* + * Calculate the number of consecutive beacons to miss + * before taking a BMISS interrupt. The configuration + * is specified in TU so we only need calculate based + * on the beacon interval. Note that we clamp the + * result to at most 15 beacons. 
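 * Worked example (illustrative, assuming a 100 TU beacon interval and a
 * default bmiss limit of 10, neither shown here): listen_interval is 1,
 * so sleepduration equals intval and the else branch applies;
 * bmiss_timeout is 10 * 100 TU, and DIV_ROUND_UP(1000, 100) yields a
 * threshold of 10 missed beacons, comfortably under the clamp of 15.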
+ */ + if (sleepduration > intval) { + bs.bs_bmissthreshold = + conf.listen_interval * + ATH_DEFAULT_BMISS_LIMIT / 2; + } else { + bs.bs_bmissthreshold = + DIV_ROUND_UP(conf.bmiss_timeout, intval); + if (bs.bs_bmissthreshold > 15) + bs.bs_bmissthreshold = 15; + else if (bs.bs_bmissthreshold <= 0) + bs.bs_bmissthreshold = 1; + } + + /* + * Calculate sleep duration. The configuration is + * given in ms. We insure a multiple of the beacon + * period is used. Also, if the sleep duration is + * greater than the DTIM period then it makes senses + * to make it a multiple of that. + * + * XXX fixed at 100ms + */ + + bs.bs_sleepduration = + roundup(IEEE80211_MS_TO_TU(100), sleepduration); + if (bs.bs_sleepduration > bs.bs_dtimperiod) + bs.bs_sleepduration = bs.bs_dtimperiod; + + DPRINTF(sc, ATH_DBG_BEACON, + "%s: tsf %llu " + "tsf:tu %u " + "intval %u " + "nexttbtt %u " + "dtim %u " + "nextdtim %u " + "bmiss %u " + "sleep %u " + "cfp:period %u " + "maxdur %u " + "next %u " + "timoffset %u\n" + , __func__ + , (unsigned long long)tsf, tsftu + , bs.bs_intval + , bs.bs_nexttbtt + , bs.bs_dtimperiod + , bs.bs_nextdtim + , bs.bs_bmissthreshold + , bs.bs_sleepduration + , bs.bs_cfpperiod + , bs.bs_cfpmaxduration + , bs.bs_cfpnext + , bs.bs_timoffset + ); + + ath9k_hw_set_interrupts(ah, 0); + ath9k_hw_set_sta_beacon_timers(ah, &bs); + sc->sc_imask |= ATH9K_INT_BMISS; + ath9k_hw_set_interrupts(ah, sc->sc_imask); + } else { + u64 tsf; + u32 tsftu; + ath9k_hw_set_interrupts(ah, 0); + if (nexttbtt == intval) + intval |= ATH9K_BEACON_RESET_TSF; + if (sc->sc_opmode == ATH9K_M_IBSS) { + /* + * Pull nexttbtt forward to reflect the current + * TSF . + */ +#define FUDGE 2 + if (!(intval & ATH9K_BEACON_RESET_TSF)) { + tsf = ath9k_hw_gettsf64(ah); + tsftu = TSF_TO_TU((u32)(tsf>>32), + (u32)tsf) + FUDGE; + do { + nexttbtt += intval; + } while (nexttbtt < tsftu); + } +#undef FUDGE + DPRINTF(sc, ATH_DBG_BEACON, + "%s: IBSS nexttbtt %u intval %u (%u)\n", + __func__, nexttbtt, + intval & ~ATH9K_BEACON_RESET_TSF, + conf.beacon_interval); + + /* + * In IBSS mode enable the beacon timers but only + * enable SWBA interrupts if we need to manually + * prepare beacon frames. Otherwise we use a + * self-linked tx descriptor and let the hardware + * deal with things. + */ + intval |= ATH9K_BEACON_ENA; + if (!(ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) + sc->sc_imask |= ATH9K_INT_SWBA; + ath_beaconq_config(sc); + } else if (sc->sc_opmode == ATH9K_M_HOSTAP) { + /* + * In AP mode we enable the beacon timers and + * SWBA interrupts to prepare beacon frames. + */ + intval |= ATH9K_BEACON_ENA; + sc->sc_imask |= ATH9K_INT_SWBA; /* beacon prepare */ + ath_beaconq_config(sc); + } + ath9k_hw_beaconinit(ah, nexttbtt, intval); + sc->sc_bmisscount = 0; + ath9k_hw_set_interrupts(ah, sc->sc_imask); + /* + * When using a self-linked beacon descriptor in + * ibss mode load it once here. + */ + if (sc->sc_opmode == ATH9K_M_IBSS && + (ah->ah_caps.hw_caps & ATH9K_HW_CAP_VEOL)) + ath_beacon_start_adhoc(sc, 0); + } +#undef TSF_TO_TU +} + +/* Function to collect beacon rssi data and resync beacon if necessary */ + +void ath_beacon_sync(struct ath_softc *sc, int if_id) +{ + /* + * Resync beacon timers using the tsf of the + * beacon frame we just received. 
+ */ + ath_beacon_config(sc, if_id); + sc->sc_beacons = 1; +} diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c new file mode 100644 index 0000000..f6c4528 --- /dev/null +++ b/drivers/net/wireless/ath9k/core.c @@ -0,0 +1,1923 @@ +/* + * Copyright (c) 2008, Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + /* Implementation of the main "ATH" layer. */ + +#include "core.h" +#include "regd.h" + +static int ath_outdoor; /* enable outdoor use */ + +static const u8 ath_bcast_mac[ETH_ALEN] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +static u32 ath_chainmask_sel_up_rssi_thres = + ATH_CHAINMASK_SEL_UP_RSSI_THRES; +static u32 ath_chainmask_sel_down_rssi_thres = + ATH_CHAINMASK_SEL_DOWN_RSSI_THRES; +static u32 ath_chainmask_sel_period = + ATH_CHAINMASK_SEL_TIMEOUT; + +/* return bus cachesize in 4B word units */ + +static void bus_read_cachesize(struct ath_softc *sc, int *csz) +{ + u8 u8tmp; + + pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp); + *csz = (int)u8tmp; + + /* + * This check was put in to avoid "unplesant" consequences if + * the bootrom has not fully initialized all PCI devices. + * Sometimes the cache line size register is not set + */ + + if (*csz == 0) + *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */ +} + +/* + * Set current operating mode + * + * This function initializes and fills the rate table in the ATH object based + * on the operating mode. The blink rates are also set up here, although + * they have been superceeded by the ath_led module. +*/ + +static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode) +{ + const struct ath9k_rate_table *rt; + int i; + + memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); + rt = ath9k_hw_getratetable(sc->sc_ah, mode); + BUG_ON(!rt); + + for (i = 0; i < rt->rateCount; i++) + sc->sc_rixmap[rt->info[i].rateCode] = (u8) i; + + memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap)); + for (i = 0; i < 256; i++) { + u8 ix = rt->rateCodeToIndex[i]; + + if (ix == 0xff) + continue; + + sc->sc_hwmap[i].ieeerate = + rt->info[ix].dot11Rate & IEEE80211_RATE_VAL; + sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps; + + if (rt->info[ix].shortPreamble || + rt->info[ix].phy == PHY_OFDM) { + /* XXX: Handle this */ + } + + /* NB: this uses the last entry if the rate isn't found */ + /* XXX beware of overlow */ + } + sc->sc_currates = rt; + sc->sc_curmode = mode; + /* + * All protection frames are transmited at 2Mb/s for + * 11g, otherwise at 1Mb/s. + * XXX select protection rate index from rate table. + */ + sc->sc_protrix = (mode == ATH9K_MODE_11G ? 
1 : 0); +} + +/* + * Set up rate table (legacy rates) + */ +static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band) +{ + struct ath_hal *ah = sc->sc_ah; + const struct ath9k_rate_table *rt = NULL; + struct ieee80211_supported_band *sband; + struct ieee80211_rate *rate; + int i, maxrates; + + switch (band) { + case IEEE80211_BAND_2GHZ: + rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G); + break; + case IEEE80211_BAND_5GHZ: + rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A); + break; + default: + break; + } + + if (rt == NULL) + return; + + sband = &sc->sbands[band]; + rate = sc->rates[band]; + + if (rt->rateCount > ATH_RATE_MAX) + maxrates = ATH_RATE_MAX; + else + maxrates = rt->rateCount; + + for (i = 0; i < maxrates; i++) { + rate[i].bitrate = rt->info[i].rateKbps / 100; + rate[i].hw_value = rt->info[i].rateCode; + sband->n_bitrates++; + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: Rate: %2dMbps, ratecode: %2d\n", + __func__, + rate[i].bitrate / 10, + rate[i].hw_value); + } +} + +/* + * Set up channel list + */ +static int ath_setup_channels(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + int nchan, i, a = 0, b = 0; + u8 regclassids[ATH_REGCLASSIDS_MAX]; + u32 nregclass = 0; + struct ieee80211_supported_band *band_2ghz; + struct ieee80211_supported_band *band_5ghz; + struct ieee80211_channel *chan_2ghz; + struct ieee80211_channel *chan_5ghz; + struct ath9k_channel *c; + + /* Fill in ah->ah_channels */ + if (!ath9k_regd_init_channels(ah, + ATH_CHAN_MAX, + (u32 *)&nchan, + regclassids, + ATH_REGCLASSIDS_MAX, + &nregclass, + CTRY_DEFAULT, + false, + 1)) { + u32 rd = ah->ah_currentRD; + + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to collect channel list; " + "regdomain likely %u country code %u\n", + __func__, rd, CTRY_DEFAULT); + return -EINVAL; + } + + band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ]; + band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ]; + chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ]; + chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ]; + + for (i = 0; i < nchan; i++) { + c = &ah->ah_channels[i]; + if (IS_CHAN_2GHZ(c)) { + chan_2ghz[a].band = IEEE80211_BAND_2GHZ; + chan_2ghz[a].center_freq = c->channel; + chan_2ghz[a].max_power = c->maxTxPower; + + if (c->privFlags & CHANNEL_DISALLOW_ADHOC) + chan_2ghz[a].flags |= + IEEE80211_CHAN_NO_IBSS; + if (c->channelFlags & CHANNEL_PASSIVE) + chan_2ghz[a].flags |= + IEEE80211_CHAN_PASSIVE_SCAN; + + band_2ghz->n_channels = ++a; + + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: 2MHz channel: %d, " + "channelFlags: 0x%x\n", + __func__, + c->channel, + c->channelFlags); + } else if (IS_CHAN_5GHZ(c)) { + chan_5ghz[b].band = IEEE80211_BAND_5GHZ; + chan_5ghz[b].center_freq = c->channel; + chan_5ghz[b].max_power = c->maxTxPower; + + if (c->privFlags & CHANNEL_DISALLOW_ADHOC) + chan_5ghz[b].flags |= + IEEE80211_CHAN_NO_IBSS; + if (c->channelFlags & CHANNEL_PASSIVE) + chan_5ghz[b].flags |= + IEEE80211_CHAN_PASSIVE_SCAN; + + band_5ghz->n_channels = ++b; + + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: 5MHz channel: %d, " + "channelFlags: 0x%x\n", + __func__, + c->channel, + c->channelFlags); + } + } + + return 0; +} + +/* + * Determine mode from channel flags + * + * This routine will provide the enumerated WIRELESSS_MODE value based + * on the settings of the channel flags. If ho valid set of flags + * exist, the lowest mode (11b) is selected. 
+*/ + +static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan) +{ + if (chan->chanmode == CHANNEL_A) + return ATH9K_MODE_11A; + else if (chan->chanmode == CHANNEL_G) + return ATH9K_MODE_11G; + else if (chan->chanmode == CHANNEL_B) + return ATH9K_MODE_11B; + else if (chan->chanmode == CHANNEL_A_HT20) + return ATH9K_MODE_11NA_HT20; + else if (chan->chanmode == CHANNEL_G_HT20) + return ATH9K_MODE_11NG_HT20; + else if (chan->chanmode == CHANNEL_A_HT40PLUS) + return ATH9K_MODE_11NA_HT40PLUS; + else if (chan->chanmode == CHANNEL_A_HT40MINUS) + return ATH9K_MODE_11NA_HT40MINUS; + else if (chan->chanmode == CHANNEL_G_HT40PLUS) + return ATH9K_MODE_11NG_HT40PLUS; + else if (chan->chanmode == CHANNEL_G_HT40MINUS) + return ATH9K_MODE_11NG_HT40MINUS; + + /* NB: should not get here */ + return ATH9K_MODE_11B; +} + +/* + * Stop the device, grabbing the top-level lock to protect + * against concurrent entry through ath_init (which can happen + * if another thread does a system call and the thread doing the + * stop is preempted). + */ + +static int ath_stop(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n", + __func__, sc->sc_invalid); + + /* + * Shutdown the hardware and driver: + * stop output from above + * reset 802.11 state machine + * (sends station deassoc/deauth frames) + * turn off timers + * disable interrupts + * clear transmit machinery + * clear receive machinery + * turn off the radio + * reclaim beacon resources + * + * Note that some of this work is not possible if the + * hardware is gone (invalid). + */ + + if (!sc->sc_invalid) + ath9k_hw_set_interrupts(ah, 0); + ath_draintxq(sc, false); + if (!sc->sc_invalid) { + ath_stoprecv(sc); + ath9k_hw_phy_disable(ah); + } else + sc->sc_rxlink = NULL; + + return 0; +} + +/* + * Start Scan + * + * This function is called when starting a channel scan. It will perform + * power save wakeup processing, set the filter for the scan, and get the + * chip ready to send broadcast packets out during the scan. +*/ + +void ath_scan_start(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + u32 rfilt; + u32 now = (u32) jiffies_to_msecs(get_timestamp()); + + sc->sc_scanning = 1; + rfilt = ath_calcrxfilter(sc); + ath9k_hw_setrxfilter(ah, rfilt); + ath9k_hw_write_associd(ah, ath_bcast_mac, 0); + + /* Restore previous power management state. */ + + DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n", + now / 1000, now % 1000, __func__, rfilt); +} + +/* + * Scan End + * + * This routine is called by the upper layer when the scan is completed. This + * will set the filters back to normal operating mode, set the BSSID to the + * correct value, and restore the power save state. +*/ + +void ath_scan_end(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + u32 rfilt; + u32 now = (u32) jiffies_to_msecs(get_timestamp()); + + sc->sc_scanning = 0; + /* Request for a full reset due to rx packet filter changes */ + sc->sc_full_reset = 1; + rfilt = ath_calcrxfilter(sc); + ath9k_hw_setrxfilter(ah, rfilt); + ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid); + + DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n", + now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid); +} + +/* + * Set the current channel + * + * Set/change channels. If the channel is really being changed, it's done + * by reseting the chip. To accomplish this we must first cleanup any pending + * DMA, then restart stuff after a la ath_init. 
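 * A fast channel change (fastcc) is attempted by default; it is dropped
 * and a full chip reset is performed instead when receive could not be
 * stopped cleanly or when a full reset was requested beforehand, as
 * ath_scan_end does after changing the RX filters.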
+*/ +int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan) +{ + struct ath_hal *ah = sc->sc_ah; + bool fastcc = true, stopped; + enum ath9k_ht_macmode ht_macmode; + + if (sc->sc_invalid) /* if the device is invalid or removed */ + return -EIO; + + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n", + __func__, + ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel, + sc->sc_curchan.channelFlags), + sc->sc_curchan.channel, + ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags), + hchan->channel, hchan->channelFlags); + + ht_macmode = ath_cwm_macmode(sc); + + if (hchan->channel != sc->sc_curchan.channel || + hchan->channelFlags != sc->sc_curchan.channelFlags || + sc->sc_update_chainmask || sc->sc_full_reset) { + int status; + /* + * This is only performed if the channel settings have + * actually changed. + * + * To switch channels clear any pending DMA operations; + * wait long enough for the RX fifo to drain, reset the + * hardware at the new frequency, and then re-enable + * the relevant bits of the h/w. + */ + ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */ + ath_draintxq(sc, false); /* clear pending tx frames */ + stopped = ath_stoprecv(sc); /* turn off frame recv */ + + /* XXX: do not flush receive queue here. We don't want + * to flush data frames already in queue because of + * changing channel. */ + + if (!stopped || sc->sc_full_reset) + fastcc = false; + + spin_lock_bh(&sc->sc_resetlock); + if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan, + ht_macmode, sc->sc_tx_chainmask, + sc->sc_rx_chainmask, + sc->sc_ht_extprotspacing, + fastcc, &status)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to reset channel %u (%uMhz) " + "flags 0x%x hal status %u\n", __func__, + ath9k_hw_mhz2ieee(ah, hchan->channel, + hchan->channelFlags), + hchan->channel, hchan->channelFlags, status); + spin_unlock_bh(&sc->sc_resetlock); + return -EIO; + } + spin_unlock_bh(&sc->sc_resetlock); + + sc->sc_curchan = *hchan; + sc->sc_update_chainmask = 0; + sc->sc_full_reset = 0; + + /* Re-enable rx framework */ + if (ath_startrecv(sc) != 0) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to restart recv logic\n", __func__); + return -EIO; + } + /* + * Change channels and update the h/w rate map + * if we're switching; e.g. 11a to 11b/g. + */ + ath_setcurmode(sc, ath_chan2mode(hchan)); + + ath_update_txpow(sc); /* update tx power state */ + /* + * Re-enable interrupts. 
+ */ + ath9k_hw_set_interrupts(ah, sc->sc_imask); + } + return 0; +} + +/**********************/ +/* Chainmask Handling */ +/**********************/ + +static void ath_chainmask_sel_timertimeout(unsigned long data) +{ + struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data; + cm->switch_allowed = 1; +} + +/* Start chainmask select timer */ +static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm) +{ + cm->switch_allowed = 0; + mod_timer(&cm->timer, ath_chainmask_sel_period); +} + +/* Stop chainmask select timer */ +static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm) +{ + cm->switch_allowed = 0; + del_timer_sync(&cm->timer); +} + +static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an) +{ + struct ath_chainmask_sel *cm = &an->an_chainmask_sel; + + memzero(cm, sizeof(struct ath_chainmask_sel)); + + cm->cur_tx_mask = sc->sc_tx_chainmask; + cm->cur_rx_mask = sc->sc_rx_chainmask; + cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER; + setup_timer(&cm->timer, + ath_chainmask_sel_timertimeout, (unsigned long) cm); +} + +int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an) +{ + struct ath_chainmask_sel *cm = &an->an_chainmask_sel; + + /* + * Disable auto-swtiching in one of the following if conditions. + * sc_chainmask_auto_sel is used for internal global auto-switching + * enabled/disabled setting + */ + if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) { + cm->cur_tx_mask = sc->sc_tx_chainmask; + return cm->cur_tx_mask; + } + + if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER) + return cm->cur_tx_mask; + + if (cm->switch_allowed) { + /* Switch down from tx 3 to tx 2. */ + if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 && + ATH_RSSI_OUT(cm->tx_avgrssi) >= + ath_chainmask_sel_down_rssi_thres) { + cm->cur_tx_mask = sc->sc_tx_chainmask; + + /* Don't let another switch happen until + * this timer expires */ + ath_chainmask_sel_timerstart(cm); + } + /* Switch up from tx 2 to 3. */ + else if (cm->cur_tx_mask == sc->sc_tx_chainmask && + ATH_RSSI_OUT(cm->tx_avgrssi) <= + ath_chainmask_sel_up_rssi_thres) { + cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3; + + /* Don't let another switch happen + * until this timer expires */ + ath_chainmask_sel_timerstart(cm); + } + } + + return cm->cur_tx_mask; +} + +/* + * Update tx/rx chainmask. For legacy association, + * hard code chainmask to 1x1, for 11n association, use + * the chainmask configuration. + */ + +void ath_update_chainmask(struct ath_softc *sc, int is_ht) +{ + sc->sc_update_chainmask = 1; + if (is_ht) { + sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask; + sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask; + } else { + sc->sc_tx_chainmask = 1; + sc->sc_rx_chainmask = 1; + } + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n", + __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask); +} + +/******************/ +/* VAP management */ +/******************/ + +/* + * VAP in Listen mode + * + * This routine brings the VAP out of the down state into a "listen" state + * where it waits for association requests. This is used in AP and AdHoc + * modes. 
+*/ + +int ath_vap_listen(struct ath_softc *sc, int if_id) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath_vap *avp; + u32 rfilt = 0; + DECLARE_MAC_BUF(mac); + + avp = sc->sc_vaps[if_id]; + if (avp == NULL) { + DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n", + __func__, if_id); + return -EINVAL; + } + +#ifdef CONFIG_SLOW_ANT_DIV + ath_slow_ant_div_stop(&sc->sc_antdiv); +#endif + + /* update ratectrl about the new state */ + ath_rate_newstate(sc, avp); + + rfilt = ath_calcrxfilter(sc); + ath9k_hw_setrxfilter(ah, rfilt); + + if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) { + memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN); + ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid); + } else + sc->sc_curaid = 0; + + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: RX filter 0x%x bssid %s aid 0x%x\n", + __func__, rfilt, print_mac(mac, + sc->sc_curbssid), sc->sc_curaid); + + /* + * XXXX + * Disable BMISS interrupt when we're not associated + */ + ath9k_hw_set_interrupts(ah, + sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS)); + sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); + /* need to reconfigure the beacons when it moves to RUN */ + sc->sc_beacons = 0; + + return 0; +} + +int ath_vap_attach(struct ath_softc *sc, + int if_id, + struct ieee80211_vif *if_data, + enum ath9k_opmode opmode) +{ + struct ath_vap *avp; + + if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Invalid interface id = %u\n", __func__, if_id); + return -EINVAL; + } + + switch (opmode) { + case ATH9K_M_STA: + case ATH9K_M_IBSS: + case ATH9K_M_MONITOR: + break; + case ATH9K_M_HOSTAP: + /* XXX not right, beacon buffer is allocated on RUN trans */ + if (list_empty(&sc->sc_bbuf)) + return -ENOMEM; + break; + default: + return -EINVAL; + } + + /* create ath_vap */ + avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL); + if (avp == NULL) + return -ENOMEM; + + memzero(avp, sizeof(struct ath_vap)); + avp->av_if_data = if_data; + /* Set the VAP opmode */ + avp->av_opmode = opmode; + avp->av_bslot = -1; + INIT_LIST_HEAD(&avp->av_mcastq.axq_q); + INIT_LIST_HEAD(&avp->av_mcastq.axq_acq); + spin_lock_init(&avp->av_mcastq.axq_lock); + + ath9k_hw_set_tsfadjust(sc->sc_ah, 1); + + sc->sc_vaps[if_id] = avp; + sc->sc_nvaps++; + /* Set the device opmode */ + sc->sc_opmode = opmode; + + /* default VAP configuration */ + avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE; + avp->av_config.av_fixed_retryset = 0x03030303; + + return 0; +} + +int ath_vap_detach(struct ath_softc *sc, int if_id) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath_vap *avp; + + avp = sc->sc_vaps[if_id]; + if (avp == NULL) { + DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n", + __func__, if_id); + return -EINVAL; + } + + /* + * Quiesce the hardware while we remove the vap. In + * particular we need to reclaim all references to the + * vap state by any frames pending on the tx queues. + * + * XXX can we do this w/o affecting other vap's? + */ + ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */ + ath_draintxq(sc, false); /* stop xmit side */ + ath_stoprecv(sc); /* stop recv side */ + ath_flushrecv(sc); /* flush recv queue */ + + /* Reclaim any pending mcast bufs on the vap. 
*/ + ath_tx_draintxq(sc, &avp->av_mcastq, false); + + kfree(avp); + sc->sc_vaps[if_id] = NULL; + sc->sc_nvaps--; + + return 0; +} + +int ath_vap_config(struct ath_softc *sc, + int if_id, struct ath_vap_config *if_config) +{ + struct ath_vap *avp; + + if (if_id >= ATH_BCBUF) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Invalid interface id = %u\n", __func__, if_id); + return -EINVAL; + } + + avp = sc->sc_vaps[if_id]; + ASSERT(avp != NULL); + + if (avp) + memcpy(&avp->av_config, if_config, sizeof(avp->av_config)); + + return 0; +} + +/********/ +/* Core */ +/********/ + +int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan) +{ + struct ath_hal *ah = sc->sc_ah; + int status; + int error = 0; + enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc); + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode); + + /* + * Stop anything previously setup. This is safe + * whether this is the first time through or not. + */ + ath_stop(sc); + + /* Initialize chanmask selection */ + sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask; + sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask; + + /* Reset SERDES registers */ + ath9k_hw_configpcipowersave(ah, 0); + + /* + * The basic interface to setting the hardware in a good + * state is ``reset''. On return the hardware is known to + * be powered up and with interrupts disabled. This must + * be followed by initialization of the appropriate bits + * and then setup of the interrupt mask. + */ + sc->sc_curchan = *initial_chan; + + spin_lock_bh(&sc->sc_resetlock); + if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode, + sc->sc_tx_chainmask, sc->sc_rx_chainmask, + sc->sc_ht_extprotspacing, false, &status)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to reset hardware; hal status %u " + "(freq %u flags 0x%x)\n", __func__, status, + sc->sc_curchan.channel, sc->sc_curchan.channelFlags); + error = -EIO; + spin_unlock_bh(&sc->sc_resetlock); + goto done; + } + spin_unlock_bh(&sc->sc_resetlock); + /* + * This is needed only to setup initial state + * but it's best done after a reset. + */ + ath_update_txpow(sc); + + /* + * Setup the hardware after reset: + * The receive engine is set going. + * Frame transmit is handled entirely + * in the frame output path; there's nothing to do + * here except setup the interrupt mask. + */ + if (ath_startrecv(sc) != 0) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to start recv logic\n", __func__); + error = -EIO; + goto done; + } + /* Setup our intr mask. */ + sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX + | ATH9K_INT_RXEOL | ATH9K_INT_RXORN + | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; + + if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT) + sc->sc_imask |= ATH9K_INT_GTT; + + if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) + sc->sc_imask |= ATH9K_INT_CST; + + /* + * Enable MIB interrupts when there are hardware phy counters. + * Note we only do this (at the moment) for station mode. + */ + if (ath9k_hw_phycounters(ah) && + ((sc->sc_opmode == ATH9K_M_STA) || (sc->sc_opmode == ATH9K_M_IBSS))) + sc->sc_imask |= ATH9K_INT_MIB; + /* + * Some hardware processes the TIM IE and fires an + * interrupt when the TIM bit is set. For hardware + * that does, if not overridden by configuration, + * enable the TIM interrupt when operating as station. 
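 * Taken together (illustrative): a station on a part that advertises
 * GTT, HT and phy counters ends up with RX | TX | RXEOL | RXORN |
 * FATAL | GLOBAL | GTT | CST | MIB assembled above, plus TIM below
 * only when software beacon processing has been turned off in the
 * configuration.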
+ */ + if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) && + (sc->sc_opmode == ATH9K_M_STA) && + !sc->sc_config.swBeaconProcess) + sc->sc_imask |= ATH9K_INT_TIM; + /* + * Don't enable interrupts here as we've not yet built our + * vap and node data structures, which will be needed as soon + * as we start receiving. + */ + ath_setcurmode(sc, ath_chan2mode(initial_chan)); + + /* XXX: we must make sure h/w is ready and clear invalid flag + * before turning on interrupt. */ + sc->sc_invalid = 0; +done: + return error; +} + +/* + * Reset the hardware w/o losing operational state. This is + * basically a more efficient way of doing ath_stop, ath_init, + * followed by state transitions to the current 802.11 + * operational state. Used to recover from errors rx overrun + * and to reset the hardware when rf gain settings must be reset. + */ + +static int ath_reset_start(struct ath_softc *sc, u32 flag) +{ + struct ath_hal *ah = sc->sc_ah; + + ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */ + ath_draintxq(sc, flag & RESET_RETRY_TXQ); /* stop xmit side */ + ath_stoprecv(sc); /* stop recv side */ + ath_flushrecv(sc); /* flush recv queue */ + + return 0; +} + +static int ath_reset_end(struct ath_softc *sc, u32 flag) +{ + struct ath_hal *ah = sc->sc_ah; + + if (ath_startrecv(sc) != 0) /* restart recv */ + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to start recv logic\n", __func__); + + /* + * We may be doing a reset in response to a request + * that changes the channel so update any state that + * might change as a result. + */ + ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan)); + + ath_update_txpow(sc); /* update tx power state */ + + if (sc->sc_beacons) + ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */ + ath9k_hw_set_interrupts(ah, sc->sc_imask); + + /* Restart the txq */ + if (flag & RESET_RETRY_TXQ) { + int i; + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (ATH_TXQ_SETUP(sc, i)) { + spin_lock_bh(&sc->sc_txq[i].axq_lock); + ath_txq_schedule(sc, &sc->sc_txq[i]); + spin_unlock_bh(&sc->sc_txq[i].axq_lock); + } + } + } + return 0; +} + +int ath_reset(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + int status; + int error = 0; + enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc); + + /* NB: indicate channel change so we do a full reset */ + spin_lock_bh(&sc->sc_resetlock); + if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, + ht_macmode, + sc->sc_tx_chainmask, sc->sc_rx_chainmask, + sc->sc_ht_extprotspacing, false, &status)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to reset hardware; hal status %u\n", + __func__, status); + error = -EIO; + } + spin_unlock_bh(&sc->sc_resetlock); + + return error; +} + +int ath_suspend(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + + /* No I/O if device has been surprise removed */ + if (sc->sc_invalid) + return -EIO; + + /* Shut off the interrupt before setting sc->sc_invalid to '1' */ + ath9k_hw_set_interrupts(ah, 0); + + /* XXX: we must make sure h/w will not generate any interrupt + * before setting the invalid flag. */ + sc->sc_invalid = 1; + + /* disable HAL and put h/w to sleep */ + ath9k_hw_disable(sc->sc_ah); + + ath9k_hw_configpcipowersave(sc->sc_ah, 1); + + return 0; +} + +/* Interrupt handler. Most of the actual processing is deferred. + * It's the caller's responsibility to ensure the chip is awake. 
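 * Worked example (illustrative): if the masked pseudo-ISR shows RX and
 * TX, both bits are latched in sc_intrstatus, everything except SWBA is
 * masked off and the intr tasklet is scheduled; ath9k_tasklet then runs
 * the rx/tx handlers and re-enables the full interrupt mask when done.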
*/ + +irqreturn_t ath_isr(int irq, void *dev) +{ + struct ath_softc *sc = dev; + struct ath_hal *ah = sc->sc_ah; + enum ath9k_int status; + bool sched = false; + + do { + if (sc->sc_invalid) { + /* + * The hardware is not ready/present, don't + * touch anything. Note this can happen early + * on if the IRQ is shared. + */ + return IRQ_NONE; + } + if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */ + return IRQ_NONE; + } + + /* + * Figure out the reason(s) for the interrupt. Note + * that the hal returns a pseudo-ISR that may include + * bits we haven't explicitly enabled so we mask the + * value to insure we only process bits we requested. + */ + ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */ + + status &= sc->sc_imask; /* discard unasked-for bits */ + + /* + * If there are no status bits set, then this interrupt was not + * for me (should have been caught above). + */ + + if (!status) + return IRQ_NONE; + + sc->sc_intrstatus = status; + + if (status & ATH9K_INT_FATAL) { + /* need a chip reset */ + sched = true; + } else if (status & ATH9K_INT_RXORN) { + /* need a chip reset */ + sched = true; + } else { + if (status & ATH9K_INT_SWBA) { + /* schedule a tasklet for beacon handling */ + tasklet_schedule(&sc->bcon_tasklet); + } + if (status & ATH9K_INT_RXEOL) { + /* + * NB: the hardware should re-read the link when + * RXE bit is written, but it doesn't work + * at least on older hardware revs. + */ + sched = true; + } + + if (status & ATH9K_INT_TXURN) + /* bump tx trigger level */ + ath9k_hw_updatetxtriglevel(ah, true); + /* XXX: optimize this */ + if (status & ATH9K_INT_RX) + sched = true; + if (status & ATH9K_INT_TX) + sched = true; + if (status & ATH9K_INT_BMISS) + sched = true; + /* carrier sense timeout */ + if (status & ATH9K_INT_CST) + sched = true; + if (status & ATH9K_INT_MIB) { + /* + * Disable interrupts until we service the MIB + * interrupt; otherwise it will continue to + * fire. + */ + ath9k_hw_set_interrupts(ah, 0); + /* + * Let the hal handle the event. We assume + * it will clear whatever condition caused + * the interrupt. 
+ */ + ath9k_hw_procmibevent(ah, &sc->sc_halstats); + ath9k_hw_set_interrupts(ah, sc->sc_imask); + } + if (status & ATH9K_INT_TIM_TIMER) { + if (!(ah->ah_caps.hw_caps & + ATH9K_HW_CAP_AUTOSLEEP)) { + /* Clear RxAbort bit so that we can + * receive frames */ + ath9k_hw_setrxabort(ah, 0); + sched = true; + } + } + } + } while (0); + + if (sched) { + /* turn off every interrupt except SWBA */ + ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA)); + tasklet_schedule(&sc->intr_tq); + } + + return IRQ_HANDLED; +} + +/* Deferred interrupt processing */ + +static void ath9k_tasklet(unsigned long data) +{ + struct ath_softc *sc = (struct ath_softc *)data; + u32 status = sc->sc_intrstatus; + + if (status & ATH9K_INT_FATAL) { + /* need a chip reset */ + ath_internal_reset(sc); + return; + } else { + + if (status & + (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) { + /* XXX: fill me in */ + /* + if (status & ATH9K_INT_RXORN) { + } + if (status & ATH9K_INT_RXEOL) { + } + */ + spin_lock_bh(&sc->sc_rxflushlock); + ath_rx_tasklet(sc, 0); + spin_unlock_bh(&sc->sc_rxflushlock); + } + /* XXX: optimize this */ + if (status & ATH9K_INT_TX) + ath_tx_tasklet(sc); + /* XXX: fill me in */ + /* + if (status & ATH9K_INT_BMISS) { + } + if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) { + if (status & ATH9K_INT_TIM) { + } + if (status & ATH9K_INT_DTIMSYNC) { + } + } + */ + } + + /* re-enable hardware interrupt */ + ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask); +} + +int ath_init(u16 devid, struct ath_softc *sc) +{ + struct ath_hal *ah = NULL; + int status; + int error = 0, i; + int csz = 0; + u32 rd; + + /* XXX: hardware will not be ready until ath_open() being called */ + sc->sc_invalid = 1; + + sc->sc_debug = DBG_DEFAULT; + DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid); + + /* Initialize tasklet */ + tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc); + tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet, + (unsigned long)sc); + + /* + * Cache line size is used to size and align various + * structures used to communicate with the hardware. + */ + bus_read_cachesize(sc, &csz); + /* XXX assert csz is non-zero */ + sc->sc_cachelsz = csz << 2; /* convert to bytes */ + + spin_lock_init(&sc->sc_resetlock); + + ah = ath9k_hw_attach(devid, sc, sc->mem, &status); + if (ah == NULL) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to attach hardware; HAL status %u\n", + __func__, status); + error = -ENXIO; + goto bad; + } + sc->sc_ah = ah; + + /* Get the chipset-specific aggr limit. */ + sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit; + + /* Get the hardware key cache size. */ + sc->sc_keymax = ah->ah_caps.keycache_size; + if (sc->sc_keymax > ATH_KEYMAX) { + DPRINTF(sc, ATH_DBG_KEYCACHE, + "%s: Warning, using only %u entries in %u key cache\n", + __func__, ATH_KEYMAX, sc->sc_keymax); + sc->sc_keymax = ATH_KEYMAX; + } + + /* + * Reset the key cache since some parts do not + * reset the contents on initial power up. + */ + for (i = 0; i < sc->sc_keymax; i++) + ath9k_hw_keyreset(ah, (u16) i); + /* + * Mark key cache slots associated with global keys + * as in use. If we knew TKIP was not to be used we + * could leave the +32, +64, and +32+64 slots free. + * XXX only for splitmic. + */ + for (i = 0; i < IEEE80211_WEP_NKID; i++) { + set_bit(i, sc->sc_keymap); + set_bit(i + 32, sc->sc_keymap); + set_bit(i + 64, sc->sc_keymap); + set_bit(i + 32 + 64, sc->sc_keymap); + } + /* + * Collect the channel list using the default country + * code and including outdoor channels. 
The 802.11 layer + * is resposible for filtering this list based on settings + * like the phy mode. + */ + rd = ah->ah_currentRD; + + error = ath_setup_channels(sc); + if (error) + goto bad; + + /* default to STA mode */ + sc->sc_opmode = ATH9K_M_MONITOR; + + /* Setup rate tables */ + + ath_setup_rates(sc, IEEE80211_BAND_2GHZ); + ath_setup_rates(sc, IEEE80211_BAND_5GHZ); + + /* NB: setup here so ath_rate_update is happy */ + ath_setcurmode(sc, ATH9K_MODE_11A); + + /* + * Allocate hardware transmit queues: one queue for + * beacon frames and one data queue for each QoS + * priority. Note that the hal handles reseting + * these queues at the needed time. + */ + sc->sc_bhalq = ath_beaconq_setup(ah); + if (sc->sc_bhalq == -1) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to setup a beacon xmit queue\n", __func__); + error = -EIO; + goto bad2; + } + sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0); + if (sc->sc_cabq == NULL) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to setup CAB xmit queue\n", __func__); + error = -EIO; + goto bad2; + } + + sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME; + ath_cabq_update(sc); + + for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++) + sc->sc_haltype2q[i] = -1; + + /* Setup data queues */ + /* NB: ensure BK queue is the lowest priority h/w queue */ + if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to setup xmit queue for BK traffic\n", + __func__); + error = -EIO; + goto bad2; + } + + if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to setup xmit queue for BE traffic\n", + __func__); + error = -EIO; + goto bad2; + } + if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to setup xmit queue for VI traffic\n", + __func__); + error = -EIO; + goto bad2; + } + if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to setup xmit queue for VO traffic\n", + __func__); + error = -EIO; + goto bad2; + } + + sc->sc_rc = ath_rate_attach(ah); + if (sc->sc_rc == NULL) { + error = EIO; + goto bad2; + } + + if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER, + ATH9K_CIPHER_TKIP, NULL)) { + /* + * Whether we should enable h/w TKIP MIC. + * XXX: if we don't support WME TKIP MIC, then we wouldn't + * report WMM capable, so it's always safe to turn on + * TKIP MIC in this case. + */ + ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, + 0, 1, NULL); + } + + /* + * Check whether the separate key cache entries + * are required to handle both tx+rx MIC keys. + * With split mic keys the number of stations is limited + * to 27 otherwise 59. 
+ */ + if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER, + ATH9K_CIPHER_TKIP, NULL) + && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER, + ATH9K_CIPHER_MIC, NULL) + && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT, + 0, NULL)) + sc->sc_splitmic = 1; + + /* turn on mcast key search if possible */ + if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL)) + (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1, + 1, NULL); + + sc->sc_config.txpowlimit = ATH_TXPOWER_MAX; + sc->sc_config.txpowlimit_override = 0; + + /* 11n Capabilities */ + if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { + sc->sc_txaggr = 1; + sc->sc_rxaggr = 1; + } + + sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask; + sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask; + + /* Configuration for rx chain detection */ + sc->sc_rxchaindetect_ref = 0; + sc->sc_rxchaindetect_thresh5GHz = 35; + sc->sc_rxchaindetect_thresh2GHz = 35; + sc->sc_rxchaindetect_delta5GHz = 30; + sc->sc_rxchaindetect_delta2GHz = 30; + + ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); + sc->sc_defant = ath9k_hw_getdefantenna(ah); + + ath9k_hw_getmac(ah, sc->sc_myaddr); + if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) { + ath9k_hw_getbssidmask(ah, sc->sc_bssidmask); + ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask); + ath9k_hw_setbssidmask(ah, sc->sc_bssidmask); + } + sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */ + + /* initialize beacon slots */ + for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++) + sc->sc_bslot[i] = ATH_IF_ID_ANY; + + /* save MISC configurations */ + sc->sc_config.swBeaconProcess = 1; + +#ifdef CONFIG_SLOW_ANT_DIV + /* range is 40 - 255, we use something in the middle */ + ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127); +#endif + + return 0; +bad2: + /* cleanup tx queues */ + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) + if (ATH_TXQ_SETUP(sc, i)) + ath_tx_cleanupq(sc, &sc->sc_txq[i]); +bad: + if (ah) + ath9k_hw_detach(ah); + return error; +} + +void ath_deinit(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + int i; + + DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__); + + ath_stop(sc); + if (!sc->sc_invalid) + ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); + ath_rate_detach(sc->sc_rc); + /* cleanup tx queues */ + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) + if (ATH_TXQ_SETUP(sc, i)) + ath_tx_cleanupq(sc, &sc->sc_txq[i]); + ath9k_hw_detach(ah); +} + +/*******************/ +/* Node Management */ +/*******************/ + +struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id) +{ + struct ath_vap *avp; + struct ath_node *an; + DECLARE_MAC_BUF(mac); + + avp = sc->sc_vaps[if_id]; + ASSERT(avp != NULL); + + /* mac80211 sta_notify callback is from an IRQ context, so no sleep */ + an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC); + if (an == NULL) + return NULL; + memzero(an, sizeof(*an)); + + an->an_sc = sc; + memcpy(an->an_addr, addr, ETH_ALEN); + atomic_set(&an->an_refcnt, 1); + + /* set up per-node tx/rx state */ + ath_tx_node_init(sc, an); + ath_rx_node_init(sc, an); + + ath_chainmask_sel_init(sc, an); + ath_chainmask_sel_timerstart(&an->an_chainmask_sel); + list_add(&an->list, &sc->node_list); + + return an; +} + +void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag) +{ + unsigned long flags; + + DECLARE_MAC_BUF(mac); + + ath_chainmask_sel_timerstop(&an->an_chainmask_sel); + an->an_flags |= ATH_NODE_CLEAN; + ath_tx_node_cleanup(sc, an, bh_flag); + ath_rx_node_cleanup(sc, an); + + ath_tx_node_free(sc, an); + ath_rx_node_free(sc, an); + + 
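+	/*
+	 * All per-node tx/rx state has been torn down above; what remains
+	 * is to unlink the node from the driver's node_list under
+	 * node_lock and free it, so concurrent lookups cannot return a
+	 * stale entry.
+	 */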
spin_lock_irqsave(&sc->node_lock, flags); + + list_del(&an->list); + + spin_unlock_irqrestore(&sc->node_lock, flags); + + kfree(an); +} + +/* Finds a node and increases the refcnt if found */ + +struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr) +{ + struct ath_node *an = NULL, *an_found = NULL; + + if (list_empty(&sc->node_list)) /* FIXME */ + goto out; + list_for_each_entry(an, &sc->node_list, list) { + if (!compare_ether_addr(an->an_addr, addr)) { + atomic_inc(&an->an_refcnt); + an_found = an; + break; + } + } +out: + return an_found; +} + +/* Decrements the refcnt and if it drops to zero, detach the node */ + +void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag) +{ + if (atomic_dec_and_test(&an->an_refcnt)) + ath_node_detach(sc, an, bh_flag); +} + +/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */ +struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr) +{ + struct ath_node *an = NULL, *an_found = NULL; + + if (list_empty(&sc->node_list)) + return NULL; + + list_for_each_entry(an, &sc->node_list, list) + if (!compare_ether_addr(an->an_addr, addr)) { + an_found = an; + break; + } + + return an_found; +} + +/* + * Set up New Node + * + * Setup driver-specific state for a newly associated node. This routine + * really only applies if compression or XR are enabled, there is no code + * covering any other cases. +*/ + +void ath_newassoc(struct ath_softc *sc, + struct ath_node *an, int isnew, int isuapsd) +{ + int tidno; + + /* if station reassociates, tear down the aggregation state. */ + if (!isnew) { + for (tidno = 0; tidno < WME_NUM_TID; tidno++) { + if (sc->sc_txaggr) + ath_tx_aggr_teardown(sc, an, tidno); + if (sc->sc_rxaggr) + ath_rx_aggr_teardown(sc, an, tidno); + } + } + an->an_flags = 0; +} + +/**************/ +/* Encryption */ +/**************/ + +void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot) +{ + ath9k_hw_keyreset(sc->sc_ah, keyix); + if (freeslot) + clear_bit(keyix, sc->sc_keymap); +} + +int ath_keyset(struct ath_softc *sc, + u16 keyix, + struct ath9k_keyval *hk, + const u8 mac[ETH_ALEN]) +{ + bool status; + + status = ath9k_hw_set_keycache_entry(sc->sc_ah, + keyix, hk, mac, false); + + return status != false; +} + +/***********************/ +/* TX Power/Regulatory */ +/***********************/ + +/* + * Set Transmit power in HAL + * + * This routine makes the actual HAL calls to set the new transmit power + * limit. +*/ + +void ath_update_txpow(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + u32 txpow; + + if (sc->sc_curtxpow != sc->sc_config.txpowlimit) { + ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit); + /* read back in case value is clamped */ + ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow); + sc->sc_curtxpow = txpow; + } +} + +/* Return the current country and domain information */ +void ath_get_currentCountry(struct ath_softc *sc, + struct ath9k_country_entry *ctry) +{ + ath9k_regd_get_current_country(sc->sc_ah, ctry); + + /* If HAL not specific yet, since it is band dependent, + * use the one we passed in. 
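+	 * A default country code clears the ISO string; otherwise a
+	 * missing third ISO letter is derived from the ath_outdoor
+	 * setting ('O' for outdoor, 'I' for indoor) below.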
*/ + if (ctry->countryCode == CTRY_DEFAULT) { + ctry->iso[0] = 0; + ctry->iso[1] = 0; + } else if (ctry->iso[0] && ctry->iso[1]) { + if (!ctry->iso[2]) { + if (ath_outdoor) + ctry->iso[2] = 'O'; + else + ctry->iso[2] = 'I'; + } + } +} + +/**************************/ +/* Slow Antenna Diversity */ +/**************************/ + +void ath_slow_ant_div_init(struct ath_antdiv *antdiv, + struct ath_softc *sc, + int32_t rssitrig) +{ + int trig; + + /* antdivf_rssitrig can range from 40 - 0xff */ + trig = (rssitrig > 0xff) ? 0xff : rssitrig; + trig = (rssitrig < 40) ? 40 : rssitrig; + + antdiv->antdiv_sc = sc; + antdiv->antdivf_rssitrig = trig; +} + +void ath_slow_ant_div_start(struct ath_antdiv *antdiv, + u8 num_antcfg, + const u8 *bssid) +{ + antdiv->antdiv_num_antcfg = + num_antcfg < ATH_ANT_DIV_MAX_CFG ? + num_antcfg : ATH_ANT_DIV_MAX_CFG; + antdiv->antdiv_state = ATH_ANT_DIV_IDLE; + antdiv->antdiv_curcfg = 0; + antdiv->antdiv_bestcfg = 0; + antdiv->antdiv_laststatetsf = 0; + + memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid)); + + antdiv->antdiv_start = 1; +} + +void ath_slow_ant_div_stop(struct ath_antdiv *antdiv) +{ + antdiv->antdiv_start = 0; +} + +static int32_t ath_find_max_val(int32_t *val, + u8 num_val, u8 *max_index) +{ + u32 MaxVal = *val++; + u32 cur_index = 0; + + *max_index = 0; + while (++cur_index < num_val) { + if (*val > MaxVal) { + MaxVal = *val; + *max_index = cur_index; + } + + val++; + } + + return MaxVal; +} + +void ath_slow_ant_div(struct ath_antdiv *antdiv, + struct ieee80211_hdr *hdr, + struct ath_rx_status *rx_stats) +{ + struct ath_softc *sc = antdiv->antdiv_sc; + struct ath_hal *ah = sc->sc_ah; + u64 curtsf = 0; + u8 bestcfg, curcfg = antdiv->antdiv_curcfg; + __le16 fc = hdr->frame_control; + + if (antdiv->antdiv_start && ieee80211_is_beacon(fc) + && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) { + antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi; + antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah); + curtsf = antdiv->antdiv_lastbtsf[curcfg]; + } else { + return; + } + + switch (antdiv->antdiv_state) { + case ATH_ANT_DIV_IDLE: + if ((antdiv->antdiv_lastbrssi[curcfg] < + antdiv->antdivf_rssitrig) + && ((curtsf - antdiv->antdiv_laststatetsf) > + ATH_ANT_DIV_MIN_IDLE_US)) { + + curcfg++; + if (curcfg == antdiv->antdiv_num_antcfg) + curcfg = 0; + + if (!ath9k_hw_select_antconfig(ah, curcfg)) { + antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg; + antdiv->antdiv_curcfg = curcfg; + antdiv->antdiv_laststatetsf = curtsf; + antdiv->antdiv_state = ATH_ANT_DIV_SCAN; + } + } + break; + + case ATH_ANT_DIV_SCAN: + if ((curtsf - antdiv->antdiv_laststatetsf) < + ATH_ANT_DIV_MIN_SCAN_US) + break; + + curcfg++; + if (curcfg == antdiv->antdiv_num_antcfg) + curcfg = 0; + + if (curcfg == antdiv->antdiv_bestcfg) { + ath_find_max_val(antdiv->antdiv_lastbrssi, + antdiv->antdiv_num_antcfg, &bestcfg); + if (!ath9k_hw_select_antconfig(ah, bestcfg)) { + antdiv->antdiv_bestcfg = bestcfg; + antdiv->antdiv_curcfg = bestcfg; + antdiv->antdiv_laststatetsf = curtsf; + antdiv->antdiv_state = ATH_ANT_DIV_IDLE; + } + } else { + if (!ath9k_hw_select_antconfig(ah, curcfg)) { + antdiv->antdiv_curcfg = curcfg; + antdiv->antdiv_laststatetsf = curtsf; + antdiv->antdiv_state = ATH_ANT_DIV_SCAN; + } + } + + break; + } +} + +/***********************/ +/* Descriptor Handling */ +/***********************/ + +/* + * Set up DMA descriptors + * + * This function will allocate both the DMA descriptor structure, and the + * buffers it contains. 
These are used to contain the descriptors used + * by the system. +*/ + +int ath_descdma_setup(struct ath_softc *sc, + struct ath_descdma *dd, + struct list_head *head, + const char *name, + int nbuf, + int ndesc) +{ +#define DS2PHYS(_dd, _ds) \ + ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) +#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0) +#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096) + + struct ath_desc *ds; + struct ath_buf *bf; + int i, bsize, error; + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n", + __func__, name, nbuf, ndesc); + + /* ath_desc must be a multiple of DWORDs */ + if ((sizeof(struct ath_desc) % 4) != 0) { + DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n", + __func__); + ASSERT((sizeof(struct ath_desc) % 4) == 0); + error = -ENOMEM; + goto fail; + } + + dd->dd_name = name; + dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; + + /* + * Need additional DMA memory because we can't use + * descriptors that cross the 4K page boundary. Assume + * one skipped descriptor per 4K page. + */ + if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) { + u32 ndesc_skipped = + ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len); + u32 dma_len; + + while (ndesc_skipped) { + dma_len = ndesc_skipped * sizeof(struct ath_desc); + dd->dd_desc_len += dma_len; + + ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len); + }; + } + + /* allocate descriptors */ + dd->dd_desc = pci_alloc_consistent(sc->pdev, + dd->dd_desc_len, + &dd->dd_desc_paddr); + if (dd->dd_desc == NULL) { + error = -ENOMEM; + goto fail; + } + ds = dd->dd_desc; + DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n", + __func__, dd->dd_name, ds, (u32) dd->dd_desc_len, + ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); + + /* allocate buffers */ + bsize = sizeof(struct ath_buf) * nbuf; + bf = kmalloc(bsize, GFP_KERNEL); + if (bf == NULL) { + error = -ENOMEM; + goto fail2; + } + memzero(bf, bsize); + dd->dd_bufptr = bf; + + INIT_LIST_HEAD(head); + for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { + bf->bf_desc = ds; + bf->bf_daddr = DS2PHYS(dd, ds); + + if (!(sc->sc_ah->ah_caps.hw_caps & + ATH9K_HW_CAP_4KB_SPLITTRANS)) { + /* + * Skip descriptor addresses which can cause 4KB + * boundary crossing (addr + length) with a 32 dword + * descriptor fetch. + */ + while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) { + ASSERT((caddr_t) bf->bf_desc < + ((caddr_t) dd->dd_desc + + dd->dd_desc_len)); + + ds += ndesc; + bf->bf_desc = ds; + bf->bf_daddr = DS2PHYS(dd, ds); + } + } + list_add_tail(&bf->list, head); + } + return 0; +fail2: + pci_free_consistent(sc->pdev, + dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr); +fail: + memzero(dd, sizeof(*dd)); + return error; +#undef ATH_DESC_4KB_BOUND_CHECK +#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED +#undef DS2PHYS +} + +/* + * Cleanup DMA descriptors + * + * This function will free the DMA block that was allocated for the descriptor + * pool. Since this was allocated as one "chunk", it is freed in the same + * manner. 
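+ * The ath_buf array pointed to by dd_bufptr is freed as well, and the
+ * descdma state is zeroed so the structure can be reused.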
+*/ + +void ath_descdma_cleanup(struct ath_softc *sc, + struct ath_descdma *dd, + struct list_head *head) +{ + /* Free memory associated with descriptors */ + pci_free_consistent(sc->pdev, + dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr); + + INIT_LIST_HEAD(head); + kfree(dd->dd_bufptr); + memzero(dd, sizeof(*dd)); +} + +/*************/ +/* Utilities */ +/*************/ + +void ath_internal_reset(struct ath_softc *sc) +{ + ath_reset_start(sc, 0); + ath_reset(sc); + ath_reset_end(sc, 0); +} + +int ath_get_hal_qnum(u16 queue, struct ath_softc *sc) +{ + int qnum; + + switch (queue) { + case 0: + qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO]; + break; + case 1: + qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI]; + break; + case 2: + qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE]; + break; + case 3: + qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK]; + break; + default: + qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE]; + break; + } + + return qnum; +} + +int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc) +{ + int qnum; + + switch (queue) { + case ATH9K_WME_AC_VO: + qnum = 0; + break; + case ATH9K_WME_AC_VI: + qnum = 1; + break; + case ATH9K_WME_AC_BE: + qnum = 2; + break; + case ATH9K_WME_AC_BK: + qnum = 3; + break; + default: + qnum = -1; + break; + } + + return qnum; +} + + +/* + * Expand time stamp to TSF + * + * Extend 15-bit time stamp from rx descriptor to + * a full 64-bit TSF using the current h/w TSF. +*/ + +u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp) +{ + u64 tsf; + + tsf = ath9k_hw_gettsf64(sc->sc_ah); + if ((tsf & 0x7fff) < rstamp) + tsf -= 0x8000; + return (tsf & ~0x7fff) | rstamp; +} + +/* + * Set Default Antenna + * + * Call into the HAL to set the default antenna to use. Not really valid for + * MIMO technology. +*/ + +void ath_setdefantenna(void *context, u32 antenna) +{ + struct ath_softc *sc = (struct ath_softc *)context; + struct ath_hal *ah = sc->sc_ah; + + /* XXX block beacon interrupts */ + ath9k_hw_setantenna(ah, antenna); + sc->sc_defant = antenna; + sc->sc_rxotherant = 0; +} + +/* + * Set Slot Time + * + * This will wake up the chip if required, and set the slot time for the + * frame (maximum transmit time). Slot time is assumed to be already set + * in the ATH object member sc_slottime +*/ + +void ath_setslottime(struct ath_softc *sc) +{ + ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime); + sc->sc_updateslot = OK; +} diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h new file mode 100644 index 0000000..673b3d8 --- /dev/null +++ b/drivers/net/wireless/ath9k/core.h @@ -0,0 +1,1072 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef CORE_H +#define CORE_H + +#include <linux/version.h> +#include <linux/autoconf.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/errno.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/in.h> +#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/list.h> +#include <asm/byteorder.h> +#include <linux/scatterlist.h> +#include <asm/page.h> +#include <net/mac80211.h> + +#include "ath9k.h" +#include "rc.h" + +struct ath_node; + +/******************/ +/* Utility macros */ +/******************/ + +/* Macro to expand scalars to 64-bit objects */ + +#define ito64(x) (sizeof(x) == 8) ? \ + (((unsigned long long int)(x)) & (0xff)) : \ + (sizeof(x) == 16) ? \ + (((unsigned long long int)(x)) & 0xffff) : \ + ((sizeof(x) == 32) ? \ + (((unsigned long long int)(x)) & 0xffffffff) : \ + (unsigned long long int)(x)) + +/* increment with wrap-around */ +#define INCR(_l, _sz) do { \ + (_l)++; \ + (_l) &= ((_sz) - 1); \ + } while (0) + +/* decrement with wrap-around */ +#define DECR(_l, _sz) do { \ + (_l)--; \ + (_l) &= ((_sz) - 1); \ + } while (0) + +#define A_MAX(a, b) ((a) > (b) ? (a) : (b)) + +#define ASSERT(exp) do { \ + if (unlikely(!(exp))) { \ + BUG(); \ + } \ + } while (0) + +/* XXX: remove */ +#define memzero(_buf, _len) memset(_buf, 0, _len) + +#define get_dma_mem_context(var, field) (&((var)->field)) +#define copy_dma_mem_context(dst, src) (*dst = *src) + +#define ATH9K_BH_STATUS_INTACT 0 +#define ATH9K_BH_STATUS_CHANGE 1 + +#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i)) + +static inline unsigned long get_timestamp(void) +{ + return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ); +} + +/*************/ +/* Debugging */ +/*************/ + +enum ATH_DEBUG { + ATH_DBG_RESET = 0x00000001, + ATH_DBG_PHY_IO = 0x00000002, + ATH_DBG_REG_IO = 0x00000004, + ATH_DBG_QUEUE = 0x00000008, + ATH_DBG_EEPROM = 0x00000010, + ATH_DBG_NF_CAL = 0x00000020, + ATH_DBG_CALIBRATE = 0x00000040, + ATH_DBG_CHANNEL = 0x00000080, + ATH_DBG_INTERRUPT = 0x00000100, + ATH_DBG_REGULATORY = 0x00000200, + ATH_DBG_ANI = 0x00000400, + ATH_DBG_POWER_MGMT = 0x00000800, + ATH_DBG_XMIT = 0x00001000, + ATH_DBG_BEACON = 0x00002000, + ATH_DBG_RATE = 0x00004000, + ATH_DBG_CONFIG = 0x00008000, + ATH_DBG_KEYCACHE = 0x00010000, + ATH_DBG_AGGR = 0x00020000, + ATH_DBG_FATAL = 0x00040000, + ATH_DBG_ANY = 0xffffffff +}; + +#define DBG_DEFAULT (ATH_DBG_FATAL) + +#define DPRINTF(sc, _m, _fmt, ...) 
do { \ + if (sc->sc_debug & (_m)) \ + printk(_fmt , ##__VA_ARGS__); \ + } while (0) + +/***************************/ +/* Load-time Configuration */ +/***************************/ + +/* Per-instance load-time (note: NOT run-time) configurations + * for Atheros Device */ +struct ath_config { + u32 ath_aggr_prot; + u16 txpowlimit; + u16 txpowlimit_override; + u8 cabqReadytime; /* Cabq Readytime % */ + u8 swBeaconProcess; /* Process received beacons in SW (vs HW) */ +}; + +/***********************/ +/* Chainmask Selection */ +/***********************/ + +#define ATH_CHAINMASK_SEL_TIMEOUT 6000 +/* Default - Number of last RSSI values that is used for + * chainmask selection */ +#define ATH_CHAINMASK_SEL_RSSI_CNT 10 +/* Means use 3x3 chainmask instead of configured chainmask */ +#define ATH_CHAINMASK_SEL_3X3 7 +/* Default - Rssi threshold below which we have to switch to 3x3 */ +#define ATH_CHAINMASK_SEL_UP_RSSI_THRES 20 +/* Default - Rssi threshold above which we have to switch to + * user configured values */ +#define ATH_CHAINMASK_SEL_DOWN_RSSI_THRES 35 +/* Struct to store the chainmask select related info */ +struct ath_chainmask_sel { + struct timer_list timer; + int cur_tx_mask; /* user configured or 3x3 */ + int cur_rx_mask; /* user configured or 3x3 */ + int tx_avgrssi; + u8 switch_allowed:1, /* timer will set this */ + cm_sel_enabled : 1; +}; + +int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an); +void ath_update_chainmask(struct ath_softc *sc, int is_ht); + +/*************************/ +/* Descriptor Management */ +/*************************/ + +/* Number of descriptors per buffer. The only case where we see skbuff +chains is due to FF aggregation in the driver. */ +#define ATH_TXDESC 1 +/* if there's more fragment for this MSDU */ +#define ATH_BF_MORE_MPDU 1 +#define ATH_TXBUF_RESET(_bf) do { \ + (_bf)->bf_status = 0; \ + (_bf)->bf_lastbf = NULL; \ + (_bf)->bf_lastfrm = NULL; \ + (_bf)->bf_next = NULL; \ + memzero(&((_bf)->bf_state), \ + sizeof(struct ath_buf_state)); \ + } while (0) + +struct ath_buf_state { + int bfs_nframes; /* # frames in aggregate */ + u16 bfs_al; /* length of aggregate */ + u16 bfs_frmlen; /* length of frame */ + int bfs_seqno; /* sequence number */ + int bfs_tidno; /* tid of this frame */ + int bfs_retries; /* current retries */ + struct ath_rc_series bfs_rcs[4]; /* rate series */ + u8 bfs_isdata:1; /* is a data frame/aggregate */ + u8 bfs_isaggr:1; /* is an aggregate */ + u8 bfs_isampdu:1; /* is an a-mpdu, aggregate or not */ + u8 bfs_ht:1; /* is an HT frame */ + u8 bfs_isretried:1; /* is retried */ + u8 bfs_isxretried:1; /* is excessive retried */ + u8 bfs_shpreamble:1; /* is short preamble */ + u8 bfs_isbar:1; /* is a BAR */ + u8 bfs_ispspoll:1; /* is a PS-Poll */ + u8 bfs_aggrburst:1; /* is a aggr burst */ + u8 bfs_calcairtime:1; /* requests airtime be calculated + when set for tx frame */ + int bfs_rifsburst_elem; /* RIFS burst/bar */ + int bfs_nrifsubframes; /* # of elements in burst */ + /* key type use to encrypt this frame */ + enum ath9k_key_type bfs_keytype; +}; + +#define bf_nframes bf_state.bfs_nframes +#define bf_al bf_state.bfs_al +#define bf_frmlen bf_state.bfs_frmlen +#define bf_retries bf_state.bfs_retries +#define bf_seqno bf_state.bfs_seqno +#define bf_tidno bf_state.bfs_tidno +#define bf_rcs bf_state.bfs_rcs +#define bf_isdata bf_state.bfs_isdata +#define bf_isaggr bf_state.bfs_isaggr +#define bf_isampdu bf_state.bfs_isampdu +#define bf_ht bf_state.bfs_ht +#define bf_isretried bf_state.bfs_isretried +#define 
bf_isxretried bf_state.bfs_isxretried +#define bf_shpreamble bf_state.bfs_shpreamble +#define bf_rifsburst_elem bf_state.bfs_rifsburst_elem +#define bf_nrifsubframes bf_state.bfs_nrifsubframes +#define bf_keytype bf_state.bfs_keytype +#define bf_isbar bf_state.bfs_isbar +#define bf_ispspoll bf_state.bfs_ispspoll +#define bf_aggrburst bf_state.bfs_aggrburst +#define bf_calcairtime bf_state.bfs_calcairtime + +/* + * Abstraction of a contiguous buffer to transmit/receive. There is only + * a single hw descriptor encapsulated here. + */ + +struct ath_buf { + struct list_head list; + struct list_head *last; + struct ath_buf *bf_lastbf; /* last buf of this unit (a frame or + an aggregate) */ + struct ath_buf *bf_lastfrm; /* last buf of this frame */ + struct ath_buf *bf_next; /* next subframe in the aggregate */ + struct ath_buf *bf_rifslast; /* last buf for RIFS burst */ + void *bf_mpdu; /* enclosing frame structure */ + void *bf_node; /* pointer to the node */ + struct ath_desc *bf_desc; /* virtual addr of desc */ + dma_addr_t bf_daddr; /* physical addr of desc */ + dma_addr_t bf_buf_addr; /* physical addr of data buffer */ + u32 bf_status; + u16 bf_flags; /* tx descriptor flags */ + struct ath_buf_state bf_state; /* buffer state */ + dma_addr_t bf_dmacontext; +}; + +/* + * reset the rx buffer. + * any new fields added to the athbuf and require + * reset need to be added to this macro. + * currently bf_status is the only one requires that + * requires reset. + */ +#define ATH_RXBUF_RESET(_bf) ((_bf)->bf_status = 0) + +/* hw processing complete, desc processed by hal */ +#define ATH_BUFSTATUS_DONE 0x00000001 +/* hw processing complete, desc hold for hw */ +#define ATH_BUFSTATUS_STALE 0x00000002 +/* Rx-only: OS is done with this packet and it's ok to queued it to hw */ +#define ATH_BUFSTATUS_FREE 0x00000004 + +/* DMA state for tx/rx descriptors */ + +struct ath_descdma { + const char *dd_name; + struct ath_desc *dd_desc; /* descriptors */ + dma_addr_t dd_desc_paddr; /* physical addr of dd_desc */ + u32 dd_desc_len; /* size of dd_desc */ + struct ath_buf *dd_bufptr; /* associated buffers */ + dma_addr_t dd_dmacontext; +}; + +/* Abstraction of a received RX MPDU/MMPDU, or a RX fragment */ + +struct ath_rx_context { + struct ath_buf *ctx_rxbuf; /* associated ath_buf for rx */ +}; +#define ATH_RX_CONTEXT(skb) ((struct ath_rx_context *)skb->cb) + +int ath_descdma_setup(struct ath_softc *sc, + struct ath_descdma *dd, + struct list_head *head, + const char *name, + int nbuf, + int ndesc); +int ath_desc_alloc(struct ath_softc *sc); +void ath_desc_free(struct ath_softc *sc); +void ath_descdma_cleanup(struct ath_softc *sc, + struct ath_descdma *dd, + struct list_head *head); + +/******/ +/* RX */ +/******/ + +#define ATH_MAX_ANTENNA 3 +#define ATH_RXBUF 512 +#define ATH_RX_TIMEOUT 40 /* 40 milliseconds */ +#define WME_NUM_TID 16 +#define IEEE80211_BAR_CTL_TID_M 0xF000 /* tid mask */ +#define IEEE80211_BAR_CTL_TID_S 2 /* tid shift */ + +enum ATH_RX_TYPE { + ATH_RX_NON_CONSUMED = 0, + ATH_RX_CONSUMED +}; + +/* per frame rx status block */ +struct ath_recv_status { + u64 tsf; /* mac tsf */ + int8_t rssi; /* RSSI (noise floor ajusted) */ + int8_t rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor ajusted) */ + int8_t rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor ajusted) */ + int8_t abs_rssi; /* absolute RSSI */ + u8 rateieee; /* data rate received (IEEE rate code) */ + u8 ratecode; /* phy rate code */ + int rateKbps; /* data rate received (Kbps) */ + int antenna; /* rx antenna */ + int flags; /* status of 
associated skb */ +#define ATH_RX_FCS_ERROR 0x01 +#define ATH_RX_MIC_ERROR 0x02 +#define ATH_RX_DECRYPT_ERROR 0x04 +#define ATH_RX_RSSI_VALID 0x08 +/* if any of ctl,extn chainrssis are valid */ +#define ATH_RX_CHAIN_RSSI_VALID 0x10 +/* if extn chain rssis are valid */ +#define ATH_RX_RSSI_EXTN_VALID 0x20 +/* set if 40Mhz, clear if 20Mhz */ +#define ATH_RX_40MHZ 0x40 +/* set if short GI, clear if full GI */ +#define ATH_RX_SHORT_GI 0x80 +}; + +struct ath_rxbuf { + struct sk_buff *rx_wbuf; + unsigned long rx_time; /* system time when received */ + struct ath_recv_status rx_status; /* cached rx status */ +}; + +/* Per-TID aggregate receiver state for a node */ +struct ath_arx_tid { + struct ath_node *an; + struct ath_rxbuf *rxbuf; /* re-ordering buffer */ + struct timer_list timer; + spinlock_t tidlock; + int baw_head; /* seq_next at head */ + int baw_tail; /* tail of block-ack window */ + int seq_reset; /* need to reset start sequence */ + int addba_exchangecomplete; + u16 seq_next; /* next expected sequence */ + u16 baw_size; /* block-ack window size */ +}; + +/* Per-node receiver aggregate state */ +struct ath_arx { + struct ath_arx_tid tid[WME_NUM_TID]; +}; + +int ath_startrecv(struct ath_softc *sc); +bool ath_stoprecv(struct ath_softc *sc); +void ath_flushrecv(struct ath_softc *sc); +u32 ath_calcrxfilter(struct ath_softc *sc); +void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an); +void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an); +void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an); +void ath_handle_rx_intr(struct ath_softc *sc); +int ath_rx_init(struct ath_softc *sc, int nbufs); +void ath_rx_cleanup(struct ath_softc *sc); +int ath_rx_tasklet(struct ath_softc *sc, int flush); +int ath_rx_input(struct ath_softc *sc, + struct ath_node *node, + int is_ampdu, + struct sk_buff *skb, + struct ath_recv_status *rx_status, + enum ATH_RX_TYPE *status); +int ath__rx_indicate(struct ath_softc *sc, + struct sk_buff *skb, + struct ath_recv_status *status, + u16 keyix); +int ath_rx_subframe(struct ath_node *an, struct sk_buff *skb, + struct ath_recv_status *status); + +/******/ +/* TX */ +/******/ + +#define ATH_FRAG_PER_MSDU 1 +#define ATH_TXBUF (512/ATH_FRAG_PER_MSDU) +/* max number of transmit attempts (tries) */ +#define ATH_TXMAXTRY 13 +/* max number of 11n transmit attempts (tries) */ +#define ATH_11N_TXMAXTRY 10 +/* max number of tries for management and control frames */ +#define ATH_MGT_TXMAXTRY 4 +#define WME_BA_BMP_SIZE 64 +#define WME_MAX_BA WME_BA_BMP_SIZE +#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA) +#define TID_TO_WME_AC(_tid) \ + ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \ + (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \ + (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \ + WME_AC_VO) + + +/* Wireless Multimedia Extension Defines */ +#define WME_AC_BE 0 /* best effort */ +#define WME_AC_BK 1 /* background */ +#define WME_AC_VI 2 /* video */ +#define WME_AC_VO 3 /* voice */ +#define WME_NUM_AC 4 + +enum ATH_SM_PWRSAV{ + ATH_SM_ENABLE, + ATH_SM_PWRSAV_STATIC, + ATH_SM_PWRSAV_DYNAMIC, +}; + +/* + * Data transmit queue state. One of these exists for each + * hardware transmit queue. Packets sent to us from above + * are assigned to queues based on their priority. Not all + * devices support a complete set of hardware transmit queues. + * For those devices the array sc_ac2q will map multiple + * priorities to fewer hardware queues (typically all to one + * hardware queue). 
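+ * In this driver that mapping lives in the sc_haltype2q table and is
+ * queried through ath_get_hal_qnum().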
+ */ +struct ath_txq { + u32 axq_qnum; /* hardware q number */ + u32 *axq_link; /* link ptr in last TX desc */ + struct list_head axq_q; /* transmit queue */ + spinlock_t axq_lock; + unsigned long axq_lockflags; /* intr state when must cli */ + u32 axq_depth; /* queue depth */ + u8 axq_aggr_depth; /* aggregates queued */ + u32 axq_totalqueued; /* total ever queued */ + + /* count to determine if descriptor should generate int on this txq. */ + u32 axq_intrcnt; + + bool stopped; /* Is mac80211 queue stopped ? */ + struct ath_buf *axq_linkbuf; /* virtual addr of last buffer*/ + + /* first desc of the last descriptor that contains CTS */ + struct ath_desc *axq_lastdsWithCTS; + + /* final desc of the gating desc that determines whether + lastdsWithCTS has been DMA'ed or not */ + struct ath_desc *axq_gatingds; + + struct list_head axq_acq; +}; + +/* per TID aggregate tx state for a destination */ +struct ath_atx_tid { + struct list_head list; /* round-robin tid entry */ + struct list_head buf_q; /* pending buffers */ + struct ath_node *an; + struct ath_atx_ac *ac; + struct ath_buf *tx_buf[ATH_TID_MAX_BUFS]; /* active tx frames */ + u16 seq_start; + u16 seq_next; + u16 baw_size; + int tidno; + int baw_head; /* first un-acked tx buffer */ + int baw_tail; /* next unused tx buffer slot */ + int sched; + int paused; + int cleanup_inprogress; + u32 addba_exchangecomplete:1; + int32_t addba_exchangeinprogress; + int addba_exchangeattempts; +}; + +/* per access-category aggregate tx state for a destination */ +struct ath_atx_ac { + int sched; /* dest-ac is scheduled */ + int qnum; /* H/W queue number associated + with this AC */ + struct list_head list; /* round-robin txq entry */ + struct list_head tid_q; /* queue of TIDs with buffers */ +}; + +/* per dest tx state */ +struct ath_atx { + struct ath_atx_tid tid[WME_NUM_TID]; + struct ath_atx_ac ac[WME_NUM_AC]; +}; + +/* per-frame tx control block */ +struct ath_tx_control { + struct ath_node *an; + int if_id; + int qnum; + u32 ht:1; + u32 ps:1; + u32 use_minrate:1; + enum ath9k_pkt_type atype; + enum ath9k_key_type keytype; + u32 flags; + u16 seqno; + u16 tidno; + u16 txpower; + u16 frmlen; + u32 keyix; + int min_rate; + int mcast_rate; + u16 nextfraglen; + struct ath_softc *dev; + dma_addr_t dmacontext; +}; + +/* per frame tx status block */ +struct ath_xmit_status { + int retries; /* number of retries to successufully + transmit this frame */ + int flags; /* status of transmit */ +#define ATH_TX_ERROR 0x01 +#define ATH_TX_XRETRY 0x02 +#define ATH_TX_BAR 0x04 +}; + +struct ath_tx_stat { + int rssi; /* RSSI (noise floor ajusted) */ + int rssictl[ATH_MAX_ANTENNA]; /* RSSI (noise floor ajusted) */ + int rssiextn[ATH_MAX_ANTENNA]; /* RSSI (noise floor ajusted) */ + int rateieee; /* data rate xmitted (IEEE rate code) */ + int rateKbps; /* data rate xmitted (Kbps) */ + int ratecode; /* phy rate code */ + int flags; /* validity flags */ +/* if any of ctl,extn chain rssis are valid */ +#define ATH_TX_CHAIN_RSSI_VALID 0x01 +/* if extn chain rssis are valid */ +#define ATH_TX_RSSI_EXTN_VALID 0x02 + u32 airtime; /* time on air per final tx rate */ +}; + +struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); +void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); +int ath_tx_setup(struct ath_softc *sc, int haltype); +void ath_draintxq(struct ath_softc *sc, bool retry_tx); +void ath_tx_draintxq(struct ath_softc *sc, + struct ath_txq *txq, bool retry_tx); +void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); +void 
ath_tx_node_cleanup(struct ath_softc *sc, + struct ath_node *an, bool bh_flag); +void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an); +void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq); +int ath_tx_init(struct ath_softc *sc, int nbufs); +int ath_tx_cleanup(struct ath_softc *sc); +int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype); +int ath_txq_update(struct ath_softc *sc, int qnum, + struct ath9k_tx_queue_info *q); +int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb); +void ath_tx_tasklet(struct ath_softc *sc); +u32 ath_txq_depth(struct ath_softc *sc, int qnum); +u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum); +void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth); +void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, + struct ath_xmit_status *tx_status, struct ath_node *an); + +/**********************/ +/* Node / Aggregation */ +/**********************/ + +/* indicates the node is clened up */ +#define ATH_NODE_CLEAN 0x1 +/* indicates the node is 80211 power save */ +#define ATH_NODE_PWRSAVE 0x2 + +#define ADDBA_TIMEOUT 200 /* 200 milliseconds */ +#define ADDBA_EXCHANGE_ATTEMPTS 10 +#define ATH_AGGR_DELIM_SZ 4 /* delimiter size */ +#define ATH_AGGR_MINPLEN 256 /* in bytes, minimum packet length */ +/* number of delimiters for encryption padding */ +#define ATH_AGGR_ENCRYPTDELIM 10 +/* minimum h/w qdepth to be sustained to maximize aggregation */ +#define ATH_AGGR_MIN_QDEPTH 2 +#define ATH_AMPDU_SUBFRAME_DEFAULT 32 +#define IEEE80211_SEQ_SEQ_SHIFT 4 +#define IEEE80211_SEQ_MAX 4096 +#define IEEE80211_MIN_AMPDU_BUF 0x8 + +/* return whether a bit at index _n in bitmap _bm is set + * _sz is the size of the bitmap */ +#define ATH_BA_ISSET(_bm, _n) (((_n) < (WME_BA_BMP_SIZE)) && \ + ((_bm)[(_n) >> 5] & (1 << ((_n) & 31)))) + +/* return block-ack bitmap index given sequence and starting sequence */ +#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1)) + +/* returns delimiter padding required given the packet length */ +#define ATH_AGGR_GET_NDELIM(_len) \ + (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? 
\ + (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2) + +#define BAW_WITHIN(_start, _bawsz, _seqno) \ + ((((_seqno) - (_start)) & 4095) < (_bawsz)) + +#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum) +#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low) +#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA) +#define ATH_AN_2_TID(_an, _tidno) (&(_an)->an_aggr.tx.tid[(_tidno)]) + +enum ATH_AGGR_STATUS { + ATH_AGGR_DONE, + ATH_AGGR_BAW_CLOSED, + ATH_AGGR_LIMITED, + ATH_AGGR_SHORTPKT, + ATH_AGGR_8K_LIMITED, +}; + +enum ATH_AGGR_CHECK { + AGGR_NOT_REQUIRED, + AGGR_REQUIRED, + AGGR_CLEANUP_PROGRESS, + AGGR_EXCHANGE_PROGRESS, + AGGR_EXCHANGE_DONE +}; + +struct aggr_rifs_param { + int param_max_frames; + int param_max_len; + int param_rl; + int param_al; + struct ath_rc_series *param_rcs; +}; + +/* Per-node aggregation state */ +struct ath_node_aggr { + struct ath_atx tx; /* node transmit state */ + struct ath_arx rx; /* node receive state */ +}; + +/* driver-specific node state */ +struct ath_node { + struct list_head list; + struct ath_softc *an_sc; + atomic_t an_refcnt; + struct ath_chainmask_sel an_chainmask_sel; + struct ath_node_aggr an_aggr; + u8 an_smmode; /* SM Power save mode */ + u8 an_flags; + u8 an_addr[ETH_ALEN]; +}; + +void ath_tx_resume_tid(struct ath_softc *sc, + struct ath_atx_tid *tid); +enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc, + struct ath_node *an, u8 tidno); +void ath_tx_aggr_teardown(struct ath_softc *sc, + struct ath_node *an, u8 tidno); +void ath_rx_aggr_teardown(struct ath_softc *sc, + struct ath_node *an, u8 tidno); +int ath_rx_aggr_start(struct ath_softc *sc, + const u8 *addr, + u16 tid, + u16 *ssn); +int ath_rx_aggr_stop(struct ath_softc *sc, + const u8 *addr, + u16 tid); +int ath_tx_aggr_start(struct ath_softc *sc, + const u8 *addr, + u16 tid, + u16 *ssn); +int ath_tx_aggr_stop(struct ath_softc *sc, + const u8 *addr, + u16 tid); +void ath_newassoc(struct ath_softc *sc, + struct ath_node *node, int isnew, int isuapsd); +struct ath_node *ath_node_attach(struct ath_softc *sc, + u8 addr[ETH_ALEN], int if_id); +void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag); +struct ath_node *ath_node_get(struct ath_softc *sc, u8 addr[ETH_ALEN]); +void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag); +struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr); + +/*******************/ +/* Beacon Handling */ +/*******************/ + +/* + * Regardless of the number of beacons we stagger, (i.e. regardless of the + * number of BSSIDs) if a given beacon does not go out even after waiting this + * number of beacon intervals, the game's up. + */ +#define BSTUCK_THRESH (9 * ATH_BCBUF) +#define ATH_BCBUF 4 /* number of beacon buffers */ +#define ATH_DEFAULT_BINTVAL 100 /* default beacon interval in TU */ +#define ATH_DEFAULT_BMISS_LIMIT 10 +#define ATH_BEACON_AIFS_DEFAULT 0 /* Default aifs for ap beacon q */ +#define ATH_BEACON_CWMIN_DEFAULT 0 /* Default cwmin for ap beacon q */ +#define ATH_BEACON_CWMAX_DEFAULT 0 /* Default cwmax for ap beacon q */ +#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) + +/* beacon configuration */ +struct ath_beacon_config { + u16 beacon_interval; + u16 listen_interval; + u16 dtim_period; + u16 bmiss_timeout; + u8 dtim_count; + u8 tim_offset; + union { + u64 last_tsf; + u8 last_tstamp[8]; + } u; /* last received beacon/probe response timestamp of this BSS. 
*/ +}; + +/* offsets in a beacon frame for + * quick acess of beacon content by low-level driver */ +struct ath_beacon_offset { + u8 *bo_tim; /* start of atim/dtim */ +}; + +void ath9k_beacon_tasklet(unsigned long data); +void ath_beacon_config(struct ath_softc *sc, int if_id); +int ath_beaconq_setup(struct ath_hal *ah); +int ath_beacon_alloc(struct ath_softc *sc, int if_id); +void ath_bstuck_process(struct ath_softc *sc); +void ath_beacon_tasklet(struct ath_softc *sc, int *needmark); +void ath_beacon_free(struct ath_softc *sc); +void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp); +void ath_beacon_sync(struct ath_softc *sc, int if_id); +void ath_update_beacon_info(struct ath_softc *sc, int avgbrssi); +void ath_get_beaconconfig(struct ath_softc *sc, + int if_id, + struct ath_beacon_config *conf); +int ath_update_beacon(struct ath_softc *sc, + int if_id, + struct ath_beacon_offset *bo, + struct sk_buff *skb, + int mcast); +/********/ +/* VAPs */ +/********/ + +/* + * Define the scheme that we select MAC address for multiple + * BSS on the same radio. The very first VAP will just use the MAC + * address from the EEPROM. For the next 3 VAPs, we set the + * U/L bit (bit 1) in MAC address, and use the next two bits as the + * index of the VAP. + */ + +#define ATH_SET_VAP_BSSID_MASK(bssid_mask) \ + ((bssid_mask)[0] &= ~(((ATH_BCBUF-1)<<2)|0x02)) + +/* VAP configuration (from protocol layer) */ +struct ath_vap_config { + u32 av_fixed_rateset; + u32 av_fixed_retryset; +}; + +/* driver-specific vap state */ +struct ath_vap { + struct ieee80211_vif *av_if_data; + enum ath9k_opmode av_opmode; /* VAP operational mode */ + struct ath_buf *av_bcbuf; /* beacon buffer */ + struct ath_beacon_offset av_boff; /* dynamic update state */ + struct ath_tx_control av_btxctl; /* txctl information for beacon */ + int av_bslot; /* beacon slot index */ + struct ath_txq av_mcastq; /* multicast transmit queue */ + struct ath_vap_config av_config;/* vap configuration parameters*/ + struct ath_rate_node *rc_node; +}; + +int ath_vap_attach(struct ath_softc *sc, + int if_id, + struct ieee80211_vif *if_data, + enum ath9k_opmode opmode); +int ath_vap_detach(struct ath_softc *sc, int if_id); +int ath_vap_config(struct ath_softc *sc, + int if_id, struct ath_vap_config *if_config); +int ath_vap_listen(struct ath_softc *sc, int if_id); + +/*********************/ +/* Antenna diversity */ +/*********************/ + +#define ATH_ANT_DIV_MAX_CFG 2 +#define ATH_ANT_DIV_MIN_IDLE_US 1000000 /* us */ +#define ATH_ANT_DIV_MIN_SCAN_US 50000 /* us */ + +enum ATH_ANT_DIV_STATE{ + ATH_ANT_DIV_IDLE, + ATH_ANT_DIV_SCAN, /* evaluating antenna */ +}; + +struct ath_antdiv { + struct ath_softc *antdiv_sc; + u8 antdiv_start; + enum ATH_ANT_DIV_STATE antdiv_state; + u8 antdiv_num_antcfg; + u8 antdiv_curcfg; + u8 antdiv_bestcfg; + int32_t antdivf_rssitrig; + int32_t antdiv_lastbrssi[ATH_ANT_DIV_MAX_CFG]; + u64 antdiv_lastbtsf[ATH_ANT_DIV_MAX_CFG]; + u64 antdiv_laststatetsf; + u8 antdiv_bssid[ETH_ALEN]; +}; + +void ath_slow_ant_div_init(struct ath_antdiv *antdiv, + struct ath_softc *sc, int32_t rssitrig); +void ath_slow_ant_div_start(struct ath_antdiv *antdiv, + u8 num_antcfg, + const u8 *bssid); +void ath_slow_ant_div_stop(struct ath_antdiv *antdiv); +void ath_slow_ant_div(struct ath_antdiv *antdiv, + struct ieee80211_hdr *wh, + struct ath_rx_status *rx_stats); +void ath_setdefantenna(void *sc, u32 antenna); + +/********************/ +/* Main driver core */ +/********************/ + +/* + * Default cache line size, in bytes. 
+ * Used when PCI device not fully initialized by bootrom/BIOS +*/ +#define DEFAULT_CACHELINE 32 +#define ATH_DEFAULT_NOISE_FLOOR -95 +#define ATH_REGCLASSIDS_MAX 10 +#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ +#define ATH_PREAMBLE_SHORT (1<<0) +#define ATH_PROTECT_ENABLE (1<<1) +#define ATH_MAX_SW_RETRIES 10 +/* Num farmes difference in tx to flip default recv */ +#define ATH_ANTENNA_DIFF 2 +#define ATH_CHAN_MAX 255 +#define IEEE80211_WEP_NKID 4 /* number of key ids */ +#define IEEE80211_RATE_VAL 0x7f +/* + * The key cache is used for h/w cipher state and also for + * tracking station state such as the current tx antenna. + * We also setup a mapping table between key cache slot indices + * and station state to short-circuit node lookups on rx. + * Different parts have different size key caches. We handle + * up to ATH_KEYMAX entries (could dynamically allocate state). + */ +#define ATH_KEYMAX 128 /* max key cache size we handle */ + +#define RESET_RETRY_TXQ 0x00000001 +#define ATH_IF_ID_ANY 0xff + +#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ + +#define RSSI_LPF_THRESHOLD -20 +#define ATH_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */ +#define ATH_RATE_DUMMY_MARKER 0 +#define ATH_RSSI_LPF_LEN 10 +#define ATH_RSSI_DUMMY_MARKER 0x127 + +#define ATH_EP_MUL(x, mul) ((x) * (mul)) +#define ATH_EP_RND(x, mul) \ + ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) +#define ATH_RSSI_OUT(x) \ + (((x) != ATH_RSSI_DUMMY_MARKER) ? \ + (ATH_EP_RND((x), ATH_RSSI_EP_MULTIPLIER)) : ATH_RSSI_DUMMY_MARKER) +#define ATH_RSSI_IN(x) \ + (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER)) +#define ATH_LPF_RSSI(x, y, len) \ + ((x != ATH_RSSI_DUMMY_MARKER) ? \ + (((x) * ((len) - 1) + (y)) / (len)) : (y)) +#define ATH_RSSI_LPF(x, y) do { \ + if ((y) >= RSSI_LPF_THRESHOLD) \ + x = ATH_LPF_RSSI((x), \ + ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \ + } while (0) + + +enum PROT_MODE { + PROT_M_NONE = 0, + PROT_M_RTSCTS, + PROT_M_CTSONLY +}; + +enum RATE_TYPE { + NORMAL_RATE = 0, + HALF_RATE, + QUARTER_RATE +}; + +struct ath_ht_info { + enum ath9k_ht_macmode tx_chan_width; + u16 maxampdu; + u8 mpdudensity; + u8 ext_chan_offset; +}; + +struct ath_softc { + struct ieee80211_hw *hw; + struct pci_dev *pdev; + void __iomem *mem; + struct tasklet_struct intr_tq; + struct tasklet_struct bcon_tasklet; + struct ath_config sc_config; /* load-time parameters */ + int sc_debug; + struct ath_hal *sc_ah; + struct ath_rate_softc *sc_rc; /* tx rate control support */ + u32 sc_intrstatus; + enum ath9k_opmode sc_opmode; /* current operating mode */ + + u8 sc_invalid; /* being detached */ + u8 sc_beacons; /* beacons running */ + u8 sc_scanning; /* scanning active */ + u8 sc_txaggr; /* enable 11n tx aggregation */ + u8 sc_rxaggr; /* enable 11n rx aggregation */ + u8 sc_update_chainmask; /* change chain mask */ + u8 sc_full_reset; /* force full reset */ + enum wireless_mode sc_curmode; /* current phy mode */ + u16 sc_curtxpow; + u16 sc_curaid; + u8 sc_curbssid[ETH_ALEN]; + u8 sc_myaddr[ETH_ALEN]; + enum PROT_MODE sc_protmode; + u8 sc_mcastantenna; + u8 sc_txantenna; /* data tx antenna (fixed or auto) */ + u8 sc_nbcnvaps; /* # of vaps sending beacons */ + u16 sc_nvaps; /* # of active virtual ap's */ + struct ath_vap *sc_vaps[ATH_BCBUF]; + enum ath9k_int sc_imask; + u8 sc_bssidmask[ETH_ALEN]; + u8 sc_defant; /* current default antenna */ + u8 sc_rxotherant; /* rx's on non-default antenna */ + u16 sc_cachelsz; + int sc_slotupdate; /* slot to next advance fsm */ + int sc_slottime; + u8 sc_noreset; + int 
sc_bslot[ATH_BCBUF]; + struct ath9k_node_stats sc_halstats; /* station-mode rssi stats */ + struct list_head node_list; + struct ath_ht_info sc_ht_info; + int16_t sc_noise_floor; /* signal noise floor in dBm */ + enum ath9k_ht_extprotspacing sc_ht_extprotspacing; + u8 sc_tx_chainmask; + u8 sc_rx_chainmask; + u8 sc_rxchaindetect_ref; + u8 sc_rxchaindetect_thresh5GHz; + u8 sc_rxchaindetect_thresh2GHz; + u8 sc_rxchaindetect_delta5GHz; + u8 sc_rxchaindetect_delta2GHz; + u32 sc_rtsaggrlimit; /* Chipset specific aggr limit */ + u32 sc_flags; +#ifdef CONFIG_SLOW_ANT_DIV + struct ath_antdiv sc_antdiv; +#endif + enum { + OK, /* no change needed */ + UPDATE, /* update pending */ + COMMIT /* beacon sent, commit change */ + } sc_updateslot; /* slot time update fsm */ + + /* Crypto */ + u32 sc_keymax; /* size of key cache */ + DECLARE_BITMAP(sc_keymap, ATH_KEYMAX); /* key use bit map */ + u8 sc_splitmic; /* split TKIP MIC keys */ + int sc_keytype; + + /* RX */ + struct list_head sc_rxbuf; + struct ath_descdma sc_rxdma; + int sc_rxbufsize; /* rx size based on mtu */ + u32 *sc_rxlink; /* link ptr in last RX desc */ + u32 sc_rxflush; /* rx flush in progress */ + u64 sc_lastrx; /* tsf of last rx'd frame */ + + /* TX */ + struct list_head sc_txbuf; + struct ath_txq sc_txq[ATH9K_NUM_TX_QUEUES]; + struct ath_descdma sc_txdma; + u32 sc_txqsetup; + u32 sc_txintrperiod; /* tx interrupt batching */ + int sc_haltype2q[ATH9K_WME_AC_VO+1]; /* HAL WME AC -> h/w qnum */ + u32 sc_ant_tx[8]; /* recent tx frames/antenna */ + + /* Beacon */ + struct ath9k_tx_queue_info sc_beacon_qi; + struct ath_descdma sc_bdma; + struct ath_txq *sc_cabq; + struct list_head sc_bbuf; + u32 sc_bhalq; + u32 sc_bmisscount; + u32 ast_be_xmit; /* beacons transmitted */ + + /* Rate */ + struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX]; + const struct ath9k_rate_table *sc_currates; + u8 sc_rixmap[256]; /* IEEE to h/w rate table ix */ + u8 sc_protrix; /* protection rate index */ + struct { + u32 rateKbps; /* transfer rate in kbs */ + u8 ieeerate; /* IEEE rate */ + } sc_hwmap[256]; /* h/w rate ix mappings */ + + /* Channel, Band */ + struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX]; + struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; + struct ath9k_channel sc_curchan; + + /* Locks */ + spinlock_t sc_rxflushlock; + spinlock_t sc_rxbuflock; + spinlock_t sc_txbuflock; + spinlock_t sc_resetlock; + spinlock_t node_lock; +}; + +int ath_init(u16 devid, struct ath_softc *sc); +void ath_deinit(struct ath_softc *sc); +int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan); +int ath_suspend(struct ath_softc *sc); +irqreturn_t ath_isr(int irq, void *dev); +int ath_reset(struct ath_softc *sc); +void ath_scan_start(struct ath_softc *sc); +void ath_scan_end(struct ath_softc *sc); +int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan); +void ath_setup_rate(struct ath_softc *sc, + enum wireless_mode wMode, + enum RATE_TYPE type, + const struct ath9k_rate_table *rt); + +/*********************/ +/* Utility Functions */ +/*********************/ + +void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot); +int ath_keyset(struct ath_softc *sc, + u16 keyix, + struct ath9k_keyval *hk, + const u8 mac[ETH_ALEN]); +int ath_get_hal_qnum(u16 queue, struct ath_softc *sc); +int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); +void ath_setslottime(struct ath_softc *sc); +void ath_update_txpow(struct ath_softc *sc); +int ath_cabq_update(struct ath_softc *); +void 
ath_get_currentCountry(struct ath_softc *sc, + struct ath9k_country_entry *ctry); +u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp); +void ath_internal_reset(struct ath_softc *sc); +u32 ath_chan2flags(struct ieee80211_channel *chan, struct ath_softc *sc); +dma_addr_t ath_skb_map_single(struct ath_softc *sc, + struct sk_buff *skb, + int direction, + dma_addr_t *pa); +void ath_skb_unmap_single(struct ath_softc *sc, + struct sk_buff *skb, + int direction, + dma_addr_t *pa); +void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]); +enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc); + +#endif /* CORE_H */ diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c new file mode 100644 index 0000000..bde162f --- /dev/null +++ b/drivers/net/wireless/ath9k/hw.c @@ -0,0 +1,8571 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/io.h> +#include <asm/unaligned.h> + +#include "core.h" +#include "hw.h" +#include "reg.h" +#include "phy.h" +#include "initvals.h" + +static void ath9k_hw_iqcal_collect(struct ath_hal *ah); +static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains); +static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah); +static void ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, + u8 numChains); +static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah); +static void ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, + u8 numChains); + +static const u8 CLOCK_RATE[] = { 40, 80, 22, 44, 88, 40 }; +static const int16_t NOISE_FLOOR[] = { -96, -93, -98, -96, -93, -96 }; + +static const struct hal_percal_data iq_cal_multi_sample = { + IQ_MISMATCH_CAL, + MAX_CAL_SAMPLES, + PER_MIN_LOG_COUNT, + ath9k_hw_iqcal_collect, + ath9k_hw_iqcalibrate +}; +static const struct hal_percal_data iq_cal_single_sample = { + IQ_MISMATCH_CAL, + MIN_CAL_SAMPLES, + PER_MAX_LOG_COUNT, + ath9k_hw_iqcal_collect, + ath9k_hw_iqcalibrate +}; +static const struct hal_percal_data adc_gain_cal_multi_sample = { + ADC_GAIN_CAL, + MAX_CAL_SAMPLES, + PER_MIN_LOG_COUNT, + ath9k_hw_adc_gaincal_collect, + ath9k_hw_adc_gaincal_calibrate +}; +static const struct hal_percal_data adc_gain_cal_single_sample = { + ADC_GAIN_CAL, + MIN_CAL_SAMPLES, + PER_MAX_LOG_COUNT, + ath9k_hw_adc_gaincal_collect, + ath9k_hw_adc_gaincal_calibrate +}; +static const struct hal_percal_data adc_dc_cal_multi_sample = { + ADC_DC_CAL, + MAX_CAL_SAMPLES, + PER_MIN_LOG_COUNT, + ath9k_hw_adc_dccal_collect, + ath9k_hw_adc_dccal_calibrate +}; +static const struct hal_percal_data adc_dc_cal_single_sample = { + ADC_DC_CAL, + MIN_CAL_SAMPLES, + PER_MAX_LOG_COUNT, + ath9k_hw_adc_dccal_collect, + ath9k_hw_adc_dccal_calibrate +}; +static const struct hal_percal_data adc_init_dc_cal = { + ADC_DC_INIT_CAL, + MIN_CAL_SAMPLES, + INIT_LOG_COUNT, + 
ath9k_hw_adc_dccal_collect, + ath9k_hw_adc_dccal_calibrate +}; + +static const struct ath_hal ar5416hal = { + AR5416_MAGIC, + 0, + 0, + NULL, + NULL, + CTRY_DEFAULT, + 0, + 0, + 0, + 0, + 0, + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + }, +}; + +static struct ath9k_rate_table ar5416_11a_table = { + 8, + {0}, + { + {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0}, + {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 0}, + {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2}, + {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2}, + {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4}, + {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4}, + {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4}, + {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4} + }, +}; + +static struct ath9k_rate_table ar5416_11b_table = { + 4, + {0}, + { + {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0}, + {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1}, + {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 1}, + {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 1} + }, +}; + +static struct ath9k_rate_table ar5416_11g_table = { + 12, + {0}, + { + {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0}, + {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1}, + {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2}, + {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3}, + + {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4}, + {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4}, + {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6}, + {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6}, + {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8}, + {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8}, + {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8}, + {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8} + }, +}; + +static struct ath9k_rate_table ar5416_11ng_table = { + 28, + {0}, + { + {true, PHY_CCK, 1000, 0x1b, 0x00, (0x80 | 2), 0}, + {true, PHY_CCK, 2000, 0x1a, 0x04, (0x80 | 4), 1}, + {true, PHY_CCK, 5500, 0x19, 0x04, (0x80 | 11), 2}, + {true, PHY_CCK, 11000, 0x18, 0x04, (0x80 | 22), 3}, + + {false, PHY_OFDM, 6000, 0x0b, 0x00, 12, 4}, + {false, PHY_OFDM, 9000, 0x0f, 0x00, 18, 4}, + {true, PHY_OFDM, 12000, 0x0a, 0x00, 24, 6}, + {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 6}, + {true, PHY_OFDM, 24000, 0x09, 0x00, 48, 8}, + {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 8}, + {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 8}, + {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 8}, + {true, PHY_HT, 6500, 0x80, 0x00, 0, 4}, + {true, PHY_HT, 13000, 0x81, 0x00, 1, 6}, + {true, PHY_HT, 19500, 0x82, 0x00, 2, 6}, + {true, PHY_HT, 26000, 0x83, 0x00, 3, 8}, + {true, PHY_HT, 39000, 0x84, 0x00, 4, 8}, + {true, PHY_HT, 52000, 0x85, 0x00, 5, 8}, + {true, PHY_HT, 58500, 0x86, 0x00, 6, 8}, + {true, PHY_HT, 65000, 0x87, 0x00, 7, 8}, + {true, PHY_HT, 13000, 0x88, 0x00, 8, 4}, + {true, PHY_HT, 26000, 0x89, 0x00, 9, 6}, + {true, PHY_HT, 39000, 0x8a, 0x00, 10, 6}, + {true, PHY_HT, 52000, 0x8b, 0x00, 11, 8}, + {true, PHY_HT, 78000, 0x8c, 0x00, 12, 8}, + {true, PHY_HT, 104000, 0x8d, 0x00, 13, 8}, + {true, PHY_HT, 117000, 0x8e, 0x00, 14, 8}, + {true, PHY_HT, 130000, 0x8f, 0x00, 15, 8}, + }, +}; + +static struct ath9k_rate_table ar5416_11na_table = { + 24, + {0}, + { + {true, PHY_OFDM, 6000, 0x0b, 0x00, (0x80 | 12), 0}, + {true, PHY_OFDM, 9000, 0x0f, 0x00, 18, 
0}, + {true, PHY_OFDM, 12000, 0x0a, 0x00, (0x80 | 24), 2}, + {true, PHY_OFDM, 18000, 0x0e, 0x00, 36, 2}, + {true, PHY_OFDM, 24000, 0x09, 0x00, (0x80 | 48), 4}, + {true, PHY_OFDM, 36000, 0x0d, 0x00, 72, 4}, + {true, PHY_OFDM, 48000, 0x08, 0x00, 96, 4}, + {true, PHY_OFDM, 54000, 0x0c, 0x00, 108, 4}, + {true, PHY_HT, 6500, 0x80, 0x00, 0, 0}, + {true, PHY_HT, 13000, 0x81, 0x00, 1, 2}, + {true, PHY_HT, 19500, 0x82, 0x00, 2, 2}, + {true, PHY_HT, 26000, 0x83, 0x00, 3, 4}, + {true, PHY_HT, 39000, 0x84, 0x00, 4, 4}, + {true, PHY_HT, 52000, 0x85, 0x00, 5, 4}, + {true, PHY_HT, 58500, 0x86, 0x00, 6, 4}, + {true, PHY_HT, 65000, 0x87, 0x00, 7, 4}, + {true, PHY_HT, 13000, 0x88, 0x00, 8, 0}, + {true, PHY_HT, 26000, 0x89, 0x00, 9, 2}, + {true, PHY_HT, 39000, 0x8a, 0x00, 10, 2}, + {true, PHY_HT, 52000, 0x8b, 0x00, 11, 4}, + {true, PHY_HT, 78000, 0x8c, 0x00, 12, 4}, + {true, PHY_HT, 104000, 0x8d, 0x00, 13, 4}, + {true, PHY_HT, 117000, 0x8e, 0x00, 14, 4}, + {true, PHY_HT, 130000, 0x8f, 0x00, 15, 4}, + }, +}; + +static enum wireless_mode ath9k_hw_chan2wmode(struct ath_hal *ah, + const struct ath9k_channel *chan) +{ + if (IS_CHAN_CCK(chan)) + return ATH9K_MODE_11A; + if (IS_CHAN_G(chan)) + return ATH9K_MODE_11G; + return ATH9K_MODE_11A; +} + +static bool ath9k_hw_wait(struct ath_hal *ah, + u32 reg, + u32 mask, + u32 val) +{ + int i; + + for (i = 0; i < (AH_TIMEOUT / AH_TIME_QUANTUM); i++) { + if ((REG_READ(ah, reg) & mask) == val) + return true; + + udelay(AH_TIME_QUANTUM); + } + DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO, + "%s: timeout on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", + __func__, reg, REG_READ(ah, reg), mask, val); + return false; +} + +static bool ath9k_hw_eeprom_read(struct ath_hal *ah, u32 off, + u16 *data) +{ + (void) REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); + + if (!ath9k_hw_wait(ah, + AR_EEPROM_STATUS_DATA, + AR_EEPROM_STATUS_DATA_BUSY | + AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0)) { + return false; + } + + *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA), + AR_EEPROM_STATUS_DATA_VAL); + + return true; +} + +static int ath9k_hw_flash_map(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + ahp->ah_cal_mem = ioremap(AR5416_EEPROM_START_ADDR, AR5416_EEPROM_MAX); + + if (!ahp->ah_cal_mem) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "%s: cannot remap eeprom region \n", __func__); + return -EIO; + } + + return 0; +} + +static bool ath9k_hw_flash_read(struct ath_hal *ah, u32 off, + u16 *data) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + *data = ioread16(ahp->ah_cal_mem + off); + return true; +} + +static void ath9k_hw_read_revisions(struct ath_hal *ah) +{ + u32 val; + + val = REG_READ(ah, AR_SREV) & AR_SREV_ID; + + if (val == 0xFF) { + val = REG_READ(ah, AR_SREV); + + ah->ah_macVersion = + (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S; + + ah->ah_macRev = MS(val, AR_SREV_REVISION2); + ah->ah_isPciExpress = + (val & AR_SREV_TYPE2_HOST_MODE) ? 
0 : 1; + + } else { + if (!AR_SREV_9100(ah)) + ah->ah_macVersion = MS(val, AR_SREV_VERSION); + + ah->ah_macRev = val & AR_SREV_REVISION; + + if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) + ah->ah_isPciExpress = true; + } +} + +u32 ath9k_hw_reverse_bits(u32 val, u32 n) +{ + u32 retval; + int i; + + for (i = 0, retval = 0; i < n; i++) { + retval = (retval << 1) | (val & 1); + val >>= 1; + } + return retval; +} + +static void ath9k_hw_set_defaults(struct ath_hal *ah) +{ + int i; + + ah->ah_config.dma_beacon_response_time = 2; + ah->ah_config.sw_beacon_response_time = 10; + ah->ah_config.additional_swba_backoff = 0; + ah->ah_config.ack_6mb = 0x0; + ah->ah_config.cwm_ignore_extcca = 0; + ah->ah_config.pcie_powersave_enable = 0; + ah->ah_config.pcie_l1skp_enable = 0; + ah->ah_config.pcie_clock_req = 0; + ah->ah_config.pcie_power_reset = 0x100; + ah->ah_config.pcie_restore = 0; + ah->ah_config.pcie_waen = 0; + ah->ah_config.analog_shiftreg = 1; + ah->ah_config.ht_enable = 1; + ah->ah_config.ofdm_trig_low = 200; + ah->ah_config.ofdm_trig_high = 500; + ah->ah_config.cck_trig_high = 200; + ah->ah_config.cck_trig_low = 100; + ah->ah_config.enable_ani = 0; + ah->ah_config.noise_immunity_level = 4; + ah->ah_config.ofdm_weaksignal_det = 1; + ah->ah_config.cck_weaksignal_thr = 0; + ah->ah_config.spur_immunity_level = 2; + ah->ah_config.firstep_level = 0; + ah->ah_config.rssi_thr_high = 40; + ah->ah_config.rssi_thr_low = 7; + ah->ah_config.diversity_control = 0; + ah->ah_config.antenna_switch_swap = 0; + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { + ah->ah_config.spurchans[i][0] = AR_NO_SPUR; + ah->ah_config.spurchans[i][1] = AR_NO_SPUR; + } + + ah->ah_config.intr_mitigation = 0; +} + +static inline void ath9k_hw_override_ini(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + if (!AR_SREV_5416_V20_OR_LATER(ah) + || AR_SREV_9280_10_OR_LATER(ah)) + return; + + REG_WRITE(ah, 0x9800 + (651 << 2), 0x11); +} + +static inline void ath9k_hw_init_bb(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + u32 synthDelay; + + synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; + if (IS_CHAN_CCK(chan)) + synthDelay = (4 * synthDelay) / 22; + else + synthDelay /= 10; + + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); + + udelay(synthDelay + BASE_ACTIVATE_DELAY); +} + +static inline void ath9k_hw_init_interrupt_masks(struct ath_hal *ah, + enum ath9k_opmode opmode) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + ahp->ah_maskReg = AR_IMR_TXERR | + AR_IMR_TXURN | + AR_IMR_RXERR | + AR_IMR_RXORN | + AR_IMR_BCNMISC; + + if (ahp->ah_intrMitigation) + ahp->ah_maskReg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; + else + ahp->ah_maskReg |= AR_IMR_RXOK; + + ahp->ah_maskReg |= AR_IMR_TXOK; + + if (opmode == ATH9K_M_HOSTAP) + ahp->ah_maskReg |= AR_IMR_MIB; + + REG_WRITE(ah, AR_IMR, ahp->ah_maskReg); + REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT); + + if (!AR_SREV_9100(ah)) { + REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT); + REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); + } +} + +static inline void ath9k_hw_init_qos(struct ath_hal *ah) +{ + REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); + REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); + + REG_WRITE(ah, AR_QOS_NO_ACK, + SM(2, AR_QOS_NO_ACK_TWO_BIT) | + SM(5, AR_QOS_NO_ACK_BIT_OFF) | + SM(0, AR_QOS_NO_ACK_BYTE_OFF)); + + REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL); + REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF); + REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF); + REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF); + 
REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); +} + +static void ath9k_hw_analog_shift_rmw(struct ath_hal *ah, + u32 reg, + u32 mask, + u32 shift, + u32 val) +{ + u32 regVal; + + regVal = REG_READ(ah, reg) & ~mask; + regVal |= (val << shift) & mask; + + REG_WRITE(ah, reg, regVal); + + if (ah->ah_config.analog_shiftreg) + udelay(100); + + return; +} + +static u8 ath9k_hw_get_num_ant_config(struct ath_hal_5416 *ahp, + enum ieee80211_band freq_band) +{ + struct ar5416_eeprom *eep = &ahp->ah_eeprom; + struct modal_eep_header *pModal = + &(eep->modalHeader[IEEE80211_BAND_5GHZ == freq_band]); + struct base_eep_header *pBase = &eep->baseEepHeader; + u8 num_ant_config; + + num_ant_config = 1; + + if (pBase->version >= 0x0E0D) + if (pModal->useAnt1) + num_ant_config += 1; + + return num_ant_config; +} + +static int +ath9k_hw_get_eeprom_antenna_cfg(struct ath_hal_5416 *ahp, + struct ath9k_channel *chan, + u8 index, + u16 *config) +{ + struct ar5416_eeprom *eep = &ahp->ah_eeprom; + struct modal_eep_header *pModal = + &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); + struct base_eep_header *pBase = &eep->baseEepHeader; + + switch (index) { + case 0: + *config = pModal->antCtrlCommon & 0xFFFF; + return 0; + case 1: + if (pBase->version >= 0x0E0D) { + if (pModal->useAnt1) { + *config = + ((pModal->antCtrlCommon & 0xFFFF0000) >> 16); + return 0; + } + } + break; + default: + break; + } + + return -EINVAL; +} + +static inline bool ath9k_hw_nvram_read(struct ath_hal *ah, + u32 off, + u16 *data) +{ + if (ath9k_hw_use_flash(ah)) + return ath9k_hw_flash_read(ah, off, data); + else + return ath9k_hw_eeprom_read(ah, off, data); +} + +static inline bool ath9k_hw_fill_eeprom(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416_eeprom *eep = &ahp->ah_eeprom; + u16 *eep_data; + int addr, ar5416_eep_start_loc = 0; + + if (!ath9k_hw_use_flash(ah)) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "%s: Reading from EEPROM, not flash\n", __func__); + ar5416_eep_start_loc = 256; + } + if (AR_SREV_9100(ah)) + ar5416_eep_start_loc = 256; + + eep_data = (u16 *) eep; + for (addr = 0; + addr < sizeof(struct ar5416_eeprom) / sizeof(u16); + addr++) { + if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc, + eep_data)) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "%s: Unable to read eeprom region \n", + __func__); + return false; + } + eep_data++; + } + return true; +} + +/* XXX: Clean me up, make me more legible */ +static bool +ath9k_hw_eeprom_set_board_values(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + struct modal_eep_header *pModal; + int i, regChainOffset; + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416_eeprom *eep = &ahp->ah_eeprom; + u8 txRxAttenLocal; + u16 ant_config; + + pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); + + txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; + + ath9k_hw_get_eeprom_antenna_cfg(ahp, chan, 1, &ant_config); + REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config); + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + if (AR_SREV_9280(ah)) { + if (i >= 2) + break; + } + + if (AR_SREV_5416_V20_OR_LATER(ah) && + (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5) + && (i != 0)) + regChainOffset = (i == 1) ? 
0x2000 : 0x1000; + else + regChainOffset = i * 0x1000; + + REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0 + regChainOffset, + pModal->antCtrlChain[i]); + + REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0) + regChainOffset, + (REG_READ(ah, + AR_PHY_TIMING_CTRL4(0) + + regChainOffset) & + ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | + AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | + SM(pModal->iqCalICh[i], + AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | + SM(pModal->iqCalQCh[i], + AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); + + if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) { + if ((eep->baseEepHeader.version & + AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_3) { + txRxAttenLocal = pModal->txRxAttenCh[i]; + if (AR_SREV_9280_10_OR_LATER(ah)) { + REG_RMW_FIELD(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, + pModal-> + bswMargin[i]); + REG_RMW_FIELD(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN1_DB, + pModal-> + bswAtten[i]); + REG_RMW_FIELD(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, + pModal-> + xatten2Margin[i]); + REG_RMW_FIELD(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset, + AR_PHY_GAIN_2GHZ_XATTEN2_DB, + pModal-> + xatten2Db[i]); + } else { + REG_WRITE(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset, + (REG_READ(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset) & + ~AR_PHY_GAIN_2GHZ_BSW_MARGIN) + | SM(pModal-> + bswMargin[i], + AR_PHY_GAIN_2GHZ_BSW_MARGIN)); + REG_WRITE(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset, + (REG_READ(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset) & + ~AR_PHY_GAIN_2GHZ_BSW_ATTEN) + | SM(pModal->bswAtten[i], + AR_PHY_GAIN_2GHZ_BSW_ATTEN)); + } + } + if (AR_SREV_9280_10_OR_LATER(ah)) { + REG_RMW_FIELD(ah, + AR_PHY_RXGAIN + + regChainOffset, + AR9280_PHY_RXGAIN_TXRX_ATTEN, + txRxAttenLocal); + REG_RMW_FIELD(ah, + AR_PHY_RXGAIN + + regChainOffset, + AR9280_PHY_RXGAIN_TXRX_MARGIN, + pModal->rxTxMarginCh[i]); + } else { + REG_WRITE(ah, + AR_PHY_RXGAIN + regChainOffset, + (REG_READ(ah, + AR_PHY_RXGAIN + + regChainOffset) & + ~AR_PHY_RXGAIN_TXRX_ATTEN) | + SM(txRxAttenLocal, + AR_PHY_RXGAIN_TXRX_ATTEN)); + REG_WRITE(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset, + (REG_READ(ah, + AR_PHY_GAIN_2GHZ + + regChainOffset) & + ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) | + SM(pModal->rxTxMarginCh[i], + AR_PHY_GAIN_2GHZ_RXTX_MARGIN)); + } + } + } + + if (AR_SREV_9280_10_OR_LATER(ah)) { + if (IS_CHAN_2GHZ(chan)) { + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0, + AR_AN_RF2G1_CH0_OB, + AR_AN_RF2G1_CH0_OB_S, + pModal->ob); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH0, + AR_AN_RF2G1_CH0_DB, + AR_AN_RF2G1_CH0_DB_S, + pModal->db); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1, + AR_AN_RF2G1_CH1_OB, + AR_AN_RF2G1_CH1_OB_S, + pModal->ob_ch1); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF2G1_CH1, + AR_AN_RF2G1_CH1_DB, + AR_AN_RF2G1_CH1_DB_S, + pModal->db_ch1); + } else { + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0, + AR_AN_RF5G1_CH0_OB5, + AR_AN_RF5G1_CH0_OB5_S, + pModal->ob); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH0, + AR_AN_RF5G1_CH0_DB5, + AR_AN_RF5G1_CH0_DB5_S, + pModal->db); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1, + AR_AN_RF5G1_CH1_OB5, + AR_AN_RF5G1_CH1_OB5_S, + pModal->ob_ch1); + ath9k_hw_analog_shift_rmw(ah, AR_AN_RF5G1_CH1, + AR_AN_RF5G1_CH1_DB5, + AR_AN_RF5G1_CH1_DB5_S, + pModal->db_ch1); + } + ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2, + AR_AN_TOP2_XPABIAS_LVL, + AR_AN_TOP2_XPABIAS_LVL_S, + pModal->xpaBiasLvl); + ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2, + AR_AN_TOP2_LOCALBIAS, + AR_AN_TOP2_LOCALBIAS_S, + pModal->local_bias); + DPRINTF(ah->ah_sc, 
ATH_DBG_ANY, "ForceXPAon: %d\n", + pModal->force_xpaon); + REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG, + pModal->force_xpaon); + } + + REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, + pModal->switchSettling); + REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, + pModal->adcDesiredSize); + + if (!AR_SREV_9280_10_OR_LATER(ah)) + REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, + AR_PHY_DESIRED_SZ_PGA, + pModal->pgaDesiredSize); + + REG_WRITE(ah, AR_PHY_RF_CTL4, + SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) + | SM(pModal->txEndToXpaOff, + AR_PHY_RF_CTL4_TX_END_XPAB_OFF) + | SM(pModal->txFrameToXpaOn, + AR_PHY_RF_CTL4_FRAME_XPAA_ON) + | SM(pModal->txFrameToXpaOn, + AR_PHY_RF_CTL4_FRAME_XPAB_ON)); + + REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, + pModal->txEndToRxOn); + if (AR_SREV_9280_10_OR_LATER(ah)) { + REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, + pModal->thresh62); + REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, + AR_PHY_EXT_CCA0_THRESH62, + pModal->thresh62); + } else { + REG_RMW_FIELD(ah, AR_PHY_CCA, AR_PHY_CCA_THRESH62, + pModal->thresh62); + REG_RMW_FIELD(ah, AR_PHY_EXT_CCA, + AR_PHY_EXT_CCA_THRESH62, + pModal->thresh62); + } + + if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_2) { + REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, + AR_PHY_TX_END_DATA_START, + pModal->txFrameToDataStart); + REG_RMW_FIELD(ah, AR_PHY_RF_CTL2, AR_PHY_TX_END_PA_ON, + pModal->txFrameToPaOn); + } + + if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_3) { + if (IS_CHAN_HT40(chan)) + REG_RMW_FIELD(ah, AR_PHY_SETTLING, + AR_PHY_SETTLING_SWITCH, + pModal->swSettleHt40); + } + + return true; +} + +static inline int ath9k_hw_check_eeprom(struct ath_hal *ah) +{ + u32 sum = 0, el; + u16 *eepdata; + int i; + struct ath_hal_5416 *ahp = AH5416(ah); + bool need_swap = false; + struct ar5416_eeprom *eep = + (struct ar5416_eeprom *) &ahp->ah_eeprom; + + if (!ath9k_hw_use_flash(ah)) { + u16 magic, magic2; + int addr; + + if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, + &magic)) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "%s: Reading Magic # failed\n", __func__); + return false; + } + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "%s: Read Magic = 0x%04X\n", + __func__, magic); + + if (magic != AR5416_EEPROM_MAGIC) { + magic2 = swab16(magic); + + if (magic2 == AR5416_EEPROM_MAGIC) { + need_swap = true; + eepdata = (u16 *) (&ahp->ah_eeprom); + + for (addr = 0; + addr < + sizeof(struct ar5416_eeprom) / + sizeof(u16); addr++) { + u16 temp; + + temp = swab16(*eepdata); + *eepdata = temp; + eepdata++; + + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "0x%04X ", *eepdata); + if (((addr + 1) % 6) == 0) + DPRINTF(ah->ah_sc, + ATH_DBG_EEPROM, + "\n"); + } + } else { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "Invalid EEPROM Magic. " + "endianness missmatch.\n"); + return -EINVAL; + } + } + } + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n", + need_swap ? "True" : "False"); + + if (need_swap) + el = swab16(ahp->ah_eeprom.baseEepHeader.length); + else + el = ahp->ah_eeprom.baseEepHeader.length; + + if (el > sizeof(struct ar5416_eeprom)) + el = sizeof(struct ar5416_eeprom) / sizeof(u16); + else + el = el / sizeof(u16); + + eepdata = (u16 *) (&ahp->ah_eeprom); + + for (i = 0; i < el; i++) + sum ^= *eepdata++; + + if (need_swap) { + u32 integer, j; + u16 word; + + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "EEPROM Endianness is not native.. 
Changing \n"); + + word = swab16(eep->baseEepHeader.length); + eep->baseEepHeader.length = word; + + word = swab16(eep->baseEepHeader.checksum); + eep->baseEepHeader.checksum = word; + + word = swab16(eep->baseEepHeader.version); + eep->baseEepHeader.version = word; + + word = swab16(eep->baseEepHeader.regDmn[0]); + eep->baseEepHeader.regDmn[0] = word; + + word = swab16(eep->baseEepHeader.regDmn[1]); + eep->baseEepHeader.regDmn[1] = word; + + word = swab16(eep->baseEepHeader.rfSilent); + eep->baseEepHeader.rfSilent = word; + + word = swab16(eep->baseEepHeader.blueToothOptions); + eep->baseEepHeader.blueToothOptions = word; + + word = swab16(eep->baseEepHeader.deviceCap); + eep->baseEepHeader.deviceCap = word; + + for (j = 0; j < ARRAY_SIZE(eep->modalHeader); j++) { + struct modal_eep_header *pModal = + &eep->modalHeader[j]; + integer = swab32(pModal->antCtrlCommon); + pModal->antCtrlCommon = integer; + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + integer = swab32(pModal->antCtrlChain[i]); + pModal->antCtrlChain[i] = integer; + } + + for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) { + word = swab16(pModal->spurChans[i].spurChan); + pModal->spurChans[i].spurChan = word; + } + } + } + + if (sum != 0xffff || ar5416_get_eep_ver(ahp) != AR5416_EEP_VER || + ar5416_get_eep_rev(ahp) < AR5416_EEP_NO_BACK_VER) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "Bad EEPROM checksum 0x%x or revision 0x%04x\n", + sum, ar5416_get_eep_ver(ahp)); + return -EINVAL; + } + + return 0; +} + +static bool ath9k_hw_chip_test(struct ath_hal *ah) +{ + u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) }; + u32 regHold[2]; + u32 patternData[4] = { 0x55555555, + 0xaaaaaaaa, + 0x66666666, + 0x99999999 }; + int i, j; + + for (i = 0; i < 2; i++) { + u32 addr = regAddr[i]; + u32 wrData, rdData; + + regHold[i] = REG_READ(ah, addr); + for (j = 0; j < 0x100; j++) { + wrData = (j << 16) | j; + REG_WRITE(ah, addr, wrData); + rdData = REG_READ(ah, addr); + if (rdData != wrData) { + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "%s: address test failed " + "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", + __func__, addr, wrData, rdData); + return false; + } + } + for (j = 0; j < 4; j++) { + wrData = patternData[j]; + REG_WRITE(ah, addr, wrData); + rdData = REG_READ(ah, addr); + if (wrData != rdData) { + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "%s: address test failed " + "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", + __func__, addr, wrData, rdData); + return false; + } + } + REG_WRITE(ah, regAddr[i], regHold[i]); + } + udelay(100); + return true; +} + +u32 ath9k_hw_getrxfilter(struct ath_hal *ah) +{ + u32 bits = REG_READ(ah, AR_RX_FILTER); + u32 phybits = REG_READ(ah, AR_PHY_ERR); + + if (phybits & AR_PHY_ERR_RADAR) + bits |= ATH9K_RX_FILTER_PHYRADAR; + if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING)) + bits |= ATH9K_RX_FILTER_PHYERR; + return bits; +} + +void ath9k_hw_setrxfilter(struct ath_hal *ah, u32 bits) +{ + u32 phybits; + + REG_WRITE(ah, AR_RX_FILTER, (bits & 0xffff) | AR_RX_COMPR_BAR); + phybits = 0; + if (bits & ATH9K_RX_FILTER_PHYRADAR) + phybits |= AR_PHY_ERR_RADAR; + if (bits & ATH9K_RX_FILTER_PHYERR) + phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING; + REG_WRITE(ah, AR_PHY_ERR, phybits); + + if (phybits) + REG_WRITE(ah, AR_RXCFG, + REG_READ(ah, AR_RXCFG) | AR_RXCFG_ZLFDMA); + else + REG_WRITE(ah, AR_RXCFG, + REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA); +} + +bool ath9k_hw_setcapability(struct ath_hal *ah, + enum ath9k_capability_type type, + u32 capability, + u32 setting, + int *status) +{ + struct ath_hal_5416 
*ahp = AH5416(ah); + u32 v; + + switch (type) { + case ATH9K_CAP_TKIP_MIC: + if (setting) + ahp->ah_staId1Defaults |= + AR_STA_ID1_CRPT_MIC_ENABLE; + else + ahp->ah_staId1Defaults &= + ~AR_STA_ID1_CRPT_MIC_ENABLE; + return true; + case ATH9K_CAP_DIVERSITY: + v = REG_READ(ah, AR_PHY_CCK_DETECT); + if (setting) + v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV; + else + v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV; + REG_WRITE(ah, AR_PHY_CCK_DETECT, v); + return true; + case ATH9K_CAP_MCAST_KEYSRCH: + if (setting) + ahp->ah_staId1Defaults |= AR_STA_ID1_MCAST_KSRCH; + else + ahp->ah_staId1Defaults &= ~AR_STA_ID1_MCAST_KSRCH; + return true; + case ATH9K_CAP_TSF_ADJUST: + if (setting) + ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF; + else + ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF; + return true; + default: + return false; + } +} + +void ath9k_hw_dmaRegDump(struct ath_hal *ah) +{ + u32 val[ATH9K_NUM_DMA_DEBUG_REGS]; + int qcuOffset = 0, dcuOffset = 0; + u32 *qcuBase = &val[0], *dcuBase = &val[4]; + int i; + + REG_WRITE(ah, AR_MACMISC, + ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | + (AR_MACMISC_MISC_OBS_BUS_1 << + AR_MACMISC_MISC_OBS_BUS_MSB_S))); + + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "Raw DMA Debug values:\n"); + for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) { + if (i % 4 == 0) + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n"); + + val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32))); + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "%d: %08x ", i, val[i]); + } + + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n\n"); + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n"); + + for (i = 0; i < ATH9K_NUM_QUEUES; + i++, qcuOffset += 4, dcuOffset += 5) { + if (i == 8) { + qcuOffset = 0; + qcuBase++; + } + + if (i == 6) { + dcuOffset = 0; + dcuBase++; + } + + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "%2d %2x %1x %2x %2x\n", + i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, + (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + + 3), + val[2] & (0x7 << (i * 3)) >> (i * 3), + (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); + } + + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n"); + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "qcu_stitch state: %2x qcu_fetch state: %2x\n", + (val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22); + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "qcu_complete state: %2x dcu_complete state: %2x\n", + (val[3] & 0x1c000000) >> 26, (val[6] & 0x3)); + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "dcu_arb state: %2x dcu_fp state: %2x\n", + (val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27); + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "chan_idle_dur: %3d chan_idle_dur_valid: %1d\n", + (val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10); + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "txfifo_valid_0: %1d txfifo_valid_1: %1d\n", + (val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12); + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n", + (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17); + + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "pcu observe 0x%x \n", + REG_READ(ah, AR_OBS_BUS_1)); + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "AR_CR 0x%x \n", REG_READ(ah, AR_CR)); +} + +u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hal *ah, + u32 *rxc_pcnt, + u32 *rxf_pcnt, + u32 *txf_pcnt) +{ + static u32 cycles, rx_clear, rx_frame, tx_frame; + u32 good = 1; + + u32 rc = REG_READ(ah, AR_RCCNT); + u32 rf = REG_READ(ah, AR_RFCNT); + u32 tf = REG_READ(ah, AR_TFCNT); + u32 cc = REG_READ(ah, AR_CCCNT); + + if (cycles == 0 || cycles > cc) { + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: 
cycle counter wrap. ExtBusy = 0\n", + __func__); + good = 0; + } else { + u32 cc_d = cc - cycles; + u32 rc_d = rc - rx_clear; + u32 rf_d = rf - rx_frame; + u32 tf_d = tf - tx_frame; + + if (cc_d != 0) { + *rxc_pcnt = rc_d * 100 / cc_d; + *rxf_pcnt = rf_d * 100 / cc_d; + *txf_pcnt = tf_d * 100 / cc_d; + } else { + good = 0; + } + } + + cycles = cc; + rx_frame = rf; + rx_clear = rc; + tx_frame = tf; + + return good; +} + +void ath9k_hw_set11nmac2040(struct ath_hal *ah, enum ath9k_ht_macmode mode) +{ + u32 macmode; + + if (mode == ATH9K_HT_MACMODE_2040 && + !ah->ah_config.cwm_ignore_extcca) + macmode = AR_2040_JOINED_RX_CLEAR; + else + macmode = 0; + + REG_WRITE(ah, AR_2040_MODE, macmode); +} + +static void ath9k_hw_mark_phy_inactive(struct ath_hal *ah) +{ + REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); +} + + +static struct ath_hal_5416 *ath9k_hw_newstate(u16 devid, + struct ath_softc *sc, + void __iomem *mem, + int *status) +{ + static const u8 defbssidmask[ETH_ALEN] = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + struct ath_hal_5416 *ahp; + struct ath_hal *ah; + + ahp = kzalloc(sizeof(struct ath_hal_5416), GFP_KERNEL); + if (ahp == NULL) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: cannot allocate memory for state block\n", + __func__); + *status = -ENOMEM; + return NULL; + } + + ah = &ahp->ah; + + memcpy(&ahp->ah, &ar5416hal, sizeof(struct ath_hal)); + + ah->ah_sc = sc; + ah->ah_sh = mem; + + ah->ah_devid = devid; + ah->ah_subvendorid = 0; + + ah->ah_flags = 0; + if ((devid == AR5416_AR9100_DEVID)) + ah->ah_macVersion = AR_SREV_VERSION_9100; + if (!AR_SREV_9100(ah)) + ah->ah_flags = AH_USE_EEPROM; + + ah->ah_powerLimit = MAX_RATE_POWER; + ah->ah_tpScale = ATH9K_TP_SCALE_MAX; + + ahp->ah_atimWindow = 0; + ahp->ah_diversityControl = ah->ah_config.diversity_control; + ahp->ah_antennaSwitchSwap = + ah->ah_config.antenna_switch_swap; + + ahp->ah_staId1Defaults = AR_STA_ID1_CRPT_MIC_ENABLE; + ahp->ah_beaconInterval = 100; + ahp->ah_enable32kHzClock = DONT_USE_32KHZ; + ahp->ah_slottime = (u32) -1; + ahp->ah_acktimeout = (u32) -1; + ahp->ah_ctstimeout = (u32) -1; + ahp->ah_globaltxtimeout = (u32) -1; + memcpy(&ahp->ah_bssidmask, defbssidmask, ETH_ALEN); + + ahp->ah_gBeaconRate = 0; + + return ahp; +} + +static int ath9k_hw_eeprom_attach(struct ath_hal *ah) +{ + int status; + + if (ath9k_hw_use_flash(ah)) + ath9k_hw_flash_map(ah); + + if (!ath9k_hw_fill_eeprom(ah)) + return -EIO; + + status = ath9k_hw_check_eeprom(ah); + + return status; +} + +u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp, + enum eeprom_param param) +{ + struct ar5416_eeprom *eep = &ahp->ah_eeprom; + struct modal_eep_header *pModal = eep->modalHeader; + struct base_eep_header *pBase = &eep->baseEepHeader; + + switch (param) { + case EEP_NFTHRESH_5: + return -pModal[0].noiseFloorThreshCh[0]; + case EEP_NFTHRESH_2: + return -pModal[1].noiseFloorThreshCh[0]; + case AR_EEPROM_MAC(0): + return pBase->macAddr[0] << 8 | pBase->macAddr[1]; + case AR_EEPROM_MAC(1): + return pBase->macAddr[2] << 8 | pBase->macAddr[3]; + case AR_EEPROM_MAC(2): + return pBase->macAddr[4] << 8 | pBase->macAddr[5]; + case EEP_REG_0: + return pBase->regDmn[0]; + case EEP_REG_1: + return pBase->regDmn[1]; + case EEP_OP_CAP: + return pBase->deviceCap; + case EEP_OP_MODE: + return pBase->opCapFlags; + case EEP_RF_SILENT: + return pBase->rfSilent; + case EEP_OB_5: + return pModal[0].ob; + case EEP_DB_5: + return pModal[0].db; + case EEP_OB_2: + return pModal[1].ob; + case EEP_DB_2: + return pModal[1].db; + case EEP_MINOR_REV: + return pBase->version & 
AR5416_EEP_VER_MINOR_MASK; + case EEP_TX_MASK: + return pBase->txMask; + case EEP_RX_MASK: + return pBase->rxMask; + default: + return 0; + } +} + +static inline int ath9k_hw_get_radiorev(struct ath_hal *ah) +{ + u32 val; + int i; + + REG_WRITE(ah, AR_PHY(0x36), 0x00007058); + for (i = 0; i < 8; i++) + REG_WRITE(ah, AR_PHY(0x20), 0x00010000); + val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff; + val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4); + return ath9k_hw_reverse_bits(val, 8); +} + +static inline int ath9k_hw_init_macaddr(struct ath_hal *ah) +{ + u32 sum; + int i; + u16 eeval; + struct ath_hal_5416 *ahp = AH5416(ah); + DECLARE_MAC_BUF(mac); + + sum = 0; + for (i = 0; i < 3; i++) { + eeval = ath9k_hw_get_eeprom(ahp, AR_EEPROM_MAC(i)); + sum += eeval; + ahp->ah_macaddr[2 * i] = eeval >> 8; + ahp->ah_macaddr[2 * i + 1] = eeval & 0xff; + } + if (sum == 0 || sum == 0xffff * 3) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "%s: mac address read failed: %s\n", __func__, + print_mac(mac, ahp->ah_macaddr)); + return -EADDRNOTAVAIL; + } + + return 0; +} + +static inline int16_t ath9k_hw_interpolate(u16 target, + u16 srcLeft, + u16 srcRight, + int16_t targetLeft, + int16_t targetRight) +{ + int16_t rv; + + if (srcRight == srcLeft) { + rv = targetLeft; + } else { + rv = (int16_t) (((target - srcLeft) * targetRight + + (srcRight - target) * targetLeft) / + (srcRight - srcLeft)); + } + return rv; +} + +static inline u16 ath9k_hw_fbin2freq(u8 fbin, + bool is2GHz) +{ + + if (fbin == AR5416_BCHAN_UNUSED) + return fbin; + + return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); +} + +static u16 ath9k_hw_eeprom_get_spur_chan(struct ath_hal *ah, + u16 i, + bool is2GHz) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416_eeprom *eep = + (struct ar5416_eeprom *) &ahp->ah_eeprom; + u16 spur_val = AR_NO_SPUR; + + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "Getting spur idx %d is2Ghz. %d val %x\n", + i, is2GHz, ah->ah_config.spurchans[i][is2GHz]); + + switch (ah->ah_config.spurmode) { + case SPUR_DISABLE: + break; + case SPUR_ENABLE_IOCTL: + spur_val = ah->ah_config.spurchans[i][is2GHz]; + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "Getting spur val from new loc. 
%d\n", spur_val); + break; + case SPUR_ENABLE_EEPROM: + spur_val = eep->modalHeader[is2GHz].spurChans[i].spurChan; + break; + + } + return spur_val; +} + +static inline int ath9k_hw_rfattach(struct ath_hal *ah) +{ + bool rfStatus = false; + int ecode = 0; + + rfStatus = ath9k_hw_init_rf(ah, &ecode); + if (!rfStatus) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s: RF setup failed, status %u\n", __func__, + ecode); + return ecode; + } + + return 0; +} + +static int ath9k_hw_rf_claim(struct ath_hal *ah) +{ + u32 val; + + REG_WRITE(ah, AR_PHY(0), 0x00000007); + + val = ath9k_hw_get_radiorev(ah); + switch (val & AR_RADIO_SREV_MAJOR) { + case 0: + val = AR_RAD5133_SREV_MAJOR; + break; + case AR_RAD5133_SREV_MAJOR: + case AR_RAD5122_SREV_MAJOR: + case AR_RAD2133_SREV_MAJOR: + case AR_RAD2122_SREV_MAJOR: + break; + default: + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: 5G Radio Chip Rev 0x%02X is not " + "supported by this driver\n", + __func__, ah->ah_analog5GhzRev); + return -EOPNOTSUPP; + } + + ah->ah_analog5GhzRev = val; + + return 0; +} + +static inline void ath9k_hw_init_pll(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + u32 pll; + + if (AR_SREV_9100(ah)) { + if (chan && IS_CHAN_5GHZ(chan)) + pll = 0x1450; + else + pll = 0x1458; + } else { + if (AR_SREV_9280_10_OR_LATER(ah)) { + pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); + + if (chan && IS_CHAN_5GHZ(chan)) { + pll |= SM(0x28, AR_RTC_9160_PLL_DIV); + + + if (AR_SREV_9280_20(ah)) { + if (((chan->channel % 20) == 0) + || ((chan->channel % 10) == 0)) + pll = 0x2850; + else + pll = 0x142c; + } + } else { + pll |= SM(0x2c, AR_RTC_9160_PLL_DIV); + } + + } else if (AR_SREV_9160_10_OR_LATER(ah)) { + + pll = SM(0x5, AR_RTC_9160_PLL_REFDIV); + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL); + + if (chan && IS_CHAN_5GHZ(chan)) + pll |= SM(0x50, AR_RTC_9160_PLL_DIV); + else + pll |= SM(0x58, AR_RTC_9160_PLL_DIV); + } else { + pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2; + + if (chan && IS_CHAN_HALF_RATE(chan)) + pll |= SM(0x1, AR_RTC_PLL_CLKSEL); + else if (chan && IS_CHAN_QUARTER_RATE(chan)) + pll |= SM(0x2, AR_RTC_PLL_CLKSEL); + + if (chan && IS_CHAN_5GHZ(chan)) + pll |= SM(0xa, AR_RTC_PLL_DIV); + else + pll |= SM(0xb, AR_RTC_PLL_DIV); + } + } + REG_WRITE(ah, (u16) (AR_RTC_PLL_CONTROL), pll); + + udelay(RTC_PLL_SETTLE_DELAY); + + REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); +} + +static void ath9k_hw_set_regs(struct ath_hal *ah, struct ath9k_channel *chan, + enum ath9k_ht_macmode macmode) +{ + u32 phymode; + struct ath_hal_5416 *ahp = AH5416(ah); + + phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40 + | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH; + + if (IS_CHAN_HT40(chan)) { + phymode |= AR_PHY_FC_DYN2040_EN; + + if ((chan->chanmode == CHANNEL_A_HT40PLUS) || + (chan->chanmode == CHANNEL_G_HT40PLUS)) + phymode |= AR_PHY_FC_DYN2040_PRI_CH; + + if (ahp->ah_extprotspacing == ATH9K_HT_EXTPROTSPACING_25) + phymode |= AR_PHY_FC_DYN2040_EXT_CH; + } + REG_WRITE(ah, AR_PHY_TURBO, phymode); + + ath9k_hw_set11nmac2040(ah, macmode); + + REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); + REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); +} + +static void ath9k_hw_set_operating_mode(struct ath_hal *ah, int opmode) +{ + u32 val; + + val = REG_READ(ah, 
AR_STA_ID1); + val &= ~(AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC); + switch (opmode) { + case ATH9K_M_HOSTAP: + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_STA_AP + | AR_STA_ID1_KSRCH_MODE); + REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); + break; + case ATH9K_M_IBSS: + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_ADHOC + | AR_STA_ID1_KSRCH_MODE); + REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); + break; + case ATH9K_M_STA: + case ATH9K_M_MONITOR: + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); + break; + } +} + +static inline void +ath9k_hw_set_rfmode(struct ath_hal *ah, struct ath9k_channel *chan) +{ + u32 rfMode = 0; + + if (chan == NULL) + return; + + rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan)) + ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM; + + if (!AR_SREV_9280_10_OR_LATER(ah)) + rfMode |= (IS_CHAN_5GHZ(chan)) ? AR_PHY_MODE_RF5GHZ : + AR_PHY_MODE_RF2GHZ; + + if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) + rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); + + REG_WRITE(ah, AR_PHY_MODE, rfMode); +} + +static bool ath9k_hw_set_reset(struct ath_hal *ah, int type) +{ + u32 rst_flags; + u32 tmpReg; + + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | + AR_RTC_FORCE_WAKE_ON_INT); + + if (AR_SREV_9100(ah)) { + rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD | + AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET; + } else { + tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE); + if (tmpReg & + (AR_INTR_SYNC_LOCAL_TIMEOUT | + AR_INTR_SYNC_RADM_CPL_TIMEOUT)) { + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); + REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); + } else { + REG_WRITE(ah, AR_RC, AR_RC_AHB); + } + + rst_flags = AR_RTC_RC_MAC_WARM; + if (type == ATH9K_RESET_COLD) + rst_flags |= AR_RTC_RC_MAC_COLD; + } + + REG_WRITE(ah, (u16) (AR_RTC_RC), rst_flags); + udelay(50); + + REG_WRITE(ah, (u16) (AR_RTC_RC), 0); + if (!ath9k_hw_wait(ah, (u16) (AR_RTC_RC), AR_RTC_RC_M, 0)) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s: RTC stuck in MAC reset\n", + __func__); + return false; + } + + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, 0); + + ath9k_hw_init_pll(ah, NULL); + + if (AR_SREV_9100(ah)) + udelay(50); + + return true; +} + +static inline bool ath9k_hw_set_reset_power_on(struct ath_hal *ah) +{ + REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | + AR_RTC_FORCE_WAKE_ON_INT); + + REG_WRITE(ah, (u16) (AR_RTC_RESET), 0); + REG_WRITE(ah, (u16) (AR_RTC_RESET), 1); + + if (!ath9k_hw_wait(ah, + AR_RTC_STATUS, + AR_RTC_STATUS_M, + AR_RTC_STATUS_ON)) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: RTC not waking up\n", + __func__); + return false; + } + + ath9k_hw_read_revisions(ah); + + return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM); +} + +static bool ath9k_hw_set_reset_reg(struct ath_hal *ah, + u32 type) +{ + REG_WRITE(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT); + + switch (type) { + case ATH9K_RESET_POWER_ON: + return ath9k_hw_set_reset_power_on(ah); + break; + case ATH9K_RESET_WARM: + case ATH9K_RESET_COLD: + return ath9k_hw_set_reset(ah, type); + break; + default: + return false; + } +} + +static inline +struct ath9k_channel *ath9k_hw_check_chan(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + if (!(IS_CHAN_2GHZ(chan) ^ IS_CHAN_5GHZ(chan))) { + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: invalid channel %u/0x%x; not marked as " + "2GHz or 5GHz\n", __func__, chan->channel, + chan->channelFlags); + return NULL; + } + + if (!IS_CHAN_OFDM(chan) && + !IS_CHAN_CCK(chan) && + !IS_CHAN_HT20(chan) && + !IS_CHAN_HT40(chan)) { + DPRINTF(ah->ah_sc, 
ATH_DBG_CHANNEL, + "%s: invalid channel %u/0x%x; not marked as " + "OFDM or CCK or HT20 or HT40PLUS or HT40MINUS\n", + __func__, chan->channel, chan->channelFlags); + return NULL; + } + + return ath9k_regd_check_channel(ah, chan); +} + +static inline bool +ath9k_hw_get_lower_upper_index(u8 target, + u8 *pList, + u16 listSize, + u16 *indexL, + u16 *indexR) +{ + u16 i; + + if (target <= pList[0]) { + *indexL = *indexR = 0; + return true; + } + if (target >= pList[listSize - 1]) { + *indexL = *indexR = (u16) (listSize - 1); + return true; + } + + for (i = 0; i < listSize - 1; i++) { + if (pList[i] == target) { + *indexL = *indexR = i; + return true; + } + if (target < pList[i + 1]) { + *indexL = i; + *indexR = (u16) (i + 1); + return false; + } + } + return false; +} + +static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer) +{ + int16_t nfval; + int16_t sort[ATH9K_NF_CAL_HIST_MAX]; + int i, j; + + for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++) + sort[i] = nfCalBuffer[i]; + + for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) { + for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) { + if (sort[j] > sort[j - 1]) { + nfval = sort[j]; + sort[j] = sort[j - 1]; + sort[j - 1] = nfval; + } + } + } + nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1]; + + return nfval; +} + +static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h, + int16_t *nfarray) +{ + int i; + + for (i = 0; i < NUM_NF_READINGS; i++) { + h[i].nfCalBuffer[h[i].currIndex] = nfarray[i]; + + if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX) + h[i].currIndex = 0; + + if (h[i].invalidNFcount > 0) { + if (nfarray[i] < AR_PHY_CCA_MIN_BAD_VALUE + || nfarray[i] > AR_PHY_CCA_MAX_HIGH_VALUE) { + h[i].invalidNFcount = ATH9K_NF_CAL_HIST_MAX; + } else { + h[i].invalidNFcount--; + h[i].privNF = nfarray[i]; + } + } else { + h[i].privNF = + ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer); + } + } + return; +} + +static void ar5416GetNoiseFloor(struct ath_hal *ah, + int16_t nfarray[NUM_NF_READINGS]) +{ + int16_t nf; + + if (AR_SREV_9280_10_OR_LATER(ah)) + nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR); + else + nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR); + + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "NF calibrated [ctl] [chain 0] is %d\n", nf); + nfarray[0] = nf; + + if (AR_SREV_9280_10_OR_LATER(ah)) + nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), + AR9280_PHY_CH1_MINCCA_PWR); + else + nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), + AR_PHY_CH1_MINCCA_PWR); + + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL, + "NF calibrated [ctl] [chain 1] is %d\n", nf); + nfarray[1] = nf; + + if (!AR_SREV_9280(ah)) { + nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), + AR_PHY_CH2_MINCCA_PWR); + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL, + "NF calibrated [ctl] [chain 2] is %d\n", nf); + nfarray[2] = nf; + } + + if (AR_SREV_9280_10_OR_LATER(ah)) + nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), + AR9280_PHY_EXT_MINCCA_PWR); + else + nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), + AR_PHY_EXT_MINCCA_PWR); + + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL, + "NF calibrated [ext] [chain 0] is %d\n", nf); + nfarray[3] = nf; + + if (AR_SREV_9280_10_OR_LATER(ah)) + nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), + AR9280_PHY_CH1_EXT_MINCCA_PWR); + else + nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), + AR_PHY_CH1_EXT_MINCCA_PWR); + + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "NF calibrated [ext] [chain 
1] is %d\n", nf); + nfarray[4] = nf; + + if (!AR_SREV_9280(ah)) { + nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), + AR_PHY_CH2_EXT_MINCCA_PWR); + if (nf & 0x100) + nf = 0 - ((nf ^ 0x1ff) + 1); + DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL, + "NF calibrated [ext] [chain 2] is %d\n", nf); + nfarray[5] = nf; + } +} + +static bool +getNoiseFloorThresh(struct ath_hal *ah, + const struct ath9k_channel *chan, + int16_t *nft) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + switch (chan->chanmode) { + case CHANNEL_A: + case CHANNEL_A_HT20: + case CHANNEL_A_HT40PLUS: + case CHANNEL_A_HT40MINUS: + *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_5); + break; + case CHANNEL_B: + case CHANNEL_G: + case CHANNEL_G_HT20: + case CHANNEL_G_HT40PLUS: + case CHANNEL_G_HT40MINUS: + *nft = (int16_t) ath9k_hw_get_eeprom(ahp, EEP_NFTHRESH_2); + break; + default: + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: invalid channel flags 0x%x\n", __func__, + chan->channelFlags); + return false; + } + return true; +} + +static void ath9k_hw_start_nfcal(struct ath_hal *ah) +{ + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_ENABLE_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); +} + +static void +ath9k_hw_loadnf(struct ath_hal *ah, struct ath9k_channel *chan) +{ + struct ath9k_nfcal_hist *h; + int i, j; + int32_t val; + const u32 ar5416_cca_regs[6] = { + AR_PHY_CCA, + AR_PHY_CH1_CCA, + AR_PHY_CH2_CCA, + AR_PHY_EXT_CCA, + AR_PHY_CH1_EXT_CCA, + AR_PHY_CH2_EXT_CCA + }; + u8 chainmask; + + if (AR_SREV_9280(ah)) + chainmask = 0x1B; + else + chainmask = 0x3F; + +#ifdef ATH_NF_PER_CHAN + h = chan->nfCalHist; +#else + h = ah->nfCalHist; +#endif + + for (i = 0; i < NUM_NF_READINGS; i++) { + if (chainmask & (1 << i)) { + val = REG_READ(ah, ar5416_cca_regs[i]); + val &= 0xFFFFFE00; + val |= (((u32) (h[i].privNF) << 1) & 0x1ff); + REG_WRITE(ah, ar5416_cca_regs[i], val); + } + } + + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_ENABLE_NF); + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + + for (j = 0; j < 1000; j++) { + if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & + AR_PHY_AGC_CONTROL_NF) == 0) + break; + udelay(10); + } + + for (i = 0; i < NUM_NF_READINGS; i++) { + if (chainmask & (1 << i)) { + val = REG_READ(ah, ar5416_cca_regs[i]); + val &= 0xFFFFFE00; + val |= (((u32) (-50) << 1) & 0x1ff); + REG_WRITE(ah, ar5416_cca_regs[i], val); + } + } +} + +static int16_t ath9k_hw_getnf(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + int16_t nf, nfThresh; + int16_t nfarray[NUM_NF_READINGS] = { 0 }; + struct ath9k_nfcal_hist *h; + u8 chainmask; + + if (AR_SREV_9280(ah)) + chainmask = 0x1B; + else + chainmask = 0x3F; + + chan->channelFlags &= (~CHANNEL_CW_INT); + if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: NF did not complete in calibration window\n", + __func__); + nf = 0; + chan->rawNoiseFloor = nf; + return chan->rawNoiseFloor; + } else { + ar5416GetNoiseFloor(ah, nfarray); + nf = nfarray[0]; + if (getNoiseFloorThresh(ah, chan, &nfThresh) + && nf > nfThresh) { + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: noise floor failed detected; " + "detected %d, threshold %d\n", __func__, + nf, nfThresh); + chan->channelFlags |= CHANNEL_CW_INT; + } + } + +#ifdef ATH_NF_PER_CHAN + h = chan->nfCalHist; +#else + h = ah->nfCalHist; +#endif + + ath9k_hw_update_nfcal_hist_buffer(h, nfarray); + 
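+	/*
+	 * Descriptive note on the code above/below: after the history
+	 * buffer update, privNF for each chain holds either the latest
+	 * raw reading (while invalidNFcount is still non-zero) or the
+	 * median of the calibration history; the chain 0 control-channel
+	 * entry is what gets reported back as the channel noise floor.
+	 */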
chan->rawNoiseFloor = h[0].privNF; + + return chan->rawNoiseFloor; +} + +static void ath9k_hw_update_mibstats(struct ath_hal *ah, + struct ath9k_mib_stats *stats) +{ + stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL); + stats->rts_bad += REG_READ(ah, AR_RTS_FAIL); + stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL); + stats->rts_good += REG_READ(ah, AR_RTS_OK); + stats->beacons += REG_READ(ah, AR_BEACON_CNT); +} + +static void ath9k_enable_mib_counters(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable mib counters\n"); + + ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); + + REG_WRITE(ah, AR_FILT_OFDM, 0); + REG_WRITE(ah, AR_FILT_CCK, 0); + REG_WRITE(ah, AR_MIBC, + ~(AR_MIBC_COW | AR_MIBC_FMC | AR_MIBC_CMC | AR_MIBC_MCS) + & 0x0f); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); +} + +static void ath9k_hw_disable_mib_counters(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disabling MIB counters\n"); + + REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC | AR_MIBC_CMC); + + ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); + + REG_WRITE(ah, AR_FILT_OFDM, 0); + REG_WRITE(ah, AR_FILT_CCK, 0); +} + +static int ath9k_hw_get_ani_channel_idx(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + int i; + + for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) { + if (ahp->ah_ani[i].c.channel == chan->channel) + return i; + if (ahp->ah_ani[i].c.channel == 0) { + ahp->ah_ani[i].c.channel = chan->channel; + ahp->ah_ani[i].c.channelFlags = chan->channelFlags; + return i; + } + } + + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "No more channel states left. Using channel 0\n"); + return 0; +} + +static void ath9k_hw_ani_attach(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + int i; + + ahp->ah_hasHwPhyCounters = 1; + + memset(ahp->ah_ani, 0, sizeof(ahp->ah_ani)); + for (i = 0; i < ARRAY_SIZE(ahp->ah_ani); i++) { + ahp->ah_ani[i].ofdmTrigHigh = ATH9K_ANI_OFDM_TRIG_HIGH; + ahp->ah_ani[i].ofdmTrigLow = ATH9K_ANI_OFDM_TRIG_LOW; + ahp->ah_ani[i].cckTrigHigh = ATH9K_ANI_CCK_TRIG_HIGH; + ahp->ah_ani[i].cckTrigLow = ATH9K_ANI_CCK_TRIG_LOW; + ahp->ah_ani[i].rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH; + ahp->ah_ani[i].rssiThrLow = ATH9K_ANI_RSSI_THR_LOW; + ahp->ah_ani[i].ofdmWeakSigDetectOff = + !ATH9K_ANI_USE_OFDM_WEAK_SIG; + ahp->ah_ani[i].cckWeakSigThreshold = + ATH9K_ANI_CCK_WEAK_SIG_THR; + ahp->ah_ani[i].spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL; + ahp->ah_ani[i].firstepLevel = ATH9K_ANI_FIRSTEP_LVL; + if (ahp->ah_hasHwPhyCounters) { + ahp->ah_ani[i].ofdmPhyErrBase = + AR_PHY_COUNTMAX - ATH9K_ANI_OFDM_TRIG_HIGH; + ahp->ah_ani[i].cckPhyErrBase = + AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH; + } + } + if (ahp->ah_hasHwPhyCounters) { + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "Setting OfdmErrBase = 0x%08x\n", + ahp->ah_ani[0].ofdmPhyErrBase); + DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n", + ahp->ah_ani[0].cckPhyErrBase); + + REG_WRITE(ah, AR_PHY_ERR_1, ahp->ah_ani[0].ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_2, ahp->ah_ani[0].cckPhyErrBase); + ath9k_enable_mib_counters(ah); + } + ahp->ah_aniPeriod = ATH9K_ANI_PERIOD; + if (ah->ah_config.enable_ani) + ahp->ah_procPhyErr |= HAL_PROCESS_ANI; +} + +static inline void ath9k_hw_ani_setup(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + int i; + + const int totalSizeDesired[] = { -55, -55, -55, -55, -62 }; + const int coarseHigh[] = { -14, 
-14, -14, -14, -12 }; + const int coarseLow[] = { -64, -64, -64, -64, -70 }; + const int firpwr[] = { -78, -78, -78, -78, -80 }; + + for (i = 0; i < 5; i++) { + ahp->ah_totalSizeDesired[i] = totalSizeDesired[i]; + ahp->ah_coarseHigh[i] = coarseHigh[i]; + ahp->ah_coarseLow[i] = coarseLow[i]; + ahp->ah_firpwr[i] = firpwr[i]; + } +} + +static void ath9k_hw_ani_detach(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Detaching Ani\n"); + if (ahp->ah_hasHwPhyCounters) { + ath9k_hw_disable_mib_counters(ah); + REG_WRITE(ah, AR_PHY_ERR_1, 0); + REG_WRITE(ah, AR_PHY_ERR_2, 0); + } +} + + +static bool ath9k_hw_ani_control(struct ath_hal *ah, + enum ath9k_ani_cmd cmd, int param) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416AniState *aniState = ahp->ah_curani; + + switch (cmd & ahp->ah_ani_function) { + case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{ + u32 level = param; + + if (level >= ARRAY_SIZE(ahp->ah_totalSizeDesired)) { + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "%s: level out of range (%u > %u)\n", + __func__, level, + (unsigned) ARRAY_SIZE(ahp-> + ah_totalSizeDesired)); + return false; + } + + REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, + AR_PHY_DESIRED_SZ_TOT_DES, + ahp->ah_totalSizeDesired[level]); + REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1, + AR_PHY_AGC_CTL1_COARSE_LOW, + ahp->ah_coarseLow[level]); + REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1, + AR_PHY_AGC_CTL1_COARSE_HIGH, + ahp->ah_coarseHigh[level]); + REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, + AR_PHY_FIND_SIG_FIRPWR, + ahp->ah_firpwr[level]); + + if (level > aniState->noiseImmunityLevel) + ahp->ah_stats.ast_ani_niup++; + else if (level < aniState->noiseImmunityLevel) + ahp->ah_stats.ast_ani_nidown++; + aniState->noiseImmunityLevel = level; + break; + } + case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ + const int m1ThreshLow[] = { 127, 50 }; + const int m2ThreshLow[] = { 127, 40 }; + const int m1Thresh[] = { 127, 0x4d }; + const int m2Thresh[] = { 127, 0x40 }; + const int m2CountThr[] = { 31, 16 }; + const int m2CountThrLow[] = { 63, 48 }; + u32 on = param ? 1 : 0; + + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M1_THRESH_LOW, + m1ThreshLow[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M2_THRESH_LOW, + m2ThreshLow[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M1_THRESH, + m1Thresh[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M2_THRESH, + m2Thresh[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR, + AR_PHY_SFCORR_M2COUNT_THR, + m2CountThr[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW, + m2CountThrLow[on]); + + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M1_THRESH_LOW, + m1ThreshLow[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M2_THRESH_LOW, + m2ThreshLow[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M1_THRESH, + m1Thresh[on]); + REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT, + AR_PHY_SFCORR_EXT_M2_THRESH, + m2Thresh[on]); + + if (on) + REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); + else + REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW, + AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); + + if (!on != aniState->ofdmWeakSigDetectOff) { + if (on) + ahp->ah_stats.ast_ani_ofdmon++; + else + ahp->ah_stats.ast_ani_ofdmoff++; + aniState->ofdmWeakSigDetectOff = !on; + } + break; + } + case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{ + const int weakSigThrCck[] = { 8, 6 }; + u32 high = param ? 
1 : 0; + + REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, + AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK, + weakSigThrCck[high]); + if (high != aniState->cckWeakSigThreshold) { + if (high) + ahp->ah_stats.ast_ani_cckhigh++; + else + ahp->ah_stats.ast_ani_ccklow++; + aniState->cckWeakSigThreshold = high; + } + break; + } + case ATH9K_ANI_FIRSTEP_LEVEL:{ + const int firstep[] = { 0, 4, 8 }; + u32 level = param; + + if (level >= ARRAY_SIZE(firstep)) { + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "%s: level out of range (%u > %u)\n", + __func__, level, + (unsigned) ARRAY_SIZE(firstep)); + return false; + } + REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, + AR_PHY_FIND_SIG_FIRSTEP, + firstep[level]); + if (level > aniState->firstepLevel) + ahp->ah_stats.ast_ani_stepup++; + else if (level < aniState->firstepLevel) + ahp->ah_stats.ast_ani_stepdown++; + aniState->firstepLevel = level; + break; + } + case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ + const int cycpwrThr1[] = + { 2, 4, 6, 8, 10, 12, 14, 16 }; + u32 level = param; + + if (level >= ARRAY_SIZE(cycpwrThr1)) { + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "%s: level out of range (%u > %u)\n", + __func__, level, + (unsigned) + ARRAY_SIZE(cycpwrThr1)); + return false; + } + REG_RMW_FIELD(ah, AR_PHY_TIMING5, + AR_PHY_TIMING5_CYCPWR_THR1, + cycpwrThr1[level]); + if (level > aniState->spurImmunityLevel) + ahp->ah_stats.ast_ani_spurup++; + else if (level < aniState->spurImmunityLevel) + ahp->ah_stats.ast_ani_spurdown++; + aniState->spurImmunityLevel = level; + break; + } + case ATH9K_ANI_PRESENT: + break; + default: + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "%s: invalid cmd %u\n", __func__, cmd); + return false; + } + + DPRINTF(ah->ah_sc, ATH_DBG_ANI, "%s: ANI parameters:\n", __func__); + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "noiseImmunityLevel=%d, spurImmunityLevel=%d, " + "ofdmWeakSigDetectOff=%d\n", + aniState->noiseImmunityLevel, aniState->spurImmunityLevel, + !aniState->ofdmWeakSigDetectOff); + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "cckWeakSigThreshold=%d, " + "firstepLevel=%d, listenTime=%d\n", + aniState->cckWeakSigThreshold, aniState->firstepLevel, + aniState->listenTime); + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", + aniState->cycleCount, aniState->ofdmPhyErrCount, + aniState->cckPhyErrCount); + return true; +} + +static void ath9k_ani_restart(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416AniState *aniState; + + if (!DO_ANI(ah)) + return; + + aniState = ahp->ah_curani; + + aniState->listenTime = 0; + if (ahp->ah_hasHwPhyCounters) { + if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) { + aniState->ofdmPhyErrBase = 0; + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "OFDM Trigger is too high for hw counters\n"); + } else { + aniState->ofdmPhyErrBase = + AR_PHY_COUNTMAX - aniState->ofdmTrigHigh; + } + if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) { + aniState->cckPhyErrBase = 0; + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "CCK Trigger is too high for hw counters\n"); + } else { + aniState->cckPhyErrBase = + AR_PHY_COUNTMAX - aniState->cckTrigHigh; + } + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "%s: Writing ofdmbase=%u cckbase=%u\n", + __func__, aniState->ofdmPhyErrBase, + aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); + + ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); + } + aniState->ofdmPhyErrCount = 0; + aniState->cckPhyErrCount = 0; +} + +static 
void ath9k_hw_ani_ofdm_err_trigger(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_channel *chan = ah->ah_curchan; + struct ar5416AniState *aniState; + enum wireless_mode mode; + int32_t rssi; + + if (!DO_ANI(ah)) + return; + + aniState = ahp->ah_curani; + + if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, + aniState->noiseImmunityLevel + 1)) { + return; + } + } + + if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, + aniState->spurImmunityLevel + 1)) { + return; + } + } + + if (ah->ah_opmode == ATH9K_M_HOSTAP) { + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) { + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + } + return; + } + rssi = BEACON_RSSI(ahp); + if (rssi > aniState->rssiThrHigh) { + if (!aniState->ofdmWeakSigDetectOff) { + if (ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + false)) { + ath9k_hw_ani_control(ah, + ATH9K_ANI_SPUR_IMMUNITY_LEVEL, + 0); + return; + } + } + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) { + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + return; + } + } else if (rssi > aniState->rssiThrLow) { + if (aniState->ofdmWeakSigDetectOff) + ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + true); + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + return; + } else { + mode = ath9k_hw_chan2wmode(ah, chan); + if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) { + if (!aniState->ofdmWeakSigDetectOff) + ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + false); + if (aniState->firstepLevel > 0) + ath9k_hw_ani_control(ah, + ATH9K_ANI_FIRSTEP_LEVEL, + 0); + return; + } + } +} + +static void ath9k_hw_ani_cck_err_trigger(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_channel *chan = ah->ah_curchan; + struct ar5416AniState *aniState; + enum wireless_mode mode; + int32_t rssi; + + if (!DO_ANI(ah)) + return; + + aniState = ahp->ah_curani; + if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, + aniState->noiseImmunityLevel + 1)) { + return; + } + } + if (ah->ah_opmode == ATH9K_M_HOSTAP) { + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) { + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + } + return; + } + rssi = BEACON_RSSI(ahp); + if (rssi > aniState->rssiThrLow) { + if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel + 1); + } else { + mode = ath9k_hw_chan2wmode(ah, chan); + if (mode == ATH9K_MODE_11G || mode == ATH9K_MODE_11B) { + if (aniState->firstepLevel > 0) + ath9k_hw_ani_control(ah, + ATH9K_ANI_FIRSTEP_LEVEL, + 0); + } + } +} + +static void ath9k_ani_reset(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416AniState *aniState; + struct ath9k_channel *chan = ah->ah_curchan; + int index; + + if (!DO_ANI(ah)) + return; + + index = ath9k_hw_get_ani_channel_idx(ah, chan); + aniState = &ahp->ah_ani[index]; + ahp->ah_curani = aniState; + + if (DO_ANI(ah) && ah->ah_opmode != ATH9K_M_STA + && ah->ah_opmode != ATH9K_M_IBSS) { + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "%s: Reset ANI state opmode %u\n", __func__, + ah->ah_opmode); + ahp->ah_stats.ast_ani_reset++; + 
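+		/*
+		 * Descriptive note: for non-STA/IBSS operation the ANI
+		 * levels are dropped back to their baseline values, PHY
+		 * error frames are allowed through the RX filter, and the
+		 * configured OFDM/CCK trigger thresholds are picked up
+		 * for AP mode just below.
+		 */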
ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0); + ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0); + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0); + ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + !ATH9K_ANI_USE_OFDM_WEAK_SIG); + ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR, + ATH9K_ANI_CCK_WEAK_SIG_THR); + ath9k_hw_setrxfilter(ah, + ath9k_hw_getrxfilter(ah) | + ATH9K_RX_FILTER_PHYERR); + if (ah->ah_opmode == ATH9K_M_HOSTAP) { + ahp->ah_curani->ofdmTrigHigh = + ah->ah_config.ofdm_trig_high; + ahp->ah_curani->ofdmTrigLow = + ah->ah_config.ofdm_trig_low; + ahp->ah_curani->cckTrigHigh = + ah->ah_config.cck_trig_high; + ahp->ah_curani->cckTrigLow = + ah->ah_config.cck_trig_low; + } + ath9k_ani_restart(ah); + return; + } + + if (aniState->noiseImmunityLevel != 0) + ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, + aniState->noiseImmunityLevel); + if (aniState->spurImmunityLevel != 0) + ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, + aniState->spurImmunityLevel); + if (aniState->ofdmWeakSigDetectOff) + ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + !aniState->ofdmWeakSigDetectOff); + if (aniState->cckWeakSigThreshold) + ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR, + aniState->cckWeakSigThreshold); + if (aniState->firstepLevel != 0) + ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel); + if (ahp->ah_hasHwPhyCounters) { + ath9k_hw_setrxfilter(ah, + ath9k_hw_getrxfilter(ah) & + ~ATH9K_RX_FILTER_PHYERR); + ath9k_ani_restart(ah); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); + + } else { + ath9k_ani_restart(ah); + ath9k_hw_setrxfilter(ah, + ath9k_hw_getrxfilter(ah) | + ATH9K_RX_FILTER_PHYERR); + } +} + +void ath9k_hw_procmibevent(struct ath_hal *ah, + const struct ath9k_node_stats *stats) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + u32 phyCnt1, phyCnt2; + + DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Processing Mib Intr\n"); + + REG_WRITE(ah, AR_FILT_OFDM, 0); + REG_WRITE(ah, AR_FILT_CCK, 0); + if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING)) + REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR); + + ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); + ahp->ah_stats.ast_nodestats = *stats; + + if (!DO_ANI(ah)) + return; + + phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); + phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); + if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) || + ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) { + struct ar5416AniState *aniState = ahp->ah_curani; + u32 ofdmPhyErrCnt, cckPhyErrCnt; + + ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase; + ahp->ah_stats.ast_ani_ofdmerrs += + ofdmPhyErrCnt - aniState->ofdmPhyErrCount; + aniState->ofdmPhyErrCount = ofdmPhyErrCnt; + + cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase; + ahp->ah_stats.ast_ani_cckerrs += + cckPhyErrCnt - aniState->cckPhyErrCount; + aniState->cckPhyErrCount = cckPhyErrCnt; + + if (aniState->ofdmPhyErrCount > aniState->ofdmTrigHigh) + ath9k_hw_ani_ofdm_err_trigger(ah); + if (aniState->cckPhyErrCount > aniState->cckTrigHigh) + ath9k_hw_ani_cck_err_trigger(ah); + + ath9k_ani_restart(ah); + } +} + +static void ath9k_hw_ani_lower_immunity(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416AniState *aniState; + int32_t rssi; + + aniState = ahp->ah_curani; + + if (ah->ah_opmode == ATH9K_M_HOSTAP) { + if (aniState->firstepLevel > 0) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, + 
aniState->firstepLevel - 1)) { + return; + } + } + } else { + rssi = BEACON_RSSI(ahp); + if (rssi > aniState->rssiThrHigh) { + /* XXX: Handle me */ + } else if (rssi > aniState->rssiThrLow) { + if (aniState->ofdmWeakSigDetectOff) { + if (ath9k_hw_ani_control(ah, + ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, + true) == + true) { + return; + } + } + if (aniState->firstepLevel > 0) { + if (ath9k_hw_ani_control + (ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel - 1) == + true) { + return; + } + } + } else { + if (aniState->firstepLevel > 0) { + if (ath9k_hw_ani_control + (ah, ATH9K_ANI_FIRSTEP_LEVEL, + aniState->firstepLevel - 1) == + true) { + return; + } + } + } + } + + if (aniState->spurImmunityLevel > 0) { + if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, + aniState->spurImmunityLevel - 1)) { + return; + } + } + + if (aniState->noiseImmunityLevel > 0) { + ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, + aniState->noiseImmunityLevel - 1); + return; + } +} + +static int32_t ath9k_hw_ani_get_listen_time(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416AniState *aniState; + u32 txFrameCount, rxFrameCount, cycleCount; + int32_t listenTime; + + txFrameCount = REG_READ(ah, AR_TFCNT); + rxFrameCount = REG_READ(ah, AR_RFCNT); + cycleCount = REG_READ(ah, AR_CCCNT); + + aniState = ahp->ah_curani; + if (aniState->cycleCount == 0 || aniState->cycleCount > cycleCount) { + + listenTime = 0; + ahp->ah_stats.ast_ani_lzero++; + } else { + int32_t ccdelta = cycleCount - aniState->cycleCount; + int32_t rfdelta = rxFrameCount - aniState->rxFrameCount; + int32_t tfdelta = txFrameCount - aniState->txFrameCount; + listenTime = (ccdelta - rfdelta - tfdelta) / 44000; + } + aniState->cycleCount = cycleCount; + aniState->txFrameCount = txFrameCount; + aniState->rxFrameCount = rxFrameCount; + + return listenTime; +} + +void ath9k_hw_ani_monitor(struct ath_hal *ah, + const struct ath9k_node_stats *stats, + struct ath9k_channel *chan) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416AniState *aniState; + int32_t listenTime; + + aniState = ahp->ah_curani; + ahp->ah_stats.ast_nodestats = *stats; + + listenTime = ath9k_hw_ani_get_listen_time(ah); + if (listenTime < 0) { + ahp->ah_stats.ast_ani_lneg++; + ath9k_ani_restart(ah); + return; + } + + aniState->listenTime += listenTime; + + if (ahp->ah_hasHwPhyCounters) { + u32 phyCnt1, phyCnt2; + u32 ofdmPhyErrCnt, cckPhyErrCnt; + + ath9k_hw_update_mibstats(ah, &ahp->ah_mibStats); + + phyCnt1 = REG_READ(ah, AR_PHY_ERR_1); + phyCnt2 = REG_READ(ah, AR_PHY_ERR_2); + + if (phyCnt1 < aniState->ofdmPhyErrBase || + phyCnt2 < aniState->cckPhyErrBase) { + if (phyCnt1 < aniState->ofdmPhyErrBase) { + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "%s: phyCnt1 0x%x, resetting " + "counter value to 0x%x\n", + __func__, phyCnt1, + aniState->ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_1, + aniState->ofdmPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_MASK_1, + AR_PHY_ERR_OFDM_TIMING); + } + if (phyCnt2 < aniState->cckPhyErrBase) { + DPRINTF(ah->ah_sc, ATH_DBG_ANI, + "%s: phyCnt2 0x%x, resetting " + "counter value to 0x%x\n", + __func__, phyCnt2, + aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_2, + aniState->cckPhyErrBase); + REG_WRITE(ah, AR_PHY_ERR_MASK_2, + AR_PHY_ERR_CCK_TIMING); + } + return; + } + + ofdmPhyErrCnt = phyCnt1 - aniState->ofdmPhyErrBase; + ahp->ah_stats.ast_ani_ofdmerrs += + ofdmPhyErrCnt - aniState->ofdmPhyErrCount; + aniState->ofdmPhyErrCount = ofdmPhyErrCnt; + + cckPhyErrCnt = phyCnt2 - aniState->cckPhyErrBase; + 
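/* Fold this interval's CCK PHY error delta into the cumulative ANI statistics. */ +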
ahp->ah_stats.ast_ani_cckerrs += + cckPhyErrCnt - aniState->cckPhyErrCount; + aniState->cckPhyErrCount = cckPhyErrCnt; + } + + if (!DO_ANI(ah)) + return; + + if (aniState->listenTime > 5 * ahp->ah_aniPeriod) { + if (aniState->ofdmPhyErrCount <= aniState->listenTime * + aniState->ofdmTrigLow / 1000 && + aniState->cckPhyErrCount <= aniState->listenTime * + aniState->cckTrigLow / 1000) + ath9k_hw_ani_lower_immunity(ah); + ath9k_ani_restart(ah); + } else if (aniState->listenTime > ahp->ah_aniPeriod) { + if (aniState->ofdmPhyErrCount > aniState->listenTime * + aniState->ofdmTrigHigh / 1000) { + ath9k_hw_ani_ofdm_err_trigger(ah); + ath9k_ani_restart(ah); + } else if (aniState->cckPhyErrCount > + aniState->listenTime * aniState->cckTrigHigh / + 1000) { + ath9k_hw_ani_cck_err_trigger(ah); + ath9k_ani_restart(ah); + } + } +} + +#ifndef ATH_NF_PER_CHAN +static void ath9k_init_nfcal_hist_buffer(struct ath_hal *ah) +{ + int i, j; + + for (i = 0; i < NUM_NF_READINGS; i++) { + ah->nfCalHist[i].currIndex = 0; + ah->nfCalHist[i].privNF = AR_PHY_CCA_MAX_GOOD_VALUE; + ah->nfCalHist[i].invalidNFcount = + AR_PHY_CCA_FILTERWINDOW_LENGTH; + for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { + ah->nfCalHist[i].nfCalBuffer[j] = + AR_PHY_CCA_MAX_GOOD_VALUE; + } + } + return; +} +#endif + +static void ath9k_hw_gpio_cfg_output_mux(struct ath_hal *ah, + u32 gpio, u32 type) +{ + int addr; + u32 gpio_shift, tmp; + + if (gpio > 11) + addr = AR_GPIO_OUTPUT_MUX3; + else if (gpio > 5) + addr = AR_GPIO_OUTPUT_MUX2; + else + addr = AR_GPIO_OUTPUT_MUX1; + + gpio_shift = (gpio % 6) * 5; + + if (AR_SREV_9280_20_OR_LATER(ah) + || (addr != AR_GPIO_OUTPUT_MUX1)) { + REG_RMW(ah, addr, (type << gpio_shift), + (0x1f << gpio_shift)); + } else { + tmp = REG_READ(ah, addr); + tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0); + tmp &= ~(0x1f << gpio_shift); + tmp |= (type << gpio_shift); + REG_WRITE(ah, addr, tmp); + } +} + +static bool ath9k_hw_cfg_output(struct ath_hal *ah, u32 gpio, + enum ath9k_gpio_output_mux_type + halSignalType) +{ + u32 ah_signal_type; + u32 gpio_shift; + + static u32 MuxSignalConversionTable[] = { + + AR_GPIO_OUTPUT_MUX_AS_OUTPUT, + + AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED, + + AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED, + + AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED, + + AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED, + }; + + if ((halSignalType >= 0) + && (halSignalType < ARRAY_SIZE(MuxSignalConversionTable))) + ah_signal_type = MuxSignalConversionTable[halSignalType]; + else + return false; + + ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type); + + gpio_shift = 2 * gpio; + + REG_RMW(ah, + AR_GPIO_OE_OUT, + (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), + (AR_GPIO_OE_OUT_DRV << gpio_shift)); + + return true; +} + +static bool ath9k_hw_set_gpio(struct ath_hal *ah, u32 gpio, + u32 val) +{ + REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), + AR_GPIO_BIT(gpio)); + return true; +} + +static u32 ath9k_hw_gpio_get(struct ath_hal *ah, u32 gpio) +{ + if (gpio >= ah->ah_caps.num_gpio_pins) + return 0xffffffff; + + if (AR_SREV_9280_10_OR_LATER(ah)) { + return (MS + (REG_READ(ah, AR_GPIO_IN_OUT), + AR928X_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) != 0; + } else { + return (MS(REG_READ(ah, AR_GPIO_IN_OUT), AR_GPIO_IN_VAL) & + AR_GPIO_BIT(gpio)) != 0; + } +} + +static inline int ath9k_hw_post_attach(struct ath_hal *ah) +{ + int ecode; + + if (!ath9k_hw_chip_test(ah)) { + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "%s: hardware self-test failed\n", __func__); + return -ENODEV; + } + + ecode = ath9k_hw_rf_claim(ah); + if (ecode != 0) + return ecode; + + ecode = 
ath9k_hw_eeprom_attach(ah); + if (ecode != 0) + return ecode; + ecode = ath9k_hw_rfattach(ah); + if (ecode != 0) + return ecode; + + if (!AR_SREV_9100(ah)) { + ath9k_hw_ani_setup(ah); + ath9k_hw_ani_attach(ah); + } + return 0; +} + +static u32 ath9k_hw_ini_fixup(struct ath_hal *ah, + struct ar5416_eeprom *pEepData, + u32 reg, u32 value) +{ + struct base_eep_header *pBase = &(pEepData->baseEepHeader); + + switch (ah->ah_devid) { + case AR9280_DEVID_PCI: + if (reg == 0x7894) { + DPRINTF(ah->ah_sc, ATH_DBG_ANY, + "ini VAL: %x EEPROM: %x\n", value, + (pBase->version & 0xff)); + + if ((pBase->version & 0xff) > 0x0a) { + DPRINTF(ah->ah_sc, ATH_DBG_ANY, + "PWDCLKIND: %d\n", + pBase->pwdclkind); + value &= ~AR_AN_TOP2_PWDCLKIND; + value |= AR_AN_TOP2_PWDCLKIND & (pBase-> + pwdclkind << AR_AN_TOP2_PWDCLKIND_S); + } else { + DPRINTF(ah->ah_sc, ATH_DBG_ANY, + "PWDCLKIND Earlier Rev\n"); + } + + DPRINTF(ah->ah_sc, ATH_DBG_ANY, + "final ini VAL: %x\n", value); + } + break; + } + return value; +} + +static bool ath9k_hw_fill_cap_info(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + u16 capField = 0, eeval; + + eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_0); + + ah->ah_currentRD = eeval; + + eeval = ath9k_hw_get_eeprom(ahp, EEP_REG_1); + ah->ah_currentRDExt = eeval; + + capField = ath9k_hw_get_eeprom(ahp, EEP_OP_CAP); + + if (ah->ah_opmode != ATH9K_M_HOSTAP && + ah->ah_subvendorid == AR_SUBVENDOR_ID_NEW_A) { + if (ah->ah_currentRD == 0x64 || ah->ah_currentRD == 0x65) + ah->ah_currentRD += 5; + else if (ah->ah_currentRD == 0x41) + ah->ah_currentRD = 0x43; + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: regdomain mapped to 0x%x\n", __func__, + ah->ah_currentRD); + } + + eeval = ath9k_hw_get_eeprom(ahp, EEP_OP_MODE); + bitmap_zero(pCap->wireless_modes, ATH9K_MODE_MAX); + + if (eeval & AR5416_OPFLAGS_11A) { + set_bit(ATH9K_MODE_11A, pCap->wireless_modes); + if (ah->ah_config.ht_enable) { + if (!(eeval & AR5416_OPFLAGS_N_5G_HT20)) + set_bit(ATH9K_MODE_11NA_HT20, + pCap->wireless_modes); + if (!(eeval & AR5416_OPFLAGS_N_5G_HT40)) { + set_bit(ATH9K_MODE_11NA_HT40PLUS, + pCap->wireless_modes); + set_bit(ATH9K_MODE_11NA_HT40MINUS, + pCap->wireless_modes); + } + } + } + + if (eeval & AR5416_OPFLAGS_11G) { + set_bit(ATH9K_MODE_11B, pCap->wireless_modes); + set_bit(ATH9K_MODE_11G, pCap->wireless_modes); + if (ah->ah_config.ht_enable) { + if (!(eeval & AR5416_OPFLAGS_N_2G_HT20)) + set_bit(ATH9K_MODE_11NG_HT20, + pCap->wireless_modes); + if (!(eeval & AR5416_OPFLAGS_N_2G_HT40)) { + set_bit(ATH9K_MODE_11NG_HT40PLUS, + pCap->wireless_modes); + set_bit(ATH9K_MODE_11NG_HT40MINUS, + pCap->wireless_modes); + } + } + } + + pCap->tx_chainmask = ath9k_hw_get_eeprom(ahp, EEP_TX_MASK); + if ((ah->ah_isPciExpress) + || (eeval & AR5416_OPFLAGS_11A)) { + pCap->rx_chainmask = + ath9k_hw_get_eeprom(ahp, EEP_RX_MASK); + } else { + pCap->rx_chainmask = + (ath9k_hw_gpio_get(ah, 0)) ? 
0x5 : 0x7; + } + + if (!(AR_SREV_9280(ah) && (ah->ah_macRev == 0))) + ahp->ah_miscMode |= AR_PCU_MIC_NEW_LOC_ENA; + + pCap->low_2ghz_chan = 2312; + pCap->high_2ghz_chan = 2732; + + pCap->low_5ghz_chan = 4920; + pCap->high_5ghz_chan = 6100; + + pCap->hw_caps &= ~ATH9K_HW_CAP_CIPHER_CKIP; + pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_TKIP; + pCap->hw_caps |= ATH9K_HW_CAP_CIPHER_AESCCM; + + pCap->hw_caps &= ~ATH9K_HW_CAP_MIC_CKIP; + pCap->hw_caps |= ATH9K_HW_CAP_MIC_TKIP; + pCap->hw_caps |= ATH9K_HW_CAP_MIC_AESCCM; + + pCap->hw_caps |= ATH9K_HW_CAP_CHAN_SPREAD; + + if (ah->ah_config.ht_enable) + pCap->hw_caps |= ATH9K_HW_CAP_HT; + else + pCap->hw_caps &= ~ATH9K_HW_CAP_HT; + + pCap->hw_caps |= ATH9K_HW_CAP_GTT; + pCap->hw_caps |= ATH9K_HW_CAP_VEOL; + pCap->hw_caps |= ATH9K_HW_CAP_BSSIDMASK; + pCap->hw_caps &= ~ATH9K_HW_CAP_MCAST_KEYSEARCH; + + if (capField & AR_EEPROM_EEPCAP_MAXQCU) + pCap->total_queues = + MS(capField, AR_EEPROM_EEPCAP_MAXQCU); + else + pCap->total_queues = ATH9K_NUM_TX_QUEUES; + + if (capField & AR_EEPROM_EEPCAP_KC_ENTRIES) + pCap->keycache_size = + 1 << MS(capField, AR_EEPROM_EEPCAP_KC_ENTRIES); + else + pCap->keycache_size = AR_KEYTABLE_SIZE; + + pCap->hw_caps |= ATH9K_HW_CAP_FASTCC; + pCap->num_mr_retries = 4; + pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; + + if (AR_SREV_9280_10_OR_LATER(ah)) + pCap->num_gpio_pins = AR928X_NUM_GPIO; + else + pCap->num_gpio_pins = AR_NUM_GPIO; + + if (AR_SREV_9280_10_OR_LATER(ah)) { + pCap->hw_caps |= ATH9K_HW_CAP_WOW; + pCap->hw_caps |= ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT; + } else { + pCap->hw_caps &= ~ATH9K_HW_CAP_WOW; + pCap->hw_caps &= ~ATH9K_HW_CAP_WOW_MATCHPATTERN_EXACT; + } + + if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) { + pCap->hw_caps |= ATH9K_HW_CAP_CST; + pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX; + } else { + pCap->rts_aggr_limit = (8 * 1024); + } + + pCap->hw_caps |= ATH9K_HW_CAP_ENHANCEDPM; + + ah->ah_rfsilent = ath9k_hw_get_eeprom(ahp, EEP_RF_SILENT); + if (ah->ah_rfsilent & EEP_RFSILENT_ENABLED) { + ahp->ah_gpioSelect = + MS(ah->ah_rfsilent, EEP_RFSILENT_GPIO_SEL); + ahp->ah_polarity = + MS(ah->ah_rfsilent, EEP_RFSILENT_POLARITY); + + ath9k_hw_setcapability(ah, ATH9K_CAP_RFSILENT, 1, true, + NULL); + pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; + } + + if ((ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) || + (ah->ah_macVersion == AR_SREV_VERSION_5416_PCIE) || + (ah->ah_macVersion == AR_SREV_VERSION_9160) || + (ah->ah_macVersion == AR_SREV_VERSION_9100) || + (ah->ah_macVersion == AR_SREV_VERSION_9280)) + pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP; + else + pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP; + + if (AR_SREV_9280(ah)) + pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS; + else + pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS; + + if (ah->ah_currentRDExt & (1 << REG_EXT_JAPAN_MIDBAND)) { + pCap->reg_cap = + AR_EEPROM_EEREGCAP_EN_KK_NEW_11A | + AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN | + AR_EEPROM_EEREGCAP_EN_KK_U2 | + AR_EEPROM_EEREGCAP_EN_KK_MIDBAND; + } else { + pCap->reg_cap = + AR_EEPROM_EEREGCAP_EN_KK_NEW_11A | + AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN; + } + + pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND; + + pCap->num_antcfg_5ghz = + ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_5GHZ); + pCap->num_antcfg_2ghz = + ath9k_hw_get_num_ant_config(ahp, IEEE80211_BAND_2GHZ); + + return true; +} + +static void ar5416DisablePciePhy(struct ath_hal *ah) +{ + if (!AR_SREV_9100(ah)) + return; + + REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); + REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); + REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029); + 
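/* Remaining writes of the fixed AR_PCIE_SERDES sequence issued by ar5416DisablePciePhy(). */ +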
REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824); + REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579); + REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000); + REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); + REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); + REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007); + + REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); +} + +static void ath9k_set_power_sleep(struct ath_hal *ah, int setChip) +{ + REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + if (setChip) { + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + if (!AR_SREV_9100(ah)) + REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); + + REG_CLR_BIT(ah, (u16) (AR_RTC_RESET), + AR_RTC_RESET_EN); + } +} + +static void ath9k_set_power_network_sleep(struct ath_hal *ah, int setChip) +{ + REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + if (setChip) { + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + REG_WRITE(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_ON_INT); + } else { + REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + } + } +} + +static bool ath9k_hw_set_power_awake(struct ath_hal *ah, + int setChip) +{ + u32 val; + int i; + + if (setChip) { + if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) == + AR_RTC_STATUS_SHUTDOWN) { + if (ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON) + != true) { + return false; + } + } + if (AR_SREV_9100(ah)) + REG_SET_BIT(ah, AR_RTC_RESET, + AR_RTC_RESET_EN); + + REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + udelay(50); + + for (i = POWER_UP_TIME / 50; i > 0; i--) { + val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; + if (val == AR_RTC_STATUS_ON) + break; + udelay(50); + REG_SET_BIT(ah, AR_RTC_FORCE_WAKE, + AR_RTC_FORCE_WAKE_EN); + } + if (i == 0) { + DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, + "%s: Failed to wakeup in %uus\n", + __func__, POWER_UP_TIME / 20); + return false; + } + } + + REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); + return true; +} + +bool ath9k_hw_setpower(struct ath_hal *ah, + enum ath9k_power_mode mode) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + static const char *modes[] = { + "AWAKE", + "FULL-SLEEP", + "NETWORK SLEEP", + "UNDEFINED" + }; + int status = true, setChip = true; + + DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, "%s: %s -> %s (%s)\n", __func__, + modes[ahp->ah_powerMode], modes[mode], + setChip ? 
"set chip " : ""); + + switch (mode) { + case ATH9K_PM_AWAKE: + status = ath9k_hw_set_power_awake(ah, setChip); + break; + case ATH9K_PM_FULL_SLEEP: + ath9k_set_power_sleep(ah, setChip); + ahp->ah_chipFullSleep = true; + break; + case ATH9K_PM_NETWORK_SLEEP: + ath9k_set_power_network_sleep(ah, setChip); + break; + default: + DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, + "%s: unknown power mode %u\n", __func__, mode); + return false; + } + ahp->ah_powerMode = mode; + return status; +} + +static struct ath_hal *ath9k_hw_do_attach(u16 devid, + struct ath_softc *sc, + void __iomem *mem, + int *status) +{ + struct ath_hal_5416 *ahp; + struct ath_hal *ah; + int ecode; +#ifndef CONFIG_SLOW_ANT_DIV + u32 i; + u32 j; +#endif + + ahp = ath9k_hw_newstate(devid, sc, mem, status); + if (ahp == NULL) + return NULL; + + ah = &ahp->ah; + + ath9k_hw_set_defaults(ah); + + if (ah->ah_config.intr_mitigation != 0) + ahp->ah_intrMitigation = true; + + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't reset chip\n", + __func__); + ecode = -EIO; + goto bad; + } + + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: couldn't wakeup chip\n", + __func__); + ecode = -EIO; + goto bad; + } + + if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) { + if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) { + ah->ah_config.serialize_regmode = + SER_REG_MODE_ON; + } else { + ah->ah_config.serialize_regmode = + SER_REG_MODE_OFF; + } + } + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s: serialize_regmode is %d\n", + __func__, ah->ah_config.serialize_regmode); + + if ((ah->ah_macVersion != AR_SREV_VERSION_5416_PCI) && + (ah->ah_macVersion != AR_SREV_VERSION_5416_PCIE) && + (ah->ah_macVersion != AR_SREV_VERSION_9160) && + (!AR_SREV_9100(ah)) && (!AR_SREV_9280(ah))) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s: Mac Chip Rev 0x%02x.%x is not supported by " + "this driver\n", __func__, + ah->ah_macVersion, ah->ah_macRev); + ecode = -EOPNOTSUPP; + goto bad; + } + + if (AR_SREV_9100(ah)) { + ahp->ah_iqCalData.calData = &iq_cal_multi_sample; + ahp->ah_suppCals = IQ_MISMATCH_CAL; + ah->ah_isPciExpress = false; + } + ah->ah_phyRev = REG_READ(ah, AR_PHY_CHIP_ID); + + if (AR_SREV_9160_10_OR_LATER(ah)) { + if (AR_SREV_9280_10_OR_LATER(ah)) { + ahp->ah_iqCalData.calData = &iq_cal_single_sample; + ahp->ah_adcGainCalData.calData = + &adc_gain_cal_single_sample; + ahp->ah_adcDcCalData.calData = + &adc_dc_cal_single_sample; + ahp->ah_adcDcCalInitData.calData = + &adc_init_dc_cal; + } else { + ahp->ah_iqCalData.calData = &iq_cal_multi_sample; + ahp->ah_adcGainCalData.calData = + &adc_gain_cal_multi_sample; + ahp->ah_adcDcCalData.calData = + &adc_dc_cal_multi_sample; + ahp->ah_adcDcCalInitData.calData = + &adc_init_dc_cal; + } + ahp->ah_suppCals = + ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL; + } + + if (AR_SREV_9160(ah)) { + ah->ah_config.enable_ani = 1; + ahp->ah_ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL | + ATH9K_ANI_FIRSTEP_LEVEL); + } else { + ahp->ah_ani_function = ATH9K_ANI_ALL; + if (AR_SREV_9280_10_OR_LATER(ah)) { + ahp->ah_ani_function &= + ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; + } + } + + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s: This Mac Chip Rev 0x%02x.%x is \n", __func__, + ah->ah_macVersion, ah->ah_macRev); + + if (AR_SREV_9280_20_OR_LATER(ah)) { + INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280_2, + ARRAY_SIZE(ar9280Modes_9280_2), 6); + INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280_2, + ARRAY_SIZE(ar9280Common_9280_2), 2); + + if 
(ah->ah_config.pcie_clock_req) { + INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, + ar9280PciePhy_clkreq_off_L1_9280, + ARRAY_SIZE + (ar9280PciePhy_clkreq_off_L1_9280), + 2); + } else { + INIT_INI_ARRAY(&ahp->ah_iniPcieSerdes, + ar9280PciePhy_clkreq_always_on_L1_9280, + ARRAY_SIZE + (ar9280PciePhy_clkreq_always_on_L1_9280), + 2); + } + INIT_INI_ARRAY(&ahp->ah_iniModesAdditional, + ar9280Modes_fast_clock_9280_2, + ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), + 3); + } else if (AR_SREV_9280_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ahp->ah_iniModes, ar9280Modes_9280, + ARRAY_SIZE(ar9280Modes_9280), 6); + INIT_INI_ARRAY(&ahp->ah_iniCommon, ar9280Common_9280, + ARRAY_SIZE(ar9280Common_9280), 2); + } else if (AR_SREV_9160_10_OR_LATER(ah)) { + INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9160, + ARRAY_SIZE(ar5416Modes_9160), 6); + INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9160, + ARRAY_SIZE(ar5416Common_9160), 2); + INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9160, + ARRAY_SIZE(ar5416Bank0_9160), 2); + INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9160, + ARRAY_SIZE(ar5416BB_RfGain_9160), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9160, + ARRAY_SIZE(ar5416Bank1_9160), 2); + INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9160, + ARRAY_SIZE(ar5416Bank2_9160), 2); + INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9160, + ARRAY_SIZE(ar5416Bank3_9160), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9160, + ARRAY_SIZE(ar5416Bank6_9160), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9160, + ARRAY_SIZE(ar5416Bank6TPC_9160), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9160, + ARRAY_SIZE(ar5416Bank7_9160), 2); + if (AR_SREV_9160_11(ah)) { + INIT_INI_ARRAY(&ahp->ah_iniAddac, + ar5416Addac_91601_1, + ARRAY_SIZE(ar5416Addac_91601_1), 2); + } else { + INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9160, + ARRAY_SIZE(ar5416Addac_9160), 2); + } + } else if (AR_SREV_9100_OR_LATER(ah)) { + INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes_9100, + ARRAY_SIZE(ar5416Modes_9100), 6); + INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common_9100, + ARRAY_SIZE(ar5416Common_9100), 2); + INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0_9100, + ARRAY_SIZE(ar5416Bank0_9100), 2); + INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain_9100, + ARRAY_SIZE(ar5416BB_RfGain_9100), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1_9100, + ARRAY_SIZE(ar5416Bank1_9100), 2); + INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2_9100, + ARRAY_SIZE(ar5416Bank2_9100), 2); + INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3_9100, + ARRAY_SIZE(ar5416Bank3_9100), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6_9100, + ARRAY_SIZE(ar5416Bank6_9100), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC_9100, + ARRAY_SIZE(ar5416Bank6TPC_9100), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7_9100, + ARRAY_SIZE(ar5416Bank7_9100), 2); + INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac_9100, + ARRAY_SIZE(ar5416Addac_9100), 2); + } else { + INIT_INI_ARRAY(&ahp->ah_iniModes, ar5416Modes, + ARRAY_SIZE(ar5416Modes), 6); + INIT_INI_ARRAY(&ahp->ah_iniCommon, ar5416Common, + ARRAY_SIZE(ar5416Common), 2); + INIT_INI_ARRAY(&ahp->ah_iniBank0, ar5416Bank0, + ARRAY_SIZE(ar5416Bank0), 2); + INIT_INI_ARRAY(&ahp->ah_iniBB_RfGain, ar5416BB_RfGain, + ARRAY_SIZE(ar5416BB_RfGain), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank1, ar5416Bank1, + ARRAY_SIZE(ar5416Bank1), 2); + INIT_INI_ARRAY(&ahp->ah_iniBank2, ar5416Bank2, + ARRAY_SIZE(ar5416Bank2), 2); + INIT_INI_ARRAY(&ahp->ah_iniBank3, ar5416Bank3, + ARRAY_SIZE(ar5416Bank3), 3); + 
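/* Default (original AR5416) initialisation continues: remaining RF bank and ADDAC tables. */ +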
INIT_INI_ARRAY(&ahp->ah_iniBank6, ar5416Bank6, + ARRAY_SIZE(ar5416Bank6), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank6TPC, ar5416Bank6TPC, + ARRAY_SIZE(ar5416Bank6TPC), 3); + INIT_INI_ARRAY(&ahp->ah_iniBank7, ar5416Bank7, + ARRAY_SIZE(ar5416Bank7), 2); + INIT_INI_ARRAY(&ahp->ah_iniAddac, ar5416Addac, + ARRAY_SIZE(ar5416Addac), 2); + } + + if (ah->ah_isPciExpress) + ath9k_hw_configpcipowersave(ah, 0); + else + ar5416DisablePciePhy(ah); + + ecode = ath9k_hw_post_attach(ah); + if (ecode != 0) + goto bad; + +#ifndef CONFIG_SLOW_ANT_DIV + if (ah->ah_devid == AR9280_DEVID_PCI) { + for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) { + u32 reg = INI_RA(&ahp->ah_iniModes, i, 0); + + for (j = 1; j < ahp->ah_iniModes.ia_columns; j++) { + u32 val = INI_RA(&ahp->ah_iniModes, i, j); + + INI_RA(&ahp->ah_iniModes, i, j) = + ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom, + reg, val); + } + } + } +#endif + + if (!ath9k_hw_fill_cap_info(ah)) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s:failed ath9k_hw_fill_cap_info\n", __func__); + ecode = -EINVAL; + goto bad; + } + + ecode = ath9k_hw_init_macaddr(ah); + if (ecode != 0) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s: failed initializing mac address\n", + __func__); + goto bad; + } + + if (AR_SREV_9285(ah)) + ah->ah_txTrigLevel = (AR_FTRIG_256B >> AR_FTRIG_S); + else + ah->ah_txTrigLevel = (AR_FTRIG_512B >> AR_FTRIG_S); + +#ifndef ATH_NF_PER_CHAN + + ath9k_init_nfcal_hist_buffer(ah); +#endif + + return ah; + +bad: + if (ahp) + ath9k_hw_detach((struct ath_hal *) ahp); + if (status) + *status = ecode; + return NULL; +} + +void ath9k_hw_detach(struct ath_hal *ah) +{ + if (!AR_SREV_9100(ah)) + ath9k_hw_ani_detach(ah); + ath9k_hw_rfdetach(ah); + + ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); + kfree(ah); +} + +bool ath9k_get_channel_edges(struct ath_hal *ah, + u16 flags, u16 *low, + u16 *high) +{ + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + + if (flags & CHANNEL_5GHZ) { + *low = pCap->low_5ghz_chan; + *high = pCap->high_5ghz_chan; + return true; + } + if ((flags & CHANNEL_2GHZ)) { + *low = pCap->low_2ghz_chan; + *high = pCap->high_2ghz_chan; + + return true; + } + return false; +} + +static inline bool ath9k_hw_fill_vpd_table(u8 pwrMin, + u8 pwrMax, + u8 *pPwrList, + u8 *pVpdList, + u16 + numIntercepts, + u8 *pRetVpdList) +{ + u16 i, k; + u8 currPwr = pwrMin; + u16 idxL = 0, idxR = 0; + + for (i = 0; i <= (pwrMax - pwrMin) / 2; i++) { + ath9k_hw_get_lower_upper_index(currPwr, pPwrList, + numIntercepts, &(idxL), + &(idxR)); + if (idxR < 1) + idxR = 1; + if (idxL == numIntercepts - 1) + idxL = (u16) (numIntercepts - 2); + if (pPwrList[idxL] == pPwrList[idxR]) + k = pVpdList[idxL]; + else + k = (u16) (((currPwr - + pPwrList[idxL]) * + pVpdList[idxR] + + (pPwrList[idxR] - + currPwr) * pVpdList[idxL]) / + (pPwrList[idxR] - + pPwrList[idxL])); + pRetVpdList[i] = (u8) k; + currPwr += 2; + } + + return true; +} + +static inline void +ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hal *ah, + struct ath9k_channel *chan, + struct cal_data_per_freq *pRawDataSet, + u8 *bChans, + u16 availPiers, + u16 tPdGainOverlap, + int16_t *pMinCalPower, + u16 *pPdGainBoundaries, + u8 *pPDADCValues, + u16 numXpdGains) +{ + int i, j, k; + int16_t ss; + u16 idxL = 0, idxR = 0, numPiers; + static u8 vpdTableL[AR5416_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + static u8 vpdTableR[AR5416_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + static u8 vpdTableI[AR5416_NUM_PD_GAINS] + [AR5416_MAX_PWR_RANGE_IN_HALF_DB]; + + u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR; + u8 minPwrT4[AR5416_NUM_PD_GAINS]; + u8 
maxPwrT4[AR5416_NUM_PD_GAINS]; + int16_t vpdStep; + int16_t tmpVal; + u16 sizeCurrVpdTable, maxIndex, tgtIndex; + bool match; + int16_t minDelta = 0; + struct chan_centers centers; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + for (numPiers = 0; numPiers < availPiers; numPiers++) { + if (bChans[numPiers] == AR5416_BCHAN_UNUSED) + break; + } + + match = ath9k_hw_get_lower_upper_index((u8) + FREQ2FBIN(centers. + synth_center, + IS_CHAN_2GHZ + (chan)), bChans, + numPiers, &idxL, &idxR); + + if (match) { + for (i = 0; i < numXpdGains; i++) { + minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0]; + maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4]; + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pRawDataSet[idxL]. + pwrPdg[i], + pRawDataSet[idxL]. + vpdPdg[i], + AR5416_PD_GAIN_ICEPTS, + vpdTableI[i]); + } + } else { + for (i = 0; i < numXpdGains; i++) { + pVpdL = pRawDataSet[idxL].vpdPdg[i]; + pPwrL = pRawDataSet[idxL].pwrPdg[i]; + pVpdR = pRawDataSet[idxR].vpdPdg[i]; + pPwrR = pRawDataSet[idxR].pwrPdg[i]; + + minPwrT4[i] = max(pPwrL[0], pPwrR[0]); + + maxPwrT4[i] = + min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1], + pPwrR[AR5416_PD_GAIN_ICEPTS - 1]); + + + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pPwrL, pVpdL, + AR5416_PD_GAIN_ICEPTS, + vpdTableL[i]); + ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], + pPwrR, pVpdR, + AR5416_PD_GAIN_ICEPTS, + vpdTableR[i]); + + for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) { + vpdTableI[i][j] = + (u8) (ath9k_hw_interpolate + ((u16) + FREQ2FBIN(centers. + synth_center, + IS_CHAN_2GHZ + (chan)), + bChans[idxL], + bChans[idxR], vpdTableL[i] + [j], vpdTableR[i] + [j])); + } + } + } + + *pMinCalPower = (int16_t) (minPwrT4[0] / 2); + + k = 0; + for (i = 0; i < numXpdGains; i++) { + if (i == (numXpdGains - 1)) + pPdGainBoundaries[i] = + (u16) (maxPwrT4[i] / 2); + else + pPdGainBoundaries[i] = + (u16) ((maxPwrT4[i] + + minPwrT4[i + 1]) / 4); + + pPdGainBoundaries[i] = + min((u16) AR5416_MAX_RATE_POWER, + pPdGainBoundaries[i]); + + if ((i == 0) && !AR_SREV_5416_V20_OR_LATER(ah)) { + minDelta = pPdGainBoundaries[0] - 23; + pPdGainBoundaries[0] = 23; + } else { + minDelta = 0; + } + + if (i == 0) { + if (AR_SREV_9280_10_OR_LATER(ah)) + ss = (int16_t) (0 - (minPwrT4[i] / 2)); + else + ss = 0; + } else { + ss = (int16_t) ((pPdGainBoundaries[i - 1] - + (minPwrT4[i] / 2)) - + tPdGainOverlap + 1 + minDelta); + } + vpdStep = (int16_t) (vpdTableI[i][1] - vpdTableI[i][0]); + vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep); + + while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) { + tmpVal = (int16_t) (vpdTableI[i][0] + ss * vpdStep); + pPDADCValues[k++] = + (u8) ((tmpVal < 0) ? 0 : tmpVal); + ss++; + } + + sizeCurrVpdTable = + (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1); + tgtIndex = (u8) (pPdGainBoundaries[i] + tPdGainOverlap - + (minPwrT4[i] / 2)); + maxIndex = (tgtIndex < + sizeCurrVpdTable) ? tgtIndex : sizeCurrVpdTable; + + while ((ss < maxIndex) + && (k < (AR5416_NUM_PDADC_VALUES - 1))) { + pPDADCValues[k++] = vpdTableI[i][ss++]; + } + + vpdStep = (int16_t) (vpdTableI[i][sizeCurrVpdTable - 1] - + vpdTableI[i][sizeCurrVpdTable - 2]); + vpdStep = (int16_t) ((vpdStep < 1) ? 1 : vpdStep); + + if (tgtIndex > maxIndex) { + while ((ss <= tgtIndex) + && (k < (AR5416_NUM_PDADC_VALUES - 1))) { + tmpVal = (int16_t) ((vpdTableI[i] + [sizeCurrVpdTable - + 1] + (ss - maxIndex + + 1) * vpdStep)); + pPDADCValues[k++] = (u8) ((tmpVal > + 255) ? 
255 : tmpVal); + ss++; + } + } + } + + while (i < AR5416_PD_GAINS_IN_MASK) { + pPdGainBoundaries[i] = pPdGainBoundaries[i - 1]; + i++; + } + + while (k < AR5416_NUM_PDADC_VALUES) { + pPDADCValues[k] = pPDADCValues[k - 1]; + k++; + } + return; +} + +static inline bool +ath9k_hw_set_power_cal_table(struct ath_hal *ah, + struct ar5416_eeprom *pEepData, + struct ath9k_channel *chan, + int16_t *pTxPowerIndexOffset) +{ + struct cal_data_per_freq *pRawDataset; + u8 *pCalBChans = NULL; + u16 pdGainOverlap_t2; + static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; + u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; + u16 numPiers, i, j; + int16_t tMinCalPower; + u16 numXpdGain, xpdMask; + u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 }; + u32 reg32, regOffset, regChainOffset; + int16_t modalIdx; + struct ath_hal_5416 *ahp = AH5416(ah); + + modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0; + xpdMask = pEepData->modalHeader[modalIdx].xpdGain; + + if ((pEepData->baseEepHeader. + version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_2) { + pdGainOverlap_t2 = + pEepData->modalHeader[modalIdx].pdGainOverlap; + } else { + pdGainOverlap_t2 = + (u16) (MS + (REG_READ(ah, AR_PHY_TPCRG5), + AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); + } + + if (IS_CHAN_2GHZ(chan)) { + pCalBChans = pEepData->calFreqPier2G; + numPiers = AR5416_NUM_2G_CAL_PIERS; + } else { + pCalBChans = pEepData->calFreqPier5G; + numPiers = AR5416_NUM_5G_CAL_PIERS; + } + + numXpdGain = 0; + + for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) { + if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) { + if (numXpdGain >= AR5416_NUM_PD_GAINS) + break; + xpdGainValues[numXpdGain] = + (u16) (AR5416_PD_GAINS_IN_MASK - i); + numXpdGain++; + } + } + + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, + (numXpdGain - 1) & 0x3); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, + xpdGainValues[0]); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, + xpdGainValues[1]); + REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, + xpdGainValues[2]); + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + if (AR_SREV_5416_V20_OR_LATER(ah) && + (ahp->ah_rxchainmask == 5 || ahp->ah_txchainmask == 5) + && (i != 0)) { + regChainOffset = (i == 1) ? 
0x2000 : 0x1000; + } else + regChainOffset = i * 0x1000; + if (pEepData->baseEepHeader.txMask & (1 << i)) { + if (IS_CHAN_2GHZ(chan)) + pRawDataset = pEepData->calPierData2G[i]; + else + pRawDataset = pEepData->calPierData5G[i]; + + ath9k_hw_get_gain_boundaries_pdadcs(ah, chan, + pRawDataset, + pCalBChans, + numPiers, + pdGainOverlap_t2, + &tMinCalPower, + gainBoundaries, + pdadcValues, + numXpdGain); + + if ((i == 0) || AR_SREV_5416_V20_OR_LATER(ah)) { + + REG_WRITE(ah, + AR_PHY_TPCRG5 + regChainOffset, + SM(pdGainOverlap_t2, + AR_PHY_TPCRG5_PD_GAIN_OVERLAP) + | SM(gainBoundaries[0], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) + | SM(gainBoundaries[1], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) + | SM(gainBoundaries[2], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) + | SM(gainBoundaries[3], + AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4)); + } + + regOffset = + AR_PHY_BASE + (672 << 2) + regChainOffset; + for (j = 0; j < 32; j++) { + reg32 = + ((pdadcValues[4 * j + 0] & 0xFF) << 0) + | ((pdadcValues[4 * j + 1] & 0xFF) << + 8) | ((pdadcValues[4 * j + 2] & + 0xFF) << 16) | + ((pdadcValues[4 * j + 3] & 0xFF) << + 24); + REG_WRITE(ah, regOffset, reg32); + + DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO, + "PDADC (%d,%4x): %4.4x %8.8x\n", + i, regChainOffset, regOffset, + reg32); + DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO, + "PDADC: Chain %d | PDADC %3d Value %3d | " + "PDADC %3d Value %3d | PDADC %3d Value %3d | " + "PDADC %3d Value %3d |\n", + i, 4 * j, pdadcValues[4 * j], + 4 * j + 1, pdadcValues[4 * j + 1], + 4 * j + 2, pdadcValues[4 * j + 2], + 4 * j + 3, + pdadcValues[4 * j + 3]); + + regOffset += 4; + } + } + } + *pTxPowerIndexOffset = 0; + + return true; +} + +void ath9k_hw_configpcipowersave(struct ath_hal *ah, int restore) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + u8 i; + + if (ah->ah_isPciExpress != true) + return; + + if (ah->ah_config.pcie_powersave_enable == 2) + return; + + if (restore) + return; + + if (AR_SREV_9280_20_OR_LATER(ah)) { + for (i = 0; i < ahp->ah_iniPcieSerdes.ia_rows; i++) { + REG_WRITE(ah, INI_RA(&ahp->ah_iniPcieSerdes, i, 0), + INI_RA(&ahp->ah_iniPcieSerdes, i, 1)); + } + udelay(1000); + } else if (AR_SREV_9280(ah) + && (ah->ah_macRev == AR_SREV_REVISION_9280_10)) { + REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00); + REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); + + REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019); + REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820); + REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560); + + if (ah->ah_config.pcie_clock_req) + REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc); + else + REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd); + + REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); + REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); + REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007); + + REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); + + udelay(1000); + } else { + REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); + REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); + REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039); + REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824); + REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579); + REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff); + REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40); + REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554); + REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007); + REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); + } + + REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA); + + if (ah->ah_config.pcie_waen) { + REG_WRITE(ah, AR_WA, ah->ah_config.pcie_waen); + } else { + if (AR_SREV_9280(ah)) + REG_WRITE(ah, AR_WA, 0x0040073f); + else + REG_WRITE(ah, AR_WA, 0x0000073f); + } +} + +static inline void +ath9k_hw_get_legacy_target_powers(struct ath_hal *ah, 
+ struct ath9k_channel *chan, + struct cal_target_power_leg *powInfo, + u16 numChannels, + struct cal_target_power_leg *pNewPower, + u16 numRates, + bool isExtTarget) +{ + u16 clo, chi; + int i; + int matchIndex = -1, lowIndex = -1; + u16 freq; + struct chan_centers centers; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + freq = (isExtTarget) ? centers.ext_center : centers.ctl_center; + + if (freq <= ath9k_hw_fbin2freq(powInfo[0].bChannel, + IS_CHAN_2GHZ(chan))) { + matchIndex = 0; + } else { + for (i = 0; (i < numChannels) + && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) { + if (freq == + ath9k_hw_fbin2freq(powInfo[i].bChannel, + IS_CHAN_2GHZ(chan))) { + matchIndex = i; + break; + } else if ((freq < + ath9k_hw_fbin2freq(powInfo[i].bChannel, + IS_CHAN_2GHZ(chan))) + && (freq > + ath9k_hw_fbin2freq(powInfo[i - 1]. + bChannel, + IS_CHAN_2GHZ + (chan)))) { + lowIndex = i - 1; + break; + } + } + if ((matchIndex == -1) && (lowIndex == -1)) + matchIndex = i - 1; + } + + if (matchIndex != -1) { + *pNewPower = powInfo[matchIndex]; + } else { + clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, + IS_CHAN_2GHZ(chan)); + chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, + IS_CHAN_2GHZ(chan)); + + for (i = 0; i < numRates; i++) { + pNewPower->tPow2x[i] = + (u8) ath9k_hw_interpolate(freq, clo, chi, + powInfo + [lowIndex]. + tPow2x[i], + powInfo + [lowIndex + + 1].tPow2x[i]); + } + } +} + +static inline void +ath9k_hw_get_target_powers(struct ath_hal *ah, + struct ath9k_channel *chan, + struct cal_target_power_ht *powInfo, + u16 numChannels, + struct cal_target_power_ht *pNewPower, + u16 numRates, + bool isHt40Target) +{ + u16 clo, chi; + int i; + int matchIndex = -1, lowIndex = -1; + u16 freq; + struct chan_centers centers; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + freq = isHt40Target ? centers.synth_center : centers.ctl_center; + + if (freq <= + ath9k_hw_fbin2freq(powInfo[0].bChannel, IS_CHAN_2GHZ(chan))) { + matchIndex = 0; + } else { + for (i = 0; (i < numChannels) + && (powInfo[i].bChannel != AR5416_BCHAN_UNUSED); i++) { + if (freq == + ath9k_hw_fbin2freq(powInfo[i].bChannel, + IS_CHAN_2GHZ(chan))) { + matchIndex = i; + break; + } else + if ((freq < + ath9k_hw_fbin2freq(powInfo[i].bChannel, + IS_CHAN_2GHZ(chan))) + && (freq > + ath9k_hw_fbin2freq(powInfo[i - 1]. + bChannel, + IS_CHAN_2GHZ + (chan)))) { + lowIndex = i - 1; + break; + } + } + if ((matchIndex == -1) && (lowIndex == -1)) + matchIndex = i - 1; + } + + if (matchIndex != -1) { + *pNewPower = powInfo[matchIndex]; + } else { + clo = ath9k_hw_fbin2freq(powInfo[lowIndex].bChannel, + IS_CHAN_2GHZ(chan)); + chi = ath9k_hw_fbin2freq(powInfo[lowIndex + 1].bChannel, + IS_CHAN_2GHZ(chan)); + + for (i = 0; i < numRates; i++) { + pNewPower->tPow2x[i] = + (u8) ath9k_hw_interpolate(freq, clo, chi, + powInfo + [lowIndex]. + tPow2x[i], + powInfo + [lowIndex + + 1].tPow2x[i]); + } + } +} + +static inline u16 +ath9k_hw_get_max_edge_power(u16 freq, + struct cal_ctl_edges *pRdEdgesPower, + bool is2GHz) +{ + u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + int i; + + for (i = 0; (i < AR5416_NUM_BAND_EDGES) + && (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) { + if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, + is2GHz)) { + twiceMaxEdgePower = pRdEdgesPower[i].tPower; + break; + } else if ((i > 0) + && (freq < + ath9k_hw_fbin2freq(pRdEdgesPower[i]. 
+ bChannel, is2GHz))) { + if (ath9k_hw_fbin2freq + (pRdEdgesPower[i - 1].bChannel, is2GHz) < freq + && pRdEdgesPower[i - 1].flag) { + twiceMaxEdgePower = + pRdEdgesPower[i - 1].tPower; + } + break; + } + } + return twiceMaxEdgePower; +} + +static inline bool +ath9k_hw_set_power_per_rate_table(struct ath_hal *ah, + struct ar5416_eeprom *pEepData, + struct ath9k_channel *chan, + int16_t *ratesArray, + u16 cfgCtl, + u8 AntennaReduction, + u8 twiceMaxRegulatoryPower, + u8 powerLimit) +{ + u8 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + static const u16 tpScaleReductionTable[5] = + { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; + + int i; + int8_t twiceLargestAntenna; + struct cal_ctl_data *rep; + struct cal_target_power_leg targetPowerOfdm, targetPowerCck = { + 0, { 0, 0, 0, 0} + }; + struct cal_target_power_leg targetPowerOfdmExt = { + 0, { 0, 0, 0, 0} }, targetPowerCckExt = { + 0, { 0, 0, 0, 0 } + }; + struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { + 0, {0, 0, 0, 0} + }; + u8 scaledPower = 0, minCtlPower, maxRegAllowedPower; + u16 ctlModesFor11a[] = + { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 }; + u16 ctlModesFor11g[] = + { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, + CTL_2GHT40 + }; + u16 numCtlModes, *pCtlMode, ctlMode, freq; + struct chan_centers centers; + int tx_chainmask; + u8 twiceMinEdgePower; + struct ath_hal_5416 *ahp = AH5416(ah); + + tx_chainmask = ahp->ah_txchainmask; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + twiceLargestAntenna = max( + pEepData->modalHeader + [IS_CHAN_2GHZ(chan)].antennaGainCh[0], + pEepData->modalHeader + [IS_CHAN_2GHZ(chan)].antennaGainCh[1]); + + twiceLargestAntenna = max((u8) twiceLargestAntenna, + pEepData->modalHeader + [IS_CHAN_2GHZ(chan)].antennaGainCh[2]); + + twiceLargestAntenna = + (int8_t) min(AntennaReduction - twiceLargestAntenna, 0); + + maxRegAllowedPower = twiceMaxRegulatoryPower + twiceLargestAntenna; + + if (ah->ah_tpScale != ATH9K_TP_SCALE_MAX) { + maxRegAllowedPower -= + (tpScaleReductionTable[(ah->ah_tpScale)] * 2); + } + + scaledPower = min(powerLimit, maxRegAllowedPower); + + switch (ar5416_get_ntxchains(tx_chainmask)) { + case 1: + break; + case 2: + scaledPower -= + pEepData->modalHeader[IS_CHAN_2GHZ(chan)]. + pwrDecreaseFor2Chain; + break; + case 3: + scaledPower -= + pEepData->modalHeader[IS_CHAN_2GHZ(chan)]. 
+ pwrDecreaseFor3Chain; + break; + } + + scaledPower = max(0, (int32_t) scaledPower); + + if (IS_CHAN_2GHZ(chan)) { + numCtlModes = + ARRAY_SIZE(ctlModesFor11g) - + SUB_NUM_CTL_MODES_AT_2G_40; + pCtlMode = ctlModesFor11g; + + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData-> + calTargetPowerCck, + AR5416_NUM_2G_CCK_TARGET_POWERS, + &targetPowerCck, 4, + false); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData-> + calTargetPower2G, + AR5416_NUM_2G_20_TARGET_POWERS, + &targetPowerOfdm, 4, + false); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower2GHT20, + AR5416_NUM_2G_20_TARGET_POWERS, + &targetPowerHt20, 8, false); + + if (IS_CHAN_HT40(chan)) { + numCtlModes = ARRAY_SIZE(ctlModesFor11g); + ath9k_hw_get_target_powers(ah, chan, + pEepData-> + calTargetPower2GHT40, + AR5416_NUM_2G_40_TARGET_POWERS, + &targetPowerHt40, 8, + true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData-> + calTargetPowerCck, + AR5416_NUM_2G_CCK_TARGET_POWERS, + &targetPowerCckExt, + 4, true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData-> + calTargetPower2G, + AR5416_NUM_2G_20_TARGET_POWERS, + &targetPowerOfdmExt, + 4, true); + } + } else { + + numCtlModes = + ARRAY_SIZE(ctlModesFor11a) - + SUB_NUM_CTL_MODES_AT_5G_40; + pCtlMode = ctlModesFor11a; + + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData-> + calTargetPower5G, + AR5416_NUM_5G_20_TARGET_POWERS, + &targetPowerOfdm, 4, + false); + ath9k_hw_get_target_powers(ah, chan, + pEepData->calTargetPower5GHT20, + AR5416_NUM_5G_20_TARGET_POWERS, + &targetPowerHt20, 8, false); + + if (IS_CHAN_HT40(chan)) { + numCtlModes = ARRAY_SIZE(ctlModesFor11a); + ath9k_hw_get_target_powers(ah, chan, + pEepData-> + calTargetPower5GHT40, + AR5416_NUM_5G_40_TARGET_POWERS, + &targetPowerHt40, 8, + true); + ath9k_hw_get_legacy_target_powers(ah, chan, + pEepData-> + calTargetPower5G, + AR5416_NUM_5G_20_TARGET_POWERS, + &targetPowerOfdmExt, + 4, true); + } + } + + for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { + bool isHt40CtlMode = + (pCtlMode[ctlMode] == CTL_5GHT40) + || (pCtlMode[ctlMode] == CTL_2GHT40); + if (isHt40CtlMode) + freq = centers.synth_center; + else if (pCtlMode[ctlMode] & EXT_ADDITIVE) + freq = centers.ext_center; + else + freq = centers.ctl_center; + + if (ar5416_get_eep_ver(ahp) == 14 + && ar5416_get_eep_rev(ahp) <= 2) + twiceMaxEdgePower = AR5416_MAX_RATE_POWER; + + DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, + "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, " + "EXT_ADDITIVE %d\n", + ctlMode, numCtlModes, isHt40CtlMode, + (pCtlMode[ctlMode] & EXT_ADDITIVE)); + + for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; + i++) { + DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, + " LOOP-Ctlidx %d: cfgCtl 0x%2.2x " + "pCtlMode 0x%2.2x ctlIndex 0x%2.2x " + "chan %d\n", + i, cfgCtl, pCtlMode[ctlMode], + pEepData->ctlIndex[i], chan->channel); + + if ((((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + pEepData->ctlIndex[i]) + || + (((cfgCtl & ~CTL_MODE_M) | + (pCtlMode[ctlMode] & CTL_MODE_M)) == + ((pEepData-> + ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))) { + rep = &(pEepData->ctlData[i]); + + twiceMinEdgePower = + ath9k_hw_get_max_edge_power(freq, + rep-> + ctlEdges + [ar5416_get_ntxchains + (tx_chainmask) + - 1], + IS_CHAN_2GHZ + (chan)); + + DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, + " MATCH-EE_IDX %d: ch %d is2 %d " + "2xMinEdge %d chainmask %d chains %d\n", + i, freq, IS_CHAN_2GHZ(chan), + twiceMinEdgePower, tx_chainmask, + ar5416_get_ntxchains + (tx_chainmask)); + if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) { + 
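/* SD_NO_CTL in the configured CTL: keep the minimum edge power across all matching CTL entries. */ +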
twiceMaxEdgePower = + min(twiceMaxEdgePower, + twiceMinEdgePower); + } else { + twiceMaxEdgePower = + twiceMinEdgePower; + break; + } + } + } + + minCtlPower = min(twiceMaxEdgePower, scaledPower); + + DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, + " SEL-Min ctlMode %d pCtlMode %d " + "2xMaxEdge %d sP %d minCtlPwr %d\n", + ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, + scaledPower, minCtlPower); + + switch (pCtlMode[ctlMode]) { + case CTL_11B: + for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); + i++) { + targetPowerCck.tPow2x[i] = + min(targetPowerCck.tPow2x[i], + minCtlPower); + } + break; + case CTL_11A: + case CTL_11G: + for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); + i++) { + targetPowerOfdm.tPow2x[i] = + min(targetPowerOfdm.tPow2x[i], + minCtlPower); + } + break; + case CTL_5GHT20: + case CTL_2GHT20: + for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); + i++) { + targetPowerHt20.tPow2x[i] = + min(targetPowerHt20.tPow2x[i], + minCtlPower); + } + break; + case CTL_11B_EXT: + targetPowerCckExt.tPow2x[0] = + min(targetPowerCckExt.tPow2x[0], minCtlPower); + break; + case CTL_11A_EXT: + case CTL_11G_EXT: + targetPowerOfdmExt.tPow2x[0] = + min(targetPowerOfdmExt.tPow2x[0], minCtlPower); + break; + case CTL_5GHT40: + case CTL_2GHT40: + for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); + i++) { + targetPowerHt40.tPow2x[i] = + min(targetPowerHt40.tPow2x[i], + minCtlPower); + } + break; + default: + break; + } + } + + ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] = + ratesArray[rate18mb] = ratesArray[rate24mb] = + targetPowerOfdm.tPow2x[0]; + ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; + ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; + ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3]; + ratesArray[rateXr] = targetPowerOfdm.tPow2x[0]; + + for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) + ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i]; + + if (IS_CHAN_2GHZ(chan)) { + ratesArray[rate1l] = targetPowerCck.tPow2x[0]; + ratesArray[rate2s] = ratesArray[rate2l] = + targetPowerCck.tPow2x[1]; + ratesArray[rate5_5s] = ratesArray[rate5_5l] = + targetPowerCck.tPow2x[2]; + ; + ratesArray[rate11s] = ratesArray[rate11l] = + targetPowerCck.tPow2x[3]; + ; + } + if (IS_CHAN_HT40(chan)) { + for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { + ratesArray[rateHt40_0 + i] = + targetPowerHt40.tPow2x[i]; + } + ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; + ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; + ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; + if (IS_CHAN_2GHZ(chan)) { + ratesArray[rateExtCck] = + targetPowerCckExt.tPow2x[0]; + } + } + return true; +} + +static int +ath9k_hw_set_txpower(struct ath_hal *ah, + struct ar5416_eeprom *pEepData, + struct ath9k_channel *chan, + u16 cfgCtl, + u8 twiceAntennaReduction, + u8 twiceMaxRegulatoryPower, + u8 powerLimit) +{ + struct modal_eep_header *pModal = + &(pEepData->modalHeader[IS_CHAN_2GHZ(chan)]); + int16_t ratesArray[Ar5416RateSize]; + int16_t txPowerIndexOffset = 0; + u8 ht40PowerIncForPdadc = 2; + int i; + + memset(ratesArray, 0, sizeof(ratesArray)); + + if ((pEepData->baseEepHeader. 
+ version & AR5416_EEP_VER_MINOR_MASK) >= + AR5416_EEP_MINOR_VER_2) { + ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; + } + + if (!ath9k_hw_set_power_per_rate_table(ah, pEepData, chan, + &ratesArray[0], cfgCtl, + twiceAntennaReduction, + twiceMaxRegulatoryPower, + powerLimit)) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "ath9k_hw_set_txpower: unable to set " + "tx power per rate table\n"); + return -EIO; + } + + if (!ath9k_hw_set_power_cal_table + (ah, pEepData, chan, &txPowerIndexOffset)) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "ath9k_hw_set_txpower: unable to set power table\n"); + return -EIO; + } + + for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { + ratesArray[i] = + (int16_t) (txPowerIndexOffset + ratesArray[i]); + if (ratesArray[i] > AR5416_MAX_RATE_POWER) + ratesArray[i] = AR5416_MAX_RATE_POWER; + } + + if (AR_SREV_9280_10_OR_LATER(ah)) { + for (i = 0; i < Ar5416RateSize; i++) + ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2; + } + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, + ATH9K_POW_SM(ratesArray[rate18mb], 24) + | ATH9K_POW_SM(ratesArray[rate12mb], 16) + | ATH9K_POW_SM(ratesArray[rate9mb], 8) + | ATH9K_POW_SM(ratesArray[rate6mb], 0) + ); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE2, + ATH9K_POW_SM(ratesArray[rate54mb], 24) + | ATH9K_POW_SM(ratesArray[rate48mb], 16) + | ATH9K_POW_SM(ratesArray[rate36mb], 8) + | ATH9K_POW_SM(ratesArray[rate24mb], 0) + ); + + if (IS_CHAN_2GHZ(chan)) { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, + ATH9K_POW_SM(ratesArray[rate2s], 24) + | ATH9K_POW_SM(ratesArray[rate2l], 16) + | ATH9K_POW_SM(ratesArray[rateXr], 8) + | ATH9K_POW_SM(ratesArray[rate1l], 0) + ); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE4, + ATH9K_POW_SM(ratesArray[rate11s], 24) + | ATH9K_POW_SM(ratesArray[rate11l], 16) + | ATH9K_POW_SM(ratesArray[rate5_5s], 8) + | ATH9K_POW_SM(ratesArray[rate5_5l], 0) + ); + } + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, + ATH9K_POW_SM(ratesArray[rateHt20_3], 24) + | ATH9K_POW_SM(ratesArray[rateHt20_2], 16) + | ATH9K_POW_SM(ratesArray[rateHt20_1], 8) + | ATH9K_POW_SM(ratesArray[rateHt20_0], 0) + ); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE6, + ATH9K_POW_SM(ratesArray[rateHt20_7], 24) + | ATH9K_POW_SM(ratesArray[rateHt20_6], 16) + | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) + | ATH9K_POW_SM(ratesArray[rateHt20_4], 0) + ); + + if (IS_CHAN_HT40(chan)) { + REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, + ATH9K_POW_SM(ratesArray[rateHt40_3] + + ht40PowerIncForPdadc, 24) + | ATH9K_POW_SM(ratesArray[rateHt40_2] + + ht40PowerIncForPdadc, 16) + | ATH9K_POW_SM(ratesArray[rateHt40_1] + + ht40PowerIncForPdadc, 8) + | ATH9K_POW_SM(ratesArray[rateHt40_0] + + ht40PowerIncForPdadc, 0) + ); + REG_WRITE(ah, AR_PHY_POWER_TX_RATE8, + ATH9K_POW_SM(ratesArray[rateHt40_7] + + ht40PowerIncForPdadc, 24) + | ATH9K_POW_SM(ratesArray[rateHt40_6] + + ht40PowerIncForPdadc, 16) + | ATH9K_POW_SM(ratesArray[rateHt40_5] + + ht40PowerIncForPdadc, 8) + | ATH9K_POW_SM(ratesArray[rateHt40_4] + + ht40PowerIncForPdadc, 0) + ); + + REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, + ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) + | ATH9K_POW_SM(ratesArray[rateExtCck], 16) + | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) + | ATH9K_POW_SM(ratesArray[rateDupCck], 0) + ); + } + + REG_WRITE(ah, AR_PHY_POWER_TX_SUB, + ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6) + | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0) + ); + + i = rate6mb; + if (IS_CHAN_HT40(chan)) + i = rateHt40_0; + else if (IS_CHAN_HT20(chan)) + i = rateHt20_0; + + if (AR_SREV_9280_10_OR_LATER(ah)) + ah->ah_maxPowerLevel = + ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2; + else + 
ah->ah_maxPowerLevel = ratesArray[i]; + + return 0; +} + +static inline void ath9k_hw_get_delta_slope_vals(struct ath_hal *ah, + u32 coef_scaled, + u32 *coef_mantissa, + u32 *coef_exponent) +{ + u32 coef_exp, coef_man; + + for (coef_exp = 31; coef_exp > 0; coef_exp--) + if ((coef_scaled >> coef_exp) & 0x1) + break; + + coef_exp = 14 - (coef_exp - COEF_SCALE_S); + + coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1)); + + *coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp); + *coef_exponent = coef_exp - 16; +} + +static void +ath9k_hw_set_delta_slope(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + u32 coef_scaled, ds_coef_exp, ds_coef_man; + u32 clockMhzScaled = 0x64000000; + struct chan_centers centers; + + if (IS_CHAN_HALF_RATE(chan)) + clockMhzScaled = clockMhzScaled >> 1; + else if (IS_CHAN_QUARTER_RATE(chan)) + clockMhzScaled = clockMhzScaled >> 2; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + coef_scaled = clockMhzScaled / centers.synth_center; + + ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, + &ds_coef_exp); + + REG_RMW_FIELD(ah, AR_PHY_TIMING3, + AR_PHY_TIMING3_DSC_MAN, ds_coef_man); + REG_RMW_FIELD(ah, AR_PHY_TIMING3, + AR_PHY_TIMING3_DSC_EXP, ds_coef_exp); + + coef_scaled = (9 * coef_scaled) / 10; + + ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man, + &ds_coef_exp); + + REG_RMW_FIELD(ah, AR_PHY_HALFGI, + AR_PHY_HALFGI_DSC_MAN, ds_coef_man); + REG_RMW_FIELD(ah, AR_PHY_HALFGI, + AR_PHY_HALFGI_DSC_EXP, ds_coef_exp); +} + +static void ath9k_hw_9280_spur_mitigate(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + int bb_spur = AR_NO_SPUR; + int freq; + int bin, cur_bin; + int bb_spur_off, spur_subchannel_sd; + int spur_freq_sd; + int spur_delta_phase; + int denominator; + int upper, lower, cur_vit_mask; + int tmp, newVal; + int i; + int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, + AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 + }; + int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, + AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 + }; + int inc[4] = { 0, 100, 0, 0 }; + struct chan_centers centers; + + int8_t mask_m[123]; + int8_t mask_p[123]; + int8_t mask_amt; + int tmp_mask; + int cur_bb_spur; + bool is2GHz = IS_CHAN_2GHZ(chan); + + memset(&mask_m, 0, sizeof(int8_t) * 123); + memset(&mask_p, 0, sizeof(int8_t) * 123); + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + freq = centers.synth_center; + + ah->ah_config.spurmode = SPUR_ENABLE_EEPROM; + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { + cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz); + + if (is2GHz) + cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ; + else + cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ; + + if (AR_NO_SPUR == cur_bb_spur) + break; + cur_bb_spur = cur_bb_spur - freq; + + if (IS_CHAN_HT40(chan)) { + if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) && + (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) { + bb_spur = cur_bb_spur; + break; + } + } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) && + (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) { + bb_spur = cur_bb_spur; + break; + } + } + + if (AR_NO_SPUR == bb_spur) { + REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, + AR_PHY_FORCE_CLKEN_CCK_MRC_MUX); + return; + } else { + REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK, + AR_PHY_FORCE_CLKEN_CCK_MRC_MUX); + } + + bin = bb_spur * 320; + + tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); + + newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | + AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | + AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | + 
AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); + REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal); + + newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL | + AR_PHY_SPUR_REG_ENABLE_MASK_PPM | + AR_PHY_SPUR_REG_MASK_RATE_SELECT | + AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | + SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); + REG_WRITE(ah, AR_PHY_SPUR_REG, newVal); + + if (IS_CHAN_HT40(chan)) { + if (bb_spur < 0) { + spur_subchannel_sd = 1; + bb_spur_off = bb_spur + 10; + } else { + spur_subchannel_sd = 0; + bb_spur_off = bb_spur - 10; + } + } else { + spur_subchannel_sd = 0; + bb_spur_off = bb_spur; + } + + if (IS_CHAN_HT40(chan)) + spur_delta_phase = + ((bb_spur * 262144) / + 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE; + else + spur_delta_phase = + ((bb_spur * 524288) / + 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE; + + denominator = IS_CHAN_2GHZ(chan) ? 44 : 40; + spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff; + + newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | + SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | + SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); + REG_WRITE(ah, AR_PHY_TIMING11, newVal); + + newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S; + REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal); + + cur_bin = -6000; + upper = bin + 100; + lower = bin - 100; + + for (i = 0; i < 4; i++) { + int pilot_mask = 0; + int chan_mask = 0; + int bp = 0; + for (bp = 0; bp < 30; bp++) { + if ((cur_bin > lower) && (cur_bin < upper)) { + pilot_mask = pilot_mask | 0x1 << bp; + chan_mask = chan_mask | 0x1 << bp; + } + cur_bin += 100; + } + cur_bin += inc[i]; + REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); + REG_WRITE(ah, chan_mask_reg[i], chan_mask); + } + + cur_vit_mask = 6100; + upper = bin + 120; + lower = bin - 120; + + for (i = 0; i < 123; i++) { + if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { + + /* workaround for gcc bug #37014 */ + volatile int tmp = abs(cur_vit_mask - bin); + + if (tmp < 75) + mask_amt = 1; + else + mask_amt = 0; + if (cur_vit_mask < 0) + mask_m[abs(cur_vit_mask / 100)] = mask_amt; + else + mask_p[cur_vit_mask / 100] = mask_amt; + } + cur_vit_mask -= 100; + } + + tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) + | (mask_m[48] << 26) | (mask_m[49] << 24) + | (mask_m[50] << 22) | (mask_m[51] << 20) + | (mask_m[52] << 18) | (mask_m[53] << 16) + | (mask_m[54] << 14) | (mask_m[55] << 12) + | (mask_m[56] << 10) | (mask_m[57] << 8) + | (mask_m[58] << 6) | (mask_m[59] << 4) + | (mask_m[60] << 2) | (mask_m[61] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); + REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); + + tmp_mask = (mask_m[31] << 28) + | (mask_m[32] << 26) | (mask_m[33] << 24) + | (mask_m[34] << 22) | (mask_m[35] << 20) + | (mask_m[36] << 18) | (mask_m[37] << 16) + | (mask_m[48] << 14) | (mask_m[39] << 12) + | (mask_m[40] << 10) | (mask_m[41] << 8) + | (mask_m[42] << 6) | (mask_m[43] << 4) + | (mask_m[44] << 2) | (mask_m[45] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask); + + tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) + | (mask_m[18] << 26) | (mask_m[18] << 24) + | (mask_m[20] << 22) | (mask_m[20] << 20) + | (mask_m[22] << 18) | (mask_m[22] << 16) + | (mask_m[24] << 14) | (mask_m[24] << 12) + | (mask_m[25] << 10) | (mask_m[26] << 8) + | (mask_m[27] << 6) | (mask_m[28] << 4) + | (mask_m[29] << 2) | (mask_m[30] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); + + tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) + | (mask_m[2] << 26) | 
(mask_m[3] << 24) + | (mask_m[4] << 22) | (mask_m[5] << 20) + | (mask_m[6] << 18) | (mask_m[7] << 16) + | (mask_m[8] << 14) | (mask_m[9] << 12) + | (mask_m[10] << 10) | (mask_m[11] << 8) + | (mask_m[12] << 6) | (mask_m[13] << 4) + | (mask_m[14] << 2) | (mask_m[15] << 0); + REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); + + tmp_mask = (mask_p[15] << 28) + | (mask_p[14] << 26) | (mask_p[13] << 24) + | (mask_p[12] << 22) | (mask_p[11] << 20) + | (mask_p[10] << 18) | (mask_p[9] << 16) + | (mask_p[8] << 14) | (mask_p[7] << 12) + | (mask_p[6] << 10) | (mask_p[5] << 8) + | (mask_p[4] << 6) | (mask_p[3] << 4) + | (mask_p[2] << 2) | (mask_p[1] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); + + tmp_mask = (mask_p[30] << 28) + | (mask_p[29] << 26) | (mask_p[28] << 24) + | (mask_p[27] << 22) | (mask_p[26] << 20) + | (mask_p[25] << 18) | (mask_p[24] << 16) + | (mask_p[23] << 14) | (mask_p[22] << 12) + | (mask_p[21] << 10) | (mask_p[20] << 8) + | (mask_p[19] << 6) | (mask_p[18] << 4) + | (mask_p[17] << 2) | (mask_p[16] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask); + + tmp_mask = (mask_p[45] << 28) + | (mask_p[44] << 26) | (mask_p[43] << 24) + | (mask_p[42] << 22) | (mask_p[41] << 20) + | (mask_p[40] << 18) | (mask_p[39] << 16) + | (mask_p[38] << 14) | (mask_p[37] << 12) + | (mask_p[36] << 10) | (mask_p[35] << 8) + | (mask_p[34] << 6) | (mask_p[33] << 4) + | (mask_p[32] << 2) | (mask_p[31] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask); + + tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28) + | (mask_p[59] << 26) | (mask_p[58] << 24) + | (mask_p[57] << 22) | (mask_p[56] << 20) + | (mask_p[55] << 18) | (mask_p[54] << 16) + | (mask_p[53] << 14) | (mask_p[52] << 12) + | (mask_p[51] << 10) | (mask_p[50] << 8) + | (mask_p[49] << 6) | (mask_p[48] << 4) + | (mask_p[47] << 2) | (mask_p[46] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); +} + +static void ath9k_hw_spur_mitigate(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + int bb_spur = AR_NO_SPUR; + int bin, cur_bin; + int spur_freq_sd; + int spur_delta_phase; + int denominator; + int upper, lower, cur_vit_mask; + int tmp, new; + int i; + int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, + AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 + }; + int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, + AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 + }; + int inc[4] = { 0, 100, 0, 0 }; + + int8_t mask_m[123]; + int8_t mask_p[123]; + int8_t mask_amt; + int tmp_mask; + int cur_bb_spur; + bool is2GHz = IS_CHAN_2GHZ(chan); + + memset(&mask_m, 0, sizeof(int8_t) * 123); + memset(&mask_p, 0, sizeof(int8_t) * 123); + + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { + cur_bb_spur = ath9k_hw_eeprom_get_spur_chan(ah, i, is2GHz); + if (AR_NO_SPUR == cur_bb_spur) + break; + cur_bb_spur = cur_bb_spur - (chan->channel * 10); + if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) { + bb_spur = cur_bb_spur; + break; + } + } + + if (AR_NO_SPUR == bb_spur) + return; + + bin = bb_spur * 32; + + tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0)); + new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI | + AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER | + AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK | + AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK); + + REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new); + + new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL 
| + AR_PHY_SPUR_REG_ENABLE_MASK_PPM | + AR_PHY_SPUR_REG_MASK_RATE_SELECT | + AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI | + SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH)); + REG_WRITE(ah, AR_PHY_SPUR_REG, new); + + spur_delta_phase = ((bb_spur * 524288) / 100) & + AR_PHY_TIMING11_SPUR_DELTA_PHASE; + + denominator = IS_CHAN_2GHZ(chan) ? 440 : 400; + spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff; + + new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC | + SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) | + SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE)); + REG_WRITE(ah, AR_PHY_TIMING11, new); + + cur_bin = -6000; + upper = bin + 100; + lower = bin - 100; + + for (i = 0; i < 4; i++) { + int pilot_mask = 0; + int chan_mask = 0; + int bp = 0; + for (bp = 0; bp < 30; bp++) { + if ((cur_bin > lower) && (cur_bin < upper)) { + pilot_mask = pilot_mask | 0x1 << bp; + chan_mask = chan_mask | 0x1 << bp; + } + cur_bin += 100; + } + cur_bin += inc[i]; + REG_WRITE(ah, pilot_mask_reg[i], pilot_mask); + REG_WRITE(ah, chan_mask_reg[i], chan_mask); + } + + cur_vit_mask = 6100; + upper = bin + 120; + lower = bin - 120; + + for (i = 0; i < 123; i++) { + if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) { + if ((abs(cur_vit_mask - bin)) < 75) + mask_amt = 1; + else + mask_amt = 0; + if (cur_vit_mask < 0) + mask_m[abs(cur_vit_mask / 100)] = mask_amt; + else + mask_p[cur_vit_mask / 100] = mask_amt; + } + cur_vit_mask -= 100; + } + + tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28) + | (mask_m[48] << 26) | (mask_m[49] << 24) + | (mask_m[50] << 22) | (mask_m[51] << 20) + | (mask_m[52] << 18) | (mask_m[53] << 16) + | (mask_m[54] << 14) | (mask_m[55] << 12) + | (mask_m[56] << 10) | (mask_m[57] << 8) + | (mask_m[58] << 6) | (mask_m[59] << 4) + | (mask_m[60] << 2) | (mask_m[61] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask); + REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask); + + tmp_mask = (mask_m[31] << 28) + | (mask_m[32] << 26) | (mask_m[33] << 24) + | (mask_m[34] << 22) | (mask_m[35] << 20) + | (mask_m[36] << 18) | (mask_m[37] << 16) + | (mask_m[48] << 14) | (mask_m[39] << 12) + | (mask_m[40] << 10) | (mask_m[41] << 8) + | (mask_m[42] << 6) | (mask_m[43] << 4) + | (mask_m[44] << 2) | (mask_m[45] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask); + + tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28) + | (mask_m[18] << 26) | (mask_m[18] << 24) + | (mask_m[20] << 22) | (mask_m[20] << 20) + | (mask_m[22] << 18) | (mask_m[22] << 16) + | (mask_m[24] << 14) | (mask_m[24] << 12) + | (mask_m[25] << 10) | (mask_m[26] << 8) + | (mask_m[27] << 6) | (mask_m[28] << 4) + | (mask_m[29] << 2) | (mask_m[30] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask); + + tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28) + | (mask_m[2] << 26) | (mask_m[3] << 24) + | (mask_m[4] << 22) | (mask_m[5] << 20) + | (mask_m[6] << 18) | (mask_m[7] << 16) + | (mask_m[8] << 14) | (mask_m[9] << 12) + | (mask_m[10] << 10) | (mask_m[11] << 8) + | (mask_m[12] << 6) | (mask_m[13] << 4) + | (mask_m[14] << 2) | (mask_m[15] << 0); + REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask); + + tmp_mask = (mask_p[15] << 28) + | (mask_p[14] << 26) | (mask_p[13] << 24) + | (mask_p[12] << 22) | (mask_p[11] << 20) + | (mask_p[10] << 18) | (mask_p[9] << 16) + | (mask_p[8] << 14) | (mask_p[7] << 12) + | (mask_p[6] << 10) | (mask_p[5] << 8) + | (mask_p[4] << 6) | (mask_p[3] << 4) + | (mask_p[2] << 2) | (mask_p[1] << 0); 
+ REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask); + + tmp_mask = (mask_p[30] << 28) + | (mask_p[29] << 26) | (mask_p[28] << 24) + | (mask_p[27] << 22) | (mask_p[26] << 20) + | (mask_p[25] << 18) | (mask_p[24] << 16) + | (mask_p[23] << 14) | (mask_p[22] << 12) + | (mask_p[21] << 10) | (mask_p[20] << 8) + | (mask_p[19] << 6) | (mask_p[18] << 4) + | (mask_p[17] << 2) | (mask_p[16] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask); + + tmp_mask = (mask_p[45] << 28) + | (mask_p[44] << 26) | (mask_p[43] << 24) + | (mask_p[42] << 22) | (mask_p[41] << 20) + | (mask_p[40] << 18) | (mask_p[39] << 16) + | (mask_p[38] << 14) | (mask_p[37] << 12) + | (mask_p[36] << 10) | (mask_p[35] << 8) + | (mask_p[34] << 6) | (mask_p[33] << 4) + | (mask_p[32] << 2) | (mask_p[31] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask); + + tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28) + | (mask_p[59] << 26) | (mask_p[58] << 24) + | (mask_p[57] << 22) | (mask_p[56] << 20) + | (mask_p[55] << 18) | (mask_p[54] << 16) + | (mask_p[53] << 14) | (mask_p[52] << 12) + | (mask_p[51] << 10) | (mask_p[50] << 8) + | (mask_p[49] << 6) | (mask_p[48] << 4) + | (mask_p[47] << 2) | (mask_p[46] << 0); + REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask); + REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask); +} + +static inline void ath9k_hw_init_chain_masks(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + int rx_chainmask, tx_chainmask; + + rx_chainmask = ahp->ah_rxchainmask; + tx_chainmask = ahp->ah_txchainmask; + + switch (rx_chainmask) { + case 0x5: + REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, + AR_PHY_SWAP_ALT_CHAIN); + case 0x3: + if (((ah)->ah_macVersion <= AR_SREV_VERSION_9160)) { + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7); + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7); + break; + } + case 0x1: + case 0x2: + if (!AR_SREV_9280(ah)) + break; + case 0x7: + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); + break; + default: + break; + } + + REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask); + if (tx_chainmask == 0x5) { + REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, + AR_PHY_SWAP_ALT_CHAIN); + } + if (AR_SREV_9100(ah)) + REG_WRITE(ah, AR_PHY_ANALOG_SWAP, + REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001); +} + +static void ath9k_hw_set_addac(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + struct modal_eep_header *pModal; + struct ath_hal_5416 *ahp = AH5416(ah); + struct ar5416_eeprom *eep = &ahp->ah_eeprom; + u8 biaslevel; + + if (ah->ah_macVersion != AR_SREV_VERSION_9160) + return; + + if (ar5416_get_eep_rev(ahp) < AR5416_EEP_MINOR_VER_7) + return; + + pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); + + if (pModal->xpaBiasLvl != 0xff) { + biaslevel = pModal->xpaBiasLvl; + } else { + + u16 resetFreqBin, freqBin, freqCount = 0; + struct chan_centers centers; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + + resetFreqBin = + FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)); + freqBin = pModal->xpaBiasLvlFreq[0] & 0xff; + biaslevel = (u8) (pModal->xpaBiasLvlFreq[0] >> 14); + + freqCount++; + + while (freqCount < 3) { + if (pModal->xpaBiasLvlFreq[freqCount] == 0x0) + break; + + freqBin = pModal->xpaBiasLvlFreq[freqCount] & 0xff; + if (resetFreqBin >= freqBin) { + biaslevel = + (u8) (pModal-> + xpaBiasLvlFreq[freqCount] + >> 14); + } else { + break; + } + freqCount++; + } + } + + if (IS_CHAN_2GHZ(chan)) { + INI_RA(&ahp->ah_iniAddac, 7, 1) = + 
(INI_RA(&ahp->ah_iniAddac, 7, 1) & (~0x18)) | biaslevel + << 3; + } else { + INI_RA(&ahp->ah_iniAddac, 6, 1) = + (INI_RA(&ahp->ah_iniAddac, 6, 1) & (~0xc0)) | biaslevel + << 6; + } +} + +static u32 ath9k_hw_mac_usec(struct ath_hal *ah, u32 clks) +{ + if (ah->ah_curchan != NULL) + return clks / + CLOCK_RATE[ath9k_hw_chan2wmode(ah, ah->ah_curchan)]; + else + return clks / CLOCK_RATE[ATH9K_MODE_11B]; +} + +static u32 ath9k_hw_mac_to_usec(struct ath_hal *ah, u32 clks) +{ + struct ath9k_channel *chan = ah->ah_curchan; + + if (chan && IS_CHAN_HT40(chan)) + return ath9k_hw_mac_usec(ah, clks) / 2; + else + return ath9k_hw_mac_usec(ah, clks); +} + +static u32 ath9k_hw_mac_clks(struct ath_hal *ah, u32 usecs) +{ + if (ah->ah_curchan != NULL) + return usecs * CLOCK_RATE[ath9k_hw_chan2wmode(ah, + ah->ah_curchan)]; + else + return usecs * CLOCK_RATE[ATH9K_MODE_11B]; +} + +static u32 ath9k_hw_mac_to_clks(struct ath_hal *ah, u32 usecs) +{ + struct ath9k_channel *chan = ah->ah_curchan; + + if (chan && IS_CHAN_HT40(chan)) + return ath9k_hw_mac_clks(ah, usecs) * 2; + else + return ath9k_hw_mac_clks(ah, usecs); +} + +static bool ath9k_hw_set_ack_timeout(struct ath_hal *ah, u32 us) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad ack timeout %u\n", + __func__, us); + ahp->ah_acktimeout = (u32) -1; + return false; + } else { + REG_RMW_FIELD(ah, AR_TIME_OUT, + AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us)); + ahp->ah_acktimeout = us; + return true; + } +} + +static bool ath9k_hw_set_cts_timeout(struct ath_hal *ah, u32 us) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad cts timeout %u\n", + __func__, us); + ahp->ah_ctstimeout = (u32) -1; + return false; + } else { + REG_RMW_FIELD(ah, AR_TIME_OUT, + AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us)); + ahp->ah_ctstimeout = us; + return true; + } +} +static bool ath9k_hw_set_global_txtimeout(struct ath_hal *ah, + u32 tu) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + if (tu > 0xFFFF) { + DPRINTF(ah->ah_sc, ATH_DBG_XMIT, + "%s: bad global tx timeout %u\n", __func__, tu); + ahp->ah_globaltxtimeout = (u32) -1; + return false; + } else { + REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu); + ahp->ah_globaltxtimeout = tu; + return true; + } +} + +bool ath9k_hw_setslottime(struct ath_hal *ah, u32 us) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: bad slot time %u\n", + __func__, us); + ahp->ah_slottime = (u32) -1; + return false; + } else { + REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us)); + ahp->ah_slottime = us; + return true; + } +} + +static inline void ath9k_hw_init_user_settings(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + DPRINTF(ah->ah_sc, ATH_DBG_RESET, "--AP %s ahp->ah_miscMode 0x%x\n", + __func__, ahp->ah_miscMode); + if (ahp->ah_miscMode != 0) + REG_WRITE(ah, AR_PCU_MISC, + REG_READ(ah, AR_PCU_MISC) | ahp->ah_miscMode); + if (ahp->ah_slottime != (u32) -1) + ath9k_hw_setslottime(ah, ahp->ah_slottime); + if (ahp->ah_acktimeout != (u32) -1) + ath9k_hw_set_ack_timeout(ah, ahp->ah_acktimeout); + if (ahp->ah_ctstimeout != (u32) -1) + ath9k_hw_set_cts_timeout(ah, ahp->ah_ctstimeout); + if (ahp->ah_globaltxtimeout != (u32) -1) + ath9k_hw_set_global_txtimeout(ah, ahp->ah_globaltxtimeout); +} 
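+
+/*
+ * Write the mode-specific and common INI register tables (and the
+ * rev-dependent ADDAC values) for the target channel, then program
+ * transmit power and the RF registers.  Returns 0 on success or a
+ * negative errno on failure.
+ */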
+ +static inline int +ath9k_hw_process_ini(struct ath_hal *ah, + struct ath9k_channel *chan, + enum ath9k_ht_macmode macmode) +{ + int i, regWrites = 0; + struct ath_hal_5416 *ahp = AH5416(ah); + u32 modesIndex, freqIndex; + int status; + + switch (chan->chanmode) { + case CHANNEL_A: + case CHANNEL_A_HT20: + modesIndex = 1; + freqIndex = 1; + break; + case CHANNEL_A_HT40PLUS: + case CHANNEL_A_HT40MINUS: + modesIndex = 2; + freqIndex = 1; + break; + case CHANNEL_G: + case CHANNEL_G_HT20: + case CHANNEL_B: + modesIndex = 4; + freqIndex = 2; + break; + case CHANNEL_G_HT40PLUS: + case CHANNEL_G_HT40MINUS: + modesIndex = 3; + freqIndex = 2; + break; + + default: + return -EINVAL; + } + + REG_WRITE(ah, AR_PHY(0), 0x00000007); + + REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO); + + ath9k_hw_set_addac(ah, chan); + + if (AR_SREV_5416_V22_OR_LATER(ah)) { + REG_WRITE_ARRAY(&ahp->ah_iniAddac, 1, regWrites); + } else { + struct ar5416IniArray temp; + u32 addacSize = + sizeof(u32) * ahp->ah_iniAddac.ia_rows * + ahp->ah_iniAddac.ia_columns; + + memcpy(ahp->ah_addac5416_21, + ahp->ah_iniAddac.ia_array, addacSize); + + (ahp->ah_addac5416_21)[31 * + ahp->ah_iniAddac.ia_columns + 1] = 0; + + temp.ia_array = ahp->ah_addac5416_21; + temp.ia_columns = ahp->ah_iniAddac.ia_columns; + temp.ia_rows = ahp->ah_iniAddac.ia_rows; + REG_WRITE_ARRAY(&temp, 1, regWrites); + } + REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC); + + for (i = 0; i < ahp->ah_iniModes.ia_rows; i++) { + u32 reg = INI_RA(&ahp->ah_iniModes, i, 0); + u32 val = INI_RA(&ahp->ah_iniModes, i, modesIndex); + +#ifdef CONFIG_SLOW_ANT_DIV + if (ah->ah_devid == AR9280_DEVID_PCI) + val = ath9k_hw_ini_fixup(ah, &ahp->ah_eeprom, reg, + val); +#endif + + REG_WRITE(ah, reg, val); + + if (reg >= 0x7800 && reg < 0x78a0 + && ah->ah_config.analog_shiftreg) { + udelay(100); + } + + DO_DELAY(regWrites); + } + + for (i = 0; i < ahp->ah_iniCommon.ia_rows; i++) { + u32 reg = INI_RA(&ahp->ah_iniCommon, i, 0); + u32 val = INI_RA(&ahp->ah_iniCommon, i, 1); + + REG_WRITE(ah, reg, val); + + if (reg >= 0x7800 && reg < 0x78a0 + && ah->ah_config.analog_shiftreg) { + udelay(100); + } + + DO_DELAY(regWrites); + } + + ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites); + + if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) { + REG_WRITE_ARRAY(&ahp->ah_iniModesAdditional, modesIndex, + regWrites); + } + + ath9k_hw_override_ini(ah, chan); + ath9k_hw_set_regs(ah, chan, macmode); + ath9k_hw_init_chain_masks(ah); + + status = ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan, + ath9k_regd_get_ctl(ah, chan), + ath9k_regd_get_antenna_allowed(ah, + chan), + chan->maxRegTxPower * 2, + min((u32) MAX_RATE_POWER, + (u32) ah->ah_powerLimit)); + if (status != 0) { + DPRINTF(ah->ah_sc, ATH_DBG_POWER_MGMT, + "%s: error init'ing transmit power\n", __func__); + return -EIO; + } + + if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { + DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, + "%s: ar5416SetRfRegs failed\n", __func__); + return -EIO; + } + + return 0; +} + +static inline void ath9k_hw_setup_calibration(struct ath_hal *ah, + struct hal_cal_list *currCal) +{ + REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0), + AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX, + currCal->calData->calCountMax); + + switch (currCal->calData->calType) { + case IQ_MISMATCH_CAL: + REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: starting IQ Mismatch Calibration\n", + __func__); + break; + case ADC_GAIN_CAL: + REG_WRITE(ah, AR_PHY_CALMODE, 
AR_PHY_CALMODE_ADC_GAIN);
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "%s: starting ADC Gain Calibration\n", __func__);
+                break;
+        case ADC_DC_CAL:
+                REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "%s: starting ADC DC Calibration\n", __func__);
+                break;
+        case ADC_DC_INIT_CAL:
+                REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "%s: starting Init ADC DC Calibration\n",
+                        __func__);
+                break;
+        }
+
+        REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+                    AR_PHY_TIMING_CTRL4_DO_CAL);
+}
+
+static inline void ath9k_hw_reset_calibration(struct ath_hal *ah,
+                                              struct hal_cal_list *currCal)
+{
+        struct ath_hal_5416 *ahp = AH5416(ah);
+        int i;
+
+        ath9k_hw_setup_calibration(ah, currCal);
+
+        currCal->calState = CAL_RUNNING;
+
+        for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+                ahp->ah_Meas0.sign[i] = 0;
+                ahp->ah_Meas1.sign[i] = 0;
+                ahp->ah_Meas2.sign[i] = 0;
+                ahp->ah_Meas3.sign[i] = 0;
+        }
+
+        ahp->ah_CalSamples = 0;
+}
+
+static inline void
+ath9k_hw_per_calibration(struct ath_hal *ah,
+                         struct ath9k_channel *ichan,
+                         u8 rxchainmask,
+                         struct hal_cal_list *currCal,
+                         bool *isCalDone)
+{
+        struct ath_hal_5416 *ahp = AH5416(ah);
+
+        *isCalDone = false;
+
+        if (currCal->calState == CAL_RUNNING) {
+                if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
+                      AR_PHY_TIMING_CTRL4_DO_CAL)) {
+
+                        currCal->calData->calCollect(ah);
+
+                        ahp->ah_CalSamples++;
+
+                        if (ahp->ah_CalSamples >=
+                            currCal->calData->calNumSamples) {
+                                int i, numChains = 0;
+                                for (i = 0; i < AR5416_MAX_CHAINS; i++) {
+                                        if (rxchainmask & (1 << i))
+                                                numChains++;
+                                }
+
+                                currCal->calData->calPostProc(ah, numChains);
+
+                                ichan->CalValid |= currCal->calData->calType;
+                                currCal->calState = CAL_DONE;
+                                *isCalDone = true;
+                        } else {
+                                ath9k_hw_setup_calibration(ah, currCal);
+                        }
+                }
+        } else if (!(ichan->CalValid & currCal->calData->calType)) {
+                ath9k_hw_reset_calibration(ah, currCal);
+        }
+}
+
+static inline bool ath9k_hw_run_init_cals(struct ath_hal *ah,
+                                          int init_cal_count)
+{
+        struct ath_hal_5416 *ahp = AH5416(ah);
+        struct ath9k_channel ichan;
+        bool isCalDone;
+        struct hal_cal_list *currCal = ahp->ah_cal_list_curr;
+        const struct hal_percal_data *calData;
+        int i;
+
+        if (currCal == NULL)
+                return false;
+
+        calData = currCal->calData;
+        ichan.CalValid = 0;
+
+        for (i = 0; i < init_cal_count; i++) {
+                ath9k_hw_reset_calibration(ah, currCal);
+
+                if (!ath9k_hw_wait(ah, AR_PHY_TIMING_CTRL4(0),
+                                   AR_PHY_TIMING_CTRL4_DO_CAL, 0)) {
+                        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                                "%s: Cal %d failed to complete in 100ms.\n",
+                                __func__, calData->calType);
+
+                        ahp->ah_cal_list = ahp->ah_cal_list_last =
+                                ahp->ah_cal_list_curr = NULL;
+                        return false;
+                }
+
+                ath9k_hw_per_calibration(ah, &ichan, ahp->ah_rxchainmask,
+                                         currCal, &isCalDone);
+                if (!isCalDone) {
+                        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                                "%s: Not able to run Init Cal %d.\n",
+                                __func__, calData->calType);
+                }
+                if (currCal->calNext) {
+                        currCal = currCal->calNext;
+                        calData = currCal->calData;
+                }
+        }
+
+        ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr = NULL;
+        return true;
+}
+
+static inline bool
+ath9k_hw_channel_change(struct ath_hal *ah,
+                        struct ath9k_channel *chan,
+                        enum ath9k_ht_macmode macmode)
+{
+        u32 synthDelay, qnum;
+        struct ath_hal_5416 *ahp = AH5416(ah);
+
+        for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
+                if (ath9k_hw_numtxpending(ah, qnum)) {
+                        DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
+                                "%s: Transmit frames pending on queue %d\n",
+                                __func__, qnum);
+                        return false;
+                }
+        }
+
+        REG_WRITE(ah,
AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN); + if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN, + AR_PHY_RFBUS_GRANT_EN)) { + DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO, + "%s: Could not kill baseband RX\n", __func__); + return false; + } + + ath9k_hw_set_regs(ah, chan, macmode); + + if (AR_SREV_9280_10_OR_LATER(ah)) { + if (!(ath9k_hw_ar9280_set_channel(ah, chan))) { + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: failed to set channel\n", __func__); + return false; + } + } else { + if (!(ath9k_hw_set_channel(ah, chan))) { + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: failed to set channel\n", __func__); + return false; + } + } + + if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan, + ath9k_regd_get_ctl(ah, chan), + ath9k_regd_get_antenna_allowed(ah, chan), + chan->maxRegTxPower * 2, + min((u32) MAX_RATE_POWER, + (u32) ah->ah_powerLimit)) != 0) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "%s: error init'ing transmit power\n", __func__); + return false; + } + + synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; + if (IS_CHAN_CCK(chan)) + synthDelay = (4 * synthDelay) / 22; + else + synthDelay /= 10; + + udelay(synthDelay + BASE_ACTIVATE_DELAY); + + REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); + + if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) + ath9k_hw_set_delta_slope(ah, chan); + + if (AR_SREV_9280_10_OR_LATER(ah)) + ath9k_hw_9280_spur_mitigate(ah, chan); + else + ath9k_hw_spur_mitigate(ah, chan); + + if (!chan->oneTimeCalsDone) + chan->oneTimeCalsDone = true; + + return true; +} + +static bool ath9k_hw_chip_reset(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM)) + return false; + + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) + return false; + + ahp->ah_chipFullSleep = false; + + ath9k_hw_init_pll(ah, chan); + + ath9k_hw_set_rfmode(ah, chan); + + return true; +} + +static inline void ath9k_hw_set_dma(struct ath_hal *ah) +{ + u32 regval; + + regval = REG_READ(ah, AR_AHB_MODE); + REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN); + + regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK; + REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B); + + REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->ah_txTrigLevel); + + regval = REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_DMASZ_MASK; + REG_WRITE(ah, AR_RXCFG, regval | AR_RXCFG_DMASZ_128B); + + REG_WRITE(ah, AR_RXFIFO_CFG, 0x200); + + if (AR_SREV_9285(ah)) { + REG_WRITE(ah, AR_PCU_TXBUF_CTRL, + AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE); + } else { + REG_WRITE(ah, AR_PCU_TXBUF_CTRL, + AR_PCU_TXBUF_CTRL_USABLE_SIZE); + } +} + +bool ath9k_hw_stopdmarecv(struct ath_hal *ah) +{ + REG_WRITE(ah, AR_CR, AR_CR_RXD); + if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, + "%s: dma failed to stop in 10ms\n" + "AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n", + __func__, + REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW)); + return false; + } else { + return true; + } +} + +void ath9k_hw_startpcureceive(struct ath_hal *ah) +{ + REG_CLR_BIT(ah, AR_DIAG_SW, + (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); + + ath9k_enable_mib_counters(ah); + + ath9k_ani_reset(ah); +} + +void ath9k_hw_stoppcurecv(struct ath_hal *ah) +{ + REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS); + + ath9k_hw_disable_mib_counters(ah); +} + +static bool ath9k_hw_iscal_supported(struct ath_hal *ah, + struct ath9k_channel *chan, + enum hal_cal_types calType) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + bool retval = false; + + switch (calType & ahp->ah_suppCals) { + case IQ_MISMATCH_CAL: + if 
(!IS_CHAN_B(chan)) + retval = true; + break; + case ADC_GAIN_CAL: + case ADC_DC_CAL: + if (!IS_CHAN_B(chan) + && !(IS_CHAN_2GHZ(chan) && IS_CHAN_HT20(chan))) + retval = true; + break; + } + + return retval; +} + +static inline bool ath9k_hw_init_cal(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_channel *ichan = + ath9k_regd_check_channel(ah, chan); + + REG_WRITE(ah, AR_PHY_AGC_CONTROL, + REG_READ(ah, AR_PHY_AGC_CONTROL) | + AR_PHY_AGC_CONTROL_CAL); + + if (!ath9k_hw_wait + (ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 0)) { + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: offset calibration failed to complete in 1ms; " + "noisy environment?\n", __func__); + return false; + } + + REG_WRITE(ah, AR_PHY_AGC_CONTROL, + REG_READ(ah, AR_PHY_AGC_CONTROL) | + AR_PHY_AGC_CONTROL_NF); + + ahp->ah_cal_list = ahp->ah_cal_list_last = ahp->ah_cal_list_curr = + NULL; + + if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) { + if (ath9k_hw_iscal_supported(ah, chan, ADC_GAIN_CAL)) { + INIT_CAL(&ahp->ah_adcGainCalData); + INSERT_CAL(ahp, &ahp->ah_adcGainCalData); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: enabling ADC Gain Calibration.\n", + __func__); + } + if (ath9k_hw_iscal_supported(ah, chan, ADC_DC_CAL)) { + INIT_CAL(&ahp->ah_adcDcCalData); + INSERT_CAL(ahp, &ahp->ah_adcDcCalData); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: enabling ADC DC Calibration.\n", + __func__); + } + if (ath9k_hw_iscal_supported(ah, chan, IQ_MISMATCH_CAL)) { + INIT_CAL(&ahp->ah_iqCalData); + INSERT_CAL(ahp, &ahp->ah_iqCalData); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: enabling IQ Calibration.\n", + __func__); + } + + ahp->ah_cal_list_curr = ahp->ah_cal_list; + + if (ahp->ah_cal_list_curr) + ath9k_hw_reset_calibration(ah, + ahp->ah_cal_list_curr); + } + + ichan->CalValid = 0; + + return true; +} + + +bool ath9k_hw_reset(struct ath_hal *ah, enum ath9k_opmode opmode, + struct ath9k_channel *chan, + enum ath9k_ht_macmode macmode, + u8 txchainmask, u8 rxchainmask, + enum ath9k_ht_extprotspacing extprotspacing, + bool bChannelChange, + int *status) +{ +#define FAIL(_code) do { ecode = _code; goto bad; } while (0) + u32 saveLedState; + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_channel *curchan = ah->ah_curchan; + u32 saveDefAntenna; + u32 macStaId1; + int ecode; + int i, rx_chainmask; + + ahp->ah_extprotspacing = extprotspacing; + ahp->ah_txchainmask = txchainmask; + ahp->ah_rxchainmask = rxchainmask; + + if (AR_SREV_9280(ah)) { + ahp->ah_txchainmask &= 0x3; + ahp->ah_rxchainmask &= 0x3; + } + + if (ath9k_hw_check_chan(ah, chan) == NULL) { + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: invalid channel %u/0x%x; no mapping\n", + __func__, chan->channel, chan->channelFlags); + FAIL(-EINVAL); + } + + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) + return false; + + if (curchan) + ath9k_hw_getnf(ah, curchan); + + if (bChannelChange && + (ahp->ah_chipFullSleep != true) && + (ah->ah_curchan != NULL) && + (chan->channel != ah->ah_curchan->channel) && + ((chan->channelFlags & CHANNEL_ALL) == + (ah->ah_curchan->channelFlags & CHANNEL_ALL)) && + (!AR_SREV_9280(ah) || (!IS_CHAN_A_5MHZ_SPACED(chan) && + !IS_CHAN_A_5MHZ_SPACED(ah-> + ah_curchan)))) { + + if (ath9k_hw_channel_change(ah, chan, macmode)) { + ath9k_hw_loadnf(ah, ah->ah_curchan); + ath9k_hw_start_nfcal(ah); + return true; + } + } + + saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA); + if (saveDefAntenna == 0) + saveDefAntenna = 1; + + macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B; + + 
saveLedState = REG_READ(ah, AR_CFG_LED) & + (AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL | + AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW); + + ath9k_hw_mark_phy_inactive(ah); + + if (!ath9k_hw_chip_reset(ah, chan)) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s: chip reset failed\n", + __func__); + FAIL(-EIO); + } + + if (AR_SREV_9280(ah)) { + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, + AR_GPIO_JTAG_DISABLE); + + if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes)) { + if (IS_CHAN_5GHZ(chan)) + ath9k_hw_set_gpio(ah, 9, 0); + else + ath9k_hw_set_gpio(ah, 9, 1); + } + ath9k_hw_cfg_output(ah, 9, ATH9K_GPIO_OUTPUT_MUX_AS_OUTPUT); + } + + ecode = ath9k_hw_process_ini(ah, chan, macmode); + if (ecode != 0) + goto bad; + + if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) + ath9k_hw_set_delta_slope(ah, chan); + + if (AR_SREV_9280_10_OR_LATER(ah)) + ath9k_hw_9280_spur_mitigate(ah, chan); + else + ath9k_hw_spur_mitigate(ah, chan); + + if (!ath9k_hw_eeprom_set_board_values(ah, chan)) { + DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, + "%s: error setting board options\n", __func__); + FAIL(-EIO); + } + + ath9k_hw_decrease_chain_power(ah, chan); + + REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ahp->ah_macaddr)); + REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ahp->ah_macaddr + 4) + | macStaId1 + | AR_STA_ID1_RTS_USE_DEF + | (ah->ah_config. + ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0) + | ahp->ah_staId1Defaults); + ath9k_hw_set_operating_mode(ah, opmode); + + REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask)); + REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4)); + + REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna); + + REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid)); + REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) | + ((ahp->ah_assocId & 0x3fff) << AR_BSS_ID1_AID_S)); + + REG_WRITE(ah, AR_ISR, ~0); + + REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); + + if (AR_SREV_9280_10_OR_LATER(ah)) { + if (!(ath9k_hw_ar9280_set_channel(ah, chan))) + FAIL(-EIO); + } else { + if (!(ath9k_hw_set_channel(ah, chan))) + FAIL(-EIO); + } + + for (i = 0; i < AR_NUM_DCU; i++) + REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); + + ahp->ah_intrTxqs = 0; + for (i = 0; i < ah->ah_caps.total_queues; i++) + ath9k_hw_resettxqueue(ah, i); + + ath9k_hw_init_interrupt_masks(ah, opmode); + ath9k_hw_init_qos(ah); + + ath9k_hw_init_user_settings(ah); + + ah->ah_opmode = opmode; + + REG_WRITE(ah, AR_STA_ID1, + REG_READ(ah, AR_STA_ID1) | AR_STA_ID1_PRESERVE_SEQNUM); + + ath9k_hw_set_dma(ah); + + REG_WRITE(ah, AR_OBS, 8); + + if (ahp->ah_intrMitigation) { + + REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); + REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); + } + + ath9k_hw_init_bb(ah, chan); + + if (!ath9k_hw_init_cal(ah, chan)) + FAIL(-ENODEV); + + rx_chainmask = ahp->ah_rxchainmask; + if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) { + REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask); + REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask); + } + + REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ); + + if (AR_SREV_9100(ah)) { + u32 mask; + mask = REG_READ(ah, AR_CFG); + if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s CFG Byte Swap Set 0x%x\n", __func__, + mask); + } else { + mask = + INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; + REG_WRITE(ah, AR_CFG, mask); + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s Setting CFG 0x%x\n", __func__, + REG_READ(ah, AR_CFG)); + } + } else { +#ifdef __BIG_ENDIAN + REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD); +#endif + } 
+ + return true; +bad: + if (status) + *status = ecode; + return false; +#undef FAIL +} + +bool ath9k_hw_phy_disable(struct ath_hal *ah) +{ + return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM); +} + +bool ath9k_hw_disable(struct ath_hal *ah) +{ + if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) + return false; + + return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD); +} + +bool +ath9k_hw_calibrate(struct ath_hal *ah, struct ath9k_channel *chan, + u8 rxchainmask, bool longcal, + bool *isCalDone) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct hal_cal_list *currCal = ahp->ah_cal_list_curr; + struct ath9k_channel *ichan = + ath9k_regd_check_channel(ah, chan); + + *isCalDone = true; + + if (ichan == NULL) { + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: invalid channel %u/0x%x; no mapping\n", + __func__, chan->channel, chan->channelFlags); + return false; + } + + if (currCal && + (currCal->calState == CAL_RUNNING || + currCal->calState == CAL_WAITING)) { + ath9k_hw_per_calibration(ah, ichan, rxchainmask, currCal, + isCalDone); + if (*isCalDone) { + ahp->ah_cal_list_curr = currCal = currCal->calNext; + + if (currCal->calState == CAL_WAITING) { + *isCalDone = false; + ath9k_hw_reset_calibration(ah, currCal); + } + } + } + + if (longcal) { + ath9k_hw_getnf(ah, ichan); + ath9k_hw_loadnf(ah, ah->ah_curchan); + ath9k_hw_start_nfcal(ah); + + if ((ichan->channelFlags & CHANNEL_CW_INT) != 0) { + + chan->channelFlags |= CHANNEL_CW_INT; + ichan->channelFlags &= ~CHANNEL_CW_INT; + } + } + + return true; +} + +static void ath9k_hw_iqcal_collect(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + int i; + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + ahp->ah_totalPowerMeasI[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); + ahp->ah_totalPowerMeasQ[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); + ahp->ah_totalIqCorrMeas[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", + ahp->ah_CalSamples, i, ahp->ah_totalPowerMeasI[i], + ahp->ah_totalPowerMeasQ[i], + ahp->ah_totalIqCorrMeas[i]); + } +} + +static void ath9k_hw_adc_gaincal_collect(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + int i; + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + ahp->ah_totalAdcIOddPhase[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); + ahp->ah_totalAdcIEvenPhase[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); + ahp->ah_totalAdcQOddPhase[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); + ahp->ah_totalAdcQEvenPhase[i] += + REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); + + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " + "oddq=0x%08x; evenq=0x%08x;\n", + ahp->ah_CalSamples, i, + ahp->ah_totalAdcIOddPhase[i], + ahp->ah_totalAdcIEvenPhase[i], + ahp->ah_totalAdcQOddPhase[i], + ahp->ah_totalAdcQEvenPhase[i]); + } +} + +static void ath9k_hw_adc_dccal_collect(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + int i; + + for (i = 0; i < AR5416_MAX_CHAINS; i++) { + ahp->ah_totalAdcDcOffsetIOddPhase[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); + ahp->ah_totalAdcDcOffsetIEvenPhase[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); + ahp->ah_totalAdcDcOffsetQOddPhase[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); + ahp->ah_totalAdcDcOffsetQEvenPhase[i] += + (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); + + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " + "oddq=0x%08x; evenq=0x%08x;\n", + ahp->ah_CalSamples, i, + ahp->ah_totalAdcDcOffsetIOddPhase[i], + 
ahp->ah_totalAdcDcOffsetIEvenPhase[i],
+                        ahp->ah_totalAdcDcOffsetQOddPhase[i],
+                        ahp->ah_totalAdcDcOffsetQEvenPhase[i]);
+        }
+}
+
+static void ath9k_hw_iqcalibrate(struct ath_hal *ah, u8 numChains)
+{
+        struct ath_hal_5416 *ahp = AH5416(ah);
+        u32 powerMeasQ, powerMeasI, iqCorrMeas;
+        u32 qCoffDenom, iCoffDenom;
+        int32_t qCoff, iCoff;
+        int iqCorrNeg, i;
+
+        for (i = 0; i < numChains; i++) {
+                powerMeasI = ahp->ah_totalPowerMeasI[i];
+                powerMeasQ = ahp->ah_totalPowerMeasQ[i];
+                iqCorrMeas = ahp->ah_totalIqCorrMeas[i];
+
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "Starting IQ Cal and Correction for Chain %d\n",
+                        i);
+
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "Original: Chn %d iq_corr_meas = 0x%08x\n",
+                        i, ahp->ah_totalIqCorrMeas[i]);
+
+                iqCorrNeg = 0;
+
+                if (iqCorrMeas > 0x80000000) {
+                        iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
+                        iqCorrNeg = 1;
+                }
+
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
+                        iqCorrNeg);
+
+                iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
+                qCoffDenom = powerMeasQ / 64;
+
+                if (powerMeasQ != 0) {
+                        iCoff = iqCorrMeas / iCoffDenom;
+                        qCoff = powerMeasI / qCoffDenom - 64;
+                        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                                "Chn %d iCoff = 0x%08x\n", i, iCoff);
+                        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                                "Chn %d qCoff = 0x%08x\n", i, qCoff);
+
+                        iCoff = iCoff & 0x3f;
+                        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                                "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
+                        if (iqCorrNeg == 0x0)
+                                iCoff = 0x40 - iCoff;
+
+                        if (qCoff > 15)
+                                qCoff = 15;
+                        else if (qCoff <= -16)
+                                qCoff = -16;
+
+                        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                                "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
+                                i, iCoff, qCoff);
+
+                        REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+                                      AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
+                                      iCoff);
+                        REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
+                                      AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
+                                      qCoff);
+                        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                                "IQ Cal and Correction done for Chain %d\n",
+                                i);
+                }
+        }
+
+        REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
+                    AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
+}
+
+static void
+ath9k_hw_adc_gaincal_calibrate(struct ath_hal *ah, u8 numChains)
+{
+        struct ath_hal_5416 *ahp = AH5416(ah);
+        u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset,
+                qEvenMeasOffset;
+        u32 qGainMismatch, iGainMismatch, val, i;
+
+        for (i = 0; i < numChains; i++) {
+                iOddMeasOffset = ahp->ah_totalAdcIOddPhase[i];
+                iEvenMeasOffset = ahp->ah_totalAdcIEvenPhase[i];
+                qOddMeasOffset = ahp->ah_totalAdcQOddPhase[i];
+                qEvenMeasOffset = ahp->ah_totalAdcQEvenPhase[i];
+
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "Starting ADC Gain Cal for Chain %d\n", i);
+
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
+                        iOddMeasOffset);
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "Chn %d pwr_meas_even_i = 0x%08x\n", i,
+                        iEvenMeasOffset);
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
+                        qOddMeasOffset);
+                DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                        "Chn %d pwr_meas_even_q = 0x%08x\n", i,
+                        qEvenMeasOffset);
+
+                if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
+                        iGainMismatch =
+                                ((iEvenMeasOffset * 32) /
+                                 iOddMeasOffset) & 0x3f;
+                        qGainMismatch =
+                                ((qOddMeasOffset * 32) /
+                                 qEvenMeasOffset) & 0x3f;
+
+                        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                                "Chn %d gain_mismatch_i = 0x%08x\n", i,
+                                iGainMismatch);
+                        DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE,
+                                "Chn %d gain_mismatch_q = 0x%08x\n", i,
+
qGainMismatch); + + val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); + val &= 0xfffff000; + val |= (qGainMismatch) | (iGainMismatch << 6); + REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); + + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "ADC Gain Cal done for Chain %d\n", i); + } + } + + REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0), + REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) | + AR_PHY_NEW_ADC_GAIN_CORR_ENABLE); +} + +static void +ath9k_hw_adc_dccal_calibrate(struct ath_hal *ah, u8 numChains) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + u32 iOddMeasOffset, iEvenMeasOffset, val, i; + int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch; + const struct hal_percal_data *calData = + ahp->ah_cal_list_curr->calData; + u32 numSamples = + (1 << (calData->calCountMax + 5)) * calData->calNumSamples; + + for (i = 0; i < numChains; i++) { + iOddMeasOffset = ahp->ah_totalAdcDcOffsetIOddPhase[i]; + iEvenMeasOffset = ahp->ah_totalAdcDcOffsetIEvenPhase[i]; + qOddMeasOffset = ahp->ah_totalAdcDcOffsetQOddPhase[i]; + qEvenMeasOffset = ahp->ah_totalAdcDcOffsetQEvenPhase[i]; + + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "Starting ADC DC Offset Cal for Chain %d\n", i); + + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_i = %d\n", i, + iOddMeasOffset); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_i = %d\n", i, + iEvenMeasOffset); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_odd_q = %d\n", i, + qOddMeasOffset); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "Chn %d pwr_meas_even_q = %d\n", i, + qEvenMeasOffset); + + iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) / + numSamples) & 0x1ff; + qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) / + numSamples) & 0x1ff; + + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "Chn %d dc_offset_mismatch_i = 0x%08x\n", i, + iDcMismatch); + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "Chn %d dc_offset_mismatch_q = 0x%08x\n", i, + qDcMismatch); + + val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); + val &= 0xc0000fff; + val |= (qDcMismatch << 12) | (iDcMismatch << 21); + REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); + + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "ADC DC Offset Cal done for Chain %d\n", i); + } + + REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0), + REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) | + AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE); +} + +bool ath9k_hw_set_txpowerlimit(struct ath_hal *ah, u32 limit) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_channel *chan = ah->ah_curchan; + + ah->ah_powerLimit = min(limit, (u32) MAX_RATE_POWER); + + if (ath9k_hw_set_txpower(ah, &ahp->ah_eeprom, chan, + ath9k_regd_get_ctl(ah, chan), + ath9k_regd_get_antenna_allowed(ah, + chan), + chan->maxRegTxPower * 2, + min((u32) MAX_RATE_POWER, + (u32) ah->ah_powerLimit)) != 0) + return false; + + return true; +} + +void +ath9k_hw_get_channel_centers(struct ath_hal *ah, + struct ath9k_channel *chan, + struct chan_centers *centers) +{ + int8_t extoff; + struct ath_hal_5416 *ahp = AH5416(ah); + + if (!IS_CHAN_HT40(chan)) { + centers->ctl_center = centers->ext_center = + centers->synth_center = chan->channel; + return; + } + + if ((chan->chanmode == CHANNEL_A_HT40PLUS) || + (chan->chanmode == CHANNEL_G_HT40PLUS)) { + centers->synth_center = + chan->channel + HT40_CHANNEL_CENTER_SHIFT; + extoff = 1; + } else { + centers->synth_center = + chan->channel - HT40_CHANNEL_CENTER_SHIFT; + extoff = -1; + } + + centers->ctl_center = centers->synth_center - (extoff * + HT40_CHANNEL_CENTER_SHIFT); + centers->ext_center = 
centers->synth_center + (extoff * + ((ahp-> + ah_extprotspacing + == + ATH9K_HT_EXTPROTSPACING_20) + ? + HT40_CHANNEL_CENTER_SHIFT + : 15)); + +} + +void +ath9k_hw_reset_calvalid(struct ath_hal *ah, struct ath9k_channel *chan, + bool *isCalDone) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_channel *ichan = + ath9k_regd_check_channel(ah, chan); + struct hal_cal_list *currCal = ahp->ah_cal_list_curr; + + *isCalDone = true; + + if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah)) + return; + + if (currCal == NULL) + return; + + if (ichan == NULL) { + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: invalid channel %u/0x%x; no mapping\n", + __func__, chan->channel, chan->channelFlags); + return; + } + + + if (currCal->calState != CAL_DONE) { + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: Calibration state incorrect, %d\n", + __func__, currCal->calState); + return; + } + + + if (!ath9k_hw_iscal_supported(ah, chan, currCal->calData->calType)) + return; + + DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, + "%s: Resetting Cal %d state for channel %u/0x%x\n", + __func__, currCal->calData->calType, chan->channel, + chan->channelFlags); + + ichan->CalValid &= ~currCal->calData->calType; + currCal->calState = CAL_WAITING; + + *isCalDone = false; +} + +void ath9k_hw_getmac(struct ath_hal *ah, u8 *mac) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + memcpy(mac, ahp->ah_macaddr, ETH_ALEN); +} + +bool ath9k_hw_setmac(struct ath_hal *ah, const u8 *mac) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + memcpy(ahp->ah_macaddr, mac, ETH_ALEN); + return true; +} + +void ath9k_hw_getbssidmask(struct ath_hal *ah, u8 *mask) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + memcpy(mask, ahp->ah_bssidmask, ETH_ALEN); +} + +bool +ath9k_hw_setbssidmask(struct ath_hal *ah, const u8 *mask) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + memcpy(ahp->ah_bssidmask, mask, ETH_ALEN); + + REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(ahp->ah_bssidmask)); + REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(ahp->ah_bssidmask + 4)); + + return true; +} + +#ifdef CONFIG_ATH9K_RFKILL +static void ath9k_enable_rfkill(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, + AR_GPIO_INPUT_EN_VAL_RFSILENT_BB); + + REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2, + AR_GPIO_INPUT_MUX2_RFSILENT); + + ath9k_hw_cfg_gpio_input(ah, ahp->ah_gpioSelect); + REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB); + + if (ahp->ah_gpioBit == ath9k_hw_gpio_get(ah, ahp->ah_gpioSelect)) { + + ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect, + !ahp->ah_gpioBit); + } else { + ath9k_hw_set_gpio_intr(ah, ahp->ah_gpioSelect, + ahp->ah_gpioBit); + } +} +#endif + +void +ath9k_hw_write_associd(struct ath_hal *ah, const u8 *bssid, + u16 assocId) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + memcpy(ahp->ah_bssid, bssid, ETH_ALEN); + ahp->ah_assocId = assocId; + + REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(ahp->ah_bssid)); + REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(ahp->ah_bssid + 4) | + ((assocId & 0x3fff) << AR_BSS_ID1_AID_S)); +} + +u64 ath9k_hw_gettsf64(struct ath_hal *ah) +{ + u64 tsf; + + tsf = REG_READ(ah, AR_TSF_U32); + tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32); + return tsf; +} + +void ath9k_hw_reset_tsf(struct ath_hal *ah) +{ + int count; + + count = 0; + while (REG_READ(ah, AR_SLP32_MODE) & AR_SLP32_TSF_WRITE_STATUS) { + count++; + if (count > 10) { + DPRINTF(ah->ah_sc, ATH_DBG_RESET, + "%s: AR_SLP32_TSF_WRITE_STATUS limit exceeded\n", + __func__); + break; + } + udelay(10); + } + REG_WRITE(ah, AR_RESET_TSF, 
AR_RESET_TSF_ONCE); +} + +u32 ath9k_hw_getdefantenna(struct ath_hal *ah) +{ + return REG_READ(ah, AR_DEF_ANTENNA) & 0x7; +} + +void ath9k_hw_setantenna(struct ath_hal *ah, u32 antenna) +{ + REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); +} + +bool +ath9k_hw_setantennaswitch(struct ath_hal *ah, + enum ath9k_ant_setting settings, + struct ath9k_channel *chan, + u8 *tx_chainmask, + u8 *rx_chainmask, + u8 *antenna_cfgd) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + static u8 tx_chainmask_cfg, rx_chainmask_cfg; + + if (AR_SREV_9280(ah)) { + if (!tx_chainmask_cfg) { + + tx_chainmask_cfg = *tx_chainmask; + rx_chainmask_cfg = *rx_chainmask; + } + + switch (settings) { + case ATH9K_ANT_FIXED_A: + *tx_chainmask = ATH9K_ANTENNA0_CHAINMASK; + *rx_chainmask = ATH9K_ANTENNA0_CHAINMASK; + *antenna_cfgd = true; + break; + case ATH9K_ANT_FIXED_B: + if (ah->ah_caps.tx_chainmask > + ATH9K_ANTENNA1_CHAINMASK) { + *tx_chainmask = ATH9K_ANTENNA1_CHAINMASK; + } + *rx_chainmask = ATH9K_ANTENNA1_CHAINMASK; + *antenna_cfgd = true; + break; + case ATH9K_ANT_VARIABLE: + *tx_chainmask = tx_chainmask_cfg; + *rx_chainmask = rx_chainmask_cfg; + *antenna_cfgd = true; + break; + default: + break; + } + } else { + ahp->ah_diversityControl = settings; + } + + return true; +} + +void ath9k_hw_setopmode(struct ath_hal *ah) +{ + ath9k_hw_set_operating_mode(ah, ah->ah_opmode); +} + +bool +ath9k_hw_getcapability(struct ath_hal *ah, enum ath9k_capability_type type, + u32 capability, u32 *result) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + const struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + + switch (type) { + case ATH9K_CAP_CIPHER: + switch (capability) { + case ATH9K_CIPHER_AES_CCM: + case ATH9K_CIPHER_AES_OCB: + case ATH9K_CIPHER_TKIP: + case ATH9K_CIPHER_WEP: + case ATH9K_CIPHER_MIC: + case ATH9K_CIPHER_CLR: + return true; + default: + return false; + } + case ATH9K_CAP_TKIP_MIC: + switch (capability) { + case 0: + return true; + case 1: + return (ahp->ah_staId1Defaults & + AR_STA_ID1_CRPT_MIC_ENABLE) ? true : + false; + } + case ATH9K_CAP_TKIP_SPLIT: + return (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) ? + false : true; + case ATH9K_CAP_WME_TKIPMIC: + return 0; + case ATH9K_CAP_PHYCOUNTERS: + return ahp->ah_hasHwPhyCounters ? 0 : -ENXIO; + case ATH9K_CAP_DIVERSITY: + return (REG_READ(ah, AR_PHY_CCK_DETECT) & + AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ? + true : false; + case ATH9K_CAP_PHYDIAG: + return true; + case ATH9K_CAP_MCAST_KEYSRCH: + switch (capability) { + case 0: + return true; + case 1: + if (REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_ADHOC) { + return false; + } else { + return (ahp->ah_staId1Defaults & + AR_STA_ID1_MCAST_KSRCH) ? true : + false; + } + } + return false; + case ATH9K_CAP_TSF_ADJUST: + return (ahp->ah_miscMode & AR_PCU_TX_ADD_TSF) ? 
+ true : false; + case ATH9K_CAP_RFSILENT: + if (capability == 3) + return false; + case ATH9K_CAP_ANT_CFG_2GHZ: + *result = pCap->num_antcfg_2ghz; + return true; + case ATH9K_CAP_ANT_CFG_5GHZ: + *result = pCap->num_antcfg_5ghz; + return true; + case ATH9K_CAP_TXPOW: + switch (capability) { + case 0: + return 0; + case 1: + *result = ah->ah_powerLimit; + return 0; + case 2: + *result = ah->ah_maxPowerLevel; + return 0; + case 3: + *result = ah->ah_tpScale; + return 0; + } + return false; + default: + return false; + } +} + +int +ath9k_hw_select_antconfig(struct ath_hal *ah, u32 cfg) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_channel *chan = ah->ah_curchan; + const struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + u16 ant_config; + u32 halNumAntConfig; + + halNumAntConfig = + IS_CHAN_2GHZ(chan) ? pCap->num_antcfg_2ghz : pCap-> + num_antcfg_5ghz; + + if (cfg < halNumAntConfig) { + if (!ath9k_hw_get_eeprom_antenna_cfg(ahp, chan, + cfg, &ant_config)) { + REG_WRITE(ah, AR_PHY_SWITCH_COM, ant_config); + return 0; + } + } + + return -EINVAL; +} + +bool ath9k_hw_intrpend(struct ath_hal *ah) +{ + u32 host_isr; + + if (AR_SREV_9100(ah)) + return true; + + host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE); + if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS)) + return true; + + host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE); + if ((host_isr & AR_INTR_SYNC_DEFAULT) + && (host_isr != AR_INTR_SPURIOUS)) + return true; + + return false; +} + +bool ath9k_hw_getisr(struct ath_hal *ah, enum ath9k_int *masked) +{ + u32 isr = 0; + u32 mask2 = 0; + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + u32 sync_cause = 0; + bool fatal_int = false; + + if (!AR_SREV_9100(ah)) { + if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { + if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) + == AR_RTC_STATUS_ON) { + isr = REG_READ(ah, AR_ISR); + } + } + + sync_cause = + REG_READ(ah, + AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT; + + *masked = 0; + + if (!isr && !sync_cause) + return false; + } else { + *masked = 0; + isr = REG_READ(ah, AR_ISR); + } + + if (isr) { + struct ath_hal_5416 *ahp = AH5416(ah); + + if (isr & AR_ISR_BCNMISC) { + u32 isr2; + isr2 = REG_READ(ah, AR_ISR_S2); + if (isr2 & AR_ISR_S2_TIM) + mask2 |= ATH9K_INT_TIM; + if (isr2 & AR_ISR_S2_DTIM) + mask2 |= ATH9K_INT_DTIM; + if (isr2 & AR_ISR_S2_DTIMSYNC) + mask2 |= ATH9K_INT_DTIMSYNC; + if (isr2 & (AR_ISR_S2_CABEND)) + mask2 |= ATH9K_INT_CABEND; + if (isr2 & AR_ISR_S2_GTT) + mask2 |= ATH9K_INT_GTT; + if (isr2 & AR_ISR_S2_CST) + mask2 |= ATH9K_INT_CST; + } + + isr = REG_READ(ah, AR_ISR_RAC); + if (isr == 0xffffffff) { + *masked = 0; + return false; + } + + *masked = isr & ATH9K_INT_COMMON; + + if (ahp->ah_intrMitigation) { + + if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) + *masked |= ATH9K_INT_RX; + } + + if (isr & (AR_ISR_RXOK | AR_ISR_RXERR)) + *masked |= ATH9K_INT_RX; + if (isr & + (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR | + AR_ISR_TXEOL)) { + u32 s0_s, s1_s; + + *masked |= ATH9K_INT_TX; + + s0_s = REG_READ(ah, AR_ISR_S0_S); + ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); + ahp->ah_intrTxqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); + + s1_s = REG_READ(ah, AR_ISR_S1_S); + ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); + ahp->ah_intrTxqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); + } + + if (isr & AR_ISR_RXORN) { + DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, + "%s: receive FIFO overrun interrupt\n", + __func__); + } + + if (!AR_SREV_9100(ah)) { + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + u32 isr5 = REG_READ(ah, 
AR_ISR_S5_S); + if (isr5 & AR_ISR_S5_TIM_TIMER) + *masked |= ATH9K_INT_TIM_TIMER; + } + } + + *masked |= mask2; + } + if (AR_SREV_9100(ah)) + return true; + if (sync_cause) { + fatal_int = + (sync_cause & + (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR)) + ? true : false; + + if (fatal_int) { + if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { + DPRINTF(ah->ah_sc, ATH_DBG_ANY, + "%s: received PCI FATAL interrupt\n", + __func__); + } + if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { + DPRINTF(ah->ah_sc, ATH_DBG_ANY, + "%s: received PCI PERR interrupt\n", + __func__); + } + } + if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { + DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, + "%s: AR_INTR_SYNC_RADM_CPL_TIMEOUT\n", + __func__); + REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); + REG_WRITE(ah, AR_RC, 0); + *masked |= ATH9K_INT_FATAL; + } + if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { + DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, + "%s: AR_INTR_SYNC_LOCAL_TIMEOUT\n", + __func__); + } + + REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); + (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); + } + return true; +} + +enum ath9k_int ath9k_hw_intrget(struct ath_hal *ah) +{ + return AH5416(ah)->ah_maskReg; +} + +enum ath9k_int ath9k_hw_set_interrupts(struct ath_hal *ah, enum ath9k_int ints) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + u32 omask = ahp->ah_maskReg; + u32 mask, mask2; + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + + DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: 0x%x => 0x%x\n", __func__, + omask, ints); + + if (omask & ATH9K_INT_GLOBAL) { + DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: disable IER\n", + __func__); + REG_WRITE(ah, AR_IER, AR_IER_DISABLE); + (void) REG_READ(ah, AR_IER); + if (!AR_SREV_9100(ah)) { + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0); + (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE); + + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); + (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); + } + } + + mask = ints & ATH9K_INT_COMMON; + mask2 = 0; + + if (ints & ATH9K_INT_TX) { + if (ahp->ah_txOkInterruptMask) + mask |= AR_IMR_TXOK; + if (ahp->ah_txDescInterruptMask) + mask |= AR_IMR_TXDESC; + if (ahp->ah_txErrInterruptMask) + mask |= AR_IMR_TXERR; + if (ahp->ah_txEolInterruptMask) + mask |= AR_IMR_TXEOL; + } + if (ints & ATH9K_INT_RX) { + mask |= AR_IMR_RXERR; + if (ahp->ah_intrMitigation) + mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM; + else + mask |= AR_IMR_RXOK | AR_IMR_RXDESC; + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) + mask |= AR_IMR_GENTMR; + } + + if (ints & (ATH9K_INT_BMISC)) { + mask |= AR_IMR_BCNMISC; + if (ints & ATH9K_INT_TIM) + mask2 |= AR_IMR_S2_TIM; + if (ints & ATH9K_INT_DTIM) + mask2 |= AR_IMR_S2_DTIM; + if (ints & ATH9K_INT_DTIMSYNC) + mask2 |= AR_IMR_S2_DTIMSYNC; + if (ints & ATH9K_INT_CABEND) + mask2 |= (AR_IMR_S2_CABEND); + } + + if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) { + mask |= AR_IMR_BCNMISC; + if (ints & ATH9K_INT_GTT) + mask2 |= AR_IMR_S2_GTT; + if (ints & ATH9K_INT_CST) + mask2 |= AR_IMR_S2_CST; + } + + DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: new IMR 0x%x\n", __func__, + mask); + REG_WRITE(ah, AR_IMR, mask); + mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM | + AR_IMR_S2_DTIM | + AR_IMR_S2_DTIMSYNC | + AR_IMR_S2_CABEND | + AR_IMR_S2_CABTO | + AR_IMR_S2_TSFOOR | + AR_IMR_S2_GTT | AR_IMR_S2_CST); + REG_WRITE(ah, AR_IMR_S2, mask | mask2); + ahp->ah_maskReg = ints; + + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { + if (ints & ATH9K_INT_TIM_TIMER) + REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); + else + REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); + } + + if (ints & 
ATH9K_INT_GLOBAL) { + DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "%s: enable IER\n", + __func__); + REG_WRITE(ah, AR_IER, AR_IER_ENABLE); + if (!AR_SREV_9100(ah)) { + REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, + AR_INTR_MAC_IRQ); + REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ); + + + REG_WRITE(ah, AR_INTR_SYNC_ENABLE, + AR_INTR_SYNC_DEFAULT); + REG_WRITE(ah, AR_INTR_SYNC_MASK, + AR_INTR_SYNC_DEFAULT); + } + DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", + REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); + } + + return omask; +} + +void +ath9k_hw_beaconinit(struct ath_hal *ah, + u32 next_beacon, u32 beacon_period) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + int flags = 0; + + ahp->ah_beaconInterval = beacon_period; + + switch (ah->ah_opmode) { + case ATH9K_M_STA: + case ATH9K_M_MONITOR: + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); + REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); + REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); + flags |= AR_TBTT_TIMER_EN; + break; + case ATH9K_M_IBSS: + REG_SET_BIT(ah, AR_TXCFG, + AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); + REG_WRITE(ah, AR_NEXT_NDP_TIMER, + TU_TO_USEC(next_beacon + + (ahp->ah_atimWindow ? ahp-> + ah_atimWindow : 1))); + flags |= AR_NDP_TIMER_EN; + case ATH9K_M_HOSTAP: + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); + REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, + TU_TO_USEC(next_beacon - + ah->ah_config. + dma_beacon_response_time)); + REG_WRITE(ah, AR_NEXT_SWBA, + TU_TO_USEC(next_beacon - + ah->ah_config. + sw_beacon_response_time)); + flags |= + AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; + break; + } + + REG_WRITE(ah, AR_BEACON_PERIOD, TU_TO_USEC(beacon_period)); + REG_WRITE(ah, AR_DMA_BEACON_PERIOD, TU_TO_USEC(beacon_period)); + REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period)); + REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period)); + + beacon_period &= ~ATH9K_BEACON_ENA; + if (beacon_period & ATH9K_BEACON_RESET_TSF) { + beacon_period &= ~ATH9K_BEACON_RESET_TSF; + ath9k_hw_reset_tsf(ah); + } + + REG_SET_BIT(ah, AR_TIMER_MODE, flags); +} + +void +ath9k_hw_set_sta_beacon_timers(struct ath_hal *ah, + const struct ath9k_beacon_state *bs) +{ + u32 nextTbtt, beaconintval, dtimperiod, beacontimeout; + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt)); + + REG_WRITE(ah, AR_BEACON_PERIOD, + TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD)); + REG_WRITE(ah, AR_DMA_BEACON_PERIOD, + TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD)); + + REG_RMW_FIELD(ah, AR_RSSI_THR, + AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold); + + beaconintval = bs->bs_intval & ATH9K_BEACON_PERIOD; + + if (bs->bs_sleepduration > beaconintval) + beaconintval = bs->bs_sleepduration; + + dtimperiod = bs->bs_dtimperiod; + if (bs->bs_sleepduration > dtimperiod) + dtimperiod = bs->bs_sleepduration; + + if (beaconintval == dtimperiod) + nextTbtt = bs->bs_nextdtim; + else + nextTbtt = bs->bs_nexttbtt; + + DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next DTIM %d\n", __func__, + bs->bs_nextdtim); + DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: next beacon %d\n", __func__, + nextTbtt); + DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: beacon period %d\n", __func__, + beaconintval); + DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "%s: DTIM period %d\n", __func__, + dtimperiod); + + REG_WRITE(ah, AR_NEXT_DTIM, + TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP)); + REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP)); + + REG_WRITE(ah, AR_SLEEP1, + SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT) + | 
AR_SLEEP1_ASSUME_DTIM); + + if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP) + beacontimeout = (BEACON_TIMEOUT_VAL << 3); + else + beacontimeout = MIN_BEACON_TIMEOUT_VAL; + + REG_WRITE(ah, AR_SLEEP2, + SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT)); + + REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval)); + REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod)); + + REG_SET_BIT(ah, AR_TIMER_MODE, + AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN | + AR_DTIM_TIMER_EN); + +} + +bool ath9k_hw_keyisvalid(struct ath_hal *ah, u16 entry) +{ + if (entry < ah->ah_caps.keycache_size) { + u32 val = REG_READ(ah, AR_KEYTABLE_MAC1(entry)); + if (val & AR_KEYTABLE_VALID) + return true; + } + return false; +} + +bool ath9k_hw_keyreset(struct ath_hal *ah, u16 entry) +{ + u32 keyType; + + if (entry >= ah->ah_caps.keycache_size) { + DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, + "%s: entry %u out of range\n", __func__, entry); + return false; + } + keyType = REG_READ(ah, AR_KEYTABLE_TYPE(entry)); + + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), AR_KEYTABLE_TYPE_CLR); + REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), 0); + REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), 0); + + if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) { + u16 micentry = entry + 64; + + REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); + + } + + if (ah->ah_curchan == NULL) + return true; + + return true; +} + +bool +ath9k_hw_keysetmac(struct ath_hal *ah, u16 entry, + const u8 *mac) +{ + u32 macHi, macLo; + + if (entry >= ah->ah_caps.keycache_size) { + DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, + "%s: entry %u out of range\n", __func__, entry); + return false; + } + + if (mac != NULL) { + macHi = (mac[5] << 8) | mac[4]; + macLo = (mac[3] << 24) | (mac[2] << 16) + | (mac[1] << 8) | mac[0]; + macLo >>= 1; + macLo |= (macHi & 1) << 31; + macHi >>= 1; + } else { + macLo = macHi = 0; + } + REG_WRITE(ah, AR_KEYTABLE_MAC0(entry), macLo); + REG_WRITE(ah, AR_KEYTABLE_MAC1(entry), macHi | AR_KEYTABLE_VALID); + + return true; +} + +bool +ath9k_hw_set_keycache_entry(struct ath_hal *ah, u16 entry, + const struct ath9k_keyval *k, + const u8 *mac, int xorKey) +{ + const struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + u32 key0, key1, key2, key3, key4; + u32 keyType; + u32 xorMask = xorKey ? 
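/*
 * Editorial sketch (not part of the patch): the MAC packing in
 * ath9k_hw_keysetmac() above stores the 48-bit station address shifted
 * right by one bit across the two key-table MAC registers.  With a
 * hypothetical address 00:01:02:03:04:05 (mac[0]=0x00 ... mac[5]=0x05):
 *
 *	macHi = (0x05 << 8) | 0x04 = 0x0504
 *	macLo = 0x03020100
 *	after the shift: macLo = 0x01810080, macHi = 0x0282
 *
 * and AR_KEYTABLE_MAC1 additionally carries AR_KEYTABLE_VALID so the
 * entry takes part in address matching.
 */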
+ (ATH9K_KEY_XOR << 24 | ATH9K_KEY_XOR << 16 | ATH9K_KEY_XOR << 8 + | ATH9K_KEY_XOR) : 0; + struct ath_hal_5416 *ahp = AH5416(ah); + + if (entry >= pCap->keycache_size) { + DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, + "%s: entry %u out of range\n", __func__, entry); + return false; + } + switch (k->kv_type) { + case ATH9K_CIPHER_AES_OCB: + keyType = AR_KEYTABLE_TYPE_AES; + break; + case ATH9K_CIPHER_AES_CCM: + if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) { + DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, + "%s: AES-CCM not supported by " + "mac rev 0x%x\n", __func__, + ah->ah_macRev); + return false; + } + keyType = AR_KEYTABLE_TYPE_CCM; + break; + case ATH9K_CIPHER_TKIP: + keyType = AR_KEYTABLE_TYPE_TKIP; + if (ATH9K_IS_MIC_ENABLED(ah) + && entry + 64 >= pCap->keycache_size) { + DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, + "%s: entry %u inappropriate for TKIP\n", + __func__, entry); + return false; + } + break; + case ATH9K_CIPHER_WEP: + if (k->kv_len < 40 / NBBY) { + DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, + "%s: WEP key length %u too small\n", + __func__, k->kv_len); + return false; + } + if (k->kv_len <= 40 / NBBY) + keyType = AR_KEYTABLE_TYPE_40; + else if (k->kv_len <= 104 / NBBY) + keyType = AR_KEYTABLE_TYPE_104; + else + keyType = AR_KEYTABLE_TYPE_128; + break; + case ATH9K_CIPHER_CLR: + keyType = AR_KEYTABLE_TYPE_CLR; + break; + default: + DPRINTF(ah->ah_sc, ATH_DBG_KEYCACHE, + "%s: cipher %u not supported\n", __func__, + k->kv_type); + return false; + } + + key0 = get_unaligned_le32(k->kv_val + 0) ^ xorMask; + key1 = (get_unaligned_le16(k->kv_val + 4) ^ xorMask) & 0xffff; + key2 = get_unaligned_le32(k->kv_val + 6) ^ xorMask; + key3 = (get_unaligned_le16(k->kv_val + 10) ^ xorMask) & 0xffff; + key4 = get_unaligned_le32(k->kv_val + 12) ^ xorMask; + if (k->kv_len <= 104 / NBBY) + key4 &= 0xff; + + if (keyType == AR_KEYTABLE_TYPE_TKIP && ATH9K_IS_MIC_ENABLED(ah)) { + u16 micentry = entry + 64; + + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), ~key0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), ~key1); + REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); + REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); + REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); + (void) ath9k_hw_keysetmac(ah, entry, mac); + + if (ahp->ah_miscMode & AR_PCU_MIC_NEW_LOC_ENA) { + u32 mic0, mic1, mic2, mic3, mic4; + + mic0 = get_unaligned_le32(k->kv_mic + 0); + mic2 = get_unaligned_le32(k->kv_mic + 4); + mic1 = get_unaligned_le16(k->kv_txmic + 2) & 0xffff; + mic3 = get_unaligned_le16(k->kv_txmic + 0) & 0xffff; + mic4 = get_unaligned_le32(k->kv_txmic + 4); + REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), mic1); + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), mic3); + REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), mic4); + REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), + AR_KEYTABLE_TYPE_CLR); + + } else { + u32 mic0, mic2; + + mic0 = get_unaligned_le32(k->kv_mic + 0); + mic2 = get_unaligned_le32(k->kv_mic + 4); + REG_WRITE(ah, AR_KEYTABLE_KEY0(micentry), mic0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), mic2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_TYPE(micentry), + AR_KEYTABLE_TYPE_CLR); + } + REG_WRITE(ah, AR_KEYTABLE_MAC0(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_MAC1(micentry), 0); + REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); + } else { 
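/*
 * Editorial note (not part of the patch): the key material extracted
 * above is not written as four 32-bit words; the key cache splits a
 * 128-bit key into a 32/16/32/16/32-bit pattern, so the 16 key bytes map
 * onto the five AR_KEYTABLE_KEY0..KEY4 registers like this:
 *
 *	key0 = bytes  0..3   (32 bits)
 *	key1 = bytes  4..5   (16 bits, upper half unused)
 *	key2 = bytes  6..9   (32 bits)
 *	key3 = bytes 10..11  (16 bits, upper half unused)
 *	key4 = bytes 12..15  (32 bits, trimmed to 8 bits for 104-bit WEP)
 *
 * all little-endian, each word optionally XOR-masked when xorKey is set.
 */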
+ REG_WRITE(ah, AR_KEYTABLE_KEY0(entry), key0); + REG_WRITE(ah, AR_KEYTABLE_KEY1(entry), key1); + REG_WRITE(ah, AR_KEYTABLE_KEY2(entry), key2); + REG_WRITE(ah, AR_KEYTABLE_KEY3(entry), key3); + REG_WRITE(ah, AR_KEYTABLE_KEY4(entry), key4); + REG_WRITE(ah, AR_KEYTABLE_TYPE(entry), keyType); + + (void) ath9k_hw_keysetmac(ah, entry, mac); + } + + if (ah->ah_curchan == NULL) + return true; + + return true; +} + +bool +ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + u32 txcfg, curLevel, newLevel; + enum ath9k_int omask; + + if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD) + return false; + + omask = ath9k_hw_set_interrupts(ah, + ahp->ah_maskReg & ~ATH9K_INT_GLOBAL); + + txcfg = REG_READ(ah, AR_TXCFG); + curLevel = MS(txcfg, AR_FTRIG); + newLevel = curLevel; + if (bIncTrigLevel) { + if (curLevel < MAX_TX_FIFO_THRESHOLD) + newLevel++; + } else if (curLevel > MIN_TX_FIFO_THRESHOLD) + newLevel--; + if (newLevel != curLevel) + REG_WRITE(ah, AR_TXCFG, + (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG)); + + ath9k_hw_set_interrupts(ah, omask); + + ah->ah_txTrigLevel = newLevel; + + return newLevel != curLevel; +} + +bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q, + const struct ath9k_tx_queue_info *qinfo) +{ + u32 cw; + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + struct ath9k_tx_queue_info *qi; + + if (q >= pCap->total_queues) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n", + __func__, q); + return false; + } + + qi = &ahp->ah_txq[q]; + if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n", + __func__); + return false; + } + + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %p\n", __func__, qi); + + qi->tqi_ver = qinfo->tqi_ver; + qi->tqi_subtype = qinfo->tqi_subtype; + qi->tqi_qflags = qinfo->tqi_qflags; + qi->tqi_priority = qinfo->tqi_priority; + if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT) + qi->tqi_aifs = min(qinfo->tqi_aifs, 255U); + else + qi->tqi_aifs = INIT_AIFS; + if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) { + cw = min(qinfo->tqi_cwmin, 1024U); + qi->tqi_cwmin = 1; + while (qi->tqi_cwmin < cw) + qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1; + } else + qi->tqi_cwmin = qinfo->tqi_cwmin; + if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) { + cw = min(qinfo->tqi_cwmax, 1024U); + qi->tqi_cwmax = 1; + while (qi->tqi_cwmax < cw) + qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1; + } else + qi->tqi_cwmax = INIT_CWMAX; + + if (qinfo->tqi_shretry != 0) + qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U); + else + qi->tqi_shretry = INIT_SH_RETRY; + if (qinfo->tqi_lgretry != 0) + qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U); + else + qi->tqi_lgretry = INIT_LG_RETRY; + qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod; + qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit; + qi->tqi_burstTime = qinfo->tqi_burstTime; + qi->tqi_readyTime = qinfo->tqi_readyTime; + + switch (qinfo->tqi_subtype) { + case ATH9K_WME_UPSD: + if (qi->tqi_type == ATH9K_TX_QUEUE_DATA) + qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS; + break; + default: + break; + } + return true; +} + +bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q, + struct ath9k_tx_queue_info *qinfo) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + struct ath9k_tx_queue_info *qi; + + if (q >= pCap->total_queues) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n", + __func__, q); + return false; + } + + qi = 
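/*
 * Editorial note (not part of the patch): the cwmin/cwmax handling in
 * ath9k_hw_set_txq_props() above rounds the requested value up to the
 * next (2^n - 1), the only shape the contention-window hardware fields
 * accept.  For a hypothetical request of cwmin = 10:
 *
 *	1 -> 3 -> 7 -> 15	(loop: cw = (cw << 1) | 1)
 *
 * so 15 is programmed rather than 10; requests are also clamped to 1024
 * before rounding.
 */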
&ahp->ah_txq[q]; + if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n", + __func__); + return false; + } + + qinfo->tqi_qflags = qi->tqi_qflags; + qinfo->tqi_ver = qi->tqi_ver; + qinfo->tqi_subtype = qi->tqi_subtype; + qinfo->tqi_qflags = qi->tqi_qflags; + qinfo->tqi_priority = qi->tqi_priority; + qinfo->tqi_aifs = qi->tqi_aifs; + qinfo->tqi_cwmin = qi->tqi_cwmin; + qinfo->tqi_cwmax = qi->tqi_cwmax; + qinfo->tqi_shretry = qi->tqi_shretry; + qinfo->tqi_lgretry = qi->tqi_lgretry; + qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod; + qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit; + qinfo->tqi_burstTime = qi->tqi_burstTime; + qinfo->tqi_readyTime = qi->tqi_readyTime; + + return true; +} + +int +ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type, + const struct ath9k_tx_queue_info *qinfo) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_tx_queue_info *qi; + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + int q; + + switch (type) { + case ATH9K_TX_QUEUE_BEACON: + q = pCap->total_queues - 1; + break; + case ATH9K_TX_QUEUE_CAB: + q = pCap->total_queues - 2; + break; + case ATH9K_TX_QUEUE_PSPOLL: + q = 1; + break; + case ATH9K_TX_QUEUE_UAPSD: + q = pCap->total_queues - 3; + break; + case ATH9K_TX_QUEUE_DATA: + for (q = 0; q < pCap->total_queues; q++) + if (ahp->ah_txq[q].tqi_type == + ATH9K_TX_QUEUE_INACTIVE) + break; + if (q == pCap->total_queues) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, + "%s: no available tx queue\n", __func__); + return -1; + } + break; + default: + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: bad tx queue type %u\n", + __func__, type); + return -1; + } + + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q); + + qi = &ahp->ah_txq[q]; + if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, + "%s: tx queue %u already active\n", __func__, q); + return -1; + } + memset(qi, 0, sizeof(struct ath9k_tx_queue_info)); + qi->tqi_type = type; + if (qinfo == NULL) { + qi->tqi_qflags = + TXQ_FLAG_TXOKINT_ENABLE + | TXQ_FLAG_TXERRINT_ENABLE + | TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE; + qi->tqi_aifs = INIT_AIFS; + qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT; + qi->tqi_cwmax = INIT_CWMAX; + qi->tqi_shretry = INIT_SH_RETRY; + qi->tqi_lgretry = INIT_LG_RETRY; + qi->tqi_physCompBuf = 0; + } else { + qi->tqi_physCompBuf = qinfo->tqi_physCompBuf; + (void) ath9k_hw_set_txq_props(ah, q, qinfo); + } + + return q; +} + +static void +ath9k_hw_set_txq_interrupts(struct ath_hal *ah, + struct ath9k_tx_queue_info *qi) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, + "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", + __func__, ahp->ah_txOkInterruptMask, + ahp->ah_txErrInterruptMask, ahp->ah_txDescInterruptMask, + ahp->ah_txEolInterruptMask, ahp->ah_txUrnInterruptMask); + + REG_WRITE(ah, AR_IMR_S0, + SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK) + | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)); + REG_WRITE(ah, AR_IMR_S1, + SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR) + | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)); + REG_RMW_FIELD(ah, AR_IMR_S2, + AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask); +} + +bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + struct ath9k_tx_queue_info *qi; + + if (q >= pCap->total_queues) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n", + __func__, q); + return 
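/*
 * Editorial sketch (not part of the patch): ath9k_hw_setuptxqueue()
 * above hands out fixed hardware queue numbers for the special queue
 * types and the first inactive slot for data.  Assuming the usual
 * pCap->total_queues of 10, the mapping works out as:
 *
 *	ATH9K_TX_QUEUE_BEACON -> q 9   (total_queues - 1)
 *	ATH9K_TX_QUEUE_CAB    -> q 8   (total_queues - 2)
 *	ATH9K_TX_QUEUE_UAPSD  -> q 7   (total_queues - 3)
 *	ATH9K_TX_QUEUE_PSPOLL -> q 1
 *	ATH9K_TX_QUEUE_DATA   -> first q whose tqi_type is INACTIVE
 *
 * The value 10 is an assumption (ATH9K_NUM_QUEUES in hw.h is 10); the
 * function itself relies only on pCap->total_queues.
 */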
false; + } + qi = &ahp->ah_txq[q]; + if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n", + __func__, q); + return false; + } + + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: release queue %u\n", + __func__, q); + + qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; + ahp->ah_txOkInterruptMask &= ~(1 << q); + ahp->ah_txErrInterruptMask &= ~(1 << q); + ahp->ah_txDescInterruptMask &= ~(1 << q); + ahp->ah_txEolInterruptMask &= ~(1 << q); + ahp->ah_txUrnInterruptMask &= ~(1 << q); + ath9k_hw_set_txq_interrupts(ah, qi); + + return true; +} + +bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + struct ath9k_channel *chan = ah->ah_curchan; + struct ath9k_tx_queue_info *qi; + u32 cwMin, chanCwMin, value; + + if (q >= pCap->total_queues) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n", + __func__, q); + return false; + } + qi = &ahp->ah_txq[q]; + if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n", + __func__, q); + return true; + } + + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: reset queue %u\n", __func__, q); + + if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { + if (chan && IS_CHAN_B(chan)) + chanCwMin = INIT_CWMIN_11B; + else + chanCwMin = INIT_CWMIN; + + for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1); + } else + cwMin = qi->tqi_cwmin; + + REG_WRITE(ah, AR_DLCL_IFS(q), SM(cwMin, AR_D_LCL_IFS_CWMIN) + | SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) + | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS)); + + REG_WRITE(ah, AR_DRETRY_LIMIT(q), + SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) + | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) + | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH) + ); + + REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); + REG_WRITE(ah, AR_DMISC(q), + AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2); + + if (qi->tqi_cbrPeriod) { + REG_WRITE(ah, AR_QCBRCFG(q), + SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) + | SM(qi->tqi_cbrOverflowLimit, + AR_Q_CBRCFG_OVF_THRESH)); + REG_WRITE(ah, AR_QMISC(q), + REG_READ(ah, + AR_QMISC(q)) | AR_Q_MISC_FSP_CBR | (qi-> + tqi_cbrOverflowLimit + ? + AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN + : + 0)); + } + if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) { + REG_WRITE(ah, AR_QRDYTIMECFG(q), + SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) | + AR_Q_RDYTIMECFG_EN); + } + + REG_WRITE(ah, AR_DCHNTIME(q), + SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) | + (qi->tqi_burstTime ? 
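/*
 * Editorial note (not part of the patch): the ah_tx*InterruptMask
 * members cleared above are plain per-queue bitmaps (bit q corresponds
 * to hardware queue q).  ath9k_hw_set_txq_interrupts() repacks them into
 * the secondary interrupt mask registers, so releasing a queue simply
 * drops its bit from each map and rewrites AR_IMR_S0/S1/S2.  For
 * example, with TXOK previously enabled on queues 0, 3 and 8:
 *
 *	before: ah_txOkInterruptMask == 0x109
 *	after releasing q 3: 0x109 & ~(1 << 3) == 0x101
 */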
AR_D_CHNTIME_EN : 0)); + + if (qi->tqi_burstTime + && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) { + REG_WRITE(ah, AR_QMISC(q), + REG_READ(ah, + AR_QMISC(q)) | + AR_Q_MISC_RDYTIME_EXP_POLICY); + + } + + if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) { + REG_WRITE(ah, AR_DMISC(q), + REG_READ(ah, AR_DMISC(q)) | + AR_D_MISC_POST_FR_BKOFF_DIS); + } + if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) { + REG_WRITE(ah, AR_DMISC(q), + REG_READ(ah, AR_DMISC(q)) | + AR_D_MISC_FRAG_BKOFF_EN); + } + switch (qi->tqi_type) { + case ATH9K_TX_QUEUE_BEACON: + REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) + | AR_Q_MISC_FSP_DBA_GATED + | AR_Q_MISC_BEACON_USE + | AR_Q_MISC_CBR_INCR_DIS1); + + REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) + | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << + AR_D_MISC_ARB_LOCKOUT_CNTRL_S) + | AR_D_MISC_BEACON_USE + | AR_D_MISC_POST_FR_BKOFF_DIS); + break; + case ATH9K_TX_QUEUE_CAB: + REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) + | AR_Q_MISC_FSP_DBA_GATED + | AR_Q_MISC_CBR_INCR_DIS1 + | AR_Q_MISC_CBR_INCR_DIS0); + value = (qi->tqi_readyTime + - (ah->ah_config.sw_beacon_response_time - + ah->ah_config.dma_beacon_response_time) + - + ah->ah_config.additional_swba_backoff) * + 1024; + REG_WRITE(ah, AR_QRDYTIMECFG(q), + value | AR_Q_RDYTIMECFG_EN); + REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) + | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << + AR_D_MISC_ARB_LOCKOUT_CNTRL_S)); + break; + case ATH9K_TX_QUEUE_PSPOLL: + REG_WRITE(ah, AR_QMISC(q), + REG_READ(ah, + AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1); + break; + case ATH9K_TX_QUEUE_UAPSD: + REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) + | AR_D_MISC_POST_FR_BKOFF_DIS); + break; + default: + break; + } + + if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) { + REG_WRITE(ah, AR_DMISC(q), + REG_READ(ah, AR_DMISC(q)) | + SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL, + AR_D_MISC_ARB_LOCKOUT_CNTRL) | + AR_D_MISC_POST_FR_BKOFF_DIS); + } + + if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE) + ahp->ah_txOkInterruptMask |= 1 << q; + else + ahp->ah_txOkInterruptMask &= ~(1 << q); + if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE) + ahp->ah_txErrInterruptMask |= 1 << q; + else + ahp->ah_txErrInterruptMask &= ~(1 << q); + if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE) + ahp->ah_txDescInterruptMask |= 1 << q; + else + ahp->ah_txDescInterruptMask &= ~(1 << q); + if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE) + ahp->ah_txEolInterruptMask |= 1 << q; + else + ahp->ah_txEolInterruptMask &= ~(1 << q); + if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE) + ahp->ah_txUrnInterruptMask |= 1 << q; + else + ahp->ah_txUrnInterruptMask &= ~(1 << q); + ath9k_hw_set_txq_interrupts(ah, qi); + + return true; +} + +void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + *txqs &= ahp->ah_intrTxqs; + ahp->ah_intrTxqs &= ~(*txqs); +} + +bool +ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds, + u32 segLen, bool firstSeg, + bool lastSeg, const struct ath_desc *ds0) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + if (firstSeg) { + ads->ds_ctl1 |= segLen | (lastSeg ? 
0 : AR_TxMore); + } else if (lastSeg) { + ads->ds_ctl0 = 0; + ads->ds_ctl1 = segLen; + ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2; + ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3; + } else { + ads->ds_ctl0 = 0; + ads->ds_ctl1 = segLen | AR_TxMore; + ads->ds_ctl2 = 0; + ads->ds_ctl3 = 0; + } + ads->ds_txstatus0 = ads->ds_txstatus1 = 0; + ads->ds_txstatus2 = ads->ds_txstatus3 = 0; + ads->ds_txstatus4 = ads->ds_txstatus5 = 0; + ads->ds_txstatus6 = ads->ds_txstatus7 = 0; + ads->ds_txstatus8 = ads->ds_txstatus9 = 0; + return true; +} + +void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_txstatus0 = ads->ds_txstatus1 = 0; + ads->ds_txstatus2 = ads->ds_txstatus3 = 0; + ads->ds_txstatus4 = ads->ds_txstatus5 = 0; + ads->ds_txstatus6 = ads->ds_txstatus7 = 0; + ads->ds_txstatus8 = ads->ds_txstatus9 = 0; +} + +int +ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + if ((ads->ds_txstatus9 & AR_TxDone) == 0) + return -EINPROGRESS; + + ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum); + ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp; + ds->ds_txstat.ts_status = 0; + ds->ds_txstat.ts_flags = 0; + + if (ads->ds_txstatus1 & AR_ExcessiveRetries) + ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY; + if (ads->ds_txstatus1 & AR_Filtered) + ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT; + if (ads->ds_txstatus1 & AR_FIFOUnderrun) + ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO; + if (ads->ds_txstatus9 & AR_TxOpExceeded) + ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP; + if (ads->ds_txstatus1 & AR_TxTimerExpired) + ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED; + + if (ads->ds_txstatus1 & AR_DescCfgErr) + ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR; + if (ads->ds_txstatus1 & AR_TxDataUnderrun) { + ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN; + ath9k_hw_updatetxtriglevel(ah, true); + } + if (ads->ds_txstatus1 & AR_TxDelimUnderrun) { + ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN; + ath9k_hw_updatetxtriglevel(ah, true); + } + if (ads->ds_txstatus0 & AR_TxBaStatus) { + ds->ds_txstat.ts_flags |= ATH9K_TX_BA; + ds->ds_txstat.ba_low = ads->AR_BaBitmapLow; + ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh; + } + + ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx); + switch (ds->ds_txstat.ts_rateindex) { + case 0: + ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0); + break; + case 1: + ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1); + break; + case 2: + ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2); + break; + case 3: + ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3); + break; + } + + ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined); + ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00); + ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01); + ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02); + ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10); + ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11); + ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12); + ds->ds_txstat.evm0 = ads->AR_TxEVM0; + ds->ds_txstat.evm1 = ads->AR_TxEVM1; + ds->ds_txstat.evm2 = ads->AR_TxEVM2; + ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt); + ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt); + ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt); + 
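/*
 * Editorial sketch (not part of the patch): how a caller might chain a
 * frame that spans three DMA segments with ath9k_hw_filltxdesc() above.
 * Every descriptor except the last keeps AR_TxMore set so the DMA engine
 * keeps walking the chain; only the first descriptor keeps its ctl0
 * contents, and the last one copies ctl2/ctl3 from the first.  The
 * descriptor array and segment lengths below are hypothetical.
 */
static void example_fill_three_segments(struct ath_hal *ah,
					struct ath_desc ds[3])
{
	ath9k_hw_filltxdesc(ah, &ds[0], 512, true,  false, &ds[0]);
	ath9k_hw_filltxdesc(ah, &ds[1], 512, false, false, &ds[0]);
	ath9k_hw_filltxdesc(ah, &ds[2], 176, false, true,  &ds[0]);
}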
ds->ds_txstat.ts_antenna = 1; + + return 0; +} + +void +ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds, + u32 pktLen, enum ath9k_pkt_type type, u32 txPower, + u32 keyIx, enum ath9k_key_type keyType, u32 flags) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + struct ath_hal_5416 *ahp = AH5416(ah); + + txPower += ahp->ah_txPowerIndexOffset; + if (txPower > 63) + txPower = 63; + + ads->ds_ctl0 = (pktLen & AR_FrameLen) + | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0) + | SM(txPower, AR_XmitPower) + | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0) + | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0) + | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0) + | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0); + + ads->ds_ctl1 = + (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0) + | SM(type, AR_FrameType) + | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0) + | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0) + | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0); + + ads->ds_ctl6 = SM(keyType, AR_EncrType); + + if (AR_SREV_9285(ah)) { + + ads->ds_ctl8 = 0; + ads->ds_ctl9 = 0; + ads->ds_ctl10 = 0; + ads->ds_ctl11 = 0; + } +} + +void +ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds, + struct ath_desc *lastds, + u32 durUpdateEn, u32 rtsctsRate, + u32 rtsctsDuration, + struct ath9k_11n_rate_series series[], + u32 nseries, u32 flags) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + struct ar5416_desc *last_ads = AR5416DESC(lastds); + u32 ds_ctl0; + + (void) nseries; + (void) rtsctsDuration; + + if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) { + ds_ctl0 = ads->ds_ctl0; + + if (flags & ATH9K_TXDESC_RTSENA) { + ds_ctl0 &= ~AR_CTSEnable; + ds_ctl0 |= AR_RTSEnable; + } else { + ds_ctl0 &= ~AR_RTSEnable; + ds_ctl0 |= AR_CTSEnable; + } + + ads->ds_ctl0 = ds_ctl0; + } else { + ads->ds_ctl0 = + (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable)); + } + + ads->ds_ctl2 = set11nTries(series, 0) + | set11nTries(series, 1) + | set11nTries(series, 2) + | set11nTries(series, 3) + | (durUpdateEn ? 
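/*
 * Editorial note (not part of the patch): ath9k_hw_set11n_txdesc() above
 * first adds the per-chip ah_txPowerIndexOffset to the requested power
 * index and then clamps the sum to 63, because AR_XmitPower is only a
 * 6-bit descriptor field (mask 0x003f0000 in hw.h).  A hypothetical
 * request of 60 with an offset of 10 therefore still programs 63.
 */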
AR_DurUpdateEna : 0) + | SM(0, AR_BurstDur); + + ads->ds_ctl3 = set11nRate(series, 0) + | set11nRate(series, 1) + | set11nRate(series, 2) + | set11nRate(series, 3); + + ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0) + | set11nPktDurRTSCTS(series, 1); + + ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2) + | set11nPktDurRTSCTS(series, 3); + + ads->ds_ctl7 = set11nRateFlags(series, 0) + | set11nRateFlags(series, 1) + | set11nRateFlags(series, 2) + | set11nRateFlags(series, 3) + | SM(rtsctsRate, AR_RTSCTSRate); + last_ads->ds_ctl2 = ads->ds_ctl2; + last_ads->ds_ctl3 = ads->ds_ctl3; +} + +void +ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds, + u32 aggrLen) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr); + + ads->ds_ctl6 &= ~AR_AggrLen; + ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen); +} + +void +ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds, + u32 numDelims) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + unsigned int ctl6; + + ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr); + + ctl6 = ads->ds_ctl6; + ctl6 &= ~AR_PadDelim; + ctl6 |= SM(numDelims, AR_PadDelim); + ads->ds_ctl6 = ctl6; +} + +void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_ctl1 |= AR_IsAggr; + ads->ds_ctl1 &= ~AR_MoreAggr; + ads->ds_ctl6 &= ~AR_PadDelim; +} + +void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr); +} + +void +ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds, + u32 burstDuration) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + ads->ds_ctl2 &= ~AR_BurstDur; + ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur); +} + +void +ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds, + u32 vmf) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + + if (vmf) + ads->ds_ctl0 |= AR_VirtMoreFrag; + else + ads->ds_ctl0 &= ~AR_VirtMoreFrag; +} + +void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp) +{ + REG_WRITE(ah, AR_RXDP, rxdp); +} + +void ath9k_hw_rxena(struct ath_hal *ah) +{ + REG_WRITE(ah, AR_CR, AR_CR_RXE); +} + +bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set) +{ + if (set) { + + REG_SET_BIT(ah, AR_DIAG_SW, + (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); + + if (!ath9k_hw_wait + (ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 0)) { + u32 reg; + + REG_CLR_BIT(ah, AR_DIAG_SW, + (AR_DIAG_RX_DIS | + AR_DIAG_RX_ABORT)); + + reg = REG_READ(ah, AR_OBS_BUS_1); + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, + "%s: rx failed to go idle in 10 ms RXSM=0x%x\n", + __func__, reg); + + return false; + } + } else { + REG_CLR_BIT(ah, AR_DIAG_SW, + (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); + } + + return true; +} + +void +ath9k_hw_setmcastfilter(struct ath_hal *ah, u32 filter0, + u32 filter1) +{ + REG_WRITE(ah, AR_MCAST_FIL0, filter0); + REG_WRITE(ah, AR_MCAST_FIL1, filter1); +} + +bool +ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds, + u32 size, u32 flags) +{ + struct ar5416_desc *ads = AR5416DESC(ds); + struct ath9k_hw_capabilities *pCap = &ah->ah_caps; + + ads->ds_ctl1 = size & AR_BufLen; + if (flags & ATH9K_RXDESC_INTREQ) + ads->ds_ctl1 |= AR_RxIntrReq; + + ads->ds_rxstatus8 &= ~AR_RxDone; + if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) + memset(&(ads->u), 0, sizeof(ads->u)); + return true; +} + +int +ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds, + u32 pa, struct ath_desc *nds, u64 tsf) +{ + struct ar5416_desc ads; + struct ar5416_desc 
*adsp = AR5416DESC(ds); + + if ((adsp->ds_rxstatus8 & AR_RxDone) == 0) + return -EINPROGRESS; + + ads.u.rx = adsp->u.rx; + + ds->ds_rxstat.rs_status = 0; + ds->ds_rxstat.rs_flags = 0; + + ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen; + ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp; + + ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined); + ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00); + ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01); + ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02); + ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10); + ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11); + ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12); + if (ads.ds_rxstatus8 & AR_RxKeyIdxValid) + ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx); + else + ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID; + + ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads)); + ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0; + + ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0; + ds->ds_rxstat.rs_moreaggr = + (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; + ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); + ds->ds_rxstat.rs_flags = + (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; + ds->ds_rxstat.rs_flags |= + (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; + + if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) + ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE; + if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) + ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST; + if (ads.ds_rxstatus8 & AR_DecryptBusyErr) + ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY; + + if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { + + if (ads.ds_rxstatus8 & AR_CRCErr) + ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC; + else if (ads.ds_rxstatus8 & AR_PHYErr) { + u32 phyerr; + + ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY; + phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); + ds->ds_rxstat.rs_phyerr = phyerr; + } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) + ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT; + else if (ads.ds_rxstatus8 & AR_MichaelErr) + ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC; + } + + return 0; +} + +static void ath9k_hw_setup_rate_table(struct ath_hal *ah, + struct ath9k_rate_table *rt) +{ + int i; + + if (rt->rateCodeToIndex[0] != 0) + return; + for (i = 0; i < 256; i++) + rt->rateCodeToIndex[i] = (u8) -1; + for (i = 0; i < rt->rateCount; i++) { + u8 code = rt->info[i].rateCode; + u8 cix = rt->info[i].controlRate; + + rt->rateCodeToIndex[code] = i; + rt->rateCodeToIndex[code | rt->info[i].shortPreamble] = i; + + rt->info[i].lpAckDuration = + ath9k_hw_computetxtime(ah, rt, + WLAN_CTRL_FRAME_SIZE, + cix, + false); + rt->info[i].spAckDuration = + ath9k_hw_computetxtime(ah, rt, + WLAN_CTRL_FRAME_SIZE, + cix, + true); + } +} + +const struct ath9k_rate_table *ath9k_hw_getratetable(struct ath_hal *ah, + u32 mode) +{ + struct ath9k_rate_table *rt; + switch (mode) { + case ATH9K_MODE_11A: + rt = &ar5416_11a_table; + break; + case ATH9K_MODE_11B: + rt = &ar5416_11b_table; + break; + case ATH9K_MODE_11G: + rt = &ar5416_11g_table; + break; + case ATH9K_MODE_11NG_HT20: + case ATH9K_MODE_11NG_HT40PLUS: + case ATH9K_MODE_11NG_HT40MINUS: + rt = &ar5416_11ng_table; + break; + case ATH9K_MODE_11NA_HT20: + case ATH9K_MODE_11NA_HT40PLUS: + case ATH9K_MODE_11NA_HT40MINUS: + rt = &ar5416_11na_table; + break; + default: + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, "%s: invalid mode 0x%x\n", + __func__, 
mode); + return NULL; + } + ath9k_hw_setup_rate_table(ah, rt); + return rt; +} + +static const char *ath9k_hw_devname(u16 devid) +{ + switch (devid) { + case AR5416_DEVID_PCI: + case AR5416_DEVID_PCIE: + return "Atheros 5416"; + case AR9160_DEVID_PCI: + return "Atheros 9160"; + case AR9280_DEVID_PCI: + case AR9280_DEVID_PCIE: + return "Atheros 9280"; + } + return NULL; +} + +const char *ath9k_hw_probe(u16 vendorid, u16 devid) +{ + return vendorid == ATHEROS_VENDOR_ID ? + ath9k_hw_devname(devid) : NULL; +} + +struct ath_hal *ath9k_hw_attach(u16 devid, + struct ath_softc *sc, + void __iomem *mem, + int *error) +{ + struct ath_hal *ah = NULL; + + switch (devid) { + case AR5416_DEVID_PCI: + case AR5416_DEVID_PCIE: + case AR9160_DEVID_PCI: + case AR9280_DEVID_PCI: + case AR9280_DEVID_PCIE: + ah = ath9k_hw_do_attach(devid, sc, mem, error); + break; + default: + DPRINTF(ah->ah_sc, ATH_DBG_ANY, + "devid=0x%x not supported.\n", devid); + ah = NULL; + *error = -ENXIO; + break; + } + if (ah != NULL) { + ah->ah_devid = ah->ah_devid; + ah->ah_subvendorid = ah->ah_subvendorid; + ah->ah_macVersion = ah->ah_macVersion; + ah->ah_macRev = ah->ah_macRev; + ah->ah_phyRev = ah->ah_phyRev; + ah->ah_analog5GhzRev = ah->ah_analog5GhzRev; + ah->ah_analog2GhzRev = ah->ah_analog2GhzRev; + } + return ah; +} + +u16 +ath9k_hw_computetxtime(struct ath_hal *ah, + const struct ath9k_rate_table *rates, + u32 frameLen, u16 rateix, + bool shortPreamble) +{ + u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime; + u32 kbps; + + kbps = rates->info[rateix].rateKbps; + + if (kbps == 0) + return 0; + switch (rates->info[rateix].phy) { + + case PHY_CCK: + phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS; + if (shortPreamble && rates->info[rateix].shortPreamble) + phyTime >>= 1; + numBits = frameLen << 3; + txTime = CCK_SIFS_TIME + phyTime + + ((numBits * 1000) / kbps); + break; + case PHY_OFDM: + if (ah->ah_curchan && IS_CHAN_QUARTER_RATE(ah->ah_curchan)) { + bitsPerSymbol = + (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000; + + numBits = OFDM_PLCP_BITS + (frameLen << 3); + numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); + txTime = OFDM_SIFS_TIME_QUARTER + + OFDM_PREAMBLE_TIME_QUARTER + + (numSymbols * OFDM_SYMBOL_TIME_QUARTER); + } else if (ah->ah_curchan && + IS_CHAN_HALF_RATE(ah->ah_curchan)) { + bitsPerSymbol = + (kbps * OFDM_SYMBOL_TIME_HALF) / 1000; + + numBits = OFDM_PLCP_BITS + (frameLen << 3); + numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); + txTime = OFDM_SIFS_TIME_HALF + + OFDM_PREAMBLE_TIME_HALF + + (numSymbols * OFDM_SYMBOL_TIME_HALF); + } else { + bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000; + + numBits = OFDM_PLCP_BITS + (frameLen << 3); + numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol); + txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME + + (numSymbols * OFDM_SYMBOL_TIME); + } + break; + + default: + DPRINTF(ah->ah_sc, ATH_DBG_PHY_IO, + "%s: unknown phy %u (rate ix %u)\n", __func__, + rates->info[rateix].phy, rateix); + txTime = 0; + break; + } + return txTime; +} + +u32 ath9k_hw_mhz2ieee(struct ath_hal *ah, u32 freq, u32 flags) +{ + if (flags & CHANNEL_2GHZ) { + if (freq == 2484) + return 14; + if (freq < 2484) + return (freq - 2407) / 5; + else + return 15 + ((freq - 2512) / 20); + } else if (flags & CHANNEL_5GHZ) { + if (ath9k_regd_is_public_safety_sku(ah) && + IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) { + return ((freq * 10) + + (((freq % 5) == 2) ? 
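/*
 * Editorial worked example (not part of the patch) for the full-rate
 * OFDM branch of ath9k_hw_computetxtime() above, using a hypothetical
 * 1500-byte frame at 54 Mbps:
 *
 *	bitsPerSymbol = 54000 * OFDM_SYMBOL_TIME / 1000 = 216
 *	numBits       = OFDM_PLCP_BITS + 1500 * 8       = 12022
 *	numSymbols    = DIV_ROUND_UP(12022, 216)        = 56
 *	txTime        = 16 + 20 + 56 * 4                = 260 us
 *
 * (OFDM_SIFS_TIME = 16, OFDM_PREAMBLE_TIME = 20, OFDM_SYMBOL_TIME = 4
 * and OFDM_PLCP_BITS = 22, as defined in hw.h later in this patch.)
 */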
5 : 0) - 49400) / 5; + } else if ((flags & CHANNEL_A) && (freq <= 5000)) { + return (freq - 4000) / 5; + } else { + return (freq - 5000) / 5; + } + } else { + if (freq == 2484) + return 14; + if (freq < 2484) + return (freq - 2407) / 5; + if (freq < 5000) { + if (ath9k_regd_is_public_safety_sku(ah) + && IS_CHAN_IN_PUBLIC_SAFETY_BAND(freq)) { + return ((freq * 10) + + (((freq % 5) == + 2) ? 5 : 0) - 49400) / 5; + } else if (freq > 4900) { + return (freq - 4000) / 5; + } else { + return 15 + ((freq - 2512) / 20); + } + } + return (freq - 5000) / 5; + } +} + +int16_t +ath9k_hw_getchan_noise(struct ath_hal *ah, struct ath9k_channel *chan) +{ + struct ath9k_channel *ichan; + + ichan = ath9k_regd_check_channel(ah, chan); + if (ichan == NULL) { + DPRINTF(ah->ah_sc, ATH_DBG_NF_CAL, + "%s: invalid channel %u/0x%x; no mapping\n", + __func__, chan->channel, chan->channelFlags); + return 0; + } + if (ichan->rawNoiseFloor == 0) { + enum wireless_mode mode = ath9k_hw_chan2wmode(ah, chan); + return NOISE_FLOOR[mode]; + } else + return ichan->rawNoiseFloor; +} + +bool ath9k_hw_set_tsfadjust(struct ath_hal *ah, u32 setting) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + if (setting) + ahp->ah_miscMode |= AR_PCU_TX_ADD_TSF; + else + ahp->ah_miscMode &= ~AR_PCU_TX_ADD_TSF; + return true; +} + +bool ath9k_hw_phycounters(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + return ahp->ah_hasHwPhyCounters ? true : false; +} + +u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q) +{ + return REG_READ(ah, AR_QTXDP(q)); +} + +bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, + u32 txdp) +{ + REG_WRITE(ah, AR_QTXDP(q), txdp); + + return true; +} + +bool ath9k_hw_txstart(struct ath_hal *ah, u32 q) +{ + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q); + + REG_WRITE(ah, AR_Q_TXE, 1 << q); + + return true; +} + +u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q) +{ + u32 npend; + + npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT; + if (npend == 0) { + + if (REG_READ(ah, AR_Q_TXE) & (1 << q)) + npend = 1; + } + return npend; +} + +bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q) +{ + u32 wait; + + REG_WRITE(ah, AR_Q_TXD, 1 << q); + + for (wait = 1000; wait != 0; wait--) { + if (ath9k_hw_numtxpending(ah, q) == 0) + break; + udelay(100); + } + + if (ath9k_hw_numtxpending(ah, q)) { + u32 tsfLow, j; + + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, + "%s: Num of pending TX Frames %d on Q %d\n", + __func__, ath9k_hw_numtxpending(ah, q), q); + + for (j = 0; j < 2; j++) { + tsfLow = REG_READ(ah, AR_TSF_L32); + REG_WRITE(ah, AR_QUIET2, + SM(10, AR_QUIET2_QUIET_DUR)); + REG_WRITE(ah, AR_QUIET_PERIOD, 100); + REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10); + REG_SET_BIT(ah, AR_TIMER_MODE, + AR_QUIET_TIMER_EN); + + if ((REG_READ(ah, AR_TSF_L32) >> 10) == + (tsfLow >> 10)) { + break; + } + DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, + "%s: TSF have moved while trying to set " + "quiet time TSF: 0x%08x\n", + __func__, tsfLow); + } + + REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); + + udelay(200); + REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN); + + wait = 1000; + + while (ath9k_hw_numtxpending(ah, q)) { + if ((--wait) == 0) { + DPRINTF(ah->ah_sc, ATH_DBG_XMIT, + "%s: Failed to stop Tx DMA in 100 " + "msec after killing last frame\n", + __func__); + break; + } + udelay(100); + } + + REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); + } + + REG_WRITE(ah, AR_Q_TXD, 0); + return wait != 0; +} diff --git a/drivers/net/wireless/ath9k/hw.h b/drivers/net/wireless/ath9k/hw.h new file mode 100644 
index 0000000..ae680f2 --- /dev/null +++ b/drivers/net/wireless/ath9k/hw.h @@ -0,0 +1,969 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef HW_H +#define HW_H + +#include <linux/if_ether.h> +#include <linux/delay.h> + +struct ar5416_desc { + u32 ds_link; + u32 ds_data; + u32 ds_ctl0; + u32 ds_ctl1; + union { + struct { + u32 ctl2; + u32 ctl3; + u32 ctl4; + u32 ctl5; + u32 ctl6; + u32 ctl7; + u32 ctl8; + u32 ctl9; + u32 ctl10; + u32 ctl11; + u32 status0; + u32 status1; + u32 status2; + u32 status3; + u32 status4; + u32 status5; + u32 status6; + u32 status7; + u32 status8; + u32 status9; + } tx; + struct { + u32 status0; + u32 status1; + u32 status2; + u32 status3; + u32 status4; + u32 status5; + u32 status6; + u32 status7; + u32 status8; + } rx; + } u; +} __packed; + +#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds)) +#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds)) + +#define ds_ctl2 u.tx.ctl2 +#define ds_ctl3 u.tx.ctl3 +#define ds_ctl4 u.tx.ctl4 +#define ds_ctl5 u.tx.ctl5 +#define ds_ctl6 u.tx.ctl6 +#define ds_ctl7 u.tx.ctl7 +#define ds_ctl8 u.tx.ctl8 +#define ds_ctl9 u.tx.ctl9 +#define ds_ctl10 u.tx.ctl10 +#define ds_ctl11 u.tx.ctl11 + +#define ds_txstatus0 u.tx.status0 +#define ds_txstatus1 u.tx.status1 +#define ds_txstatus2 u.tx.status2 +#define ds_txstatus3 u.tx.status3 +#define ds_txstatus4 u.tx.status4 +#define ds_txstatus5 u.tx.status5 +#define ds_txstatus6 u.tx.status6 +#define ds_txstatus7 u.tx.status7 +#define ds_txstatus8 u.tx.status8 +#define ds_txstatus9 u.tx.status9 + +#define ds_rxstatus0 u.rx.status0 +#define ds_rxstatus1 u.rx.status1 +#define ds_rxstatus2 u.rx.status2 +#define ds_rxstatus3 u.rx.status3 +#define ds_rxstatus4 u.rx.status4 +#define ds_rxstatus5 u.rx.status5 +#define ds_rxstatus6 u.rx.status6 +#define ds_rxstatus7 u.rx.status7 +#define ds_rxstatus8 u.rx.status8 + +#define AR_FrameLen 0x00000fff +#define AR_VirtMoreFrag 0x00001000 +#define AR_TxCtlRsvd00 0x0000e000 +#define AR_XmitPower 0x003f0000 +#define AR_XmitPower_S 16 +#define AR_RTSEnable 0x00400000 +#define AR_VEOL 0x00800000 +#define AR_ClrDestMask 0x01000000 +#define AR_TxCtlRsvd01 0x1e000000 +#define AR_TxIntrReq 0x20000000 +#define AR_DestIdxValid 0x40000000 +#define AR_CTSEnable 0x80000000 + +#define AR_BufLen 0x00000fff +#define AR_TxMore 0x00001000 +#define AR_DestIdx 0x000fe000 +#define AR_DestIdx_S 13 +#define AR_FrameType 0x00f00000 +#define AR_FrameType_S 20 +#define AR_NoAck 0x01000000 +#define AR_InsertTS 0x02000000 +#define AR_CorruptFCS 0x04000000 +#define AR_ExtOnly 0x08000000 +#define AR_ExtAndCtl 0x10000000 +#define AR_MoreAggr 0x20000000 +#define AR_IsAggr 0x40000000 + +#define AR_BurstDur 0x00007fff +#define AR_BurstDur_S 0 +#define AR_DurUpdateEna 0x00008000 +#define AR_XmitDataTries0 0x000f0000 +#define 
AR_XmitDataTries0_S 16 +#define AR_XmitDataTries1 0x00f00000 +#define AR_XmitDataTries1_S 20 +#define AR_XmitDataTries2 0x0f000000 +#define AR_XmitDataTries2_S 24 +#define AR_XmitDataTries3 0xf0000000 +#define AR_XmitDataTries3_S 28 + +#define AR_XmitRate0 0x000000ff +#define AR_XmitRate0_S 0 +#define AR_XmitRate1 0x0000ff00 +#define AR_XmitRate1_S 8 +#define AR_XmitRate2 0x00ff0000 +#define AR_XmitRate2_S 16 +#define AR_XmitRate3 0xff000000 +#define AR_XmitRate3_S 24 + +#define AR_PacketDur0 0x00007fff +#define AR_PacketDur0_S 0 +#define AR_RTSCTSQual0 0x00008000 +#define AR_PacketDur1 0x7fff0000 +#define AR_PacketDur1_S 16 +#define AR_RTSCTSQual1 0x80000000 + +#define AR_PacketDur2 0x00007fff +#define AR_PacketDur2_S 0 +#define AR_RTSCTSQual2 0x00008000 +#define AR_PacketDur3 0x7fff0000 +#define AR_PacketDur3_S 16 +#define AR_RTSCTSQual3 0x80000000 + +#define AR_AggrLen 0x0000ffff +#define AR_AggrLen_S 0 +#define AR_TxCtlRsvd60 0x00030000 +#define AR_PadDelim 0x03fc0000 +#define AR_PadDelim_S 18 +#define AR_EncrType 0x0c000000 +#define AR_EncrType_S 26 +#define AR_TxCtlRsvd61 0xf0000000 + +#define AR_2040_0 0x00000001 +#define AR_GI0 0x00000002 +#define AR_ChainSel0 0x0000001c +#define AR_ChainSel0_S 2 +#define AR_2040_1 0x00000020 +#define AR_GI1 0x00000040 +#define AR_ChainSel1 0x00000380 +#define AR_ChainSel1_S 7 +#define AR_2040_2 0x00000400 +#define AR_GI2 0x00000800 +#define AR_ChainSel2 0x00007000 +#define AR_ChainSel2_S 12 +#define AR_2040_3 0x00008000 +#define AR_GI3 0x00010000 +#define AR_ChainSel3 0x000e0000 +#define AR_ChainSel3_S 17 +#define AR_RTSCTSRate 0x0ff00000 +#define AR_RTSCTSRate_S 20 +#define AR_TxCtlRsvd70 0xf0000000 + +#define AR_TxRSSIAnt00 0x000000ff +#define AR_TxRSSIAnt00_S 0 +#define AR_TxRSSIAnt01 0x0000ff00 +#define AR_TxRSSIAnt01_S 8 +#define AR_TxRSSIAnt02 0x00ff0000 +#define AR_TxRSSIAnt02_S 16 +#define AR_TxStatusRsvd00 0x3f000000 +#define AR_TxBaStatus 0x40000000 +#define AR_TxStatusRsvd01 0x80000000 + +#define AR_FrmXmitOK 0x00000001 +#define AR_ExcessiveRetries 0x00000002 +#define AR_FIFOUnderrun 0x00000004 +#define AR_Filtered 0x00000008 +#define AR_RTSFailCnt 0x000000f0 +#define AR_RTSFailCnt_S 4 +#define AR_DataFailCnt 0x00000f00 +#define AR_DataFailCnt_S 8 +#define AR_VirtRetryCnt 0x0000f000 +#define AR_VirtRetryCnt_S 12 +#define AR_TxDelimUnderrun 0x00010000 +#define AR_TxDataUnderrun 0x00020000 +#define AR_DescCfgErr 0x00040000 +#define AR_TxTimerExpired 0x00080000 +#define AR_TxStatusRsvd10 0xfff00000 + +#define AR_SendTimestamp ds_txstatus2 +#define AR_BaBitmapLow ds_txstatus3 +#define AR_BaBitmapHigh ds_txstatus4 + +#define AR_TxRSSIAnt10 0x000000ff +#define AR_TxRSSIAnt10_S 0 +#define AR_TxRSSIAnt11 0x0000ff00 +#define AR_TxRSSIAnt11_S 8 +#define AR_TxRSSIAnt12 0x00ff0000 +#define AR_TxRSSIAnt12_S 16 +#define AR_TxRSSICombined 0xff000000 +#define AR_TxRSSICombined_S 24 + +#define AR_TxEVM0 ds_txstatus5 +#define AR_TxEVM1 ds_txstatus6 +#define AR_TxEVM2 ds_txstatus7 + +#define AR_TxDone 0x00000001 +#define AR_SeqNum 0x00001ffe +#define AR_SeqNum_S 1 +#define AR_TxStatusRsvd80 0x0001e000 +#define AR_TxOpExceeded 0x00020000 +#define AR_TxStatusRsvd81 0x001c0000 +#define AR_FinalTxIdx 0x00600000 +#define AR_FinalTxIdx_S 21 +#define AR_TxStatusRsvd82 0x01800000 +#define AR_PowerMgmt 0x02000000 +#define AR_TxStatusRsvd83 0xfc000000 + +#define AR_RxCTLRsvd00 0xffffffff + +#define AR_BufLen 0x00000fff +#define AR_RxCtlRsvd00 0x00001000 +#define AR_RxIntrReq 0x00002000 +#define AR_RxCtlRsvd01 0xffffc000 + +#define AR_RxRSSIAnt00 0x000000ff +#define 
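/*
 * Editorial note (not part of the patch): every descriptor field above
 * comes as a mask/shift pair (FIELD and FIELD_S).  The SM()/MS() macros
 * used throughout the .c file are assumed to be the usual helpers from
 * the register headers, roughly:
 *
 *	#define MS(reg, field)	(((reg) & field) >> field##_S)
 *	#define SM(val, field)	(((val) << field##_S) & field)
 *
 * e.g. extracting the second configured rate from ds_ctl3:
 *
 *	rate1 = MS(ads->ds_ctl3, AR_XmitRate1);  /* (ctl3 & 0x0000ff00) >> 8 */
 */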
AR_RxRSSIAnt00_S 0 +#define AR_RxRSSIAnt01 0x0000ff00 +#define AR_RxRSSIAnt01_S 8 +#define AR_RxRSSIAnt02 0x00ff0000 +#define AR_RxRSSIAnt02_S 16 +#define AR_RxRate 0xff000000 +#define AR_RxRate_S 24 +#define AR_RxStatusRsvd00 0xff000000 + +#define AR_DataLen 0x00000fff +#define AR_RxMore 0x00001000 +#define AR_NumDelim 0x003fc000 +#define AR_NumDelim_S 14 +#define AR_RxStatusRsvd10 0xff800000 + +#define AR_RcvTimestamp ds_rxstatus2 + +#define AR_GI 0x00000001 +#define AR_2040 0x00000002 +#define AR_Parallel40 0x00000004 +#define AR_Parallel40_S 2 +#define AR_RxStatusRsvd30 0x000000f8 +#define AR_RxAntenna 0xffffff00 +#define AR_RxAntenna_S 8 + +#define AR_RxRSSIAnt10 0x000000ff +#define AR_RxRSSIAnt10_S 0 +#define AR_RxRSSIAnt11 0x0000ff00 +#define AR_RxRSSIAnt11_S 8 +#define AR_RxRSSIAnt12 0x00ff0000 +#define AR_RxRSSIAnt12_S 16 +#define AR_RxRSSICombined 0xff000000 +#define AR_RxRSSICombined_S 24 + +#define AR_RxEVM0 ds_rxstatus4 +#define AR_RxEVM1 ds_rxstatus5 +#define AR_RxEVM2 ds_rxstatus6 + +#define AR_RxDone 0x00000001 +#define AR_RxFrameOK 0x00000002 +#define AR_CRCErr 0x00000004 +#define AR_DecryptCRCErr 0x00000008 +#define AR_PHYErr 0x00000010 +#define AR_MichaelErr 0x00000020 +#define AR_PreDelimCRCErr 0x00000040 +#define AR_RxStatusRsvd70 0x00000080 +#define AR_RxKeyIdxValid 0x00000100 +#define AR_KeyIdx 0x0000fe00 +#define AR_KeyIdx_S 9 +#define AR_PHYErrCode 0x0000ff00 +#define AR_PHYErrCode_S 8 +#define AR_RxMoreAggr 0x00010000 +#define AR_RxAggr 0x00020000 +#define AR_PostDelimCRCErr 0x00040000 +#define AR_RxStatusRsvd71 0x3ff80000 +#define AR_DecryptBusyErr 0x40000000 +#define AR_KeyMiss 0x80000000 + +#define AR5416_MAGIC 0x19641014 + +#define RXSTATUS_RATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \ + MS(ads->ds_rxstatus0, AR_RxRate) : \ + (ads->ds_rxstatus3 >> 2) & 0xFF) +#define RXSTATUS_DUPLICATE(ah, ads) (AR_SREV_5416_V20_OR_LATER(ah) ? \ + MS(ads->ds_rxstatus3, AR_Parallel40) : \ + (ads->ds_rxstatus3 >> 10) & 0x1) + +#define set11nTries(_series, _index) \ + (SM((_series)[_index].Tries, AR_XmitDataTries##_index)) + +#define set11nRate(_series, _index) \ + (SM((_series)[_index].Rate, AR_XmitRate##_index)) + +#define set11nPktDurRTSCTS(_series, _index) \ + (SM((_series)[_index].PktDuration, AR_PacketDur##_index) | \ + ((_series)[_index].RateFlags & ATH9K_RATESERIES_RTS_CTS ? \ + AR_RTSCTSQual##_index : 0)) + +#define set11nRateFlags(_series, _index) \ + (((_series)[_index].RateFlags & ATH9K_RATESERIES_2040 ? \ + AR_2040_##_index : 0) \ + |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? 
\ + AR_GI##_index : 0) \ + |SM((_series)[_index].ChSel, AR_ChainSel##_index)) + +#define AR_SREV_9100(ah) ((ah->ah_macVersion) == AR_SREV_VERSION_9100) + +#define INIT_CONFIG_STATUS 0x00000000 +#define INIT_RSSI_THR 0x00000700 +#define INIT_BCON_CNTRL_REG 0x00000000 + +#define MIN_TX_FIFO_THRESHOLD 0x1 +#define MAX_TX_FIFO_THRESHOLD ((4096 / 64) - 1) +#define INIT_TX_FIFO_THRESHOLD MIN_TX_FIFO_THRESHOLD + +#define NUM_CORNER_FIX_BITS_2133 7 +#define CCK_OFDM_GAIN_DELTA 15 + +struct ar5416AniState { + struct ath9k_channel c; + u8 noiseImmunityLevel; + u8 spurImmunityLevel; + u8 firstepLevel; + u8 ofdmWeakSigDetectOff; + u8 cckWeakSigThreshold; + u32 listenTime; + u32 ofdmTrigHigh; + u32 ofdmTrigLow; + int32_t cckTrigHigh; + int32_t cckTrigLow; + int32_t rssiThrLow; + int32_t rssiThrHigh; + u32 noiseFloor; + u32 txFrameCount; + u32 rxFrameCount; + u32 cycleCount; + u32 ofdmPhyErrCount; + u32 cckPhyErrCount; + u32 ofdmPhyErrBase; + u32 cckPhyErrBase; + int16_t pktRssi[2]; + int16_t ofdmErrRssi[2]; + int16_t cckErrRssi[2]; +}; + +#define HAL_PROCESS_ANI 0x00000001 +#define HAL_RADAR_EN 0x80000000 +#define HAL_AR_EN 0x40000000 + +#define DO_ANI(ah) \ + ((AH5416(ah)->ah_procPhyErr & HAL_PROCESS_ANI)) + +struct ar5416Stats { + u32 ast_ani_niup; + u32 ast_ani_nidown; + u32 ast_ani_spurup; + u32 ast_ani_spurdown; + u32 ast_ani_ofdmon; + u32 ast_ani_ofdmoff; + u32 ast_ani_cckhigh; + u32 ast_ani_ccklow; + u32 ast_ani_stepup; + u32 ast_ani_stepdown; + u32 ast_ani_ofdmerrs; + u32 ast_ani_cckerrs; + u32 ast_ani_reset; + u32 ast_ani_lzero; + u32 ast_ani_lneg; + struct ath9k_mib_stats ast_mibstats; + struct ath9k_node_stats ast_nodestats; +}; + +#define AR5416_OPFLAGS_11A 0x01 +#define AR5416_OPFLAGS_11G 0x02 +#define AR5416_OPFLAGS_N_5G_HT40 0x04 +#define AR5416_OPFLAGS_N_2G_HT40 0x08 +#define AR5416_OPFLAGS_N_5G_HT20 0x10 +#define AR5416_OPFLAGS_N_2G_HT20 0x20 + +#define EEP_RFSILENT_ENABLED 0x0001 +#define EEP_RFSILENT_ENABLED_S 0 +#define EEP_RFSILENT_POLARITY 0x0002 +#define EEP_RFSILENT_POLARITY_S 1 +#define EEP_RFSILENT_GPIO_SEL 0x001c +#define EEP_RFSILENT_GPIO_SEL_S 2 + +#define AR5416_EEP_NO_BACK_VER 0x1 +#define AR5416_EEP_VER 0xE +#define AR5416_EEP_VER_MINOR_MASK 0x0FFF +#define AR5416_EEP_MINOR_VER_2 0x2 +#define AR5416_EEP_MINOR_VER_3 0x3 +#define AR5416_EEP_MINOR_VER_7 0x7 +#define AR5416_EEP_MINOR_VER_9 0x9 + +#define AR5416_EEP_START_LOC 256 +#define AR5416_NUM_5G_CAL_PIERS 8 +#define AR5416_NUM_2G_CAL_PIERS 4 +#define AR5416_NUM_5G_20_TARGET_POWERS 8 +#define AR5416_NUM_5G_40_TARGET_POWERS 8 +#define AR5416_NUM_2G_CCK_TARGET_POWERS 3 +#define AR5416_NUM_2G_20_TARGET_POWERS 4 +#define AR5416_NUM_2G_40_TARGET_POWERS 4 +#define AR5416_NUM_CTLS 24 +#define AR5416_NUM_BAND_EDGES 8 +#define AR5416_NUM_PD_GAINS 4 +#define AR5416_PD_GAINS_IN_MASK 4 +#define AR5416_PD_GAIN_ICEPTS 5 +#define AR5416_EEPROM_MODAL_SPURS 5 +#define AR5416_MAX_RATE_POWER 63 +#define AR5416_NUM_PDADC_VALUES 128 +#define AR5416_NUM_RATES 16 +#define AR5416_BCHAN_UNUSED 0xFF +#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64 +#define AR5416_EEPMISC_BIG_ENDIAN 0x01 +#define AR5416_MAX_CHAINS 3 +#define AR5416_ANT_16S 25 + +#define AR5416_NUM_ANT_CHAIN_FIELDS 7 +#define AR5416_NUM_ANT_COMMON_FIELDS 4 +#define AR5416_SIZE_ANT_CHAIN_FIELD 3 +#define AR5416_SIZE_ANT_COMMON_FIELD 4 +#define AR5416_ANT_CHAIN_MASK 0x7 +#define AR5416_ANT_COMMON_MASK 0xf +#define AR5416_CHAIN_0_IDX 0 +#define AR5416_CHAIN_1_IDX 1 +#define AR5416_CHAIN_2_IDX 2 + +#define AR5416_PWR_TABLE_OFFSET -5 +#define AR5416_LEGACY_CHAINMASK 1 + +enum eeprom_param { 
+ EEP_NFTHRESH_5, + EEP_NFTHRESH_2, + EEP_MAC_MSW, + EEP_MAC_MID, + EEP_MAC_LSW, + EEP_REG_0, + EEP_REG_1, + EEP_OP_CAP, + EEP_OP_MODE, + EEP_RF_SILENT, + EEP_OB_5, + EEP_DB_5, + EEP_OB_2, + EEP_DB_2, + EEP_MINOR_REV, + EEP_TX_MASK, + EEP_RX_MASK, +}; + +enum ar5416_rates { + rate6mb, rate9mb, rate12mb, rate18mb, + rate24mb, rate36mb, rate48mb, rate54mb, + rate1l, rate2l, rate2s, rate5_5l, + rate5_5s, rate11l, rate11s, rateXr, + rateHt20_0, rateHt20_1, rateHt20_2, rateHt20_3, + rateHt20_4, rateHt20_5, rateHt20_6, rateHt20_7, + rateHt40_0, rateHt40_1, rateHt40_2, rateHt40_3, + rateHt40_4, rateHt40_5, rateHt40_6, rateHt40_7, + rateDupCck, rateDupOfdm, rateExtCck, rateExtOfdm, + Ar5416RateSize +}; + +struct base_eep_header { + u16 length; + u16 checksum; + u16 version; + u8 opCapFlags; + u8 eepMisc; + u16 regDmn[2]; + u8 macAddr[6]; + u8 rxMask; + u8 txMask; + u16 rfSilent; + u16 blueToothOptions; + u16 deviceCap; + u32 binBuildNumber; + u8 deviceType; + u8 pwdclkind; + u8 futureBase[32]; +} __packed; + +struct spur_chan { + u16 spurChan; + u8 spurRangeLow; + u8 spurRangeHigh; +} __packed; + +struct modal_eep_header { + u32 antCtrlChain[AR5416_MAX_CHAINS]; + u32 antCtrlCommon; + u8 antennaGainCh[AR5416_MAX_CHAINS]; + u8 switchSettling; + u8 txRxAttenCh[AR5416_MAX_CHAINS]; + u8 rxTxMarginCh[AR5416_MAX_CHAINS]; + u8 adcDesiredSize; + u8 pgaDesiredSize; + u8 xlnaGainCh[AR5416_MAX_CHAINS]; + u8 txEndToXpaOff; + u8 txEndToRxOn; + u8 txFrameToXpaOn; + u8 thresh62; + u8 noiseFloorThreshCh[AR5416_MAX_CHAINS]; + u8 xpdGain; + u8 xpd; + u8 iqCalICh[AR5416_MAX_CHAINS]; + u8 iqCalQCh[AR5416_MAX_CHAINS]; + u8 pdGainOverlap; + u8 ob; + u8 db; + u8 xpaBiasLvl; + u8 pwrDecreaseFor2Chain; + u8 pwrDecreaseFor3Chain; + u8 txFrameToDataStart; + u8 txFrameToPaOn; + u8 ht40PowerIncForPdadc; + u8 bswAtten[AR5416_MAX_CHAINS]; + u8 bswMargin[AR5416_MAX_CHAINS]; + u8 swSettleHt40; + u8 xatten2Db[AR5416_MAX_CHAINS]; + u8 xatten2Margin[AR5416_MAX_CHAINS]; + u8 ob_ch1; + u8 db_ch1; + u8 useAnt1:1, + force_xpaon:1, + local_bias:1, + femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1; + u8 futureModalar9280; + u16 xpaBiasLvlFreq[3]; + u8 futureModal[6]; + + struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS]; +} __packed; + +struct cal_data_per_freq { + u8 pwrPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; + u8 vpdPdg[AR5416_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS]; +} __packed; + +struct cal_target_power_leg { + u8 bChannel; + u8 tPow2x[4]; +} __packed; + +struct cal_target_power_ht { + u8 bChannel; + u8 tPow2x[8]; +} __packed; + +#ifdef __BIG_ENDIAN_BITFIELD +struct cal_ctl_edges { + u8 bChannel; + u8 flag:2, tPower:6; +} __packed; +#else +struct cal_ctl_edges { + u8 bChannel; + u8 tPower:6, flag:2; +} __packed; +#endif + +struct cal_ctl_data { + struct cal_ctl_edges + ctlEdges[AR5416_MAX_CHAINS][AR5416_NUM_BAND_EDGES]; +} __packed; + +struct ar5416_eeprom { + struct base_eep_header baseEepHeader; + u8 custData[64]; + struct modal_eep_header modalHeader[2]; + u8 calFreqPier5G[AR5416_NUM_5G_CAL_PIERS]; + u8 calFreqPier2G[AR5416_NUM_2G_CAL_PIERS]; + struct cal_data_per_freq + calPierData5G[AR5416_MAX_CHAINS][AR5416_NUM_5G_CAL_PIERS]; + struct cal_data_per_freq + calPierData2G[AR5416_MAX_CHAINS][AR5416_NUM_2G_CAL_PIERS]; + struct cal_target_power_leg + calTargetPower5G[AR5416_NUM_5G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower5GHT20[AR5416_NUM_5G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower5GHT40[AR5416_NUM_5G_40_TARGET_POWERS]; + struct cal_target_power_leg + 
calTargetPowerCck[AR5416_NUM_2G_CCK_TARGET_POWERS]; + struct cal_target_power_leg + calTargetPower2G[AR5416_NUM_2G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower2GHT20[AR5416_NUM_2G_20_TARGET_POWERS]; + struct cal_target_power_ht + calTargetPower2GHT40[AR5416_NUM_2G_40_TARGET_POWERS]; + u8 ctlIndex[AR5416_NUM_CTLS]; + struct cal_ctl_data ctlData[AR5416_NUM_CTLS]; + u8 padding; +} __packed; + +struct ar5416IniArray { + u32 *ia_array; + u32 ia_rows; + u32 ia_columns; +}; + +#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \ + (iniarray)->ia_array = (u32 *)(array); \ + (iniarray)->ia_rows = (rows); \ + (iniarray)->ia_columns = (columns); \ + } while (0) + +#define INI_RA(iniarray, row, column) \ + (((iniarray)->ia_array)[(row) * ((iniarray)->ia_columns) + (column)]) + +#define INIT_CAL(_perCal) do { \ + (_perCal)->calState = CAL_WAITING; \ + (_perCal)->calNext = NULL; \ + } while (0) + +#define INSERT_CAL(_ahp, _perCal) \ + do { \ + if ((_ahp)->ah_cal_list_last == NULL) { \ + (_ahp)->ah_cal_list = \ + (_ahp)->ah_cal_list_last = (_perCal); \ + ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \ + } else { \ + ((_ahp)->ah_cal_list_last)->calNext = (_perCal); \ + (_ahp)->ah_cal_list_last = (_perCal); \ + (_perCal)->calNext = (_ahp)->ah_cal_list; \ + } \ + } while (0) + +enum hal_cal_types { + ADC_DC_INIT_CAL = 0x1, + ADC_GAIN_CAL = 0x2, + ADC_DC_CAL = 0x4, + IQ_MISMATCH_CAL = 0x8 +}; + +enum hal_cal_state { + CAL_INACTIVE, + CAL_WAITING, + CAL_RUNNING, + CAL_DONE +}; + +#define MIN_CAL_SAMPLES 1 +#define MAX_CAL_SAMPLES 64 +#define INIT_LOG_COUNT 5 +#define PER_MIN_LOG_COUNT 2 +#define PER_MAX_LOG_COUNT 10 + +struct hal_percal_data { + enum hal_cal_types calType; + u32 calNumSamples; + u32 calCountMax; + void (*calCollect) (struct ath_hal *); + void (*calPostProc) (struct ath_hal *, u8); +}; + +struct hal_cal_list { + const struct hal_percal_data *calData; + enum hal_cal_state calState; + struct hal_cal_list *calNext; +}; + +struct ath_hal_5416 { + struct ath_hal ah; + struct ar5416_eeprom ah_eeprom; + u8 ah_macaddr[ETH_ALEN]; + u8 ah_bssid[ETH_ALEN]; + u8 ah_bssidmask[ETH_ALEN]; + u16 ah_assocId; + int16_t ah_curchanRadIndex; + u32 ah_maskReg; + struct ar5416Stats ah_stats; + u32 ah_txDescMask; + u32 ah_txOkInterruptMask; + u32 ah_txErrInterruptMask; + u32 ah_txDescInterruptMask; + u32 ah_txEolInterruptMask; + u32 ah_txUrnInterruptMask; + struct ath9k_tx_queue_info ah_txq[ATH9K_NUM_TX_QUEUES]; + enum ath9k_power_mode ah_powerMode; + bool ah_chipFullSleep; + u32 ah_atimWindow; + enum ath9k_ant_setting ah_diversityControl; + u16 ah_antennaSwitchSwap; + enum hal_cal_types ah_suppCals; + struct hal_cal_list ah_iqCalData; + struct hal_cal_list ah_adcGainCalData; + struct hal_cal_list ah_adcDcCalInitData; + struct hal_cal_list ah_adcDcCalData; + struct hal_cal_list *ah_cal_list; + struct hal_cal_list *ah_cal_list_last; + struct hal_cal_list *ah_cal_list_curr; +#define ah_totalPowerMeasI ah_Meas0.unsign +#define ah_totalPowerMeasQ ah_Meas1.unsign +#define ah_totalIqCorrMeas ah_Meas2.sign +#define ah_totalAdcIOddPhase ah_Meas0.unsign +#define ah_totalAdcIEvenPhase ah_Meas1.unsign +#define ah_totalAdcQOddPhase ah_Meas2.unsign +#define ah_totalAdcQEvenPhase ah_Meas3.unsign +#define ah_totalAdcDcOffsetIOddPhase ah_Meas0.sign +#define ah_totalAdcDcOffsetIEvenPhase ah_Meas1.sign +#define ah_totalAdcDcOffsetQOddPhase ah_Meas2.sign +#define ah_totalAdcDcOffsetQEvenPhase ah_Meas3.sign + union { + u32 unsign[AR5416_MAX_CHAINS]; + int32_t sign[AR5416_MAX_CHAINS]; + } ah_Meas0; + union 
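/*
 * Editorial note (not part of the patch): INSERT_CAL above builds a
 * singly linked *circular* list of calibrations.  The first insertion
 * points the entry's calNext back at itself; every later insertion is
 * appended at ah_cal_list_last and its calNext is wired back to the list
 * head, so a current pointer can be advanced indefinitely with
 * "curr = curr->calNext" as periodic calibration walks the registered
 * entries (IQ mismatch, ADC gain, ADC DC).
 */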
{ + u32 unsign[AR5416_MAX_CHAINS]; + int32_t sign[AR5416_MAX_CHAINS]; + } ah_Meas1; + union { + u32 unsign[AR5416_MAX_CHAINS]; + int32_t sign[AR5416_MAX_CHAINS]; + } ah_Meas2; + union { + u32 unsign[AR5416_MAX_CHAINS]; + int32_t sign[AR5416_MAX_CHAINS]; + } ah_Meas3; + u16 ah_CalSamples; + u32 ah_tx6PowerInHalfDbm; + u32 ah_staId1Defaults; + u32 ah_miscMode; + bool ah_tpcEnabled; + u32 ah_beaconInterval; + enum { + AUTO_32KHZ, + USE_32KHZ, + DONT_USE_32KHZ, + } ah_enable32kHzClock; + u32 *ah_analogBank0Data; + u32 *ah_analogBank1Data; + u32 *ah_analogBank2Data; + u32 *ah_analogBank3Data; + u32 *ah_analogBank6Data; + u32 *ah_analogBank6TPCData; + u32 *ah_analogBank7Data; + u32 *ah_addac5416_21; + u32 *ah_bank6Temp; + u32 ah_ofdmTxPower; + int16_t ah_txPowerIndexOffset; + u32 ah_slottime; + u32 ah_acktimeout; + u32 ah_ctstimeout; + u32 ah_globaltxtimeout; + u8 ah_gBeaconRate; + u32 ah_gpioSelect; + u32 ah_polarity; + u32 ah_gpioBit; + bool ah_eepEnabled; + u32 ah_procPhyErr; + bool ah_hasHwPhyCounters; + u32 ah_aniPeriod; + struct ar5416AniState *ah_curani; + struct ar5416AniState ah_ani[255]; + int ah_totalSizeDesired[5]; + int ah_coarseHigh[5]; + int ah_coarseLow[5]; + int ah_firpwr[5]; + u16 ah_ratesArray[16]; + u32 ah_intrTxqs; + bool ah_intrMitigation; + u32 ah_cycleCount; + u32 ah_ctlBusy; + u32 ah_extBusy; + enum ath9k_ht_extprotspacing ah_extprotspacing; + u8 ah_txchainmask; + u8 ah_rxchainmask; + int ah_hwp; + void __iomem *ah_cal_mem; + enum ath9k_ani_cmd ah_ani_function; + struct ar5416IniArray ah_iniModes; + struct ar5416IniArray ah_iniCommon; + struct ar5416IniArray ah_iniBank0; + struct ar5416IniArray ah_iniBB_RfGain; + struct ar5416IniArray ah_iniBank1; + struct ar5416IniArray ah_iniBank2; + struct ar5416IniArray ah_iniBank3; + struct ar5416IniArray ah_iniBank6; + struct ar5416IniArray ah_iniBank6TPC; + struct ar5416IniArray ah_iniBank7; + struct ar5416IniArray ah_iniAddac; + struct ar5416IniArray ah_iniPcieSerdes; + struct ar5416IniArray ah_iniModesAdditional; +}; +#define AH5416(_ah) ((struct ath_hal_5416 *)(_ah)) + +#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) + +#define IS_5416_EMU(ah) \ + ((ah->ah_devid == AR5416_DEVID_EMU) || \ + (ah->ah_devid == AR5416_DEVID_EMU_PCIE)) + +#define ar5416RfDetach(ah) do { \ + if (AH5416(ah)->ah_rfHal.rfDetach != NULL) \ + AH5416(ah)->ah_rfHal.rfDetach(ah); \ + } while (0) + +#define ath9k_hw_use_flash(_ah) \ + (!(_ah->ah_flags & AH_USE_EEPROM)) + + +#define DO_DELAY(x) do { \ + if ((++(x) % 64) == 0) \ + udelay(1); \ + } while (0) + +#define REG_WRITE_ARRAY(iniarray, column, regWr) do { \ + int r; \ + for (r = 0; r < ((iniarray)->ia_rows); r++) { \ + REG_WRITE(ah, INI_RA((iniarray), (r), 0), \ + INI_RA((iniarray), r, (column))); \ + DO_DELAY(regWr); \ + } \ + } while (0) + +#define BASE_ACTIVATE_DELAY 100 +#define RTC_PLL_SETTLE_DELAY 1000 +#define COEF_SCALE_S 24 +#define HT40_CHANNEL_CENTER_SHIFT 10 + +#define ar5416CheckOpMode(_opmode) \ + ((_opmode == ATH9K_M_STA) || (_opmode == ATH9K_M_IBSS) || \ + (_opmode == ATH9K_M_HOSTAP) || (_opmode == ATH9K_M_MONITOR)) + +#define AR5416_EEPROM_MAGIC_OFFSET 0x0 + +#define AR5416_EEPROM_S 2 +#define AR5416_EEPROM_OFFSET 0x2000 +#define AR5416_EEPROM_START_ADDR \ + (AR_SREV_9100(ah)) ? 
0x1fff1000 : 0x503f1200 +#define AR5416_EEPROM_MAX 0xae0 +#define ar5416_get_eep_ver(_ahp) \ + (((_ahp)->ah_eeprom.baseEepHeader.version >> 12) & 0xF) +#define ar5416_get_eep_rev(_ahp) \ + (((_ahp)->ah_eeprom.baseEepHeader.version) & 0xFFF) +#define ar5416_get_ntxchains(_txchainmask) \ + (((_txchainmask >> 2) & 1) + \ + ((_txchainmask >> 1) & 1) + (_txchainmask & 1)) + +#define IS_EEP_MINOR_V3(_ahp) \ + (ath9k_hw_get_eeprom((_ahp), EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_3) + +#define FIXED_CCA_THRESHOLD 15 + +#ifdef __BIG_ENDIAN +#define AR5416_EEPROM_MAGIC 0x5aa5 +#else +#define AR5416_EEPROM_MAGIC 0xa55a +#endif + +#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) + +#define ATH9K_ANTENNA0_CHAINMASK 0x1 +#define ATH9K_ANTENNA1_CHAINMASK 0x2 + +#define ATH9K_NUM_DMA_DEBUG_REGS 8 +#define ATH9K_NUM_QUEUES 10 + +#define HAL_NOISE_IMMUNE_MAX 4 +#define HAL_SPUR_IMMUNE_MAX 7 +#define HAL_FIRST_STEP_MAX 2 + +#define ATH9K_ANI_OFDM_TRIG_HIGH 500 +#define ATH9K_ANI_OFDM_TRIG_LOW 200 +#define ATH9K_ANI_CCK_TRIG_HIGH 200 +#define ATH9K_ANI_CCK_TRIG_LOW 100 +#define ATH9K_ANI_NOISE_IMMUNE_LVL 4 +#define ATH9K_ANI_USE_OFDM_WEAK_SIG true +#define ATH9K_ANI_CCK_WEAK_SIG_THR false +#define ATH9K_ANI_SPUR_IMMUNE_LVL 7 +#define ATH9K_ANI_FIRSTEP_LVL 0 +#define ATH9K_ANI_RSSI_THR_HIGH 40 +#define ATH9K_ANI_RSSI_THR_LOW 7 +#define ATH9K_ANI_PERIOD 100 + +#define AR_GPIOD_MASK 0x00001FFF +#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) + +#define MAX_ANALOG_START 319 + +#define HAL_EP_RND(x, mul) \ + ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) +#define BEACON_RSSI(ahp) \ + HAL_EP_RND(ahp->ah_stats.ast_nodestats.ns_avgbrssi, \ + ATH9K_RSSI_EP_MULTIPLIER) + +#define ah_mibStats ah_stats.ast_mibstats + +#define AH_TIMEOUT 100000 +#define AH_TIME_QUANTUM 10 + +#define IS(_c, _f) (((_c)->channelFlags & _f) || 0) + +#define AR_KEYTABLE_SIZE 128 +#define POWER_UP_TIME 200000 + +#define EXT_ADDITIVE (0x8000) +#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) +#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) +#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) + +#define SUB_NUM_CTL_MODES_AT_5G_40 2 +#define SUB_NUM_CTL_MODES_AT_2G_40 3 +#define SPUR_RSSI_THRESH 40 + +#define TU_TO_USEC(_tu) ((_tu) << 10) + +#define CAB_TIMEOUT_VAL 10 +#define BEACON_TIMEOUT_VAL 10 +#define MIN_BEACON_TIMEOUT_VAL 1 +#define SLEEP_SLOP 3 + +#define CCK_SIFS_TIME 10 +#define CCK_PREAMBLE_BITS 144 +#define CCK_PLCP_BITS 48 + +#define OFDM_SIFS_TIME 16 +#define OFDM_PREAMBLE_TIME 20 +#define OFDM_PLCP_BITS 22 +#define OFDM_SYMBOL_TIME 4 + +#define OFDM_SIFS_TIME_HALF 32 +#define OFDM_PREAMBLE_TIME_HALF 40 +#define OFDM_PLCP_BITS_HALF 22 +#define OFDM_SYMBOL_TIME_HALF 8 + +#define OFDM_SIFS_TIME_QUARTER 64 +#define OFDM_PREAMBLE_TIME_QUARTER 80 +#define OFDM_PLCP_BITS_QUARTER 22 +#define OFDM_SYMBOL_TIME_QUARTER 16 + +u32 ath9k_hw_get_eeprom(struct ath_hal_5416 *ahp, + enum eeprom_param param); + +#endif diff --git a/drivers/net/wireless/ath9k/initvals.h b/drivers/net/wireless/ath9k/initvals.h new file mode 100644 index 0000000..3dd3815 --- /dev/null +++ b/drivers/net/wireless/ath9k/initvals.h @@ -0,0 +1,3146 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +static const u32 ar5416Modes_9100[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 }, + { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x00009850, 0x6de8b4e0, 0x6de8b4e0, 0x6de8b0de, 0x6de8b0de, 0x6de8b0de }, + { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, + { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, + { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 }, + { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 }, + { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, + { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 }, + { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b }, + { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 }, + { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, + { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, + { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, + { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 }, + { 0x0000c9bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 }, + { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, + { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, + { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, + { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, + { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, + { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, + { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, + { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, + { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, + { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, + { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, + { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, + { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, + { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, +}; + +static const u32 ar5416Common_9100[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 
0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00007010, 0x00000000 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x000080c0, 0x2a82301a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04800 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0x00000000 }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081d0, 0x00003210 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, 
+ { 0x00008300, 0x00000000 }, + { 0x00008304, 0x00000000 }, + { 0x00008308, 0x00000000 }, + { 0x0000830c, 0x00000000 }, + { 0x00008310, 0x00000000 }, + { 0x00008314, 0x00000000 }, + { 0x00008318, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00000000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xad848e19 }, + { 0x00009810, 0x7d14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x00009840, 0x206a002e }, + { 0x0000984c, 0x1284233c }, + { 0x00009854, 0x00000859 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x05100000 }, + { 0x0000a920, 0x05100000 }, + { 0x0000b920, 0x05100000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280b212 }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5d50e188 }, + { 0x00009958, 0x00081fff }, + { 0x0000c95c, 0x004b6a8e }, + { 0x0000c968, 0x000003ce }, + { 0x00009970, 0x190fb515 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x001fff00 }, + { 0x000099ac, 0x00000000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000200 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x000000aa }, + { 0x000099fc, 0x00001042 }, + { 0x00009b00, 0x00000000 }, + { 0x00009b04, 0x00000001 }, + { 0x00009b08, 0x00000002 }, + { 0x00009b0c, 0x00000003 }, + { 0x00009b10, 0x00000004 }, + { 0x00009b14, 0x00000005 }, + { 0x00009b18, 0x00000008 }, + { 0x00009b1c, 0x00000009 }, + { 0x00009b20, 0x0000000a }, + { 0x00009b24, 0x0000000b }, + { 0x00009b28, 0x0000000c }, + { 0x00009b2c, 0x0000000d }, + { 0x00009b30, 0x00000010 }, + { 0x00009b34, 0x00000011 }, + { 0x00009b38, 0x00000012 }, + { 0x00009b3c, 0x00000013 }, + { 0x00009b40, 0x00000014 }, + { 0x00009b44, 0x00000015 }, + { 0x00009b48, 0x00000018 }, + { 0x00009b4c, 0x00000019 }, + { 0x00009b50, 0x0000001a }, + { 0x00009b54, 0x0000001b }, + { 0x00009b58, 0x0000001c }, + { 0x00009b5c, 0x0000001d }, + { 0x00009b60, 0x00000020 }, + { 0x00009b64, 0x00000021 }, + { 0x00009b68, 0x00000022 }, + { 0x00009b6c, 0x00000023 }, + { 0x00009b70, 0x00000024 }, + { 0x00009b74, 0x00000025 }, + { 0x00009b78, 0x00000028 }, + { 0x00009b7c, 0x00000029 }, + { 0x00009b80, 0x0000002a }, + { 0x00009b84, 0x0000002b }, + { 0x00009b88, 0x0000002c }, + { 0x00009b8c, 0x0000002d }, + { 0x00009b90, 0x00000030 }, + { 0x00009b94, 0x00000031 }, + { 0x00009b98, 0x00000032 }, + { 0x00009b9c, 0x00000033 }, + { 0x00009ba0, 0x00000034 }, + { 0x00009ba4, 0x00000035 }, + { 0x00009ba8, 0x00000035 }, + { 0x00009bac, 0x00000035 }, + { 0x00009bb0, 0x00000035 }, + { 0x00009bb4, 0x00000035 }, + { 0x00009bb8, 0x00000035 }, + { 0x00009bbc, 0x00000035 }, + { 0x00009bc0, 0x00000035 }, + { 0x00009bc4, 0x00000035 }, + { 0x00009bc8, 0x00000035 }, + { 
0x00009bcc, 0x00000035 }, + { 0x00009bd0, 0x00000035 }, + { 0x00009bd4, 0x00000035 }, + { 0x00009bd8, 0x00000035 }, + { 0x00009bdc, 0x00000035 }, + { 0x00009be0, 0x00000035 }, + { 0x00009be4, 0x00000035 }, + { 0x00009be8, 0x00000035 }, + { 0x00009bec, 0x00000035 }, + { 0x00009bf0, 0x00000035 }, + { 0x00009bf4, 0x00000035 }, + { 0x00009bf8, 0x00000010 }, + { 0x00009bfc, 0x0000001a }, + { 0x0000a210, 0x40806333 }, + { 0x0000a214, 0x00106c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x018830c6 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x00000bb5 }, + { 0x0000a22c, 0x00000011 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889af }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000001 }, + { 0x0000a250, 0x0000a000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cc75380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0ebae9c6 }, + { 0x0000b26c, 0x0ebae9c6 }, + { 0x0000c26c, 0x0ebae9c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x051701ce }, + { 0x0000a338, 0x00000000 }, + { 0x0000a33c, 0x00000000 }, + { 0x0000a340, 0x00000000 }, + { 0x0000a344, 0x00000000 }, + { 0x0000a348, 0x3fffffff }, + { 0x0000a34c, 0x3fffffff }, + { 0x0000a350, 0x3fffffff }, + { 0x0000a354, 0x0003ffff }, + { 0x0000a358, 0x79a8aa1f }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x08000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, +}; + +static const u32 ar5416Bank0_9100[][2] = { + { 0x000098b0, 0x1e5795e5 }, + { 0x000098e0, 0x02008020 }, +}; + +static const u32 ar5416BB_RfGain_9100[][3] = { + { 0x00009a00, 0x00000000, 0x00000000 }, + { 0x00009a04, 0x00000040, 0x00000040 }, + { 0x00009a08, 0x00000080, 0x00000080 }, + { 0x00009a0c, 0x000001a1, 0x00000141 }, + { 0x00009a10, 0x000001e1, 0x00000181 }, + { 0x00009a14, 0x00000021, 0x000001c1 }, + { 0x00009a18, 0x00000061, 0x00000001 }, + { 0x00009a1c, 0x00000168, 0x00000041 }, + { 0x00009a20, 0x000001a8, 0x000001a8 }, + { 0x00009a24, 0x000001e8, 0x000001e8 }, + { 0x00009a28, 0x00000028, 0x00000028 }, + { 0x00009a2c, 0x00000068, 0x00000068 }, + { 0x00009a30, 0x00000189, 0x000000a8 }, + { 0x00009a34, 0x000001c9, 0x00000169 }, + { 0x00009a38, 0x00000009, 0x000001a9 }, + { 0x00009a3c, 0x00000049, 0x000001e9 }, + { 0x00009a40, 0x00000089, 0x00000029 }, + { 0x00009a44, 0x00000170, 0x00000069 }, + { 0x00009a48, 0x000001b0, 0x00000190 }, + { 0x00009a4c, 0x000001f0, 0x000001d0 }, + { 0x00009a50, 0x00000030, 0x00000010 }, + { 0x00009a54, 0x00000070, 0x00000050 }, + { 0x00009a58, 0x00000191, 0x00000090 
}, + { 0x00009a5c, 0x000001d1, 0x00000151 }, + { 0x00009a60, 0x00000011, 0x00000191 }, + { 0x00009a64, 0x00000051, 0x000001d1 }, + { 0x00009a68, 0x00000091, 0x00000011 }, + { 0x00009a6c, 0x000001b8, 0x00000051 }, + { 0x00009a70, 0x000001f8, 0x00000198 }, + { 0x00009a74, 0x00000038, 0x000001d8 }, + { 0x00009a78, 0x00000078, 0x00000018 }, + { 0x00009a7c, 0x00000199, 0x00000058 }, + { 0x00009a80, 0x000001d9, 0x00000098 }, + { 0x00009a84, 0x00000019, 0x00000159 }, + { 0x00009a88, 0x00000059, 0x00000199 }, + { 0x00009a8c, 0x00000099, 0x000001d9 }, + { 0x00009a90, 0x000000d9, 0x00000019 }, + { 0x00009a94, 0x000000f9, 0x00000059 }, + { 0x00009a98, 0x000000f9, 0x00000099 }, + { 0x00009a9c, 0x000000f9, 0x000000d9 }, + { 0x00009aa0, 0x000000f9, 0x000000f9 }, + { 0x00009aa4, 0x000000f9, 0x000000f9 }, + { 0x00009aa8, 0x000000f9, 0x000000f9 }, + { 0x00009aac, 0x000000f9, 0x000000f9 }, + { 0x00009ab0, 0x000000f9, 0x000000f9 }, + { 0x00009ab4, 0x000000f9, 0x000000f9 }, + { 0x00009ab8, 0x000000f9, 0x000000f9 }, + { 0x00009abc, 0x000000f9, 0x000000f9 }, + { 0x00009ac0, 0x000000f9, 0x000000f9 }, + { 0x00009ac4, 0x000000f9, 0x000000f9 }, + { 0x00009ac8, 0x000000f9, 0x000000f9 }, + { 0x00009acc, 0x000000f9, 0x000000f9 }, + { 0x00009ad0, 0x000000f9, 0x000000f9 }, + { 0x00009ad4, 0x000000f9, 0x000000f9 }, + { 0x00009ad8, 0x000000f9, 0x000000f9 }, + { 0x00009adc, 0x000000f9, 0x000000f9 }, + { 0x00009ae0, 0x000000f9, 0x000000f9 }, + { 0x00009ae4, 0x000000f9, 0x000000f9 }, + { 0x00009ae8, 0x000000f9, 0x000000f9 }, + { 0x00009aec, 0x000000f9, 0x000000f9 }, + { 0x00009af0, 0x000000f9, 0x000000f9 }, + { 0x00009af4, 0x000000f9, 0x000000f9 }, + { 0x00009af8, 0x000000f9, 0x000000f9 }, + { 0x00009afc, 0x000000f9, 0x000000f9 }, +}; + +static const u32 ar5416Bank1_9100[][2] = { + { 0x000098b0, 0x02108421 }, + { 0x000098ec, 0x00000008 }, +}; + +static const u32 ar5416Bank2_9100[][2] = { + { 0x000098b0, 0x0e73ff17 }, + { 0x000098e0, 0x00000420 }, +}; + +static const u32 ar5416Bank3_9100[][3] = { + { 0x000098f0, 0x01400018, 0x01c00018 }, +}; + +static const u32 ar5416Bank6_9100[][3] = { + + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x004210a2, 0x004210a2 }, + { 0x0000989c, 0x0014008f, 0x0014008f }, + { 0x0000989c, 0x00c40003, 0x00c40003 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000f1, 0x000000f1 }, + { 0x0000989c, 0x00002081, 0x00002081 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank6TPC_9100[][3] = { + { 0x0000989c, 
0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x00423022, 0x00423022 }, + { 0x0000989c, 0x201400df, 0x201400df }, + { 0x0000989c, 0x00c40002, 0x00c40002 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000e1, 0x000000e1 }, + { 0x0000989c, 0x00007081, 0x00007081 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank7_9100[][2] = { + { 0x0000989c, 0x00000500 }, + { 0x0000989c, 0x00000800 }, + { 0x000098cc, 0x0000000e }, +}; + +static const u32 ar5416Addac_9100[][2] = { + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000003 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x0000000c }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000030 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000060 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000058 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x000098c4, 0x00000000 }, +}; + +static const u32 ar5416Modes[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 
0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, + { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 }, + { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e }, + { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, + { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, + { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 }, + { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, + { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 }, + { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 }, + { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e }, + { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff }, +#ifdef TB243 + { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 }, + { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 }, + { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 }, + { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 }, +#else + { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 }, + { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 }, + { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 }, + { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 }, +#endif + { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 }, + { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, + { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, + { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, + { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, + { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, + { 0x0000a308, 0x48073e06, 
0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, + { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, + { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, + { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, + { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, + { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, + { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, + { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, + { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, +}; + +static const u32 ar5416Common[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00020010, 0x00000003 }, + { 0x00020038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 
0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00004000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x000080c0, 0x2a82301a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04800 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0x00000000 }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081d0, 0x00003210 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x00008300, 0x00000000 }, + { 0x00008304, 0x00000000 }, + { 0x00008308, 0x00000000 }, + { 0x0000830c, 0x00000000 }, + { 0x00008310, 0x00000000 }, + { 0x00008314, 0x00000000 }, + { 0x00008318, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00000000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xad848e19 }, + { 0x00009810, 0x7d14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x00009840, 0x206a01ae }, + { 0x0000984c, 0x1284233c }, + { 0x00009854, 0x00000859 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x05100000 }, 
+ { 0x0000a920, 0x05100000 }, + { 0x0000b920, 0x05100000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280b212 }, + { 0x0000994c, 0x00020028 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x0000c968, 0x000003ce }, + { 0x00009970, 0x190fb514 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x006f0000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000200 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099fc, 0x00001042 }, + { 0x00009b00, 0x00000000 }, + { 0x00009b04, 0x00000001 }, + { 0x00009b08, 0x00000002 }, + { 0x00009b0c, 0x00000003 }, + { 0x00009b10, 0x00000004 }, + { 0x00009b14, 0x00000005 }, + { 0x00009b18, 0x00000008 }, + { 0x00009b1c, 0x00000009 }, + { 0x00009b20, 0x0000000a }, + { 0x00009b24, 0x0000000b }, + { 0x00009b28, 0x0000000c }, + { 0x00009b2c, 0x0000000d }, + { 0x00009b30, 0x00000010 }, + { 0x00009b34, 0x00000011 }, + { 0x00009b38, 0x00000012 }, + { 0x00009b3c, 0x00000013 }, + { 0x00009b40, 0x00000014 }, + { 0x00009b44, 0x00000015 }, + { 0x00009b48, 0x00000018 }, + { 0x00009b4c, 0x00000019 }, + { 0x00009b50, 0x0000001a }, + { 0x00009b54, 0x0000001b }, + { 0x00009b58, 0x0000001c }, + { 0x00009b5c, 0x0000001d }, + { 0x00009b60, 0x00000020 }, + { 0x00009b64, 0x00000021 }, + { 0x00009b68, 0x00000022 }, + { 0x00009b6c, 0x00000023 }, + { 0x00009b70, 0x00000024 }, + { 0x00009b74, 0x00000025 }, + { 0x00009b78, 0x00000028 }, + { 0x00009b7c, 0x00000029 }, + { 0x00009b80, 0x0000002a }, + { 0x00009b84, 0x0000002b }, + { 0x00009b88, 0x0000002c }, + { 0x00009b8c, 0x0000002d }, + { 0x00009b90, 0x00000030 }, + { 0x00009b94, 0x00000031 }, + { 0x00009b98, 0x00000032 }, + { 0x00009b9c, 0x00000033 }, + { 0x00009ba0, 0x00000034 }, + { 0x00009ba4, 0x00000035 }, + { 0x00009ba8, 0x00000035 }, + { 0x00009bac, 0x00000035 }, + { 0x00009bb0, 0x00000035 }, + { 0x00009bb4, 0x00000035 }, + { 0x00009bb8, 0x00000035 }, + { 0x00009bbc, 0x00000035 }, + { 0x00009bc0, 0x00000035 }, + { 0x00009bc4, 0x00000035 }, + { 0x00009bc8, 0x00000035 }, + { 0x00009bcc, 0x00000035 }, + { 0x00009bd0, 0x00000035 }, + { 0x00009bd4, 0x00000035 }, + { 0x00009bd8, 0x00000035 }, + { 0x00009bdc, 0x00000035 }, + { 0x00009be0, 0x00000035 }, + { 0x00009be4, 0x00000035 }, + { 0x00009be8, 0x00000035 }, + { 0x00009bec, 0x00000035 }, + { 0x00009bf0, 0x00000035 }, + { 0x00009bf4, 0x00000035 }, + { 0x00009bf8, 0x00000010 }, + { 0x00009bfc, 0x0000001a }, + { 0x0000a210, 0x40806333 }, + { 0x0000a214, 0x00106c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x018830c6 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x001a0bb5 }, + { 0x0000a22c, 0x00000000 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889ae }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000001 }, + { 0x0000a250, 0x0000a000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cc75380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000001 }, + { 
0x0000a26c, 0x0ebae9c6 }, + { 0x0000b26c, 0x0ebae9c6 }, + { 0x0000c26c, 0x0ebae9c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x050701ce }, + { 0x0000a338, 0x00000000 }, + { 0x0000a33c, 0x00000000 }, + { 0x0000a340, 0x00000000 }, + { 0x0000a344, 0x00000000 }, + { 0x0000a348, 0x3fffffff }, + { 0x0000a34c, 0x3fffffff }, + { 0x0000a350, 0x3fffffff }, + { 0x0000a354, 0x0003ffff }, + { 0x0000a358, 0x79a8aa33 }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, +}; + +static const u32 ar5416Bank0[][2] = { + { 0x000098b0, 0x1e5795e5 }, + { 0x000098e0, 0x02008020 }, +}; + +static const u32 ar5416BB_RfGain[][3] = { + { 0x00009a00, 0x00000000, 0x00000000 }, + { 0x00009a04, 0x00000040, 0x00000040 }, + { 0x00009a08, 0x00000080, 0x00000080 }, + { 0x00009a0c, 0x000001a1, 0x00000141 }, + { 0x00009a10, 0x000001e1, 0x00000181 }, + { 0x00009a14, 0x00000021, 0x000001c1 }, + { 0x00009a18, 0x00000061, 0x00000001 }, + { 0x00009a1c, 0x00000168, 0x00000041 }, + { 0x00009a20, 0x000001a8, 0x000001a8 }, + { 0x00009a24, 0x000001e8, 0x000001e8 }, + { 0x00009a28, 0x00000028, 0x00000028 }, + { 0x00009a2c, 0x00000068, 0x00000068 }, + { 0x00009a30, 0x00000189, 0x000000a8 }, + { 0x00009a34, 0x000001c9, 0x00000169 }, + { 0x00009a38, 0x00000009, 0x000001a9 }, + { 0x00009a3c, 0x00000049, 0x000001e9 }, + { 0x00009a40, 0x00000089, 0x00000029 }, + { 0x00009a44, 0x00000170, 0x00000069 }, + { 0x00009a48, 0x000001b0, 0x00000190 }, + { 0x00009a4c, 0x000001f0, 0x000001d0 }, + { 0x00009a50, 0x00000030, 0x00000010 }, + { 0x00009a54, 0x00000070, 0x00000050 }, + { 0x00009a58, 0x00000191, 0x00000090 }, + { 0x00009a5c, 0x000001d1, 0x00000151 }, + { 0x00009a60, 0x00000011, 0x00000191 }, + { 0x00009a64, 0x00000051, 0x000001d1 }, + { 0x00009a68, 0x00000091, 0x00000011 }, + { 0x00009a6c, 0x000001b8, 0x00000051 }, + { 0x00009a70, 0x000001f8, 0x00000198 }, + { 0x00009a74, 0x00000038, 0x000001d8 }, + { 0x00009a78, 0x00000078, 0x00000018 }, + { 0x00009a7c, 0x00000199, 0x00000058 }, + { 0x00009a80, 0x000001d9, 0x00000098 }, + { 0x00009a84, 0x00000019, 0x00000159 }, + { 0x00009a88, 0x00000059, 0x00000199 }, + { 0x00009a8c, 0x00000099, 0x000001d9 }, + { 0x00009a90, 0x000000d9, 0x00000019 }, + { 0x00009a94, 0x000000f9, 0x00000059 }, + { 0x00009a98, 0x000000f9, 0x00000099 }, + { 0x00009a9c, 0x000000f9, 0x000000d9 }, + { 0x00009aa0, 0x000000f9, 0x000000f9 }, + { 0x00009aa4, 0x000000f9, 0x000000f9 }, + { 0x00009aa8, 0x000000f9, 0x000000f9 }, + { 0x00009aac, 0x000000f9, 0x000000f9 }, + { 0x00009ab0, 0x000000f9, 0x000000f9 }, + { 0x00009ab4, 0x000000f9, 0x000000f9 }, + { 0x00009ab8, 0x000000f9, 
0x000000f9 }, + { 0x00009abc, 0x000000f9, 0x000000f9 }, + { 0x00009ac0, 0x000000f9, 0x000000f9 }, + { 0x00009ac4, 0x000000f9, 0x000000f9 }, + { 0x00009ac8, 0x000000f9, 0x000000f9 }, + { 0x00009acc, 0x000000f9, 0x000000f9 }, + { 0x00009ad0, 0x000000f9, 0x000000f9 }, + { 0x00009ad4, 0x000000f9, 0x000000f9 }, + { 0x00009ad8, 0x000000f9, 0x000000f9 }, + { 0x00009adc, 0x000000f9, 0x000000f9 }, + { 0x00009ae0, 0x000000f9, 0x000000f9 }, + { 0x00009ae4, 0x000000f9, 0x000000f9 }, + { 0x00009ae8, 0x000000f9, 0x000000f9 }, + { 0x00009aec, 0x000000f9, 0x000000f9 }, + { 0x00009af0, 0x000000f9, 0x000000f9 }, + { 0x00009af4, 0x000000f9, 0x000000f9 }, + { 0x00009af8, 0x000000f9, 0x000000f9 }, + { 0x00009afc, 0x000000f9, 0x000000f9 }, +}; + +static const u32 ar5416Bank1[][2] = { + { 0x000098b0, 0x02108421}, + { 0x000098ec, 0x00000008}, +}; + +static const u32 ar5416Bank2[][2] = { + { 0x000098b0, 0x0e73ff17}, + { 0x000098e0, 0x00000420}, +}; + +static const u32 ar5416Bank3[][3] = { + { 0x000098f0, 0x01400018, 0x01c00018 }, +}; + +static const u32 ar5416Bank6[][3] = { + + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x004210a2, 0x004210a2 }, + { 0x0000989c, 0x0014000f, 0x0014000f }, + { 0x0000989c, 0x00c40002, 0x00c40002 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x000180d6, 0x000180d6 }, + { 0x0000989c, 0x0000c0aa, 0x0000c0aa }, + { 0x0000989c, 0x000000b1, 0x000000b1 }, + { 0x0000989c, 0x00002000, 0x00002000 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + + +static const u32 ar5416Bank6TPC[][3] = { + + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x00423022, 0x00423022 }, + { 0x0000989c, 0x2014008f, 0x2014008f }, + { 0x0000989c, 0x00c40002, 0x00c40002 }, + { 0x0000989c, 0x003000f2, 0x003000f2 
}, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000e1, 0x000000e1 }, + { 0x0000989c, 0x00007080, 0x00007080 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank7[][2] = { + { 0x0000989c, 0x00000500 }, + { 0x0000989c, 0x00000800 }, + { 0x000098cc, 0x0000000e }, +}; + +static const u32 ar5416Addac[][2] = { + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000010 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 0x00000015 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x000098cc, 0x00000000 }, +}; + + +static const u32 ar5416Modes_9160[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, + { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, + { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 }, + { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, + { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, + { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, + { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 }, + { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, + { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009944, 0xdfb81020, 
0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 }, + { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, + { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, + { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 }, + { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 }, + { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 }, + { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 }, + { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 }, + { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 }, + { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa }, + { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 }, + { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 }, + { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 }, + { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b }, + { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b }, + { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a }, + { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf }, + { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f }, + { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f }, + { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f }, + { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, +}; + +static const u32 ar5416Common_9160[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 
0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00007010, 0x00000020 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x000080c0, 0x2a82301a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04800 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0x00000000 }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081d0, 0x00003210 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, 
+ { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x00008300, 0x00000000 }, + { 0x00008304, 0x00000000 }, + { 0x00008308, 0x00000000 }, + { 0x0000830c, 0x00000000 }, + { 0x00008310, 0x00000000 }, + { 0x00008314, 0x00000000 }, + { 0x00008318, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00000000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xad848e19 }, + { 0x00009810, 0x7d14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x00009840, 0x206a01ae }, + { 0x0000984c, 0x1284233c }, + { 0x00009854, 0x00000859 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x05100000 }, + { 0x0000a920, 0x05100000 }, + { 0x0000b920, 0x05100000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280b212 }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5f3ca3de }, + { 0x00009958, 0x2108ecff }, + { 0x00009940, 0x00750604 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x0000c968, 0x000003ce }, + { 0x00009970, 0x190fb515 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x006f0000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000200 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099fc, 0x00001042 }, + { 0x00009b00, 0x00000000 }, + { 0x00009b04, 0x00000001 }, + { 0x00009b08, 0x00000002 }, + { 0x00009b0c, 0x00000003 }, + { 0x00009b10, 0x00000004 }, + { 0x00009b14, 0x00000005 }, + { 0x00009b18, 0x00000008 }, + { 0x00009b1c, 0x00000009 }, + { 0x00009b20, 0x0000000a }, + { 0x00009b24, 0x0000000b }, + { 0x00009b28, 0x0000000c }, + { 0x00009b2c, 0x0000000d }, + { 0x00009b30, 0x00000010 }, + { 0x00009b34, 0x00000011 }, + { 0x00009b38, 0x00000012 }, + { 0x00009b3c, 0x00000013 }, + { 0x00009b40, 0x00000014 }, + { 0x00009b44, 0x00000015 }, + { 0x00009b48, 0x00000018 }, + { 
0x00009b4c, 0x00000019 }, + { 0x00009b50, 0x0000001a }, + { 0x00009b54, 0x0000001b }, + { 0x00009b58, 0x0000001c }, + { 0x00009b5c, 0x0000001d }, + { 0x00009b60, 0x00000020 }, + { 0x00009b64, 0x00000021 }, + { 0x00009b68, 0x00000022 }, + { 0x00009b6c, 0x00000023 }, + { 0x00009b70, 0x00000024 }, + { 0x00009b74, 0x00000025 }, + { 0x00009b78, 0x00000028 }, + { 0x00009b7c, 0x00000029 }, + { 0x00009b80, 0x0000002a }, + { 0x00009b84, 0x0000002b }, + { 0x00009b88, 0x0000002c }, + { 0x00009b8c, 0x0000002d }, + { 0x00009b90, 0x00000030 }, + { 0x00009b94, 0x00000031 }, + { 0x00009b98, 0x00000032 }, + { 0x00009b9c, 0x00000033 }, + { 0x00009ba0, 0x00000034 }, + { 0x00009ba4, 0x00000035 }, + { 0x00009ba8, 0x00000035 }, + { 0x00009bac, 0x00000035 }, + { 0x00009bb0, 0x00000035 }, + { 0x00009bb4, 0x00000035 }, + { 0x00009bb8, 0x00000035 }, + { 0x00009bbc, 0x00000035 }, + { 0x00009bc0, 0x00000035 }, + { 0x00009bc4, 0x00000035 }, + { 0x00009bc8, 0x00000035 }, + { 0x00009bcc, 0x00000035 }, + { 0x00009bd0, 0x00000035 }, + { 0x00009bd4, 0x00000035 }, + { 0x00009bd8, 0x00000035 }, + { 0x00009bdc, 0x00000035 }, + { 0x00009be0, 0x00000035 }, + { 0x00009be4, 0x00000035 }, + { 0x00009be8, 0x00000035 }, + { 0x00009bec, 0x00000035 }, + { 0x00009bf0, 0x00000035 }, + { 0x00009bf4, 0x00000035 }, + { 0x00009bf8, 0x00000010 }, + { 0x00009bfc, 0x0000001a }, + { 0x0000a210, 0x40806333 }, + { 0x0000a214, 0x00106c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x018830c6 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x001a0bb5 }, + { 0x0000a22c, 0x00000000 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889af }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000001 }, + { 0x0000a250, 0x0000a000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cc75380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000001 }, + { 0x0000a26c, 0x0ebae9c6 }, + { 0x0000b26c, 0x0ebae9c6 }, + { 0x0000c26c, 0x0ebae9c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x050701ce }, + { 0x0000a338, 0x00000000 }, + { 0x0000a33c, 0x00000000 }, + { 0x0000a340, 0x00000000 }, + { 0x0000a344, 0x00000000 }, + { 0x0000a348, 0x3fffffff }, + { 0x0000a34c, 0x3fffffff }, + { 0x0000a350, 0x3fffffff }, + { 0x0000a354, 0x0003ffff }, + { 0x0000a358, 0x79a8aa33 }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, +}; + +static const u32 ar5416Bank0_9160[][2] = { + { 0x000098b0, 0x1e5795e5 }, + { 0x000098e0, 0x02008020 }, +}; + +static const u32 ar5416BB_RfGain_9160[][3] = { + { 
0x00009a00, 0x00000000, 0x00000000 }, + { 0x00009a04, 0x00000040, 0x00000040 }, + { 0x00009a08, 0x00000080, 0x00000080 }, + { 0x00009a0c, 0x000001a1, 0x00000141 }, + { 0x00009a10, 0x000001e1, 0x00000181 }, + { 0x00009a14, 0x00000021, 0x000001c1 }, + { 0x00009a18, 0x00000061, 0x00000001 }, + { 0x00009a1c, 0x00000168, 0x00000041 }, + { 0x00009a20, 0x000001a8, 0x000001a8 }, + { 0x00009a24, 0x000001e8, 0x000001e8 }, + { 0x00009a28, 0x00000028, 0x00000028 }, + { 0x00009a2c, 0x00000068, 0x00000068 }, + { 0x00009a30, 0x00000189, 0x000000a8 }, + { 0x00009a34, 0x000001c9, 0x00000169 }, + { 0x00009a38, 0x00000009, 0x000001a9 }, + { 0x00009a3c, 0x00000049, 0x000001e9 }, + { 0x00009a40, 0x00000089, 0x00000029 }, + { 0x00009a44, 0x00000170, 0x00000069 }, + { 0x00009a48, 0x000001b0, 0x00000190 }, + { 0x00009a4c, 0x000001f0, 0x000001d0 }, + { 0x00009a50, 0x00000030, 0x00000010 }, + { 0x00009a54, 0x00000070, 0x00000050 }, + { 0x00009a58, 0x00000191, 0x00000090 }, + { 0x00009a5c, 0x000001d1, 0x00000151 }, + { 0x00009a60, 0x00000011, 0x00000191 }, + { 0x00009a64, 0x00000051, 0x000001d1 }, + { 0x00009a68, 0x00000091, 0x00000011 }, + { 0x00009a6c, 0x000001b8, 0x00000051 }, + { 0x00009a70, 0x000001f8, 0x00000198 }, + { 0x00009a74, 0x00000038, 0x000001d8 }, + { 0x00009a78, 0x00000078, 0x00000018 }, + { 0x00009a7c, 0x00000199, 0x00000058 }, + { 0x00009a80, 0x000001d9, 0x00000098 }, + { 0x00009a84, 0x00000019, 0x00000159 }, + { 0x00009a88, 0x00000059, 0x00000199 }, + { 0x00009a8c, 0x00000099, 0x000001d9 }, + { 0x00009a90, 0x000000d9, 0x00000019 }, + { 0x00009a94, 0x000000f9, 0x00000059 }, + { 0x00009a98, 0x000000f9, 0x00000099 }, + { 0x00009a9c, 0x000000f9, 0x000000d9 }, + { 0x00009aa0, 0x000000f9, 0x000000f9 }, + { 0x00009aa4, 0x000000f9, 0x000000f9 }, + { 0x00009aa8, 0x000000f9, 0x000000f9 }, + { 0x00009aac, 0x000000f9, 0x000000f9 }, + { 0x00009ab0, 0x000000f9, 0x000000f9 }, + { 0x00009ab4, 0x000000f9, 0x000000f9 }, + { 0x00009ab8, 0x000000f9, 0x000000f9 }, + { 0x00009abc, 0x000000f9, 0x000000f9 }, + { 0x00009ac0, 0x000000f9, 0x000000f9 }, + { 0x00009ac4, 0x000000f9, 0x000000f9 }, + { 0x00009ac8, 0x000000f9, 0x000000f9 }, + { 0x00009acc, 0x000000f9, 0x000000f9 }, + { 0x00009ad0, 0x000000f9, 0x000000f9 }, + { 0x00009ad4, 0x000000f9, 0x000000f9 }, + { 0x00009ad8, 0x000000f9, 0x000000f9 }, + { 0x00009adc, 0x000000f9, 0x000000f9 }, + { 0x00009ae0, 0x000000f9, 0x000000f9 }, + { 0x00009ae4, 0x000000f9, 0x000000f9 }, + { 0x00009ae8, 0x000000f9, 0x000000f9 }, + { 0x00009aec, 0x000000f9, 0x000000f9 }, + { 0x00009af0, 0x000000f9, 0x000000f9 }, + { 0x00009af4, 0x000000f9, 0x000000f9 }, + { 0x00009af8, 0x000000f9, 0x000000f9 }, + { 0x00009afc, 0x000000f9, 0x000000f9 }, +}; + +static const u32 ar5416Bank1_9160[][2] = { + { 0x000098b0, 0x02108421 }, + { 0x000098ec, 0x00000008 }, +}; + +static const u32 ar5416Bank2_9160[][2] = { + { 0x000098b0, 0x0e73ff17 }, + { 0x000098e0, 0x00000420 }, +}; + +static const u32 ar5416Bank3_9160[][3] = { + { 0x000098f0, 0x01400018, 0x01c00018 }, +}; + +static const u32 ar5416Bank6_9160[][3] = { + + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + 
{ 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x004210a2, 0x004210a2 }, + { 0x0000989c, 0x0014008f, 0x0014008f }, + { 0x0000989c, 0x00c40003, 0x00c40003 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000f1, 0x000000f1 }, + { 0x0000989c, 0x00002081, 0x00002081 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank6TPC_9160[][3] = { + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00000000, 0x00000000 }, + { 0x0000989c, 0x00e00000, 0x00e00000 }, + { 0x0000989c, 0x005e0000, 0x005e0000 }, + { 0x0000989c, 0x00120000, 0x00120000 }, + { 0x0000989c, 0x00620000, 0x00620000 }, + { 0x0000989c, 0x00020000, 0x00020000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x40ff0000, 0x40ff0000 }, + { 0x0000989c, 0x005f0000, 0x005f0000 }, + { 0x0000989c, 0x00870000, 0x00870000 }, + { 0x0000989c, 0x00f90000, 0x00f90000 }, + { 0x0000989c, 0x007b0000, 0x007b0000 }, + { 0x0000989c, 0x00ff0000, 0x00ff0000 }, + { 0x0000989c, 0x00f50000, 0x00f50000 }, + { 0x0000989c, 0x00dc0000, 0x00dc0000 }, + { 0x0000989c, 0x00110000, 0x00110000 }, + { 0x0000989c, 0x006100a8, 0x006100a8 }, + { 0x0000989c, 0x00423022, 0x00423022 }, + { 0x0000989c, 0x2014008f, 0x2014008f }, + { 0x0000989c, 0x00c40002, 0x00c40002 }, + { 0x0000989c, 0x003000f2, 0x003000f2 }, + { 0x0000989c, 0x00440016, 0x00440016 }, + { 0x0000989c, 0x00410040, 0x00410040 }, + { 0x0000989c, 0x0001805e, 0x0001805e }, + { 0x0000989c, 0x0000c0ab, 0x0000c0ab }, + { 0x0000989c, 0x000000e1, 0x000000e1 }, + { 0x0000989c, 0x00007080, 0x00007080 }, + { 0x0000989c, 0x000000d4, 0x000000d4 }, + { 0x000098d0, 0x0000000f, 0x0010000f }, +}; + +static const u32 ar5416Bank7_9160[][2] = { + { 0x0000989c, 0x00000500 }, + { 0x0000989c, 0x00000800 }, + { 0x000098cc, 0x0000000e }, +}; + + +static u32 ar5416Addac_9160[][2] = { + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 0x00000018 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 0x00000019 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000003 }, + {0x0000989c, 0x00000008 }, + {0x0000989c, 0x00000000 }, + {0x000098cc, 0x00000000 }, +}; + + +static u32 ar5416Addac_91601_1[][2] = { + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 
0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 0x00000018 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x000000c0 }, + {0x0000989c, 0x00000019 }, + {0x0000989c, 0x00000004 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x0000989c, 0x00000000 }, + {0x000098cc, 0x00000000 }, +}; + + + +static const u32 ar9280Modes_9280[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801080, 0x08400840, 0x06e006e0 }, + { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 }, + { 0x00009848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 }, + { 0x0000a848, 0x00028566, 0x00028566, 0x00028563, 0x00028563, 0x00028563 }, + { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, + { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, + { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e }, + { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d20, 0x00049d20, 0x00049d18 }, + { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190, 0x5ac64190 }, + { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, + { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 }, + { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, + { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, + { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 }, + { 0x0000c9b8, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a }, + { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 
0x60f6532c }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00009a00, 0x00008184, 0x00008184, 0x00000214, 0x00000214, 0x00000214 }, + { 0x00009a04, 0x00008188, 0x00008188, 0x00000218, 0x00000218, 0x00000218 }, + { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000224, 0x00000224, 0x00000224 }, + { 0x00009a0c, 0x00008190, 0x00008190, 0x00000228, 0x00000228, 0x00000228 }, + { 0x00009a10, 0x00008194, 0x00008194, 0x0000022c, 0x0000022c, 0x0000022c }, + { 0x00009a14, 0x00008200, 0x00008200, 0x00000230, 0x00000230, 0x00000230 }, + { 0x00009a18, 0x00008204, 0x00008204, 0x000002a4, 0x000002a4, 0x000002a4 }, + { 0x00009a1c, 0x00008208, 0x00008208, 0x000002a8, 0x000002a8, 0x000002a8 }, + { 0x00009a20, 0x0000820c, 0x0000820c, 0x000002ac, 0x000002ac, 0x000002ac }, + { 0x00009a24, 0x00008210, 0x00008210, 0x000002b0, 0x000002b0, 0x000002b0 }, + { 0x00009a28, 0x00008214, 0x00008214, 0x000002b4, 0x000002b4, 0x000002b4 }, + { 0x00009a2c, 0x00008280, 0x00008280, 0x000002b8, 0x000002b8, 0x000002b8 }, + { 0x00009a30, 0x00008284, 0x00008284, 0x00000390, 0x00000390, 0x00000390 }, + { 0x00009a34, 0x00008288, 0x00008288, 0x00000394, 0x00000394, 0x00000394 }, + { 0x00009a38, 0x0000828c, 0x0000828c, 0x00000398, 0x00000398, 0x00000398 }, + { 0x00009a3c, 0x00008290, 0x00008290, 0x00000334, 0x00000334, 0x00000334 }, + { 0x00009a40, 0x00008300, 0x00008300, 0x00000338, 0x00000338, 0x00000338 }, + { 0x00009a44, 0x00008304, 0x00008304, 0x000003ac, 0x000003ac, 0x000003ac }, + { 0x00009a48, 0x00008308, 0x00008308, 0x000003b0, 0x000003b0, 0x000003b0 }, + { 0x00009a4c, 0x0000830c, 0x0000830c, 0x000003b4, 0x000003b4, 0x000003b4 }, + { 0x00009a50, 0x00008310, 0x00008310, 0x000003b8, 0x000003b8, 0x000003b8 }, + { 0x00009a54, 0x00008314, 0x00008314, 0x000003a5, 0x000003a5, 0x000003a5 }, + { 0x00009a58, 0x00008380, 0x00008380, 0x000003a9, 0x000003a9, 0x000003a9 }, + { 0x00009a5c, 0x00008384, 0x00008384, 0x000003ad, 0x000003ad, 0x000003ad }, + { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, + { 0x00009a64, 0x0000838c, 0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, + { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, + { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, + { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, + { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, + { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, + { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, + { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, + { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, + { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, + { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, + { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, + { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, + { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, + { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, + { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, + { 0x00009aa4, 0x0000ab88, 
0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, + { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, + { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, + { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, + { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, + { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, + { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, + { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, + { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, + { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, + { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c }, + { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, + { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, + { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, + { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, + { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, + { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, + { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c }, + { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 }, + { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 }, + { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 }, + { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 }, + { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 }, + { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 }, + { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 }, + { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 }, + { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 }, + { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 }, + { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 }, + { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 0x00009728 }, + { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c }, + { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 }, + { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 }, + { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 }, + { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 }, + { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 }, + { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 }, + { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 }, + { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 }, + { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad }, + { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 }, + { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 }, + { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 }, + { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 }, + { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 }, + { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 
0x000097d1 }, + { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 }, + { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 }, + { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 }, + { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca }, + { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce }, + { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 }, + { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 }, + { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 }, + { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 }, + { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb }, + { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf }, + { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 }, + { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 }, + { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 }, + { 0x0000a20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 }, + { 0x0000b20c, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019, 0x000c6019 }, + { 0x0000a21c, 0x1883800a, 
0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, + { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, + { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b }, + { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 }, + { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 }, + { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a }, + { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 }, + { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, + { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b }, + { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 }, + { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 }, + { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a }, + { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 }, + { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b }, + { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 }, + { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 }, + { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a }, + { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 }, + { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a }, + { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 }, + { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 }, + { 0x0000784c, 0x0e4f048c, 0x0e4f048c, 0x0e4d048c, 0x0e4d048c, 0x0e4d048c }, + { 0x00007854, 0x12031828, 0x12031828, 0x12035828, 0x12035828, 0x12035828 }, + { 0x00007870, 0x807ec400, 0x807ec400, 0x807ec000, 0x807ec000, 0x807ec000 }, + { 0x0000788c, 0x00010000, 0x00010000, 0x00110000, 0x00110000, 0x00110000 }, +}; + +static const u32 ar9280Common_9280[][2] = { + { 0x0000000c, 0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 
0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00004024, 0x0000001f }, + { 0x00007010, 0x00000033 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x00008070, 0x00000000 }, + { 0x000080c0, 0x2a82301a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, + { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04800 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0x00000000 }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c4, 0x00000000 }, + { 0x000081d0, 0x00003210 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 
0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x00008300, 0x00000000 }, + { 0x00008304, 0x00000000 }, + { 0x00008308, 0x00000000 }, + { 0x0000830c, 0x00000000 }, + { 0x00008310, 0x00000000 }, + { 0x00008314, 0x00000000 }, + { 0x00008318, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00000000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00008344, 0x00000000 }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xaf268e30 }, + { 0x00009810, 0xfd14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x00009840, 0x206a01ae }, + { 0x0000984c, 0x0040233c }, + { 0x0000a84c, 0x0040233c }, + { 0x00009854, 0x00000044 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x04900000 }, + { 0x0000a920, 0x04900000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280c00a }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0xe250a51e }, + { 0x00009958, 0x3388ffff }, + { 0x00009940, 0x00781204 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x0000c968, 0x000003ce }, + { 0x00009970, 0x190fb514 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x006f00c4 }, + { 0x000099b0, 0x03051000 }, + { 0x000099b4, 0x00000820 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000000 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099fc, 0x00001042 }, + { 0x0000a210, 0x4080a333 }, + { 0x0000a214, 0x40206c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x01834061 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x000003b5 }, + { 0x0000a22c, 0x23277200 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c889af }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000001 }, + { 0x0000a250, 0x001da000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cdbd380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0ebae9c6 }, + { 0x0000b26c, 0x0ebae9c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x050701ce }, + { 0x0000a358, 0x7999aa0f }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, 
+ { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, + { 0x0000a3e4, 0x00000000 }, + { 0x0000a3e8, 0x18c43433 }, + { 0x0000a3ec, 0x00f38081 }, + { 0x00007800, 0x00040000 }, + { 0x00007804, 0xdb005012 }, + { 0x00007808, 0x04924914 }, + { 0x0000780c, 0x21084210 }, + { 0x00007810, 0x6d801300 }, + { 0x00007814, 0x0019beff }, + { 0x00007818, 0x07e40000 }, + { 0x0000781c, 0x00492000 }, + { 0x00007820, 0x92492480 }, + { 0x00007824, 0x00040000 }, + { 0x00007828, 0xdb005012 }, + { 0x0000782c, 0x04924914 }, + { 0x00007830, 0x21084210 }, + { 0x00007834, 0x6d801300 }, + { 0x00007838, 0x0019beff }, + { 0x0000783c, 0x07e40000 }, + { 0x00007840, 0x00492000 }, + { 0x00007844, 0x92492480 }, + { 0x00007848, 0x00120000 }, + { 0x00007850, 0x54214514 }, + { 0x00007858, 0x92592692 }, + { 0x00007860, 0x52802000 }, + { 0x00007864, 0x0a8e370e }, + { 0x00007868, 0xc0102850 }, + { 0x0000786c, 0x812d4000 }, + { 0x00007874, 0x001b6db0 }, + { 0x00007878, 0x00376b63 }, + { 0x0000787c, 0x06db6db6 }, + { 0x00007880, 0x006d8000 }, + { 0x00007884, 0xffeffffe }, + { 0x00007888, 0xffeffffe }, + { 0x00007890, 0x00060aeb }, + { 0x00007894, 0x5a108000 }, + { 0x00007898, 0x2a850160 }, +}; + + + + +static const u32 ar9280Modes_9280_2[][6] = { + { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, + { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, + { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, + { 0x000010f0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000008 }, + { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, + { 0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b, 0x0988004f }, + { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, + { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, + { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, + { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, + { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, + { 0x00009840, 0x206a012e, 0x206a012e, 0x206a022e, 0x206a022e, 0x206a022e }, + { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, + { 0x00009848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, + { 0x0000a848, 0x00001066, 0x00001066, 0x00001063, 0x00001063, 0x00001063 }, + { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 }, + { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e }, + { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 
0x3139605e }, + { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, + { 0x0000c864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, + { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, + { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, + { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 }, + { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, + { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, + { 0x00009944, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010, 0xdfbc1010 }, + { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, + { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, + { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 }, + { 0x0000c9b8, 0x0000000f, 0x0000000f, 0x0000001c, 0x0000001c, 0x0000001c }, + { 0x0000c9bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, + { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, + { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, + { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 }, + { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, + { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, + { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00009a00, 0x00008184, 0x00008184, 0x00000290, 0x00000290, 0x00000290 }, + { 0x00009a04, 0x00008188, 0x00008188, 0x00000300, 0x00000300, 0x00000300 }, + { 0x00009a08, 0x0000818c, 0x0000818c, 0x00000304, 0x00000304, 0x00000304 }, + { 0x00009a0c, 0x00008190, 0x00008190, 0x00000308, 0x00000308, 0x00000308 }, + { 0x00009a10, 0x00008194, 0x00008194, 0x0000030c, 0x0000030c, 0x0000030c }, + { 0x00009a14, 0x00008200, 0x00008200, 0x00008000, 0x00008000, 0x00008000 }, + { 0x00009a18, 0x00008204, 0x00008204, 0x00008004, 0x00008004, 0x00008004 }, + { 0x00009a1c, 0x00008208, 0x00008208, 0x00008008, 0x00008008, 0x00008008 }, + { 0x00009a20, 0x0000820c, 0x0000820c, 0x0000800c, 0x0000800c, 0x0000800c }, + { 0x00009a24, 0x00008210, 0x00008210, 0x00008080, 0x00008080, 0x00008080 }, + { 0x00009a28, 0x00008214, 0x00008214, 0x00008084, 0x00008084, 0x00008084 }, + { 0x00009a2c, 0x00008280, 0x00008280, 0x00008088, 0x00008088, 0x00008088 }, + { 0x00009a30, 0x00008284, 0x00008284, 0x0000808c, 0x0000808c, 0x0000808c }, + { 0x00009a34, 0x00008288, 0x00008288, 0x00008100, 0x00008100, 0x00008100 }, + { 0x00009a38, 0x0000828c, 0x0000828c, 0x00008104, 0x00008104, 0x00008104 }, + { 0x00009a3c, 0x00008290, 0x00008290, 0x00008108, 0x00008108, 0x00008108 }, + { 0x00009a40, 0x00008300, 0x00008300, 0x0000810c, 0x0000810c, 0x0000810c }, + { 0x00009a44, 0x00008304, 0x00008304, 0x00008110, 0x00008110, 0x00008110 }, + { 0x00009a48, 0x00008308, 0x00008308, 0x00008114, 0x00008114, 0x00008114 }, + { 0x00009a4c, 0x0000830c, 0x0000830c, 0x00008180, 0x00008180, 0x00008180 }, + { 0x00009a50, 0x00008310, 0x00008310, 0x00008184, 0x00008184, 0x00008184 }, + { 0x00009a54, 0x00008314, 0x00008314, 0x00008188, 0x00008188, 0x00008188 }, + { 0x00009a58, 0x00008380, 0x00008380, 0x0000818c, 0x0000818c, 0x0000818c }, + { 0x00009a5c, 0x00008384, 0x00008384, 0x00008190, 0x00008190, 0x00008190 }, + { 0x00009a60, 0x00008388, 0x00008388, 0x00008194, 0x00008194, 0x00008194 }, + { 0x00009a64, 0x0000838c, 
0x0000838c, 0x000081a0, 0x000081a0, 0x000081a0 }, + { 0x00009a68, 0x00008390, 0x00008390, 0x0000820c, 0x0000820c, 0x0000820c }, + { 0x00009a6c, 0x00008394, 0x00008394, 0x000081a8, 0x000081a8, 0x000081a8 }, + { 0x00009a70, 0x0000a380, 0x0000a380, 0x00008284, 0x00008284, 0x00008284 }, + { 0x00009a74, 0x0000a384, 0x0000a384, 0x00008288, 0x00008288, 0x00008288 }, + { 0x00009a78, 0x0000a388, 0x0000a388, 0x00008224, 0x00008224, 0x00008224 }, + { 0x00009a7c, 0x0000a38c, 0x0000a38c, 0x00008290, 0x00008290, 0x00008290 }, + { 0x00009a80, 0x0000a390, 0x0000a390, 0x00008300, 0x00008300, 0x00008300 }, + { 0x00009a84, 0x0000a394, 0x0000a394, 0x00008304, 0x00008304, 0x00008304 }, + { 0x00009a88, 0x0000a780, 0x0000a780, 0x00008308, 0x00008308, 0x00008308 }, + { 0x00009a8c, 0x0000a784, 0x0000a784, 0x0000830c, 0x0000830c, 0x0000830c }, + { 0x00009a90, 0x0000a788, 0x0000a788, 0x00008380, 0x00008380, 0x00008380 }, + { 0x00009a94, 0x0000a78c, 0x0000a78c, 0x00008384, 0x00008384, 0x00008384 }, + { 0x00009a98, 0x0000a790, 0x0000a790, 0x00008700, 0x00008700, 0x00008700 }, + { 0x00009a9c, 0x0000a794, 0x0000a794, 0x00008704, 0x00008704, 0x00008704 }, + { 0x00009aa0, 0x0000ab84, 0x0000ab84, 0x00008708, 0x00008708, 0x00008708 }, + { 0x00009aa4, 0x0000ab88, 0x0000ab88, 0x0000870c, 0x0000870c, 0x0000870c }, + { 0x00009aa8, 0x0000ab8c, 0x0000ab8c, 0x00008780, 0x00008780, 0x00008780 }, + { 0x00009aac, 0x0000ab90, 0x0000ab90, 0x00008784, 0x00008784, 0x00008784 }, + { 0x00009ab0, 0x0000ab94, 0x0000ab94, 0x00008b00, 0x00008b00, 0x00008b00 }, + { 0x00009ab4, 0x0000af80, 0x0000af80, 0x00008b04, 0x00008b04, 0x00008b04 }, + { 0x00009ab8, 0x0000af84, 0x0000af84, 0x00008b08, 0x00008b08, 0x00008b08 }, + { 0x00009abc, 0x0000af88, 0x0000af88, 0x00008b0c, 0x00008b0c, 0x00008b0c }, + { 0x00009ac0, 0x0000af8c, 0x0000af8c, 0x00008b80, 0x00008b80, 0x00008b80 }, + { 0x00009ac4, 0x0000af90, 0x0000af90, 0x00008b84, 0x00008b84, 0x00008b84 }, + { 0x00009ac8, 0x0000af94, 0x0000af94, 0x00008b88, 0x00008b88, 0x00008b88 }, + { 0x00009acc, 0x0000b380, 0x0000b380, 0x00008b8c, 0x00008b8c, 0x00008b8c }, + { 0x00009ad0, 0x0000b384, 0x0000b384, 0x00008b90, 0x00008b90, 0x00008b90 }, + { 0x00009ad4, 0x0000b388, 0x0000b388, 0x00008f80, 0x00008f80, 0x00008f80 }, + { 0x00009ad8, 0x0000b38c, 0x0000b38c, 0x00008f84, 0x00008f84, 0x00008f84 }, + { 0x00009adc, 0x0000b390, 0x0000b390, 0x00008f88, 0x00008f88, 0x00008f88 }, + { 0x00009ae0, 0x0000b394, 0x0000b394, 0x00008f8c, 0x00008f8c, 0x00008f8c }, + { 0x00009ae4, 0x0000b398, 0x0000b398, 0x00008f90, 0x00008f90, 0x00008f90 }, + { 0x00009ae8, 0x0000b780, 0x0000b780, 0x0000930c, 0x0000930c, 0x0000930c }, + { 0x00009aec, 0x0000b784, 0x0000b784, 0x00009310, 0x00009310, 0x00009310 }, + { 0x00009af0, 0x0000b788, 0x0000b788, 0x00009384, 0x00009384, 0x00009384 }, + { 0x00009af4, 0x0000b78c, 0x0000b78c, 0x00009388, 0x00009388, 0x00009388 }, + { 0x00009af8, 0x0000b790, 0x0000b790, 0x00009324, 0x00009324, 0x00009324 }, + { 0x00009afc, 0x0000b794, 0x0000b794, 0x00009704, 0x00009704, 0x00009704 }, + { 0x00009b00, 0x0000b798, 0x0000b798, 0x000096a4, 0x000096a4, 0x000096a4 }, + { 0x00009b04, 0x0000d784, 0x0000d784, 0x000096a8, 0x000096a8, 0x000096a8 }, + { 0x00009b08, 0x0000d788, 0x0000d788, 0x00009710, 0x00009710, 0x00009710 }, + { 0x00009b0c, 0x0000d78c, 0x0000d78c, 0x00009714, 0x00009714, 0x00009714 }, + { 0x00009b10, 0x0000d790, 0x0000d790, 0x00009720, 0x00009720, 0x00009720 }, + { 0x00009b14, 0x0000f780, 0x0000f780, 0x00009724, 0x00009724, 0x00009724 }, + { 0x00009b18, 0x0000f784, 0x0000f784, 0x00009728, 0x00009728, 
0x00009728 }, + { 0x00009b1c, 0x0000f788, 0x0000f788, 0x0000972c, 0x0000972c, 0x0000972c }, + { 0x00009b20, 0x0000f78c, 0x0000f78c, 0x000097a0, 0x000097a0, 0x000097a0 }, + { 0x00009b24, 0x0000f790, 0x0000f790, 0x000097a4, 0x000097a4, 0x000097a4 }, + { 0x00009b28, 0x0000f794, 0x0000f794, 0x000097a8, 0x000097a8, 0x000097a8 }, + { 0x00009b2c, 0x0000f7a4, 0x0000f7a4, 0x000097b0, 0x000097b0, 0x000097b0 }, + { 0x00009b30, 0x0000f7a8, 0x0000f7a8, 0x000097b4, 0x000097b4, 0x000097b4 }, + { 0x00009b34, 0x0000f7ac, 0x0000f7ac, 0x000097b8, 0x000097b8, 0x000097b8 }, + { 0x00009b38, 0x0000f7b0, 0x0000f7b0, 0x000097a5, 0x000097a5, 0x000097a5 }, + { 0x00009b3c, 0x0000f7b4, 0x0000f7b4, 0x000097a9, 0x000097a9, 0x000097a9 }, + { 0x00009b40, 0x0000f7a1, 0x0000f7a1, 0x000097ad, 0x000097ad, 0x000097ad }, + { 0x00009b44, 0x0000f7a5, 0x0000f7a5, 0x000097b1, 0x000097b1, 0x000097b1 }, + { 0x00009b48, 0x0000f7a9, 0x0000f7a9, 0x000097b5, 0x000097b5, 0x000097b5 }, + { 0x00009b4c, 0x0000f7ad, 0x0000f7ad, 0x000097b9, 0x000097b9, 0x000097b9 }, + { 0x00009b50, 0x0000f7b1, 0x0000f7b1, 0x000097c5, 0x000097c5, 0x000097c5 }, + { 0x00009b54, 0x0000f7b5, 0x0000f7b5, 0x000097c9, 0x000097c9, 0x000097c9 }, + { 0x00009b58, 0x0000f7c5, 0x0000f7c5, 0x000097d1, 0x000097d1, 0x000097d1 }, + { 0x00009b5c, 0x0000f7c9, 0x0000f7c9, 0x000097d5, 0x000097d5, 0x000097d5 }, + { 0x00009b60, 0x0000f7cd, 0x0000f7cd, 0x000097d9, 0x000097d9, 0x000097d9 }, + { 0x00009b64, 0x0000f7d1, 0x0000f7d1, 0x000097c6, 0x000097c6, 0x000097c6 }, + { 0x00009b68, 0x0000f7d5, 0x0000f7d5, 0x000097ca, 0x000097ca, 0x000097ca }, + { 0x00009b6c, 0x0000f7c2, 0x0000f7c2, 0x000097ce, 0x000097ce, 0x000097ce }, + { 0x00009b70, 0x0000f7c6, 0x0000f7c6, 0x000097d2, 0x000097d2, 0x000097d2 }, + { 0x00009b74, 0x0000f7ca, 0x0000f7ca, 0x000097d6, 0x000097d6, 0x000097d6 }, + { 0x00009b78, 0x0000f7ce, 0x0000f7ce, 0x000097c3, 0x000097c3, 0x000097c3 }, + { 0x00009b7c, 0x0000f7d2, 0x0000f7d2, 0x000097c7, 0x000097c7, 0x000097c7 }, + { 0x00009b80, 0x0000f7d6, 0x0000f7d6, 0x000097cb, 0x000097cb, 0x000097cb }, + { 0x00009b84, 0x0000f7c3, 0x0000f7c3, 0x000097cf, 0x000097cf, 0x000097cf }, + { 0x00009b88, 0x0000f7c7, 0x0000f7c7, 0x000097d7, 0x000097d7, 0x000097d7 }, + { 0x00009b8c, 0x0000f7cb, 0x0000f7cb, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b90, 0x0000f7d3, 0x0000f7d3, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b94, 0x0000f7d7, 0x0000f7d7, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b98, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009b9c, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009ba8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bac, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bb8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bbc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bc8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bcc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd0, 0x0000f7db, 
0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bd8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bdc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009be8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bec, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf0, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf4, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bf8, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x00009bfc, 0x0000f7db, 0x0000f7db, 0x000097db, 0x000097db, 0x000097db }, + { 0x0000a204, 0x00000444, 0x00000444, 0x00000444, 0x00000444, 0x00000444 }, + { 0x0000a208, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788, 0x803e4788 }, + { 0x0000a20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, + { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, + { 0x0000a21c, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a, 0x1463800a }, + { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, + { 0x0000a250, 0x001ff000, 0x001ff000, 0x001da000, 0x001da000, 0x001da000 }, + { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 }, + { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, + { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, + { 0x0000a30c, 0x0000a006, 0x0000a006, 0x0000b00b, 0x0000b00b, 0x0000b00b }, + { 0x0000a310, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012, 0x0000e012 }, + { 0x0000a314, 0x00011014, 0x00011014, 0x00012048, 0x00012048, 0x00012048 }, + { 0x0000a318, 0x0001504a, 0x0001504a, 0x0001604a, 0x0001604a, 0x0001604a }, + { 0x0000a31c, 0x0001904c, 0x0001904c, 0x0001a211, 0x0001a211, 0x0001a211 }, + { 0x0000a320, 0x0001c04e, 0x0001c04e, 0x0001e213, 0x0001e213, 0x0001e213 }, + { 0x0000a324, 0x00020092, 0x00020092, 0x0002121b, 0x0002121b, 0x0002121b }, + { 0x0000a328, 0x0002410a, 0x0002410a, 0x00024412, 0x00024412, 0x00024412 }, + { 0x0000a32c, 0x0002710c, 0x0002710c, 0x00028414, 0x00028414, 0x00028414 }, + { 0x0000a330, 0x0002b18b, 0x0002b18b, 0x0002b44a, 0x0002b44a, 0x0002b44a }, + { 0x0000a334, 0x0002e1cc, 0x0002e1cc, 0x00030649, 0x00030649, 0x00030649 }, + { 0x0000a338, 0x000321ec, 0x000321ec, 0x0003364b, 0x0003364b, 0x0003364b }, + { 0x0000a33c, 0x000321ec, 0x000321ec, 0x00038a49, 0x00038a49, 0x00038a49 }, + { 0x0000a340, 0x000321ec, 0x000321ec, 0x0003be48, 0x0003be48, 0x0003be48 }, + { 0x0000a344, 0x000321ec, 0x000321ec, 0x0003ee4a, 0x0003ee4a, 0x0003ee4a }, + { 0x0000a348, 0x000321ec, 0x000321ec, 0x00042e88, 0x00042e88, 0x00042e88 }, + { 0x0000a34c, 0x000321ec, 0x000321ec, 0x00046e8a, 0x00046e8a, 0x00046e8a }, + { 0x0000a350, 0x000321ec, 0x000321ec, 0x00049ec9, 0x00049ec9, 0x00049ec9 }, + { 0x0000a354, 0x000321ec, 0x000321ec, 0x0004bf42, 0x0004bf42, 0x0004bf42 }, + { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, + { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, + { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 }, +}; + +static const u32 ar9280Common_9280_2[][2] = { + { 0x0000000c, 
0x00000000 }, + { 0x00000030, 0x00020015 }, + { 0x00000034, 0x00000005 }, + { 0x00000040, 0x00000000 }, + { 0x00000044, 0x00000008 }, + { 0x00000048, 0x00000008 }, + { 0x0000004c, 0x00000010 }, + { 0x00000050, 0x00000000 }, + { 0x00000054, 0x0000001f }, + { 0x00000800, 0x00000000 }, + { 0x00000804, 0x00000000 }, + { 0x00000808, 0x00000000 }, + { 0x0000080c, 0x00000000 }, + { 0x00000810, 0x00000000 }, + { 0x00000814, 0x00000000 }, + { 0x00000818, 0x00000000 }, + { 0x0000081c, 0x00000000 }, + { 0x00000820, 0x00000000 }, + { 0x00000824, 0x00000000 }, + { 0x00001040, 0x002ffc0f }, + { 0x00001044, 0x002ffc0f }, + { 0x00001048, 0x002ffc0f }, + { 0x0000104c, 0x002ffc0f }, + { 0x00001050, 0x002ffc0f }, + { 0x00001054, 0x002ffc0f }, + { 0x00001058, 0x002ffc0f }, + { 0x0000105c, 0x002ffc0f }, + { 0x00001060, 0x002ffc0f }, + { 0x00001064, 0x002ffc0f }, + { 0x00001230, 0x00000000 }, + { 0x00001270, 0x00000000 }, + { 0x00001038, 0x00000000 }, + { 0x00001078, 0x00000000 }, + { 0x000010b8, 0x00000000 }, + { 0x000010f8, 0x00000000 }, + { 0x00001138, 0x00000000 }, + { 0x00001178, 0x00000000 }, + { 0x000011b8, 0x00000000 }, + { 0x000011f8, 0x00000000 }, + { 0x00001238, 0x00000000 }, + { 0x00001278, 0x00000000 }, + { 0x000012b8, 0x00000000 }, + { 0x000012f8, 0x00000000 }, + { 0x00001338, 0x00000000 }, + { 0x00001378, 0x00000000 }, + { 0x000013b8, 0x00000000 }, + { 0x000013f8, 0x00000000 }, + { 0x00001438, 0x00000000 }, + { 0x00001478, 0x00000000 }, + { 0x000014b8, 0x00000000 }, + { 0x000014f8, 0x00000000 }, + { 0x00001538, 0x00000000 }, + { 0x00001578, 0x00000000 }, + { 0x000015b8, 0x00000000 }, + { 0x000015f8, 0x00000000 }, + { 0x00001638, 0x00000000 }, + { 0x00001678, 0x00000000 }, + { 0x000016b8, 0x00000000 }, + { 0x000016f8, 0x00000000 }, + { 0x00001738, 0x00000000 }, + { 0x00001778, 0x00000000 }, + { 0x000017b8, 0x00000000 }, + { 0x000017f8, 0x00000000 }, + { 0x0000103c, 0x00000000 }, + { 0x0000107c, 0x00000000 }, + { 0x000010bc, 0x00000000 }, + { 0x000010fc, 0x00000000 }, + { 0x0000113c, 0x00000000 }, + { 0x0000117c, 0x00000000 }, + { 0x000011bc, 0x00000000 }, + { 0x000011fc, 0x00000000 }, + { 0x0000123c, 0x00000000 }, + { 0x0000127c, 0x00000000 }, + { 0x000012bc, 0x00000000 }, + { 0x000012fc, 0x00000000 }, + { 0x0000133c, 0x00000000 }, + { 0x0000137c, 0x00000000 }, + { 0x000013bc, 0x00000000 }, + { 0x000013fc, 0x00000000 }, + { 0x0000143c, 0x00000000 }, + { 0x0000147c, 0x00000000 }, + { 0x00004030, 0x00000002 }, + { 0x0000403c, 0x00000002 }, + { 0x00004024, 0x0000001f }, + { 0x00004060, 0x00000000 }, + { 0x00004064, 0x00000000 }, + { 0x00007010, 0x00000033 }, + { 0x00007034, 0x00000002 }, + { 0x00007038, 0x000004c2 }, + { 0x00008004, 0x00000000 }, + { 0x00008008, 0x00000000 }, + { 0x0000800c, 0x00000000 }, + { 0x00008018, 0x00000700 }, + { 0x00008020, 0x00000000 }, + { 0x00008038, 0x00000000 }, + { 0x0000803c, 0x00000000 }, + { 0x00008048, 0x40000000 }, + { 0x00008054, 0x00000000 }, + { 0x00008058, 0x00000000 }, + { 0x0000805c, 0x000fc78f }, + { 0x00008060, 0x0000000f }, + { 0x00008064, 0x00000000 }, + { 0x00008070, 0x00000000 }, + { 0x000080c0, 0x2a80001a }, + { 0x000080c4, 0x05dc01e0 }, + { 0x000080c8, 0x1f402710 }, + { 0x000080cc, 0x01f40000 }, + { 0x000080d0, 0x00001e00 }, + { 0x000080d4, 0x00000000 }, + { 0x000080d8, 0x00400000 }, + { 0x000080e0, 0xffffffff }, + { 0x000080e4, 0x0000ffff }, + { 0x000080e8, 0x003f3f3f }, + { 0x000080ec, 0x00000000 }, + { 0x000080f0, 0x00000000 }, + { 0x000080f4, 0x00000000 }, + { 0x000080f8, 0x00000000 }, + { 0x000080fc, 0x00020000 }, + { 0x00008100, 0x00020000 }, 
+ { 0x00008104, 0x00000001 }, + { 0x00008108, 0x00000052 }, + { 0x0000810c, 0x00000000 }, + { 0x00008110, 0x00000168 }, + { 0x00008118, 0x000100aa }, + { 0x0000811c, 0x00003210 }, + { 0x00008120, 0x08f04800 }, + { 0x00008124, 0x00000000 }, + { 0x00008128, 0x00000000 }, + { 0x0000812c, 0x00000000 }, + { 0x00008130, 0x00000000 }, + { 0x00008134, 0x00000000 }, + { 0x00008138, 0x00000000 }, + { 0x0000813c, 0x00000000 }, + { 0x00008144, 0x00000000 }, + { 0x00008168, 0x00000000 }, + { 0x0000816c, 0x00000000 }, + { 0x00008170, 0x32143320 }, + { 0x00008174, 0xfaa4fa50 }, + { 0x00008178, 0x00000100 }, + { 0x0000817c, 0x00000000 }, + { 0x000081c0, 0x00000000 }, + { 0x000081d0, 0x00003210 }, + { 0x000081ec, 0x00000000 }, + { 0x000081f0, 0x00000000 }, + { 0x000081f4, 0x00000000 }, + { 0x000081f8, 0x00000000 }, + { 0x000081fc, 0x00000000 }, + { 0x00008200, 0x00000000 }, + { 0x00008204, 0x00000000 }, + { 0x00008208, 0x00000000 }, + { 0x0000820c, 0x00000000 }, + { 0x00008210, 0x00000000 }, + { 0x00008214, 0x00000000 }, + { 0x00008218, 0x00000000 }, + { 0x0000821c, 0x00000000 }, + { 0x00008220, 0x00000000 }, + { 0x00008224, 0x00000000 }, + { 0x00008228, 0x00000000 }, + { 0x0000822c, 0x00000000 }, + { 0x00008230, 0x00000000 }, + { 0x00008234, 0x00000000 }, + { 0x00008238, 0x00000000 }, + { 0x0000823c, 0x00000000 }, + { 0x00008240, 0x00100000 }, + { 0x00008244, 0x0010f400 }, + { 0x00008248, 0x00000100 }, + { 0x0000824c, 0x0001e800 }, + { 0x00008250, 0x00000000 }, + { 0x00008254, 0x00000000 }, + { 0x00008258, 0x00000000 }, + { 0x0000825c, 0x400000ff }, + { 0x00008260, 0x00080922 }, + { 0x00008270, 0x00000000 }, + { 0x00008274, 0x40000000 }, + { 0x00008278, 0x003e4180 }, + { 0x0000827c, 0x00000000 }, + { 0x00008284, 0x0000002c }, + { 0x00008288, 0x0000002c }, + { 0x0000828c, 0x00000000 }, + { 0x00008294, 0x00000000 }, + { 0x00008298, 0x00000000 }, + { 0x0000829c, 0x00000000 }, + { 0x00008300, 0x00000040 }, + { 0x00008314, 0x00000000 }, + { 0x00008328, 0x00000000 }, + { 0x0000832c, 0x00000007 }, + { 0x00008330, 0x00000302 }, + { 0x00008334, 0x00000e00 }, + { 0x00008338, 0x00000000 }, + { 0x0000833c, 0x00000000 }, + { 0x00008340, 0x000107ff }, + { 0x00008344, 0x00581043 }, + { 0x00009808, 0x00000000 }, + { 0x0000980c, 0xafa68e30 }, + { 0x00009810, 0xfd14e000 }, + { 0x00009814, 0x9c0a9f6b }, + { 0x0000981c, 0x00000000 }, + { 0x0000982c, 0x0000a000 }, + { 0x00009830, 0x00000000 }, + { 0x0000983c, 0x00200400 }, + { 0x0000984c, 0x0040233c }, + { 0x0000a84c, 0x0040233c }, + { 0x00009854, 0x00000044 }, + { 0x00009900, 0x00000000 }, + { 0x00009904, 0x00000000 }, + { 0x00009908, 0x00000000 }, + { 0x0000990c, 0x00000000 }, + { 0x00009910, 0x01002310 }, + { 0x0000991c, 0x10000fff }, + { 0x00009920, 0x04900000 }, + { 0x0000a920, 0x04900000 }, + { 0x00009928, 0x00000001 }, + { 0x0000992c, 0x00000004 }, + { 0x00009934, 0x1e1f2022 }, + { 0x00009938, 0x0a0b0c0d }, + { 0x0000993c, 0x00000000 }, + { 0x00009948, 0x9280c00a }, + { 0x0000994c, 0x00020028 }, + { 0x00009954, 0x5f3ca3de }, + { 0x00009958, 0x2108ecff }, + { 0x00009940, 0x14750604 }, + { 0x0000c95c, 0x004b6a8e }, + { 0x0000c968, 0x000003ce }, + { 0x00009970, 0x190fb515 }, + { 0x00009974, 0x00000000 }, + { 0x00009978, 0x00000001 }, + { 0x0000997c, 0x00000000 }, + { 0x00009980, 0x00000000 }, + { 0x00009984, 0x00000000 }, + { 0x00009988, 0x00000000 }, + { 0x0000998c, 0x00000000 }, + { 0x00009990, 0x00000000 }, + { 0x00009994, 0x00000000 }, + { 0x00009998, 0x00000000 }, + { 0x0000999c, 0x00000000 }, + { 0x000099a0, 0x00000000 }, + { 0x000099a4, 0x00000001 }, + { 
0x000099a8, 0x201fff00 }, + { 0x000099ac, 0x006f0000 }, + { 0x000099b0, 0x03051000 }, + { 0x000099b4, 0x00000820 }, + { 0x000099dc, 0x00000000 }, + { 0x000099e0, 0x00000000 }, + { 0x000099e4, 0xaaaaaaaa }, + { 0x000099e8, 0x3c466478 }, + { 0x000099ec, 0x0cc80caa }, + { 0x000099f0, 0x00000000 }, + { 0x000099fc, 0x00001042 }, + { 0x0000a210, 0x4080a333 }, + { 0x0000a214, 0x40206c10 }, + { 0x0000a218, 0x009c4060 }, + { 0x0000a220, 0x01834061 }, + { 0x0000a224, 0x00000400 }, + { 0x0000a228, 0x000003b5 }, + { 0x0000a22c, 0x233f71c0 }, + { 0x0000a234, 0x20202020 }, + { 0x0000a238, 0x20202020 }, + { 0x0000a23c, 0x13c88000 }, + { 0x0000a240, 0x38490a20 }, + { 0x0000a244, 0x00007bb6 }, + { 0x0000a248, 0x0fff3ffc }, + { 0x0000a24c, 0x00000000 }, + { 0x0000a254, 0x00000000 }, + { 0x0000a258, 0x0cdbd380 }, + { 0x0000a25c, 0x0f0f0f01 }, + { 0x0000a260, 0xdfa91f01 }, + { 0x0000a268, 0x00000000 }, + { 0x0000a26c, 0x0ebae9c6 }, + { 0x0000b26c, 0x0ebae9c6 }, + { 0x0000d270, 0x00820820 }, + { 0x0000a278, 0x1ce739ce }, + { 0x0000a27c, 0x050701ce }, + { 0x0000d35c, 0x07ffffef }, + { 0x0000d360, 0x0fffffe7 }, + { 0x0000d364, 0x17ffffe5 }, + { 0x0000d368, 0x1fffffe4 }, + { 0x0000d36c, 0x37ffffe3 }, + { 0x0000d370, 0x3fffffe3 }, + { 0x0000d374, 0x57ffffe3 }, + { 0x0000d378, 0x5fffffe2 }, + { 0x0000d37c, 0x7fffffe2 }, + { 0x0000d380, 0x7f3c7bba }, + { 0x0000d384, 0xf3307ff0 }, + { 0x0000a388, 0x0c000000 }, + { 0x0000a38c, 0x20202020 }, + { 0x0000a390, 0x20202020 }, + { 0x0000a394, 0x1ce739ce }, + { 0x0000a398, 0x000001ce }, + { 0x0000a39c, 0x00000001 }, + { 0x0000a3a0, 0x00000000 }, + { 0x0000a3a4, 0x00000000 }, + { 0x0000a3a8, 0x00000000 }, + { 0x0000a3ac, 0x00000000 }, + { 0x0000a3b0, 0x00000000 }, + { 0x0000a3b4, 0x00000000 }, + { 0x0000a3b8, 0x00000000 }, + { 0x0000a3bc, 0x00000000 }, + { 0x0000a3c0, 0x00000000 }, + { 0x0000a3c4, 0x00000000 }, + { 0x0000a3c8, 0x00000246 }, + { 0x0000a3cc, 0x20202020 }, + { 0x0000a3d0, 0x20202020 }, + { 0x0000a3d4, 0x20202020 }, + { 0x0000a3dc, 0x1ce739ce }, + { 0x0000a3e0, 0x000001ce }, + { 0x0000a3e4, 0x00000000 }, + { 0x0000a3e8, 0x18c43433 }, + { 0x0000a3ec, 0x00f70081 }, + { 0x00007800, 0x00040000 }, + { 0x00007804, 0xdb005012 }, + { 0x00007808, 0x04924914 }, + { 0x0000780c, 0x21084210 }, + { 0x00007810, 0x6d801300 }, + { 0x00007814, 0x0019beff }, + { 0x00007818, 0x07e41000 }, + { 0x0000781c, 0x00392000 }, + { 0x00007820, 0x92592480 }, + { 0x00007824, 0x00040000 }, + { 0x00007828, 0xdb005012 }, + { 0x0000782c, 0x04924914 }, + { 0x00007830, 0x21084210 }, + { 0x00007834, 0x6d801300 }, + { 0x00007838, 0x0019beff }, + { 0x0000783c, 0x07e40000 }, + { 0x00007840, 0x00392000 }, + { 0x00007844, 0x92592480 }, + { 0x00007848, 0x00100000 }, + { 0x0000784c, 0x773f0567 }, + { 0x00007850, 0x54214514 }, + { 0x00007854, 0x12035828 }, + { 0x00007858, 0x9259269a }, + { 0x00007860, 0x52802000 }, + { 0x00007864, 0x0a8e370e }, + { 0x00007868, 0xc0102850 }, + { 0x0000786c, 0x812d4000 }, + { 0x00007870, 0x807ec400 }, + { 0x00007874, 0x001b6db0 }, + { 0x00007878, 0x00376b63 }, + { 0x0000787c, 0x06db6db6 }, + { 0x00007880, 0x006d8000 }, + { 0x00007884, 0xffeffffe }, + { 0x00007888, 0xffeffffe }, + { 0x0000788c, 0x00010000 }, + { 0x00007890, 0x02060aeb }, + { 0x00007898, 0x2a850160 }, +}; + +static const u32 ar9280Modes_fast_clock_9280_2[][3] = { + { 0x00001030, 0x00000268, 0x000004d0 }, + { 0x00001070, 0x0000018c, 0x00000318 }, + { 0x000010b0, 0x00000fd0, 0x00001fa0 }, + { 0x00008014, 0x044c044c, 0x08980898 }, + { 0x0000801c, 0x148ec02b, 0x148ec057 }, + { 0x00008318, 0x000044c0, 0x00008980 }, + { 
0x00009820, 0x02020200, 0x02020200 }, + { 0x00009824, 0x00000f0f, 0x00000f0f }, + { 0x00009828, 0x0b020001, 0x0b020001 }, + { 0x00009834, 0x00000f0f, 0x00000f0f }, + { 0x00009844, 0x03721821, 0x03721821 }, + { 0x00009914, 0x00000898, 0x00000898 }, + { 0x00009918, 0x0000000b, 0x00000016 }, + { 0x00009944, 0xdfbc1210, 0xdfbc1210 }, +}; + + + +static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = { + {0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + {0x00004040, 0x401dcffc }, + {0x00004040, 0x1aaabe40 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; + + + +static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = { + {0x00004040, 0x9248fd00 }, + {0x00004040, 0x24924924 }, + {0x00004040, 0xa8000019 }, + {0x00004040, 0x13160820 }, + {0x00004040, 0xe5980560 }, + {0x00004040, 0x401dcffd }, + {0x00004040, 0x1aaabe40 }, + {0x00004040, 0xbe105554 }, + {0x00004040, 0x00043007 }, + {0x00004044, 0x00000000 }, +}; diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c new file mode 100644 index 0000000..2888778 --- /dev/null +++ b/drivers/net/wireless/ath9k/main.c @@ -0,0 +1,1470 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* mac80211 and PCI callbacks */ + +#include <linux/nl80211.h> +#include "core.h" + +#define ATH_PCI_VERSION "0.1" + +#define IEEE80211_HTCAP_MAXRXAMPDU_FACTOR 13 +#define IEEE80211_ACTION_CAT_HT 7 +#define IEEE80211_ACTION_HT_TXCHWIDTH 0 + +static char *dev_info = "ath9k"; + +MODULE_AUTHOR("Atheros Communications"); +MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); +MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards"); +MODULE_LICENSE("Dual BSD/GPL"); + +static struct pci_device_id ath_pci_id_table[] __devinitdata = { + { PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */ + { PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */ + { PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */ + { PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */ + { PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */ + { 0 } +}; + +static int ath_get_channel(struct ath_softc *sc, + struct ieee80211_channel *chan) +{ + int i; + + for (i = 0; i < sc->sc_ah->ah_nchan; i++) { + if (sc->sc_ah->ah_channels[i].channel == chan->center_freq) + return i; + } + + return -1; +} + +static u32 ath_get_extchanmode(struct ath_softc *sc, + struct ieee80211_channel *chan) +{ + u32 chanmode = 0; + u8 ext_chan_offset = sc->sc_ht_info.ext_chan_offset; + enum ath9k_ht_macmode tx_chan_width = sc->sc_ht_info.tx_chan_width; + + switch (chan->band) { + case IEEE80211_BAND_2GHZ: + if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) && + (tx_chan_width == ATH9K_HT_MACMODE_20)) + chanmode = CHANNEL_G_HT20; + if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) && + (tx_chan_width == ATH9K_HT_MACMODE_2040)) + chanmode = CHANNEL_G_HT40PLUS; + if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) && + (tx_chan_width == ATH9K_HT_MACMODE_2040)) + chanmode = CHANNEL_G_HT40MINUS; + break; + case IEEE80211_BAND_5GHZ: + if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_NONE) && + (tx_chan_width == ATH9K_HT_MACMODE_20)) + chanmode = CHANNEL_A_HT20; + if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_ABOVE) && + (tx_chan_width == ATH9K_HT_MACMODE_2040)) + chanmode = CHANNEL_A_HT40PLUS; + if ((ext_chan_offset == IEEE80211_HT_IE_CHA_SEC_BELOW) && + (tx_chan_width == ATH9K_HT_MACMODE_2040)) + chanmode = CHANNEL_A_HT40MINUS; + break; + default: + break; + } + + return chanmode; +} + + +static int ath_setkey_tkip(struct ath_softc *sc, + struct ieee80211_key_conf *key, + struct ath9k_keyval *hk, + const u8 *addr) +{ + u8 *key_rxmic = NULL; + u8 *key_txmic = NULL; + + key_txmic = key->key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY; + key_rxmic = key->key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY; + + if (addr == NULL) { + /* Group key installation */ + memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); + return ath_keyset(sc, key->keyidx, hk, addr); + } + if (!sc->sc_splitmic) { + /* + * data key goes at first index, + * the hal handles the MIC keys at index+64. + */ + memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); + memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic)); + return ath_keyset(sc, key->keyidx, hk, addr); + } + /* + * TX key goes at first index, RX key at +32. + * The hal handles the MIC keys at index+64. + */ + memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); + if (!ath_keyset(sc, key->keyidx, hk, NULL)) { + /* Txmic entry failed. No need to proceed further */ + DPRINTF(sc, ATH_DBG_KEYCACHE, + "%s Setting TX MIC Key Failed\n", __func__); + return 0; + } + + memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic)); + /* XXX delete tx key on failure? 
*/ + return ath_keyset(sc, key->keyidx+32, hk, addr); +} + +static int ath_key_config(struct ath_softc *sc, + const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct ieee80211_vif *vif; + struct ath9k_keyval hk; + const u8 *mac = NULL; + int ret = 0; + enum ieee80211_if_types opmode; + + memset(&hk, 0, sizeof(hk)); + + switch (key->alg) { + case ALG_WEP: + hk.kv_type = ATH9K_CIPHER_WEP; + break; + case ALG_TKIP: + hk.kv_type = ATH9K_CIPHER_TKIP; + break; + case ALG_CCMP: + hk.kv_type = ATH9K_CIPHER_AES_CCM; + break; + default: + return -EINVAL; + } + + hk.kv_len = key->keylen; + memcpy(hk.kv_val, key->key, key->keylen); + + if (!sc->sc_vaps[0]) + return -EIO; + + vif = sc->sc_vaps[0]->av_if_data; + opmode = vif->type; + + /* + * Strategy: + * For _M_STA mc tx, we will not setup a key at all since we never + * tx mc. + * _M_STA mc rx, we will use the keyID. + * for _M_IBSS mc tx, we will use the keyID, and no macaddr. + * for _M_IBSS mc rx, we will alloc a slot and plumb the mac of the + * peer node. BUT we will plumb a cleartext key so that we can do + * perSta default key table lookup in software. + */ + if (is_broadcast_ether_addr(addr)) { + switch (opmode) { + case IEEE80211_IF_TYPE_STA: + /* default key: could be group WPA key + * or could be static WEP key */ + mac = NULL; + break; + case IEEE80211_IF_TYPE_IBSS: + break; + case IEEE80211_IF_TYPE_AP: + break; + default: + ASSERT(0); + break; + } + } else { + mac = addr; + } + + if (key->alg == ALG_TKIP) + ret = ath_setkey_tkip(sc, key, &hk, mac); + else + ret = ath_keyset(sc, key->keyidx, &hk, mac); + + if (!ret) + return -EIO; + + sc->sc_keytype = hk.kv_type; + return 0; +} + +static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key) +{ +#define ATH_MAX_NUM_KEYS 4 + int freeslot; + + freeslot = (key->keyidx >= ATH_MAX_NUM_KEYS) ? 1 : 0; + ath_key_reset(sc, key->keyidx, freeslot); +#undef ATH_MAX_NUM_KEYS +} + +static void setup_ht_cap(struct ieee80211_ht_info *ht_info) +{ +/* Until mac80211 includes these fields */ + +#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 +#define IEEE80211_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */ +#define IEEE80211_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */ + + ht_info->ht_supported = 1; + ht_info->cap = (u16)IEEE80211_HT_CAP_SUP_WIDTH + |(u16)IEEE80211_HT_CAP_MIMO_PS + |(u16)IEEE80211_HT_CAP_SGI_40 + |(u16)IEEE80211_HT_CAP_DSSSCCK40; + + ht_info->ampdu_factor = IEEE80211_HT_CAP_MAXRXAMPDU_65536; + ht_info->ampdu_density = IEEE80211_HT_CAP_MPDUDENSITY_8; + /* setup supported mcs set */ + memset(ht_info->supp_mcs_set, 0, 16); + ht_info->supp_mcs_set[0] = 0xff; + ht_info->supp_mcs_set[1] = 0xff; + ht_info->supp_mcs_set[12] = IEEE80211_HT_CAP_MCS_TX_DEFINED; +} + +static int ath_rate2idx(struct ath_softc *sc, int rate) +{ + int i = 0, cur_band, n_rates; + struct ieee80211_hw *hw = sc->hw; + + cur_band = hw->conf.channel->band; + n_rates = sc->sbands[cur_band].n_bitrates; + + for (i = 0; i < n_rates; i++) { + if (sc->sbands[cur_band].bitrates[i].bitrate == rate) + break; + } + + /* + * NB:mac80211 validates rx rate index against the supported legacy rate + * index only (should be done against ht rates also), return the highest + * legacy rate index for rx rate which does not match any one of the + * supported basic and extended rates to make mac80211 happy. + * The following hack will be cleaned up once the issue with + * the rx rate index validation in mac80211 is fixed. 
+ */ + if (i == n_rates) + return n_rates - 1; + return i; +} + +static void ath9k_rx_prepare(struct ath_softc *sc, + struct sk_buff *skb, + struct ath_recv_status *status, + struct ieee80211_rx_status *rx_status) +{ + struct ieee80211_hw *hw = sc->hw; + struct ieee80211_channel *curchan = hw->conf.channel; + + memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); + + rx_status->mactime = status->tsf; + rx_status->band = curchan->band; + rx_status->freq = curchan->center_freq; + rx_status->noise = ATH_DEFAULT_NOISE_FLOOR; + rx_status->signal = rx_status->noise + status->rssi; + rx_status->rate_idx = ath_rate2idx(sc, (status->rateKbps / 100)); + rx_status->antenna = status->antenna; + rx_status->qual = status->rssi * 100 / 64; + + if (status->flags & ATH_RX_MIC_ERROR) + rx_status->flag |= RX_FLAG_MMIC_ERROR; + if (status->flags & ATH_RX_FCS_ERROR) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + + rx_status->flag |= RX_FLAG_TSFT; +} + +static u8 parse_mpdudensity(u8 mpdudensity) +{ + /* + * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": + * 0 for no restriction + * 1 for 1/4 us + * 2 for 1/2 us + * 3 for 1 us + * 4 for 2 us + * 5 for 4 us + * 6 for 8 us + * 7 for 16 us + */ + switch (mpdudensity) { + case 0: + return 0; + case 1: + case 2: + case 3: + /* Our lower layer calculations limit our precision to + 1 microsecond */ + return 1; + case 4: + return 2; + case 5: + return 4; + case 6: + return 8; + case 7: + return 16; + default: + return 0; + } +} + +static int ath9k_start(struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; + struct ieee80211_channel *curchan = hw->conf.channel; + int error = 0, pos; + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with " + "initial channel: %d MHz\n", __func__, curchan->center_freq); + + /* setup initial channel */ + + pos = ath_get_channel(sc, curchan); + if (pos == -1) { + DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__); + return -EINVAL; + } + + sc->sc_ah->ah_channels[pos].chanmode = + (curchan->band == IEEE80211_BAND_2GHZ) ? 
CHANNEL_G : CHANNEL_A; + + /* open ath_dev */ + error = ath_open(sc, &sc->sc_ah->ah_channels[pos]); + if (error) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unable to complete ath_open\n", __func__); + return error; + } + + ieee80211_wake_queues(hw); + return 0; +} + +static int ath9k_tx(struct ieee80211_hw *hw, + struct sk_buff *skb) +{ + struct ath_softc *sc = hw->priv; + int hdrlen, padsize; + + /* Add the padding after the header if this is not already done */ + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + if (hdrlen & 3) { + padsize = hdrlen % 4; + if (skb_headroom(skb) < padsize) + return -1; + skb_push(skb, padsize); + memmove(skb->data, skb->data + padsize, hdrlen); + } + + DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting packet, skb: %p\n", + __func__, + skb); + + if (ath_tx_start(sc, skb) != 0) { + DPRINTF(sc, ATH_DBG_XMIT, "%s: TX failed\n", __func__); + dev_kfree_skb_any(skb); + /* FIXME: Check for proper return value from ATH_DEV */ + return 0; + } + + return 0; +} + +static void ath9k_stop(struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; + int error; + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__); + + error = ath_suspend(sc); + if (error) + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: Device is no longer present\n", __func__); + + ieee80211_stop_queues(hw); +} + +static int ath9k_add_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct ath_softc *sc = hw->priv; + int error, ic_opmode = 0; + + /* Support only vap for now */ + + if (sc->sc_nvaps) + return -ENOBUFS; + + switch (conf->type) { + case IEEE80211_IF_TYPE_STA: + ic_opmode = ATH9K_M_STA; + break; + case IEEE80211_IF_TYPE_IBSS: + ic_opmode = ATH9K_M_IBSS; + break; + default: + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Only STA and IBSS are supported currently\n", + __func__); + return -EOPNOTSUPP; + } + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a VAP of type: %d\n", + __func__, + ic_opmode); + + error = ath_vap_attach(sc, 0, conf->vif, ic_opmode); + if (error) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unable to attach vap, error: %d\n", + __func__, error); + return error; + } + + return 0; +} + +static void ath9k_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_if_init_conf *conf) +{ + struct ath_softc *sc = hw->priv; + struct ath_vap *avp; + int error; + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach VAP\n", __func__); + + avp = sc->sc_vaps[0]; + if (avp == NULL) { + DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n", + __func__); + return; + } + +#ifdef CONFIG_SLOW_ANT_DIV + ath_slow_ant_div_stop(&sc->sc_antdiv); +#endif + + /* Update ratectrl */ + ath_rate_newstate(sc, avp); + + /* Reclaim beacon resources */ + if (sc->sc_opmode == ATH9K_M_HOSTAP || sc->sc_opmode == ATH9K_M_IBSS) { + ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); + ath_beacon_return(sc, avp); + } + + /* Set interrupt mask */ + sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); + ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask & ~ATH9K_INT_GLOBAL); + sc->sc_beacons = 0; + + error = ath_vap_detach(sc, 0); + if (error) + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unable to detach vap, error: %d\n", + __func__, error); +} + +static int ath9k_config(struct ieee80211_hw *hw, + struct ieee80211_conf *conf) +{ + struct ath_softc *sc = hw->priv; + struct ieee80211_channel *curchan = hw->conf.channel; + int pos; + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n", + __func__, + curchan->center_freq); + + pos = ath_get_channel(sc, curchan); + if (pos == -1) { + DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", 
__func__); + return -EINVAL; + } + + sc->sc_ah->ah_channels[pos].chanmode = + (curchan->band == IEEE80211_BAND_2GHZ) ? + CHANNEL_G : CHANNEL_A; + + if (sc->sc_curaid && hw->conf.ht_conf.ht_supported) + sc->sc_ah->ah_channels[pos].chanmode = + ath_get_extchanmode(sc, curchan); + + sc->sc_config.txpowlimit = 2 * conf->power_level; + + /* set h/w channel */ + if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0) + DPRINTF(sc, ATH_DBG_FATAL, "%s: Unable to set channel\n", + __func__); + + return 0; +} + +static int ath9k_config_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_if_conf *conf) +{ + struct ath_softc *sc = hw->priv; + struct ath_vap *avp; + u32 rfilt = 0; + int error, i; + DECLARE_MAC_BUF(mac); + + avp = sc->sc_vaps[0]; + if (avp == NULL) { + DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n", + __func__); + return -EINVAL; + } + + if ((conf->changed & IEEE80211_IFCC_BSSID) && + !is_zero_ether_addr(conf->bssid)) { + switch (vif->type) { + case IEEE80211_IF_TYPE_STA: + case IEEE80211_IF_TYPE_IBSS: + /* Update ratectrl about the new state */ + ath_rate_newstate(sc, avp); + + /* Set rx filter */ + rfilt = ath_calcrxfilter(sc); + ath9k_hw_setrxfilter(sc->sc_ah, rfilt); + + /* Set BSSID */ + memcpy(sc->sc_curbssid, conf->bssid, ETH_ALEN); + sc->sc_curaid = 0; + ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid, + sc->sc_curaid); + + /* Set aggregation protection mode parameters */ + sc->sc_config.ath_aggr_prot = 0; + + /* + * Reset our TSF so that its value is lower than the + * beacon that we are trying to catch. + * Only then hw will update its TSF register with the + * new beacon. Reset the TSF before setting the BSSID + * to avoid allowing in any frames that would update + * our TSF only to have us clear it + * immediately thereafter. + */ + ath9k_hw_reset_tsf(sc->sc_ah); + + /* Disable BMISS interrupt when we're not associated */ + ath9k_hw_set_interrupts(sc->sc_ah, + sc->sc_imask & + ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS)); + sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); + + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: RX filter 0x%x bssid %s aid 0x%x\n", + __func__, rfilt, + print_mac(mac, sc->sc_curbssid), sc->sc_curaid); + + /* need to reconfigure the beacon */ + sc->sc_beacons = 0; + + break; + default: + break; + } + } + + if ((conf->changed & IEEE80211_IFCC_BEACON) && + (vif->type == IEEE80211_IF_TYPE_IBSS)) { + /* + * Allocate and setup the beacon frame. + * + * Stop any previous beacon DMA. This may be + * necessary, for example, when an ibss merge + * causes reconfiguration; we may be called + * with beacon transmission active. + */ + ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); + + error = ath_beacon_alloc(sc, 0); + if (error != 0) + return error; + + ath_beacon_sync(sc, 0); + } + + /* Check for WLAN_CAPABILITY_PRIVACY ? 
*/ + if ((avp->av_opmode != IEEE80211_IF_TYPE_STA)) { + for (i = 0; i < IEEE80211_WEP_NKID; i++) + if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i)) + ath9k_hw_keysetmac(sc->sc_ah, + (u16)i, + sc->sc_curbssid); + } + + /* Only legacy IBSS for now */ + if (vif->type == IEEE80211_IF_TYPE_IBSS) + ath_update_chainmask(sc, 0); + + return 0; +} + +#define SUPPORTED_FILTERS \ + (FIF_PROMISC_IN_BSS | \ + FIF_ALLMULTI | \ + FIF_CONTROL | \ + FIF_OTHER_BSS | \ + FIF_BCN_PRBRESP_PROMISC | \ + FIF_FCSFAIL) + +/* Accept unicast, bcast and mcast frames */ + +static void ath9k_configure_filter(struct ieee80211_hw *hw, + unsigned int changed_flags, + unsigned int *total_flags, + int mc_count, + struct dev_mc_list *mclist) +{ + struct ath_softc *sc = hw->priv; + + changed_flags &= SUPPORTED_FILTERS; + *total_flags &= SUPPORTED_FILTERS; + + if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + ath_scan_start(sc); + else + ath_scan_end(sc); + } +} + +static void ath9k_sta_notify(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + enum sta_notify_cmd cmd, + const u8 *addr) +{ + struct ath_softc *sc = hw->priv; + struct ath_node *an; + unsigned long flags; + DECLARE_MAC_BUF(mac); + + spin_lock_irqsave(&sc->node_lock, flags); + an = ath_node_find(sc, (u8 *) addr); + spin_unlock_irqrestore(&sc->node_lock, flags); + + switch (cmd) { + case STA_NOTIFY_ADD: + spin_lock_irqsave(&sc->node_lock, flags); + if (!an) { + ath_node_attach(sc, (u8 *)addr, 0); + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach a node: %s\n", + __func__, + print_mac(mac, addr)); + } else { + ath_node_get(sc, (u8 *)addr); + } + spin_unlock_irqrestore(&sc->node_lock, flags); + break; + case STA_NOTIFY_REMOVE: + if (!an) + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Removal of a non-existent node\n", + __func__); + else { + ath_node_put(sc, an, ATH9K_BH_STATUS_INTACT); + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Put a node: %s\n", + __func__, + print_mac(mac, addr)); + } + break; + default: + break; + } +} + +static int ath9k_conf_tx(struct ieee80211_hw *hw, + u16 queue, + const struct ieee80211_tx_queue_params *params) +{ + struct ath_softc *sc = hw->priv; + struct ath9k_tx_queue_info qi; + int ret = 0, qnum; + + if (queue >= WME_NUM_AC) + return 0; + + qi.tqi_aifs = params->aifs; + qi.tqi_cwmin = params->cw_min; + qi.tqi_cwmax = params->cw_max; + qi.tqi_burstTime = params->txop; + qnum = ath_get_hal_qnum(queue, sc); + + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: Configure tx [queue/halq] [%d/%d], " + "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", + __func__, + queue, + qnum, + params->aifs, + params->cw_min, + params->cw_max, + params->txop); + + ret = ath_txq_update(sc, qnum, &qi); + if (ret) + DPRINTF(sc, ATH_DBG_FATAL, + "%s: TXQ Update failed\n", __func__); + + return ret; +} + +static int ath9k_set_key(struct ieee80211_hw *hw, + enum set_key_cmd cmd, + const u8 *local_addr, + const u8 *addr, + struct ieee80211_key_conf *key) +{ + struct ath_softc *sc = hw->priv; + int ret = 0; + + DPRINTF(sc, ATH_DBG_KEYCACHE, " %s: Set HW Key\n", __func__); + + switch (cmd) { + case SET_KEY: + ret = ath_key_config(sc, addr, key); + if (!ret) { + set_bit(key->keyidx, sc->sc_keymap); + key->hw_key_idx = key->keyidx; + /* push IV and Michael MIC generation to stack */ + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + } + break; + case DISABLE_KEY: + ath_key_delete(sc, key); + clear_bit(key->keyidx, sc->sc_keymap); + sc->sc_keytype = ATH9K_CIPHER_CLR; + break; + default: + ret = -EINVAL; + } + + return 
ret; +} + +static void ath9k_ht_conf(struct ath_softc *sc, + struct ieee80211_bss_conf *bss_conf) +{ +#define IEEE80211_HT_CAP_40MHZ_INTOLERANT BIT(14) + struct ath_ht_info *ht_info = &sc->sc_ht_info; + + if (bss_conf->assoc_ht) { + ht_info->ext_chan_offset = + bss_conf->ht_bss_conf->bss_cap & + IEEE80211_HT_IE_CHA_SEC_OFFSET; + + if (!(bss_conf->ht_conf->cap & + IEEE80211_HT_CAP_40MHZ_INTOLERANT) && + (bss_conf->ht_bss_conf->bss_cap & + IEEE80211_HT_IE_CHA_WIDTH)) + ht_info->tx_chan_width = ATH9K_HT_MACMODE_2040; + else + ht_info->tx_chan_width = ATH9K_HT_MACMODE_20; + + ath9k_hw_set11nmac2040(sc->sc_ah, ht_info->tx_chan_width); + ht_info->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR + + bss_conf->ht_conf->ampdu_factor); + ht_info->mpdudensity = + parse_mpdudensity(bss_conf->ht_conf->ampdu_density); + + } + +#undef IEEE80211_HT_CAP_40MHZ_INTOLERANT +} + +static void ath9k_bss_assoc_info(struct ath_softc *sc, + struct ieee80211_bss_conf *bss_conf) +{ + struct ieee80211_hw *hw = sc->hw; + struct ieee80211_channel *curchan = hw->conf.channel; + struct ath_vap *avp; + int pos; + DECLARE_MAC_BUF(mac); + + if (bss_conf->assoc) { + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Bss Info ASSOC %d\n", + __func__, + bss_conf->aid); + + avp = sc->sc_vaps[0]; + if (avp == NULL) { + DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid interface\n", + __func__); + return; + } + + /* New association, store aid */ + if (avp->av_opmode == ATH9K_M_STA) { + sc->sc_curaid = bss_conf->aid; + ath9k_hw_write_associd(sc->sc_ah, sc->sc_curbssid, + sc->sc_curaid); + } + + /* Configure the beacon */ + ath_beacon_config(sc, 0); + sc->sc_beacons = 1; + + /* Reset rssi stats */ + sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; + sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; + sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; + sc->sc_halstats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER; + + /* Update chainmask */ + ath_update_chainmask(sc, bss_conf->assoc_ht); + + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: bssid %s aid 0x%x\n", + __func__, + print_mac(mac, sc->sc_curbssid), sc->sc_curaid); + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Set channel: %d MHz\n", + __func__, + curchan->center_freq); + + pos = ath_get_channel(sc, curchan); + if (pos == -1) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Invalid channel\n", __func__); + return; + } + + if (hw->conf.ht_conf.ht_supported) + sc->sc_ah->ah_channels[pos].chanmode = + ath_get_extchanmode(sc, curchan); + else + sc->sc_ah->ah_channels[pos].chanmode = + (curchan->band == IEEE80211_BAND_2GHZ) ? 
+ CHANNEL_G : CHANNEL_A; + + /* set h/w channel */ + if (ath_set_channel(sc, &sc->sc_ah->ah_channels[pos]) < 0) + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unable to set channel\n", + __func__); + + ath_rate_newstate(sc, avp); + /* Update ratectrl about the new state */ + ath_rc_node_update(hw, avp->rc_node); + } else { + DPRINTF(sc, ATH_DBG_CONFIG, + "%s: Bss Info DISSOC\n", __func__); + sc->sc_curaid = 0; + } +} + +static void ath9k_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + u32 changed) +{ + struct ath_softc *sc = hw->priv; + + if (changed & BSS_CHANGED_ERP_PREAMBLE) { + DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed PREAMBLE %d\n", + __func__, + bss_conf->use_short_preamble); + if (bss_conf->use_short_preamble) + sc->sc_flags |= ATH_PREAMBLE_SHORT; + else + sc->sc_flags &= ~ATH_PREAMBLE_SHORT; + } + + if (changed & BSS_CHANGED_ERP_CTS_PROT) { + DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed CTS PROT %d\n", + __func__, + bss_conf->use_cts_prot); + if (bss_conf->use_cts_prot && + hw->conf.channel->band != IEEE80211_BAND_5GHZ) + sc->sc_flags |= ATH_PROTECT_ENABLE; + else + sc->sc_flags &= ~ATH_PROTECT_ENABLE; + } + + if (changed & BSS_CHANGED_HT) { + DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed HT %d\n", + __func__, + bss_conf->assoc_ht); + ath9k_ht_conf(sc, bss_conf); + } + + if (changed & BSS_CHANGED_ASSOC) { + DPRINTF(sc, ATH_DBG_CONFIG, "%s: BSS Changed ASSOC %d\n", + __func__, + bss_conf->assoc); + ath9k_bss_assoc_info(sc, bss_conf); + } +} + +static u64 ath9k_get_tsf(struct ieee80211_hw *hw) +{ + u64 tsf; + struct ath_softc *sc = hw->priv; + struct ath_hal *ah = sc->sc_ah; + + tsf = ath9k_hw_gettsf64(ah); + + return tsf; +} + +static void ath9k_reset_tsf(struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; + struct ath_hal *ah = sc->sc_ah; + + ath9k_hw_reset_tsf(ah); +} + +static int ath9k_ampdu_action(struct ieee80211_hw *hw, + enum ieee80211_ampdu_mlme_action action, + const u8 *addr, + u16 tid, + u16 *ssn) +{ + struct ath_softc *sc = hw->priv; + int ret = 0; + + switch (action) { + case IEEE80211_AMPDU_RX_START: + ret = ath_rx_aggr_start(sc, addr, tid, ssn); + if (ret < 0) + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unable to start RX aggregation\n", + __func__); + break; + case IEEE80211_AMPDU_RX_STOP: + ret = ath_rx_aggr_stop(sc, addr, tid); + if (ret < 0) + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unable to stop RX aggregation\n", + __func__); + break; + case IEEE80211_AMPDU_TX_START: + ret = ath_tx_aggr_start(sc, addr, tid, ssn); + if (ret < 0) + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unable to start TX aggregation\n", + __func__); + else + ieee80211_start_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid); + break; + case IEEE80211_AMPDU_TX_STOP: + ret = ath_tx_aggr_stop(sc, addr, tid); + if (ret < 0) + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unable to stop TX aggregation\n", + __func__); + + ieee80211_stop_tx_ba_cb_irqsafe(hw, (u8 *)addr, tid); + break; + default: + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unknown AMPDU action\n", __func__); + } + + return ret; +} + +static struct ieee80211_ops ath9k_ops = { + .tx = ath9k_tx, + .start = ath9k_start, + .stop = ath9k_stop, + .add_interface = ath9k_add_interface, + .remove_interface = ath9k_remove_interface, + .config = ath9k_config, + .config_interface = ath9k_config_interface, + .configure_filter = ath9k_configure_filter, + .get_stats = NULL, + .sta_notify = ath9k_sta_notify, + .conf_tx = ath9k_conf_tx, + .get_tx_stats = NULL, + .bss_info_changed = ath9k_bss_info_changed, + .set_tim = NULL, + .set_key = 
ath9k_set_key, + .hw_scan = NULL, + .get_tkip_seq = NULL, + .set_rts_threshold = NULL, + .set_frag_threshold = NULL, + .set_retry_limit = NULL, + .get_tsf = ath9k_get_tsf, + .reset_tsf = ath9k_reset_tsf, + .tx_last_beacon = NULL, + .ampdu_action = ath9k_ampdu_action +}; + +void ath_get_beaconconfig(struct ath_softc *sc, + int if_id, + struct ath_beacon_config *conf) +{ + struct ieee80211_hw *hw = sc->hw; + + /* fill in beacon config data */ + + conf->beacon_interval = hw->conf.beacon_int; + conf->listen_interval = 100; + conf->dtim_count = 1; + conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval; +} + +int ath_update_beacon(struct ath_softc *sc, + int if_id, + struct ath_beacon_offset *bo, + struct sk_buff *skb, + int mcast) +{ + return 0; +} + +void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, + struct ath_xmit_status *tx_status, struct ath_node *an) +{ + struct ieee80211_hw *hw = sc->hw; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + + DPRINTF(sc, ATH_DBG_XMIT, + "%s: TX complete: skb: %p\n", __func__, skb); + + if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK || + tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) { + /* free driver's private data area of tx_info */ + if (tx_info->driver_data[0] != NULL) + kfree(tx_info->driver_data[0]); + tx_info->driver_data[0] = NULL; + } + + if (tx_status->flags & ATH_TX_BAR) { + tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; + tx_status->flags &= ~ATH_TX_BAR; + } + if (tx_status->flags) + tx_info->status.excessive_retries = 1; + + tx_info->status.retry_count = tx_status->retries; + + ieee80211_tx_status(hw, skb); + if (an) + ath_node_put(sc, an, ATH9K_BH_STATUS_CHANGE); +} + +int ath__rx_indicate(struct ath_softc *sc, + struct sk_buff *skb, + struct ath_recv_status *status, + u16 keyix) +{ + struct ieee80211_hw *hw = sc->hw; + struct ath_node *an = NULL; + struct ieee80211_rx_status rx_status; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + int hdrlen = ieee80211_get_hdrlen_from_skb(skb); + int padsize; + enum ATH_RX_TYPE st; + + /* see if any padding is done by the hw and remove it */ + if (hdrlen & 3) { + padsize = hdrlen % 4; + memmove(skb->data + padsize, skb->data, hdrlen); + skb_pull(skb, padsize); + } + + /* remove FCS before passing up to protocol stack */ + skb_trim(skb, (skb->len - FCS_LEN)); + + /* Prepare rx status */ + ath9k_rx_prepare(sc, skb, status, &rx_status); + + if (!(keyix == ATH9K_RXKEYIX_INVALID) && + !(status->flags & ATH_RX_DECRYPT_ERROR)) { + rx_status.flag |= RX_FLAG_DECRYPTED; + } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED) + && !(status->flags & ATH_RX_DECRYPT_ERROR) + && skb->len >= hdrlen + 4) { + keyix = skb->data[hdrlen + 3] >> 6; + + if (test_bit(keyix, sc->sc_keymap)) + rx_status.flag |= RX_FLAG_DECRYPTED; + } + + spin_lock_bh(&sc->node_lock); + an = ath_node_find(sc, hdr->addr2); + spin_unlock_bh(&sc->node_lock); + + if (an) { + ath_rx_input(sc, an, + hw->conf.ht_conf.ht_supported, + skb, status, &st); + } + if (!an || (st != ATH_RX_CONSUMED)) + __ieee80211_rx(hw, skb, &rx_status); + + return 0; +} + +int ath_rx_subframe(struct ath_node *an, + struct sk_buff *skb, + struct ath_recv_status *status) +{ + struct ath_softc *sc = an->an_sc; + struct ieee80211_hw *hw = sc->hw; + struct ieee80211_rx_status rx_status; + + /* Prepare rx status */ + ath9k_rx_prepare(sc, skb, status, &rx_status); + if (!(status->flags & ATH_RX_DECRYPT_ERROR)) + rx_status.flag |= RX_FLAG_DECRYPTED; + + __ieee80211_rx(hw, skb, &rx_status); + + return 0; 
+} + +enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc) +{ + return sc->sc_ht_info.tx_chan_width; +} + +static int ath_detach(struct ath_softc *sc) +{ + struct ieee80211_hw *hw = sc->hw; + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Detach ATH hw\n", __func__); + + /* Unregister hw */ + + ieee80211_unregister_hw(hw); + + /* unregister Rate control */ + ath_rate_control_unregister(); + + /* tx/rx cleanup */ + + ath_rx_cleanup(sc); + ath_tx_cleanup(sc); + + /* Deinit */ + + ath_deinit(sc); + + return 0; +} + +static int ath_attach(u16 devid, + struct ath_softc *sc) +{ + struct ieee80211_hw *hw = sc->hw; + int error = 0; + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: Attach ATH hw\n", __func__); + + error = ath_init(devid, sc); + if (error != 0) + return error; + + /* Init nodes */ + + INIT_LIST_HEAD(&sc->node_list); + spin_lock_init(&sc->node_lock); + + /* get mac address from hardware and set in mac80211 */ + + SET_IEEE80211_PERM_ADDR(hw, sc->sc_myaddr); + + /* setup channels and rates */ + + sc->sbands[IEEE80211_BAND_2GHZ].channels = + sc->channels[IEEE80211_BAND_2GHZ]; + sc->sbands[IEEE80211_BAND_2GHZ].bitrates = + sc->rates[IEEE80211_BAND_2GHZ]; + sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ; + + if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) + /* Setup HT capabilities for 2.4Ghz*/ + setup_ht_cap(&sc->sbands[IEEE80211_BAND_2GHZ].ht_info); + + hw->wiphy->bands[IEEE80211_BAND_2GHZ] = + &sc->sbands[IEEE80211_BAND_2GHZ]; + + if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) { + sc->sbands[IEEE80211_BAND_5GHZ].channels = + sc->channels[IEEE80211_BAND_5GHZ]; + sc->sbands[IEEE80211_BAND_5GHZ].bitrates = + sc->rates[IEEE80211_BAND_5GHZ]; + sc->sbands[IEEE80211_BAND_5GHZ].band = + IEEE80211_BAND_5GHZ; + + if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) + /* Setup HT capabilities for 5Ghz*/ + setup_ht_cap(&sc->sbands[IEEE80211_BAND_5GHZ].ht_info); + + hw->wiphy->bands[IEEE80211_BAND_5GHZ] = + &sc->sbands[IEEE80211_BAND_5GHZ]; + } + + /* FIXME: Have to figure out proper hw init values later */ + + hw->queues = 4; + hw->ampdu_queues = 1; + + /* Register rate control */ + hw->rate_control_algorithm = "ath9k_rate_control"; + error = ath_rate_control_register(); + if (error != 0) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: Unable to register rate control " + "algorithm:%d\n", __func__, error); + ath_rate_control_unregister(); + goto bad; + } + + error = ieee80211_register_hw(hw); + if (error != 0) { + ath_rate_control_unregister(); + goto bad; + } + + /* initialize tx/rx engine */ + + error = ath_tx_init(sc, ATH_TXBUF); + if (error != 0) + goto bad1; + + error = ath_rx_init(sc, ATH_RXBUF); + if (error != 0) + goto bad1; + + return 0; +bad1: + ath_detach(sc); +bad: + return error; +} + +static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + void __iomem *mem; + struct ath_softc *sc; + struct ieee80211_hw *hw; + const char *athname; + u8 csz; + u32 val; + int ret = 0; + + if (pci_enable_device(pdev)) + return -EIO; + + /* XXX 32-bit addressing only */ + if (pci_set_dma_mask(pdev, 0xffffffff)) { + printk(KERN_ERR "ath_pci: 32-bit DMA not available\n"); + ret = -ENODEV; + goto bad; + } + + /* + * Cache line size is used to size and align various + * structures used to communicate with the hardware. + */ + pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); + if (csz == 0) { + /* + * Linux 2.4.18 (at least) writes the cache line size + * register as a 16-bit wide register which is wrong. 
+ * We must have this setup properly for rx buffer + * DMA to work so force a reasonable value here if it + * comes up zero. + */ + csz = L1_CACHE_BYTES / sizeof(u32); + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); + } + /* + * The default setting of latency timer yields poor results, + * set it to the value used by other systems. It may be worth + * tweaking this setting more. + */ + pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); + + pci_set_master(pdev); + + /* + * Disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state. + */ + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + + ret = pci_request_region(pdev, 0, "ath9k"); + if (ret) { + dev_err(&pdev->dev, "PCI memory region reserve error\n"); + ret = -ENODEV; + goto bad; + } + + mem = pci_iomap(pdev, 0, 0); + if (!mem) { + printk(KERN_ERR "PCI memory map error\n") ; + ret = -EIO; + goto bad1; + } + + hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops); + if (hw == NULL) { + printk(KERN_ERR "ath_pci: no memory for ieee80211_hw\n"); + goto bad2; + } + + hw->flags = IEEE80211_HW_SIGNAL_DBM | + IEEE80211_HW_NOISE_DBM; + + SET_IEEE80211_DEV(hw, &pdev->dev); + pci_set_drvdata(pdev, hw); + + sc = hw->priv; + sc->hw = hw; + sc->pdev = pdev; + sc->mem = mem; + + if (ath_attach(id->device, sc) != 0) { + ret = -ENODEV; + goto bad3; + } + + /* setup interrupt service routine */ + + if (request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath", sc)) { + printk(KERN_ERR "%s: request_irq failed\n", + wiphy_name(hw->wiphy)); + ret = -EIO; + goto bad4; + } + + athname = ath9k_hw_probe(id->vendor, id->device); + + printk(KERN_INFO "%s: %s: mem=0x%lx, irq=%d\n", + wiphy_name(hw->wiphy), + athname ? 
athname : "Atheros ???", + (unsigned long)mem, pdev->irq); + + return 0; +bad4: + ath_detach(sc); +bad3: + ieee80211_free_hw(hw); +bad2: + pci_iounmap(pdev, mem); +bad1: + pci_release_region(pdev, 0); +bad: + pci_disable_device(pdev); + return ret; +} + +static void ath_pci_remove(struct pci_dev *pdev) +{ + struct ieee80211_hw *hw = pci_get_drvdata(pdev); + struct ath_softc *sc = hw->priv; + + if (pdev->irq) + free_irq(pdev->irq, sc); + ath_detach(sc); + pci_iounmap(pdev, sc->mem); + pci_release_region(pdev, 0); + pci_disable_device(pdev); + ieee80211_free_hw(hw); +} + +#ifdef CONFIG_PM + +static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state) +{ + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, 3); + + return 0; +} + +static int ath_pci_resume(struct pci_dev *pdev) +{ + u32 val; + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + pci_restore_state(pdev); + /* + * Suspend/Resume resets the PCI configuration space, so we have to + * re-disable the RETRY_TIMEOUT register (0x41) to keep + * PCI Tx retries from interfering with C3 CPU state + */ + pci_read_config_dword(pdev, 0x40, &val); + if ((val & 0x0000ff00) != 0) + pci_write_config_dword(pdev, 0x40, val & 0xffff00ff); + + return 0; +} + +#endif /* CONFIG_PM */ + +MODULE_DEVICE_TABLE(pci, ath_pci_id_table); + +static struct pci_driver ath_pci_driver = { + .name = "ath9k", + .id_table = ath_pci_id_table, + .probe = ath_pci_probe, + .remove = ath_pci_remove, +#ifdef CONFIG_PM + .suspend = ath_pci_suspend, + .resume = ath_pci_resume, +#endif /* CONFIG_PM */ +}; + +static int __init init_ath_pci(void) +{ + printk(KERN_INFO "%s: %s\n", dev_info, ATH_PCI_VERSION); + + if (pci_register_driver(&ath_pci_driver) < 0) { + printk(KERN_ERR + "ath_pci: No devices found, driver not installed.\n"); + pci_unregister_driver(&ath_pci_driver); + return -ENODEV; + } + + return 0; +} +module_init(init_ath_pci); + +static void __exit exit_ath_pci(void) +{ + pci_unregister_driver(&ath_pci_driver); + printk(KERN_INFO "%s: driver unloaded\n", dev_info); +} +module_exit(exit_ath_pci); diff --git a/drivers/net/wireless/ath9k/phy.c b/drivers/net/wireless/ath9k/phy.c new file mode 100644 index 0000000..eb9121f --- /dev/null +++ b/drivers/net/wireless/ath9k/phy.c @@ -0,0 +1,436 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "core.h" +#include "hw.h" +#include "reg.h" +#include "phy.h" + +void +ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, u32 freqIndex, + int regWrites) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + REG_WRITE_ARRAY(&ahp->ah_iniBB_RfGain, freqIndex, regWrites); +} + +bool +ath9k_hw_set_channel(struct ath_hal *ah, struct ath9k_channel *chan) +{ + u32 channelSel = 0; + u32 bModeSynth = 0; + u32 aModeRefSel = 0; + u32 reg32 = 0; + u16 freq; + struct chan_centers centers; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + freq = centers.synth_center; + + if (freq < 4800) { + u32 txctl; + + if (((freq - 2192) % 5) == 0) { + channelSel = ((freq - 672) * 2 - 3040) / 10; + bModeSynth = 0; + } else if (((freq - 2224) % 5) == 0) { + channelSel = ((freq - 704) * 2 - 3040) / 10; + bModeSynth = 1; + } else { + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: invalid channel %u MHz\n", __func__, + freq); + return false; + } + + channelSel = (channelSel << 2) & 0xff; + channelSel = ath9k_hw_reverse_bits(channelSel, 8); + + txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); + if (freq == 2484) { + + REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, + txctl | AR_PHY_CCK_TX_CTRL_JAPAN); + } else { + REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, + txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); + } + + } else if ((freq % 20) == 0 && freq >= 5120) { + channelSel = + ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8); + aModeRefSel = ath9k_hw_reverse_bits(1, 2); + } else if ((freq % 10) == 0) { + channelSel = + ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8); + if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) + aModeRefSel = ath9k_hw_reverse_bits(2, 2); + else + aModeRefSel = ath9k_hw_reverse_bits(1, 2); + } else if ((freq % 5) == 0) { + channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8); + aModeRefSel = ath9k_hw_reverse_bits(1, 2); + } else { + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, + "%s: invalid channel %u MHz\n", __func__, freq); + return false; + } + + reg32 = + (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) | + (1 << 5) | 0x1; + + REG_WRITE(ah, AR_PHY(0x37), reg32); + + ah->ah_curchan = chan; + + AH5416(ah)->ah_curchanRadIndex = -1; + + return true; +} + +bool +ath9k_hw_ar9280_set_channel(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + u16 bMode, fracMode, aModeRefSel = 0; + u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0; + struct chan_centers centers; + u32 refDivA = 24; + + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + freq = centers.synth_center; + + reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL); + reg32 &= 0xc0000000; + + if (freq < 4800) { + u32 txctl; + + bMode = 1; + fracMode = 1; + aModeRefSel = 0; + channelSel = (freq * 0x10000) / 15; + + txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); + if (freq == 2484) { + + REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, + txctl | AR_PHY_CCK_TX_CTRL_JAPAN); + } else { + REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, + txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); + } + } else { + bMode = 0; + fracMode = 0; + + if ((freq % 20) == 0) { + aModeRefSel = 3; + } else if ((freq % 10) == 0) { + aModeRefSel = 2; + } else { + aModeRefSel = 0; + + fracMode = 1; + refDivA = 1; + channelSel = (freq * 0x8000) / 15; + + REG_RMW_FIELD(ah, AR_AN_SYNTH9, + AR_AN_SYNTH9_REFDIVA, refDivA); + } + if (!fracMode) { + ndiv = (freq * (refDivA >> aModeRefSel)) / 60; + channelSel = ndiv & 0x1ff; + channelFrac = (ndiv & 0xfffffe00) * 2; + channelSel = (channelSel << 17) | channelFrac; + } + } + + reg32 = reg32 | + (bMode << 29) | + (fracMode << 28) | (aModeRefSel << 26) | (channelSel); + + REG_WRITE(ah, 
AR_PHY_SYNTH_CONTROL, reg32); + + ah->ah_curchan = chan; + + AH5416(ah)->ah_curchanRadIndex = -1; + + return true; +} + +static void +ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32, + u32 numBits, u32 firstBit, + u32 column) +{ + u32 tmp32, mask, arrayEntry, lastBit; + int32_t bitPosition, bitsLeft; + + tmp32 = ath9k_hw_reverse_bits(reg32, numBits); + arrayEntry = (firstBit - 1) / 8; + bitPosition = (firstBit - 1) % 8; + bitsLeft = numBits; + while (bitsLeft > 0) { + lastBit = (bitPosition + bitsLeft > 8) ? + 8 : bitPosition + bitsLeft; + mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) << + (column * 8); + rfBuf[arrayEntry] &= ~mask; + rfBuf[arrayEntry] |= ((tmp32 << bitPosition) << + (column * 8)) & mask; + bitsLeft -= 8 - bitPosition; + tmp32 = tmp32 >> (8 - bitPosition); + bitPosition = 0; + arrayEntry++; + } +} + +bool +ath9k_hw_set_rf_regs(struct ath_hal *ah, struct ath9k_channel *chan, + u16 modesIndex) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + u32 eepMinorRev; + u32 ob5GHz = 0, db5GHz = 0; + u32 ob2GHz = 0, db2GHz = 0; + int regWrites = 0; + + if (AR_SREV_9280_10_OR_LATER(ah)) + return true; + + eepMinorRev = ath9k_hw_get_eeprom(ahp, EEP_MINOR_REV); + + RF_BANK_SETUP(ahp->ah_analogBank0Data, &ahp->ah_iniBank0, 1); + + RF_BANK_SETUP(ahp->ah_analogBank1Data, &ahp->ah_iniBank1, 1); + + RF_BANK_SETUP(ahp->ah_analogBank2Data, &ahp->ah_iniBank2, 1); + + RF_BANK_SETUP(ahp->ah_analogBank3Data, &ahp->ah_iniBank3, + modesIndex); + { + int i; + for (i = 0; i < ahp->ah_iniBank6TPC.ia_rows; i++) { + ahp->ah_analogBank6Data[i] = + INI_RA(&ahp->ah_iniBank6TPC, i, modesIndex); + } + } + + if (eepMinorRev >= 2) { + if (IS_CHAN_2GHZ(chan)) { + ob2GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_2); + db2GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_2); + ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, + ob2GHz, 3, 197, 0); + ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, + db2GHz, 3, 194, 0); + } else { + ob5GHz = ath9k_hw_get_eeprom(ahp, EEP_OB_5); + db5GHz = ath9k_hw_get_eeprom(ahp, EEP_DB_5); + ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, + ob5GHz, 3, 203, 0); + ath9k_phy_modify_rx_buffer(ahp->ah_analogBank6Data, + db5GHz, 3, 200, 0); + } + } + + RF_BANK_SETUP(ahp->ah_analogBank7Data, &ahp->ah_iniBank7, 1); + + REG_WRITE_RF_ARRAY(&ahp->ah_iniBank0, ahp->ah_analogBank0Data, + regWrites); + REG_WRITE_RF_ARRAY(&ahp->ah_iniBank1, ahp->ah_analogBank1Data, + regWrites); + REG_WRITE_RF_ARRAY(&ahp->ah_iniBank2, ahp->ah_analogBank2Data, + regWrites); + REG_WRITE_RF_ARRAY(&ahp->ah_iniBank3, ahp->ah_analogBank3Data, + regWrites); + REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6TPC, ahp->ah_analogBank6Data, + regWrites); + REG_WRITE_RF_ARRAY(&ahp->ah_iniBank7, ahp->ah_analogBank7Data, + regWrites); + + return true; +} + +void +ath9k_hw_rfdetach(struct ath_hal *ah) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + if (ahp->ah_analogBank0Data != NULL) { + kfree(ahp->ah_analogBank0Data); + ahp->ah_analogBank0Data = NULL; + } + if (ahp->ah_analogBank1Data != NULL) { + kfree(ahp->ah_analogBank1Data); + ahp->ah_analogBank1Data = NULL; + } + if (ahp->ah_analogBank2Data != NULL) { + kfree(ahp->ah_analogBank2Data); + ahp->ah_analogBank2Data = NULL; + } + if (ahp->ah_analogBank3Data != NULL) { + kfree(ahp->ah_analogBank3Data); + ahp->ah_analogBank3Data = NULL; + } + if (ahp->ah_analogBank6Data != NULL) { + kfree(ahp->ah_analogBank6Data); + ahp->ah_analogBank6Data = NULL; + } + if (ahp->ah_analogBank6TPCData != NULL) { + kfree(ahp->ah_analogBank6TPCData); + ahp->ah_analogBank6TPCData = NULL; + } + if 
(ahp->ah_analogBank7Data != NULL) { + kfree(ahp->ah_analogBank7Data); + ahp->ah_analogBank7Data = NULL; + } + if (ahp->ah_addac5416_21 != NULL) { + kfree(ahp->ah_addac5416_21); + ahp->ah_addac5416_21 = NULL; + } + if (ahp->ah_bank6Temp != NULL) { + kfree(ahp->ah_bank6Temp); + ahp->ah_bank6Temp = NULL; + } +} + +bool ath9k_hw_init_rf(struct ath_hal *ah, int *status) +{ + struct ath_hal_5416 *ahp = AH5416(ah); + + if (!AR_SREV_9280_10_OR_LATER(ah)) { + + ahp->ah_analogBank0Data = + kzalloc((sizeof(u32) * + ahp->ah_iniBank0.ia_rows), GFP_KERNEL); + ahp->ah_analogBank1Data = + kzalloc((sizeof(u32) * + ahp->ah_iniBank1.ia_rows), GFP_KERNEL); + ahp->ah_analogBank2Data = + kzalloc((sizeof(u32) * + ahp->ah_iniBank2.ia_rows), GFP_KERNEL); + ahp->ah_analogBank3Data = + kzalloc((sizeof(u32) * + ahp->ah_iniBank3.ia_rows), GFP_KERNEL); + ahp->ah_analogBank6Data = + kzalloc((sizeof(u32) * + ahp->ah_iniBank6.ia_rows), GFP_KERNEL); + ahp->ah_analogBank6TPCData = + kzalloc((sizeof(u32) * + ahp->ah_iniBank6TPC.ia_rows), GFP_KERNEL); + ahp->ah_analogBank7Data = + kzalloc((sizeof(u32) * + ahp->ah_iniBank7.ia_rows), GFP_KERNEL); + + if (ahp->ah_analogBank0Data == NULL + || ahp->ah_analogBank1Data == NULL + || ahp->ah_analogBank2Data == NULL + || ahp->ah_analogBank3Data == NULL + || ahp->ah_analogBank6Data == NULL + || ahp->ah_analogBank6TPCData == NULL + || ahp->ah_analogBank7Data == NULL) { + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, + "%s: cannot allocate RF banks\n", + __func__); + *status = -ENOMEM; + return false; + } + + ahp->ah_addac5416_21 = + kzalloc((sizeof(u32) * + ahp->ah_iniAddac.ia_rows * + ahp->ah_iniAddac.ia_columns), GFP_KERNEL); + if (ahp->ah_addac5416_21 == NULL) { + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, + "%s: cannot allocate ah_addac5416_21\n", + __func__); + *status = -ENOMEM; + return false; + } + + ahp->ah_bank6Temp = + kzalloc((sizeof(u32) * + ahp->ah_iniBank6.ia_rows), GFP_KERNEL); + if (ahp->ah_bank6Temp == NULL) { + DPRINTF(ah->ah_sc, ATH_DBG_FATAL, + "%s: cannot allocate ah_bank6Temp\n", + __func__); + *status = -ENOMEM; + return false; + } + } + + return true; +} + +void +ath9k_hw_decrease_chain_power(struct ath_hal *ah, struct ath9k_channel *chan) +{ + int i, regWrites = 0; + struct ath_hal_5416 *ahp = AH5416(ah); + u32 bank6SelMask; + u32 *bank6Temp = ahp->ah_bank6Temp; + + switch (ahp->ah_diversityControl) { + case ATH9K_ANT_FIXED_A: + bank6SelMask = + (ahp-> + ah_antennaSwitchSwap & ANTSWAP_AB) ? REDUCE_CHAIN_0 : + REDUCE_CHAIN_1; + break; + case ATH9K_ANT_FIXED_B: + bank6SelMask = + (ahp-> + ah_antennaSwitchSwap & ANTSWAP_AB) ? 
REDUCE_CHAIN_1 : + REDUCE_CHAIN_0; + break; + case ATH9K_ANT_VARIABLE: + return; + break; + default: + return; + break; + } + + for (i = 0; i < ahp->ah_iniBank6.ia_rows; i++) + bank6Temp[i] = ahp->ah_analogBank6Data[i]; + + REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask); + + ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 189, 0); + ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 190, 0); + ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 191, 0); + ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 192, 0); + ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 193, 0); + ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 222, 0); + ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 245, 0); + ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0); + ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0); + + REG_WRITE_RF_ARRAY(&ahp->ah_iniBank6, bank6Temp, regWrites); + + REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053); +#ifdef ALTER_SWITCH + REG_WRITE(ah, PHY_SWITCH_CHAIN_0, + (REG_READ(ah, PHY_SWITCH_CHAIN_0) & ~0x38) + | ((REG_READ(ah, PHY_SWITCH_CHAIN_0) >> 3) & 0x38)); +#endif +} diff --git a/drivers/net/wireless/ath9k/phy.h b/drivers/net/wireless/ath9k/phy.h new file mode 100644 index 0000000..0cd399a --- /dev/null +++ b/drivers/net/wireless/ath9k/phy.h @@ -0,0 +1,543 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef PHY_H +#define PHY_H + +bool ath9k_hw_ar9280_set_channel(struct ath_hal *ah, + struct ath9k_channel + *chan); +bool ath9k_hw_set_channel(struct ath_hal *ah, + struct ath9k_channel *chan); +void ath9k_hw_write_regs(struct ath_hal *ah, u32 modesIndex, + u32 freqIndex, int regWrites); +bool ath9k_hw_set_rf_regs(struct ath_hal *ah, + struct ath9k_channel *chan, + u16 modesIndex); +void ath9k_hw_decrease_chain_power(struct ath_hal *ah, + struct ath9k_channel *chan); +bool ath9k_hw_init_rf(struct ath_hal *ah, + int *status); + +#define AR_PHY_BASE 0x9800 +#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2)) + +#define AR_PHY_TEST 0x9800 +#define PHY_AGC_CLR 0x10000000 +#define RFSILENT_BB 0x00002000 + +#define AR_PHY_TURBO 0x9804 +#define AR_PHY_FC_TURBO_MODE 0x00000001 +#define AR_PHY_FC_TURBO_SHORT 0x00000002 +#define AR_PHY_FC_DYN2040_EN 0x00000004 +#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008 +#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010 +#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020 +#define AR_PHY_FC_HT_EN 0x00000040 +#define AR_PHY_FC_SHORT_GI_40 0x00000080 +#define AR_PHY_FC_WALSH 0x00000100 +#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200 + +#define AR_PHY_TIMING2 0x9810 +#define AR_PHY_TIMING3 0x9814 +#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000 +#define AR_PHY_TIMING3_DSC_MAN_S 17 +#define AR_PHY_TIMING3_DSC_EXP 0x0001E000 +#define AR_PHY_TIMING3_DSC_EXP_S 13 + +#define AR_PHY_CHIP_ID 0x9818 +#define AR_PHY_CHIP_ID_REV_0 0x80 +#define AR_PHY_CHIP_ID_REV_1 0x81 +#define AR_PHY_CHIP_ID_9160_REV_0 0xb0 + +#define AR_PHY_ACTIVE 0x981C +#define AR_PHY_ACTIVE_EN 0x00000001 +#define AR_PHY_ACTIVE_DIS 0x00000000 + +#define AR_PHY_RF_CTL2 0x9824 +#define AR_PHY_TX_END_DATA_START 0x000000FF +#define AR_PHY_TX_END_DATA_START_S 0 +#define AR_PHY_TX_END_PA_ON 0x0000FF00 +#define AR_PHY_TX_END_PA_ON_S 8 + +#define AR_PHY_RF_CTL3 0x9828 +#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000 +#define AR_PHY_TX_END_TO_A2_RX_ON_S 16 + +#define AR_PHY_ADC_CTL 0x982C +#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003 +#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0 +#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000 +#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000 +#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000 +#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000 +#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16 + +#define AR_PHY_ADC_SERIAL_CTL 0x9830 +#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000 +#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001 + +#define AR_PHY_RF_CTL4 0x9834 +#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000 +#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24 +#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000 +#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16 +#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00 +#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8 +#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF +#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0 + +#define AR_PHY_SETTLING 0x9844 +#define AR_PHY_SETTLING_SWITCH 0x00003F80 +#define AR_PHY_SETTLING_SWITCH_S 7 + +#define AR_PHY_RXGAIN 0x9848 +#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000 +#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12 +#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000 +#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18 +#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80 +#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7 +#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000 +#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14 + +#define AR_PHY_DESIRED_SZ 0x9850 +#define AR_PHY_DESIRED_SZ_ADC 0x000000FF +#define AR_PHY_DESIRED_SZ_ADC_S 0 +#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00 +#define AR_PHY_DESIRED_SZ_PGA_S 8 +#define 
AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000 +#define AR_PHY_DESIRED_SZ_TOT_DES_S 20 + +#define AR_PHY_FIND_SIG 0x9858 +#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000 +#define AR_PHY_FIND_SIG_FIRSTEP_S 12 +#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000 +#define AR_PHY_FIND_SIG_FIRPWR_S 18 + +#define AR_PHY_AGC_CTL1 0x985C +#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80 +#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7 +#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000 +#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15 + +#define AR_PHY_AGC_CONTROL 0x9860 +#define AR_PHY_AGC_CONTROL_CAL 0x00000001 +#define AR_PHY_AGC_CONTROL_NF 0x00000002 +#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000 +#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000 +#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 + +#define AR_PHY_CCA 0x9864 +#define AR_PHY_MINCCA_PWR 0x0FF80000 +#define AR_PHY_MINCCA_PWR_S 19 +#define AR_PHY_CCA_THRESH62 0x0007F000 +#define AR_PHY_CCA_THRESH62_S 12 +#define AR9280_PHY_MINCCA_PWR 0x1FF00000 +#define AR9280_PHY_MINCCA_PWR_S 20 +#define AR9280_PHY_CCA_THRESH62 0x000FF000 +#define AR9280_PHY_CCA_THRESH62_S 12 + +#define AR_PHY_SFCORR_LOW 0x986C +#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001 +#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00 +#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8 +#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000 +#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14 +#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000 +#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21 + +#define AR_PHY_SFCORR 0x9868 +#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F +#define AR_PHY_SFCORR_M2COUNT_THR_S 0 +#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000 +#define AR_PHY_SFCORR_M1_THRESH_S 17 +#define AR_PHY_SFCORR_M2_THRESH 0x7F000000 +#define AR_PHY_SFCORR_M2_THRESH_S 24 + +#define AR_PHY_SLEEP_CTR_CONTROL 0x9870 +#define AR_PHY_SLEEP_CTR_LIMIT 0x9874 +#define AR_PHY_SYNTH_CONTROL 0x9874 +#define AR_PHY_SLEEP_SCAL 0x9878 + +#define AR_PHY_PLL_CTL 0x987c +#define AR_PHY_PLL_CTL_40 0xaa +#define AR_PHY_PLL_CTL_40_5413 0x04 +#define AR_PHY_PLL_CTL_44 0xab +#define AR_PHY_PLL_CTL_44_2133 0xeb +#define AR_PHY_PLL_CTL_40_2133 0xea + +#define AR_PHY_RX_DELAY 0x9914 +#define AR_PHY_SEARCH_START_DELAY 0x9918 +#define AR_PHY_RX_DELAY_DELAY 0x00003FFF + +#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12)) +#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F +#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0 +#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0 +#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5 +#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800 +#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000 +#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12 +#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000 + +#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000 +#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000 +#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000 +#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000 + +#define AR_PHY_TIMING5 0x9924 +#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE +#define AR_PHY_TIMING5_CYCPWR_THR1_S 1 + +#define AR_PHY_POWER_TX_RATE1 0x9934 +#define AR_PHY_POWER_TX_RATE2 0x9938 +#define AR_PHY_POWER_TX_RATE_MAX 0x993c +#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040 + +#define AR_PHY_FRAME_CTL 0x9944 +#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038 +#define AR_PHY_FRAME_CTL_TX_CLIP_S 3 + +#define AR_PHY_TXPWRADJ 0x994C +#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0 +#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6 +#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000 +#define 
AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18 + +#define AR_PHY_RADAR_EXT 0x9940 +#define AR_PHY_RADAR_EXT_ENA 0x00004000 + +#define AR_PHY_RADAR_0 0x9954 +#define AR_PHY_RADAR_0_ENA 0x00000001 +#define AR_PHY_RADAR_0_FFT_ENA 0x80000000 +#define AR_PHY_RADAR_0_INBAND 0x0000003e +#define AR_PHY_RADAR_0_INBAND_S 1 +#define AR_PHY_RADAR_0_PRSSI 0x00000FC0 +#define AR_PHY_RADAR_0_PRSSI_S 6 +#define AR_PHY_RADAR_0_HEIGHT 0x0003F000 +#define AR_PHY_RADAR_0_HEIGHT_S 12 +#define AR_PHY_RADAR_0_RRSSI 0x00FC0000 +#define AR_PHY_RADAR_0_RRSSI_S 18 +#define AR_PHY_RADAR_0_FIRPWR 0x7F000000 +#define AR_PHY_RADAR_0_FIRPWR_S 24 + +#define AR_PHY_RADAR_1 0x9958 +#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000 +#define AR_PHY_RADAR_1_USE_FIR128 0x00400000 +#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000 +#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16 +#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000 +#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000 +#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000 +#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00 +#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8 +#define AR_PHY_RADAR_1_MAXLEN 0x000000FF +#define AR_PHY_RADAR_1_MAXLEN_S 0 + +#define AR_PHY_SWITCH_CHAIN_0 0x9960 +#define AR_PHY_SWITCH_COM 0x9964 + +#define AR_PHY_SIGMA_DELTA 0x996C +#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003 +#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0 +#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8 +#define AR_PHY_SIGMA_DELTA_FILT2_S 3 +#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00 +#define AR_PHY_SIGMA_DELTA_FILT1_S 8 +#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000 +#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13 + +#define AR_PHY_RESTART 0x9970 +#define AR_PHY_RESTART_DIV_GC 0x001C0000 +#define AR_PHY_RESTART_DIV_GC_S 18 + +#define AR_PHY_RFBUS_REQ 0x997C +#define AR_PHY_RFBUS_REQ_EN 0x00000001 + +#define AR_PHY_TIMING7 0x9980 +#define AR_PHY_TIMING8 0x9984 +#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF +#define AR_PHY_TIMING8_PILOT_MASK_2_S 0 + +#define AR_PHY_BIN_MASK2_1 0x9988 +#define AR_PHY_BIN_MASK2_2 0x998c +#define AR_PHY_BIN_MASK2_3 0x9990 +#define AR_PHY_BIN_MASK2_4 0x9994 + +#define AR_PHY_BIN_MASK_1 0x9900 +#define AR_PHY_BIN_MASK_2 0x9904 +#define AR_PHY_BIN_MASK_3 0x9908 + +#define AR_PHY_MASK_CTL 0x990c + +#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF +#define AR_PHY_BIN_MASK2_4_MASK_4_S 0 + +#define AR_PHY_TIMING9 0x9998 +#define AR_PHY_TIMING10 0x999c +#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF +#define AR_PHY_TIMING10_PILOT_MASK_2_S 0 + +#define AR_PHY_TIMING11 0x99a0 +#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF +#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0 +#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000 +#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20 +#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000 +#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000 + +#define AR_PHY_RX_CHAINMASK 0x99a4 +#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12)) +#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000 +#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000 +#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac + +#define AR_PHY_EXT_CCA0 0x99b8 +#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF +#define AR_PHY_EXT_CCA0_THRESH62_S 0 + +#define AR_PHY_EXT_CCA 0x99bc +#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00 +#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9 +#define AR_PHY_EXT_CCA_THRESH62 0x007F0000 +#define AR_PHY_EXT_CCA_THRESH62_S 16 +#define AR_PHY_EXT_MINCCA_PWR 0xFF800000 +#define AR_PHY_EXT_MINCCA_PWR_S 23 +#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000 +#define AR9280_PHY_EXT_MINCCA_PWR_S 16 + +#define 
AR_PHY_SFCORR_EXT 0x99c0 +#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F +#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0 +#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80 +#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7 +#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000 +#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14 +#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000 +#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21 +#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28 + +#define AR_PHY_HALFGI 0x99D0 +#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0 +#define AR_PHY_HALFGI_DSC_MAN_S 4 +#define AR_PHY_HALFGI_DSC_EXP 0x0000000F +#define AR_PHY_HALFGI_DSC_EXP_S 0 + +#define AR_PHY_CHAN_INFO_MEMORY 0x99DC +#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001 + +#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0 + +#define AR_PHY_M_SLEEP 0x99f0 +#define AR_PHY_REFCLKDLY 0x99f4 +#define AR_PHY_REFCLKPD 0x99f8 + +#define AR_PHY_CALMODE 0x99f0 + +#define AR_PHY_CALMODE_IQ 0x00000000 +#define AR_PHY_CALMODE_ADC_GAIN 0x00000001 +#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002 +#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003 + +#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12)) +#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12)) +#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12)) +#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12)) + +#define AR_PHY_CURRENT_RSSI 0x9c1c +#define AR9280_PHY_CURRENT_RSSI 0x9c3c + +#define AR_PHY_RFBUS_GRANT 0x9C20 +#define AR_PHY_RFBUS_GRANT_EN 0x00000001 + +#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4 +#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320 + +#define AR_PHY_CHAN_INFO_GAIN 0x9CFC + +#define AR_PHY_MODE 0xA200 +#define AR_PHY_MODE_AR2133 0x08 +#define AR_PHY_MODE_AR5111 0x00 +#define AR_PHY_MODE_AR5112 0x08 +#define AR_PHY_MODE_DYNAMIC 0x04 +#define AR_PHY_MODE_RF2GHZ 0x02 +#define AR_PHY_MODE_RF5GHZ 0x00 +#define AR_PHY_MODE_CCK 0x01 +#define AR_PHY_MODE_OFDM 0x00 +#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100 + +#define AR_PHY_CCK_TX_CTRL 0xA204 +#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010 + +#define AR_PHY_CCK_DETECT 0xA208 +#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F +#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0 +/* [12:6] settling time for antenna switch */ +#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0 +#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6 +#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000 + +#define AR_PHY_GAIN_2GHZ 0xA20C +#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000 +#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18 +#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00 +#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10 +#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F +#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0 + +#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000 +#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17 +#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000 +#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12 +#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0 +#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6 +#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F +#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0 + +#define AR_PHY_CCK_RXCTRL4 0xA21C +#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000 +#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19 + +#define AR_PHY_DAG_CTRLCCK 0xA228 +#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200 +#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00 +#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10 + +#define AR_PHY_FORCE_CLKEN_CCK 0xA22C +#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040 + +#define AR_PHY_POWER_TX_RATE3 0xA234 +#define AR_PHY_POWER_TX_RATE4 0xA238 + +#define AR_PHY_SCRM_SEQ_XR 0xA23C 
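/*
 * The field definitions in this header come in mask/shift pairs: a FIELD
 * mask plus a FIELD_S shift for its least significant bit.  A minimal
 * sketch of how such a pair can be read or written (the FIELD_GET and
 * FIELD_SET helpers here are illustrative only, not part of this driver):
 *
 *   #define FIELD_GET(val, F)     (((val) & F) >> F##_S)
 *   #define FIELD_SET(val, F, v)  (((val) & ~F) | (((v) << F##_S) & F))
 *
 * For example, FIELD_GET(reg, AR_PHY_TIMING3_DSC_EXP) extracts bits
 * 13..16 of AR_PHY_TIMING3 (mask 0x0001E000, shift 13).
 */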
+#define AR_PHY_HEADER_DETECT_XR 0xA240 +#define AR_PHY_CHIRP_DETECTED_XR 0xA244 +#define AR_PHY_BLUETOOTH 0xA254 + +#define AR_PHY_TPCRG1 0xA258 +#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000 +#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14 + +#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000 +#define AR_PHY_TPCRG1_PD_GAIN_1_S 16 +#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000 +#define AR_PHY_TPCRG1_PD_GAIN_2_S 18 +#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000 +#define AR_PHY_TPCRG1_PD_GAIN_3_S 20 + +#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0 +#define AR_PHY_MASK2_M_31_45 0xa3a4 +#define AR_PHY_MASK2_M_16_30 0xa3a8 +#define AR_PHY_MASK2_M_00_15 0xa3ac +#define AR_PHY_MASK2_P_15_01 0xa3b8 +#define AR_PHY_MASK2_P_30_16 0xa3bc +#define AR_PHY_MASK2_P_45_31 0xa3c0 +#define AR_PHY_MASK2_P_61_45 0xa3c4 +#define AR_PHY_SPUR_REG 0x994c + +#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18) +#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18 + +#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000 +#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9) +#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9 +#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100 +#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F +#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0 + +#define AR_PHY_PILOT_MASK_01_30 0xa3b0 +#define AR_PHY_PILOT_MASK_31_60 0xa3b4 + +#define AR_PHY_CHANNEL_MASK_01_30 0x99d4 +#define AR_PHY_CHANNEL_MASK_31_60 0x99d8 + +#define AR_PHY_ANALOG_SWAP 0xa268 +#define AR_PHY_SWAP_ALT_CHAIN 0x00000040 + +#define AR_PHY_TPCRG5 0xA26C +#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F +#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000 +#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22 + +#define AR_PHY_POWER_TX_RATE5 0xA38C +#define AR_PHY_POWER_TX_RATE6 0xA390 + +#define AR_PHY_CAL_CHAINMASK 0xA39C + +#define AR_PHY_POWER_TX_SUB 0xA3C8 +#define AR_PHY_POWER_TX_RATE7 0xA3CC +#define AR_PHY_POWER_TX_RATE8 0xA3D0 +#define AR_PHY_POWER_TX_RATE9 0xA3D4 + +#define AR_PHY_XPA_CFG 0xA3D8 +#define AR_PHY_FORCE_XPA_CFG 0x000000001 +#define AR_PHY_FORCE_XPA_CFG_S 0 + +#define AR_PHY_CH1_CCA 0xa864 +#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000 +#define AR_PHY_CH1_MINCCA_PWR_S 19 +#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000 +#define AR9280_PHY_CH1_MINCCA_PWR_S 20 + +#define AR_PHY_CH2_CCA 0xb864 +#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000 +#define AR_PHY_CH2_MINCCA_PWR_S 19 + +#define AR_PHY_CH1_EXT_CCA 0xa9bc +#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000 +#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23 +#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000 +#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16 + +#define AR_PHY_CH2_EXT_CCA 0xb9bc +#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000 +#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23 + +#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \ + int r; \ + for (r = 0; r < ((iniarray)->ia_rows); r++) { \ + REG_WRITE(ah, INI_RA((iniarray), r, 0), (regData)[r]); \ + DPRINTF(ah->ah_sc, ATH_DBG_CHANNEL, \ + "RF 0x%x V 0x%x\n", \ + INI_RA((iniarray), r, 0), (regData)[r]); \ + DO_DELAY(regWr); \ + } \ + } while (0) + +#define ATH9K_KEY_XOR 0xaa + +#define ATH9K_IS_MIC_ENABLED(ah) \ + (AH5416(ah)->ah_staId1Defaults & AR_STA_ID1_CRPT_MIC_ENABLE) + +#define ANTSWAP_AB 0x0001 +#define REDUCE_CHAIN_0 0x00000050 +#define 
REDUCE_CHAIN_1 0x00000051 + +#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \ + int i; \ + for (i = 0; i < (_iniarray)->ia_rows; i++) \ + (_bank)[i] = INI_RA((_iniarray), i, _col);; \ + } while (0) + +#endif diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c new file mode 100644 index 0000000..73c460a --- /dev/null +++ b/drivers/net/wireless/ath9k/rc.c @@ -0,0 +1,2126 @@ +/* + * Copyright (c) 2004 Video54 Technologies, Inc. + * Copyright (c) 2004-2008 Atheros Communications, Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Atheros rate control algorithm + */ + +#include "core.h" +#include "../net/mac80211/rate.h" + +static u32 tx_triglevel_max; + +static struct ath_rate_table ar5416_11na_ratetable = { + 42, + { + { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 0x0b, 0x00, 12, + 0, 2, 1, 0, 0, 0, 0, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 0x0f, 0x00, 18, + 0, 3, 1, 1, 1, 1, 1, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */ + 10000, 0x0a, 0x00, 24, + 2, 4, 2, 2, 2, 2, 2, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */ + 13900, 0x0e, 0x00, 36, + 2, 6, 2, 3, 3, 3, 3, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */ + 17300, 0x09, 0x00, 48, + 4, 10, 3, 4, 4, 4, 4, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */ + 23000, 0x0d, 0x00, 72, + 4, 14, 3, 5, 5, 5, 5, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 0x08, 0x00, 96, + 4, 20, 3, 6, 6, 6, 6, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */ + 29300, 0x0c, 0x00, 108, + 4, 23, 3, 7, 7, 7, 7, 0 }, + { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */ + 6400, 0x80, 0x00, 0, + 0, 2, 3, 8, 24, 8, 24, 3216 }, + { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */ + 12700, 0x81, 0x00, 1, + 2, 4, 3, 9, 25, 9, 25, 6434 }, + { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */ + 18800, 0x82, 0x00, 2, + 2, 6, 3, 10, 26, 10, 26, 9650 }, + { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */ + 25000, 0x83, 0x00, 3, + 4, 10, 3, 11, 27, 11, 27, 12868 }, + { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */ + 36700, 0x84, 0x00, 4, + 4, 14, 3, 12, 28, 12, 28, 19304 }, + { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */ + 48100, 0x85, 0x00, 5, + 4, 20, 3, 13, 29, 13, 29, 25740 }, + { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */ + 53500, 0x86, 0x00, 6, + 4, 23, 3, 14, 30, 14, 30, 28956 }, + { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */ + 59000, 0x87, 0x00, 7, + 4, 25, 3, 15, 31, 15, 32, 32180 }, + { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */ + 12700, 0x88, 0x00, + 8, 0, 2, 3, 16, 33, 16, 33, 6430 }, + { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */ + 24800, 0x89, 0x00, 9, + 2, 4, 3, 17, 34, 17, 34, 12860 }, + { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */ + 36600, 0x8a, 0x00, 
10, + 2, 6, 3, 18, 35, 18, 35, 19300 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */ + 48100, 0x8b, 0x00, 11, + 4, 10, 3, 19, 36, 19, 36, 25736 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */ + 69500, 0x8c, 0x00, 12, + 4, 14, 3, 20, 37, 20, 37, 38600 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */ + 89500, 0x8d, 0x00, 13, + 4, 20, 3, 21, 38, 21, 38, 51472 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */ + 98900, 0x8e, 0x00, 14, + 4, 23, 3, 22, 39, 22, 39, 57890 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */ + 108300, 0x8f, 0x00, 15, + 4, 25, 3, 23, 40, 23, 41, 64320 }, + { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */ + 13200, 0x80, 0x00, 0, + 0, 2, 3, 8, 24, 24, 24, 6684 }, + { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */ + 25900, 0x81, 0x00, 1, + 2, 4, 3, 9, 25, 25, 25, 13368 }, + { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */ + 38600, 0x82, 0x00, 2, + 2, 6, 3, 10, 26, 26, 26, 20052 }, + { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */ + 49800, 0x83, 0x00, 3, + 4, 10, 3, 11, 27, 27, 27, 26738 }, + { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */ + 72200, 0x84, 0x00, 4, + 4, 14, 3, 12, 28, 28, 28, 40104 }, + { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */ + 92900, 0x85, 0x00, 5, + 4, 20, 3, 13, 29, 29, 29, 53476 }, + { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */ + 102700, 0x86, 0x00, 6, + 4, 23, 3, 14, 30, 30, 30, 60156 }, + { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */ + 112000, 0x87, 0x00, 7, + 4, 25, 3, 15, 31, 32, 32, 66840 }, + { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ + 122000, 0x87, 0x00, 7, + 4, 25, 3, 15, 31, 32, 32, 74200 }, + { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */ + 25800, 0x88, 0x00, 8, + 0, 2, 3, 16, 33, 33, 33, 13360 }, + { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */ + 49800, 0x89, 0x00, 9, + 2, 4, 3, 17, 34, 34, 34, 26720 }, + { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */ + 71900, 0x8a, 0x00, 10, + 2, 6, 3, 18, 35, 35, 35, 40080 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */ + 92500, 0x8b, 0x00, 11, + 4, 10, 3, 19, 36, 36, 36, 53440 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */ + 130300, 0x8c, 0x00, 12, + 4, 14, 3, 20, 37, 37, 37, 80160 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */ + 162800, 0x8d, 0x00, 13, + 4, 20, 3, 21, 38, 38, 38, 106880 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */ + 178200, 0x8e, 0x00, 14, + 4, 23, 3, 22, 39, 39, 39, 120240 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */ + 192100, 0x8f, 0x00, 15, + 4, 25, 3, 23, 40, 41, 41, 133600 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ + 207000, 0x8f, 0x00, 15, + 4, 25, 3, 23, 40, 41, 41, 148400 }, + }, + 50, /* probe interval */ + 50, /* rssi reduce interval */ + WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ +}; + +/* TRUE_ALL - valid for 20/40/Legacy, + * TRUE - Legacy only, + * TRUE_20 - HT 20 only, + * TRUE_40 - HT 40 only */ + +/* 4ms frame limit not used for NG mode. 
The values filled + * for HT are the 64K max aggregate limit */ + +static struct ath_rate_table ar5416_11ng_ratetable = { + 46, + { + { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 1000, /* 1 Mb */ + 900, 0x1b, 0x00, 2, + 0, 0, 1, 0, 0, 0, 0, 0 }, + { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 2000, /* 2 Mb */ + 1900, 0x1a, 0x04, 4, + 1, 1, 1, 1, 1, 1, 1, 0 }, + { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 5500, /* 5.5 Mb */ + 4900, 0x19, 0x04, 11, + 2, 2, 2, 2, 2, 2, 2, 0 }, + { TRUE_ALL, TRUE_ALL, WLAN_PHY_CCK, 11000, /* 11 Mb */ + 8100, 0x18, 0x04, 22, + 3, 3, 2, 3, 3, 3, 3, 0 }, + { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 0x0b, 0x00, 12, + 4, 2, 1, 4, 4, 4, 4, 0 }, + { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 0x0f, 0x00, 18, + 4, 3, 1, 5, 5, 5, 5, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */ + 10100, 0x0a, 0x00, 24, + 6, 4, 1, 6, 6, 6, 6, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */ + 14100, 0x0e, 0x00, 36, + 6, 6, 2, 7, 7, 7, 7, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */ + 17700, 0x09, 0x00, 48, + 8, 10, 3, 8, 8, 8, 8, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */ + 23700, 0x0d, 0x00, 72, + 8, 14, 3, 9, 9, 9, 9, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 0x08, 0x00, 96, + 8, 20, 3, 10, 10, 10, 10, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */ + 30900, 0x0c, 0x00, 108, + 8, 23, 3, 11, 11, 11, 11, 0 }, + { FALSE, FALSE, WLAN_PHY_HT_20_SS, 6500, /* 6.5 Mb */ + 6400, 0x80, 0x00, 0, + 4, 2, 3, 12, 28, 12, 28, 3216 }, + { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 13000, /* 13 Mb */ + 12700, 0x81, 0x00, 1, + 6, 4, 3, 13, 29, 13, 29, 6434 }, + { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 19500, /* 19.5 Mb */ + 18800, 0x82, 0x00, 2, + 6, 6, 3, 14, 30, 14, 30, 9650 }, + { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 26000, /* 26 Mb */ + 25000, 0x83, 0x00, 3, + 8, 10, 3, 15, 31, 15, 31, 12868 }, + { TRUE_20, TRUE_20, WLAN_PHY_HT_20_SS, 39000, /* 39 Mb */ + 36700, 0x84, 0x00, 4, + 8, 14, 3, 16, 32, 16, 32, 19304 }, + { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 52000, /* 52 Mb */ + 48100, 0x85, 0x00, 5, + 8, 20, 3, 17, 33, 17, 33, 25740 }, + { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 58500, /* 58.5 Mb */ + 53500, 0x86, 0x00, 6, + 8, 23, 3, 18, 34, 18, 34, 28956 }, + { FALSE, TRUE_20, WLAN_PHY_HT_20_SS, 65000, /* 65 Mb */ + 59000, 0x87, 0x00, 7, + 8, 25, 3, 19, 35, 19, 36, 32180 }, + { FALSE, FALSE, WLAN_PHY_HT_20_DS, 13000, /* 13 Mb */ + 12700, 0x88, 0x00, 8, + 4, 2, 3, 20, 37, 20, 37, 6430 }, + { FALSE, FALSE, WLAN_PHY_HT_20_DS, 26000, /* 26 Mb */ + 24800, 0x89, 0x00, 9, + 6, 4, 3, 21, 38, 21, 38, 12860 }, + { FALSE, FALSE, WLAN_PHY_HT_20_DS, 39000, /* 39 Mb */ + 36600, 0x8a, 0x00, 10, + 6, 6, 3, 22, 39, 22, 39, 19300 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 52000, /* 52 Mb */ + 48100, 0x8b, 0x00, 11, + 8, 10, 3, 23, 40, 23, 40, 25736 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 78000, /* 78 Mb */ + 69500, 0x8c, 0x00, 12, + 8, 14, 3, 24, 41, 24, 41, 38600 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 104000, /* 104 Mb */ + 89500, 0x8d, 0x00, 13, + 8, 20, 3, 25, 42, 25, 42, 51472 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 117000, /* 117 Mb */ + 98900, 0x8e, 0x00, 14, + 8, 23, 3, 26, 43, 26, 44, 57890 }, + { TRUE_20, FALSE, WLAN_PHY_HT_20_DS, 130000, /* 130 Mb */ + 108300, 0x8f, 0x00, 15, + 8, 25, 3, 27, 44, 27, 45, 64320 }, + { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 13500, /* 13.5 Mb */ + 13200, 0x80, 0x00, 0, + 8, 2, 3, 12, 28, 28, 28, 6684 }, + { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 27500, /* 27.0 Mb */ + 25900, 0x81, 0x00, 1, + 8, 4, 3, 13, 29, 29, 29, 13368 }, + { 
TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 40500, /* 40.5 Mb */ + 38600, 0x82, 0x00, 2, + 8, 6, 3, 14, 30, 30, 30, 20052 }, + { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 54000, /* 54 Mb */ + 49800, 0x83, 0x00, 3, + 8, 10, 3, 15, 31, 31, 31, 26738 }, + { TRUE_40, TRUE_40, WLAN_PHY_HT_40_SS, 81500, /* 81 Mb */ + 72200, 0x84, 0x00, 4, + 8, 14, 3, 16, 32, 32, 32, 40104 }, + { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 108000, /* 108 Mb */ + 92900, 0x85, 0x00, 5, + 8, 20, 3, 17, 33, 33, 33, 53476 }, + { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 121500, /* 121.5 Mb */ + 102700, 0x86, 0x00, 6, + 8, 23, 3, 18, 34, 34, 34, 60156 }, + { FALSE, TRUE_40, WLAN_PHY_HT_40_SS, 135000, /* 135 Mb */ + 112000, 0x87, 0x00, 7, + 8, 23, 3, 19, 35, 36, 36, 66840 }, + { FALSE, TRUE_40, WLAN_PHY_HT_40_SS_HGI, 150000, /* 150 Mb */ + 122000, 0x87, 0x00, 7, + 8, 25, 3, 19, 35, 36, 36, 74200 }, + { FALSE, FALSE, WLAN_PHY_HT_40_DS, 27000, /* 27 Mb */ + 25800, 0x88, 0x00, 8, + 8, 2, 3, 20, 37, 37, 37, 13360 }, + { FALSE, FALSE, WLAN_PHY_HT_40_DS, 54000, /* 54 Mb */ + 49800, 0x89, 0x00, 9, + 8, 4, 3, 21, 38, 38, 38, 26720 }, + { FALSE, FALSE, WLAN_PHY_HT_40_DS, 81000, /* 81 Mb */ + 71900, 0x8a, 0x00, 10, + 8, 6, 3, 22, 39, 39, 39, 40080 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 108000, /* 108 Mb */ + 92500, 0x8b, 0x00, 11, + 8, 10, 3, 23, 40, 40, 40, 53440 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 162000, /* 162 Mb */ + 130300, 0x8c, 0x00, 12, + 8, 14, 3, 24, 41, 41, 41, 80160 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 216000, /* 216 Mb */ + 162800, 0x8d, 0x00, 13, + 8, 20, 3, 25, 42, 42, 42, 106880 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 243000, /* 243 Mb */ + 178200, 0x8e, 0x00, 14, + 8, 23, 3, 26, 43, 43, 43, 120240 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS, 270000, /* 270 Mb */ + 192100, 0x8f, 0x00, 15, + 8, 23, 3, 27, 44, 45, 45, 133600 }, + { TRUE_40, FALSE, WLAN_PHY_HT_40_DS_HGI, 300000, /* 300 Mb */ + 207000, 0x8f, 0x00, 15, + 8, 25, 3, 27, 44, 45, 45, 148400 }, + }, + 50, /* probe interval */ + 50, /* rssi reduce interval */ + WLAN_RC_HT_FLAG, /* Phy rates allowed initially */ +}; + +static struct ath_rate_table ar5416_11a_ratetable = { + 8, + { + { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 0x0b, 0x00, (0x80|12), + 0, 2, 1, 0, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 0x0f, 0x00, 18, + 0, 3, 1, 1, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */ + 10000, 0x0a, 0x00, (0x80|24), + 2, 4, 2, 2, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */ + 13900, 0x0e, 0x00, 36, + 2, 6, 2, 3, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */ + 17300, 0x09, 0x00, (0x80|48), + 4, 10, 3, 4, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */ + 23000, 0x0d, 0x00, 72, + 4, 14, 3, 5, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 0x08, 0x00, 96, + 4, 19, 3, 6, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */ + 29300, 0x0c, 0x00, 108, + 4, 23, 3, 7, 0 }, + }, + 50, /* probe interval */ + 50, /* rssi reduce interval */ + 0, /* Phy rates allowed initially */ +}; + +static struct ath_rate_table ar5416_11a_ratetable_Half = { + 8, + { + { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 6 Mb */ + 2700, 0x0b, 0x00, (0x80|6), + 0, 2, 1, 0, 0}, + { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 9 Mb */ + 3900, 0x0f, 0x00, 9, + 0, 3, 1, 1, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 12 Mb */ + 5000, 0x0a, 0x00, (0x80|12), + 2, 4, 2, 2, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 18 Mb */ + 6950, 0x0e, 0x00, 18, + 2, 6, 2, 3, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 24 Mb */ + 8650, 0x09, 0x00, (0x80|24), + 4, 10, 
3, 4, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 36 Mb */ + 11500, 0x0d, 0x00, 36, + 4, 14, 3, 5, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 48 Mb */ + 13700, 0x08, 0x00, 48, + 4, 19, 3, 6, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 27000, /* 54 Mb */ + 14650, 0x0c, 0x00, 54, + 4, 23, 3, 7, 0 }, + }, + 50, /* probe interval */ + 50, /* rssi reduce interval */ + 0, /* Phy rates allowed initially */ +}; + +static struct ath_rate_table ar5416_11a_ratetable_Quarter = { + 8, + { + { TRUE, TRUE, WLAN_PHY_OFDM, 1500, /* 6 Mb */ + 1350, 0x0b, 0x00, (0x80|3), + 0, 2, 1, 0, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 2250, /* 9 Mb */ + 1950, 0x0f, 0x00, 4, + 0, 3, 1, 1, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 3000, /* 12 Mb */ + 2500, 0x0a, 0x00, (0x80|6), + 2, 4, 2, 2, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 4500, /* 18 Mb */ + 3475, 0x0e, 0x00, 9, + 2, 6, 2, 3, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 6000, /* 25 Mb */ + 4325, 0x09, 0x00, (0x80|12), + 4, 10, 3, 4, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 9000, /* 36 Mb */ + 5750, 0x0d, 0x00, 18, + 4, 14, 3, 5, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 48 Mb */ + 6850, 0x08, 0x00, 24, + 4, 19, 3, 6, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 13500, /* 54 Mb */ + 7325, 0x0c, 0x00, 27, + 4, 23, 3, 7, 0 }, + }, + 50, /* probe interval */ + 50, /* rssi reduce interval */ + 0, /* Phy rates allowed initially */ +}; + +static struct ath_rate_table ar5416_11g_ratetable = { + 12, + { + { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */ + 900, 0x1b, 0x00, 2, + 0, 0, 1, 0, 0 }, + { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */ + 1900, 0x1a, 0x04, 4, + 1, 1, 1, 1, 0 }, + { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */ + 4900, 0x19, 0x04, 11, + 2, 2, 2, 2, 0 }, + { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */ + 8100, 0x18, 0x04, 22, + 3, 3, 2, 3, 0 }, + { FALSE, FALSE, WLAN_PHY_OFDM, 6000, /* 6 Mb */ + 5400, 0x0b, 0x00, 12, + 4, 2, 1, 4, 0 }, + { FALSE, FALSE, WLAN_PHY_OFDM, 9000, /* 9 Mb */ + 7800, 0x0f, 0x00, 18, + 4, 3, 1, 5, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 12000, /* 12 Mb */ + 10000, 0x0a, 0x00, 24, + 6, 4, 1, 6, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 18000, /* 18 Mb */ + 13900, 0x0e, 0x00, 36, + 6, 6, 2, 7, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 24000, /* 24 Mb */ + 17300, 0x09, 0x00, 48, + 8, 10, 3, 8, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 36000, /* 36 Mb */ + 23000, 0x0d, 0x00, 72, + 8, 14, 3, 9, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 48000, /* 48 Mb */ + 27400, 0x08, 0x00, 96, + 8, 19, 3, 10, 0 }, + { TRUE, TRUE, WLAN_PHY_OFDM, 54000, /* 54 Mb */ + 29300, 0x0c, 0x00, 108, + 8, 23, 3, 11, 0 }, + }, + 50, /* probe interval */ + 50, /* rssi reduce interval */ + 0, /* Phy rates allowed initially */ +}; + +static struct ath_rate_table ar5416_11b_ratetable = { + 4, + { + { TRUE, TRUE, WLAN_PHY_CCK, 1000, /* 1 Mb */ + 900, 0x1b, 0x00, (0x80|2), + 0, 0, 1, 0, 0 }, + { TRUE, TRUE, WLAN_PHY_CCK, 2000, /* 2 Mb */ + 1800, 0x1a, 0x04, (0x80|4), + 1, 1, 1, 1, 0 }, + { TRUE, TRUE, WLAN_PHY_CCK, 5500, /* 5.5 Mb */ + 4300, 0x19, 0x04, (0x80|11), + 1, 2, 2, 2, 0 }, + { TRUE, TRUE, WLAN_PHY_CCK, 11000, /* 11 Mb */ + 7100, 0x18, 0x04, (0x80|22), + 1, 4, 100, 3, 0 }, + }, + 100, /* probe interval */ + 100, /* rssi reduce interval */ + 0, /* Phy rates allowed initially */ +}; + +static void ar5416_attach_ratetables(struct ath_rate_softc *sc) +{ + /* + * Attach rate tables. 
+ */ + sc->hw_rate_table[ATH9K_MODE_11B] = &ar5416_11b_ratetable; + sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable; + sc->hw_rate_table[ATH9K_MODE_11G] = &ar5416_11g_ratetable; + + sc->hw_rate_table[ATH9K_MODE_11NA_HT20] = &ar5416_11na_ratetable; + sc->hw_rate_table[ATH9K_MODE_11NG_HT20] = &ar5416_11ng_ratetable; + sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS] = + &ar5416_11na_ratetable; + sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS] = + &ar5416_11na_ratetable; + sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS] = + &ar5416_11ng_ratetable; + sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS] = + &ar5416_11ng_ratetable; +} + +static void ar5416_setquarter_ratetable(struct ath_rate_softc *sc) +{ + sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Quarter; + return; +} + +static void ar5416_sethalf_ratetable(struct ath_rate_softc *sc) +{ + sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable_Half; + return; +} + +static void ar5416_setfull_ratetable(struct ath_rate_softc *sc) +{ + sc->hw_rate_table[ATH9K_MODE_11A] = &ar5416_11a_ratetable; + return; +} + +/* + * Return the median of three numbers + */ +static inline int8_t median(int8_t a, int8_t b, int8_t c) +{ + if (a >= b) { + if (b >= c) + return b; + else if (a > c) + return c; + else + return a; + } else { + if (a >= c) + return a; + else if (b >= c) + return c; + else + return b; + } +} + +static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table, + struct ath_tx_ratectrl *rate_ctrl) +{ + u8 i, j, idx, idx_next; + + for (i = rate_ctrl->max_valid_rate - 1; i > 0; i--) { + for (j = 0; j <= i-1; j++) { + idx = rate_ctrl->valid_rate_index[j]; + idx_next = rate_ctrl->valid_rate_index[j+1]; + + if (rate_table->info[idx].ratekbps > + rate_table->info[idx_next].ratekbps) { + rate_ctrl->valid_rate_index[j] = idx_next; + rate_ctrl->valid_rate_index[j+1] = idx; + } + } + } +} + +/* Access functions for valid_txrate_mask */ + +static void ath_rc_init_valid_txmask(struct ath_tx_ratectrl *rate_ctrl) +{ + u8 i; + + for (i = 0; i < rate_ctrl->rate_table_size; i++) + rate_ctrl->valid_rate_index[i] = FALSE; +} + +static inline void ath_rc_set_valid_txmask(struct ath_tx_ratectrl *rate_ctrl, + u8 index, int valid_tx_rate) +{ + ASSERT(index <= rate_ctrl->rate_table_size); + rate_ctrl->valid_rate_index[index] = valid_tx_rate ? 
TRUE : FALSE; +} + +static inline int ath_rc_isvalid_txmask(struct ath_tx_ratectrl *rate_ctrl, + u8 index) +{ + ASSERT(index <= rate_ctrl->rate_table_size); + return rate_ctrl->valid_rate_index[index]; +} + +/* Iterators for valid_txrate_mask */ +static inline int +ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table, + struct ath_tx_ratectrl *rate_ctrl, + u8 cur_valid_txrate, + u8 *next_idx) +{ + u8 i; + + for (i = 0; i < rate_ctrl->max_valid_rate - 1; i++) { + if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) { + *next_idx = rate_ctrl->valid_rate_index[i+1]; + return TRUE; + } + } + + /* No more valid rates */ + *next_idx = 0; + return FALSE; +} + +/* Return true only for single stream */ + +static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw) +{ + if (WLAN_RC_PHY_HT(phy) & !(capflag & WLAN_RC_HT_FLAG)) + return FALSE; + if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG)) + return FALSE; + if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG)) + return FALSE; + if (!ignore_cw && WLAN_RC_PHY_HT(phy)) + if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG)) + return FALSE; + if (!WLAN_RC_PHY_40(phy) && (capflag & WLAN_RC_40_FLAG)) + return FALSE; + return TRUE; +} + +static inline int +ath_rc_get_nextlowervalid_txrate(const struct ath_rate_table *rate_table, + struct ath_tx_ratectrl *rate_ctrl, + u8 cur_valid_txrate, u8 *next_idx) +{ + int8_t i; + + for (i = 1; i < rate_ctrl->max_valid_rate ; i++) { + if (rate_ctrl->valid_rate_index[i] == cur_valid_txrate) { + *next_idx = rate_ctrl->valid_rate_index[i-1]; + return TRUE; + } + } + return FALSE; +} + +/* + * Initialize the Valid Rate Index from valid entries in Rate Table + */ +static u8 +ath_rc_sib_init_validrates(struct ath_rate_node *ath_rc_priv, + const struct ath_rate_table *rate_table, + u32 capflag) +{ + struct ath_tx_ratectrl *rate_ctrl; + u8 i, hi = 0; + u32 valid; + + rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv); + for (i = 0; i < rate_table->rate_cnt; i++) { + valid = (ath_rc_priv->single_stream ? + rate_table->info[i].valid_single_stream : + rate_table->info[i].valid); + if (valid == TRUE) { + u32 phy = rate_table->info[i].phy; + u8 valid_rate_count = 0; + + if (!ath_rc_valid_phyrate(phy, capflag, FALSE)) + continue; + + valid_rate_count = rate_ctrl->valid_phy_ratecnt[phy]; + + rate_ctrl->valid_phy_rateidx[phy][valid_rate_count] = i; + rate_ctrl->valid_phy_ratecnt[phy] += 1; + ath_rc_set_valid_txmask(rate_ctrl, i, TRUE); + hi = A_MAX(hi, i); + } + } + return hi; +} + +/* + * Initialize the Valid Rate Index from Rate Set + */ +static u8 +ath_rc_sib_setvalid_rates(struct ath_rate_node *ath_rc_priv, + const struct ath_rate_table *rate_table, + struct ath_rateset *rateset, + u32 capflag) +{ + /* XXX: Clean me up and make identation friendly */ + u8 i, j, hi = 0; + struct ath_tx_ratectrl *rate_ctrl = + (struct ath_tx_ratectrl *)(ath_rc_priv); + + /* Use intersection of working rates and valid rates */ + for (i = 0; i < rateset->rs_nrates; i++) { + for (j = 0; j < rate_table->rate_cnt; j++) { + u32 phy = rate_table->info[j].phy; + u32 valid = (ath_rc_priv->single_stream ? 
+ rate_table->info[j].valid_single_stream : + rate_table->info[j].valid); + + /* We allow a rate only if its valid and the + * capflag matches one of the validity + * (TRUE/TRUE_20/TRUE_40) flags */ + + /* XXX: catch the negative of this branch + * first and then continue */ + if (((rateset->rs_rates[i] & 0x7F) == + (rate_table->info[j].dot11rate & 0x7F)) && + ((valid & WLAN_RC_CAP_MODE(capflag)) == + WLAN_RC_CAP_MODE(capflag)) && + !WLAN_RC_PHY_HT(phy)) { + + u8 valid_rate_count = 0; + + if (!ath_rc_valid_phyrate(phy, capflag, FALSE)) + continue; + + valid_rate_count = + rate_ctrl->valid_phy_ratecnt[phy]; + + rate_ctrl->valid_phy_rateidx[phy] + [valid_rate_count] = j; + rate_ctrl->valid_phy_ratecnt[phy] += 1; + ath_rc_set_valid_txmask(rate_ctrl, j, TRUE); + hi = A_MAX(hi, j); + } + } + } + return hi; +} + +static u8 +ath_rc_sib_setvalid_htrates(struct ath_rate_node *ath_rc_priv, + const struct ath_rate_table *rate_table, + u8 *mcs_set, u32 capflag) +{ + u8 i, j, hi = 0; + struct ath_tx_ratectrl *rate_ctrl = + (struct ath_tx_ratectrl *)(ath_rc_priv); + + /* Use intersection of working rates and valid rates */ + for (i = 0; i < ((struct ath_rateset *)mcs_set)->rs_nrates; i++) { + for (j = 0; j < rate_table->rate_cnt; j++) { + u32 phy = rate_table->info[j].phy; + u32 valid = (ath_rc_priv->single_stream ? + rate_table->info[j].valid_single_stream : + rate_table->info[j].valid); + + if (((((struct ath_rateset *) + mcs_set)->rs_rates[i] & 0x7F) != + (rate_table->info[j].dot11rate & 0x7F)) || + !WLAN_RC_PHY_HT(phy) || + !WLAN_RC_PHY_HT_VALID(valid, capflag)) + continue; + + if (!ath_rc_valid_phyrate(phy, capflag, FALSE)) + continue; + + rate_ctrl->valid_phy_rateidx[phy] + [rate_ctrl->valid_phy_ratecnt[phy]] = j; + rate_ctrl->valid_phy_ratecnt[phy] += 1; + ath_rc_set_valid_txmask(rate_ctrl, j, TRUE); + hi = A_MAX(hi, j); + } + } + return hi; +} + +/* + * Attach to a device instance. Setup the public definition + * of how much per-node space we need and setup the private + * phy tables that have rate control parameters. + */ +struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah) +{ + struct ath_rate_softc *asc; + + /* we are only in user context so we can sleep for memory */ + asc = kzalloc(sizeof(struct ath_rate_softc), GFP_KERNEL); + if (asc == NULL) + return NULL; + + ar5416_attach_ratetables(asc); + + /* Save Maximum TX Trigger Level (used for 11n) */ + tx_triglevel_max = ah->ah_caps.tx_triglevel_max; + /* return alias for ath_rate_softc * */ + return asc; +} + +static struct ath_rate_node *ath_rate_node_alloc(struct ath_vap *avp, + struct ath_rate_softc *rsc, + gfp_t gfp) +{ + struct ath_rate_node *anode; + + anode = kzalloc(sizeof(struct ath_rate_node), gfp); + if (anode == NULL) + return NULL; + + anode->avp = avp; + anode->asc = rsc; + avp->rc_node = anode; + + return anode; +} + +static void ath_rate_node_free(struct ath_rate_node *anode) +{ + if (anode != NULL) + kfree(anode); +} + +void ath_rate_detach(struct ath_rate_softc *asc) +{ + if (asc != NULL) + kfree(asc); +} + +u8 ath_rate_findrateix(struct ath_softc *sc, + u8 dot11rate) +{ + const struct ath_rate_table *ratetable; + struct ath_rate_softc *rsc = sc->sc_rc; + int i; + + ratetable = rsc->hw_rate_table[sc->sc_curmode]; + + if (WARN_ON(!ratetable)) + return 0; + + for (i = 0; i < ratetable->rate_cnt; i++) { + if ((ratetable->info[i].dot11rate & 0x7f) == (dot11rate & 0x7f)) + return i; + } + + return 0; +} + +/* + * Update rate-control state on a device state change. 
When + * operating as a station this includes associate/reassociate + * with an AP. Otherwise this gets called, for example, when + * we transition to run state when operating as an AP. + */ +void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp) +{ + struct ath_rate_softc *asc = sc->sc_rc; + + /* For half and quarter rate channels use different + * rate tables + */ + if (sc->sc_curchan.channelFlags & CHANNEL_HALF) + ar5416_sethalf_ratetable(asc); + else if (sc->sc_curchan.channelFlags & CHANNEL_QUARTER) + ar5416_setquarter_ratetable(asc); + else /* full rate */ + ar5416_setfull_ratetable(asc); + + if (avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) { + asc->fixedrix = + sc->sc_rixmap[avp->av_config.av_fixed_rateset & 0xff]; + /* NB: check the fixed rate exists */ + if (asc->fixedrix == 0xff) + asc->fixedrix = IEEE80211_FIXED_RATE_NONE; + } else { + asc->fixedrix = IEEE80211_FIXED_RATE_NONE; + } +} + +static u8 ath_rc_ratefind_ht(struct ath_softc *sc, + struct ath_rate_node *ath_rc_priv, + const struct ath_rate_table *rate_table, + int probe_allowed, int *is_probing, + int is_retry) +{ + u32 dt, best_thruput, this_thruput, now_msec; + u8 rate, next_rate, best_rate, maxindex, minindex; + int8_t rssi_last, rssi_reduce = 0, index = 0; + struct ath_tx_ratectrl *rate_ctrl = NULL; + + rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv ? + (ath_rc_priv) : NULL); + + *is_probing = FALSE; + + rssi_last = median(rate_ctrl->rssi_last, + rate_ctrl->rssi_last_prev, + rate_ctrl->rssi_last_prev2); + + /* + * Age (reduce) last ack rssi based on how old it is. + * The bizarre numbers are so the delta is 160msec, + * meaning we divide by 16. + * 0msec <= dt <= 25msec: don't derate + * 25msec <= dt <= 185msec: derate linearly from 0 to 10dB + * 185msec <= dt: derate by 10dB + */ + + now_msec = jiffies_to_msecs(jiffies); + dt = now_msec - rate_ctrl->rssi_time; + + if (dt >= 185) + rssi_reduce = 10; + else if (dt >= 25) + rssi_reduce = (u8)((dt - 25) >> 4); + + /* Now reduce rssi_last by rssi_reduce */ + if (rssi_last < rssi_reduce) + rssi_last = 0; + else + rssi_last -= rssi_reduce; + + /* + * Now look up the rate in the rssi table and return it. + * If no rates match then we return 0 (lowest rate) + */ + + best_thruput = 0; + maxindex = rate_ctrl->max_valid_rate-1; + + minindex = 0; + best_rate = minindex; + + /* + * Try the higher rate first. It will reduce memory moving time + * if we have very good channel characteristics. + */ + for (index = maxindex; index >= minindex ; index--) { + u8 per_thres; + + rate = rate_ctrl->valid_rate_index[index]; + if (rate > rate_ctrl->rate_max_phy) + continue; + + /* + * For TCP the average collision rate is around 11%, + * so we ignore PERs less than this. This is to + * prevent the rate we are currently using (whose + * PER might be in the 10-15 range because of TCP + * collisions) looking worse than the next lower + * rate whose PER has decayed close to 0. If we + * used the next lower rate, its PER would grow to + * 10-15 and we would be worse off than staying + * at the current rate. 
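 *
 * A worked example of the two heuristics above, with illustrative
 * numbers only (not taken from the rate tables): an ACK RSSI that is
 * dt = 105 msec old is derated by (105 - 25) >> 4 = 5 dB; and with
 * the 12% PER floor applied below, a candidate at 48000 kbps with
 * PER 5 is evaluated with per_thres = 12, scoring
 * 48000 * (100 - 12) = 4224000, while one at 36000 kbps with PER 20
 * scores 36000 * 80 = 2880000, so the faster rate is kept.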
+ */ + per_thres = rate_ctrl->state[rate].per; + if (per_thres < 12) + per_thres = 12; + + this_thruput = rate_table->info[rate].user_ratekbps * + (100 - per_thres); + + if (best_thruput <= this_thruput) { + best_thruput = this_thruput; + best_rate = rate; + } + } + + rate = best_rate; + + /* if we are retrying for more than half the number + * of max retries, use the min rate for the next retry + */ + if (is_retry) + rate = rate_ctrl->valid_rate_index[minindex]; + + rate_ctrl->rssi_last_lookup = rssi_last; + + /* + * Must check the actual rate (ratekbps) to account for + * non-monoticity of 11g's rate table + */ + + if (rate >= rate_ctrl->rate_max_phy && probe_allowed) { + rate = rate_ctrl->rate_max_phy; + + /* Probe the next allowed phy state */ + /* FIXME:XXXX Check to make sure ratMax is checked properly */ + if (ath_rc_get_nextvalid_txrate(rate_table, + rate_ctrl, rate, &next_rate) && + (now_msec - rate_ctrl->probe_time > + rate_table->probe_interval) && + (rate_ctrl->hw_maxretry_pktcnt >= 1)) { + rate = next_rate; + rate_ctrl->probe_rate = rate; + rate_ctrl->probe_time = now_msec; + rate_ctrl->hw_maxretry_pktcnt = 0; + *is_probing = TRUE; + } + } + + /* + * Make sure rate is not higher than the allowed maximum. + * We should also enforce the min, but I suspect the min is + * normally 1 rather than 0 because of the rate 9 vs 6 issue + * in the old code. + */ + if (rate > (rate_ctrl->rate_table_size - 1)) + rate = rate_ctrl->rate_table_size - 1; + + ASSERT((rate_table->info[rate].valid && !ath_rc_priv->single_stream) || + (rate_table->info[rate].valid_single_stream && + ath_rc_priv->single_stream)); + + return rate; +} + +static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table , + struct ath_rc_series *series, + u8 tries, + u8 rix, + int rtsctsenable) +{ + series->tries = tries; + series->flags = (rtsctsenable ? ATH_RC_RTSCTS_FLAG : 0) | + (WLAN_RC_PHY_DS(rate_table->info[rix].phy) ? + ATH_RC_DS_FLAG : 0) | + (WLAN_RC_PHY_40(rate_table->info[rix].phy) ? + ATH_RC_CW40_FLAG : 0) | + (WLAN_RC_PHY_SGI(rate_table->info[rix].phy) ? + ATH_RC_SGI_FLAG : 0); + + series->rix = rate_table->info[rix].base_index; + series->max_4ms_framelen = rate_table->info[rix].max_4ms_framelen; +} + +static u8 ath_rc_rate_getidx(struct ath_softc *sc, + struct ath_rate_node *ath_rc_priv, + const struct ath_rate_table *rate_table, + u8 rix, u16 stepdown, + u16 min_rate) +{ + u32 j; + u8 nextindex; + struct ath_tx_ratectrl *rate_ctrl = + (struct ath_tx_ratectrl *)(ath_rc_priv); + + if (min_rate) { + for (j = RATE_TABLE_SIZE; j > 0; j--) { + if (ath_rc_get_nextlowervalid_txrate(rate_table, + rate_ctrl, rix, &nextindex)) + rix = nextindex; + else + break; + } + } else { + for (j = stepdown; j > 0; j--) { + if (ath_rc_get_nextlowervalid_txrate(rate_table, + rate_ctrl, rix, &nextindex)) + rix = nextindex; + else + break; + } + } + return rix; +} + +static void ath_rc_ratefind(struct ath_softc *sc, + struct ath_rate_node *ath_rc_priv, + int num_tries, int num_rates, unsigned int rcflag, + struct ath_rc_series series[], int *is_probe, + int is_retry) +{ + u8 try_per_rate = 0, i = 0, rix, nrix; + struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; + struct ath_rate_table *rate_table; + + rate_table = + (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode]; + rix = ath_rc_ratefind_ht(sc, ath_rc_priv, rate_table, + (rcflag & ATH_RC_PROBE_ALLOWED) ? 1 : 0, + is_probe, is_retry); + nrix = rix; + + if ((rcflag & ATH_RC_PROBE_ALLOWED) && (*is_probe)) { + /* set one try for probe rates. 
For the + * probes don't enable rts */ + ath_rc_rate_set_series(rate_table, + &series[i++], 1, nrix, FALSE); + + try_per_rate = (num_tries/num_rates); + /* Get the next tried/allowed rate. No RTS for the next series + * after the probe rate + */ + nrix = ath_rc_rate_getidx(sc, + ath_rc_priv, rate_table, nrix, 1, FALSE); + ath_rc_rate_set_series(rate_table, + &series[i++], try_per_rate, nrix, 0); + } else { + try_per_rate = (num_tries/num_rates); + /* Set the choosen rate. No RTS for first series entry. */ + ath_rc_rate_set_series(rate_table, + &series[i++], try_per_rate, nrix, FALSE); + } + + /* Fill in the other rates for multirate retry */ + for ( ; i < num_rates; i++) { + u8 try_num; + u8 min_rate; + + try_num = ((i + 1) == num_rates) ? + num_tries - (try_per_rate * i) : try_per_rate ; + min_rate = (((i + 1) == num_rates) && + (rcflag & ATH_RC_MINRATE_LASTRATE)) ? 1 : 0; + + nrix = ath_rc_rate_getidx(sc, ath_rc_priv, + rate_table, nrix, 1, min_rate); + /* All other rates in the series have RTS enabled */ + ath_rc_rate_set_series(rate_table, + &series[i], try_num, nrix, TRUE); + } + + /* + * NB:Change rate series to enable aggregation when operating + * at lower MCS rates. When first rate in series is MCS2 + * in HT40 @ 2.4GHz, series should look like: + * + * {MCS2, MCS1, MCS0, MCS0}. + * + * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should + * look like: + * + * {MCS3, MCS2, MCS1, MCS1} + * + * So, set fourth rate in series to be same as third one for + * above conditions. + */ + if ((sc->sc_curmode == ATH9K_MODE_11NG_HT20) || + (sc->sc_curmode == ATH9K_MODE_11NG_HT40PLUS) || + (sc->sc_curmode == ATH9K_MODE_11NG_HT40MINUS)) { + u8 dot11rate = rate_table->info[rix].dot11rate; + u8 phy = rate_table->info[rix].phy; + if (i == 4 && + ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) || + (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) { + series[3].rix = series[2].rix; + series[3].flags = series[2].flags; + series[3].max_4ms_framelen = series[2].max_4ms_framelen; + } + } +} + +/* + * Return the Tx rate series. + */ +void ath_rate_findrate(struct ath_softc *sc, + struct ath_rate_node *ath_rc_priv, + int num_tries, + int num_rates, + unsigned int rcflag, + struct ath_rc_series series[], + int *is_probe, + int is_retry) +{ + struct ath_vap *avp = ath_rc_priv->avp; + + DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); + if (!num_rates || !num_tries) + return; + + if (avp->av_config.av_fixed_rateset == IEEE80211_FIXED_RATE_NONE) { + ath_rc_ratefind(sc, ath_rc_priv, num_tries, num_rates, + rcflag, series, is_probe, is_retry); + } else { + /* Fixed rate */ + int idx; + u8 flags; + u32 rix; + struct ath_rate_softc *asc = ath_rc_priv->asc; + struct ath_rate_table *rate_table; + + rate_table = (struct ath_rate_table *) + asc->hw_rate_table[sc->sc_curmode]; + + for (idx = 0; idx < 4; idx++) { + unsigned int mcs; + u8 series_rix = 0; + + series[idx].tries = + IEEE80211_RATE_IDX_ENTRY( + avp->av_config.av_fixed_retryset, idx); + + mcs = IEEE80211_RATE_IDX_ENTRY( + avp->av_config.av_fixed_rateset, idx); + + if (idx == 3 && (mcs & 0xf0) == 0x70) + mcs = (mcs & ~0xf0)|0x80; + + if (!(mcs & 0x80)) + flags = 0; + else + flags = ((ath_rc_priv->ht_cap & + WLAN_RC_DS_FLAG) ? + ATH_RC_DS_FLAG : 0) | + ((ath_rc_priv->ht_cap & + WLAN_RC_40_FLAG) ? + ATH_RC_CW40_FLAG : 0) | + ((ath_rc_priv->ht_cap & + WLAN_RC_SGI_FLAG) ? + ((ath_rc_priv->ht_cap & + WLAN_RC_40_FLAG) ? 
+ ATH_RC_SGI_FLAG : 0) : 0); + + series[idx].rix = sc->sc_rixmap[mcs]; + series_rix = series[idx].rix; + + /* XXX: Give me some cleanup love */ + if ((flags & ATH_RC_CW40_FLAG) && + (flags & ATH_RC_SGI_FLAG)) + rix = rate_table->info[series_rix].ht_index; + else if (flags & ATH_RC_SGI_FLAG) + rix = rate_table->info[series_rix].sgi_index; + else if (flags & ATH_RC_CW40_FLAG) + rix = rate_table->info[series_rix].cw40index; + else + rix = rate_table->info[series_rix].base_index; + series[idx].max_4ms_framelen = + rate_table->info[rix].max_4ms_framelen; + series[idx].flags = flags; + } + } +} + +static void ath_rc_update_ht(struct ath_softc *sc, + struct ath_rate_node *ath_rc_priv, + struct ath_tx_info_priv *info_priv, + int tx_rate, int xretries, int retries) +{ + struct ath_tx_ratectrl *rate_ctrl; + u32 now_msec = jiffies_to_msecs(jiffies); + int state_change = FALSE, rate, count; + u8 last_per; + struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; + struct ath_rate_table *rate_table = + (struct ath_rate_table *)asc->hw_rate_table[sc->sc_curmode]; + + static u32 nretry_to_per_lookup[10] = { + 100 * 0 / 1, + 100 * 1 / 4, + 100 * 1 / 2, + 100 * 3 / 4, + 100 * 4 / 5, + 100 * 5 / 6, + 100 * 6 / 7, + 100 * 7 / 8, + 100 * 8 / 9, + 100 * 9 / 10 + }; + + if (!ath_rc_priv) + return; + + rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv); + + ASSERT(tx_rate >= 0); + if (tx_rate < 0) + return; + + /* To compensate for some imbalance between ctrl and ext. channel */ + + if (WLAN_RC_PHY_40(rate_table->info[tx_rate].phy)) + info_priv->tx.ts_rssi = + info_priv->tx.ts_rssi < 3 ? 0 : + info_priv->tx.ts_rssi - 3; + + last_per = rate_ctrl->state[tx_rate].per; + + if (xretries) { + /* Update the PER. */ + if (xretries == 1) { + rate_ctrl->state[tx_rate].per += 30; + if (rate_ctrl->state[tx_rate].per > 100) + rate_ctrl->state[tx_rate].per = 100; + } else { + /* xretries == 2 */ + count = sizeof(nretry_to_per_lookup) / + sizeof(nretry_to_per_lookup[0]); + if (retries >= count) + retries = count - 1; + /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ + rate_ctrl->state[tx_rate].per = + (u8)(rate_ctrl->state[tx_rate].per - + (rate_ctrl->state[tx_rate].per >> 3) + + ((100) >> 3)); + } + + /* xretries == 1 or 2 */ + + if (rate_ctrl->probe_rate == tx_rate) + rate_ctrl->probe_rate = 0; + + } else { /* xretries == 0 */ + /* Update the PER. */ + /* Make sure it doesn't index out of array's bounds. */ + count = sizeof(nretry_to_per_lookup) / + sizeof(nretry_to_per_lookup[0]); + if (retries >= count) + retries = count - 1; + if (info_priv->n_bad_frames) { + /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ + /* + * Assuming that n_frames is not 0. The current PER + * from the retries is 100 * retries / (retries+1), + * since the first retries attempts failed, and the + * next one worked. For the one that worked, + * n_bad_frames subframes out of n_frames wored, + * so the PER for that part is + * 100 * n_bad_frames / n_frames, and it contributes + * 100 * n_bad_frames / (n_frames * (retries+1)) to + * the above PER. The expression below is a + * simplified version of the sum of these two terms. 
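 *
 * A worked example with illustrative numbers only: retries = 1,
 * n_frames = 32 and n_bad_frames = 8 give a current PER of
 * 100 * (1 * 32 + 8) / (32 * (1 + 1)) = 62 in integer arithmetic,
 * and starting from an old PER of 40 the update below produces
 * 40 - (40 >> 3) + (62 >> 3) = 40 - 5 + 7 = 42.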
+ */ + if (info_priv->n_frames > 0) + rate_ctrl->state[tx_rate].per + = (u8) + (rate_ctrl->state[tx_rate].per - + (rate_ctrl->state[tx_rate].per >> 3) + + ((100*(retries*info_priv->n_frames + + info_priv->n_bad_frames) / + (info_priv->n_frames * + (retries+1))) >> 3)); + } else { + /* new_PER = 7/8*old_PER + 1/8*(currentPER) */ + + rate_ctrl->state[tx_rate].per = (u8) + (rate_ctrl->state[tx_rate].per - + (rate_ctrl->state[tx_rate].per >> 3) + + (nretry_to_per_lookup[retries] >> 3)); + } + + rate_ctrl->rssi_last_prev2 = rate_ctrl->rssi_last_prev; + rate_ctrl->rssi_last_prev = rate_ctrl->rssi_last; + rate_ctrl->rssi_last = info_priv->tx.ts_rssi; + rate_ctrl->rssi_time = now_msec; + + /* + * If we got at most one retry then increase the max rate if + * this was a probe. Otherwise, ignore the probe. + */ + + if (rate_ctrl->probe_rate && rate_ctrl->probe_rate == tx_rate) { + if (retries > 0 || 2 * info_priv->n_bad_frames > + info_priv->n_frames) { + /* + * Since we probed with just a single attempt, + * any retries means the probe failed. Also, + * if the attempt worked, but more than half + * the subframes were bad then also consider + * the probe a failure. + */ + rate_ctrl->probe_rate = 0; + } else { + u8 probe_rate = 0; + + rate_ctrl->rate_max_phy = rate_ctrl->probe_rate; + probe_rate = rate_ctrl->probe_rate; + + if (rate_ctrl->state[probe_rate].per > 30) + rate_ctrl->state[probe_rate].per = 20; + + rate_ctrl->probe_rate = 0; + + /* + * Since this probe succeeded, we allow the next + * probe twice as soon. This allows the maxRate + * to move up faster if the probes are + * succesful. + */ + rate_ctrl->probe_time = now_msec - + rate_table->probe_interval / 2; + } + } + + if (retries > 0) { + /* + * Don't update anything. We don't know if + * this was because of collisions or poor signal. + * + * Later: if rssi_ack is close to + * rate_ctrl->state[txRate].rssi_thres and we see lots + * of retries, then we could increase + * rate_ctrl->state[txRate].rssi_thres. + */ + rate_ctrl->hw_maxretry_pktcnt = 0; + } else { + /* + * It worked with no retries. First ignore bogus (small) + * rssi_ack values. + */ + if (tx_rate == rate_ctrl->rate_max_phy && + rate_ctrl->hw_maxretry_pktcnt < 255) { + rate_ctrl->hw_maxretry_pktcnt++; + } + + if (info_priv->tx.ts_rssi >= + rate_table->info[tx_rate].rssi_ack_validmin) { + /* Average the rssi */ + if (tx_rate != rate_ctrl->rssi_sum_rate) { + rate_ctrl->rssi_sum_rate = tx_rate; + rate_ctrl->rssi_sum = + rate_ctrl->rssi_sum_cnt = 0; + } + + rate_ctrl->rssi_sum += info_priv->tx.ts_rssi; + rate_ctrl->rssi_sum_cnt++; + + if (rate_ctrl->rssi_sum_cnt > 4) { + int32_t rssi_ackAvg = + (rate_ctrl->rssi_sum + 2) / 4; + int8_t rssi_thres = + rate_ctrl->state[tx_rate]. + rssi_thres; + int8_t rssi_ack_vmin = + rate_table->info[tx_rate]. + rssi_ack_validmin; + + rate_ctrl->rssi_sum = + rate_ctrl->rssi_sum_cnt = 0; + + /* Now reduce the current + * rssi threshold. */ + if ((rssi_ackAvg < rssi_thres + 2) && + (rssi_thres > rssi_ack_vmin)) { + rate_ctrl->state[tx_rate]. + rssi_thres--; + } + + state_change = TRUE; + } + } + } + } + + /* For all cases */ + + /* + * If this rate looks bad (high PER) then stop using it for + * a while (except if we are probing). + */ + if (rate_ctrl->state[tx_rate].per >= 55 && tx_rate > 0 && + rate_table->info[tx_rate].ratekbps <= + rate_table->info[rate_ctrl->rate_max_phy].ratekbps) { + ath_rc_get_nextlowervalid_txrate(rate_table, rate_ctrl, + (u8) tx_rate, &rate_ctrl->rate_max_phy); + + /* Don't probe for a little while. 
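+ * Stamping probe_time with the current time defers the next probe;
+ * contrast with the successful-probe path above, which back-dates
+ * probe_time by half a probe_interval so probing can resume sooner.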
*/ + rate_ctrl->probe_time = now_msec; + } + + if (state_change) { + /* + * Make sure the rates above this have higher rssi thresholds. + * (Note: Monotonicity is kept within the OFDM rates and + * within the CCK rates. However, no adjustment is + * made to keep the rssi thresholds monotonically + * increasing between the CCK and OFDM rates.) + */ + for (rate = tx_rate; rate < + rate_ctrl->rate_table_size - 1; rate++) { + if (rate_table->info[rate+1].phy != + rate_table->info[tx_rate].phy) + break; + + if (rate_ctrl->state[rate].rssi_thres + + rate_table->info[rate].rssi_ack_deltamin > + rate_ctrl->state[rate+1].rssi_thres) { + rate_ctrl->state[rate+1].rssi_thres = + rate_ctrl->state[rate]. + rssi_thres + + rate_table->info[rate]. + rssi_ack_deltamin; + } + } + + /* Make sure the rates below this have lower rssi thresholds. */ + for (rate = tx_rate - 1; rate >= 0; rate--) { + if (rate_table->info[rate].phy != + rate_table->info[tx_rate].phy) + break; + + if (rate_ctrl->state[rate].rssi_thres + + rate_table->info[rate].rssi_ack_deltamin > + rate_ctrl->state[rate+1].rssi_thres) { + if (rate_ctrl->state[rate+1].rssi_thres < + rate_table->info[rate]. + rssi_ack_deltamin) + rate_ctrl->state[rate].rssi_thres = 0; + else { + rate_ctrl->state[rate].rssi_thres = + rate_ctrl->state[rate+1]. + rssi_thres - + rate_table->info[rate]. + rssi_ack_deltamin; + } + + if (rate_ctrl->state[rate].rssi_thres < + rate_table->info[rate]. + rssi_ack_validmin) { + rate_ctrl->state[rate].rssi_thres = + rate_table->info[rate]. + rssi_ack_validmin; + } + } + } + } + + /* Make sure the rates below this have lower PER */ + /* Monotonicity is kept only for rates below the current rate. */ + if (rate_ctrl->state[tx_rate].per < last_per) { + for (rate = tx_rate - 1; rate >= 0; rate--) { + if (rate_table->info[rate].phy != + rate_table->info[tx_rate].phy) + break; + + if (rate_ctrl->state[rate].per > + rate_ctrl->state[rate+1].per) { + rate_ctrl->state[rate].per = + rate_ctrl->state[rate+1].per; + } + } + } + + /* Maintain monotonicity for rates above the current rate */ + for (rate = tx_rate; rate < rate_ctrl->rate_table_size - 1; rate++) { + if (rate_ctrl->state[rate+1].per < rate_ctrl->state[rate].per) + rate_ctrl->state[rate+1].per = + rate_ctrl->state[rate].per; + } + + /* Every so often, we reduce the thresholds and + * PER (different for CCK and OFDM). */ + if (now_msec - rate_ctrl->rssi_down_time >= + rate_table->rssi_reduce_interval) { + + for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) { + if (rate_ctrl->state[rate].rssi_thres > + rate_table->info[rate].rssi_ack_validmin) + rate_ctrl->state[rate].rssi_thres -= 1; + } + rate_ctrl->rssi_down_time = now_msec; + } + + /* Every so often, we reduce the thresholds + * and PER (different for CCK and OFDM). */ + if (now_msec - rate_ctrl->per_down_time >= + rate_table->rssi_reduce_interval) { + for (rate = 0; rate < rate_ctrl->rate_table_size; rate++) { + rate_ctrl->state[rate].per = + 7 * rate_ctrl->state[rate].per / 8; + } + + rate_ctrl->per_down_time = now_msec; + } +} + +/* + * This routine is called in rate control callback tx_status() to give + * the status of previous frames. 
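+ * It first updates the intermediate rate series entries that failed
+ * before the final one, then feeds the final series entry into
+ * ath_rc_update_ht() with the reported retry counts.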
+ */ +static void ath_rc_update(struct ath_softc *sc, + struct ath_rate_node *ath_rc_priv, + struct ath_tx_info_priv *info_priv, int final_ts_idx, + int xretries, int long_retry) +{ + struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; + struct ath_rate_table *rate_table; + struct ath_tx_ratectrl *rate_ctrl; + struct ath_rc_series rcs[4]; + u8 flags; + u32 series = 0, rix; + + memcpy(rcs, info_priv->rcs, 4 * sizeof(rcs[0])); + rate_table = (struct ath_rate_table *) + asc->hw_rate_table[sc->sc_curmode]; + rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv); + ASSERT(rcs[0].tries != 0); + + /* + * If the first rate is not the final index, there + * are intermediate rate failures to be processed. + */ + if (final_ts_idx != 0) { + /* Process intermediate rates that failed.*/ + for (series = 0; series < final_ts_idx ; series++) { + if (rcs[series].tries != 0) { + flags = rcs[series].flags; + /* If HT40 and we have switched mode from + * 40 to 20 => don't update */ + if ((flags & ATH_RC_CW40_FLAG) && + (rate_ctrl->rc_phy_mode != + (flags & ATH_RC_CW40_FLAG))) + return; + if ((flags & ATH_RC_CW40_FLAG) && + (flags & ATH_RC_SGI_FLAG)) + rix = rate_table->info[ + rcs[series].rix].ht_index; + else if (flags & ATH_RC_SGI_FLAG) + rix = rate_table->info[ + rcs[series].rix].sgi_index; + else if (flags & ATH_RC_CW40_FLAG) + rix = rate_table->info[ + rcs[series].rix].cw40index; + else + rix = rate_table->info[ + rcs[series].rix].base_index; + ath_rc_update_ht(sc, ath_rc_priv, + info_priv, rix, + xretries ? 1 : 2, + rcs[series].tries); + } + } + } else { + /* + * Handle the special case of MIMO PS burst, where the second + * aggregate is sent out with only one rate and one try. + * Treating it as an excessive retry penalizes the rate + * inordinately. + */ + if (rcs[0].tries == 1 && xretries == 1) + xretries = 2; + } + + flags = rcs[series].flags; + /* If HT40 and we have switched mode from 40 to 20 => don't update */ + if ((flags & ATH_RC_CW40_FLAG) && + (rate_ctrl->rc_phy_mode != (flags & ATH_RC_CW40_FLAG))) + return; + + if ((flags & ATH_RC_CW40_FLAG) && (flags & ATH_RC_SGI_FLAG)) + rix = rate_table->info[rcs[series].rix].ht_index; + else if (flags & ATH_RC_SGI_FLAG) + rix = rate_table->info[rcs[series].rix].sgi_index; + else if (flags & ATH_RC_CW40_FLAG) + rix = rate_table->info[rcs[series].rix].cw40index; + else + rix = rate_table->info[rcs[series].rix].base_index; + + ath_rc_update_ht(sc, ath_rc_priv, info_priv, rix, + xretries, long_retry); +} + + +/* + * Process a tx descriptor for a completed transmit (success or failure). + */ +static void ath_rate_tx_complete(struct ath_softc *sc, + struct ath_node *an, + struct ath_rate_node *rc_priv, + struct ath_tx_info_priv *info_priv) +{ + int final_ts_idx = info_priv->tx.ts_rateindex; + int tx_status = 0, is_underrun = 0; + struct ath_vap *avp; + + avp = rc_priv->avp; + if ((avp->av_config.av_fixed_rateset != IEEE80211_FIXED_RATE_NONE) + || info_priv->tx.ts_status & ATH9K_TXERR_FILT) + return; + + if (info_priv->tx.ts_rssi > 0) { + ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi, + info_priv->tx.ts_rssi); + } + + /* + * If underrun error is seen assume it as an excessive retry only + * if prefetch trigger level have reached the max (0x3f for 5416) + * Adjust the long retry as if the frame was tried ATH_11N_TXMAXTRY + * times. This affects how ratectrl updates PER for the failed rate. 
+ */ + if (info_priv->tx.ts_flags & + (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN) && + ((sc->sc_ah->ah_txTrigLevel) >= tx_triglevel_max)) { + tx_status = 1; + is_underrun = 1; + } + + if ((info_priv->tx.ts_status & ATH9K_TXERR_XRETRY) || + (info_priv->tx.ts_status & ATH9K_TXERR_FIFO)) + tx_status = 1; + + ath_rc_update(sc, rc_priv, info_priv, final_ts_idx, tx_status, + (is_underrun) ? ATH_11N_TXMAXTRY : + info_priv->tx.ts_longretry); +} + + +/* + * Update the SIB's rate control information + * + * This should be called when the supported rates change + * (e.g. SME operation, wireless mode change) + * + * It will determine which rates are valid for use. + */ +static void ath_rc_sib_update(struct ath_softc *sc, + struct ath_rate_node *ath_rc_priv, + u32 capflag, int keep_state, + struct ath_rateset *negotiated_rates, + struct ath_rateset *negotiated_htrates) +{ + struct ath_rate_table *rate_table = NULL; + struct ath_rate_softc *asc = (struct ath_rate_softc *)sc->sc_rc; + struct ath_rateset *rateset = negotiated_rates; + u8 *ht_mcs = (u8 *)negotiated_htrates; + struct ath_tx_ratectrl *rate_ctrl = (struct ath_tx_ratectrl *) + (ath_rc_priv); + u8 i, j, k, hi = 0, hthi = 0; + + rate_table = (struct ath_rate_table *) + asc->hw_rate_table[sc->sc_curmode]; + + /* Initial rate table size. Will change depending + * on the working rate set */ + rate_ctrl->rate_table_size = MAX_TX_RATE_TBL; + + /* Initialize thresholds according to the global rate table */ + for (i = 0 ; (i < rate_ctrl->rate_table_size) && (!keep_state); i++) { + rate_ctrl->state[i].rssi_thres = + rate_table->info[i].rssi_ack_validmin; + rate_ctrl->state[i].per = 0; + } + + /* Determine the valid rates */ + ath_rc_init_valid_txmask(rate_ctrl); + + for (i = 0; i < WLAN_RC_PHY_MAX; i++) { + for (j = 0; j < MAX_TX_RATE_PHY; j++) + rate_ctrl->valid_phy_rateidx[i][j] = 0; + rate_ctrl->valid_phy_ratecnt[i] = 0; + } + rate_ctrl->rc_phy_mode = (capflag & WLAN_RC_40_FLAG); + + /* Set stream capability */ + ath_rc_priv->single_stream = (capflag & WLAN_RC_DS_FLAG) ? 0 : 1; + + if (!rateset->rs_nrates) { + /* No working rate, just initialize valid rates */ + hi = ath_rc_sib_init_validrates(ath_rc_priv, rate_table, + capflag); + } else { + /* Use intersection of working rates and valid rates */ + hi = ath_rc_sib_setvalid_rates(ath_rc_priv, rate_table, + rateset, capflag); + if (capflag & WLAN_RC_HT_FLAG) { + hthi = ath_rc_sib_setvalid_htrates(ath_rc_priv, + rate_table, + ht_mcs, + capflag); + } + hi = A_MAX(hi, hthi); + } + + rate_ctrl->rate_table_size = hi + 1; + rate_ctrl->rate_max_phy = 0; + ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL); + + for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) { + for (j = 0; j < rate_ctrl->valid_phy_ratecnt[i]; j++) { + rate_ctrl->valid_rate_index[k++] = + rate_ctrl->valid_phy_rateidx[i][j]; + } + + if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, TRUE) + || !rate_ctrl->valid_phy_ratecnt[i]) + continue; + + rate_ctrl->rate_max_phy = rate_ctrl->valid_phy_rateidx[i][j-1]; + } + ASSERT(rate_ctrl->rate_table_size <= MAX_TX_RATE_TBL); + ASSERT(k <= MAX_TX_RATE_TBL); + + rate_ctrl->max_valid_rate = k; + /* + * Some third party vendors don't send the supported rate series in + * order. So sorting to make sure its in order, otherwise our RateFind + * Algo will select wrong rates + */ + ath_rc_sort_validrates(rate_table, rate_ctrl); + rate_ctrl->rate_max_phy = rate_ctrl->valid_rate_index[k-4]; +} + +/* + * Update rate-control state on station associate/reassociate. 
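+ * The ATH_RC_* capability flags passed in are translated to the
+ * WLAN_RC_* flags cached in ht_cap and handed to ath_rc_sib_update()
+ * along with the negotiated legacy and HT rate sets.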
+ */ +static int ath_rate_newassoc(struct ath_softc *sc, + struct ath_rate_node *ath_rc_priv, + unsigned int capflag, + struct ath_rateset *negotiated_rates, + struct ath_rateset *negotiated_htrates) +{ + + + ath_rc_priv->ht_cap = + ((capflag & ATH_RC_DS_FLAG) ? WLAN_RC_DS_FLAG : 0) | + ((capflag & ATH_RC_SGI_FLAG) ? WLAN_RC_SGI_FLAG : 0) | + ((capflag & ATH_RC_HT_FLAG) ? WLAN_RC_HT_FLAG : 0) | + ((capflag & ATH_RC_CW40_FLAG) ? WLAN_RC_40_FLAG : 0); + + ath_rc_sib_update(sc, ath_rc_priv, ath_rc_priv->ht_cap, 0, + negotiated_rates, negotiated_htrates); + + return 0; +} + +/* + * This routine is called to initialize the rate control parameters + * in the SIB. It is called initially during system initialization + * or when a station is associated with the AP. + */ +static void ath_rc_sib_init(struct ath_rate_node *ath_rc_priv) +{ + struct ath_tx_ratectrl *rate_ctrl; + + rate_ctrl = (struct ath_tx_ratectrl *)(ath_rc_priv); + rate_ctrl->rssi_down_time = jiffies_to_msecs(jiffies); +} + + +static void ath_setup_rates(struct ieee80211_local *local, struct sta_info *sta) + +{ + struct ieee80211_supported_band *sband; + struct ieee80211_hw *hw = local_to_hw(local); + struct ath_softc *sc = hw->priv; + struct ath_rate_node *rc_priv = sta->rate_ctrl_priv; + int i, j = 0; + + DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + for (i = 0; i < sband->n_bitrates; i++) { + if (sta->supp_rates[local->hw.conf.channel->band] & BIT(i)) { + rc_priv->neg_rates.rs_rates[j] + = (sband->bitrates[i].bitrate * 2) / 10; + j++; + } + } + rc_priv->neg_rates.rs_nrates = j; +} + +void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv) +{ + struct ath_softc *sc = hw->priv; + u32 capflag = 0; + + if (hw->conf.ht_conf.ht_supported) { + capflag |= ATH_RC_HT_FLAG | ATH_RC_DS_FLAG; + if (sc->sc_ht_info.tx_chan_width == ATH9K_HT_MACMODE_2040) + capflag |= ATH_RC_CW40_FLAG; + } + + ath_rate_newassoc(sc, rc_priv, capflag, + &rc_priv->neg_rates, + &rc_priv->neg_ht_rates); + +} + +/* Rate Control callbacks */ +static void ath_tx_status(void *priv, struct net_device *dev, + struct sk_buff *skb) +{ + struct ath_softc *sc = priv; + struct ath_tx_info_priv *tx_info_priv; + struct ath_node *an; + struct sta_info *sta; + struct ieee80211_local *local; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr; + __le16 fc; + + local = hw_to_local(sc->hw); + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; + + spin_lock_bh(&sc->node_lock); + an = ath_node_find(sc, hdr->addr1); + spin_unlock_bh(&sc->node_lock); + + sta = sta_info_get(local, hdr->addr1); + if (!an || !sta || !ieee80211_is_data(fc)) { + if (tx_info->driver_data[0] != NULL) { + kfree(tx_info->driver_data[0]); + tx_info->driver_data[0] = NULL; + } + return; + } + if (tx_info->driver_data[0] != NULL) { + ath_rate_tx_complete(sc, an, sta->rate_ctrl_priv, tx_info_priv); + kfree(tx_info->driver_data[0]); + tx_info->driver_data[0] = NULL; + } +} + +static void ath_tx_aggr_resp(struct ath_softc *sc, + struct sta_info *sta, + struct ath_node *an, + u8 tidno) +{ + struct ieee80211_hw *hw = sc->hw; + struct ieee80211_local *local; + struct ath_atx_tid *txtid; + struct ieee80211_supported_band *sband; + u16 buffersize = 0; + int state; + DECLARE_MAC_BUF(mac); + + if (!sc->sc_txaggr) + return; + + txtid = ATH_AN_2_TID(an, tidno); + if (!txtid->paused) + return; + + local = 
hw_to_local(sc->hw); + sband = hw->wiphy->bands[hw->conf.channel->band]; + buffersize = IEEE80211_MIN_AMPDU_BUF << + sband->ht_info.ampdu_factor; /* FIXME */ + state = sta->ampdu_mlme.tid_state_tx[tidno]; + + if (state & HT_ADDBA_RECEIVED_MSK) { + txtid->addba_exchangecomplete = 1; + txtid->addba_exchangeinprogress = 0; + txtid->baw_size = buffersize; + + DPRINTF(sc, ATH_DBG_AGGR, + "%s: Resuming tid, buffersize: %d\n", + __func__, + buffersize); + + ath_tx_resume_tid(sc, txtid); + } +} + +static void ath_get_rate(void *priv, struct net_device *dev, + struct ieee80211_supported_band *sband, + struct sk_buff *skb, + struct rate_selection *sel) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct sta_info *sta; + struct ath_softc *sc = (struct ath_softc *)priv; + struct ieee80211_hw *hw = sc->hw; + struct ath_tx_info_priv *tx_info_priv; + struct ath_rate_node *ath_rc_priv; + struct ath_node *an; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + int is_probe, chk, ret; + s8 lowest_idx; + __le16 fc = hdr->frame_control; + u8 *qc, tid; + DECLARE_MAC_BUF(mac); + + DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__); + + /* allocate driver private area of tx_info */ + tx_info->driver_data[0] = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC); + ASSERT(tx_info->driver_data[0] != NULL); + tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; + + sta = sta_info_get(local, hdr->addr1); + lowest_idx = rate_lowest_index(local, sband, sta); + tx_info_priv->min_rate = (sband->bitrates[lowest_idx].bitrate * 2) / 10; + /* lowest rate for management and multicast/broadcast frames */ + if (!ieee80211_is_data(fc) || + is_multicast_ether_addr(hdr->addr1) || !sta) { + sel->rate_idx = lowest_idx; + return; + } + + ath_rc_priv = sta->rate_ctrl_priv; + + /* Find tx rate for unicast frames */ + ath_rate_findrate(sc, ath_rc_priv, + ATH_11N_TXMAXTRY, 4, + ATH_RC_PROBE_ALLOWED, + tx_info_priv->rcs, + &is_probe, + false); + if (is_probe) + sel->probe_idx = ((struct ath_tx_ratectrl *) + sta->rate_ctrl_priv)->probe_rate; + + /* Ratecontrol sometimes returns invalid rate index */ + if (tx_info_priv->rcs[0].rix != 0xff) + ath_rc_priv->prev_data_rix = tx_info_priv->rcs[0].rix; + else + tx_info_priv->rcs[0].rix = ath_rc_priv->prev_data_rix; + + sel->rate_idx = tx_info_priv->rcs[0].rix; + + /* Check if aggregation has to be enabled for this tid */ + + if (hw->conf.ht_conf.ht_supported) { + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + + spin_lock_bh(&sc->node_lock); + an = ath_node_find(sc, hdr->addr1); + spin_unlock_bh(&sc->node_lock); + + if (!an) { + DPRINTF(sc, ATH_DBG_AGGR, + "%s: Node not found to " + "init/chk TX aggr\n", __func__); + return; + } + + chk = ath_tx_aggr_check(sc, an, tid); + if (chk == AGGR_REQUIRED) { + ret = ieee80211_start_tx_ba_session(hw, + hdr->addr1, tid); + if (ret) + DPRINTF(sc, ATH_DBG_AGGR, + "%s: Unable to start tx " + "aggr for: %s\n", + __func__, + print_mac(mac, hdr->addr1)); + else + DPRINTF(sc, ATH_DBG_AGGR, + "%s: Started tx aggr for: %s\n", + __func__, + print_mac(mac, hdr->addr1)); + } else if (chk == AGGR_EXCHANGE_PROGRESS) + ath_tx_aggr_resp(sc, sta, an, tid); + } + } +} + +static void ath_rate_init(void *priv, void *priv_sta, + struct ieee80211_local *local, + struct sta_info *sta) +{ + struct ieee80211_supported_band *sband; + struct ieee80211_hw *hw = local_to_hw(local); + struct ieee80211_conf *conf = &local->hw.conf; + struct 
ath_softc *sc = hw->priv; + int i, j = 0; + + DPRINTF(sc, ATH_DBG_RATE, "%s\n", __func__); + + sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; + sta->txrate_idx = rate_lowest_index(local, sband, sta); + + ath_setup_rates(local, sta); + if (conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE) { + for (i = 0; i < MCS_SET_SIZE; i++) { + if (conf->ht_conf.supp_mcs_set[i/8] & (1<<(i%8))) + ((struct ath_rate_node *) + priv_sta)->neg_ht_rates.rs_rates[j++] = i; + if (j == ATH_RATE_MAX) + break; + } + ((struct ath_rate_node *)priv_sta)->neg_ht_rates.rs_nrates = j; + } + ath_rc_node_update(hw, priv_sta); +} + +static void ath_rate_clear(void *priv) +{ + return; +} + +static void *ath_rate_alloc(struct ieee80211_local *local) +{ + struct ieee80211_hw *hw = local_to_hw(local); + struct ath_softc *sc = hw->priv; + + DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); + return local->hw.priv; +} + +static void ath_rate_free(void *priv) +{ + return; +} + +static void *ath_rate_alloc_sta(void *priv, gfp_t gfp) +{ + struct ath_softc *sc = priv; + struct ath_vap *avp = sc->sc_vaps[0]; + struct ath_rate_node *rate_priv; + + DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); + rate_priv = ath_rate_node_alloc(avp, sc->sc_rc, gfp); + if (!rate_priv) { + DPRINTF(sc, ATH_DBG_FATAL, "%s:Unable to allocate" + "private rate control structure", __func__); + return NULL; + } + ath_rc_sib_init(rate_priv); + return rate_priv; +} + +static void ath_rate_free_sta(void *priv, void *priv_sta) +{ + struct ath_rate_node *rate_priv = priv_sta; + struct ath_softc *sc = priv; + + DPRINTF(sc, ATH_DBG_RATE, "%s", __func__); + ath_rate_node_free(rate_priv); +} + +static struct rate_control_ops ath_rate_ops = { + .module = NULL, + .name = "ath9k_rate_control", + .tx_status = ath_tx_status, + .get_rate = ath_get_rate, + .rate_init = ath_rate_init, + .clear = ath_rate_clear, + .alloc = ath_rate_alloc, + .free = ath_rate_free, + .alloc_sta = ath_rate_alloc_sta, + .free_sta = ath_rate_free_sta +}; + +int ath_rate_control_register(void) +{ + return ieee80211_rate_control_register(&ath_rate_ops); +} + +void ath_rate_control_unregister(void) +{ + ieee80211_rate_control_unregister(&ath_rate_ops); +} + diff --git a/drivers/net/wireless/ath9k/rc.h b/drivers/net/wireless/ath9k/rc.h new file mode 100644 index 0000000..71aef9c --- /dev/null +++ b/drivers/net/wireless/ath9k/rc.h @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2004 Sam Leffler, Errno Consulting + * Copyright (c) 2004 Video54 Technologies, Inc. + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef RC_H +#define RC_H + +#include "ath9k.h" +/* + * Interface definitions for transmit rate control modules for the + * Atheros driver. + * + * A rate control module is responsible for choosing the transmit rate + * for each data frame. 
Management+control frames are always sent at + * a fixed rate. + * + * Only one module may be present at a time; the driver references + * rate control interfaces by symbol name. If multiple modules are + * to be supported we'll need to switch to a registration-based scheme + * as is currently done, for example, for authentication modules. + * + * An instance of the rate control module is attached to each device + * at attach time and detached when the device is destroyed. The module + * may associate data with each device and each node (station). Both + * sets of storage are opaque except for the size of the per-node storage + * which must be provided when the module is attached. + * + * The rate control module is notified for each state transition and + * station association/reassociation. Otherwise it is queried for a + * rate for each outgoing frame and provided status from each transmitted + * frame. Any ancillary processing is the responsibility of the module + * (e.g. if periodic processing is required then the module should setup + * it's own timer). + * + * In addition to the transmit rate for each frame the module must also + * indicate the number of attempts to make at the specified rate. If this + * number is != ATH_TXMAXTRY then an additional callback is made to setup + * additional transmit state. The rate control code is assumed to write + * this additional data directly to the transmit descriptor. + */ + +struct ath_softc; + +#define TRUE 1 +#define FALSE 0 + +#define ATH_RATE_MAX 30 +#define MCS_SET_SIZE 128 + +enum ieee80211_fixed_rate_mode { + IEEE80211_FIXED_RATE_NONE = 0, + IEEE80211_FIXED_RATE_MCS = 1 /* HT rates */ +}; + +/* + * Use the hal os glue code to get ms time + */ +#define IEEE80211_RATE_IDX_ENTRY(val, idx) (((val&(0xff<<(idx*8)))>>(idx*8))) + +#define SHORT_PRE 1 +#define LONG_PRE 0 + +#define WLAN_PHY_HT_20_SS WLAN_RC_PHY_HT_20_SS +#define WLAN_PHY_HT_20_DS WLAN_RC_PHY_HT_20_DS +#define WLAN_PHY_HT_20_DS_HGI WLAN_RC_PHY_HT_20_DS_HGI +#define WLAN_PHY_HT_40_SS WLAN_RC_PHY_HT_40_SS +#define WLAN_PHY_HT_40_SS_HGI WLAN_RC_PHY_HT_40_SS_HGI +#define WLAN_PHY_HT_40_DS WLAN_RC_PHY_HT_40_DS +#define WLAN_PHY_HT_40_DS_HGI WLAN_RC_PHY_HT_40_DS_HGI + +#define WLAN_PHY_OFDM PHY_OFDM +#define WLAN_PHY_CCK PHY_CCK + +#define TRUE_20 0x2 +#define TRUE_40 0x4 +#define TRUE_2040 (TRUE_20|TRUE_40) +#define TRUE_ALL (TRUE_2040|TRUE) + +enum { + WLAN_RC_PHY_HT_20_SS = 4, + WLAN_RC_PHY_HT_20_DS, + WLAN_RC_PHY_HT_40_SS, + WLAN_RC_PHY_HT_40_DS, + WLAN_RC_PHY_HT_20_SS_HGI, + WLAN_RC_PHY_HT_20_DS_HGI, + WLAN_RC_PHY_HT_40_SS_HGI, + WLAN_RC_PHY_HT_40_DS_HGI, + WLAN_RC_PHY_MAX +}; + +#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \ + || (_phy == WLAN_RC_PHY_HT_40_DS) \ + || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) +#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \ + || (_phy == WLAN_RC_PHY_HT_40_DS) \ + || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) +#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \ + || (_phy == WLAN_RC_PHY_HT_40_DS_HGI)) + +#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS) + +/* Returns the capflag mode */ +#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \ + (capflag & WLAN_RC_40_FLAG) ? TRUE_40 : TRUE_20 : TRUE)) + +/* Return TRUE if flag supports HT20 && client supports HT20 or + * return TRUE if flag supports HT40 && client supports HT40. 
+ * This is used becos some rates overlap between HT20/HT40. + */ + +#define WLAN_RC_PHY_HT_VALID(flag, capflag) (((flag & TRUE_20) && !(capflag \ + & WLAN_RC_40_FLAG)) || ((flag & TRUE_40) && \ + (capflag & WLAN_RC_40_FLAG))) + +#define WLAN_RC_DS_FLAG (0x01) +#define WLAN_RC_40_FLAG (0x02) +#define WLAN_RC_SGI_FLAG (0x04) +#define WLAN_RC_HT_FLAG (0x08) + +/* Index into the rate table */ +#define INIT_RATE_MAX_20 23 +#define INIT_RATE_MAX_40 40 + +#define RATE_TABLE_SIZE 64 + +/* XXX: Convert to kdoc */ +struct ath_rate_table { + int rate_cnt; + struct { + int valid; /* Valid for use in rate control */ + int valid_single_stream;/* Valid for use in rate control + for single stream operation */ + u8 phy; /* CCK/OFDM/TURBO/XR */ + u32 ratekbps; /* Rate in Kbits per second */ + u32 user_ratekbps; /* User rate in KBits per second */ + u8 ratecode; /* rate that goes into + hw descriptors */ + u8 short_preamble; /* Mask for enabling short preamble + in rate code for CCK */ + u8 dot11rate; /* Value that goes into supported + rates info element of MLME */ + u8 ctrl_rate; /* Index of next lower basic rate, + used for duration computation */ + int8_t rssi_ack_validmin; /* Rate control related */ + int8_t rssi_ack_deltamin; /* Rate control related */ + u8 base_index; /* base rate index */ + u8 cw40index; /* 40cap rate index */ + u8 sgi_index; /* shortgi rate index */ + u8 ht_index; /* shortgi rate index */ + u32 max_4ms_framelen; /* Maximum frame length(bytes) + for 4ms tx duration */ + } info[RATE_TABLE_SIZE]; + u32 probe_interval; /* interval for ratectrl to + probe for other rates */ + u32 rssi_reduce_interval; /* interval for ratectrl + to reduce RSSI */ + u8 initial_ratemax; /* the initial ratemax value used + in ath_rc_sib_update() */ +}; + +#define ATH_RC_PROBE_ALLOWED 0x00000001 +#define ATH_RC_MINRATE_LASTRATE 0x00000002 +#define ATH_RC_SHORT_PREAMBLE 0x00000004 + +struct ath_rc_series { + u8 rix; + u8 tries; + u8 flags; + u32 max_4ms_framelen; +}; + +/* rcs_flags definition */ +#define ATH_RC_DS_FLAG 0x01 +#define ATH_RC_CW40_FLAG 0x02 /* CW 40 */ +#define ATH_RC_SGI_FLAG 0x04 /* Short Guard Interval */ +#define ATH_RC_HT_FLAG 0x08 /* HT */ +#define ATH_RC_RTSCTS_FLAG 0x10 /* RTS-CTS */ + +/* + * State structures for new rate adaptation code + */ +#define MAX_TX_RATE_TBL 64 +#define MAX_TX_RATE_PHY 48 + +struct ath_tx_ratectrl_state { + int8_t rssi_thres; /* required rssi for this rate (dB) */ + u8 per; /* recent estimate of packet error rate (%) */ +}; + +struct ath_tx_ratectrl { + struct ath_tx_ratectrl_state state[MAX_TX_RATE_TBL]; /* state */ + int8_t rssi_last; /* last ack rssi */ + int8_t rssi_last_lookup; /* last ack rssi used for lookup */ + int8_t rssi_last_prev; /* previous last ack rssi */ + int8_t rssi_last_prev2; /* 2nd previous last ack rssi */ + int32_t rssi_sum_cnt; /* count of rssi_sum for averaging */ + int32_t rssi_sum_rate; /* rate that we are averaging */ + int32_t rssi_sum; /* running sum of rssi for averaging */ + u32 valid_txrate_mask; /* mask of valid rates */ + u8 rate_table_size; /* rate table size */ + u8 rate_max; /* max rate that has recently worked */ + u8 probe_rate; /* rate we are probing at */ + u32 rssi_time; /* msec timestamp for last ack rssi */ + u32 rssi_down_time; /* msec timestamp for last down step */ + u32 probe_time; /* msec timestamp for last probe */ + u8 hw_maxretry_pktcnt; /* num packets since we got + HW max retry error */ + u8 max_valid_rate; /* maximum number of valid rate */ + u8 valid_rate_index[MAX_TX_RATE_TBL]; /* valid rate index */ + 
u32 per_down_time; /* msec timstamp for last + PER down step */ + + /* 11n state */ + u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX]; /* valid rate count */ + u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][MAX_TX_RATE_TBL]; + u8 rc_phy_mode; + u8 rate_max_phy; /* Phy index for the max rate */ + u32 rate_max_lastused; /* msec timstamp of when we + last used rateMaxPhy */ + u32 probe_interval; /* interval for ratectrl to probe + for other rates */ +}; + +struct ath_rateset { + u8 rs_nrates; + u8 rs_rates[ATH_RATE_MAX]; +}; + +/* per-device state */ +struct ath_rate_softc { + /* phy tables that contain rate control data */ + const void *hw_rate_table[ATH9K_MODE_MAX]; + int fixedrix; /* -1 or index of fixed rate */ +}; + +/* per-node state */ +struct ath_rate_node { + struct ath_tx_ratectrl tx_ratectrl; /* rate control state proper */ + u32 prev_data_rix; /* rate idx of last data frame */ + + /* map of rate ix -> negotiated rate set ix */ + u8 rixmap[MAX_TX_RATE_TBL]; + + /* map of ht rate ix -> negotiated rate set ix */ + u8 ht_rixmap[MAX_TX_RATE_TBL]; + + u8 ht_cap; /* ht capabilities */ + u8 ant_tx; /* current transmit antenna */ + + u8 single_stream; /* When TRUE, only single + stream Tx possible */ + struct ath_rateset neg_rates; /* Negotiated rates */ + struct ath_rateset neg_ht_rates; /* Negotiated HT rates */ + struct ath_rate_softc *asc; /* back pointer to atheros softc */ + struct ath_vap *avp; /* back pointer to vap */ +}; + +/* Driver data of ieee80211_tx_info */ +struct ath_tx_info_priv { + struct ath_rc_series rcs[4]; + struct ath_tx_status tx; + int n_frames; + int n_bad_frames; + u8 min_rate; +}; + +/* + * Attach/detach a rate control module. + */ +struct ath_rate_softc *ath_rate_attach(struct ath_hal *ah); +void ath_rate_detach(struct ath_rate_softc *asc); + +/* + * Update/reset rate control state for 802.11 state transitions. + * Important mostly as the analog to ath_rate_newassoc when operating + * in station mode. + */ +void ath_rc_node_update(struct ieee80211_hw *hw, struct ath_rate_node *rc_priv); +void ath_rate_newstate(struct ath_softc *sc, struct ath_vap *avp); + +/* + * Return the tx rate series. + */ +void ath_rate_findrate(struct ath_softc *sc, struct ath_rate_node *ath_rc_priv, + int num_tries, int num_rates, + unsigned int rcflag, struct ath_rc_series[], + int *is_probe, int isretry); +/* + * Return rate index for given Dot11 Rate. + */ +u8 ath_rate_findrateix(struct ath_softc *sc, + u8 dot11_rate); + +/* Routines to register/unregister rate control algorithm */ +int ath_rate_control_register(void); +void ath_rate_control_unregister(void); + +#endif /* RC_H */ diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c new file mode 100644 index 0000000..2fe8061 --- /dev/null +++ b/drivers/net/wireless/ath9k/recv.c @@ -0,0 +1,1318 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Implementation of receive path. + */ + +#include "core.h" + +/* + * Setup and link descriptors. + * + * 11N: we can no longer afford to self link the last descriptor. + * MAC acknowledges BA status as long as it copies frames to host + * buffer (or rx fifo). This can incorrectly acknowledge packets + * to a sender if last desc is self-linked. + * + * NOTE: Caller should hold the rxbuf lock. + */ + +static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath_desc *ds; + struct sk_buff *skb; + + ATH_RXBUF_RESET(bf); + + ds = bf->bf_desc; + ds->ds_link = 0; /* link to null */ + ds->ds_data = bf->bf_buf_addr; + + /* XXX For RADAR? + * virtual addr of the beginning of the buffer. */ + skb = bf->bf_mpdu; + ASSERT(skb != NULL); + ds->ds_vdata = skb->data; + + /* setup rx descriptors */ + ath9k_hw_setuprxdesc(ah, + ds, + skb_tailroom(skb), /* buffer size */ + 0); + + if (sc->sc_rxlink == NULL) + ath9k_hw_putrxbuf(ah, bf->bf_daddr); + else + *sc->sc_rxlink = bf->bf_daddr; + + sc->sc_rxlink = &ds->ds_link; + ath9k_hw_rxena(ah); +} + +/* Process received BAR frame */ + +static int ath_bar_rx(struct ath_softc *sc, + struct ath_node *an, + struct sk_buff *skb) +{ + struct ieee80211_bar *bar; + struct ath_arx_tid *rxtid; + struct sk_buff *tskb; + struct ath_recv_status *rx_status; + int tidno, index, cindex; + u16 seqno; + + /* look at BAR contents */ + + bar = (struct ieee80211_bar *)skb->data; + tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M) + >> IEEE80211_BAR_CTL_TID_S; + seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT; + + /* process BAR - indicate all pending RX frames till the BAR seqno */ + + rxtid = &an->an_aggr.rx.tid[tidno]; + + spin_lock_bh(&rxtid->tidlock); + + /* get relative index */ + + index = ATH_BA_INDEX(rxtid->seq_next, seqno); + + /* drop BAR if old sequence (index is too large) */ + + if ((index > rxtid->baw_size) && + (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2)))) + /* discard frame, ieee layer may not treat frame as a dup */ + goto unlock_and_free; + + /* complete receive processing for all pending frames upto BAR seqno */ + + cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); + while ((rxtid->baw_head != rxtid->baw_tail) && + (rxtid->baw_head != cindex)) { + tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf; + rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status; + rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL; + + if (tskb != NULL) + ath_rx_subframe(an, tskb, rx_status); + + INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); + INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); + } + + /* ... 
and indicate rest of the frames in-order */ + + while (rxtid->baw_head != rxtid->baw_tail && + rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) { + tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf; + rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status; + rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL; + + ath_rx_subframe(an, tskb, rx_status); + + INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); + INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); + } + +unlock_and_free: + spin_unlock_bh(&rxtid->tidlock); + /* free bar itself */ + dev_kfree_skb(skb); + return IEEE80211_FTYPE_CTL; +} + +/* Function to handle a subframe of aggregation when HT is enabled */ + +static int ath_ampdu_input(struct ath_softc *sc, + struct ath_node *an, + struct sk_buff *skb, + struct ath_recv_status *rx_status) +{ + struct ieee80211_hdr *hdr; + struct ath_arx_tid *rxtid; + struct ath_rxbuf *rxbuf; + u8 type, subtype; + u16 rxseq; + int tid = 0, index, cindex, rxdiff; + __le16 fc; + u8 *qc; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + + /* collect stats of frames with non-zero version */ + + if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) { + dev_kfree_skb(skb); + return -1; + } + + type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; + subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE; + + if (ieee80211_is_back_req(fc)) + return ath_bar_rx(sc, an, skb); + + /* special aggregate processing only for qos unicast data frames */ + + if (!ieee80211_is_data(fc) || + !ieee80211_is_data_qos(fc) || + is_multicast_ether_addr(hdr->addr1)) + return ath_rx_subframe(an, skb, rx_status); + + /* lookup rx tid state */ + + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + tid = qc[0] & 0xf; + } + + if (sc->sc_opmode == ATH9K_M_STA) { + /* Drop the frame not belonging to me. */ + if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) { + dev_kfree_skb(skb); + return -1; + } + } + + rxtid = &an->an_aggr.rx.tid[tid]; + + spin_lock(&rxtid->tidlock); + + rxdiff = (rxtid->baw_tail - rxtid->baw_head) & + (ATH_TID_MAX_BUFS - 1); + + /* + * If the ADDBA exchange has not been completed by the source, + * process via legacy path (i.e. 
no reordering buffer is needed) + */ + if (!rxtid->addba_exchangecomplete) { + spin_unlock(&rxtid->tidlock); + return ath_rx_subframe(an, skb, rx_status); + } + + /* extract sequence number from recvd frame */ + + rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT; + + if (rxtid->seq_reset) { + rxtid->seq_reset = 0; + rxtid->seq_next = rxseq; + } + + index = ATH_BA_INDEX(rxtid->seq_next, rxseq); + + /* drop frame if old sequence (index is too large) */ + + if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) { + /* discard frame, ieee layer may not treat frame as a dup */ + spin_unlock(&rxtid->tidlock); + dev_kfree_skb(skb); + return IEEE80211_FTYPE_DATA; + } + + /* sequence number is beyond block-ack window */ + + if (index >= rxtid->baw_size) { + + /* complete receive processing for all pending frames */ + + while (index >= rxtid->baw_size) { + + rxbuf = rxtid->rxbuf + rxtid->baw_head; + + if (rxbuf->rx_wbuf != NULL) { + ath_rx_subframe(an, rxbuf->rx_wbuf, + &rxbuf->rx_status); + rxbuf->rx_wbuf = NULL; + } + + INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); + INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); + + index--; + } + } + + /* add buffer to the recv ba window */ + + cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); + rxbuf = rxtid->rxbuf + cindex; + + if (rxbuf->rx_wbuf != NULL) { + spin_unlock(&rxtid->tidlock); + /* duplicate frame */ + dev_kfree_skb(skb); + return IEEE80211_FTYPE_DATA; + } + + rxbuf->rx_wbuf = skb; + rxbuf->rx_time = get_timestamp(); + rxbuf->rx_status = *rx_status; + + /* advance tail if sequence received is newer + * than any received so far */ + + if (index >= rxdiff) { + rxtid->baw_tail = cindex; + INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS); + } + + /* indicate all in-order received frames */ + + while (rxtid->baw_head != rxtid->baw_tail) { + rxbuf = rxtid->rxbuf + rxtid->baw_head; + if (!rxbuf->rx_wbuf) + break; + + ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status); + rxbuf->rx_wbuf = NULL; + + INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); + INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); + } + + /* + * start a timer to flush all received frames if there are pending + * receive frames + */ + if (rxtid->baw_head != rxtid->baw_tail) + mod_timer(&rxtid->timer, ATH_RX_TIMEOUT); + else + del_timer_sync(&rxtid->timer); + + spin_unlock(&rxtid->tidlock); + return IEEE80211_FTYPE_DATA; +} + +/* Timer to flush all received sub-frames */ + +static void ath_rx_timer(unsigned long data) +{ + struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data; + struct ath_node *an = rxtid->an; + struct ath_rxbuf *rxbuf; + int nosched; + + spin_lock_bh(&rxtid->tidlock); + while (rxtid->baw_head != rxtid->baw_tail) { + rxbuf = rxtid->rxbuf + rxtid->baw_head; + if (!rxbuf->rx_wbuf) { + INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); + INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); + continue; + } + + /* + * Stop if the next one is a very recent frame. + * + * Call get_timestamp in every iteration to protect against the + * case in which a new frame is received while we are executing + * this function. Using a timestamp obtained before entering + * the loop could lead to a very large time interval + * (a negative value typecast to unsigned), breaking the + * function's logic. 
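+ * The loop below therefore stops at the first sufficiently recent
+ * frame and leaves the remaining buffered sub-frames pending.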
+ */ + if ((get_timestamp() - rxbuf->rx_time) < + (ATH_RX_TIMEOUT * HZ / 1000)) + break; + + ath_rx_subframe(an, rxbuf->rx_wbuf, + &rxbuf->rx_status); + rxbuf->rx_wbuf = NULL; + + INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); + INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); + } + + /* + * start a timer to flush all received frames if there are pending + * receive frames + */ + if (rxtid->baw_head != rxtid->baw_tail) + nosched = 0; + else + nosched = 1; /* no need to re-arm the timer again */ + + spin_unlock_bh(&rxtid->tidlock); +} + +/* Free all pending sub-frames in the re-ordering buffer */ + +static void ath_rx_flush_tid(struct ath_softc *sc, + struct ath_arx_tid *rxtid, int drop) +{ + struct ath_rxbuf *rxbuf; + + spin_lock_bh(&rxtid->tidlock); + while (rxtid->baw_head != rxtid->baw_tail) { + rxbuf = rxtid->rxbuf + rxtid->baw_head; + if (!rxbuf->rx_wbuf) { + INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); + INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); + continue; + } + + if (drop) + dev_kfree_skb(rxbuf->rx_wbuf); + else + ath_rx_subframe(rxtid->an, + rxbuf->rx_wbuf, + &rxbuf->rx_status); + + rxbuf->rx_wbuf = NULL; + + INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); + INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); + } + spin_unlock_bh(&rxtid->tidlock); +} + +static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, + u32 len) +{ + struct sk_buff *skb; + u32 off; + + /* + * Cache-line-align. This is important (for the + * 5210 at least) as not doing so causes bogus data + * in rx'd frames. + */ + + skb = dev_alloc_skb(len + sc->sc_cachelsz - 1); + if (skb != NULL) { + off = ((unsigned long) skb->data) % sc->sc_cachelsz; + if (off != 0) + skb_reserve(skb, sc->sc_cachelsz - off); + } else { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: skbuff alloc of size %u failed\n", + __func__, len); + return NULL; + } + + return skb; +} + +static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb) +{ + struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; + + ASSERT(bf != NULL); + + spin_lock_bh(&sc->sc_rxbuflock); + if (bf->bf_status & ATH_BUFSTATUS_STALE) { + /* + * This buffer is still held for hw acess. + * Mark it as free to be re-queued it later. + */ + bf->bf_status |= ATH_BUFSTATUS_FREE; + } else { + /* XXX: we probably never enter here, remove after + * verification */ + list_add_tail(&bf->list, &sc->sc_rxbuf); + ath_rx_buf_link(sc, bf); + } + spin_unlock_bh(&sc->sc_rxbuflock); +} + +/* + * The skb indicated to upper stack won't be returned to us. + * So we have to allocate a new one and queue it by ourselves. + */ +static int ath_rx_indicate(struct ath_softc *sc, + struct sk_buff *skb, + struct ath_recv_status *status, + u16 keyix) +{ + struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; + struct sk_buff *nskb; + int type; + + /* indicate frame to the stack, which will free the old skb. 
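+ * A replacement skb is then allocated and attached to the same
+ * ath_buf so the descriptor can be chained back to the hardware
+ * through ath_rx_requeue().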
*/ + type = ath__rx_indicate(sc, skb, status, keyix); + + /* allocate a new skb and queue it to for H/W processing */ + nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); + if (nskb != NULL) { + bf->bf_mpdu = nskb; + bf->bf_buf_addr = ath_skb_map_single(sc, + nskb, + PCI_DMA_FROMDEVICE, + /* XXX: Remove get_dma_mem_context() */ + get_dma_mem_context(bf, bf_dmacontext)); + ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; + + /* queue the new wbuf to H/W */ + ath_rx_requeue(sc, nskb); + } + + return type; +} + +static void ath_opmode_init(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + u32 rfilt, mfilt[2]; + + /* configure rx filter */ + rfilt = ath_calcrxfilter(sc); + ath9k_hw_setrxfilter(ah, rfilt); + + /* configure bssid mask */ + if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) + ath9k_hw_setbssidmask(ah, sc->sc_bssidmask); + + /* configure operational mode */ + ath9k_hw_setopmode(ah); + + /* Handle any link-level address change. */ + ath9k_hw_setmac(ah, sc->sc_myaddr); + + /* calculate and install multicast filter */ + mfilt[0] = mfilt[1] = ~0; + + ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); + DPRINTF(sc, ATH_DBG_CONFIG , + "%s: RX filter 0x%x, MC filter %08x:%08x\n", + __func__, rfilt, mfilt[0], mfilt[1]); +} + +int ath_rx_init(struct ath_softc *sc, int nbufs) +{ + struct sk_buff *skb; + struct ath_buf *bf; + int error = 0; + + do { + spin_lock_init(&sc->sc_rxflushlock); + sc->sc_rxflush = 0; + spin_lock_init(&sc->sc_rxbuflock); + + /* + * Cisco's VPN software requires that drivers be able to + * receive encapsulated frames that are larger than the MTU. + * Since we can't be sure how large a frame we'll get, setup + * to handle the larges on possible. + */ + sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN, + min(sc->sc_cachelsz, + (u16)64)); + + DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n", + __func__, sc->sc_cachelsz, sc->sc_rxbufsize); + + /* Initialize rx descriptors */ + + error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, + "rx", nbufs, 1); + if (error != 0) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: failed to allocate rx descriptors: %d\n", + __func__, error); + break; + } + + /* Pre-allocate a wbuf for each rx buffer */ + + list_for_each_entry(bf, &sc->sc_rxbuf, list) { + skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); + if (skb == NULL) { + error = -ENOMEM; + break; + } + + bf->bf_mpdu = skb; + bf->bf_buf_addr = + ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE, + get_dma_mem_context(bf, bf_dmacontext)); + ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf; + } + sc->sc_rxlink = NULL; + + } while (0); + + if (error) + ath_rx_cleanup(sc); + + return error; +} + +/* Reclaim all rx queue resources */ + +void ath_rx_cleanup(struct ath_softc *sc) +{ + struct sk_buff *skb; + struct ath_buf *bf; + + list_for_each_entry(bf, &sc->sc_rxbuf, list) { + skb = bf->bf_mpdu; + if (skb) + dev_kfree_skb(skb); + } + + /* cleanup rx descriptors */ + + if (sc->sc_rxdma.dd_desc_len != 0) + ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); +} + +/* + * Calculate the receive filter according to the + * operating mode and state: + * + * o always accept unicast, broadcast, and multicast traffic + * o maintain current state of phy error reception (the hal + * may enable phy error frames for noise immunity work) + * o probe request frames are accepted only when operating in + * hostap, adhoc, or monitor modes + * o enable promiscuous mode according to the interface state + * o accept beacons: + * - when operating in adhoc mode so the 802.11 layer creates + * node table entries for peers, + * - 
when operating in station mode for collecting rssi data when + * the station is otherwise quiet, or + * - when operating as a repeater so we see repeater-sta beacons + * - when scanning + */ + +u32 ath_calcrxfilter(struct ath_softc *sc) +{ +#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) + u32 rfilt; + + rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) + | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST + | ATH9K_RX_FILTER_MCAST; + + /* If not a STA, enable processing of Probe Requests */ + if (sc->sc_opmode != ATH9K_M_STA) + rfilt |= ATH9K_RX_FILTER_PROBEREQ; + + /* Can't set HOSTAP into promiscous mode */ + if (sc->sc_opmode == ATH9K_M_MONITOR) { + rfilt |= ATH9K_RX_FILTER_PROM; + /* ??? To prevent from sending ACK */ + rfilt &= ~ATH9K_RX_FILTER_UCAST; + } + + if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS || + sc->sc_scanning) + rfilt |= ATH9K_RX_FILTER_BEACON; + + /* If in HOSTAP mode, want to enable reception of PSPOLL frames + & beacon frames */ + if (sc->sc_opmode == ATH9K_M_HOSTAP) + rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); + return rfilt; +#undef RX_FILTER_PRESERVE +} + +/* Enable the receive h/w following a reset. */ + +int ath_startrecv(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath_buf *bf, *tbf; + + spin_lock_bh(&sc->sc_rxbuflock); + if (list_empty(&sc->sc_rxbuf)) + goto start_recv; + + sc->sc_rxlink = NULL; + list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) { + if (bf->bf_status & ATH_BUFSTATUS_STALE) { + /* restarting h/w, no need for holding descriptors */ + bf->bf_status &= ~ATH_BUFSTATUS_STALE; + /* + * Upper layer may not be done with the frame yet so + * we can't just re-queue it to hardware. Remove it + * from h/w queue. It'll be re-queued when upper layer + * returns the frame and ath_rx_requeue_mpdu is called. + */ + if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) { + list_del(&bf->list); + continue; + } + } + /* chain descriptors */ + ath_rx_buf_link(sc, bf); + } + + /* We could have deleted elements so the list may be empty now */ + if (list_empty(&sc->sc_rxbuf)) + goto start_recv; + + bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); + ath9k_hw_putrxbuf(ah, bf->bf_daddr); + ath9k_hw_rxena(ah); /* enable recv descriptors */ + +start_recv: + spin_unlock_bh(&sc->sc_rxbuflock); + ath_opmode_init(sc); /* set filters, etc. */ + ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */ + return 0; +} + +/* Disable the receive h/w in preparation for a reset. */ + +bool ath_stoprecv(struct ath_softc *sc) +{ + struct ath_hal *ah = sc->sc_ah; + u64 tsf; + bool stopped; + + ath9k_hw_stoppcurecv(ah); /* disable PCU */ + ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */ + stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */ + mdelay(3); /* 3ms is long enough for 1 frame */ + tsf = ath9k_hw_gettsf64(ah); + sc->sc_rxlink = NULL; /* just in case */ + return stopped; +} + +/* Flush receive queue */ + +void ath_flushrecv(struct ath_softc *sc) +{ + /* + * ath_rx_tasklet may be used to handle rx interrupt and flush receive + * queue at the same time. Use a lock to serialize the access of rx + * queue. + * ath_rx_tasklet cannot hold the spinlock while indicating packets. 
+ * Instead, do not claim the spinlock but check for a flush in + * progress (see references to sc_rxflush) + */ + spin_lock_bh(&sc->sc_rxflushlock); + sc->sc_rxflush = 1; + + ath_rx_tasklet(sc, 1); + + sc->sc_rxflush = 0; + spin_unlock_bh(&sc->sc_rxflushlock); +} + +/* Process an individual frame */ + +int ath_rx_input(struct ath_softc *sc, + struct ath_node *an, + int is_ampdu, + struct sk_buff *skb, + struct ath_recv_status *rx_status, + enum ATH_RX_TYPE *status) +{ + if (is_ampdu && sc->sc_rxaggr) { + *status = ATH_RX_CONSUMED; + return ath_ampdu_input(sc, an, skb, rx_status); + } else { + *status = ATH_RX_NON_CONSUMED; + return -1; + } +} + +/* Process receive queue, as well as LED, etc. */ + +int ath_rx_tasklet(struct ath_softc *sc, int flush) +{ +#define PA2DESC(_sc, _pa) \ + ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ + ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) + + struct ath_buf *bf, *bf_held = NULL; + struct ath_desc *ds; + struct ieee80211_hdr *hdr; + struct sk_buff *skb = NULL; + struct ath_recv_status rx_status; + struct ath_hal *ah = sc->sc_ah; + int type, rx_processed = 0; + u32 phyerr; + u8 chainreset = 0; + int retval; + __le16 fc; + + do { + /* If handling rx interrupt and flush is in progress => exit */ + if (sc->sc_rxflush && (flush == 0)) + break; + + spin_lock_bh(&sc->sc_rxbuflock); + if (list_empty(&sc->sc_rxbuf)) { + sc->sc_rxlink = NULL; + spin_unlock_bh(&sc->sc_rxbuflock); + break; + } + + bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); + + /* + * There is a race condition that BH gets scheduled after sw + * writes RxE and before hw re-load the last descriptor to get + * the newly chained one. Software must keep the last DONE + * descriptor as a holding descriptor - software does so by + * marking it with the STALE flag. + */ + if (bf->bf_status & ATH_BUFSTATUS_STALE) { + bf_held = bf; + if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) { + /* + * The holding descriptor is the last + * descriptor in queue. It's safe to + * remove the last holding descriptor + * in BH context. + */ + list_del(&bf_held->list); + bf_held->bf_status &= ~ATH_BUFSTATUS_STALE; + sc->sc_rxlink = NULL; + + if (bf_held->bf_status & ATH_BUFSTATUS_FREE) { + list_add_tail(&bf_held->list, + &sc->sc_rxbuf); + ath_rx_buf_link(sc, bf_held); + } + spin_unlock_bh(&sc->sc_rxbuflock); + break; + } + bf = list_entry(bf->list.next, struct ath_buf, list); + } + + ds = bf->bf_desc; + ++rx_processed; + + /* + * Must provide the virtual address of the current + * descriptor, the physical address, and the virtual + * address of the next descriptor in the h/w chain. + * This allows the HAL to look ahead to see if the + * hardware is done with a descriptor by checking the + * done bit in the following descriptor and the address + * of the current descriptor the DMA engine is working + * on. All this is necessary because of our use of + * a self-linked list to avoid rx overruns. + */ + retval = ath9k_hw_rxprocdesc(ah, + ds, + bf->bf_daddr, + PA2DESC(sc, ds->ds_link), + 0); + if (retval == -EINPROGRESS) { + struct ath_buf *tbf; + struct ath_desc *tds; + + if (list_is_last(&bf->list, &sc->sc_rxbuf)) { + spin_unlock_bh(&sc->sc_rxbuflock); + break; + } + + tbf = list_entry(bf->list.next, struct ath_buf, list); + + /* + * On some hardware the descriptor status words could + * get corrupted, including the done bit. Because of + * this, check if the next descriptor's done bit is + * set or not. + * + * If the next descriptor's done bit is set, the current + * descriptor has been corrupted. 
Force s/w to discard + * this descriptor and continue... + */ + + tds = tbf->bf_desc; + retval = ath9k_hw_rxprocdesc(ah, + tds, tbf->bf_daddr, + PA2DESC(sc, tds->ds_link), 0); + if (retval == -EINPROGRESS) { + spin_unlock_bh(&sc->sc_rxbuflock); + break; + } + } + + /* XXX: we do not support frames spanning + * multiple descriptors */ + bf->bf_status |= ATH_BUFSTATUS_DONE; + + skb = bf->bf_mpdu; + if (skb == NULL) { /* XXX ??? can this happen */ + spin_unlock_bh(&sc->sc_rxbuflock); + continue; + } + /* + * Now we know it's a completed frame, we can indicate the + * frame. Remove the previous holding descriptor and leave + * this one in the queue as the new holding descriptor. + */ + if (bf_held) { + list_del(&bf_held->list); + bf_held->bf_status &= ~ATH_BUFSTATUS_STALE; + if (bf_held->bf_status & ATH_BUFSTATUS_FREE) { + list_add_tail(&bf_held->list, &sc->sc_rxbuf); + /* try to requeue this descriptor */ + ath_rx_buf_link(sc, bf_held); + } + } + + bf->bf_status |= ATH_BUFSTATUS_STALE; + bf_held = bf; + /* + * Release the lock here in case ieee80211_input() return + * the frame immediately by calling ath_rx_mpdu_requeue(). + */ + spin_unlock_bh(&sc->sc_rxbuflock); + + if (flush) { + /* + * If we're asked to flush receive queue, directly + * chain it back at the queue without processing it. + */ + goto rx_next; + } + + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + memzero(&rx_status, sizeof(struct ath_recv_status)); + + if (ds->ds_rxstat.rs_more) { + /* + * Frame spans multiple descriptors; this + * cannot happen yet as we don't support + * jumbograms. If not in monitor mode, + * discard the frame. + */ +#ifndef ERROR_FRAMES + /* + * Enable this if you want to see + * error frames in Monitor mode. + */ + if (sc->sc_opmode != ATH9K_M_MONITOR) + goto rx_next; +#endif + /* fall thru for monitor mode handling... */ + } else if (ds->ds_rxstat.rs_status != 0) { + if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) + rx_status.flags |= ATH_RX_FCS_ERROR; + if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) { + phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; + goto rx_next; + } + + if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) { + /* + * Decrypt error. We only mark packet status + * here and always push up the frame up to let + * mac80211 handle the actual error case, be + * it no decryption key or real decryption + * error. This let us keep statistics there. + */ + rx_status.flags |= ATH_RX_DECRYPT_ERROR; + } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) { + /* + * Demic error. We only mark frame status here + * and always push up the frame up to let + * mac80211 handle the actual error case. This + * let us keep statistics there. Hardware may + * post a false-positive MIC error. + */ + if (ieee80211_is_ctl(fc)) + /* + * Sometimes, we get invalid + * MIC failures on valid control frames. + * Remove these mic errors. + */ + ds->ds_rxstat.rs_status &= + ~ATH9K_RXERR_MIC; + else + rx_status.flags |= ATH_RX_MIC_ERROR; + } + /* + * Reject error frames with the exception of + * decryption and MIC failures. For monitor mode, + * we also ignore the CRC error. + */ + if (sc->sc_opmode == ATH9K_M_MONITOR) { + if (ds->ds_rxstat.rs_status & + ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | + ATH9K_RXERR_CRC)) + goto rx_next; + } else { + if (ds->ds_rxstat.rs_status & + ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { + goto rx_next; + } + } + } + /* + * The status portion of the descriptor could get corrupted. 
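+ * Sanity-check the reported length against the rx buffer size and
+ * drop the frame if it claims more data than the buffer holds.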
+		 */
+		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
+			goto rx_next;
+		/*
+		 * Sync and unmap the frame. At this point we're
+		 * committed to passing the sk_buff somewhere so
+		 * clear buf_skb; this means a new sk_buff must be
+		 * allocated when the rx descriptor is set up again
+		 * to receive another frame.
+		 */
+		skb_put(skb, ds->ds_rxstat.rs_datalen);
+		skb->protocol = cpu_to_be16(ETH_P_CONTROL);
+		rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
+		rx_status.rateieee =
+			sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
+		rx_status.rateKbps =
+			sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
+		rx_status.ratecode = ds->ds_rxstat.rs_rate;
+
+		/* HT rate */
+		if (rx_status.ratecode & 0x80) {
+			/* TODO - add table to avoid division */
+			if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
+				rx_status.flags |= ATH_RX_40MHZ;
+				rx_status.rateKbps =
+					(rx_status.rateKbps * 27) / 13;
+			}
+			if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
+				rx_status.rateKbps =
+					(rx_status.rateKbps * 10) / 9;
+			else
+				rx_status.flags |= ATH_RX_SHORT_GI;
+		}
+
+		/* sc->sc_noise_floor is only available when the station
+		   attaches to an AP, so we use a default value
+		   if we are not yet attached. */
+
+		/* XXX we should use either sc->sc_noise_floor or
+		 * ath_hal_getChanNoise(ah, &sc->sc_curchan)
+		 * to calculate the noise floor.
+		 * However, the value returned by ath_hal_getChanNoise
+		 * seems to be incorrect (-31dBm on the last test),
+		 * so we will use a hard-coded value until we
+		 * figure out what is going on.
+		 */
+		rx_status.abs_rssi =
+			ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
+
+		pci_dma_sync_single_for_cpu(sc->pdev,
+					    bf->bf_buf_addr,
+					    skb_tailroom(skb),
+					    PCI_DMA_FROMDEVICE);
+		pci_unmap_single(sc->pdev,
+				 bf->bf_buf_addr,
+				 sc->sc_rxbufsize,
+				 PCI_DMA_FROMDEVICE);
+
+		/* XXX: Ah! make me more readable, use a helper */
+		if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
+			if (ds->ds_rxstat.rs_moreaggr == 0) {
+				rx_status.rssictl[0] =
+					ds->ds_rxstat.rs_rssi_ctl0;
+				rx_status.rssictl[1] =
+					ds->ds_rxstat.rs_rssi_ctl1;
+				rx_status.rssictl[2] =
+					ds->ds_rxstat.rs_rssi_ctl2;
+				rx_status.rssi = ds->ds_rxstat.rs_rssi;
+				if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
+					rx_status.rssiextn[0] =
+						ds->ds_rxstat.rs_rssi_ext0;
+					rx_status.rssiextn[1] =
+						ds->ds_rxstat.rs_rssi_ext1;
+					rx_status.rssiextn[2] =
+						ds->ds_rxstat.rs_rssi_ext2;
+					rx_status.flags |=
+						ATH_RX_RSSI_EXTN_VALID;
+				}
+				rx_status.flags |= ATH_RX_RSSI_VALID |
+					ATH_RX_CHAIN_RSSI_VALID;
+			}
+		} else {
+			/*
+			 * Need to insert the "combined" rssi into the
+			 * status structure for upper layer processing
+			 */
+			rx_status.rssi = ds->ds_rxstat.rs_rssi;
+			rx_status.flags |= ATH_RX_RSSI_VALID;
+		}
+
+		/* Pass frames up to the stack. */
+
+		type = ath_rx_indicate(sc, skb,
+			&rx_status, ds->ds_rxstat.rs_keyix);
+
+		/*
+		 * Change the default rx antenna if rx diversity chooses the
+		 * other antenna 3 times in a row.
+		 */
+		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
+			if (++sc->sc_rxotherant >= 3)
+				ath_setdefantenna(sc,
+					ds->ds_rxstat.rs_antenna);
+		} else {
+			sc->sc_rxotherant = 0;
+		}
+
+#ifdef CONFIG_SLOW_ANT_DIV
+		if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
+		    ieee80211_is_beacon(fc)) {
+			ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
+		}
+#endif
+		/*
+		 * For frames successfully indicated, the buffer will be
+		 * returned to us by upper layers by calling
+		 * ath_rx_mpdu_requeue, either synchronously or
+		 * asynchronously. So we don't want to do it here in this loop.
+		 */
+		continue;
+
+rx_next:
+		bf->bf_status |= ATH_BUFSTATUS_FREE;
+	} while (TRUE);
+
+	if (chainreset) {
+		DPRINTF(sc, ATH_DBG_CONFIG,
+			"%s: Reset rx chain mask. "
+			"Do internal reset\n", __func__);
+		ASSERT(flush == 0);
+		ath_internal_reset(sc);
+	}
+
+	return 0;
+#undef PA2DESC
+}
+
+/* Process ADDBA request in per-TID data structure */
+
+int ath_rx_aggr_start(struct ath_softc *sc,
+		      const u8 *addr,
+		      u16 tid,
+		      u16 *ssn)
+{
+	struct ath_arx_tid *rxtid;
+	struct ath_node *an;
+	struct ieee80211_hw *hw = sc->hw;
+	struct ieee80211_supported_band *sband;
+	u16 buffersize = 0;
+
+	spin_lock_bh(&sc->node_lock);
+	an = ath_node_find(sc, (u8 *) addr);
+	spin_unlock_bh(&sc->node_lock);
+
+	if (!an) {
+		DPRINTF(sc, ATH_DBG_AGGR,
+			"%s: Node not found to initialize RX aggregation\n",
+			__func__);
+		return -1;
+	}
+
+	sband = hw->wiphy->bands[hw->conf.channel->band];
+	buffersize = IEEE80211_MIN_AMPDU_BUF <<
+		sband->ht_info.ampdu_factor; /* FIXME */
+
+	rxtid = &an->an_aggr.rx.tid[tid];
+
+	spin_lock_bh(&rxtid->tidlock);
+	if (sc->sc_rxaggr) {
+		/* Allow aggregation reception.
+		 * Adjust rx BA window size. Peer might indicate a
+		 * zero buffer size for a _dont_care_ condition.
+		 */
+		if (buffersize)
+			rxtid->baw_size = min(buffersize, rxtid->baw_size);
+
+		/* set rx sequence number */
+		rxtid->seq_next = *ssn;
+
+		/* Allocate the receive buffers for this TID */
+		DPRINTF(sc, ATH_DBG_AGGR,
+			"%s: Allocating rxbuffer for TID %d\n", __func__, tid);
+
+		if (rxtid->rxbuf == NULL) {
+			/*
+			 * If rxbuf is not NULL at this point, we *probably*
+			 * already allocated the buffer on a previous ADDBA,
+			 * and this is a subsequent ADDBA that got through.
+			 * Don't allocate, but use the value in the pointer;
+			 * we zero it out when we de-allocate.
+			 */
+			rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS *
+				sizeof(struct ath_rxbuf), GFP_ATOMIC);
+		}
+		if (rxtid->rxbuf == NULL) {
+			DPRINTF(sc, ATH_DBG_AGGR,
+				"%s: Unable to allocate RX buffer, "
+				"refusing ADDBA\n", __func__);
+		} else {
+			/* Ensure the memory is zeroed out (all internal
+			 * pointers are null) */
+			memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS *
+				sizeof(struct ath_rxbuf));
+			DPRINTF(sc, ATH_DBG_AGGR,
+				"%s: Allocated @%p\n", __func__, rxtid->rxbuf);
+
+			/* Allow aggregation reception */
+			rxtid->addba_exchangecomplete = 1;
+		}
+	}
+	spin_unlock_bh(&rxtid->tidlock);
+
+	return 0;
+}
+
+/* Process DELBA */
+
+int ath_rx_aggr_stop(struct ath_softc *sc,
+		     const u8 *addr,
+		     u16 tid)
+{
+	struct ath_node *an;
+
+	spin_lock_bh(&sc->node_lock);
+	an = ath_node_find(sc, (u8 *) addr);
+	spin_unlock_bh(&sc->node_lock);
+
+	if (!an) {
+		DPRINTF(sc, ATH_DBG_AGGR,
+			"%s: RX aggr stop for non-existent node\n", __func__);
+		return -1;
+	}
+
+	ath_rx_aggr_teardown(sc, an, tid);
+	return 0;
+}
+
+/* Rx aggregation tear down */
+
+void ath_rx_aggr_teardown(struct ath_softc *sc,
+			  struct ath_node *an, u8 tid)
+{
+	struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid];
+
+	if (!rxtid->addba_exchangecomplete)
+		return;
+
+	del_timer_sync(&rxtid->timer);
+	ath_rx_flush_tid(sc, rxtid, 0);
+	rxtid->addba_exchangecomplete = 0;
+
+	/* De-allocate the receive buffer array allocated when addba started */
+
+	if (rxtid->rxbuf) {
+		DPRINTF(sc, ATH_DBG_AGGR,
+			"%s: Deallocating TID %d rxbuff @%p\n",
+			__func__, tid, rxtid->rxbuf);
+		kfree(rxtid->rxbuf);
+
+		/* Set pointer to null to avoid reuse */
+		rxtid->rxbuf = NULL;
+	}
+}
+
+/* Initialize per-node receive state */
+
+void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an)
+{
+	if (sc->sc_rxaggr) {
+		struct ath_arx_tid
*rxtid; + int tidno; + + /* Init per tid rx state */ + for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno]; + tidno < WME_NUM_TID; + tidno++, rxtid++) { + rxtid->an = an; + rxtid->seq_reset = 1; + rxtid->seq_next = 0; + rxtid->baw_size = WME_MAX_BA; + rxtid->baw_head = rxtid->baw_tail = 0; + + /* + * Ensure the buffer pointer is null at this point + * (needs to be allocated when addba is received) + */ + + rxtid->rxbuf = NULL; + setup_timer(&rxtid->timer, ath_rx_timer, + (unsigned long)rxtid); + spin_lock_init(&rxtid->tidlock); + + /* ADDBA state */ + rxtid->addba_exchangecomplete = 0; + } + } +} + +void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an) +{ + if (sc->sc_rxaggr) { + struct ath_arx_tid *rxtid; + int tidno, i; + + /* Init per tid rx state */ + for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno]; + tidno < WME_NUM_TID; + tidno++, rxtid++) { + + if (!rxtid->addba_exchangecomplete) + continue; + + /* must cancel timer first */ + del_timer_sync(&rxtid->timer); + + /* drop any pending sub-frames */ + ath_rx_flush_tid(sc, rxtid, 1); + + for (i = 0; i < ATH_TID_MAX_BUFS; i++) + ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL); + + rxtid->addba_exchangecomplete = 0; + } + } + +} + +/* Cleanup per-node receive state */ + +void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an) +{ + ath_rx_node_cleanup(sc, an); +} + +dma_addr_t ath_skb_map_single(struct ath_softc *sc, + struct sk_buff *skb, + int direction, + dma_addr_t *pa) +{ + /* + * NB: do NOT use skb->len, which is 0 on initialization. + * Use skb's entire data area instead. + */ + *pa = pci_map_single(sc->pdev, skb->data, + skb_end_pointer(skb) - skb->head, direction); + return *pa; +} + +void ath_skb_unmap_single(struct ath_softc *sc, + struct sk_buff *skb, + int direction, + dma_addr_t *pa) +{ + /* Unmap skb's entire data area */ + pci_unmap_single(sc->pdev, *pa, + skb_end_pointer(skb) - skb->head, direction); +} diff --git a/drivers/net/wireless/ath9k/reg.h b/drivers/net/wireless/ath9k/reg.h new file mode 100644 index 0000000..42b0890 --- /dev/null +++ b/drivers/net/wireless/ath9k/reg.h @@ -0,0 +1,1385 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef REG_H +#define REG_H + +#define AR_CR 0x0008 +#define AR_CR_RXE 0x00000004 +#define AR_CR_RXD 0x00000020 +#define AR_CR_SWI 0x00000040 + +#define AR_RXDP 0x000C + +#define AR_CFG 0x0014 +#define AR_CFG_SWTD 0x00000001 +#define AR_CFG_SWTB 0x00000002 +#define AR_CFG_SWRD 0x00000004 +#define AR_CFG_SWRB 0x00000008 +#define AR_CFG_SWRG 0x00000010 +#define AR_CFG_AP_ADHOC_INDICATION 0x00000020 +#define AR_CFG_PHOK 0x00000100 +#define AR_CFG_CLK_GATE_DIS 0x00000400 +#define AR_CFG_EEBS 0x00000200 +#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000 +#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17 + +#define AR_MIRT 0x0020 +#define AR_MIRT_VAL 0x0000ffff +#define AR_MIRT_VAL_S 16 + +#define AR_IER 0x0024 +#define AR_IER_ENABLE 0x00000001 +#define AR_IER_DISABLE 0x00000000 + +#define AR_TIMT 0x0028 +#define AR_TIMT_LAST 0x0000ffff +#define AR_TIMT_LAST_S 0 +#define AR_TIMT_FIRST 0xffff0000 +#define AR_TIMT_FIRST_S 16 + +#define AR_RIMT 0x002C +#define AR_RIMT_LAST 0x0000ffff +#define AR_RIMT_LAST_S 0 +#define AR_RIMT_FIRST 0xffff0000 +#define AR_RIMT_FIRST_S 16 + +#define AR_DMASIZE_4B 0x00000000 +#define AR_DMASIZE_8B 0x00000001 +#define AR_DMASIZE_16B 0x00000002 +#define AR_DMASIZE_32B 0x00000003 +#define AR_DMASIZE_64B 0x00000004 +#define AR_DMASIZE_128B 0x00000005 +#define AR_DMASIZE_256B 0x00000006 +#define AR_DMASIZE_512B 0x00000007 + +#define AR_TXCFG 0x0030 +#define AR_TXCFG_DMASZ_MASK 0x00000003 +#define AR_TXCFG_DMASZ_4B 0 +#define AR_TXCFG_DMASZ_8B 1 +#define AR_TXCFG_DMASZ_16B 2 +#define AR_TXCFG_DMASZ_32B 3 +#define AR_TXCFG_DMASZ_64B 4 +#define AR_TXCFG_DMASZ_128B 5 +#define AR_TXCFG_DMASZ_256B 6 +#define AR_TXCFG_DMASZ_512B 7 +#define AR_FTRIG 0x000003F0 +#define AR_FTRIG_S 4 +#define AR_FTRIG_IMMED 0x00000000 +#define AR_FTRIG_64B 0x00000010 +#define AR_FTRIG_128B 0x00000020 +#define AR_FTRIG_192B 0x00000030 +#define AR_FTRIG_256B 0x00000040 +#define AR_FTRIG_512B 0x00000080 +#define AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY 0x00000800 + +#define AR_RXCFG 0x0034 +#define AR_RXCFG_CHIRP 0x00000008 +#define AR_RXCFG_ZLFDMA 0x00000010 +#define AR_RXCFG_DMASZ_MASK 0x00000007 +#define AR_RXCFG_DMASZ_4B 0 +#define AR_RXCFG_DMASZ_8B 1 +#define AR_RXCFG_DMASZ_16B 2 +#define AR_RXCFG_DMASZ_32B 3 +#define AR_RXCFG_DMASZ_64B 4 +#define AR_RXCFG_DMASZ_128B 5 +#define AR_RXCFG_DMASZ_256B 6 +#define AR_RXCFG_DMASZ_512B 7 + +#define AR_MIBC 0x0040 +#define AR_MIBC_COW 0x00000001 +#define AR_MIBC_FMC 0x00000002 +#define AR_MIBC_CMC 0x00000004 +#define AR_MIBC_MCS 0x00000008 + +#define AR_TOPS 0x0044 +#define AR_TOPS_MASK 0x0000FFFF + +#define AR_RXNPTO 0x0048 +#define AR_RXNPTO_MASK 0x000003FF + +#define AR_TXNPTO 0x004C +#define AR_TXNPTO_MASK 0x000003FF +#define AR_TXNPTO_QCU_MASK 0x000FFC00 + +#define AR_RPGTO 0x0050 +#define AR_RPGTO_MASK 0x000003FF + +#define AR_RPCNT 0x0054 +#define AR_RPCNT_MASK 0x0000001F + +#define AR_MACMISC 0x0058 +#define AR_MACMISC_PCI_EXT_FORCE 0x00000010 +#define AR_MACMISC_DMA_OBS 0x000001E0 +#define AR_MACMISC_DMA_OBS_S 5 +#define AR_MACMISC_DMA_OBS_LINE_0 0 +#define AR_MACMISC_DMA_OBS_LINE_1 1 +#define AR_MACMISC_DMA_OBS_LINE_2 2 +#define AR_MACMISC_DMA_OBS_LINE_3 3 +#define AR_MACMISC_DMA_OBS_LINE_4 4 +#define AR_MACMISC_DMA_OBS_LINE_5 5 +#define AR_MACMISC_DMA_OBS_LINE_6 6 +#define AR_MACMISC_DMA_OBS_LINE_7 7 +#define AR_MACMISC_DMA_OBS_LINE_8 8 +#define AR_MACMISC_MISC_OBS 0x00000E00 +#define AR_MACMISC_MISC_OBS_S 9 +#define AR_MACMISC_MISC_OBS_BUS_LSB 0x00007000 +#define AR_MACMISC_MISC_OBS_BUS_LSB_S 12 +#define AR_MACMISC_MISC_OBS_BUS_MSB 0x00038000 
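[Editor's note, not part of the patch.] The defines near the top of this header are the receive DMA plumbing that the rx path earlier in this patch relies on: AR_RXDP holds the physical address of the head of the rx descriptor chain, and setting AR_CR_RXE in AR_CR is the "sw writes RxE" step the ath_rx_tasklet() comments refer to. A minimal sketch of that sequence is shown below, assuming a REG_WRITE(ah, reg, val) style register accessor; the accessor and function names here are illustrative only.

static void example_start_rx_dma(struct ath_hal *ah, u32 rxdp_paddr)
{
	/* publish the self-linked rx descriptor chain to the hardware */
	REG_WRITE(ah, AR_RXDP, rxdp_paddr);
	/* then enable receive DMA; disabling would set AR_CR_RXD instead */
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}
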
+#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15 +#define AR_MACMISC_MISC_OBS_BUS_1 1 + +#define AR_GTXTO 0x0064 +#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF +#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000 +#define AR_GTXTO_TIMEOUT_LIMIT_S 16 + +#define AR_GTTM 0x0068 +#define AR_GTTM_USEC 0x00000001 +#define AR_GTTM_IGNORE_IDLE 0x00000002 +#define AR_GTTM_RESET_IDLE 0x00000004 +#define AR_GTTM_CST_USEC 0x00000008 + +#define AR_CST 0x006C +#define AR_CST_TIMEOUT_COUNTER 0x0000FFFF +#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000 +#define AR_CST_TIMEOUT_LIMIT_S 16 + +#define AR_SREV_VERSION_9100 0x014 + +#define AR_SREV_5416_V20_OR_LATER(_ah) \ + (AR_SREV_9100((_ah)) || AR_SREV_5416_20_OR_LATER(_ah)) +#define AR_SREV_5416_V22_OR_LATER(_ah) \ + (AR_SREV_9100((_ah)) || AR_SREV_5416_22_OR_LATER(_ah)) + +#define AR_ISR 0x0080 +#define AR_ISR_RXOK 0x00000001 +#define AR_ISR_RXDESC 0x00000002 +#define AR_ISR_RXERR 0x00000004 +#define AR_ISR_RXNOPKT 0x00000008 +#define AR_ISR_RXEOL 0x00000010 +#define AR_ISR_RXORN 0x00000020 +#define AR_ISR_TXOK 0x00000040 +#define AR_ISR_TXDESC 0x00000080 +#define AR_ISR_TXERR 0x00000100 +#define AR_ISR_TXNOPKT 0x00000200 +#define AR_ISR_TXEOL 0x00000400 +#define AR_ISR_TXURN 0x00000800 +#define AR_ISR_MIB 0x00001000 +#define AR_ISR_SWI 0x00002000 +#define AR_ISR_RXPHY 0x00004000 +#define AR_ISR_RXKCM 0x00008000 +#define AR_ISR_SWBA 0x00010000 +#define AR_ISR_BRSSI 0x00020000 +#define AR_ISR_BMISS 0x00040000 +#define AR_ISR_BNR 0x00100000 +#define AR_ISR_RXCHIRP 0x00200000 +#define AR_ISR_BCNMISC 0x00800000 +#define AR_ISR_TIM 0x00800000 +#define AR_ISR_QCBROVF 0x02000000 +#define AR_ISR_QCBRURN 0x04000000 +#define AR_ISR_QTRIG 0x08000000 +#define AR_ISR_GENTMR 0x10000000 + +#define AR_ISR_TXMINTR 0x00080000 +#define AR_ISR_RXMINTR 0x01000000 +#define AR_ISR_TXINTM 0x40000000 +#define AR_ISR_RXINTM 0x80000000 + +#define AR_ISR_S0 0x0084 +#define AR_ISR_S0_QCU_TXOK 0x000003FF +#define AR_ISR_S0_QCU_TXOK_S 0 +#define AR_ISR_S0_QCU_TXDESC 0x03FF0000 +#define AR_ISR_S0_QCU_TXDESC_S 16 + +#define AR_ISR_S1 0x0088 +#define AR_ISR_S1_QCU_TXERR 0x000003FF +#define AR_ISR_S1_QCU_TXERR_S 0 +#define AR_ISR_S1_QCU_TXEOL 0x03FF0000 +#define AR_ISR_S1_QCU_TXEOL_S 16 + +#define AR_ISR_S2 0x008c +#define AR_ISR_S2_QCU_TXURN 0x000003FF +#define AR_ISR_S2_CST 0x00400000 +#define AR_ISR_S2_GTT 0x00800000 +#define AR_ISR_S2_TIM 0x01000000 +#define AR_ISR_S2_CABEND 0x02000000 +#define AR_ISR_S2_DTIMSYNC 0x04000000 +#define AR_ISR_S2_BCNTO 0x08000000 +#define AR_ISR_S2_CABTO 0x10000000 +#define AR_ISR_S2_DTIM 0x20000000 +#define AR_ISR_S2_TSFOOR 0x40000000 +#define AR_ISR_S2_TBTT_TIME 0x80000000 + +#define AR_ISR_S3 0x0090 +#define AR_ISR_S3_QCU_QCBROVF 0x000003FF +#define AR_ISR_S3_QCU_QCBRURN 0x03FF0000 + +#define AR_ISR_S4 0x0094 +#define AR_ISR_S4_QCU_QTRIG 0x000003FF +#define AR_ISR_S4_RESV0 0xFFFFFC00 + +#define AR_ISR_S5 0x0098 +#define AR_ISR_S5_TIMER_TRIG 0x000000FF +#define AR_ISR_S5_TIMER_THRESH 0x0007FE00 +#define AR_ISR_S5_TIM_TIMER 0x00000010 +#define AR_ISR_S5_DTIM_TIMER 0x00000020 +#define AR_ISR_S5_S 0x00d8 +#define AR_IMR_S5 0x00b8 +#define AR_IMR_S5_TIM_TIMER 0x00000010 +#define AR_IMR_S5_DTIM_TIMER 0x00000020 + + +#define AR_IMR 0x00a0 +#define AR_IMR_RXOK 0x00000001 +#define AR_IMR_RXDESC 0x00000002 +#define AR_IMR_RXERR 0x00000004 +#define AR_IMR_RXNOPKT 0x00000008 +#define AR_IMR_RXEOL 0x00000010 +#define AR_IMR_RXORN 0x00000020 +#define AR_IMR_TXOK 0x00000040 +#define AR_IMR_TXDESC 0x00000080 +#define AR_IMR_TXERR 0x00000100 +#define AR_IMR_TXNOPKT 0x00000200 +#define AR_IMR_TXEOL 
0x00000400 +#define AR_IMR_TXURN 0x00000800 +#define AR_IMR_MIB 0x00001000 +#define AR_IMR_SWI 0x00002000 +#define AR_IMR_RXPHY 0x00004000 +#define AR_IMR_RXKCM 0x00008000 +#define AR_IMR_SWBA 0x00010000 +#define AR_IMR_BRSSI 0x00020000 +#define AR_IMR_BMISS 0x00040000 +#define AR_IMR_BNR 0x00100000 +#define AR_IMR_RXCHIRP 0x00200000 +#define AR_IMR_BCNMISC 0x00800000 +#define AR_IMR_TIM 0x00800000 +#define AR_IMR_QCBROVF 0x02000000 +#define AR_IMR_QCBRURN 0x04000000 +#define AR_IMR_QTRIG 0x08000000 +#define AR_IMR_GENTMR 0x10000000 + +#define AR_IMR_TXMINTR 0x00080000 +#define AR_IMR_RXMINTR 0x01000000 +#define AR_IMR_TXINTM 0x40000000 +#define AR_IMR_RXINTM 0x80000000 + +#define AR_IMR_S0 0x00a4 +#define AR_IMR_S0_QCU_TXOK 0x000003FF +#define AR_IMR_S0_QCU_TXOK_S 0 +#define AR_IMR_S0_QCU_TXDESC 0x03FF0000 +#define AR_IMR_S0_QCU_TXDESC_S 16 + +#define AR_IMR_S1 0x00a8 +#define AR_IMR_S1_QCU_TXERR 0x000003FF +#define AR_IMR_S1_QCU_TXERR_S 0 +#define AR_IMR_S1_QCU_TXEOL 0x03FF0000 +#define AR_IMR_S1_QCU_TXEOL_S 16 + +#define AR_IMR_S2 0x00ac +#define AR_IMR_S2_QCU_TXURN 0x000003FF +#define AR_IMR_S2_QCU_TXURN_S 0 +#define AR_IMR_S2_CST 0x00400000 +#define AR_IMR_S2_GTT 0x00800000 +#define AR_IMR_S2_TIM 0x01000000 +#define AR_IMR_S2_CABEND 0x02000000 +#define AR_IMR_S2_DTIMSYNC 0x04000000 +#define AR_IMR_S2_BCNTO 0x08000000 +#define AR_IMR_S2_CABTO 0x10000000 +#define AR_IMR_S2_DTIM 0x20000000 +#define AR_IMR_S2_TSFOOR 0x40000000 + +#define AR_IMR_S3 0x00b0 +#define AR_IMR_S3_QCU_QCBROVF 0x000003FF +#define AR_IMR_S3_QCU_QCBRURN 0x03FF0000 +#define AR_IMR_S3_QCU_QCBRURN_S 16 + +#define AR_IMR_S4 0x00b4 +#define AR_IMR_S4_QCU_QTRIG 0x000003FF +#define AR_IMR_S4_RESV0 0xFFFFFC00 + +#define AR_IMR_S5 0x00b8 +#define AR_IMR_S5_TIMER_TRIG 0x000000FF +#define AR_IMR_S5_TIMER_THRESH 0x0000FF00 + + +#define AR_ISR_RAC 0x00c0 +#define AR_ISR_S0_S 0x00c4 +#define AR_ISR_S0_QCU_TXOK 0x000003FF +#define AR_ISR_S0_QCU_TXOK_S 0 +#define AR_ISR_S0_QCU_TXDESC 0x03FF0000 +#define AR_ISR_S0_QCU_TXDESC_S 16 + +#define AR_ISR_S1_S 0x00c8 +#define AR_ISR_S1_QCU_TXERR 0x000003FF +#define AR_ISR_S1_QCU_TXERR_S 0 +#define AR_ISR_S1_QCU_TXEOL 0x03FF0000 +#define AR_ISR_S1_QCU_TXEOL_S 16 + +#define AR_ISR_S2_S 0x00cc +#define AR_ISR_S3_S 0x00d0 +#define AR_ISR_S4_S 0x00d4 +#define AR_ISR_S5_S 0x00d8 +#define AR_DMADBG_0 0x00e0 +#define AR_DMADBG_1 0x00e4 +#define AR_DMADBG_2 0x00e8 +#define AR_DMADBG_3 0x00ec +#define AR_DMADBG_4 0x00f0 +#define AR_DMADBG_5 0x00f4 +#define AR_DMADBG_6 0x00f8 +#define AR_DMADBG_7 0x00fc + +#define AR_NUM_QCU 10 +#define AR_QCU_0 0x0001 +#define AR_QCU_1 0x0002 +#define AR_QCU_2 0x0004 +#define AR_QCU_3 0x0008 +#define AR_QCU_4 0x0010 +#define AR_QCU_5 0x0020 +#define AR_QCU_6 0x0040 +#define AR_QCU_7 0x0080 +#define AR_QCU_8 0x0100 +#define AR_QCU_9 0x0200 + +#define AR_Q0_TXDP 0x0800 +#define AR_Q1_TXDP 0x0804 +#define AR_Q2_TXDP 0x0808 +#define AR_Q3_TXDP 0x080c +#define AR_Q4_TXDP 0x0810 +#define AR_Q5_TXDP 0x0814 +#define AR_Q6_TXDP 0x0818 +#define AR_Q7_TXDP 0x081c +#define AR_Q8_TXDP 0x0820 +#define AR_Q9_TXDP 0x0824 +#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2)) + +#define AR_Q_TXE 0x0840 +#define AR_Q_TXE_M 0x000003FF + +#define AR_Q_TXD 0x0880 +#define AR_Q_TXD_M 0x000003FF + +#define AR_Q0_CBRCFG 0x08c0 +#define AR_Q1_CBRCFG 0x08c4 +#define AR_Q2_CBRCFG 0x08c8 +#define AR_Q3_CBRCFG 0x08cc +#define AR_Q4_CBRCFG 0x08d0 +#define AR_Q5_CBRCFG 0x08d4 +#define AR_Q6_CBRCFG 0x08d8 +#define AR_Q7_CBRCFG 0x08dc +#define AR_Q8_CBRCFG 0x08e0 +#define AR_Q9_CBRCFG 0x08e4 +#define 
AR_QCBRCFG(_i) (AR_Q0_CBRCFG + ((_i)<<2)) +#define AR_Q_CBRCFG_INTERVAL 0x00FFFFFF +#define AR_Q_CBRCFG_INTERVAL_S 0 +#define AR_Q_CBRCFG_OVF_THRESH 0xFF000000 +#define AR_Q_CBRCFG_OVF_THRESH_S 24 + +#define AR_Q0_RDYTIMECFG 0x0900 +#define AR_Q1_RDYTIMECFG 0x0904 +#define AR_Q2_RDYTIMECFG 0x0908 +#define AR_Q3_RDYTIMECFG 0x090c +#define AR_Q4_RDYTIMECFG 0x0910 +#define AR_Q5_RDYTIMECFG 0x0914 +#define AR_Q6_RDYTIMECFG 0x0918 +#define AR_Q7_RDYTIMECFG 0x091c +#define AR_Q8_RDYTIMECFG 0x0920 +#define AR_Q9_RDYTIMECFG 0x0924 +#define AR_QRDYTIMECFG(_i) (AR_Q0_RDYTIMECFG + ((_i)<<2)) +#define AR_Q_RDYTIMECFG_DURATION 0x00FFFFFF +#define AR_Q_RDYTIMECFG_DURATION_S 0 +#define AR_Q_RDYTIMECFG_EN 0x01000000 + +#define AR_Q_ONESHOTARM_SC 0x0940 +#define AR_Q_ONESHOTARM_SC_M 0x000003FF +#define AR_Q_ONESHOTARM_SC_RESV0 0xFFFFFC00 + +#define AR_Q_ONESHOTARM_CC 0x0980 +#define AR_Q_ONESHOTARM_CC_M 0x000003FF +#define AR_Q_ONESHOTARM_CC_RESV0 0xFFFFFC00 + +#define AR_Q0_MISC 0x09c0 +#define AR_Q1_MISC 0x09c4 +#define AR_Q2_MISC 0x09c8 +#define AR_Q3_MISC 0x09cc +#define AR_Q4_MISC 0x09d0 +#define AR_Q5_MISC 0x09d4 +#define AR_Q6_MISC 0x09d8 +#define AR_Q7_MISC 0x09dc +#define AR_Q8_MISC 0x09e0 +#define AR_Q9_MISC 0x09e4 +#define AR_QMISC(_i) (AR_Q0_MISC + ((_i)<<2)) +#define AR_Q_MISC_FSP 0x0000000F +#define AR_Q_MISC_FSP_ASAP 0 +#define AR_Q_MISC_FSP_CBR 1 +#define AR_Q_MISC_FSP_DBA_GATED 2 +#define AR_Q_MISC_FSP_TIM_GATED 3 +#define AR_Q_MISC_FSP_BEACON_SENT_GATED 4 +#define AR_Q_MISC_FSP_BEACON_RCVD_GATED 5 +#define AR_Q_MISC_ONE_SHOT_EN 0x00000010 +#define AR_Q_MISC_CBR_INCR_DIS1 0x00000020 +#define AR_Q_MISC_CBR_INCR_DIS0 0x00000040 +#define AR_Q_MISC_BEACON_USE 0x00000080 +#define AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN 0x00000100 +#define AR_Q_MISC_RDYTIME_EXP_POLICY 0x00000200 +#define AR_Q_MISC_RESET_CBR_EXP_CTR 0x00000400 +#define AR_Q_MISC_DCU_EARLY_TERM_REQ 0x00000800 +#define AR_Q_MISC_RESV0 0xFFFFF000 + +#define AR_Q0_STS 0x0a00 +#define AR_Q1_STS 0x0a04 +#define AR_Q2_STS 0x0a08 +#define AR_Q3_STS 0x0a0c +#define AR_Q4_STS 0x0a10 +#define AR_Q5_STS 0x0a14 +#define AR_Q6_STS 0x0a18 +#define AR_Q7_STS 0x0a1c +#define AR_Q8_STS 0x0a20 +#define AR_Q9_STS 0x0a24 +#define AR_QSTS(_i) (AR_Q0_STS + ((_i)<<2)) +#define AR_Q_STS_PEND_FR_CNT 0x00000003 +#define AR_Q_STS_RESV0 0x000000FC +#define AR_Q_STS_CBR_EXP_CNT 0x0000FF00 +#define AR_Q_STS_RESV1 0xFFFF0000 + +#define AR_Q_RDYTIMESHDN 0x0a40 +#define AR_Q_RDYTIMESHDN_M 0x000003FF + + +#define AR_NUM_DCU 10 +#define AR_DCU_0 0x0001 +#define AR_DCU_1 0x0002 +#define AR_DCU_2 0x0004 +#define AR_DCU_3 0x0008 +#define AR_DCU_4 0x0010 +#define AR_DCU_5 0x0020 +#define AR_DCU_6 0x0040 +#define AR_DCU_7 0x0080 +#define AR_DCU_8 0x0100 +#define AR_DCU_9 0x0200 + +#define AR_D0_QCUMASK 0x1000 +#define AR_D1_QCUMASK 0x1004 +#define AR_D2_QCUMASK 0x1008 +#define AR_D3_QCUMASK 0x100c +#define AR_D4_QCUMASK 0x1010 +#define AR_D5_QCUMASK 0x1014 +#define AR_D6_QCUMASK 0x1018 +#define AR_D7_QCUMASK 0x101c +#define AR_D8_QCUMASK 0x1020 +#define AR_D9_QCUMASK 0x1024 +#define AR_DQCUMASK(_i) (AR_D0_QCUMASK + ((_i)<<2)) +#define AR_D_QCUMASK 0x000003FF +#define AR_D_QCUMASK_RESV0 0xFFFFFC00 + +#define AR_D_TXBLK_CMD 0x1038 +#define AR_D_TXBLK_DATA(i) (AR_D_TXBLK_CMD+(i)) + +#define AR_D0_LCL_IFS 0x1040 +#define AR_D1_LCL_IFS 0x1044 +#define AR_D2_LCL_IFS 0x1048 +#define AR_D3_LCL_IFS 0x104c +#define AR_D4_LCL_IFS 0x1050 +#define AR_D5_LCL_IFS 0x1054 +#define AR_D6_LCL_IFS 0x1058 +#define AR_D7_LCL_IFS 0x105c +#define AR_D8_LCL_IFS 0x1060 +#define AR_D9_LCL_IFS 0x1064 
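[Editor's note, not part of the patch.] The per-queue (QCU) and per-DCU register banks above are ten 32-bit registers at consecutive 4-byte addresses, which is why the indexer macros such as AR_QTXDP(_i) and AR_DQCUMASK(_i) reduce to base + (_i << 2). A small sketch of the pattern follows; example_set_txdp() and REG_WRITE() are hypothetical names used only for this illustration.

static void example_set_txdp(struct ath_hal *ah, u32 q, u32 txdp_paddr)
{
	if (q >= AR_NUM_QCU)	/* the hardware exposes queues 0..9 only */
		return;
	/* AR_QTXDP(q) expands to AR_Q0_TXDP + (q << 2) */
	REG_WRITE(ah, AR_QTXDP(q), txdp_paddr);
}
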
+#define AR_DLCL_IFS(_i) (AR_D0_LCL_IFS + ((_i)<<2)) +#define AR_D_LCL_IFS_CWMIN 0x000003FF +#define AR_D_LCL_IFS_CWMIN_S 0 +#define AR_D_LCL_IFS_CWMAX 0x000FFC00 +#define AR_D_LCL_IFS_CWMAX_S 10 +#define AR_D_LCL_IFS_AIFS 0x0FF00000 +#define AR_D_LCL_IFS_AIFS_S 20 + +#define AR_D_LCL_IFS_RESV0 0xF0000000 + +#define AR_D0_RETRY_LIMIT 0x1080 +#define AR_D1_RETRY_LIMIT 0x1084 +#define AR_D2_RETRY_LIMIT 0x1088 +#define AR_D3_RETRY_LIMIT 0x108c +#define AR_D4_RETRY_LIMIT 0x1090 +#define AR_D5_RETRY_LIMIT 0x1094 +#define AR_D6_RETRY_LIMIT 0x1098 +#define AR_D7_RETRY_LIMIT 0x109c +#define AR_D8_RETRY_LIMIT 0x10a0 +#define AR_D9_RETRY_LIMIT 0x10a4 +#define AR_DRETRY_LIMIT(_i) (AR_D0_RETRY_LIMIT + ((_i)<<2)) +#define AR_D_RETRY_LIMIT_FR_SH 0x0000000F +#define AR_D_RETRY_LIMIT_FR_SH_S 0 +#define AR_D_RETRY_LIMIT_STA_SH 0x00003F00 +#define AR_D_RETRY_LIMIT_STA_SH_S 8 +#define AR_D_RETRY_LIMIT_STA_LG 0x000FC000 +#define AR_D_RETRY_LIMIT_STA_LG_S 14 +#define AR_D_RETRY_LIMIT_RESV0 0xFFF00000 + +#define AR_D0_CHNTIME 0x10c0 +#define AR_D1_CHNTIME 0x10c4 +#define AR_D2_CHNTIME 0x10c8 +#define AR_D3_CHNTIME 0x10cc +#define AR_D4_CHNTIME 0x10d0 +#define AR_D5_CHNTIME 0x10d4 +#define AR_D6_CHNTIME 0x10d8 +#define AR_D7_CHNTIME 0x10dc +#define AR_D8_CHNTIME 0x10e0 +#define AR_D9_CHNTIME 0x10e4 +#define AR_DCHNTIME(_i) (AR_D0_CHNTIME + ((_i)<<2)) +#define AR_D_CHNTIME_DUR 0x000FFFFF +#define AR_D_CHNTIME_DUR_S 0 +#define AR_D_CHNTIME_EN 0x00100000 +#define AR_D_CHNTIME_RESV0 0xFFE00000 + +#define AR_D0_MISC 0x1100 +#define AR_D1_MISC 0x1104 +#define AR_D2_MISC 0x1108 +#define AR_D3_MISC 0x110c +#define AR_D4_MISC 0x1110 +#define AR_D5_MISC 0x1114 +#define AR_D6_MISC 0x1118 +#define AR_D7_MISC 0x111c +#define AR_D8_MISC 0x1120 +#define AR_D9_MISC 0x1124 +#define AR_DMISC(_i) (AR_D0_MISC + ((_i)<<2)) +#define AR_D_MISC_BKOFF_THRESH 0x0000003F +#define AR_D_MISC_RETRY_CNT_RESET_EN 0x00000040 +#define AR_D_MISC_CW_RESET_EN 0x00000080 +#define AR_D_MISC_FRAG_WAIT_EN 0x00000100 +#define AR_D_MISC_FRAG_BKOFF_EN 0x00000200 +#define AR_D_MISC_CW_BKOFF_EN 0x00001000 +#define AR_D_MISC_VIR_COL_HANDLING 0x0000C000 +#define AR_D_MISC_VIR_COL_HANDLING_S 14 +#define AR_D_MISC_VIR_COL_HANDLING_DEFAULT 0 +#define AR_D_MISC_VIR_COL_HANDLING_IGNORE 1 +#define AR_D_MISC_BEACON_USE 0x00010000 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL 0x00060000 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL_S 17 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL_NONE 0 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR 1 +#define AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL 2 +#define AR_D_MISC_ARB_LOCKOUT_IGNORE 0x00080000 +#define AR_D_MISC_SEQ_NUM_INCR_DIS 0x00100000 +#define AR_D_MISC_POST_FR_BKOFF_DIS 0x00200000 +#define AR_D_MISC_VIT_COL_CW_BKOFF_EN 0x00400000 +#define AR_D_MISC_BLOWN_IFS_RETRY_EN 0x00800000 +#define AR_D_MISC_RESV0 0xFF000000 + +#define AR_D_SEQNUM 0x1140 + +#define AR_D_GBL_IFS_SIFS 0x1030 +#define AR_D_GBL_IFS_SIFS_M 0x0000FFFF +#define AR_D_GBL_IFS_SIFS_RESV0 0xFFFFFFFF + +#define AR_D_TXBLK_BASE 0x1038 +#define AR_D_TXBLK_WRITE_BITMASK 0x0000FFFF +#define AR_D_TXBLK_WRITE_BITMASK_S 0 +#define AR_D_TXBLK_WRITE_SLICE 0x000F0000 +#define AR_D_TXBLK_WRITE_SLICE_S 16 +#define AR_D_TXBLK_WRITE_DCU 0x00F00000 +#define AR_D_TXBLK_WRITE_DCU_S 20 +#define AR_D_TXBLK_WRITE_COMMAND 0x0F000000 +#define AR_D_TXBLK_WRITE_COMMAND_S 24 + +#define AR_D_GBL_IFS_SLOT 0x1070 +#define AR_D_GBL_IFS_SLOT_M 0x0000FFFF +#define AR_D_GBL_IFS_SLOT_RESV0 0xFFFF0000 + +#define AR_D_GBL_IFS_EIFS 0x10b0 +#define AR_D_GBL_IFS_EIFS_M 0x0000FFFF +#define AR_D_GBL_IFS_EIFS_RESV0 0xFFFF0000 + 
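[Editor's note, not part of the patch.] Throughout this header a multi-bit field is described by two defines: a mask (for example AR_D_LCL_IFS_CWMIN) and a companion shift carrying an _S suffix (AR_D_LCL_IFS_CWMIN_S). The sketch below shows how such pairs are typically combined; FIELD_SM, FIELD_MS and dlcl_ifs_pack() are illustrative names for this example only, and the driver's own headers provide its real shift/mask helpers.

/* place a value into a field: shift it up, then mask to the field width */
#define FIELD_SM(_v, _f)	(((_v) << _f##_S) & (_f))
/* extract a field from a register value */
#define FIELD_MS(_reg, _f)	(((_reg) & (_f)) >> _f##_S)

static inline u32 dlcl_ifs_pack(u32 cwmin, u32 cwmax, u32 aifs)
{
	/* compose the contents of a DCU local IFS register (AR_DLCL_IFS) */
	return FIELD_SM(cwmin, AR_D_LCL_IFS_CWMIN) |
	       FIELD_SM(cwmax, AR_D_LCL_IFS_CWMAX) |
	       FIELD_SM(aifs, AR_D_LCL_IFS_AIFS);
}
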
+#define AR_D_GBL_IFS_MISC 0x10f0 +#define AR_D_GBL_IFS_MISC_LFSR_SLICE_SEL 0x00000007 +#define AR_D_GBL_IFS_MISC_TURBO_MODE 0x00000008 +#define AR_D_GBL_IFS_MISC_USEC_DURATION 0x000FFC00 +#define AR_D_GBL_IFS_MISC_DCU_ARBITER_DLY 0x00300000 +#define AR_D_GBL_IFS_MISC_RANDOM_LFSR_SLICE_DIS 0x01000000 +#define AR_D_GBL_IFS_MISC_SLOT_XMIT_WIND_LEN 0x06000000 +#define AR_D_GBL_IFS_MISC_FORCE_XMIT_SLOT_BOUND 0x08000000 +#define AR_D_GBL_IFS_MISC_IGNORE_BACKOFF 0x10000000 + +#define AR_D_FPCTL 0x1230 +#define AR_D_FPCTL_DCU 0x0000000F +#define AR_D_FPCTL_DCU_S 0 +#define AR_D_FPCTL_PREFETCH_EN 0x00000010 +#define AR_D_FPCTL_BURST_PREFETCH 0x00007FE0 +#define AR_D_FPCTL_BURST_PREFETCH_S 5 + +#define AR_D_TXPSE 0x1270 +#define AR_D_TXPSE_CTRL 0x000003FF +#define AR_D_TXPSE_RESV0 0x0000FC00 +#define AR_D_TXPSE_STATUS 0x00010000 +#define AR_D_TXPSE_RESV1 0xFFFE0000 + +#define AR_D_TXSLOTMASK 0x12f0 +#define AR_D_TXSLOTMASK_NUM 0x0000000F + +#define AR_CFG_LED 0x1f04 +#define AR_CFG_SCLK_RATE_IND 0x00000003 +#define AR_CFG_SCLK_RATE_IND_S 0 +#define AR_CFG_SCLK_32MHZ 0x00000000 +#define AR_CFG_SCLK_4MHZ 0x00000001 +#define AR_CFG_SCLK_1MHZ 0x00000002 +#define AR_CFG_SCLK_32KHZ 0x00000003 +#define AR_CFG_LED_BLINK_SLOW 0x00000008 +#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070 +#define AR_CFG_LED_MODE_SEL 0x00000380 +#define AR_CFG_LED_MODE_SEL_S 7 +#define AR_CFG_LED_POWER 0x00000280 +#define AR_CFG_LED_POWER_S 7 +#define AR_CFG_LED_NETWORK 0x00000300 +#define AR_CFG_LED_NETWORK_S 7 +#define AR_CFG_LED_MODE_PROP 0x0 +#define AR_CFG_LED_MODE_RPROP 0x1 +#define AR_CFG_LED_MODE_SPLIT 0x2 +#define AR_CFG_LED_MODE_RAND 0x3 +#define AR_CFG_LED_MODE_POWER_OFF 0x4 +#define AR_CFG_LED_MODE_POWER_ON 0x5 +#define AR_CFG_LED_MODE_NETWORK_OFF 0x4 +#define AR_CFG_LED_MODE_NETWORK_ON 0x6 +#define AR_CFG_LED_ASSOC_CTL 0x00000c00 +#define AR_CFG_LED_ASSOC_CTL_S 10 +#define AR_CFG_LED_ASSOC_NONE 0x0 +#define AR_CFG_LED_ASSOC_ACTIVE 0x1 +#define AR_CFG_LED_ASSOC_PENDING 0x2 + +#define AR_CFG_LED_BLINK_SLOW 0x00000008 +#define AR_CFG_LED_BLINK_SLOW_S 3 + +#define AR_CFG_LED_BLINK_THRESH_SEL 0x00000070 +#define AR_CFG_LED_BLINK_THRESH_SEL_S 4 + +#define AR_MAC_SLEEP 0x1f00 +#define AR_MAC_SLEEP_MAC_AWAKE 0x00000000 +#define AR_MAC_SLEEP_MAC_ASLEEP 0x00000001 + +#define AR_RC 0x4000 +#define AR_RC_AHB 0x00000001 +#define AR_RC_APB 0x00000002 +#define AR_RC_HOSTIF 0x00000100 + +#define AR_WA 0x4004 + +#define AR_PM_STATE 0x4008 +#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000 + +#define AR_HOST_TIMEOUT 0x4018 +#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF +#define AR_HOST_TIMEOUT_APB_CNTR_S 0 +#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000 +#define AR_HOST_TIMEOUT_LCL_CNTR_S 16 + +#define AR_EEPROM 0x401c +#define AR_EEPROM_ABSENT 0x00000100 +#define AR_EEPROM_CORRUPT 0x00000200 +#define AR_EEPROM_PROT_MASK 0x03FFFC00 +#define AR_EEPROM_PROT_MASK_S 10 + +#define EEPROM_PROTECT_RP_0_31 0x0001 +#define EEPROM_PROTECT_WP_0_31 0x0002 +#define EEPROM_PROTECT_RP_32_63 0x0004 +#define EEPROM_PROTECT_WP_32_63 0x0008 +#define EEPROM_PROTECT_RP_64_127 0x0010 +#define EEPROM_PROTECT_WP_64_127 0x0020 +#define EEPROM_PROTECT_RP_128_191 0x0040 +#define EEPROM_PROTECT_WP_128_191 0x0080 +#define EEPROM_PROTECT_RP_192_255 0x0100 +#define EEPROM_PROTECT_WP_192_255 0x0200 +#define EEPROM_PROTECT_RP_256_511 0x0400 +#define EEPROM_PROTECT_WP_256_511 0x0800 +#define EEPROM_PROTECT_RP_512_1023 0x1000 +#define EEPROM_PROTECT_WP_512_1023 0x2000 +#define EEPROM_PROTECT_RP_1024_2047 0x4000 +#define EEPROM_PROTECT_WP_1024_2047 0x8000 + +#define AR_SREV \ 
+ ((AR_SREV_9100(ah)) ? 0x0600 : 0x4020) + +#define AR_SREV_ID \ + ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF) +#define AR_SREV_VERSION 0x000000F0 +#define AR_SREV_VERSION_S 4 +#define AR_SREV_REVISION 0x00000007 + +#define AR_SREV_ID2 0xFFFFFFFF +#define AR_SREV_VERSION2 0xFFFC0000 +#define AR_SREV_VERSION2_S 18 +#define AR_SREV_TYPE2 0x0003F000 +#define AR_SREV_TYPE2_S 12 +#define AR_SREV_TYPE2_CHAIN 0x00001000 +#define AR_SREV_TYPE2_HOST_MODE 0x00002000 +#define AR_SREV_REVISION2 0x00000F00 +#define AR_SREV_REVISION2_S 8 + +#define AR_SREV_VERSION_5416_PCI 0xD +#define AR_SREV_VERSION_5416_PCIE 0xC +#define AR_SREV_REVISION_5416_10 0 +#define AR_SREV_REVISION_5416_20 1 +#define AR_SREV_REVISION_5416_22 2 +#define AR_SREV_VERSION_9160 0x40 +#define AR_SREV_REVISION_9160_10 0 +#define AR_SREV_REVISION_9160_11 1 +#define AR_SREV_VERSION_9280 0x80 +#define AR_SREV_REVISION_9280_10 0 +#define AR_SREV_REVISION_9280_20 1 +#define AR_SREV_REVISION_9280_21 2 +#define AR_SREV_VERSION_9285 0xC0 +#define AR_SREV_REVISION_9285_10 0 + +#define AR_SREV_9100_OR_LATER(_ah) \ + (((_ah)->ah_macVersion >= AR_SREV_VERSION_5416_PCIE)) +#define AR_SREV_5416_20_OR_LATER(_ah) \ + (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \ + ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_20)) +#define AR_SREV_5416_22_OR_LATER(_ah) \ + (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160) || \ + ((_ah)->ah_macRev >= AR_SREV_REVISION_5416_22)) +#define AR_SREV_9160(_ah) \ + (((_ah)->ah_macVersion == AR_SREV_VERSION_9160)) +#define AR_SREV_9160_10_OR_LATER(_ah) \ + (((_ah)->ah_macVersion >= AR_SREV_VERSION_9160)) +#define AR_SREV_9160_11(_ah) \ + (AR_SREV_9160(_ah) && ((_ah)->ah_macRev == AR_SREV_REVISION_9160_11)) +#define AR_SREV_9280(_ah) \ + (((_ah)->ah_macVersion == AR_SREV_VERSION_9280)) +#define AR_SREV_9280_10_OR_LATER(_ah) \ + (((_ah)->ah_macVersion >= AR_SREV_VERSION_9280)) +#define AR_SREV_9280_20(_ah) \ + (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \ + ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20)) +#define AR_SREV_9280_20_OR_LATER(_ah) \ + (((_ah)->ah_macVersion > AR_SREV_VERSION_9280) || \ + (((_ah)->ah_macVersion == AR_SREV_VERSION_9280) && \ + ((_ah)->ah_macRev >= AR_SREV_REVISION_9280_20))) + +#define AR_SREV_9285(_ah) (((_ah)->ah_macVersion == AR_SREV_VERSION_9285)) +#define AR_SREV_9285_10_OR_LATER(_ah) \ + (((_ah)->ah_macVersion >= AR_SREV_VERSION_9285)) + +#define AR_RADIO_SREV_MAJOR 0xf0 +#define AR_RAD5133_SREV_MAJOR 0xc0 +#define AR_RAD2133_SREV_MAJOR 0xd0 +#define AR_RAD5122_SREV_MAJOR 0xe0 +#define AR_RAD2122_SREV_MAJOR 0xf0 + +#define AR_AHB_MODE 0x4024 +#define AR_AHB_EXACT_WR_EN 0x00000000 +#define AR_AHB_BUF_WR_EN 0x00000001 +#define AR_AHB_EXACT_RD_EN 0x00000000 +#define AR_AHB_CACHELINE_RD_EN 0x00000002 +#define AR_AHB_PREFETCH_RD_EN 0x00000004 +#define AR_AHB_PAGE_SIZE_1K 0x00000000 +#define AR_AHB_PAGE_SIZE_2K 0x00000008 +#define AR_AHB_PAGE_SIZE_4K 0x00000010 + +#define AR_INTR_RTC_IRQ 0x00000001 +#define AR_INTR_MAC_IRQ 0x00000002 +#define AR_INTR_EEP_PROT_ACCESS 0x00000004 +#define AR_INTR_MAC_AWAKE 0x00020000 +#define AR_INTR_MAC_ASLEEP 0x00040000 +#define AR_INTR_SPURIOUS 0xFFFFFFFF + + +#define AR_INTR_SYNC_CAUSE_CLR 0x4028 + +#define AR_INTR_SYNC_CAUSE 0x4028 + +#define AR_INTR_SYNC_ENABLE 0x402c +#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000 +#define AR_INTR_SYNC_ENABLE_GPIO_S 18 + +enum { + AR_INTR_SYNC_RTC_IRQ = 0x00000001, + AR_INTR_SYNC_MAC_IRQ = 0x00000002, + AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS = 0x00000004, + AR_INTR_SYNC_APB_TIMEOUT = 0x00000008, + 
AR_INTR_SYNC_PCI_MODE_CONFLICT = 0x00000010, + AR_INTR_SYNC_HOST1_FATAL = 0x00000020, + AR_INTR_SYNC_HOST1_PERR = 0x00000040, + AR_INTR_SYNC_TRCV_FIFO_PERR = 0x00000080, + AR_INTR_SYNC_RADM_CPL_EP = 0x00000100, + AR_INTR_SYNC_RADM_CPL_DLLP_ABORT = 0x00000200, + AR_INTR_SYNC_RADM_CPL_TLP_ABORT = 0x00000400, + AR_INTR_SYNC_RADM_CPL_ECRC_ERR = 0x00000800, + AR_INTR_SYNC_RADM_CPL_TIMEOUT = 0x00001000, + AR_INTR_SYNC_LOCAL_TIMEOUT = 0x00002000, + AR_INTR_SYNC_PM_ACCESS = 0x00004000, + AR_INTR_SYNC_MAC_AWAKE = 0x00008000, + AR_INTR_SYNC_MAC_ASLEEP = 0x00010000, + AR_INTR_SYNC_MAC_SLEEP_ACCESS = 0x00020000, + AR_INTR_SYNC_ALL = 0x0003FFFF, + + + AR_INTR_SYNC_DEFAULT = (AR_INTR_SYNC_HOST1_FATAL | + AR_INTR_SYNC_HOST1_PERR | + AR_INTR_SYNC_RADM_CPL_EP | + AR_INTR_SYNC_RADM_CPL_DLLP_ABORT | + AR_INTR_SYNC_RADM_CPL_TLP_ABORT | + AR_INTR_SYNC_RADM_CPL_ECRC_ERR | + AR_INTR_SYNC_RADM_CPL_TIMEOUT | + AR_INTR_SYNC_LOCAL_TIMEOUT | + AR_INTR_SYNC_MAC_SLEEP_ACCESS), + + AR_INTR_SYNC_SPURIOUS = 0xFFFFFFFF, + +}; + +#define AR_INTR_ASYNC_MASK 0x4030 +#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000 +#define AR_INTR_ASYNC_MASK_GPIO_S 18 + +#define AR_INTR_SYNC_MASK 0x4034 +#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000 +#define AR_INTR_SYNC_MASK_GPIO_S 18 + +#define AR_INTR_ASYNC_CAUSE_CLR 0x4038 +#define AR_INTR_ASYNC_CAUSE 0x4038 + +#define AR_INTR_ASYNC_ENABLE 0x403c +#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000 +#define AR_INTR_ASYNC_ENABLE_GPIO_S 18 + +#define AR_PCIE_SERDES 0x4040 +#define AR_PCIE_SERDES2 0x4044 +#define AR_PCIE_PM_CTRL 0x4014 +#define AR_PCIE_PM_CTRL_ENA 0x00080000 + +#define AR_NUM_GPIO 14 +#define AR928X_NUM_GPIO 10 + +#define AR_GPIO_IN_OUT 0x4048 +#define AR_GPIO_IN_VAL 0x0FFFC000 +#define AR_GPIO_IN_VAL_S 14 +#define AR928X_GPIO_IN_VAL 0x000FFC00 +#define AR928X_GPIO_IN_VAL_S 10 + +#define AR_GPIO_OE_OUT 0x404c +#define AR_GPIO_OE_OUT_DRV 0x3 +#define AR_GPIO_OE_OUT_DRV_NO 0x0 +#define AR_GPIO_OE_OUT_DRV_LOW 0x1 +#define AR_GPIO_OE_OUT_DRV_HI 0x2 +#define AR_GPIO_OE_OUT_DRV_ALL 0x3 + +#define AR_GPIO_INTR_POL 0x4050 +#define AR_GPIO_INTR_POL_VAL 0x00001FFF +#define AR_GPIO_INTR_POL_VAL_S 0 + +#define AR_GPIO_INPUT_EN_VAL 0x4054 +#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF 0x00000080 +#define AR_GPIO_INPUT_EN_VAL_RFSILENT_DEF_S 7 +#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB 0x00008000 +#define AR_GPIO_INPUT_EN_VAL_RFSILENT_BB_S 15 +#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 +#define AR_GPIO_JTAG_DISABLE 0x00020000 + +#define AR_GPIO_INPUT_MUX1 0x4058 + +#define AR_GPIO_INPUT_MUX2 0x405c +#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f +#define AR_GPIO_INPUT_MUX2_CLK25_S 0 +#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0 +#define AR_GPIO_INPUT_MUX2_RFSILENT_S 4 +#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00 +#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8 + +#define AR_GPIO_OUTPUT_MUX1 0x4060 +#define AR_GPIO_OUTPUT_MUX2 0x4064 +#define AR_GPIO_OUTPUT_MUX3 0x4068 + +#define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0 +#define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1 +#define AR_GPIO_OUTPUT_MUX_AS_PCIE_POWER_LED 2 +#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5 +#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6 + +#define AR_INPUT_STATE 0x406c + +#define AR_EEPROM_STATUS_DATA 0x407c +#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff +#define AR_EEPROM_STATUS_DATA_VAL_S 0 +#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000 +#define AR_EEPROM_STATUS_DATA_BUSY_ACCESS 0x00020000 +#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000 +#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000 + +#define AR_OBS 0x4080 + +#define 
AR_PCIE_MSI 0x4094 +#define AR_PCIE_MSI_ENABLE 0x00000001 + + +#define AR_RTC_9160_PLL_DIV 0x000003ff +#define AR_RTC_9160_PLL_DIV_S 0 +#define AR_RTC_9160_PLL_REFDIV 0x00003C00 +#define AR_RTC_9160_PLL_REFDIV_S 10 +#define AR_RTC_9160_PLL_CLKSEL 0x0000C000 +#define AR_RTC_9160_PLL_CLKSEL_S 14 + +#define AR_RTC_BASE 0x00020000 +#define AR_RTC_RC \ + (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000 +#define AR_RTC_RC_M 0x00000003 +#define AR_RTC_RC_MAC_WARM 0x00000001 +#define AR_RTC_RC_MAC_COLD 0x00000002 +#define AR_RTC_RC_COLD_RESET 0x00000004 +#define AR_RTC_RC_WARM_RESET 0x00000008 + +#define AR_RTC_PLL_CONTROL \ + (AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014 + +#define AR_RTC_PLL_DIV 0x0000001f +#define AR_RTC_PLL_DIV_S 0 +#define AR_RTC_PLL_DIV2 0x00000020 +#define AR_RTC_PLL_REFDIV_5 0x000000c0 +#define AR_RTC_PLL_CLKSEL 0x00000300 +#define AR_RTC_PLL_CLKSEL_S 8 + + + +#define AR_RTC_RESET \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040) +#define AR_RTC_RESET_EN (0x00000001) + +#define AR_RTC_STATUS \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044) + +#define AR_RTC_STATUS_M \ + ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f) + +#define AR_RTC_PM_STATUS_M 0x0000000f + +#define AR_RTC_STATUS_SHUTDOWN 0x00000001 +#define AR_RTC_STATUS_ON 0x00000002 +#define AR_RTC_STATUS_SLEEP 0x00000004 +#define AR_RTC_STATUS_WAKEUP 0x00000008 + +#define AR_RTC_SLEEP_CLK \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048) +#define AR_RTC_FORCE_DERIVED_CLK 0x2 + +#define AR_RTC_FORCE_WAKE \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c) +#define AR_RTC_FORCE_WAKE_EN 0x00000001 +#define AR_RTC_FORCE_WAKE_ON_INT 0x00000002 + + +#define AR_RTC_INTR_CAUSE \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050) + +#define AR_RTC_INTR_ENABLE \ + ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054) + +#define AR_RTC_INTR_MASK \ + ((AR_SREV_9100(ah)) ? 
(AR_RTC_BASE + 0x0058) : 0x7058) + +#define AR_SEQ_MASK 0x8060 + +#define AR_AN_RF2G1_CH0 0x7810 +#define AR_AN_RF2G1_CH0_OB 0x03800000 +#define AR_AN_RF2G1_CH0_OB_S 23 +#define AR_AN_RF2G1_CH0_DB 0x1C000000 +#define AR_AN_RF2G1_CH0_DB_S 26 + +#define AR_AN_RF5G1_CH0 0x7818 +#define AR_AN_RF5G1_CH0_OB5 0x00070000 +#define AR_AN_RF5G1_CH0_OB5_S 16 +#define AR_AN_RF5G1_CH0_DB5 0x00380000 +#define AR_AN_RF5G1_CH0_DB5_S 19 + +#define AR_AN_RF2G1_CH1 0x7834 +#define AR_AN_RF2G1_CH1_OB 0x03800000 +#define AR_AN_RF2G1_CH1_OB_S 23 +#define AR_AN_RF2G1_CH1_DB 0x1C000000 +#define AR_AN_RF2G1_CH1_DB_S 26 + +#define AR_AN_RF5G1_CH1 0x783C +#define AR_AN_RF5G1_CH1_OB5 0x00070000 +#define AR_AN_RF5G1_CH1_OB5_S 16 +#define AR_AN_RF5G1_CH1_DB5 0x00380000 +#define AR_AN_RF5G1_CH1_DB5_S 19 + +#define AR_AN_TOP2 0x7894 +#define AR_AN_TOP2_XPABIAS_LVL 0xC0000000 +#define AR_AN_TOP2_XPABIAS_LVL_S 30 +#define AR_AN_TOP2_LOCALBIAS 0x00200000 +#define AR_AN_TOP2_LOCALBIAS_S 21 +#define AR_AN_TOP2_PWDCLKIND 0x00400000 +#define AR_AN_TOP2_PWDCLKIND_S 22 + +#define AR_AN_SYNTH9 0x7868 +#define AR_AN_SYNTH9_REFDIVA 0xf8000000 +#define AR_AN_SYNTH9_REFDIVA_S 27 + +#define AR_STA_ID0 0x8000 +#define AR_STA_ID1 0x8004 +#define AR_STA_ID1_SADH_MASK 0x0000FFFF +#define AR_STA_ID1_STA_AP 0x00010000 +#define AR_STA_ID1_ADHOC 0x00020000 +#define AR_STA_ID1_PWR_SAV 0x00040000 +#define AR_STA_ID1_KSRCHDIS 0x00080000 +#define AR_STA_ID1_PCF 0x00100000 +#define AR_STA_ID1_USE_DEFANT 0x00200000 +#define AR_STA_ID1_DEFANT_UPDATE 0x00400000 +#define AR_STA_ID1_RTS_USE_DEF 0x00800000 +#define AR_STA_ID1_ACKCTS_6MB 0x01000000 +#define AR_STA_ID1_BASE_RATE_11B 0x02000000 +#define AR_STA_ID1_SECTOR_SELF_GEN 0x04000000 +#define AR_STA_ID1_CRPT_MIC_ENABLE 0x08000000 +#define AR_STA_ID1_KSRCH_MODE 0x10000000 +#define AR_STA_ID1_PRESERVE_SEQNUM 0x20000000 +#define AR_STA_ID1_CBCIV_ENDIAN 0x40000000 +#define AR_STA_ID1_MCAST_KSRCH 0x80000000 + +#define AR_BSS_ID0 0x8008 +#define AR_BSS_ID1 0x800C +#define AR_BSS_ID1_U16 0x0000FFFF +#define AR_BSS_ID1_AID 0x07FF0000 +#define AR_BSS_ID1_AID_S 16 + +#define AR_BCN_RSSI_AVE 0x8010 +#define AR_BCN_RSSI_AVE_MASK 0x00000FFF + +#define AR_TIME_OUT 0x8014 +#define AR_TIME_OUT_ACK 0x00003FFF +#define AR_TIME_OUT_ACK_S 0 +#define AR_TIME_OUT_CTS 0x3FFF0000 +#define AR_TIME_OUT_CTS_S 16 + +#define AR_RSSI_THR 0x8018 +#define AR_RSSI_THR_MASK 0x000000FF +#define AR_RSSI_THR_BM_THR 0x0000FF00 +#define AR_RSSI_THR_BM_THR_S 8 +#define AR_RSSI_BCN_WEIGHT 0x1F000000 +#define AR_RSSI_BCN_WEIGHT_S 24 +#define AR_RSSI_BCN_RSSI_RST 0x20000000 + +#define AR_USEC 0x801c +#define AR_USEC_USEC 0x0000007F +#define AR_USEC_TX_LAT 0x007FC000 +#define AR_USEC_TX_LAT_S 14 +#define AR_USEC_RX_LAT 0x1F800000 +#define AR_USEC_RX_LAT_S 23 + +#define AR_RESET_TSF 0x8020 +#define AR_RESET_TSF_ONCE 0x01000000 + +#define AR_MAX_CFP_DUR 0x8038 +#define AR_CFP_VAL 0x0000FFFF + +#define AR_RX_FILTER 0x803C +#define AR_RX_FILTER_ALL 0x00000000 +#define AR_RX_UCAST 0x00000001 +#define AR_RX_MCAST 0x00000002 +#define AR_RX_BCAST 0x00000004 +#define AR_RX_CONTROL 0x00000008 +#define AR_RX_BEACON 0x00000010 +#define AR_RX_PROM 0x00000020 +#define AR_RX_PROBE_REQ 0x00000080 +#define AR_RX_MY_BEACON 0x00000200 +#define AR_RX_COMPR_BAR 0x00000400 +#define AR_RX_COMPR_BA 0x00000800 +#define AR_RX_UNCOM_BA_BAR 0x00001000 + +#define AR_MCAST_FIL0 0x8040 +#define AR_MCAST_FIL1 0x8044 + +#define AR_DIAG_SW 0x8048 +#define AR_DIAG_CACHE_ACK 0x00000001 +#define AR_DIAG_ACK_DIS 0x00000002 +#define AR_DIAG_CTS_DIS 0x00000004 +#define 
AR_DIAG_ENCRYPT_DIS 0x00000008 +#define AR_DIAG_DECRYPT_DIS 0x00000010 +#define AR_DIAG_RX_DIS 0x00000020 +#define AR_DIAG_LOOP_BACK 0x00000040 +#define AR_DIAG_CORR_FCS 0x00000080 +#define AR_DIAG_CHAN_INFO 0x00000100 +#define AR_DIAG_SCRAM_SEED 0x0001FE00 +#define AR_DIAG_SCRAM_SEED_S 8 +#define AR_DIAG_FRAME_NV0 0x00020000 +#define AR_DIAG_OBS_PT_SEL1 0x000C0000 +#define AR_DIAG_OBS_PT_SEL1_S 18 +#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 +#define AR_DIAG_IGNORE_VIRT_CS 0x00200000 +#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000 +#define AR_DIAG_EIFS_CTRL_ENA 0x00800000 +#define AR_DIAG_DUAL_CHAIN_INFO 0x01000000 +#define AR_DIAG_RX_ABORT 0x02000000 +#define AR_DIAG_SATURATE_CYCLE_CNT 0x04000000 +#define AR_DIAG_OBS_PT_SEL2 0x08000000 +#define AR_DIAG_RX_CLEAR_CTL_LOW 0x10000000 +#define AR_DIAG_RX_CLEAR_EXT_LOW 0x20000000 + +#define AR_TSF_L32 0x804c +#define AR_TSF_U32 0x8050 + +#define AR_TST_ADDAC 0x8054 +#define AR_DEF_ANTENNA 0x8058 + +#define AR_AES_MUTE_MASK0 0x805c +#define AR_AES_MUTE_MASK0_FC 0x0000FFFF +#define AR_AES_MUTE_MASK0_QOS 0xFFFF0000 +#define AR_AES_MUTE_MASK0_QOS_S 16 + +#define AR_AES_MUTE_MASK1 0x8060 +#define AR_AES_MUTE_MASK1_SEQ 0x0000FFFF + +#define AR_GATED_CLKS 0x8064 +#define AR_GATED_CLKS_TX 0x00000002 +#define AR_GATED_CLKS_RX 0x00000004 +#define AR_GATED_CLKS_REG 0x00000008 + +#define AR_OBS_BUS_CTRL 0x8068 +#define AR_OBS_BUS_SEL_1 0x00040000 +#define AR_OBS_BUS_SEL_2 0x00080000 +#define AR_OBS_BUS_SEL_3 0x000C0000 +#define AR_OBS_BUS_SEL_4 0x08040000 +#define AR_OBS_BUS_SEL_5 0x08080000 + +#define AR_OBS_BUS_1 0x806c +#define AR_OBS_BUS_1_PCU 0x00000001 +#define AR_OBS_BUS_1_RX_END 0x00000002 +#define AR_OBS_BUS_1_RX_WEP 0x00000004 +#define AR_OBS_BUS_1_RX_BEACON 0x00000008 +#define AR_OBS_BUS_1_RX_FILTER 0x00000010 +#define AR_OBS_BUS_1_TX_HCF 0x00000020 +#define AR_OBS_BUS_1_QUIET_TIME 0x00000040 +#define AR_OBS_BUS_1_CHAN_IDLE 0x00000080 +#define AR_OBS_BUS_1_TX_HOLD 0x00000100 +#define AR_OBS_BUS_1_TX_FRAME 0x00000200 +#define AR_OBS_BUS_1_RX_FRAME 0x00000400 +#define AR_OBS_BUS_1_RX_CLEAR 0x00000800 +#define AR_OBS_BUS_1_WEP_STATE 0x0003F000 +#define AR_OBS_BUS_1_WEP_STATE_S 12 +#define AR_OBS_BUS_1_RX_STATE 0x01F00000 +#define AR_OBS_BUS_1_RX_STATE_S 20 +#define AR_OBS_BUS_1_TX_STATE 0x7E000000 +#define AR_OBS_BUS_1_TX_STATE_S 25 + +#define AR_LAST_TSTP 0x8080 +#define AR_NAV 0x8084 +#define AR_RTS_OK 0x8088 +#define AR_RTS_FAIL 0x808c +#define AR_ACK_FAIL 0x8090 +#define AR_FCS_FAIL 0x8094 +#define AR_BEACON_CNT 0x8098 + +#define AR_SLEEP1 0x80d4 +#define AR_SLEEP1_ASSUME_DTIM 0x00080000 +#define AR_SLEEP1_CAB_TIMEOUT 0xFFE00000 +#define AR_SLEEP1_CAB_TIMEOUT_S 21 + +#define AR_SLEEP2 0x80d8 +#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000 +#define AR_SLEEP2_BEACON_TIMEOUT_S 21 + +#define AR_BSSMSKL 0x80e0 +#define AR_BSSMSKU 0x80e4 + +#define AR_TPC 0x80e8 +#define AR_TPC_ACK 0x0000003f +#define AR_TPC_ACK_S 0x00 +#define AR_TPC_CTS 0x00003f00 +#define AR_TPC_CTS_S 0x08 +#define AR_TPC_CHIRP 0x003f0000 +#define AR_TPC_CHIRP_S 0x16 + +#define AR_TFCNT 0x80ec +#define AR_RFCNT 0x80f0 +#define AR_RCCNT 0x80f4 +#define AR_CCCNT 0x80f8 + +#define AR_QUIET1 0x80fc +#define AR_QUIET1_NEXT_QUIET_S 0 +#define AR_QUIET1_NEXT_QUIET_M 0x0000ffff +#define AR_QUIET1_QUIET_ENABLE 0x00010000 +#define AR_QUIET1_QUIET_ACK_CTS_ENABLE 0x00020000 +#define AR_QUIET2 0x8100 +#define AR_QUIET2_QUIET_PERIOD_S 0 +#define AR_QUIET2_QUIET_PERIOD_M 0x0000ffff +#define AR_QUIET2_QUIET_DUR_S 16 +#define AR_QUIET2_QUIET_DUR 0xffff0000 + +#define AR_TSF_PARM 0x8104 +#define 
AR_TSF_INCREMENT_M 0x000000ff +#define AR_TSF_INCREMENT_S 0x00 + +#define AR_QOS_NO_ACK 0x8108 +#define AR_QOS_NO_ACK_TWO_BIT 0x0000000f +#define AR_QOS_NO_ACK_TWO_BIT_S 0 +#define AR_QOS_NO_ACK_BIT_OFF 0x00000070 +#define AR_QOS_NO_ACK_BIT_OFF_S 4 +#define AR_QOS_NO_ACK_BYTE_OFF 0x00000180 +#define AR_QOS_NO_ACK_BYTE_OFF_S 7 + +#define AR_PHY_ERR 0x810c + +#define AR_PHY_ERR_DCHIRP 0x00000008 +#define AR_PHY_ERR_RADAR 0x00000020 +#define AR_PHY_ERR_OFDM_TIMING 0x00020000 +#define AR_PHY_ERR_CCK_TIMING 0x02000000 + +#define AR_RXFIFO_CFG 0x8114 + + +#define AR_MIC_QOS_CONTROL 0x8118 +#define AR_MIC_QOS_SELECT 0x811c + +#define AR_PCU_MISC 0x8120 +#define AR_PCU_FORCE_BSSID_MATCH 0x00000001 +#define AR_PCU_MIC_NEW_LOC_ENA 0x00000004 +#define AR_PCU_TX_ADD_TSF 0x00000008 +#define AR_PCU_CCK_SIFS_MODE 0x00000010 +#define AR_PCU_RX_ANT_UPDT 0x00000800 +#define AR_PCU_TXOP_TBTT_LIMIT_ENA 0x00001000 +#define AR_PCU_MISS_BCN_IN_SLEEP 0x00004000 +#define AR_PCU_BUG_12306_FIX_ENA 0x00020000 +#define AR_PCU_FORCE_QUIET_COLL 0x00040000 +#define AR_PCU_TBTT_PROTECT 0x00200000 +#define AR_PCU_CLEAR_VMF 0x01000000 +#define AR_PCU_CLEAR_BA_VALID 0x04000000 + + +#define AR_FILT_OFDM 0x8124 +#define AR_FILT_OFDM_COUNT 0x00FFFFFF + +#define AR_FILT_CCK 0x8128 +#define AR_FILT_CCK_COUNT 0x00FFFFFF + +#define AR_PHY_ERR_1 0x812c +#define AR_PHY_ERR_1_COUNT 0x00FFFFFF +#define AR_PHY_ERR_MASK_1 0x8130 + +#define AR_PHY_ERR_2 0x8134 +#define AR_PHY_ERR_2_COUNT 0x00FFFFFF +#define AR_PHY_ERR_MASK_2 0x8138 + +#define AR_PHY_COUNTMAX (3 << 22) +#define AR_MIBCNT_INTRMASK (3 << 22) + +#define AR_TSF_THRESHOLD 0x813c +#define AR_TSF_THRESHOLD_VAL 0x0000FFFF + +#define AR_PHY_ERR_EIFS_MASK 8144 + +#define AR_PHY_ERR_3 0x8168 +#define AR_PHY_ERR_3_COUNT 0x00FFFFFF +#define AR_PHY_ERR_MASK_3 0x816c + +#define AR_TXSIFS 0x81d0 +#define AR_TXSIFS_TIME 0x000000FF +#define AR_TXSIFS_TX_LATENCY 0x00000F00 +#define AR_TXSIFS_TX_LATENCY_S 8 +#define AR_TXSIFS_ACK_SHIFT 0x00007000 +#define AR_TXSIFS_ACK_SHIFT_S 12 + +#define AR_TXOP_X 0x81ec +#define AR_TXOP_X_VAL 0x000000FF + + +#define AR_TXOP_0_3 0x81f0 +#define AR_TXOP_4_7 0x81f4 +#define AR_TXOP_8_11 0x81f8 +#define AR_TXOP_12_15 0x81fc + + +#define AR_NEXT_TBTT_TIMER 0x8200 +#define AR_NEXT_DMA_BEACON_ALERT 0x8204 +#define AR_NEXT_SWBA 0x8208 +#define AR_NEXT_CFP 0x8208 +#define AR_NEXT_HCF 0x820C +#define AR_NEXT_TIM 0x8210 +#define AR_NEXT_DTIM 0x8214 +#define AR_NEXT_QUIET_TIMER 0x8218 +#define AR_NEXT_NDP_TIMER 0x821C + +#define AR_BEACON_PERIOD 0x8220 +#define AR_DMA_BEACON_PERIOD 0x8224 +#define AR_SWBA_PERIOD 0x8228 +#define AR_HCF_PERIOD 0x822C +#define AR_TIM_PERIOD 0x8230 +#define AR_DTIM_PERIOD 0x8234 +#define AR_QUIET_PERIOD 0x8238 +#define AR_NDP_PERIOD 0x823C + +#define AR_TIMER_MODE 0x8240 +#define AR_TBTT_TIMER_EN 0x00000001 +#define AR_DBA_TIMER_EN 0x00000002 +#define AR_SWBA_TIMER_EN 0x00000004 +#define AR_HCF_TIMER_EN 0x00000008 +#define AR_TIM_TIMER_EN 0x00000010 +#define AR_DTIM_TIMER_EN 0x00000020 +#define AR_QUIET_TIMER_EN 0x00000040 +#define AR_NDP_TIMER_EN 0x00000080 +#define AR_TIMER_OVERFLOW_INDEX 0x00000700 +#define AR_TIMER_OVERFLOW_INDEX_S 8 +#define AR_TIMER_THRESH 0xFFFFF000 +#define AR_TIMER_THRESH_S 12 + +#define AR_SLP32_MODE 0x8244 +#define AR_SLP32_HALF_CLK_LATENCY 0x000FFFFF +#define AR_SLP32_ENA 0x00100000 +#define AR_SLP32_TSF_WRITE_STATUS 0x00200000 + +#define AR_SLP32_WAKE 0x8248 +#define AR_SLP32_WAKE_XTL_TIME 0x0000FFFF + +#define AR_SLP32_INC 0x824c +#define AR_SLP32_TST_INC 0x000FFFFF + +#define AR_SLP_CNT 0x8250 +#define 
AR_SLP_CYCLE_CNT 0x8254 + +#define AR_SLP_MIB_CTRL 0x8258 +#define AR_SLP_MIB_CLEAR 0x00000001 +#define AR_SLP_MIB_PENDING 0x00000002 + +#define AR_2040_MODE 0x8318 +#define AR_2040_JOINED_RX_CLEAR 0x00000001 + + +#define AR_EXTRCCNT 0x8328 + +#define AR_SELFGEN_MASK 0x832c + +#define AR_PCU_TXBUF_CTRL 0x8340 +#define AR_PCU_TXBUF_CTRL_SIZE_MASK 0x7FF +#define AR_PCU_TXBUF_CTRL_USABLE_SIZE 0x700 +#define AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE 0x380 + +#define AR_KEYTABLE_0 0x8800 +#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32)) +#define AR_KEY_CACHE_SIZE 128 +#define AR_RSVD_KEYTABLE_ENTRIES 4 +#define AR_KEY_TYPE 0x00000007 +#define AR_KEYTABLE_TYPE_40 0x00000000 +#define AR_KEYTABLE_TYPE_104 0x00000001 +#define AR_KEYTABLE_TYPE_128 0x00000003 +#define AR_KEYTABLE_TYPE_TKIP 0x00000004 +#define AR_KEYTABLE_TYPE_AES 0x00000005 +#define AR_KEYTABLE_TYPE_CCM 0x00000006 +#define AR_KEYTABLE_TYPE_CLR 0x00000007 +#define AR_KEYTABLE_ANT 0x00000008 +#define AR_KEYTABLE_VALID 0x00008000 +#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0) +#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4) +#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8) +#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12) +#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16) +#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20) +#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24) +#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28) + +#endif diff --git a/drivers/net/wireless/ath9k/regd.c b/drivers/net/wireless/ath9k/regd.c new file mode 100644 index 0000000..62e2888 --- /dev/null +++ b/drivers/net/wireless/ath9k/regd.c @@ -0,0 +1,1026 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include "core.h" +#include "hw.h" +#include "regd.h" +#include "regd_common.h" + +static int ath9k_regd_chansort(const void *a, const void *b) +{ + const struct ath9k_channel *ca = a; + const struct ath9k_channel *cb = b; + + return (ca->channel == cb->channel) ? 
+ (ca->channelFlags & CHAN_FLAGS) - + (cb->channelFlags & CHAN_FLAGS) : ca->channel - cb->channel; +} + +static void +ath9k_regd_sort(void *a, u32 n, u32 size, ath_hal_cmp_t *cmp) +{ + u8 *aa = a; + u8 *ai, *t; + + for (ai = aa + size; --n >= 1; ai += size) + for (t = ai; t > aa; t -= size) { + u8 *u = t - size; + if (cmp(u, t) <= 0) + break; + swap(u, t, size); + } +} + +static u16 ath9k_regd_get_eepromRD(struct ath_hal *ah) +{ + return ah->ah_currentRD & ~WORLDWIDE_ROAMING_FLAG; +} + +static bool ath9k_regd_is_chan_bm_zero(u64 *bitmask) +{ + int i; + + for (i = 0; i < BMLEN; i++) { + if (bitmask[i] != 0) + return false; + } + return true; +} + +static bool ath9k_regd_is_eeprom_valid(struct ath_hal *ah) +{ + u16 rd = ath9k_regd_get_eepromRD(ah); + int i; + + if (rd & COUNTRY_ERD_FLAG) { + u16 cc = rd & ~COUNTRY_ERD_FLAG; + for (i = 0; i < ARRAY_SIZE(allCountries); i++) + if (allCountries[i].countryCode == cc) + return true; + } else { + for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) + if (regDomainPairs[i].regDmnEnum == rd) + return true; + } + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: invalid regulatory domain/country code 0x%x\n", + __func__, rd); + return false; +} + +static bool ath9k_regd_is_fcc_midband_supported(struct ath_hal *ah) +{ + u32 regcap; + + regcap = ah->ah_caps.reg_cap; + + if (regcap & AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND) + return true; + else + return false; +} + +static bool ath9k_regd_is_ccode_valid(struct ath_hal *ah, + u16 cc) +{ + u16 rd; + int i; + + if (cc == CTRY_DEFAULT) + return true; + if (cc == CTRY_DEBUG) + return true; + + rd = ath9k_regd_get_eepromRD(ah); + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: EEPROM regdomain 0x%x\n", + __func__, rd); + + if (rd & COUNTRY_ERD_FLAG) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: EEPROM setting is country code %u\n", + __func__, rd & ~COUNTRY_ERD_FLAG); + return cc == (rd & ~COUNTRY_ERD_FLAG); + } + + for (i = 0; i < ARRAY_SIZE(allCountries); i++) { + if (cc == allCountries[i].countryCode) { +#ifdef AH_SUPPORT_11D + if ((rd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) + return true; +#endif + if (allCountries[i].regDmnEnum == rd || + rd == DEBUG_REG_DMN || rd == NO_ENUMRD) + return true; + } + } + return false; +} + +static void +ath9k_regd_get_wmodes_nreg(struct ath_hal *ah, + struct country_code_to_enum_rd *country, + struct regDomain *rd5GHz, + unsigned long *modes_allowed) +{ + bitmap_copy(modes_allowed, ah->ah_caps.wireless_modes, ATH9K_MODE_MAX); + + if (test_bit(ATH9K_MODE_11G, ah->ah_caps.wireless_modes) && + (!country->allow11g)) + clear_bit(ATH9K_MODE_11G, modes_allowed); + + if (test_bit(ATH9K_MODE_11A, ah->ah_caps.wireless_modes) && + (ath9k_regd_is_chan_bm_zero(rd5GHz->chan11a))) + clear_bit(ATH9K_MODE_11A, modes_allowed); + + if (test_bit(ATH9K_MODE_11NG_HT20, ah->ah_caps.wireless_modes) + && (!country->allow11ng20)) + clear_bit(ATH9K_MODE_11NG_HT20, modes_allowed); + + if (test_bit(ATH9K_MODE_11NA_HT20, ah->ah_caps.wireless_modes) + && (!country->allow11na20)) + clear_bit(ATH9K_MODE_11NA_HT20, modes_allowed); + + if (test_bit(ATH9K_MODE_11NG_HT40PLUS, ah->ah_caps.wireless_modes) && + (!country->allow11ng40)) + clear_bit(ATH9K_MODE_11NG_HT40PLUS, modes_allowed); + + if (test_bit(ATH9K_MODE_11NG_HT40MINUS, ah->ah_caps.wireless_modes) && + (!country->allow11ng40)) + clear_bit(ATH9K_MODE_11NG_HT40MINUS, modes_allowed); + + if (test_bit(ATH9K_MODE_11NA_HT40PLUS, ah->ah_caps.wireless_modes) && + (!country->allow11na40)) + clear_bit(ATH9K_MODE_11NA_HT40PLUS, modes_allowed); + + if 
(test_bit(ATH9K_MODE_11NA_HT40MINUS, ah->ah_caps.wireless_modes) && + (!country->allow11na40)) + clear_bit(ATH9K_MODE_11NA_HT40MINUS, modes_allowed); +} + +bool ath9k_regd_is_public_safety_sku(struct ath_hal *ah) +{ + u16 rd; + + rd = ath9k_regd_get_eepromRD(ah); + + switch (rd) { + case FCC4_FCCA: + case (CTRY_UNITED_STATES_FCC49 | COUNTRY_ERD_FLAG): + return true; + case DEBUG_REG_DMN: + case NO_ENUMRD: + if (ah->ah_countryCode == CTRY_UNITED_STATES_FCC49) + return true; + break; + } + return false; +} + +static struct country_code_to_enum_rd* +ath9k_regd_find_country(u16 countryCode) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(allCountries); i++) { + if (allCountries[i].countryCode == countryCode) + return &allCountries[i]; + } + return NULL; +} + +static u16 ath9k_regd_get_default_country(struct ath_hal *ah) +{ + u16 rd; + int i; + + rd = ath9k_regd_get_eepromRD(ah); + if (rd & COUNTRY_ERD_FLAG) { + struct country_code_to_enum_rd *country = NULL; + u16 cc = rd & ~COUNTRY_ERD_FLAG; + + country = ath9k_regd_find_country(cc); + if (country != NULL) + return cc; + } + + for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) + if (regDomainPairs[i].regDmnEnum == rd) { + if (regDomainPairs[i].singleCC != 0) + return regDomainPairs[i].singleCC; + else + i = ARRAY_SIZE(regDomainPairs); + } + return CTRY_DEFAULT; +} + +static bool ath9k_regd_is_valid_reg_domain(int regDmn, + struct regDomain *rd) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(regDomains); i++) { + if (regDomains[i].regDmnEnum == regDmn) { + if (rd != NULL) { + memcpy(rd, ®Domains[i], + sizeof(struct regDomain)); + } + return true; + } + } + return false; +} + +static bool ath9k_regd_is_valid_reg_domainPair(int regDmnPair) +{ + int i; + + if (regDmnPair == NO_ENUMRD) + return false; + for (i = 0; i < ARRAY_SIZE(regDomainPairs); i++) { + if (regDomainPairs[i].regDmnEnum == regDmnPair) + return true; + } + return false; +} + +static bool +ath9k_regd_get_wmode_regdomain(struct ath_hal *ah, int regDmn, + u16 channelFlag, struct regDomain *rd) +{ + int i, found; + u64 flags = NO_REQ; + struct reg_dmn_pair_mapping *regPair = NULL; + int regOrg; + + regOrg = regDmn; + if (regDmn == CTRY_DEFAULT) { + u16 rdnum; + rdnum = ath9k_regd_get_eepromRD(ah); + + if (!(rdnum & COUNTRY_ERD_FLAG)) { + if (ath9k_regd_is_valid_reg_domain(rdnum, NULL) || + ath9k_regd_is_valid_reg_domainPair(rdnum)) { + regDmn = rdnum; + } + } + } + + if ((regDmn & MULTI_DOMAIN_MASK) == 0) { + for (i = 0, found = 0; + (i < ARRAY_SIZE(regDomainPairs)) && (!found); i++) { + if (regDomainPairs[i].regDmnEnum == regDmn) { + regPair = ®DomainPairs[i]; + found = 1; + } + } + if (!found) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: Failed to find reg domain pair %u\n", + __func__, regDmn); + return false; + } + if (!(channelFlag & CHANNEL_2GHZ)) { + regDmn = regPair->regDmn5GHz; + flags = regPair->flags5GHz; + } + if (channelFlag & CHANNEL_2GHZ) { + regDmn = regPair->regDmn2GHz; + flags = regPair->flags2GHz; + } + } + + found = ath9k_regd_is_valid_reg_domain(regDmn, rd); + if (!found) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: Failed to find unitary reg domain %u\n", + __func__, regDmn); + return false; + } else { + rd->pscan &= regPair->pscanMask; + if (((regOrg & MULTI_DOMAIN_MASK) == 0) && + (flags != NO_REQ)) { + rd->flags = flags; + } + + rd->flags &= (channelFlag & CHANNEL_2GHZ) ? 
+ REG_DOMAIN_2GHZ_MASK : REG_DOMAIN_5GHZ_MASK; + return true; + } +} + +static bool ath9k_regd_is_bit_set(int bit, u64 *bitmask) +{ + int byteOffset, bitnum; + u64 val; + + byteOffset = bit / 64; + bitnum = bit - byteOffset * 64; + val = ((u64) 1) << bitnum; + if (bitmask[byteOffset] & val) + return true; + else + return false; +} + +static void +ath9k_regd_add_reg_classid(u8 *regclassids, u32 maxregids, + u32 *nregids, u8 regclassid) +{ + int i; + + if (regclassid == 0) + return; + + for (i = 0; i < maxregids; i++) { + if (regclassids[i] == regclassid) + return; + if (regclassids[i] == 0) + break; + } + + if (i == maxregids) + return; + else { + regclassids[i] = regclassid; + *nregids += 1; + } + + return; +} + +static bool +ath9k_regd_get_eeprom_reg_ext_bits(struct ath_hal *ah, + enum reg_ext_bitmap bit) +{ + return (ah->ah_currentRDExt & (1 << bit)) ? true : false; +} + +#ifdef ATH_NF_PER_CHAN + +static void ath9k_regd_init_rf_buffer(struct ath9k_channel *ichans, + int nchans) +{ + int i, j, next; + + for (next = 0; next < nchans; next++) { + for (i = 0; i < NUM_NF_READINGS; i++) { + ichans[next].nfCalHist[i].currIndex = 0; + ichans[next].nfCalHist[i].privNF = + AR_PHY_CCA_MAX_GOOD_VALUE; + ichans[next].nfCalHist[i].invalidNFcount = + AR_PHY_CCA_FILTERWINDOW_LENGTH; + for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) { + ichans[next].nfCalHist[i].nfCalBuffer[j] = + AR_PHY_CCA_MAX_GOOD_VALUE; + } + } + } +} +#endif + +static int ath9k_regd_is_chan_present(struct ath_hal *ah, + u16 c) +{ + int i; + + for (i = 0; i < 150; i++) { + if (!ah->ah_channels[i].channel) + return -1; + else if (ah->ah_channels[i].channel == c) + return i; + } + + return -1; +} + +static bool +ath9k_regd_add_channel(struct ath_hal *ah, + u16 c, + u16 c_lo, + u16 c_hi, + u16 maxChan, + u8 ctl, + int pos, + struct regDomain rd5GHz, + struct RegDmnFreqBand *fband, + struct regDomain *rd, + const struct cmode *cm, + struct ath9k_channel *ichans, + bool enableExtendedChannels) +{ + struct ath9k_channel *chan; + int ret; + u32 channelFlags = 0; + u8 privFlags = 0; + + if (!(c_lo <= c && c <= c_hi)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: c %u out of range [%u..%u]\n", + __func__, c, c_lo, c_hi); + return false; + } + if ((fband->channelBW == CHANNEL_HALF_BW) && + !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_HALFRATE)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: Skipping %u half rate channel\n", + __func__, c); + return false; + } + + if ((fband->channelBW == CHANNEL_QUARTER_BW) && + !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_CHAN_QUARTERRATE)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: Skipping %u quarter rate channel\n", + __func__, c); + return false; + } + + if (((c + fband->channelSep) / 2) > (maxChan + HALF_MAXCHANBW)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: c %u > maxChan %u\n", + __func__, c, maxChan); + return false; + } + + if ((fband->usePassScan & IS_ECM_CHAN) && !enableExtendedChannels) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "Skipping ecm channel\n"); + return false; + } + + if ((rd->flags & NO_HOSTAP) && (ah->ah_opmode == ATH9K_M_HOSTAP)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "Skipping HOSTAP channel\n"); + return false; + } + + if (IS_HT40_MODE(cm->mode) && + !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_FCC_DFS_HT40)) && + (fband->useDfs) && + (rd->conformanceTestLimit != MKK)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "Skipping HT40 channel (en_fcc_dfs_ht40 = 0)\n"); + return false; + } + + if (IS_HT40_MODE(cm->mode) && + !(ath9k_regd_get_eeprom_reg_ext_bits(ah, + 
REG_EXT_JAPAN_NONDFS_HT40)) && + !(fband->useDfs) && (rd->conformanceTestLimit == MKK)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "Skipping HT40 channel (en_jap_ht40 = 0)\n"); + return false; + } + + if (IS_HT40_MODE(cm->mode) && + !(ath9k_regd_get_eeprom_reg_ext_bits(ah, REG_EXT_JAPAN_DFS_HT40)) && + (fband->useDfs) && + (rd->conformanceTestLimit == MKK)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "Skipping HT40 channel (en_jap_dfs_ht40 = 0)\n"); + return false; + } + + /* Calculate channel flags */ + + channelFlags = cm->flags; + + switch (fband->channelBW) { + case CHANNEL_HALF_BW: + channelFlags |= CHANNEL_HALF; + break; + case CHANNEL_QUARTER_BW: + channelFlags |= CHANNEL_QUARTER; + break; + } + + if (fband->usePassScan & rd->pscan) + channelFlags |= CHANNEL_PASSIVE; + else + channelFlags &= ~CHANNEL_PASSIVE; + if (fband->useDfs & rd->dfsMask) + privFlags = CHANNEL_DFS; + else + privFlags = 0; + if (rd->flags & LIMIT_FRAME_4MS) + privFlags |= CHANNEL_4MS_LIMIT; + if (privFlags & CHANNEL_DFS) + privFlags |= CHANNEL_DISALLOW_ADHOC; + if (rd->flags & ADHOC_PER_11D) + privFlags |= CHANNEL_PER_11D_ADHOC; + + if (channelFlags & CHANNEL_PASSIVE) { + if ((c < 2412) || (c > 2462)) { + if (rd5GHz.regDmnEnum == MKK1 || + rd5GHz.regDmnEnum == MKK2) { + u32 regcap = ah->ah_caps.reg_cap; + if (!(regcap & + (AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN | + AR_EEPROM_EEREGCAP_EN_KK_U2 | + AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) && + isUNII1OddChan(c)) { + channelFlags &= ~CHANNEL_PASSIVE; + } else { + privFlags |= CHANNEL_DISALLOW_ADHOC; + } + } else { + privFlags |= CHANNEL_DISALLOW_ADHOC; + } + } + } + + if ((cm->mode == ATH9K_MODE_11A) || + (cm->mode == ATH9K_MODE_11NA_HT20) || + (cm->mode == ATH9K_MODE_11NA_HT40PLUS) || + (cm->mode == ATH9K_MODE_11NA_HT40MINUS)) { + if (rd->flags & (ADHOC_NO_11A | DISALLOW_ADHOC_11A)) + privFlags |= CHANNEL_DISALLOW_ADHOC; + } + + /* Fill in channel details */ + + ret = ath9k_regd_is_chan_present(ah, c); + if (ret == -1) { + chan = &ah->ah_channels[pos]; + chan->channel = c; + chan->maxRegTxPower = fband->powerDfs; + chan->antennaMax = fband->antennaMax; + chan->regDmnFlags = rd->flags; + chan->maxTxPower = AR5416_MAX_RATE_POWER; + chan->minTxPower = AR5416_MAX_RATE_POWER; + chan->channelFlags = channelFlags; + chan->privFlags = privFlags; + } else { + chan = &ah->ah_channels[ret]; + chan->channelFlags |= channelFlags; + chan->privFlags |= privFlags; + } + + /* Set CTLs */ + + if ((cm->flags & CHANNEL_ALL) == CHANNEL_A) + chan->conformanceTestLimit[0] = ctl; + else if ((cm->flags & CHANNEL_ALL) == CHANNEL_B) + chan->conformanceTestLimit[1] = ctl; + else if ((cm->flags & CHANNEL_ALL) == CHANNEL_G) + chan->conformanceTestLimit[2] = ctl; + + return (ret == -1) ? 
true : false; +} + +static bool ath9k_regd_japan_check(struct ath_hal *ah, + int b, + struct regDomain *rd5GHz) +{ + bool skipband = false; + int i; + u32 regcap; + + for (i = 0; i < ARRAY_SIZE(j_bandcheck); i++) { + if (j_bandcheck[i].freqbandbit == b) { + regcap = ah->ah_caps.reg_cap; + if ((j_bandcheck[i].eepromflagtocheck & regcap) == 0) { + skipband = true; + } else if ((regcap & AR_EEPROM_EEREGCAP_EN_KK_U2) || + (regcap & AR_EEPROM_EEREGCAP_EN_KK_MIDBAND)) { + rd5GHz->dfsMask |= DFS_MKK4; + rd5GHz->pscan |= PSCAN_MKK3; + } + break; + } + } + + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: Skipping %d freq band\n", + __func__, j_bandcheck[i].freqbandbit); + + return skipband; +} + +bool +ath9k_regd_init_channels(struct ath_hal *ah, + u32 maxchans, + u32 *nchans, u8 *regclassids, + u32 maxregids, u32 *nregids, u16 cc, + bool enableOutdoor, + bool enableExtendedChannels) +{ + u16 maxChan = 7000; + struct country_code_to_enum_rd *country = NULL; + struct regDomain rd5GHz, rd2GHz; + const struct cmode *cm; + struct ath9k_channel *ichans = &ah->ah_channels[0]; + int next = 0, b; + u8 ctl; + int regdmn; + u16 chanSep; + unsigned long *modes_avail; + DECLARE_BITMAP(modes_allowed, ATH9K_MODE_MAX); + + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: cc %u %s %s\n", + __func__, cc, + enableOutdoor ? "Enable outdoor" : "", + enableExtendedChannels ? "Enable ecm" : ""); + + if (!ath9k_regd_is_ccode_valid(ah, cc)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: invalid country code %d\n", __func__, cc); + return false; + } + + if (!ath9k_regd_is_eeprom_valid(ah)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: invalid EEPROM contents\n", __func__); + return false; + } + + ah->ah_countryCode = ath9k_regd_get_default_country(ah); + + if (ah->ah_countryCode == CTRY_DEFAULT) { + ah->ah_countryCode = cc & COUNTRY_CODE_MASK; + if ((ah->ah_countryCode == CTRY_DEFAULT) && + (ath9k_regd_get_eepromRD(ah) == CTRY_DEFAULT)) { + ah->ah_countryCode = CTRY_UNITED_STATES; + } + } + +#ifdef AH_SUPPORT_11D + if (ah->ah_countryCode == CTRY_DEFAULT) { + regdmn = ath9k_regd_get_eepromRD(ah); + country = NULL; + } else { +#endif + country = ath9k_regd_find_country(ah->ah_countryCode); + if (country == NULL) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "Country is NULL!!!!, cc= %d\n", + ah->ah_countryCode); + return false; + } else { + regdmn = country->regDmnEnum; +#ifdef AH_SUPPORT_11D + if (((ath9k_regd_get_eepromRD(ah) & + WORLD_SKU_MASK) == WORLD_SKU_PREFIX) && + (cc == CTRY_UNITED_STATES)) { + if (!isWwrSKU_NoMidband(ah) + && ath9k_regd_is_fcc_midband_supported(ah)) + regdmn = FCC3_FCCA; + else + regdmn = FCC1_FCCA; + } +#endif + } +#ifdef AH_SUPPORT_11D + } +#endif + if (!ath9k_regd_get_wmode_regdomain(ah, + regdmn, + ~CHANNEL_2GHZ, + &rd5GHz)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: couldn't find unitary " + "5GHz reg domain for country %u\n", + __func__, ah->ah_countryCode); + return false; + } + if (!ath9k_regd_get_wmode_regdomain(ah, + regdmn, + CHANNEL_2GHZ, + &rd2GHz)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: couldn't find unitary 2GHz " + "reg domain for country %u\n", + __func__, ah->ah_countryCode); + return false; + } + + if (!isWwrSKU(ah) && ((rd5GHz.regDmnEnum == FCC1) || + (rd5GHz.regDmnEnum == FCC2))) { + if (ath9k_regd_is_fcc_midband_supported(ah)) { + if (!ath9k_regd_get_wmode_regdomain(ah, + FCC3_FCCA, + ~CHANNEL_2GHZ, + &rd5GHz)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: couldn't find unitary 5GHz " + "reg domain for country %u\n", + __func__, ah->ah_countryCode); + 
return false; + } + } + } + + if (country == NULL) { + modes_avail = ah->ah_caps.wireless_modes; + } else { + ath9k_regd_get_wmodes_nreg(ah, country, &rd5GHz, modes_allowed); + modes_avail = modes_allowed; + + if (!enableOutdoor) + maxChan = country->outdoorChanStart; + } + + next = 0; + + if (maxchans > ARRAY_SIZE(ah->ah_channels)) + maxchans = ARRAY_SIZE(ah->ah_channels); + + for (cm = modes; cm < &modes[ARRAY_SIZE(modes)]; cm++) { + u16 c, c_hi, c_lo; + u64 *channelBM = NULL; + struct regDomain *rd = NULL; + struct RegDmnFreqBand *fband = NULL, *freqs; + int8_t low_adj = 0, hi_adj = 0; + + if (!test_bit(cm->mode, modes_avail)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: !avail mode %d flags 0x%x\n", + __func__, cm->mode, cm->flags); + continue; + } + if (!ath9k_get_channel_edges(ah, cm->flags, &c_lo, &c_hi)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: channels 0x%x not supported " + "by hardware\n", + __func__, cm->flags); + continue; + } + + switch (cm->mode) { + case ATH9K_MODE_11A: + case ATH9K_MODE_11NA_HT20: + case ATH9K_MODE_11NA_HT40PLUS: + case ATH9K_MODE_11NA_HT40MINUS: + rd = &rd5GHz; + channelBM = rd->chan11a; + freqs = ®Dmn5GhzFreq[0]; + ctl = rd->conformanceTestLimit; + break; + case ATH9K_MODE_11B: + rd = &rd2GHz; + channelBM = rd->chan11b; + freqs = ®Dmn2GhzFreq[0]; + ctl = rd->conformanceTestLimit | CTL_11B; + break; + case ATH9K_MODE_11G: + case ATH9K_MODE_11NG_HT20: + case ATH9K_MODE_11NG_HT40PLUS: + case ATH9K_MODE_11NG_HT40MINUS: + rd = &rd2GHz; + channelBM = rd->chan11g; + freqs = ®Dmn2Ghz11gFreq[0]; + ctl = rd->conformanceTestLimit | CTL_11G; + break; + default: + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: Unknown HAL mode 0x%x\n", __func__, + cm->mode); + continue; + } + + if (ath9k_regd_is_chan_bm_zero(channelBM)) + continue; + + if ((cm->mode == ATH9K_MODE_11NA_HT40PLUS) || + (cm->mode == ATH9K_MODE_11NG_HT40PLUS)) { + hi_adj = -20; + } + + if ((cm->mode == ATH9K_MODE_11NA_HT40MINUS) || + (cm->mode == ATH9K_MODE_11NG_HT40MINUS)) { + low_adj = 20; + } + + /* XXX: Add a helper here instead */ + for (b = 0; b < 64 * BMLEN; b++) { + if (ath9k_regd_is_bit_set(b, channelBM)) { + fband = &freqs[b]; + if (rd5GHz.regDmnEnum == MKK1 + || rd5GHz.regDmnEnum == MKK2) { + if (ath9k_regd_japan_check(ah, + b, + &rd5GHz)) + continue; + } + + ath9k_regd_add_reg_classid(regclassids, + maxregids, + nregids, + fband-> + regClassId); + + if (IS_HT40_MODE(cm->mode) && (rd == &rd5GHz)) { + chanSep = 40; + if (fband->lowChannel == 5280) + low_adj += 20; + + if (fband->lowChannel == 5170) + continue; + } else + chanSep = fband->channelSep; + + for (c = fband->lowChannel + low_adj; + ((c <= (fband->highChannel + hi_adj)) && + (c >= (fband->lowChannel + low_adj))); + c += chanSep) { + if (next >= maxchans) { + DPRINTF(ah->ah_sc, + ATH_DBG_REGULATORY, + "%s: too many channels " + "for channel table\n", + __func__); + goto done; + } + if (ath9k_regd_add_channel(ah, + c, c_lo, c_hi, + maxChan, ctl, + next, + rd5GHz, + fband, rd, cm, + ichans, + enableExtendedChannels)) + next++; + } + if (IS_HT40_MODE(cm->mode) && + (fband->lowChannel == 5280)) { + low_adj -= 20; + } + } + } + } +done: + if (next != 0) { + int i; + + if (next > ARRAY_SIZE(ah->ah_channels)) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: too many channels %u; truncating to %u\n", + __func__, next, + (int) ARRAY_SIZE(ah->ah_channels)); + next = ARRAY_SIZE(ah->ah_channels); + } +#ifdef ATH_NF_PER_CHAN + ath9k_regd_init_rf_buffer(ichans, next); +#endif + ath9k_regd_sort(ichans, next, + sizeof(struct 
ath9k_channel), + ath9k_regd_chansort); + + ah->ah_nchan = next; + + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "Channel list:\n"); + for (i = 0; i < next; i++) { + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "chan: %d flags: 0x%x\n", + ah->ah_channels[i].channel, + ah->ah_channels[i].channelFlags); + } + } + *nchans = next; + + ah->ah_countryCode = ah->ah_countryCode; + + ah->ah_currentRDInUse = regdmn; + ah->ah_currentRD5G = rd5GHz.regDmnEnum; + ah->ah_currentRD2G = rd2GHz.regDmnEnum; + if (country == NULL) { + ah->ah_iso[0] = 0; + ah->ah_iso[1] = 0; + } else { + ah->ah_iso[0] = country->isoName[0]; + ah->ah_iso[1] = country->isoName[1]; + } + + return next != 0; +} + +struct ath9k_channel* +ath9k_regd_check_channel(struct ath_hal *ah, + const struct ath9k_channel *c) +{ + struct ath9k_channel *base, *cc; + + int flags = c->channelFlags & CHAN_FLAGS; + int n, lim; + + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: channel %u/0x%x (0x%x) requested\n", __func__, + c->channel, c->channelFlags, flags); + + cc = ah->ah_curchan; + if (cc != NULL && cc->channel == c->channel && + (cc->channelFlags & CHAN_FLAGS) == flags) { + if ((cc->privFlags & CHANNEL_INTERFERENCE) && + (cc->privFlags & CHANNEL_DFS)) + return NULL; + else + return cc; + } + + base = ah->ah_channels; + n = ah->ah_nchan; + + for (lim = n; lim != 0; lim >>= 1) { + int d; + cc = &base[lim >> 1]; + d = c->channel - cc->channel; + if (d == 0) { + if ((cc->channelFlags & CHAN_FLAGS) == flags) { + if ((cc->privFlags & CHANNEL_INTERFERENCE) && + (cc->privFlags & CHANNEL_DFS)) + return NULL; + else + return cc; + } + d = flags - (cc->channelFlags & CHAN_FLAGS); + } + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, + "%s: channel %u/0x%x d %d\n", __func__, + cc->channel, cc->channelFlags, d); + if (d > 0) { + base = cc + 1; + lim--; + } + } + DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, "%s: no match for %u/0x%x\n", + __func__, c->channel, c->channelFlags); + return NULL; +} + +u32 +ath9k_regd_get_antenna_allowed(struct ath_hal *ah, + struct ath9k_channel *chan) +{ + struct ath9k_channel *ichan = NULL; + + ichan = ath9k_regd_check_channel(ah, chan); + if (!ichan) + return 0; + + return ichan->antennaMax; +} + +u32 ath9k_regd_get_ctl(struct ath_hal *ah, struct ath9k_channel *chan) +{ + u32 ctl = NO_CTL; + struct ath9k_channel *ichan; + + if (ah->ah_countryCode == CTRY_DEFAULT && isWwrSKU(ah)) { + if (IS_CHAN_B(chan)) + ctl = SD_NO_CTL | CTL_11B; + else if (IS_CHAN_G(chan)) + ctl = SD_NO_CTL | CTL_11G; + else + ctl = SD_NO_CTL | CTL_11A; + } else { + ichan = ath9k_regd_check_channel(ah, chan); + if (ichan != NULL) { + /* FIXME */ + if (IS_CHAN_A(ichan)) + ctl = ichan->conformanceTestLimit[0]; + else if (IS_CHAN_B(ichan)) + ctl = ichan->conformanceTestLimit[1]; + else if (IS_CHAN_G(ichan)) + ctl = ichan->conformanceTestLimit[2]; + + if (IS_CHAN_G(chan) && (ctl & 0xf) == CTL_11B) + ctl = (ctl & ~0xf) | CTL_11G; + } + } + return ctl; +} + +void ath9k_regd_get_current_country(struct ath_hal *ah, + struct ath9k_country_entry *ctry) +{ + u16 rd = ath9k_regd_get_eepromRD(ah); + + ctry->isMultidomain = false; + if (rd == CTRY_DEFAULT) + ctry->isMultidomain = true; + else if (!(rd & COUNTRY_ERD_FLAG)) + ctry->isMultidomain = isWwrSKU(ah); + + ctry->countryCode = ah->ah_countryCode; + ctry->regDmnEnum = ah->ah_currentRD; + ctry->regDmn5G = ah->ah_currentRD5G; + ctry->regDmn2G = ah->ah_currentRD2G; + ctry->iso[0] = ah->ah_iso[0]; + ctry->iso[1] = ah->ah_iso[1]; + ctry->iso[2] = ah->ah_iso[2]; +} diff --git a/drivers/net/wireless/ath9k/regd.h 
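
The regd.c code above sorts the assembled channel table with ath9k_regd_chansort() (frequency first, then the masked channel flags) so that ath9k_regd_check_channel() can locate an entry by bisection. The fragment below is a minimal standalone sketch of that ordering and lookup, not code from the patch; struct chan, chan_cmp() and chan_find() are illustrative stand-ins for ath9k_channel, the CHAN_FLAGS comparison and the driver's search loop.

    /* Standalone sketch: sort channels by (freq, flags), then bisect. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct chan {
            uint16_t freq;   /* MHz, stands in for ath9k_channel.channel */
            uint32_t flags;  /* stands in for channelFlags & CHAN_FLAGS */
    };

    static int chan_cmp(const void *a, const void *b)
    {
            const struct chan *ca = a, *cb = b;

            if (ca->freq != cb->freq)
                    return ca->freq - cb->freq;
            return (int)(ca->flags - cb->flags);
    }

    static const struct chan *chan_find(const struct chan *base, int n,
                                        uint16_t freq, uint32_t flags)
    {
            int lo = 0, hi = n - 1;

            while (lo <= hi) {
                    int mid = (lo + hi) / 2;

                    if (base[mid].freq == freq && base[mid].flags == flags)
                            return &base[mid];
                    if (base[mid].freq < freq ||
                        (base[mid].freq == freq && base[mid].flags < flags))
                            lo = mid + 1;
                    else
                            hi = mid - 1;
            }
            return NULL;
    }

    int main(void)
    {
            struct chan tbl[] = {
                    { 5180, 1 }, { 2412, 2 }, { 5180, 2 }, { 2437, 2 },
            };
            int n = sizeof(tbl) / sizeof(tbl[0]);
            const struct chan *c;

            qsort(tbl, n, sizeof(tbl[0]), chan_cmp);
            c = chan_find(tbl, n, 5180, 2);
            printf("found: %s\n", c ? "yes" : "no");
            return 0;
    }
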
b/drivers/net/wireless/ath9k/regd.h new file mode 100644 index 0000000..0ecd344 --- /dev/null +++ b/drivers/net/wireless/ath9k/regd.h @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef REGD_H +#define REGD_H + +#include "ath9k.h" + +#define BMLEN 2 +#define BMZERO {(u64) 0, (u64) 0} + +#define BM(_fa, _fb, _fc, _fd, _fe, _ff, _fg, _fh, _fi, _fj, _fk, _fl) \ + {((((_fa >= 0) && (_fa < 64)) ? \ + (((u64) 1) << _fa) : (u64) 0) | \ + (((_fb >= 0) && (_fb < 64)) ? \ + (((u64) 1) << _fb) : (u64) 0) | \ + (((_fc >= 0) && (_fc < 64)) ? \ + (((u64) 1) << _fc) : (u64) 0) | \ + (((_fd >= 0) && (_fd < 64)) ? \ + (((u64) 1) << _fd) : (u64) 0) | \ + (((_fe >= 0) && (_fe < 64)) ? \ + (((u64) 1) << _fe) : (u64) 0) | \ + (((_ff >= 0) && (_ff < 64)) ? \ + (((u64) 1) << _ff) : (u64) 0) | \ + (((_fg >= 0) && (_fg < 64)) ? \ + (((u64) 1) << _fg) : (u64) 0) | \ + (((_fh >= 0) && (_fh < 64)) ? \ + (((u64) 1) << _fh) : (u64) 0) | \ + (((_fi >= 0) && (_fi < 64)) ? \ + (((u64) 1) << _fi) : (u64) 0) | \ + (((_fj >= 0) && (_fj < 64)) ? \ + (((u64) 1) << _fj) : (u64) 0) | \ + (((_fk >= 0) && (_fk < 64)) ? \ + (((u64) 1) << _fk) : (u64) 0) | \ + (((_fl >= 0) && (_fl < 64)) ? \ + (((u64) 1) << _fl) : (u64) 0) | \ + ((((_fa > 63) && (_fa < 128)) ? \ + (((u64) 1) << (_fa - 64)) : (u64) 0) | \ + (((_fb > 63) && (_fb < 128)) ? \ + (((u64) 1) << (_fb - 64)) : (u64) 0) | \ + (((_fc > 63) && (_fc < 128)) ? \ + (((u64) 1) << (_fc - 64)) : (u64) 0) | \ + (((_fd > 63) && (_fd < 128)) ? \ + (((u64) 1) << (_fd - 64)) : (u64) 0) | \ + (((_fe > 63) && (_fe < 128)) ? \ + (((u64) 1) << (_fe - 64)) : (u64) 0) | \ + (((_ff > 63) && (_ff < 128)) ? \ + (((u64) 1) << (_ff - 64)) : (u64) 0) | \ + (((_fg > 63) && (_fg < 128)) ? \ + (((u64) 1) << (_fg - 64)) : (u64) 0) | \ + (((_fh > 63) && (_fh < 128)) ? \ + (((u64) 1) << (_fh - 64)) : (u64) 0) | \ + (((_fi > 63) && (_fi < 128)) ? \ + (((u64) 1) << (_fi - 64)) : (u64) 0) | \ + (((_fj > 63) && (_fj < 128)) ? \ + (((u64) 1) << (_fj - 64)) : (u64) 0) | \ + (((_fk > 63) && (_fk < 128)) ? \ + (((u64) 1) << (_fk - 64)) : (u64) 0) | \ + (((_fl > 63) && (_fl < 128)) ? 
\ + (((u64) 1) << (_fl - 64)) : (u64) 0)))} + +#define DEF_REGDMN FCC1_FCCA +#define DEF_DMN_5 FCC1 +#define DEF_DMN_2 FCCA +#define COUNTRY_ERD_FLAG 0x8000 +#define WORLDWIDE_ROAMING_FLAG 0x4000 +#define SUPER_DOMAIN_MASK 0x0fff +#define COUNTRY_CODE_MASK 0x3fff +#define CF_INTERFERENCE (CHANNEL_CW_INT | CHANNEL_RADAR_INT) +#define CHANNEL_14 (2484) +#define IS_11G_CH14(_ch,_cf) \ + (((_ch) == CHANNEL_14) && ((_cf) == CHANNEL_G)) + +#define NO_PSCAN 0x0ULL +#define PSCAN_FCC 0x0000000000000001ULL +#define PSCAN_FCC_T 0x0000000000000002ULL +#define PSCAN_ETSI 0x0000000000000004ULL +#define PSCAN_MKK1 0x0000000000000008ULL +#define PSCAN_MKK2 0x0000000000000010ULL +#define PSCAN_MKKA 0x0000000000000020ULL +#define PSCAN_MKKA_G 0x0000000000000040ULL +#define PSCAN_ETSIA 0x0000000000000080ULL +#define PSCAN_ETSIB 0x0000000000000100ULL +#define PSCAN_ETSIC 0x0000000000000200ULL +#define PSCAN_WWR 0x0000000000000400ULL +#define PSCAN_MKKA1 0x0000000000000800ULL +#define PSCAN_MKKA1_G 0x0000000000001000ULL +#define PSCAN_MKKA2 0x0000000000002000ULL +#define PSCAN_MKKA2_G 0x0000000000004000ULL +#define PSCAN_MKK3 0x0000000000008000ULL +#define PSCAN_DEFER 0x7FFFFFFFFFFFFFFFULL +#define IS_ECM_CHAN 0x8000000000000000ULL + +#define isWwrSKU(_ah) \ + (((ath9k_regd_get_eepromRD((_ah)) & WORLD_SKU_MASK) == \ + WORLD_SKU_PREFIX) || \ + (ath9k_regd_get_eepromRD(_ah) == WORLD)) + +#define isWwrSKU_NoMidband(_ah) \ + ((ath9k_regd_get_eepromRD((_ah)) == WOR3_WORLD) || \ + (ath9k_regd_get_eepromRD(_ah) == WOR4_WORLD) || \ + (ath9k_regd_get_eepromRD(_ah) == WOR5_ETSIC)) + +#define isUNII1OddChan(ch) \ + ((ch == 5170) || (ch == 5190) || (ch == 5210) || (ch == 5230)) + +#define IS_HT40_MODE(_mode) \ + (((_mode == ATH9K_MODE_11NA_HT40PLUS || \ + _mode == ATH9K_MODE_11NG_HT40PLUS || \ + _mode == ATH9K_MODE_11NA_HT40MINUS || \ + _mode == ATH9K_MODE_11NG_HT40MINUS) ? 
true : false)) + +#define CHAN_FLAGS (CHANNEL_ALL|CHANNEL_HALF|CHANNEL_QUARTER) + +#define swap(_a, _b, _size) { \ + u8 *s = _b; \ + int i = _size; \ + do { \ + u8 tmp = *_a; \ + *_a++ = *s; \ + *s++ = tmp; \ + } while (--i); \ + _a -= _size; \ +} + + +#define HALF_MAXCHANBW 10 + +#define MULTI_DOMAIN_MASK 0xFF00 + +#define WORLD_SKU_MASK 0x00F0 +#define WORLD_SKU_PREFIX 0x0060 + +#define CHANNEL_HALF_BW 10 +#define CHANNEL_QUARTER_BW 5 + +typedef int ath_hal_cmp_t(const void *, const void *); + +struct reg_dmn_pair_mapping { + u16 regDmnEnum; + u16 regDmn5GHz; + u16 regDmn2GHz; + u32 flags5GHz; + u32 flags2GHz; + u64 pscanMask; + u16 singleCC; +}; + +struct ccmap { + char isoName[3]; + u16 countryCode; +}; + +struct country_code_to_enum_rd { + u16 countryCode; + u16 regDmnEnum; + const char *isoName; + const char *name; + bool allow11g; + bool allow11aTurbo; + bool allow11gTurbo; + bool allow11ng20; + bool allow11ng40; + bool allow11na20; + bool allow11na40; + u16 outdoorChanStart; +}; + +struct RegDmnFreqBand { + u16 lowChannel; + u16 highChannel; + u8 powerDfs; + u8 antennaMax; + u8 channelBW; + u8 channelSep; + u64 useDfs; + u64 usePassScan; + u8 regClassId; +}; + +struct regDomain { + u16 regDmnEnum; + u8 conformanceTestLimit; + u64 dfsMask; + u64 pscan; + u32 flags; + u64 chan11a[BMLEN]; + u64 chan11a_turbo[BMLEN]; + u64 chan11a_dyn_turbo[BMLEN]; + u64 chan11b[BMLEN]; + u64 chan11g[BMLEN]; + u64 chan11g_turbo[BMLEN]; +}; + +struct cmode { + u32 mode; + u32 flags; +}; + +#define YES true +#define NO false + +struct japan_bandcheck { + u16 freqbandbit; + u32 eepromflagtocheck; +}; + +struct common_mode_power { + u16 lchan; + u16 hchan; + u8 pwrlvl; +}; + +enum CountryCode { + CTRY_ALBANIA = 8, + CTRY_ALGERIA = 12, + CTRY_ARGENTINA = 32, + CTRY_ARMENIA = 51, + CTRY_AUSTRALIA = 36, + CTRY_AUSTRIA = 40, + CTRY_AZERBAIJAN = 31, + CTRY_BAHRAIN = 48, + CTRY_BELARUS = 112, + CTRY_BELGIUM = 56, + CTRY_BELIZE = 84, + CTRY_BOLIVIA = 68, + CTRY_BOSNIA_HERZ = 70, + CTRY_BRAZIL = 76, + CTRY_BRUNEI_DARUSSALAM = 96, + CTRY_BULGARIA = 100, + CTRY_CANADA = 124, + CTRY_CHILE = 152, + CTRY_CHINA = 156, + CTRY_COLOMBIA = 170, + CTRY_COSTA_RICA = 188, + CTRY_CROATIA = 191, + CTRY_CYPRUS = 196, + CTRY_CZECH = 203, + CTRY_DENMARK = 208, + CTRY_DOMINICAN_REPUBLIC = 214, + CTRY_ECUADOR = 218, + CTRY_EGYPT = 818, + CTRY_EL_SALVADOR = 222, + CTRY_ESTONIA = 233, + CTRY_FAEROE_ISLANDS = 234, + CTRY_FINLAND = 246, + CTRY_FRANCE = 250, + CTRY_GEORGIA = 268, + CTRY_GERMANY = 276, + CTRY_GREECE = 300, + CTRY_GUATEMALA = 320, + CTRY_HONDURAS = 340, + CTRY_HONG_KONG = 344, + CTRY_HUNGARY = 348, + CTRY_ICELAND = 352, + CTRY_INDIA = 356, + CTRY_INDONESIA = 360, + CTRY_IRAN = 364, + CTRY_IRAQ = 368, + CTRY_IRELAND = 372, + CTRY_ISRAEL = 376, + CTRY_ITALY = 380, + CTRY_JAMAICA = 388, + CTRY_JAPAN = 392, + CTRY_JORDAN = 400, + CTRY_KAZAKHSTAN = 398, + CTRY_KENYA = 404, + CTRY_KOREA_NORTH = 408, + CTRY_KOREA_ROC = 410, + CTRY_KOREA_ROC2 = 411, + CTRY_KOREA_ROC3 = 412, + CTRY_KUWAIT = 414, + CTRY_LATVIA = 428, + CTRY_LEBANON = 422, + CTRY_LIBYA = 434, + CTRY_LIECHTENSTEIN = 438, + CTRY_LITHUANIA = 440, + CTRY_LUXEMBOURG = 442, + CTRY_MACAU = 446, + CTRY_MACEDONIA = 807, + CTRY_MALAYSIA = 458, + CTRY_MALTA = 470, + CTRY_MEXICO = 484, + CTRY_MONACO = 492, + CTRY_MOROCCO = 504, + CTRY_NEPAL = 524, + CTRY_NETHERLANDS = 528, + CTRY_NETHERLANDS_ANTILLES = 530, + CTRY_NEW_ZEALAND = 554, + CTRY_NICARAGUA = 558, + CTRY_NORWAY = 578, + CTRY_OMAN = 512, + CTRY_PAKISTAN = 586, + CTRY_PANAMA = 591, + CTRY_PAPUA_NEW_GUINEA = 598, + 
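
The BM() macro earlier in regd.h packs up to twelve frequency-band indices into a two-element u64 bitmap (BMLEN == 2), and ath9k_regd_is_chan_bm_zero()/ath9k_regd_is_bit_set() in regd.c walk those maps when building the channel list. The snippet below is a small runtime sketch of the same bit layout, assuming only plain stdint types; bm_set(), bm_test() and SKETCH_BMLEN are illustrative names and are not part of the driver.

    /* Standalone sketch: set and test bits in a 2 x u64 band bitmap. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SKETCH_BMLEN 2

    static void bm_set(uint64_t bm[SKETCH_BMLEN], int bit)
    {
            bm[bit / 64] |= (uint64_t)1 << (bit % 64);
    }

    static bool bm_test(const uint64_t bm[SKETCH_BMLEN], int bit)
    {
            return bm[bit / 64] & ((uint64_t)1 << (bit % 64));
    }

    int main(void)
    {
            uint64_t bm[SKETCH_BMLEN] = { 0, 0 };

            bm_set(bm, 3);    /* lands in bm[0] */
            bm_set(bm, 70);   /* lands in bm[1] */

            assert(bm_test(bm, 3));
            assert(bm_test(bm, 70));
            assert(!bm_test(bm, 4));
            return 0;
    }
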
CTRY_PARAGUAY = 600, + CTRY_PERU = 604, + CTRY_PHILIPPINES = 608, + CTRY_POLAND = 616, + CTRY_PORTUGAL = 620, + CTRY_PUERTO_RICO = 630, + CTRY_QATAR = 634, + CTRY_ROMANIA = 642, + CTRY_RUSSIA = 643, + CTRY_SAUDI_ARABIA = 682, + CTRY_SERBIA_MONTENEGRO = 891, + CTRY_SINGAPORE = 702, + CTRY_SLOVAKIA = 703, + CTRY_SLOVENIA = 705, + CTRY_SOUTH_AFRICA = 710, + CTRY_SPAIN = 724, + CTRY_SRI_LANKA = 144, + CTRY_SWEDEN = 752, + CTRY_SWITZERLAND = 756, + CTRY_SYRIA = 760, + CTRY_TAIWAN = 158, + CTRY_THAILAND = 764, + CTRY_TRINIDAD_Y_TOBAGO = 780, + CTRY_TUNISIA = 788, + CTRY_TURKEY = 792, + CTRY_UAE = 784, + CTRY_UKRAINE = 804, + CTRY_UNITED_KINGDOM = 826, + CTRY_UNITED_STATES = 840, + CTRY_UNITED_STATES_FCC49 = 842, + CTRY_URUGUAY = 858, + CTRY_UZBEKISTAN = 860, + CTRY_VENEZUELA = 862, + CTRY_VIET_NAM = 704, + CTRY_YEMEN = 887, + CTRY_ZIMBABWE = 716, + CTRY_JAPAN1 = 393, + CTRY_JAPAN2 = 394, + CTRY_JAPAN3 = 395, + CTRY_JAPAN4 = 396, + CTRY_JAPAN5 = 397, + CTRY_JAPAN6 = 4006, + CTRY_JAPAN7 = 4007, + CTRY_JAPAN8 = 4008, + CTRY_JAPAN9 = 4009, + CTRY_JAPAN10 = 4010, + CTRY_JAPAN11 = 4011, + CTRY_JAPAN12 = 4012, + CTRY_JAPAN13 = 4013, + CTRY_JAPAN14 = 4014, + CTRY_JAPAN15 = 4015, + CTRY_JAPAN16 = 4016, + CTRY_JAPAN17 = 4017, + CTRY_JAPAN18 = 4018, + CTRY_JAPAN19 = 4019, + CTRY_JAPAN20 = 4020, + CTRY_JAPAN21 = 4021, + CTRY_JAPAN22 = 4022, + CTRY_JAPAN23 = 4023, + CTRY_JAPAN24 = 4024, + CTRY_JAPAN25 = 4025, + CTRY_JAPAN26 = 4026, + CTRY_JAPAN27 = 4027, + CTRY_JAPAN28 = 4028, + CTRY_JAPAN29 = 4029, + CTRY_JAPAN30 = 4030, + CTRY_JAPAN31 = 4031, + CTRY_JAPAN32 = 4032, + CTRY_JAPAN33 = 4033, + CTRY_JAPAN34 = 4034, + CTRY_JAPAN35 = 4035, + CTRY_JAPAN36 = 4036, + CTRY_JAPAN37 = 4037, + CTRY_JAPAN38 = 4038, + CTRY_JAPAN39 = 4039, + CTRY_JAPAN40 = 4040, + CTRY_JAPAN41 = 4041, + CTRY_JAPAN42 = 4042, + CTRY_JAPAN43 = 4043, + CTRY_JAPAN44 = 4044, + CTRY_JAPAN45 = 4045, + CTRY_JAPAN46 = 4046, + CTRY_JAPAN47 = 4047, + CTRY_JAPAN48 = 4048, + CTRY_JAPAN49 = 4049, + CTRY_JAPAN50 = 4050, + CTRY_JAPAN51 = 4051, + CTRY_JAPAN52 = 4052, + CTRY_JAPAN53 = 4053, + CTRY_JAPAN54 = 4054, + CTRY_JAPAN55 = 4055, + CTRY_JAPAN56 = 4056, + CTRY_JAPAN57 = 4057, + CTRY_JAPAN58 = 4058, + CTRY_JAPAN59 = 4059, + CTRY_AUSTRALIA2 = 5000, + CTRY_CANADA2 = 5001, + CTRY_BELGIUM2 = 5002 +}; + +void ath9k_regd_get_current_country(struct ath_hal *ah, + struct ath9k_country_entry *ctry); + +#endif diff --git a/drivers/net/wireless/ath9k/regd_common.h b/drivers/net/wireless/ath9k/regd_common.h new file mode 100644 index 0000000..9112c03 --- /dev/null +++ b/drivers/net/wireless/ath9k/regd_common.h @@ -0,0 +1,1915 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef REGD_COMMON_H +#define REGD_COMMON_H + +enum EnumRd { + NO_ENUMRD = 0x00, + NULL1_WORLD = 0x03, + NULL1_ETSIB = 0x07, + NULL1_ETSIC = 0x08, + FCC1_FCCA = 0x10, + FCC1_WORLD = 0x11, + FCC4_FCCA = 0x12, + FCC5_FCCA = 0x13, + FCC6_FCCA = 0x14, + + FCC2_FCCA = 0x20, + FCC2_WORLD = 0x21, + FCC2_ETSIC = 0x22, + FCC6_WORLD = 0x23, + FRANCE_RES = 0x31, + FCC3_FCCA = 0x3A, + FCC3_WORLD = 0x3B, + + ETSI1_WORLD = 0x37, + ETSI3_ETSIA = 0x32, + ETSI2_WORLD = 0x35, + ETSI3_WORLD = 0x36, + ETSI4_WORLD = 0x30, + ETSI4_ETSIC = 0x38, + ETSI5_WORLD = 0x39, + ETSI6_WORLD = 0x34, + ETSI_RESERVED = 0x33, + + MKK1_MKKA = 0x40, + MKK1_MKKB = 0x41, + APL4_WORLD = 0x42, + MKK2_MKKA = 0x43, + APL_RESERVED = 0x44, + APL2_WORLD = 0x45, + APL2_APLC = 0x46, + APL3_WORLD = 0x47, + MKK1_FCCA = 0x48, + APL2_APLD = 0x49, + MKK1_MKKA1 = 0x4A, + MKK1_MKKA2 = 0x4B, + MKK1_MKKC = 0x4C, + + APL3_FCCA = 0x50, + APL1_WORLD = 0x52, + APL1_FCCA = 0x53, + APL1_APLA = 0x54, + APL1_ETSIC = 0x55, + APL2_ETSIC = 0x56, + APL5_WORLD = 0x58, + APL6_WORLD = 0x5B, + APL7_FCCA = 0x5C, + APL8_WORLD = 0x5D, + APL9_WORLD = 0x5E, + + WOR0_WORLD = 0x60, + WOR1_WORLD = 0x61, + WOR2_WORLD = 0x62, + WOR3_WORLD = 0x63, + WOR4_WORLD = 0x64, + WOR5_ETSIC = 0x65, + + WOR01_WORLD = 0x66, + WOR02_WORLD = 0x67, + EU1_WORLD = 0x68, + + WOR9_WORLD = 0x69, + WORA_WORLD = 0x6A, + WORB_WORLD = 0x6B, + + MKK3_MKKB = 0x80, + MKK3_MKKA2 = 0x81, + MKK3_MKKC = 0x82, + + MKK4_MKKB = 0x83, + MKK4_MKKA2 = 0x84, + MKK4_MKKC = 0x85, + + MKK5_MKKB = 0x86, + MKK5_MKKA2 = 0x87, + MKK5_MKKC = 0x88, + + MKK6_MKKB = 0x89, + MKK6_MKKA2 = 0x8A, + MKK6_MKKC = 0x8B, + + MKK7_MKKB = 0x8C, + MKK7_MKKA2 = 0x8D, + MKK7_MKKC = 0x8E, + + MKK8_MKKB = 0x8F, + MKK8_MKKA2 = 0x90, + MKK8_MKKC = 0x91, + + MKK14_MKKA1 = 0x92, + MKK15_MKKA1 = 0x93, + + MKK10_FCCA = 0xD0, + MKK10_MKKA1 = 0xD1, + MKK10_MKKC = 0xD2, + MKK10_MKKA2 = 0xD3, + + MKK11_MKKA = 0xD4, + MKK11_FCCA = 0xD5, + MKK11_MKKA1 = 0xD6, + MKK11_MKKC = 0xD7, + MKK11_MKKA2 = 0xD8, + + MKK12_MKKA = 0xD9, + MKK12_FCCA = 0xDA, + MKK12_MKKA1 = 0xDB, + MKK12_MKKC = 0xDC, + MKK12_MKKA2 = 0xDD, + + MKK13_MKKB = 0xDE, + + MKK3_MKKA = 0xF0, + MKK3_MKKA1 = 0xF1, + MKK3_FCCA = 0xF2, + MKK4_MKKA = 0xF3, + MKK4_MKKA1 = 0xF4, + MKK4_FCCA = 0xF5, + MKK9_MKKA = 0xF6, + MKK10_MKKA = 0xF7, + MKK6_MKKA1 = 0xF8, + MKK6_FCCA = 0xF9, + MKK7_MKKA1 = 0xFA, + MKK7_FCCA = 0xFB, + MKK9_FCCA = 0xFC, + MKK9_MKKA1 = 0xFD, + MKK9_MKKC = 0xFE, + MKK9_MKKA2 = 0xFF, + + APL1 = 0x0150, + APL2 = 0x0250, + APL3 = 0x0350, + APL4 = 0x0450, + APL5 = 0x0550, + APL6 = 0x0650, + APL7 = 0x0750, + APL8 = 0x0850, + APL9 = 0x0950, + APL10 = 0x1050, + + ETSI1 = 0x0130, + ETSI2 = 0x0230, + ETSI3 = 0x0330, + ETSI4 = 0x0430, + ETSI5 = 0x0530, + ETSI6 = 0x0630, + ETSIA = 0x0A30, + ETSIB = 0x0B30, + ETSIC = 0x0C30, + + FCC1 = 0x0110, + FCC2 = 0x0120, + FCC3 = 0x0160, + FCC4 = 0x0165, + FCC5 = 0x0510, + FCC6 = 0x0610, + FCCA = 0x0A10, + + APLD = 0x0D50, + + MKK1 = 0x0140, + MKK2 = 0x0240, + MKK3 = 0x0340, + MKK4 = 0x0440, + MKK5 = 0x0540, + MKK6 = 0x0640, + MKK7 = 0x0740, + MKK8 = 0x0840, + MKK9 = 0x0940, + MKK10 = 0x0B40, + MKK11 = 0x1140, + MKK12 = 0x1240, + MKK13 = 0x0C40, + MKK14 = 0x1440, + MKK15 = 0x1540, + MKKA = 0x0A40, + MKKC = 0x0A50, + + NULL1 = 0x0198, + WORLD = 0x0199, + DEBUG_REG_DMN = 0x01ff, +}; + +enum { + FCC = 0x10, + MKK = 0x40, + ETSI = 0x30, +}; + +enum { + NO_REQ = 0x00000000, + DISALLOW_ADHOC_11A = 0x00000001, + DISALLOW_ADHOC_11A_TURB = 0x00000002, + NEED_NFC = 0x00000004, + + ADHOC_PER_11D = 0x00000008, + ADHOC_NO_11A = 0x00000010, + + 
PUBLIC_SAFETY_DOMAIN = 0x00000020, + LIMIT_FRAME_4MS = 0x00000040, + + NO_HOSTAP = 0x00000080, + + REQ_MASK = 0x000000FF, +}; + +#define REG_DOMAIN_2GHZ_MASK (REQ_MASK & \ + (!(ADHOC_NO_11A | DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB))) +#define REG_DOMAIN_5GHZ_MASK REQ_MASK + +static struct reg_dmn_pair_mapping regDomainPairs[] = { + {NO_ENUMRD, DEBUG_REG_DMN, DEBUG_REG_DMN, NO_REQ, NO_REQ, + PSCAN_DEFER, 0}, + {NULL1_WORLD, NULL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {NULL1_ETSIB, NULL1, ETSIB, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {NULL1_ETSIC, NULL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + + {FCC2_FCCA, FCC2, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {FCC2_WORLD, FCC2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {FCC2_ETSIC, FCC2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {FCC3_FCCA, FCC3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {FCC3_WORLD, FCC3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {FCC4_FCCA, FCC4, FCCA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {FCC5_FCCA, FCC5, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {FCC6_FCCA, FCC6, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {FCC6_WORLD, FCC6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + + {ETSI1_WORLD, ETSI1, WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {ETSI2_WORLD, ETSI2, WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {ETSI3_WORLD, ETSI3, WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {ETSI4_WORLD, ETSI4, WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {ETSI5_WORLD, ETSI5, WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {ETSI6_WORLD, ETSI6, WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + + {ETSI3_ETSIA, ETSI3, WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {FRANCE_RES, ETSI3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + + {FCC1_WORLD, FCC1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {FCC1_FCCA, FCC1, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL1_WORLD, APL1, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL2_WORLD, APL2, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL3_WORLD, APL3, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL4_WORLD, APL4, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL5_WORLD, APL5, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL6_WORLD, APL6, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL8_WORLD, APL8, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL9_WORLD, APL9, WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + + {APL3_FCCA, APL3, FCCA, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL1_ETSIC, APL1, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL2_ETSIC, APL2, ETSIC, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {APL2_APLD, APL2, APLD, NO_REQ, NO_REQ, PSCAN_DEFER,}, + + {MKK1_MKKA, MKK1, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKKA, CTRY_JAPAN}, + {MKK1_MKKB, MKK1, MKKA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | + LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G, + CTRY_JAPAN1}, + {MKK1_FCCA, MKK1, FCCA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1, CTRY_JAPAN2}, + {MKK1_MKKA1, MKK1, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN4}, + {MKK1_MKKA2, MKK1, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, 
CTRY_JAPAN5}, + {MKK1_MKKC, MKK1, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1, CTRY_JAPAN6}, + + {MKK2_MKKA, MKK2, MKKA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | + LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKKA | PSCAN_MKKA_G, + CTRY_JAPAN3}, + + {MKK3_MKKA, MKK3, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKKA, CTRY_JAPAN25}, + {MKK3_MKKB, MKK3, MKKA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | + LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKKA | PSCAN_MKKA_G, + CTRY_JAPAN7}, + {MKK3_MKKA1, MKK3, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN26}, + {MKK3_MKKA2, MKK3, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN8}, + {MKK3_MKKC, MKK3, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + NO_PSCAN, CTRY_JAPAN9}, + {MKK3_FCCA, MKK3, FCCA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + NO_PSCAN, CTRY_JAPAN27}, + + {MKK4_MKKA, MKK4, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3, CTRY_JAPAN36}, + {MKK4_MKKB, MKK4, MKKA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | + LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, + CTRY_JAPAN10}, + {MKK4_MKKA1, MKK4, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN28}, + {MKK4_MKKA2, MKK4, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN11}, + {MKK4_MKKC, MKK4, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3, CTRY_JAPAN12}, + {MKK4_FCCA, MKK4, FCCA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3, CTRY_JAPAN29}, + + {MKK5_MKKB, MKK5, MKKA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | + LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, + CTRY_JAPAN13}, + {MKK5_MKKA2, MKK5, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN14}, + {MKK5_MKKC, MKK5, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3, CTRY_JAPAN15}, + + {MKK6_MKKB, MKK6, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKKA | PSCAN_MKKA_G, CTRY_JAPAN16}, + {MKK6_MKKA1, MKK6, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN30}, + {MKK6_MKKA2, MKK6, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN17}, + {MKK6_MKKC, MKK6, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1, CTRY_JAPAN18}, + {MKK6_FCCA, MKK6, FCCA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + NO_PSCAN, CTRY_JAPAN31}, + + {MKK7_MKKB, MKK7, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, + CTRY_JAPAN19}, + {MKK7_MKKA1, MKK7, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN32}, + {MKK7_MKKA2, MKK7, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, + 
CTRY_JAPAN20}, + {MKK7_MKKC, MKK7, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN21}, + {MKK7_FCCA, MKK7, FCCA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN33}, + + {MKK8_MKKB, MKK8, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, + CTRY_JAPAN22}, + {MKK8_MKKA2, MKK8, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, + CTRY_JAPAN23}, + {MKK8_MKKC, MKK8, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN24}, + + {MKK9_MKKA, MKK9, MKKA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | + LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK2 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, + CTRY_JAPAN34}, + {MKK9_FCCA, MKK9, FCCA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + NO_PSCAN, CTRY_JAPAN37}, + {MKK9_MKKA1, MKK9, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN38}, + {MKK9_MKKA2, MKK9, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN40}, + {MKK9_MKKC, MKK9, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + NO_PSCAN, CTRY_JAPAN39}, + + {MKK10_MKKA, MKK10, MKKA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | + LIMIT_FRAME_4MS, NEED_NFC, PSCAN_MKK2 | PSCAN_MKK3, CTRY_JAPAN35}, + {MKK10_FCCA, MKK10, FCCA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + NO_PSCAN, CTRY_JAPAN41}, + {MKK10_MKKA1, MKK10, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN42}, + {MKK10_MKKA2, MKK10, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN44}, + {MKK10_MKKC, MKK10, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + NO_PSCAN, CTRY_JAPAN43}, + + {MKK11_MKKA, MKK11, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3, CTRY_JAPAN45}, + {MKK11_FCCA, MKK11, FCCA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3, CTRY_JAPAN46}, + {MKK11_MKKA1, MKK11, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN47}, + {MKK11_MKKA2, MKK11, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, CTRY_JAPAN49}, + {MKK11_MKKC, MKK11, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK3, CTRY_JAPAN48}, + + {MKK12_MKKA, MKK12, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN50}, + {MKK12_FCCA, MKK12, FCCA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3, CTRY_JAPAN51}, + {MKK12_MKKA1, MKK12, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA1 | PSCAN_MKKA1_G, + CTRY_JAPAN52}, + {MKK12_MKKA2, MKK12, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA2 | PSCAN_MKKA2_G, + CTRY_JAPAN54}, + {MKK12_MKKC, MKK12, MKKC, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3, 
CTRY_JAPAN53}, + + {MKK13_MKKB, MKK13, MKKA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB | NEED_NFC | + LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKK3 | PSCAN_MKKA | PSCAN_MKKA_G, + CTRY_JAPAN57}, + + {MKK14_MKKA1, MKK14, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN58}, + {MKK15_MKKA1, MKK15, MKKA, + DISALLOW_ADHOC_11A_TURB | NEED_NFC | LIMIT_FRAME_4MS, NEED_NFC, + PSCAN_MKK1 | PSCAN_MKKA1 | PSCAN_MKKA1_G, CTRY_JAPAN59}, + + {WOR0_WORLD, WOR0_WORLD, WOR0_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, + 0}, + {WOR1_WORLD, WOR1_WORLD, WOR1_WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {WOR2_WORLD, WOR2_WORLD, WOR2_WORLD, DISALLOW_ADHOC_11A_TURB, + NO_REQ, PSCAN_DEFER, 0}, + {WOR3_WORLD, WOR3_WORLD, WOR3_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, + 0}, + {WOR4_WORLD, WOR4_WORLD, WOR4_WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {WOR5_ETSIC, WOR5_ETSIC, WOR5_ETSIC, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {WOR01_WORLD, WOR01_WORLD, WOR01_WORLD, NO_REQ, NO_REQ, + PSCAN_DEFER, 0}, + {WOR02_WORLD, WOR02_WORLD, WOR02_WORLD, NO_REQ, NO_REQ, + PSCAN_DEFER, 0}, + {EU1_WORLD, EU1_WORLD, EU1_WORLD, NO_REQ, NO_REQ, PSCAN_DEFER, 0}, + {WOR9_WORLD, WOR9_WORLD, WOR9_WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {WORA_WORLD, WORA_WORLD, WORA_WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, + {WORB_WORLD, WORB_WORLD, WORB_WORLD, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, NO_REQ, PSCAN_DEFER, + 0}, +}; + +#define NO_INTERSECT_REQ 0xFFFFFFFF +#define NO_UNION_REQ 0 + +static struct country_code_to_enum_rd allCountries[] = { + {CTRY_DEBUG, NO_ENUMRD, "DB", "DEBUG", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_DEFAULT, DEF_REGDMN, "NA", "NO_COUNTRY_SET", YES, YES, YES, + YES, YES, YES, YES, 7000}, + {CTRY_ALBANIA, NULL1_WORLD, "AL", "ALBANIA", YES, NO, YES, YES, NO, + NO, NO, 7000}, + {CTRY_ALGERIA, NULL1_WORLD, "DZ", "ALGERIA", YES, NO, YES, YES, NO, + NO, NO, 7000}, + {CTRY_ARGENTINA, APL3_WORLD, "AR", "ARGENTINA", YES, NO, NO, YES, + NO, YES, NO, 7000}, + {CTRY_ARMENIA, ETSI4_WORLD, "AM", "ARMENIA", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_AUSTRALIA, FCC2_WORLD, "AU", "AUSTRALIA", YES, YES, YES, YES, + YES, YES, YES, 7000}, + {CTRY_AUSTRALIA2, FCC6_WORLD, "AU", "AUSTRALIA2", YES, YES, YES, + YES, YES, YES, YES, 7000}, + {CTRY_AUSTRIA, ETSI1_WORLD, "AT", "AUSTRIA", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ", "AZERBAIJAN", YES, YES, YES, + YES, YES, YES, YES, 7000}, + {CTRY_BAHRAIN, APL6_WORLD, "BH", "BAHRAIN", YES, NO, YES, YES, YES, + YES, NO, 7000}, + {CTRY_BELARUS, ETSI1_WORLD, "BY", "BELARUS", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_BELGIUM, ETSI1_WORLD, "BE", "BELGIUM", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_BELGIUM2, ETSI4_WORLD, "BL", "BELGIUM", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_BELIZE, APL1_ETSIC, "BZ", "BELIZE", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_BOLIVIA, APL1_ETSIC, "BO", "BOLVIA", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA", "BOSNIA_HERZGOWINA", YES, NO, + YES, YES, YES, YES, NO, 7000}, + {CTRY_BRAZIL, FCC3_WORLD, "BR", "BRAZIL", YES, NO, NO, YES, NO, + YES, NO, 7000}, + {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN", "BRUNEI DARUSSALAM", + YES, YES, YES, YES, YES, YES, YES, 7000}, + {CTRY_BULGARIA, 
ETSI6_WORLD, "BG", "BULGARIA", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_CANADA, FCC2_FCCA, "CA", "CANADA", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_CANADA2, FCC6_FCCA, "CA", "CANADA2", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_CHILE, APL6_WORLD, "CL", "CHILE", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_CHINA, APL1_WORLD, "CN", "CHINA", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_COLOMBIA, FCC1_FCCA, "CO", "COLOMBIA", YES, NO, YES, YES, + YES, YES, NO, 7000}, + {CTRY_COSTA_RICA, FCC1_WORLD, "CR", "COSTA RICA", YES, NO, YES, + YES, YES, YES, NO, 7000}, + {CTRY_CROATIA, ETSI3_WORLD, "HR", "CROATIA", YES, NO, YES, YES, + YES, YES, NO, 7000}, + {CTRY_CYPRUS, ETSI1_WORLD, "CY", "CYPRUS", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_CZECH, ETSI3_WORLD, "CZ", "CZECH REPUBLIC", YES, NO, YES, + YES, YES, YES, YES, 7000}, + {CTRY_DENMARK, ETSI1_WORLD, "DK", "DENMARK", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_DOMINICAN_REPUBLIC, FCC1_FCCA, "DO", "DOMINICAN REPUBLIC", + YES, YES, YES, YES, YES, YES, YES, 7000}, + {CTRY_ECUADOR, FCC1_WORLD, "EC", "ECUADOR", YES, NO, NO, YES, YES, + YES, NO, 7000}, + {CTRY_EGYPT, ETSI3_WORLD, "EG", "EGYPT", YES, NO, YES, YES, YES, + YES, NO, 7000}, + {CTRY_EL_SALVADOR, FCC1_WORLD, "SV", "EL SALVADOR", YES, NO, YES, + YES, YES, YES, NO, 7000}, + {CTRY_ESTONIA, ETSI1_WORLD, "EE", "ESTONIA", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_FINLAND, ETSI1_WORLD, "FI", "FINLAND", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_FRANCE, ETSI1_WORLD, "FR", "FRANCE", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_GEORGIA, ETSI4_WORLD, "GE", "GEORGIA", YES, YES, YES, YES, + YES, YES, YES, 7000}, + {CTRY_GERMANY, ETSI1_WORLD, "DE", "GERMANY", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_GREECE, ETSI1_WORLD, "GR", "GREECE", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_GUATEMALA, FCC1_FCCA, "GT", "GUATEMALA", YES, YES, YES, YES, + YES, YES, YES, 7000}, + {CTRY_HONDURAS, NULL1_WORLD, "HN", "HONDURAS", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_HONG_KONG, FCC2_WORLD, "HK", "HONG KONG", YES, YES, YES, YES, + YES, YES, YES, 7000}, + {CTRY_HUNGARY, ETSI1_WORLD, "HU", "HUNGARY", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_ICELAND, ETSI1_WORLD, "IS", "ICELAND", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_INDIA, APL6_WORLD, "IN", "INDIA", YES, NO, YES, YES, YES, + YES, NO, 7000}, + {CTRY_INDONESIA, APL1_WORLD, "ID", "INDONESIA", YES, NO, YES, YES, + YES, YES, NO, 7000}, + {CTRY_IRAN, APL1_WORLD, "IR", "IRAN", YES, YES, YES, YES, YES, YES, + YES, 7000}, + {CTRY_IRELAND, ETSI1_WORLD, "IE", "IRELAND", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_ISRAEL, NULL1_WORLD, "IL", "ISRAEL", YES, NO, YES, YES, YES, + NO, NO, 7000}, + {CTRY_ITALY, ETSI1_WORLD, "IT", "ITALY", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_JAMAICA, ETSI1_WORLD, "JM", "JAMAICA", YES, NO, YES, YES, + YES, YES, YES, 7000}, + + {CTRY_JAPAN, MKK1_MKKA, "JP", "JAPAN", YES, NO, NO, YES, YES, YES, + YES, 7000}, + {CTRY_JAPAN1, MKK1_MKKB, "JP", "JAPAN1", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN2, MKK1_FCCA, "JP", "JAPAN2", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN3, MKK2_MKKA, "JP", "JAPAN3", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN4, MKK1_MKKA1, "JP", "JAPAN4", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN5, MKK1_MKKA2, "JP", "JAPAN5", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN6, MKK1_MKKC, "JP", "JAPAN6", YES, NO, NO, YES, YES, + YES, YES, 7000}, + + 
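
Each allCountries[] row above binds a numeric country code and two-letter ISO name to a regdomain pair plus the per-mode allow flags declared in struct country_code_to_enum_rd; ath9k_regd_find_country() in regd.c scans this table linearly. Below is a trimmed standalone sketch of that lookup, not driver code; struct mini_country and find_country() are illustrative, and only the three sample rows (US, JP, DE with their pairs FCC3_FCCA, MKK1_MKKA, ETSI1_WORLD) are taken from the tables above.

    /* Standalone sketch: country code -> regdomain pair lookup. */
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct mini_country {
            uint16_t code;       /* e.g. 840 for CTRY_UNITED_STATES */
            const char *iso;     /* two-letter ISO name */
            uint16_t reg_pair;   /* regdomain pair enum value */
    };

    static const struct mini_country countries[] = {
            { 840, "US", 0x3A },  /* FCC3_FCCA   */
            { 392, "JP", 0x40 },  /* MKK1_MKKA   */
            { 276, "DE", 0x37 },  /* ETSI1_WORLD */
    };

    static const struct mini_country *find_country(uint16_t code)
    {
            size_t i;

            for (i = 0; i < sizeof(countries) / sizeof(countries[0]); i++)
                    if (countries[i].code == code)
                            return &countries[i];
            return NULL;
    }

    int main(void)
    {
            const struct mini_country *c = find_country(392);

            if (c)
                    printf("%s -> regdomain pair 0x%02x\n", c->iso, c->reg_pair);
            return 0;
    }
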
{CTRY_JAPAN7, MKK3_MKKB, "JP", "JAPAN7", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN8, MKK3_MKKA2, "JP", "JAPAN8", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN9, MKK3_MKKC, "JP", "JAPAN9", YES, NO, NO, YES, YES, + YES, YES, 7000}, + + {CTRY_JAPAN10, MKK4_MKKB, "JP", "JAPAN10", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN11, MKK4_MKKA2, "JP", "JAPAN11", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN12, MKK4_MKKC, "JP", "JAPAN12", YES, NO, NO, YES, YES, + YES, YES, 7000}, + + {CTRY_JAPAN13, MKK5_MKKB, "JP", "JAPAN13", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN14, MKK5_MKKA2, "JP", "JAPAN14", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN15, MKK5_MKKC, "JP", "JAPAN15", YES, NO, NO, YES, YES, + YES, YES, 7000}, + + {CTRY_JAPAN16, MKK6_MKKB, "JP", "JAPAN16", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN17, MKK6_MKKA2, "JP", "JAPAN17", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN18, MKK6_MKKC, "JP", "JAPAN18", YES, NO, NO, YES, YES, + YES, YES, 7000}, + + {CTRY_JAPAN19, MKK7_MKKB, "JP", "JAPAN19", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN20, MKK7_MKKA2, "JP", "JAPAN20", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN21, MKK7_MKKC, "JP", "JAPAN21", YES, NO, NO, YES, YES, + YES, YES, 7000}, + + {CTRY_JAPAN22, MKK8_MKKB, "JP", "JAPAN22", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN23, MKK8_MKKA2, "JP", "JAPAN23", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN24, MKK8_MKKC, "JP", "JAPAN24", YES, NO, NO, YES, YES, + YES, YES, 7000}, + + {CTRY_JAPAN25, MKK3_MKKA, "JP", "JAPAN25", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN26, MKK3_MKKA1, "JP", "JAPAN26", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN27, MKK3_FCCA, "JP", "JAPAN27", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN28, MKK4_MKKA1, "JP", "JAPAN28", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN29, MKK4_FCCA, "JP", "JAPAN29", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN30, MKK6_MKKA1, "JP", "JAPAN30", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN31, MKK6_FCCA, "JP", "JAPAN31", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN32, MKK7_MKKA1, "JP", "JAPAN32", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN33, MKK7_FCCA, "JP", "JAPAN33", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN34, MKK9_MKKA, "JP", "JAPAN34", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN35, MKK10_MKKA, "JP", "JAPAN35", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN36, MKK4_MKKA, "JP", "JAPAN36", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN37, MKK9_FCCA, "JP", "JAPAN37", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN38, MKK9_MKKA1, "JP", "JAPAN38", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN39, MKK9_MKKC, "JP", "JAPAN39", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN40, MKK9_MKKA2, "JP", "JAPAN40", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN41, MKK10_FCCA, "JP", "JAPAN41", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN42, MKK10_MKKA1, "JP", "JAPAN42", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN43, MKK10_MKKC, "JP", "JAPAN43", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN44, MKK10_MKKA2, "JP", "JAPAN44", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN45, MKK11_MKKA, "JP", "JAPAN45", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN46, MKK11_FCCA, "JP", "JAPAN46", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN47, MKK11_MKKA1, "JP", "JAPAN47", 
YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN48, MKK11_MKKC, "JP", "JAPAN48", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN49, MKK11_MKKA2, "JP", "JAPAN49", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN50, MKK12_MKKA, "JP", "JAPAN50", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN51, MKK12_FCCA, "JP", "JAPAN51", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN52, MKK12_MKKA1, "JP", "JAPAN52", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN53, MKK12_MKKC, "JP", "JAPAN53", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN54, MKK12_MKKA2, "JP", "JAPAN54", YES, NO, NO, YES, YES, + YES, YES, 7000}, + + {CTRY_JAPAN57, MKK13_MKKB, "JP", "JAPAN57", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN58, MKK14_MKKA1, "JP", "JAPAN58", YES, NO, NO, YES, YES, + YES, YES, 7000}, + {CTRY_JAPAN59, MKK15_MKKA1, "JP", "JAPAN59", YES, NO, NO, YES, YES, + YES, YES, 7000}, + + {CTRY_JORDAN, ETSI2_WORLD, "JO", "JORDAN", YES, NO, YES, YES, YES, + YES, NO, 7000}, + {CTRY_KAZAKHSTAN, NULL1_WORLD, "KZ", "KAZAKHSTAN", YES, NO, YES, + YES, YES, NO, NO, 7000}, + {CTRY_KOREA_NORTH, APL9_WORLD, "KP", "NORTH KOREA", YES, NO, NO, + YES, YES, YES, YES, 7000}, + {CTRY_KOREA_ROC, APL9_WORLD, "KR", "KOREA REPUBLIC", YES, NO, NO, + YES, NO, YES, NO, 7000}, + {CTRY_KOREA_ROC2, APL2_WORLD, "K2", "KOREA REPUBLIC2", YES, NO, NO, + YES, NO, YES, NO, 7000}, + {CTRY_KOREA_ROC3, APL9_WORLD, "K3", "KOREA REPUBLIC3", YES, NO, NO, + YES, NO, YES, NO, 7000}, + {CTRY_KUWAIT, NULL1_WORLD, "KW", "KUWAIT", YES, NO, YES, YES, YES, + NO, NO, 7000}, + {CTRY_LATVIA, ETSI1_WORLD, "LV", "LATVIA", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_LEBANON, NULL1_WORLD, "LB", "LEBANON", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI", "LIECHTENSTEIN", YES, NO, + YES, YES, YES, YES, YES, 7000}, + {CTRY_LITHUANIA, ETSI1_WORLD, "LT", "LITHUANIA", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_LUXEMBOURG, ETSI1_WORLD, "LU", "LUXEMBOURG", YES, NO, YES, + YES, YES, YES, YES, 7000}, + {CTRY_MACAU, FCC2_WORLD, "MO", "MACAU", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_MACEDONIA, NULL1_WORLD, "MK", "MACEDONIA", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_MALAYSIA, APL8_WORLD, "MY", "MALAYSIA", YES, NO, NO, YES, NO, + YES, NO, 7000}, + {CTRY_MALTA, ETSI1_WORLD, "MT", "MALTA", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_MEXICO, FCC1_FCCA, "MX", "MEXICO", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_MONACO, ETSI4_WORLD, "MC", "MONACO", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_MOROCCO, NULL1_WORLD, "MA", "MOROCCO", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_NEPAL, APL1_WORLD, "NP", "NEPAL", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_NETHERLANDS, ETSI1_WORLD, "NL", "NETHERLANDS", YES, NO, YES, + YES, YES, YES, YES, 7000}, + {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN", + "NETHERLANDS-ANTILLES", YES, NO, YES, YES, YES, YES, YES, 7000}, + {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ", "NEW ZEALAND", YES, NO, YES, + YES, YES, YES, NO, 7000}, + {CTRY_NORWAY, ETSI1_WORLD, "NO", "NORWAY", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_OMAN, APL6_WORLD, "OM", "OMAN", YES, NO, YES, YES, YES, YES, + NO, 7000}, + {CTRY_PAKISTAN, NULL1_WORLD, "PK", "PAKISTAN", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_PANAMA, FCC1_FCCA, "PA", "PANAMA", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG", "PAPUA NEW GUINEA", YES, + YES, YES, YES, YES, YES, YES, 7000}, + {CTRY_PERU, APL1_WORLD, "PE", 
"PERU", YES, NO, YES, YES, YES, YES, + NO, 7000}, + {CTRY_PHILIPPINES, APL1_WORLD, "PH", "PHILIPPINES", YES, YES, YES, + YES, YES, YES, YES, 7000}, + {CTRY_POLAND, ETSI1_WORLD, "PL", "POLAND", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_PORTUGAL, ETSI1_WORLD, "PT", "PORTUGAL", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_PUERTO_RICO, FCC1_FCCA, "PR", "PUERTO RICO", YES, YES, YES, + YES, YES, YES, YES, 7000}, + {CTRY_QATAR, NULL1_WORLD, "QA", "QATAR", YES, NO, YES, YES, YES, + NO, NO, 7000}, + {CTRY_ROMANIA, NULL1_WORLD, "RO", "ROMANIA", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_RUSSIA, NULL1_WORLD, "RU", "RUSSIA", YES, NO, YES, YES, YES, + NO, NO, 7000}, + {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA", "SAUDI ARABIA", YES, NO, + YES, YES, YES, NO, NO, 7000}, + {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS", "SERBIA & MONTENEGRO", + YES, NO, YES, YES, YES, YES, YES, 7000}, + {CTRY_SINGAPORE, APL6_WORLD, "SG", "SINGAPORE", YES, YES, YES, YES, + YES, YES, YES, 7000}, + {CTRY_SLOVAKIA, ETSI1_WORLD, "SK", "SLOVAK REPUBLIC", YES, NO, YES, + YES, YES, YES, YES, 7000}, + {CTRY_SLOVENIA, ETSI1_WORLD, "SI", "SLOVENIA", YES, NO, YES, YES, + YES, YES, YES, 7000}, + {CTRY_SOUTH_AFRICA, FCC3_WORLD, "ZA", "SOUTH AFRICA", YES, NO, YES, + YES, YES, YES, NO, 7000}, + {CTRY_SPAIN, ETSI1_WORLD, "ES", "SPAIN", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_SRI_LANKA, FCC3_WORLD, "LK", "SRI LANKA", YES, NO, YES, YES, + YES, YES, NO, 7000}, + {CTRY_SWEDEN, ETSI1_WORLD, "SE", "SWEDEN", YES, NO, YES, YES, YES, + YES, YES, 7000}, + {CTRY_SWITZERLAND, ETSI1_WORLD, "CH", "SWITZERLAND", YES, NO, YES, + YES, YES, YES, YES, 7000}, + {CTRY_SYRIA, NULL1_WORLD, "SY", "SYRIA", YES, NO, YES, YES, YES, + NO, NO, 7000}, + {CTRY_TAIWAN, APL3_FCCA, "TW", "TAIWAN", YES, YES, YES, YES, YES, + YES, YES, 7000}, + {CTRY_THAILAND, NULL1_WORLD, "TH", "THAILAND", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT", "TRINIDAD & TOBAGO", + YES, NO, YES, YES, YES, YES, NO, 7000}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN", "TUNISIA", YES, NO, YES, YES, + YES, YES, NO, 7000}, + {CTRY_TURKEY, ETSI3_WORLD, "TR", "TURKEY", YES, NO, YES, YES, YES, + YES, NO, 7000}, + {CTRY_UKRAINE, NULL1_WORLD, "UA", "UKRAINE", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_UAE, NULL1_WORLD, "AE", "UNITED ARAB EMIRATES", YES, NO, YES, + YES, YES, NO, NO, 7000}, + {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB", "UNITED KINGDOM", YES, NO, + YES, YES, YES, YES, YES, 7000}, + {CTRY_UNITED_STATES, FCC3_FCCA, "US", "UNITED STATES", YES, YES, + YES, YES, YES, YES, YES, 5825}, + {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS", + "UNITED STATES (PUBLIC SAFETY)", YES, YES, YES, YES, YES, YES, + YES, 7000}, + {CTRY_URUGUAY, APL2_WORLD, "UY", "URUGUAY", YES, NO, YES, YES, YES, + YES, NO, 7000}, + {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ", "UZBEKISTAN", YES, YES, YES, + YES, YES, YES, YES, 7000}, + {CTRY_VENEZUELA, APL2_ETSIC, "VE", "VENEZUELA", YES, NO, YES, YES, + YES, YES, NO, 7000}, + {CTRY_VIET_NAM, NULL1_WORLD, "VN", "VIET NAM", YES, NO, YES, YES, + YES, NO, NO, 7000}, + {CTRY_YEMEN, NULL1_WORLD, "YE", "YEMEN", YES, NO, YES, YES, YES, + NO, NO, 7000}, + {CTRY_ZIMBABWE, NULL1_WORLD, "ZW", "ZIMBABWE", YES, NO, YES, YES, + YES, NO, NO, 7000} +}; + +enum { + NO_DFS = 0x0000000000000000ULL, + DFS_FCC3 = 0x0000000000000001ULL, + DFS_ETSI = 0x0000000000000002ULL, + DFS_MKK4 = 0x0000000000000004ULL, +}; + +enum { + F1_4915_4925, + F1_4935_4945, + F1_4920_4980, + F1_4942_4987, + F1_4945_4985, + F1_4950_4980, + F1_5035_5040, + F1_5040_5080, + F1_5055_5055, + 
+ F1_5120_5240, + + F1_5170_5230, + F2_5170_5230, + + F1_5180_5240, + F2_5180_5240, + F3_5180_5240, + F4_5180_5240, + F5_5180_5240, + F6_5180_5240, + F7_5180_5240, + F8_5180_5240, + + F1_5180_5320, + + F1_5240_5280, + + F1_5260_5280, + + F1_5260_5320, + F2_5260_5320, + F3_5260_5320, + F4_5260_5320, + F5_5260_5320, + F6_5260_5320, + + F1_5260_5700, + + F1_5280_5320, + + F1_5500_5580, + + F1_5500_5620, + + F1_5500_5700, + F2_5500_5700, + F3_5500_5700, + F4_5500_5700, + F5_5500_5700, + + F1_5660_5700, + + F1_5745_5805, + F2_5745_5805, + F3_5745_5805, + + F1_5745_5825, + F2_5745_5825, + F3_5745_5825, + F4_5745_5825, + F5_5745_5825, + F6_5745_5825, + + W1_4920_4980, + W1_5040_5080, + W1_5170_5230, + W1_5180_5240, + W1_5260_5320, + W1_5745_5825, + W1_5500_5700, + A_DEMO_ALL_CHANNELS +}; + +static struct RegDmnFreqBand regDmn5GhzFreq[] = { + {4915, 4925, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16}, + {4935, 4945, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 16}, + {4920, 4980, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 7}, + {4942, 4987, 27, 6, 5, 5, NO_DFS, PSCAN_FCC, 0}, + {4945, 4985, 30, 6, 10, 5, NO_DFS, PSCAN_FCC, 0}, + {4950, 4980, 33, 6, 20, 5, NO_DFS, PSCAN_FCC, 0}, + {5035, 5040, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12}, + {5040, 5080, 23, 0, 20, 20, NO_DFS, PSCAN_MKK2, 2}, + {5055, 5055, 23, 0, 10, 5, NO_DFS, PSCAN_MKK2, 12}, + + {5120, 5240, 5, 6, 20, 20, NO_DFS, NO_PSCAN, 0}, + + {5170, 5230, 23, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1}, + {5170, 5230, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK2, 1}, + + {5180, 5240, 15, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0}, + {5180, 5240, 17, 6, 20, 20, NO_DFS, NO_PSCAN, 1}, + {5180, 5240, 18, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0}, + {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0}, + {5180, 5240, 23, 0, 20, 20, NO_DFS, PSCAN_FCC | PSCAN_ETSI, 0}, + {5180, 5240, 23, 6, 20, 20, NO_DFS, PSCAN_FCC, 0}, + {5180, 5240, 20, 0, 20, 20, NO_DFS, PSCAN_MKK1 | PSCAN_MKK3, 0}, + {5180, 5240, 23, 6, 20, 20, NO_DFS, NO_PSCAN, 0}, + + {5180, 5320, 20, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0}, + + {5240, 5280, 23, 0, 20, 20, DFS_FCC3, PSCAN_FCC | PSCAN_ETSI, 0}, + + {5260, 5280, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI, + PSCAN_FCC | PSCAN_ETSI, 0}, + + {5260, 5320, 18, 0, 20, 20, DFS_FCC3 | DFS_ETSI, + PSCAN_FCC | PSCAN_ETSI, 0}, + + {5260, 5320, 20, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4, + PSCAN_FCC | PSCAN_ETSI | PSCAN_MKK3, 0}, + + + {5260, 5320, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI, + PSCAN_FCC | PSCAN_ETSI, 2}, + {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 2}, + {5260, 5320, 23, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0}, + {5260, 5320, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0}, + + {5260, 5700, 5, 6, 20, 20, DFS_FCC3 | DFS_ETSI, NO_PSCAN, 0}, + + {5280, 5320, 17, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 0}, + + {5500, 5580, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0}, + + {5500, 5620, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0}, + + {5500, 5700, 20, 6, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_FCC, 4}, + {5500, 5700, 27, 0, 20, 20, DFS_FCC3 | DFS_ETSI, + PSCAN_FCC | PSCAN_ETSI, 0}, + {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, + PSCAN_FCC | PSCAN_ETSI, 0}, + {5500, 5700, 23, 0, 20, 20, DFS_FCC3 | DFS_ETSI | DFS_MKK4, + PSCAN_MKK3 | PSCAN_FCC, 0}, + {5500, 5700, 30, 6, 20, 20, DFS_ETSI, PSCAN_ETSI, 0}, + + {5660, 5700, 23, 6, 20, 20, DFS_FCC3, PSCAN_FCC, 0}, + + {5745, 5805, 23, 0, 20, 20, NO_DFS, NO_PSCAN, 0}, + {5745, 5805, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0}, + {5745, 5805, 30, 6, 20, 20, NO_DFS, PSCAN_ETSI, 0}, + {5745, 5825, 5, 6, 20, 20, NO_DFS, 
NO_PSCAN, 0}, + {5745, 5825, 17, 0, 20, 20, NO_DFS, NO_PSCAN, 0}, + {5745, 5825, 20, 0, 20, 20, NO_DFS, NO_PSCAN, 0}, + {5745, 5825, 30, 0, 20, 20, NO_DFS, NO_PSCAN, 0}, + {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 3}, + {5745, 5825, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0}, + + + {4920, 4980, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0}, + {5040, 5080, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0}, + {5170, 5230, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0}, + {5180, 5240, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0}, + {5260, 5320, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0}, + {5745, 5825, 30, 0, 20, 20, NO_DFS, PSCAN_WWR, 0}, + {5500, 5700, 30, 0, 20, 20, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, 0}, + {4920, 6100, 30, 6, 20, 20, NO_DFS, NO_PSCAN, 0}, +}; + +enum { + T1_5130_5650, + T1_5150_5670, + + T1_5200_5200, + T2_5200_5200, + T3_5200_5200, + T4_5200_5200, + T5_5200_5200, + T6_5200_5200, + T7_5200_5200, + T8_5200_5200, + + T1_5200_5280, + T2_5200_5280, + T3_5200_5280, + T4_5200_5280, + T5_5200_5280, + T6_5200_5280, + + T1_5200_5240, + T1_5210_5210, + T2_5210_5210, + T3_5210_5210, + T4_5210_5210, + T5_5210_5210, + T6_5210_5210, + T7_5210_5210, + T8_5210_5210, + T9_5210_5210, + T10_5210_5210, + T1_5240_5240, + + T1_5210_5250, + T1_5210_5290, + T2_5210_5290, + T3_5210_5290, + + T1_5280_5280, + T2_5280_5280, + T1_5290_5290, + T2_5290_5290, + T3_5290_5290, + T1_5250_5290, + T2_5250_5290, + T3_5250_5290, + T4_5250_5290, + + T1_5540_5660, + T2_5540_5660, + T3_5540_5660, + T1_5760_5800, + T2_5760_5800, + T3_5760_5800, + T4_5760_5800, + T5_5760_5800, + T6_5760_5800, + T7_5760_5800, + + T1_5765_5805, + T2_5765_5805, + T3_5765_5805, + T4_5765_5805, + T5_5765_5805, + T6_5765_5805, + T7_5765_5805, + T8_5765_5805, + T9_5765_5805, + + WT1_5210_5250, + WT1_5290_5290, + WT1_5540_5660, + WT1_5760_5800, +}; + +enum { + F1_2312_2372, + F2_2312_2372, + + F1_2412_2472, + F2_2412_2472, + F3_2412_2472, + + F1_2412_2462, + F2_2412_2462, + + F1_2432_2442, + + F1_2457_2472, + + F1_2467_2472, + + F1_2484_2484, + F2_2484_2484, + + F1_2512_2732, + + W1_2312_2372, + W1_2412_2412, + W1_2417_2432, + W1_2437_2442, + W1_2447_2457, + W1_2462_2462, + W1_2467_2467, + W2_2467_2467, + W1_2472_2472, + W2_2472_2472, + W1_2484_2484, + W2_2484_2484, +}; + +static struct RegDmnFreqBand regDmn2GhzFreq[] = { + {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0}, + {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA, 0}, + + {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0}, + + {2484, 2484, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2484, 2484, 20, 0, 20, 5, NO_DFS, + PSCAN_MKKA | PSCAN_MKKA1 | PSCAN_MKKA2, 0}, + + {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2412, 2412, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2417, 2432, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2437, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2447, 2457, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2462, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2467, 2467, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0}, + {2467, 2467, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0}, + {2472, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0}, + {2472, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | 
IS_ECM_CHAN, 0}, + {2484, 2484, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0}, + {2484, 2484, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0}, +}; + +enum { + G1_2312_2372, + G2_2312_2372, + + G1_2412_2472, + G2_2412_2472, + G3_2412_2472, + + G1_2412_2462, + G2_2412_2462, + + G1_2432_2442, + + G1_2457_2472, + + G1_2512_2732, + + G1_2467_2472, + + WG1_2312_2372, + WG1_2412_2462, + WG1_2467_2472, + WG2_2467_2472, + G_DEMO_ALL_CHANNELS +}; + +static struct RegDmnFreqBand regDmn2Ghz11gFreq[] = { + {2312, 2372, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2412, 2472, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2412, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0}, + {2412, 2472, 30, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2412, 2462, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2412, 2462, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA_G, 0}, + + {2432, 2442, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2457, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2512, 2732, 5, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, + + {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_MKKA2 | PSCAN_MKKA, 0}, + + {2312, 2372, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2412, 2462, 20, 0, 20, 5, NO_DFS, NO_PSCAN, 0}, + {2467, 2472, 20, 0, 20, 5, NO_DFS, PSCAN_WWR | IS_ECM_CHAN, 0}, + {2467, 2472, 20, 0, 20, 5, NO_DFS, NO_PSCAN | IS_ECM_CHAN, 0}, + {2312, 2732, 27, 6, 20, 5, NO_DFS, NO_PSCAN, 0}, +}; + +enum { + T1_2312_2372, + T1_2437_2437, + T2_2437_2437, + T3_2437_2437, + T1_2512_2732 +}; + +static struct regDomain regDomains[] = { + + {DEBUG_REG_DMN, FCC, DFS_FCC3, NO_PSCAN, NO_REQ, + BM(A_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5130_5650, T1_5150_5670, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5200_5240, T1_5280_5280, T1_5540_5660, T1_5765_5805, -1, -1, + -1, -1, -1, -1, -1, -1), + BM(F1_2312_2372, F1_2412_2472, F1_2484_2484, F1_2512_2732, -1, -1, + -1, -1, -1, -1, -1, -1), + BM(G_DEMO_ALL_CHANNELS, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_2312_2372, T1_2437_2437, T1_2512_2732, -1, -1, -1, -1, -1, + -1, -1, -1, -1)}, + + {APL1, FCC, NO_DFS, NO_PSCAN, NO_REQ, + BM(F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {APL2, FCC, NO_DFS, NO_PSCAN, NO_REQ, + BM(F1_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {APL3, FCC, NO_DFS, NO_PSCAN, NO_REQ, + BM(F1_5280_5320, F2_5745_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5290_5290, T1_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {APL4, FCC, NO_DFS, NO_PSCAN, NO_REQ, + BM(F4_5180_5240, F3_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5210_5210, T3_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5200_5200, T3_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + {APL5, FCC, NO_DFS, NO_PSCAN, NO_REQ, + BM(F2_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T4_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T4_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {APL6, ETSI, DFS_ETSI, PSCAN_FCC_T | PSCAN_FCC, NO_REQ, + BM(F4_5180_5240, F2_5260_5320, 
F3_5745_5825, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T2_5210_5210, T1_5250_5290, T1_5760_5800, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T1_5200_5280, T5_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + {APL7, ETSI, DFS_ETSI, PSCAN_ETSI, NO_REQ, + BM(F1_5280_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + {APL8, ETSI, NO_DFS, NO_PSCAN, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F6_5260_5320, F4_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T2_5290_5290, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5280_5280, T1_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + {APL9, ETSI, DFS_ETSI, PSCAN_ETSI, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F1_5180_5320, F1_5500_5620, F3_5745_5805, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + {APL10, ETSI, DFS_ETSI, PSCAN_ETSI, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F1_5180_5320, F5_5500_5700, F3_5745_5805, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T3_5290_5290, T5_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5540_5660, T6_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + {ETSI1, ETSI, DFS_ETSI, PSCAN_ETSI, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F4_5180_5240, F2_5260_5320, F2_5500_5700, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_5200_5280, T2_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + {ETSI2, ETSI, DFS_ETSI, PSCAN_ETSI, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F3_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {ETSI3, ETSI, DFS_ETSI, PSCAN_ETSI, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T1_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {ETSI4, ETSI, DFS_ETSI, PSCAN_ETSI, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F3_5180_5240, F1_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T2_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T3_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {ETSI5, ETSI, DFS_ETSI, PSCAN_ETSI, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F1_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T4_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T3_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {ETSI6, ETSI, DFS_ETSI, PSCAN_ETSI, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F5_5180_5240, F1_5260_5280, F3_5500_5700, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T1_5210_5250, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T4_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + 
BMZERO}, + + {FCC1, FCC, NO_DFS, NO_PSCAN, NO_REQ, + BM(F2_5180_5240, F4_5260_5320, F5_5745_5825, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T6_5210_5210, T2_5250_5290, T6_5760_5800, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T1_5200_5240, T2_5280_5280, T7_5765_5805, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {FCC2, FCC, NO_DFS, NO_PSCAN, NO_REQ, + BM(F6_5180_5240, F5_5260_5320, F6_5745_5825, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1, + -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {FCC3, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ, + BM(F2_5180_5240, F3_5260_5320, F1_5500_5700, F5_5745_5825, -1, -1, + -1, -1, -1, -1, -1, -1), + BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T4_5200_5200, T8_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + {FCC4, FCC, DFS_FCC3, PSCAN_FCC | PSCAN_FCC_T, NO_REQ, + BM(F1_4942_4987, F1_4945_4985, F1_4950_4980, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T8_5210_5210, T4_5250_5290, T7_5760_5800, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T1_5200_5240, T1_5280_5280, T9_5765_5805, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {FCC5, FCC, NO_DFS, NO_PSCAN, NO_REQ, + BM(F2_5180_5240, F6_5745_5825, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T6_5210_5210, T2_5760_5800, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T8_5200_5200, T7_5765_5805, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + {FCC6, FCC, DFS_FCC3, PSCAN_FCC, NO_REQ, + BM(F8_5180_5240, F5_5260_5320, F1_5500_5580, F1_5660_5700, + F6_5745_5825, -1, -1, -1, -1, -1, -1, -1), + BM(T7_5210_5210, T3_5250_5290, T2_5760_5800, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T7_5200_5200, T1_5240_5240, T2_5280_5280, T1_5765_5805, -1, -1, + -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {MKK1, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB, + BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1, + -1, -1, -1, -1, -1, -1), + BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + {MKK2, MKK, NO_DFS, PSCAN_MKK2, DISALLOW_ADHOC_11A_TURB, + BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040, + F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240, + F2_5260_5320, F4_5500_5700, -1, -1), + BM(T7_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T5_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK3, MKK, NO_DFS, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB, + BM(F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK4, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB, + BM(F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T10_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK5, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB, + BM(F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T5_5200_5280, 
T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK6, MKK, NO_DFS, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB, + BM(F2_5170_5230, F4_5180_5240, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T3_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T6_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK7, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3, + DISALLOW_ADHOC_11A_TURB, + BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T5_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK8, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3, + DISALLOW_ADHOC_11A_TURB, + BM(F1_5170_5230, F4_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1, + -1, -1, -1, -1, -1, -1), + BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T5_5200_5280, T3_5540_5660, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK9, MKK, NO_DFS, PSCAN_MKK2 | PSCAN_MKK3, + DISALLOW_ADHOC_11A_TURB, + BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040, + F1_5055_5055, F1_5040_5080, F4_5180_5240, -1, -1, -1, -1, -1), + BM(T9_5210_5210, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T1_5200_5200, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK10, MKK, DFS_MKK4, PSCAN_MKK2 | PSCAN_MKK3, + DISALLOW_ADHOC_11A_TURB, + BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040, + F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320, -1, -1, + -1, -1), + BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK11, MKK, DFS_MKK4, PSCAN_MKK3, DISALLOW_ADHOC_11A_TURB, + BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040, + F1_5055_5055, F1_5040_5080, F4_5180_5240, F2_5260_5320, + F4_5500_5700, -1, -1, -1), + BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK12, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3, + DISALLOW_ADHOC_11A_TURB, + BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040, + F1_5055_5055, F1_5040_5080, F1_5170_5230, F4_5180_5240, + F2_5260_5320, F4_5500_5700, -1, -1), + BM(T3_5210_5290, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T1_5200_5280, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO}, + + + {MKK13, MKK, DFS_MKK4, PSCAN_MKK1 | PSCAN_MKK3, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BM(F1_5170_5230, F7_5180_5240, F2_5260_5320, F4_5500_5700, -1, -1, + -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO, + BMZERO, + BMZERO}, + + + {MKK14, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB, + BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040, + F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240, -1, -1, + -1, -1), + BMZERO, + BMZERO, + BMZERO, + BMZERO, + BMZERO}, + + + {MKK15, MKK, DFS_MKK4, PSCAN_MKK1, DISALLOW_ADHOC_11A_TURB, + BM(F1_4915_4925, F1_4935_4945, F1_4920_4980, F1_5035_5040, + F1_5040_5080, F1_5055_5055, F1_5170_5230, F4_5180_5240, + F2_5260_5320, -1, -1, -1), + BMZERO, + BMZERO, + BMZERO, + BMZERO, + BMZERO}, + + + {APLD, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ, + BMZERO, + BMZERO, + BMZERO, + BM(F2_2312_2372, F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(G2_2312_2372, G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, + -1), + BMZERO}, + + {ETSIA, NO_CTL, NO_DFS, PSCAN_ETSIA, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BMZERO, + BMZERO, + BMZERO, + BM(F1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(G1_2457_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {ETSIB, ETSI, NO_DFS, PSCAN_ETSIB, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BMZERO, + BMZERO, + BMZERO, + BM(F1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(G1_2432_2442, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {ETSIC, ETSI, NO_DFS, PSCAN_ETSIC, + DISALLOW_ADHOC_11A | DISALLOW_ADHOC_11A_TURB, + BMZERO, + BMZERO, + BMZERO, + BM(F3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(G3_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {FCCA, FCC, NO_DFS, NO_PSCAN, NO_REQ, + BMZERO, + BMZERO, + BMZERO, + BM(F1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(G1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {MKKA, MKK, NO_DFS, + PSCAN_MKKA | PSCAN_MKKA_G | PSCAN_MKKA1 | PSCAN_MKKA1_G | + PSCAN_MKKA2 | PSCAN_MKKA2_G, DISALLOW_ADHOC_11A_TURB, + BMZERO, + BMZERO, + BMZERO, + BM(F2_2412_2462, F1_2467_2472, F2_2484_2484, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(G2_2412_2462, G1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1), + BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {MKKC, MKK, NO_DFS, NO_PSCAN, NO_REQ, + BMZERO, + BMZERO, + BMZERO, + BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WORLD, ETSI, NO_DFS, NO_PSCAN, NO_REQ, + BMZERO, + BMZERO, + BMZERO, + BM(F2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(G2_2412_2472, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T2_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WOR0_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D, + BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, + W1_5500_5700, -1, -1, -1, -1, -1, -1, -1), + BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, + -1, -1, -1, -1, -1), + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, + W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, + -1, -1), + BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WOR01_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, + ADHOC_PER_11D, + BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, + W1_5500_5700, -1, -1, -1, -1, -1, -1, -1), + BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, + -1, -1, -1, -1, -1), + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432, + W1_2447_2457, -1, -1, -1, -1, -1, -1, -1), + BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WOR02_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, + ADHOC_PER_11D, + BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, + W1_5500_5700, -1, -1, -1, -1, -1, -1, -1), + BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, + -1, -1, -1, -1, -1), + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, 
W1_2472_2472, + W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1), + BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {EU1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D, + BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, + W1_5500_5700, -1, -1, -1, -1, -1, -1, -1), + BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, + -1, -1, -1, -1, -1), + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W2_2472_2472, + W1_2417_2432, W1_2447_2457, W2_2467_2467, -1, -1, -1, -1, -1), + BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WOR1_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A, + BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, + W1_5500_5700, -1, -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, + W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, + -1, -1), + BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WOR2_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A, + BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, + W1_5500_5700, -1, -1, -1, -1, -1, -1, -1), + BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, + -1, -1, -1, -1, -1), + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, + W1_2417_2432, W1_2447_2457, W1_2467_2467, W1_2484_2484, -1, -1, + -1, -1), + BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WOR3_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_PER_11D, + BM(W1_5260_5320, W1_5180_5240, W1_5170_5230, W1_5745_5825, -1, -1, + -1, -1, -1, -1, -1, -1), + BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, + -1, -1, -1, -1, -1), + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, + W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1), + BM(WG1_2412_2462, WG2_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WOR4_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A, + BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, + -1, -1, -1, -1, -1), + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432, + W1_2447_2457, -1, -1, -1, -1, -1, -1, -1), + BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WOR5_ETSIC, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A, + BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BMZERO, + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, + W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1), + BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WOR9_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A, + BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1, + -1, -1, -1, -1, -1, -1), + BM(WT1_5210_5250, WT1_5290_5290, WT1_5760_5800, -1, -1, -1, -1, + -1, -1, -1, -1, -1), + BMZERO, + 
BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2417_2432, + W1_2447_2457, -1, -1, -1, -1, -1, -1, -1), + BM(WG1_2412_2462, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WORA_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A, + BM(W1_5260_5320, W1_5180_5240, W1_5745_5825, W1_5500_5700, -1, -1, + -1, -1, -1, -1, -1, -1), + BMZERO, + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, + W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1), + BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {WORB_WORLD, NO_CTL, DFS_FCC3 | DFS_ETSI, PSCAN_WWR, ADHOC_NO_11A, + BM(W1_5260_5320, W1_5180_5240, W1_5500_5700, -1, -1, -1, -1, -1, + -1, -1, -1, -1), + BMZERO, + BMZERO, + BM(W1_2412_2412, W1_2437_2442, W1_2462_2462, W1_2472_2472, + W1_2417_2432, W1_2447_2457, W1_2467_2467, -1, -1, -1, -1, -1), + BM(WG1_2412_2462, WG1_2467_2472, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1), + BM(T3_2437_2437, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1)}, + + {NULL1, NO_CTL, NO_DFS, NO_PSCAN, NO_REQ, + BMZERO, + BMZERO, + BMZERO, + BMZERO, + BMZERO, + BMZERO} +}; + +static const struct cmode modes[] = { + {ATH9K_MODE_11A, CHANNEL_A}, + {ATH9K_MODE_11B, CHANNEL_B}, + {ATH9K_MODE_11G, CHANNEL_G}, + {ATH9K_MODE_11NG_HT20, CHANNEL_G_HT20}, + {ATH9K_MODE_11NG_HT40PLUS, CHANNEL_G_HT40PLUS}, + {ATH9K_MODE_11NG_HT40MINUS, CHANNEL_G_HT40MINUS}, + {ATH9K_MODE_11NA_HT20, CHANNEL_A_HT20}, + {ATH9K_MODE_11NA_HT40PLUS, CHANNEL_A_HT40PLUS}, + {ATH9K_MODE_11NA_HT40MINUS, CHANNEL_A_HT40MINUS}, +}; + +static struct japan_bandcheck j_bandcheck[] = { + {F1_5170_5230, AR_EEPROM_EEREGCAP_EN_KK_U1_ODD}, + {F4_5180_5240, AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN}, + {F2_5260_5320, AR_EEPROM_EEREGCAP_EN_KK_U2}, + {F4_5500_5700, AR_EEPROM_EEREGCAP_EN_KK_MIDBAND} +}; + + +#endif diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c new file mode 100644 index 0000000..157f830 --- /dev/null +++ b/drivers/net/wireless/ath9k/xmit.c @@ -0,0 +1,2871 @@ +/* + * Copyright (c) 2008 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * Implementation of transmit path. 
+ */ + +#include "core.h" + +#define BITS_PER_BYTE 8 +#define OFDM_PLCP_BITS 22 +#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f) +#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) +#define L_STF 8 +#define L_LTF 8 +#define L_SIG 4 +#define HT_SIG 8 +#define HT_STF 4 +#define HT_LTF(_ns) (4 * (_ns)) +#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */ +#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */ +#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2) +#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18) + +#define OFDM_SIFS_TIME 16 + +static u32 bits_per_symbol[][2] = { + /* 20MHz 40MHz */ + { 26, 54 }, /* 0: BPSK */ + { 52, 108 }, /* 1: QPSK 1/2 */ + { 78, 162 }, /* 2: QPSK 3/4 */ + { 104, 216 }, /* 3: 16-QAM 1/2 */ + { 156, 324 }, /* 4: 16-QAM 3/4 */ + { 208, 432 }, /* 5: 64-QAM 2/3 */ + { 234, 486 }, /* 6: 64-QAM 3/4 */ + { 260, 540 }, /* 7: 64-QAM 5/6 */ + { 52, 108 }, /* 8: BPSK */ + { 104, 216 }, /* 9: QPSK 1/2 */ + { 156, 324 }, /* 10: QPSK 3/4 */ + { 208, 432 }, /* 11: 16-QAM 1/2 */ + { 312, 648 }, /* 12: 16-QAM 3/4 */ + { 416, 864 }, /* 13: 64-QAM 2/3 */ + { 468, 972 }, /* 14: 64-QAM 3/4 */ + { 520, 1080 }, /* 15: 64-QAM 5/6 */ +}; + +#define IS_HT_RATE(_rate) ((_rate) & 0x80) + +/* + * Insert a chain of ath_buf (descriptors) on a multicast txq + * but do NOT start tx DMA on this queue. + * NB: must be called with txq lock held + */ + +static void ath_tx_mcastqaddbuf(struct ath_softc *sc, + struct ath_txq *txq, + struct list_head *head) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath_buf *bf; + + if (list_empty(head)) + return; + + /* + * Insert the frame on the outbound list and + * pass it on to the hardware. + */ + bf = list_first_entry(head, struct ath_buf, list); + + /* + * The CAB queue is started from the SWBA handler since + * frames only go out on DTIM and to avoid possible races. + */ + ath9k_hw_set_interrupts(ah, 0); + + /* + * If there is anything in the mcastq, we want to set + * the "more data" bit in the last item in the queue to + * indicate that there is "more data". It makes sense to add + * it here since you are *always* going to have + * more data when adding to this queue, no matter where + * you call from. + */ + + if (txq->axq_depth) { + struct ath_buf *lbf; + struct ieee80211_hdr *hdr; + + /* + * Add the "more data flag" to the last frame + */ + + lbf = list_entry(txq->axq_q.prev, struct ath_buf, list); + hdr = (struct ieee80211_hdr *) + ((struct sk_buff *)(lbf->bf_mpdu))->data; + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); + } + + /* + * Now, concat the frame onto the queue + */ + list_splice_tail_init(head, &txq->axq_q); + txq->axq_depth++; + txq->axq_totalqueued++; + txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list); + + DPRINTF(sc, ATH_DBG_QUEUE, + "%s: txq depth = %d\n", __func__, txq->axq_depth); + if (txq->axq_link != NULL) { + *txq->axq_link = bf->bf_daddr; + DPRINTF(sc, ATH_DBG_XMIT, + "%s: link[%u](%p)=%llx (%p)\n", + __func__, + txq->axq_qnum, txq->axq_link, + ito64(bf->bf_daddr), bf->bf_desc); + } + txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); + ath9k_hw_set_interrupts(ah, sc->sc_imask); +} + +/* + * Insert a chain of ath_buf (descriptors) on a txq and + * assume the descriptors are already chained together by caller. 
+ * NB: must be called with txq lock held + */ + +static void ath_tx_txqaddbuf(struct ath_softc *sc, + struct ath_txq *txq, struct list_head *head) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath_buf *bf; + /* + * Insert the frame on the outbound list and + * pass it on to the hardware. + */ + + if (list_empty(head)) + return; + + bf = list_first_entry(head, struct ath_buf, list); + + list_splice_tail_init(head, &txq->axq_q); + txq->axq_depth++; + txq->axq_totalqueued++; + txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list); + + DPRINTF(sc, ATH_DBG_QUEUE, + "%s: txq depth = %d\n", __func__, txq->axq_depth); + + if (txq->axq_link == NULL) { + ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); + DPRINTF(sc, ATH_DBG_XMIT, + "%s: TXDP[%u] = %llx (%p)\n", + __func__, txq->axq_qnum, + ito64(bf->bf_daddr), bf->bf_desc); + } else { + *txq->axq_link = bf->bf_daddr; + DPRINTF(sc, ATH_DBG_XMIT, "%s: link[%u] (%p)=%llx (%p)\n", + __func__, + txq->axq_qnum, txq->axq_link, + ito64(bf->bf_daddr), bf->bf_desc); + } + txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); + ath9k_hw_txstart(ah, txq->axq_qnum); +} + +/* Get transmit rate index using rate in Kbps */ + +static int ath_tx_findindex(const struct ath9k_rate_table *rt, int rate) +{ + int i; + int ndx = 0; + + for (i = 0; i < rt->rateCount; i++) { + if (rt->info[i].rateKbps == rate) { + ndx = i; + break; + } + } + + return ndx; +} + +/* Check if it's okay to send out aggregates */ + +static int ath_aggr_query(struct ath_softc *sc, + struct ath_node *an, u8 tidno) +{ + struct ath_atx_tid *tid; + tid = ATH_AN_2_TID(an, tidno); + + if (tid->addba_exchangecomplete || tid->addba_exchangeinprogress) + return 1; + else + return 0; +} + +static enum ath9k_pkt_type get_hal_packet_type(struct ieee80211_hdr *hdr) +{ + enum ath9k_pkt_type htype; + __le16 fc; + + fc = hdr->frame_control; + + /* Calculate Atheros packet type from IEEE80211 packet header */ + + if (ieee80211_is_beacon(fc)) + htype = ATH9K_PKT_TYPE_BEACON; + else if (ieee80211_is_probe_resp(fc)) + htype = ATH9K_PKT_TYPE_PROBE_RESP; + else if (ieee80211_is_atim(fc)) + htype = ATH9K_PKT_TYPE_ATIM; + else if (ieee80211_is_pspoll(fc)) + htype = ATH9K_PKT_TYPE_PSPOLL; + else + htype = ATH9K_PKT_TYPE_NORMAL; + + return htype; +} + +static void fill_min_rates(struct sk_buff *skb, struct ath_tx_control *txctl) +{ + struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct ath_tx_info_priv *tx_info_priv; + __le16 fc; + + hdr = (struct ieee80211_hdr *)skb->data; + fc = hdr->frame_control; + tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; + + if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) { + txctl->use_minrate = 1; + txctl->min_rate = tx_info_priv->min_rate; + } else if (ieee80211_is_data(fc)) { + if (ieee80211_is_nullfunc(fc) || + /* Port Access Entity (IEEE 802.1X) */ + (skb->protocol == cpu_to_be16(0x888E))) { + txctl->use_minrate = 1; + txctl->min_rate = tx_info_priv->min_rate; + } + if (is_multicast_ether_addr(hdr->addr1)) + txctl->mcast_rate = tx_info_priv->min_rate; + } + +} + +/* This function will setup additional txctl information, mostly rate stuff */ +/* FIXME: seqno, ps */ +static int ath_tx_prepare(struct ath_softc *sc, + struct sk_buff *skb, + struct ath_tx_control *txctl) +{ + struct ieee80211_hw *hw = sc->hw; + struct ieee80211_hdr *hdr; + struct ath_rc_series *rcs; + struct ath_txq *txq = NULL; + const struct ath9k_rate_table *rt; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + struct 
ath_tx_info_priv *tx_info_priv; + int hdrlen; + u8 rix, antenna; + __le16 fc; + u8 *qc; + + memset(txctl, 0, sizeof(struct ath_tx_control)); + + txctl->dev = sc; + hdr = (struct ieee80211_hdr *)skb->data; + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + fc = hdr->frame_control; + + rt = sc->sc_currates; + BUG_ON(!rt); + + /* Fill misc fields */ + + spin_lock_bh(&sc->node_lock); + txctl->an = ath_node_get(sc, hdr->addr1); + /* create a temp node, if the node is not there already */ + if (!txctl->an) + txctl->an = ath_node_attach(sc, hdr->addr1, 0); + spin_unlock_bh(&sc->node_lock); + + if (ieee80211_is_data_qos(fc)) { + qc = ieee80211_get_qos_ctl(hdr); + txctl->tidno = qc[0] & 0xf; + } + + txctl->if_id = 0; + txctl->nextfraglen = 0; + txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3); + txctl->txpower = MAX_RATE_POWER; /* FIXME */ + + /* Fill Key related fields */ + + txctl->keytype = ATH9K_KEY_TYPE_CLEAR; + txctl->keyix = ATH9K_TXKEYIX_INVALID; + + if (tx_info->control.hw_key) { + txctl->keyix = tx_info->control.hw_key->hw_key_idx; + txctl->frmlen += tx_info->control.icv_len; + + if (sc->sc_keytype == ATH9K_CIPHER_WEP) + txctl->keytype = ATH9K_KEY_TYPE_WEP; + else if (sc->sc_keytype == ATH9K_CIPHER_TKIP) + txctl->keytype = ATH9K_KEY_TYPE_TKIP; + else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM) + txctl->keytype = ATH9K_KEY_TYPE_AES; + } + + /* Fill packet type */ + + txctl->atype = get_hal_packet_type(hdr); + + /* Fill qnum */ + + txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc); + txq = &sc->sc_txq[txctl->qnum]; + spin_lock_bh(&txq->axq_lock); + + /* Try to avoid running out of descriptors */ + if (txq->axq_depth >= (ATH_TXBUF - 20)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: TX queue: %d is full, depth: %d\n", + __func__, + txctl->qnum, + txq->axq_depth); + ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); + txq->stopped = 1; + spin_unlock_bh(&txq->axq_lock); + return -1; + } + + spin_unlock_bh(&txq->axq_lock); + + /* Fill rate */ + + fill_min_rates(skb, txctl); + + /* Fill flags */ + + txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */ + + if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) + txctl->flags |= ATH9K_TXDESC_NOACK; + if (tx_info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) + txctl->flags |= ATH9K_TXDESC_RTSENA; + + /* + * Setup for rate calculations. + */ + tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; + rcs = tx_info_priv->rcs; + + if (ieee80211_is_data(fc) && !txctl->use_minrate) { + + /* Enable HT only for DATA frames and not for EAPOL */ + txctl->ht = (hw->conf.ht_conf.ht_supported && + (tx_info->flags & IEEE80211_TX_CTL_AMPDU)); + + if (is_multicast_ether_addr(hdr->addr1)) { + rcs[0].rix = (u8) + ath_tx_findindex(rt, txctl->mcast_rate); + + /* + * mcast packets are not re-tried. + */ + rcs[0].tries = 1; + } + /* For HT capable stations, we save tidno for later use. + * We also override seqno set by upper layer with the one + * in tx aggregation state. + * + * First, the fragmentation state is determined. + * If fragmentation is on, the sequence number is + * not overridden, since it has been + * incremented by the fragmentation routine.
+ */ + if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) && + txctl->ht && sc->sc_txaggr) { + struct ath_atx_tid *tid; + + tid = ATH_AN_2_TID(txctl->an, txctl->tidno); + + hdr->seq_ctrl = cpu_to_le16(tid->seq_next << + IEEE80211_SEQ_SEQ_SHIFT); + txctl->seqno = tid->seq_next; + INCR(tid->seq_next, IEEE80211_SEQ_MAX); + } + } else { + /* for management and control frames, + * or for NULL and EAPOL frames */ + if (txctl->min_rate) + rcs[0].rix = ath_rate_findrateix(sc, txctl->min_rate); + else + rcs[0].rix = 0; + rcs[0].tries = ATH_MGT_TXMAXTRY; + } + rix = rcs[0].rix; + + /* + * Calculate duration. This logically belongs in the 802.11 + * layer but it lacks sufficient information to calculate it. + */ + if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) { + u16 dur; + /* + * XXX not right with fragmentation. + */ + if (sc->sc_flags & ATH_PREAMBLE_SHORT) + dur = rt->info[rix].spAckDuration; + else + dur = rt->info[rix].lpAckDuration; + + if (le16_to_cpu(hdr->frame_control) & + IEEE80211_FCTL_MOREFRAGS) { + dur += dur; /* Add additional 'SIFS + ACK' */ + + /* + ** Compute size of next fragment in order to compute + ** durations needed to update NAV. + ** The last fragment uses the ACK duration only. + ** Add time for next fragment. + */ + dur += ath9k_hw_computetxtime(sc->sc_ah, rt, + txctl->nextfraglen, + rix, sc->sc_flags & ATH_PREAMBLE_SHORT); + } + + if (ieee80211_has_morefrags(fc) || + (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) { + /* + ** Force hardware to use computed duration for next + ** fragment by disabling multi-rate retry, which + ** updates duration based on the multi-rate + ** duration table. + */ + rcs[1].tries = rcs[2].tries = rcs[3].tries = 0; + rcs[1].rix = rcs[2].rix = rcs[3].rix = 0; + /* reset tries but keep rate index */ + rcs[0].tries = ATH_TXMAXTRY; + } + + hdr->duration_id = cpu_to_le16(dur); + } + + /* + * Determine if a tx interrupt should be generated for + * this descriptor. We take a tx interrupt to reap + * descriptors when the h/w hits an EOL condition or + * when the descriptor is specifically marked to generate + * an interrupt. We periodically mark descriptors in this + * way to ensure timely replenishing of the supply needed + * for sending frames. Deferring interrupts reduces system + * load and potentially allows more concurrent work to be + * done but if done too aggressively can cause senders to + * back up. + * + * NB: use >= to deal with sc_txintrperiod changing + * dynamically through sysctl. + */ + spin_lock_bh(&txq->axq_lock); + if ((++txq->axq_intrcnt >= sc->sc_txintrperiod)) { + txctl->flags |= ATH9K_TXDESC_INTREQ; + txq->axq_intrcnt = 0; + } + spin_unlock_bh(&txq->axq_lock); + + if (is_multicast_ether_addr(hdr->addr1)) { + antenna = sc->sc_mcastantenna + 1; + sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1; + } else + antenna = sc->sc_txantenna; + +#ifdef USE_LEGACY_HAL + txctl->antenna = antenna; +#endif + return 0; +} + +/* To complete a chain of buffers associated with a frame */ + +static void ath_tx_complete_buf(struct ath_softc *sc, + struct ath_buf *bf, + struct list_head *bf_q, + int txok, int sendbar) +{ + struct sk_buff *skb = bf->bf_mpdu; + struct ath_xmit_status tx_status; + dma_addr_t *pa; + + /* + * Set retry information. + * NB: Don't use the information in the descriptor, because the frame + * could be software retried.
+ */ + tx_status.retries = bf->bf_retries; + tx_status.flags = 0; + + if (sendbar) + tx_status.flags = ATH_TX_BAR; + + if (!txok) { + tx_status.flags |= ATH_TX_ERROR; + + if (bf->bf_isxretried) + tx_status.flags |= ATH_TX_XRETRY; + } + /* Unmap this frame */ + pa = get_dma_mem_context(bf, bf_dmacontext); + pci_unmap_single(sc->pdev, + *pa, + skb->len, + PCI_DMA_TODEVICE); + /* complete this frame */ + ath_tx_complete(sc, skb, &tx_status, bf->bf_node); + + /* + * Return the list of ath_buf of this mpdu to free queue + */ + spin_lock_bh(&sc->sc_txbuflock); + list_splice_tail_init(bf_q, &sc->sc_txbuf); + spin_unlock_bh(&sc->sc_txbuflock); +} + +/* + * queue up a dest/ac pair for tx scheduling + * NB: must be called with txq lock held + */ + +static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) +{ + struct ath_atx_ac *ac = tid->ac; + + /* + * if tid is paused, hold off + */ + if (tid->paused) + return; + + /* + * add tid to ac atmost once + */ + if (tid->sched) + return; + + tid->sched = true; + list_add_tail(&tid->list, &ac->tid_q); + + /* + * add node ac to txq atmost once + */ + if (ac->sched) + return; + + ac->sched = true; + list_add_tail(&ac->list, &txq->axq_acq); +} + +/* pause a tid */ + +static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; + + spin_lock_bh(&txq->axq_lock); + + tid->paused++; + + spin_unlock_bh(&txq->axq_lock); +} + +/* resume a tid and schedule aggregate */ + +void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; + + ASSERT(tid->paused > 0); + spin_lock_bh(&txq->axq_lock); + + tid->paused--; + + if (tid->paused > 0) + goto unlock; + + if (list_empty(&tid->buf_q)) + goto unlock; + + /* + * Add this TID to scheduler and try to send out aggregates + */ + ath_tx_queue_tid(txq, tid); + ath_txq_schedule(sc, txq); +unlock: + spin_unlock_bh(&txq->axq_lock); +} + +/* Compute the number of bad frames */ + +static int ath_tx_num_badfrms(struct ath_softc *sc, + struct ath_buf *bf, int txok) +{ + struct ath_node *an = bf->bf_node; + int isnodegone = (an->an_flags & ATH_NODE_CLEAN); + struct ath_buf *bf_last = bf->bf_lastbf; + struct ath_desc *ds = bf_last->bf_desc; + u16 seq_st = 0; + u32 ba[WME_BA_BMP_SIZE >> 5]; + int ba_index; + int nbad = 0; + int isaggr = 0; + + if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) + return 0; + + isaggr = bf->bf_isaggr; + if (isaggr) { + seq_st = ATH_DS_BA_SEQ(ds); + memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); + } + + while (bf) { + ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno); + if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) + nbad++; + + bf = bf->bf_next; + } + + return nbad; +} + +static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) +{ + struct sk_buff *skb; + struct ieee80211_hdr *hdr; + + bf->bf_isretried = 1; + bf->bf_retries++; + + skb = bf->bf_mpdu; + hdr = (struct ieee80211_hdr *)skb->data; + hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); +} + +/* Update block ack window */ + +static void ath_tx_update_baw(struct ath_softc *sc, + struct ath_atx_tid *tid, int seqno) +{ + int index, cindex; + + index = ATH_BA_INDEX(tid->seq_start, seqno); + cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); + + tid->tx_buf[cindex] = NULL; + + while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) { + INCR(tid->seq_start, IEEE80211_SEQ_MAX); + INCR(tid->baw_head, ATH_TID_MAX_BUFS); + } +} + +/* + * 
ath_pkt_dur - compute packet duration (NB: not NAV) + * + * rix - rate index + * pktlen - total bytes (delims + data + fcs + pads + pad delims) + * width - 0 for 20 MHz, 1 for 40 MHz + * half_gi - to use 4us v/s 3.6 us for symbol time + */ + +static u32 ath_pkt_duration(struct ath_softc *sc, + u8 rix, + struct ath_buf *bf, + int width, + int half_gi, + bool shortPreamble) +{ + const struct ath9k_rate_table *rt = sc->sc_currates; + u32 nbits, nsymbits, duration, nsymbols; + u8 rc; + int streams, pktlen; + + pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen; + rc = rt->info[rix].rateCode; + + /* + * for legacy rates, use old function to compute packet duration + */ + if (!IS_HT_RATE(rc)) + return ath9k_hw_computetxtime(sc->sc_ah, + rt, + pktlen, + rix, + shortPreamble); + /* + * find number of symbols: PLCP + data + */ + nbits = (pktlen << 3) + OFDM_PLCP_BITS; + nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; + nsymbols = (nbits + nsymbits - 1) / nsymbits; + + if (!half_gi) + duration = SYMBOL_TIME(nsymbols); + else + duration = SYMBOL_TIME_HALFGI(nsymbols); + + /* + * add up duration for legacy/ht training and signal fields + */ + streams = HT_RC_2_STREAMS(rc); + duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); + return duration; +} + +/* Rate module function to set rate related fields in tx descriptor */ + +static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) +{ + struct ath_hal *ah = sc->sc_ah; + const struct ath9k_rate_table *rt; + struct ath_desc *ds = bf->bf_desc; + struct ath_desc *lastds = bf->bf_lastbf->bf_desc; + struct ath9k_11n_rate_series series[4]; + int i, flags, rtsctsena = 0, dynamic_mimops = 0; + u32 ctsduration = 0; + u8 rix = 0, cix, ctsrate = 0; + u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit; + struct ath_node *an = (struct ath_node *) bf->bf_node; + + /* + * get the cix for the lowest valid rix. + */ + rt = sc->sc_currates; + for (i = 4; i--;) { + if (bf->bf_rcs[i].tries) { + rix = bf->bf_rcs[i].rix; + break; + } + } + flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)); + cix = rt->info[rix].controlRate; + + /* + * If 802.11g protection is enabled, determine whether + * to use RTS/CTS or just CTS. Note that this is only + * done for OFDM/HT unicast frames. + */ + if (sc->sc_protmode != PROT_M_NONE && + (rt->info[rix].phy == PHY_OFDM || + rt->info[rix].phy == PHY_HT) && + (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { + if (sc->sc_protmode == PROT_M_RTSCTS) + flags = ATH9K_TXDESC_RTSENA; + else if (sc->sc_protmode == PROT_M_CTSONLY) + flags = ATH9K_TXDESC_CTSENA; + + cix = rt->info[sc->sc_protrix].controlRate; + rtsctsena = 1; + } + + /* For 11n, the default behavior is to enable RTS for + * hw retried frames. We enable the global flag here and + * let rate series flags determine which rates will actually + * use RTS. + */ + if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) { + BUG_ON(!an); + /* + * 802.11g protection not needed, use our default behavior + */ + if (!rtsctsena) + flags = ATH9K_TXDESC_RTSENA; + /* + * For dynamic MIMO PS, RTS needs to precede the first aggregate + * and the second aggregate should not have any protection at all.
+ */ + if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) { + if (!bf->bf_aggrburst) { + flags = ATH9K_TXDESC_RTSENA; + dynamic_mimops = 1; + } else { + flags = 0; + } + } + } + + /* + * Set protection if aggregate protection on + */ + if (sc->sc_config.ath_aggr_prot && + (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) { + flags = ATH9K_TXDESC_RTSENA; + cix = rt->info[sc->sc_protrix].controlRate; + rtsctsena = 1; + } + + /* + * For AR5416 - RTS cannot be followed by a frame larger than 8K. + */ + if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) { + /* + * Ensure that in the case of SM Dynamic power save + * while we are bursting the second aggregate the + * RTS is cleared. + */ + flags &= ~(ATH9K_TXDESC_RTSENA); + } + + /* + * CTS transmit rate is derived from the transmit rate + * by looking in the h/w rate table. We must also factor + * in whether or not a short preamble is to be used. + */ + /* NB: cix is set above where RTS/CTS is enabled */ + BUG_ON(cix == 0xff); + ctsrate = rt->info[cix].rateCode | + (bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0); + + /* + * Setup HAL rate series + */ + memzero(series, sizeof(struct ath9k_11n_rate_series) * 4); + + for (i = 0; i < 4; i++) { + if (!bf->bf_rcs[i].tries) + continue; + + rix = bf->bf_rcs[i].rix; + + series[i].Rate = rt->info[rix].rateCode | + (bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0); + + series[i].Tries = bf->bf_rcs[i].tries; + + series[i].RateFlags = ( + (bf->bf_rcs[i].flags & ATH_RC_RTSCTS_FLAG) ? + ATH9K_RATESERIES_RTS_CTS : 0) | + ((bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) ? + ATH9K_RATESERIES_2040 : 0) | + ((bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG) ? + ATH9K_RATESERIES_HALFGI : 0); + + series[i].PktDuration = ath_pkt_duration( + sc, rix, bf, + (bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0, + (bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG), + bf->bf_shpreamble); + + if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) && + (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) { + /* + * When sending to an HT node that has enabled static + * SM/MIMO power save, send at single stream rates but + * use maximum allowed transmit chains per user, + * hardware, regulatory, or country limits for + * better range. + */ + series[i].ChSel = sc->sc_tx_chainmask; + } else { + if (bf->bf_ht) + series[i].ChSel = + ath_chainmask_sel_logic(sc, an); + else + series[i].ChSel = sc->sc_tx_chainmask; + } + + if (rtsctsena) + series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; + + /* + * Set RTS for all rates if node is in dynamic powersave + * mode and we are using dual stream rates. + */ + if (dynamic_mimops && (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG)) + series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; + } + + /* + * For non-HT devices, calculate RTS/CTS duration in software + * and disable multi-rate retry. + */ + if (flags && !(ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)) { + /* + * Compute the transmit duration based on the frame + * size and the size of an ACK frame. We call into the + * HAL to do the computation since it depends on the + * characteristics of the actual PHY being used. + * + * NB: CTS is assumed the same size as an ACK so we can + * use the precalculated ACK durations. + */ + if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */ + ctsduration += bf->bf_shpreamble ? + rt->info[cix].spAckDuration : + rt->info[cix].lpAckDuration; + } + + ctsduration += series[0].PktDuration; + + if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */ + ctsduration += bf->bf_shpreamble ? 
+ rt->info[rix].spAckDuration : + rt->info[rix].lpAckDuration; + } + + /* + * Disable multi-rate retry when using RTS/CTS by clearing + * series 1, 2 and 3. + */ + memzero(&series[1], sizeof(struct ath9k_11n_rate_series) * 3); + } + + /* + * set dur_update_en for l-sig computation except for PS-Poll frames + */ + ath9k_hw_set11n_ratescenario(ah, ds, lastds, + !bf->bf_ispspoll, + ctsrate, + ctsduration, + series, 4, flags); + if (sc->sc_config.ath_aggr_prot && flags) + ath9k_hw_set11n_burstduration(ah, ds, 8192); +} + +/* + * Function to send a normal HT (non-AMPDU) frame + * NB: must be called with txq lock held + */ + +static int ath_tx_send_normal(struct ath_softc *sc, + struct ath_txq *txq, + struct ath_atx_tid *tid, + struct list_head *bf_head) +{ + struct ath_buf *bf; + struct sk_buff *skb; + struct ieee80211_tx_info *tx_info; + struct ath_tx_info_priv *tx_info_priv; + + BUG_ON(list_empty(bf_head)); + + bf = list_first_entry(bf_head, struct ath_buf, list); + bf->bf_isampdu = 0; /* regular HT frame */ + + skb = (struct sk_buff *)bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; + memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0])); + + /* update starting sequence number for subsequent ADDBA request */ + INCR(tid->seq_start, IEEE80211_SEQ_MAX); + + /* Queue to h/w without aggregation */ + bf->bf_nframes = 1; + bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ + ath_buf_set_rate(sc, bf); + ath_tx_txqaddbuf(sc, txq, bf_head); + + return 0; +} + +/* flush tid's software queue and send frames as non-ampdu's */ + +static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum]; + struct ath_buf *bf; + struct list_head bf_head; + INIT_LIST_HEAD(&bf_head); + + ASSERT(tid->paused > 0); + spin_lock_bh(&txq->axq_lock); + + tid->paused--; + + if (tid->paused > 0) { + spin_unlock_bh(&txq->axq_lock); + return; + } + + while (!list_empty(&tid->buf_q)) { + bf = list_first_entry(&tid->buf_q, struct ath_buf, list); + ASSERT(!bf->bf_isretried); + list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); + ath_tx_send_normal(sc, txq, tid, &bf_head); + } + + spin_unlock_bh(&txq->axq_lock); +} + +/* Completion routine of an aggregate */ + +static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, + struct ath_txq *txq, + struct ath_buf *bf, + struct list_head *bf_q, + int txok) +{ + struct ath_node *an = bf->bf_node; + struct ath_atx_tid *tid = ATH_AN_2_TID(an, bf->bf_tidno); + struct ath_buf *bf_last = bf->bf_lastbf; + struct ath_desc *ds = bf_last->bf_desc; + struct ath_buf *bf_next, *bf_lastq = NULL; + struct list_head bf_head, bf_pending; + u16 seq_st = 0; + u32 ba[WME_BA_BMP_SIZE >> 5]; + int isaggr, txfail, txpending, sendbar = 0, needreset = 0; + int isnodegone = (an->an_flags & ATH_NODE_CLEAN); + + isaggr = bf->bf_isaggr; + if (isaggr) { + if (txok) { + if (ATH_DS_TX_BA(ds)) { + /* + * extract starting sequence and + * block-ack bitmap + */ + seq_st = ATH_DS_BA_SEQ(ds); + memcpy(ba, + ATH_DS_BA_BITMAP(ds), + WME_BA_BMP_SIZE >> 3); + } else { + memzero(ba, WME_BA_BMP_SIZE >> 3); + + /* + * AR5416 can become deaf/mute when BA + * issue happens. Chip needs to be reset. + * But AP code may have sychronization issues + * when perform internal reset in this routine. + * Only enable reset in STA mode for now. 
+ */ + if (sc->sc_opmode == ATH9K_M_STA) + needreset = 1; + } + } else { + memzero(ba, WME_BA_BMP_SIZE >> 3); + } + } + + INIT_LIST_HEAD(&bf_pending); + INIT_LIST_HEAD(&bf_head); + + while (bf) { + txfail = txpending = 0; + bf_next = bf->bf_next; + + if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { + /* transmit completion, subframe is + * acked by block ack */ + } else if (!isaggr && txok) { + /* transmit completion */ + } else { + + if (!tid->cleanup_inprogress && !isnodegone && + ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { + if (bf->bf_retries < ATH_MAX_SW_RETRIES) { + ath_tx_set_retry(sc, bf); + txpending = 1; + } else { + bf->bf_isxretried = 1; + txfail = 1; + sendbar = 1; + } + } else { + /* + * cleanup in progress, just fail + * the un-acked sub-frames + */ + txfail = 1; + } + } + /* + * Remove ath_buf's of this sub-frame from aggregate queue. + */ + if (bf_next == NULL) { /* last subframe in the aggregate */ + ASSERT(bf->bf_lastfrm == bf_last); + + /* + * The last descriptor of the last sub frame could be + * a holding descriptor for h/w. If that's the case, + * bf->bf_lastfrm won't be in the bf_q. + * Make sure we handle bf_q properly here. + */ + + if (!list_empty(bf_q)) { + bf_lastq = list_entry(bf_q->prev, + struct ath_buf, list); + list_cut_position(&bf_head, + bf_q, &bf_lastq->list); + } else { + /* + * XXX: if the last subframe only has one + * descriptor which is also being used as + * a holding descriptor. Then the ath_buf + * is not in the bf_q at all. + */ + INIT_LIST_HEAD(&bf_head); + } + } else { + ASSERT(!list_empty(bf_q)); + list_cut_position(&bf_head, + bf_q, &bf->bf_lastfrm->list); + } + + if (!txpending) { + /* + * complete the acked-ones/xretried ones; update + * block-ack window + */ + spin_lock_bh(&txq->axq_lock); + ath_tx_update_baw(sc, tid, bf->bf_seqno); + spin_unlock_bh(&txq->axq_lock); + + /* complete this sub-frame */ + ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); + } else { + /* + * retry the un-acked ones + */ + /* + * XXX: if the last descriptor is holding descriptor, + * in order to requeue the frame to software queue, we + * need to allocate a new descriptor and + * copy the content of holding descriptor to it. 
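+ * The holding descriptor has to stay on the hardware queue, so the + * retried frame gets its own copy rather than re-using it.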
+ */ + if (bf->bf_next == NULL && + bf_last->bf_status & ATH_BUFSTATUS_STALE) { + struct ath_buf *tbf; + + /* allocate new descriptor */ + spin_lock_bh(&sc->sc_txbuflock); + ASSERT(!list_empty((&sc->sc_txbuf))); + tbf = list_first_entry(&sc->sc_txbuf, + struct ath_buf, list); + list_del(&tbf->list); + spin_unlock_bh(&sc->sc_txbuflock); + + ATH_TXBUF_RESET(tbf); + + /* copy descriptor content */ + tbf->bf_mpdu = bf_last->bf_mpdu; + tbf->bf_node = bf_last->bf_node; + tbf->bf_buf_addr = bf_last->bf_buf_addr; + *(tbf->bf_desc) = *(bf_last->bf_desc); + + /* link it to the frame */ + if (bf_lastq) { + bf_lastq->bf_desc->ds_link = + tbf->bf_daddr; + bf->bf_lastfrm = tbf; + ath9k_hw_cleartxdesc(sc->sc_ah, + bf->bf_lastfrm->bf_desc); + } else { + tbf->bf_state = bf_last->bf_state; + tbf->bf_lastfrm = tbf; + ath9k_hw_cleartxdesc(sc->sc_ah, + tbf->bf_lastfrm->bf_desc); + + /* copy the DMA context */ + copy_dma_mem_context( + get_dma_mem_context(tbf, + bf_dmacontext), + get_dma_mem_context(bf_last, + bf_dmacontext)); + } + list_add_tail(&tbf->list, &bf_head); + } else { + /* + * Clear descriptor status words for + * software retry + */ + ath9k_hw_cleartxdesc(sc->sc_ah, + bf->bf_lastfrm->bf_desc); + } + + /* + * Put this buffer to the temporary pending + * queue to retain ordering + */ + list_splice_tail_init(&bf_head, &bf_pending); + } + + bf = bf_next; + } + + /* + * node is already gone. no more association + * with the node. the node might have been freed; + * any node access can result in a panic. Note that tid + * is part of the node. + */ + if (isnodegone) + return; + + if (tid->cleanup_inprogress) { + /* check to see if we're done with cleaning the h/w queue */ + spin_lock_bh(&txq->axq_lock); + + if (tid->baw_head == tid->baw_tail) { + tid->addba_exchangecomplete = 0; + tid->addba_exchangeattempts = 0; + spin_unlock_bh(&txq->axq_lock); + + tid->cleanup_inprogress = false; + + /* send buffered frames as singles */ + ath_tx_flush_tid(sc, tid); + } else + spin_unlock_bh(&txq->axq_lock); + + return; + } + + /* + * prepend un-acked frames to the beginning of the pending frame queue + */ + if (!list_empty(&bf_pending)) { + spin_lock_bh(&txq->axq_lock); + /* Note: we _prepend_, we _do_not_ add to + * the end of the queue! */ + list_splice(&bf_pending, &tid->buf_q); + ath_tx_queue_tid(txq, tid); + spin_unlock_bh(&txq->axq_lock); + } + + if (needreset) + ath_internal_reset(sc); + + return; +} + +/* Process completed xmit descriptors from the specified queue */ + +static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath_buf *bf, *lastbf, *bf_held = NULL; + struct list_head bf_head; + struct ath_desc *ds, *tmp_ds; + struct sk_buff *skb; + struct ieee80211_tx_info *tx_info; + struct ath_tx_info_priv *tx_info_priv; + int nacked, txok, nbad = 0, isrifs = 0; + int status; + + DPRINTF(sc, ATH_DBG_QUEUE, + "%s: tx queue %d (%x), link %p\n", __func__, + txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), + txq->axq_link); + + nacked = 0; + for (;;) { + spin_lock_bh(&txq->axq_lock); + txq->axq_intrcnt = 0; /* reset periodic desc intr count */ + if (list_empty(&txq->axq_q)) { + txq->axq_link = NULL; + txq->axq_linkbuf = NULL; + spin_unlock_bh(&txq->axq_lock); + break; + } + bf = list_first_entry(&txq->axq_q, struct ath_buf, list); + + /* + * There is a race condition where a BH gets scheduled + * after sw writes TxE and before hw re-loads the last + * descriptor to get the newly chained one. 
+ * Software must keep the last DONE descriptor as a + * holding descriptor - software does so by marking + * it with the STALE flag. + */ + bf_held = NULL; + if (bf->bf_status & ATH_BUFSTATUS_STALE) { + bf_held = bf; + if (list_is_last(&bf_held->list, &txq->axq_q)) { + /* FIXME: + * The holding descriptor is the last + * descriptor in queue. It's safe to remove + * the last holding descriptor in BH context. + */ + spin_unlock_bh(&txq->axq_lock); + break; + } else { + /* Lets work with the next buffer now */ + bf = list_entry(bf_held->list.next, + struct ath_buf, list); + } + } + + lastbf = bf->bf_lastbf; + ds = lastbf->bf_desc; /* NB: last decriptor */ + + status = ath9k_hw_txprocdesc(ah, ds); + if (status == -EINPROGRESS) { + spin_unlock_bh(&txq->axq_lock); + break; + } + if (bf->bf_desc == txq->axq_lastdsWithCTS) + txq->axq_lastdsWithCTS = NULL; + if (ds == txq->axq_gatingds) + txq->axq_gatingds = NULL; + + /* + * Remove ath_buf's of the same transmit unit from txq, + * however leave the last descriptor back as the holding + * descriptor for hw. + */ + lastbf->bf_status |= ATH_BUFSTATUS_STALE; + INIT_LIST_HEAD(&bf_head); + + if (!list_is_singular(&lastbf->list)) + list_cut_position(&bf_head, + &txq->axq_q, lastbf->list.prev); + + txq->axq_depth--; + + if (bf->bf_isaggr) + txq->axq_aggr_depth--; + + txok = (ds->ds_txstat.ts_status == 0); + + spin_unlock_bh(&txq->axq_lock); + + if (bf_held) { + list_del(&bf_held->list); + spin_lock_bh(&sc->sc_txbuflock); + list_add_tail(&bf_held->list, &sc->sc_txbuf); + spin_unlock_bh(&sc->sc_txbuflock); + } + + if (!bf->bf_isampdu) { + /* + * This frame is sent out as a single frame. + * Use hardware retry status for this frame. + */ + bf->bf_retries = ds->ds_txstat.ts_longretry; + if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) + bf->bf_isxretried = 1; + nbad = 0; + } else { + nbad = ath_tx_num_badfrms(sc, bf, txok); + } + skb = bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + tx_info_priv = (struct ath_tx_info_priv *) + tx_info->driver_data[0]; + if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) + tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; + if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && + (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { + if (ds->ds_txstat.ts_status == 0) + nacked++; + + if (bf->bf_isdata) { + if (isrifs) + tmp_ds = bf->bf_rifslast->bf_desc; + else + tmp_ds = ds; + memcpy(&tx_info_priv->tx, + &tmp_ds->ds_txstat, + sizeof(tx_info_priv->tx)); + tx_info_priv->n_frames = bf->bf_nframes; + tx_info_priv->n_bad_frames = nbad; + } + } + + /* + * Complete this transmit unit + */ + if (bf->bf_isampdu) + ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok); + else + ath_tx_complete_buf(sc, bf, &bf_head, txok, 0); + + /* Wake up mac80211 queue */ + + spin_lock_bh(&txq->axq_lock); + if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <= + (ATH_TXBUF - 20)) { + int qnum; + qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc); + if (qnum != -1) { + ieee80211_wake_queue(sc->hw, qnum); + txq->stopped = 0; + } + + } + + /* + * schedule any pending packets if aggregation is enabled + */ + if (sc->sc_txaggr) + ath_txq_schedule(sc, txq); + spin_unlock_bh(&txq->axq_lock); + } + return nacked; +} + +static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) +{ + struct ath_hal *ah = sc->sc_ah; + + (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum); + DPRINTF(sc, ATH_DBG_XMIT, "%s: tx queue [%u] %x, link %p\n", + __func__, txq->axq_qnum, + ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link); +} + +/* Drain only the data queues */ + +static 
void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx) +{ + struct ath_hal *ah = sc->sc_ah; + int i; + int npend = 0; + enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc); + + /* XXX return value */ + if (!sc->sc_invalid) { + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (ATH_TXQ_SETUP(sc, i)) { + ath_tx_stopdma(sc, &sc->sc_txq[i]); + + /* The TxDMA may not really be stopped. + * Double check the hal tx pending count */ + npend += ath9k_hw_numtxpending(ah, + sc->sc_txq[i].axq_qnum); + } + } + } + + if (npend) { + int status; + + /* TxDMA not stopped, reset the hal */ + DPRINTF(sc, ATH_DBG_XMIT, + "%s: Unable to stop TxDMA. Reset HAL!\n", __func__); + + spin_lock_bh(&sc->sc_resetlock); + if (!ath9k_hw_reset(ah, sc->sc_opmode, + &sc->sc_curchan, ht_macmode, + sc->sc_tx_chainmask, sc->sc_rx_chainmask, + sc->sc_ht_extprotspacing, true, &status)) { + + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to reset hardware; hal status %u\n", + __func__, + status); + } + spin_unlock_bh(&sc->sc_resetlock); + } + + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (ATH_TXQ_SETUP(sc, i)) + ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx); + } +} + +/* Add a sub-frame to block ack window */ + +static void ath_tx_addto_baw(struct ath_softc *sc, + struct ath_atx_tid *tid, + struct ath_buf *bf) +{ + int index, cindex; + + if (bf->bf_isretried) + return; + + index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); + cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); + + ASSERT(tid->tx_buf[cindex] == NULL); + tid->tx_buf[cindex] = bf; + + if (index >= ((tid->baw_tail - tid->baw_head) & + (ATH_TID_MAX_BUFS - 1))) { + tid->baw_tail = cindex; + INCR(tid->baw_tail, ATH_TID_MAX_BUFS); + } +} + +/* + * Function to send an A-MPDU + * NB: must be called with txq lock held + */ + +static int ath_tx_send_ampdu(struct ath_softc *sc, + struct ath_txq *txq, + struct ath_atx_tid *tid, + struct list_head *bf_head, + struct ath_tx_control *txctl) +{ + struct ath_buf *bf; + struct sk_buff *skb; + struct ieee80211_tx_info *tx_info; + struct ath_tx_info_priv *tx_info_priv; + + BUG_ON(list_empty(bf_head)); + + bf = list_first_entry(bf_head, struct ath_buf, list); + bf->bf_isampdu = 1; + bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */ + bf->bf_tidno = txctl->tidno; + + /* + * Do not queue to h/w when any of the following conditions is true: + * - there are pending frames in software queue + * - the TID is currently paused for ADDBA/BAR request + * - seqno is not within block-ack window + * - h/w queue depth exceeds low water mark + */ + if (!list_empty(&tid->buf_q) || tid->paused || + !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || + txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { + /* + * Add this frame to software queue for scheduling later + * for aggregation. 
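+ * It will be picked up and aggregated by ath_tx_sched_aggr the next + * time this TID gets scheduled.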
+ */ + list_splice_tail_init(bf_head, &tid->buf_q); + ath_tx_queue_tid(txq, tid); + return 0; + } + + skb = (struct sk_buff *)bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; + memcpy(bf->bf_rcs, tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0])); + + /* Add sub-frame to BAW */ + ath_tx_addto_baw(sc, tid, bf); + + /* Queue to h/w without aggregation */ + bf->bf_nframes = 1; + bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ + ath_buf_set_rate(sc, bf); + ath_tx_txqaddbuf(sc, txq, bf_head); + return 0; +} + +/* + * looks up the rate + * returns aggr limit based on lowest of the rates + */ + +static u32 ath_lookup_rate(struct ath_softc *sc, + struct ath_buf *bf) +{ + const struct ath9k_rate_table *rt = sc->sc_currates; + struct sk_buff *skb; + struct ieee80211_tx_info *tx_info; + struct ath_tx_info_priv *tx_info_priv; + u32 max_4ms_framelen, frame_length; + u16 aggr_limit, legacy = 0, maxampdu; + int i; + + + skb = (struct sk_buff *)bf->bf_mpdu; + tx_info = IEEE80211_SKB_CB(skb); + tx_info_priv = (struct ath_tx_info_priv *) + tx_info->driver_data[0]; + memcpy(bf->bf_rcs, + tx_info_priv->rcs, 4 * sizeof(tx_info_priv->rcs[0])); + + /* + * Find the lowest frame length among the rate series that will have a + * 4ms transmit duration. + * TODO - TXOP limit needs to be considered. + */ + max_4ms_framelen = ATH_AMPDU_LIMIT_MAX; + + for (i = 0; i < 4; i++) { + if (bf->bf_rcs[i].tries) { + frame_length = bf->bf_rcs[i].max_4ms_framelen; + + if (rt->info[bf->bf_rcs[i].rix].phy != PHY_HT) { + legacy = 1; + break; + } + + max_4ms_framelen = min(max_4ms_framelen, frame_length); + } + } + + /* + * limit aggregate size by the minimum rate if rate selected is + * not a probe rate; if rate selected is a probe rate then + * avoid aggregation of this packet. + */ + if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) + return 0; + + aggr_limit = min(max_4ms_framelen, + (u32)ATH_AMPDU_LIMIT_DEFAULT); + + /* + * h/w can accept aggregates up to 16-bit lengths (65535). + * The IE, however, can hold up to 65536, which shows up here + * as zero. Ignore 65536 since we are constrained by hw. + */ + maxampdu = sc->sc_ht_info.maxampdu; + if (maxampdu) + aggr_limit = min(aggr_limit, maxampdu); + + return aggr_limit; +} + +/* + * returns the number of delimiters to be added to + * meet the minimum required mpdudensity. + * caller should make sure that the rate is an HT rate. + */ + +static int ath_compute_num_delims(struct ath_softc *sc, + struct ath_buf *bf, + u16 frmlen) +{ + const struct ath9k_rate_table *rt = sc->sc_currates; + u32 nsymbits, nsymbols, mpdudensity; + u16 minlen; + u8 rc, flags, rix; + int width, half_gi, ndelim, mindelim; + + /* Select standard number of delimiters based on frame length alone */ + ndelim = ATH_AGGR_GET_NDELIM(frmlen); + + /* + * If encryption is enabled, hardware requires some more padding between + * subframes. + * TODO - this could be improved to be dependent on the rate. + * The hardware can keep up at lower rates, but not higher rates. + */ + if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) + ndelim += ATH_AGGR_ENCRYPTDELIM; + + /* + * Convert desired mpdu density from microseconds to bytes based + * on highest rate in rate series (i.e. first rate) to determine + * required minimum length for subframe. Take into account + * whether the high rate is 20 or 40 MHz and half or full GI. 
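+ * + * As a rough worked example (assuming the usual 4 us symbol time, the + * 260 bits-per-symbol entry for MCS 7 at 20 MHz and 4-byte delimiters), + * an 8 us density needs 2 symbols, i.e. minlen = (2 * 260) / 8 = 65 + * bytes; a 40-byte subframe would then get mindelim = (65 - 40) / 4 = 6 + * extra delimiters on top of the standard count.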
+ */ + mpdudensity = sc->sc_ht_info.mpdudensity; + + /* + * If there is no mpdu density restriction, no further calculation + * is needed. + */ + if (mpdudensity == 0) + return ndelim; + + rix = bf->bf_rcs[0].rix; + flags = bf->bf_rcs[0].flags; + rc = rt->info[rix].rateCode; + width = (flags & ATH_RC_CW40_FLAG) ? 1 : 0; + half_gi = (flags & ATH_RC_SGI_FLAG) ? 1 : 0; + + if (half_gi) + nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity); + else + nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity); + + if (nsymbols == 0) + nsymbols = 1; + + nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; + minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; + + /* Is frame shorter than required minimum length? */ + if (frmlen < minlen) { + /* Get the minimum number of delimiters required. */ + mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; + ndelim = max(mindelim, ndelim); + } + + return ndelim; +} + +/* + * For aggregation from software buffer queue. + * NB: must be called with txq lock held + */ + +static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, + struct ath_atx_tid *tid, + struct list_head *bf_q, + struct ath_buf **bf_last, + struct aggr_rifs_param *param, + int *prev_frames) +{ +#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) + struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL; + struct list_head bf_head; + int rl = 0, nframes = 0, ndelim; + u16 aggr_limit = 0, al = 0, bpad = 0, + al_delta, h_baw = tid->baw_size / 2; + enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; + int prev_al = 0, is_ds_rate = 0; + INIT_LIST_HEAD(&bf_head); + + BUG_ON(list_empty(&tid->buf_q)); + + bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); + + do { + bf = list_first_entry(&tid->buf_q, struct ath_buf, list); + + /* + * do not step over block-ack window + */ + if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) { + status = ATH_AGGR_BAW_CLOSED; + break; + } + + if (!rl) { + aggr_limit = ath_lookup_rate(sc, bf); + rl = 1; + /* + * Is rate dual stream + */ + is_ds_rate = + (bf->bf_rcs[0].flags & ATH_RC_DS_FLAG) ? 1 : 0; + } + + /* + * do not exceed aggregation limit + */ + al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen; + + if (nframes && (aggr_limit < + (al + bpad + al_delta + prev_al))) { + status = ATH_AGGR_LIMITED; + break; + } + + /* + * do not exceed subframe limit + */ + if ((nframes + *prev_frames) >= + min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { + status = ATH_AGGR_LIMITED; + break; + } + + /* + * add padding for previous frame to aggregation length + */ + al += bpad + al_delta; + + /* + * Get the delimiters needed to meet the MPDU + * density for this node. 
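+ * The rate comes from the first subframe's rate series, while the + * length is that of the current subframe, since the whole aggregate + * is transmitted with a single rate series.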
+ */ + ndelim = ath_compute_num_delims(sc, bf_first, bf->bf_frmlen); + + bpad = PADBYTES(al_delta) + (ndelim << 2); + + bf->bf_next = NULL; + bf->bf_lastfrm->bf_desc->ds_link = 0; + + /* + * this packet is part of an aggregate + * - remove all descriptors belonging to this frame from + * software queue + * - add it to block ack window + * - set up descriptors for aggregation + */ + list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); + ath_tx_addto_baw(sc, tid, bf); + + list_for_each_entry(tbf, &bf_head, list) { + ath9k_hw_set11n_aggr_middle(sc->sc_ah, + tbf->bf_desc, ndelim); + } + + /* + * link buffers of this frame to the aggregate + */ + list_splice_tail_init(&bf_head, bf_q); + nframes++; + + if (bf_prev) { + bf_prev->bf_next = bf; + bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr; + } + bf_prev = bf; + +#ifdef AGGR_NOSHORT + /* + * terminate aggregation on a small packet boundary + */ + if (bf->bf_frmlen < ATH_AGGR_MINPLEN) { + status = ATH_AGGR_SHORTPKT; + break; + } +#endif + } while (!list_empty(&tid->buf_q)); + + bf_first->bf_al = al; + bf_first->bf_nframes = nframes; + *bf_last = bf_prev; + return status; +#undef PADBYTES +} + +/* + * process pending frames possibly doing a-mpdu aggregation + * NB: must be called with txq lock held + */ + +static void ath_tx_sched_aggr(struct ath_softc *sc, + struct ath_txq *txq, struct ath_atx_tid *tid) +{ + struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL; + enum ATH_AGGR_STATUS status; + struct list_head bf_q; + struct aggr_rifs_param param = {0, 0, 0, 0, NULL}; + int prev_frames = 0; + + do { + if (list_empty(&tid->buf_q)) + return; + + INIT_LIST_HEAD(&bf_q); + + status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, ¶m, + &prev_frames); + + /* + * no frames picked up to be aggregated; block-ack + * window is not open + */ + if (list_empty(&bf_q)) + break; + + bf = list_first_entry(&bf_q, struct ath_buf, list); + bf_last = list_entry(bf_q.prev, struct ath_buf, list); + bf->bf_lastbf = bf_last; + + /* + * if only one frame, send as non-aggregate + */ + if (bf->bf_nframes == 1) { + ASSERT(bf->bf_lastfrm == bf_last); + + bf->bf_isaggr = 0; + /* + * clear aggr bits for every descriptor + * XXX TODO: is there a way to optimize it? 
+ */ + list_for_each_entry(tbf, &bf_q, list) { + ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc); + } + + ath_buf_set_rate(sc, bf); + ath_tx_txqaddbuf(sc, txq, &bf_q); + continue; + } + + /* + * setup first desc with rate and aggr info + */ + bf->bf_isaggr = 1; + ath_buf_set_rate(sc, bf); + ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); + + /* + * anchor last frame of aggregate correctly + */ + ASSERT(bf_lastaggr); + ASSERT(bf_lastaggr->bf_lastfrm == bf_last); + tbf = bf_lastaggr; + ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc); + + /* XXX: We don't enter into this loop, consider removing this */ + while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) { + tbf = list_entry(tbf->list.next, struct ath_buf, list); + ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc); + } + + txq->axq_aggr_depth++; + + /* + * Normal aggregate, queue to hardware + */ + ath_tx_txqaddbuf(sc, txq, &bf_q); + + } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH && + status != ATH_AGGR_BAW_CLOSED); +} + +/* Called with txq lock held */ + +static void ath_tid_drain(struct ath_softc *sc, + struct ath_txq *txq, + struct ath_atx_tid *tid, + bool bh_flag) +{ + struct ath_buf *bf; + struct list_head bf_head; + INIT_LIST_HEAD(&bf_head); + + for (;;) { + if (list_empty(&tid->buf_q)) + break; + bf = list_first_entry(&tid->buf_q, struct ath_buf, list); + + list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); + + /* update baw for software retried frame */ + if (bf->bf_isretried) + ath_tx_update_baw(sc, tid, bf->bf_seqno); + + /* + * do not indicate packets while holding txq spinlock. + * unlock is intentional here + */ + if (likely(bh_flag)) + spin_unlock_bh(&txq->axq_lock); + else + spin_unlock(&txq->axq_lock); + + /* complete this sub-frame */ + ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); + + if (likely(bh_flag)) + spin_lock_bh(&txq->axq_lock); + else + spin_lock(&txq->axq_lock); + } + + /* + * TODO: For frame(s) that are in the retry state, we will reuse the + * sequence number(s) without setting the retry bit. The + * alternative is to give up on these and BAR the receiver's window + * forward. 
+ */ + tid->seq_next = tid->seq_start; + tid->baw_tail = tid->baw_head; +} + +/* + * Drain all pending buffers + * NB: must be called with txq lock held + */ + +static void ath_txq_drain_pending_buffers(struct ath_softc *sc, + struct ath_txq *txq, + bool bh_flag) +{ + struct ath_atx_ac *ac, *ac_tmp; + struct ath_atx_tid *tid, *tid_tmp; + + list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { + list_del(&ac->list); + ac->sched = false; + list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { + list_del(&tid->list); + tid->sched = false; + ath_tid_drain(sc, txq, tid, bh_flag); + } + } +} + +static int ath_tx_start_dma(struct ath_softc *sc, + struct sk_buff *skb, + struct scatterlist *sg, + u32 n_sg, + struct ath_tx_control *txctl) +{ + struct ath_node *an = txctl->an; + struct ath_buf *bf = NULL; + struct list_head bf_head; + struct ath_desc *ds; + struct ath_hal *ah = sc->sc_ah; + struct ath_txq *txq = &sc->sc_txq[txctl->qnum]; + struct ath_tx_info_priv *tx_info_priv; + struct ath_rc_series *rcs; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); + __le16 fc = hdr->frame_control; + + /* For each sglist entry, allocate an ath_buf for DMA */ + INIT_LIST_HEAD(&bf_head); + spin_lock_bh(&sc->sc_txbuflock); + if (unlikely(list_empty(&sc->sc_txbuf))) { + spin_unlock_bh(&sc->sc_txbuflock); + return -ENOMEM; + } + + bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list); + list_del(&bf->list); + spin_unlock_bh(&sc->sc_txbuflock); + + list_add_tail(&bf->list, &bf_head); + + /* set up this buffer */ + ATH_TXBUF_RESET(bf); + bf->bf_frmlen = txctl->frmlen; + bf->bf_isdata = ieee80211_is_data(fc); + bf->bf_isbar = ieee80211_is_back_req(fc); + bf->bf_ispspoll = ieee80211_is_pspoll(fc); + bf->bf_flags = txctl->flags; + bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT; + bf->bf_keytype = txctl->keytype; + tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0]; + rcs = tx_info_priv->rcs; + bf->bf_rcs[0] = rcs[0]; + bf->bf_rcs[1] = rcs[1]; + bf->bf_rcs[2] = rcs[2]; + bf->bf_rcs[3] = rcs[3]; + bf->bf_node = an; + bf->bf_mpdu = skb; + bf->bf_buf_addr = sg_dma_address(sg); + + /* setup descriptor */ + ds = bf->bf_desc; + ds->ds_link = 0; + ds->ds_data = bf->bf_buf_addr; + + /* + * Save the DMA context in the first ath_buf + */ + copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext), + get_dma_mem_context(txctl, dmacontext)); + + /* + * Formulate first tx descriptor with tx controls. + */ + ath9k_hw_set11n_txdesc(ah, + ds, + bf->bf_frmlen, /* frame length */ + txctl->atype, /* Atheros packet type */ + min(txctl->txpower, (u16)60), /* txpower */ + txctl->keyix, /* key cache index */ + txctl->keytype, /* key type */ + txctl->flags); /* flags */ + ath9k_hw_filltxdesc(ah, + ds, + sg_dma_len(sg), /* segment length */ + true, /* first segment */ + (n_sg == 1) ? true : false, /* last segment */ + ds); /* first descriptor */ + + bf->bf_lastfrm = bf; + bf->bf_ht = txctl->ht; + + spin_lock_bh(&txq->axq_lock); + + if (txctl->ht && sc->sc_txaggr) { + struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno); + if (ath_aggr_query(sc, an, txctl->tidno)) { + /* + * Try aggregation if it's a unicast data frame + * and the destination is HT capable. + */ + ath_tx_send_ampdu(sc, txq, tid, &bf_head, txctl); + } else { + /* + * Send this frame as regular when ADDBA exchange + * is neither complete nor pending. 
+ */ + ath_tx_send_normal(sc, txq, tid, &bf_head); + } + } else { + bf->bf_lastbf = bf; + bf->bf_nframes = 1; + ath_buf_set_rate(sc, bf); + + if (ieee80211_is_back_req(fc)) { + /* This is required for resuming tid + * during BAR completion */ + bf->bf_tidno = txctl->tidno; + } + + if (is_multicast_ether_addr(hdr->addr1)) { + struct ath_vap *avp = sc->sc_vaps[txctl->if_id]; + + /* + * When servicing one or more stations in power-save + * mode (or) if there is some mcast data waiting on + * mcast queue (to prevent out of order delivery of + * mcast,bcast packets) multicast frames must be + * buffered until after the beacon. We use the private + * mcast queue for that. + */ + /* XXX? more bit in 802.11 frame header */ + spin_lock_bh(&avp->av_mcastq.axq_lock); + if (txctl->ps || avp->av_mcastq.axq_depth) + ath_tx_mcastqaddbuf(sc, + &avp->av_mcastq, &bf_head); + else + ath_tx_txqaddbuf(sc, txq, &bf_head); + spin_unlock_bh(&avp->av_mcastq.axq_lock); + } else + ath_tx_txqaddbuf(sc, txq, &bf_head); + } + spin_unlock_bh(&txq->axq_lock); + return 0; +} + +static void xmit_map_sg(struct ath_softc *sc, + struct sk_buff *skb, + dma_addr_t *pa, + struct ath_tx_control *txctl) +{ + struct ath_xmit_status tx_status; + struct ath_atx_tid *tid; + struct scatterlist sg; + + *pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + + /* setup S/G list */ + memset(&sg, 0, sizeof(struct scatterlist)); + sg_dma_address(&sg) = *pa; + sg_dma_len(&sg) = skb->len; + + if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) { + /* + * We have to do drop frame here. + */ + pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE); + + tx_status.retries = 0; + tx_status.flags = ATH_TX_ERROR; + + if (txctl->ht && sc->sc_txaggr) { + /* Reclaim the seqno. */ + tid = ATH_AN_2_TID((struct ath_node *) + txctl->an, txctl->tidno); + DECR(tid->seq_next, IEEE80211_SEQ_MAX); + } + ath_tx_complete(sc, skb, &tx_status, txctl->an); + } +} + +/* Initialize TX queue and h/w */ + +int ath_tx_init(struct ath_softc *sc, int nbufs) +{ + int error = 0; + + do { + spin_lock_init(&sc->sc_txbuflock); + + /* Setup tx descriptors */ + error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, + "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC); + if (error != 0) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: failed to allocate tx descriptors: %d\n", + __func__, error); + break; + } + + /* XXX allocate beacon state together with vap */ + error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, + "beacon", ATH_BCBUF, 1); + if (error != 0) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: failed to allocate " + "beacon descripotrs: %d\n", + __func__, error); + break; + } + + } while (0); + + if (error != 0) + ath_tx_cleanup(sc); + + return error; +} + +/* Reclaim all tx queue resources */ + +int ath_tx_cleanup(struct ath_softc *sc) +{ + /* cleanup beacon descriptors */ + if (sc->sc_bdma.dd_desc_len != 0) + ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); + + /* cleanup tx descriptors */ + if (sc->sc_txdma.dd_desc_len != 0) + ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); + + return 0; +} + +/* Setup a h/w transmit queue */ + +struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) +{ + struct ath_hal *ah = sc->sc_ah; + struct ath9k_tx_queue_info qi; + int qnum; + + memzero(&qi, sizeof(qi)); + qi.tqi_subtype = subtype; + qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; + qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; + qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; + qi.tqi_physCompBuf = 0; + + /* + * Enable interrupts only for EOL and DESC conditions. 
+ * We mark tx descriptors to receive a DESC interrupt + * when a tx queue gets deep; otherwise waiting for the + * EOL to reap descriptors. Note that this is done to + * reduce interrupt load and this only defers reaping + * descriptors, never transmitting frames. Aside from + * reducing interrupts this also permits more concurrency. + * The only potential downside is if the tx queue backs + * up in which case the top half of the kernel may backup + * due to a lack of tx descriptors. + * + * The UAPSD queue is an exception, since we take a desc- + * based intr on the EOSP frames. + */ + if (qtype == ATH9K_TX_QUEUE_UAPSD) + qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; + else + qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | + TXQ_FLAG_TXDESCINT_ENABLE; + qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); + if (qnum == -1) { + /* + * NB: don't print a message, this happens + * normally on parts with too few tx queues + */ + return NULL; + } + if (qnum >= ARRAY_SIZE(sc->sc_txq)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: hal qnum %u out of range, max %u!\n", + __func__, qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)); + ath9k_hw_releasetxqueue(ah, qnum); + return NULL; + } + if (!ATH_TXQ_SETUP(sc, qnum)) { + struct ath_txq *txq = &sc->sc_txq[qnum]; + + txq->axq_qnum = qnum; + txq->axq_link = NULL; + INIT_LIST_HEAD(&txq->axq_q); + INIT_LIST_HEAD(&txq->axq_acq); + spin_lock_init(&txq->axq_lock); + txq->axq_depth = 0; + txq->axq_aggr_depth = 0; + txq->axq_totalqueued = 0; + txq->axq_intrcnt = 0; + txq->axq_linkbuf = NULL; + sc->sc_txqsetup |= 1<<qnum; + } + return &sc->sc_txq[qnum]; +} + +/* Reclaim resources for a setup queue */ + +void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) +{ + ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); + sc->sc_txqsetup &= ~(1<<txq->axq_qnum); +} + +/* + * Setup a hardware data transmit queue for the specified + * access control. The hal may not support all requested + * queues in which case it will return a reference to a + * previously setup queue. We record the mapping from ac's + * to h/w queues for use by ath_tx_start and also track + * the set of h/w queues being used to optimize work in the + * transmit interrupt handler and related routines. + */ + +int ath_tx_setup(struct ath_softc *sc, int haltype) +{ + struct ath_txq *txq; + + if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: HAL AC %u out of range, max %zu!\n", + __func__, haltype, ARRAY_SIZE(sc->sc_haltype2q)); + return 0; + } + txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); + if (txq != NULL) { + sc->sc_haltype2q[haltype] = txq->axq_qnum; + return 1; + } else + return 0; +} + +int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype) +{ + int qnum; + + switch (qtype) { + case ATH9K_TX_QUEUE_DATA: + if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: HAL AC %u out of range, max %zu!\n", + __func__, + haltype, ARRAY_SIZE(sc->sc_haltype2q)); + return -1; + } + qnum = sc->sc_haltype2q[haltype]; + break; + case ATH9K_TX_QUEUE_BEACON: + qnum = sc->sc_bhalq; + break; + case ATH9K_TX_QUEUE_CAB: + qnum = sc->sc_cabq->axq_qnum; + break; + default: + qnum = -1; + } + return qnum; +} + +/* Update parameters for a transmit queue */ + +int ath_txq_update(struct ath_softc *sc, int qnum, + struct ath9k_tx_queue_info *qinfo) +{ + struct ath_hal *ah = sc->sc_ah; + int error = 0; + struct ath9k_tx_queue_info qi; + + if (qnum == sc->sc_bhalq) { + /* + * XXX: for beacon queue, we just save the parameter. 
+ * It will be picked up by ath_beaconq_config when + * it's necessary. + */ + sc->sc_beacon_qi = *qinfo; + return 0; + } + + ASSERT(sc->sc_txq[qnum].axq_qnum == qnum); + + ath9k_hw_get_txq_props(ah, qnum, &qi); + qi.tqi_aifs = qinfo->tqi_aifs; + qi.tqi_cwmin = qinfo->tqi_cwmin; + qi.tqi_cwmax = qinfo->tqi_cwmax; + qi.tqi_burstTime = qinfo->tqi_burstTime; + qi.tqi_readyTime = qinfo->tqi_readyTime; + + if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { + DPRINTF(sc, ATH_DBG_FATAL, + "%s: unable to update hardware queue %u!\n", + __func__, qnum); + error = -EIO; + } else { + ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */ + } + + return error; +} + +int ath_cabq_update(struct ath_softc *sc) +{ + struct ath9k_tx_queue_info qi; + int qnum = sc->sc_cabq->axq_qnum; + struct ath_beacon_config conf; + + ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); + /* + * Ensure the readytime % is within the bounds. + */ + if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND) + sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND; + else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND) + sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND; + + ath_get_beaconconfig(sc, ATH_IF_ID_ANY, &conf); + qi.tqi_readyTime = + (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100; + ath_txq_update(sc, qnum, &qi); + + return 0; +} + +int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb) +{ + struct ath_tx_control txctl; + int error = 0; + + error = ath_tx_prepare(sc, skb, &txctl); + if (error == 0) + /* + * Start DMA mapping. + * ath_tx_start_dma() will be called either synchronously + * or asynchrounsly once DMA is complete. + */ + xmit_map_sg(sc, skb, + get_dma_mem_context(&txctl, dmacontext), + &txctl); + else + ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE); + + /* failed packets will be dropped by the caller */ + return error; +} + +/* Deferred processing of transmit interrupt */ + +void ath_tx_tasklet(struct ath_softc *sc) +{ + u64 tsf = ath9k_hw_gettsf64(sc->sc_ah); + int i, nacked = 0; + u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1); + + ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask); + + /* + * Process each active queue. 
+ */ + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) + nacked += ath_tx_processq(sc, &sc->sc_txq[i]); + } + if (nacked) + sc->sc_lastrx = tsf; +} + +void ath_tx_draintxq(struct ath_softc *sc, + struct ath_txq *txq, bool retry_tx) +{ + struct ath_buf *bf, *lastbf; + struct list_head bf_head; + + INIT_LIST_HEAD(&bf_head); + + /* + * NB: this assumes output has been stopped and + * we do not need to block ath_tx_tasklet + */ + for (;;) { + spin_lock_bh(&txq->axq_lock); + + if (list_empty(&txq->axq_q)) { + txq->axq_link = NULL; + txq->axq_linkbuf = NULL; + spin_unlock_bh(&txq->axq_lock); + break; + } + + bf = list_first_entry(&txq->axq_q, struct ath_buf, list); + + if (bf->bf_status & ATH_BUFSTATUS_STALE) { + list_del(&bf->list); + spin_unlock_bh(&txq->axq_lock); + + spin_lock_bh(&sc->sc_txbuflock); + list_add_tail(&bf->list, &sc->sc_txbuf); + spin_unlock_bh(&sc->sc_txbuflock); + continue; + } + + lastbf = bf->bf_lastbf; + if (!retry_tx) + lastbf->bf_desc->ds_txstat.ts_flags = + ATH9K_TX_SW_ABORTED; + + /* remove ath_buf's of the same mpdu from txq */ + list_cut_position(&bf_head, &txq->axq_q, &lastbf->list); + txq->axq_depth--; + + spin_unlock_bh(&txq->axq_lock); + + if (bf->bf_isampdu) + ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0); + else + ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); + } + + /* flush any pending frames if aggregation is enabled */ + if (sc->sc_txaggr) { + if (!retry_tx) { + spin_lock_bh(&txq->axq_lock); + ath_txq_drain_pending_buffers(sc, txq, + ATH9K_BH_STATUS_CHANGE); + spin_unlock_bh(&txq->axq_lock); + } + } +} + +/* Drain the transmit queues and reclaim resources */ + +void ath_draintxq(struct ath_softc *sc, bool retry_tx) +{ + /* stop beacon queue. The beacon will be freed when + * we go to INIT state */ + if (!sc->sc_invalid) { + (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); + DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__, + ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq)); + } + + ath_drain_txdataq(sc, retry_tx); +} + +u32 ath_txq_depth(struct ath_softc *sc, int qnum) +{ + return sc->sc_txq[qnum].axq_depth; +} + +u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum) +{ + return sc->sc_txq[qnum].axq_aggr_depth; +} + +/* Check if an ADDBA is required. A valid node must be passed. 
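+ * Returns AGGR_REQUIRED when a fresh ADDBA exchange should be started, + * AGGR_EXCHANGE_DONE once aggregation can be used, and one of the other + * AGGR_* values while an exchange or teardown is still pending or + * aggregation cannot be used.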
*/ +enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc, + struct ath_node *an, + u8 tidno) +{ + struct ath_atx_tid *txtid; + DECLARE_MAC_BUF(mac); + + if (!sc->sc_txaggr) + return AGGR_NOT_REQUIRED; + + /* ADDBA exchange must be completed before sending aggregates */ + txtid = ATH_AN_2_TID(an, tidno); + + if (txtid->addba_exchangecomplete) + return AGGR_EXCHANGE_DONE; + + if (txtid->cleanup_inprogress) + return AGGR_CLEANUP_PROGRESS; + + if (txtid->addba_exchangeinprogress) + return AGGR_EXCHANGE_PROGRESS; + + if (!txtid->addba_exchangecomplete) { + if (!txtid->addba_exchangeinprogress && + (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) { + txtid->addba_exchangeattempts++; + return AGGR_REQUIRED; + } + } + + return AGGR_NOT_REQUIRED; +} + +/* Start TX aggregation */ + +int ath_tx_aggr_start(struct ath_softc *sc, + const u8 *addr, + u16 tid, + u16 *ssn) +{ + struct ath_atx_tid *txtid; + struct ath_node *an; + + spin_lock_bh(&sc->node_lock); + an = ath_node_find(sc, (u8 *) addr); + spin_unlock_bh(&sc->node_lock); + + if (!an) { + DPRINTF(sc, ATH_DBG_AGGR, + "%s: Node not found to initialize " + "TX aggregation\n", __func__); + return -1; + } + + if (sc->sc_txaggr) { + txtid = ATH_AN_2_TID(an, tid); + txtid->addba_exchangeinprogress = 1; + ath_tx_pause_tid(sc, txtid); + } + + return 0; +} + +/* Stop tx aggregation */ + +int ath_tx_aggr_stop(struct ath_softc *sc, + const u8 *addr, + u16 tid) +{ + struct ath_node *an; + + spin_lock_bh(&sc->node_lock); + an = ath_node_find(sc, (u8 *) addr); + spin_unlock_bh(&sc->node_lock); + + if (!an) { + DPRINTF(sc, ATH_DBG_AGGR, + "%s: TX aggr stop for non-existent node\n", __func__); + return -1; + } + + ath_tx_aggr_teardown(sc, an, tid); + return 0; +} + +/* + * Performs transmit side cleanup when TID changes from aggregated to + * unaggregated. + * - Pause the TID and mark cleanup in progress + * - Discard all retry frames from the s/w queue. + */ + +void ath_tx_aggr_teardown(struct ath_softc *sc, + struct ath_node *an, u8 tid) +{ + struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); + struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum]; + struct ath_buf *bf; + struct list_head bf_head; + INIT_LIST_HEAD(&bf_head); + + DPRINTF(sc, ATH_DBG_AGGR, "%s: teardown TX aggregation\n", __func__); + + if (txtid->cleanup_inprogress) /* cleanup is in progress */ + return; + + if (!txtid->addba_exchangecomplete) { + txtid->addba_exchangeattempts = 0; + return; + } + + /* TID must be paused first */ + ath_tx_pause_tid(sc, txtid); + + /* drop all software retried frames and mark this TID */ + spin_lock_bh(&txq->axq_lock); + while (!list_empty(&txtid->buf_q)) { + bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); + if (!bf->bf_isretried) { + /* + * NB: it's based on the assumption that + * software retried frame will always stay + * at the head of software queue. 
+ */ + break; + } + list_cut_position(&bf_head, + &txtid->buf_q, &bf->bf_lastfrm->list); + ath_tx_update_baw(sc, txtid, bf->bf_seqno); + + /* complete this sub-frame */ + ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); + } + + if (txtid->baw_head != txtid->baw_tail) { + spin_unlock_bh(&txq->axq_lock); + txtid->cleanup_inprogress = true; + } else { + txtid->addba_exchangecomplete = 0; + txtid->addba_exchangeattempts = 0; + spin_unlock_bh(&txq->axq_lock); + ath_tx_flush_tid(sc, txtid); + } +} + +/* + * Tx scheduling logic + * NB: must be called with txq lock held + */ + +void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) +{ + struct ath_atx_ac *ac; + struct ath_atx_tid *tid; + + /* nothing to schedule */ + if (list_empty(&txq->axq_acq)) + return; + /* + * get the first node/ac pair on the queue + */ + ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); + list_del(&ac->list); + ac->sched = false; + + /* + * process a single tid per destination + */ + do { + /* nothing to schedule */ + if (list_empty(&ac->tid_q)) + return; + + tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); + list_del(&tid->list); + tid->sched = false; + + if (tid->paused) /* check next tid to keep h/w busy */ + continue; + + if (!(tid->an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) || + ((txq->axq_depth % 2) == 0)) { + ath_tx_sched_aggr(sc, txq, tid); + } + + /* + * add tid to round-robin queue if more frames + * are pending for the tid + */ + if (!list_empty(&tid->buf_q)) + ath_tx_queue_tid(txq, tid); + + /* only schedule one TID at a time */ + break; + } while (!list_empty(&ac->tid_q)); + + /* + * schedule AC if more TIDs need processing + */ + if (!list_empty(&ac->tid_q)) { + /* + * add dest ac to txq if not already added + */ + if (!ac->sched) { + ac->sched = true; + list_add_tail(&ac->list, &txq->axq_acq); + } + } +} + +/* Initialize per-node transmit state */ + +void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) +{ + if (sc->sc_txaggr) { + struct ath_atx_tid *tid; + struct ath_atx_ac *ac; + int tidno, acno; + + sc->sc_ht_info.maxampdu = ATH_AMPDU_LIMIT_DEFAULT; + + /* + * Init per tid tx state + */ + for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno]; + tidno < WME_NUM_TID; + tidno++, tid++) { + tid->an = an; + tid->tidno = tidno; + tid->seq_start = tid->seq_next = 0; + tid->baw_size = WME_MAX_BA; + tid->baw_head = tid->baw_tail = 0; + tid->sched = false; + tid->paused = false; + tid->cleanup_inprogress = false; + INIT_LIST_HEAD(&tid->buf_q); + + acno = TID_TO_WME_AC(tidno); + tid->ac = &an->an_aggr.tx.ac[acno]; + + /* ADDBA state */ + tid->addba_exchangecomplete = 0; + tid->addba_exchangeinprogress = 0; + tid->addba_exchangeattempts = 0; + } + + /* + * Init per ac tx state + */ + for (acno = 0, ac = &an->an_aggr.tx.ac[acno]; + acno < WME_NUM_AC; acno++, ac++) { + ac->sched = false; + INIT_LIST_HEAD(&ac->tid_q); + + switch (acno) { + case WME_AC_BE: + ac->qnum = ath_tx_get_qnum(sc, + ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); + break; + case WME_AC_BK: + ac->qnum = ath_tx_get_qnum(sc, + ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK); + break; + case WME_AC_VI: + ac->qnum = ath_tx_get_qnum(sc, + ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI); + break; + case WME_AC_VO: + ac->qnum = ath_tx_get_qnum(sc, + ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO); + break; + } + } + } +} + +/* Cleanupthe pending buffers for the node. 
*/ + +void ath_tx_node_cleanup(struct ath_softc *sc, + struct ath_node *an, bool bh_flag) +{ + int i; + struct ath_atx_ac *ac, *ac_tmp; + struct ath_atx_tid *tid, *tid_tmp; + struct ath_txq *txq; + for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { + if (ATH_TXQ_SETUP(sc, i)) { + txq = &sc->sc_txq[i]; + + if (likely(bh_flag)) + spin_lock_bh(&txq->axq_lock); + else + spin_lock(&txq->axq_lock); + + list_for_each_entry_safe(ac, + ac_tmp, &txq->axq_acq, list) { + tid = list_first_entry(&ac->tid_q, + struct ath_atx_tid, list); + if (tid && tid->an != an) + continue; + list_del(&ac->list); + ac->sched = false; + + list_for_each_entry_safe(tid, + tid_tmp, &ac->tid_q, list) { + list_del(&tid->list); + tid->sched = false; + ath_tid_drain(sc, txq, tid, bh_flag); + tid->addba_exchangecomplete = 0; + tid->addba_exchangeattempts = 0; + tid->cleanup_inprogress = false; + } + } + + if (likely(bh_flag)) + spin_unlock_bh(&txq->axq_lock); + else + spin_unlock(&txq->axq_lock); + } + } +} + +/* Cleanup per node transmit state */ + +void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an) +{ + if (sc->sc_txaggr) { + struct ath_atx_tid *tid; + int tidno, i; + + /* Init per tid rx state */ + for (tidno = 0, tid = &an->an_aggr.tx.tid[tidno]; + tidno < WME_NUM_TID; + tidno++, tid++) { + + for (i = 0; i < ATH_TID_MAX_BUFS; i++) + ASSERT(tid->tx_buf[i] == NULL); + } + } +} diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index b8407d5..ed09e48 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -2719,7 +2719,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct iwl_priv *priv = hw->priv; - IWL_DEBUG_MAC80211("enter\n"); + IWL_DEBUG_MACDUMP("enter\n"); if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { IWL_DEBUG_MAC80211("leave - monitor\n"); @@ -2733,7 +2733,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) if (iwl_tx_skb(priv, skb)) dev_kfree_skb_any(skb); - IWL_DEBUG_MAC80211("leave\n"); + IWL_DEBUG_MACDUMP("leave\n"); return 0; } diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h index b4ffd33..d2daa17 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debug.h +++ b/drivers/net/wireless/iwlwifi/iwl-debug.h @@ -114,7 +114,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) #define IWL_DL_MAC80211 (1 << 1) #define IWL_DL_HOST_COMMAND (1 << 2) #define IWL_DL_STATE (1 << 3) - +#define IWL_DL_MACDUMP (1 << 4) #define IWL_DL_RADIO (1 << 7) #define IWL_DL_POWER (1 << 8) #define IWL_DL_TEMP (1 << 9) @@ -154,6 +154,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv) #define IWL_DEBUG_INFO(f, a...) IWL_DEBUG(IWL_DL_INFO, f, ## a) #define IWL_DEBUG_MAC80211(f, a...) IWL_DEBUG(IWL_DL_MAC80211, f, ## a) +#define IWL_DEBUG_MACDUMP(f, a...) IWL_DEBUG(IWL_DL_MACDUMP, f, ## a) #define IWL_DEBUG_TEMP(f, a...) IWL_DEBUG(IWL_DL_TEMP, f, ## a) #define IWL_DEBUG_SCAN(f, a...) IWL_DEBUG(IWL_DL_SCAN, f, ## a) #define IWL_DEBUG_RX(f, a...) 
IWL_DEBUG(IWL_DL_RX, f, ## a) diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index aa98c76..4108c7c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -764,20 +764,19 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_tfd_frame *tfd; - u32 *control_flags; - int txq_id = skb_get_queue_mapping(skb); - struct iwl_tx_queue *txq = NULL; - struct iwl_queue *q = NULL; + struct iwl_tx_queue *txq; + struct iwl_queue *q; + struct iwl_cmd *out_cmd; + struct iwl_tx_cmd *tx_cmd; + int swq_id, txq_id; dma_addr_t phys_addr; dma_addr_t txcmd_phys; dma_addr_t scratch_phys; - struct iwl_cmd *out_cmd = NULL; - struct iwl_tx_cmd *tx_cmd; u16 len, idx, len_org; u16 seq_number = 0; - u8 id, hdr_len, unicast; - u8 sta_id; __le16 fc; + u8 hdr_len, unicast; + u8 sta_id; u8 wait_write_ptr = 0; u8 tid = 0; u8 *qc = NULL; @@ -802,7 +801,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) } unicast = !is_multicast_ether_addr(hdr->addr1); - id = 0; fc = hdr->frame_control; @@ -840,14 +838,16 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) IWL_DEBUG_TX("station Id %d\n", sta_id); + swq_id = skb_get_queue_mapping(skb); + txq_id = swq_id; if (ieee80211_is_data_qos(fc)) { qc = ieee80211_get_qos_ctl(hdr); tid = qc[0] & 0xf; - seq_number = priv->stations[sta_id].tid[tid].seq_number & - IEEE80211_SCTL_SEQ; - hdr->seq_ctrl = cpu_to_le16(seq_number) | - (hdr->seq_ctrl & - __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); + seq_number = priv->stations[sta_id].tid[tid].seq_number; + seq_number &= IEEE80211_SCTL_SEQ; + hdr->seq_ctrl = hdr->seq_ctrl & + __constant_cpu_to_le16(IEEE80211_SCTL_FRAG); + hdr->seq_ctrl |= cpu_to_le16(seq_number); seq_number += 0x10; /* aggregation is on for this <sta,tid> */ if (info->flags & IEEE80211_TX_CTL_AMPDU) @@ -864,7 +864,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) /* Set up first empty TFD within this queue's circular TFD buffer */ tfd = &txq->bd[q->write_ptr]; memset(tfd, 0, sizeof(*tfd)); - control_flags = (u32 *) tfd; idx = get_cmd_index(q, q->write_ptr, 0); /* Set up driver data for this TFD */ @@ -983,8 +982,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) iwl_txq_update_write_ptr(priv, txq); spin_unlock_irqrestore(&priv->lock, flags); } else { - ieee80211_stop_queue(priv->hw, - skb_get_queue_mapping(skb)); + ieee80211_stop_queue(priv->hw, swq_id); } } @@ -1013,13 +1011,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; struct iwl_queue *q = &txq->q; struct iwl_tfd_frame *tfd; - u32 *control_flags; struct iwl_cmd *out_cmd; - u32 idx; - u16 fix_size; dma_addr_t phys_addr; - int len, ret; unsigned long flags; + int len, ret; + u32 idx; + u16 fix_size; cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len); fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); @@ -1045,7 +1042,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) tfd = &txq->bd[q->write_ptr]; memset(tfd, 0, sizeof(*tfd)); - control_flags = (u32 *) tfd; idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE); out_cmd = txq->cmd[idx]; diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c index b047306..1ebcafe 100644 --- a/drivers/net/wireless/orinoco.c +++ b/drivers/net/wireless/orinoco.c @@ 
-1998,13 +1998,6 @@ __orinoco_set_multicast_list(struct net_device *dev) else priv->mc_count = mc_count; } - - /* Since we can set the promiscuous flag when it wasn't asked - for, make sure the net_device knows about it. */ - if (priv->promiscuous) - dev->flags |= IFF_PROMISC; - else - dev->flags &= ~IFF_PROMISC; } /* This must be called from user context, without locks held - use diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h index cac9a51..4801a36 100644 --- a/drivers/net/wireless/p54/p54.h +++ b/drivers/net/wireless/p54/p54.h @@ -52,6 +52,7 @@ struct p54_common { int (*open)(struct ieee80211_hw *dev); void (*stop)(struct ieee80211_hw *dev); int mode; + u16 seqno; struct mutex conf_mutex; u8 mac_addr[ETH_ALEN]; u8 bssid[ETH_ALEN]; diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c index 4da89ea..83cd85e 100644 --- a/drivers/net/wireless/p54/p54common.c +++ b/drivers/net/wireless/p54/p54common.c @@ -553,6 +553,7 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) struct ieee80211_tx_queue_stats *current_queue; struct p54_common *priv = dev->priv; struct p54_control_hdr *hdr; + struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; struct p54_tx_control_allocdata *txhdr; size_t padding, len; u8 rate; @@ -605,6 +606,19 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb) if (padding) txhdr->align[0] = padding; + /* FIXME: The sequence that follows is needed for this driver to + * work with mac80211 since "mac80211: fix TX sequence numbers". + * As with the temporary code in rt2x00, changes will be needed + * to get proper sequence numbers on beacons. In addition, this + * patch places the sequence number in the hardware state, which + * limits us to a single virtual state. + */ + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { + if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) + priv->seqno += 0x10; + ieee80211hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); + ieee80211hdr->seq_ctrl |= cpu_to_le16(priv->seqno); + } /* modifies skb->cb and with it info, so must be last! */ p54_assign_address(dev, skb, hdr, skb->len); @@ -803,8 +817,8 @@ static void p54_set_vdcf(struct ieee80211_hw *dev) if (dev->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) { vdcf->slottime = 9; - vdcf->magic1 = 0x00; - vdcf->magic2 = 0x10; + vdcf->magic1 = 0x10; + vdcf->magic2 = 0x00; } else { vdcf->slottime = 20; vdcf->magic1 = 0x0a; diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index bd422fd..d065073 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -203,23 +203,43 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw, !test_bit(DEVICE_STARTED, &rt2x00dev->flags)) return -ENODEV; - /* - * We don't support mixed combinations of sta and ap virtual - * interfaces. We can only add this interface when the rival - * interface count is 0. - */ - if ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) || - (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count)) - return -ENOBUFS; - - /* - * Check if we exceeded the maximum amount of supported interfaces. 
- */ - if ((conf->type == IEEE80211_IF_TYPE_AP && - rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf) || - (conf->type != IEEE80211_IF_TYPE_AP && - rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf)) - return -ENOBUFS; + switch (conf->type) { + case IEEE80211_IF_TYPE_AP: + /* + * We don't support mixed combinations of + * sta and ap interfaces. + */ + if (rt2x00dev->intf_sta_count) + return -ENOBUFS; + + /* + * Check if we exceeded the maximum amount + * of supported interfaces. + */ + if (rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf) + return -ENOBUFS; + + break; + case IEEE80211_IF_TYPE_STA: + case IEEE80211_IF_TYPE_IBSS: + /* + * We don't support mixed combinations of + * sta and ap interfaces. + */ + if (rt2x00dev->intf_ap_count) + return -ENOBUFS; + + /* + * Check if we exceeded the maximum amount + * of supported interfaces. + */ + if (rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf) + return -ENOBUFS; + + break; + default: + return -EINVAL; + } /* * Loop through all beacon queues to find a free diff --git a/drivers/net/wireless/wavelan.c b/drivers/net/wireless/wavelan.c index 49ae970..136220b 100644 --- a/drivers/net/wireless/wavelan.c +++ b/drivers/net/wireless/wavelan.c @@ -1409,9 +1409,6 @@ static void wavelan_set_multicast_list(struct net_device * dev) lp->mc_count = 0; wv_82586_reconfig(dev); - - /* Tell the kernel that we are doing a really bad job. */ - dev->flags |= IFF_PROMISC; } } else /* Are there multicast addresses to send? */ diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c index b584c0e..00a3559 100644 --- a/drivers/net/wireless/wavelan_cs.c +++ b/drivers/net/wireless/wavelan_cs.c @@ -1412,9 +1412,6 @@ wavelan_set_multicast_list(struct net_device * dev) lp->mc_count = 0; wv_82593_reconfig(dev); - - /* Tell the kernel that we are doing a really bad job... */ - dev->flags |= IFF_PROMISC; } } else @@ -1433,9 +1430,6 @@ wavelan_set_multicast_list(struct net_device * dev) lp->mc_count = 0; wv_82593_reconfig(dev); - - /* Tell the kernel that we are doing a really bad job... 
*/ - dev->flags |= IFF_ALLMULTI; } } else diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 902bbe7..c749bdb 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -329,7 +329,7 @@ static int xennet_open(struct net_device *dev) } spin_unlock_bh(&np->rx_lock); - xennet_maybe_wake_tx(dev); + netif_start_queue(dev); return 0; } diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 2622b65..3712b92 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -932,7 +932,7 @@ static struct ehci_qh *qh_append_tds ( list_del (&qtd->qtd_list); list_add (&dummy->qtd_list, qtd_list); - __list_splice (qtd_list, qh->qtd_list.prev); + list_splice_tail(qtd_list, &qh->qtd_list); ehci_qtd_init(ehci, qtd, qtd->qtd_dma); qh->dummy = qtd; diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h index fc82446..c30879c 100644 --- a/include/linux/dm9000.h +++ b/include/linux/dm9000.h @@ -27,6 +27,7 @@ struct dm9000_plat_data { unsigned int flags; + unsigned char dev_addr[6]; /* allow replacement IO routines */ diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 8bb5e87..b4b038b 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -27,9 +27,24 @@ struct ethtool_cmd { __u8 autoneg; /* Enable or disable autonegotiation */ __u32 maxtxpkt; /* Tx pkts before generating tx int */ __u32 maxrxpkt; /* Rx pkts before generating rx int */ - __u32 reserved[4]; + __u16 speed_hi; + __u16 reserved2; + __u32 reserved[3]; }; +static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + + ep->speed = (__u16)speed; + ep->speed_hi = (__u16)(speed >> 16); +} + +static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + return (ep->speed_hi << 16) | ep->speed; +} + #define ETHTOOL_BUSINFO_LEN 32 /* these strings are set to whatever the driver author decides... */ struct ethtool_drvinfo { diff --git a/include/linux/list.h b/include/linux/list.h index 453916b..1d109e2 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -214,22 +214,62 @@ static inline int list_is_singular(const struct list_head *head) return !list_empty(head) && (head->next == head->prev); } +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. 
+ * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + static inline void __list_splice(const struct list_head *list, - struct list_head *head) + struct list_head *prev, + struct list_head *next) { struct list_head *first = list->next; struct list_head *last = list->prev; - struct list_head *at = head->next; - first->prev = head; - head->next = first; + first->prev = prev; + prev->next = first; - last->next = at; - at->prev = last; + last->next = next; + next->prev = last; } /** - * list_splice - join two lists + * list_splice - join two lists, this is designed for stacks * @list: the new list to add. * @head: the place to add it in the first list. */ @@ -237,7 +277,19 @@ static inline void list_splice(const struct list_head *list, struct list_head *head) { if (!list_empty(list)) - __list_splice(list, head); + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); } /** @@ -251,7 +303,24 @@ static inline void list_splice_init(struct list_head *list, struct list_head *head) { if (!list_empty(list)) { - __list_splice(list, head); + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists, each list being a queue, and + * reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. 
+ * + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } } diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 8d6e991..cac811e 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -33,7 +33,7 @@ struct request_sock_ops { struct kmem_cache *slab; int (*rtx_syn_ack)(struct sock *sk, struct request_sock *req); - void (*send_ack)(struct sk_buff *skb, + void (*send_ack)(struct sock *sk, struct sk_buff *skb, struct request_sock *req); void (*send_reset)(struct sock *sk, struct sk_buff *skb); diff --git a/include/net/syncppp.h b/include/net/syncppp.h index e43f407..9e306f7 100644 --- a/include/net/syncppp.h +++ b/include/net/syncppp.h @@ -43,8 +43,6 @@ struct sppp u32 pp_rseq; /* remote sequence number */ struct slcp lcp; /* LCP params */ struct sipcp ipcp; /* IPCP params */ - u32 ibytes,obytes; /* Bytes in/out */ - u32 ipkts,opkts; /* Packets in/out */ struct timer_list pp_timer; struct net_device *pp_if; char pp_link_state; /* Link status */ diff --git a/net/core/dev.c b/net/core/dev.c index 01993ad..600bb23 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1939,22 +1939,6 @@ int netif_rx_ni(struct sk_buff *skb) EXPORT_SYMBOL(netif_rx_ni); -static inline struct net_device *skb_bond(struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - - if (dev->master) { - if (skb_bond_should_drop(skb)) { - kfree_skb(skb); - return NULL; - } - skb->dev = dev->master; - } - - return dev; -} - - static void net_tx_action(struct softirq_action *h) { struct softnet_data *sd = &__get_cpu_var(softnet_data); @@ -2181,6 +2165,7 @@ int netif_receive_skb(struct sk_buff *skb) { struct packet_type *ptype, *pt_prev; struct net_device *orig_dev; + struct net_device *null_or_orig; int ret = NET_RX_DROP; __be16 type; @@ -2194,10 +2179,14 @@ int netif_receive_skb(struct sk_buff *skb) if (!skb->iif) skb->iif = skb->dev->ifindex; - orig_dev = skb_bond(skb); - - if (!orig_dev) - return NET_RX_DROP; + null_or_orig = NULL; + orig_dev = skb->dev; + if (orig_dev->master) { + if (skb_bond_should_drop(skb)) + null_or_orig = orig_dev; /* deliver only exact match */ + else + skb->dev = orig_dev->master; + } __get_cpu_var(netdev_rx_stat).total++; @@ -2221,7 +2210,8 @@ int netif_receive_skb(struct sk_buff *skb) #endif list_for_each_entry_rcu(ptype, &ptype_all, list) { - if (!ptype->dev || ptype->dev == skb->dev) { + if (ptype->dev == null_or_orig || ptype->dev == skb->dev || + ptype->dev == orig_dev) { if (pt_prev) ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype; @@ -2246,7 +2236,8 @@ ncls: list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) { if (ptype->type == type && - (!ptype->dev || ptype->dev == skb->dev)) { + (ptype->dev == null_or_orig || ptype->dev == skb->dev || + ptype->dev == orig_dev)) { if (pt_prev) ret = deliver_skb(skb, pt_prev, orig_dev); pt_prev = ptype; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 2498cda..5262364 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -168,7 +168,7 @@ #include <asm/div64.h> /* do_div */ #include <asm/timex.h> -#define VERSION "pktgen v2.69: Packet Generator for packet performance testing.\n" +#define VERSION "pktgen v2.70: Packet Generator for packet performance testing.\n" #define IP_NAME_SZ 32 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ @@ -189,6 +189,7 @@ #define F_FLOW_SEQ 
(1<<11) /* Sequential flows */ #define F_IPSEC_ON (1<<12) /* ipsec on for flows */ #define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ +#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ /* Thread control flag bits */ #define T_TERMINATE (1<<0) @@ -621,6 +622,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v) if (pkt_dev->flags & F_QUEUE_MAP_RND) seq_printf(seq, "QUEUE_MAP_RND "); + if (pkt_dev->flags & F_QUEUE_MAP_CPU) + seq_printf(seq, "QUEUE_MAP_CPU "); + if (pkt_dev->cflows) { if (pkt_dev->flags & F_FLOW_SEQ) seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/ @@ -1134,6 +1138,12 @@ static ssize_t pktgen_if_write(struct file *file, else if (strcmp(f, "!QUEUE_MAP_RND") == 0) pkt_dev->flags &= ~F_QUEUE_MAP_RND; + + else if (strcmp(f, "QUEUE_MAP_CPU") == 0) + pkt_dev->flags |= F_QUEUE_MAP_CPU; + + else if (strcmp(f, "!QUEUE_MAP_CPU") == 0) + pkt_dev->flags &= ~F_QUEUE_MAP_CPU; #ifdef CONFIG_XFRM else if (strcmp(f, "IPSEC") == 0) pkt_dev->flags |= F_IPSEC_ON; @@ -1895,6 +1905,23 @@ static int pktgen_device_event(struct notifier_block *unused, return NOTIFY_DONE; } +static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname) +{ + char b[IFNAMSIZ+5]; + int i = 0; + + for(i=0; ifname[i] != '@'; i++) { + if(i == IFNAMSIZ) + break; + + b[i] = ifname[i]; + } + b[i] = 0; + + return dev_get_by_name(&init_net, b); +} + + /* Associate pktgen_dev with a device. */ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname) @@ -1908,7 +1935,7 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname) pkt_dev->odev = NULL; } - odev = dev_get_by_name(&init_net, ifname); + odev = pktgen_dev_get_by_name(pkt_dev, ifname); if (!odev) { printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname); return -ENODEV; @@ -2129,7 +2156,11 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow) #endif static void set_cur_queue_map(struct pktgen_dev *pkt_dev) { - if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { + + if (pkt_dev->flags & F_QUEUE_MAP_CPU) + pkt_dev->cur_queue_map = smp_processor_id(); + + else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) { __u16 t; if (pkt_dev->flags & F_QUEUE_MAP_RND) { t = random32() % diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 1c2e3ec..b4bc6e09 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -229,7 +229,8 @@ extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); extern int dccp_retransmit_skb(struct sock *sk); extern void dccp_send_ack(struct sock *sk); -extern void dccp_reqsk_send_ack(struct sk_buff *sk, struct request_sock *rsk); +extern void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, + struct request_sock *rsk); extern void dccp_send_sync(struct sock *sk, const u64 seq, const enum dccp_pkt_type pkt_type); diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 66dca5b..b2804e2d 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -296,7 +296,8 @@ int dccp_child_process(struct sock *parent, struct sock *child, EXPORT_SYMBOL_GPL(dccp_child_process); -void dccp_reqsk_send_ack(struct sk_buff *skb, struct request_sock *rsk) +void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, + struct request_sock *rsk) { DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state"); } diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 4e73e57..21515d4 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -575,7 +575,7 @@ static int esp_init_state(struct xfrm_state *x) 
crypto_aead_ivsize(aead); if (x->props.mode == XFRM_MODE_TUNNEL) x->props.header_len += sizeof(struct iphdr); - else if (x->props.mode == XFRM_MODE_BEET) + else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6) x->props.header_len += IPV4_BEET_PHMAXLEN; if (x->encap) { struct xfrm_encap_tmpl *encap = x->encap; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 1bfa078..16fc6f4 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1509,14 +1509,14 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, /* BSD 4.2 compatibility hack :-( */ if (mtu == 0 && - old_mtu >= dst_metric(&rth->u.dst, RTAX_MTU) && + old_mtu >= dst_mtu(&rth->u.dst) && old_mtu >= 68 + (iph->ihl << 2)) old_mtu -= iph->ihl << 2; mtu = guess_mtu(old_mtu); } - if (mtu <= dst_metric(&rth->u.dst, RTAX_MTU)) { - if (mtu < dst_metric(&rth->u.dst, RTAX_MTU)) { + if (mtu <= dst_mtu(&rth->u.dst)) { + if (mtu < dst_mtu(&rth->u.dst)) { dst_confirm(&rth->u.dst); if (mtu < ip_rt_min_pmtu) { mtu = ip_rt_min_pmtu; @@ -1538,7 +1538,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu) { - if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= 68 && + if (dst_mtu(dst) > mtu && mtu >= 68 && !(dst_metric_locked(dst, RTAX_MTU))) { if (mtu < ip_rt_min_pmtu) { mtu = ip_rt_min_pmtu; @@ -1667,7 +1667,7 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag) if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl; - if (dst_metric(&rt->u.dst, RTAX_MTU) > IP_MAX_MTU) + if (dst_mtu(&rt->u.dst) > IP_MAX_MTU) rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU; if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0) rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40, @@ -3223,9 +3223,7 @@ int __init ip_rt_init(void) */ void __init ip_static_sysctl_init(void) { -#ifdef CONFIG_SYSCTL register_sysctl_paths(ipv4_route_path, ipv4_route_table); -#endif } #endif diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 91a8cfd..44c1e93 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -687,14 +687,14 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) inet_twsk_put(tw); } -static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, +static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, 0, - tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr)); + tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr)); } /* diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 204c421..f976fc5 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -609,7 +609,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) { /* Out of window: send ACK and drop. */ if (!(flg & TCP_FLAG_RST)) - req->rsk_ops->send_ack(skb, req); + req->rsk_ops->send_ack(sk, skb, req); if (paws_reject) NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); return NULL; @@ -618,89 +618,87 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, /* In sequence, PAWS is OK. 
*/ if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1)) - req->ts_recent = tmp_opt.rcv_tsval; + req->ts_recent = tmp_opt.rcv_tsval; - if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) { - /* Truncate SYN, it is out of window starting - at tcp_rsk(req)->rcv_isn + 1. */ - flg &= ~TCP_FLAG_SYN; - } + if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) { + /* Truncate SYN, it is out of window starting + at tcp_rsk(req)->rcv_isn + 1. */ + flg &= ~TCP_FLAG_SYN; + } - /* RFC793: "second check the RST bit" and - * "fourth, check the SYN bit" - */ - if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { - TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); - goto embryonic_reset; - } + /* RFC793: "second check the RST bit" and + * "fourth, check the SYN bit" + */ + if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { + TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); + goto embryonic_reset; + } - /* ACK sequence verified above, just make sure ACK is - * set. If ACK not set, just silently drop the packet. - */ - if (!(flg & TCP_FLAG_ACK)) - return NULL; - - /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ - if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && - TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { - inet_rsk(req)->acked = 1; - return NULL; - } + /* ACK sequence verified above, just make sure ACK is + * set. If ACK not set, just silently drop the packet. + */ + if (!(flg & TCP_FLAG_ACK)) + return NULL; - /* OK, ACK is valid, create big socket and - * feed this segment to it. It will repeat all - * the tests. THIS SEGMENT MUST MOVE SOCKET TO - * ESTABLISHED STATE. If it will be dropped after - * socket is created, wait for troubles. - */ - child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, - req, NULL); - if (child == NULL) - goto listen_overflow; + /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ + if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && + TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { + inet_rsk(req)->acked = 1; + return NULL; + } + + /* OK, ACK is valid, create big socket and + * feed this segment to it. It will repeat all + * the tests. THIS SEGMENT MUST MOVE SOCKET TO + * ESTABLISHED STATE. If it will be dropped after + * socket is created, wait for troubles. + */ + child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL); + if (child == NULL) + goto listen_overflow; #ifdef CONFIG_TCP_MD5SIG - else { - /* Copy over the MD5 key from the original socket */ - struct tcp_md5sig_key *key; - struct tcp_sock *tp = tcp_sk(sk); - key = tp->af_specific->md5_lookup(sk, child); - if (key != NULL) { - /* - * We're using one, so create a matching key on the - * newsk structure. If we fail to get memory then we - * end up not copying the key across. Shucks. - */ - char *newkey = kmemdup(key->key, key->keylen, - GFP_ATOMIC); - if (newkey) { - if (!tcp_alloc_md5sig_pool()) - BUG(); - tp->af_specific->md5_add(child, child, - newkey, - key->keylen); - } + else { + /* Copy over the MD5 key from the original socket */ + struct tcp_md5sig_key *key; + struct tcp_sock *tp = tcp_sk(sk); + key = tp->af_specific->md5_lookup(sk, child); + if (key != NULL) { + /* + * We're using one, so create a matching key on the + * newsk structure. If we fail to get memory then we + * end up not copying the key across. Shucks. 
+ */ + char *newkey = kmemdup(key->key, key->keylen, + GFP_ATOMIC); + if (newkey) { + if (!tcp_alloc_md5sig_pool()) + BUG(); + tp->af_specific->md5_add(child, child, newkey, + key->keylen); } } + } #endif - inet_csk_reqsk_queue_unlink(sk, req, prev); - inet_csk_reqsk_queue_removed(sk, req); + inet_csk_reqsk_queue_unlink(sk, req, prev); + inet_csk_reqsk_queue_removed(sk, req); - inet_csk_reqsk_queue_add(sk, req, child); - return child; + inet_csk_reqsk_queue_add(sk, req, child); + return child; - listen_overflow: - if (!sysctl_tcp_abort_on_overflow) { - inet_rsk(req)->acked = 1; - return NULL; - } +listen_overflow: + if (!sysctl_tcp_abort_on_overflow) { + inet_rsk(req)->acked = 1; + return NULL; + } - embryonic_reset: - NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); - if (!(flg & TCP_FLAG_RST)) - req->rsk_ops->send_reset(sk, skb); +embryonic_reset: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); + if (!(flg & TCP_FLAG_RST)) + req->rsk_ops->send_reset(sk, skb); - inet_csk_reqsk_queue_drop(sk, req, prev); - return NULL; + inet_csk_reqsk_queue_drop(sk, req, prev); + return NULL; } /* diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c index 9c798ab..6341818 100644 --- a/net/ipv4/xfrm4_mode_beet.c +++ b/net/ipv4/xfrm4_mode_beet.c @@ -47,8 +47,10 @@ static int xfrm4_beet_output(struct xfrm_state *x, struct sk_buff *skb) if (unlikely(optlen)) hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4); - skb_set_network_header(skb, IPV4_BEET_PHMAXLEN - x->props.header_len - - hdrlen); + skb_set_network_header(skb, -x->props.header_len - + hdrlen + (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph))); + if (x->sel.family != AF_INET6) + skb->network_header += IPV4_BEET_PHMAXLEN; skb->mac_header = skb->network_header + offsetof(struct iphdr, protocol); skb->transport_header = skb->network_header + sizeof(*top_iph); diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index c6bb4c6..b181b08 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -521,6 +521,10 @@ static int esp6_init_state(struct xfrm_state *x) crypto_aead_ivsize(aead); switch (x->props.mode) { case XFRM_MODE_BEET: + if (x->sel.family != AF_INET6) + x->props.header_len += IPV4_BEET_PHMAXLEN + + (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); + break; case XFRM_MODE_TRANSPORT: break; case XFRM_MODE_TUNNEL: diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 86540b2..5a3e87e 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1249,7 +1249,7 @@ install_route: if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; - if (!dst_metric(&rt->u.dst, RTAX_MTU)) + if (!dst_mtu(&rt->u.dst)) rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); if (!dst_metric(&rt->u.dst, RTAX_ADVMSS)) rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 78185a4..5b90b36 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -69,7 +69,8 @@ #include <linux/scatterlist.h> static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb); -static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req); +static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, + struct request_sock *req); static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); @@ -1138,10 +1139,11 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) inet_twsk_put(tw); } -static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req) +static void tcp_v6_reqsk_send_ack(struct sock *sk, 
struct sk_buff *skb, + struct request_sock *req) { tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, - tcp_v6_md5_do_lookup(skb->sk, &ipv6_hdr(skb)->daddr)); + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr)); } diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c index d6ce400..bbd48b1 100644 --- a/net/ipv6/xfrm6_mode_beet.c +++ b/net/ipv6/xfrm6_mode_beet.c @@ -40,16 +40,39 @@ static void xfrm6_beet_make_header(struct sk_buff *skb) static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb) { struct ipv6hdr *top_iph; - - skb_set_network_header(skb, -x->props.header_len); + struct ip_beet_phdr *ph; + struct iphdr *iphv4; + int optlen, hdr_len; + + iphv4 = ip_hdr(skb); + hdr_len = 0; + optlen = XFRM_MODE_SKB_CB(skb)->optlen; + if (unlikely(optlen)) + hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4); + + skb_set_network_header(skb, -x->props.header_len - hdr_len); + if (x->sel.family != AF_INET6) + skb->network_header += IPV4_BEET_PHMAXLEN; skb->mac_header = skb->network_header + offsetof(struct ipv6hdr, nexthdr); skb->transport_header = skb->network_header + sizeof(*top_iph); - __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl); + ph = (struct ip_beet_phdr *)__skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl-hdr_len); xfrm6_beet_make_header(skb); top_iph = ipv6_hdr(skb); + if (unlikely(optlen)) { + + BUG_ON(optlen < 0); + + ph->padlen = 4 - (optlen & 4); + ph->hdrlen = optlen / 8; + ph->nexthdr = top_iph->nexthdr; + if (ph->padlen) + memset(ph + 1, IPOPT_NOP, ph->padlen); + + top_iph->nexthdr = IPPROTO_BEETPH; + } ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 0c02c47..aa5a191 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -245,10 +245,13 @@ static int ieee80211_open(struct net_device *dev) case IEEE80211_IF_TYPE_AP: sdata->bss = &sdata->u.ap; break; + case IEEE80211_IF_TYPE_MESH_POINT: + /* mesh ifaces must set allmulti to forward mcast traffic */ + atomic_inc(&local->iff_allmultis); + break; case IEEE80211_IF_TYPE_STA: case IEEE80211_IF_TYPE_MNTR: case IEEE80211_IF_TYPE_IBSS: - case IEEE80211_IF_TYPE_MESH_POINT: /* no special treatment */ break; case IEEE80211_IF_TYPE_INVALID: @@ -495,6 +498,9 @@ static int ieee80211_stop(struct net_device *dev) netif_addr_unlock_bh(local->mdev); break; case IEEE80211_IF_TYPE_MESH_POINT: + /* allmulti is always set on mesh ifaces */ + atomic_dec(&local->iff_allmultis); + /* fall through */ case IEEE80211_IF_TYPE_STA: case IEEE80211_IF_TYPE_IBSS: sdata->u.sta.state = IEEE80211_DISABLED; diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index 669eafa..7495fbb 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -214,8 +214,7 @@ void ieee80211s_stop(void); void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); /* Mesh paths */ -int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb, - struct net_device *dev); +int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev); void mesh_path_start_discovery(struct net_device *dev); struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev); struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev); @@ -286,6 +285,4 @@ static inline void mesh_path_activate(struct mesh_path *mpath) #define mesh_allocated 0 #endif -#define MESH_PREQ(skb) (skb->cb + 30) - #endif /* IEEE80211S_H */ diff --git a/net/mac80211/mesh_hwmp.c 
b/net/mac80211/mesh_hwmp.c index 7fa149e..08aca44 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -758,29 +758,30 @@ enddiscovery: /** * ieee80211s_lookup_nexthop - put the appropriate next hop on a mesh frame * - * @next_hop: output argument for next hop address - * @skb: frame to be sent + * @skb: 802.11 frame to be sent * @dev: network device the frame will be sent through + * @fwd_frame: true if this frame was originally from a different host * * Returns: 0 if the next hop was found. Nonzero otherwise. If no next hop is * found, the function will start a path discovery and queue the frame so it is * sent when the path is resolved. This means the caller must not free the skb * in this case. */ -int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb, - struct net_device *dev) +int mesh_nexthop_lookup(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sk_buff *skb_to_free = NULL; struct mesh_path *mpath; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + u8 *dst_addr = hdr->addr3; int err = 0; rcu_read_lock(); - mpath = mesh_path_lookup(skb->data, dev); + mpath = mesh_path_lookup(dst_addr, dev); if (!mpath) { - mesh_path_add(skb->data, dev); - mpath = mesh_path_lookup(skb->data, dev); + mesh_path_add(dst_addr, dev); + mpath = mesh_path_lookup(dst_addr, dev); if (!mpath) { dev_kfree_skb(skb); sdata->u.sta.mshstats.dropped_frames_no_route++; @@ -792,13 +793,13 @@ int mesh_nexthop_lookup(u8 *next_hop, struct sk_buff *skb, if (mpath->flags & MESH_PATH_ACTIVE) { if (time_after(jiffies, mpath->exp_time - msecs_to_jiffies(sdata->u.sta.mshcfg.path_refresh_time)) - && skb->pkt_type != PACKET_OTHERHOST + && !memcmp(dev->dev_addr, hdr->addr4, ETH_ALEN) && !(mpath->flags & MESH_PATH_RESOLVING) && !(mpath->flags & MESH_PATH_FIXED)) { mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); } - memcpy(next_hop, mpath->next_hop->addr, + memcpy(hdr->addr1, mpath->next_hop->addr, ETH_ALEN); } else { if (!(mpath->flags & MESH_PATH_RESOLVING)) { diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 5f88a2e..838ee60 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c @@ -388,18 +388,15 @@ void mesh_path_tx_pending(struct mesh_path *mpath) void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct mesh_path *mpath; u32 dsn = 0; - if (skb->pkt_type == PACKET_OTHERHOST) { - struct ieee80211s_hdr *prev_meshhdr; - int mshhdrlen; + if (memcmp(hdr->addr4, dev->dev_addr, ETH_ALEN) != 0) { u8 *ra, *da; - prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb); - mshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr); - da = skb->data; - ra = MESH_PREQ(skb); + da = hdr->addr3; + ra = hdr->addr2; mpath = mesh_path_lookup(da, dev); if (mpath) dsn = ++mpath->dsn; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6d9ae67c..6db8545 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1109,20 +1109,9 @@ ieee80211_data_to_8023(struct ieee80211_rx_data *rx) hdrlen = ieee80211_get_hdrlen(fc); - if (ieee80211_vif_is_mesh(&sdata->vif)) { - int meshhdrlen = ieee80211_get_mesh_hdrlen( + if (ieee80211_vif_is_mesh(&sdata->vif)) + hdrlen += ieee80211_get_mesh_hdrlen( (struct ieee80211s_hdr *) (skb->data + hdrlen)); - /* Copy on cb: - * - mesh header: to be used for mesh forwarding - * decision. 
It will also be used as mesh header template at - * tx.c:ieee80211_subif_start_xmit() if interface - * type is mesh and skb->pkt_type == PACKET_OTHERHOST - * - ta: to be used if a RERR needs to be sent. - */ - memcpy(skb->cb, skb->data + hdrlen, meshhdrlen); - memcpy(MESH_PREQ(skb), hdr->addr2, ETH_ALEN); - hdrlen += meshhdrlen; - } /* convert IEEE 802.11 header + possible LLC headers into Ethernet * header @@ -1269,38 +1258,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) } } - /* Mesh forwarding */ - if (ieee80211_vif_is_mesh(&sdata->vif)) { - u8 *mesh_ttl = &((struct ieee80211s_hdr *)skb->cb)->ttl; - (*mesh_ttl)--; - - if (is_multicast_ether_addr(skb->data)) { - if (*mesh_ttl > 0) { - xmit_skb = skb_copy(skb, GFP_ATOMIC); - if (xmit_skb) - xmit_skb->pkt_type = PACKET_OTHERHOST; - else if (net_ratelimit()) - printk(KERN_DEBUG "%s: failed to clone " - "multicast frame\n", dev->name); - } else - IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta, - dropped_frames_ttl); - } else if (skb->pkt_type != PACKET_OTHERHOST && - compare_ether_addr(dev->dev_addr, skb->data) != 0) { - if (*mesh_ttl == 0) { - IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.sta, - dropped_frames_ttl); - dev_kfree_skb(skb); - skb = NULL; - } else { - xmit_skb = skb; - xmit_skb->pkt_type = PACKET_OTHERHOST; - if (!(dev->flags & IFF_PROMISC)) - skb = NULL; - } - } - } - if (skb) { /* deliver to local stack */ skb->protocol = eth_type_trans(skb, dev); @@ -1431,6 +1388,63 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) } static ieee80211_rx_result debug_noinline +ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) +{ + struct ieee80211_hdr *hdr; + struct ieee80211s_hdr *mesh_hdr; + unsigned int hdrlen; + struct sk_buff *skb = rx->skb, *fwd_skb; + + hdr = (struct ieee80211_hdr *) skb->data; + hdrlen = ieee80211_hdrlen(hdr->frame_control); + mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); + + if (!ieee80211_is_data(hdr->frame_control)) + return RX_CONTINUE; + + if (!mesh_hdr->ttl) + /* illegal frame */ + return RX_DROP_MONITOR; + + if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0) + return RX_CONTINUE; + + mesh_hdr->ttl--; + + if (rx->flags & IEEE80211_RX_RA_MATCH) { + if (!mesh_hdr->ttl) + IEEE80211_IFSTA_MESH_CTR_INC(&rx->sdata->u.sta, + dropped_frames_ttl); + else { + struct ieee80211_hdr *fwd_hdr; + fwd_skb = skb_copy(skb, GFP_ATOMIC); + + if (!fwd_skb && net_ratelimit()) + printk(KERN_DEBUG "%s: failed to clone mesh frame\n", + rx->dev->name); + + fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; + /* + * Save TA to addr1 to send TA a path error if a + * suitable next hop is not found + */ + memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN); + memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN); + fwd_skb->dev = rx->local->mdev; + fwd_skb->iif = rx->dev->ifindex; + dev_queue_xmit(fwd_skb); + } + } + + if (is_multicast_ether_addr(hdr->addr3) || + rx->dev->flags & IFF_PROMISC) + return RX_CONTINUE; + else + return RX_DROP_MONITOR; +} + + +static ieee80211_rx_result debug_noinline ieee80211_rx_h_data(struct ieee80211_rx_data *rx) { struct net_device *dev = rx->dev; @@ -1663,10 +1677,12 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, rx->sdata = sdata; rx->dev = sdata->dev; -#define CALL_RXH(rxh) \ - res = rxh(rx); \ - if (res != RX_CONTINUE) \ - goto rxh_done; +#define CALL_RXH(rxh) \ + do { \ + res = rxh(rx); \ + if (res != RX_CONTINUE) \ + goto rxh_done; \ + } while (0); CALL_RXH(ieee80211_rx_h_passive_scan) CALL_RXH(ieee80211_rx_h_check) @@ -1678,6 +1694,8 @@ static 
void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata, /* must be after MMIC verify so header is counted in MPDU mic */ CALL_RXH(ieee80211_rx_h_remove_qos_control) CALL_RXH(ieee80211_rx_h_amsdu) + if (ieee80211_vif_is_mesh(&sdata->vif)) + CALL_RXH(ieee80211_rx_h_mesh_fwding); CALL_RXH(ieee80211_rx_h_data) CALL_RXH(ieee80211_rx_h_ctrl) CALL_RXH(ieee80211_rx_h_mgmt) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 771ec68..4788f7b 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1301,6 +1301,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct net_device *odev = NULL; struct ieee80211_sub_if_data *osdata; int headroom; @@ -1328,6 +1329,20 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, osdata = IEEE80211_DEV_TO_SUB_IF(odev); + if (ieee80211_vif_is_mesh(&osdata->vif) && + ieee80211_is_data(hdr->frame_control)) { + if (ieee80211_is_data(hdr->frame_control)) { + if (is_multicast_ether_addr(hdr->addr3)) + memcpy(hdr->addr1, hdr->addr3, ETH_ALEN); + else + if (mesh_nexthop_lookup(skb, odev)) + return 0; + if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) + IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.sta, + fwded_frames); + } + } + may_encrypt = !skb->do_not_encrypt; headroom = osdata->local->tx_headroom; @@ -1472,30 +1487,17 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, case IEEE80211_IF_TYPE_MESH_POINT: fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS); /* RA TA DA SA */ - if (is_multicast_ether_addr(skb->data)) - memcpy(hdr.addr1, skb->data, ETH_ALEN); - else if (mesh_nexthop_lookup(hdr.addr1, skb, dev)) - return 0; + memset(hdr.addr1, 0, ETH_ALEN); memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN); memcpy(hdr.addr3, skb->data, ETH_ALEN); memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN); - if (skb->pkt_type == PACKET_OTHERHOST) { - /* Forwarded frame, keep mesh ttl and seqnum */ - struct ieee80211s_hdr *prev_meshhdr; - prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb); - meshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr); - memcpy(&mesh_hdr, prev_meshhdr, meshhdrlen); - sdata->u.sta.mshstats.fwded_frames++; - } else { - if (!sdata->u.sta.mshcfg.dot11MeshTTL) { - /* Do not send frames with mesh_ttl == 0 */ - sdata->u.sta.mshstats.dropped_frames_ttl++; - ret = 0; - goto fail; - } - meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, - sdata); + if (!sdata->u.sta.mshcfg.dot11MeshTTL) { + /* Do not send frames with mesh_ttl == 0 */ + sdata->u.sta.mshstats.dropped_frames_ttl++; + ret = 0; + goto fail; } + meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata); hdrlen = 30; break; #endif @@ -1543,7 +1545,8 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, * Drop unicast frames to unauthorised stations unless they are * EAPOL frames from the local station. 
*/ - if (unlikely(!is_multicast_ether_addr(hdr.addr1) && + if (!ieee80211_vif_is_mesh(&sdata->vif) && + unlikely(!is_multicast_ether_addr(hdr.addr1) && !(sta_flags & WLAN_STA_AUTHORIZED) && !(ethertype == ETH_P_PAE && compare_ether_addr(dev->dev_addr, diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index c519d09..9d1830d 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1032,10 +1032,10 @@ void nf_conntrack_cleanup(void) nf_ct_free_hashtable(nf_conntrack_hash, nf_conntrack_vmalloc, nf_conntrack_htable_size); - nf_conntrack_proto_fini(); - nf_conntrack_helper_fini(); - nf_conntrack_expect_fini(); nf_conntrack_acct_fini(); + nf_conntrack_expect_fini(); + nf_conntrack_helper_fini(); + nf_conntrack_proto_fini(); } struct hlist_head *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced) diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 869ef93..8509db1 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -324,6 +324,7 @@ static int log_invalid_proto_min = 0; static int log_invalid_proto_max = 255; static struct ctl_table_header *nf_ct_sysctl_header; +static struct ctl_table_header *nf_ct_netfilter_header; static ctl_table nf_ct_sysctl_table[] = { { @@ -384,12 +385,6 @@ static ctl_table nf_ct_sysctl_table[] = { static ctl_table nf_ct_netfilter_table[] = { { - .ctl_name = NET_NETFILTER, - .procname = "netfilter", - .mode = 0555, - .child = nf_ct_sysctl_table, - }, - { .ctl_name = NET_NF_CONNTRACK_MAX, .procname = "nf_conntrack_max", .data = &nf_conntrack_max, @@ -409,18 +404,29 @@ EXPORT_SYMBOL_GPL(nf_ct_log_invalid); static int nf_conntrack_standalone_init_sysctl(void) { - nf_ct_sysctl_header = + nf_ct_netfilter_header = register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table); - if (nf_ct_sysctl_header == NULL) { - printk("nf_conntrack: can't register to sysctl.\n"); - return -ENOMEM; - } + if (!nf_ct_netfilter_header) + goto out; + + nf_ct_sysctl_header = + register_sysctl_paths(nf_net_netfilter_sysctl_path, + nf_ct_sysctl_table); + if (!nf_ct_sysctl_header) + goto out_unregister_netfilter; + return 0; +out_unregister_netfilter: + unregister_sysctl_table(nf_ct_netfilter_header); +out: + printk("nf_conntrack: can't register to sysctl.\n"); + return -ENOMEM; } static void nf_conntrack_standalone_fini_sysctl(void) { + unregister_sysctl_table(nf_ct_netfilter_header); unregister_sysctl_table(nf_ct_sysctl_header); } #else diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 4840aff..ba1d121 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -189,7 +189,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) for (i = 0; i < dev->num_tx_queues; i++) { struct netdev_queue *txq = netdev_get_tx_queue(dev, i); - struct Qdisc *q, *txq_root = txq->qdisc; + struct Qdisc *q, *txq_root = txq->qdisc_sleeping; if (!(txq_root->flags & TCQ_F_BUILTIN) && txq_root->handle == handle) @@ -792,8 +792,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, goto err_out3; } } - if (parent && !(sch->flags & TCQ_F_INGRESS)) - list_add_tail(&sch->list, &dev_queue->qdisc->list); + if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS)) + list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list); return sch; } @@ -1236,11 +1236,11 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb) q_idx = 0; dev_queue = netdev_get_tx_queue(dev, 0); - if 
(tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0) + if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0) goto done; dev_queue = &dev->rx_queue; - if (tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0) + if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0) goto done; cont: diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index b210a88..7f07152 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c @@ -57,7 +57,6 @@ #include <linux/vmalloc.h> /* vmalloc, vfree */ #include <asm/uaccess.h> /* copy_to/from_user */ #include <linux/init.h> /* __initfunc et al. */ -#include <net/syncppp.h> #define KMEM_SAFETYZONE 8 @@ -567,9 +566,6 @@ static int wanrouter_device_new_if(struct wan_device *wandev, { wanif_conf_t *cnf; struct net_device *dev = NULL; -#ifdef CONFIG_WANPIPE_MULTPPP - struct ppp_device *pppdev=NULL; -#endif int err; if ((wandev->state == WAN_UNCONFIGURED) || (wandev->new_if == NULL)) @@ -588,25 +584,10 @@ static int wanrouter_device_new_if(struct wan_device *wandev, goto out; if (cnf->config_id == WANCONFIG_MPPP) { -#ifdef CONFIG_WANPIPE_MULTPPP - pppdev = kzalloc(sizeof(struct ppp_device), GFP_KERNEL); - err = -ENOBUFS; - if (pppdev == NULL) - goto out; - pppdev->dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); - if (pppdev->dev == NULL) { - kfree(pppdev); - err = -ENOBUFS; - goto out; - } - err = wandev->new_if(wandev, (struct net_device *)pppdev, cnf); - dev = pppdev->dev; -#else printk(KERN_INFO "%s: Wanpipe Mulit-Port PPP support has not been compiled in!\n", wandev->name); err = -EPROTONOSUPPORT; goto out; -#endif } else { dev = kzalloc(sizeof(struct net_device), GFP_KERNEL); err = -ENOBUFS; @@ -661,17 +642,9 @@ static int wanrouter_device_new_if(struct wan_device *wandev, kfree(dev->priv); dev->priv = NULL; -#ifdef CONFIG_WANPIPE_MULTPPP - if (cnf->config_id == WANCONFIG_MPPP) - kfree(pppdev); - else - kfree(dev); -#else /* Sync PPP is disabled */ if (cnf->config_id != WANCONFIG_MPPP) kfree(dev); -#endif - out: kfree(cnf); return err;
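The list_cut_position(), list_splice_tail() and list_splice_tail_init() helpers added to include/linux/list.h above are general-purpose APIs, so a brief usage sketch follows. It is illustrative only and not part of the series: struct work_item, drain_up_to() and the pending/cut/done names are invented for the example; only the list helpers themselves come from the patch.

#include <linux/list.h>
#include <linux/kernel.h>

struct work_item {
	struct list_head node;
	int payload;
};

static void drain_up_to(struct list_head *pending,
			struct list_head *cut,
			struct list_head *done)
{
	struct work_item *item;
	LIST_HEAD(batch);

	/* Move everything up to and including @cut from @pending onto
	 * the local @batch list; @cut must be an entry of @pending (or
	 * @pending itself, in which case nothing is moved). */
	list_cut_position(&batch, pending, cut);

	list_for_each_entry(item, &batch, node)
		pr_debug("draining item %d\n", item->payload);

	/* Append the batch at the tail of @done, preserving FIFO order,
	 * and reinitialise @batch so it can be reused. */
	list_splice_tail_init(&batch, done);
}

The same tail-splice primitive is what the ehci-q.c hunk above switches to: list_splice_tail(qtd_list, &qh->qtd_list) replaces the open-coded __list_splice() call while keeping the queued TDs in order.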
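The speed_hi field added to struct ethtool_cmd is meant to be set and read only through the new ethtool_cmd_speed_set() and ethtool_cmd_speed() accessors, so that link speeds which do not fit in the old 16-bit speed field survive the round trip. A minimal, hypothetical driver fragment is sketched below; example_get_settings() and the 100000 Mb/s figure are made up for illustration, the value being chosen only because it overflows a u16.

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Hypothetical ethtool_ops->get_settings() callback: report the link
 * speed through the accessor instead of writing ecmd->speed directly,
 * so that ecmd->speed_hi is filled in as well. */
static int example_get_settings(struct net_device *dev,
				struct ethtool_cmd *ecmd)
{
	ethtool_cmd_speed_set(ecmd, 100000);	/* 100 Gb/s: needs speed_hi */
	ecmd->duplex  = DUPLEX_FULL;
	ecmd->autoneg = AUTONEG_DISABLE;
	return 0;
}

A reader then calls ethtool_cmd_speed(ecmd) to reconstruct the full 32-bit value as (speed_hi << 16) | speed, as defined in the hunk above.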