From a9f7c5363b7d2d836ad2be2d96fd4af3fd0d349a Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Wed, 2 Mar 2011 06:52:30 +0000 Subject: ixgbevf: Fix Version String The kernel version string is off by a major version number since new silicon was just introduced and also uses the wrong format for the version postfix. Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher --- drivers/net/ixgbevf/ixgbevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index c1fb2c1..2f6bc31 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -51,7 +51,7 @@ char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = "Intel(R) 82599 Virtual Function"; -#define DRV_VERSION "1.1.0-k0" +#define DRV_VERSION "2.0.0-k2" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = "Copyright (c) 2009 - 2010 Intel Corporation."; -- cgit v1.1 From 422e05d12af9c1063d57210074b4e6db36a0cf39 Mon Sep 17 00:00:00 2001 From: Greg Rose Date: Sat, 12 Mar 2011 02:01:29 +0000 Subject: ixgbevf: Fix Driver String Change the driver string to match the PF driver string format. Signed-off-by: Greg Rose Signed-off-by: Jeff Kirsher --- drivers/net/ixgbevf/ixgbevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c index 2f6bc31..054ab05 100644 --- a/drivers/net/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ixgbevf/ixgbevf_main.c @@ -49,7 +49,7 @@ char ixgbevf_driver_name[] = "ixgbevf"; static const char ixgbevf_driver_string[] = - "Intel(R) 82599 Virtual Function"; + "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; #define DRV_VERSION "2.0.0-k2" const char ixgbevf_driver_version[] = DRV_VERSION; -- cgit v1.1 From 09b068d45737abb49320ab25cb4ed2916017ace7 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Fri, 11 Mar 2011 20:42:13 -0800 Subject: igb: Add Energy Efficient Ethernet (EEE) for i350 devices. This patch adds the EEE feature for i350 devices, enabled by default. Signed-off-by: Carolyn Wyborny Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/igb/e1000_82575.c | 46 ++++++++++++++++++++++++++++++++++++++++- drivers/net/igb/e1000_82575.h | 1 + drivers/net/igb/e1000_defines.h | 7 +++++++ drivers/net/igb/e1000_hw.h | 1 + drivers/net/igb/e1000_regs.h | 4 ++++ drivers/net/igb/igb_main.c | 8 ++++++- 6 files changed, 65 insertions(+), 2 deletions(-) diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 65c1833..20d172a 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -195,7 +195,11 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) mac->arc_subsystem_valid = (rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK) ? true : false; - + /* enable EEE on i350 parts */ + if (mac->type == e1000_i350) + dev_spec->eee_disable = false; + else + dev_spec->eee_disable = true; /* physical interface link setup */ mac->ops.setup_physical_interface = (hw->phy.media_type == e1000_media_type_copper) @@ -1754,6 +1758,46 @@ u16 igb_rxpbs_adjust_82580(u32 data) return ret_val; } +/** + * igb_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure. 
+ * + **/ +s32 igb_set_eee_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 ipcnfg, eeer, ctrl_ext; + + ctrl_ext = rd32(E1000_CTRL_EXT); + if ((hw->mac.type != e1000_i350) || + (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK)) + goto out; + ipcnfg = rd32(E1000_IPCNFG); + eeer = rd32(E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer |= (E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + wr32(E1000_IPCNFG, ipcnfg); + wr32(E1000_EEER, eeer); +out: + + return ret_val; +} static struct e1000_mac_operations e1000_mac_ops_82575 = { .init_hw = igb_init_hw_82575, .check_for_link = igb_check_for_link_82575, diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h index 1d01af2..dd6df34 100644 --- a/drivers/net/igb/e1000_82575.h +++ b/drivers/net/igb/e1000_82575.h @@ -251,5 +251,6 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int); void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); u16 igb_rxpbs_adjust_82580(u32 data); +s32 igb_set_eee_i350(struct e1000_hw *); #endif diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index 92e11da..7926781 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -758,6 +758,13 @@ #define E1000_MDIC_ERROR 0x40000000 #define E1000_MDIC_DEST 0x80000000 +/* Energy Efficient Ethernet */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEE Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEE Rx LPI Enable */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */ + /* SerDes Control */ #define E1000_GEN_CTL_READY 0x80000000 #define E1000_GEN_CTL_ADDRESS_SHIFT 8 diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index eec9ed7..17569bf 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h @@ -488,6 +488,7 @@ struct e1000_mbx_info { struct e1000_dev_spec_82575 { bool sgmii_active; bool global_device_reset; + bool eee_disable; }; struct e1000_hw { diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index 6171354..b2f8e59 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h @@ -329,6 +329,10 @@ /* DMA Coalescing registers */ #define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ +/* Energy Efficient Ethernet "EEE" register */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet */ + /* OS2BMC Registers */ #define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ #define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 3666b96..8643f8c 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -2014,7 +2014,13 @@ static int __devinit igb_probe(struct pci_dev *pdev, adapter->msix_entries ? "MSI-X" : (adapter->flags & IGB_FLAG_HAS_MSI) ? 
"MSI" : "legacy", adapter->num_rx_queues, adapter->num_tx_queues); - + switch (hw->mac.type) { + case e1000_i350: + igb_set_eee_i350(hw); + break; + default: + break; + } return 0; err_register: -- cgit v1.1 From 4322e561a93ec7ee034b603a6c610e7be90d4e8a Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Fri, 11 Mar 2011 20:43:18 -0800 Subject: igb: Update NVM functions to work with i350 devices This patch adds functions and functions pointers to accommodate differences between NVM interfaces and options for i350 devices, 82580 devices and the rest. Signed-off-by: Carolyn Wyborny Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/igb/e1000_82575.c | 239 +++++++++++++++++++++++++++++++++++++++- drivers/net/igb/e1000_defines.h | 3 + drivers/net/igb/e1000_hw.h | 3 +- drivers/net/igb/e1000_nvm.c | 64 ++++++++++- drivers/net/igb/e1000_nvm.h | 1 + drivers/net/igb/igb_ethtool.c | 2 +- drivers/net/igb/igb_main.c | 2 +- 7 files changed, 306 insertions(+), 8 deletions(-) diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index 20d172a..6b256c2 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -64,7 +64,14 @@ static s32 igb_reset_init_script_82575(struct e1000_hw *); static s32 igb_read_mac_addr_82575(struct e1000_hw *); static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw); static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw); - +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; @@ -237,10 +244,32 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ size += NVM_WORD_SIZE_BASE_SHIFT; - /* EEPROM access above 16k is unsupported */ - if (size > 14) - size = 14; nvm->word_size = 1 << size; + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + /* NVM Function Pointers */ + nvm->ops.acquire = igb_acquire_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = igb_read_nvm_eerd; + else + nvm->ops.read = igb_read_nvm_spi; + + nvm->ops.release = igb_release_nvm_82575; + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = igb_validate_nvm_checksum_82580; + nvm->ops.update = igb_update_nvm_checksum_82580; + break; + case e1000_i350: + nvm->ops.validate = igb_validate_nvm_checksum_i350; + nvm->ops.update = igb_update_nvm_checksum_i350; + break; + default: + nvm->ops.validate = igb_validate_nvm_checksum; + nvm->ops.update = igb_update_nvm_checksum; + } + nvm->ops.write = igb_write_nvm_spi; /* if part supports SR-IOV then initialize mailbox parameters */ switch (mac->type) { @@ -1759,6 +1788,207 @@ u16 igb_rxpbs_adjust_82580(u32 data) } /** + * igb_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
+ **/ +s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val = 0; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + hw_dbg("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + hw_dbg("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if chekcsums compatibility bit is set validate checksums + * for all 4 ports. */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
+ **/ +static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + hw_dbg("NVM Read Error while updating checksum" + " compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + hw_dbg("NVM Write Error while updating checksum" + " compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} + +/** + * igb_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 j; + u16 nvm_offset; + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != 0) + goto out; + } + +out: + return ret_val; +} +/** * igb_set_eee_i350 - Enable/disable EEE support * @hw: pointer to the HW structure * @@ -1798,6 +2028,7 @@ out: return ret_val; } + static struct e1000_mac_operations e1000_mac_ops_82575 = { .init_hw = igb_init_hw_82575, .check_for_link = igb_check_for_link_82575, diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index 7926781..97969ad 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -566,6 +566,8 @@ #define NVM_INIT_CONTROL3_PORT_A 0x0024 #define NVM_ALT_MAC_ADDR_PTR 0x0037 #define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 #define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ #define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ @@ -600,6 +602,7 @@ /* NVM Commands - SPI */ #define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ #define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ #define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ #define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ #define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h index 17569bf..27153e8 100644 --- a/drivers/net/igb/e1000_hw.h +++ b/drivers/net/igb/e1000_hw.h @@ -336,6 +336,8 @@ struct e1000_nvm_operations { s32 
(*read)(struct e1000_hw *, u16, u16, u16 *); void (*release)(struct e1000_hw *); s32 (*write)(struct e1000_hw *, u16, u16, u16 *); + s32 (*update)(struct e1000_hw *); + s32 (*validate)(struct e1000_hw *); }; struct e1000_info { @@ -422,7 +424,6 @@ struct e1000_phy_info { struct e1000_nvm_info { struct e1000_nvm_operations ops; - enum e1000_nvm_type type; enum e1000_nvm_override override; diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c index 6b5cc2c..75bf36a 100644 --- a/drivers/net/igb/e1000_nvm.c +++ b/drivers/net/igb/e1000_nvm.c @@ -318,6 +318,68 @@ out: } /** + * igb_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + hw_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = igb_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + igb_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* + * Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. 
This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = igb_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** * igb_read_nvm_eerd - Reads EEPROM using EERD register * @hw: pointer to the HW structure * @offset: offset of word in the EEPROM to read @@ -353,7 +415,7 @@ s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) break; data[i] = (rd32(E1000_EERD) >> - E1000_NVM_RW_REG_DATA); + E1000_NVM_RW_REG_DATA); } out: diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h index 29c956a..7f43564 100644 --- a/drivers/net/igb/e1000_nvm.h +++ b/drivers/net/igb/e1000_nvm.h @@ -35,6 +35,7 @@ s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size); s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); s32 igb_validate_nvm_checksum(struct e1000_hw *hw); s32 igb_update_nvm_checksum(struct e1000_hw *hw); diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index 78d420b..df15a91 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -721,7 +721,7 @@ static int igb_set_eeprom(struct net_device *netdev, /* Update the checksum over the first part of the EEPROM if needed * and flush shadow RAM for 82573 controllers */ if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) - igb_update_nvm_checksum(hw); + hw->nvm.ops.update(hw); kfree(eeprom_buff); return ret_val; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 8643f8c..8c6af11 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1884,7 +1884,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, hw->mac.ops.reset_hw(hw); /* make sure the NVM is good */ - if (igb_validate_nvm_checksum(hw) < 0) { + if (hw->nvm.ops.validate(hw) < 0) { dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); err = -EIO; goto err_eeprom; -- cgit v1.1 From 831ec0b4226cec7ea34f5c4c9810e78aeb2069bf Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Fri, 11 Mar 2011 20:43:54 -0800 Subject: igb: Add DMA Coalescing feature to driver This patch add DMA Coalescing which is a power-saving feature that coalesces DMA writes in order to stay in a low-power state as much as possible. Feature is disabled by default. 
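The enable path this patch adds to igb_reset() (see the igb_main.c hunk below) condenses to the following sketch; the register and flag names are the ones introduced here, pba is the packet buffer allocation already computed earlier in igb_reset(), and the values simply restate the comments in the diff rather than forming a complete, validated configuration:

	if (hw->mac.type > e1000_82580 && (adapter->flags & IGB_FLAG_DMAC)) {
		u32 reg;

		/* Rx threshold is pba - 6, so the coalescing high water
		 * mark is set just above it, at pba - 4 (in KB). */
		reg = ((pba - 6) << E1000_DMACR_DMACTHR_SHIFT) &
		      E1000_DMACR_DMACTHR_MASK;
		/* allow Lx transitions and turn coalescing on */
		reg |= E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK;
		/* ~1000 usec watchdog, expressed in 32 usec intervals */
		reg |= 1000 >> 5;
		wr32(E1000_DMACR, reg);

		wr32(E1000_DMCRTRH, 0);                 /* no lower rate threshold */
		wr32(E1000_FCRTC, (pba - 4) << 10);     /* flow control high water mark */
	}

The remaining writes in the hunk (E1000_DMCTLX, E1000_DMCTXTH, E1000_PCIEMISC) tune how long the part waits before requesting a low-power state and when it wakes back up.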
Signed-off-by: Carolyn Wyborny Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/igb/e1000_defines.h | 29 ++++++++++++++++++++- drivers/net/igb/e1000_regs.h | 9 +++++++ drivers/net/igb/igb.h | 6 +++++ drivers/net/igb/igb_ethtool.c | 6 +++++ drivers/net/igb/igb_main.c | 57 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 106 insertions(+), 1 deletion(-) diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h index 97969ad..9bb1928 100644 --- a/drivers/net/igb/e1000_defines.h +++ b/drivers/net/igb/e1000_defines.h @@ -287,7 +287,34 @@ #define E1000_TCTL_COLD 0x003ff000 /* collision distance */ #define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ -/* Transmit Arbitration Count */ +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing + * Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive + * Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe + * transactions */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit + * Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate + * Threshold */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in + * current window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic + * Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold + * High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */ /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h index b2f8e59..ad77ed5 100644 --- a/drivers/net/igb/e1000_regs.h +++ b/drivers/net/igb/e1000_regs.h @@ -106,6 +106,15 @@ #define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + /* TX Rate Limit Registers */ #define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ #define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index bbc5ebf..1c687e2 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -333,6 +333,12 @@ struct igb_adapter { #define IGB_FLAG_DCA_ENABLED (1 << 1) #define IGB_FLAG_QUAD_PORT_A (1 << 2) #define IGB_FLAG_QUEUE_PAIRS (1 << 3) +#define IGB_FLAG_DMAC (1 << 4) + +/* DMA Coalescing defines */ +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ #define IGB_82576_TSYNC_SHIFT 19 #define IGB_82580_TSYNC_SHIFT 24 diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index df15a91..d976733 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -2009,6 +2009,12 @@ static int igb_set_coalesce(struct 
net_device *netdev, if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs) return -EINVAL; + /* If ITR is disabled, disable DMAC */ + if (ec->rx_coalesce_usecs == 0) { + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; + } + /* convert to rate of irq's per second */ if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) adapter->rx_itr_setting = ec->rx_coalesce_usecs; diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 8c6af11..49476f7 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -1674,7 +1674,58 @@ void igb_reset(struct igb_adapter *adapter) if (hw->mac.ops.init_hw(hw)) dev_err(&pdev->dev, "Hardware Error\n"); + if (hw->mac.type > e1000_82580) { + if (adapter->flags & IGB_FLAG_DMAC) { + u32 reg; + /* + * DMA Coalescing high water mark needs to be higher + * than * the * Rx threshold. The Rx threshold is + * currently * pba - 6, so we * should use a high water + * mark of pba * - 4. */ + hwm = (pba - 4) << 10; + + reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* watchdog timer= +-1000 usec in 32usec intervals */ + reg |= (1000 >> 5); + wr32(E1000_DMACR, reg); + + /* no lower threshold to disable coalescing(smart fifb) + * -UTRESH=0*/ + wr32(E1000_DMCRTRH, 0); + + /* set hwm to PBA - 2 * max frame size */ + wr32(E1000_FCRTC, hwm); + + /* + * This sets the time to wait before requesting tran- + * sition to * low power state to number of usecs needed + * to receive 1 512 * byte frame at gigabit line rate + */ + reg = rd32(E1000_DMCTLX); + reg |= IGB_DMCTLX_DCFLUSH_DIS; + + /* Delay 255 usec before entering Lx state. */ + reg |= 0xFF; + wr32(E1000_DMCTLX, reg); + + /* free space in Tx packet buffer to wake from DMAC */ + wr32(E1000_DMCTXTH, + (IGB_MIN_TXPBSIZE - + (IGB_TX_BUF_4096 + adapter->max_frame_size)) + >> 6); + + /* make low power state decision controlled by DMAC */ + reg = rd32(E1000_PCIEMISC); + reg |= E1000_PCIEMISC_LX_DECISION; + wr32(E1000_PCIEMISC, reg); + } /* end if IGB_FLAG_DMAC set */ + } if (hw->mac.type == e1000_82580) { u32 reg = rd32(E1000_PCIEMISC); wr32(E1000_PCIEMISC, @@ -2157,6 +2208,9 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter) random_ether_addr(mac_addr); igb_set_vf_mac(adapter, i, mac_addr); } + /* DMA Coalescing is not supported in IOV mode. */ + if (adapter->flags & IGB_FLAG_DMAC) + adapter->flags &= ~IGB_FLAG_DMAC; } #endif /* CONFIG_PCI_IOV */ } @@ -2331,6 +2385,9 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) /* Explicitly disable IRQ since the NIC can be in any state. */ igb_irq_disable(adapter); + if (hw->mac.type == e1000_i350) + adapter->flags &= ~IGB_FLAG_DMAC; + set_bit(__IGB_DOWN, &adapter->state); return 0; } -- cgit v1.1 From 0d1fe82deacdcc90458558b5d6a4a5af5db9a6c6 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Fri, 11 Mar 2011 20:58:19 -0800 Subject: igb: Bump version to 3.0.6 This patch updates igb version to 3.0.6. 
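The version string itself is assembled by the preprocessor; since __stringify() (from <linux/stringify.h>) expands its argument before stringifying it, the macro added in the hunk below resolves as:

	/* MAJ=3, MIN=0, BUILD=6, KFIX=2
	 *   __stringify(MAJ)  ->  "3"
	 *   DRV_VERSION       ->  "3" "." "0" "." "6" "-k" "2"
	 *                     ->  "3.0.6-k2"   (adjacent literals concatenated)
	 */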
Signed-off-by: Carolyn Wyborny Tested-by: Jeff Pieper Signed-off-by: Jeff Kirsher --- drivers/net/igb/igb_main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 49476f7..b4f92b0 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -50,7 +50,12 @@ #endif #include "igb.h" -#define DRV_VERSION "2.4.13-k2" +#define MAJ 3 +#define MIN 0 +#define BUILD 6 +#define KFIX 2 +#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ +__stringify(BUILD) "-k" __stringify(KFIX) char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = -- cgit v1.1 From 3032309b49622497430ecd2b40ff51fb204c35e8 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 1 Mar 2011 05:25:35 +0000 Subject: ixgbe: DCB, implement capabilities flags This implements dcbnl get and set capabilities ops. The devices supported by ixgbe can be configured to run in IEEE or CEE modes but not both. With the DCBX set capabilities bit we add an explicit signal that must be used to toggle between these modes. This patch adds logic to fail the CEE command set_hw_all() which programs the device with a CEE configuration if the CEE caps bit is not set. Similarly, IEEE set commands will fail if the IEEE caps bit is not set. We allow most CEE config set commands to occur because they do not touch the hardware until set_hw_all() is called. The one exception to the above is the {set|get}app routines. These must always be protected by caps bits to ensure side effects do not corrupt the current configured mode. By requiring the caps bit to be set correctly we can maintain a consistent configuration in the hardware for CEE or IEEE modes and prevent partial hardware configurations that may occur if user space does not send a complete IEEE or CEE configurations. It is expected that user space will signal a DCBX mode before programming device. 
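The CEE/IEEE exclusivity described above boils down to a small check on the requested mode bits. The helper below is purely illustrative; the patch open-codes the same tests at the top of the new ixgbe_dcbnl_setdcbx(), but it shows which combinations are accepted:

	/* Illustrative only; ixgbe_dcbnl_setdcbx() in the diff below open-codes
	 * these checks: the device must be host managed and may run either CEE
	 * or IEEE DCBX, never both at once. */
	static bool ixgbe_dcbx_mode_supported(u8 mode)
	{
		if (mode & DCB_CAP_DCBX_LLD_MANAGED)
			return false;	/* LLD/firmware-managed modes unsupported */
		if ((mode & DCB_CAP_DCBX_VER_CEE) && (mode & DCB_CAP_DCBX_VER_IEEE))
			return false;	/* CEE and IEEE are mutually exclusive */
		return !!(mode & DCB_CAP_DCBX_HOST);
	}

Once a mode is accepted, the set paths are gated on it: ixgbe_dcbnl_set_all() bails out with DCB_NO_HW_CHG unless DCB_CAP_DCBX_VER_CEE is set, and the IEEE setters return -EINVAL unless DCB_CAP_DCBX_VER_IEEE is set.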
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 1 + drivers/net/ixgbe/ixgbe_dcb_nl.c | 129 ++++++++++++++++++++++++++++----------- drivers/net/ixgbe/ixgbe_main.c | 1 + 3 files changed, 95 insertions(+), 36 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 1e546fc..815edfd 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -341,6 +341,7 @@ struct ixgbe_adapter { struct ixgbe_dcb_config dcb_cfg; struct ixgbe_dcb_config temp_dcb_cfg; u8 dcb_set_bitmap; + u8 dcbx_cap; enum ixgbe_fc_mode last_lfc_mode; /* Interrupt Throttle Rate */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index d7f0024..d7c456f 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -346,7 +346,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); int ret; - if (!adapter->dcb_set_bitmap) + if (!adapter->dcb_set_bitmap || + !(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return DCB_NO_HW_CHG; ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, @@ -448,40 +449,38 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 rval = 0; - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - switch (capid) { - case DCB_CAP_ATTR_PG: - *cap = true; - break; - case DCB_CAP_ATTR_PFC: - *cap = true; - break; - case DCB_CAP_ATTR_UP2TC: - *cap = false; - break; - case DCB_CAP_ATTR_PG_TCS: - *cap = 0x80; - break; - case DCB_CAP_ATTR_PFC_TCS: - *cap = 0x80; - break; - case DCB_CAP_ATTR_GSP: - *cap = true; - break; - case DCB_CAP_ATTR_BCN: - *cap = false; - break; - default: - rval = -EINVAL; - break; - } - } else { - rval = -EINVAL; + switch (capid) { + case DCB_CAP_ATTR_PG: + *cap = true; + break; + case DCB_CAP_ATTR_PFC: + *cap = true; + break; + case DCB_CAP_ATTR_UP2TC: + *cap = false; + break; + case DCB_CAP_ATTR_PG_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_BCN: + *cap = false; + break; + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcbx_cap; + break; + default: + *cap = false; + break; } - return rval; + return 0; } static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) @@ -542,13 +541,17 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) */ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); u8 rval = 0; + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return rval; + switch (idtype) { case DCB_APP_IDTYPE_ETHTYPE: #ifdef IXGBE_FCOE if (id == ETH_P_FCOE) - rval = ixgbe_fcoe_getapp(netdev_priv(netdev)); + rval = ixgbe_fcoe_getapp(adapter); #endif break; case DCB_APP_IDTYPE_PORTNUM: @@ -571,14 +574,17 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 id, u8 up) { + struct ixgbe_adapter *adapter = netdev_priv(netdev); u8 rval = 1; + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return rval; + switch (idtype) { case DCB_APP_IDTYPE_ETHTYPE: #ifdef IXGBE_FCOE if (id == ETH_P_FCOE) { u8 old_tc; - struct ixgbe_adapter *adapter = netdev_priv(netdev); /* Get current programmed tc */ old_tc = adapter->fcoe.tc; @@ -640,6 
+646,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, /* naively give each TC a bwg to map onto CEE hardware */ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + if (!adapter->ixgbe_ieee_ets) { adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), GFP_KERNEL); @@ -647,7 +656,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, return -ENOMEM; } - memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); @@ -686,6 +694,9 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, struct ixgbe_adapter *adapter = netdev_priv(dev); int err; + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + if (!adapter->ixgbe_ieee_pfc) { adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), GFP_KERNEL); @@ -698,6 +709,51 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, return err; } +static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + return adapter->dcbx_cap; +} + +static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets ets = {0}; + struct ieee_pfc pfc = {0}; + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + if (mode == adapter->dcbx_cap) + return 0; + + adapter->dcbx_cap = mode; + + /* ETS and PFC defaults */ + ets.ets_cap = 8; + pfc.pfc_cap = 8; + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { + adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX); + ixgbe_dcbnl_set_all(dev); + } else { + /* Drop into single TC mode strict priority as this + * indicates CEE and IEEE versions are disabled + */ + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + ixgbe_dcbnl_set_state(dev, 0); + } + + return 0; +} + const struct dcbnl_rtnl_ops dcbnl_ops = { .ieee_getets = ixgbe_dcbnl_ieee_getets, .ieee_setets = ixgbe_dcbnl_ieee_setets, @@ -724,5 +780,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = { .setpfcstate = ixgbe_dcbnl_setpfcstate, .getapp = ixgbe_dcbnl_getapp, .setapp = ixgbe_dcbnl_setapp, + .getdcbx = ixgbe_dcbnl_getdcbx, + .setdcbx = ixgbe_dcbnl_setdcbx, }; - diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 5998dc9..4aeade8 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -5190,6 +5190,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->dcb_cfg.rx_pba_cfg = pba_equal; adapter->dcb_cfg.pfc_mode_enable = false; adapter->dcb_set_bitmap = 0x00; + adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, adapter->ring_feature[RING_F_DCB].indices); -- cgit v1.1 From f8628d404505e61bfc63638744656ede69227766 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 23 Feb 2011 05:57:47 +0000 Subject: ixgbe: DCB, implement ieee_setapp dcbnl ops Implement ieee_setapp dcbnl ops in ixgbe. This is required to setup FCoE which requires dedicated resources. If the app data is not for FCoE then no action is taken in ixgbe except to add it to the dcb_app_list. 
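For FCoE the new op expects an Ethertype-selector app entry. A minimal example of what gets handed down through dcbnl is sketched below; the priority value is an assumption for illustration, in practice it comes from the switch's DCBX advertisement:

	struct dcb_app fcoe_app = {
		.selector = 1,			/* IEEE 802.1Qaz Ethertype selector */
		.protocol = ETH_P_FCOE,		/* 0x8906 */
		.priority = 3,			/* example only; normally learned via DCBX */
	};

	/* ixgbe maps this priority 1:1 onto a traffic class (fcoe.tc = fcoe.up =
	 * priority), schedules a hardware reset so the FCoE {tx|rx}_rings are
	 * re-plumbed, and records the entry with dcb_setapp() for later lookup. */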
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_dcb_nl.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index d7c456f..8d139f7 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -709,6 +709,35 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, return err; } +static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; +#ifdef IXGBE_FCOE + if (app->selector == 1 && app->protocol == ETH_P_FCOE) { + if (adapter->fcoe.tc == app->priority) + goto setapp; + + /* In IEEE mode map up to tc 1:1 */ + adapter->fcoe.tc = app->priority; + adapter->fcoe.up = app->priority; + + /* Force hardware reset required to push FCoE + * setup on {tx|rx}_rings + */ + adapter->dcb_set_bitmap |= BIT_APP_UPCHG; + ixgbe_dcbnl_set_all(dev); + } + +setapp: +#endif + dcb_setapp(dev, app); + return 0; +} + static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -759,6 +788,7 @@ const struct dcbnl_rtnl_ops dcbnl_ops = { .ieee_setets = ixgbe_dcbnl_ieee_setets, .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, + .ieee_setapp = ixgbe_dcbnl_ieee_setapp, .getstate = ixgbe_dcbnl_get_state, .setstate = ixgbe_dcbnl_set_state, .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, -- cgit v1.1 From dc166e22ede5ffb46b5b18b99ba0321ae545f89b Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 23 Feb 2011 05:57:52 +0000 Subject: ixgbe: DCB remove ixgbe_fcoe_getapp routine Remove ixgbe_fcoe_getapp() and use the generic kernel routine instead. Also add application priority to the kernel maintained list on setapp so applications and stacks can query the value. 
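With the driver-private hook gone, any in-kernel consumer can resolve the FCoE priority through the shared dcb_app table. A sketch of such a lookup (the helper name is hypothetical):

	/* Hypothetical caller-side helper: dcb_getapp() returns the value
	 * recorded by the last setapp call for this selector/protocol pair
	 * (a user-priority bitmap in CEE mode), or 0 if none was set. */
	static u8 fcoe_query_up(struct net_device *netdev)
	{
		struct dcb_app app = {
			.selector = DCB_APP_IDTYPE_ETHTYPE,
			.protocol = ETH_P_FCOE,
		};

		return dcb_getapp(netdev, &app);
	}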
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_dcb_nl.c | 28 +++++++++++++--------------- drivers/net/ixgbe/ixgbe_fcoe.c | 15 --------------- 2 files changed, 13 insertions(+), 30 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 8d139f7..d4b2914 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -542,24 +542,15 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - u8 rval = 0; + struct dcb_app app = { + .selector = idtype, + .protocol = id, + }; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) - return rval; + return 0; - switch (idtype) { - case DCB_APP_IDTYPE_ETHTYPE: -#ifdef IXGBE_FCOE - if (id == ETH_P_FCOE) - rval = ixgbe_fcoe_getapp(adapter); -#endif - break; - case DCB_APP_IDTYPE_PORTNUM: - break; - default: - break; - } - return rval; + return dcb_getapp(netdev, &app); } /** @@ -576,10 +567,17 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); u8 rval = 1; + struct dcb_app app = { + .selector = idtype, + .protocol = id, + .priority = up + }; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return rval; + rval = dcb_setapp(netdev, &app); + switch (idtype) { case DCB_APP_IDTYPE_ETHTYPE: #ifdef IXGBE_FCOE diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 00af15a..dba7d77 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c @@ -813,21 +813,6 @@ out_disable: #ifdef CONFIG_IXGBE_DCB /** - * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE - * @adapter : ixgbe adapter - * - * Finds out the corresponding user priority bitmap from the current - * traffic class that FCoE belongs to. Returns 0 as the invalid user - * priority bitmap to indicate an error. - * - * Returns : 802.1p user priority bitmap for FCoE - */ -u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter) -{ - return 1 << adapter->fcoe.up; -} - -/** * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE * @adapter : ixgbe adapter * @up : 802.1p user priority bitmap -- cgit v1.1 From e5b646355770d34eab360ebae93c56c407dfe803 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 8 Mar 2011 03:44:52 +0000 Subject: ixgbe: DCB, use multiple Tx rings per traffic class This enables multiple {Tx|Rx} rings per traffic class while in DCB mode. In order to get this working as expected the tc_to_tx net device mapping is configured as well as the prio_tc_map. skb priorities are mapped across a range of queue pairs to get a distribution per traffic class. The maximum number of queue pairs used while in DCB mode is capped at 64. The hardware max is actually 128 queues but 64 is sufficient for now and allocating more seemed a bit excessive. It is easy enough to increase the cap later if need be. To get the 802.1Q priority tags inserted correctly ixgbe was previously using the skb queue_mapping field to directly set the 802.1Q priority. This no longer works because we have removed the 1:1 mapping between queues and traffic class. Each ring is aligned with an 802.1Qaz traffic class so here we add an extra field to the ring struct to identify the 802.1Q traffic class. 
This uses an extra byte of the ixgbe_ring struct fortunately there was a 2byte hole, struct ixgbe_ring { void * desc; /* 0 8 */ struct device * dev; /* 8 8 */ struct net_device * netdev; /* 16 8 */ union { struct ixgbe_tx_buffer * tx_buffer_info; /* 8 */ struct ixgbe_rx_buffer * rx_buffer_info; /* 8 */ }; /* 24 8 */ long unsigned int state; /* 32 8 */ u8 atr_sample_rate; /* 40 1 */ u8 atr_count; /* 41 1 */ u16 count; /* 42 2 */ u16 rx_buf_len; /* 44 2 */ u16 next_to_use; /* 46 2 */ u16 next_to_clean; /* 48 2 */ u8 queue_index; /* 50 1 */ u8 reg_idx; /* 51 1 */ u16 work_limit; /* 52 2 */ /* XXX 2 bytes hole, try to pack */ u8 * tail; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ Now we can set the VLAN priority directly and it will be correct. User space can indicate the 802.1Qaz priority using the SO_PRIORITY setsocket() option and QOS layer will steer the skb to the correct rings. Additionally using the multiq qdisc with a queue_mapping action works as well. Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 4 +- drivers/net/ixgbe/ixgbe_dcb_nl.c | 7 +- drivers/net/ixgbe/ixgbe_main.c | 339 ++++++++++++++++++++------------------- 3 files changed, 182 insertions(+), 168 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index 815edfd..b7e62d5 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -209,6 +209,7 @@ struct ixgbe_ring { * associated with this ring, which is * different for DCB and RSS modes */ + u8 dcb_tc; u16 work_limit; /* max work per interrupt */ @@ -243,7 +244,7 @@ enum ixgbe_ring_f_enum { RING_F_ARRAY_SIZE /* must be last in enum set */ }; -#define IXGBE_MAX_DCB_INDICES 8 +#define IXGBE_MAX_DCB_INDICES 64 #define IXGBE_MAX_RSS_INDICES 16 #define IXGBE_MAX_VMDQ_INDICES 64 #define IXGBE_MAX_FDIR_INDICES 64 @@ -542,6 +543,7 @@ extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, struct ixgbe_ring *ring); extern void ixgbe_set_rx_mode(struct net_device *netdev); +extern int ixgbe_setup_tc(struct net_device *dev, u8 tc); #ifdef IXGBE_FCOE extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); extern int ixgbe_fso(struct ixgbe_adapter *adapter, diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index d4b2914..b7b6db3 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -145,6 +145,9 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) } adapter->flags |= IXGBE_FLAG_DCB_ENABLED; + if (!netdev_get_num_tc(netdev)) + ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS); + ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); @@ -169,6 +172,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) break; } + ixgbe_setup_tc(netdev, 0); + ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); @@ -351,7 +356,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) return DCB_NO_HW_CHG; ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, - adapter->ring_feature[RING_F_DCB].indices); + MAX_TRAFFIC_CLASS); if (ret) return DCB_NO_HW_CHG; diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 4aeade8..3694226 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -652,7 +652,7 @@ void ixgbe_unmap_and_free_tx_resource(struct 
ixgbe_ring *tx_ring, static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx) { int tc = -1; - int dcb_i = adapter->ring_feature[RING_F_DCB].indices; + int dcb_i = netdev_get_num_tc(adapter->netdev); /* if DCB is not enabled the queues have no TC */ if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) @@ -4258,24 +4258,6 @@ static void ixgbe_reset_task(struct work_struct *work) ixgbe_reinit_locked(adapter); } -#ifdef CONFIG_IXGBE_DCB -static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) -{ - bool ret = false; - struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; - - if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) - return ret; - - f->mask = 0x7 << 3; - adapter->num_rx_queues = f->indices; - adapter->num_tx_queues = f->indices; - ret = true; - - return ret; -} -#endif - /** * ixgbe_set_rss_queues: Allocate queues for RSS * @adapter: board private structure to initialize @@ -4346,19 +4328,26 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter) **/ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) { - bool ret = false; struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; - f->indices = min((int)num_online_cpus(), f->indices); - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return false; + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { #ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - e_info(probe, "FCoE enabled with DCB\n"); - ixgbe_set_dcb_queues(adapter); - } + int tc; + struct net_device *dev = adapter->netdev; + + tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up); + f->indices = dev->tc_to_txq[tc].count; + f->mask = dev->tc_to_txq[tc].offset; #endif + } else { + f->indices = min((int)num_online_cpus(), f->indices); + + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { e_info(probe, "FCoE enabled with RSS\n"); if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || @@ -4371,14 +4360,45 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter) f->mask = adapter->num_rx_queues; adapter->num_rx_queues += f->indices; adapter->num_tx_queues += f->indices; + } - ret = true; + return true; +} +#endif /* IXGBE_FCOE */ + +#ifdef CONFIG_IXGBE_DCB +static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) +{ + bool ret = false; + struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB]; + int i, q; + + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return ret; + + f->indices = 0; + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS); + f->indices += q; } + f->mask = 0x7 << 3; + adapter->num_rx_queues = f->indices; + adapter->num_tx_queues = f->indices; + ret = true; + +#ifdef IXGBE_FCOE + /* FCoE enabled queues require special configuration done through + * configure_fcoe() and others. Here we map FCoE indices onto the + * DCB queue pairs allowing FCoE to own configuration later. 
+ */ + ixgbe_set_fcoe_queues(adapter); +#endif + return ret; } +#endif -#endif /* IXGBE_FCOE */ /** * ixgbe_set_sriov_queues: Allocate queues for IOV use * @adapter: board private structure to initialize @@ -4414,16 +4434,16 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter) if (ixgbe_set_sriov_queues(adapter)) goto done; -#ifdef IXGBE_FCOE - if (ixgbe_set_fcoe_queues(adapter)) - goto done; - -#endif /* IXGBE_FCOE */ #ifdef CONFIG_IXGBE_DCB if (ixgbe_set_dcb_queues(adapter)) goto done; #endif +#ifdef IXGBE_FCOE + if (ixgbe_set_fcoe_queues(adapter)) + goto done; + +#endif /* IXGBE_FCOE */ if (ixgbe_set_fdir_queues(adapter)) goto done; @@ -4515,6 +4535,91 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) } #ifdef CONFIG_IXGBE_DCB + +/* ixgbe_get_first_reg_idx - Return first register index associated with ring */ +void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + unsigned int *tx, unsigned int *rx) +{ + struct net_device *dev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; + *rx = 0; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + *tx = tc << 3; + *rx = tc << 2; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (num_tcs == 8) { + if (tc < 3) { + *tx = tc << 5; + *rx = tc << 4; + } else if (tc < 5) { + *tx = ((tc + 2) << 4); + *rx = tc << 4; + } else if (tc < num_tcs) { + *tx = ((tc + 8) << 3); + *rx = tc << 4; + } + } else if (num_tcs == 4) { + *rx = tc << 5; + switch (tc) { + case 0: + *tx = 0; + break; + case 1: + *tx = 64; + break; + case 2: + *tx = 96; + break; + case 3: + *tx = 112; + break; + default: + break; + } + } + break; + default: + break; + } +} + +#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS) + +/* ixgbe_setup_tc - routine to configure net_device for multiple traffic + * classes. 
+ * + * @netdev: net device to configure + * @tc: number of traffic classes to enable + */ +int ixgbe_setup_tc(struct net_device *dev, u8 tc) +{ + int i; + unsigned int q, offset = 0; + + if (!tc) { + netdev_reset_tc(dev); + } else { + if (netdev_set_num_tc(dev, tc)) + return -EINVAL; + + /* Partition Tx queues evenly amongst traffic classes */ + for (i = 0; i < tc; i++) { + q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC); + netdev_set_prio_tc_map(dev, i, i); + netdev_set_tc_queue(dev, i, q, offset); + offset += q; + } + } + return 0; +} + /** * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB * @adapter: board private structure to initialize @@ -4524,72 +4629,27 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) **/ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) { - int i; - bool ret = false; - int dcb_i = adapter->ring_feature[RING_F_DCB].indices; + struct net_device *dev = adapter->netdev; + int i, j, k; + u8 num_tcs = netdev_get_num_tc(dev); if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) return false; - /* the number of queues is assumed to be symmetric */ - switch (adapter->hw.mac.type) { - case ixgbe_mac_82598EB: - for (i = 0; i < dcb_i; i++) { - adapter->rx_ring[i]->reg_idx = i << 3; - adapter->tx_ring[i]->reg_idx = i << 2; - } - ret = true; - break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - if (dcb_i == 8) { - /* - * Tx TC0 starts at: descriptor queue 0 - * Tx TC1 starts at: descriptor queue 32 - * Tx TC2 starts at: descriptor queue 64 - * Tx TC3 starts at: descriptor queue 80 - * Tx TC4 starts at: descriptor queue 96 - * Tx TC5 starts at: descriptor queue 104 - * Tx TC6 starts at: descriptor queue 112 - * Tx TC7 starts at: descriptor queue 120 - * - * Rx TC0-TC7 are offset by 16 queues each - */ - for (i = 0; i < 3; i++) { - adapter->tx_ring[i]->reg_idx = i << 5; - adapter->rx_ring[i]->reg_idx = i << 4; - } - for ( ; i < 5; i++) { - adapter->tx_ring[i]->reg_idx = ((i + 2) << 4); - adapter->rx_ring[i]->reg_idx = i << 4; - } - for ( ; i < dcb_i; i++) { - adapter->tx_ring[i]->reg_idx = ((i + 8) << 3); - adapter->rx_ring[i]->reg_idx = i << 4; - } - ret = true; - } else if (dcb_i == 4) { - /* - * Tx TC0 starts at: descriptor queue 0 - * Tx TC1 starts at: descriptor queue 64 - * Tx TC2 starts at: descriptor queue 96 - * Tx TC3 starts at: descriptor queue 112 - * - * Rx TC0-TC3 are offset by 32 queues each - */ - adapter->tx_ring[0]->reg_idx = 0; - adapter->tx_ring[1]->reg_idx = 64; - adapter->tx_ring[2]->reg_idx = 96; - adapter->tx_ring[3]->reg_idx = 112; - for (i = 0 ; i < dcb_i; i++) - adapter->rx_ring[i]->reg_idx = i << 5; - ret = true; + for (i = 0, k = 0; i < num_tcs; i++) { + unsigned int tx_s, rx_s; + u16 count = dev->tc_to_txq[i].count; + + ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s); + for (j = 0; j < count; j++, k++) { + adapter->tx_ring[k]->reg_idx = tx_s + j; + adapter->rx_ring[k]->reg_idx = rx_s + j; + adapter->tx_ring[k]->dcb_tc = i; + adapter->rx_ring[k]->dcb_tc = i; } - break; - default: - break; } - return ret; + + return true; } #endif @@ -4635,33 +4695,6 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) return false; -#ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - struct ixgbe_fcoe *fcoe = &adapter->fcoe; - - ixgbe_cache_ring_dcb(adapter); - /* find out queues in TC for FCoE */ - fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; - fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 
1; - /* - * In 82599, the number of Tx queues for each traffic - * class for both 8-TC and 4-TC modes are: - * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 - * 8 TCs: 32 32 16 16 8 8 8 8 - * 4 TCs: 64 64 32 32 - * We have max 8 queues for FCoE, where 8 the is - * FCoE redirection table size. If TC for FCoE is - * less than or equal to TC3, we have enough queues - * to add max of 8 queues for FCoE, so we start FCoE - * Tx queue from the next one, i.e., reg_idx + 1. - * If TC for FCoE is above TC3, implying 8 TC mode, - * and we need 8 for FCoE, we have to take all queues - * in that traffic class for FCoE. - */ - if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) - fcoe_tx_i--; - } -#endif /* CONFIG_IXGBE_DCB */ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) @@ -4718,16 +4751,16 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) if (ixgbe_cache_ring_sriov(adapter)) return; +#ifdef CONFIG_IXGBE_DCB + if (ixgbe_cache_ring_dcb(adapter)) + return; +#endif + #ifdef IXGBE_FCOE if (ixgbe_cache_ring_fcoe(adapter)) return; - #endif /* IXGBE_FCOE */ -#ifdef CONFIG_IXGBE_DCB - if (ixgbe_cache_ring_dcb(adapter)) - return; -#endif if (ixgbe_cache_ring_fdir(adapter)) return; @@ -5192,7 +5225,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->dcb_set_bitmap = 0x00; adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, - adapter->ring_feature[RING_F_DCB].indices); + MAX_TRAFFIC_CLASS); #endif @@ -6664,18 +6697,12 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) protocol = vlan_get_protocol(skb); - if ((protocol == htons(ETH_P_FCOE)) || - (protocol == htons(ETH_P_FIP))) { - if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { - txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); - txq += adapter->ring_feature[RING_F_FCOE].mask; - return txq; -#ifdef CONFIG_IXGBE_DCB - } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - txq = adapter->fcoe.up; - return txq; -#endif - } + if (((protocol == htons(ETH_P_FCOE)) || + (protocol == htons(ETH_P_FIP))) && + (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { + txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); + txq += adapter->ring_feature[RING_F_FCOE].mask; + return txq; } #endif @@ -6685,15 +6712,6 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) return txq; } - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - if (skb->priority == TC_PRIO_CONTROL) - txq = adapter->ring_feature[RING_F_DCB].indices-1; - else - txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) - >> 13; - return txq; - } - return skb_tx_hash(dev, skb); } @@ -6715,13 +6733,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, tx_flags |= vlan_tx_tag_get(skb); if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; - tx_flags |= ((skb->queue_mapping & 0x7) << 13); + tx_flags |= tx_ring->dcb_tc << 13; } tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED && skb->priority != TC_PRIO_CONTROL) { - tx_flags |= ((skb->queue_mapping & 0x7) << 13); + tx_flags |= tx_ring->dcb_tc << 13; tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; tx_flags |= IXGBE_TX_FLAGS_VLAN; } @@ -6730,20 +6748,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, /* for FCoE with DCB, we force the priority to what * was specified by the 
switch */ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && - (protocol == htons(ETH_P_FCOE) || - protocol == htons(ETH_P_FIP))) { -#ifdef CONFIG_IXGBE_DCB - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { - tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK - << IXGBE_TX_FLAGS_VLAN_SHIFT); - tx_flags |= ((adapter->fcoe.up << 13) - << IXGBE_TX_FLAGS_VLAN_SHIFT); - } -#endif - /* flag for FCoE offloads */ - if (protocol == htons(ETH_P_FCOE)) - tx_flags |= IXGBE_TX_FLAGS_FCOE; - } + (protocol == htons(ETH_P_FCOE))) + tx_flags |= IXGBE_TX_FLAGS_FCOE; #endif /* four things can cause us to need a context descriptor */ @@ -7157,8 +7163,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, else indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES); +#if defined(CONFIG_DCB) indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES); -#ifdef IXGBE_FCOE +#elif defined(IXGBE_FCOE) indices += min_t(unsigned int, num_possible_cpus(), IXGBE_MAX_FCOE_INDICES); #endif -- cgit v1.1 From 24095aa347a32673cf220fc0bd0b78d28ba0a69e Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 23 Feb 2011 05:58:03 +0000 Subject: ixgbe: enable ndo_tc_setup This patch adds the ndo_tc_setup to ixgbe. By default we set the device to use strict priority. Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 3694226..3ce0f4f 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -4606,7 +4606,10 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) if (!tc) { netdev_reset_tc(dev); } else { - if (netdev_set_num_tc(dev, tc)) + struct ixgbe_adapter *adapter = netdev_priv(dev); + + /* Hardware supports up to 8 traffic classes */ + if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc)) return -EINVAL; /* Partition Tx queues evenly amongst traffic classes */ @@ -4616,6 +4619,22 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) netdev_set_tc_queue(dev, i, q, offset); offset += q; } + + /* This enables multiple traffic class support in the hardware + * which defaults to strict priority transmission by default. + * If traffic classes are already enabled perhaps through DCB + * code path then existing configuration will be used. + */ + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) { + struct ieee_ets ets = { + .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7}, + }; + u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; + + dev->dcbnl_ops->setdcbx(dev, mode); + dev->dcbnl_ops->ieee_setets(dev, &ets); + } } return 0; } @@ -7022,6 +7041,9 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, +#ifdef CONFIG_IXGBE_DCB + .ndo_setup_tc = ixgbe_setup_tc, +#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif -- cgit v1.1 From 8187cd485b1a74b6ae258786b9ade3ecaafec318 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 23 Feb 2011 05:58:08 +0000 Subject: ixgbe: DCB: enable RSS to be used with DCB RSS had previously been disabled when DCB was enabled because DCB was single queued per traffic class. Now that DCB implements multiple Tx/Rx rings per traffic class enable RSS. Here RSS hashes across the queues in the traffic class. 
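To make "RSS hashes across the queues in the traffic class" concrete, here is a minimal standalone sketch (plain userspace C, not driver code) that folds an RSS hash into the queue range owned by one traffic class. The struct tc_queue_range and rss_queue_in_tc names are invented for the example, and the 8-TC-by-8-queue layout is only an assumption.

#include <stdio.h>

struct tc_queue_range {
        unsigned int offset;    /* first queue index owned by the TC */
        unsigned int count;     /* number of queues in the TC */
};

/* Fold an RSS hash onto one of the queues belonging to the traffic class. */
static unsigned int rss_queue_in_tc(unsigned int rss_hash,
                                    const struct tc_queue_range *tc)
{
        return tc->offset + (rss_hash % tc->count);
}

int main(void)
{
        /* assume 8 TCs x 8 queues, so TC2 owns queues 16..23 */
        struct tc_queue_range tc2 = { .offset = 16, .count = 8 };
        unsigned int hash;

        for (hash = 0; hash < 4; hash++)
                printf("hash %u -> queue %u\n",
                       hash, rss_queue_in_tc(hash, &tc2));
        return 0;
}

With DCB and RSS both enabled, the IXGBE_MRQC_RTRSS8TCEN mode selected in the hunk below is what lets the hardware do this per-class spreading.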
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_dcb_nl.c | 2 -- drivers/net/ixgbe/ixgbe_main.c | 32 +++++++++++++++++++++++++------- 2 files changed, 25 insertions(+), 9 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index b7b6db3..91ff51c 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -129,7 +129,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: adapter->last_lfc_mode = adapter->hw.fc.current_mode; @@ -162,7 +161,6 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; - adapter->flags |= IXGBE_FLAG_RSS_ENABLED; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 3ce0f4f..be2e145 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2892,17 +2892,20 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) ); switch (mask) { +#ifdef CONFIG_IXGBE_DCB + case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED): + mrqc = IXGBE_MRQC_RTRSS8TCEN; + break; + case (IXGBE_FLAG_DCB_ENABLED): + mrqc = IXGBE_MRQC_RT8TCEN; + break; +#endif /* CONFIG_IXGBE_DCB */ case (IXGBE_FLAG_RSS_ENABLED): mrqc = IXGBE_MRQC_RSSEN; break; case (IXGBE_FLAG_SRIOV_ENABLED): mrqc = IXGBE_MRQC_VMDQEN; break; -#ifdef CONFIG_IXGBE_DCB - case (IXGBE_FLAG_DCB_ENABLED): - mrqc = IXGBE_MRQC_RT8TCEN; - break; -#endif /* CONFIG_IXGBE_DCB */ default: break; } @@ -3672,6 +3675,23 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) /* reconfigure the hardware */ ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); + + /* Enable RSS Hash per TC */ + if (hw->mac.type != ixgbe_mac_82598EB) { + int i; + u32 reg = 0; + + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { + u8 msb = 0; + u8 cnt = adapter->netdev->tc_to_txq[i].count; + + while (cnt >>= 1) + msb++; + + reg |= msb << IXGBE_RQTC_SHIFT_TC(i); + } + IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg); + } } #endif @@ -7343,8 +7363,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED); - if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; #ifdef CONFIG_IXGBE_DCB netdev->dcbnl_ops = &dcbnl_ops; -- cgit v1.1 From 3b97fd695453ced96f22bdf1a84453f6744d25cc Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 23 Feb 2011 05:58:14 +0000 Subject: ixgbe: DCB, missed translation from 8021Qaz TSA to CEE link strict The patch below allowed IEEE 802.1Qaz and CEE DCB hardware configurations to use common hardware set routines, commit 88eb696cc6a7af8f9272266965b1a4dd7d6a931b Author: John Fastabend Date: Thu Feb 10 03:02:11 2011 -0800 ixgbe: DCB, abstract out dcb_config from DCB hardware configuration However the case when CEE link strict and group strict are set was missed and are currently being mapped incorrectly in some configurations. This patch resolves this. 
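For reference, the translation being relocated can be sketched on its own as below (standalone C, not driver code). The local TSA_* defines stand in for the kernel's IEEE_8021QAZ_TSA_* values; as in the patch, CEE prio_type 2 means link (priority) strict, 0 means ETS group scheduling, and any other TSA value is rejected with an error.

#include <stdio.h>

#define TSA_STRICT      0       /* stands in for IEEE_8021QAZ_TSA_STRICT */
#define TSA_ETS         2       /* stands in for IEEE_8021QAZ_TSA_ETS */

/* Translate one IEEE TSA value to the CEE prio_type used by the hw code. */
static int tsa_to_cee_prio_type(int tsa, unsigned char *prio_type)
{
        switch (tsa) {
        case TSA_STRICT:
                *prio_type = 2;         /* link (priority) strict */
                return 0;
        case TSA_ETS:
                *prio_type = 0;         /* ETS / group scheduling */
                return 0;
        default:
                return -1;              /* unsupported, reject like the driver */
        }
}

int main(void)
{
        int tsa[8] = { TSA_ETS, TSA_ETS, TSA_STRICT, TSA_ETS,
                       TSA_ETS, TSA_ETS, TSA_ETS, TSA_STRICT };
        unsigned char prio_type[8];
        int i;

        for (i = 0; i < 8; i++) {
                if (tsa_to_cee_prio_type(tsa[i], &prio_type[i]))
                        return 1;
                printf("TC%d: tsa %d -> prio_type %d\n", i, tsa[i], prio_type[i]);
        }
        return 0;
}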
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_dcb.c | 24 +----------------------- drivers/net/ixgbe/ixgbe_dcb_nl.c | 24 ++++++++++++++++++++++-- 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index c2ee6fc..e2e7a29 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c @@ -292,30 +292,8 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en) } s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, - u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa) + u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) { - int i; - u8 prio_type[IEEE_8021QAZ_MAX_TCS]; - - /* Map TSA onto CEE prio type */ - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { - switch (tsa[i]) { - case IEEE_8021QAZ_TSA_STRICT: - prio_type[i] = 2; - break; - case IEEE_8021QAZ_TSA_ETS: - prio_type[i] = 0; - break; - default: - /* Hardware only supports priority strict or - * ETS transmission selection algorithms if - * we receive some other value from dcbnl - * throw an error - */ - return -EINVAL; - } - } - switch (hw->mac.type) { case ixgbe_mac_82598EB: ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 91ff51c..8abef8d 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -642,8 +642,9 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; + __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; - int err; + int i, err; /* naively give each TC a bwg to map onto CEE hardware */ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; @@ -659,9 +660,28 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); + /* Map TSA onto CEE prio type */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + prio_type[i] = 2; + break; + case IEEE_8021QAZ_TSA_ETS: + prio_type[i] = 0; + break; + default: + /* Hardware only supports priority strict or + * ETS transmission selection algorithms if + * we receive some other value from dcbnl + * throw an error + */ + return -EINVAL; + } + } + ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, - bwg_id, ets->tc_tsa); + bwg_id, prio_type); return err; } -- cgit v1.1 From 17049d30c2dec6f26d6165cc135578f9e41d53d3 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 23 Feb 2011 05:58:19 +0000 Subject: ixgbe: IEEE 802.1Qaz, implement priority assignment table This patch adds support to use the priority assignment table in the ieee_ets structure to map priorities to traffic classes. Previously ixgbe only supported a 1:1 mapping. Now we can enable and disable hardware DCB support when multiple traffic classes are actually being used. This allows the default case all priorities mapped to traffic class 0 to work in normal hardware mode and utilize the full packet buffer. This patch does not address putting the hardware in 4TC mode so packet buffer space may be underutilized in this case. A follow up patch can address this optimization. But at least we have the hooks to do this now. Also CEE will behave as it always has and map priorities 1:1 with traffic classes. 
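A small standalone sketch of the IEEE side of that idea, assuming an 8-entry prio_tc table like the one carried in struct ieee_ets: if every priority maps to traffic class 0 the hardware can stay in normal (non-DCB) mode, otherwise more than one traffic class is really in use and DCB should be enabled. The helper name prio_tc_needs_dcb is made up for illustration.

#include <stdio.h>
#include <stdbool.h>

/* Returns true when the prio_tc table maps anything off traffic class 0,
 * i.e. when more than one traffic class is actually in use. */
static bool prio_tc_needs_dcb(const unsigned char prio_tc[8])
{
        int i;

        for (i = 0; i < 8; i++)
                if (prio_tc[i] != 0)
                        return true;
        return false;
}

int main(void)
{
        unsigned char all_tc0[8] = { 0 };                       /* default case */
        unsigned char two_tcs[8] = { 0, 0, 0, 1, 0, 0, 0, 1 };  /* prio 3,7 -> TC1 */

        printf("all_tc0 -> hardware DCB %s\n",
               prio_tc_needs_dcb(all_tc0) ? "on" : "off");
        printf("two_tcs -> hardware DCB %s\n",
               prio_tc_needs_dcb(two_tcs) ? "on" : "off");
        return 0;
}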
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_dcb.c | 13 ++++++++----- drivers/net/ixgbe/ixgbe_dcb.h | 4 ++-- drivers/net/ixgbe/ixgbe_dcb_82599.c | 17 ++++++++++------- drivers/net/ixgbe/ixgbe_dcb_82599.h | 9 ++++++--- drivers/net/ixgbe/ixgbe_dcb_nl.c | 12 ++++++++++-- 5 files changed, 36 insertions(+), 19 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index e2e7a29..e7b551a 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c @@ -246,6 +246,8 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u8 bwgid[MAX_TRAFFIC_CLASS]; u16 refill[MAX_TRAFFIC_CLASS]; u16 max[MAX_TRAFFIC_CLASS]; + /* CEE does not define a priority to tc mapping so map 1:1 */ + u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; /* Unpack CEE standard containers */ ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); @@ -264,7 +266,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, case ixgbe_mac_X540: ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg, pfc_en, refill, max, bwgid, - ptype); + ptype, prio_tc); break; default: break; @@ -292,7 +294,8 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en) } s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, - u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type) + u16 *refill, u16 *max, u8 *bwg_id, + u8 *prio_type, u8 *prio_tc) { switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -306,11 +309,11 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, case ixgbe_mac_82599EB: case ixgbe_mac_X540: ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, - bwg_id, prio_type); + bwg_id, prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, prio_type); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, - bwg_id, prio_type); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); break; default: break; diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h index 515bc27..944838f 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.h +++ b/drivers/net/ixgbe/ixgbe_dcb.h @@ -159,8 +159,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, struct ixgbe_dcb_config *, int, u8); /* DCB hw initialization */ -s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, - u16 *refill, u16 *max, u8 *bwg_id, u8 *prio_type); +s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, + u8 *bwg_id, u8 *prio_type, u8 *tc_prio); s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en); s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index beaa1c1..0a482bb 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c @@ -85,7 +85,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, - u8 *prio_type) + u8 *prio_type, + u8 *prio_tc) { u32 reg = 0; u32 credit_refill = 0; @@ -102,7 +103,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, /* Map all traffic classes to their UP, 1 to 1 */ reg = 0; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - reg |= (i << (i * IXGBE_RTRUP2TC_UP_SHIFT)); + reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); /* Configure traffic class credits and priority */ @@ -194,7 +195,8 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, - u8 *prio_type) + u8 *prio_type, + u8 *prio_tc) { u32 reg; 
u8 i; @@ -211,7 +213,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, /* Map all traffic classes to their UP, 1 to 1 */ reg = 0; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - reg |= (i << (i * IXGBE_RTTUP2TC_UP_SHIFT)); + reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); /* Configure traffic class credits and priority */ @@ -424,15 +426,16 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw) */ s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 rx_pba, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type) + u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) { ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba); ixgbe_dcb_config_82599(hw); - ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, prio_type); + ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, + prio_type, prio_tc); ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, prio_type); ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, - bwg_id, prio_type); + bwg_id, prio_type, prio_tc); ixgbe_dcb_config_pfc_82599(hw, pfc_en); ixgbe_dcb_config_tc_stats_82599(hw); diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h index 0b39ab4..148fd8b 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.h +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h @@ -109,7 +109,8 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, - u8 *prio_type); + u8 *prio_type, + u8 *prio_tc); s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, @@ -121,10 +122,12 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, u16 *max, u8 *bwg_id, - u8 *prio_type); + u8 *prio_type, + u8 *prio_tc); s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 rx_pba, u8 pfc_en, u16 *refill, - u16 *max, u8 *bwg_id, u8 *prio_type); + u16 *max, u8 *bwg_id, u8 *prio_type, + u8 *prio_tc); #endif /* _DCB_82599_CONFIG_H */ diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c index 8abef8d..fec4c72 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c @@ -416,6 +416,8 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; + /* Priority to TC mapping in CEE case default to 1:1 */ + u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; #ifdef CONFIG_FCOE @@ -437,7 +439,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) DCB_TX_CONFIG, prio_type); ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, - bwg_id, prio_type); + bwg_id, prio_type, prio_tc); } if (adapter->dcb_cfg.pfc_mode_enable) @@ -645,6 +647,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, err; + __u64 *p = (__u64 *) ets->prio_tc; /* naively give each TC a bwg to map onto CEE hardware */ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; @@ -679,9 +682,14 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, } } + if (*p) + ixgbe_dcbnl_set_state(dev, 1); + else + ixgbe_dcbnl_set_state(dev, 0); + ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, - bwg_id, prio_type); + bwg_id, prio_type, ets->prio_tc); return err; } -- 
cgit v1.1 From c27931da83bc486e192c8dfdab903ba51e176c54 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 23 Feb 2011 05:58:25 +0000 Subject: ixgbe: DCB during ifup use correct CEE or IEEE mode DCB settings are cleared in the hardware across link events; during ifup, ixgbe reprograms the hardware for DCB if it is enabled. Now that we have two modes, CEE or IEEE, we need to use the correct set of configuration data. This patch checks the dcbx_cap bits and then enables the device in the correct mode. Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_main.c | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index be2e145..38f9758 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -3658,15 +3658,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) if (hw->mac.type == ixgbe_mac_82598EB) netif_set_gso_max_size(adapter->netdev, 32768); -#ifdef CONFIG_FCOE - if (adapter->netdev->features & NETIF_F_FCOE_MTU) - max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); -#endif - - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, - DCB_RX_CONFIG); /* Enable VLAN tag insert/strip */ adapter->netdev->features |= NETIF_F_HW_VLAN_RX; @@ -3674,7 +3665,26 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); /* reconfigure the hardware */ - ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); + if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) { +#ifdef CONFIG_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); +#endif + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_TX_CONFIG); + ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, + DCB_RX_CONFIG); + ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); + } else { + struct net_device *dev = adapter->netdev; + + if (adapter->ixgbe_ieee_ets) + dev->dcbnl_ops->ieee_setets(dev, + adapter->ixgbe_ieee_ets); + if (adapter->ixgbe_ieee_pfc) + dev->dcbnl_ops->ieee_setpfc(dev, + adapter->ixgbe_ieee_pfc); + } /* Enable RSS Hash per TC */ if (hw->mac.type != ixgbe_mac_82598EB) { -- cgit v1.1 From 7e7eb4346329da3b9fd4b8d4a5a66d327d9fff6c Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 4 Mar 2011 03:20:59 +0000 Subject: ixgbe: remove timer reset to 0 on timeout The VF mailbox polling for acks and messages would reset the timer to zero on a timeout. Under heavy load a timeout may occur without being the result of an error, and when this happens it is not practical to perform a full VF driver reset on every message timeout. Instead, just return an error (which is already done) and the VF driver will have an opportunity to retry the operation. 
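The retry-friendly behaviour can be sketched in isolation as below (standalone C with a stubbed-out mailbox, not the driver's ixgbe_poll_for_ack): the countdown simply expires and an error is returned, but because the timeout field is no longer zeroed the next message can be attempted as usual. The struct fields and the check_for_ack stub are invented for the example.

#include <stdio.h>
#include <errno.h>

struct fake_mbx {
        int timeout;            /* poll iterations allowed per message */
        int acks_after;         /* test stub: ack shows up after this many polls */
};

static int check_for_ack(const struct fake_mbx *mbx, int polled)
{
        return polled >= mbx->acks_after;
}

/* Poll until the ack arrives or the countdown expires.  On timeout just
 * report an error; mbx->timeout is deliberately left untouched so the
 * caller can retry the operation later. */
static int poll_for_ack(const struct fake_mbx *mbx)
{
        int countdown = mbx->timeout;
        int polled = 0;

        while (countdown && !check_for_ack(mbx, polled)) {
                countdown--;
                polled++;
                /* the real driver does udelay(mbx->usec_delay) here */
        }

        return countdown ? 0 : -ETIMEDOUT;
}

int main(void)
{
        struct fake_mbx mbx = { .timeout = 5, .acks_after = 10 };

        if (poll_for_ack(&mbx))
                printf("first attempt timed out, retrying...\n");

        mbx.acks_after = 2;     /* the heavy load went away */
        printf("retry %s\n", poll_for_ack(&mbx) ? "failed" : "succeeded");
        return 0;
}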
Signed-off-by: Emil Tantilov Acked-by: Greg Rose Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_mbx.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c index c7ed82e..1ff0eef 100644 --- a/drivers/net/ixgbe/ixgbe_mbx.c +++ b/drivers/net/ixgbe/ixgbe_mbx.c @@ -154,9 +154,6 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) udelay(mbx->usec_delay); } - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; out: return countdown ? 0 : IXGBE_ERR_MBX; } @@ -183,9 +180,6 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) udelay(mbx->usec_delay); } - /* if we failed, all future posted messages fail until reset */ - if (!countdown) - mbx->timeout = 0; out: return countdown ? 0 : IXGBE_ERR_MBX; } -- cgit v1.1 From 9dda173667207fe59d380e522d318c144dc032f7 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Sat, 5 Mar 2011 01:28:07 +0000 Subject: ixgbe: update PHY code to support 100Mbps as well as 1G/10G This change updates the PHY setup code to support 100Mbps capable PHYs as well as 10G and 1Gbps. Signed-off-by: Emil Tantilov Tested-by: Stephen Ko Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_82598.c | 1 + drivers/net/ixgbe/ixgbe_phy.c | 335 ++++++++++++++++++++++++++++------------ drivers/net/ixgbe/ixgbe_phy.h | 1 + drivers/net/ixgbe/ixgbe_type.h | 7 + 4 files changed, 245 insertions(+), 99 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index ff23907..845c679 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -158,6 +158,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) switch (hw->phy.type) { case ixgbe_phy_tn: + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.get_firmware_version = &ixgbe_get_phy_firmware_version_tnx; diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c index 9190a8f..f72f705 100644 --- a/drivers/net/ixgbe/ixgbe_phy.c +++ b/drivers/net/ixgbe/ixgbe_phy.c @@ -402,49 +402,89 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, **/ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) { - s32 status = IXGBE_NOT_IMPLEMENTED; + s32 status = 0; u32 time_out; u32 max_time_out = 10; - u16 autoneg_reg; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; - /* - * Set advertisement settings in PHY based on autoneg_advertised - * settings. If autoneg_advertised = 0, then advertise default values - * tnx devices cannot be "forced" to a autoneg 10G and fail. But can - * for a 1G. 
- */ - hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, &autoneg_reg); + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &autoneg_reg); - if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; - else - autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; - hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, MDIO_MMD_AN, autoneg_reg); + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~ADVERTISE_100FULL; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + autoneg_reg); + } /* Restart PHY autonegotiation and wait for completion */ - hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, &autoneg_reg); + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); autoneg_reg |= MDIO_AN_CTRL1_RESTART; - hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_AN, autoneg_reg); + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); /* Wait for autonegotiation to finish */ for (time_out = 0; time_out < max_time_out; time_out++) { udelay(10); /* Restart PHY autonegotiation and wait for completion */ - status = hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, - &autoneg_reg); + status = hw->phy.ops.read_reg(hw, MDIO_STAT1, + MDIO_MMD_AN, + &autoneg_reg); autoneg_reg &= MDIO_AN_STAT1_COMPLETE; if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { - status = 0; break; } } - if (time_out == max_time_out) + if (time_out == max_time_out) { status = IXGBE_ERR_LINK_SETUP; + hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); + } return status; } @@ -473,6 +513,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + /* Setup link based on the new speed settings */ hw->phy.ops.setup_link(hw); @@ -513,6 +556,180 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, } /** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. 
+ **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + s32 status = 0; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + status = hw->phy.ops.read_reg(hw, + MDIO_STAT1, + MDIO_MMD_VEND1, + &phy_data); + phy_link = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + + return status; +} + +/** + * ixgbe_setup_phy_link_tnx - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +{ + s32 status = 0; + u32 time_out; + u32 max_time_out = 10; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; + + hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + MDIO_MMD_AN, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= ~ADVERTISE_100FULL; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= ADVERTISE_100FULL; + + hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, + MDIO_MMD_AN, + autoneg_reg); + } + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, &autoneg_reg); + + autoneg_reg |= MDIO_AN_CTRL1_RESTART; + + hw->phy.ops.write_reg(hw, MDIO_CTRL1, + MDIO_MMD_AN, autoneg_reg); + + /* Wait for autonegotiation to finish */ + for (time_out = 0; time_out < max_time_out; time_out++) { + udelay(10); + /* Restart PHY autonegotiation and wait for completion */ + status = hw->phy.ops.read_reg(hw, MDIO_STAT1, + MDIO_MMD_AN, + &autoneg_reg); + + autoneg_reg &= MDIO_AN_STAT1_COMPLETE; + if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) + break; + } + + if (time_out == max_time_out) { + status = IXGBE_ERR_LINK_SETUP; + hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); + } + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY 
Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = 0; + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + MDIO_MMD_VEND1, + firmware_version); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = 0; + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, + MDIO_MMD_VEND1, + firmware_version); + + return status; +} + +/** * ixgbe_reset_phy_nl - Performs a PHY reset * @hw: pointer to hardware structure **/ @@ -1477,86 +1694,6 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) } /** - * ixgbe_check_phy_link_tnx - Determine link and speed status - * @hw: pointer to hardware structure - * - * Reads the VS1 register to determine if link is up and the current speed for - * the PHY. - **/ -s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, - bool *link_up) -{ - s32 status = 0; - u32 time_out; - u32 max_time_out = 10; - u16 phy_link = 0; - u16 phy_speed = 0; - u16 phy_data = 0; - - /* Initialize speed and link to default case */ - *link_up = false; - *speed = IXGBE_LINK_SPEED_10GB_FULL; - - /* - * Check current speed and link status of the PHY register. - * This is a vendor specific register and may have to - * be changed for other copper PHYs. - */ - for (time_out = 0; time_out < max_time_out; time_out++) { - udelay(10); - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, - MDIO_MMD_VEND1, - &phy_data); - phy_link = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; - phy_speed = phy_data & - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; - if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { - *link_up = true; - if (phy_speed == - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) - *speed = IXGBE_LINK_SPEED_1GB_FULL; - break; - } - } - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version - **/ -s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status = 0; - - status = hw->phy.ops.read_reg(hw, TNX_FW_REV, MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** - * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version - * @hw: pointer to hardware structure - * @firmware_version: pointer to the PHY Firmware Version -**/ -s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, - u16 *firmware_version) -{ - s32 status = 0; - - status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1, - firmware_version); - - return status; -} - -/** * ixgbe_tn_check_overtemp - Checks if an overtemp occured. 
* @hw: pointer to hardware structure * diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h index 9bf2783..197bdd1 100644 --- a/drivers/net/ixgbe/ixgbe_phy.h +++ b/drivers/net/ixgbe/ixgbe_phy.h @@ -108,6 +108,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *link_up); +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, u16 *firmware_version); s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index f190a4a..63d862d 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -1009,6 +1009,13 @@ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ +/* MII clause 22/28 definitions */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_AUTONEG_REG 0x0 + #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 #define IXGBE_MAX_PHY_ADDR 32 -- cgit v1.1 From 6fb456a07c68913516da9de90d3849ee9821dea8 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Sat, 5 Mar 2011 08:02:18 +0000 Subject: ixgbe: correct typo in define name VF Free Running Timer register name missing an F. Signed-off-by: Emil Tantilov Acked-by: Greg Rose Tested-by: Evan Swanson Signed-off-by: Jeff Kirsher --- drivers/net/ixgbevf/ethtool.c | 4 ++-- drivers/net/ixgbevf/regs.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c index fa29b3c..0563ab2 100644 --- a/drivers/net/ixgbevf/ethtool.c +++ b/drivers/net/ixgbevf/ethtool.c @@ -172,7 +172,7 @@ static char *ixgbevf_reg_names[] = { "IXGBE_VFSTATUS", "IXGBE_VFLINKS", "IXGBE_VFRXMEMWRAP", - "IXGBE_VFRTIMER", + "IXGBE_VFFRTIMER", "IXGBE_VTEICR", "IXGBE_VTEICS", "IXGBE_VTEIMS", @@ -240,7 +240,7 @@ static void ixgbevf_get_regs(struct net_device *netdev, regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS); regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS); regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP); - regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER); + regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER); /* Interrupt */ /* don't read EICR because it can clear interrupt causes, instead diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h index fb80ca1..189200e 100644 --- a/drivers/net/ixgbevf/regs.h +++ b/drivers/net/ixgbevf/regs.h @@ -31,7 +31,7 @@ #define IXGBE_VFCTRL 0x00000 #define IXGBE_VFSTATUS 0x00008 #define IXGBE_VFLINKS 0x00010 -#define IXGBE_VFRTIMER 0x00048 +#define IXGBE_VFFRTIMER 0x00048 #define IXGBE_VFRXMEMWRAP 0x03190 #define IXGBE_VTEICR 0x00100 #define IXGBE_VTEICS 0x00104 -- cgit v1.1 From 1390a59452a0895d3fea5b5504fa75ba36c13a74 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 9 Mar 2011 04:46:16 +0000 Subject: ixgbe: DCB, set minimum bandwidth per traffic class DCB provides a guaranteed bandwidth in the case with 0% bandwidth then no bandwidth is guaranteed. However the traffic class should still be able to transmit traffic. For this to work the traffic class must be given the minimum credits required to send a frame. 
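The resulting credit assignment can be illustrated with the standalone sketch below; MAX_CREDIT and min_credit here are placeholder values rather than the driver's exact constants, but the key expression mirrors the patch: a 0% traffic class now receives min_credit instead of 0, so it can still send a frame.

#include <stdio.h>

#define MAX_CREDIT      4095    /* placeholder credit scale */

/* A TC with non-zero bandwidth gets a proportional share; a 0% TC gets the
 * minimum credits needed to send one frame instead of 0. */
static unsigned int tc_max_credit(unsigned int bw_pct, unsigned int min_credit)
{
        return bw_pct ? (bw_pct * MAX_CREDIT) / 100 : min_credit;
}

int main(void)
{
        unsigned int bw[8] = { 50, 30, 20, 0, 0, 0, 0, 0 };
        unsigned int min_credit = 12;   /* placeholder: roughly one max-sized frame */
        int i;

        for (i = 0; i < 8; i++)
                printf("TC%d: %u%% -> max credits %u\n",
                       i, bw[i], tc_max_credit(bw[i], min_credit));
        return 0;
}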
Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_dcb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c index e7b551a..41c529f 100644 --- a/drivers/net/ixgbe/ixgbe_dcb.c +++ b/drivers/net/ixgbe/ixgbe_dcb.c @@ -64,7 +64,7 @@ s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, __u16 *max, int max_frame) val = min_credit; refill[i] = val; - max[i] = (bw[i] * MAX_CREDIT)/100; + max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; } return 0; } -- cgit v1.1 From ff4ab2061199cdb938282d302d5044b1858e28c8 Mon Sep 17 00:00:00 2001 From: Lior Levy Date: Fri, 11 Mar 2011 02:03:07 +0000 Subject: ixgbe: add support for VF Transmit rate limit using iproute2 Implemented ixgbe_ndo_set_vf_bw function which is being used by iproute2 tool. In addition, updated ixgbe_ndo_get_vf_config function to show the actual rate limit to the user. The rate limitation can be configured only when the link is up and the link speed is 10Gb. The rate limit value can be 0 or ranged between 11 and actual link speed measured in Mbps. A value of '0' disables the rate limit for this specific VF. iproute2 usage will be 'ip link set ethX vf Y rate Z'. After the command is made, the rate will be changed instantly. To view the current rate limit, use 'ip link show ethX'. The rates will be zeroed only upon driver reload or a link speed change. This feature is being supported by 82599 and X540 devices. Signed-off-by: Lior Levy Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe.h | 2 + drivers/net/ixgbe/ixgbe_main.c | 1 + drivers/net/ixgbe/ixgbe_sriov.c | 85 ++++++++++++++++++++++++++++++++++++++++- drivers/net/ixgbe/ixgbe_sriov.h | 1 + drivers/net/ixgbe/ixgbe_type.h | 6 +++ 5 files changed, 93 insertions(+), 2 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h index b7e62d5..8d46802 100644 --- a/drivers/net/ixgbe/ixgbe.h +++ b/drivers/net/ixgbe/ixgbe.h @@ -118,6 +118,7 @@ struct vf_data_storage { bool pf_set_mac; u16 pf_vlan; /* When set, guest VLAN config not allowed. */ u16 pf_qos; + u16 tx_rate; }; /* wrapper around a pointer to a socket buffer, @@ -468,6 +469,7 @@ struct ixgbe_adapter { DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); unsigned int num_vfs; struct vf_data_storage *vfinfo; + int vf_rate_link_speed; }; enum ixbge_state_t { diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 38f9758..f17e4a7 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -6217,6 +6217,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) (flow_tx ? 
"TX" : "None")))); netif_carrier_on(netdev); + ixgbe_check_vf_rate_limit(adapter); } else { /* Force detection of hung controller */ for (i = 0; i < adapter->num_tx_queues; i++) { diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c index 58c9b45..6e50d83 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ixgbe/ixgbe_sriov.c @@ -478,9 +478,90 @@ out: return err; } +static int ixgbe_link_mbps(int internal_link_speed) +{ + switch (internal_link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + return 100; + case IXGBE_LINK_SPEED_1GB_FULL: + return 1000; + case IXGBE_LINK_SPEED_10GB_FULL: + return 10000; + default: + return 0; + } +} + +static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate, + int link_speed) +{ + int rf_dec, rf_int; + u32 bcnrc_val; + + if (tx_rate != 0) { + /* Calculate the rate factor values to set */ + rf_int = link_speed / tx_rate; + rf_dec = (link_speed - (rf_int * tx_rate)); + rf_dec = (rf_dec * (1<vf_rate_link_speed == 0) + return; + + actual_link_speed = ixgbe_link_mbps(adapter->link_speed); + if (actual_link_speed != adapter->vf_rate_link_speed) { + reset_rate = true; + adapter->vf_rate_link_speed = 0; + dev_info(&adapter->pdev->dev, + "Link speed has been changed. VF Transmit rate " + "is disabled\n"); + } + + for (i = 0; i < adapter->num_vfs; i++) { + if (reset_rate) + adapter->vfinfo[i].tx_rate = 0; + + ixgbe_set_vf_rate_limit(&adapter->hw, i, + adapter->vfinfo[i].tx_rate, + actual_link_speed); + } +} + int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate) { - return -EOPNOTSUPP; + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + int actual_link_speed; + + actual_link_speed = ixgbe_link_mbps(adapter->link_speed); + if ((vf >= adapter->num_vfs) || (!adapter->link_up) || + (tx_rate > actual_link_speed) || (actual_link_speed != 10000) || + ((tx_rate != 0) && (tx_rate <= 10))) + /* rate limit cannot be set to 10Mb or less in 10Gb adapters */ + return -EINVAL; + + adapter->vf_rate_link_speed = actual_link_speed; + adapter->vfinfo[vf].tx_rate = (u16)tx_rate; + ixgbe_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed); + + return 0; } int ixgbe_ndo_get_vf_config(struct net_device *netdev, @@ -491,7 +572,7 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, return -EINVAL; ivi->vf = vf; memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); - ivi->tx_rate = 0; + ivi->tx_rate = adapter->vfinfo[vf].tx_rate; ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; return 0; diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h index e7dd029..3417556 100644 --- a/drivers/net/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ixgbe/ixgbe_sriov.h @@ -40,6 +40,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); #endif /* _IXGBE_SRIOV_H_ */ diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h index 63d862d..25c1fb7 100644 --- a/drivers/net/ixgbe/ixgbe_type.h +++ b/drivers/net/ixgbe/ixgbe_type.h @@ -533,6 +533,12 @@ #define IXGBE_RTTDTECC 0x04990 #define IXGBE_RTTDTECC_NO_BCN 0x00000100 #define IXGBE_RTTBCNRC 0x04984 +#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define 
IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) + /* FCoE registers */ #define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ -- cgit v1.1 From 1f4a0244ff002672be855ff2eaa4a29a63d42d42 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Thu, 10 Mar 2011 12:06:12 +0000 Subject: ixgbe: DCB, PFC not cleared until reset occurs The PFC configuration is not cleared until the device is reset. This has not been a problem because setting DCB attributes forced a hardware reset. Now that we no longer require this reset to occur PFC remains configured even after being disabled until the device is reset. This removes a goto in the PFC hardware set routines for 82598 and 82599 devices that was short circuiting the clear. Signed-off-by: John Fastabend Tested-by: Ross Brattain Signed-off-by: Jeff Kirsher --- drivers/net/ixgbe/ixgbe_dcb_82598.c | 44 +++++++++++++++--------------- drivers/net/ixgbe/ixgbe_dcb_82599.c | 54 ++++++++++++++++++------------------- 2 files changed, 47 insertions(+), 51 deletions(-) diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c index c97cf91..1bc57e5 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82598.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c @@ -233,21 +233,27 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) u32 reg, rx_pba_size; u8 i; - if (!pfc_en) - goto out; - - /* Enable Transmit Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_RMCS); - reg &= ~IXGBE_RMCS_TFCE_802_3X; - /* correct the reporting of our flow control status */ - reg |= IXGBE_RMCS_TFCE_PRIORITY; - IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); - - /* Enable Receive Priority Flow Control */ - reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); - reg &= ~IXGBE_FCTRL_RFCE; - reg |= IXGBE_FCTRL_RPFCE; - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + if (pfc_en) { + /* Enable Transmit Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + reg &= ~IXGBE_RMCS_TFCE_802_3X; + /* correct the reporting of our flow control status */ + reg |= IXGBE_RMCS_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Enable Receive Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + reg &= ~IXGBE_FCTRL_RFCE; + reg |= IXGBE_FCTRL_RPFCE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + + /* Configure pause time */ + for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); + } /* * Configure flow control thresholds and enable priority flow control @@ -273,14 +279,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg); } - /* Configure pause time */ - for (i = 0; i < (MAX_TRAFFIC_CLASS >> 1); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), 0x68006800); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, 0x3400); - -out: return 0; } diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c index 0a482bb..025af8c 100644 --- a/drivers/net/ixgbe/ixgbe_dcb_82599.c +++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c @@ -253,13 +253,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en) { u32 i, reg, rx_pba_size; - /* If PFC is disabled globally then fall back to LFC. 
*/ - if (!pfc_en) { - for (i = 0; i < MAX_TRAFFIC_CLASS; i++) - hw->mac.ops.fc_enable(hw, i); - goto out; - } - /* Configure PFC Tx thresholds per TC */ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { int enabled = pfc_en & (1 << i); @@ -278,28 +271,33 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en) IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); } - /* Configure pause time (2 TCs per register) */ - reg = hw->fc.pause_time | (hw->fc.pause_time << 16); - for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); - - /* Configure flow control refresh threshold value */ - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); - - /* Enable Transmit PFC */ - reg = IXGBE_FCCFG_TFCE_PRIORITY; - IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); + if (pfc_en) { + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time | (hw->fc.pause_time << 16); + for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + + reg = IXGBE_FCCFG_TFCE_PRIORITY; + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg); + /* + * Enable Receive PFC + * We will always honor XOFF frames we receive when + * we are in PFC mode. + */ + reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + reg &= ~IXGBE_MFLCN_RFCE; + reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + + } else { + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + hw->mac.ops.fc_enable(hw, i); + } - /* - * Enable Receive PFC - * We will always honor XOFF frames we receive when - * we are in PFC mode. - */ - reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); - reg &= ~IXGBE_MFLCN_RFCE; - reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF; - IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); -out: return 0; } -- cgit v1.1
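As a closing illustration of the per-TC decision the 82599 routine above makes from the pfc_en bitmask, the standalone sketch below walks the mask the same way (pfc_en & (1 << i)) and falls back to link flow control when the mask is zero; register writes are replaced with printfs, so this is a control-flow sketch only, not driver code.

#include <stdio.h>

#define MAX_TRAFFIC_CLASS 8

static void config_pfc_sketch(unsigned char pfc_en)
{
        int i;

        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
                int enabled = pfc_en & (1 << i);

                printf("TC%d: PFC %s\n", i,
                       enabled ? "enabled, XOFF threshold programmed"
                               : "disabled, threshold cleared");
        }

        if (!pfc_en)
                printf("mask is zero -> fall back to link flow control\n");
}

int main(void)
{
        config_pfc_sketch(0x09);        /* PFC on TC0 and TC3 */
        config_pfc_sketch(0x00);        /* PFC fully disabled */
        return 0;
}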