Diffstat (limited to 'sys/dev/ixgbe/if_ixv.c')
-rw-r--r--   sys/dev/ixgbe/if_ixv.c   92
1 file changed, 61 insertions(+), 31 deletions(-)
diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c
index a550a85..cc2d63f 100644
--- a/sys/dev/ixgbe/if_ixv.c
+++ b/sys/dev/ixgbe/if_ixv.c
@@ -43,7 +43,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixv_driver_version[] = "1.2.5";
+char ixv_driver_version[] = "1.4.0";
/*********************************************************************
* PCI Device ID Table
@@ -151,6 +151,10 @@ MODULE_DEPEND(ixv, ether, 1, 1, 1);
** TUNEABLE PARAMETERS:
*/
+/* Number of Queues - do not exceed MSIX vectors - 1 */
+static int ixv_num_queues = 1;
+TUNABLE_INT("hw.ixv.num_queues", &ixv_num_queues);
+
/*
** AIM: Adaptive Interrupt Moderation
** which means that the interrupt rate
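A usage note on the hunk above: TUNABLE_INT() registers hw.ixv.num_queues as a loader tunable, so on FreeBSD it would normally be set before the driver loads, for example hw.ixv.num_queues="2" in /boot/loader.conf (the value 2 is purely illustrative). As the comment says, the queue count must stay at or below the number of MSIX vectors minus one, since one vector is reserved for the mailbox; the ixv_setup_msix() hunk below enforces this by clamping the request.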
@@ -338,6 +342,11 @@ ixv_attach(device_t dev)
ixgbe_reset_hw(hw);
+ /* Get the Mailbox API version */
+ device_printf(dev,"MBX API %d negotiation: %d\n",
+ ixgbe_mbox_api_11,
+ ixgbevf_negotiate_api_version(hw, ixgbe_mbox_api_11));
+
error = ixgbe_init_hw(hw);
if (error) {
device_printf(dev,"Hardware Initialization Failure\n");
@@ -1313,10 +1322,13 @@ static int
ixv_setup_msix(struct adapter *adapter)
{
device_t dev = adapter->dev;
- int rid, want;
+ int rid, want, msgs;
- /* First try MSI/X */
+ /* Must have at least 2 MSIX vectors */
+ msgs = pci_msix_count(dev);
+ if (msgs < 2)
+ goto out;
rid = PCIR_BAR(3);
adapter->msix_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
@@ -1327,11 +1339,16 @@ ixv_setup_msix(struct adapter *adapter)
}
/*
- ** Want two vectors: one for a queue,
+ ** Want vectors for the queues,
** plus an additional for mailbox.
*/
- want = 2;
- if ((pci_alloc_msix(dev, &want) == 0) && (want == 2)) {
+ want = adapter->num_queues + 1;
+ if (want > msgs) {
+ want = msgs;
+ adapter->num_queues = msgs - 1;
+ } else
+ msgs = want;
+ if ((pci_alloc_msix(dev, &msgs) == 0) && (msgs == want)) {
device_printf(adapter->dev,
"Using MSIX interrupts with %d vectors\n", want);
return (want);
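To make the vector accounting in ixv_setup_msix() above concrete, here is a minimal stand-alone sketch of the same arithmetic; the msgs and num_queues values are illustrative stand-ins for what pci_msix_count() and the hw.ixv.num_queues tunable would supply, not values taken from the patch:

#include <stdio.h>

/*
 * Sketch of the MSIX vector math: one vector per queue plus one for
 * the PF/VF mailbox, clamped to what the device actually offers.
 */
int
main(void)
{
	int msgs = 3;		/* pretend pci_msix_count() reported 3 vectors */
	int num_queues = 4;	/* pretend hw.ixv.num_queues was set to 4 */
	int want;

	if (msgs < 2) {
		printf("need at least 2 MSIX vectors, giving up\n");
		return (1);
	}

	want = num_queues + 1;			/* queues + mailbox */
	if (want > msgs) {
		want = msgs;			/* take what is available */
		num_queues = msgs - 1;		/* and shrink the queue count */
	}

	printf("queues %d, MSIX vectors requested %d\n", num_queues, want);
	return (0);
}

With these numbers the sketch prints "queues 2, MSIX vectors requested 3", mirroring how the driver trims the requested queue count when the device exposes fewer vectors than hw.ixv.num_queues + 1.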
@@ -1370,7 +1387,9 @@ ixv_allocate_pci_resources(struct adapter *adapter)
rman_get_bushandle(adapter->pci_mem);
adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
- adapter->num_queues = 1;
+ /* Pick up the tuneable queues */
+ adapter->num_queues = ixv_num_queues;
+
adapter->hw.back = &adapter->osdep;
/*
@@ -1591,32 +1610,41 @@ ixv_initialize_receive_units(struct adapter *adapter)
{
struct rx_ring *rxr = adapter->rx_rings;
struct ixgbe_hw *hw = &adapter->hw;
- struct ifnet *ifp = adapter->ifp;
- u32 bufsz, fctrl, rxcsum, hlreg;
-
-
- /* Enable broadcasts */
- fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- fctrl |= IXGBE_FCTRL_BAM;
- fctrl |= IXGBE_FCTRL_DPF;
- fctrl |= IXGBE_FCTRL_PMCF;
- IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+ struct ifnet *ifp = adapter->ifp;
+ u32 bufsz, rxcsum, psrtype;
+ int max_frame;
- /* Set for Jumbo Frames? */
- hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (ifp->if_mtu > ETHERMTU) {
- hlreg |= IXGBE_HLREG0_JUMBOEN;
+ if (ifp->if_mtu > ETHERMTU)
bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- } else {
- hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+ else
bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- }
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
+
+ psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+
+ /* Tell PF our expected packet-size */
+ max_frame = ifp->if_mtu + IXGBE_MTU_HDR;
+ ixgbevf_rlpml_set_vf(hw, max_frame);
for (int i = 0; i < adapter->num_queues; i++, rxr++) {
u64 rdba = rxr->rxdma.dma_paddr;
u32 reg, rxdctl;
+ /* Disable the queue */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ rxdctl &= ~(IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+ for (int j = 0; j < 10; j++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+ IXGBE_RXDCTL_ENABLE)
+ msec_delay(1);
+ else
+ break;
+ }
+ wmb();
/* Setup the Base and Length of the Rx Descriptor Ring */
IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
(rdba & 0x00000000ffffffffULL));
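A small arithmetic aside on the buffer sizing in this hunk: SRRCTL takes the receive buffer size in 1 KB units, so assuming the usual IXGBE_SRRCTL_BSIZEPKT_SHIFT value of 10 (an assumption, its definition is not part of this diff), 2048 >> 10 programs a 2 KB buffer for standard MTUs and 4096 >> 10 a 4 KB buffer when if_mtu exceeds ETHERMTU.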
@@ -1625,6 +1653,10 @@ ixv_initialize_receive_units(struct adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+ /* Reset the ring indices */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), 0);
+
/* Set up the SRRCTL register */
reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -1633,14 +1665,14 @@ ixv_initialize_receive_units(struct adapter *adapter)
reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
- /* Setup the HW Rx Head and Tail Descriptor Pointers */
- IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+ /* Set the Tail Pointer */
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
adapter->num_rx_desc - 1);
+
/* Set the processing limit */
rxr->process_limit = ixv_rx_process_limit;
- /* Set Rx Tail register */
+ /* Capture Rx Tail index */
rxr->tail = IXGBE_VFRDT(rxr->me);
/* Do the queue enabling last */
@@ -2033,9 +2065,7 @@ ixv_add_stats_sysctls(struct adapter *adapter)
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
CTLFLAG_RD, &(txr->total_packets),
"TX Packets");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_bytes",
- CTLFLAG_RD, &(txr->bytes), 0,
- "TX Bytes");
+
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_no_desc",
CTLFLAG_RD, &(txr->no_desc_avail),
"# of times not enough descriptors were available during TX");