| author | Jiri Kosina <jkosina@suse.cz> | 2010-12-10 15:19:18 +0100 |
|---|---|---|
| committer | Jiri Kosina <jkosina@suse.cz> | 2010-12-10 15:19:18 +0100 |
| commit | 2ade0c1d9d93b7642212657ef76f4a1e30233711 (patch) | |
| tree | 63bc720c0ffe5f4760cac4ed617b9870b050175e /drivers/net/ehea | |
| parent | 504499f22c08a03e2e19dc88d31aa0ecd2ac815e (diff) | |
| parent | 6313e3c21743cc88bb5bd8aa72948ee1e83937b6 (diff) | |
| download | op-kernel-dev-2ade0c1d9d93b7642212657ef76f4a1e30233711.zip | op-kernel-dev-2ade0c1d9d93b7642212657ef76f4a1e30233711.tar.gz |
Merge branch 'master' into upstream
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r-- | drivers/net/ehea/ehea.h | 2
-rw-r--r-- | drivers/net/ehea/ehea_main.c | 60
2 files changed, 47 insertions, 15 deletions
```diff
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1321cb6..8e745e7 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -396,7 +396,9 @@ struct ehea_port_res {
 	int swqe_ll_count;
 	u32 swqe_id_counter;
 	u64 tx_packets;
+	u64 tx_bytes;
 	u64 rx_packets;
+	u64 rx_bytes;
 	u32 poll_counter;
 	struct net_lro_mgr lro_mgr;
 	struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index bb7d306..3d0af08 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 	struct ehea_port *port = netdev_priv(dev);
 	struct net_device_stats *stats = &port->stats;
 	struct hcp_ehea_port_cb2 *cb2;
-	u64 hret, rx_packets, tx_packets;
+	u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
 	int i;
 
 	memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
 
 	rx_packets = 0;
-	for (i = 0; i < port->num_def_qps; i++)
+	for (i = 0; i < port->num_def_qps; i++) {
 		rx_packets += port->port_res[i].rx_packets;
+		rx_bytes   += port->port_res[i].rx_bytes;
+	}
 
 	tx_packets = 0;
-	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		tx_packets += port->port_res[i].tx_packets;
+		tx_bytes   += port->port_res[i].tx_bytes;
+	}
 
 	stats->tx_packets = tx_packets;
 	stats->multicast = cb2->rxmcp;
 	stats->rx_errors = cb2->rxuerr;
-	stats->rx_bytes = cb2->rxo;
-	stats->tx_bytes = cb2->txo;
+	stats->rx_bytes = rx_bytes;
+	stats->tx_bytes = tx_bytes;
 	stats->rx_packets = rx_packets;
 
 out_herr:
@@ -396,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
 			skb_arr_rq1[index] = netdev_alloc_skb(dev,
 							      EHEA_L_PKT_SIZE);
 			if (!skb_arr_rq1[index]) {
+				ehea_info("Unable to allocate enough skb in the array\n");
 				pr->rq1_skba.os_skbs = fill_wqes - i;
 				break;
 			}
@@ -418,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
 	struct net_device *dev = pr->port->netdev;
 	int i;
 
-	for (i = 0; i < pr->rq1_skba.len; i++) {
+	if (nr_rq1a > pr->rq1_skba.len) {
+		ehea_error("NR_RQ1A bigger than skb array len\n");
+		return;
+	}
+
+	for (i = 0; i < nr_rq1a; i++) {
 		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
-		if (!skb_arr_rq1[i])
+		if (!skb_arr_rq1[i]) {
+			ehea_info("No enough memory to allocate skb array\n");
 			break;
+		}
 	}
 	/* Ring doorbell */
-	ehea_update_rq1a(pr->qp, nr_rq1a);
+	ehea_update_rq1a(pr->qp, i);
 }
 
 static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -703,6 +715,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 	int skb_arr_rq2_len = pr->rq2_skba.len;
 	int skb_arr_rq3_len = pr->rq3_skba.len;
 	int processed, processed_rq1, processed_rq2, processed_rq3;
+	u64 processed_bytes = 0;
 	int wqe_index, last_wqe_index, rq, port_reset;
 
 	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -730,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev,
 
 					skb = netdev_alloc_skb(dev,
 							       EHEA_L_PKT_SIZE);
-					if (!skb)
+					if (!skb) {
+						ehea_info("Not enough memory to allocate skb\n");
 						break;
+					}
 				}
 				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
 						 cqe->num_bytes_transfered - 4);
@@ -760,6 +775,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 				processed_rq3++;
 			}
 
+			processed_bytes += skb->len;
 			ehea_proc_skb(pr, cqe, skb);
 		} else {
 			pr->p_stats.poll_receive_errors++;
@@ -775,6 +791,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 		lro_flush_all(&pr->lro_mgr);
 
 	pr->rx_packets += processed;
+	pr->rx_bytes += processed_bytes;
 
 	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
 	ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1526,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 	enum ehea_eq_type eq_type = EHEA_EQ;
 	struct ehea_qp_init_attr *init_attr = NULL;
 	int ret = -EIO;
+	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+	tx_bytes = pr->tx_bytes;
+	tx_packets = pr->tx_packets;
+	rx_bytes = pr->rx_bytes;
+	rx_packets = pr->rx_packets;
 
 	memset(pr, 0, sizeof(struct ehea_port_res));
 
+	pr->tx_bytes = rx_bytes;
+	pr->tx_packets = tx_packets;
+	pr->rx_bytes = rx_bytes;
+	pr->rx_packets = rx_packets;
+
 	pr->port = port;
 	spin_lock_init(&pr->xmit_lock);
 	spin_lock_init(&pr->netif_queue);
@@ -2249,6 +2277,14 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	memset(swqe, 0, SWQE_HEADER_SIZE);
 	atomic_dec(&pr->swqe_avail);
 
+	if (vlan_tx_tag_present(skb)) {
+		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
+		swqe->vlan_tag = vlan_tx_tag_get(skb);
+	}
+
+	pr->tx_packets++;
+	pr->tx_bytes += skb->len;
+
 	if (skb->len <= SWQE3_MAX_IMM) {
 		u32 sig_iv = port->sig_comp_iv;
 		u32 swqe_num = pr->swqe_id_counter;
@@ -2279,11 +2315,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 	pr->swqe_id_counter += 1;
 
-	if (vlan_tx_tag_present(skb)) {
-		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
-		swqe->vlan_tag = vlan_tx_tag_get(skb);
-	}
-
 	if (netif_msg_tx_queued(port)) {
 		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
 		ehea_dump(swqe, 512, "swqe");
@@ -2295,7 +2326,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
-	pr->tx_packets++;
 
 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
 		spin_lock_irqsave(&pr->netif_queue, flags);
```
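For readers skimming the patch: its core change is that byte counters stop coming from the firmware octet registers (`cb2->rxo` / `cb2->txo`) and instead come from per-queue software counters that are bumped on the hot TX/RX paths and summed in the stats callback. The stand-alone C sketch below illustrates just that accounting pattern; it is not driver code, and all names (`queue_res`, `port_stats`, `xmit_account`, `get_stats`, `NUM_QUEUES`) are hypothetical stand-ins for the ehea structures.

```c
/*
 * Minimal sketch of the per-queue accounting pattern this patch
 * introduces. Each queue keeps its own software byte/packet counters
 * on the hot path; the stats callback aggregates them instead of
 * reading hardware octet counters. Illustrative names only.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define NUM_QUEUES 4

struct queue_res {                 /* stands in for struct ehea_port_res */
	uint64_t tx_packets, tx_bytes;
	uint64_t rx_packets, rx_bytes;
};

struct port_stats {                /* stands in for struct net_device_stats */
	uint64_t tx_packets, tx_bytes;
	uint64_t rx_packets, rx_bytes;
};

/* Hot path: account a transmitted frame on its own queue, as
 * ehea_start_xmit() now does before posting the work request. */
static void xmit_account(struct queue_res *q, size_t frame_len)
{
	q->tx_packets++;
	q->tx_bytes += frame_len;
}

/* Stats path: sum the per-queue counters, mirroring the loops
 * added to ehea_get_stats(). */
static void get_stats(const struct queue_res *qs, int n,
		      struct port_stats *stats)
{
	int i;

	stats->tx_packets = stats->tx_bytes = 0;
	stats->rx_packets = stats->rx_bytes = 0;
	for (i = 0; i < n; i++) {
		stats->tx_packets += qs[i].tx_packets;
		stats->tx_bytes   += qs[i].tx_bytes;
		stats->rx_packets += qs[i].rx_packets;
		stats->rx_bytes   += qs[i].rx_bytes;
	}
}

int main(void)
{
	struct queue_res qs[NUM_QUEUES] = { 0 };
	struct port_stats stats;

	xmit_account(&qs[0], 1500);
	xmit_account(&qs[2], 60);
	get_stats(qs, NUM_QUEUES, &stats);
	printf("tx: %llu packets, %llu bytes\n",
	       (unsigned long long)stats.tx_packets,
	       (unsigned long long)stats.tx_bytes);
	return 0;
}
```

The same idea explains the save/restore block added to ehea_init_port_res(): the counters are captured before the memset() that clears the port resource and written back afterwards, so a queue reinit does not zero the accumulated totals.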