path: root/hw/net/fsl_etsec/rings.c
author    Peter Maydell <peter.maydell@linaro.org>    2015-07-27 14:53:42 +0100
committer Peter Maydell <peter.maydell@linaro.org>    2015-07-27 14:53:42 +0100
commit    122e7dab8ac549c8c5a9e1e13aa2464190e888de (patch)
tree      110679c12752916fc4ae9b011b9d7fa98a2127ae /hw/net/fsl_etsec/rings.c
parent    e40db4c6d391419c0039fe274c74df32a6ca1a28 (diff)
parent    f9f7492ea4a9dda538fedeec31399fb940533a16 (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/net-pull-request' into staging
Pull request

Here are NIC fixes from Fam Zheng that prevent rx hangs (caused by NIC models where .can_receive() stops rx but qemu_flush_queued_packets() isn't called).

# gpg: Signature made Mon Jul 27 14:51:48 2015 BST using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"

* remotes/stefanha/tags/net-pull-request:
  axienet: Flush queued packets when rx is done
  dp8393x: Flush packets when link comes up
  stellaris_enet: Flush queued packets when read done
  mipsnet: Flush queued packets when receiving is enabled
  milkymist-minimac2: Flush queued packets when link comes up
  mcf_fec: Drop mcf_fec_can_receive
  etsec: Flush queue when rx buffer is consumed
  etsec: Move etsec_can_receive into etsec_receive
  usbnet: Drop usbnet_can_receive
  eepro100: Drop nic_can_receive
  pcnet: Drop pcnet_can_receive
  xgmac: Drop packets with eth_can_rx is false.
  hw/net: fix mcf_fec driver receiver
  hw/net: add simple phy support to mcf_fec driver
  hw/net: add ANLPAR bit definitions to generic mii
  hw/net: create common collection of MII definitions

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
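The pattern the series applies is the same in every NIC model: when the receive path cannot accept a packet it returns 0 so the net layer queues it, and the model must later call qemu_flush_queued_packets() once the blocking condition clears, otherwise rx stalls forever. A minimal sketch of that pattern follows; MyNICState and its rx_busy flag are hypothetical illustration names, while qemu_get_nic_opaque(), qemu_get_queue() and qemu_flush_queued_packets() are the real QEMU net-layer helpers used by the models touched here.

/* Minimal sketch of the flush pattern, not code from this series. */
#include "net/net.h"

typedef struct MyNICState {
    NICState *nic;
    bool rx_busy;              /* a received frame is still being consumed */
} MyNICState;

static ssize_t my_nic_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    MyNICState *s = qemu_get_nic_opaque(nc);

    if (s->rx_busy) {
        /* Returning 0 makes the net layer queue the packet; it will not
         * be retried until someone flushes the queue. */
        return 0;
    }

    /* ... copy the frame into the guest-visible rx buffers ... */
    return size;
}

static void my_nic_rx_buffer_consumed(MyNICState *s)
{
    s->rx_busy = false;
    /* Without this flush, packets queued while rx_busy was set are never
     * delivered: the rx path hangs. */
    qemu_flush_queued_packets(qemu_get_queue(s->nic));
}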
Diffstat (limited to 'hw/net/fsl_etsec/rings.c')
-rw-r--r--  hw/net/fsl_etsec/rings.c  17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/hw/net/fsl_etsec/rings.c b/hw/net/fsl_etsec/rings.c
index d4a494f..68e7b6d 100644
--- a/hw/net/fsl_etsec/rings.c
+++ b/hw/net/fsl_etsec/rings.c
@@ -481,40 +481,42 @@ static void rx_init_frame(eTSEC *etsec, const uint8_t *buf, size_t size)
etsec->rx_buffer_len, etsec->rx_padding);
}
-void etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size)
+ssize_t etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size)
{
int ring_nbr = 0; /* Always use ring0 (no filer) */
if (etsec->rx_buffer_len != 0) {
RING_DEBUG("%s: We can't receive now,"
" a buffer is already in the pipe\n", __func__);
- return;
+ return 0;
}
if (etsec->regs[RSTAT].value & 1 << (23 - ring_nbr)) {
RING_DEBUG("%s: The ring is halted\n", __func__);
- return;
+ return -1;
}
if (etsec->regs[DMACTRL].value & DMACTRL_GRS) {
RING_DEBUG("%s: Graceful receive stop\n", __func__);
- return;
+ return -1;
}
if (!(etsec->regs[MACCFG1].value & MACCFG1_RX_EN)) {
RING_DEBUG("%s: MAC Receive not enabled\n", __func__);
- return;
+ return -1;
}
if ((etsec->regs[RCTRL].value & RCTRL_RSF) && (size < 60)) {
/* CRC is not in the packet yet, so short frame is below 60 bytes */
RING_DEBUG("%s: Drop short frame\n", __func__);
- return;
+ return -1;
}
rx_init_frame(etsec, buf, size);
etsec_walk_rx_ring(etsec, ring_nbr);
+
+ return size;
}
void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr)
@@ -644,6 +646,9 @@ void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr)
} else {
etsec->rx_buffer_len = 0;
etsec->rx_buffer = NULL;
+ if (etsec->need_flush) {
+ qemu_flush_queued_packets(qemu_get_queue(etsec->nic));
+ }
}
RING_DEBUG("eTSEC End of ring_write: remaining_data:%zu\n", remaining_data);