Diffstat (limited to 'hw/net/fsl_etsec')
-rw-r--r--  hw/net/fsl_etsec/etsec.c  | 20
-rw-r--r--  hw/net/fsl_etsec/etsec.h  |  4
-rw-r--r--  hw/net/fsl_etsec/rings.c  | 17
3 files changed, 24 insertions(+), 17 deletions(-)
diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c
index c57365f..0f5cf44 100644
--- a/hw/net/fsl_etsec/etsec.c
+++ b/hw/net/fsl_etsec/etsec.c
@@ -338,25 +338,26 @@ static void etsec_reset(DeviceState *d)
MII_SR_100X_FD_CAPS | MII_SR_100T4_CAPS;
}
-static int etsec_can_receive(NetClientState *nc)
-{
- eTSEC *etsec = qemu_get_nic_opaque(nc);
-
- return etsec->rx_buffer_len == 0;
-}
-
static ssize_t etsec_receive(NetClientState *nc,
const uint8_t *buf,
size_t size)
{
+ ssize_t ret;
eTSEC *etsec = qemu_get_nic_opaque(nc);
#if defined(HEX_DUMP)
fprintf(stderr, "%s receive size:%d\n", etsec->nic->nc.name, size);
qemu_hexdump(buf, stderr, "", size);
#endif
- etsec_rx_ring_write(etsec, buf, size);
- return size;
+ /* Flush is unnecessary as we are already in the receive path */
+ etsec->need_flush = false;
+ ret = etsec_rx_ring_write(etsec, buf, size);
+ if (ret == 0) {
+ /* The packet will be queued; let's flush it when the buffer is
+ * available again. */
+ etsec->need_flush = true;
+ }
+ return ret;
}
@@ -370,7 +371,6 @@ static void etsec_set_link_status(NetClientState *nc)
static NetClientInfo net_etsec_info = {
.type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
- .can_receive = etsec_can_receive,
.receive = etsec_receive,
.link_status_changed = etsec_set_link_status,
};
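
With .can_receive gone, backpressure is now signalled through the return value of .receive: returning 0 tells QEMU's net layer to queue the packet for redelivery, and delivery resumes only when the device later calls qemu_flush_queued_packets(). Below is a minimal sketch of the receive side of this pattern, not the eTSEC code itself; MyNICState, my_nic_has_room and my_nic_consume are hypothetical names used for illustration:

#include "net/net.h"

typedef struct MyNICState {
    NICState *nic;
    bool need_flush;
    /* ... device state ... */
} MyNICState;

static ssize_t my_nic_receive(NetClientState *nc,
                              const uint8_t *buf, size_t size)
{
    MyNICState *s = qemu_get_nic_opaque(nc);

    if (!my_nic_has_room(s)) {
        /* Returning 0 queues the packet in the net layer; remember
         * to flush once room is available again. */
        s->need_flush = true;
        return 0;
    }
    s->need_flush = false;
    my_nic_consume(s, buf, size);   /* hypothetical helper */
    return size;
}

The patch follows this shape: etsec_receive() clears need_flush up front, lets etsec_rx_ring_write() report whether the frame fit, and sets the flag only in the transient "buffer busy" case.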
diff --git a/hw/net/fsl_etsec/etsec.h b/hw/net/fsl_etsec/etsec.h
index 78d2c57..e7dc0a4 100644
--- a/hw/net/fsl_etsec/etsec.h
+++ b/hw/net/fsl_etsec/etsec.h
@@ -144,6 +144,8 @@ typedef struct eTSEC {
QEMUBH *bh;
struct ptimer_state *ptimer;
+ /* Whether we should flush the rx queue when the buffer becomes available. */
+ bool need_flush;
} eTSEC;
#define TYPE_ETSEC_COMMON "eTSEC"
@@ -162,7 +164,7 @@ DeviceState *etsec_create(hwaddr base,
void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr);
void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr);
-void etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size);
+ssize_t etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size);
void etsec_write_miim(eTSEC *etsec,
eTSEC_Register *reg,
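
The signature change from void to ssize_t is what lets the caller tell two failure modes apart: in QEMU's net layer, a .receive return of 0 queues the packet for redelivery, while a negative return consumes it without queueing. The helper therefore returns 0 only for the transient "a buffer is already in the pipe" case and -1 for drops that retrying cannot fix. A sketch of caller-side handling, illustrative rather than part of the patch:

ssize_t ret = etsec_rx_ring_write(etsec, buf, size);

if (ret == 0) {
    /* Transient: the single rx_buffer slot is busy.  Propagating 0
     * up through .receive queues the packet for a later flush. */
} else if (ret < 0) {
    /* Permanent for this frame (ring halted, graceful stop, RX
     * disabled, short frame): the packet is dropped, not queued. */
} else {
    /* ret == size: the frame was handed to the descriptor ring. */
}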
diff --git a/hw/net/fsl_etsec/rings.c b/hw/net/fsl_etsec/rings.c
index d4a494f..68e7b6d 100644
--- a/hw/net/fsl_etsec/rings.c
+++ b/hw/net/fsl_etsec/rings.c
@@ -481,40 +481,42 @@ static void rx_init_frame(eTSEC *etsec, const uint8_t *buf, size_t size)
etsec->rx_buffer_len, etsec->rx_padding);
}
-void etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size)
+ssize_t etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size)
{
int ring_nbr = 0; /* Always use ring0 (no filer) */
if (etsec->rx_buffer_len != 0) {
RING_DEBUG("%s: We can't receive now,"
" a buffer is already in the pipe\n", __func__);
- return;
+ return 0;
}
if (etsec->regs[RSTAT].value & 1 << (23 - ring_nbr)) {
RING_DEBUG("%s: The ring is halted\n", __func__);
- return;
+ return -1;
}
if (etsec->regs[DMACTRL].value & DMACTRL_GRS) {
RING_DEBUG("%s: Graceful receive stop\n", __func__);
- return;
+ return -1;
}
if (!(etsec->regs[MACCFG1].value & MACCFG1_RX_EN)) {
RING_DEBUG("%s: MAC Receive not enabled\n", __func__);
- return;
+ return -1;
}
if ((etsec->regs[RCTRL].value & RCTRL_RSF) && (size < 60)) {
/* CRC is not in the packet yet, so short frame is below 60 bytes */
RING_DEBUG("%s: Drop short frame\n", __func__);
- return;
+ return -1;
}
rx_init_frame(etsec, buf, size);
etsec_walk_rx_ring(etsec, ring_nbr);
+
+ return size;
}
void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr)
@@ -644,6 +646,9 @@ void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr)
} else {
etsec->rx_buffer_len = 0;
etsec->rx_buffer = NULL;
+ if (etsec->need_flush) {
+ qemu_flush_queued_packets(qemu_get_queue(etsec->nic));
+ }
}
RING_DEBUG("eTSEC End of ring_write: remaining_data:%zu\n", remaining_data);