From 954e1d2ccdb661d5c8b7f69340d118fa7ba7fb85 Mon Sep 17 00:00:00 2001
From: jlemon
Date: Sat, 25 Nov 2000 07:35:38 +0000
Subject: Lock down the network interface queues.

The queue mutex must be obtained before adding/removing packets from
the queue.  Also, the if_obytes and if_omcasts fields should only be
manipulated under protection of the mutex.

IF_ENQUEUE, IF_PREPEND, and IF_DEQUEUE perform all necessary locking on
the queue.  An IF_LOCK macro is provided, as well as the old (mutex-less)
versions of the macros in the form _IF_ENQUEUE, _IF_QFULL, for code which
needs them, but their use is discouraged.

Two new macros are introduced: IF_DRAIN() to drain a queue, and
IF_HANDOFF, which takes care of locking/enqueue, and also statistics
updating/start if necessary.
---
 sys/dev/hea/eni_receive.c  | 16 +++++++---------
 sys/dev/hea/eni_transmit.c | 10 +++++-----
 2 files changed, 12 insertions(+), 14 deletions(-)

(limited to 'sys/dev/hea')

diff --git a/sys/dev/hea/eni_receive.c b/sys/dev/hea/eni_receive.c
index fd4eb98..8e87752 100644
--- a/sys/dev/hea/eni_receive.c
+++ b/sys/dev/hea/eni_receive.c
@@ -585,7 +585,7 @@ send_dma:
 	/*
 	 * Place buffer on receive queue waiting for RX_DMA
 	 */
-	if ( IF_QFULL ( &eup->eu_rxqueue ) ) {
+	if ( _IF_QFULL ( &eup->eu_rxqueue ) ) {
 		/*
 		 * We haven't done anything we can't back out
 		 * of. Drop request and service it next time.
@@ -605,7 +605,7 @@ send_dma:
 		vct->vci_control &= ~VCI_IN_SERVICE;
 		return;
 	} else {
-		IF_ENQUEUE ( &eup->eu_rxqueue, m );
+		_IF_ENQUEUE ( &eup->eu_rxqueue, m );
 		/*
 		 * Advance the RX_WR pointer to cause
 		 * the adapter to work on this DMA list.
@@ -685,7 +685,7 @@ eni_recv_drain ( eup )
 	s = splimp();
 
 	/* Pop first buffer */
-	IF_DEQUEUE ( &eup->eu_rxqueue, m );
+	_IF_DEQUEUE ( &eup->eu_rxqueue, m );
 	while ( m ) {
 		u_long	*up;
 		u_long	pdulen;
@@ -712,12 +712,12 @@ eni_recv_drain ( eup )
 		 */
 		if ( start > stop ) {		/* We wrapped */
 			if ( !(DMA_Rdptr >= stop && DMA_Rdptr < start) ) {
-				IF_PREPEND ( &eup->eu_rxqueue, m );
+				_IF_PREPEND ( &eup->eu_rxqueue, m );
 				goto finish;
 			}
 		} else {
 			if ( DMA_Rdptr < stop && DMA_Rdptr >= start ) {
-				IF_PREPEND ( &eup->eu_rxqueue, m );
+				_IF_PREPEND ( &eup->eu_rxqueue, m );
 				goto finish;
 			}
 		}
@@ -802,9 +802,8 @@ eni_recv_drain ( eup )
 			/*
 			 * Schedule callback
 			 */
-			if ( !IF_QFULL ( &atm_intrq ) ) {
+			if (IF_HANDOFF(&atm_intrq, m, NULL)) {
 				que++;
-				IF_ENQUEUE ( &atm_intrq, m );
 			} else {
 				eup->eu_stats.eni_st_drv.drv_rv_intrq++;
 				eup->eu_pif.pif_ierrors++;
@@ -812,7 +811,6 @@ eni_recv_drain ( eup )
 				log ( LOG_ERR,
 					"eni_receive_drain: ATM_INTRQ is full. Unable to pass up stack.\n" );
 #endif
-				KB_FREEALL ( m );
 			}
 		} else {
 			/*
@@ -825,7 +823,7 @@ next_buffer:
 		/*
 		 * Look for next buffer
 		 */
-		IF_DEQUEUE ( &eup->eu_rxqueue, m );
+		_IF_DEQUEUE ( &eup->eu_rxqueue, m );
 	}
 finish:
 	(void) splx(s);
diff --git a/sys/dev/hea/eni_transmit.c b/sys/dev/hea/eni_transmit.c
index 2f6f945..2fd5b24 100644
--- a/sys/dev/hea/eni_transmit.c
+++ b/sys/dev/hea/eni_transmit.c
@@ -271,7 +271,7 @@ eni_xmit_drain ( eup )
 	/*
 	 * Pull the top element (PDU) off
 	 */
-	IF_DEQUEUE ( &eup->eu_txqueue, m );
+	_IF_DEQUEUE ( &eup->eu_txqueue, m );
 	/*
 	 * As long as there are valid elements
 	 */
@@ -317,7 +317,7 @@ eni_xmit_drain ( eup )
 			 * Haven't finished this PDU yet - replace
 			 * it as the head of list.
 			 */
-			IF_PREPEND ( &eup->eu_txqueue, m );
+			_IF_PREPEND ( &eup->eu_txqueue, m );
 			/*
 			 * If this one isn't done, none of the others
 			 * are either.
@@ -331,7 +331,7 @@ eni_xmit_drain ( eup )
 			 * Haven't finished this PDU yet - replace
 			 * it as the head of list.
 			 */
-			IF_PREPEND ( &eup->eu_txqueue, m );
+			_IF_PREPEND ( &eup->eu_txqueue, m );
 			/*
 			 * If this one isn't done, none of the others
 			 * are either.
@@ -388,7 +388,7 @@ eni_xmit_drain ( eup )
 		/*
 		 * Look for next completed transmit PDU
 		 */
-		IF_DEQUEUE ( &eup->eu_txqueue, m );
+		_IF_DEQUEUE ( &eup->eu_txqueue, m );
 	}
 	/*
 	 * We've drained the queue...
@@ -823,7 +823,7 @@ retry:
 	 * Place buffers onto transmit queue for draining
 	 */
 	s2 = splimp();
-	IF_ENQUEUE ( &eup->eu_txqueue, m );
+	_IF_ENQUEUE ( &eup->eu_txqueue, m );
 	(void) splx(s2);
 
 	/*
--
cgit v1.1
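
Editor's note: the sketch below is only an illustration of what an IF_HANDOFF-style
helper amounts to, written as an inline C function in terms of the macros the commit
message describes (IF_LOCK/IF_UNLOCK and the unlocked _IF_QFULL/_IF_ENQUEUE forms).
The function name example_if_handoff is hypothetical and the real macro in the
interface queue headers may differ in detail; take the queue mutex, drop and free the
packet if the queue is full, otherwise enqueue it, account if_obytes/if_omcasts when
an interface is supplied, and kick its start routine.

/*
 * Illustrative sketch only, not the committed implementation.
 */
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_var.h>

static __inline int
example_if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp)
{
	IF_LOCK(ifq);
	if (_IF_QFULL(ifq)) {
		/* Queue full: count the drop, free the packet, report failure. */
		_IF_DROP(ifq);
		IF_UNLOCK(ifq);
		m_freem(m);
		return (0);
	}
	if (ifp != NULL) {
		/* Outbound statistics are updated while the queue mutex is held. */
		ifp->if_obytes += m->m_pkthdr.len;
		if (m->m_flags & M_MCAST)
			ifp->if_omcasts++;
	}
	_IF_ENQUEUE(ifq, m);
	IF_UNLOCK(ifq);
	if (ifp != NULL && (ifp->if_flags & IFF_OACTIVE) == 0)
		(*ifp->if_start)(ifp);	/* start transmission if the interface is idle */
	return (1);
}

This shape matches the eni_receive.c hunk above: because the handoff frees the mbuf
itself when the queue is full, the driver's explicit KB_FREEALL() call goes away, while
the unlocked _IF_* forms remain on the driver's private eu_rxqueue/eu_txqueue, which
are still serialized by splimp().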