author    arr <arr@FreeBSD.org>    2002-06-14 19:31:07 +0000
committer arr <arr@FreeBSD.org>    2002-06-14 19:31:07 +0000
commit    399bacf14bfacf43aa223796853e0c060b30b525 (patch)
tree      7cce6bd9237724e7700fdebc197816130d415833 /sys/netatm/atm_subr.c
parent    42cf959f1837b06c3ec3b8fa7bb33a47c85b92af (diff)
- Chainsaw the storage pool code. This was being used by a bunch of code
  within the HARP ATM stack and the hea and hfa device drivers, but since
  all of these systems were changed to use UMA zones, there is no use for
  the API any longer.
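
The pool primitives removed below (atm_allocate(), atm_free(), atm_compact(),
atm_release_pool()) were superseded by UMA zones in their callers. As a rough
sketch of that replacement pattern (not code from this commit; the struct and
zone names are placeholders invented for illustration), a consumer creates a
zone once and then allocates and frees from it:

    /*
     * Hedged sketch: mapping a former sp_info storage pool onto a UMA zone.
     * "my_cb" and "my_cb_zone" are hypothetical names, not from this commit.
     */
    #include <sys/param.h>
    #include <sys/kernel.h>
    #include <vm/uma.h>

    struct my_cb {
            int     cb_state;               /* example payload */
    };

    static uma_zone_t my_cb_zone;

    static void
    my_cb_init(void)
    {
            /* Replaces per-pool sp_info setup; UMA grows/shrinks chunks itself. */
            my_cb_zone = uma_zcreate("my cb zone", sizeof(struct my_cb),
                NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
    }

    static struct my_cb *
    my_cb_alloc(void)
    {
            /* M_ZERO preserves the cleared-block guarantee atm_allocate() gave. */
            return (uma_zalloc(my_cb_zone, M_NOWAIT | M_ZERO));
    }

    static void
    my_cb_free(struct my_cb *cb)
    {
            uma_zfree(my_cb_zone, cb);
    }

Because UMA manages its own backing memory, the hand-rolled compaction done by
atm_compact() and its timer are also removed in the diff below.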
Diffstat (limited to 'sys/netatm/atm_subr.c')
-rw-r--r--   sys/netatm/atm_subr.c   346
1 file changed, 1 insertion, 345 deletions
diff --git a/sys/netatm/atm_subr.c b/sys/netatm/atm_subr.c
index 43d651d..da0ea4c 100644
--- a/sys/netatm/atm_subr.c
+++ b/sys/netatm/atm_subr.c
@@ -69,7 +69,6 @@ __RCSID("@(#) $FreeBSD$");
struct atm_pif *atm_interface_head = NULL;
struct atm_ncm *atm_netconv_head = NULL;
Atm_endpoint *atm_endpoints[ENDPT_MAX+1] = {NULL};
-struct sp_info *atm_pool_head = NULL;
struct stackq_entry *atm_stackq_head = NULL, *atm_stackq_tail;
struct atm_sock_stat atm_sock_stat = { { 0 } };
int atm_init = 0;
@@ -84,15 +83,12 @@ uma_zone_t atm_attributes_zone;
/*
* Local functions
*/
-static void atm_compact(struct atm_time *);
static KTimeout_ret atm_timexp(void *);
/*
* Local variables
*/
static struct atm_time *atm_timeq = NULL;
-static struct atm_time atm_compactimer = {0, 0};
-
static uma_zone_t atm_stackq_zone;
/*
@@ -148,351 +144,11 @@ atm_initialize()
/*
* Prime the timer
*/
- (void) timeout(atm_timexp, (void *)0, hz/ATM_HZ);
-
- /*
- * Start the compaction timer
- */
- atm_timeout(&atm_compactimer, SPOOL_COMPACT, atm_compact);
+ (void)timeout(atm_timexp, (void *)0, hz/ATM_HZ);
}
/*
- * Allocate a Control Block
- *
- * Gets a new control block allocated from the specified storage pool,
- * acquiring memory for new pool chunks if required. The returned control
- * block's contents will be cleared.
- *
- * Arguments:
- * sip pointer to sp_info for storage pool
- *
- * Returns:
- * addr pointer to allocated control block
- * 0 allocation failed
- *
- */
-void *
-atm_allocate(sip)
- struct sp_info *sip;
-{
- void *bp;
- struct sp_chunk *scp;
- struct sp_link *slp;
- int s = splnet();
-
- /*
- * Count calls
- */
- sip->si_allocs++;
-
- /*
- * Are there any free in the pool?
- */
- if (sip->si_free) {
-
- /*
- * Find first chunk with a free block
- */
- for (scp = sip->si_poolh; scp; scp = scp->sc_next) {
- if (scp->sc_freeh != NULL)
- break;
- }
-
- } else {
-
- /*
- * No free blocks - have to allocate a new
- * chunk (but put a limit to this)
- */
- struct sp_link *slp_next;
- int i;
-
- /*
- * First time for this pool??
- */
- if (sip->si_chunksiz == 0) {
- size_t n;
-
- /*
- * Initialize pool information
- */
- n = sizeof(struct sp_chunk) +
- sip->si_blkcnt *
- (sip->si_blksiz + sizeof(struct sp_link));
- sip->si_chunksiz = roundup(n, SPOOL_ROUNDUP);
-
- /*
- * Place pool on kernel chain
- */
- LINK2TAIL(sip, struct sp_info, atm_pool_head, si_next);
- }
-
- if (sip->si_chunks >= sip->si_maxallow) {
- sip->si_fails++;
- (void) splx(s);
- return (NULL);
- }
-
- scp = malloc(sip->si_chunksiz, M_DEVBUF, M_NOWAIT | M_ZERO);
- if (scp == NULL) {
- sip->si_fails++;
- (void) splx(s);
- return (NULL);
- }
- scp->sc_info = sip;
- scp->sc_magic = SPOOL_MAGIC;
-
- /*
- * Divy up chunk into free blocks
- */
- slp = (struct sp_link *)(scp + 1);
- scp->sc_freeh = slp;
-
- for (i = sip->si_blkcnt; i > 1; i--) {
- slp_next = (struct sp_link *)((caddr_t)(slp + 1) +
- sip->si_blksiz);
- slp->sl_u.slu_next = slp_next;
- slp = slp_next;
- }
- slp->sl_u.slu_next = NULL;
- scp->sc_freet = slp;
-
- /*
- * Add new chunk to end of pool
- */
- if (sip->si_poolh)
- sip->si_poolt->sc_next = scp;
- else
- sip->si_poolh = scp;
- sip->si_poolt = scp;
-
- sip->si_chunks++;
- sip->si_total += sip->si_blkcnt;
- sip->si_free += sip->si_blkcnt;
- if (sip->si_chunks > sip->si_maxused)
- sip->si_maxused = sip->si_chunks;
- }
-
- /*
- * Allocate the first free block in chunk
- */
- slp = scp->sc_freeh;
- scp->sc_freeh = slp->sl_u.slu_next;
- scp->sc_used++;
- sip->si_free--;
- bp = (slp + 1);
-
- /*
- * Save link back to pool chunk
- */
- slp->sl_u.slu_chunk = scp;
-
- /*
- * Clear out block
- */
- bzero(bp, sip->si_blksiz);
-
- (void) splx(s);
- return (bp);
-}
-
-
-/*
- * Free a Control Block
- *
- * Returns a previously allocated control block back to the owners
- * storage pool.
- *
- * Arguments:
- * bp pointer to block to be freed
- *
- * Returns:
- * none
- *
- */
-void
-atm_free(bp)
- void *bp;
-{
- struct sp_info *sip;
- struct sp_chunk *scp;
- struct sp_link *slp;
- int s = splnet();
-
- /*
- * Get containing chunk and pool info
- */
- slp = (struct sp_link *)bp;
- slp--;
- scp = slp->sl_u.slu_chunk;
- if (scp->sc_magic != SPOOL_MAGIC)
- panic("atm_free: chunk magic missing");
- sip = scp->sc_info;
-
- /*
- * Add block to free chain
- */
- if (scp->sc_freeh) {
- scp->sc_freet->sl_u.slu_next = slp;
- scp->sc_freet = slp;
- } else
- scp->sc_freeh = scp->sc_freet = slp;
- slp->sl_u.slu_next = NULL;
- sip->si_free++;
- scp->sc_used--;
-
- (void) splx(s);
- return;
-}
-
-
-/*
- * Storage Pool Compaction
- *
- * Called periodically in order to perform compaction of the
- * storage pools. Each pool will be checked to see if any chunks
- * can be freed, taking some care to avoid freeing too many chunks
- * in order to avoid memory thrashing.
- *
- * Called at splnet.
- *
- * Arguments:
- * tip pointer to timer control block (atm_compactimer)
- *
- * Returns:
- * none
- *
- */
-static void
-atm_compact(tip)
- struct atm_time *tip;
-{
- struct sp_info *sip;
- struct sp_chunk *scp;
- int i;
- struct sp_chunk *scp_prev;
-
- /*
- * Check out all storage pools
- */
- for (sip = atm_pool_head; sip; sip = sip->si_next) {
-
- /*
- * Always keep a minimum number of chunks around
- */
- if (sip->si_chunks <= SPOOL_MIN_CHUNK)
- continue;
-
- /*
- * Maximum chunks to free at one time will leave
- * pool with at least 50% utilization, but never
- * go below minimum chunk count.
- */
- i = ((sip->si_free * 2) - sip->si_total) / sip->si_blkcnt;
- i = MIN(i, sip->si_chunks - SPOOL_MIN_CHUNK);
-
- /*
- * Look for chunks to free
- */
- scp_prev = NULL;
- for (scp = sip->si_poolh; scp && i > 0; ) {
-
- if (scp->sc_used == 0) {
-
- /*
- * Found a chunk to free, so do it
- */
- if (scp_prev) {
- scp_prev->sc_next = scp->sc_next;
- if (sip->si_poolt == scp)
- sip->si_poolt = scp_prev;
- } else
- sip->si_poolh = scp->sc_next;
-
- free((caddr_t)scp, M_DEVBUF);
-
- /*
- * Update pool controls
- */
- sip->si_chunks--;
- sip->si_total -= sip->si_blkcnt;
- sip->si_free -= sip->si_blkcnt;
- i--;
- if (scp_prev)
- scp = scp_prev->sc_next;
- else
- scp = sip->si_poolh;
- } else {
- scp_prev = scp;
- scp = scp->sc_next;
- }
- }
- }
-
- /*
- * Restart the compaction timer
- */
- atm_timeout(&atm_compactimer, SPOOL_COMPACT, atm_compact);
-
- return;
-}
-
-
-/*
- * Release a Storage Pool
- *
- * Frees all dynamic storage acquired for a storage pool.
- * This function is normally called just prior to a module's unloading.
- *
- * Arguments:
- * sip pointer to sp_info for storage pool
- *
- * Returns:
- * none
- *
- */
-void
-atm_release_pool(sip)
- struct sp_info *sip;
-{
- struct sp_chunk *scp, *scp_next;
- int s = splnet();
-
- /*
- * Free each chunk in pool
- */
- for (scp = sip->si_poolh; scp; scp = scp_next) {
-
- /*
- * Check for memory leaks
- */
- if (scp->sc_used)
- panic("atm_release_pool: unfreed blocks");
-
- scp_next = scp->sc_next;
- free((caddr_t)scp, M_DEVBUF);
- }
-
- /*
- * Update pool controls
- */
- sip->si_poolh = NULL;
- sip->si_chunks = 0;
- sip->si_total = 0;
- sip->si_free = 0;
-
- /*
- * Unlink pool from active chain
- */
- sip->si_chunksiz = 0;
- UNLINK(sip, struct sp_info, atm_pool_head, si_next);
-
- (void) splx(s);
- return;
-}
-
-/*
* Handle timer tick expiration
*
* Decrement tick count in first block on timer queue. If there
OpenPOWER on IntegriCloud