diff options
author | attilio <attilio@FreeBSD.org> | 2009-09-18 13:48:38 +0000 |
---|---|---|
committer | attilio <attilio@FreeBSD.org> | 2009-09-18 13:48:38 +0000 |
commit | 6aa8dd54a5c22d81c6c1d7297b49d77869da4bdc (patch) | |
tree | ca59ff119063dbd453c065ee7426e8a502174250 /sys/kern/subr_devstat.c | |
parent | c5e6411ab0edaeeea43e807adb2b606cd10c38c8 (diff) | |
download | FreeBSD-src-6aa8dd54a5c22d81c6c1d7297b49d77869da4bdc.zip FreeBSD-src-6aa8dd54a5c22d81c6c1d7297b49d77869da4bdc.tar.gz |
Don't allocate unnecessary new pages when devstat_alloc() loses the
race for re-acquiring the lock; instead, recheck whether pages are
available from the pool and free the previously allocated ones.
Tested by: pho, Giovanni Trematerra
<giovanni dot trematerra at gmail dot com>
Diffstat (limited to 'sys/kern/subr_devstat.c')
-rw-r--r-- | sys/kern/subr_devstat.c | 43 |
1 files changed, 27 insertions, 16 deletions
diff --git a/sys/kern/subr_devstat.c b/sys/kern/subr_devstat.c index bbfed44..e90df59 100644 --- a/sys/kern/subr_devstat.c +++ b/sys/kern/subr_devstat.c @@ -469,7 +469,7 @@ static struct devstat * devstat_alloc(void) { struct devstat *dsp; - struct statspage *spp; + struct statspage *spp, *spp2; u_int u; static int once; @@ -479,6 +479,7 @@ devstat_alloc(void) UID_ROOT, GID_WHEEL, 0400, DEVSTAT_DEVICE_NAME); once = 1; } + spp2 = NULL; mtx_lock(&devstat_mutex); for (;;) { TAILQ_FOREACH(spp, &pagelist, list) { @@ -487,24 +488,30 @@ devstat_alloc(void) } if (spp != NULL) break; - /* - * We had no free slot in any of our pages, drop the mutex - * and get another page. In theory we could have more than - * one process doing this at the same time and consequently - * we may allocate more pages than we will need. That is - * Just Too Bad[tm], we can live with that. - */ mtx_unlock(&devstat_mutex); - spp = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK); - spp->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK); - spp->nfree = statsperpage; - mtx_lock(&devstat_mutex); + spp2 = malloc(sizeof *spp, M_DEVSTAT, M_ZERO | M_WAITOK); + spp2->stat = malloc(PAGE_SIZE, M_DEVSTAT, M_ZERO | M_WAITOK); + spp2->nfree = statsperpage; + /* - * It would make more sense to add the new page at the head - * but the order on the list determine the sequence of the - * mapping so we can't do that. + * If free statspages were added while the lock was released + * just reuse them. */ - TAILQ_INSERT_TAIL(&pagelist, spp, list); + mtx_lock(&devstat_mutex); + TAILQ_FOREACH(spp, &pagelist, list) + if (spp->nfree > 0) + break; + if (spp == NULL) { + spp = spp2; + + /* + * It would make more sense to add the new page at the + * head but the order on the list determine the + * sequence of the mapping so we can't do that. 
+ */ + TAILQ_INSERT_TAIL(&pagelist, spp, list); + } else + break; } dsp = spp->stat; for (u = 0; u < statsperpage; u++) { @@ -515,6 +522,10 @@ devstat_alloc(void) spp->nfree--; dsp->allocated = 1; mtx_unlock(&devstat_mutex); + if (spp2 != NULL && spp2 != spp) { + free(spp2->stat, M_DEVSTAT); + free(spp2, M_DEVSTAT); + } return (dsp); } |