summaryrefslogtreecommitdiffstats
path: root/sys/vm
diff options
context:
space:
mode:
authormdf <mdf@FreeBSD.org>2010-09-13 18:48:23 +0000
committermdf <mdf@FreeBSD.org>2010-09-13 18:48:23 +0000
commit3ed6eac561ccce2958e668867ea38fd005bc635b (patch)
treeb0bd953f5f99c28abddea5f8c7bb39d837607940 /sys/vm
parent9a10f7c4328c6763a33844ed9635ef3c2a7f1e84 (diff)
downloadFreeBSD-src-3ed6eac561ccce2958e668867ea38fd005bc635b.zip
FreeBSD-src-3ed6eac561ccce2958e668867ea38fd005bc635b.tar.gz
Revert r212370, as it causes a LOR on powerpc.  powerpc does a few
unexpected things in copyout(9) and so wiring the user buffer is not
sufficient to perform a copyout(9) while holding a random mutex.

Requested by:	nwhitehorn
Diffstat (limited to 'sys/vm')
-rw-r--r--sys/vm/uma_core.c54
-rw-r--r--sys/vm/vm_phys.c27
-rw-r--r--sys/vm/vm_reserv.c9
3 files changed, 73 insertions(+), 17 deletions(-)
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 23b88ee..558b4c7 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -3175,16 +3175,36 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
uma_keg_t kz;
uma_zone_t z;
uma_keg_t k;
- int count, error, i;
+ char *buffer;
+ int buflen, count, error, i;
- sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
-
- count = 0;
mtx_lock(&uma_mtx);
+restart:
+ mtx_assert(&uma_mtx, MA_OWNED);
+ count = 0;
LIST_FOREACH(kz, &uma_kegs, uk_link) {
LIST_FOREACH(z, &kz->uk_zones, uz_link)
count++;
}
+ mtx_unlock(&uma_mtx);
+
+ buflen = sizeof(ush) + count * (sizeof(uth) + sizeof(ups) *
+ (mp_maxid + 1)) + 1;
+ buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
+
+ mtx_lock(&uma_mtx);
+ i = 0;
+ LIST_FOREACH(kz, &uma_kegs, uk_link) {
+ LIST_FOREACH(z, &kz->uk_zones, uz_link)
+ i++;
+ }
+ if (i > count) {
+ free(buffer, M_TEMP);
+ goto restart;
+ }
+ count = i;
+
+ sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);
/*
* Insert stream header.
@@ -3193,7 +3213,11 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
ush.ush_version = UMA_STREAM_VERSION;
ush.ush_maxcpus = (mp_maxid + 1);
ush.ush_count = count;
- (void)sbuf_bcat(&sbuf, &ush, sizeof(ush));
+ if (sbuf_bcat(&sbuf, &ush, sizeof(ush)) < 0) {
+ mtx_unlock(&uma_mtx);
+ error = ENOMEM;
+ goto out;
+ }
LIST_FOREACH(kz, &uma_kegs, uk_link) {
LIST_FOREACH(z, &kz->uk_zones, uz_link) {
@@ -3226,7 +3250,12 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
uth.uth_frees = z->uz_frees;
uth.uth_fails = z->uz_fails;
uth.uth_sleeps = z->uz_sleeps;
- (void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
+ if (sbuf_bcat(&sbuf, &uth, sizeof(uth)) < 0) {
+ ZONE_UNLOCK(z);
+ mtx_unlock(&uma_mtx);
+ error = ENOMEM;
+ goto out;
+ }
/*
* While it is not normally safe to access the cache
* bucket pointers while not on the CPU that owns the
@@ -3251,14 +3280,21 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
ups.ups_allocs = cache->uc_allocs;
ups.ups_frees = cache->uc_frees;
skip:
- (void)sbuf_bcat(&sbuf, &ups, sizeof(ups));
+ if (sbuf_bcat(&sbuf, &ups, sizeof(ups)) < 0) {
+ ZONE_UNLOCK(z);
+ mtx_unlock(&uma_mtx);
+ error = ENOMEM;
+ goto out;
+ }
}
ZONE_UNLOCK(z);
}
}
mtx_unlock(&uma_mtx);
- error = sbuf_finish(&sbuf);
- sbuf_delete(&sbuf);
+ sbuf_finish(&sbuf);
+ error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
+out:
+ free(buffer, M_TEMP);
return (error);
}
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 26e4981..e75c340 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -123,9 +123,12 @@ sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
struct sbuf sbuf;
struct vm_freelist *fl;
+ char *cbuf;
+ const int cbufsize = vm_nfreelists*(VM_NFREEORDER + 1)*81;
int error, flind, oind, pind;
- sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
+ cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
+ sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
for (flind = 0; flind < vm_nfreelists; flind++) {
sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
"\n ORDER (SIZE) | NUMBER"
@@ -146,8 +149,10 @@ sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
sbuf_printf(&sbuf, "\n");
}
}
- error = sbuf_finish(&sbuf);
+ sbuf_finish(&sbuf);
+ error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
sbuf_delete(&sbuf);
+ free(cbuf, M_TEMP);
return (error);
}
@@ -159,9 +164,12 @@ sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
struct sbuf sbuf;
struct vm_phys_seg *seg;
+ char *cbuf;
+ const int cbufsize = VM_PHYSSEG_MAX*(VM_NFREEORDER + 1)*81;
int error, segind;
- sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
+ cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
+ sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
for (segind = 0; segind < vm_phys_nsegs; segind++) {
sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
seg = &vm_phys_segs[segind];
@@ -172,8 +180,10 @@ sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
sbuf_printf(&sbuf, "domain: %d\n", seg->domain);
sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
}
- error = sbuf_finish(&sbuf);
+ sbuf_finish(&sbuf);
+ error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
sbuf_delete(&sbuf);
+ free(cbuf, M_TEMP);
return (error);
}
@@ -185,18 +195,23 @@ static int
sysctl_vm_phys_lookup_lists(SYSCTL_HANDLER_ARGS)
{
struct sbuf sbuf;
+ char *cbuf;
+ const int cbufsize = (vm_nfreelists + 1) * VM_NDOMAIN * 81;
int domain, error, flind, ndomains;
ndomains = vm_nfreelists - VM_NFREELIST + 1;
- sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
+ cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
+ sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
for (domain = 0; domain < ndomains; domain++) {
sbuf_printf(&sbuf, "\nDOMAIN %d:\n\n", domain);
for (flind = 0; flind < vm_nfreelists; flind++)
sbuf_printf(&sbuf, " [%d]:\t%p\n", flind,
vm_phys_lookup_lists[domain][flind]);
}
- error = sbuf_finish(&sbuf);
+ sbuf_finish(&sbuf);
+ error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
sbuf_delete(&sbuf);
+ free(cbuf, M_TEMP);
return (error);
}
#endif
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index aa8e80f..d9e908f 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -180,9 +180,12 @@ sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
struct sbuf sbuf;
vm_reserv_t rv;
+ char *cbuf;
+ const int cbufsize = (VM_NRESERVLEVEL + 1) * 81;
int counter, error, level, unused_pages;
- sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
+ cbuf = malloc(cbufsize, M_TEMP, M_WAITOK | M_ZERO);
+ sbuf_new(&sbuf, cbuf, cbufsize, SBUF_FIXEDLEN);
sbuf_printf(&sbuf, "\nLEVEL SIZE NUMBER\n\n");
for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
counter = 0;
@@ -196,8 +199,10 @@ sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
sbuf_printf(&sbuf, "%5.5d: %6.6dK, %6.6d\n", level,
unused_pages * (PAGE_SIZE / 1024), counter);
}
- error = sbuf_finish(&sbuf);
+ sbuf_finish(&sbuf);
+ error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
sbuf_delete(&sbuf);
+ free(cbuf, M_TEMP);
return (error);
}
OpenPOWER on IntegriCloud