author	avg <avg@FreeBSD.org>	2017-04-14 14:11:49 +0000
committer	avg <avg@FreeBSD.org>	2017-04-14 14:11:49 +0000
commit	c0a82412b02d0d886ea819e86cdb18382b26212a (patch)
tree	e945e4955d1690ffe680527a8471b7d54335ebaa
parent	6bb419c3a1aa77dc8c7a299112c52571e8187530 (diff)
download	FreeBSD-src-c0a82412b02d0d886ea819e86cdb18382b26212a.zip
	FreeBSD-src-c0a82412b02d0d886ea819e86cdb18382b26212a.tar.gz
MFC r315077: uma: eliminate uk_slabsize field
-rw-r--r--	sys/vm/uma_core.c	25
-rw-r--r--	sys/vm/uma_int.h	1
-rw-r--r--	tools/tools/umastat/umastat.c	1
3 files changed, 12 insertions, 15 deletions
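
For context, the sketch below illustrates the refactoring pattern this commit applies: as the diff shows, the slab size is only consulted while a keg's layout is being computed in keg_small_init(), so it can live in a local variable there instead of being cached in the keg structure. This is a minimal, self-contained approximation; struct keg, keg_init(), SLAB_SIZE and HDR_SIZE are hypothetical stand-ins, not the real UMA types or constants.

#include <stdio.h>

/* Hypothetical, simplified stand-in for a UMA keg. */
struct keg {
	unsigned int size;	/* requested item size */
	unsigned int rsize;	/* rounded item size */
	unsigned int ipers;	/* items per slab */
	/* unsigned int slabsize; -- no longer stored; computed locally below */
};

#define	SLAB_SIZE	4096u	/* assumed slab size, stands in for UMA_SLAB_SIZE */
#define	HDR_SIZE	64u	/* assumed in-slab header size */

/*
 * Layout computation: the slab size is needed only here, so it is held
 * in a local variable rather than written into the keg.
 */
static void
keg_init(struct keg *keg)
{
	unsigned int slabsize;

	slabsize = SLAB_SIZE;	/* was: keg->slabsize = SLAB_SIZE; */
	keg->rsize = keg->size;
	keg->ipers = (slabsize - HDR_SIZE) / keg->rsize;
}

int
main(void)
{
	struct keg k = { .size = 128 };

	keg_init(&k);
	printf("items per slab: %u\n", k.ipers);
	return (0);
}

The actual change makes the same move in keg_small_init(): a local u_int slabsize replaces the uk_slabsize field, while keg_large_init() and keg_cachespread_init() simply stop writing a value that nothing reads anymore, and umastat stops printing it.
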
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 35f65e6..6f2cc11 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1218,15 +1218,16 @@ keg_small_init(uma_keg_t keg)
 	u_int memused;
 	u_int wastedspace;
 	u_int shsize;
+	u_int slabsize;
 
 	if (keg->uk_flags & UMA_ZONE_PCPU) {
 		u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
 
-		keg->uk_slabsize = sizeof(struct pcpu);
+		slabsize = sizeof(struct pcpu);
 		keg->uk_ppera = howmany(ncpus * sizeof(struct pcpu),
 		    PAGE_SIZE);
 	} else {
-		keg->uk_slabsize = UMA_SLAB_SIZE;
+		slabsize = UMA_SLAB_SIZE;
 		keg->uk_ppera = 1;
 	}
@@ -1236,8 +1237,8 @@ keg_small_init(uma_keg_t keg)
 	 * allocation bits for we round it up.
 	 */
 	rsize = keg->uk_size;
-	if (rsize < keg->uk_slabsize / SLAB_SETSIZE)
-		rsize = keg->uk_slabsize / SLAB_SETSIZE;
+	if (rsize < slabsize / SLAB_SETSIZE)
+		rsize = slabsize / SLAB_SETSIZE;
 	if (rsize & keg->uk_align)
 		rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
 	keg->uk_rsize = rsize;
@@ -1251,12 +1252,12 @@ keg_small_init(uma_keg_t keg)
 	else
 		shsize = sizeof(struct uma_slab);
 
-	keg->uk_ipers = (keg->uk_slabsize - shsize) / rsize;
+	keg->uk_ipers = (slabsize - shsize) / rsize;
 	KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
 	    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
 
 	memused = keg->uk_ipers * rsize + shsize;
-	wastedspace = keg->uk_slabsize - memused;
+	wastedspace = slabsize - memused;
 
 	/*
 	 * We can't do OFFPAGE if we're internal or if we've been
@@ -1277,9 +1278,9 @@ keg_small_init(uma_keg_t keg)
 	 * Historically this was not done because the VM could not
 	 * efficiently handle contiguous allocations.
 	 */
-	if ((wastedspace >= keg->uk_slabsize / UMA_MAX_WASTE) &&
-	    (keg->uk_ipers < (keg->uk_slabsize / keg->uk_rsize))) {
-		keg->uk_ipers = keg->uk_slabsize / keg->uk_rsize;
+	if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
+	    (keg->uk_ipers < (slabsize / keg->uk_rsize))) {
+		keg->uk_ipers = slabsize / keg->uk_rsize;
 		KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_SETSIZE,
 		    ("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
 #ifdef UMA_DEBUG
@@ -1288,8 +1289,8 @@ keg_small_init(uma_keg_t keg)
"maximum wasted space allowed = %d, "
"calculated ipers = %d, "
"new wasted space = %d\n", keg->uk_name, wastedspace,
- keg->uk_slabsize / UMA_MAX_WASTE, keg->uk_ipers,
- keg->uk_slabsize - keg->uk_ipers * keg->uk_rsize);
+ slabsize / UMA_MAX_WASTE, keg->uk_ipers,
+ slabsize - keg->uk_ipers * keg->uk_rsize);
#endif
keg->uk_flags |= UMA_ZONE_OFFPAGE;
}
@@ -1322,7 +1323,6 @@ keg_large_init(uma_keg_t keg)
("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
- keg->uk_slabsize = keg->uk_ppera * PAGE_SIZE;
keg->uk_ipers = 1;
keg->uk_rsize = keg->uk_size;
@@ -1374,7 +1374,6 @@ keg_cachespread_init(uma_keg_t keg)
 	pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
 	keg->uk_rsize = rsize;
 	keg->uk_ppera = pages;
-	keg->uk_slabsize = UMA_SLAB_SIZE;
 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
 	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
 	KASSERT(keg->uk_ipers <= SLAB_SETSIZE,
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index 2789e55..8423d1c 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -210,7 +210,6 @@ struct uma_keg {
 	vm_offset_t	uk_kva;		/* Zone base KVA */
 	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */
 
-	uint16_t	uk_slabsize;	/* Slab size for this keg */
 	uint16_t	uk_pgoff;	/* Offset to uma_slab struct */
 	uint16_t	uk_ppera;	/* pages per allocation from backend */
 	uint16_t	uk_ipers;	/* Items per slab */
diff --git a/tools/tools/umastat/umastat.c b/tools/tools/umastat/umastat.c
index 77bc590..8989be4 100644
--- a/tools/tools/umastat/umastat.c
+++ b/tools/tools/umastat/umastat.c
@@ -378,7 +378,6 @@ main(int argc, char *argv[])
printf(" uk_rsize = %d\n", kz.uk_rsize);
printf(" uk_maxpages = %d\n", kz.uk_maxpages);
- printf(" uk_slabsize = %d\n", kz.uk_slabsize);
printf(" uk_pgoff = %d\n", kz.uk_pgoff);
printf(" uk_ppera = %d\n", kz.uk_ppera);
printf(" uk_ipers = %d\n", kz.uk_ipers);