author     bmilekic <bmilekic@FreeBSD.org>    2004-07-29 15:25:40 +0000
committer  bmilekic <bmilekic@FreeBSD.org>    2004-07-29 15:25:40 +0000
commit     c998cc2027b9a45d7c7afade7d33795b4e4fa5d3 (patch)
tree       941ec00c8de13276da0999465053ba8d29e5c938
parent     20b7bec3b8c2d55c222bf46d13f6ef4c61bc249d (diff)
Rework the way slab header storage space is calculated in UMA.
- zone_large_init() stays pretty much the same.
- zone_small_init() will try to stash the slab header in the slab page being allocated if the amount of calculated wasted space is less than UMA_MAX_WASTE (for both the UMA_ZONE_REFCNT case and the regular case). If the amount of wasted space is >= UMA_MAX_WASTE, then UMA_ZONE_OFFPAGE will be set and the slab header will be allocated separately for better use of space.
- uma_startup() calculates the maximum ipers required in offpage slabs (so that the offpage slab header zone(s) can be sized accordingly). The algorithm used to calculate this replaces the old calculation (which only happened to work coincidentally). We now iterate over possible object sizes, starting from the smallest one, until we determine that the wastedspace calculated in zone_small_init() might end up being greater than UMA_MAX_WASTE, at which point we use the found object size to compute the maximum possible ipers. The reason this works is because:
  - wastedspace versus objectsize is a see-saw function with local minima all equal to zero and local maxima growing in direct proportion to objectsize. This implies that for objects up to or equal to a certain objectsize, the see-saw remains entirely below UMA_MAX_WASTE, so for those objectsizes it is impossible to ever go OFFPAGE for slab headers.
  - ipers (items-per-slab) versus objectsize is an inversely proportional function which falls off very quickly (very large for small objectsizes).
  - To determine the maximum ipers we'll ever need from OFFPAGE slab headers, we first find the largest objectsize for which we are guaranteed never to go offpage, and use it to compute ipers (as though we were offpage). Since the only objectsizes allowed to go offpage are bigger than the found objectsize, and since ipers versus objectsize is inversely proportional (and monotonically decreasing), the ipers computed is always >= what we will ever need in offpage slab headers.
- Define UMA_FRITM_SZ and UMA_FRITMREF_SZ to be the actual (possibly padded) size of each freelist index so that offset calculations are fixed.

This might fix weird data corruption problems and certainly allows ARM to now boot to at least single-user (via simulator).

Tested on i386 UP by me.
Tested on sparc64 SMP by fenner.
Tested on ARM simulator to single-user by cognet.
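[Editor's sketch] The in-page versus OFFPAGE decision described in the commit message can be illustrated with a small stand-alone C program. This is only a sketch: SLAB_SIZE, MAX_WASTE, HDR_SIZE, FRITM_SIZE and the helper small_init() are illustrative stand-ins, not the real UMA_SLAB_SIZE, UMA_MAX_WASTE, sizeof(struct uma_slab) and UMA_FRITM_SZ used by zone_small_init().

    #include <stdio.h>

    /*
     * Illustrative stand-ins for UMA_SLAB_SIZE, UMA_MAX_WASTE,
     * sizeof(struct uma_slab) and UMA_FRITM_SZ; the real values live in
     * sys/vm/uma_int.h.  Alignment rounding of rsize is omitted for brevity.
     */
    #define SLAB_SIZE	4096u		/* one page backs a small-object slab */
    #define MAX_WASTE	(SLAB_SIZE / 10)
    #define HDR_SIZE	32u		/* in-page slab header, roughly */
    #define FRITM_SIZE	1u		/* one freelist index per item */

    /*
     * Decide whether a zone of 'objsize'-byte items keeps its slab header
     * inside the slab page or goes OFFPAGE, mirroring the reworked
     * zone_small_init() logic.  Returns items-per-slab (ipers).
     */
    static unsigned
    small_init(unsigned objsize, int *offpage)
    {
    	unsigned rsize = objsize + FRITM_SIZE;	/* item + freelist linkage */
    	unsigned ipers = (SLAB_SIZE - HDR_SIZE) / rsize;
    	unsigned wasted = SLAB_SIZE - (ipers * rsize + HDR_SIZE);

    	*offpage = 0;
    	if (wasted >= MAX_WASTE && ipers < SLAB_SIZE / objsize) {
    		/* Header kept elsewhere; the whole page holds items. */
    		*offpage = 1;
    		ipers = SLAB_SIZE / objsize;
    	}
    	return (ipers);
    }

    int
    main(void)
    {
    	int offpage;
    	unsigned sz, ipers;

    	for (sz = 16; sz <= 2048; sz <<= 1) {
    		ipers = small_init(sz, &offpage);
    		printf("objsize %4u: ipers %3u %s\n", sz, ipers,
    		    offpage ? "(OFFPAGE)" : "(in-page header)");
    	}
    	return (0);
    }

With these stand-in constants, small objects keep the header in-page with negligible waste, while large objects trip the MAX_WASTE bound and go OFFPAGE, gaining an extra item or two per slab.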
-rw-r--r--  sys/vm/uma_core.c  218
-rw-r--r--  sys/vm/uma_int.h   10
2 files changed, 176 insertions, 52 deletions
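[Editor's sketch] The uma_startup() hunk in the diff below searches for the largest object size that is guaranteed to stay in-page and uses it to bound uma_max_ipers. Here is a minimal, self-contained C sketch of that search, again using illustrative stand-in constants rather than the real UMA_* definitions.

    #include <stdio.h>

    /*
     * Same illustrative stand-ins as in the earlier sketch; the real
     * constants are UMA_SLAB_SIZE, UMA_MAX_WASTE, UMA_SMALLEST_UNIT,
     * sizeof(struct uma_slab) and UMA_FRITM_SZ.
     */
    #define SLAB_SIZE	4096u
    #define MAX_WASTE	(SLAB_SIZE / 10)
    #define SMALLEST_UNIT	16u
    #define HDR_SIZE	32u
    #define FRITM_SIZE	1u

    int
    main(void)
    {
    	unsigned wsize, totsize, objsize, max_ipers;

    	/*
    	 * Walk object sizes upward from the smallest one.  While the space
    	 * consumed by items plus the in-page header stays within MAX_WASTE
    	 * of a full slab, such zones never go OFFPAGE.  The first objsize
    	 * that breaks the bound marks the boundary; one below it is the
    	 * largest size guaranteed to stay in-page, and since ipers falls
    	 * monotonically with objsize, that size yields the largest ipers an
    	 * OFFPAGE slab header will ever have to track.
    	 */
    	wsize = SLAB_SIZE - HDR_SIZE - MAX_WASTE;
    	totsize = wsize;
    	objsize = SMALLEST_UNIT;
    	while (totsize >= wsize) {
    		totsize = (SLAB_SIZE - HDR_SIZE) / (objsize + FRITM_SIZE);
    		totsize *= (objsize + FRITM_SIZE);
    		objsize++;
    	}
    	if (objsize > SMALLEST_UNIT)
    		objsize--;
    	max_ipers = SLAB_SIZE / objsize;

    	printf("boundary objsize %u, max offpage ipers %u\n",
    	    objsize, max_ipers);
    	return (0);
    }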
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 25d00b4..d6b9ba5 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -129,6 +129,10 @@ static int uma_boot_free = 0;
/* Is the VM done starting up? */
static int booted = 0;
+/* Maximum number of allowed items-per-slab if the slab header is OFFPAGE */
+static u_int uma_max_ipers;
+static u_int uma_max_ipers_ref;
+
/*
* This is the handle used to schedule events that need to happen
* outside of the allocation fast path.
@@ -1011,9 +1015,10 @@ static void
zone_small_init(uma_zone_t zone)
{
uma_keg_t keg;
- int rsize;
- int memused;
- int ipers;
+ u_int rsize;
+ u_int memused;
+ u_int wastedspace;
+ u_int shsize;
keg = zone->uz_keg;
KASSERT(keg != NULL, ("Keg is null in zone_small_init"));
@@ -1021,40 +1026,53 @@ zone_small_init(uma_zone_t zone)
if (rsize < UMA_SMALLEST_UNIT)
rsize = UMA_SMALLEST_UNIT;
-
if (rsize & keg->uk_align)
rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
keg->uk_rsize = rsize;
-
- rsize += 1; /* Account for the byte of linkage */
- keg->uk_ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
keg->uk_ppera = 1;
- KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0, uh-oh!"));
- memused = keg->uk_ipers * keg->uk_rsize;
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ rsize += UMA_FRITMREF_SZ; /* linkage & refcnt */
+ shsize = sizeof(struct uma_slab_refcnt);
+ } else {
+ rsize += UMA_FRITM_SZ; /* Account for linkage */
+ shsize = sizeof(struct uma_slab);
+ }
+
+ keg->uk_ipers = (UMA_SLAB_SIZE - shsize) / rsize;
+ KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0"));
+ memused = keg->uk_ipers * rsize + shsize;
+ wastedspace = UMA_SLAB_SIZE - memused;
- /* Can we do any better? */
- if ((keg->uk_flags & UMA_ZONE_REFCNT) ||
- ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE)) {
- /*
- * We can't do this if we're internal or if we've been
- * asked to not go to the VM for buckets. If we do this we
- * may end up going to the VM (kmem_map) for slabs which we
- * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
- * result of UMA_ZONE_VM, which clearly forbids it.
- */
- if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
- (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
- return;
- ipers = UMA_SLAB_SIZE / keg->uk_rsize;
- if ((keg->uk_flags & UMA_ZONE_REFCNT) ||
- (ipers > keg->uk_ipers)) {
- keg->uk_flags |= UMA_ZONE_OFFPAGE;
- if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
- keg->uk_flags |= UMA_ZONE_HASH;
- keg->uk_ipers = ipers;
- }
+ /*
+ * We can't do OFFPAGE if we're internal or if we've been
+ * asked to not go to the VM for buckets. If we do this we
+ * may end up going to the VM (kmem_map) for slabs which we
+ * do not want to do if we're UMA_ZFLAG_CACHEONLY as a
+ * result of UMA_ZONE_VM, which clearly forbids it.
+ */
+ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
+ (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
+ return;
+
+ if ((wastedspace >= UMA_MAX_WASTE) &&
+ (keg->uk_ipers < (UMA_SLAB_SIZE / keg->uk_rsize))) {
+ keg->uk_ipers = UMA_SLAB_SIZE / keg->uk_rsize;
+ KASSERT(keg->uk_ipers <= 255,
+ ("zone_small_init: keg->uk_ipers too high!"));
+#ifdef UMA_DEBUG
+ printf("UMA decided we need offpage slab headers for "
+ "zone: %s, calculated wastedspace = %d, "
+ "maximum wasted space allowed = %d, "
+ "calculated ipers = %d, "
+ "new wasted space = %d\n", zone->uz_name, wastedspace,
+ UMA_MAX_WASTE, keg->uk_ipers,
+ UMA_SLAB_SIZE - keg->uk_ipers * keg->uk_rsize);
+#endif
+ keg->uk_flags |= UMA_ZONE_OFFPAGE;
+ if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
+ keg->uk_flags |= UMA_ZONE_HASH;
}
}
@@ -1137,20 +1155,31 @@ keg_ctor(void *mem, int size, void *udata)
keg->uk_init = zero_init;
/*
- * The +1 byte added to uk_size is to account for the byte of
+ * The +UMA_FRITM_SZ added to uk_size is to account for the
* linkage that is added to the size in zone_small_init(). If
* we don't account for this here then we may end up in
* zone_small_init() with a calculated 'ipers' of 0.
*/
- if ((keg->uk_size+1) > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
- zone_large_init(zone);
- else
- zone_small_init(zone);
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ if ((keg->uk_size+UMA_FRITMREF_SZ) >
+ (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)))
+ zone_large_init(zone);
+ else
+ zone_small_init(zone);
+ } else {
+ if ((keg->uk_size+UMA_FRITM_SZ) >
+ (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
+ zone_large_init(zone);
+ else
+ zone_small_init(zone);
+ }
- if (keg->uk_flags & UMA_ZONE_REFCNT)
- keg->uk_slabzone = slabrefzone;
- else if (keg->uk_flags & UMA_ZONE_OFFPAGE)
- keg->uk_slabzone = slabzone;
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ keg->uk_slabzone = slabrefzone;
+ else
+ keg->uk_slabzone = slabzone;
+ }
/*
* If we haven't booted yet we need allocations to go through the
@@ -1181,17 +1210,35 @@ keg_ctor(void *mem, int size, void *udata)
* justified offset into the memory on an ALIGN_PTR boundary.
*/
if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
- int totsize;
+ u_int totsize;
/* Size of the slab struct and free list */
- totsize = sizeof(struct uma_slab) + keg->uk_ipers;
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ totsize = sizeof(struct uma_slab_refcnt) +
+ keg->uk_ipers * UMA_FRITMREF_SZ;
+ else
+ totsize = sizeof(struct uma_slab) +
+ keg->uk_ipers * UMA_FRITM_SZ;
+
if (totsize & UMA_ALIGN_PTR)
totsize = (totsize & ~UMA_ALIGN_PTR) +
(UMA_ALIGN_PTR + 1);
keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
- totsize = keg->uk_pgoff + sizeof(struct uma_slab)
- + keg->uk_ipers;
- /* I don't think it's possible, but I'll make sure anyway */
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ totsize = keg->uk_pgoff + sizeof(struct uma_slab_refcnt)
+ + keg->uk_ipers * UMA_FRITMREF_SZ;
+ else
+ totsize = keg->uk_pgoff + sizeof(struct uma_slab)
+ + keg->uk_ipers * UMA_FRITM_SZ;
+
+ /*
+ * The only way the following is possible is if with our
+ * UMA_ALIGN_PTR adjustments we are now bigger than
+ * UMA_SLAB_SIZE. I haven't checked whether this is
+ * mathematically possible for all cases, so we make
+ * sure here anyway.
+ */
if (totsize > UMA_SLAB_SIZE) {
printf("zone %s ipers %d rsize %d size %d\n",
zone->uz_name, keg->uk_ipers, keg->uk_rsize,
@@ -1395,7 +1442,8 @@ uma_startup(void *bootmem)
{
struct uma_zctor_args args;
uma_slab_t slab;
- int slabsize;
+ u_int slabsize;
+ u_int objsize, totsize, wsize;
int i;
#ifdef UMA_DEBUG
@@ -1413,6 +1461,77 @@ uma_startup(void *bootmem)
*/
mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF | MTX_RECURSE);
+ /*
+ * Figure out the maximum number of items-per-slab we'll have if
+ * we're using the OFFPAGE slab header to track free items, given
+ * all possible object sizes and the maximum desired wastage
+ * (UMA_MAX_WASTE).
+ *
+ * We iterate until we find an object size for
+ * which the calculated wastage in zone_small_init() will be
+ * enough to warrant OFFPAGE. Since wastedspace versus objsize
+ * is an overall increasing see-saw function, we find the smallest
+ * objsize such that the wastage is always acceptable for objects
+ * with that objsize or smaller. Since a smaller objsize always
+ * generates a larger possible uma_max_ipers, we use this computed
+ * objsize to calculate the largest ipers possible. Since the
+ * ipers calculated for OFFPAGE slab headers is always larger than
+ * the ipers initially calculated in zone_small_init(), we use
+ * the former's equation (UMA_SLAB_SIZE / keg->uk_rsize) to
+ * obtain the maximum ipers possible for offpage slab headers.
+ *
+ * It should be noted that ipers versus objsize is an inversely
+ * proportional function which drops off rather quickly so as
+ * long as our UMA_MAX_WASTE is such that the objsize we calculate
+ * falls into the portion of the inverse relation AFTER the steep
+ * falloff, then uma_max_ipers shouldn't be too high (~10 on i386).
+ *
+ * Note that we have 8-bits (1 byte) to use as a freelist index
+ * inside the actual slab header itself and this is enough to
+ * accommodate us. In the worst case, a UMA_SMALLEST_UNIT-sized
+ * object with offpage slab header would have ipers =
+ * UMA_SLAB_SIZE / UMA_SMALLEST_UNIT (currently = 256), which is
+ * 1 greater than what our byte-integer freelist index can
+ * accommodate, but we know that this situation never occurs as
+ * for UMA_SMALLEST_UNIT-sized objects, we will never calculate
+ * that we need to go to offpage slab headers. Or, if we do,
+ * then we trap that condition below and panic in the INVARIANTS case.
+ */
+ wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab) - UMA_MAX_WASTE;
+ totsize = wsize;
+ objsize = UMA_SMALLEST_UNIT;
+ while (totsize >= wsize) {
+ totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) /
+ (objsize + UMA_FRITM_SZ);
+ totsize *= (UMA_FRITM_SZ + objsize);
+ objsize++;
+ }
+ if (objsize > UMA_SMALLEST_UNIT)
+ objsize--;
+ uma_max_ipers = UMA_SLAB_SIZE / objsize;
+
+ wsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt) - UMA_MAX_WASTE;
+ totsize = wsize;
+ objsize = UMA_SMALLEST_UNIT;
+ while (totsize >= wsize) {
+ totsize = (UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt)) /
+ (objsize + UMA_FRITMREF_SZ);
+ totsize *= (UMA_FRITMREF_SZ + objsize);
+ objsize++;
+ }
+ if (objsize > UMA_SMALLEST_UNIT)
+ objsize--;
+ uma_max_ipers_ref = UMA_SLAB_SIZE / objsize;
+
+ KASSERT((uma_max_ipers_ref <= 255) && (uma_max_ipers <= 255),
+ ("uma_startup: calculated uma_max_ipers values too large!"));
+
+#ifdef UMA_DEBUG
+ printf("Calculated uma_max_ipers (for OFFPAGE) is %d\n", uma_max_ipers);
+ printf("Calculated uma_max_ipers_slab (for OFFPAGE) is %d\n",
+ uma_max_ipers_ref);
+#endif
+
/* "manually" create the initial zone */
args.name = "UMA Kegs";
args.size = sizeof(struct uma_keg);
@@ -1468,9 +1587,7 @@ uma_startup(void *bootmem)
* This is the max number of free list items we'll have with
* offpage slabs.
*/
- slabsize = UMA_SLAB_SIZE - sizeof(struct uma_slab);
- slabsize /= UMA_MAX_WASTE;
- slabsize++; /* In case there it's rounded */
+ slabsize = uma_max_ipers * UMA_FRITM_SZ;
slabsize += sizeof(struct uma_slab);
/* Now make a zone for slab headers */
@@ -1483,10 +1600,7 @@ uma_startup(void *bootmem)
* We also create a zone for the bigger slabs with reference
* counts in them, to accomodate UMA_ZONE_REFCNT zones.
*/
- slabsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt);
- slabsize /= UMA_MAX_WASTE;
- slabsize++;
- slabsize += 4 * slabsize;
+ slabsize = uma_max_ipers_ref * UMA_FRITMREF_SZ;
slabsize += sizeof(struct uma_slab_refcnt);
slabrefzone = uma_zcreate("UMA RCntSlabs",
slabsize,
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index a4cbe5f..1f62d27 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -271,6 +271,16 @@ typedef struct uma_slab * uma_slab_t;
typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
/*
+ * These give us the size of one free item reference within our corresponding
+ * uma_slab structures, so that our calculations during zone setup are correct
+ * regardless of what the compiler decides to do with padding the structure
+ * arrays within uma_slab.
+ */
+#define UMA_FRITM_SZ (sizeof(struct uma_slab) - sizeof(struct uma_slab_head))
+#define UMA_FRITMREF_SZ (sizeof(struct uma_slab_refcnt) - \
+ sizeof(struct uma_slab_head))
+
+/*
* Zone management structure
*
* TODO: Optimize for cache line size
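[Editor's sketch] The UMA_FRITM_SZ and UMA_FRITMREF_SZ definitions added above take the difference of two struct sizes rather than sizeof() of the element type, so that any padding the compiler inserts is included in the per-item stride. The following stand-alone C sketch uses hypothetical mock structures (slab_head, slab and slab_refcnt are not the real UMA declarations) to show the effect.

    #include <stdio.h>
    #include <stdint.h>

    /*
     * Hypothetical mock-ups of the UMA structures (not the real
     * declarations), used only to show why the sizeof() difference, rather
     * than the size of the element type itself, gives the per-item freelist
     * stride once the compiler has applied padding.
     */
    struct slab_head {
    	void		*sh_mem;	/* backing memory */
    	uint16_t	 sh_freecount;	/* free items remaining */
    };

    struct slab {
    	struct slab_head s_head;
    	uint8_t		 s_freelist[1];	/* first freelist index */
    };

    struct slab_refcnt {
    	struct slab_head s_head;
    	struct {
    		uint8_t  item;		/* freelist linkage */
    		uint32_t refcnt;	/* per-item reference count */
    	} s_freelist[1];
    };

    #define FRITM_SZ	(sizeof(struct slab) - sizeof(struct slab_head))
    #define FRITMREF_SZ	(sizeof(struct slab_refcnt) - sizeof(struct slab_head))

    int
    main(void)
    {
    	/*
    	 * With typical alignment rules FRITMREF_SZ is larger than the
    	 * 1 + 4 bytes its members add up to, because the inner struct is
    	 * padded; offsets computed with the padded stride stay correct.
    	 */
    	printf("FRITM_SZ = %zu, FRITMREF_SZ = %zu\n",
    	    FRITM_SZ, FRITMREF_SZ);
    	return (0);
    }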