author		markj <markj@FreeBSD.org>	2017-09-27 14:19:47 +0000
committer	markj <markj@FreeBSD.org>	2017-09-27 14:19:47 +0000
commit		81448270d4454329f3302889a4d99f3bbca26f4e (patch)
tree		f45427fccf0e2472e340c9c1fde73c2e275432d3
parent		852f7231a6ada52f1978c907073ac6116dd02ff8 (diff)
download	FreeBSD-src-81448270d4454329f3302889a4d99f3bbca26f4e.zip
		FreeBSD-src-81448270d4454329f3302889a4d99f3bbca26f4e.tar.gz
MFC r323544:
Fix a logic error in the item size calculation for internal UMA zones.
-rw-r--r--	sys/vm/uma_core.c	17
-rw-r--r--	sys/vm/vm_page.c	3
2 files changed, 13 insertions, 7 deletions
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 26439dc..8504a72 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -1326,10 +1326,6 @@ keg_large_init(uma_keg_t keg)
 	keg->uk_ipers = 1;
 	keg->uk_rsize = keg->uk_size;
 
-	/* We can't do OFFPAGE if we're internal, bail out here. */
-	if (keg->uk_flags & UMA_ZFLAG_INTERNAL)
-		return;
-
 	/* Check whether we have enough space to not do OFFPAGE. */
 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0) {
 		shsize = sizeof(struct uma_slab);
@@ -1337,8 +1333,17 @@ keg_large_init(uma_keg_t keg)
 			shsize = (shsize & ~UMA_ALIGN_PTR) +
 			    (UMA_ALIGN_PTR + 1);
 
-		if ((PAGE_SIZE * keg->uk_ppera) - keg->uk_rsize < shsize)
-			keg->uk_flags |= UMA_ZONE_OFFPAGE;
+		if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < shsize) {
+			/*
+			 * We can't do OFFPAGE if we're internal, in which case
+			 * we need an extra page per allocation to contain the
+			 * slab header.
+			 */
+			if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
+				keg->uk_flags |= UMA_ZONE_OFFPAGE;
+			else
+				keg->uk_ppera++;
+		}
 	}
 
 	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
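Note: the following is a minimal standalone sketch of the decision this hunk introduces in keg_large_init(), assuming simplified stand-in fields and constants (PAGE_SIZE, SLAB_HDR_SIZE, and the struct below are illustrative, not the real UMA definitions). When the inline slab header no longer fits alongside the item, an ordinary keg moves the header off-page, while an internal keg grows each allocation by one page instead.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE	4096	/* stand-in for the MD page size */
#define SLAB_HDR_SIZE	64	/* stand-in for the aligned sizeof(struct uma_slab) */

struct keg {
	size_t	uk_rsize;	/* item size */
	size_t	uk_ppera;	/* pages per allocation */
	bool	internal;	/* stand-in for UMA_ZFLAG_INTERNAL */
	bool	offpage;	/* stand-in for UMA_ZONE_OFFPAGE */
};

static void
keg_large_init_sketch(struct keg *keg)
{
	/* Does the slab header still fit after the item itself? */
	if (PAGE_SIZE * keg->uk_ppera - keg->uk_rsize < SLAB_HDR_SIZE) {
		if (!keg->internal)
			keg->offpage = true;	/* move the header off-page */
		else
			keg->uk_ppera++;	/* internal kegs cannot go
						 * off-page: add a page to
						 * hold the header instead */
	}
}

int
main(void)
{
	struct keg k = { .uk_rsize = PAGE_SIZE, .uk_ppera = 1, .internal = true };

	keg_large_init_sketch(&k);
	printf("ppera=%zu offpage=%d\n", k.uk_ppera, (int)k.offpage);
	return (0);
}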
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 8ce3673..c2af86f 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -464,7 +464,8 @@ vm_page_startup(vm_offset_t vaddr)
 	 * in proportion to the zone structure size.
 	 */
 	pages_per_zone = howmany(sizeof(struct uma_zone) +
-	    sizeof(struct uma_cache) * (mp_maxid + 1), UMA_SLAB_SIZE);
+	    sizeof(struct uma_cache) * (mp_maxid + 1) +
+	    roundup2(sizeof(struct uma_slab), sizeof(void *)), UMA_SLAB_SIZE);
 	if (pages_per_zone > 1) {
 		/* Reserve more pages so that we don't run out. */
 		boot_pages = UMA_BOOT_PAGES_ZONES * pages_per_zone;
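For reference, a hedged sketch of the adjusted boot-page estimate, using illustrative structure sizes rather than the real sizeof(struct uma_zone), uma_cache, and uma_slab values; the howmany() and roundup2() macros below mirror the sys/param.h helpers the kernel code uses. The point of the change is that the per-zone boot allocation now also accounts for the inline slab header before rounding up to whole boot slabs.

#include <stddef.h>
#include <stdio.h>

/* Same shape as the kernel's sys/param.h macros. */
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
#define roundup2(x, y)	(((x) + ((y) - 1)) & ~((y) - 1))	/* y must be a power of two */

#define UMA_SLAB_SIZE	4096	/* stand-in: one page per boot slab */

int
main(void)
{
	size_t zone_size = 1024;	/* stand-in for sizeof(struct uma_zone) */
	size_t cache_size = 128;	/* stand-in for sizeof(struct uma_cache) */
	size_t slab_size = 80;		/* stand-in for sizeof(struct uma_slab) */
	unsigned mp_maxid = 63;		/* 64 CPUs */

	/* Item size plus the aligned slab header, rounded up to whole slabs. */
	size_t pages_per_zone = howmany(zone_size +
	    cache_size * (mp_maxid + 1) +
	    roundup2(slab_size, sizeof(void *)), UMA_SLAB_SIZE);

	printf("pages_per_zone = %zu\n", pages_per_zone);
	return (0);
}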