author     bmilekic <bmilekic@FreeBSD.org>    2004-05-31 21:46:06 +0000
committer  bmilekic <bmilekic@FreeBSD.org>    2004-05-31 21:46:06 +0000
commit     f7574a2276b935509aba6b131a39c685a68e61d2 (patch)
tree       dacbb577a5d3ed365d11df0435010eee4c5380da /sys/vm
parent     d5d90e314729317ee9cce434f3c548b3f4aaaf04 (diff)
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.

Extensions to UMA worth noting:
  - Better layering between slab <-> zone caches; introduce Keg structure
    which splits off slab cache away from the zone structure and allows
    multiple zones to be stacked on top of a single Keg (single type of
    slab cache); perhaps we should look into defining a subset API on top
    of the Keg for special use by malloc(9), for example.
  - UMA_ZONE_REFCNT zones can now be added, and reference counters
    automagically allocated for them within the end of the associated slab
    structures.  uma_find_refcnt() does a kextract to fetch the slab struct
    reference from the underlying page, and look up the corresponding
    refcnt.

mbuma things worth noting:
  - integrates mbuf & cluster allocations with extended UMA and provides
    caches for commonly-allocated items; defines several zones (two
    primary, one secondary) and two kegs.
  - change up certain code paths that always used to do:
    m_get() + m_clget() to instead just use m_getcl() and try to take
    advantage of the newly defined secondary Packet zone.
  - netstat(1) and systat(1) quickly hacked up to do basic stat reporting
    but additional stats work needs to be done once some other details
    within UMA have been taken care of and it becomes clearer how stats
    will work within the modified framework.

From the user perspective, one implication is that the NMBCLUSTERS
compile-time option is no longer used.  The maximum number of clusters is
still capped off according to maxusers, but it can be made unlimited by
setting the kern.ipc.nmbclusters boot-time tunable to zero.  Work should
be done to write an appropriate sysctl handler allowing dynamic tuning of
kern.ipc.nmbclusters at runtime.

Additional things worth noting/known issues (READ):
  - One report of 'ips' (ServeRAID) driver acting really slow in
    conjunction with mbuma.  Need more data.  Latest report is that ips
    is equally sucking with and without mbuma.
  - Giant leak in NFS code sometimes occurs, can't reproduce but
    currently analyzing; brueffer is able to reproduce but THIS IS NOT
    an mbuma-specific problem and currently occurs even WITHOUT mbuma.
  - Issues in network locking: there is at least one code path in the
    rip code where one or more locks are acquired and we end up in
    m_prepend() with M_WAITOK, which causes WITNESS to whine from within
    UMA.  Current temporary solution: force all UMA allocations to be
    M_NOWAIT from within UMA for now to avoid deadlocks unless WITNESS is
    defined and we can determine with certainty that we're not holding
    any locks when we're M_WAITOK.
  - I've seen at least one weird socketbuffer empty-but-mbuf-still-attached
    panic.  I don't believe this to be related to mbuma but please keep
    your eyes open, turn on debugging, and capture crash dumps.

This change removes more code than it adds.

A paper is available detailing the change and considering various
performance issues; it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation details, as well
as credits.

Testing and Debugging: rwatson, brueffer, Ketrien I. Saihr-Kesenchedra, ...
Reviewed by: Lots of people (for different parts)
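As a quick illustration of the m_get() + m_clget() to m_getcl() change-up
described above, here is a minimal sketch of a hypothetical receive-buffer
allocation path (not part of this patch; the function names are placeholders):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>

/* Old style: two calls, visiting the mbuf zone and then the cluster zone. */
static struct mbuf *
rxbuf_alloc_old(void)
{
	struct mbuf *m;

	m = m_get(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_clget(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {	/* cluster attach failed */
		m_free(m);
		return (NULL);
	}
	return (m);
}

/*
 * New style: one call serviced by the secondary Packet zone, which caches
 * mbufs with clusters already attached on top of the underlying kegs.
 */
static struct mbuf *
rxbuf_alloc_new(void)
{
	return (m_getcl(M_DONTWAIT, MT_DATA, 0));
}

On the tuning side, the old NMBCLUSTERS kernel option gives way to the
kern.ipc.nmbclusters boot-time tunable noted above; setting it to zero
(e.g. kern.ipc.nmbclusters="0" in loader.conf) removes the cluster cap.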
Diffstat (limited to 'sys/vm')
-rw-r--r--   sys/vm/uma.h        78
-rw-r--r--   sys/vm/uma_core.c  890
-rw-r--r--   sys/vm/uma_dbg.c    34
-rw-r--r--   sys/vm/uma_int.h   175
-rw-r--r--   sys/vm/vm_kern.c    10
5 files changed, 832 insertions, 355 deletions
diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index 4de1efa..0d34ca3 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -43,7 +43,7 @@
/* Types and type defs */
-struct uma_zone;
+struct uma_zone;
/* Opaque type used as a handle to the zone */
typedef struct uma_zone * uma_zone_t;
@@ -157,12 +157,46 @@ typedef void (*uma_fini)(void *mem, int size);
* A pointer to a structure which is intended to be opaque to users of
* the interface. The value may be null if the wait flag is not set.
*/
-
uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
uma_init uminit, uma_fini fini, int align,
u_int16_t flags);
/*
+ * Create a secondary uma zone
+ *
+ * Arguments:
+ * name The text name of the zone for debugging and stats, this memory
+ * should not be freed until the zone has been deallocated.
+ * ctor The constructor that is called when the object is allocated
+ * dtor The destructor that is called when the object is freed.
+ * zinit An initializer that sets up the initial state of the memory
+ * as the object passes from the Keg's slab to the Zone's cache.
+ * zfini A discard function that undoes initialization done by init
+ * as the object passes from the Zone's cache to the Keg's slab.
+ *
+ * ctor/dtor/zinit/zfini may all be null, see notes above.
+ * Note that the zinit and zfini specified here are NOT
+ * exactly the same as the init/fini specified to uma_zcreate()
+ * when creating a master zone. These zinit/zfini are called
+ * on the TRANSITION from keg to zone (and vice-versa). Once
+ * these are set, the primary zone may alter its init/fini
+ * (which are called when the object passes from VM to keg)
+ * using uma_zone_set_init/fini() as well as its own
+ * zinit/zfini (unset by default for master zone) with
+ * uma_zone_set_zinit/zfini() (note subtle 'z' prefix).
+ *
+ * align A bitmask that corresponds to the requested alignment
+ * e.g. 4 would be 0x3
+ * flags A set of parameters that control the behavior of the zone
+ *
+ * Returns:
+ * A pointer to a structure which is intended to be opaque to users of
+ * the interface. The value may be null if the wait flag is not set.
+ */
+uma_zone_t uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
+ uma_init zinit, uma_fini zfini, uma_zone_t master);
+
+/*
* Definitions for uma_zcreate flags
*
* These flags share space with UMA_ZFLAGs in uma_int.h. Be careful not to
@@ -185,6 +219,9 @@ uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
* Use a hash table instead of caching
* information in the vm_page.
*/
+#define UMA_ZONE_SECONDARY 0x0200 /* Zone is a Secondary Zone */
+#define UMA_ZONE_REFCNT 0x0400 /* Allocate refcnts in slabs */
+#define UMA_ZONE_MAXBUCKET 0x0800 /* Use largest buckets */
/* Definitions for align */
#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
@@ -201,7 +238,6 @@ uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
* zone The zone we want to destroy.
*
*/
-
void uma_zdestroy(uma_zone_t zone);
/*
@@ -376,6 +412,28 @@ int uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int size);
void uma_zone_set_max(uma_zone_t zone, int nitems);
/*
+ * The following two routines (uma_zone_set_init/fini)
+ * are used to set the backend init/fini pair which acts on an
+ * object as it becomes allocated and is placed in a slab within
+ * the specified zone's backing keg. These should probably not
+ * be changed once allocations have already begun and only
+ * immediately upon zone creation.
+ */
+void uma_zone_set_init(uma_zone_t zone, uma_init uminit);
+void uma_zone_set_fini(uma_zone_t zone, uma_fini fini);
+
+/*
+ * The following two routines (uma_zone_set_zinit/zfini) are
+ * used to set the zinit/zfini pair which acts on an object as
+ * it passes from the backing Keg's slab cache to the
+ * specified Zone's bucket cache. These should probably not
+ * be changed once allocations have already begun and
+ * only immediately upon zone creation.
+ */
+void uma_zone_set_zinit(uma_zone_t zone, uma_init zinit);
+void uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini);
+
+/*
* Replaces the standard page_alloc or obj_alloc functions for this zone
*
* Arguments:
@@ -430,5 +488,19 @@ void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
*/
void uma_prealloc(uma_zone_t zone, int itemcnt);
+/*
+ * Used to lookup the reference counter allocated for an item
+ * from a UMA_ZONE_REFCNT zone. For UMA_ZONE_REFCNT zones,
+ * reference counters are allocated for items and stored in
+ * the underlying slab header.
+ *
+ * Arguments:
+ * zone The UMA_ZONE_REFCNT zone to which the item belongs.
+ * item The address of the item for which we want a refcnt.
+ *
+ * Returns:
+ * A pointer to a u_int32_t reference counter.
+ */
+u_int32_t *uma_find_refcnt(uma_zone_t zone, void *item);
#endif
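
For orientation, here is a minimal, hypothetical usage sketch of the
interfaces added above (the zone names, item sizes, and the trivial
reference-count handling are illustrative assumptions, not code from this
commit):

#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

static uma_zone_t buf_zone;	/* master zone; owns the backing keg */
static uma_zone_t pkt_zone;	/* secondary zone stacked on the same keg */
static uma_zone_t ref_zone;	/* zone with per-item reference counters */

static void
example_zones_init(void)
{
	/* Master zone: its uminit/fini act on the VM <-> keg transition. */
	buf_zone = uma_zcreate("example bufs", 256, NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, 0);

	/*
	 * Secondary zone sharing buf_zone's keg; a zinit/zfini pair
	 * (NULL here) would act on the keg <-> zone transition instead.
	 */
	pkt_zone = uma_zsecond_create("example pkts", NULL, NULL,
	    NULL, NULL, buf_zone);

	/* UMA_ZONE_REFCNT: reference counters live in the slab header. */
	ref_zone = uma_zcreate("example refs", 128, NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_REFCNT);
}

static void
example_refcnt_use(void)
{
	u_int32_t *refcnt;
	void *item;

	item = uma_zalloc(ref_zone, M_WAITOK);
	refcnt = uma_find_refcnt(ref_zone, item);
	*refcnt = 1;			/* the caller manages the count */
	uma_zfree(ref_zone, item);
}

The mbuf allocator introduced by this commit follows the same pattern for
its secondary Packet zone, which is stacked on a keg shared with a primary
zone.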
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index f693540..82d60c6 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -84,15 +84,19 @@ __FBSDID("$FreeBSD$");
#include <machine/vmparam.h>
/*
- * This is the zone from which all zones are spawned. The idea is that even
- * the zone heads are allocated from the allocator, so we use the bss section
- * to bootstrap us.
+ * This is the zone and keg from which all zones are spawned. The idea is that
+ * even the zone & keg heads are allocated from the allocator, so we use the
+ * bss section to bootstrap us.
*/
-static struct uma_zone masterzone;
-static uma_zone_t zones = &masterzone;
+static struct uma_keg masterkeg;
+static struct uma_zone masterzone_k;
+static struct uma_zone masterzone_z;
+static uma_zone_t kegs = &masterzone_k;
+static uma_zone_t zones = &masterzone_z;
/* This is the zone from which all of uma_slab_t's are allocated. */
static uma_zone_t slabzone;
+static uma_zone_t slabrefzone; /* With refcounters (for UMA_ZONE_REFCNT) */
/*
* The initial hash tables come out of this zone so they can be allocated
@@ -107,10 +111,10 @@ static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
*/
static int bucketdisable = 1;
-/* Linked list of all zones in the system */
-static LIST_HEAD(,uma_zone) uma_zones = LIST_HEAD_INITIALIZER(&uma_zones);
+/* Linked list of all kegs in the system */
+static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(&uma_kegs);
-/* This mutex protects the zone list */
+/* This mutex protects the keg list */
static struct mtx uma_mtx;
/* These are the pcpu cache locks */
@@ -144,6 +148,16 @@ struct uma_zctor_args {
uma_dtor dtor;
uma_init uminit;
uma_fini fini;
+ uma_keg_t keg;
+ int align;
+ u_int16_t flags;
+};
+
+struct uma_kctor_args {
+ uma_zone_t zone;
+ size_t size;
+ uma_init uminit;
+ uma_fini fini;
int align;
u_int16_t flags;
};
@@ -179,6 +193,8 @@ static uma_slab_t slab_zalloc(uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
static void bucket_cache_drain(uma_zone_t zone);
+static void keg_ctor(void *, int, void *);
+static void keg_dtor(void *, int, void *);
static void zone_ctor(void *, int, void *);
static void zone_dtor(void *, int, void *);
static void zero_init(void *, int);
@@ -202,6 +218,8 @@ static int uma_zalloc_bucket(uma_zone_t zone, int flags);
static uma_slab_t uma_zone_slab(uma_zone_t zone, int flags);
static void *uma_slab_alloc(uma_zone_t zone, uma_slab_t slab);
static void zone_drain(uma_zone_t);
+static void uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
+ uma_fini fini, int align, u_int16_t flags);
void uma_print_zone(uma_zone_t);
void uma_print_stats(void);
@@ -328,10 +346,12 @@ uma_timeout(void *unused)
static void
zone_timeout(uma_zone_t zone)
{
+ uma_keg_t keg;
uma_cache_t cache;
u_int64_t alloc;
int cpu;
+ keg = zone->uz_keg;
alloc = 0;
/*
@@ -344,7 +364,7 @@ zone_timeout(uma_zone_t zone)
* to lock and do it here instead so that the statistics don't get too
* far out of sync.
*/
- if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) {
+ if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL)) {
for (cpu = 0; cpu <= mp_maxid; cpu++) {
if (CPU_ABSENT(cpu))
continue;
@@ -369,8 +389,8 @@ zone_timeout(uma_zone_t zone)
* may be a little aggressive. Should I allow for two collisions max?
*/
- if (zone->uz_flags & UMA_ZONE_HASH &&
- zone->uz_pages / zone->uz_ppera >= zone->uz_hash.uh_hashsize) {
+ if (keg->uk_flags & UMA_ZONE_HASH &&
+ keg->uk_pages / keg->uk_ppera >= keg->uk_hash.uh_hashsize) {
struct uma_hash newhash;
struct uma_hash oldhash;
int ret;
@@ -381,14 +401,14 @@ zone_timeout(uma_zone_t zone)
* I have to do everything in stages and check for
* races.
*/
- newhash = zone->uz_hash;
+ newhash = keg->uk_hash;
ZONE_UNLOCK(zone);
ret = hash_alloc(&newhash);
ZONE_LOCK(zone);
if (ret) {
- if (hash_expand(&zone->uz_hash, &newhash)) {
- oldhash = zone->uz_hash;
- zone->uz_hash = newhash;
+ if (hash_expand(&keg->uk_hash, &newhash)) {
+ oldhash = keg->uk_hash;
+ keg->uk_hash = newhash;
} else
oldhash = newhash;
@@ -530,7 +550,7 @@ bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
mzone = 0;
/* We have to lookup the slab again for malloc.. */
- if (zone->uz_flags & UMA_ZONE_MALLOC)
+ if (zone->uz_keg->uk_flags & UMA_ZONE_MALLOC)
mzone = 1;
while (bucket->ub_cnt > 0) {
@@ -636,29 +656,32 @@ static void
zone_drain(uma_zone_t zone)
{
struct slabhead freeslabs = {};
+ uma_keg_t keg;
uma_slab_t slab;
uma_slab_t n;
u_int8_t flags;
u_int8_t *mem;
int i;
+ keg = zone->uz_keg;
+
/*
- * We don't want to take pages from staticly allocated zones at this
+ * We don't want to take pages from statically allocated zones at this
* time
*/
- if (zone->uz_flags & UMA_ZONE_NOFREE || zone->uz_freef == NULL)
+ if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
return;
ZONE_LOCK(zone);
#ifdef UMA_DEBUG
- printf("%s free items: %u\n", zone->uz_name, zone->uz_free);
+ printf("%s free items: %u\n", zone->uz_name, keg->uk_free);
#endif
bucket_cache_drain(zone);
- if (zone->uz_free == 0)
+ if (keg->uk_free == 0)
goto finished;
- slab = LIST_FIRST(&zone->uz_free_slab);
+ slab = LIST_FIRST(&keg->uk_free_slab);
while (slab) {
n = LIST_NEXT(slab, us_link);
@@ -669,11 +692,11 @@ zone_drain(uma_zone_t zone)
}
LIST_REMOVE(slab, us_link);
- zone->uz_pages -= zone->uz_ppera;
- zone->uz_free -= zone->uz_ipers;
+ keg->uk_pages -= keg->uk_ppera;
+ keg->uk_free -= keg->uk_ipers;
- if (zone->uz_flags & UMA_ZONE_HASH)
- UMA_HASH_REMOVE(&zone->uz_hash, slab, slab->us_data);
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ UMA_HASH_REMOVE(&keg->uk_hash, slab, slab->us_data);
SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
@@ -684,34 +707,34 @@ finished:
while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
- if (zone->uz_fini)
- for (i = 0; i < zone->uz_ipers; i++)
- zone->uz_fini(
- slab->us_data + (zone->uz_rsize * i),
- zone->uz_size);
+ if (keg->uk_fini)
+ for (i = 0; i < keg->uk_ipers; i++)
+ keg->uk_fini(
+ slab->us_data + (keg->uk_rsize * i),
+ keg->uk_size);
flags = slab->us_flags;
mem = slab->us_data;
- if (zone->uz_flags & UMA_ZONE_OFFPAGE)
- uma_zfree_internal(slabzone, slab, NULL, 0);
- if (zone->uz_flags & UMA_ZONE_MALLOC) {
+ if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
+ (keg->uk_flags & UMA_ZONE_REFCNT)) {
vm_object_t obj;
if (flags & UMA_SLAB_KMEM)
obj = kmem_object;
else
obj = NULL;
- for (i = 0; i < zone->uz_ppera; i++)
+ for (i = 0; i < keg->uk_ppera; i++)
vsetobj((vm_offset_t)mem + (i * PAGE_SIZE),
obj);
}
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+ uma_zfree_internal(keg->uk_slabzone, slab, NULL, 0);
#ifdef UMA_DEBUG
printf("%s: Returning %d bytes.\n",
- zone->uz_name, UMA_SLAB_SIZE * zone->uz_ppera);
+ zone->uz_name, UMA_SLAB_SIZE * keg->uk_ppera);
#endif
- zone->uz_freef(mem, UMA_SLAB_SIZE * zone->uz_ppera, flags);
+ keg->uk_freef(mem, UMA_SLAB_SIZE * keg->uk_ppera, flags);
}
-
}
/*
@@ -728,20 +751,23 @@ finished:
static uma_slab_t
slab_zalloc(uma_zone_t zone, int wait)
{
- uma_slab_t slab; /* Starting slab */
+ uma_slabrefcnt_t slabref;
+ uma_slab_t slab;
+ uma_keg_t keg;
u_int8_t *mem;
u_int8_t flags;
int i;
slab = NULL;
+ keg = zone->uz_keg;
#ifdef UMA_DEBUG
printf("slab_zalloc: Allocating a new slab for %s\n", zone->uz_name);
#endif
ZONE_UNLOCK(zone);
- if (zone->uz_flags & UMA_ZONE_OFFPAGE) {
- slab = uma_zalloc_internal(slabzone, NULL, wait);
+ if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
+ slab = uma_zalloc_internal(keg->uk_slabzone, NULL, wait);
if (slab == NULL) {
ZONE_LOCK(zone);
return NULL;
@@ -755,12 +781,12 @@ slab_zalloc(uma_zone_t zone, int wait)
* Malloced items are zeroed in uma_zalloc.
*/
- if ((zone->uz_flags & UMA_ZONE_MALLOC) == 0)
+ if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
wait |= M_ZERO;
else
wait &= ~M_ZERO;
- mem = zone->uz_allocf(zone, zone->uz_ppera * UMA_SLAB_SIZE,
+ mem = keg->uk_allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE,
&flags, wait);
if (mem == NULL) {
ZONE_LOCK(zone);
@@ -768,32 +794,39 @@ slab_zalloc(uma_zone_t zone, int wait)
}
/* Point the slab into the allocated memory */
- if (!(zone->uz_flags & UMA_ZONE_OFFPAGE))
- slab = (uma_slab_t )(mem + zone->uz_pgoff);
+ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
+ slab = (uma_slab_t )(mem + keg->uk_pgoff);
- if (zone->uz_flags & UMA_ZONE_MALLOC)
- for (i = 0; i < zone->uz_ppera; i++)
+ if ((keg->uk_flags & UMA_ZONE_MALLOC) ||
+ (keg->uk_flags & UMA_ZONE_REFCNT))
+ for (i = 0; i < keg->uk_ppera; i++)
vsetslab((vm_offset_t)mem + (i * PAGE_SIZE), slab);
- slab->us_zone = zone;
+ slab->us_keg = keg;
slab->us_data = mem;
- slab->us_freecount = zone->uz_ipers;
+ slab->us_freecount = keg->uk_ipers;
slab->us_firstfree = 0;
slab->us_flags = flags;
- for (i = 0; i < zone->uz_ipers; i++)
- slab->us_freelist[i] = i+1;
+ for (i = 0; i < keg->uk_ipers; i++)
+ slab->us_freelist[i].us_item = i+1;
- if (zone->uz_init)
- for (i = 0; i < zone->uz_ipers; i++)
- zone->uz_init(slab->us_data + (zone->uz_rsize * i),
- zone->uz_size);
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ for (i = 0; i < keg->uk_ipers; i++)
+ slabref->us_freelist[i].us_refcnt = 0;
+ }
+
+ if (keg->uk_init)
+ for (i = 0; i < keg->uk_ipers; i++)
+ keg->uk_init(slab->us_data + (keg->uk_rsize * i),
+ keg->uk_size);
ZONE_LOCK(zone);
- if (zone->uz_flags & UMA_ZONE_HASH)
- UMA_HASH_INSERT(&zone->uz_hash, slab, mem);
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
- zone->uz_pages += zone->uz_ppera;
- zone->uz_free += zone->uz_ipers;
+ keg->uk_pages += keg->uk_ppera;
+ keg->uk_free += keg->uk_ipers;
return (slab);
}
@@ -806,6 +839,10 @@ slab_zalloc(uma_zone_t zone, int wait)
static void *
startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
+ uma_keg_t keg;
+
+ keg = zone->uz_keg;
+
/*
* Check our small startup cache to see if it has pages remaining.
*/
@@ -827,11 +864,11 @@ startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
* Now that we've booted reset these users to their real allocator.
*/
#ifdef UMA_MD_SMALL_ALLOC
- zone->uz_allocf = uma_small_alloc;
+ keg->uk_allocf = uma_small_alloc;
#else
- zone->uz_allocf = page_alloc;
+ keg->uk_allocf = page_alloc;
#endif
- return zone->uz_allocf(zone, bytes, pflag, wait);
+ return keg->uk_allocf(zone, bytes, pflag, wait);
}
/*
@@ -877,7 +914,7 @@ obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
vm_page_t p;
int pages, startpages;
- object = zone->uz_obj;
+ object = zone->uz_keg->uk_obj;
retkva = 0;
/*
@@ -887,7 +924,7 @@ obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
p = TAILQ_LAST(&object->memq, pglist);
pages = p != NULL ? p->pindex + 1 : 0;
startpages = pages;
- zkva = zone->uz_kva + pages * PAGE_SIZE;
+ zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE;
for (; bytes > 0; bytes -= PAGE_SIZE) {
p = vm_page_alloc(object, pages,
VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
@@ -965,29 +1002,33 @@ zero_init(void *mem, int size)
static void
zone_small_init(uma_zone_t zone)
{
+ uma_keg_t keg;
int rsize;
int memused;
int ipers;
- rsize = zone->uz_size;
+ keg = zone->uz_keg;
+ KASSERT(keg != NULL, ("Keg is null in zone_small_init"));
+ rsize = keg->uk_size;
if (rsize < UMA_SMALLEST_UNIT)
rsize = UMA_SMALLEST_UNIT;
- if (rsize & zone->uz_align)
- rsize = (rsize & ~zone->uz_align) + (zone->uz_align + 1);
+ if (rsize & keg->uk_align)
+ rsize = (rsize & ~keg->uk_align) + (keg->uk_align + 1);
- zone->uz_rsize = rsize;
+ keg->uk_rsize = rsize;
rsize += 1; /* Account for the byte of linkage */
- zone->uz_ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
- zone->uz_ppera = 1;
+ keg->uk_ipers = (UMA_SLAB_SIZE - sizeof(struct uma_slab)) / rsize;
+ keg->uk_ppera = 1;
- KASSERT(zone->uz_ipers != 0, ("zone_small_init: ipers is 0, uh-oh!"));
- memused = zone->uz_ipers * zone->uz_rsize;
+ KASSERT(keg->uk_ipers != 0, ("zone_small_init: ipers is 0, uh-oh!"));
+ memused = keg->uk_ipers * keg->uk_rsize;
/* Can we do any better? */
- if ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE) {
+ if ((keg->uk_flags & UMA_ZONE_REFCNT) ||
+ ((UMA_SLAB_SIZE - memused) >= UMA_MAX_WASTE)) {
/*
* We can't do this if we're internal or if we've been
* asked to not go to the VM for buckets. If we do this we
@@ -995,15 +1036,16 @@ zone_small_init(uma_zone_t zone)
* do not want to do if we're UMA_ZFLAG_CACHEONLY as a
* result of UMA_ZONE_VM, which clearly forbids it.
*/
- if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) ||
- (zone->uz_flags & UMA_ZFLAG_CACHEONLY))
+ if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
+ (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
return;
- ipers = UMA_SLAB_SIZE / zone->uz_rsize;
- if (ipers > zone->uz_ipers) {
- zone->uz_flags |= UMA_ZONE_OFFPAGE;
- if ((zone->uz_flags & UMA_ZONE_MALLOC) == 0)
- zone->uz_flags |= UMA_ZONE_HASH;
- zone->uz_ipers = ipers;
+ ipers = UMA_SLAB_SIZE / keg->uk_rsize;
+ if ((keg->uk_flags & UMA_ZONE_REFCNT) ||
+ (ipers > keg->uk_ipers)) {
+ keg->uk_flags |= UMA_ZONE_OFFPAGE;
+ if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
+ keg->uk_flags |= UMA_ZONE_HASH;
+ keg->uk_ipers = ipers;
}
}
}
@@ -1022,179 +1064,298 @@ zone_small_init(uma_zone_t zone)
static void
zone_large_init(uma_zone_t zone)
{
+ uma_keg_t keg;
int pages;
- KASSERT((zone->uz_flags & UMA_ZFLAG_CACHEONLY) == 0,
+ keg = zone->uz_keg;
+
+ KASSERT(keg != NULL, ("Keg is null in zone_large_init"));
+ KASSERT((keg->uk_flags & UMA_ZFLAG_CACHEONLY) == 0,
("zone_large_init: Cannot large-init a UMA_ZFLAG_CACHEONLY zone"));
- pages = zone->uz_size / UMA_SLAB_SIZE;
+ pages = keg->uk_size / UMA_SLAB_SIZE;
/* Account for remainder */
- if ((pages * UMA_SLAB_SIZE) < zone->uz_size)
+ if ((pages * UMA_SLAB_SIZE) < keg->uk_size)
pages++;
- zone->uz_ppera = pages;
- zone->uz_ipers = 1;
+ keg->uk_ppera = pages;
+ keg->uk_ipers = 1;
- zone->uz_flags |= UMA_ZONE_OFFPAGE;
- if ((zone->uz_flags & UMA_ZONE_MALLOC) == 0)
- zone->uz_flags |= UMA_ZONE_HASH;
+ keg->uk_flags |= UMA_ZONE_OFFPAGE;
+ if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
+ keg->uk_flags |= UMA_ZONE_HASH;
- zone->uz_rsize = zone->uz_size;
+ keg->uk_rsize = keg->uk_size;
}
/*
- * Zone header ctor. This initializes all fields, locks, etc. And inserts
- * the zone onto the global zone list.
+ * Keg header ctor. This initializes all fields, locks, etc. And inserts
+ * the keg onto the global keg list.
*
* Arguments/Returns follow uma_ctor specifications
- * udata Actually uma_zcreat_args
+ * udata Actually uma_kctor_args
*/
-
static void
-zone_ctor(void *mem, int size, void *udata)
+keg_ctor(void *mem, int size, void *udata)
{
- struct uma_zctor_args *arg = udata;
- uma_zone_t zone = mem;
- int privlc;
+ struct uma_kctor_args *arg = udata;
+ uma_keg_t keg = mem;
+ uma_zone_t zone;
- bzero(zone, size);
- zone->uz_name = arg->name;
- zone->uz_size = arg->size;
- zone->uz_ctor = arg->ctor;
- zone->uz_dtor = arg->dtor;
- zone->uz_init = arg->uminit;
- zone->uz_fini = arg->fini;
- zone->uz_align = arg->align;
- zone->uz_free = 0;
- zone->uz_pages = 0;
- zone->uz_flags = arg->flags;
- zone->uz_allocf = page_alloc;
- zone->uz_freef = page_free;
+ bzero(keg, size);
+ keg->uk_size = arg->size;
+ keg->uk_init = arg->uminit;
+ keg->uk_fini = arg->fini;
+ keg->uk_align = arg->align;
+ keg->uk_free = 0;
+ keg->uk_pages = 0;
+ keg->uk_flags = arg->flags;
+ keg->uk_allocf = page_alloc;
+ keg->uk_freef = page_free;
+ keg->uk_recurse = 0;
+ keg->uk_slabzone = NULL;
- if (arg->flags & UMA_ZONE_ZINIT)
- zone->uz_init = zero_init;
+ /*
+ * The master zone is passed to us at keg-creation time.
+ */
+ zone = arg->zone;
+ zone->uz_keg = keg;
if (arg->flags & UMA_ZONE_VM)
- zone->uz_flags |= UMA_ZFLAG_CACHEONLY;
+ keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
+
+ if (arg->flags & UMA_ZONE_ZINIT)
+ keg->uk_init = zero_init;
/*
- * XXX:
- * The +1 byte added to uz_size is to account for the byte of
+ * The +1 byte added to uk_size is to account for the byte of
* linkage that is added to the size in zone_small_init(). If
* we don't account for this here then we may end up in
* zone_small_init() with a calculated 'ipers' of 0.
*/
- if ((zone->uz_size+1) > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
+ if ((keg->uk_size+1) > (UMA_SLAB_SIZE - sizeof(struct uma_slab)))
zone_large_init(zone);
else
zone_small_init(zone);
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT)
+ keg->uk_slabzone = slabrefzone;
+ else if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+ keg->uk_slabzone = slabzone;
+
/*
* If we haven't booted yet we need allocations to go through the
* startup cache until the vm is ready.
*/
- if (zone->uz_ppera == 1) {
+ if (keg->uk_ppera == 1) {
#ifdef UMA_MD_SMALL_ALLOC
- zone->uz_allocf = uma_small_alloc;
- zone->uz_freef = uma_small_free;
+ keg->uk_allocf = uma_small_alloc;
+ keg->uk_freef = uma_small_free;
#endif
if (booted == 0)
- zone->uz_allocf = startup_alloc;
+ keg->uk_allocf = startup_alloc;
}
+
+ /*
+ * Initialize keg's lock (shared among zones) through
+ * Master zone
+ */
+ zone->uz_lock = &keg->uk_lock;
if (arg->flags & UMA_ZONE_MTXCLASS)
- privlc = 1;
+ ZONE_LOCK_INIT(zone, 1);
else
- privlc = 0;
+ ZONE_LOCK_INIT(zone, 0);
/*
* If we're putting the slab header in the actual page we need to
* figure out where in each page it goes. This calculates a right
* justified offset into the memory on an ALIGN_PTR boundary.
*/
- if (!(zone->uz_flags & UMA_ZONE_OFFPAGE)) {
+ if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
int totsize;
/* Size of the slab struct and free list */
- totsize = sizeof(struct uma_slab) + zone->uz_ipers;
+ totsize = sizeof(struct uma_slab) + keg->uk_ipers;
if (totsize & UMA_ALIGN_PTR)
totsize = (totsize & ~UMA_ALIGN_PTR) +
(UMA_ALIGN_PTR + 1);
- zone->uz_pgoff = UMA_SLAB_SIZE - totsize;
- totsize = zone->uz_pgoff + sizeof(struct uma_slab)
- + zone->uz_ipers;
+ keg->uk_pgoff = UMA_SLAB_SIZE - totsize;
+ totsize = keg->uk_pgoff + sizeof(struct uma_slab)
+ + keg->uk_ipers;
/* I don't think it's possible, but I'll make sure anyway */
if (totsize > UMA_SLAB_SIZE) {
printf("zone %s ipers %d rsize %d size %d\n",
- zone->uz_name, zone->uz_ipers, zone->uz_rsize,
- zone->uz_size);
+ zone->uz_name, keg->uk_ipers, keg->uk_rsize,
+ keg->uk_size);
panic("UMA slab won't fit.\n");
}
}
- if (zone->uz_flags & UMA_ZONE_HASH)
- hash_alloc(&zone->uz_hash);
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ hash_alloc(&keg->uk_hash);
#ifdef UMA_DEBUG
printf("%s(%p) size = %d ipers = %d ppera = %d pgoff = %d\n",
zone->uz_name, zone,
- zone->uz_size, zone->uz_ipers,
- zone->uz_ppera, zone->uz_pgoff);
+ keg->uk_size, keg->uk_ipers,
+ keg->uk_ppera, keg->uk_pgoff);
#endif
- ZONE_LOCK_INIT(zone, privlc);
+
+ LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
mtx_lock(&uma_mtx);
- LIST_INSERT_HEAD(&uma_zones, zone, uz_link);
+ LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
mtx_unlock(&uma_mtx);
+}
+
+/*
+ * Zone header ctor. This initializes all fields, locks, etc.
+ *
+ * Arguments/Returns follow uma_ctor specifications
+ * udata Actually uma_zctor_args
+ */
+
+static void
+zone_ctor(void *mem, int size, void *udata)
+{
+ struct uma_zctor_args *arg = udata;
+ uma_zone_t zone = mem;
+ uma_zone_t z;
+ uma_keg_t keg;
+
+ bzero(zone, size);
+ zone->uz_name = arg->name;
+ zone->uz_ctor = arg->ctor;
+ zone->uz_dtor = arg->dtor;
+ zone->uz_init = NULL;
+ zone->uz_fini = NULL;
+ zone->uz_allocs = 0;
+ zone->uz_fills = zone->uz_count = 0;
+
+ if (arg->flags & UMA_ZONE_SECONDARY) {
+ KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
+ keg = arg->keg;
+ zone->uz_keg = keg;
+ zone->uz_init = arg->uminit;
+ zone->uz_fini = arg->fini;
+ zone->uz_lock = &keg->uk_lock;
+ mtx_lock(&uma_mtx);
+ ZONE_LOCK(zone);
+ keg->uk_flags |= UMA_ZONE_SECONDARY;
+ LIST_FOREACH(z, &keg->uk_zones, uz_link) {
+ if (LIST_NEXT(z, uz_link) == NULL) {
+ LIST_INSERT_AFTER(z, zone, uz_link);
+ break;
+ }
+ }
+ ZONE_UNLOCK(zone);
+ mtx_unlock(&uma_mtx);
+ } else if (arg->keg == NULL) {
+ uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
+ arg->align, arg->flags);
+ } else {
+ struct uma_kctor_args karg;
+
+ /* We should only be here from uma_startup() */
+ karg.size = arg->size;
+ karg.uminit = arg->uminit;
+ karg.fini = arg->fini;
+ karg.align = arg->align;
+ karg.flags = arg->flags;
+ karg.zone = zone;
+ keg_ctor(arg->keg, sizeof(struct uma_keg), &karg);
+ }
+ keg = zone->uz_keg;
+ zone->uz_lock = &keg->uk_lock;
/*
* Some internal zones don't have room allocated for the per cpu
* caches. If we're internal, bail out here.
*/
- if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
+ if (keg->uk_flags & UMA_ZFLAG_INTERNAL) {
+ KASSERT((keg->uk_flags & UMA_ZONE_SECONDARY) == 0,
+ ("Secondary zone requested UMA_ZFLAG_INTERNAL"));
return;
+ }
- if (zone->uz_ipers <= BUCKET_MAX)
- zone->uz_count = zone->uz_ipers;
+ if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
+ zone->uz_count = BUCKET_MAX;
+ else if (keg->uk_ipers <= BUCKET_MAX)
+ zone->uz_count = keg->uk_ipers;
else
zone->uz_count = BUCKET_MAX;
}
/*
- * Zone header dtor. This frees all data, destroys locks, frees the hash table
- * and removes the zone from the global list.
+ * Keg header dtor. This frees all data, destroys locks, frees the hash
+ * table and removes the keg from the global list.
*
* Arguments/Returns follow uma_dtor specifications
* udata unused
*/
+static void
+keg_dtor(void *arg, int size, void *udata)
+{
+ uma_keg_t keg;
+ keg = (uma_keg_t)arg;
+ mtx_lock(&keg->uk_lock);
+ if (keg->uk_free != 0) {
+ printf("Freed UMA keg was not empty (%d items). "
+ " Lost %d pages of memory.\n",
+ keg->uk_free, keg->uk_pages);
+ }
+ mtx_unlock(&keg->uk_lock);
+
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ hash_free(&keg->uk_hash);
+
+ mtx_destroy(&keg->uk_lock);
+}
+
+/*
+ * Zone header dtor.
+ *
+ * Arguments/Returns follow uma_dtor specifications
+ * udata unused
+ */
static void
zone_dtor(void *arg, int size, void *udata)
{
uma_zone_t zone;
+ uma_keg_t keg;
zone = (uma_zone_t)arg;
+ keg = zone->uz_keg;
- if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
+ if (!(keg->uk_flags & UMA_ZFLAG_INTERNAL))
cache_drain(zone);
+
mtx_lock(&uma_mtx);
- LIST_REMOVE(zone, uz_link);
zone_drain(zone);
- mtx_unlock(&uma_mtx);
-
- ZONE_LOCK(zone);
- if (zone->uz_free != 0) {
- printf("Zone %s was not empty (%d items). "
- " Lost %d pages of memory.\n",
- zone->uz_name, zone->uz_free, zone->uz_pages);
- uma_print_zone(zone);
+ if (keg->uk_flags & UMA_ZONE_SECONDARY) {
+ LIST_REMOVE(zone, uz_link);
+ /*
+ * XXX there are some races here where
+ * the zone can be drained but zone lock
+ * released and then refilled before we
+ * remove it... we dont care for now
+ */
+ ZONE_LOCK(zone);
+ if (LIST_EMPTY(&keg->uk_zones))
+ keg->uk_flags &= ~UMA_ZONE_SECONDARY;
+ ZONE_UNLOCK(zone);
+ mtx_unlock(&uma_mtx);
+ } else {
+ LIST_REMOVE(keg, uk_link);
+ LIST_REMOVE(zone, uz_link);
+ mtx_unlock(&uma_mtx);
+ uma_zfree_internal(kegs, keg, NULL, 0);
}
-
- ZONE_UNLOCK(zone);
- if (zone->uz_flags & UMA_ZONE_HASH)
- hash_free(&zone->uz_hash);
-
- ZONE_LOCK_FINI(zone);
+ zone->uz_keg = NULL;
}
+
/*
* Traverses every zone in the system and calls a callback
*
@@ -1208,11 +1369,14 @@ zone_dtor(void *arg, int size, void *udata)
static void
zone_foreach(void (*zfunc)(uma_zone_t))
{
+ uma_keg_t keg;
uma_zone_t zone;
mtx_lock(&uma_mtx);
- LIST_FOREACH(zone, &uma_zones, uz_link)
- zfunc(zone);
+ LIST_FOREACH(keg, &uma_kegs, uk_link) {
+ LIST_FOREACH(zone, &keg->uk_zones, uz_link)
+ zfunc(zone);
+ }
mtx_unlock(&uma_mtx);
}
@@ -1227,25 +1391,23 @@ uma_startup(void *bootmem)
int i;
#ifdef UMA_DEBUG
- printf("Creating uma zone headers zone.\n");
+ printf("Creating uma keg headers zone and keg.\n");
#endif
mtx_init(&uma_mtx, "UMA lock", NULL, MTX_DEF);
- /* "manually" Create the initial zone */
- args.name = "UMA Zones";
- args.size = sizeof(struct uma_zone) +
- (sizeof(struct uma_cache) * (mp_maxid + 1));
- args.ctor = zone_ctor;
- args.dtor = zone_dtor;
+
+ /* "manually" create the initial zone */
+ args.name = "UMA Kegs";
+ args.size = sizeof(struct uma_keg);
+ args.ctor = keg_ctor;
+ args.dtor = keg_dtor;
args.uminit = zero_init;
args.fini = NULL;
+ args.keg = &masterkeg;
args.align = 32 - 1;
args.flags = UMA_ZFLAG_INTERNAL;
/* The initial zone has no Per cpu queues so it's smaller */
- zone_ctor(zones, sizeof(struct uma_zone), &args);
+ zone_ctor(kegs, sizeof(struct uma_zone), &args);
- /* Initialize the pcpu cache lock set once and for all */
- for (i = 0; i <= mp_maxid; i++)
- CPU_LOCK_INIT(i);
#ifdef UMA_DEBUG
printf("Filling boot free list.\n");
#endif
@@ -1258,7 +1420,30 @@ uma_startup(void *bootmem)
}
#ifdef UMA_DEBUG
- printf("Creating slab zone.\n");
+ printf("Creating uma zone headers zone and keg.\n");
+#endif
+ args.name = "UMA Zones";
+ args.size = sizeof(struct uma_zone) +
+ (sizeof(struct uma_cache) * (mp_maxid + 1));
+ args.ctor = zone_ctor;
+ args.dtor = zone_dtor;
+ args.uminit = zero_init;
+ args.fini = NULL;
+ args.keg = NULL;
+ args.align = 32 - 1;
+ args.flags = UMA_ZFLAG_INTERNAL;
+ /* The initial zone has no Per cpu queues so it's smaller */
+ zone_ctor(zones, sizeof(struct uma_zone), &args);
+
+#ifdef UMA_DEBUG
+ printf("Initializing pcpu cache locks.\n");
+#endif
+ /* Initialize the pcpu cache lock set once and for all */
+ for (i = 0; i <= mp_maxid; i++)
+ CPU_LOCK_INIT(i);
+
+#ifdef UMA_DEBUG
+ printf("Creating slab and hash zones.\n");
#endif
/*
@@ -1276,6 +1461,20 @@ uma_startup(void *bootmem)
NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
+ /*
+ * We also create a zone for the bigger slabs with reference
+ * counts in them, to accommodate UMA_ZONE_REFCNT zones.
+ */
+ slabsize = UMA_SLAB_SIZE - sizeof(struct uma_slab_refcnt);
+ slabsize /= UMA_MAX_WASTE;
+ slabsize++;
+ slabsize += 4 * slabsize;
+ slabsize += sizeof(struct uma_slab_refcnt);
+ slabrefzone = uma_zcreate("UMA RCntSlabs",
+ slabsize,
+ NULL, NULL, NULL, NULL,
+ UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
+
hashzone = uma_zcreate("UMA Hash",
sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
NULL, NULL, NULL, NULL,
@@ -1321,6 +1520,21 @@ uma_startup3(void)
#endif
}
+static void
+uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
+ int align, u_int16_t flags)
+{
+ struct uma_kctor_args args;
+
+ args.size = size;
+ args.uminit = uminit;
+ args.fini = fini;
+ args.align = align;
+ args.flags = flags;
+ args.zone = zone;
+ zone = uma_zalloc_internal(kegs, &args, M_WAITOK);
+}
+
/* See uma.h */
uma_zone_t
uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
@@ -1338,6 +1552,27 @@ uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
args.fini = fini;
args.align = align;
args.flags = flags;
+ args.keg = NULL;
+
+ return (uma_zalloc_internal(zones, &args, M_WAITOK));
+}
+
+/* See uma.h */
+uma_zone_t
+uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
+ uma_init zinit, uma_fini zfini, uma_zone_t master)
+{
+ struct uma_zctor_args args;
+
+ args.name = name;
+ args.size = master->uz_keg->uk_size;
+ args.ctor = ctor;
+ args.dtor = dtor;
+ args.uminit = zinit;
+ args.fini = zfini;
+ args.align = master->uz_keg->uk_align;
+ args.flags = master->uz_keg->uk_flags | UMA_ZONE_SECONDARY;
+ args.keg = master->uz_keg;
return (uma_zalloc_internal(zones, &args, M_WAITOK));
}
@@ -1357,35 +1592,25 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
uma_cache_t cache;
uma_bucket_t bucket;
int cpu;
+ int badness = 1;
/* This is the fast path allocation */
#ifdef UMA_DEBUG_ALLOC_1
printf("Allocating one item from %s(%p)\n", zone->uz_name, zone);
#endif
-#ifdef INVARIANTS
- /*
- * To make sure that WAITOK or NOWAIT is set, but not more than
- * one, and check against the API botches that are common.
- * The uma code implies M_WAITOK if M_NOWAIT is not set, so
- * we default to waiting if none of the flags is set.
- */
- cpu = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
- if (cpu != M_NOWAIT && cpu != M_WAITOK) {
- static struct timeval lasterr;
- static int curerr, once;
- if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
- printf("Bad uma_zalloc flags: %x\n", cpu);
- backtrace();
- once++;
- }
- }
-#endif
if (!(flags & M_NOWAIT)) {
KASSERT(curthread->td_intr_nesting_level == 0,
("malloc(M_WAITOK) in interrupt context"));
- WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
- "malloc() of \"%s\"", zone->uz_name);
+#ifdef WITNESS
+ badness = WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
+ "malloc(M_WAITOK) of \"%s\", forcing M_NOWAIT",
+ zone->uz_name);
+#endif
+ if (badness) {
+ flags &= ~M_WAITOK;
+ flags |= M_NOWAIT;
+ }
}
zalloc_restart:
@@ -1413,9 +1638,9 @@ zalloc_start:
#endif
CPU_UNLOCK(cpu);
if (zone->uz_ctor)
- zone->uz_ctor(item, zone->uz_size, udata);
+ zone->uz_ctor(item,zone->uz_keg->uk_size,udata);
if (flags & M_ZERO)
- bzero(item, zone->uz_size);
+ bzero(item, zone->uz_keg->uk_size);
return (item);
} else if (cache->uc_freebucket) {
/*
@@ -1465,6 +1690,7 @@ zalloc_start:
/* Bump up our uz_count so we get here less */
if (zone->uz_count < BUCKET_MAX)
zone->uz_count++;
+
/*
* Now lets just fill a bucket and put it on the free list. If that
* works we'll restart the allocation from the begining.
@@ -1488,6 +1714,9 @@ static uma_slab_t
uma_zone_slab(uma_zone_t zone, int flags)
{
uma_slab_t slab;
+ uma_keg_t keg;
+
+ keg = zone->uz_keg;
/*
* This is to prevent us from recursively trying to allocate
@@ -1498,7 +1727,7 @@ uma_zone_slab(uma_zone_t zone, int flags)
* things happen. So instead we return a NULL bucket, and make
* the code that allocates buckets smart enough to deal with it
*/
- if (zone->uz_flags & UMA_ZFLAG_INTERNAL && zone->uz_recurse != 0)
+ if (keg->uk_flags & UMA_ZFLAG_INTERNAL && keg->uk_recurse != 0)
return (NULL);
slab = NULL;
@@ -1509,14 +1738,14 @@ uma_zone_slab(uma_zone_t zone, int flags)
* used over those that are totally full. This helps to reduce
* fragmentation.
*/
- if (zone->uz_free != 0) {
- if (!LIST_EMPTY(&zone->uz_part_slab)) {
- slab = LIST_FIRST(&zone->uz_part_slab);
+ if (keg->uk_free != 0) {
+ if (!LIST_EMPTY(&keg->uk_part_slab)) {
+ slab = LIST_FIRST(&keg->uk_part_slab);
} else {
- slab = LIST_FIRST(&zone->uz_free_slab);
+ slab = LIST_FIRST(&keg->uk_free_slab);
LIST_REMOVE(slab, us_link);
- LIST_INSERT_HEAD(&zone->uz_part_slab, slab,
- us_link);
+ LIST_INSERT_HEAD(&keg->uk_part_slab, slab,
+ us_link);
}
return (slab);
}
@@ -1527,27 +1756,28 @@ uma_zone_slab(uma_zone_t zone, int flags)
if (flags & M_NOVM)
break;
- if (zone->uz_maxpages &&
- zone->uz_pages >= zone->uz_maxpages) {
- zone->uz_flags |= UMA_ZFLAG_FULL;
+ if (keg->uk_maxpages &&
+ keg->uk_pages >= keg->uk_maxpages) {
+ keg->uk_flags |= UMA_ZFLAG_FULL;
if (flags & M_NOWAIT)
break;
else
- msleep(zone, &zone->uz_lock, PVM,
+ msleep(keg, &keg->uk_lock, PVM,
"zonelimit", 0);
continue;
}
- zone->uz_recurse++;
+ keg->uk_recurse++;
slab = slab_zalloc(zone, flags);
- zone->uz_recurse--;
+ keg->uk_recurse--;
+
/*
* If we got a slab here it's safe to mark it partially used
* and return. We assume that the caller is going to remove
* at least one item.
*/
if (slab) {
- LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
return (slab);
}
/*
@@ -1564,22 +1794,25 @@ uma_zone_slab(uma_zone_t zone, int flags)
static void *
uma_slab_alloc(uma_zone_t zone, uma_slab_t slab)
{
+ uma_keg_t keg;
void *item;
u_int8_t freei;
+ keg = zone->uz_keg;
+
freei = slab->us_firstfree;
- slab->us_firstfree = slab->us_freelist[freei];
- item = slab->us_data + (zone->uz_rsize * freei);
+ slab->us_firstfree = slab->us_freelist[freei].us_item;
+ item = slab->us_data + (keg->uk_rsize * freei);
slab->us_freecount--;
- zone->uz_free--;
+ keg->uk_free--;
#ifdef INVARIANTS
uma_dbg_alloc(zone, slab, item);
#endif
/* Move this slab to the full list */
if (slab->us_freecount == 0) {
LIST_REMOVE(slab, us_link);
- LIST_INSERT_HEAD(&zone->uz_full_slab, slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_full_slab, slab, us_link);
}
return (item);
@@ -1590,6 +1823,7 @@ uma_zalloc_bucket(uma_zone_t zone, int flags)
{
uma_bucket_t bucket;
uma_slab_t slab;
+ int16_t saved;
int max;
/*
@@ -1603,7 +1837,7 @@ uma_zalloc_bucket(uma_zone_t zone, int flags)
int bflags;
bflags = (flags & ~M_ZERO);
- if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
+ if (zone->uz_keg->uk_flags & UMA_ZFLAG_CACHEONLY)
bflags |= M_NOVM;
ZONE_UNLOCK(zone);
@@ -1628,18 +1862,36 @@ uma_zalloc_bucket(uma_zone_t zone, int flags)
max = MIN(bucket->ub_entries, zone->uz_count);
/* Try to keep the buckets totally full */
+ saved = bucket->ub_cnt;
while (bucket->ub_cnt < max &&
(slab = uma_zone_slab(zone, flags)) != NULL) {
while (slab->us_freecount && bucket->ub_cnt < max) {
bucket->ub_bucket[bucket->ub_cnt++] =
uma_slab_alloc(zone, slab);
}
+
/* Don't block on the next fill */
flags |= M_NOWAIT;
}
- zone->uz_fills--;
+ /*
+ * We unlock here because we need to call the zone's init.
+ * It should be safe to unlock because the slab dealt with
+ * above is already on the appropriate list within the keg
+ * and the bucket we filled is not yet on any list, so we
+ * own it.
+ */
+ if (zone->uz_init != NULL) {
+ int i;
+
+ ZONE_UNLOCK(zone);
+ for (i = saved; i < bucket->ub_cnt; i++)
+ zone->uz_init(bucket->ub_bucket[i],
+ zone->uz_keg->uk_size);
+ ZONE_LOCK(zone);
+ }
+ zone->uz_fills--;
if (bucket->ub_cnt != 0) {
LIST_INSERT_HEAD(&zone->uz_full_bucket,
bucket, ub_link);
@@ -1668,10 +1920,12 @@ done:
static void *
uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
{
+ uma_keg_t keg;
uma_slab_t slab;
void *item;
item = NULL;
+ keg = zone->uz_keg;
#ifdef UMA_DEBUG_ALLOC
printf("INTERNAL: Allocating one item from %s(%p)\n", zone->uz_name, zone);
@@ -1688,10 +1942,18 @@ uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
ZONE_UNLOCK(zone);
+ /*
+ * We have to call both the zone's init (not the keg's init)
+ * and the zone's ctor. This is because the item is going from
+ * a keg slab directly to the user, and the user is expecting it
+ * to be both zone-init'd as well as zone-ctor'd.
+ */
+ if (zone->uz_init != NULL)
+ zone->uz_init(item, keg->uk_size);
if (zone->uz_ctor != NULL)
- zone->uz_ctor(item, zone->uz_size, udata);
+ zone->uz_ctor(item, keg->uk_size, udata);
if (flags & M_ZERO)
- bzero(item, zone->uz_size);
+ bzero(item, keg->uk_size);
return (item);
}
@@ -1700,6 +1962,7 @@ uma_zalloc_internal(uma_zone_t zone, void *udata, int flags)
void
uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
{
+ uma_keg_t keg;
uma_cache_t cache;
uma_bucket_t bucket;
int bflags;
@@ -1708,6 +1971,8 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
/* This is the fast path free */
skip = 0;
+ keg = zone->uz_keg;
+
#ifdef UMA_DEBUG_ALLOC_1
printf("Freeing item %p to %s(%p)\n", item, zone->uz_name, zone);
#endif
@@ -1716,11 +1981,11 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
* a little longer for the limits to be reset.
*/
- if (zone->uz_flags & UMA_ZFLAG_FULL)
+ if (keg->uk_flags & UMA_ZFLAG_FULL)
goto zfree_internal;
if (zone->uz_dtor) {
- zone->uz_dtor(item, zone->uz_size, udata);
+ zone->uz_dtor(item, keg->uk_size, udata);
skip = 1;
}
@@ -1745,7 +2010,7 @@ zfree_start:
bucket->ub_cnt++;
#ifdef INVARIANTS
ZONE_LOCK(zone);
- if (zone->uz_flags & UMA_ZONE_MALLOC)
+ if (keg->uk_flags & UMA_ZONE_MALLOC)
uma_dbg_free(zone, udata, item);
else
uma_dbg_free(zone, NULL, item);
@@ -1810,7 +2075,7 @@ zfree_start:
#endif
bflags = M_NOWAIT;
- if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
+ if (keg->uk_flags & UMA_ZFLAG_CACHEONLY)
bflags |= M_NOVM;
bucket = bucket_alloc(zone->uz_count, bflags);
if (bucket) {
@@ -1836,7 +2101,7 @@ zfree_internal:
*/
if (skip) {
ZONE_LOCK(zone);
- if (zone->uz_flags & UMA_ZONE_MALLOC)
+ if (keg->uk_flags & UMA_ZONE_MALLOC)
uma_dbg_free(zone, udata, item);
else
uma_dbg_free(zone, NULL, item);
@@ -1846,7 +2111,6 @@ zfree_internal:
uma_zfree_internal(zone, item, udata, skip);
return;
-
}
/*
@@ -1862,20 +2126,25 @@ static void
uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
{
uma_slab_t slab;
+ uma_keg_t keg;
u_int8_t *mem;
u_int8_t freei;
+ keg = zone->uz_keg;
+
if (!skip && zone->uz_dtor)
- zone->uz_dtor(item, zone->uz_size, udata);
+ zone->uz_dtor(item, keg->uk_size, udata);
+ if (zone->uz_fini)
+ zone->uz_fini(item, keg->uk_size);
ZONE_LOCK(zone);
- if (!(zone->uz_flags & UMA_ZONE_MALLOC)) {
+ if (!(keg->uk_flags & UMA_ZONE_MALLOC)) {
mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
- if (zone->uz_flags & UMA_ZONE_HASH)
- slab = hash_sfind(&zone->uz_hash, mem);
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ slab = hash_sfind(&keg->uk_hash, mem);
else {
- mem += zone->uz_pgoff;
+ mem += keg->uk_pgoff;
slab = (uma_slab_t)mem;
}
} else {
@@ -1883,36 +2152,36 @@ uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
}
/* Do we need to remove from any lists? */
- if (slab->us_freecount+1 == zone->uz_ipers) {
+ if (slab->us_freecount+1 == keg->uk_ipers) {
LIST_REMOVE(slab, us_link);
- LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
} else if (slab->us_freecount == 0) {
LIST_REMOVE(slab, us_link);
- LIST_INSERT_HEAD(&zone->uz_part_slab, slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_part_slab, slab, us_link);
}
/* Slab management stuff */
freei = ((unsigned long)item - (unsigned long)slab->us_data)
- / zone->uz_rsize;
+ / keg->uk_rsize;
#ifdef INVARIANTS
if (!skip)
uma_dbg_free(zone, slab, item);
#endif
- slab->us_freelist[freei] = slab->us_firstfree;
+ slab->us_freelist[freei].us_item = slab->us_firstfree;
slab->us_firstfree = freei;
slab->us_freecount++;
/* Zone statistics */
- zone->uz_free++;
+ keg->uk_free++;
- if (zone->uz_flags & UMA_ZFLAG_FULL) {
- if (zone->uz_pages < zone->uz_maxpages)
- zone->uz_flags &= ~UMA_ZFLAG_FULL;
+ if (keg->uk_flags & UMA_ZFLAG_FULL) {
+ if (keg->uk_pages < keg->uk_maxpages)
+ keg->uk_flags &= ~UMA_ZFLAG_FULL;
/* We can handle one more allocation */
- wakeup_one(zone);
+ wakeup_one(keg);
}
ZONE_UNLOCK(zone);
@@ -1922,24 +2191,71 @@ uma_zfree_internal(uma_zone_t zone, void *item, void *udata, int skip)
void
uma_zone_set_max(uma_zone_t zone, int nitems)
{
+ uma_keg_t keg;
+
+ keg = zone->uz_keg;
ZONE_LOCK(zone);
- if (zone->uz_ppera > 1)
- zone->uz_maxpages = nitems * zone->uz_ppera;
+ if (keg->uk_ppera > 1)
+ keg->uk_maxpages = nitems * keg->uk_ppera;
else
- zone->uz_maxpages = nitems / zone->uz_ipers;
+ keg->uk_maxpages = nitems / keg->uk_ipers;
- if (zone->uz_maxpages * zone->uz_ipers < nitems)
- zone->uz_maxpages++;
+ if (keg->uk_maxpages * keg->uk_ipers < nitems)
+ keg->uk_maxpages++;
ZONE_UNLOCK(zone);
}
/* See uma.h */
void
+uma_zone_set_init(uma_zone_t zone, uma_init uminit)
+{
+ ZONE_LOCK(zone);
+ KASSERT(zone->uz_keg->uk_pages == 0,
+ ("uma_zone_set_init on non-empty keg"));
+ zone->uz_keg->uk_init = uminit;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
+{
+ ZONE_LOCK(zone);
+ KASSERT(zone->uz_keg->uk_pages == 0,
+ ("uma_zone_set_fini on non-empty keg"));
+ zone->uz_keg->uk_fini = fini;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
+{
+ ZONE_LOCK(zone);
+ KASSERT(zone->uz_keg->uk_pages == 0,
+ ("uma_zone_set_zinit on non-empty keg"));
+ zone->uz_init = zinit;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
+uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
+{
+ ZONE_LOCK(zone);
+ KASSERT(zone->uz_keg->uk_pages == 0,
+ ("uma_zone_set_zfini on non-empty keg"));
+ zone->uz_fini = zfini;
+ ZONE_UNLOCK(zone);
+}
+
+/* See uma.h */
+void
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{
ZONE_LOCK(zone);
- zone->uz_freef = freef;
+ zone->uz_keg->uk_freef = freef;
ZONE_UNLOCK(zone);
}
@@ -1948,8 +2264,8 @@ void
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
ZONE_LOCK(zone);
- zone->uz_flags |= UMA_ZFLAG_PRIVALLOC;
- zone->uz_allocf = allocf;
+ zone->uz_keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
+ zone->uz_keg->uk_allocf = allocf;
ZONE_UNLOCK(zone);
}
@@ -1957,12 +2273,14 @@ uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
int
uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
{
- int pages;
+ uma_keg_t keg;
vm_offset_t kva;
+ int pages;
- pages = count / zone->uz_ipers;
+ keg = zone->uz_keg;
+ pages = count / keg->uk_ipers;
- if (pages * zone->uz_ipers < count)
+ if (pages * keg->uk_ipers < count)
pages++;
kva = kmem_alloc_pageable(kernel_map, pages * UMA_SLAB_SIZE);
@@ -1978,11 +2296,11 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
pages, obj);
}
ZONE_LOCK(zone);
- zone->uz_kva = kva;
- zone->uz_obj = obj;
- zone->uz_maxpages = pages;
- zone->uz_allocf = obj_alloc;
- zone->uz_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
+ keg->uk_kva = kva;
+ keg->uk_obj = obj;
+ keg->uk_maxpages = pages;
+ keg->uk_allocf = obj_alloc;
+ keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
ZONE_UNLOCK(zone);
return (1);
}
@@ -1993,20 +2311,41 @@ uma_prealloc(uma_zone_t zone, int items)
{
int slabs;
uma_slab_t slab;
+ uma_keg_t keg;
+ keg = zone->uz_keg;
ZONE_LOCK(zone);
- slabs = items / zone->uz_ipers;
- if (slabs * zone->uz_ipers < items)
+ slabs = items / keg->uk_ipers;
+ if (slabs * keg->uk_ipers < items)
slabs++;
while (slabs > 0) {
slab = slab_zalloc(zone, M_WAITOK);
- LIST_INSERT_HEAD(&zone->uz_free_slab, slab, us_link);
+ LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
slabs--;
}
ZONE_UNLOCK(zone);
}
/* See uma.h */
+u_int32_t *
+uma_find_refcnt(uma_zone_t zone, void *item)
+{
+ uma_slabrefcnt_t slab;
+ uma_keg_t keg;
+ u_int32_t *refcnt;
+ int idx;
+
+ keg = zone->uz_keg;
+ slab = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item & (~UMA_SLAB_MASK));
+ KASSERT(slab != NULL,
+ ("uma_find_refcnt(): zone possibly not UMA_ZONE_REFCNT"));
+ idx = ((unsigned long)item - (unsigned long)slab->us_data)
+ / keg->uk_rsize;
+ refcnt = &(slab->us_freelist[idx].us_refcnt);
+ return refcnt;
+}
+
+/* See uma.h */
void
uma_reclaim(void)
{
@@ -2021,6 +2360,7 @@ uma_reclaim(void)
* zones are drained. We have to do the same for buckets.
*/
zone_drain(slabzone);
+ zone_drain(slabrefzone);
bucket_zone_drain();
}
@@ -2044,7 +2384,6 @@ uma_large_malloc(int size, int wait)
uma_zfree_internal(slabzone, slab, NULL, 0);
}
-
return (mem);
}
@@ -2065,8 +2404,8 @@ uma_print_stats(void)
static void
slab_print(uma_slab_t slab)
{
- printf("slab: zone %p, data %p, freecount %d, firstfree %d\n",
- slab->us_zone, slab->us_data, slab->us_freecount,
+ printf("slab: keg %p, data %p, freecount %d, firstfree %d\n",
+ slab->us_keg, slab->us_data, slab->us_freecount,
slab->us_firstfree);
}
@@ -2084,21 +2423,23 @@ void
uma_print_zone(uma_zone_t zone)
{
uma_cache_t cache;
+ uma_keg_t keg;
uma_slab_t slab;
int i;
+ keg = zone->uz_keg;
printf("%s(%p) size %d(%d) flags %d ipers %d ppera %d out %d free %d\n",
- zone->uz_name, zone, zone->uz_size, zone->uz_rsize, zone->uz_flags,
- zone->uz_ipers, zone->uz_ppera,
- (zone->uz_ipers * zone->uz_pages) - zone->uz_free, zone->uz_free);
+ zone->uz_name, zone, keg->uk_size, keg->uk_rsize, keg->uk_flags,
+ keg->uk_ipers, keg->uk_ppera,
+ (keg->uk_ipers * keg->uk_pages) - keg->uk_free, keg->uk_free);
printf("Part slabs:\n");
- LIST_FOREACH(slab, &zone->uz_part_slab, us_link)
+ LIST_FOREACH(slab, &keg->uk_part_slab, us_link)
slab_print(slab);
printf("Free slabs:\n");
- LIST_FOREACH(slab, &zone->uz_free_slab, us_link)
+ LIST_FOREACH(slab, &keg->uk_free_slab, us_link)
slab_print(slab);
printf("Full slabs:\n");
- LIST_FOREACH(slab, &zone->uz_full_slab, us_link)
+ LIST_FOREACH(slab, &keg->uk_full_slab, us_link)
slab_print(slab);
for (i = 0; i <= mp_maxid; i++) {
if (CPU_ABSENT(i))
@@ -2122,6 +2463,7 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
int totalfree;
char *tmpbuf, *offset;
uma_zone_t z;
+ uma_keg_t zk;
char *p;
int cpu;
int cachefree;
@@ -2130,8 +2472,10 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
cnt = 0;
mtx_lock(&uma_mtx);
- LIST_FOREACH(z, &uma_zones, uz_link)
- cnt++;
+ LIST_FOREACH(zk, &uma_kegs, uk_link) {
+ LIST_FOREACH(z, &zk->uk_zones, uz_link)
+ cnt++;
+ }
mtx_unlock(&uma_mtx);
MALLOC(tmpbuf, char *, (cnt == 0 ? 1 : cnt) * linesize,
M_TEMP, M_WAITOK);
@@ -2144,10 +2488,11 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
goto out;
offset = tmpbuf;
mtx_lock(&uma_mtx);
- LIST_FOREACH(z, &uma_zones, uz_link) {
+ LIST_FOREACH(zk, &uma_kegs, uk_link) {
+ LIST_FOREACH(z, &zk->uk_zones, uz_link) {
if (cnt == 0) /* list may have changed size */
break;
- if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) {
+ if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
for (cpu = 0; cpu <= mp_maxid; cpu++) {
if (CPU_ABSENT(cpu))
continue;
@@ -2156,7 +2501,7 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
}
ZONE_LOCK(z);
cachefree = 0;
- if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) {
+ if (!(zk->uk_flags & UMA_ZFLAG_INTERNAL)) {
for (cpu = 0; cpu <= mp_maxid; cpu++) {
if (CPU_ABSENT(cpu))
continue;
@@ -2171,12 +2516,12 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
LIST_FOREACH(bucket, &z->uz_full_bucket, ub_link) {
cachefree += bucket->ub_cnt;
}
- totalfree = z->uz_free + cachefree;
+ totalfree = zk->uk_free + cachefree;
len = snprintf(offset, linesize,
"%-12.12s %6.6u, %8.8u, %6.6u, %6.6u, %8.8llu\n",
- z->uz_name, z->uz_size,
- z->uz_maxpages * z->uz_ipers,
- (z->uz_ipers * (z->uz_pages / z->uz_ppera)) - totalfree,
+ z->uz_name, zk->uk_size,
+ zk->uk_maxpages * zk->uk_ipers,
+ (zk->uk_ipers * (zk->uk_pages / zk->uk_ppera)) - totalfree,
totalfree,
(unsigned long long)z->uz_allocs);
ZONE_UNLOCK(z);
@@ -2185,6 +2530,7 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
p[1] = ':';
cnt--;
offset += len;
+ }
}
mtx_unlock(&uma_mtx);
*offset++ = '\0';
diff --git a/sys/vm/uma_dbg.c b/sys/vm/uma_dbg.c
index 85d067d..0f845cf 100644
--- a/sys/vm/uma_dbg.c
+++ b/sys/vm/uma_dbg.c
@@ -192,15 +192,17 @@ static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
uma_slab_t slab;
+ uma_keg_t keg;
u_int8_t *mem;
+ keg = zone->uz_keg;
mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
- if (zone->uz_flags & UMA_ZONE_MALLOC) {
+ if (keg->uk_flags & UMA_ZONE_MALLOC) {
slab = vtoslab((vm_offset_t)mem);
- } else if (zone->uz_flags & UMA_ZONE_HASH) {
- slab = hash_sfind(&zone->uz_hash, mem);
+ } else if (keg->uk_flags & UMA_ZONE_HASH) {
+ slab = hash_sfind(&keg->uk_hash, mem);
} else {
- mem += zone->uz_pgoff;
+ mem += keg->uk_pgoff;
slab = (uma_slab_t)mem;
}
@@ -215,8 +217,10 @@ uma_dbg_getslab(uma_zone_t zone, void *item)
void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
+ uma_keg_t keg;
int freei;
+ keg = zone->uz_keg;
if (slab == NULL) {
slab = uma_dbg_getslab(zone, item);
if (slab == NULL)
@@ -225,9 +229,9 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
}
freei = ((unsigned long)item - (unsigned long)slab->us_data)
- / zone->uz_rsize;
+ / keg->uk_rsize;
- slab->us_freelist[freei] = 255;
+ slab->us_freelist[freei].us_item = 255;
return;
}
@@ -241,8 +245,10 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
+ uma_keg_t keg;
int freei;
+ keg = zone->uz_keg;
if (slab == NULL) {
slab = uma_dbg_getslab(zone, item);
if (slab == NULL)
@@ -251,22 +257,22 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
}
freei = ((unsigned long)item - (unsigned long)slab->us_data)
- / zone->uz_rsize;
+ / keg->uk_rsize;
- if (freei >= zone->uz_ipers)
+ if (freei >= keg->uk_ipers)
panic("zone: %s(%p) slab %p freelist %d out of range 0-%d\n",
- zone->uz_name, zone, slab, freei, zone->uz_ipers-1);
+ zone->uz_name, zone, slab, freei, keg->uk_ipers-1);
- if (((freei * zone->uz_rsize) + slab->us_data) != item) {
+ if (((freei * keg->uk_rsize) + slab->us_data) != item) {
printf("zone: %s(%p) slab %p freed address %p unaligned.\n",
zone->uz_name, zone, slab, item);
panic("should be %p\n",
- (freei * zone->uz_rsize) + slab->us_data);
+ (freei * keg->uk_rsize) + slab->us_data);
}
- if (slab->us_freelist[freei] != 255) {
+ if (slab->us_freelist[freei].us_item != 255) {
printf("Slab at %p, freei %d = %d.\n",
- slab, freei, slab->us_freelist[freei]);
+ slab, freei, slab->us_freelist[freei].us_item);
panic("Duplicate free of item %p from zone %p(%s)\n",
item, zone, zone->uz_name);
}
@@ -276,5 +282,5 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
* Until then the count of valid slabs will make sure we don't
* accidentally follow this and assume it's a valid index.
*/
- slab->us_freelist[freei] = 0;
+ slab->us_freelist[freei].us_item = 0;
}
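All of the uma_dbg.c changes above rely on the same index calculation, now performed against the keg's real item size and the per-item free-list entry. A condensed sketch follows, with a hypothetical wrapper around logic that the patch open-codes in uma_dbg_alloc() and uma_dbg_free():

    /* Sketch: derive an item's free-list index within its slab and mark it. */
    static void
    dbg_mark_sketch(uma_keg_t keg, uma_slab_t slab, void *item, int allocating)
    {
        int freei;

        freei = ((unsigned long)item - (unsigned long)slab->us_data)
            / keg->uk_rsize;
        /*
         * 255 marks the entry as allocated; on free it is reset to 0,
         * which is safe only because the slab's count of valid items
         * bounds any later traversal of the free list.
         */
        slab->us_freelist[freei].us_item = allocating ? 255 : 0;
    }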
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index 35acfde..a4cbe5f 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -35,10 +35,10 @@
/*
* Here's a quick description of the relationship between the objects:
*
- * Zones contain lists of slabs which are stored in either the full bin, empty
+ * Kegs contain lists of slabs which are stored in either the full bin, empty
* bin, or partially allocated bin, to reduce fragmentation. They also contain
* the user supplied value for size, which is adjusted for alignment purposes
- * and rsize is the result of that. The zone also stores information for
+ * and rsize is the result of that. The Keg also stores information for
* managing a hash of page addresses that maps pages to uma_slab_t structures
* for pages that don't have embedded uma_slab_t's.
*
@@ -67,6 +67,20 @@
* so at this time it may not make sense to optimize for it. This can, of
* course, be solved with dynamic slab sizes.
*
+ * Kegs may serve multiple Zones but by far most of the time they only serve
+ * one. When a Zone is created, a Keg is allocated and set up for it. While
+ * the backing Keg stores slabs, the Zone caches Buckets of items allocated
+ * from the slabs. Each Zone is equipped with an init/fini and ctor/dtor
+ * pair, as well as with its own set of small per-CPU caches, layered above
+ * the Zone's general Bucket cache.
+ *
+ * The PCPU caches are protected by their own locks, while the Zones backed
+ * by the same Keg all share a common Keg lock (to coalesce contention on
+ * the backing slabs). When a Keg backs more than one Zone, one of those
+ * Zones is designated the Master Zone, and all Zone-related statistics
+ * kept in the Keg are reported against that Master Zone. For an example
+ * of a multi-Zone setup, refer to the Mbuf allocation code.
*/
/*
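To make the Keg/Zone layering concrete, the sketch below stacks a secondary Zone on the Keg of a master Zone, roughly mirroring the mbuf/packet arrangement this change introduces elsewhere. It assumes the uma_zsecond_create() interface added in uma.h (not shown in this excerpt), and the constructor/destructor names are placeholders:

    /*
     * Sketch: the master zone creates and owns the keg; the secondary
     * zone shares that keg (and its lock) but layers its own ctor/dtor
     * and per-CPU caches on top.
     */
    uma_zone_t zone_mbuf, zone_pack;

    zone_mbuf = uma_zcreate("Mbuf", MSIZE, mb_ctor_mbuf, mb_dtor_mbuf,
        NULL, NULL, UMA_ALIGN_PTR, 0);
    zone_pack = uma_zsecond_create("Packet", mb_ctor_pack, mb_dtor_pack,
        NULL, NULL, zone_mbuf);         /* stacked on zone_mbuf's keg */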
@@ -134,28 +148,6 @@
SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h), \
(mem))], (s), uma_slab, us_hlink);
-/* Page management structure */
-
-/* Sorry for the union, but space efficiency is important */
-struct uma_slab {
- uma_zone_t us_zone; /* Zone we live in */
- union {
- LIST_ENTRY(uma_slab) _us_link; /* slabs in zone */
- unsigned long _us_size; /* Size of allocation */
- } us_type;
- SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */
- u_int8_t *us_data; /* First item */
- u_int8_t us_flags; /* Page flags see uma.h */
- u_int8_t us_freecount; /* How many are free? */
- u_int8_t us_firstfree; /* First free item index */
- u_int8_t us_freelist[1]; /* Free List (actually larger) */
-};
-
-#define us_link us_type._us_link
-#define us_size us_type._us_size
-
-typedef struct uma_slab * uma_slab_t;
-
/* Hash table for freed address -> slab translation */
SLIST_HEAD(slabhead, uma_slab);
@@ -188,6 +180,97 @@ struct uma_cache {
typedef struct uma_cache * uma_cache_t;
/*
+ * Keg management structure
+ *
+ * TODO: Optimize for cache line size
+ *
+ */
+struct uma_keg {
+ LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */
+
+ struct mtx uk_lock; /* Lock for the keg */
+ struct uma_hash uk_hash;
+
+ LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */
+ LIST_HEAD(,uma_slab) uk_part_slab; /* partially allocated slabs */
+ LIST_HEAD(,uma_slab) uk_free_slab; /* empty slab list */
+ LIST_HEAD(,uma_slab) uk_full_slab; /* full slabs */
+
+ u_int32_t uk_recurse; /* Allocation recursion count */
+ u_int32_t uk_align; /* Alignment mask */
+ u_int32_t uk_pages; /* Total page count */
+ u_int32_t uk_free; /* Count of items free in slabs */
+ u_int32_t uk_size; /* Requested size of each item */
+ u_int32_t uk_rsize; /* Real size of each item */
+ u_int32_t uk_maxpages; /* Maximum number of pages to alloc */
+
+ uma_init uk_init; /* Keg's init routine */
+ uma_fini uk_fini; /* Keg's fini routine */
+ uma_alloc uk_allocf; /* Allocation function */
+ uma_free uk_freef; /* Free routine */
+
+ struct vm_object *uk_obj; /* Zone specific object */
+ vm_offset_t uk_kva; /* Base kva for zones with objs */
+ uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */
+
+ u_int16_t uk_pgoff; /* Offset to uma_slab struct */
+ u_int16_t uk_ppera; /* pages per allocation from backend */
+ u_int16_t uk_ipers; /* Items per slab */
+ u_int16_t uk_flags; /* Internal flags */
+};
+
+/* Simpler reference to uma_keg for internal use. */
+typedef struct uma_keg * uma_keg_t;
+
+/* Page management structure */
+
+/* Sorry for the union, but space efficiency is important */
+struct uma_slab_head {
+ uma_keg_t us_keg; /* Keg we live in */
+ union {
+ LIST_ENTRY(uma_slab) _us_link; /* slabs in zone */
+ unsigned long _us_size; /* Size of allocation */
+ } us_type;
+ SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */
+ u_int8_t *us_data; /* First item */
+ u_int8_t us_flags; /* Page flags see uma.h */
+ u_int8_t us_freecount; /* How many are free? */
+ u_int8_t us_firstfree; /* First free item index */
+};
+
+/* The standard slab structure */
+struct uma_slab {
+ struct uma_slab_head us_head; /* slab header data */
+ struct {
+ u_int8_t us_item;
+ } us_freelist[1]; /* actual number bigger */
+};
+
+/*
+ * The slab structure for UMA_ZONE_REFCNT zones, whose items have
+ * reference counters maintained for them within the slab.
+ */
+struct uma_slab_refcnt {
+ struct uma_slab_head us_head; /* slab header data */
+ struct {
+ u_int8_t us_item;
+ u_int32_t us_refcnt;
+ } us_freelist[1]; /* actual number bigger */
+};
+
+#define us_keg us_head.us_keg
+#define us_link us_head.us_type._us_link
+#define us_size us_head.us_type._us_size
+#define us_hlink us_head.us_hlink
+#define us_data us_head.us_data
+#define us_flags us_head.us_flags
+#define us_freecount us_head.us_freecount
+#define us_firstfree us_head.us_firstfree
+
+typedef struct uma_slab * uma_slab_t;
+typedef struct uma_slab_refcnt * uma_slabrefcnt_t;
+
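With the slab header split out, a UMA_ZONE_REFCNT slab keeps each item's reference counter alongside its free-list byte. The hypothetical helper below shows how an item's counter can be located from its slab using only the layout above; the real lookup goes through uma_find_refcnt():

    /*
     * Sketch: find the reference counter slot for an item in a
     * UMA_ZONE_REFCNT slab.  The us_data shim works here because
     * uma_slab_head is the first member of both slab layouts.
     */
    static u_int32_t *
    slab_refcnt_sketch(uma_slabrefcnt_t slabref, uma_keg_t keg, void *item)
    {
        int idx;

        idx = ((unsigned long)item - (unsigned long)slabref->us_data)
            / keg->uk_rsize;
        return (&slabref->us_freelist[idx].us_refcnt);
    }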
+/*
* Zone management structure
*
* TODO: Optimize for cache line size
@@ -195,42 +278,22 @@ typedef struct uma_cache * uma_cache_t;
*/
struct uma_zone {
char *uz_name; /* Text name of the zone */
- LIST_ENTRY(uma_zone) uz_link; /* List of all zones */
- u_int32_t uz_align; /* Alignment mask */
- u_int32_t uz_pages; /* Total page count */
-
-/* Used during alloc / free */
- struct mtx uz_lock; /* Lock for the zone */
- u_int32_t uz_free; /* Count of items free in slabs */
- u_int16_t uz_ipers; /* Items per slab */
- u_int16_t uz_flags; /* Internal flags */
-
- LIST_HEAD(,uma_slab) uz_part_slab; /* partially allocated slabs */
- LIST_HEAD(,uma_slab) uz_free_slab; /* empty slab list */
- LIST_HEAD(,uma_slab) uz_full_slab; /* full slabs */
+ struct mtx *uz_lock; /* Lock for the zone (keg's lock) */
+ uma_keg_t uz_keg; /* Our underlying Keg */
+
+ LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */
LIST_HEAD(,uma_bucket) uz_full_bucket; /* full buckets */
LIST_HEAD(,uma_bucket) uz_free_bucket; /* Buckets for frees */
- u_int32_t uz_size; /* Requested size of each item */
- u_int32_t uz_rsize; /* Real size of each item */
-
- struct uma_hash uz_hash;
- u_int16_t uz_pgoff; /* Offset to uma_slab struct */
- u_int16_t uz_ppera; /* pages per allocation from backend */
uma_ctor uz_ctor; /* Constructor for each allocation */
uma_dtor uz_dtor; /* Destructor */
- u_int64_t uz_allocs; /* Total number of allocations */
-
uma_init uz_init; /* Initializer for each item */
uma_fini uz_fini; /* Discards memory */
- uma_alloc uz_allocf; /* Allocation function */
- uma_free uz_freef; /* Free routine */
- struct vm_object *uz_obj; /* Zone specific object */
- vm_offset_t uz_kva; /* Base kva for zones with objs */
- u_int32_t uz_maxpages; /* Maximum number of pages to alloc */
- int uz_recurse; /* Allocation recursion count */
+
+ u_int64_t uz_allocs; /* Total number of allocations */
uint16_t uz_fills; /* Outstanding bucket fills */
uint16_t uz_count; /* Highest value ub_ptr can have */
+
/*
* This HAS to be the last item because we adjust the zone size
* based on NCPU and then allocate the space for the zones.
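The trailing per-CPU cache array is why the structure must end with it: boot-time code sizes each zone allocation to cover every CPU. A rough sketch of that sizing follows, in which the one-element uz_cpu[] member and the CPU-count variable are assumptions not shown in this excerpt:

    /*
     * Sketch: a zone allocation is padded past sizeof(struct uma_zone)
     * so the trailing uz_cpu[] (declared with a single element) really
     * spans one uma_cache per CPU.
     */
    size_t zsize;

    zsize = sizeof(struct uma_zone) +
        (ncpus - 1) * sizeof(struct uma_cache); /* ncpus: assumed CPU count */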
@@ -256,16 +319,16 @@ void uma_large_free(uma_slab_t slab);
#define ZONE_LOCK_INIT(z, lc) \
do { \
if ((lc)) \
- mtx_init(&(z)->uz_lock, (z)->uz_name, \
+ mtx_init((z)->uz_lock, (z)->uz_name, \
(z)->uz_name, MTX_DEF | MTX_DUPOK); \
else \
- mtx_init(&(z)->uz_lock, (z)->uz_name, \
+ mtx_init((z)->uz_lock, (z)->uz_name, \
"UMA zone", MTX_DEF | MTX_DUPOK); \
} while (0)
-#define ZONE_LOCK_FINI(z) mtx_destroy(&(z)->uz_lock)
-#define ZONE_LOCK(z) mtx_lock(&(z)->uz_lock)
-#define ZONE_UNLOCK(z) mtx_unlock(&(z)->uz_lock)
+#define ZONE_LOCK_FINI(z) mtx_destroy((z)->uz_lock)
+#define ZONE_LOCK(z) mtx_lock((z)->uz_lock)
+#define ZONE_UNLOCK(z) mtx_unlock((z)->uz_lock)
#define CPU_LOCK_INIT(cpu) \
mtx_init(&uma_pcpu_mtx[(cpu)], "UMA pcpu", "UMA pcpu", \
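Since uz_lock is now a pointer into the backing Keg, every Zone stacked on the same Keg serializes on one mutex through these macros. A sketch of the wiring the macros assume, with a hypothetical setup helper and field names taken from this diff:

    /*
     * Sketch: point a new zone at its keg and the keg's lock before any
     * of the ZONE_LOCK macros above are used on it.
     */
    static void
    zone_attach_keg_sketch(uma_zone_t zone, uma_keg_t keg)
    {
        zone->uz_keg = keg;
        zone->uz_lock = &keg->uk_lock;  /* shared by every zone on this keg */
        LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
    }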
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 3e21a99..f71785f 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -320,16 +320,6 @@ kmem_malloc(map, size, flags)
vm_map_lock(map);
if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
vm_map_unlock(map);
- if (map != kmem_map) {
- static int last_report; /* when we did it (in ticks) */
- if (ticks < last_report ||
- (ticks - last_report) >= hz) {
- last_report = ticks;
- printf("Out of mbuf address space!\n");
- printf("Consider increasing NMBCLUSTERS\n");
- }
- return (0);
- }
if ((flags & M_NOWAIT) == 0)
panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
(long)size, (long)map->size);