path: root/sys/vm/uma_int.h
author     glebius <glebius@FreeBSD.org>    2013-04-08 19:10:45 +0000
committer  glebius <glebius@FreeBSD.org>    2013-04-08 19:10:45 +0000
commit  7f9db020a246190e72c9b9656997403221c48a3a (patch)
tree    aadd654864cd16caea405f5ff2772f2fcd665103 /sys/vm/uma_int.h
parent  4312ec3f0dd92a3dedf58a28ebc6f4ae3a1a7672 (diff)
Merge from projects/counters: UMA_ZONE_PCPU zones.
These zones have slab size == sizeof(struct pcpu), but request enough pages from VM to fit (uk_slabsize * mp_ncpus). An item allocated from such a zone has a separate twin for each CPU in the system, and these twins lie at a distance of sizeof(struct pcpu) from each other. This magic distance value will allow us to make some optimizations later.

To address a CPU's private item, simple arithmetic is used:

    item = (type *)((char *)base + sizeof(struct pcpu) * curcpu)

This arithmetic is available as the zpcpu_get() macro in pcpu.h.

To introduce non-page-size slabs, a new field, uk_slabsize, has been added to uma_keg. This shifted some frequently used fields of uma_keg onto the fourth cache line on amd64. To mitigate this pessimization, the uma_keg fields were rearranged a bit, and the least frequently used uk_name and uk_link were moved down to the fourth cache line. All other frequently dereferenced fields fit into the first three cache lines.

Sponsored by: Nginx, Inc.
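To make the twin addressing concrete, the following is a minimal userland sketch of the same arithmetic, not the kernel code: PCPU_STRIDE stands in for sizeof(struct pcpu), NCPUS for mp_ncpus, and pcpu_twin() models the zpcpu_get() macro. All three names are assumptions for illustration only.

    #include <stdio.h>
    #include <stdlib.h>

    #define PCPU_STRIDE 4096    /* stand-in for sizeof(struct pcpu) */
    #define NCPUS       4       /* stand-in for mp_ncpus */

    /* Models the zpcpu_get() arithmetic from the commit message. */
    static void *
    pcpu_twin(void *base, int cpu)
    {
            return ((char *)base + (size_t)PCPU_STRIDE * cpu);
    }

    int
    main(void)
    {
            /* One allocation holds a twin of the item for every CPU. */
            char *base = malloc((size_t)PCPU_STRIDE * NCPUS);

            if (base == NULL)
                    return (1);
            for (int cpu = 0; cpu < NCPUS; cpu++) {
                    long *counter = pcpu_twin(base, cpu);

                    *counter = cpu;    /* each CPU touches only its own twin */
                    printf("cpu %d twin at offset %td\n", cpu,
                        (char *)counter - base);
            }
            free(base);
            return (0);
    }

Because every twin sits exactly PCPU_STRIDE bytes from the previous one, a single base pointer plus the CPU index is all that is needed to reach any CPU's copy.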
Diffstat (limited to 'sys/vm/uma_int.h')
-rw-r--r--  sys/vm/uma_int.h | 12
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index b8c2be0..ee02a46 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -120,8 +120,8 @@
#define UMA_BOOT_PAGES 64 /* Pages allocated for startup */
-/* Max waste before going to off page slab management */
-#define UMA_MAX_WASTE (UMA_SLAB_SIZE / 10)
+/* Max waste percentage before going to off page slab management */
+#define UMA_MAX_WASTE 10
/*
* I doubt there will be many cases where this is exceeded. This is the initial
@@ -197,12 +197,9 @@ typedef struct uma_cache * uma_cache_t;
*
*/
struct uma_keg {
- LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */
-
struct mtx uk_lock; /* Lock for the keg */
struct uma_hash uk_hash;
- const char *uk_name; /* Name of creating zone. */
LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */
LIST_HEAD(,uma_slab) uk_part_slab; /* partially allocated slabs */
LIST_HEAD(,uma_slab) uk_free_slab; /* empty slab list */
@@ -225,10 +222,15 @@ struct uma_keg {
vm_offset_t uk_kva; /* Zone base KVA */
uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */
+ u_int16_t uk_slabsize; /* Slab size for this keg */
u_int16_t uk_pgoff; /* Offset to uma_slab struct */
u_int16_t uk_ppera; /* pages per allocation from backend */
u_int16_t uk_ipers; /* Items per slab */
u_int32_t uk_flags; /* Internal flags */
+
+ /* Least used fields go to the last cache line. */
+ const char *uk_name; /* Name of creating zone. */
+ LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */
};
typedef struct uma_keg * uma_keg_t;
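As a footnote on the UMA_MAX_WASTE hunk above: the macro changed from an absolute byte count into a divisor-style percentage, so the off-page decision compares wasted bytes against slabsize / UMA_MAX_WASTE. Below is a hedged sketch of such a check; the parameter names mirror uk_slabsize, uk_ipers and uk_rsize, but this is an illustration under those assumptions, not the actual keg_small_init() code.

    #define UMA_MAX_WASTE 10    /* at most 1/10 of a slab may be wasted */

    /*
     * Sketch: does a slab layout waste enough space that off-page
     * slab management should be considered? Illustrative only; the
     * parameters stand in for the uk_slabsize, uk_ipers and uk_rsize
     * keg fields.
     */
    static int
    slab_needs_offpage(unsigned slabsize, unsigned ipers, unsigned rsize)
    {
            unsigned wastedspace = slabsize - ipers * rsize;

            return (wastedspace >= slabsize / UMA_MAX_WASTE);
    }

Expressing the limit as a fraction of the slab makes the same threshold work for the non-page-size slabs this commit introduces, whereas the old byte constant was tied to UMA_SLAB_SIZE.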