Diffstat (limited to 'lib/libmemstat')
-rw-r--r--  lib/libmemstat/Makefile            |  30
-rw-r--r--  lib/libmemstat/libmemstat.3        | 496
-rw-r--r--  lib/libmemstat/memstat.c           | 421
-rw-r--r--  lib/libmemstat/memstat.h           | 174
-rw-r--r--  lib/libmemstat/memstat_all.c       |  58
-rw-r--r--  lib/libmemstat/memstat_internal.h  | 124
-rw-r--r--  lib/libmemstat/memstat_malloc.c    | 409
-rw-r--r--  lib/libmemstat/memstat_uma.c       | 465
8 files changed, 2177 insertions, 0 deletions
diff --git a/lib/libmemstat/Makefile b/lib/libmemstat/Makefile
new file mode 100644
index 0000000..6114bf7
--- /dev/null
+++ b/lib/libmemstat/Makefile
@@ -0,0 +1,30 @@
+# $FreeBSD$
+
+WARNS?= 3
+LIB= memstat
+SHLIB_MAJOR= 2
+DPADD= ${LIBKVM}
+LDADD= -lkvm
+SRCS+= memstat.c
+SRCS+= memstat_all.c
+SRCS+= memstat_malloc.c
+SRCS+= memstat_uma.c
+INCS= memstat.h
+
+MAN= libmemstat.3
+
+MLINKS+= libmemstat.3 memstat_mtl_alloc.3
+MLINKS+= libmemstat.3 memstat_mtl_first.3
+MLINKS+= libmemstat.3 memstat_mtl_next.3
+MLINKS+= libmemstat.3 memstat_mtl_find.3
+MLINKS+= libmemstat.3 memstat_mtl_free.3
+MLINKS+= libmemstat.3 memstat_mtl_geterror.3
+MLINKS+= libmemstat.3 memstat_strerror.3
+MLINKS+= libmemstat.3 memstat_sysctl_all.3
+MLINKS+= libmemstat.3 memstat_sysctl_malloc.3
+MLINKS+= libmemstat.3 memstat_sysctl_uma.3
+MLINKS+= libmemstat.3 memstat_kvm_all.3
+MLINKS+= libmemstat.3 memstat_kvm_malloc.3
+MLINKS+= libmemstat.3 memstat_kvm_uma.3
+
+.include <bsd.lib.mk>
diff --git a/lib/libmemstat/libmemstat.3 b/lib/libmemstat/libmemstat.3
new file mode 100644
index 0000000..82ec53a
--- /dev/null
+++ b/lib/libmemstat/libmemstat.3
@@ -0,0 +1,496 @@
+.\" Copyright (c) 2005 Robert N. M. Watson
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd June 27, 2005
+.Dt LIBMEMSTAT 3
+.Os
+.Sh NAME
+.Nm libmemstat
+.Nd "library interface to retrieve kernel memory allocator statistics"
+.Sh LIBRARY
+.Lb libmemstat
+.Sh SYNOPSIS
+.In sys/types.h
+.In memstat.h
+.Ss General Functions
+.Ft "const char *"
+.Fn memstat_strerror "int error"
+.Ss Memory Type List Management Functions
+.Ft "struct memory_type_list *"
+.Fn memstat_mtl_alloc "void"
+.Ft "struct memory_type *"
+.Fn memstat_mtl_first "struct memory_type_list *list"
+.Ft "struct memory_type *"
+.Fn memstat_mtl_next "struct memory_type *mtp"
+.Ft "struct memory_type *"
+.Fo memstat_mtl_find
+.Fa "struct memory_type_list *list" "int allocator" "const char *name"
+.Fc
+.Ft void
+.Fn memstat_mtl_free "struct memory_type_list *list"
+.Ft int
+.Fn memstat_mtl_geterror "struct memory_type_list *list"
+.Ss Allocator Query Functions
+.Ft int
+.Fn memstat_kvm_all "struct memory_type_list *list" "void *kvm_handle"
+.Ft int
+.Fn memstat_kvm_malloc "struct memory_type_list *list" "void *kvm_handle"
+.Ft int
+.Fn memstat_kvm_uma "struct memory_type_list *list" "void *kvm_handle"
+.Ft int
+.Fn memstat_sysctl_all "struct memory_type_list *list" "int flags"
+.Ft int
+.Fn memstat_sysctl_malloc "struct memory_type_list *list" "int flags"
+.Ft int
+.Fn memstat_sysctl_uma "struct memory_type_list *list" "int flags"
+.Ss Memory Type Accessor Methods
+.Ft "const char *"
+.Fn memstat_get_name "const struct memory_type *mtp"
+.Ft int
+.Fn memstat_get_allocator "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_countlimit "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_byteslimit "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_sizemask "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_size "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_memalloced "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_memfreed "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_numallocs "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_numfrees "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_bytes "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_count "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_free "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_failures "const struct memory_type *mtp"
+.Ft "void *"
+.Fn memstat_get_caller_pointer "const struct memory_type *mtp" "int index"
+.Ft void
+.Fo memstat_set_caller_pointer
+.Fa "struct memory_type *mtp" "int index" "void *value"
+.Fc
+.Ft uint64_t
+.Fn memstat_get_caller_uint64 "const struct memory_type *mtp" "int index"
+.Ft void
+.Fo memstat_set_caller_uint64
+.Fa "struct memory_type *mtp" "int index" "uint64_t value"
+.Fc
+.Ft uint64_t
+.Fn memstat_get_zonefree "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_kegfree "const struct memory_type *mtp"
+.Ft uint64_t
+.Fn memstat_get_percpu_memalloced "const struct memory_type *mtp" "int cpu"
+.Ft uint64_t
+.Fn memstat_get_percpu_memfreed "const struct memory_type *mtp" "int cpu"
+.Ft uint64_t
+.Fn memstat_get_percpu_numallocs "const struct memory_type *mtp" "int cpu"
+.Ft uint64_t
+.Fn memstat_get_percpu_numfrees "const struct memory_type *mtp" "int cpu"
+.Ft uint64_t
+.Fn memstat_get_percpu_sizemask "const struct memory_type *mtp" "int cpu"
+.Ft "void *"
+.Fo memstat_get_percpu_caller_pointer
+.Fa "const struct memory_type *mtp" "int cpu" "int index"
+.Fc
+.Ft void
+.Fo memstat_set_percpu_caller_pointer
+.Fa "struct memory_type *mtp" "int cpu" "int index" "void *value"
+.Fc
+.Ft uint64_t
+.Fo memstat_get_percpu_caller_uint64
+.Fa "const struct memory_type *mtp" "int cpu" "int index"
+.Fc
+.Ft void
+.Fo memstat_set_percpu_caller_uint64
+.Fa "struct memory_type *mtp" "int cpu" "int index" "uint64_t value"
+.Fc
+.Ft uint64_t
+.Fn memstat_get_percpu_free "const struct memory_type *mtp" "int cpu"
+.Sh DESCRIPTION
+.Nm
+provides an interface to retrieve kernel memory allocator statistics, for
+the purposes of debugging and system monitoring, insulating applications
+from implementation details of the allocators, and allowing a tool to
+transparently support multiple allocators.
+.Nm
+supports both retrieving a single statistics snapshot and incrementally
+updating statistics for long-term monitoring.
+.Pp
+.Nm
+describes each memory type using a
+.Vt "struct memory_type" ,
+an opaque structure accessed by the application using accessor functions
+in the library.
+.Nm
+returns and updates chains of
+.Vt "struct memory_type"
+via a
+.Vt "struct memory_type_list" ,
+which will be allocated by calling
+.Fn memstat_mtl_alloc ,
+and freed on completion using
+.Fn memstat_mtl_free .
+Lists of memory types are populated via calls that query the kernel for
+statistics information; currently:
+.Fn memstat_kvm_all ,
+.Fn memstat_kvm_malloc ,
+.Fn memstat_kvm_uma ,
+.Fn memstat_sysctl_all ,
+.Fn memstat_sysctl_uma ,
+and
+.Fn memstat_sysctl_malloc .
+Repeated calls will incrementally update the list of memory types, permitting
+tracking over time without recreating all list state.
+If an error is detected during a query call, error condition information may
+be retrieved using
+.Fn memstat_mtl_geterror ,
+and converted to a user-readable string using
+.Fn memstat_strerror .
+.Pp
+Freeing the list will free all memory type data in the list, and so
+invalidates any outstanding pointers to entries in the list.
+.Vt "struct memory_type"
+entries in the list may be iterated over using
+.Fn memstat_mtl_first
+and
+.Fn memstat_mtl_next ,
+which respectively return the first entry in a list, and the next entry in a
+list.
+Specific entries may be looked up using
+.Fn memstat_mtl_find ,
+which returns a pointer to the first entry matching the passed
+parameters.
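+.Pp
+For example, a long-running monitor might periodically refresh the list and
+walk it.
+The following sketch (the output format and one-second interval are
+illustrative) demonstrates incremental updates:
+.Bd -literal -offset indent
+struct memory_type_list *mtlp;
+struct memory_type *mtp;
+
+mtlp = memstat_mtl_alloc();
+if (mtlp == NULL)
+	err(-1, "memstat_mtl_alloc");
+for (;;) {
+	if (memstat_sysctl_all(mtlp, 0) < 0)
+		errx(-1, "memstat_sysctl_all: %s",
+		    memstat_strerror(memstat_mtl_geterror(mtlp)));
+	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
+	    mtp = memstat_mtl_next(mtp))
+		printf("%s: %llu\en", memstat_get_name(mtp),
+		    (unsigned long long)memstat_get_count(mtp));
+	sleep(1);
+}
+.Ed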
+.Pp
+A series of accessor methods is provided to access fields of the structure,
+including retrieving statistics and properties, as well as setting
+caller-owned fields.
+Direct application access to the data structure fields is not supported.
+.Ss Library Vt memory_type Ss Fields
+Each
+.Vt "struct memory_type"
+holds a description of the memory type, including its name and the allocator
+it is managed by, as well as current statistics on use.
+Some statistics are directly measured; others are derived from directly
+measured statistics.
+Certain high-level statistics are present across all available allocators,
+such as the number of allocation and free operations; other measurements,
+such as the quantity of free items in per-CPU caches, or the administrative
+limit on the number of allocations, are available only for specific
+allocators.
+.Ss Caller Vt memory_type Ss Fields
+.Vt "struct memory_type"
+includes fields to allow the application to store data, in the form of
+pointers and 64-bit integers, with memory types.
+For example, the application author might make use of one of the caller
+pointers to reference a more complex data structure tracking long-term
+behavior of the memory type, or a window system object that is used to
+render the state of the memory type.
+General and per-CPU storage is provided with each
+.Vt "struct memory_type"
+in the form of an array of pointers and integers.
+The array entries are accessed via the
+.Fa index
+argument to the get and set accessor methods.
+Possible values of
+.Fa index
+range between
+0
+and
+.Dv MEMSTAT_MAXCALLER
+\- 1, inclusive.
+.Pp
+Caller-owned fields are initialized to
+0
+or
+.Dv NULL
+when a new
+.Vt "struct memory_type"
+is allocated and attached to a memory type list; these fields retain their
+values across queries that update library-owned fields.
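+.Pp
+For example, an application computing per-interval deltas might store the
+previous allocation count in caller slot 0 (the slot number here is an
+arbitrary, illustrative choice):
+.Bd -literal -offset indent
+uint64_t now, prev;
+
+now = memstat_get_numallocs(mtp);
+prev = memstat_get_caller_uint64(mtp, 0);
+printf("%s: %llu allocations since last update\en",
+    memstat_get_name(mtp), (unsigned long long)(now - prev));
+memstat_set_caller_uint64(mtp, 0, now);
+.Ed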
+.Ss Allocator Types
+Currently,
+.Nm
+supports two kernel allocators:
+.Dv ALLOCATOR_UMA
+for
+.Xr uma 9 ,
+and
+.Dv ALLOCATOR_MALLOC
+for
+.Xr malloc 9 .
+These values may be passed to
+.Fn memstat_mtl_find ,
+and will be returned by
+.Fn memstat_get_allocator .
+Two additional constants in the allocator name space are defined:
+.Dv ALLOCATOR_UNKNOWN ,
+which will only be returned as a result of a library error, and
+.Dv ALLOCATOR_ANY ,
+which can be used to specify that returning types matching any allocator is
+permissible from
+.Fn memstat_mtl_find .
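+.Pp
+For example, to look up a memory type named
+.Dv mbuf
+without regard to which allocator owns it:
+.Bd -literal -offset indent
+mtp = memstat_mtl_find(mtlp, ALLOCATOR_ANY, "mbuf");
+.Ed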
+.Ss Access Method List
+The following accessor methods are defined, only some of which will be
+valid for any given memory type:
+.Bl -tag -width indent
+.It Fn memstat_get_name
+Return a pointer to the name of the memory type.
+Memory for the name is owned by
+.Nm
+and will be valid through a call to
+.Fn memstat_mtl_free .
+Note that names will be unique with respect to a single allocator, but that
+the same name might be used by different memory types owned by different
+memory allocators.
+.It Fn memstat_get_allocator
+Return an integer identifier for the memory allocator that owns the memory
+type.
+.It Fn memstat_get_countlimit
+If the memory type has an administrative limit on the number of simultaneous
+allocations, return it.
+.It Fn memstat_get_byteslimit
+If the memory type has an administrative limit on the number of bytes of
+memory that may be simultaneously allocated for the memory type, return it.
+.It Fn memstat_get_sizemask
+If the memory type supports variable allocation sizes, return a bitmask of
+sizes allocated for the memory type.
+.It Fn memstat_get_size
+If the memory type supports a fixed allocation size, return that size.
+.It Fn memstat_get_memalloced
+Return the total number of bytes allocated for the memory type over its
+lifetime.
+.It Fn memstat_get_memfreed
+Return the total number of bytes freed for the memory type over its lifetime.
+.It Fn memstat_get_numallocs
+Return the total number of allocations for the memory type over its lifetime.
+.It Fn memstat_get_numfrees
+Return the total number of frees for the memory type over its lifetime.
+.It Fn memstat_get_bytes
+Return the current number of bytes allocated to the memory type.
+.It Fn memstat_get_count
+Return the current number of allocations for the memory type.
+.It Fn memstat_get_free
+If the memory allocator supports a cache, return the number of items in the
+cache.
+.It Fn memstat_get_failures
+If the memory allocator and type permit allocation failures, return the
+number of allocation failures measured.
+.It Fn memstat_get_caller_pointer
+Return a caller-owned pointer for the memory type.
+.It Fn memstat_set_caller_pointer
+Set a caller-owned pointer for the memory type.
+.It Fn memstat_get_caller_uint64
+Return a caller-owned integer for the memory type.
+.It Fn memstat_set_caller_uint64
+Set a caller-owned integer for the memory type.
+.It Fn memstat_get_zonefree
+If the memory allocator supports a multi-level allocation structure, return
+the number of cached items in the zone.
+These items will be in a fully constructed state available for immediate
+use.
+.It Fn memstat_get_kegfree
+If the memory allocator supports a multi-level allocation structure, return
+the number of cached items in the keg.
+These items may be in a partially constructed state, and may require further
+processing before they can be made available for use.
+.It Fn memstat_get_percpu_memalloced
+If the memory allocator supports per-CPU statistics, return the number of
+bytes of memory allocated for the memory type on the CPU over its lifetime.
+.It Fn memstat_get_percpu_memfreed
+If the memory allocator supports per-CPU statistics, return the number of
+bytes of memory freed from the memory type on the CPU over its lifetime.
+.It Fn memstat_get_percpu_numallocs
+If the memory allocator supports per-CPU statistics, return the number of
+allocations for the memory type on the CPU over its lifetime.
+.It Fn memstat_get_percpu_numfrees
+If the memory allocator supports per-CPU statistics, return the number of
+frees for the memory type on the CPU over its lifetime.
+.It Fn memstat_get_percpu_sizemask
+If the memory allocator supports variable size memory allocation and per-CPU
+statistics, return the size bitmask for the memory type on the CPU.
+.It Fn memstat_get_percpu_caller_pointer
+Return a caller-owned per-CPU pointer for the memory type.
+.It Fn memstat_set_percpu_caller_pointer
+Set a caller-owned per-CPU pointer for the memory type.
+.It Fn memstat_get_percpu_caller_uint64
+Return a caller-owned per-CPU integer for the memory type.
+.It Fn memstat_set_percpu_caller_uint64
+Set a caller-owned per-CPU integer for the memory type.
+.It Fn memstat_get_percpu_free
+If the memory allocator supports a per-CPU cache, return the number of free
+items in the per-CPU cache of the designated CPU.
+.El
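+.Pp
+Because the library zero-fills per-CPU slots when a
+.Vt "struct memory_type"
+is allocated or reset, an aggregate may be computed by summing over all
+.Dv MEMSTAT_MAXCPU
+slots; a sketch:
+.Bd -literal -offset indent
+uint64_t cached;
+int cpu;
+
+cached = 0;
+for (cpu = 0; cpu < MEMSTAT_MAXCPU; cpu++)
+	cached += memstat_get_percpu_free(mtp, cpu);
+.Ed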
+.Sh RETURN VALUES
+.Nm
+functions fall into three categories: functions returning a pointer to an
+object, functions returning an integer return value, and functions
+implementing accessor methods returning data from a
+.Vt "struct memory_type" .
+.Pp
+Functions returning a pointer to an object will generally return
+.Dv NULL
+on failure.
+On failure,
+.Fn memstat_mtl_alloc
+sets
+.Va errno
+to
+.Er ENOMEM .
+Functions
+.Fn memstat_mtl_first ,
+.Fn memstat_mtl_next ,
+and
+.Fn memstat_mtl_find
+will return
+.Dv NULL
+when there is no entry or match in the list; however, this is not considered
+a failure mode and no error value is available.
+.Pp
+Functions returning an integer success value will return
+0
+on success, or
+\-1
+on failure.
+If a failure is returned, the list error access method,
+.Fn memstat_mtl_geterror ,
+may be used to retrieve the error state.
+The string representation of the error may be retrieved using
+.Fn memstat_strerror .
+Possible error values are:
+.Bl -tag -width ".Dv MEMSTAT_ERROR_KVM_SHORTREAD"
+.It Dv MEMSTAT_ERROR_UNDEFINED
+Undefined error.
+Occurs if
+.Fn memstat_mtl_geterror
+is called on a list before an error associated with the list has occurred.
+.It Dv MEMSTAT_ERROR_NOMEMORY
+Insufficient memory.
+Occurs if library calls to
+.Xr malloc 3
+fail, or if a system call to retrieve kernel statistics fails with
+.Er ENOMEM .
+.It Dv MEMSTAT_ERROR_VERSION
+Returned if the current version of
+.Nm
+is unable to interpret the statistics data returned by the kernel due to an
+explicit version mismatch, or to differences in data structures that cannot
+be reconciled.
+.It Dv MEMSTAT_ERROR_PERMISSION
+Returned if a statistics source returns
+.Va errno
+values of
+.Er EACCES
+or
+.Er EPERM .
+.It Dv MEMSTAT_ERROR_TOOMANYCPUS
+Returned if the compile-time limit on the number of CPUs in
+.Nm
+is lower than the number of CPUs returned by a statistics data source.
+.It Dv MEMSTAT_ERROR_DATAERROR
+Returned if
+.Nm
+is unable to interpret statistics data returned by the data source, even
+though there does not appear to be a version problem.
+.It Dv MEMSTAT_ERROR_KVM
+Returned if
+.Nm
+experiences an error while using
+.Xr kvm 3
+interfaces to query statistics data.
+Use
+.Xr kvm_geterr 3
+to retrieve the error.
+.It Dv MEMSTAT_ERROR_KVM_NOSYMBOL
+Returned if
+.Nm
+is unable to read a required symbol from the kernel being operated on.
+.It Dv MEMSTAT_ERROR_KVM_SHORTREAD
+Returned if
+.Nm
+attempts to read data from a live memory image or kernel core dump and
+insufficient data is returned.
+.El
+.Pp
+Finally, functions returning data from a
+.Vt "struct memory_type"
+pointer are not permitted to fail, and directly return either a statistic
+or a pointer to a string.
+.Sh EXAMPLES
+Create a memory type list, query the
+.Xr uma 9
+memory allocator for available statistics, and print out the number of
+allocations performed by the
+.Dv mbuf
+zone.
+.Bd -literal -offset indent
+struct memory_type_list *mtlp;
+struct memory_type *mtp;
+uint64_t mbuf_count;
+
+mtlp = memstat_mtl_alloc();
+if (mtlp == NULL)
+ err(-1, "memstat_mtl_alloc");
+if (memstat_sysctl_uma(mtlp, 0) < 0)
+ err(-1, "memstat_sysctl_uma");
+mtp = memstat_mtl_find(mtlp, ALLOCATOR_UMA, "mbuf");
+if (mtp == NULL)
+ errx(-1, "memstat_mtl_find: mbuf not found");
+mbuf_count = memstat_get_count(mtp);
+memstat_mtl_free(mtlp);
+
+printf("mbufs: %llu\en", (unsigned long long)mbuf_count);
+.Ed
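+.Pp
+Statistics may instead be read via
+.Xr kvm 3
+from a live kernel or core dump; a sketch, using the default live kernel
+(pass explicit kernel and core paths to
+.Xr kvm_openfiles 3
+to examine a crash dump):
+.Bd -literal -offset indent
+kvm_t *kvm;
+char errbuf[_POSIX2_LINE_MAX];
+
+kvm = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
+if (kvm == NULL)
+	errx(-1, "kvm_openfiles: %s", errbuf);
+if (memstat_kvm_uma(mtlp, kvm) < 0) {
+	if (memstat_mtl_geterror(mtlp) == MEMSTAT_ERROR_KVM)
+		errx(-1, "memstat_kvm_uma: %s", kvm_geterr(kvm));
+	errx(-1, "memstat_kvm_uma: %s",
+	    memstat_strerror(memstat_mtl_geterror(mtlp)));
+}
+.Ed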
+.Sh SEE ALSO
+.Xr kvm 3 ,
+.Xr malloc 9 ,
+.Xr uma 9
+.Sh HISTORY
+The
+.Nm
+library appeared in
+.Fx 6.0 .
+.Sh AUTHORS
+The kernel memory allocator changes necessary to support a general purpose
+monitoring library, along with the library, were written by
+.An Robert Watson Aq rwatson@FreeBSD.org .
+.Sh BUGS
+There are memory allocators in the kernel, such as the VM page allocator
+and the
+.Nm sf_buf
+allocator, which are not currently supported by
+.Nm .
+.Pp
+Once a memory type is present on a memory type list, it will not be removed
+even if the kernel no longer presents information on the type via its
+monitoring interfaces.
+In order to flush removed memory types, it is necessary to free the entire
+list and allocate a new one.
diff --git a/lib/libmemstat/memstat.c b/lib/libmemstat/memstat.c
new file mode 100644
index 0000000..28e4813
--- /dev/null
+++ b/lib/libmemstat/memstat.c
@@ -0,0 +1,421 @@
+/*-
+ * Copyright (c) 2005 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/sysctl.h>
+
+#include <err.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "memstat.h"
+#include "memstat_internal.h"
+
+const char *
+memstat_strerror(int error)
+{
+
+ switch (error) {
+ case MEMSTAT_ERROR_NOMEMORY:
+ return ("Cannot allocate memory");
+ case MEMSTAT_ERROR_VERSION:
+ return ("Version mismatch");
+ case MEMSTAT_ERROR_PERMISSION:
+ return ("Permission denied");
+ case MEMSTAT_ERROR_TOOMANYCPUS:
+ return ("Too many CPUs");
+ case MEMSTAT_ERROR_DATAERROR:
+ return ("Data format error");
+ case MEMSTAT_ERROR_KVM:
+ return ("KVM error");
+ case MEMSTAT_ERROR_KVM_NOSYMBOL:
+ return ("KVM unable to find symbol");
+ case MEMSTAT_ERROR_KVM_SHORTREAD:
+ return ("KVM short read");
+ case MEMSTAT_ERROR_UNDEFINED:
+ default:
+ return ("Unknown error");
+ }
+}
+
+struct memory_type_list *
+memstat_mtl_alloc(void)
+{
+ struct memory_type_list *mtlp;
+
+ mtlp = malloc(sizeof(*mtlp));
+ if (mtlp == NULL)
+ return (NULL);
+
+ LIST_INIT(&mtlp->mtl_list);
+ mtlp->mtl_error = MEMSTAT_ERROR_UNDEFINED;
+ return (mtlp);
+}
+
+struct memory_type *
+memstat_mtl_first(struct memory_type_list *list)
+{
+
+ return (LIST_FIRST(&list->mtl_list));
+}
+
+struct memory_type *
+memstat_mtl_next(struct memory_type *mtp)
+{
+
+ return (LIST_NEXT(mtp, mt_list));
+}
+
+void
+_memstat_mtl_empty(struct memory_type_list *list)
+{
+ struct memory_type *mtp;
+
+ while ((mtp = LIST_FIRST(&list->mtl_list))) {
+ LIST_REMOVE(mtp, mt_list);
+ free(mtp);
+ }
+}
+
+void
+memstat_mtl_free(struct memory_type_list *list)
+{
+
+ _memstat_mtl_empty(list);
+ free(list);
+}
+
+int
+memstat_mtl_geterror(struct memory_type_list *list)
+{
+
+ return (list->mtl_error);
+}
+
+/*
+ * Look for an existing memory_type entry in a memory_type list, based on the
+ * allocator and name of the type. If not found, return NULL. No errno or
+ * memstat error.
+ */
+struct memory_type *
+memstat_mtl_find(struct memory_type_list *list, int allocator,
+ const char *name)
+{
+ struct memory_type *mtp;
+
+ LIST_FOREACH(mtp, &list->mtl_list, mt_list) {
+ if ((mtp->mt_allocator == allocator ||
+ allocator == ALLOCATOR_ANY) &&
+ strcmp(mtp->mt_name, name) == 0)
+ return (mtp);
+ }
+ return (NULL);
+}
+
+/*
+ * Allocate a new memory_type with the specified allocator type and name,
+ * then insert it into the list.  The structure will be zeroed.
+ *
+ * libmemstat(3) internal function.
+ */
+struct memory_type *
+_memstat_mt_allocate(struct memory_type_list *list, int allocator,
+ const char *name)
+{
+ struct memory_type *mtp;
+
+ mtp = malloc(sizeof(*mtp));
+ if (mtp == NULL)
+ return (NULL);
+
+ bzero(mtp, sizeof(*mtp));
+
+ mtp->mt_allocator = allocator;
+ strlcpy(mtp->mt_name, name, MEMTYPE_MAXNAME);
+ LIST_INSERT_HEAD(&list->mtl_list, mtp, mt_list);
+ return (mtp);
+}
+
+/*
+ * Reset any libmemstat(3)-owned statistics in a memory_type record so that
+ * it can be reused without incremental addition problems. Caller-owned
+ * memory is left "as-is", and must be updated by the caller if desired.
+ *
+ * libmemstat(3) internal function.
+ */
+void
+_memstat_mt_reset_stats(struct memory_type *mtp)
+{
+ int i;
+
+ mtp->mt_countlimit = 0;
+ mtp->mt_byteslimit = 0;
+ mtp->mt_sizemask = 0;
+ mtp->mt_size = 0;
+
+ mtp->mt_memalloced = 0;
+ mtp->mt_memfreed = 0;
+ mtp->mt_numallocs = 0;
+ mtp->mt_numfrees = 0;
+ mtp->mt_bytes = 0;
+ mtp->mt_count = 0;
+ mtp->mt_free = 0;
+ mtp->mt_failures = 0;
+
+ mtp->mt_zonefree = 0;
+ mtp->mt_kegfree = 0;
+
+ for (i = 0; i < MEMSTAT_MAXCPU; i++) {
+ mtp->mt_percpu_alloc[i].mtp_memalloced = 0;
+ mtp->mt_percpu_alloc[i].mtp_memfreed = 0;
+ mtp->mt_percpu_alloc[i].mtp_numallocs = 0;
+ mtp->mt_percpu_alloc[i].mtp_numfrees = 0;
+ mtp->mt_percpu_alloc[i].mtp_sizemask = 0;
+ mtp->mt_percpu_cache[i].mtp_free = 0;
+ }
+}
+
+/*
+ * Accessor methods for struct memory_type. Avoids encoding the structure
+ * ABI into the application.
+ */
+const char *
+memstat_get_name(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_name);
+}
+
+int
+memstat_get_allocator(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_allocator);
+}
+
+uint64_t
+memstat_get_countlimit(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_countlimit);
+}
+
+uint64_t
+memstat_get_byteslimit(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_byteslimit);
+}
+
+uint64_t
+memstat_get_sizemask(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_sizemask);
+}
+
+uint64_t
+memstat_get_size(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_size);
+}
+
+uint64_t
+memstat_get_memalloced(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_memalloced);
+}
+
+uint64_t
+memstat_get_memfreed(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_memfreed);
+}
+
+uint64_t
+memstat_get_numallocs(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_numallocs);
+}
+
+uint64_t
+memstat_get_numfrees(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_numfrees);
+}
+
+uint64_t
+memstat_get_bytes(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_bytes);
+}
+
+uint64_t
+memstat_get_count(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_count);
+}
+
+uint64_t
+memstat_get_free(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_free);
+}
+
+uint64_t
+memstat_get_failures(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_failures);
+}
+
+void *
+memstat_get_caller_pointer(const struct memory_type *mtp, int index)
+{
+
+ return (mtp->mt_caller_pointer[index]);
+}
+
+void
+memstat_set_caller_pointer(struct memory_type *mtp, int index, void *value)
+{
+
+ mtp->mt_caller_pointer[index] = value;
+}
+
+uint64_t
+memstat_get_caller_uint64(const struct memory_type *mtp, int index)
+{
+
+ return (mtp->mt_caller_uint64[index]);
+}
+
+void
+memstat_set_caller_uint64(struct memory_type *mtp, int index, uint64_t value)
+{
+
+ mtp->mt_caller_uint64[index] = value;
+}
+
+uint64_t
+memstat_get_zonefree(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_zonefree);
+}
+
+uint64_t
+memstat_get_kegfree(const struct memory_type *mtp)
+{
+
+ return (mtp->mt_kegfree);
+}
+
+uint64_t
+memstat_get_percpu_memalloced(const struct memory_type *mtp, int cpu)
+{
+
+ return (mtp->mt_percpu_alloc[cpu].mtp_memalloced);
+}
+
+uint64_t
+memstat_get_percpu_memfreed(const struct memory_type *mtp, int cpu)
+{
+
+ return (mtp->mt_percpu_alloc[cpu].mtp_memfreed);
+}
+
+uint64_t
+memstat_get_percpu_numallocs(const struct memory_type *mtp, int cpu)
+{
+
+ return (mtp->mt_percpu_alloc[cpu].mtp_numallocs);
+}
+
+uint64_t
+memstat_get_percpu_numfrees(const struct memory_type *mtp, int cpu)
+{
+
+ return (mtp->mt_percpu_alloc[cpu].mtp_numfrees);
+}
+
+uint64_t
+memstat_get_percpu_sizemask(const struct memory_type *mtp, int cpu)
+{
+
+ return (mtp->mt_percpu_alloc[cpu].mtp_sizemask);
+}
+
+void *
+memstat_get_percpu_caller_pointer(const struct memory_type *mtp, int cpu,
+ int index)
+{
+
+ return (mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index]);
+}
+
+void
+memstat_set_percpu_caller_pointer(struct memory_type *mtp, int cpu,
+ int index, void *value)
+{
+
+ mtp->mt_percpu_alloc[cpu].mtp_caller_pointer[index] = value;
+}
+
+uint64_t
+memstat_get_percpu_caller_uint64(const struct memory_type *mtp, int cpu,
+ int index)
+{
+
+ return (mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index]);
+}
+
+void
+memstat_set_percpu_caller_uint64(struct memory_type *mtp, int cpu, int index,
+ uint64_t value)
+{
+
+ mtp->mt_percpu_alloc[cpu].mtp_caller_uint64[index] = value;
+}
+
+uint64_t
+memstat_get_percpu_free(const struct memory_type *mtp, int cpu)
+{
+
+ return (mtp->mt_percpu_cache[cpu].mtp_free);
+}
diff --git a/lib/libmemstat/memstat.h b/lib/libmemstat/memstat.h
new file mode 100644
index 0000000..aaa85702
--- /dev/null
+++ b/lib/libmemstat/memstat.h
@@ -0,0 +1,174 @@
+/*-
+ * Copyright (c) 2005 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MEMSTAT_H_
+#define _MEMSTAT_H_
+
+/*
+ * Number of CPU slots in library-internal data structures. This should be
+ * at least the value of MAXCPU from param.h.
+ */
+#define MEMSTAT_MAXCPU 32
+
+/*
+ * Number of caller data slots maintained for each memory type.  Applications
+ * must not use more than this number of caller data slots, or they risk
+ * corrupting internal libmemstat(3) data structures.  A compile-time check
+ * in the application is probably appropriate.
+ */
+#define MEMSTAT_MAXCALLER 16
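+
+/*
+ * For example (illustrative only; APP_CALLER_SLOTS would be an
+ * application's own constant), such a compile-time check might read:
+ *
+ *	#if APP_CALLER_SLOTS > MEMSTAT_MAXCALLER
+ *	#error "APP_CALLER_SLOTS exceeds MEMSTAT_MAXCALLER"
+ *	#endif
+ */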
+
+/*
+ * libmemstat(3) is able to extract memory data from different allocators;
+ * when it does so, it tags which allocator it got the data from so that
+ * consumers can determine which fields are usable, as data returned varies
+ * some.
+ */
+#define ALLOCATOR_UNKNOWN 0
+#define ALLOCATOR_MALLOC 1
+#define ALLOCATOR_UMA 2
+#define ALLOCATOR_ANY 255
+
+/*
+ * Library maximum type name. Should be max(set of name maximums over
+ * various allocators).
+ */
+#define MEMTYPE_MAXNAME 32
+
+/*
+ * Library error conditions, mostly from the underlying data sources. On
+ * failure, functions typically return (-1) or (NULL); on success, (0) or a
+ * valid data pointer. The error from the last operation is stored in
+ * struct memory_type_list, and accessed via memstat_mtl_geterror(list).
+ */
+#define MEMSTAT_ERROR_UNDEFINED 0 /* Initialization value. */
+#define MEMSTAT_ERROR_NOMEMORY 1 /* Out of memory. */
+#define MEMSTAT_ERROR_VERSION 2 /* Unsupported version. */
+#define MEMSTAT_ERROR_PERMISSION 3 /* Permission denied. */
+#define MEMSTAT_ERROR_TOOMANYCPUS 4 /* Too many CPUs. */
+#define MEMSTAT_ERROR_DATAERROR 5 /* Error in stat data. */
+#define MEMSTAT_ERROR_KVM 6 /* See kvm_geterr() for err. */
+#define MEMSTAT_ERROR_KVM_NOSYMBOL 7 /* Symbol not available. */
+#define MEMSTAT_ERROR_KVM_SHORTREAD 8 /* Short kvm_read return. */
+
+/*
+ * Forward declare struct memory_type, which holds per-type properties and
+ * statistics. This is an opaque type, to be frobbed only from within the
+ * library, in order to avoid building ABI assumptions into the application.
+ * Accessor methods should be used to get and sometimes set the fields from
+ * consumers of the library.
+ */
+struct memory_type;
+
+/*
+ * struct memory_type_list is the head of a list of memory types and
+ * statistics.
+ */
+struct memory_type_list;
+
+__BEGIN_DECLS
+/*
+ * Functions that operate without memory type or memory type list context.
+ */
+const char *memstat_strerror(int error);
+
+/*
+ * Functions for managing memory type and statistics data.
+ */
+struct memory_type_list *memstat_mtl_alloc(void);
+struct memory_type *memstat_mtl_first(struct memory_type_list *list);
+struct memory_type *memstat_mtl_next(struct memory_type *mtp);
+struct memory_type *memstat_mtl_find(struct memory_type_list *list,
+ int allocator, const char *name);
+void memstat_mtl_free(struct memory_type_list *list);
+int memstat_mtl_geterror(struct memory_type_list *list);
+
+/*
+ * Functions to retrieve data from a live kernel using sysctl.
+ */
+int memstat_sysctl_all(struct memory_type_list *list, int flags);
+int memstat_sysctl_malloc(struct memory_type_list *list, int flags);
+int memstat_sysctl_uma(struct memory_type_list *list, int flags);
+
+/*
+ * Functions to retrieve data from a kernel core (or /dev/kmem).
+ */
+int memstat_kvm_all(struct memory_type_list *list, void *kvm_handle);
+int memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle);
+int memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle);
+
+/*
+ * Accessor methods for struct memory_type.
+ */
+const char *memstat_get_name(const struct memory_type *mtp);
+int memstat_get_allocator(const struct memory_type *mtp);
+uint64_t memstat_get_countlimit(const struct memory_type *mtp);
+uint64_t memstat_get_byteslimit(const struct memory_type *mtp);
+uint64_t memstat_get_sizemask(const struct memory_type *mtp);
+uint64_t memstat_get_size(const struct memory_type *mtp);
+uint64_t memstat_get_memalloced(const struct memory_type *mtp);
+uint64_t memstat_get_memfreed(const struct memory_type *mtp);
+uint64_t memstat_get_numallocs(const struct memory_type *mtp);
+uint64_t memstat_get_numfrees(const struct memory_type *mtp);
+uint64_t memstat_get_bytes(const struct memory_type *mtp);
+uint64_t memstat_get_count(const struct memory_type *mtp);
+uint64_t memstat_get_free(const struct memory_type *mtp);
+uint64_t memstat_get_failures(const struct memory_type *mtp);
+void *memstat_get_caller_pointer(const struct memory_type *mtp,
+ int index);
+void memstat_set_caller_pointer(struct memory_type *mtp,
+ int index, void *value);
+uint64_t memstat_get_caller_uint64(const struct memory_type *mtp,
+ int index);
+void memstat_set_caller_uint64(struct memory_type *mtp, int index,
+ uint64_t value);
+uint64_t memstat_get_zonefree(const struct memory_type *mtp);
+uint64_t memstat_get_kegfree(const struct memory_type *mtp);
+uint64_t memstat_get_percpu_memalloced(const struct memory_type *mtp,
+ int cpu);
+uint64_t memstat_get_percpu_memfreed(const struct memory_type *mtp,
+ int cpu);
+uint64_t memstat_get_percpu_numallocs(const struct memory_type *mtp,
+ int cpu);
+uint64_t memstat_get_percpu_numfrees(const struct memory_type *mtp,
+ int cpu);
+uint64_t memstat_get_percpu_sizemask(const struct memory_type *mtp,
+ int cpu);
+void *memstat_get_percpu_caller_pointer(
+ const struct memory_type *mtp, int cpu, int index);
+void memstat_set_percpu_caller_pointer(struct memory_type *mtp,
+ int cpu, int index, void *value);
+uint64_t memstat_get_percpu_caller_uint64(
+ const struct memory_type *mtp, int cpu, int index);
+void memstat_set_percpu_caller_uint64(struct memory_type *mtp,
+ int cpu, int index, uint64_t value);
+uint64_t memstat_get_percpu_free(const struct memory_type *mtp,
+ int cpu);
+__END_DECLS
+
+#endif /* !_MEMSTAT_H_ */
diff --git a/lib/libmemstat/memstat_all.c b/lib/libmemstat/memstat_all.c
new file mode 100644
index 0000000..bd74b8a
--- /dev/null
+++ b/lib/libmemstat/memstat_all.c
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2005 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+
+#include "memstat.h"
+
+/*
+ * Query all available memory allocator sources. Currently this consists of
+ * malloc(9) and uma(9).
+ */
+int
+memstat_sysctl_all(struct memory_type_list *mtlp, int flags)
+{
+
+ if (memstat_sysctl_malloc(mtlp, flags) < 0)
+ return (-1);
+ if (memstat_sysctl_uma(mtlp, flags) < 0)
+ return (-1);
+ return (0);
+}
+
+int
+memstat_kvm_all(struct memory_type_list *mtlp, void *kvm_handle)
+{
+
+ if (memstat_kvm_malloc(mtlp, kvm_handle) < 0)
+ return (-1);
+ if (memstat_kvm_uma(mtlp, kvm_handle) < 0)
+ return (-1);
+ return (0);
+}
diff --git a/lib/libmemstat/memstat_internal.h b/lib/libmemstat/memstat_internal.h
new file mode 100644
index 0000000..7123518
--- /dev/null
+++ b/lib/libmemstat/memstat_internal.h
@@ -0,0 +1,124 @@
+/*-
+ * Copyright (c) 2005 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MEMSTAT_INTERNAL_H_
+#define _MEMSTAT_INTERNAL_H_
+
+/*
+ * memstat maintains its own internal notion of statistics on each memory
+ * type, common across UMA and kernel malloc. Some fields are straight from
+ * the allocator statistics, others are derived when extracted from the
+ * kernel. A struct memory_type will describe each type supported by an
+ * allocator. memory_type structures can be chained into lists.
+ */
+struct memory_type {
+ /*
+ * Static properties of type.
+ */
+ int mt_allocator; /* malloc(9), uma(9), etc. */
+ char mt_name[MEMTYPE_MAXNAME]; /* name of memory type. */
+
+ /*
+ * (Relatively) static zone settings, that don't uniquely identify
+ * the zone, but also don't change much.
+ */
+ uint64_t mt_countlimit; /* 0, or maximum allocations. */
+ uint64_t mt_byteslimit; /* 0, or maximum bytes. */
+ uint64_t mt_sizemask; /* malloc: allocated size bitmask. */
+ uint64_t mt_size; /* uma: size of objects. */
+
+ /*
+ * Zone or type information that includes all caches and any central
+ * zone state. Depending on the allocator, this may be synthesized
+ * from several sources, or directly measured.
+ */
+ uint64_t mt_memalloced; /* Bytes allocated over life time. */
+ uint64_t mt_memfreed; /* Bytes freed over life time. */
+ uint64_t mt_numallocs; /* Allocations over life time. */
+ uint64_t mt_numfrees; /* Frees over life time. */
+ uint64_t mt_bytes; /* Bytes currently allocated. */
+ uint64_t mt_count; /* Number of current allocations. */
+ uint64_t mt_free; /* Number of cached free items. */
+ uint64_t mt_failures; /* Number of allocation failures. */
+
+ /*
+ * Caller-owned memory.
+ */
+ void *mt_caller_pointer[MEMSTAT_MAXCALLER]; /* Pointers. */
+ uint64_t mt_caller_uint64[MEMSTAT_MAXCALLER]; /* Integers. */
+
+ /*
+ * For allocators making use of per-CPU caches, we also provide raw
+ * statistics from the central allocator and each per-CPU cache,
+ * which (combined) sometimes make up the above general statistics.
+ *
+ * First, central zone/type state, all numbers excluding any items
+ * cached in per-CPU caches.
+ *
+ * XXXRW: Might be desirable to separately expose allocation stats
+ * from zone, which should (combined with per-cpu) add up to the
+ * global stats above.
+ */
+ uint64_t mt_zonefree; /* Free items in zone. */
+ uint64_t mt_kegfree; /* Free items in keg. */
+
+ /*
+ * Per-CPU measurements fall into two categories: per-CPU allocation,
+ * and per-CPU cache state.
+ */
+ struct {
+ uint64_t mtp_memalloced;/* Per-CPU mt_memalloced. */
+ uint64_t mtp_memfreed; /* Per-CPU mt_memfreed. */
+ uint64_t mtp_numallocs; /* Per-CPU mt_numallocs. */
+ uint64_t mtp_numfrees; /* Per-CPU mt_numfrees. */
+ uint64_t mtp_sizemask; /* Per-CPU mt_sizemask. */
+ void *mtp_caller_pointer[MEMSTAT_MAXCALLER];
+ uint64_t mtp_caller_uint64[MEMSTAT_MAXCALLER];
+ } mt_percpu_alloc[MEMSTAT_MAXCPU];
+
+ struct {
+ uint64_t mtp_free; /* Per-CPU cache free items. */
+ } mt_percpu_cache[MEMSTAT_MAXCPU];
+
+ LIST_ENTRY(memory_type) mt_list; /* List of types. */
+};
+
+/*
+ * Description of struct memory_type_list is in memstat.h.
+ */
+struct memory_type_list {
+ LIST_HEAD(, memory_type) mtl_list;
+ int mtl_error;
+};
+
+void _memstat_mtl_empty(struct memory_type_list *list);
+struct memory_type *_memstat_mt_allocate(struct memory_type_list *list,
+ int allocator, const char *name);
+void _memstat_mt_reset_stats(struct memory_type *mtp);
+
+#endif /* !_MEMSTAT_INTERNAL_H_ */
diff --git a/lib/libmemstat/memstat_malloc.c b/lib/libmemstat/memstat_malloc.c
new file mode 100644
index 0000000..70320f5
--- /dev/null
+++ b/lib/libmemstat/memstat_malloc.c
@@ -0,0 +1,409 @@
+/*-
+ * Copyright (c) 2005 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+
+#include <err.h>
+#include <errno.h>
+#include <kvm.h>
+#include <nlist.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "memstat.h"
+#include "memstat_internal.h"
+
+static struct nlist namelist[] = {
+#define X_KMEMSTATISTICS 0
+ { .n_name = "_kmemstatistics" },
+#define X_MP_MAXCPUS 1
+ { .n_name = "_mp_maxcpus" },
+ { .n_name = "" },
+};
+
+/*
+ * Extract malloc(9) statistics from the running kernel, and store all memory
+ * type information in the passed list. For each type, check the list for an
+ * existing entry with the right name/allocator -- if present, update that
+ * entry. Otherwise, add a new entry. On error, the entire list will be
+ * cleared, as entries will be in an inconsistent state.
+ *
+ * To reduce the level of work for a list that starts empty, we keep around a
+ * hint as to whether it was empty when we began, so we can avoid searching
+ * the list for entries to update. Updates are O(n^2) due to searching for
+ * each entry before adding it.
+ */
+int
+memstat_sysctl_malloc(struct memory_type_list *list, int flags)
+{
+ struct malloc_type_stream_header *mtshp;
+ struct malloc_type_header *mthp;
+ struct malloc_type_stats *mtsp;
+ struct memory_type *mtp;
+ int count, hint_dontsearch, i, j, maxcpus;
+ char *buffer, *p;
+ size_t size;
+
+ hint_dontsearch = LIST_EMPTY(&list->mtl_list);
+
+ /*
+ * Query the number of CPUs and the number of malloc types so that we can
+ * guess an initial buffer size. We loop until we succeed or really
+ * fail. Note that the value of maxcpus we query using sysctl is not
+ * the version we use when processing the real data -- that is read
+ * from the header.
+ */
+retry:
+ size = sizeof(maxcpus);
+ if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
+ if (errno == EACCES || errno == EPERM)
+ list->mtl_error = MEMSTAT_ERROR_PERMISSION;
+ else
+ list->mtl_error = MEMSTAT_ERROR_DATAERROR;
+ return (-1);
+ }
+ if (size != sizeof(maxcpus)) {
+ list->mtl_error = MEMSTAT_ERROR_DATAERROR;
+ return (-1);
+ }
+
+ if (maxcpus > MEMSTAT_MAXCPU) {
+ list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
+ return (-1);
+ }
+
+ size = sizeof(count);
+ if (sysctlbyname("kern.malloc_count", &count, &size, NULL, 0) < 0) {
+ if (errno == EACCES || errno == EPERM)
+ list->mtl_error = MEMSTAT_ERROR_PERMISSION;
+ else
+ list->mtl_error = MEMSTAT_ERROR_VERSION;
+ return (-1);
+ }
+ if (size != sizeof(count)) {
+ list->mtl_error = MEMSTAT_ERROR_DATAERROR;
+ return (-1);
+ }
+
+ size = sizeof(*mtshp) + count * (sizeof(*mthp) + sizeof(*mtsp) *
+ maxcpus);
+
+ buffer = malloc(size);
+ if (buffer == NULL) {
+ list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
+ return (-1);
+ }
+
+ if (sysctlbyname("kern.malloc_stats", buffer, &size, NULL, 0) < 0) {
+ /*
+ * XXXRW: ENOMEM is an ambiguous return, we should bound the
+ * number of loops, perhaps.
+ */
+ if (errno == ENOMEM) {
+ free(buffer);
+ goto retry;
+ }
+ if (errno == EACCES || errno == EPERM)
+ list->mtl_error = MEMSTAT_ERROR_PERMISSION;
+ else
+ list->mtl_error = MEMSTAT_ERROR_VERSION;
+ free(buffer);
+ return (-1);
+ }
+
+ if (size == 0) {
+ free(buffer);
+ return (0);
+ }
+
+ if (size < sizeof(*mtshp)) {
+ list->mtl_error = MEMSTAT_ERROR_VERSION;
+ free(buffer);
+ return (-1);
+ }
+ p = buffer;
+ mtshp = (struct malloc_type_stream_header *)p;
+ p += sizeof(*mtshp);
+
+ if (mtshp->mtsh_version != MALLOC_TYPE_STREAM_VERSION) {
+ list->mtl_error = MEMSTAT_ERROR_VERSION;
+ free(buffer);
+ return (-1);
+ }
+
+ if (mtshp->mtsh_maxcpus > MEMSTAT_MAXCPU) {
+ list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
+ free(buffer);
+ return (-1);
+ }
+
+ /*
+ * For the remainder of this function, we are quite trusting about
+ * the layout of structures and sizes, since we've determined we have
+ * a matching version and acceptable CPU count.
+ */
+ maxcpus = mtshp->mtsh_maxcpus;
+ count = mtshp->mtsh_count;
+ for (i = 0; i < count; i++) {
+ mthp = (struct malloc_type_header *)p;
+ p += sizeof(*mthp);
+
+ if (hint_dontsearch == 0) {
+ mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC,
+ mthp->mth_name);
+ } else
+ mtp = NULL;
+ if (mtp == NULL)
+ mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
+ mthp->mth_name);
+ if (mtp == NULL) {
+ _memstat_mtl_empty(list);
+ free(buffer);
+ list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
+ return (-1);
+ }
+
+ /*
+ * Reset the statistics on the current node.
+ */
+ _memstat_mt_reset_stats(mtp);
+
+ for (j = 0; j < maxcpus; j++) {
+ mtsp = (struct malloc_type_stats *)p;
+ p += sizeof(*mtsp);
+
+ /*
+ * Summarize raw statistics across CPUs into coalesced
+ * statistics.
+ */
+ mtp->mt_memalloced += mtsp->mts_memalloced;
+ mtp->mt_memfreed += mtsp->mts_memfreed;
+ mtp->mt_numallocs += mtsp->mts_numallocs;
+ mtp->mt_numfrees += mtsp->mts_numfrees;
+ mtp->mt_sizemask |= mtsp->mts_size;
+
+ /*
+ * Copies of per-CPU statistics.
+ */
+ mtp->mt_percpu_alloc[j].mtp_memalloced =
+ mtsp->mts_memalloced;
+ mtp->mt_percpu_alloc[j].mtp_memfreed =
+ mtsp->mts_memfreed;
+ mtp->mt_percpu_alloc[j].mtp_numallocs =
+ mtsp->mts_numallocs;
+ mtp->mt_percpu_alloc[j].mtp_numfrees =
+ mtsp->mts_numfrees;
+ mtp->mt_percpu_alloc[j].mtp_sizemask =
+ mtsp->mts_size;
+ }
+
+ /*
+ * Derived cross-CPU statistics.
+ */
+ mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
+ mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
+ }
+
+ free(buffer);
+
+ return (0);
+}
+
+static int
+kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
+ size_t offset)
+{
+ ssize_t ret;
+
+ ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
+ size);
+ if (ret < 0)
+ return (MEMSTAT_ERROR_KVM);
+ if ((size_t)ret != size)
+ return (MEMSTAT_ERROR_KVM_SHORTREAD);
+ return (0);
+}
+
+static int
+kread_string(kvm_t *kvm, const void *kvm_pointer, char *buffer, int buflen)
+{
+ ssize_t ret;
+ int i;
+
+ for (i = 0; i < buflen; i++) {
+ ret = kvm_read(kvm, __DECONST(unsigned long, kvm_pointer) +
+ i, &(buffer[i]), sizeof(char));
+ if (ret < 0)
+ return (MEMSTAT_ERROR_KVM);
+ if ((size_t)ret != sizeof(char))
+ return (MEMSTAT_ERROR_KVM_SHORTREAD);
+ if (buffer[i] == '\0')
+ return (0);
+ }
+ /* Truncate. */
+ buffer[i-1] = '\0';
+ return (0);
+}
+
+static int
+kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
+ size_t offset)
+{
+ ssize_t ret;
+
+ ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
+ if (ret < 0)
+ return (MEMSTAT_ERROR_KVM);
+ if ((size_t)ret != size)
+ return (MEMSTAT_ERROR_KVM_SHORTREAD);
+ return (0);
+}
+
+int
+memstat_kvm_malloc(struct memory_type_list *list, void *kvm_handle)
+{
+ struct memory_type *mtp;
+ void *kmemstatistics;
+ int hint_dontsearch, j, mp_maxcpus, ret;
+ char name[MEMTYPE_MAXNAME];
+ struct malloc_type_stats mts[MEMSTAT_MAXCPU], *mtsp;
+ struct malloc_type type, *typep;
+ kvm_t *kvm;
+
+ kvm = (kvm_t *)kvm_handle;
+
+ hint_dontsearch = LIST_EMPTY(&list->mtl_list);
+
+ if (kvm_nlist(kvm, namelist) != 0) {
+ list->mtl_error = MEMSTAT_ERROR_KVM;
+ return (-1);
+ }
+
+ if (namelist[X_KMEMSTATISTICS].n_type == 0 ||
+ namelist[X_KMEMSTATISTICS].n_value == 0) {
+ list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
+ return (-1);
+ }
+
+ ret = kread_symbol(kvm, X_MP_MAXCPUS, &mp_maxcpus,
+ sizeof(mp_maxcpus), 0);
+ if (ret != 0) {
+ list->mtl_error = ret;
+ return (-1);
+ }
+
+ if (mp_maxcpus > MEMSTAT_MAXCPU) {
+ list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
+ return (-1);
+ }
+
+ ret = kread_symbol(kvm, X_KMEMSTATISTICS, &kmemstatistics,
+ sizeof(kmemstatistics), 0);
+ if (ret != 0) {
+ list->mtl_error = ret;
+ return (-1);
+ }
+
+ for (typep = kmemstatistics; typep != NULL; typep = type.ks_next) {
+ ret = kread(kvm, typep, &type, sizeof(type), 0);
+ if (ret != 0) {
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+ ret = kread_string(kvm, (void *)type.ks_shortdesc, name,
+ MEMTYPE_MAXNAME);
+ if (ret != 0) {
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+
+ /*
+ * Take advantage of explicit knowledge that
+ * malloc_type_internal is simply an array of MAXCPU
+ * statistics structures.  Since our compile-time
+ * value for MAXCPU may differ from the kernel's, we
+ * populate our own array.
+ */
+ ret = kread(kvm, type.ks_handle, mts, mp_maxcpus *
+ sizeof(struct malloc_type_stats), 0);
+ if (ret != 0) {
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+
+ if (hint_dontsearch == 0) {
+ mtp = memstat_mtl_find(list, ALLOCATOR_MALLOC, name);
+ } else
+ mtp = NULL;
+ if (mtp == NULL)
+ mtp = _memstat_mt_allocate(list, ALLOCATOR_MALLOC,
+ name);
+ if (mtp == NULL) {
+ _memstat_mtl_empty(list);
+ list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
+ return (-1);
+ }
+
+ /*
+ * This logic is replicated from kern_malloc.c, and should
+ * be kept in sync.
+ */
+ _memstat_mt_reset_stats(mtp);
+ for (j = 0; j < mp_maxcpus; j++) {
+ mtsp = &mts[j];
+ mtp->mt_memalloced += mtsp->mts_memalloced;
+ mtp->mt_memfreed += mtsp->mts_memfreed;
+ mtp->mt_numallocs += mtsp->mts_numallocs;
+ mtp->mt_numfrees += mtsp->mts_numfrees;
+ mtp->mt_sizemask |= mtsp->mts_size;
+
+ mtp->mt_percpu_alloc[j].mtp_memalloced =
+ mtsp->mts_memalloced;
+ mtp->mt_percpu_alloc[j].mtp_memfreed =
+ mtsp->mts_memfreed;
+ mtp->mt_percpu_alloc[j].mtp_numallocs =
+ mtsp->mts_numallocs;
+ mtp->mt_percpu_alloc[j].mtp_numfrees =
+ mtsp->mts_numfrees;
+ mtp->mt_percpu_alloc[j].mtp_sizemask =
+ mtsp->mts_size;
+ }
+
+ mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
+ mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
+ }
+
+ return (0);
+}
diff --git a/lib/libmemstat/memstat_uma.c b/lib/libmemstat/memstat_uma.c
new file mode 100644
index 0000000..b24721f
--- /dev/null
+++ b/lib/libmemstat/memstat_uma.c
@@ -0,0 +1,465 @@
+/*-
+ * Copyright (c) 2005-2006 Robert N. M. Watson
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/sysctl.h>
+
+#define LIBMEMSTAT /* Cause vm_page.h not to include opt_vmpage.h */
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <vm/uma.h>
+#include <vm/uma_int.h>
+
+#include <err.h>
+#include <errno.h>
+#include <kvm.h>
+#include <nlist.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "memstat.h"
+#include "memstat_internal.h"
+
+static struct nlist namelist[] = {
+#define X_UMA_KEGS 0
+ { .n_name = "_uma_kegs" },
+#define X_MP_MAXID 1
+ { .n_name = "_mp_maxid" },
+#define X_ALL_CPUS 2
+ { .n_name = "_all_cpus" },
+ { .n_name = "" },
+};
+
+/*
+ * Extract uma(9) statistics from the running kernel, and store all memory
+ * type information in the passed list. For each type, check the list for an
+ * existing entry with the right name/allocator -- if present, update that
+ * entry. Otherwise, add a new entry. On error, the entire list will be
+ * cleared, as entries will be in an inconsistent state.
+ *
+ * To reduce the amount of work for a list that starts empty, we keep around
+ * a hint as to whether it was empty when we began, so that we can avoid
+ * searching the list for entries to update. Updates are otherwise O(n^2),
+ * since each entry is searched for before it is added.
+ */
+int
+memstat_sysctl_uma(struct memory_type_list *list, int flags)
+{
+ struct uma_stream_header *ushp;
+ struct uma_type_header *uthp;
+ struct uma_percpu_stat *upsp;
+ struct memory_type *mtp;
+ int count, hint_dontsearch, i, j, maxcpus;
+ char *buffer, *p;
+ size_t size;
+
+ hint_dontsearch = LIST_EMPTY(&list->mtl_list);
+
+ /*
+ * Query the number of CPUs and the number of zones so that we can
+ * guess an initial buffer size. We loop until we succeed or really
+ * fail. Note that the value of maxcpus we query using sysctl is not
+ * the version we use when processing the real data -- that is read
+ * from the header.
+ */
+retry:
+ size = sizeof(maxcpus);
+ if (sysctlbyname("kern.smp.maxcpus", &maxcpus, &size, NULL, 0) < 0) {
+ if (errno == EACCES || errno == EPERM)
+ list->mtl_error = MEMSTAT_ERROR_PERMISSION;
+ else
+ list->mtl_error = MEMSTAT_ERROR_DATAERROR;
+ return (-1);
+ }
+ if (size != sizeof(maxcpus)) {
+ list->mtl_error = MEMSTAT_ERROR_DATAERROR;
+ return (-1);
+ }
+
+ if (maxcpus > MEMSTAT_MAXCPU) {
+ list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
+ return (-1);
+ }
+
+ size = sizeof(count);
+ if (sysctlbyname("vm.zone_count", &count, &size, NULL, 0) < 0) {
+ if (errno == EACCES || errno == EPERM)
+ list->mtl_error = MEMSTAT_ERROR_PERMISSION;
+ else
+ list->mtl_error = MEMSTAT_ERROR_VERSION;
+ return (-1);
+ }
+ if (size != sizeof(count)) {
+ list->mtl_error = MEMSTAT_ERROR_DATAERROR;
+ return (-1);
+ }
+
+ size = sizeof(*ushp) + count * (sizeof(*uthp) + sizeof(*upsp) *
+ maxcpus);
+
+ buffer = malloc(size);
+ if (buffer == NULL) {
+ list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
+ return (-1);
+ }
+
+ if (sysctlbyname("vm.zone_stats", buffer, &size, NULL, 0) < 0) {
+ /*
+ * XXXRW: ENOMEM is an ambiguous return; we should perhaps
+ * bound the number of retries.
+ */
+ if (errno == ENOMEM) {
+ free(buffer);
+ goto retry;
+ }
+ if (errno == EACCES || errno == EPERM)
+ list->mtl_error = MEMSTAT_ERROR_PERMISSION;
+ else
+ list->mtl_error = MEMSTAT_ERROR_VERSION;
+ free(buffer);
+ return (-1);
+ }
+
+ if (size == 0) {
+ free(buffer);
+ return (0);
+ }
+
+ if (size < sizeof(*ushp)) {
+ list->mtl_error = MEMSTAT_ERROR_VERSION;
+ free(buffer);
+ return (-1);
+ }
+ p = buffer;
+ ushp = (struct uma_stream_header *)p;
+ p += sizeof(*ushp);
+
+ if (ushp->ush_version != UMA_STREAM_VERSION) {
+ list->mtl_error = MEMSTAT_ERROR_VERSION;
+ free(buffer);
+ return (-1);
+ }
+
+ if (ushp->ush_maxcpus > MEMSTAT_MAXCPU) {
+ list->mtl_error = MEMSTAT_ERROR_TOOMANYCPUS;
+ free(buffer);
+ return (-1);
+ }
+
+ /*
+ * For the remainder of this function, we are quite trusting about
+ * the layout of structures and sizes, since we've determined we have
+ * a matching version and acceptable CPU count.
+ */
+ maxcpus = ushp->ush_maxcpus;
+ count = ushp->ush_count;
+ for (i = 0; i < count; i++) {
+ uthp = (struct uma_type_header *)p;
+ p += sizeof(*uthp);
+
+ if (hint_dontsearch == 0) {
+ mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
+ uthp->uth_name);
+ } else
+ mtp = NULL;
+ if (mtp == NULL)
+ mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
+ uthp->uth_name);
+ if (mtp == NULL) {
+ _memstat_mtl_empty(list);
+ free(buffer);
+ list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
+ return (-1);
+ }
+
+ /*
+ * Reset the statistics for the current node.
+ */
+ _memstat_mt_reset_stats(mtp);
+
+ mtp->mt_numallocs = uthp->uth_allocs;
+ mtp->mt_numfrees = uthp->uth_frees;
+ mtp->mt_failures = uthp->uth_fails;
+
+ for (j = 0; j < maxcpus; j++) {
+ upsp = (struct uma_percpu_stat *)p;
+ p += sizeof(*upsp);
+
+ mtp->mt_percpu_cache[j].mtp_free =
+ upsp->ups_cache_free;
+ mtp->mt_free += upsp->ups_cache_free;
+ mtp->mt_numallocs += upsp->ups_allocs;
+ mtp->mt_numfrees += upsp->ups_frees;
+ }
+
+ mtp->mt_size = uthp->uth_size;
+ mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size;
+ mtp->mt_memfreed = mtp->mt_numfrees * uthp->uth_size;
+ mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
+ mtp->mt_countlimit = uthp->uth_limit;
+ mtp->mt_byteslimit = uthp->uth_limit * uthp->uth_size;
+
+ mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
+ mtp->mt_zonefree = uthp->uth_zone_free;
+
+ /*
+ * UMA secondary zones share a keg with the primary zone. To
+ * avoid double-reporting of free items, report keg free
+ * items only in the primary zone.
+ */
+ if (!(uthp->uth_zone_flags & UTH_ZONE_SECONDARY)) {
+ mtp->mt_kegfree = uthp->uth_keg_free;
+ mtp->mt_free += mtp->mt_kegfree;
+ }
+ mtp->mt_free += mtp->mt_zonefree;
+ }
+
+ free(buffer);
+
+ return (0);
+}
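+
+/*
+ * Example (not part of the library): a minimal consumer of
+ * memstat_sysctl_uma(). The memstat_get_name() and memstat_get_count()
+ * accessors are assumed to be those declared in memstat.h:
+ *
+ *	struct memory_type_list *mtlp;
+ *	struct memory_type *mtp;
+ *
+ *	mtlp = memstat_mtl_alloc();
+ *	if (mtlp == NULL)
+ *		err(1, "memstat_mtl_alloc");
+ *	if (memstat_sysctl_uma(mtlp, 0) < 0)
+ *		errx(1, "memstat_sysctl_uma: %s",
+ *		    memstat_strerror(memstat_mtl_geterror(mtlp)));
+ *	for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
+ *	    mtp = memstat_mtl_next(mtp))
+ *		printf("%s: %ju allocations in use\n",
+ *		    memstat_get_name(mtp),
+ *		    (uintmax_t)memstat_get_count(mtp));
+ *	memstat_mtl_free(mtlp);
+ */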
+
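+/*
+ * Read an object of the given size from a kernel virtual address,
+ * optionally at a byte offset from the supplied pointer. Returns 0 on
+ * success or a MEMSTAT_ERROR_* value.
+ */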
+static int
+kread(kvm_t *kvm, void *kvm_pointer, void *address, size_t size,
+ size_t offset)
+{
+ ssize_t ret;
+
+ ret = kvm_read(kvm, (unsigned long)kvm_pointer + offset, address,
+ size);
+ if (ret < 0)
+ return (MEMSTAT_ERROR_KVM);
+ if ((size_t)ret != size)
+ return (MEMSTAT_ERROR_KVM_SHORTREAD);
+ return (0);
+}
+
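+/*
+ * Copy a nul-terminated string out of the kernel one byte at a time,
+ * stopping at the terminator, or truncating once buflen is reached.
+ */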
+static int
+kread_string(kvm_t *kvm, void *kvm_pointer, char *buffer, int buflen)
+{
+ ssize_t ret;
+ int i;
+
+ for (i = 0; i < buflen; i++) {
+ ret = kvm_read(kvm, (unsigned long)kvm_pointer + i,
+ &(buffer[i]), sizeof(char));
+ if (ret < 0)
+ return (MEMSTAT_ERROR_KVM);
+ if ((size_t)ret != sizeof(char))
+ return (MEMSTAT_ERROR_KVM_SHORTREAD);
+ if (buffer[i] == '\0')
+ return (0);
+ }
+ /* Truncate. */
+ buffer[i - 1] = '\0';
+ return (0);
+}
+
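+/*
+ * Read an object located at a symbol previously resolved into namelist,
+ * optionally at a byte offset from the symbol's address.
+ */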
+static int
+kread_symbol(kvm_t *kvm, int index, void *address, size_t size,
+ size_t offset)
+{
+ ssize_t ret;
+
+ ret = kvm_read(kvm, namelist[index].n_value + offset, address, size);
+ if (ret < 0)
+ return (MEMSTAT_ERROR_KVM);
+ if ((size_t)ret != size)
+ return (MEMSTAT_ERROR_KVM_SHORTREAD);
+ return (0);
+}
+
+/*
+ * memstat_kvm_uma() is similar to memstat_sysctl_uma(), except that it
+ * extracts uma(9) statistics from a kernel core/memory file.
+ */
+int
+memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
+{
+ LIST_HEAD(, uma_keg) uma_kegs;
+ struct memory_type *mtp;
+ struct uma_bucket *ubp, ub;
+ struct uma_cache *ucp, *ucp_array;
+ struct uma_zone *uzp, uz;
+ struct uma_keg *kzp, kz;
+ int hint_dontsearch, i, mp_maxid, ret;
+ char name[MEMTYPE_MAXNAME];
+ __cpumask_t all_cpus;
+ kvm_t *kvm;
+
+ kvm = (kvm_t *)kvm_handle;
+ hint_dontsearch = LIST_EMPTY(&list->mtl_list);
+ if (kvm_nlist(kvm, namelist) != 0) {
+ list->mtl_error = MEMSTAT_ERROR_KVM;
+ return (-1);
+ }
+ if (namelist[X_UMA_KEGS].n_type == 0 ||
+ namelist[X_UMA_KEGS].n_value == 0) {
+ list->mtl_error = MEMSTAT_ERROR_KVM_NOSYMBOL;
+ return (-1);
+ }
+ ret = kread_symbol(kvm, X_MP_MAXID, &mp_maxid, sizeof(mp_maxid), 0);
+ if (ret != 0) {
+ list->mtl_error = ret;
+ return (-1);
+ }
+ ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0);
+ if (ret != 0) {
+ list->mtl_error = ret;
+ return (-1);
+ }
+ ret = kread_symbol(kvm, X_ALL_CPUS, &all_cpus, sizeof(all_cpus), 0);
+ if (ret != 0) {
+ list->mtl_error = ret;
+ return (-1);
+ }
+ ucp_array = malloc(sizeof(struct uma_cache) * (mp_maxid + 1));
+ if (ucp_array == NULL) {
+ list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
+ return (-1);
+ }
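+ /*
+ * Walk the kernel's list of kegs. The list pointers are kernel
+ * addresses, so each element is copied out with kread() and the
+ * local copy is used to find the next element.
+ */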
+ for (kzp = LIST_FIRST(&uma_kegs); kzp != NULL; kzp =
+ LIST_NEXT(&kz, uk_link)) {
+ ret = kread(kvm, kzp, &kz, sizeof(kz), 0);
+ if (ret != 0) {
+ free(ucp_array);
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+ for (uzp = LIST_FIRST(&kz.uk_zones); uzp != NULL; uzp =
+ LIST_NEXT(&uz, uz_link)) {
+ ret = kread(kvm, uzp, &uz, sizeof(uz), 0);
+ if (ret != 0) {
+ free(ucp_array);
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+ ret = kread(kvm, uzp, ucp_array,
+ sizeof(struct uma_cache) * (mp_maxid + 1),
+ offsetof(struct uma_zone, uz_cpu[0]));
+ if (ret != 0) {
+ free(ucp_array);
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+ ret = kread_string(kvm, uz.uz_name, name,
+ MEMTYPE_MAXNAME);
+ if (ret != 0) {
+ free(ucp_array);
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+ if (hint_dontsearch == 0) {
+ mtp = memstat_mtl_find(list, ALLOCATOR_UMA,
+ name);
+ } else
+ mtp = NULL;
+ if (mtp == NULL)
+ mtp = _memstat_mt_allocate(list, ALLOCATOR_UMA,
+ name);
+ if (mtp == NULL) {
+ free(ucp_array);
+ _memstat_mtl_empty(list);
+ list->mtl_error = MEMSTAT_ERROR_NOMEMORY;
+ return (-1);
+ }
+ /*
+ * Reset the statistics for the current node.
+ */
+ _memstat_mt_reset_stats(mtp);
+ mtp->mt_numallocs = uz.uz_allocs;
+ mtp->mt_numfrees = uz.uz_frees;
+ mtp->mt_failures = uz.uz_fails;
+ if (kz.uk_flags & UMA_ZFLAG_INTERNAL)
+ goto skip_percpu;
+ for (i = 0; i < mp_maxid + 1; i++) {
+ if ((all_cpus & ((__cpumask_t)1 << i)) == 0)
+ continue;
+ ucp = &ucp_array[i];
+ mtp->mt_numallocs += ucp->uc_allocs;
+ mtp->mt_numfrees += ucp->uc_frees;
+
+ if (ucp->uc_allocbucket != NULL) {
+ ret = kread(kvm, ucp->uc_allocbucket,
+ &ub, sizeof(ub), 0);
+ if (ret != 0) {
+ free(ucp_array);
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+ mtp->mt_free += ub.ub_cnt;
+ }
+ if (ucp->uc_freebucket != NULL) {
+ ret = kread(kvm, ucp->uc_freebucket,
+ &ub, sizeof(ub), 0);
+ if (ret != 0) {
+ free(ucp_array);
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+ mtp->mt_free += ub.ub_cnt;
+ }
+ }
+skip_percpu:
+ mtp->mt_size = kz.uk_size;
+ mtp->mt_memalloced = mtp->mt_numallocs * mtp->mt_size;
+ mtp->mt_memfreed = mtp->mt_numfrees * mtp->mt_size;
+ mtp->mt_bytes = mtp->mt_memalloced - mtp->mt_memfreed;
+ if (kz.uk_ppera > 1)
+ mtp->mt_countlimit = kz.uk_maxpages /
+ kz.uk_ipers;
+ else
+ mtp->mt_countlimit = kz.uk_maxpages *
+ kz.uk_ipers;
+ mtp->mt_byteslimit = mtp->mt_countlimit * mtp->mt_size;
+ mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
+ for (ubp = LIST_FIRST(&uz.uz_full_bucket); ubp !=
+ NULL; ubp = LIST_NEXT(&ub, ub_link)) {
+ ret = kread(kvm, ubp, &ub, sizeof(ub), 0);
+ if (ret != 0) {
+ free(ucp_array);
+ _memstat_mtl_empty(list);
+ list->mtl_error = ret;
+ return (-1);
+ }
+ mtp->mt_zonefree += ub.ub_cnt;
+ }
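+ /*
+ * As in the sysctl path, report keg free items only in
+ * the primary zone so that they are not counted twice
+ * for secondary zones sharing the keg.
+ */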
+ if (!((kz.uk_flags & UMA_ZONE_SECONDARY) &&
+ LIST_FIRST(&kz.uk_zones) != uzp)) {
+ mtp->mt_kegfree = kz.uk_free;
+ mtp->mt_free += mtp->mt_kegfree;
+ }
+ mtp->mt_free += mtp->mt_zonefree;
+ }
+ }
+ free(ucp_array);
+ return (0);
+}
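+
+/*
+ * Example (not part of the library): a sketch of driving memstat_kvm_uma()
+ * against a crash dump. The kernel and core file paths are hypothetical,
+ * and mtlp is assumed to be a list returned by memstat_mtl_alloc():
+ *
+ *	kvm_t *kvm;
+ *	char errbuf[_POSIX2_LINE_MAX];
+ *
+ *	kvm = kvm_openfiles("/boot/kernel/kernel", "/var/crash/vmcore.0",
+ *	    NULL, O_RDONLY, errbuf);
+ *	if (kvm == NULL)
+ *		errx(1, "kvm_openfiles: %s", errbuf);
+ *	if (memstat_kvm_uma(mtlp, kvm) < 0)
+ *		errx(1, "memstat_kvm_uma: %s",
+ *		    memstat_strerror(memstat_mtl_geterror(mtlp)));
+ *	kvm_close(kvm);
+ */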