author     bmilekic <bmilekic@FreeBSD.org>	2001-09-30 01:58:39 +0000
committer  bmilekic <bmilekic@FreeBSD.org>	2001-09-30 01:58:39 +0000
commit     5b4fe25981cd9b92f1fba2da8c4ce7be3a7a37e1 (patch)
tree       f7d4110dfccc8085ec2e36a342d82da2c9a55ffe
parent     c605847554205a074c69591402a21d9edb6d06a7 (diff)
Re-enable mbtypes statistics in the mbuf allocator. I disabled these
when I changed the allocator bits.

This implements per-CPU mbtypes stats by keeping the net number of
decrements/increments of a given mbtype per-CPU and then summing all
of the per-CPU mbtypes to produce the total net number of allocated
mbufs of the given mbtype. Counters are carefully balanced to
avoid/prevent underflows/overflows.

mbtypes stats are re-enabled with the idea that we may occasionally
(although very rarely) observe slight inconsistencies in the stat
reporting. Most of the time, we should be fine, though.

Also make the appropriate modifications to netstat(1) and systat(1)
to do the necessary reporting.

Submitted by:	Jiangyi Liu <jyliu@163.net>
-rw-r--r--  sys/kern/subr_mbuf.c    |  84
-rw-r--r--  sys/sys/mbuf.h          |  17
-rw-r--r--  usr.bin/netstat/mbuf.c  |  76
-rw-r--r--  usr.bin/systat/mbufs.c  |  63
4 files changed, 144 insertions(+), 96 deletions(-)
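
Before the diff itself, a small illustration of the accounting scheme described in the commit message may help. The sketch below is not kernel code: it is a minimal userland model, with made-up names (NCPU_SKETCH, NTYPES_SKETCH, mbtypes_net, and so on), of how a signed per-list net counter per mbuf type behaves. Allocation increments the counter on the list the object came from, freeing decrements the counter on whichever list the object is returned to, and a reader obtains the total for a type by summing across all lists; an individual per-CPU entry may legitimately go negative, which is why the reported totals are only approximately consistent at any instant.

/*
 * Minimal userland model of the per-CPU mbtypes accounting; all names
 * here are illustrative stand-ins, not identifiers from the tree.
 */
#include <stdio.h>

#define	NCPU_SKETCH	4	/* stand-in for NCPU */
#define	NTYPES_SKETCH	16	/* stand-in for MT_NTYPES */

/* Net (increments - decrements) per list, per type; entries may go negative. */
static long mbtypes_net[NCPU_SKETCH][NTYPES_SKETCH];

static void
sketch_alloc(int cpu, int type)
{
	mbtypes_net[cpu][type]++;	/* cf. MB_MBTYPES_INC(cnt_lst, type, 1) */
}

static void
sketch_free(int cpu, int type)
{
	mbtypes_net[cpu][type]--;	/* cf. MB_MBTYPES_DEC(cnt_lst, type, 1) */
}

/* Reader side: the total for a type is the sum of the per-list nets. */
static long
sketch_total(int type)
{
	long total;
	int cpu;

	total = 0;
	for (cpu = 0; cpu < NCPU_SKETCH; cpu++)
		total += mbtypes_net[cpu][type];
	return (total);
}

int
main(void)
{
	sketch_alloc(0, 1);	/* allocate an MT_DATA-like object on CPU 0 */
	sketch_alloc(1, 1);	/* and another on CPU 1 */
	sketch_free(0, 1);	/* free one of them back through CPU 0's list */
	printf("net allocated of type 1: %ld\n", sketch_total(1));	/* prints 1 */
	return (0);
}
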
diff --git a/sys/kern/subr_mbuf.c b/sys/kern/subr_mbuf.c
index 65095e0..c4e35d0 100644
--- a/sys/kern/subr_mbuf.c
+++ b/sys/kern/subr_mbuf.c
@@ -104,6 +104,7 @@ struct mb_container {
struct mtx *mc_lock;
int mc_numowner;
u_int mc_starved;
+ long *mc_types;
u_long *mc_objcount;
u_long *mc_numpgs;
};
@@ -234,6 +235,14 @@ struct mtx mbuf_gen, mbuf_pcpu[NCPU];
(mb_bckt)->mb_numfree++; \
(*((mb_lst)->mb_cont.mc_objcount))++;
+#define MB_MBTYPES_INC(mb_cnt, mb_type, mb_num) \
+ if ((mb_type) != MT_NOTMBUF) \
+ (*((mb_cnt)->mb_cont.mc_types + (mb_type))) += (mb_num)
+
+#define MB_MBTYPES_DEC(mb_cnt, mb_type, mb_num) \
+ if ((mb_type) != MT_NOTMBUF) \
+ (*((mb_cnt)->mb_cont.mc_types + (mb_type))) -= (mb_num)
+
/*
* Ownership of buckets/containers is represented by integers. The PCPU
* lists range from 0 to NCPU-1. We need a free numerical id for the general
@@ -276,9 +285,9 @@ SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mb_statpcpu, CTLFLAG_RD, mb_statpcpu,
/*
* Prototypes of local allocator routines.
*/
-static __inline void *mb_alloc(struct mb_lstmngr *, int);
-void *mb_alloc_wait(struct mb_lstmngr *);
-static __inline void mb_free(struct mb_lstmngr *, void *);
+static __inline void *mb_alloc(struct mb_lstmngr *, int, short);
+void *mb_alloc_wait(struct mb_lstmngr *, short);
+static __inline void mb_free(struct mb_lstmngr *, void *, short);
static void mbuf_init(void *);
struct mb_bucket *mb_pop_cont(struct mb_lstmngr *, int,
struct mb_pcpu_list *);
@@ -379,6 +388,9 @@ mbuf_init(void *dummy)
&(mb_statpcpu[MB_GENLIST_OWNER].mb_mbpgs);
mb_list_clust.ml_genlist->mb_cont.mc_numpgs =
&(mb_statpcpu[MB_GENLIST_OWNER].mb_clpgs);
+ mb_list_mbuf.ml_genlist->mb_cont.mc_types =
+ &(mb_statpcpu[MB_GENLIST_OWNER].mb_mbtypes[0]);
+ mb_list_clust.ml_genlist->mb_cont.mc_types = NULL;
SLIST_INIT(&(mb_list_mbuf.ml_genlist->mb_cont.mc_bhead));
SLIST_INIT(&(mb_list_clust.ml_genlist->mb_cont.mc_bhead));
@@ -390,6 +402,7 @@ mbuf_init(void *dummy)
mbstat.m_minclsize = MINCLSIZE;
mbstat.m_mlen = MLEN;
mbstat.m_mhlen = MHLEN;
+ mbstat.m_numtypes = MT_NTYPES;
/*
* Allocate and initialize PCPU containers.
@@ -423,6 +436,9 @@ mbuf_init(void *dummy)
&(mb_statpcpu[i].mb_mbpgs);
mb_list_clust.ml_cntlst[i]->mb_cont.mc_numpgs =
&(mb_statpcpu[i].mb_clpgs);
+ mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_types =
+ &(mb_statpcpu[i].mb_mbtypes[0]);
+ mb_list_clust.ml_cntlst[i]->mb_cont.mc_types = NULL;
SLIST_INIT(&(mb_list_mbuf.ml_cntlst[i]->mb_cont.mc_bhead));
SLIST_INIT(&(mb_list_clust.ml_cntlst[i]->mb_cont.mc_bhead));
@@ -520,7 +536,7 @@ mb_pop_cont(struct mb_lstmngr *mb_list, int how, struct mb_pcpu_list *cnt_lst)
*/
static __inline
void *
-mb_alloc(struct mb_lstmngr *mb_list, int how)
+mb_alloc(struct mb_lstmngr *mb_list, int how, short type)
{
struct mb_pcpu_list *cnt_lst;
struct mb_bucket *bucket;
@@ -538,6 +554,7 @@ mb_alloc(struct mb_lstmngr *mb_list, int how)
* from the container.
*/
MB_GET_OBJECT(m, bucket, cnt_lst);
+ MB_MBTYPES_INC(cnt_lst, type, 1);
MB_UNLOCK_CONT(cnt_lst);
} else {
struct mb_gen_list *gen_list;
@@ -580,6 +597,7 @@ mb_alloc(struct mb_lstmngr *mb_list, int how)
bucket->mb_numfree;
}
MB_UNLOCK_CONT(gen_list);
+ MB_MBTYPES_INC(cnt_lst, type, 1);
MB_UNLOCK_CONT(cnt_lst);
} else {
/*
@@ -591,6 +609,7 @@ mb_alloc(struct mb_lstmngr *mb_list, int how)
bucket->mb_numfree--;
m = bucket->mb_free[(bucket->mb_numfree)];
(*(cnt_lst->mb_cont.mc_objcount))--;
+ MB_MBTYPES_INC(cnt_lst, type, 1);
MB_UNLOCK_CONT(cnt_lst);
} else {
if (how == M_TRYWAIT) {
@@ -600,7 +619,7 @@ mb_alloc(struct mb_lstmngr *mb_list, int how)
* steal from other lists.
*/
mb_list->ml_mapfull = 1;
- m = mb_alloc_wait(mb_list);
+ m = mb_alloc_wait(mb_list, type);
} else
/* XXX: No consistency. */
mbstat.m_drops++;
@@ -619,7 +638,7 @@ mb_alloc(struct mb_lstmngr *mb_list, int how)
* starved cv.
*/
void *
-mb_alloc_wait(struct mb_lstmngr *mb_list)
+mb_alloc_wait(struct mb_lstmngr *mb_list, short type)
{
struct mb_pcpu_list *cnt_lst;
struct mb_gen_list *gen_list;
@@ -649,6 +668,7 @@ mb_alloc_wait(struct mb_lstmngr *mb_list)
if ((bucket = SLIST_FIRST(&(cnt_lst->mb_cont.mc_bhead))) !=
NULL) {
MB_GET_OBJECT(m, bucket, cnt_lst);
+ MB_MBTYPES_INC(cnt_lst, type, 1);
MB_UNLOCK_CONT(cnt_lst);
mbstat.m_wait++; /* XXX: No consistency. */
return (m);
@@ -667,6 +687,7 @@ mb_alloc_wait(struct mb_lstmngr *mb_list)
MB_LOCK_CONT(gen_list);
if ((bucket = SLIST_FIRST(&(gen_list->mb_cont.mc_bhead))) != NULL) {
MB_GET_OBJECT(m, bucket, gen_list);
+ MB_MBTYPES_INC(gen_list, type, 1);
MB_UNLOCK_CONT(gen_list);
mbstat.m_wait++; /* XXX: No consistency. */
return (m);
@@ -680,6 +701,7 @@ mb_alloc_wait(struct mb_lstmngr *mb_list)
if ((cv_ret == 0) &&
((bucket = SLIST_FIRST(&(gen_list->mb_cont.mc_bhead))) != NULL)) {
MB_GET_OBJECT(m, bucket, gen_list);
+ MB_MBTYPES_INC(gen_list, type, 1);
mbstat.m_wait++; /* XXX: No consistency. */
} else {
mbstat.m_drops++; /* XXX: No consistency. */
@@ -706,7 +728,7 @@ mb_alloc_wait(struct mb_lstmngr *mb_list)
*/
static __inline
void
-mb_free(struct mb_lstmngr *mb_list, void *m)
+mb_free(struct mb_lstmngr *mb_list, void *m, short type)
{
struct mb_pcpu_list *cnt_lst;
struct mb_gen_list *gen_list;
@@ -737,6 +759,7 @@ retry_lock:
* dealing with the general list, but this is expected.
*/
MB_PUT_OBJECT(m, bucket, gen_list);
+ MB_MBTYPES_DEC(gen_list, type, 1);
if (gen_list->mb_cont.mc_starved > 0)
cv_signal(&(gen_list->mgl_mstarved));
MB_UNLOCK_CONT(gen_list);
@@ -751,6 +774,7 @@ retry_lock:
}
MB_PUT_OBJECT(m, bucket, cnt_lst);
+ MB_MBTYPES_DEC(cnt_lst, type, 1);
if (cnt_lst->mb_cont.mc_starved > 0) {
/*
@@ -823,6 +847,22 @@ retry_lock:
(*(cnt_lst->mb_cont.mc_numpgs))--;
(*(gen_list->mb_cont.mc_numpgs))++;
+ /*
+ * While we're at it, transfer some of the mbtypes
+ * "count load" onto the general list's mbtypes
+ * array, seeing as how we're moving the bucket
+ * there now, meaning that the freeing of objects
+ * there will now decrement the _general list's_
+ * mbtypes counters, and no longer our PCPU list's
+ * mbtypes counters. We do this for the type presently
+ * being freed in an effort to keep the mbtypes
+ * counters approximately balanced across all lists.
+ */
+ MB_MBTYPES_DEC(cnt_lst, type, (PAGE_SIZE /
+ mb_list->ml_objsize) - bucket->mb_numfree);
+ MB_MBTYPES_INC(gen_list, type, (PAGE_SIZE /
+ mb_list->ml_objsize) - bucket->mb_numfree);
+
MB_UNLOCK_CONT(gen_list);
MB_UNLOCK_CONT(cnt_lst);
break;
@@ -883,7 +923,7 @@ void _mext_free(struct mbuf *);
void _mclfree(struct mbuf *);
#define _m_get(m, how, type) do { \
- (m) = (struct mbuf *)mb_alloc(&mb_list_mbuf, (how)); \
+ (m) = (struct mbuf *)mb_alloc(&mb_list_mbuf, (how), (type)); \
if ((m) != NULL) { \
(m)->m_type = (type); \
(m)->m_next = NULL; \
@@ -894,7 +934,7 @@ void _mclfree(struct mbuf *);
} while (0)
#define _m_gethdr(m, how, type) do { \
- (m) = (struct mbuf *)mb_alloc(&mb_list_mbuf, (how)); \
+ (m) = (struct mbuf *)mb_alloc(&mb_list_mbuf, (how), (type)); \
if ((m) != NULL) { \
(m)->m_type = (type); \
(m)->m_next = NULL; \
@@ -916,7 +956,7 @@ void _mclfree(struct mbuf *);
m_freem((m)->m_pkthdr.aux); \
(m)->m_pkthdr.aux = NULL; \
} \
- mb_free(&mb_list_mbuf, (m)); \
+ mb_free(&mb_list_mbuf, (m), (m)->m_type); \
} while (0)
#define _mext_init_ref(m) do { \
@@ -935,7 +975,7 @@ _mext_free(struct mbuf *mb)
{
if (mb->m_ext.ext_type == EXT_CLUSTER)
- mb_free(&mb_list_clust, (caddr_t)mb->m_ext.ext_buf);
+ mb_free(&mb_list_clust, (caddr_t)mb->m_ext.ext_buf, MT_NOTMBUF);
else
(*(mb->m_ext.ext_free))(mb->m_ext.ext_buf, mb->m_ext.ext_args);
@@ -949,7 +989,7 @@ void
_mclfree(struct mbuf *mb)
{
- mb_free(&mb_list_clust, (caddr_t)mb->m_ext.ext_buf);
+ mb_free(&mb_list_clust, (caddr_t)mb->m_ext.ext_buf, MT_NOTMBUF);
mb->m_ext.ext_buf = NULL;
return;
}
@@ -1014,7 +1054,7 @@ void
m_clget(struct mbuf *mb, int how)
{
- mb->m_ext.ext_buf = (caddr_t)mb_alloc(&mb_list_clust, how);
+ mb->m_ext.ext_buf = (caddr_t)mb_alloc(&mb_list_clust, how, MT_NOTMBUF);
if (mb->m_ext.ext_buf != NULL) {
_mext_init_ref(mb);
if (mb->m_ext.ref_cnt == NULL)
@@ -1048,3 +1088,21 @@ m_extadd(struct mbuf *mb, caddr_t buf, u_int size,
}
return;
}
+
+/*
+ * Change type for mbuf `mb'; this is a relatively expensive operation and
+ * should be avoided.
+ */
+void
+m_chtype(struct mbuf *mb, short new_type)
+{
+ struct mb_gen_list *gen_list;
+
+ gen_list = MB_GET_GEN_LIST(&mb_list_mbuf);
+ MB_LOCK_CONT(gen_list);
+ MB_MBTYPES_DEC(gen_list, mb->m_type, 1);
+ MB_MBTYPES_INC(gen_list, new_type, 1);
+ MB_UNLOCK_CONT(gen_list);
+ mb->m_type = new_type;
+ return;
+}
diff --git a/sys/sys/mbuf.h b/sys/sys/mbuf.h
index e44bf4b..1f18173 100644
--- a/sys/sys/mbuf.h
+++ b/sys/sys/mbuf.h
@@ -185,7 +185,7 @@ struct mbuf {
/*
* mbuf types
*/
-#define MT_FREE 0 /* should be on free list */
+#define MT_NOTMBUF 0 /* USED INTERNALLY ONLY! Object is not mbuf */
#define MT_DATA 1 /* dynamic (data) allocation */
#define MT_HEADER 2 /* packet header */
#if 0
@@ -216,11 +216,12 @@ struct mbpstat {
u_long mb_mbpgs;
u_long mb_clfree;
u_long mb_clpgs;
+ long mb_mbtypes[MT_NTYPES];
short mb_active;
};
/*
- * General mbuf statistics structure.
+ * General mbuf allocator statistics structure.
* XXX: Modifications of these are not protected by any mutex locks nor by
* any atomic() manipulations. As a result, we may occasionally lose
* a count or two. Luckily, not all of these fields are modified at all
@@ -231,13 +232,15 @@ struct mbstat {
u_long m_drops; /* times failed to allocate */
u_long m_wait; /* times succesfully returned from wait */
u_long m_drain; /* times drained protocols for space */
- u_long m_mcfail; /* times m_copym failed */
- u_long m_mpfail; /* times m_pullup failed */
+ u_long m_mcfail; /* XXX: times m_copym failed */
+ u_long m_mpfail; /* XXX: times m_pullup failed */
u_long m_msize; /* length of an mbuf */
u_long m_mclbytes; /* length of an mbuf cluster */
u_long m_minclsize; /* min length of data to allocate a cluster */
u_long m_mlen; /* length of data in an mbuf */
u_long m_mhlen; /* length of data in a header mbuf */
+ short m_numtypes; /* number of mbtypes (gives # elems in mbpstat's
+ mb_mbtypes[] array. */
};
/*
@@ -393,9 +396,10 @@ struct mbstat {
} while (0)
/*
- * change mbuf to new type
+ * Change mbuf to new type.
+ * This is a relatively expensive operation and should be avoided.
*/
-#define MCHTYPE(m, t) (m)->m_type = (t)
+#define MCHTYPE(m, t) m_chtype((m), (t))
/* length to m_copy to copy all */
#define M_COPYALL 1000000000
@@ -430,6 +434,7 @@ void m_aux_delete(struct mbuf *, struct mbuf *);
struct mbuf *m_aux_find(struct mbuf *, int, int);
struct mbuf *m_aux_find2(struct mbuf *, int, int, void *);
void m_cat(struct mbuf *, struct mbuf *);
+void m_chtype(struct mbuf *, short);
void m_clget(struct mbuf *, int);
void m_extadd(struct mbuf *, caddr_t, u_int,
void (*free)(caddr_t, void *), void *, short, int);
diff --git a/usr.bin/netstat/mbuf.c b/usr.bin/netstat/mbuf.c
index 61e3e2f..0c1fdd4 100644
--- a/usr.bin/netstat/mbuf.c
+++ b/usr.bin/netstat/mbuf.c
@@ -54,10 +54,8 @@ static const char rcsid[] =
#define YES 1
typedef int bool;
-/* XXX: mbtypes stats temporarily disactivated. */
-#if 0
static struct mbtypenames {
- int mt_type;
+ short mt_type;
char *mt_name;
} mbtypenames[] = {
{ MT_DATA, "data" },
@@ -92,7 +90,6 @@ static struct mbtypenames {
#endif
{ 0, 0 }
};
-#endif /* 0 */
/*
* Print mbuf statistics.
@@ -102,42 +99,17 @@ mbpr(u_long mbaddr, u_long mbtaddr, u_long nmbcaddr, u_long nmbufaddr,
u_long mblimaddr, u_long cllimaddr, u_long cpusaddr, u_long pgsaddr,
u_long mbpaddr)
{
- int i, nmbufs, nmbclusters, page_size, num_objs;
+ int i, j, nmbufs, nmbclusters, page_size, num_objs;
u_int mbuf_limit, clust_limit;
u_long totspace[2], totused[2], totnum, totfree;
+ short nmbtypes;
size_t mlen;
+ long *mbtypes = NULL;
struct mbstat *mbstat = NULL;
struct mbpstat **mbpstat = NULL;
-
-/* XXX: mbtypes stats temporarily disabled. */
-#if 0
- int nmbtypes;
- size_t mbtypeslen;
struct mbtypenames *mp;
- u_long *mbtypes = NULL;
bool *seen = NULL;
- /*
- * XXX
- * We can't kread() mbtypeslen from a core image so we'll
- * bogusly assume it's the same as in the running kernel.
- */
- if (sysctlbyname("kern.ipc.mbtypes", NULL, &mbtypeslen, NULL, 0) < 0) {
- warn("sysctl: retrieving mbtypes length");
- goto err;
- }
- if ((mbtypes = malloc(mbtypeslen)) == NULL) {
- warn("malloc: %lu bytes for mbtypes", (u_long)mbtypeslen);
- goto err;
- }
-
- nmbtypes = mbtypeslen / sizeof(*mbtypes);
- if ((seen = calloc(nmbtypes, sizeof(*seen))) == NULL) {
- warn("calloc");
- goto err;
- }
-#endif
-
mlen = sizeof *mbstat;
if ((mbstat = malloc(mlen)) == NULL) {
warn("malloc: cannot allocate memory for mbstat");
@@ -168,10 +140,6 @@ mbpr(u_long mbaddr, u_long mbtaddr, u_long nmbcaddr, u_long nmbufaddr,
goto err;
if (kread(mbaddr, (char *)mbstat, sizeof mbstat))
goto err;
-#if 0
- if (kread(mbtaddr, (char *)mbtypes, mbtypeslen))
- goto err;
-#endif
if (kread(nmbcaddr, (char *)&nmbclusters, sizeof(int)))
goto err;
if (kread(nmbufaddr, (char *)&nmbufs, sizeof(int)))
@@ -194,13 +162,6 @@ mbpr(u_long mbaddr, u_long mbtaddr, u_long nmbcaddr, u_long nmbufaddr,
warn("sysctl: retrieving mbstat");
goto err;
}
-#if 0
- if (sysctlbyname("kern.ipc.mbtypes", mbtypes, &mbtypeslen, NULL,
- 0) < 0) {
- warn("sysctl: retrieving mbtypes");
- goto err;
- }
-#endif
mlen = sizeof(int);
if (sysctlbyname("kern.ipc.nmbclusters", &nmbclusters, &mlen,
NULL, 0) < 0) {
@@ -233,6 +194,16 @@ mbpr(u_long mbaddr, u_long mbtaddr, u_long nmbcaddr, u_long nmbufaddr,
}
}
+ nmbtypes = mbstat->m_numtypes;
+ if ((seen = calloc(nmbtypes, sizeof(*seen))) == NULL) {
+ warn("calloc: cannot allocate memory for mbtypes seen flag");
+ goto err;
+ }
+ if ((mbtypes = calloc(nmbtypes, sizeof(long *))) == NULL) {
+ warn("calloc: cannot allocate memory for mbtypes");
+ goto err;
+ }
+
for (i = 0; i < num_objs; i++)
mbpstat[i] = mbpstat[0] + i;
@@ -250,6 +221,8 @@ mbpr(u_long mbaddr, u_long mbtaddr, u_long nmbcaddr, u_long nmbufaddr,
(mbpstat[GENLST]->mb_mbpgs * MBPERPG));
totnum = mbpstat[GENLST]->mb_mbpgs * MBPERPG;
totfree = mbpstat[GENLST]->mb_mbfree;
+ for (j = 1; j < nmbtypes; j++)
+ mbtypes[j] += mbpstat[GENLST]->mb_mbtypes[j];
totspace[0] = mbpstat[GENLST]->mb_mbpgs * page_size;
for (i = 0; i < (num_objs - 1); i++) {
if (mbpstat[i]->mb_active == 0)
@@ -260,11 +233,26 @@ mbpr(u_long mbaddr, u_long mbtaddr, u_long nmbcaddr, u_long nmbufaddr,
totspace[0] += mbpstat[i]->mb_mbpgs * page_size;
totnum += mbpstat[i]->mb_mbpgs * MBPERPG;
totfree += mbpstat[i]->mb_mbfree;
+ for (j = 1; j < nmbtypes; j++)
+ mbtypes[j] += mbpstat[i]->mb_mbtypes[j];
}
totused[0] = totnum - totfree;
printf("\tTotal:\t\t%lu/%lu (in use/in pool)\n", totused[0], totnum);
printf("\tMaximum number allowed on each CPU list: %d\n", mbuf_limit);
printf("\tMaximum possible: %d\n", nmbufs);
+ printf("\tAllocated mbuf types:\n");
+ for (mp = mbtypenames; mp->mt_name; mp++) {
+ if (mbtypes[mp->mt_type]) {
+ seen[mp->mt_type] = YES;
+ printf("\t %lu mbufs allocated to %s\n",
+ mbtypes[mp->mt_type], mp->mt_name);
+ }
+ }
+ for (i = 1; i < nmbtypes; i++) {
+ if (!seen[i] && mbtypes[i])
+ printf("\t %lu mbufs allocated to <mbuf type: %d>\n",
+ mbtypes[i], i);
+ }
printf("\t%lu%% of mbuf map consumed\n", ((totspace[0] * 100) / (nmbufs
* MSIZE)));
@@ -300,12 +288,10 @@ mbpr(u_long mbaddr, u_long mbtaddr, u_long nmbcaddr, u_long nmbufaddr,
printf("%lu calls to protocol drain routines\n", mbstat->m_drain);
err:
-#if 0
if (mbtypes != NULL)
free(mbtypes);
if (seen != NULL)
free(seen);
-#endif
if (mbstat != NULL)
free(mbstat);
if (mbpstat != NULL) {
diff --git a/usr.bin/systat/mbufs.c b/usr.bin/systat/mbufs.c
index 1983e0a..5dee078 100644
--- a/usr.bin/systat/mbufs.c
+++ b/usr.bin/systat/mbufs.c
@@ -50,16 +50,14 @@ static const char rcsid[] =
#include "extern.h"
static struct mbpstat **mbpstat;
+static struct mbstat *mbstat;
static int num_objs;
+static long *m_mbtypes;
+static short nmbtypes;
#define GENLST (num_objs - 1)
-/* XXX: mbtypes stats temporarily disabled. */
-#if 0
-static u_long *m_mbtypes;
-static int nmbtypes;
-
static struct mtnames {
- int mt_type;
+ short mt_type;
char *mt_name;
} mtnames[] = {
{ MT_DATA, "data"},
@@ -69,9 +67,7 @@ static struct mtnames {
{ MT_CONTROL, "control"},
{ MT_OOBDATA, "oobdata"}
};
-
#define NNAMES (sizeof (mtnames) / sizeof (mtnames[0]))
-#endif
WINDOW *
openmbufs()
@@ -106,12 +102,24 @@ showmbufs()
char buf[10];
char *mtname;
-/* XXX: mbtypes stats temporarily disabled (will be back soon!) */
-#if 0
+ totfree = mbpstat[GENLST]->mb_mbfree;
+ for (i = 1; i < nmbtypes; i++)
+ m_mbtypes[i] += mbpstat[GENLST]->mb_mbtypes[i];
+ for (i = 0; i < GENLST; i++) {
+ if (mbpstat[i]->mb_active == 0)
+ continue;
+ totfree += mbpstat[i]->mb_mbfree;
+ for (j = 1; j < nmbtypes; j++)
+ m_mbtypes[j] += mbpstat[i]->mb_mbtypes[j];
+ }
+
+ /*
+ * Print totals for different mbuf types.
+ */
for (j = 0; j < wnd->_maxy; j++) {
max = 0, index = -1;
for (i = 0; i < wnd->_maxy; i++) {
- if (i == MT_FREE)
+ if (i == MT_NOTMBUF)
continue;
if (i >= nmbtypes)
break;
@@ -144,18 +152,10 @@ showmbufs()
wclrtoeol(wnd);
m_mbtypes[index] = 0;
}
-#endif
/*
* Print total number of free mbufs.
*/
- totfree = mbpstat[GENLST]->mb_mbfree;
- for (i = 0; i < (num_objs - 1); i++) {
- if (mbpstat[i]->mb_active == 0)
- continue;
- totfree += mbpstat[i]->mb_mbfree;
- }
- j = 0; /* XXX */
if (totfree > 0) {
mvwprintw(wnd, 1+j, 0, "%-10.10s", "free");
if (totfree > 60) {
@@ -179,19 +179,22 @@ initmbufs()
{
int i;
size_t len;
-#if 0
- size_t mbtypeslen;
- if (sysctlbyname("kern.ipc.mbtypes", NULL, &mbtypeslen, NULL, 0) < 0) {
- error("sysctl getting mbtypes size failed");
+ len = sizeof *mbstat;
+ if ((mbstat = malloc(len)) == NULL) {
+ error("malloc mbstat failed");
return 0;
}
- if ((m_mbtypes = calloc(1, mbtypeslen)) == NULL) {
- error("calloc mbtypes failed");
+ if (sysctlbyname("kern.ipc.mbstat", mbstat, &len, NULL, 0) < 0) {
+ error("sysctl retrieving mbstat");
return 0;
}
- nmbtypes = mbtypeslen / sizeof(*m_mbtypes);
-#endif
+ nmbtypes = mbstat->m_numtypes;
+ if ((m_mbtypes = calloc(nmbtypes, sizeof(long *))) == NULL) {
+ error("calloc m_mbtypes failed");
+ return 0;
+ }
+
if (sysctlbyname("kern.ipc.mb_statpcpu", NULL, &len, NULL, 0) < 0) {
error("sysctl getting mbpstat total size failed");
return 0;
@@ -205,6 +208,7 @@ initmbufs()
error("calloc mbpstat structures failed");
return 0;
}
+
for (i = 0; i < num_objs; i++)
mbpstat[i] = mbpstat[0] + i;
@@ -219,9 +223,4 @@ fetchmbufs()
len = num_objs * sizeof(struct mbpstat);
if (sysctlbyname("kern.ipc.mb_statpcpu", mbpstat[0], &len, NULL, 0) < 0)
printw("sysctl: mbpstat: %s", strerror(errno));
-#if 0
- len = nmbtypes * sizeof *m_mbtypes;
- if (sysctlbyname("kern.ipc.mbtypes", m_mbtypes, &len, 0, 0) < 0)
- printw("sysctl: mbtypes: %s", strerror(errno));
-#endif
}
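
For reference, the netstat(1) and systat(1) hunks above both perform the same reader-side aggregation: start from the general list's mb_mbtypes[] counters, then add in every active per-CPU list, skipping slot 0 (MT_NOTMBUF). A hedged standalone sketch of that loop follows; struct mbpstat_sk and the other names are simplified stand-ins rather than the real structures in the tree.

/*
 * Simplified stand-in for the reader-side summing done by netstat(1)
 * and systat(1); struct mbpstat_sk models only the fields used here.
 */
struct mbpstat_sk {
	long	mb_mbtypes[16];		/* net per-type counts for this list */
	short	mb_active;		/* nonzero if this per-CPU list is in use */
};

/*
 * Sum the per-list net counters into totals[].  num_objs is the number
 * of per-CPU lists plus one for the general list, which sits last (the
 * GENLST index in the utilities); slot 0 is MT_NOTMBUF and is skipped,
 * as in the diffs above.
 */
static void
sum_mbtypes(struct mbpstat_sk **mbpstat, int num_objs, int nmbtypes,
    long *totals)
{
	int genlst, i, j;

	genlst = num_objs - 1;
	for (j = 1; j < nmbtypes; j++)
		totals[j] = mbpstat[genlst]->mb_mbtypes[j];
	for (i = 0; i < genlst; i++) {
		if (mbpstat[i]->mb_active == 0)
			continue;
		for (j = 1; j < nmbtypes; j++)
			totals[j] += mbpstat[i]->mb_mbtypes[j];
	}
}
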