path: root/sys
author     peter <peter@FreeBSD.org>  2008-12-02 06:50:26 +0000
committer  peter <peter@FreeBSD.org>  2008-12-02 06:50:26 +0000
commit     76037b082e725faf10083e8fe83315236781569b (patch)
tree       5a4b696f7bcc8472ff22e36ac8044ca39efcc17b /sys
parent     fd1b5dd075223f114ecc0f3bb7c016032a129bc5 (diff)
parent     0cd59a18e366e541876a23e59b53ce6246f0cec8 (diff)
Merge user/peter/kinfo branch as of r185547 into head.
This changes struct kinfo_filedesc and kinfo_vmentry such that they are the same on both 32 and 64 bit platforms such as i386/amd64 and will not require sysctl wrapping. Two new OIDs are assigned. The old ones are available under COMPAT_FREEBSD7 - but it isn't quite that simple: the superseded interface was never actually released on 7.x.

The other main change is to pack the data passed to userland via the sysctl: kf_structsize and kve_structsize are reduced for the copyout. If you have a process with 100,000+ sockets open, the unpacked records require a 132MB+ copyout; with packing, it is "only" ~35MB. (Still seriously unpleasant, but not quite as devastating.) A similar problem exists for the vmentry structure - a process with lots and lots of shared libraries and small mmaps makes its copyout expensive too.

My immediate problem is valgrind. It traditionally achieves this functionality by parsing procfs output, which is already in a packed format. Secondly, when tracing 32 bit binaries on amd64, valgrind uses a cross-compiled 32 bit binary, which ran directly into the differing data structures in 32 vs 64 bit mode. (valgrind uses this interface to track file descriptor operations, so this affected every single 32 bit binary.)

I've added two utility functions to libutil to unpack the structures into a fixed record length and to make the interface a little more convenient to use.
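For illustration, below is a minimal userland sketch (not part of this commit) of consuming the new packed format: it walks a kern.proc.filedesc buffer by stepping kf_structsize bytes per record, since the kernel now truncates each record after kf_path and rounds the size up to 8 bytes before the copyout. The walk_filedesc() helper name and the bare-bones error handling are illustrative assumptions; in practice the libutil unpacking functions mentioned above would hide this loop.

/*
 * Illustrative sketch only: walk the packed records returned by the new
 * KERN_PROC_FILEDESC sysctl.  Each record's length is carried in
 * kf_structsize, so the buffer is traversed record by record.
 */
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/user.h>

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
walk_filedesc(pid_t pid)	/* hypothetical helper, not from this commit */
{
	int name[4] = { CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid };
	size_t len = 0;
	char *buf, *bp, *eb;

	/* First call sizes the buffer, second call fetches the records. */
	if (sysctl(name, 4, NULL, &len, NULL, 0) != 0)
		return;
	if ((buf = malloc(len)) == NULL)
		return;
	if (sysctl(name, 4, buf, &len, NULL, 0) != 0) {
		free(buf);
		return;
	}
	for (bp = buf, eb = buf + len; bp < eb; ) {
		struct kinfo_file *kf = (struct kinfo_file *)(void *)bp;

		if (kf->kf_structsize <= 0)
			break;
		printf("fd %d type %d path %s\n",
		    kf->kf_fd, kf->kf_type, kf->kf_path);
		bp += kf->kf_structsize;	/* step by the packed size */
	}
	free(buf);
}

int
main(void)
{
	walk_filedesc(getpid());
	return (0);
}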
Diffstat (limited to 'sys')
-rw-r--r--  sys/kern/kern_descrip.c  268
-rw-r--r--  sys/kern/kern_proc.c     185
-rw-r--r--  sys/sys/sysctl.h           7
-rw-r--r--  sys/sys/user.h            85
4 files changed, 529 insertions, 16 deletions
diff --git a/sys/kern/kern_descrip.c b/sys/kern/kern_descrip.c
index 833aa2c..c981411 100644
--- a/sys/kern/kern_descrip.c
+++ b/sys/kern/kern_descrip.c
@@ -2509,6 +2509,259 @@ sysctl_kern_file(SYSCTL_HANDLER_ARGS)
SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
0, 0, sysctl_kern_file, "S,xfile", "Entire file table");
+#ifdef KINFO_OFILE_SIZE
+CTASSERT(sizeof(struct kinfo_ofile) == KINFO_OFILE_SIZE);
+#endif
+
+#ifdef COMPAT_FREEBSD7
+static int
+export_vnode_for_osysctl(struct vnode *vp, int type,
+ struct kinfo_ofile *kif, struct filedesc *fdp, struct sysctl_req *req)
+{
+ int error;
+ char *fullpath, *freepath;
+ int vfslocked;
+
+ bzero(kif, sizeof(*kif));
+ kif->kf_structsize = sizeof(*kif);
+
+ vref(vp);
+ kif->kf_fd = type;
+ kif->kf_type = KF_TYPE_VNODE;
+ /* This function only handles directories. */
+ KASSERT(vp->v_type == VDIR, ("export_vnode_for_osysctl: vnode not directory"));
+ kif->kf_vnode_type = KF_VTYPE_VDIR;
+
+ /*
+ * This is not a true file descriptor, so we set a bogus refcount
+ * and offset to indicate these fields should be ignored.
+ */
+ kif->kf_ref_count = -1;
+ kif->kf_offset = -1;
+
+ freepath = NULL;
+ fullpath = "-";
+ FILEDESC_SUNLOCK(fdp);
+ vn_fullpath(curthread, vp, &fullpath, &freepath);
+ vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+ vrele(vp);
+ VFS_UNLOCK_GIANT(vfslocked);
+ strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
+ if (freepath != NULL)
+ free(freepath, M_TEMP);
+ error = SYSCTL_OUT(req, kif, sizeof(*kif));
+ FILEDESC_SLOCK(fdp);
+ return (error);
+}
+
+/*
+ * Get per-process file descriptors for use by procstat(1), et al.
+ */
+static int
+sysctl_kern_proc_ofiledesc(SYSCTL_HANDLER_ARGS)
+{
+ char *fullpath, *freepath;
+ struct kinfo_ofile *kif;
+ struct filedesc *fdp;
+ int error, i, *name;
+ struct socket *so;
+ struct vnode *vp;
+ struct file *fp;
+ struct proc *p;
+ struct tty *tp;
+ int vfslocked;
+
+ name = (int *)arg1;
+ if ((p = pfind((pid_t)name[0])) == NULL)
+ return (ESRCH);
+ if ((error = p_candebug(curthread, p))) {
+ PROC_UNLOCK(p);
+ return (error);
+ }
+ fdp = fdhold(p);
+ PROC_UNLOCK(p);
+ if (fdp == NULL)
+ return (ENOENT);
+ kif = malloc(sizeof(*kif), M_TEMP, M_WAITOK);
+ FILEDESC_SLOCK(fdp);
+ if (fdp->fd_cdir != NULL)
+ export_vnode_for_osysctl(fdp->fd_cdir, KF_FD_TYPE_CWD, kif,
+ fdp, req);
+ if (fdp->fd_rdir != NULL)
+ export_vnode_for_osysctl(fdp->fd_rdir, KF_FD_TYPE_ROOT, kif,
+ fdp, req);
+ if (fdp->fd_jdir != NULL)
+ export_vnode_for_osysctl(fdp->fd_jdir, KF_FD_TYPE_JAIL, kif,
+ fdp, req);
+ for (i = 0; i < fdp->fd_nfiles; i++) {
+ if ((fp = fdp->fd_ofiles[i]) == NULL)
+ continue;
+ bzero(kif, sizeof(*kif));
+ kif->kf_structsize = sizeof(*kif);
+ vp = NULL;
+ so = NULL;
+ tp = NULL;
+ kif->kf_fd = i;
+ switch (fp->f_type) {
+ case DTYPE_VNODE:
+ kif->kf_type = KF_TYPE_VNODE;
+ vp = fp->f_vnode;
+ break;
+
+ case DTYPE_SOCKET:
+ kif->kf_type = KF_TYPE_SOCKET;
+ so = fp->f_data;
+ break;
+
+ case DTYPE_PIPE:
+ kif->kf_type = KF_TYPE_PIPE;
+ break;
+
+ case DTYPE_FIFO:
+ kif->kf_type = KF_TYPE_FIFO;
+ vp = fp->f_vnode;
+ vref(vp);
+ break;
+
+ case DTYPE_KQUEUE:
+ kif->kf_type = KF_TYPE_KQUEUE;
+ break;
+
+ case DTYPE_CRYPTO:
+ kif->kf_type = KF_TYPE_CRYPTO;
+ break;
+
+ case DTYPE_MQUEUE:
+ kif->kf_type = KF_TYPE_MQUEUE;
+ break;
+
+ case DTYPE_SHM:
+ kif->kf_type = KF_TYPE_SHM;
+ break;
+
+ case DTYPE_SEM:
+ kif->kf_type = KF_TYPE_SEM;
+ break;
+
+ case DTYPE_PTS:
+ kif->kf_type = KF_TYPE_PTS;
+ tp = fp->f_data;
+ break;
+
+ default:
+ kif->kf_type = KF_TYPE_UNKNOWN;
+ break;
+ }
+ kif->kf_ref_count = fp->f_count;
+ if (fp->f_flag & FREAD)
+ kif->kf_flags |= KF_FLAG_READ;
+ if (fp->f_flag & FWRITE)
+ kif->kf_flags |= KF_FLAG_WRITE;
+ if (fp->f_flag & FAPPEND)
+ kif->kf_flags |= KF_FLAG_APPEND;
+ if (fp->f_flag & FASYNC)
+ kif->kf_flags |= KF_FLAG_ASYNC;
+ if (fp->f_flag & FFSYNC)
+ kif->kf_flags |= KF_FLAG_FSYNC;
+ if (fp->f_flag & FNONBLOCK)
+ kif->kf_flags |= KF_FLAG_NONBLOCK;
+ if (fp->f_flag & O_DIRECT)
+ kif->kf_flags |= KF_FLAG_DIRECT;
+ if (fp->f_flag & FHASLOCK)
+ kif->kf_flags |= KF_FLAG_HASLOCK;
+ kif->kf_offset = fp->f_offset;
+ if (vp != NULL) {
+ vref(vp);
+ switch (vp->v_type) {
+ case VNON:
+ kif->kf_vnode_type = KF_VTYPE_VNON;
+ break;
+ case VREG:
+ kif->kf_vnode_type = KF_VTYPE_VREG;
+ break;
+ case VDIR:
+ kif->kf_vnode_type = KF_VTYPE_VDIR;
+ break;
+ case VBLK:
+ kif->kf_vnode_type = KF_VTYPE_VBLK;
+ break;
+ case VCHR:
+ kif->kf_vnode_type = KF_VTYPE_VCHR;
+ break;
+ case VLNK:
+ kif->kf_vnode_type = KF_VTYPE_VLNK;
+ break;
+ case VSOCK:
+ kif->kf_vnode_type = KF_VTYPE_VSOCK;
+ break;
+ case VFIFO:
+ kif->kf_vnode_type = KF_VTYPE_VFIFO;
+ break;
+ case VBAD:
+ kif->kf_vnode_type = KF_VTYPE_VBAD;
+ break;
+ default:
+ kif->kf_vnode_type = KF_VTYPE_UNKNOWN;
+ break;
+ }
+ /*
+ * It is OK to drop the filedesc lock here as we will
+ * re-validate and re-evaluate its properties when
+ * the loop continues.
+ */
+ freepath = NULL;
+ fullpath = "-";
+ FILEDESC_SUNLOCK(fdp);
+ vn_fullpath(curthread, vp, &fullpath, &freepath);
+ vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+ vrele(vp);
+ VFS_UNLOCK_GIANT(vfslocked);
+ strlcpy(kif->kf_path, fullpath,
+ sizeof(kif->kf_path));
+ if (freepath != NULL)
+ free(freepath, M_TEMP);
+ FILEDESC_SLOCK(fdp);
+ }
+ if (so != NULL) {
+ struct sockaddr *sa;
+
+ if (so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa)
+ == 0 && sa->sa_len <= sizeof(kif->kf_sa_local)) {
+ bcopy(sa, &kif->kf_sa_local, sa->sa_len);
+ free(sa, M_SONAME);
+ }
+ if (so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa)
+ == 0 && sa->sa_len <= sizeof(kif->kf_sa_peer)) {
+ bcopy(sa, &kif->kf_sa_peer, sa->sa_len);
+ free(sa, M_SONAME);
+ }
+ kif->kf_sock_domain =
+ so->so_proto->pr_domain->dom_family;
+ kif->kf_sock_type = so->so_type;
+ kif->kf_sock_protocol = so->so_proto->pr_protocol;
+ }
+ if (tp != NULL) {
+ strlcpy(kif->kf_path, tty_devname(tp),
+ sizeof(kif->kf_path));
+ }
+ error = SYSCTL_OUT(req, kif, sizeof(*kif));
+ if (error)
+ break;
+ }
+ FILEDESC_SUNLOCK(fdp);
+ fddrop(fdp);
+ free(kif, M_TEMP);
+ return (0);
+}
+
+static SYSCTL_NODE(_kern_proc, KERN_PROC_OFILEDESC, ofiledesc, CTLFLAG_RD,
+ sysctl_kern_proc_ofiledesc, "Process ofiledesc entries");
+#endif /* COMPAT_FREEBSD7 */
+
+#ifdef KINFO_FILE_SIZE
+CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
+#endif
+
static int
export_vnode_for_sysctl(struct vnode *vp, int type,
struct kinfo_file *kif, struct filedesc *fdp, struct sysctl_req *req)
@@ -2518,7 +2771,6 @@ export_vnode_for_sysctl(struct vnode *vp, int type,
int vfslocked;
bzero(kif, sizeof(*kif));
- kif->kf_structsize = sizeof(*kif);
vref(vp);
kif->kf_fd = type;
@@ -2544,7 +2796,11 @@ export_vnode_for_sysctl(struct vnode *vp, int type,
strlcpy(kif->kf_path, fullpath, sizeof(kif->kf_path));
if (freepath != NULL)
free(freepath, M_TEMP);
- error = SYSCTL_OUT(req, kif, sizeof(*kif));
+ /* Pack record size down */
+ kif->kf_structsize = offsetof(struct kinfo_file, kf_path) +
+ strlen(kif->kf_path) + 1;
+ kif->kf_structsize = roundup(kif->kf_structsize, sizeof(uint64_t));
+ error = SYSCTL_OUT(req, kif, kif->kf_structsize);
FILEDESC_SLOCK(fdp);
return (error);
}
@@ -2592,7 +2848,6 @@ sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
if ((fp = fdp->fd_ofiles[i]) == NULL)
continue;
bzero(kif, sizeof(*kif));
- kif->kf_structsize = sizeof(*kif);
vp = NULL;
so = NULL;
tp = NULL;
@@ -2739,7 +2994,12 @@ sysctl_kern_proc_filedesc(SYSCTL_HANDLER_ARGS)
strlcpy(kif->kf_path, tty_devname(tp),
sizeof(kif->kf_path));
}
- error = SYSCTL_OUT(req, kif, sizeof(*kif));
+ /* Pack record size down */
+ kif->kf_structsize = offsetof(struct kinfo_file, kf_path) +
+ strlen(kif->kf_path) + 1;
+ kif->kf_structsize = roundup(kif->kf_structsize,
+ sizeof(uint64_t));
+ error = SYSCTL_OUT(req, kif, kif->kf_structsize);
if (error)
break;
}
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index a331a42..4e5ceea 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -32,6 +32,7 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
+#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_ktrace.h"
@@ -1337,13 +1338,18 @@ sysctl_kern_proc_sv_name(SYSCTL_HANDLER_ARGS)
return (sysctl_handle_string(oidp, sv_name, 0, req));
}
+#ifdef KINFO_OVMENTRY_SIZE
+CTASSERT(sizeof(struct kinfo_ovmentry) == KINFO_OVMENTRY_SIZE);
+#endif
+
+#ifdef COMPAT_FREEBSD7
static int
-sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
+sysctl_kern_proc_ovmmap(SYSCTL_HANDLER_ARGS)
{
vm_map_entry_t entry, tmp_entry;
unsigned int last_timestamp;
char *fullpath, *freepath;
- struct kinfo_vmentry *kve;
+ struct kinfo_ovmentry *kve;
struct vattr va;
struct ucred *cred;
int error, *name;
@@ -1497,6 +1503,176 @@ sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
free(kve, M_TEMP);
return (error);
}
+#endif /* COMPAT_FREEBSD7 */
+
+#ifdef KINFO_VMENTRY_SIZE
+CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
+#endif
+
+static int
+sysctl_kern_proc_vmmap(SYSCTL_HANDLER_ARGS)
+{
+ vm_map_entry_t entry, tmp_entry;
+ unsigned int last_timestamp;
+ char *fullpath, *freepath;
+ struct kinfo_vmentry *kve;
+ struct vattr va;
+ struct ucred *cred;
+ int error, *name;
+ struct vnode *vp;
+ struct proc *p;
+ vm_map_t map;
+
+ name = (int *)arg1;
+ if ((p = pfind((pid_t)name[0])) == NULL)
+ return (ESRCH);
+ if (p->p_flag & P_WEXIT) {
+ PROC_UNLOCK(p);
+ return (ESRCH);
+ }
+ if ((error = p_candebug(curthread, p))) {
+ PROC_UNLOCK(p);
+ return (error);
+ }
+ _PHOLD(p);
+ PROC_UNLOCK(p);
+
+ kve = malloc(sizeof(*kve), M_TEMP, M_WAITOK);
+
+ map = &p->p_vmspace->vm_map; /* XXXRW: More locking required? */
+ vm_map_lock_read(map);
+ for (entry = map->header.next; entry != &map->header;
+ entry = entry->next) {
+ vm_object_t obj, tobj, lobj;
+ vm_offset_t addr;
+ int vfslocked;
+
+ if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
+ continue;
+
+ bzero(kve, sizeof(*kve));
+
+ kve->kve_private_resident = 0;
+ obj = entry->object.vm_object;
+ if (obj != NULL) {
+ VM_OBJECT_LOCK(obj);
+ if (obj->shadow_count == 1)
+ kve->kve_private_resident =
+ obj->resident_page_count;
+ }
+ kve->kve_resident = 0;
+ addr = entry->start;
+ while (addr < entry->end) {
+ if (pmap_extract(map->pmap, addr))
+ kve->kve_resident++;
+ addr += PAGE_SIZE;
+ }
+
+ for (lobj = tobj = obj; tobj; tobj = tobj->backing_object) {
+ if (tobj != obj)
+ VM_OBJECT_LOCK(tobj);
+ if (lobj != obj)
+ VM_OBJECT_UNLOCK(lobj);
+ lobj = tobj;
+ }
+
+ kve->kve_fileid = 0;
+ kve->kve_fsid = 0;
+ freepath = NULL;
+ fullpath = "";
+ if (lobj) {
+ vp = NULL;
+ switch (lobj->type) {
+ case OBJT_DEFAULT:
+ kve->kve_type = KVME_TYPE_DEFAULT;
+ break;
+ case OBJT_VNODE:
+ kve->kve_type = KVME_TYPE_VNODE;
+ vp = lobj->handle;
+ vref(vp);
+ break;
+ case OBJT_SWAP:
+ kve->kve_type = KVME_TYPE_SWAP;
+ break;
+ case OBJT_DEVICE:
+ kve->kve_type = KVME_TYPE_DEVICE;
+ break;
+ case OBJT_PHYS:
+ kve->kve_type = KVME_TYPE_PHYS;
+ break;
+ case OBJT_DEAD:
+ kve->kve_type = KVME_TYPE_DEAD;
+ break;
+ default:
+ kve->kve_type = KVME_TYPE_UNKNOWN;
+ break;
+ }
+ if (lobj != obj)
+ VM_OBJECT_UNLOCK(lobj);
+
+ kve->kve_ref_count = obj->ref_count;
+ kve->kve_shadow_count = obj->shadow_count;
+ VM_OBJECT_UNLOCK(obj);
+ if (vp != NULL) {
+ vn_fullpath(curthread, vp, &fullpath,
+ &freepath);
+ cred = curthread->td_ucred;
+ vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+ vn_lock(vp, LK_SHARED | LK_RETRY);
+ if (VOP_GETATTR(vp, &va, cred) == 0) {
+ kve->kve_fileid = va.va_fileid;
+ kve->kve_fsid = va.va_fsid;
+ }
+ vput(vp);
+ VFS_UNLOCK_GIANT(vfslocked);
+ }
+ } else {
+ kve->kve_type = KVME_TYPE_NONE;
+ kve->kve_ref_count = 0;
+ kve->kve_shadow_count = 0;
+ }
+
+ kve->kve_start = entry->start;
+ kve->kve_end = entry->end;
+ kve->kve_offset = entry->offset;
+
+ if (entry->protection & VM_PROT_READ)
+ kve->kve_protection |= KVME_PROT_READ;
+ if (entry->protection & VM_PROT_WRITE)
+ kve->kve_protection |= KVME_PROT_WRITE;
+ if (entry->protection & VM_PROT_EXECUTE)
+ kve->kve_protection |= KVME_PROT_EXEC;
+
+ if (entry->eflags & MAP_ENTRY_COW)
+ kve->kve_flags |= KVME_FLAG_COW;
+ if (entry->eflags & MAP_ENTRY_NEEDS_COPY)
+ kve->kve_flags |= KVME_FLAG_NEEDS_COPY;
+
+ strlcpy(kve->kve_path, fullpath, sizeof(kve->kve_path));
+ if (freepath != NULL)
+ free(freepath, M_TEMP);
+
+ last_timestamp = map->timestamp;
+ vm_map_unlock_read(map);
+ /* Pack record size down */
+ kve->kve_structsize = offsetof(struct kinfo_vmentry, kve_path) +
+ strlen(kve->kve_path) + 1;
+ kve->kve_structsize = roundup(kve->kve_structsize,
+ sizeof(uint64_t));
+ error = SYSCTL_OUT(req, kve, kve->kve_structsize);
+ vm_map_lock_read(map);
+ if (error)
+ break;
+ if (last_timestamp + 1 != map->timestamp) {
+ vm_map_lookup_entry(map, addr - 1, &tmp_entry);
+ entry = tmp_entry;
+ }
+ }
+ vm_map_unlock_read(map);
+ PRELE(p);
+ free(kve, M_TEMP);
+ return (error);
+}
#if defined(STACK) || defined(DDB)
static int
@@ -1669,6 +1845,11 @@ static SYSCTL_NODE(_kern_proc, (KERN_PROC_PID | KERN_PROC_INC_THREAD), pid_td,
static SYSCTL_NODE(_kern_proc, (KERN_PROC_PROC | KERN_PROC_INC_THREAD), proc_td,
CTLFLAG_RD, sysctl_kern_proc, "Return process table, no threads");
+#ifdef COMPAT_FREEBSD7
+static SYSCTL_NODE(_kern_proc, KERN_PROC_OVMMAP, ovmmap, CTLFLAG_RD,
+ sysctl_kern_proc_ovmmap, "Old Process vm map entries");
+#endif
+
static SYSCTL_NODE(_kern_proc, KERN_PROC_VMMAP, vmmap, CTLFLAG_RD,
sysctl_kern_proc_vmmap, "Process vm map entries");
diff --git a/sys/sys/sysctl.h b/sys/sys/sysctl.h
index e605e9f..f1cb04a 100644
--- a/sys/sys/sysctl.h
+++ b/sys/sys/sysctl.h
@@ -540,13 +540,16 @@ TAILQ_HEAD(sysctl_ctx_list, sysctl_ctx_entry);
#define KERN_PROC_RGID 10 /* by real group id */
#define KERN_PROC_GID 11 /* by effective group id */
#define KERN_PROC_PATHNAME 12 /* path to executable */
-#define KERN_PROC_VMMAP 13 /* VM map entries for process */
-#define KERN_PROC_FILEDESC 14 /* File descriptors for process */
+#define KERN_PROC_OVMMAP 13 /* Old VM map entries for process */
+#define KERN_PROC_OFILEDESC 14 /* Old file descriptors for process */
#define KERN_PROC_KSTACK 15 /* Kernel stacks for process */
#define KERN_PROC_INC_THREAD 0x10 /*
* modifier for pid, pgrp, tty,
* uid, ruid, gid, rgid and proc
+ * This effectively uses 16-31
*/
+#define KERN_PROC_VMMAP 32 /* VM map entries for process */
+#define KERN_PROC_FILEDESC 33 /* File descriptors for process */
/*
* KERN_IPC identifiers
diff --git a/sys/sys/user.h b/sys/sys/user.h
index a12669f..93a78d3 100644
--- a/sys/sys/user.h
+++ b/sys/sys/user.h
@@ -277,20 +277,55 @@ struct user {
#define KF_FLAG_DIRECT 0x00000040
#define KF_FLAG_HASLOCK 0x00000080
-struct kinfo_file {
+/*
+ * Old format. Has variable hidden padding due to alignment.
+ * This is a compatability hack for pre-build 7.1 packages.
+ */
+#if defined(__amd64__)
+#define KINFO_OFILE_SIZE 1328
+#endif
+#if defined(__i386__)
+#define KINFO_OFILE_SIZE 1324
+#endif
+
+struct kinfo_ofile {
int kf_structsize; /* Size of kinfo_file. */
int kf_type; /* Descriptor type. */
int kf_fd; /* Array index. */
int kf_ref_count; /* Reference count. */
int kf_flags; /* Flags. */
+ /* XXX Hidden alignment padding here on amd64 */
off_t kf_offset; /* Seek location. */
int kf_vnode_type; /* Vnode type. */
int kf_sock_domain; /* Socket domain. */
int kf_sock_type; /* Socket type. */
int kf_sock_protocol; /* Socket protocol. */
- char kf_path[PATH_MAX]; /* Path to file, if any. */
+ char kf_path[PATH_MAX]; /* Path to file, if any. */
+ struct sockaddr_storage kf_sa_local; /* Socket address. */
+ struct sockaddr_storage kf_sa_peer; /* Peer address. */
+};
+
+#if defined(__amd64__) || defined(__i386__)
+#define KINFO_FILE_SIZE 1392
+#endif
+
+struct kinfo_file {
+ int kf_structsize; /* Variable size of record. */
+ int kf_type; /* Descriptor type. */
+ int kf_fd; /* Array index. */
+ int kf_ref_count; /* Reference count. */
+ int kf_flags; /* Flags. */
+ int _kf_pad0; /* Round to 64 bit alignment */
+ uint64_t kf_offset; /* Seek location. */
+ int kf_vnode_type; /* Vnode type. */
+ int kf_sock_domain; /* Socket domain. */
+ int kf_sock_type; /* Socket type. */
+ int kf_sock_protocol; /* Socket protocol. */
struct sockaddr_storage kf_sa_local; /* Socket address. */
struct sockaddr_storage kf_sa_peer; /* Peer address. */
+ int _kf_ispare[16]; /* Space for more stuff. */
+ /* Truncated before copyout in sysctl */
+ char kf_path[PATH_MAX]; /* Path to file, if any. */
};
/*
@@ -313,11 +348,18 @@ struct kinfo_file {
#define KVME_FLAG_COW 0x00000001
#define KVME_FLAG_NEEDS_COPY 0x00000002
-struct kinfo_vmentry {
+#if defined(__amd64__)
+#define KINFO_OVMENTRY_SIZE 1168
+#endif
+#if defined(__i386__)
+#define KINFO_OVMENTRY_SIZE 1128
+#endif
+
+struct kinfo_ovmentry {
int kve_structsize; /* Size of kinfo_vmmapentry. */
int kve_type; /* Type of map entry. */
- void *kve_start; /* Starting pointer. */
- void *kve_end; /* Finishing pointer. */
+ void *kve_start; /* Starting address. */
+ void *kve_end; /* Finishing address. */
int kve_flags; /* Flags on map entry. */
int kve_resident; /* Number of resident pages. */
int kve_private_resident; /* Number of private pages. */
@@ -327,11 +369,35 @@ struct kinfo_vmentry {
char kve_path[PATH_MAX]; /* Path to VM obj, if any. */
void *_kve_pspare[8]; /* Space for more stuff. */
off_t kve_offset; /* Mapping offset in object */
- uint64_t kve_fileid; /* inode number of vnode */
+ uint64_t kve_fileid; /* inode number if vnode */
dev_t kve_fsid; /* dev_t of vnode location */
int _kve_ispare[3]; /* Space for more stuff. */
};
+#if defined(__amd64__) || defined(__i386__)
+#define KINFO_VMENTRY_SIZE 1160
+#endif
+
+struct kinfo_vmentry {
+ int kve_structsize; /* Variable size of record. */
+ int kve_type; /* Type of map entry. */
+ uint64_t kve_start; /* Starting address. */
+ uint64_t kve_end; /* Finishing address. */
+ uint64_t kve_offset; /* Mapping offset in object */
+ uint64_t kve_fileid; /* inode number if vnode */
+ uint32_t kve_fsid; /* dev_t of vnode location */
+ int kve_flags; /* Flags on map entry. */
+ int kve_resident; /* Number of resident pages. */
+ int kve_private_resident; /* Number of private pages. */
+ int kve_protection; /* Protection bitmask. */
+ int kve_ref_count; /* VM obj ref count. */
+ int kve_shadow_count; /* VM obj shadow count. */
+ int _kve_pad0; /* 64bit align next field */
+ int _kve_ispare[16]; /* Space for more stuff. */
+ /* Truncated before copyout in sysctl */
+ char kve_path[PATH_MAX]; /* Path to VM obj, if any. */
+};
+
/*
* The KERN_PROC_KSTACK sysctl allows a process to dump the kernel stacks of
* another process as a series of entries. Each stack is represented by a
@@ -343,12 +409,15 @@ struct kinfo_vmentry {
#define KKST_STATE_SWAPPED 1 /* Stack swapped out. */
#define KKST_STATE_RUNNING 2 /* Stack ephemeral. */
+#if defined(__amd64__) || defined(__i386__)
+#define KINFO_KSTACK_SIZE 1096
+#endif
+
struct kinfo_kstack {
lwpid_t kkst_tid; /* ID of thread. */
int kkst_state; /* Validity of stack. */
char kkst_trace[KKST_MAXLEN]; /* String representing stack. */
- void *_kkst_pspare[8]; /* Space for more stuff. */
- int _kkst_ispare[8]; /* Space for more stuff. */
+ int _kkst_ispare[16]; /* Space for more stuff. */
};
#endif