author     sson <sson@FreeBSD.org>    2009-05-27 01:30:23 +0000
committer  sson <sson@FreeBSD.org>    2009-05-27 01:30:23 +0000
commit     dc2eb1b92ac858ad80ad8666106694e31132c066 (patch)
tree       ab9301fa2457910452e91e55cd8898082645985d /cddl
parent     f8b28486a955d6d96d6bb28e0c908a897cf83d1e (diff)
Add the OpenSolaris lockstat(1M) command. Requires the dtrace driver,
the lockstat provider, and the ksyms(4) pseudo driver kernel modules.

Approved by:	gnn (mentor)
Diffstat (limited to 'cddl')
-rw-r--r--  cddl/contrib/opensolaris/cmd/lockstat/lockstat.1   875
-rw-r--r--  cddl/contrib/opensolaris/cmd/lockstat/lockstat.c  1917
-rw-r--r--  cddl/contrib/opensolaris/cmd/lockstat/sym.c        311
-rw-r--r--  cddl/usr.sbin/Makefile                               1
-rw-r--r--  cddl/usr.sbin/lockstat/Makefile                     40
5 files changed, 3144 insertions, 0 deletions
diff --git a/cddl/contrib/opensolaris/cmd/lockstat/lockstat.1 b/cddl/contrib/opensolaris/cmd/lockstat/lockstat.1
new file mode 100644
index 0000000..f3fc8e6
--- /dev/null
+++ b/cddl/contrib/opensolaris/cmd/lockstat/lockstat.1
@@ -0,0 +1,875 @@
+'\" te
+.\" CDDL HEADER START
+.\"
+.\" The contents of this file are subject to the terms of the
+.\" Common Development and Distribution License (the "License").
+.\" You may not use this file except in compliance with the License.
+.\"
+.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+.\" or http://www.opensolaris.org/os/licensing.
+.\" See the License for the specific language governing permissions
+.\" and limitations under the License.
+.\"
+.\" When distributing Covered Code, include this CDDL HEADER in each
+.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+.\" If applicable, add the following below this CDDL HEADER, with the
+.\" fields enclosed by brackets "[]" replaced with your own identifying
+.\" information: Portions Copyright [yyyy] [name of copyright owner]
+.\"
+.\" CDDL HEADER END
+.\" Copyright (c) 2008, Sun Microsystems, Inc. All Rights Reserved.
+.TH lockstat 1M "28 Feb 2008" "SunOS 5.11" "System Administration Commands"
+.SH NAME
+lockstat \- report kernel lock and profiling statistics
+.SH SYNOPSIS
+.LP
+.nf
+\fBlockstat\fR [\fB-ACEHI\fR] [\fB-e\fR \fIevent_list\fR] [\fB-i\fR \fIrate\fR]
+ [\fB-b\fR | \fB-t\fR | \fB-h\fR | \fB-s\fR \fIdepth\fR] [\fB-n\fR \fInrecords\fR]
+ [\fB-l\fR \fIlock\fR [, \fIsize\fR]] [\fB-d\fR \fIduration\fR]
+ [\fB-f\fR \fIfunction\fR [, \fIsize\fR]] [\fB-T\fR] [\fB-ckgwWRpP\fR] [\fB-D\fR \fIcount\fR]
+ [\fB-o\fR \fIfilename\fR] [\fB-x\fR \fIopt\fR [=val]] \fIcommand\fR [\fIargs\fR]
+.fi
+
+.SH DESCRIPTION
+.sp
+.LP
+The \fBlockstat\fR utility gathers and displays kernel locking and profiling statistics. \fBlockstat\fR allows you to specify which events to watch (for example, spin on adaptive mutex, block on read access to rwlock due to waiting writers, and so forth), how much
+data to gather for each event, and how to display the data. By default, \fBlockstat\fR monitors all lock contention events, gathers frequency and timing data about those events, and displays the data in decreasing frequency order, so that the most common events appear first.
+.sp
+.LP
+\fBlockstat\fR gathers data until the specified command completes. For example, to gather statistics for a fixed-time interval, use \fBsleep\fR(1) as
+the command, as follows:
+.sp
+.LP
+\fBexample#\fR \fBlockstat\fR \fBsleep\fR \fB5\fR
+.sp
+.LP
+When the \fB-I\fR option is specified, \fBlockstat\fR establishes a per-processor high-level periodic interrupt source to gather profiling data. The interrupt handler simply generates a \fBlockstat\fR event whose caller is the interrupted PC (program counter).
+The profiling event is just like any other \fBlockstat\fR event, so all of the normal \fBlockstat\fR options are applicable.
+.sp
+.LP
+\fBlockstat\fR relies on DTrace to modify the running kernel's text to intercept events of interest. This imposes a small but measurable overhead on all system activity, so access to \fBlockstat\fR is restricted to super-user by default. The system administrator
+can permit other users to use \fBlockstat\fR by granting them additional DTrace privileges. Refer to the \fISolaris Dynamic Tracing Guide\fR for more information about DTrace security features.
+.SH OPTIONS
+.sp
+.LP
+The following options are supported:
+.SS "Event Selection"
+.sp
+.LP
+If no event selection options are specified, the default is \fB-C\fR.
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-A\fR\fR
+.ad
+.sp .6
+.RS 4n
+Watch all lock events. \fB-A\fR is equivalent to \fB-CH\fR.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-C\fR\fR
+.ad
+.sp .6
+.RS 4n
+Watch contention events.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-E\fR\fR
+.ad
+.sp .6
+.RS 4n
+Watch error events.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB\fR\fB-e\fR \fIevent_list\fR\fR
+.ad
+.sp .6
+.RS 4n
+Only watch the specified events. \fIevent\fR \fIlist\fR is a comma-separated list of events or ranges of events such as 1,4-7,35. Run \fBlockstat\fR with no arguments to get a brief description of all events.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-H\fR\fR
+.ad
+.sp .6
+.RS 4n
+Watch hold events.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-I\fR\fR
+.ad
+.sp .6
+.RS 4n
+Watch profiling interrupt events.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB\fR\fB-i\fR \fIrate\fR\fR
+.ad
+.sp .6
+.RS 4n
+Interrupt rate (per second) for \fB-I\fR. The default is 97 Hz, so that profiling doesn't run in lockstep with the clock interrupt (which runs at 100 Hz).
+.RE
+
+.SS "Data Gathering"
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-x\fR \fIarg\fR[=\fIval\fR]\fR
+.ad
+.sp .6
+.RS 4n
+Enable or modify a DTrace runtime option or D compiler option. The list of
+options is found in the \fISolaris Dynamic Tracing Guide\fR. Boolean options
+are enabled by specifying their name. Options with values are set by
+separating the option name and value with an equals sign (=), as in the
+example below.
+.RE
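+.sp
+.LP
+For example, one might raise the size of the principal DTrace buffer through
+the standard \fBbufsize\fR runtime option:
+.sp
+.in +2
+.nf
+example# \fBlockstat -x bufsize=64k sleep 5\fR
+.fi
+.in -2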
+
+.SS "Data Gathering (Mutually Exclusive)"
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-b\fR\fR
+.ad
+.sp .6
+.RS 4n
+Basic statistics: lock, caller, number of events.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-h\fR\fR
+.ad
+.sp .6
+.RS 4n
+Histogram: Timing plus time-distribution histograms.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB\fR\fB-s\fR \fIdepth\fR\fR
+.ad
+.sp .6
+.RS 4n
+Stack trace: Histogram plus stack traces up to \fIdepth\fR frames deep.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-t\fR\fR
+.ad
+.sp .6
+.RS 4n
+Timing: Basic plus timing for all events [default].
+.RE
+
+.SS "Data Filtering"
+.sp
+.ne 2
+.mk
+.na
+\fB\fB\fR\fB-d\fR \fIduration\fR\fR
+.ad
+.sp .6
+.RS 4n
+Only watch events longer than \fIduration\fR.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB\fR\fB-f\fR \fIfunc[,size]\fR\fR
+.ad
+.sp .6
+.RS 4n
+Only watch events generated by \fIfunc\fR, which can be specified as a symbolic name or hex address. \fIsize\fR defaults to the \fBELF\fR symbol size if available, or \fB1\fR if not.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB\fR\fB-l\fR \fIlock[,size]\fR\fR
+.ad
+.sp .6
+.RS 4n
+Only watch \fIlock\fR, which can be specified as a symbolic name or hex address. \fIsize\fR defaults to the \fBELF\fR symbol size or \fB1\fR if the symbol size is not available.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB\fR\fB-n\fR \fInrecords\fR\fR
+.ad
+.sp .6
+.RS 4n
+Maximum number of data records.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-T\fR\fR
+.ad
+.sp .6
+.RS 4n
+Trace (rather than sample) events [off by default].
+.RE
+
+.SS "Data Reporting"
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-c\fR\fR
+.ad
+.sp .6
+.RS 4n
+Coalesce lock data for lock arrays (for example, \fBpse_mutex[]\fR).
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB\fR\fB-D\fR \fIcount\fR\fR
+.ad
+.sp .6
+.RS 4n
+Only display the top \fIcount\fR events of each type.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-g\fR\fR
+.ad
+.sp .6
+.RS 4n
+Show total events generated by function. For example, if \fBfoo()\fR calls \fBbar()\fR in a loop, the work done by \fBbar()\fR counts as work generated by \fBfoo()\fR (along with any work done by \fBfoo()\fR itself).
+The \fB-g\fR option works by counting the total number of stack frames in which each function appears. This implies two things: (1) the data reported by \fB-g\fR can be misleading if the stack traces are not deep enough, and (2) functions that are called recursively might show
+greater than 100% activity. In light of issue (1), the default data gathering mode when using \fB-g\fR is \fB-s\fR \fB50\fR.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-k\fR\fR
+.ad
+.sp .6
+.RS 4n
+Coalesce PCs within functions.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB\fR\fB-o\fR \fIfilename\fR\fR
+.ad
+.sp .6
+.RS 4n
+Direct output to \fIfilename\fR.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-P\fR\fR
+.ad
+.sp .6
+.RS 4n
+Sort data by (\fIcount * time\fR) product.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-p\fR\fR
+.ad
+.sp .6
+.RS 4n
+Parsable output format.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-R\fR\fR
+.ad
+.sp .6
+.RS 4n
+Display rates (events per second) rather than counts.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-W\fR\fR
+.ad
+.sp .6
+.RS 4n
+Whichever: distinguish events only by caller, not by lock.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fB-w\fR\fR
+.ad
+.sp .6
+.RS 4n
+Wherever: distinguish events only by lock, not by caller.
+.RE
+
+.SH DISPLAY FORMATS
+.sp
+.LP
+The following headers appear over various columns of data.
+.sp
+.ne 2
+.mk
+.na
+\fB\fBCount\fR or \fBops/s\fR\fR
+.ad
+.sp .6
+.RS 4n
+Number of times this event occurred, or the rate (times per second) if \fB-R\fR was specified.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fBindv\fR\fR
+.ad
+.sp .6
+.RS 4n
+Percentage of all events represented by this individual event.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fBgenr\fR\fR
+.ad
+.sp .6
+.RS 4n
+Percentage of all events generated by this function.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fBcuml\fR\fR
+.ad
+.sp .6
+.RS 4n
+Cumulative percentage; a running total of the individuals.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fBrcnt\fR\fR
+.ad
+.sp .6
+.RS 4n
+Average reference count. This will always be \fB1\fR for exclusive locks (mutexes, spin locks, rwlocks held as writer) but can be greater than \fB1\fR for shared locks (rwlocks held as reader).
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fBnsec\fR\fR
+.ad
+.sp .6
+.RS 4n
+Average duration of the events in nanoseconds, as appropriate for the event. For the profiling event, duration means interrupt latency.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fBLock\fR\fR
+.ad
+.sp .6
+.RS 4n
+Address of the lock; displayed symbolically if possible.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fBCPU+PIL\fR\fR
+.ad
+.sp .6
+.RS 4n
+\fBCPU\fR plus processor interrupt level (\fBPIL\fR). For example, if \fBCPU\fR 4 is interrupted while at \fBPIL\fR 6, this will be reported as \fBcpu[4]+6\fR.
+.RE
+
+.sp
+.ne 2
+.mk
+.na
+\fB\fBCaller\fR\fR
+.ad
+.sp .6
+.RS 4n
+Address of the caller; displayed symbolically if possible.
+.RE
+
+.SH EXAMPLES
+.LP
+\fBExample 1 \fRMeasuring Kernel Lock Contention
+.sp
+.in +2
+.nf
+example# \fBlockstat sleep 5\fR
+Adaptive mutex spin: 2210 events in 5.055 seconds (437 events/sec)
+.fi
+.in -2
+.sp
+
+.sp
+.in +2
+.nf
+Count indv cuml rcnt nsec Lock Caller
+------------------------------------------------------------------------
+ 269 12% 12% 1.00 2160 service_queue background+0xdc
+ 249 11% 23% 1.00 86 service_queue qenable_locked+0x64
+ 228 10% 34% 1.00 131 service_queue background+0x15c
+ 68 3% 37% 1.00 79 0x30000024070 untimeout+0x1c
+ 59 3% 40% 1.00 384 0x300066fa8e0 background+0xb0
+ 43 2% 41% 1.00 30 rqcred_lock svc_getreq+0x3c
+ 42 2% 43% 1.00 341 0x30006834eb8 background+0xb0
+ 41 2% 45% 1.00 135 0x30000021058 untimeout+0x1c
+ 40 2% 47% 1.00 39 rqcred_lock svc_getreq+0x260
+ 37 2% 49% 1.00 2372 0x300068e83d0 hmestart+0x1c4
+ 36 2% 50% 1.00 77 0x30000021058 timeout_common+0x4
+ 36 2% 52% 1.00 354 0x300066fa120 background+0xb0
+ 32 1% 53% 1.00 97 0x30000024070 timeout_common+0x4
+ 31 1% 55% 1.00 2923 0x300069883d0 hmestart+0x1c4
+ 29 1% 56% 1.00 366 0x300066fb290 background+0xb0
+ 28 1% 57% 1.00 117 0x3000001e040 untimeout+0x1c
+ 25 1% 59% 1.00 93 0x3000001e040 timeout_common+0x4
+ 22 1% 60% 1.00 25 0x30005161110 sync_stream_buf+0xdc
+ 21 1% 60% 1.00 291 0x30006834eb8 putq+0xa4
+ 19 1% 61% 1.00 43 0x3000515dcb0 mdf_alloc+0xc
+ 18 1% 62% 1.00 456 0x30006834eb8 qenable+0x8
+ 18 1% 63% 1.00 61 service_queue queuerun+0x168
+ 17 1% 64% 1.00 268 0x30005418ee8 vmem_free+0x3c
+[...]
+
+R/W reader blocked by writer: 76 events in 5.055 seconds (15 events/sec)
+
+Count indv cuml rcnt nsec Lock Caller
+------------------------------------------------------------------------
+ 23 30% 30% 1.00 22590137 0x300098ba358 ufs_dirlook+0xd0
+ 17 22% 53% 1.00 5820995 0x3000ad815e8 find_bp+0x10
+ 13 17% 70% 1.00 2639918 0x300098ba360 ufs_iget+0x198
+ 4 5% 75% 1.00 3193015 0x300098ba360 ufs_getattr+0x54
+ 3 4% 79% 1.00 7953418 0x3000ad817c0 find_bp+0x10
+ 3 4% 83% 1.00 935211 0x3000ad815e8 find_read_lof+0x14
+ 2 3% 86% 1.00 16357310 0x300073a4720 find_bp+0x10
+ 2 3% 88% 1.00 2072433 0x300073a4720 find_read_lof+0x14
+ 2 3% 91% 1.00 1606153 0x300073a4370 find_bp+0x10
+ 1 1% 92% 1.00 2656909 0x300107e7400 ufs_iget+0x198
+[...]
+.fi
+.in -2
+.sp
+
+.LP
+\fBExample 2 \fRMeasuring Hold Times
+.sp
+.in +2
+.nf
+example# \fBlockstat -H -D 10 sleep 1\fR
+Adaptive mutex spin: 513 events
+.fi
+.in -2
+.sp
+
+.sp
+.in +2
+.nf
+Count indv cuml rcnt nsec Lock Caller
+-------------------------------------------------------------------------
+ 480 5% 5% 1.00 1136 0x300007718e8 putnext+0x40
+ 286 3% 9% 1.00 666 0x3000077b430 getf+0xd8
+ 271 3% 12% 1.00 537 0x3000077b430 msgio32+0x2fc
+ 270 3% 15% 1.00 3670 0x300007718e8 strgetmsg+0x3d4
+ 270 3% 18% 1.00 1016 0x300007c38b0 getq_noenab+0x200
+ 264 3% 20% 1.00 1649 0x300007718e8 strgetmsg+0xa70
+ 216 2% 23% 1.00 6251 tcp_mi_lock tcp_snmp_get+0xfc
+ 206 2% 25% 1.00 602 thread_free_lock clock+0x250
+ 138 2% 27% 1.00 485 0x300007c3998 putnext+0xb8
+ 138 2% 28% 1.00 3706 0x300007718e8 strrput+0x5b8
+-------------------------------------------------------------------------
+[...]
+.fi
+.in -2
+.sp
+
+.LP
+\fBExample 3 \fRMeasuring Hold Times for Stack Traces Containing a Specific Function
+.sp
+.in +2
+.nf
+example# \fBlockstat -H -f tcp_rput_data -s 50 -D 10 sleep 1\fR
+Adaptive mutex spin: 11 events in 1.023 seconds (11
+events/sec)
+.fi
+.in -2
+.sp
+
+.sp
+.in +2
+.nf
+-------------------------------------------------------------------------
+Count indv cuml rcnt nsec Lock Caller
+ 9 82% 82% 1.00 2540 0x30000031380 tcp_rput_data+0x2b90
+
+ nsec ------ Time Distribution ------ count Stack
+ 256 |@@@@@@@@@@@@@@@@ 5 tcp_rput_data+0x2b90
+ 512 |@@@@@@ 2 putnext+0x78
+ 1024 |@@@ 1 ip_rput+0xec4
+ 2048 | 0 _c_putnext+0x148
+ 4096 | 0 hmeread+0x31c
+ 8192 | 0 hmeintr+0x36c
+ 16384 |@@@ 1
+sbus_intr_wrapper+0x30
+[...]
+
+Count indv cuml rcnt nsec Lock Caller
+ 1 9% 91% 1.00 1036 0x30000055380 freemsg+0x44
+
+ nsec ------ Time Distribution ------ count Stack
+ 1024 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ 1 freemsg+0x44
+ tcp_rput_data+0x2fd0
+ putnext+0x78
+ ip_rput+0xec4
+ _c_putnext+0x148
+ hmeread+0x31c
+ hmeintr+0x36c
+
+sbus_intr_wrapper+0x30
+-------------------------------------------------------------------------
+[...]
+.fi
+.in -2
+.sp
+
+.LP
+\fBExample 4 \fRBasic Kernel Profiling
+.sp
+.LP
+For basic profiling, we don't care whether the profiling interrupt sampled \fBfoo()\fR\fB+0x4c\fR or \fBfoo()\fR\fB+0x78\fR; we care only that it sampled somewhere in \fBfoo()\fR, so we use \fB-k\fR. The \fBCPU\fR and \fBPIL\fR aren't relevant to basic profiling because we are measuring the system as a whole, not a particular \fBCPU\fR or interrupt level, so we use \fB-W\fR.
+
+.sp
+.in +2
+.nf
+example# \fBlockstat -kIW -D 20 ./polltest\fR
+Profiling interrupt: 82 events in 0.424 seconds (194
+events/sec)
+.fi
+.in -2
+.sp
+
+.sp
+.in +2
+.nf
+Count indv cuml rcnt nsec Hottest CPU+PIL Caller
+-----------------------------------------------------------------------
+ 8 10% 10% 1.00 698 cpu[1] utl0
+ 6 7% 17% 1.00 299 cpu[0] read
+ 5 6% 23% 1.00 124 cpu[1] getf
+ 4 5% 28% 1.00 327 cpu[0] fifo_read
+ 4 5% 33% 1.00 112 cpu[1] poll
+ 4 5% 38% 1.00 212 cpu[1] uiomove
+ 4 5% 43% 1.00 361 cpu[1] mutex_tryenter
+ 3 4% 46% 1.00 682 cpu[0] write
+ 3 4% 50% 1.00 89 cpu[0] pcache_poll
+ 3 4% 54% 1.00 118 cpu[1] set_active_fd
+ 3 4% 57% 1.00 105 cpu[0] syscall_trap32
+ 3 4% 61% 1.00 640 cpu[1] (usermode)
+ 2 2% 63% 1.00 127 cpu[1] fifo_poll
+ 2 2% 66% 1.00 300 cpu[1] fifo_write
+ 2 2% 68% 1.00 669 cpu[0] releasef
+ 2 2% 71% 1.00 112 cpu[1] bt_getlowbit
+ 2 2% 73% 1.00 247 cpu[1] splx
+ 2 2% 76% 1.00 503 cpu[0] mutex_enter
+ 2 2% 78% 1.00 467 cpu[0]+10 disp_lock_enter
+ 2 2% 80% 1.00 139 cpu[1] default_copyin
+-----------------------------------------------------------------------
+[...]
+.fi
+.in -2
+.sp
+
+.LP
+\fBExample 5 \fRGenerated-load Profiling
+.sp
+.LP
+In the example above, 5% of the samples were in \fBpoll()\fR. This tells us how much time was spent inside \fBpoll()\fR itself, but tells us nothing about how much work was \fBgenerated\fR by \fBpoll()\fR; that is, how much time we spent
+in functions called by \fBpoll()\fR. To determine that, we use the \fB-g\fR option. The example below shows that although \fBpolltest\fR spends only 5% of its time in \fBpoll()\fR itself, \fBpoll()\fR-induced work accounts for 34% of
+the load.
+
+.sp
+.LP
+Note that the functions that generate the profiling interrupt (\fBlockstat_intr()\fR, \fBcyclic_fire()\fR, and so forth) appear in every stack trace, and therefore are considered to have generated 100% of the load. This illustrates an important point: the generated
+load percentages do \fBnot\fR add up to 100% because they are not independent. If 72% of all stack traces contain both \fBfoo()\fR and \fBbar()\fR, then both \fBfoo()\fR and \fBbar()\fR are 72% load generators.
+
+.sp
+.in +2
+.nf
+example# \fBlockstat -kgIW -D 20 ./polltest\fR
+Profiling interrupt: 80 events in 0.412 seconds (194 events/sec)
+.fi
+.in -2
+.sp
+
+.sp
+.in +2
+.nf
+Count genr cuml rcnt nsec Hottest CPU+PIL Caller
+-------------------------------------------------------------------------
+ 80 100% ---- 1.00 310 cpu[1] lockstat_intr
+ 80 100% ---- 1.00 310 cpu[1] cyclic_fire
+ 80 100% ---- 1.00 310 cpu[1] cbe_level14
+ 80 100% ---- 1.00 310 cpu[1] current_thread
+ 27 34% ---- 1.00 176 cpu[1] poll
+ 20 25% ---- 1.00 221 cpu[0] write
+ 19 24% ---- 1.00 249 cpu[1] read
+ 17 21% ---- 1.00 232 cpu[0] write32
+ 17 21% ---- 1.00 207 cpu[1] pcache_poll
+ 14 18% ---- 1.00 319 cpu[0] fifo_write
+ 13 16% ---- 1.00 214 cpu[1] read32
+ 10 12% ---- 1.00 208 cpu[1] fifo_read
+ 10 12% ---- 1.00 787 cpu[1] utl0
+ 9 11% ---- 1.00 178 cpu[0] pcacheset_resolve
+ 9 11% ---- 1.00 262 cpu[0] uiomove
+ 7 9% ---- 1.00 506 cpu[1] (usermode)
+ 5 6% ---- 1.00 195 cpu[1] fifo_poll
+ 5 6% ---- 1.00 136 cpu[1] syscall_trap32
+ 4 5% ---- 1.00 139 cpu[0] releasef
+ 3 4% ---- 1.00 277 cpu[1] polllock
+-------------------------------------------------------------------------
+[...]
+.fi
+.in -2
+.sp
+
+.LP
+\fBExample 6 \fRGathering Lock Contention and Profiling Data for a Specific Module
+.sp
+.LP
+In this example we use the \fB-f\fR option not to specify a single function, but rather to specify the entire text space of the \fBsbus\fR module. We gather both lock contention and profiling statistics so that contention can be correlated with overall load on the
+module.
+
+.sp
+.in +2
+.nf
+example# \fBmodinfo | grep sbus\fR
+24 102a8b6f b8b4 59 1 sbus (SBus (sysio) nexus driver)
+.fi
+.in -2
+.sp
+
+.sp
+.in +2
+.nf
+example# \fBlockstat -kICE -f 0x102a8b6f,0xb8b4 sleep 10\fR
+Adaptive mutex spin: 39 events in 10.042 seconds (4 events/sec)
+.fi
+.in -2
+.sp
+
+.sp
+.in +2
+.nf
+Count indv cuml rcnt nsec Lock Caller
+-------------------------------------------------------------------------
+ 15 38% 38% 1.00 206 0x30005160528 sync_stream_buf
+ 7 18% 56% 1.00 14 0x30005160d18 sync_stream_buf
+ 6 15% 72% 1.00 27 0x300060c3118 sync_stream_buf
+ 5 13% 85% 1.00 24 0x300060c3510 sync_stream_buf
+ 2 5% 90% 1.00 29 0x300060c2d20 sync_stream_buf
+ 2 5% 95% 1.00 24 0x30005161cf8 sync_stream_buf
+ 1 3% 97% 1.00 21 0x30005161110 sync_stream_buf
+ 1 3% 100% 1.00 23 0x30005160130 sync_stream_buf
+[...]
+
+Adaptive mutex block: 9 events in 10.042 seconds (1 events/sec)
+
+Count indv cuml rcnt nsec Lock Caller
+-------------------------------------------------------------------------
+ 4 44% 44% 1.00 156539 0x30005160528 sync_stream_buf
+ 2 22% 67% 1.00 763516 0x30005160d18 sync_stream_buf
+ 1 11% 78% 1.00 462130 0x300060c3510 sync_stream_buf
+ 1 11% 89% 1.00 288749 0x30005161110 sync_stream_buf
+ 1 11% 100% 1.00 1015374 0x30005160130 sync_stream_buf
+[...]
+
+Profiling interrupt: 229 events in 10.042 seconds (23 events/sec)
+
+Count indv cuml rcnt nsec Hottest CPU+PIL Caller
+
+-------------------------------------------------------------------------
+ 89 39% 39% 1.00 426 cpu[0]+6 sync_stream_buf
+ 64 28% 67% 1.00 398 cpu[0]+6 sbus_intr_wrapper
+ 23 10% 77% 1.00 324 cpu[0]+6 iommu_dvma_kaddr_load
+ 21 9% 86% 1.00 512 cpu[0]+6 iommu_tlb_flush
+ 14 6% 92% 1.00 342 cpu[0]+6 iommu_dvma_unload
+ 13 6% 98% 1.00 306 cpu[1] iommu_dvma_sync
+ 5 2% 100% 1.00 389 cpu[1] iommu_dma_bindhdl
+-------------------------------------------------------------------------
+[...]
+.fi
+.in -2
+.sp
+
+.LP
+\fBExample 7 \fRDetermining the Average PIL (processor interrupt level) for a CPU
+.sp
+.in +2
+.nf
+example# \fBlockstat -Iw -l cpu[3] ./testprog\fR
+
+Profiling interrupt: 14791 events in 152.463 seconds (97 events/sec)
+
+Count indv cuml rcnt nsec CPU+PIL Hottest Caller
+
+-----------------------------------------------------------------------
+13641 92% 92% 1.00 253 cpu[3] (usermode)
+ 579 4% 96% 1.00 325 cpu[3]+6 ip_ocsum+0xe8
+ 375 3% 99% 1.00 411 cpu[3]+10 splx
+ 154 1% 100% 1.00 527 cpu[3]+4 fas_intr_svc+0x80
+ 41 0% 100% 1.00 293 cpu[3]+13 send_mondo+0x18
+ 1 0% 100% 1.00 266 cpu[3]+12 zsa_rxint+0x400
+-----------------------------------------------------------------------
+[...]
+.fi
+.in -2
+.sp
+
+.LP
+\fBExample 8 \fRDetermining which Subsystem is Causing the System to be Busy
+.sp
+.in +2
+.nf
+example# \fBlockstat -s 10 -I sleep 20\fR
+
+Profiling interrupt: 4863 events in 47.375 seconds (103 events/sec)
+
+Count indv cuml rcnt nsec CPU+PIL Caller
+
+-----------------------------------------------------------------------
+1929 40% 40% 0.00 3215 cpu[0] usec_delay+0x78
+ nsec ------ Time Distribution ------ count Stack
+ 4096 |@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ 1872 ata_wait+0x90
+ 8192 | 27 acersb_get_intr_status+0x34
+16384 | 29 ata_set_feature+0x124
+32768 | 1 ata_disk_start+0x15c
+ ata_hba_start+0xbc
+ ghd_waitq_process_and \e
+ _mutex_hold+0x70
+ ghd_waitq_process_and \e
+ _mutex_exit+0x4
+ ghd_transport+0x12c
+ ata_disk_tran_start+0x108
+-----------------------------------------------------------------------
+[...]
+.fi
+.in -2
+.sp
+
+.SH ATTRIBUTES
+.sp
+.LP
+See \fBattributes\fR(5) for descriptions of the following attributes:
+.sp
+
+.sp
+.TS
+tab(:) box;
+cw(2.75i) |cw(2.75i)
+lw(2.75i) |lw(2.75i)
+.
+ATTRIBUTE TYPE:ATTRIBUTE VALUE
+_
+Availability:SUNWdtrc
+.TE
+
+.SH SEE ALSO
+.sp
+.LP
+\fBdtrace\fR(1M), \fBplockstat\fR(1M), \fBattributes\fR(5), \fBlockstat\fR(7D), \fBmutex\fR(9F), \fBrwlock\fR(9F)
+.sp
+.LP
+\fISolaris Dynamic Tracing Guide\fR
+.SH NOTES
+.sp
+.LP
+The profiling support provided by \fBlockstat\fR \fB-I\fR replaces the old (and undocumented) \fB/usr/bin/kgmon\fR and \fB/dev/profile\fR.
+.sp
+.LP
+Tail-call elimination can affect call sites. For example, if \fBfoo()\fR\fB+0x50\fR calls \fBbar()\fR and the last thing \fBbar()\fR does is call \fBmutex_exit()\fR, the compiler can arrange for \fBbar()\fR to
+branch to \fBmutex_exit()\fR with a return address of \fBfoo()\fR\fB+0x58\fR. Thus, the \fBmutex_exit()\fR in \fBbar()\fR will appear as though it occurred at \fBfoo()\fR\fB+0x58\fR.
+.sp
+.LP
+The \fBPC\fR in the stack frame in which an interrupt occurs can be bogus because, between function calls, the compiler is free to use the return address register for local storage.
+.sp
+.LP
+When using the \fB-I\fR and \fB-s\fR options together, the interrupted PC will usually not appear anywhere in the stack since the interrupt handler is entered asynchronously, not by a function call from that \fBPC\fR.
+.sp
+.LP
+The \fBlockstat\fR technology is provided on an as-is basis. The format and content of \fBlockstat\fR output reflect the current Solaris kernel implementation and are therefore subject to change in future releases.
diff --git a/cddl/contrib/opensolaris/cmd/lockstat/lockstat.c b/cddl/contrib/opensolaris/cmd/lockstat/lockstat.c
new file mode 100644
index 0000000..ab35a91
--- /dev/null
+++ b/cddl/contrib/opensolaris/cmd/lockstat/lockstat.c
@@ -0,0 +1,1917 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <stdio.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <strings.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/modctl.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <dtrace.h>
+#include <sys/lockstat.h>
+#include <alloca.h>
+#include <signal.h>
+#include <assert.h>
+
+#if !defined(sun)
+/* needed for FreeBSD */
+#include <sys/time.h>
+#include <sys/resource.h>
+typedef uintptr_t pc_t;
+
+#define mergesort(a, b, c, d) lsmergesort(a, b, c, d)
+#endif
+
+#define LOCKSTAT_OPTSTR "x:bths:n:d:i:l:f:e:ckwWgCHEATID:RpPo:V"
+
+#define LS_MAX_STACK_DEPTH 50
+#define LS_MAX_EVENTS 64
+
+typedef struct lsrec {
+ struct lsrec *ls_next; /* next in hash chain */
+ uintptr_t ls_lock; /* lock address */
+ uintptr_t ls_caller; /* caller address */
+ uint32_t ls_count; /* cumulative event count */
+ uint32_t ls_event; /* type of event */
+ uintptr_t ls_refcnt; /* cumulative reference count */
+ uint64_t ls_time; /* cumulative event duration */
+ uint32_t ls_hist[64]; /* log2(duration) histogram */
+ uintptr_t ls_stack[LS_MAX_STACK_DEPTH];
+} lsrec_t;
+
+typedef struct lsdata {
+ struct lsrec *lsd_next; /* next available */
+ int lsd_count; /* number of records */
+} lsdata_t;
+
+/*
+ * Definitions for the types of experiments which can be run. They are
+ * listed in increasing order of memory cost and processing time cost.
+ * The numerical value of each type is the number of bytes needed per record.
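+ * For example, an LS_BASIC record ends just before ls_time (no timing data),
+ * LS_TIME adds the cumulative duration, LS_HIST adds the log2(duration)
+ * histogram, and LS_STACK(depth) extends the record with up to "depth"
+ * stack frames.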
+ */
+#define LS_BASIC offsetof(lsrec_t, ls_time)
+#define LS_TIME offsetof(lsrec_t, ls_hist[0])
+#define LS_HIST offsetof(lsrec_t, ls_stack[0])
+#define LS_STACK(depth) offsetof(lsrec_t, ls_stack[depth])
+
+static void report_stats(FILE *, lsrec_t **, size_t, uint64_t, uint64_t);
+static void report_trace(FILE *, lsrec_t **);
+
+extern int symtab_init(void);
+extern char *addr_to_sym(uintptr_t, uintptr_t *, size_t *);
+extern uintptr_t sym_to_addr(char *name);
+extern size_t sym_size(char *name);
+extern char *strtok_r(char *, const char *, char **);
+
+#define DEFAULT_NRECS 10000
+#define DEFAULT_HZ 97
+#define MAX_HZ 1000
+#define MIN_AGGSIZE (16 * 1024)
+#define MAX_AGGSIZE (32 * 1024 * 1024)
+
+static int g_stkdepth;
+static int g_topn = INT_MAX;
+static hrtime_t g_elapsed;
+static int g_rates = 0;
+static int g_pflag = 0;
+static int g_Pflag = 0;
+static int g_wflag = 0;
+static int g_Wflag = 0;
+static int g_cflag = 0;
+static int g_kflag = 0;
+static int g_gflag = 0;
+static int g_Vflag = 0;
+static int g_tracing = 0;
+static size_t g_recsize;
+static size_t g_nrecs;
+static int g_nrecs_used;
+static uchar_t g_enabled[LS_MAX_EVENTS];
+static hrtime_t g_min_duration[LS_MAX_EVENTS];
+static dtrace_hdl_t *g_dtp;
+static char *g_predicate;
+static char *g_ipredicate;
+static char *g_prog;
+static int g_proglen;
+static int g_dropped;
+
+typedef struct ls_event_info {
+ char ev_type;
+ char ev_lhdr[20];
+ char ev_desc[80];
+ char ev_units[10];
+ char ev_name[DTRACE_NAMELEN];
+ char *ev_predicate;
+ char *ev_acquire;
+} ls_event_info_t;
+
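+/*
+ * Table of the LS_MAX_EVENTS event types, indexed by the event number used
+ * with -e: entries 0-31 are contention ('C') events, 32-55 are hold ('H')
+ * events, 56-59 are profiling interrupt ('I') events, and 60-63 are error
+ * ('E') events.  The "Unknown event" entries are unused placeholders.
+ */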
+static ls_event_info_t g_event_info[LS_MAX_EVENTS] = {
+ { 'C', "Lock", "Adaptive mutex spin", "nsec",
+ "lockstat:::adaptive-spin" },
+ { 'C', "Lock", "Adaptive mutex block", "nsec",
+ "lockstat:::adaptive-block" },
+ { 'C', "Lock", "Spin lock spin", "nsec",
+ "lockstat:::spin-spin" },
+ { 'C', "Lock", "Thread lock spin", "nsec",
+ "lockstat:::thread-spin" },
+ { 'C', "Lock", "R/W writer blocked by writer", "nsec",
+ "lockstat:::rw-block", "arg2 == 0 && arg3 == 1" },
+ { 'C', "Lock", "R/W writer blocked by readers", "nsec",
+ "lockstat:::rw-block", "arg2 == 0 && arg3 == 0 && arg4" },
+ { 'C', "Lock", "R/W reader blocked by writer", "nsec",
+ "lockstat:::rw-block", "arg2 != 0 && arg3 == 1" },
+ { 'C', "Lock", "R/W reader blocked by write wanted", "nsec",
+ "lockstat:::rw-block", "arg2 != 0 && arg3 == 0 && arg4" },
+ { 'C', "Lock", "Unknown event (type 8)", "units" },
+ { 'C', "Lock", "Unknown event (type 9)", "units" },
+ { 'C', "Lock", "Unknown event (type 10)", "units" },
+ { 'C', "Lock", "Unknown event (type 11)", "units" },
+ { 'C', "Lock", "Unknown event (type 12)", "units" },
+ { 'C', "Lock", "Unknown event (type 13)", "units" },
+ { 'C', "Lock", "Unknown event (type 14)", "units" },
+ { 'C', "Lock", "Unknown event (type 15)", "units" },
+ { 'C', "Lock", "Unknown event (type 16)", "units" },
+ { 'C', "Lock", "Unknown event (type 17)", "units" },
+ { 'C', "Lock", "Unknown event (type 18)", "units" },
+ { 'C', "Lock", "Unknown event (type 19)", "units" },
+ { 'C', "Lock", "Unknown event (type 20)", "units" },
+ { 'C', "Lock", "Unknown event (type 21)", "units" },
+ { 'C', "Lock", "Unknown event (type 22)", "units" },
+ { 'C', "Lock", "Unknown event (type 23)", "units" },
+ { 'C', "Lock", "Unknown event (type 24)", "units" },
+ { 'C', "Lock", "Unknown event (type 25)", "units" },
+ { 'C', "Lock", "Unknown event (type 26)", "units" },
+ { 'C', "Lock", "Unknown event (type 27)", "units" },
+ { 'C', "Lock", "Unknown event (type 28)", "units" },
+ { 'C', "Lock", "Unknown event (type 29)", "units" },
+ { 'C', "Lock", "Unknown event (type 30)", "units" },
+ { 'C', "Lock", "Unknown event (type 31)", "units" },
+ { 'H', "Lock", "Adaptive mutex hold", "nsec",
+ "lockstat:::adaptive-release", NULL,
+ "lockstat:::adaptive-acquire" },
+ { 'H', "Lock", "Spin lock hold", "nsec",
+ "lockstat:::spin-release", NULL,
+ "lockstat:::spin-acquire" },
+ { 'H', "Lock", "R/W writer hold", "nsec",
+ "lockstat:::rw-release", "arg1 == 0",
+ "lockstat:::rw-acquire" },
+ { 'H', "Lock", "R/W reader hold", "nsec",
+ "lockstat:::rw-release", "arg1 != 0",
+ "lockstat:::rw-acquire" },
+ { 'H', "Lock", "Unknown event (type 36)", "units" },
+ { 'H', "Lock", "Unknown event (type 37)", "units" },
+ { 'H', "Lock", "Unknown event (type 38)", "units" },
+ { 'H', "Lock", "Unknown event (type 39)", "units" },
+ { 'H', "Lock", "Unknown event (type 40)", "units" },
+ { 'H', "Lock", "Unknown event (type 41)", "units" },
+ { 'H', "Lock", "Unknown event (type 42)", "units" },
+ { 'H', "Lock", "Unknown event (type 43)", "units" },
+ { 'H', "Lock", "Unknown event (type 44)", "units" },
+ { 'H', "Lock", "Unknown event (type 45)", "units" },
+ { 'H', "Lock", "Unknown event (type 46)", "units" },
+ { 'H', "Lock", "Unknown event (type 47)", "units" },
+ { 'H', "Lock", "Unknown event (type 48)", "units" },
+ { 'H', "Lock", "Unknown event (type 49)", "units" },
+ { 'H', "Lock", "Unknown event (type 50)", "units" },
+ { 'H', "Lock", "Unknown event (type 51)", "units" },
+ { 'H', "Lock", "Unknown event (type 52)", "units" },
+ { 'H', "Lock", "Unknown event (type 53)", "units" },
+ { 'H', "Lock", "Unknown event (type 54)", "units" },
+ { 'H', "Lock", "Unknown event (type 55)", "units" },
+#if defined(sun)
+ { 'I', "CPU+PIL", "Profiling interrupt", "nsec",
+#else
+ /* FreeBSD */
+ { 'I', "CPU+Pri_Class", "Profiling interrupt", "nsec",
+#endif
+ "profile:::profile-97", NULL },
+ { 'I', "Lock", "Unknown event (type 57)", "units" },
+ { 'I', "Lock", "Unknown event (type 58)", "units" },
+ { 'I', "Lock", "Unknown event (type 59)", "units" },
+ { 'E', "Lock", "Recursive lock entry detected", "(N/A)",
+ "lockstat:::rw-release", NULL, "lockstat:::rw-acquire" },
+ { 'E', "Lock", "Lockstat enter failure", "(N/A)" },
+ { 'E', "Lock", "Lockstat exit failure", "nsec" },
+ { 'E', "Lock", "Lockstat record failure", "(N/A)" },
+};
+
+#if !defined(sun)
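+/*
+ * Labels for the FreeBSD scheduling classes encoded into arg0 for profiling
+ * interrupt events; indexed by td_pri_class (assumed here to be PRI_ITHD,
+ * PRI_REALTIME, PRI_TIMESHARE and PRI_IDLE, i.e. 1 through 4).
+ */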
+static char *g_pri_class[] = {
+ "",
+ "Intr",
+ "RealT",
+ "TShar",
+ "Idle"
+};
+#endif
+
+static void
+fail(int do_perror, const char *message, ...)
+{
+ va_list args;
+ int save_errno = errno;
+
+ va_start(args, message);
+ (void) fprintf(stderr, "lockstat: ");
+ (void) vfprintf(stderr, message, args);
+ va_end(args);
+ if (do_perror)
+ (void) fprintf(stderr, ": %s", strerror(save_errno));
+ (void) fprintf(stderr, "\n");
+ exit(2);
+}
+
+static void
+dfail(const char *message, ...)
+{
+ va_list args;
+
+ va_start(args, message);
+ (void) fprintf(stderr, "lockstat: ");
+ (void) vfprintf(stderr, message, args);
+ va_end(args);
+ (void) fprintf(stderr, ": %s\n",
+ dtrace_errmsg(g_dtp, dtrace_errno(g_dtp)));
+
+ exit(2);
+}
+
+static void
+show_events(char event_type, char *desc)
+{
+ int i, first = -1, last;
+
+ for (i = 0; i < LS_MAX_EVENTS; i++) {
+ ls_event_info_t *evp = &g_event_info[i];
+ if (evp->ev_type != event_type ||
+ strncmp(evp->ev_desc, "Unknown event", 13) == 0)
+ continue;
+ if (first == -1)
+ first = i;
+ last = i;
+ }
+
+ (void) fprintf(stderr,
+ "\n%s events (lockstat -%c or lockstat -e %d-%d):\n\n",
+ desc, event_type, first, last);
+
+ for (i = first; i <= last; i++)
+ (void) fprintf(stderr,
+ "%4d = %s\n", i, g_event_info[i].ev_desc);
+}
+
+static void
+usage(void)
+{
+ (void) fprintf(stderr,
+ "Usage: lockstat [options] command [args]\n"
+ "\nEvent selection options:\n\n"
+ " -C watch contention events [on by default]\n"
+ " -E watch error events [off by default]\n"
+ " -H watch hold events [off by default]\n"
+ " -I watch interrupt events [off by default]\n"
+ " -A watch all lock events [equivalent to -CH]\n"
+ " -e event_list only watch the specified events (shown below);\n"
+ " <event_list> is a comma-separated list of\n"
+ " events or ranges of events, e.g. 1,4-7,35\n"
+ " -i rate interrupt rate for -I [default: %d Hz]\n"
+ "\nData gathering options:\n\n"
+ " -b basic statistics (lock, caller, event count)\n"
+ " -t timing for all events [default]\n"
+ " -h histograms for event times\n"
+ " -s depth stack traces <depth> deep\n"
+ " -x opt[=val] enable or modify DTrace options\n"
+ "\nData filtering options:\n\n"
+ " -n nrecords maximum number of data records [default: %d]\n"
+ " -l lock[,size] only watch <lock>, which can be specified as a\n"
+ " symbolic name or hex address; <size> defaults\n"
+ " to the ELF symbol size if available, 1 if not\n"
+ " -f func[,size] only watch events generated by <func>\n"
+ " -d duration only watch events longer than <duration>\n"
+ " -T trace (rather than sample) events\n"
+ "\nData reporting options:\n\n"
+ " -c coalesce lock data for arrays like pse_mutex[]\n"
+ " -k coalesce PCs within functions\n"
+ " -g show total events generated by function\n"
+ " -w wherever: don't distinguish events by caller\n"
+ " -W whichever: don't distinguish events by lock\n"
+ " -R display rates rather than counts\n"
+ " -p parsable output format (awk(1)-friendly)\n"
+ " -P sort lock data by (count * avg_time) product\n"
+ " -D n only display top <n> events of each type\n"
+ " -o filename send output to <filename>\n",
+ DEFAULT_HZ, DEFAULT_NRECS);
+
+ show_events('C', "Contention");
+ show_events('H', "Hold-time");
+ show_events('I', "Interrupt");
+ show_events('E', "Error");
+ (void) fprintf(stderr, "\n");
+
+ exit(1);
+}
+
+static int
+lockcmp(lsrec_t *a, lsrec_t *b)
+{
+ int i;
+
+ if (a->ls_event < b->ls_event)
+ return (-1);
+ if (a->ls_event > b->ls_event)
+ return (1);
+
+ for (i = g_stkdepth - 1; i >= 0; i--) {
+ if (a->ls_stack[i] < b->ls_stack[i])
+ return (-1);
+ if (a->ls_stack[i] > b->ls_stack[i])
+ return (1);
+ }
+
+ if (a->ls_caller < b->ls_caller)
+ return (-1);
+ if (a->ls_caller > b->ls_caller)
+ return (1);
+
+ if (a->ls_lock < b->ls_lock)
+ return (-1);
+ if (a->ls_lock > b->ls_lock)
+ return (1);
+
+ return (0);
+}
+
+static int
+countcmp(lsrec_t *a, lsrec_t *b)
+{
+ if (a->ls_event < b->ls_event)
+ return (-1);
+ if (a->ls_event > b->ls_event)
+ return (1);
+
+ return (b->ls_count - a->ls_count);
+}
+
+static int
+timecmp(lsrec_t *a, lsrec_t *b)
+{
+ if (a->ls_event < b->ls_event)
+ return (-1);
+ if (a->ls_event > b->ls_event)
+ return (1);
+
+ if (a->ls_time < b->ls_time)
+ return (1);
+ if (a->ls_time > b->ls_time)
+ return (-1);
+
+ return (0);
+}
+
+static int
+lockcmp_anywhere(lsrec_t *a, lsrec_t *b)
+{
+ if (a->ls_event < b->ls_event)
+ return (-1);
+ if (a->ls_event > b->ls_event)
+ return (1);
+
+ if (a->ls_lock < b->ls_lock)
+ return (-1);
+ if (a->ls_lock > b->ls_lock)
+ return (1);
+
+ return (0);
+}
+
+static int
+lock_and_count_cmp_anywhere(lsrec_t *a, lsrec_t *b)
+{
+ if (a->ls_event < b->ls_event)
+ return (-1);
+ if (a->ls_event > b->ls_event)
+ return (1);
+
+ if (a->ls_lock < b->ls_lock)
+ return (-1);
+ if (a->ls_lock > b->ls_lock)
+ return (1);
+
+ return (b->ls_count - a->ls_count);
+}
+
+static int
+sitecmp_anylock(lsrec_t *a, lsrec_t *b)
+{
+ int i;
+
+ if (a->ls_event < b->ls_event)
+ return (-1);
+ if (a->ls_event > b->ls_event)
+ return (1);
+
+ for (i = g_stkdepth - 1; i >= 0; i--) {
+ if (a->ls_stack[i] < b->ls_stack[i])
+ return (-1);
+ if (a->ls_stack[i] > b->ls_stack[i])
+ return (1);
+ }
+
+ if (a->ls_caller < b->ls_caller)
+ return (-1);
+ if (a->ls_caller > b->ls_caller)
+ return (1);
+
+ return (0);
+}
+
+static int
+site_and_count_cmp_anylock(lsrec_t *a, lsrec_t *b)
+{
+ int i;
+
+ if (a->ls_event < b->ls_event)
+ return (-1);
+ if (a->ls_event > b->ls_event)
+ return (1);
+
+ for (i = g_stkdepth - 1; i >= 0; i--) {
+ if (a->ls_stack[i] < b->ls_stack[i])
+ return (-1);
+ if (a->ls_stack[i] > b->ls_stack[i])
+ return (1);
+ }
+
+ if (a->ls_caller < b->ls_caller)
+ return (-1);
+ if (a->ls_caller > b->ls_caller)
+ return (1);
+
+ return (b->ls_count - a->ls_count);
+}
+
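+/*
+ * Merge sort over an array of lsrec_t pointers, using "b" as scratch space.
+ * Each half of "a" is sorted recursively; the first half is then copied
+ * into b[] in its sorted order and the second half in reverse order, so
+ * that the final merge can walk inward from both ends of b[] without any
+ * boundary checks.
+ */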
+static void
+lsmergesort(int (*cmp)(lsrec_t *, lsrec_t *), lsrec_t **a, lsrec_t **b, int n)
+{
+ int m = n / 2;
+ int i, j;
+
+ if (m > 1)
+ lsmergesort(cmp, a, b, m);
+ if (n - m > 1)
+ lsmergesort(cmp, a + m, b + m, n - m);
+ for (i = m; i > 0; i--)
+ b[i - 1] = a[i - 1];
+ for (j = m - 1; j < n - 1; j++)
+ b[n + m - j - 2] = a[j + 1];
+ while (i < j)
+ *a++ = cmp(b[i], b[j]) < 0 ? b[i++] : b[j--];
+ *a = b[i];
+}
+
+static void
+coalesce(int (*cmp)(lsrec_t *, lsrec_t *), lsrec_t **lock, int n)
+{
+ int i, j;
+ lsrec_t *target, *current;
+
+ target = lock[0];
+
+ for (i = 1; i < n; i++) {
+ current = lock[i];
+ if (cmp(current, target) != 0) {
+ target = current;
+ continue;
+ }
+ current->ls_event = LS_MAX_EVENTS;
+ target->ls_count += current->ls_count;
+ target->ls_refcnt += current->ls_refcnt;
+ if (g_recsize < LS_TIME)
+ continue;
+ target->ls_time += current->ls_time;
+ if (g_recsize < LS_HIST)
+ continue;
+ for (j = 0; j < 64; j++)
+ target->ls_hist[j] += current->ls_hist[j];
+ }
+}
+
+static void
+coalesce_symbol(uintptr_t *addrp)
+{
+ uintptr_t symoff;
+ size_t symsize;
+
+ if (addr_to_sym(*addrp, &symoff, &symsize) != NULL && symoff < symsize)
+ *addrp -= symoff;
+}
+
+static void
+predicate_add(char **pred, char *what, char *cmp, uintptr_t value)
+{
+ char *new;
+ int len, newlen;
+
+ if (what == NULL)
+ return;
+
+ if (*pred == NULL) {
+ *pred = malloc(1);
+ *pred[0] = '\0';
+ }
+
+ len = strlen(*pred);
+ newlen = len + strlen(what) + 32 + strlen("( && )");
+ new = malloc(newlen);
+
+ if (*pred[0] != '\0') {
+ if (cmp != NULL) {
+ (void) sprintf(new, "(%s) && (%s %s 0x%p)",
+ *pred, what, cmp, (void *)value);
+ } else {
+ (void) sprintf(new, "(%s) && (%s)", *pred, what);
+ }
+ } else {
+ if (cmp != NULL) {
+ (void) sprintf(new, "%s %s 0x%p",
+ what, cmp, (void *)value);
+ } else {
+ (void) sprintf(new, "%s", what);
+ }
+ }
+
+ free(*pred);
+ *pred = new;
+}
+
+static void
+predicate_destroy(char **pred)
+{
+ free(*pred);
+ *pred = NULL;
+}
+
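+/*
+ * Append one address-range clause to a -l/-f filter expression.  For
+ * example, filter_add(&filt, "arg0", base, size) appends a clause of the
+ * form "(arg0 >= <base> && arg0 < <base + size>)", OR-ed onto any ranges
+ * added earlier; the accumulated filter is later folded into the D
+ * predicate via predicate_add().
+ */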
+static void
+filter_add(char **filt, char *what, uintptr_t base, uintptr_t size)
+{
+ char buf[256], *c = buf, *new;
+ int len, newlen;
+
+ if (*filt == NULL) {
+ *filt = malloc(1);
+ *filt[0] = '\0';
+ }
+
+#if defined(sun)
+ (void) sprintf(c, "%s(%s >= 0x%p && %s < 0x%p)", *filt[0] != '\0' ?
+ " || " : "", what, (void *)base, what, (void *)(base + size));
+#else
+ (void) sprintf(c, "%s(%s >= %p && %s < %p)", *filt[0] != '\0' ?
+ " || " : "", what, (void *)base, what, (void *)(base + size));
+#endif
+
+ newlen = (len = strlen(*filt) + 1) + strlen(c);
+ new = malloc(newlen);
+ bcopy(*filt, new, len);
+ (void) strcat(new, c);
+ free(*filt);
+ *filt = new;
+}
+
+static void
+filter_destroy(char **filt)
+{
+ free(*filt);
+ *filt = NULL;
+}
+
+static void
+dprog_add(const char *fmt, ...)
+{
+ va_list args;
+ int size, offs;
+ char c;
+
+ va_start(args, fmt);
+ size = vsnprintf(&c, 1, fmt, args) + 1;
+ va_end(args);
+
+ if (g_proglen == 0) {
+ offs = 0;
+ } else {
+ offs = g_proglen - 1;
+ }
+
+ g_proglen = offs + size;
+
+ if ((g_prog = realloc(g_prog, g_proglen)) == NULL)
+ fail(1, "failed to reallocate program text");
+
+ va_start(args, fmt);
+ (void) vsnprintf(&g_prog[offs], size, fmt, args);
+ va_end(args);
+}
+
+/*
+ * This function may read like an open sewer, but keep in mind that programs
+ * that generate other programs are rarely pretty. If one has the unenviable
+ * task of maintaining or -- worse -- extending this code, use the -V option
+ * to examine the D program as generated by this function.
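+ *
+ * For a rough illustration, with the default options (-C -t) the clause
+ * emitted for event 0 (adaptive mutex spin) looks approximately like:
+ *
+ *	lockstat:::adaptive-spin
+ *	{
+ *		@avg[0ULL, (uintptr_t)arg0, caller] = avg(arg1);
+ *	}
+ *
+ * Predicates, stack() depth and the @hist quantization are added as the
+ * options demand.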
+ */
+static void
+dprog_addevent(int event)
+{
+ ls_event_info_t *info = &g_event_info[event];
+ char *pred = NULL;
+ char stack[20];
+ const char *arg0, *caller;
+ char *arg1 = "arg1";
+ char buf[80];
+ hrtime_t dur;
+ int depth;
+
+ if (info->ev_name[0] == '\0')
+ return;
+
+ if (info->ev_type == 'I') {
+ /*
+ * For interrupt events, arg0 (normally the lock pointer) is
+ * the CPU address plus the current pil, and arg1 (normally
+ * the number of nanoseconds) is the number of nanoseconds
+ * late -- and it's stored in arg2.
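+		 *
+		 * On FreeBSD, arg0 is instead synthesized from the CPU
+		 * number (td_oncpu << 16), a 0x01000000 tag, and the
+		 * thread's scheduling class (td_pri_class).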
+ */
+#if defined(sun)
+ arg0 = "(uintptr_t)curthread->t_cpu + \n"
+ "\t curthread->t_cpu->cpu_profile_pil";
+#else
+ arg0 = "(uintptr_t)(curthread->td_oncpu << 16) + \n"
+ "\t 0x01000000 + curthread->td_pri_class";
+#endif
+ caller = "(uintptr_t)arg0";
+ arg1 = "arg2";
+ } else {
+ arg0 = "(uintptr_t)arg0";
+ caller = "caller";
+ }
+
+ if (g_recsize > LS_HIST) {
+ for (depth = 0; g_recsize > LS_STACK(depth); depth++)
+ continue;
+
+ if (g_tracing) {
+ (void) sprintf(stack, "\tstack(%d);\n", depth);
+ } else {
+ (void) sprintf(stack, ", stack(%d)", depth);
+ }
+ } else {
+ (void) sprintf(stack, "");
+ }
+
+ if (info->ev_acquire != NULL) {
+ /*
+ * If this is a hold event, we need to generate an additional
+ * clause for the acquire; the clause for the release will be
+ * generated with the aggregating statement, below.
+ */
+ dprog_add("%s\n", info->ev_acquire);
+ predicate_add(&pred, info->ev_predicate, NULL, 0);
+ predicate_add(&pred, g_predicate, NULL, 0);
+ if (pred != NULL)
+ dprog_add("/%s/\n", pred);
+
+ dprog_add("{\n");
+ (void) sprintf(buf, "self->ev%d[(uintptr_t)arg0]", event);
+
+ if (info->ev_type == 'H') {
+ dprog_add("\t%s = timestamp;\n", buf);
+ } else {
+ /*
+ * If this isn't a hold event, it's the recursive
+ * error event. For this, we simply bump the
+ * thread-local, per-lock count.
+ */
+ dprog_add("\t%s++;\n", buf);
+ }
+
+ dprog_add("}\n\n");
+ predicate_destroy(&pred);
+ pred = NULL;
+
+ if (info->ev_type == 'E') {
+ /*
+ * If this is the recursive lock error event, we need
+ * to generate an additional clause to decrement the
+ * thread-local, per-lock count. This assures that we
+ * only execute the aggregating clause if we have
+ * recursive entry.
+ */
+ dprog_add("%s\n", info->ev_name);
+ dprog_add("/%s/\n{\n\t%s--;\n}\n\n", buf, buf);
+ }
+
+ predicate_add(&pred, buf, NULL, 0);
+
+ if (info->ev_type == 'H') {
+ (void) sprintf(buf, "timestamp -\n\t "
+ "self->ev%d[(uintptr_t)arg0]", event);
+ }
+
+ arg1 = buf;
+ } else {
+ predicate_add(&pred, info->ev_predicate, NULL, 0);
+ if (info->ev_type != 'I')
+ predicate_add(&pred, g_predicate, NULL, 0);
+ else
+ predicate_add(&pred, g_ipredicate, NULL, 0);
+ }
+
+ if ((dur = g_min_duration[event]) != 0)
+ predicate_add(&pred, arg1, ">=", dur);
+
+ dprog_add("%s\n", info->ev_name);
+
+ if (pred != NULL)
+ dprog_add("/%s/\n", pred);
+ predicate_destroy(&pred);
+
+ dprog_add("{\n");
+
+ if (g_tracing) {
+ dprog_add("\ttrace(%dULL);\n", event);
+ dprog_add("\ttrace(%s);\n", arg0);
+ dprog_add("\ttrace(%s);\n", caller);
+ dprog_add(stack);
+ } else {
+ /*
+ * The ordering here is important: when we process the
+ * aggregate, we count on the fact that @avg appears before
+ * @hist in program order to assure that @avg is assigned the
+ * first aggregation variable ID and @hist assigned the
+ * second; see the comment in process_aggregate() for details.
+ */
+ dprog_add("\t@avg[%dULL, %s, %s%s] = avg(%s);\n",
+ event, arg0, caller, stack, arg1);
+
+ if (g_recsize >= LS_HIST) {
+ dprog_add("\t@hist[%dULL, %s, %s%s] = quantize"
+ "(%s);\n", event, arg0, caller, stack, arg1);
+ }
+ }
+
+ if (info->ev_acquire != NULL)
+ dprog_add("\tself->ev%d[arg0] = 0;\n", event);
+
+ dprog_add("}\n\n");
+}
+
+static void
+dprog_compile()
+{
+ dtrace_prog_t *prog;
+ dtrace_proginfo_t info;
+
+ if (g_Vflag) {
+ (void) fprintf(stderr, "lockstat: vvvv D program vvvv\n");
+ (void) fputs(g_prog, stderr);
+ (void) fprintf(stderr, "lockstat: ^^^^ D program ^^^^\n");
+ }
+
+ if ((prog = dtrace_program_strcompile(g_dtp, g_prog,
+ DTRACE_PROBESPEC_NAME, 0, 0, NULL)) == NULL)
+ dfail("failed to compile program");
+
+ if (dtrace_program_exec(g_dtp, prog, &info) == -1)
+ dfail("failed to enable probes");
+
+ if (dtrace_go(g_dtp) != 0)
+ dfail("couldn't start tracing");
+}
+
+static void
+#if defined(sun)
+status_fire(void)
+#else
+status_fire(int i)
+#endif
+{}
+
+static void
+status_init(void)
+{
+ dtrace_optval_t val, status, agg;
+ struct sigaction act;
+ struct itimerspec ts;
+ struct sigevent ev;
+ timer_t tid;
+
+ if (dtrace_getopt(g_dtp, "statusrate", &status) == -1)
+ dfail("failed to get 'statusrate'");
+
+ if (dtrace_getopt(g_dtp, "aggrate", &agg) == -1)
+		dfail("failed to get 'aggrate'");
+
+ /*
+ * We would want to awaken at a rate that is the GCD of the statusrate
+ * and the aggrate -- but that seems a bit absurd. Instead, we'll
+ * simply awaken at a rate that is the more frequent of the two, which
+ * assures that we're never later than the interval implied by the
+ * more frequent rate.
+ */
+ val = status < agg ? status : agg;
+
+ (void) sigemptyset(&act.sa_mask);
+ act.sa_flags = 0;
+ act.sa_handler = status_fire;
+ (void) sigaction(SIGUSR1, &act, NULL);
+
+ ev.sigev_notify = SIGEV_SIGNAL;
+ ev.sigev_signo = SIGUSR1;
+
+ if (timer_create(CLOCK_REALTIME, &ev, &tid) == -1)
+ dfail("cannot create CLOCK_REALTIME timer");
+
+ ts.it_value.tv_sec = val / NANOSEC;
+ ts.it_value.tv_nsec = val % NANOSEC;
+ ts.it_interval = ts.it_value;
+
+ if (timer_settime(tid, TIMER_RELTIME, &ts, NULL) == -1)
+ dfail("cannot set time on CLOCK_REALTIME timer");
+}
+
+static void
+status_check(void)
+{
+ if (!g_tracing && dtrace_aggregate_snap(g_dtp) != 0)
+ dfail("failed to snap aggregate");
+
+ if (dtrace_status(g_dtp) == -1)
+ dfail("dtrace_status()");
+}
+
+static void
+lsrec_fill(lsrec_t *lsrec, const dtrace_recdesc_t *rec, int nrecs, caddr_t data)
+{
+ bzero(lsrec, g_recsize);
+ lsrec->ls_count = 1;
+
+ if ((g_recsize > LS_HIST && nrecs < 4) || (nrecs < 3))
+ fail(0, "truncated DTrace record");
+
+ if (rec->dtrd_size != sizeof (uint64_t))
+ fail(0, "bad event size in first record");
+
+ /* LINTED - alignment */
+ lsrec->ls_event = (uint32_t)*((uint64_t *)(data + rec->dtrd_offset));
+ rec++;
+
+ if (rec->dtrd_size != sizeof (uintptr_t))
+ fail(0, "bad lock address size in second record");
+
+ /* LINTED - alignment */
+ lsrec->ls_lock = *((uintptr_t *)(data + rec->dtrd_offset));
+ rec++;
+
+ if (rec->dtrd_size != sizeof (uintptr_t))
+ fail(0, "bad caller size in third record");
+
+ /* LINTED - alignment */
+ lsrec->ls_caller = *((uintptr_t *)(data + rec->dtrd_offset));
+ rec++;
+
+ if (g_recsize > LS_HIST) {
+ int frames, i;
+ pc_t *stack;
+
+ frames = rec->dtrd_size / sizeof (pc_t);
+ /* LINTED - alignment */
+ stack = (pc_t *)(data + rec->dtrd_offset);
+
+ for (i = 1; i < frames; i++)
+ lsrec->ls_stack[i - 1] = stack[i];
+ }
+}
+
+/*ARGSUSED*/
+static int
+count_aggregate(const dtrace_aggdata_t *agg, void *arg)
+{
+ *((size_t *)arg) += 1;
+
+ return (DTRACE_AGGWALK_NEXT);
+}
+
+static int
+process_aggregate(const dtrace_aggdata_t *agg, void *arg)
+{
+ const dtrace_aggdesc_t *aggdesc = agg->dtada_desc;
+ caddr_t data = agg->dtada_data;
+ lsdata_t *lsdata = arg;
+ lsrec_t *lsrec = lsdata->lsd_next;
+ const dtrace_recdesc_t *rec;
+ uint64_t *avg, *quantized;
+ int i, j;
+
+ assert(lsdata->lsd_count < g_nrecs);
+
+ /*
+ * Aggregation variable IDs are guaranteed to be generated in program
+ * order, and they are guaranteed to start from DTRACE_AGGVARIDNONE
+ * plus one. As "avg" appears before "hist" in program order, we know
+ * that "avg" will be allocated the first aggregation variable ID, and
+ * "hist" will be allocated the second aggregation variable ID -- and
+ * we therefore use the aggregation variable ID to differentiate the
+ * cases.
+ */
+ if (aggdesc->dtagd_varid > DTRACE_AGGVARIDNONE + 1) {
+ /*
+		 * If this is the histogram entry, we copy the quantized
+		 * data into ls_hist and jump over the rest.
+ */
+ rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
+
+ if (aggdesc->dtagd_varid != DTRACE_AGGVARIDNONE + 2)
+ fail(0, "bad variable ID in aggregation record");
+
+ if (rec->dtrd_size !=
+ DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
+ fail(0, "bad quantize size in aggregation record");
+
+ /* LINTED - alignment */
+ quantized = (uint64_t *)(data + rec->dtrd_offset);
+
+ for (i = DTRACE_QUANTIZE_ZEROBUCKET, j = 0;
+ i < DTRACE_QUANTIZE_NBUCKETS; i++, j++)
+ lsrec->ls_hist[j] = quantized[i];
+
+ goto out;
+ }
+
+ lsrec_fill(lsrec, &aggdesc->dtagd_rec[1],
+ aggdesc->dtagd_nrecs - 1, data);
+
+ rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
+
+ if (rec->dtrd_size != 2 * sizeof (uint64_t))
+ fail(0, "bad avg size in aggregation record");
+
+ /* LINTED - alignment */
+ avg = (uint64_t *)(data + rec->dtrd_offset);
+ lsrec->ls_count = (uint32_t)avg[0];
+ lsrec->ls_time = (uintptr_t)avg[1];
+
+ if (g_recsize >= LS_HIST)
+ return (DTRACE_AGGWALK_NEXT);
+
+out:
+ lsdata->lsd_next = (lsrec_t *)((uintptr_t)lsrec + g_recsize);
+ lsdata->lsd_count++;
+
+ return (DTRACE_AGGWALK_NEXT);
+}
+
+static int
+process_trace(const dtrace_probedata_t *pdata, void *arg)
+{
+ lsdata_t *lsdata = arg;
+ lsrec_t *lsrec = lsdata->lsd_next;
+ dtrace_eprobedesc_t *edesc = pdata->dtpda_edesc;
+ caddr_t data = pdata->dtpda_data;
+
+ if (lsdata->lsd_count >= g_nrecs)
+ return (DTRACE_CONSUME_NEXT);
+
+ lsrec_fill(lsrec, edesc->dtepd_rec, edesc->dtepd_nrecs, data);
+
+ lsdata->lsd_next = (lsrec_t *)((uintptr_t)lsrec + g_recsize);
+ lsdata->lsd_count++;
+
+ return (DTRACE_CONSUME_NEXT);
+}
+
+static int
+process_data(FILE *out, char *data)
+{
+ lsdata_t lsdata;
+
+ /* LINTED - alignment */
+ lsdata.lsd_next = (lsrec_t *)data;
+ lsdata.lsd_count = 0;
+
+ if (g_tracing) {
+ if (dtrace_consume(g_dtp, out,
+ process_trace, NULL, &lsdata) != 0)
+ dfail("failed to consume buffer");
+
+ return (lsdata.lsd_count);
+ }
+
+ if (dtrace_aggregate_walk_keyvarsorted(g_dtp,
+ process_aggregate, &lsdata) != 0)
+ dfail("failed to walk aggregate");
+
+ return (lsdata.lsd_count);
+}
+
+/*ARGSUSED*/
+static int
+drophandler(const dtrace_dropdata_t *data, void *arg)
+{
+ g_dropped++;
+ (void) fprintf(stderr, "lockstat: warning: %s", data->dtdda_msg);
+ return (DTRACE_HANDLE_OK);
+}
+
+int
+main(int argc, char **argv)
+{
+ char *data_buf;
+ lsrec_t *lsp, **current, **first, **sort_buf, **merge_buf;
+ FILE *out = stdout;
+ char c;
+ pid_t child;
+ int status;
+ int i, j;
+ hrtime_t duration;
+ char *addrp, *offp, *sizep, *evp, *lastp, *p;
+ uintptr_t addr;
+ size_t size, off;
+ int events_specified = 0;
+ int exec_errno = 0;
+ uint32_t event;
+ char *filt = NULL, *ifilt = NULL;
+ static uint64_t ev_count[LS_MAX_EVENTS + 1];
+ static uint64_t ev_time[LS_MAX_EVENTS + 1];
+ dtrace_optval_t aggsize;
+ char aggstr[10];
+ long ncpus;
+ int dynvar = 0;
+ int err;
+
+ if ((g_dtp = dtrace_open(DTRACE_VERSION, 0, &err)) == NULL) {
+ fail(0, "cannot open dtrace library: %s",
+ dtrace_errmsg(NULL, err));
+ }
+
+ if (dtrace_handle_drop(g_dtp, &drophandler, NULL) == -1)
+ dfail("couldn't establish drop handler");
+
+ if (symtab_init() == -1)
+ fail(1, "can't load kernel symbols");
+
+ g_nrecs = DEFAULT_NRECS;
+
+ while ((c = getopt(argc, argv, LOCKSTAT_OPTSTR)) != EOF) {
+ switch (c) {
+ case 'b':
+ g_recsize = LS_BASIC;
+ break;
+
+ case 't':
+ g_recsize = LS_TIME;
+ break;
+
+ case 'h':
+ g_recsize = LS_HIST;
+ break;
+
+ case 's':
+ if (!isdigit(optarg[0]))
+ usage();
+ g_stkdepth = atoi(optarg);
+ if (g_stkdepth > LS_MAX_STACK_DEPTH)
+ fail(0, "max stack depth is %d",
+ LS_MAX_STACK_DEPTH);
+ g_recsize = LS_STACK(g_stkdepth);
+ break;
+
+ case 'n':
+ if (!isdigit(optarg[0]))
+ usage();
+ g_nrecs = atoi(optarg);
+ break;
+
+ case 'd':
+ if (!isdigit(optarg[0]))
+ usage();
+ duration = atoll(optarg);
+
+ /*
+ * XXX -- durations really should be per event
+ * since the units are different, but it's hard
+ * to express this nicely in the interface.
+ * Not clear yet what the cleanest solution is.
+ */
+ for (i = 0; i < LS_MAX_EVENTS; i++)
+ if (g_event_info[i].ev_type != 'E')
+ g_min_duration[i] = duration;
+
+ break;
+
+ case 'i':
+ if (!isdigit(optarg[0]))
+ usage();
+ i = atoi(optarg);
+ if (i <= 0)
+ usage();
+ if (i > MAX_HZ)
+ fail(0, "max interrupt rate is %d Hz", MAX_HZ);
+
+ for (j = 0; j < LS_MAX_EVENTS; j++)
+ if (strcmp(g_event_info[j].ev_desc,
+ "Profiling interrupt") == 0)
+ break;
+
+ (void) sprintf(g_event_info[j].ev_name,
+ "profile:::profile-%d", i);
+ break;
+
+ case 'l':
+ case 'f':
+ addrp = strtok(optarg, ",");
+ sizep = strtok(NULL, ",");
+ addrp = strtok(optarg, ",+");
+ offp = strtok(NULL, ",");
+
+ size = sizep ? strtoul(sizep, NULL, 0) : 1;
+ off = offp ? strtoul(offp, NULL, 0) : 0;
+
+ if (addrp[0] == '0') {
+ addr = strtoul(addrp, NULL, 16) + off;
+ } else {
+ addr = sym_to_addr(addrp) + off;
+ if (sizep == NULL)
+ size = sym_size(addrp) - off;
+ if (addr - off == 0)
+ fail(0, "symbol '%s' not found", addrp);
+ if (size == 0)
+ size = 1;
+ }
+
+
+ if (c == 'l') {
+ filter_add(&filt, "arg0", addr, size);
+ } else {
+ filter_add(&filt, "caller", addr, size);
+ filter_add(&ifilt, "arg0", addr, size);
+ }
+ break;
+
+ case 'e':
+ evp = strtok_r(optarg, ",", &lastp);
+ while (evp) {
+ int ev1, ev2;
+ char *evp2;
+
+ (void) strtok(evp, "-");
+ evp2 = strtok(NULL, "-");
+ ev1 = atoi(evp);
+ ev2 = evp2 ? atoi(evp2) : ev1;
+ if ((uint_t)ev1 >= LS_MAX_EVENTS ||
+ (uint_t)ev2 >= LS_MAX_EVENTS || ev1 > ev2)
+ fail(0, "-e events out of range");
+ for (i = ev1; i <= ev2; i++)
+ g_enabled[i] = 1;
+ evp = strtok_r(NULL, ",", &lastp);
+ }
+ events_specified = 1;
+ break;
+
+ case 'c':
+ g_cflag = 1;
+ break;
+
+ case 'k':
+ g_kflag = 1;
+ break;
+
+ case 'w':
+ g_wflag = 1;
+ break;
+
+ case 'W':
+ g_Wflag = 1;
+ break;
+
+ case 'g':
+ g_gflag = 1;
+ break;
+
+ case 'C':
+ case 'E':
+ case 'H':
+ case 'I':
+ for (i = 0; i < LS_MAX_EVENTS; i++)
+ if (g_event_info[i].ev_type == c)
+ g_enabled[i] = 1;
+ events_specified = 1;
+ break;
+
+ case 'A':
+ for (i = 0; i < LS_MAX_EVENTS; i++)
+ if (strchr("CH", g_event_info[i].ev_type))
+ g_enabled[i] = 1;
+ events_specified = 1;
+ break;
+
+ case 'T':
+ g_tracing = 1;
+ break;
+
+ case 'D':
+ if (!isdigit(optarg[0]))
+ usage();
+ g_topn = atoi(optarg);
+ break;
+
+ case 'R':
+ g_rates = 1;
+ break;
+
+ case 'p':
+ g_pflag = 1;
+ break;
+
+ case 'P':
+ g_Pflag = 1;
+ break;
+
+ case 'o':
+ if ((out = fopen(optarg, "w")) == NULL)
+ fail(1, "error opening file");
+ break;
+
+ case 'V':
+ g_Vflag = 1;
+ break;
+
+ default:
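+			/*
+			 * Anything else found in LOCKSTAT_OPTSTR (such as -x)
+			 * is accepted here and processed in the second
+			 * getopt() pass below; unknown options get usage().
+			 */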
+ if (strchr(LOCKSTAT_OPTSTR, c) == NULL)
+ usage();
+ }
+ }
+
+ if (filt != NULL) {
+ predicate_add(&g_predicate, filt, NULL, 0);
+ filter_destroy(&filt);
+ }
+
+ if (ifilt != NULL) {
+ predicate_add(&g_ipredicate, ifilt, NULL, 0);
+ filter_destroy(&ifilt);
+ }
+
+ if (g_recsize == 0) {
+ if (g_gflag) {
+ g_stkdepth = LS_MAX_STACK_DEPTH;
+ g_recsize = LS_STACK(g_stkdepth);
+ } else {
+ g_recsize = LS_TIME;
+ }
+ }
+
+ if (g_gflag && g_recsize <= LS_STACK(0))
+ fail(0, "'-g' requires at least '-s 1' data gathering");
+
+ /*
+	 * Make sure the alignment is reasonable: round g_recsize up to the
+	 * next multiple of sizeof (uint64_t).
+ */
+ g_recsize = -(-g_recsize & -sizeof (uint64_t));
+
+ for (i = 0; i < LS_MAX_EVENTS; i++) {
+ /*
+ * If no events were specified, enable -C.
+ */
+ if (!events_specified && g_event_info[i].ev_type == 'C')
+ g_enabled[i] = 1;
+ }
+
+ for (i = 0; i < LS_MAX_EVENTS; i++) {
+ if (!g_enabled[i])
+ continue;
+
+ if (g_event_info[i].ev_acquire != NULL) {
+ /*
+ * If we've enabled a hold event, we must explicitly
+ * allocate dynamic variable space.
+ */
+ dynvar = 1;
+ }
+
+ dprog_addevent(i);
+ }
+
+ /*
+ * Make sure there are remaining arguments to specify a child command
+ * to execute.
+ */
+ if (argc <= optind)
+ usage();
+
+ if ((ncpus = sysconf(_SC_NPROCESSORS_ONLN)) == -1)
+ dfail("couldn't determine number of online CPUs");
+
+ /*
+ * By default, we set our data buffer size to be the number of records
+ * multiplied by the size of the record, doubled to account for some
+ * DTrace slop and divided by the number of CPUs. We silently clamp
+ * the aggregation size at both a minimum and a maximum to prevent
+ * absurdly low or high values.
+ */
+ if ((aggsize = (g_nrecs * g_recsize * 2) / ncpus) < MIN_AGGSIZE)
+ aggsize = MIN_AGGSIZE;
+
+ if (aggsize > MAX_AGGSIZE)
+ aggsize = MAX_AGGSIZE;
+
+ (void) sprintf(aggstr, "%lld", (long long)aggsize);
+
+ if (!g_tracing) {
+ if (dtrace_setopt(g_dtp, "bufsize", "4k") == -1)
+ dfail("failed to set 'bufsize'");
+
+ if (dtrace_setopt(g_dtp, "aggsize", aggstr) == -1)
+ dfail("failed to set 'aggsize'");
+
+ if (dynvar) {
+ /*
+ * If we're using dynamic variables, we set our
+ * dynamic variable size to be one megabyte per CPU,
+ * with a hard-limit of 32 megabytes. This may still
+ * be too small in some cases, but it can be tuned
+ * manually via -x if need be.
+ */
+ (void) sprintf(aggstr, "%ldm", ncpus < 32 ? ncpus : 32);
+
+ if (dtrace_setopt(g_dtp, "dynvarsize", aggstr) == -1)
+ dfail("failed to set 'dynvarsize'");
+ }
+ } else {
+ if (dtrace_setopt(g_dtp, "bufsize", aggstr) == -1)
+ dfail("failed to set 'bufsize'");
+ }
+
+ if (dtrace_setopt(g_dtp, "statusrate", "10sec") == -1)
+ dfail("failed to set 'statusrate'");
+
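+	/*
+	 * Make a second pass over the options to apply any -x settings so
+	 * that they can override the DTrace option defaults chosen above.
+	 */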
+ optind = 1;
+ while ((c = getopt(argc, argv, LOCKSTAT_OPTSTR)) != EOF) {
+ switch (c) {
+ case 'x':
+ if ((p = strchr(optarg, '=')) != NULL)
+ *p++ = '\0';
+
+ if (dtrace_setopt(g_dtp, optarg, p) != 0)
+ dfail("failed to set -x %s", optarg);
+ break;
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
+
+ dprog_compile();
+ status_init();
+
+ g_elapsed = -gethrtime();
+
+ /*
+ * Spawn the specified command and wait for it to complete.
+ */
+ child = fork();
+ if (child == -1)
+ fail(1, "cannot fork");
+ if (child == 0) {
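+		/*
+		 * Child: release our DTrace handle and exec the command.
+		 * If execvp() fails, save errno so the parent can report
+		 * the real cause, and exit with the conventional 127.
+		 */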
+ (void) dtrace_close(g_dtp);
+ (void) execvp(argv[0], &argv[0]);
+ exec_errno = errno;
+ exit(127);
+ }
+
+#if defined(sun)
+ while (waitpid(child, &status, WEXITED) != child)
+#else
+ while (waitpid(child, &status, 0) != child)
+#endif
+ status_check();
+
+ g_elapsed += gethrtime();
+
+ if (WIFEXITED(status)) {
+ if (WEXITSTATUS(status) != 0) {
+ if (exec_errno != 0) {
+ errno = exec_errno;
+ fail(1, "could not execute %s", argv[0]);
+ }
+ (void) fprintf(stderr,
+ "lockstat: warning: %s exited with code %d\n",
+ argv[0], WEXITSTATUS(status));
+ }
+ } else {
+ (void) fprintf(stderr,
+ "lockstat: warning: %s died on signal %d\n",
+ argv[0], WTERMSIG(status));
+ }
+
+ if (dtrace_stop(g_dtp) == -1)
+ dfail("failed to stop dtrace");
+
+ /*
+ * Before we read out the results, we need to allocate our buffer.
+ * If we're tracing, then we'll just use the precalculated size. If
+ * we're not, then we'll take a snapshot of the aggregate, and walk
+ * it to count the number of records.
+ */
+ if (!g_tracing) {
+ if (dtrace_aggregate_snap(g_dtp) != 0)
+ dfail("failed to snap aggregate");
+
+ g_nrecs = 0;
+
+ if (dtrace_aggregate_walk(g_dtp,
+ count_aggregate, &g_nrecs) != 0)
+ dfail("failed to walk aggregate");
+ }
+
+#if defined(sun)
+ if ((data_buf = memalign(sizeof (uint64_t),
+ (g_nrecs + 1) * g_recsize)) == NULL)
+#else
+ if (posix_memalign((void **)&data_buf, sizeof (uint64_t),
+	    (g_nrecs + 1) * g_recsize) != 0)
+#endif
+ fail(1, "Memory allocation failed");
+
+ /*
+ * Read out the DTrace data.
+ */
+ g_nrecs_used = process_data(out, data_buf);
+
+ if (g_nrecs_used > g_nrecs || g_dropped)
+ (void) fprintf(stderr, "lockstat: warning: "
+ "ran out of data records (use -n for more)\n");
+
+ /* LINTED - alignment */
+ for (i = 0, lsp = (lsrec_t *)data_buf; i < g_nrecs_used; i++,
+ /* LINTED - alignment */
+ lsp = (lsrec_t *)((char *)lsp + g_recsize)) {
+ ev_count[lsp->ls_event] += lsp->ls_count;
+ ev_time[lsp->ls_event] += lsp->ls_time;
+ }
+
+ /*
+	 * If -g was specified, expand each stack trace into individual
+	 * records, one per frame, so that an event is charged to every
+	 * function that generated it, not just the immediate caller.
+ */
+ if (g_gflag) {
+ lsrec_t *newlsp, *oldlsp;
+
+#if defined(sun)
+ newlsp = memalign(sizeof (uint64_t),
+ g_nrecs_used * LS_TIME * (g_stkdepth + 1));
+#else
+		/* posix_memalign() may leave newlsp unspecified on failure */
+		if (posix_memalign((void **)&newlsp, sizeof (uint64_t),
+		    g_nrecs_used * LS_TIME * (g_stkdepth + 1)) != 0)
+			newlsp = NULL;
+#endif
+ if (newlsp == NULL)
+ fail(1, "Cannot allocate space for -g processing");
+ lsp = newlsp;
+ /* LINTED - alignment */
+ for (i = 0, oldlsp = (lsrec_t *)data_buf; i < g_nrecs_used; i++,
+ /* LINTED - alignment */
+ oldlsp = (lsrec_t *)((char *)oldlsp + g_recsize)) {
+ int fr;
+ int caller_in_stack = 0;
+
+ if (oldlsp->ls_count == 0)
+ continue;
+
+ for (fr = 0; fr < g_stkdepth; fr++) {
+ if (oldlsp->ls_stack[fr] == 0)
+ break;
+ if (oldlsp->ls_stack[fr] == oldlsp->ls_caller)
+ caller_in_stack = 1;
+ bcopy(oldlsp, lsp, LS_TIME);
+ lsp->ls_caller = oldlsp->ls_stack[fr];
+ /* LINTED - alignment */
+ lsp = (lsrec_t *)((char *)lsp + LS_TIME);
+ }
+ if (!caller_in_stack) {
+ bcopy(oldlsp, lsp, LS_TIME);
+ /* LINTED - alignment */
+ lsp = (lsrec_t *)((char *)lsp + LS_TIME);
+ }
+ }
+ g_nrecs = g_nrecs_used =
+ ((uintptr_t)lsp - (uintptr_t)newlsp) / LS_TIME;
+ g_recsize = LS_TIME;
+ g_stkdepth = 0;
+ free(data_buf);
+ data_buf = (char *)newlsp;
+ }
+
+ if ((sort_buf = calloc(2 * (g_nrecs + 1),
+ sizeof (void *))) == NULL)
+ fail(1, "Sort buffer allocation failed");
+ merge_buf = sort_buf + (g_nrecs + 1);
+
+ /*
+ * Build the sort buffer, discarding zero-count records along the way.
+ */
+ /* LINTED - alignment */
+ for (i = 0, lsp = (lsrec_t *)data_buf; i < g_nrecs_used; i++,
+ /* LINTED - alignment */
+ lsp = (lsrec_t *)((char *)lsp + g_recsize)) {
+ if (lsp->ls_count == 0)
+ lsp->ls_event = LS_MAX_EVENTS;
+ sort_buf[i] = lsp;
+ }
+
+ if (g_nrecs_used == 0)
+ exit(0);
+
+ /*
+ * Add a sentinel after the last record
+ */
+ sort_buf[i] = lsp;
+ lsp->ls_event = LS_MAX_EVENTS;
+
+ if (g_tracing) {
+ report_trace(out, sort_buf);
+ return (0);
+ }
+
+ /*
+ * Application of -g may have resulted in multiple records
+ * with the same signature; coalesce them.
+ */
+ if (g_gflag) {
+ mergesort(lockcmp, sort_buf, merge_buf, g_nrecs_used);
+ coalesce(lockcmp, sort_buf, g_nrecs_used);
+ }
+
+ /*
+ * Coalesce locks within the same symbol if -c option specified.
+ * Coalesce PCs within the same function if -k option specified.
+ */
+ if (g_cflag || g_kflag) {
+ for (i = 0; i < g_nrecs_used; i++) {
+ int fr;
+ lsp = sort_buf[i];
+ if (g_cflag)
+ coalesce_symbol(&lsp->ls_lock);
+ if (g_kflag) {
+ for (fr = 0; fr < g_stkdepth; fr++)
+ coalesce_symbol(&lsp->ls_stack[fr]);
+ coalesce_symbol(&lsp->ls_caller);
+ }
+ }
+ mergesort(lockcmp, sort_buf, merge_buf, g_nrecs_used);
+ coalesce(lockcmp, sort_buf, g_nrecs_used);
+ }
+
+ /*
+ * Coalesce callers if -w option specified
+ */
+ if (g_wflag) {
+ mergesort(lock_and_count_cmp_anywhere,
+ sort_buf, merge_buf, g_nrecs_used);
+ coalesce(lockcmp_anywhere, sort_buf, g_nrecs_used);
+ }
+
+ /*
+ * Coalesce locks if -W option specified
+ */
+ if (g_Wflag) {
+ mergesort(site_and_count_cmp_anylock,
+ sort_buf, merge_buf, g_nrecs_used);
+ coalesce(sitecmp_anylock, sort_buf, g_nrecs_used);
+ }
+
+ /*
+ * Sort data by contention count (ls_count) or total time (ls_time),
+ * depending on g_Pflag. Override g_Pflag if time wasn't measured.
+ */
+ if (g_recsize < LS_TIME)
+ g_Pflag = 0;
+
+ if (g_Pflag)
+ mergesort(timecmp, sort_buf, merge_buf, g_nrecs_used);
+ else
+ mergesort(countcmp, sort_buf, merge_buf, g_nrecs_used);
+
+ /*
+ * Display data by event type
+ */
+ first = &sort_buf[0];
+ while ((event = (*first)->ls_event) < LS_MAX_EVENTS) {
+ current = first;
+ while ((lsp = *current)->ls_event == event)
+ current++;
+ report_stats(out, first, current - first, ev_count[event],
+ ev_time[event]);
+ first = current;
+ }
+
+ return (0);
+}
+
+static char *
+format_symbol(char *buf, uintptr_t addr, int show_size)
+{
+ uintptr_t symoff;
+ char *symname;
+ size_t symsize;
+
+ symname = addr_to_sym(addr, &symoff, &symsize);
+
+ if (show_size && symoff == 0)
+ (void) sprintf(buf, "%s[%ld]", symname, (long)symsize);
+ else if (symoff == 0)
+ (void) sprintf(buf, "%s", symname);
+ else if (symoff < 16 && bcmp(symname, "cpu[", 4) == 0) /* CPU+PIL */
+#if defined(sun)
+ (void) sprintf(buf, "%s+%ld", symname, (long)symoff);
+#else
+ (void) sprintf(buf, "%s+%s", symname, g_pri_class[(int)symoff]);
+#endif
+ else if (symoff <= symsize || (symoff < 256 && addr != symoff))
+ (void) sprintf(buf, "%s+0x%llx", symname,
+ (unsigned long long)symoff);
+ else
+ (void) sprintf(buf, "0x%llx", (unsigned long long)addr);
+ return (buf);
+}
+
+static void
+report_stats(FILE *out, lsrec_t **sort_buf, size_t nrecs, uint64_t total_count,
+ uint64_t total_time)
+{
+ uint32_t event = sort_buf[0]->ls_event;
+ lsrec_t *lsp;
+ double ptotal = 0.0;
+ double percent;
+ int i, j, fr;
+ int displayed;
+ int first_bin, last_bin, max_bin_count, total_bin_count;
+ int rectype;
+ char buf[256];
+ char lhdr[80], chdr[80];
+
+ rectype = g_recsize;
+
+ if (g_topn == 0) {
+ (void) fprintf(out, "%20llu %s\n",
+ g_rates == 0 ? total_count :
+ ((unsigned long long)total_count * NANOSEC) / g_elapsed,
+ g_event_info[event].ev_desc);
+ return;
+ }
+
+ (void) sprintf(lhdr, "%s%s",
+ g_Wflag ? "Hottest " : "", g_event_info[event].ev_lhdr);
+ (void) sprintf(chdr, "%s%s",
+ g_wflag ? "Hottest " : "", "Caller");
+
+ if (!g_pflag)
+ (void) fprintf(out,
+ "\n%s: %.0f events in %.3f seconds (%.0f events/sec)\n\n",
+ g_event_info[event].ev_desc, (double)total_count,
+ (double)g_elapsed / NANOSEC,
+ (double)total_count * NANOSEC / g_elapsed);
+
+ if (!g_pflag && rectype < LS_HIST) {
+ (void) sprintf(buf, "%s", g_event_info[event].ev_units);
+ (void) fprintf(out, "%5s %4s %4s %4s %8s %-22s %-24s\n",
+ g_rates ? "ops/s" : "Count",
+ g_gflag ? "genr" : "indv",
+ "cuml", "rcnt", rectype >= LS_TIME ? buf : "", lhdr, chdr);
+ (void) fprintf(out, "---------------------------------"
+ "----------------------------------------------\n");
+ }
+
+ displayed = 0;
+ for (i = 0; i < nrecs; i++) {
+ lsp = sort_buf[i];
+
+ if (displayed++ >= g_topn)
+ break;
+
+ if (g_pflag) {
+ int j;
+
+ (void) fprintf(out, "%u %u",
+ lsp->ls_event, lsp->ls_count);
+ (void) fprintf(out, " %s",
+ format_symbol(buf, lsp->ls_lock, g_cflag));
+ (void) fprintf(out, " %s",
+ format_symbol(buf, lsp->ls_caller, 0));
+ (void) fprintf(out, " %f",
+ (double)lsp->ls_refcnt / lsp->ls_count);
+ if (rectype >= LS_TIME)
+ (void) fprintf(out, " %llu",
+ (unsigned long long)lsp->ls_time);
+ if (rectype >= LS_HIST) {
+ for (j = 0; j < 64; j++)
+ (void) fprintf(out, " %u",
+ lsp->ls_hist[j]);
+ }
+ for (j = 0; j < LS_MAX_STACK_DEPTH; j++) {
+ if (rectype <= LS_STACK(j) ||
+ lsp->ls_stack[j] == 0)
+ break;
+ (void) fprintf(out, " %s",
+ format_symbol(buf, lsp->ls_stack[j], 0));
+ }
+ (void) fprintf(out, "\n");
+ continue;
+ }
+
+ if (rectype >= LS_HIST) {
+ (void) fprintf(out, "---------------------------------"
+ "----------------------------------------------\n");
+ (void) sprintf(buf, "%s",
+ g_event_info[event].ev_units);
+ (void) fprintf(out, "%5s %4s %4s %4s %8s %-22s %-24s\n",
+ g_rates ? "ops/s" : "Count",
+ g_gflag ? "genr" : "indv",
+ "cuml", "rcnt", buf, lhdr, chdr);
+ }
+
+ if (g_Pflag && total_time != 0)
+ percent = (lsp->ls_time * 100.00) / total_time;
+ else
+ percent = (lsp->ls_count * 100.00) / total_count;
+
+ ptotal += percent;
+
+ if (rectype >= LS_TIME)
+ (void) sprintf(buf, "%llu",
+ (unsigned long long)(lsp->ls_time / lsp->ls_count));
+ else
+ buf[0] = '\0';
+
+ (void) fprintf(out, "%5llu ",
+ g_rates == 0 ? lsp->ls_count :
+ ((uint64_t)lsp->ls_count * NANOSEC) / g_elapsed);
+
+ (void) fprintf(out, "%3.0f%% ", percent);
+
+ if (g_gflag)
+ (void) fprintf(out, "---- ");
+ else
+ (void) fprintf(out, "%3.0f%% ", ptotal);
+
+ (void) fprintf(out, "%4.2f %8s ",
+ (double)lsp->ls_refcnt / lsp->ls_count, buf);
+
+ (void) fprintf(out, "%-22s ",
+ format_symbol(buf, lsp->ls_lock, g_cflag));
+
+ (void) fprintf(out, "%-24s\n",
+ format_symbol(buf, lsp->ls_caller, 0));
+
+ if (rectype < LS_HIST)
+ continue;
+
+ (void) fprintf(out, "\n");
+ (void) fprintf(out, "%10s %31s %-9s %-24s\n",
+ g_event_info[event].ev_units,
+ "------ Time Distribution ------",
+ g_rates ? "ops/s" : "count",
+ rectype > LS_STACK(0) ? "Stack" : "");
+
+ first_bin = 0;
+ while (lsp->ls_hist[first_bin] == 0)
+ first_bin++;
+
+ last_bin = 63;
+ while (lsp->ls_hist[last_bin] == 0)
+ last_bin--;
+
+ max_bin_count = 0;
+ total_bin_count = 0;
+ for (j = first_bin; j <= last_bin; j++) {
+ total_bin_count += lsp->ls_hist[j];
+ if (lsp->ls_hist[j] > max_bin_count)
+ max_bin_count = lsp->ls_hist[j];
+ }
+
+ /*
+ * If we went a few frames below the caller, ignore them
+ */
+ for (fr = 3; fr > 0; fr--)
+ if (lsp->ls_stack[fr] == lsp->ls_caller)
+ break;
+
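+		/*
+		 * Render each bar by indexing into constant strings:
+		 * print 'depth' '@' characters followed by (30 - depth)
+		 * spaces.
+		 */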
+ for (j = first_bin; j <= last_bin; j++) {
+ uint_t depth = (lsp->ls_hist[j] * 30) / total_bin_count;
+ (void) fprintf(out, "%10llu |%s%s %-9u ",
+ 1ULL << j,
+ "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" + 30 - depth,
+ " " + depth,
+ g_rates == 0 ? lsp->ls_hist[j] :
+ (uint_t)(((uint64_t)lsp->ls_hist[j] * NANOSEC) /
+ g_elapsed));
+ if (rectype <= LS_STACK(fr) || lsp->ls_stack[fr] == 0) {
+ (void) fprintf(out, "\n");
+ continue;
+ }
+ (void) fprintf(out, "%-24s\n",
+ format_symbol(buf, lsp->ls_stack[fr], 0));
+ fr++;
+ }
+ while (rectype > LS_STACK(fr) && lsp->ls_stack[fr] != 0) {
+ (void) fprintf(out, "%15s %-36s %-24s\n", "", "",
+ format_symbol(buf, lsp->ls_stack[fr], 0));
+ fr++;
+ }
+ }
+
+ if (!g_pflag)
+ (void) fprintf(out, "---------------------------------"
+ "----------------------------------------------\n");
+
+ (void) fflush(out);
+}
+
+static void
+report_trace(FILE *out, lsrec_t **sort_buf)
+{
+ lsrec_t *lsp;
+ int i, fr;
+ int rectype;
+ char buf[256], buf2[256];
+
+ rectype = g_recsize;
+
+ if (!g_pflag) {
+ (void) fprintf(out, "%5s %7s %11s %-24s %-24s\n",
+ "Event", "Time", "Owner", "Lock", "Caller");
+ (void) fprintf(out, "---------------------------------"
+ "----------------------------------------------\n");
+ }
+
+ for (i = 0; i < g_nrecs_used; i++) {
+
+ lsp = sort_buf[i];
+
+ if (lsp->ls_event >= LS_MAX_EVENTS || lsp->ls_count == 0)
+ continue;
+
+ (void) fprintf(out, "%2d %10llu %11p %-24s %-24s\n",
+ lsp->ls_event, (unsigned long long)lsp->ls_time,
+ (void *)lsp->ls_next,
+ format_symbol(buf, lsp->ls_lock, 0),
+ format_symbol(buf2, lsp->ls_caller, 0));
+
+ if (rectype <= LS_STACK(0))
+ continue;
+
+ /*
+ * If we went a few frames below the caller, ignore them
+ */
+ for (fr = 3; fr > 0; fr--)
+ if (lsp->ls_stack[fr] == lsp->ls_caller)
+ break;
+
+ while (rectype > LS_STACK(fr) && lsp->ls_stack[fr] != 0) {
+ (void) fprintf(out, "%53s %-24s\n", "",
+ format_symbol(buf, lsp->ls_stack[fr], 0));
+ fr++;
+ }
+ (void) fprintf(out, "\n");
+ }
+
+ (void) fflush(out);
+}
diff --git a/cddl/contrib/opensolaris/cmd/lockstat/sym.c b/cddl/contrib/opensolaris/cmd/lockstat/sym.c
new file mode 100644
index 0000000..78b27d2
--- /dev/null
+++ b/cddl/contrib/opensolaris/cmd/lockstat/sym.c
@@ -0,0 +1,311 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright (c) 1997-1999 by Sun Microsystems, Inc.
+ * All rights reserved.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <ctype.h>
+#include <string.h>
+#include <signal.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <libelf.h>
+#include <link.h>
+#include <elf.h>
+#if defined(sun)
+#include <sys/machelf.h>
+
+#include <kstat.h>
+#else
+/* FreeBSD */
+#include <sys/elf.h>
+#include <sys/ksyms.h>
+#endif
+#include <sys/cpuvar.h>
+
+typedef struct syment {
+ uintptr_t addr;
+ char *name;
+ size_t size;
+} syment_t;
+
+static syment_t *symbol_table;
+static int nsyms, maxsyms;
+static char maxsymname[64];
+
+#if defined(sun)
+#ifdef _ELF64
+#define elf_getshdr elf64_getshdr
+#else
+#define elf_getshdr elf32_getshdr
+#endif
+#endif
+
+static void
+add_symbol(char *name, uintptr_t addr, size_t size)
+{
+ syment_t *sep;
+
+ if (nsyms >= maxsyms) {
+ maxsyms += 10000;
+ symbol_table = realloc(symbol_table, maxsyms * sizeof (*sep));
+ if (symbol_table == NULL) {
+ (void) fprintf(stderr, "can't allocate symbol table\n");
+ exit(3);
+ }
+ }
+ sep = &symbol_table[nsyms++];
+
+ sep->name = name;
+ sep->addr = addr;
+ sep->size = size;
+}
+
+static void
+remove_symbol(uintptr_t addr)
+{
+ int i;
+ syment_t *sep = symbol_table;
+
+ for (i = 0; i < nsyms; i++, sep++)
+ if (sep->addr == addr)
+ sep->addr = 0;
+}
+
+#if defined(sun)
+static void
+fake_up_certain_popular_kernel_symbols(void)
+{
+ kstat_ctl_t *kc;
+ kstat_t *ksp;
+ char *name;
+
+ if ((kc = kstat_open()) == NULL)
+ return;
+
+ for (ksp = kc->kc_chain; ksp; ksp = ksp->ks_next) {
+ if (strcmp(ksp->ks_module, "cpu_info") == 0) {
+ if ((name = malloc(20)) == NULL)
+ break;
+ /*
+ * For consistency, keep cpu[0] and toss cpu0
+ * or any other such symbols.
+ */
+ if (ksp->ks_instance == 0)
+ remove_symbol((uintptr_t)ksp->ks_private);
+ (void) sprintf(name, "cpu[%d]", ksp->ks_instance);
+ add_symbol(name, (uintptr_t)ksp->ks_private,
+ sizeof (struct cpu));
+ }
+ }
+ (void) kstat_close(kc);
+}
+#else
+/* FreeBSD */
+static void
+fake_up_certain_popular_kernel_symbols(void)
+{
+ char *name;
+ uintptr_t addr;
+ int i;
+
+ /* Good for up to 256 CPUs */
+	for (i = 0; i < 256; i++) {
+ if ((name = malloc(20)) == NULL)
+ break;
+ (void) sprintf(name, "cpu[%d]", i);
+ addr = 0x01000000 + (i << 16);
+ add_symbol(name, addr, sizeof (uintptr_t));
+ }
+}
+#endif /* !defined(sun) */
+
+static int
+symcmp(const void *p1, const void *p2)
+{
+ uintptr_t a1 = ((syment_t *)p1)->addr;
+ uintptr_t a2 = ((syment_t *)p2)->addr;
+
+ if (a1 < a2)
+ return (-1);
+ if (a1 > a2)
+ return (1);
+ return (0);
+}
+
+int
+symtab_init(void)
+{
+ Elf *elf;
+ Elf_Scn *scn = NULL;
+ Sym *symtab, *symp, *lastsym;
+ char *strtab;
+ uint_t cnt;
+ int fd;
+ int i;
+ int strindex = -1;
+#if !defined(sun)
+ void *ksyms;
+ size_t sz;
+#endif
+
+ if ((fd = open("/dev/ksyms", O_RDONLY)) == -1)
+ return (-1);
+
+#if defined(sun)
+ (void) elf_version(EV_CURRENT);
+
+ elf = elf_begin(fd, ELF_C_READ, NULL);
+#else
+ /* FreeBSD */
+ /*
+ * XXX - libelf needs to be fixed so it will work with
+ * non 'ordinary' files like /dev/ksyms. The following
+	 * is a workaround for now.
+ */
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ close(fd);
+ return (-1);
+ }
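+	/*
+	 * The ksyms(4) driver provides a snapshot of the kernel symbol
+	 * table mapped into our address space; KIOCGSIZE and KIOCGADDR
+	 * report its size and mapped address so the image can be handed
+	 * to elf_memory() below.
+	 */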
+ if (ioctl(fd, KIOCGSIZE, &sz) < 0) {
+ close(fd);
+ return (-1);
+ }
+ if (ioctl(fd, KIOCGADDR, &ksyms) < 0) {
+ close(fd);
+ return (-1);
+ }
+ if ((elf = elf_memory(ksyms, sz)) == NULL) {
+ close(fd);
+ return (-1);
+ }
+#endif
+
+ for (cnt = 1; (scn = elf_nextscn(elf, scn)) != NULL; cnt++) {
+ Shdr *shdr = elf_getshdr(scn);
+ if (shdr->sh_type == SHT_SYMTAB) {
+ symtab = (Sym *)elf_getdata(scn, NULL)->d_buf;
+ nsyms = shdr->sh_size / shdr->sh_entsize;
+ strindex = shdr->sh_link;
+ }
+ }
+
+ for (cnt = 1; (scn = elf_nextscn(elf, scn)) != NULL; cnt++) {
+ if (cnt == strindex)
+ strtab = (char *)elf_getdata(scn, NULL)->d_buf;
+ }
+
+ lastsym = symtab + nsyms;
+ nsyms = 0;
+ for (symp = symtab; symp < lastsym; symp++)
+ if ((uint_t)ELF32_ST_TYPE(symp->st_info) <= STT_FUNC &&
+ symp->st_size != 0)
+ add_symbol(symp->st_name + strtab,
+ (uintptr_t)symp->st_value, (size_t)symp->st_size);
+
+ fake_up_certain_popular_kernel_symbols();
+ (void) sprintf(maxsymname, "0x%lx", ULONG_MAX);
+ add_symbol(maxsymname, ULONG_MAX, 1);
+
+ qsort(symbol_table, nsyms, sizeof (syment_t), symcmp);
+
+ /*
+ * Destroy all duplicate symbols, then sort it again.
+ */
+ for (i = 0; i < nsyms - 1; i++)
+ if (symbol_table[i].addr == symbol_table[i + 1].addr)
+ symbol_table[i].addr = 0;
+
+ qsort(symbol_table, nsyms, sizeof (syment_t), symcmp);
+
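+	/*
+	 * The zeroed duplicates sort to the front; skip all but one of
+	 * them and turn slot 0 into a catch-all "(usermode)" entry for
+	 * addresses below the first kernel symbol.
+	 */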
+ while (symbol_table[1].addr == 0) {
+ symbol_table++;
+ nsyms--;
+ }
+ symbol_table[0].name = "(usermode)";
+ symbol_table[0].addr = 0;
+ symbol_table[0].size = 1;
+
+ close(fd);
+ return (0);
+}
+
+char *
+addr_to_sym(uintptr_t addr, uintptr_t *offset, size_t *sizep)
+{
+ int lo = 0;
+ int hi = nsyms - 1;
+ int mid;
+ syment_t *sep;
+
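+	/*
+	 * Binary search for the greatest symbol address that is <= addr.
+	 */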
+ while (hi - lo > 1) {
+ mid = (lo + hi) / 2;
+ if (addr >= symbol_table[mid].addr) {
+ lo = mid;
+ } else {
+ hi = mid;
+ }
+ }
+ sep = &symbol_table[lo];
+ *offset = addr - sep->addr;
+ *sizep = sep->size;
+ return (sep->name);
+}
+
+uintptr_t
+sym_to_addr(char *name)
+{
+ int i;
+ syment_t *sep = symbol_table;
+
+ for (i = 0; i < nsyms; i++) {
+ if (strcmp(name, sep->name) == 0)
+ return (sep->addr);
+ sep++;
+ }
+ return (0);
+}
+
+size_t
+sym_size(char *name)
+{
+ int i;
+ syment_t *sep = symbol_table;
+
+ for (i = 0; i < nsyms; i++) {
+ if (strcmp(name, sep->name) == 0)
+ return (sep->size);
+ sep++;
+ }
+ return (0);
+}
diff --git a/cddl/usr.sbin/Makefile b/cddl/usr.sbin/Makefile
index 06c6894..f7c32f3 100644
--- a/cddl/usr.sbin/Makefile
+++ b/cddl/usr.sbin/Makefile
@@ -3,6 +3,7 @@
.include <bsd.own.mk>
SUBDIR= dtrace \
+ lockstat \
${_zdb}
.if ${MK_ZFS} != "no"
diff --git a/cddl/usr.sbin/lockstat/Makefile b/cddl/usr.sbin/lockstat/Makefile
new file mode 100644
index 0000000..9c135f9
--- /dev/null
+++ b/cddl/usr.sbin/lockstat/Makefile
@@ -0,0 +1,40 @@
+# $FreeBSD$
+
+.include "../../Makefile.inc"
+
+PROG= lockstat
+
+BINDIR?= /usr/sbin
+
+SRCS= lockstat.c sym.c
+
+WARNS= 1
+
+CFLAGS+= -I${.CURDIR}/../../../sys/cddl/compat/opensolaris \
+ -I${.CURDIR}/../../../cddl/compat/opensolaris/include \
+ -I${OPENSOLARIS_USR_DISTDIR}/head \
+ -I${OPENSOLARIS_USR_DISTDIR}/lib/libdtrace/common \
+ -I${OPENSOLARIS_USR_DISTDIR}/lib/libproc/common \
+ -I${OPENSOLARIS_SYS_DISTDIR}/uts/common \
+ -I${OPENSOLARIS_SYS_DISTDIR}/compat \
+ -I${.CURDIR}/../../../sys
+
+.PATH: ${OPENSOLARIS_USR_DISTDIR}/cmd/lockstat
+
+CFLAGS+= -DNEED_ERRLOC -g
+
+#YFLAGS+= -d
+
+LDFLAGS+= -pthread \
+ -L${.OBJDIR}/../../lib/libdtrace \
+ -L${.OBJDIR}/../../lib/libproc \
+ -L${.OBJDIR}/../../lib/libctf \
+ -L${.OBJDIR}/../../../lib/libelf
+
+LDADD+= -ldtrace -ly -ll -lproc -lctf -lelf -lz -lrt
+
+#DPADD+= ${LIBDTRACE} ${LIBPTHREAD} ${LIBL} ${LIBY} ${LIBZ}
+
+NO_MAN=
+
+.include <bsd.prog.mk>