author     neel <neel@FreeBSD.org>  2012-11-13 07:39:05 +0000
committer  neel <neel@FreeBSD.org>  2012-11-13 07:39:05 +0000
commit     1a164db277254c1198a5923050a4e403737937c3 (patch)
tree       96b9ec5962272d9c843abffb6d4db9b66e04efd3
parent     f146c1921cfde17a6716187a95a05c21f5e46b94 (diff)
parent     a841c9341b2afeabcf32a11c8bd91bbe3346177a (diff)
download   FreeBSD-src-1a164db277254c1198a5923050a4e403737937c3.zip
           FreeBSD-src-1a164db277254c1198a5923050a4e403737937c3.tar.gz
IFC @ r242940
-rw-r--r--  Makefile.inc1 | 4
-rw-r--r--  bin/ls/cmp.c | 20
-rw-r--r--  bin/ls/extern.h | 12
-rw-r--r--  bin/ls/ls.1 | 72
-rw-r--r--  bin/ls/ls.c | 106
-rw-r--r--  bin/ls/ls.h | 4
-rw-r--r--  bin/ls/print.c | 4
-rw-r--r--  bin/ls/util.c | 14
-rw-r--r--  bin/sh/alias.c | 11
-rw-r--r--  bin/sh/input.c | 6
-rw-r--r--  bin/sh/input.h | 3
-rw-r--r--  cddl/contrib/opensolaris/cmd/ztest/ztest.c | 112
-rw-r--r--  cddl/contrib/opensolaris/lib/libdtrace/powerpc/dt_isadep.c | 75
-rw-r--r--  cddl/lib/Makefile | 3
-rw-r--r--  cddl/lib/libdtrace/Makefile | 4
-rw-r--r--  cddl/usr.sbin/Makefile | 6
-rw-r--r--  contrib/gdb/gdb/c-valprint.c | 16
-rw-r--r--  contrib/gdb/gdb/cp-valprint.c | 9
-rw-r--r--  contrib/gdb/gdb/dwarf2loc.c | 11
-rw-r--r--  contrib/gdb/gdb/f-valprint.c | 11
-rw-r--r--  contrib/gdb/gdb/jv-valprint.c | 14
-rw-r--r--  contrib/gdb/gdb/p-valprint.c | 34
-rw-r--r--  contrib/gdb/gdb/scm-valprint.c | 3
-rw-r--r--  contrib/gdb/gdb/stack.c | 4
-rw-r--r--  contrib/gdb/gdb/valprint.c | 59
-rw-r--r--  contrib/gdb/gdb/value.h | 5
-rw-r--r--  contrib/gdb/gdb/varobj.c | 6
-rw-r--r--  contrib/jemalloc/ChangeLog | 41
-rw-r--r--  contrib/jemalloc/FREEBSD-diffs | 26
-rw-r--r--  contrib/jemalloc/VERSION | 2
-rw-r--r--  contrib/jemalloc/doc/jemalloc.3 | 89
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena.h | 68
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk.h | 8
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk_dss.h | 14
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h | 2
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ctl.h | 5
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent.h | 3
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/huge.h | 2
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h | 123
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/private_namespace.h | 40
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prof.h | 5
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/rtree.h | 3
-rw-r--r--  contrib/jemalloc/include/jemalloc/jemalloc.h | 11
-rw-r--r--  contrib/jemalloc/include/jemalloc/jemalloc_defs.h | 9
-rw-r--r--  contrib/jemalloc/src/arena.c | 699
-rw-r--r--  contrib/jemalloc/src/base.c | 3
-rw-r--r--  contrib/jemalloc/src/chunk.c | 165
-rw-r--r--  contrib/jemalloc/src/chunk_dss.c | 37
-rw-r--r--  contrib/jemalloc/src/chunk_mmap.c | 12
-rw-r--r--  contrib/jemalloc/src/ctl.c | 361
-rw-r--r--  contrib/jemalloc/src/huge.c | 7
-rw-r--r--  contrib/jemalloc/src/jemalloc.c | 212
-rw-r--r--  contrib/jemalloc/src/mutex.c | 2
-rw-r--r--  contrib/jemalloc/src/prof.c | 42
-rw-r--r--  contrib/jemalloc/src/rtree.c | 21
-rw-r--r--  contrib/jemalloc/src/stats.c | 10
-rw-r--r--  contrib/jemalloc/src/tcache.c | 4
-rw-r--r--  contrib/jemalloc/src/util.c | 5
-rw-r--r--  contrib/llvm/lib/Target/X86/X86Subtarget.cpp | 8
-rw-r--r--  contrib/top/commands.c | 4
-rw-r--r--  contrib/tzdata/asia | 2
-rw-r--r--  contrib/tzdata/northamerica | 9
-rwxr-xr-x  etc/pccard_ether | 1
-rw-r--r--  lib/Makefile | 5
-rw-r--r--  lib/libc/gen/isnan.c | 7
-rw-r--r--  lib/libc/stdio/printf.3 | 2
-rw-r--r--  lib/libcrypt/tests/Makefile | 10
-rw-r--r--  lib/libcrypt/tests/crypt_tests.c | 54
-rw-r--r--  lib/libproc/proc_bkpt.c | 3
-rw-r--r--  lib/libproc/proc_regs.c | 8
-rw-r--r--  lib/msun/src/k_rem_pio2.c | 2
-rw-r--r--  lib/msun/src/s_isnan.c | 7
-rw-r--r--  release/Makefile | 2
-rw-r--r--  release/doc/share/xml/release.ent | 12
-rw-r--r--  sbin/ifconfig/ifconfig.8 | 43
-rw-r--r--  secure/usr.bin/bdes/bdes.1 | 2
-rw-r--r--  share/man/man4/icmp6.4 | 4
-rw-r--r--  share/man/man4/ipsec.4 | 2
-rw-r--r--  share/mk/Makefile | 56
-rw-r--r--  share/mk/atf.test.mk | 148
-rw-r--r--  share/mk/bsd.progs.mk | 396
-rw-r--r--  share/mk/bsd.subdir.mk | 5
-rw-r--r--  share/mk/bsd.test.mk | 79
-rw-r--r--  sys/amd64/amd64/identcpu.c | 16
-rw-r--r--  sys/arm/arm/machdep.c | 30
-rw-r--r--  sys/arm/at91/at91_machdep.c | 44
-rw-r--r--  sys/arm/lpc/lpc_gpio.c | 2
-rw-r--r--  sys/boot/common/Makefile.inc | 1
-rw-r--r--  sys/boot/forth/menu.4th | 1
-rw-r--r--  sys/boot/i386/boot2/sio.S | 6
-rw-r--r--  sys/boot/i386/loader/Makefile | 2
-rw-r--r--  sys/boot/ia64/common/Makefile | 2
-rw-r--r--  sys/boot/pc98/Makefile.inc | 6
-rw-r--r--  sys/boot/pc98/boot2/Makefile | 2
-rw-r--r--  sys/boot/pc98/boot2/boot2.c | 6
-rw-r--r--  sys/boot/pc98/btx/btx/btx.S | 17
-rw-r--r--  sys/boot/pc98/cdboot/Makefile | 4
-rw-r--r--  sys/boot/pc98/libpc98/comconsole.c | 36
-rw-r--r--  sys/boot/pc98/loader/Makefile | 2
-rw-r--r--  sys/boot/powerpc/ofw/Makefile | 2
-rw-r--r--  sys/boot/powerpc/ps3/Makefile | 2
-rw-r--r--  sys/boot/sparc64/loader/Makefile | 2
-rw-r--r--  sys/cam/scsi/scsi_enc_ses.c | 9
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c | 4
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c | 90
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c | 19
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h | 1
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c | 1
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c | 1
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/powerpc/dtrace/fasttrap_isa.c | 30
-rw-r--r--  sys/cddl/contrib/opensolaris/uts/powerpc/sys/fasttrap_isa.h | 49
-rw-r--r--  sys/cddl/dev/dtrace/powerpc/dtrace_asm.S | 269
-rw-r--r--  sys/cddl/dev/dtrace/powerpc/dtrace_isa.c | 534
-rw-r--r--  sys/cddl/dev/dtrace/powerpc/dtrace_subr.c | 201
-rw-r--r--  sys/cddl/dev/dtrace/powerpc/regset.h | 63
-rw-r--r--  sys/cddl/dev/lockstat/lockstat.c | 3
-rw-r--r--  sys/cddl/dev/profile/profile.c | 7
-rw-r--r--  sys/conf/Makefile.pc98 | 4
-rw-r--r--  sys/conf/files | 5
-rw-r--r--  sys/conf/kern.pre.mk | 12
-rw-r--r--  sys/dev/aac/aac_debug.c | 2
-rw-r--r--  sys/dev/acpica/acpivar.h | 2
-rw-r--r--  sys/dev/ahci/ahciem.c | 3
-rw-r--r--  sys/dev/asmc/asmc.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ah.h | 3
-rw-r--r--  sys/dev/ath/ath_hal/ah_debug.h | 1
-rw-r--r--  sys/dev/ath/ath_hal/ah_internal.h | 4
-rw-r--r--  sys/dev/ath/if_ath.c | 44
-rw-r--r--  sys/dev/ath/if_ath_alq.c | 172
-rw-r--r--  sys/dev/ath/if_ath_alq.h | 82
-rw-r--r--  sys/dev/ath/if_ath_rx.c | 4
-rw-r--r--  sys/dev/ath/if_ath_rx_edma.c | 13
-rw-r--r--  sys/dev/ath/if_ath_sysctl.c | 63
-rw-r--r--  sys/dev/ath/if_ath_tx.c | 4
-rw-r--r--  sys/dev/ath/if_ath_tx_edma.c | 119
-rw-r--r--  sys/dev/ath/if_athvar.h | 8
-rw-r--r--  sys/dev/bktr/bktr_audio.c | 2
-rw-r--r--  sys/dev/ct/bshw_machdep.c | 65
-rw-r--r--  sys/dev/ct/ct.c | 87
-rw-r--r--  sys/dev/ct/ct_isa.c | 9
-rw-r--r--  sys/dev/ct/ct_machdep.h | 31
-rw-r--r--  sys/dev/drm/via_dma.c | 4
-rw-r--r--  sys/dev/drm/via_dmablit.c | 2
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch.c | 11
-rw-r--r--  sys/dev/md/md.c | 9
-rw-r--r--  sys/dev/mfi/mfi.c | 5
-rw-r--r--  sys/dev/mfi/mfi_cam.c | 71
-rw-r--r--  sys/dev/mfi/mfivar.h | 2
-rw-r--r--  sys/dev/mn/if_mn.c | 18
-rw-r--r--  sys/dev/nve/if_nve.c | 94
-rw-r--r--  sys/dev/nxge/xgehal/xgehal-device.c | 2
-rw-r--r--  sys/dev/puc/pucdata.c | 6
-rw-r--r--  sys/dev/snc/dp83932.c | 89
-rw-r--r--  sys/dev/snc/dp83932subr.c | 100
-rw-r--r--  sys/dev/snc/if_snc.c | 26
-rw-r--r--  sys/dev/snc/if_snc_cbus.c | 9
-rw-r--r--  sys/dev/sound/pci/emu10kx.c | 2
-rw-r--r--  sys/dev/twa/tw_cl_misc.c | 2
-rw-r--r--  sys/dev/usb/controller/dwc_otg.c | 195
-rw-r--r--  sys/dev/usb/controller/dwc_otg.h | 3
-rw-r--r--  sys/dev/usb/controller/dwc_otgreg.h | 2
-rw-r--r--  sys/dev/usb/net/if_udav.c | 18
-rw-r--r--  sys/dev/usb/serial/u3g.c | 4
-rw-r--r--  sys/dev/usb/serial/usb_serial.c | 29
-rw-r--r--  sys/dev/usb/usbdevs | 5
-rw-r--r--  sys/dev/xen/netback/netback_unit_tests.c | 6
-rw-r--r--  sys/fs/cd9660/cd9660_vfsops.c | 3
-rw-r--r--  sys/fs/devfs/devfs_vfsops.c | 3
-rw-r--r--  sys/fs/ext2fs/ext2_vfsops.c | 3
-rw-r--r--  sys/fs/fdescfs/fdesc_vfsops.c | 3
-rw-r--r--  sys/fs/fuse/fuse_file.c | 1
-rw-r--r--  sys/fs/fuse/fuse_internal.c | 47
-rw-r--r--  sys/fs/fuse/fuse_internal.h | 31
-rw-r--r--  sys/fs/fuse/fuse_io.c | 1
-rw-r--r--  sys/fs/fuse/fuse_node.c | 17
-rw-r--r--  sys/fs/fuse/fuse_node.h | 12
-rw-r--r--  sys/fs/fuse/fuse_vfsops.c | 15
-rw-r--r--  sys/fs/fuse/fuse_vnops.c | 57
-rw-r--r--  sys/fs/msdosfs/msdosfs_vfsops.c | 1
-rw-r--r--  sys/fs/nandfs/nandfs_vfsops.c | 1
-rw-r--r--  sys/fs/nfsclient/nfs_clvfsops.c | 3
-rw-r--r--  sys/fs/nullfs/null_vfsops.c | 3
-rw-r--r--  sys/fs/pseudofs/pseudofs.c | 1
-rw-r--r--  sys/fs/tmpfs/tmpfs_vfsops.c | 1
-rw-r--r--  sys/fs/udf/udf_vfsops.c | 3
-rw-r--r--  sys/fs/unionfs/union_vfsops.c | 5
-rw-r--r--  sys/fs/unionfs/union_vnops.c | 3
-rw-r--r--  sys/gnu/fs/reiserfs/reiserfs_vfsops.c | 1
-rw-r--r--  sys/i386/include/vmparam.h | 9
-rw-r--r--  sys/i386/xen/clock.c | 2
-rw-r--r--  sys/kern/kern_malloc.c | 6
-rw-r--r--  sys/kern/kern_mbuf.c | 11
-rw-r--r--  sys/kern/sched_ule.c | 111
-rw-r--r--  sys/kern/subr_param.c | 22
-rw-r--r--  sys/kern/tty.c | 2
-rw-r--r--  sys/kern/uipc_mqueue.c | 1
-rw-r--r--  sys/kern/vfs_subr.c | 1
-rw-r--r--  sys/mips/conf/AP91.hints | 1
-rw-r--r--  sys/mips/conf/AP93.hints | 1
-rw-r--r--  sys/mips/conf/AP96.hints | 8
-rw-r--r--  sys/mips/conf/RSPRO.hints | 1
-rw-r--r--  sys/modules/Makefile | 5
-rw-r--r--  sys/modules/dtrace/Makefile | 4
-rw-r--r--  sys/modules/nxge/Makefile | 2
-rw-r--r--  sys/netinet/sctp_constants.h | 1
-rw-r--r--  sys/netinet/sctp_indata.c | 37
-rw-r--r--  sys/netinet/sctp_input.c | 7
-rw-r--r--  sys/netinet/sctp_output.c | 3
-rw-r--r--  sys/netinet/sctp_pcb.c | 9
-rw-r--r--  sys/netinet/sctp_structs.h | 1
-rw-r--r--  sys/netinet/sctp_timer.c | 5
-rw-r--r--  sys/netinet/sctputil.c | 16
-rw-r--r--  sys/netinet/tcp_output.c | 2
-rw-r--r--  sys/netinet/tcp_subr.c | 28
-rw-r--r--  sys/netinet/tcp_timewait.c | 1
-rw-r--r--  sys/netinet6/in6_src.c | 5
-rw-r--r--  sys/netpfil/ipfw/ip_fw_dynamic.c | 85
-rw-r--r--  sys/netpfil/pf/if_pfsync.c | 135
-rw-r--r--  sys/nfsclient/nfs_vfsops.c | 2
-rw-r--r--  sys/ofed/drivers/infiniband/core/cma.c | 2
-rw-r--r--  sys/ofed/drivers/infiniband/core/ud_header.c | 2
-rw-r--r--  sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c | 2
-rw-r--r--  sys/ofed/include/linux/cdev.h | 2
-rw-r--r--  sys/ofed/include/linux/pci.h | 10
-rw-r--r--  sys/pc98/include/bus.h | 120
-rw-r--r--  sys/pc98/pc98/machdep.c | 19
-rw-r--r--  sys/powerpc/aim/locore32.S | 2
-rw-r--r--  sys/powerpc/aim/locore64.S | 2
-rw-r--r--  sys/powerpc/aim/trap.c | 53
-rw-r--r--  sys/powerpc/aim/trap_subr32.S | 20
-rw-r--r--  sys/powerpc/aim/trap_subr64.S | 20
-rw-r--r--  sys/powerpc/conf/GENERIC | 3
-rw-r--r--  sys/powerpc/include/bat.h | 4
-rw-r--r--  sys/sys/_mutex.h | 22
-rw-r--r--  sys/sys/_rwlock.h | 23
-rw-r--r--  sys/sys/mount.h | 2
-rw-r--r--  sys/ufs/ffs/ffs_softdep.c | 181
-rw-r--r--  sys/ufs/ffs/ffs_vfsops.c | 4
-rw-r--r--  sys/vm/vm_map.c | 38
-rw-r--r--  tools/regression/bin/sh/builtins/alias3.0 | 12
-rw-r--r--  tools/regression/bin/sh/builtins/alias3.0.stdout | 4
-rw-r--r--  tools/regression/bin/sh/parser/alias10.0 | 5
-rw-r--r--  tools/regression/bin/sh/parser/alias9.0 | 6
-rw-r--r--  usr.bin/calendar/calendars/calendar.history | 2
-rw-r--r--  usr.bin/clang/clang/Makefile | 2
-rw-r--r--  usr.bin/locale/locale.1 | 6
-rw-r--r--  usr.bin/locale/locale.c | 87
-rw-r--r--  usr.bin/ssh-copy-id/ssh-copy-id.1 | 8
-rwxr-xr-x  usr.bin/ssh-copy-id/ssh-copy-id.sh | 42
-rw-r--r--  usr.bin/top/machine.c | 2
-rw-r--r--  usr.sbin/wpa/wpa_supplicant/wpa_supplicant.8 | 71
251 files changed, 5617 insertions, 2349 deletions
diff --git a/Makefile.inc1 b/Makefile.inc1
index 81bb6bc..4273ecb 100644
--- a/Makefile.inc1
+++ b/Makefile.inc1
@@ -1138,7 +1138,7 @@ _aicasm= sys/modules/aic7xxx/aicasm
_share= share/syscons/scrnmaps
.endif
-.if ${MK_GCC} != "no" && ${MK_CLANG_IS_CC} == "no"
+.if ${MK_GCC} != "no" && (${MK_CLANG_IS_CC} == "no" || ${TARGET} == "pc98")
_gcc_tools= gnu/usr.bin/cc/cc_tools
.endif
@@ -1200,7 +1200,7 @@ _clang= usr.bin/clang
_clang_libs= lib/clang
.endif
-.if ${MK_GCC} != "no" && ${MK_CLANG_IS_CC} == "no"
+.if ${MK_GCC} != "no" && (${MK_CLANG_IS_CC} == "no" || ${TARGET} == "pc98")
_cc= gnu/usr.bin/cc
.endif
diff --git a/bin/ls/cmp.c b/bin/ls/cmp.c
index 84fb038..a2e46ff 100644
--- a/bin/ls/cmp.c
+++ b/bin/ls/cmp.c
@@ -78,7 +78,10 @@ modcmp(const FTSENT *a, const FTSENT *b)
if (b->fts_statp->st_mtim.tv_nsec <
a->fts_statp->st_mtim.tv_nsec)
return (-1);
- return (strcoll(a->fts_name, b->fts_name));
+ if (f_samesort)
+ return (strcoll(b->fts_name, a->fts_name));
+ else
+ return (strcoll(a->fts_name, b->fts_name));
}
int
@@ -104,7 +107,10 @@ acccmp(const FTSENT *a, const FTSENT *b)
if (b->fts_statp->st_atim.tv_nsec <
a->fts_statp->st_atim.tv_nsec)
return (-1);
- return (strcoll(a->fts_name, b->fts_name));
+ if (f_samesort)
+ return (strcoll(b->fts_name, a->fts_name));
+ else
+ return (strcoll(a->fts_name, b->fts_name));
}
int
@@ -130,7 +136,10 @@ birthcmp(const FTSENT *a, const FTSENT *b)
if (b->fts_statp->st_birthtim.tv_nsec <
a->fts_statp->st_birthtim.tv_nsec)
return (-1);
- return (strcoll(a->fts_name, b->fts_name));
+ if (f_samesort)
+ return (strcoll(b->fts_name, a->fts_name));
+ else
+ return (strcoll(a->fts_name, b->fts_name));
}
int
@@ -156,7 +165,10 @@ statcmp(const FTSENT *a, const FTSENT *b)
if (b->fts_statp->st_ctim.tv_nsec <
a->fts_statp->st_ctim.tv_nsec)
return (-1);
- return (strcoll(a->fts_name, b->fts_name));
+ if (f_samesort)
+ return (strcoll(b->fts_name, a->fts_name));
+ else
+ return (strcoll(a->fts_name, b->fts_name));
}
int
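
The comparator changes above add the f_samesort escape hatch described later in the ls.1 update: the time key sorts newest first, while the names of entries sharing a timestamp sort in ascending collation order unless -y or LS_SAMESORT is in effect. Below is a small self-contained sketch of that two-key comparison, using illustrative names rather than the ls sources:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative two-key comparator (not the ls code): primary key is
 * modification time, newest first.  Names break ties; with samesort
 * clear the tie-break ascends (the POSIX-mandated -t behaviour), with
 * samesort set (-y / LS_SAMESORT) it runs in the same descending
 * direction as the time key.
 */
struct entry {
	long		 mtime;
	const char	*name;
};

static int samesort;

static int
modcmp_sketch(const void *va, const void *vb)
{
	const struct entry *a = va, *b = vb;

	if (b->mtime != a->mtime)
		return (b->mtime > a->mtime ? 1 : -1);
	return (samesort ? strcoll(b->name, a->name) :
	    strcoll(a->name, b->name));
}

int
main(void)
{
	struct entry e[] = {
		{ 200, "img_0003" }, { 100, "img_0002" }, { 100, "img_0001" }
	};
	size_t i;

	samesort = (getenv("LS_SAMESORT") != NULL);
	qsort(e, sizeof(e) / sizeof(e[0]), sizeof(e[0]), modcmp_sketch);
	for (i = 0; i < sizeof(e) / sizeof(e[0]); i++)
		printf("%ld %s\n", e[i].mtime, e[i].name);
	return (0);
}

Run as-is it lists the newest entry first and breaks the 100/100 tie in ascending name order; with LS_SAMESORT set, the tie comes out reversed, which matches the behaviour the manual page change below describes for sequentially numbered camera files that share one timestamp.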
diff --git a/bin/ls/extern.h b/bin/ls/extern.h
index f290fbb..90a20a8 100644
--- a/bin/ls/extern.h
+++ b/bin/ls/extern.h
@@ -55,12 +55,12 @@ int prn_octal(const char *);
int prn_printable(const char *);
#ifdef COLORLS
void parsecolors(const char *cs);
-void colorquit(int);
+void colorquit(int);
-extern char *ansi_fgcol;
-extern char *ansi_bgcol;
-extern char *ansi_coloff;
-extern char *attrs_off;
-extern char *enter_bold;
+extern char *ansi_fgcol;
+extern char *ansi_bgcol;
+extern char *ansi_coloff;
+extern char *attrs_off;
+extern char *enter_bold;
#endif
extern int termwidth;
diff --git a/bin/ls/ls.1 b/bin/ls/ls.1
index cc5ff48..aa4fc18 100644
--- a/bin/ls/ls.1
+++ b/bin/ls/ls.1
@@ -32,7 +32,7 @@
.\" @(#)ls.1 8.7 (Berkeley) 7/29/94
.\" $FreeBSD$
.\"
-.Dd September 28, 2011
+.Dd November 8, 2012
.Dt LS 1
.Os
.Sh NAME
@@ -40,7 +40,7 @@
.Nd list directory contents
.Sh SYNOPSIS
.Nm
-.Op Fl ABCFGHILPRSTUWZabcdfghiklmnopqrstuwx1
+.Op Fl ABCFGHILPRSTUWZabcdfghiklmnopqrstuwxy1,
.Op Fl D Ar format
.Op Ar
.Sh DESCRIPTION
@@ -130,6 +130,8 @@ This option is equivalent to defining
.Ev CLICOLOR
in the environment.
(See below.)
+This functionality can be compiled out by removing the definition of
+.Ev COLORLS .
.It Fl H
Symbolic links on the command line are followed.
This option is assumed if
@@ -249,12 +251,35 @@ subsection below, except (if the long format is not also requested)
the directory totals are not output when the output is in a
single column, even if multi-column output is requested.
.It Fl t
-Sort by time modified (most recently modified
-first) before sorting the operands in lexicographical
-order.
+Sort by descending time modified (most recently modified first). If two files
+have the same modification timestamp, sort their names in ascending
+lexicographical order.
+The
+.Fl r
+option reverses both of these sort orders.
+.Pp
+Note that these sort orders are contradictory: the time sequence is in
+descending order, the lexicographical sort is in ascending order.
+This behavior is mandated by
+.St -p1003.2 .
+This feature can cause problems listing files stored with sequential names on
+FAT file systems, such as from digital cameras, where it is possible to have
+more than one image with the same timestamp.
+In such a case, the photos cannot be listed in the sequence in which
+they were taken.
+To ensure the same sort order for time and for lexicographical sorting, set the
+environment variable
+.Ev LS_SAMESORT
+or use the
+.Fl y
+option.
+This causes
+.Nm
+to reverse the lexicographal sort order when sorting files with the
+same modification timestamp.
.It Fl u
Use time of last access,
-instead of last modification
+instead of time of last modification
of the file for sorting
.Pq Fl t
or printing
@@ -268,6 +293,15 @@ The same as
.Fl C ,
except that the multi-column output is produced with entries sorted
across, rather than down, the columns.
+.It Fl y
+When the
+.Fl t
+option is set, sort the alphabetical output in the same order as the time output.
+This has the same effect as setting
+.Ev LS_SAMESORT .
+See the description of the
+.Fl t
+option for more details.
.It Fl 1
(The numeric digit
.Dq one . )
@@ -275,6 +309,15 @@ Force output to be
one entry per line.
This is the default when
output is not to a terminal.
+.It Fl ,
+(Comma) When the
+.Fl l
+option is set, print file sizes grouped and separated by thousands using the
+non-monetary separator returned by
+.Xr localeconv 3 ,
+typically a comma or period.
+If no locale is set, or the locale does not have a non-monetary separator, this
+option has no effect.
.El
.Pp
The
@@ -529,7 +572,7 @@ variable is defined.
.It Ev CLICOLOR_FORCE
Color sequences are normally disabled if the output is not directed to
a terminal.
-This can be overridden by setting this flag.
+This can be overridden by setting this variable.
The
.Ev TERM
variable still needs to reference a color capable terminal however
@@ -655,6 +698,14 @@ Not all columns have changeable widths.
The fields are,
in order: inode, block count, number of links, user name,
group name, flags, file size, file name.
+.It Ev LS_SAMESORT
+If this variable is set, the
+.Fl t
+option sorts the names of files with the same modification timestamp in the same
+sense as the time sort.
+See the description of the
+.Fl t
+option for more details.
.It Ev TERM
The
.Ev CLICOLOR
@@ -678,6 +729,7 @@ specification.
.Xr getfacl 1 ,
.Xr sort 1 ,
.Xr xterm 1 ,
+.Xr localeconv 3 ,
.Xr strftime 3 ,
.Xr strmode 3 ,
.Xr termcap 5 ,
@@ -716,3 +768,9 @@ option description might be a feature that was
based on the fact that single-column output
usually goes to something other than a terminal.
It is debatable whether this is a design bug.
+.Pp
+.St -p1003.2
+mandates opposite sort orders for files with the same timestamp when
+sorting with the
+.Fl t
+option.
diff --git a/bin/ls/ls.c b/bin/ls/ls.c
index b96d18b..be09035 100644
--- a/bin/ls/ls.c
+++ b/bin/ls/ls.c
@@ -109,10 +109,11 @@ int termwidth = 80; /* default terminal width */
int f_humanval; /* show human-readable file sizes */
int f_inode; /* print inode */
static int f_kblocks; /* print size in kilobytes */
+ int f_label; /* show MAC label */
static int f_listdir; /* list actual directory, not contents */
static int f_listdot; /* list files beginning with . */
-static int f_noautodot; /* do not automatically enable -A for root */
int f_longform; /* long listing format */
+static int f_noautodot; /* do not automatically enable -A for root */
static int f_nofollow; /* don't follow symbolic link arguments */
int f_nonprint; /* show unprintables as ? */
static int f_nosort; /* don't sort output */
@@ -122,19 +123,21 @@ static int f_numericonly; /* don't convert uid/gid to name */
int f_octal_escape; /* like f_octal but use C escapes if possible */
static int f_recursive; /* ls subdirectories also */
static int f_reversesort; /* reverse whatever sort is used */
- int f_sectime; /* print the real time for all files */
+ int f_samesort; /* sort time and name in same direction */
+ int f_sectime; /* print full time information */
static int f_singlecol; /* use single column output */
int f_size; /* list size in short listing */
+static int f_sizesort;
int f_slash; /* similar to f_type, but only for dirs */
int f_sortacross; /* sort across rows, not down columns */
int f_statustime; /* use time of last mode change */
static int f_stream; /* stream the output, separate with commas */
+ int f_thousands; /* show file sizes with thousands separators */
+ char *f_timeformat; /* user-specified time format */
static int f_timesort; /* sort by time vice name */
- char *f_timeformat; /* user-specified time format */
-static int f_sizesort;
int f_type; /* add type character for non-regular files */
static int f_whiteout; /* show whiteout entries */
- int f_label; /* show MAC label */
+
#ifdef COLORLS
int f_color; /* add type in color for non-regular files */
@@ -180,8 +183,10 @@ main(int argc, char *argv[])
}
fts_options = FTS_PHYSICAL;
- while ((ch = getopt(argc, argv,
- "1ABCD:FGHILPRSTUWZabcdfghiklmnopqrstuwx")) != -1) {
+ if (getenv("LS_SAMESORT"))
+ f_samesort = 1;
+ while ((ch = getopt(argc, argv,
+ "1ABCD:FGHILPRSTUWXZabcdfghiklmnopqrstuwxy,")) != -1) {
switch (ch) {
/*
* The -1, -C, -x and -l options all override each other so
@@ -192,17 +197,9 @@ main(int argc, char *argv[])
f_longform = 0;
f_stream = 0;
break;
- case 'B':
- f_nonprint = 0;
- f_octal = 1;
- f_octal_escape = 0;
- break;
case 'C':
f_sortacross = f_longform = f_singlecol = 0;
break;
- case 'D':
- f_timeformat = optarg;
- break;
case 'l':
f_longform = 1;
f_singlecol = 0;
@@ -229,16 +226,46 @@ main(int argc, char *argv[])
f_accesstime = 0;
f_statustime = 0;
break;
+ case 'a':
+ fts_options |= FTS_SEEDOT;
+ /* FALLTHROUGH */
+ case 'A':
+ f_listdot = 1;
+ break;
+ /* The -t and -S options override each other. */
+ case 'S':
+ f_sizesort = 1;
+ f_timesort = 0;
+ break;
+ case 't':
+ f_timesort = 1;
+ f_sizesort = 0;
+ break;
+ /* Other flags. Please keep alphabetic. */
+ case ',':
+ f_thousands = 1;
+ break;
+ case 'B':
+ f_nonprint = 0;
+ f_octal = 1;
+ f_octal_escape = 0;
+ break;
+ case 'D':
+ f_timeformat = optarg;
+ break;
case 'F':
f_type = 1;
f_slash = 0;
break;
+ case 'G':
+ setenv("CLICOLOR", "", 1);
+ break;
case 'H':
fts_options |= FTS_COMFOLLOW;
f_nofollow = 0;
break;
- case 'G':
- setenv("CLICOLOR", "", 1);
+ case 'I':
+ f_noautodot = 1;
break;
case 'L':
fts_options &= ~FTS_PHYSICAL;
@@ -254,14 +281,19 @@ main(int argc, char *argv[])
case 'R':
f_recursive = 1;
break;
- case 'a':
- fts_options |= FTS_SEEDOT;
- /* FALLTHROUGH */
- case 'A':
- f_listdot = 1;
+ case 'T':
+ f_sectime = 1;
break;
- case 'I':
- f_noautodot = 1;
+ case 'W':
+ f_whiteout = 1;
+ break;
+ case 'Z':
+ f_label = 1;
+ break;
+ case 'b':
+ f_nonprint = 0;
+ f_octal = 0;
+ f_octal_escape = 1;
break;
/* The -d option turns off the -R option. */
case 'd':
@@ -309,33 +341,13 @@ main(int argc, char *argv[])
case 's':
f_size = 1;
break;
- case 'T':
- f_sectime = 1;
- break;
- /* The -t and -S options override each other. */
- case 't':
- f_timesort = 1;
- f_sizesort = 0;
- break;
- case 'S':
- f_sizesort = 1;
- f_timesort = 0;
- break;
- case 'W':
- f_whiteout = 1;
- break;
- case 'b':
- f_nonprint = 0;
- f_octal = 0;
- f_octal_escape = 1;
- break;
case 'w':
f_nonprint = 0;
f_octal = 0;
f_octal_escape = 0;
break;
- case 'Z':
- f_label = 1;
+ case 'y':
+ f_samesort = 1;
break;
default:
case '?':
@@ -849,6 +861,8 @@ label_out:
d.s_size = sizelen;
d.s_user = maxuser;
}
+ if (f_thousands) /* make space for commas */
+ d.s_size += (d.s_size - 1) / 3;
printfcn(&d);
output = 1;
diff --git a/bin/ls/ls.h b/bin/ls/ls.h
index ee2a7a5..1a45eb4 100644
--- a/bin/ls/ls.h
+++ b/bin/ls/ls.h
@@ -49,12 +49,14 @@ extern int f_longform; /* long listing format */
extern int f_octal; /* print unprintables in octal */
extern int f_octal_escape; /* like f_octal but use C escapes if possible */
extern int f_nonprint; /* show unprintables as ? */
+extern int f_samesort; /* sort time and name in same direction */
extern int f_sectime; /* print the real time for all files */
extern int f_size; /* list size in short listing */
extern int f_slash; /* append a '/' if the file is a directory */
extern int f_sortacross; /* sort across rows, not down columns */
extern int f_statustime; /* use time of last mode change */
-extern char *f_timeformat; /* user-specified time format */
+extern int f_thousands; /* show file sizes with thousands separators */
+extern char *f_timeformat; /* user-specified time format */
extern int f_notabs; /* don't use tab-separated multi-col output */
extern int f_type; /* add type character for non-regular files */
#ifdef COLORLS
diff --git a/bin/ls/print.c b/bin/ls/print.c
index 5a0fc86..930d6ea 100644
--- a/bin/ls/print.c
+++ b/bin/ls/print.c
@@ -606,6 +606,10 @@ printsize(size_t width, off_t bytes)
humanize_number(buf, sizeof(buf), (int64_t)bytes, "",
HN_AUTOSCALE, HN_B | HN_NOSPACE | HN_DECIMAL);
(void)printf("%*s ", (u_int)width, buf);
+ } else if (f_thousands) { /* with commas */
+ /* This format assignment needed to work round gcc bug. */
+ const char *format = "%*j'd ";
+ (void)printf(format, (u_int)width, bytes);
} else
(void)printf("%*jd ", (u_int)width, bytes);
}
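
The new f_thousands branch in printsize() relies on the ' (apostrophe) conversion flag, a POSIX extension that makes printf(3) group digits with the current locale's non-monetary thousands separator; per the comment, the format string is routed through a variable only to work around a gcc diagnostic. A minimal sketch of the flag on its own, assuming a locale that defines a grouping separator is available:

#include <locale.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	intmax_t bytes = 1234567;

	/* Adopt the caller's locale, e.g. en_US.UTF-8. */
	(void)setlocale(LC_ALL, "");

	/*
	 * The ' flag requests the locale's non-monetary grouping
	 * separator; under the default C/POSIX locale, which defines
	 * no separator, the digits print ungrouped.
	 */
	printf("%'jd\n", bytes);
	return (0);
}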
diff --git a/bin/ls/util.c b/bin/ls/util.c
index ecb1732..a50eca4 100644
--- a/bin/ls/util.c
+++ b/bin/ls/util.c
@@ -132,7 +132,7 @@ prn_printable(const char *s)
* to fix this as an efficient fix would involve a lookup table. Same goes
* for the rather inelegant code in prn_octal.
*
- * DES 1998/04/23
+ * DES 1998/04/23
*/
size_t
@@ -175,7 +175,7 @@ prn_octal(const char *s)
size_t clen;
unsigned char ch;
int goodchar, i, len, prtlen;
-
+
memset(&mbs, 0, sizeof(mbs));
len = 0;
while ((clen = mbrtowc(&wc, s, MB_LEN_MAX, &mbs)) != 0) {
@@ -200,9 +200,9 @@ prn_octal(const char *s)
for (i = 0; i < prtlen; i++) {
ch = (unsigned char)s[i];
putchar('\\');
- putchar('0' + (ch >> 6));
- putchar('0' + ((ch >> 3) & 7));
- putchar('0' + (ch & 7));
+ putchar('0' + (ch >> 6));
+ putchar('0' + ((ch >> 3) & 7));
+ putchar('0' + (ch & 7));
len += 4;
}
}
@@ -222,9 +222,9 @@ usage(void)
{
(void)fprintf(stderr,
#ifdef COLORLS
- "usage: ls [-ABCFGHILPRSTUWZabcdfghiklmnopqrstuwx1] [-D format]"
+ "usage: ls [-ABCFGHILPRSTUWZabcdfghiklmnopqrstuwxy1,] [-D format]"
#else
- "usage: ls [-ABCFHILPRSTUWZabcdfghiklmnopqrstuwx1] [-D format]"
+ "usage: ls [-ABCFHILPRSTUWZabcdfghiklmnopqrstuwxy1,] [-D format]"
#endif
" [file ...]\n");
exit(1);
diff --git a/bin/sh/alias.c b/bin/sh/alias.c
index fb0e922..da995bb 100644
--- a/bin/sh/alias.c
+++ b/bin/sh/alias.c
@@ -68,7 +68,18 @@ setalias(const char *name, const char *val)
if (equal(name, ap->name)) {
INTOFF;
ckfree(ap->val);
+ /* See HACK below. */
+#ifdef notyet
ap->val = savestr(val);
+#else
+ {
+ size_t len = strlen(val);
+ ap->val = ckmalloc(len + 2);
+ memcpy(ap->val, val, len);
+ ap->val[len] = ' ';
+ ap->val[len+1] = '\0';
+ }
+#endif
INTON;
return;
}
diff --git a/bin/sh/input.c b/bin/sh/input.c
index 12f285f..b1f0475 100644
--- a/bin/sh/input.c
+++ b/bin/sh/input.c
@@ -350,7 +350,7 @@ pungetc(void)
* We handle aliases this way.
*/
void
-pushstring(char *s, int len, void *ap)
+pushstring(char *s, int len, struct alias *ap)
{
struct strpush *sp;
@@ -365,9 +365,9 @@ pushstring(char *s, int len, void *ap)
sp->prevstring = parsenextc;
sp->prevnleft = parsenleft;
sp->prevlleft = parselleft;
- sp->ap = (struct alias *)ap;
+ sp->ap = ap;
if (ap)
- ((struct alias *)ap)->flag |= ALIASINUSE;
+ ap->flag |= ALIASINUSE;
parsenextc = s;
parsenleft = len;
INTON;
diff --git a/bin/sh/input.h b/bin/sh/input.h
index 92aba45..38be609 100644
--- a/bin/sh/input.h
+++ b/bin/sh/input.h
@@ -45,6 +45,7 @@ extern int parsenleft; /* number of characters left in input buffer */
extern char *parsenextc; /* next character in input buffer */
extern int init_editline; /* 0 == not setup, 1 == OK, -1 == failed */
+struct alias;
struct parsefile;
char *pfgets(char *, int);
@@ -52,7 +53,7 @@ int pgetc(void);
int preadbuffer(void);
int preadateof(void);
void pungetc(void);
-void pushstring(char *, int, void *);
+void pushstring(char *, int, struct alias *);
void setinputfile(const char *, int);
void setinputfd(int, int);
void setinputstring(char *, int);
diff --git a/cddl/contrib/opensolaris/cmd/ztest/ztest.c b/cddl/contrib/opensolaris/cmd/ztest/ztest.c
index 7840773..57efe53 100644
--- a/cddl/contrib/opensolaris/cmd/ztest/ztest.c
+++ b/cddl/contrib/opensolaris/cmd/ztest/ztest.c
@@ -121,8 +121,8 @@
#include <sys/fs/zfs.h>
#include <libnvpair.h>
-#define ZTEST_FD_DATA 3
-#define ZTEST_FD_RAND 4
+static int ztest_fd_data = -1;
+static int ztest_fd_rand = -1;
typedef struct ztest_shared_hdr {
uint64_t zh_hdr_size;
@@ -710,14 +710,17 @@ process_options(int argc, char **argv)
UINT64_MAX >> 2);
if (strlen(altdir) > 0) {
- char cmd[MAXNAMELEN];
- char realaltdir[MAXNAMELEN];
+ char *cmd;
+ char *realaltdir;
char *bin;
char *ztest;
char *isa;
int isalen;
- (void) realpath(getexecname(), cmd);
+ cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+ realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+
+ VERIFY(NULL != realpath(getexecname(), cmd));
if (0 != access(altdir, F_OK)) {
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "invalid alternate ztest path: %s",
@@ -748,6 +751,9 @@ process_options(int argc, char **argv)
fatal(B_TRUE, "invalid alternate lib directory %s",
zo->zo_alt_libpath);
}
+
+ umem_free(cmd, MAXPATHLEN);
+ umem_free(realaltdir, MAXPATHLEN);
}
}
@@ -764,10 +770,12 @@ ztest_random(uint64_t range)
{
uint64_t r;
+ ASSERT3S(ztest_fd_rand, >=, 0);
+
if (range == 0)
return (0);
- if (read(ZTEST_FD_RAND, &r, sizeof (r)) != sizeof (r))
+ if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
fatal(1, "short read from /dev/urandom");
return (r % range);
@@ -4703,7 +4711,18 @@ ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
if (islog)
(void) rw_unlock(&ztest_name_lock);
} else {
+ /*
+ * Ideally we would like to be able to randomly
+ * call vdev_[on|off]line without holding locks
+ * to force unpredictable failures but the side
+ * effects of vdev_[on|off]line prevent us from
+ * doing so. We grab the ztest_vdev_lock here to
+ * prevent a race between injection testing and
+ * aux_vdev removal.
+ */
+ VERIFY(mutex_lock(&ztest_vdev_lock) == 0);
(void) vdev_online(spa, guid0, 0, NULL);
+ VERIFY(mutex_unlock(&ztest_vdev_lock) == 0);
}
}
@@ -5660,29 +5679,16 @@ ztest_init(ztest_shared_t *zs)
}
static void
-setup_fds(void)
+setup_data_fd(void)
{
- int fd;
-#ifdef illumos
-
- char *tmp = tempnam(NULL, NULL);
- fd = open(tmp, O_RDWR | O_CREAT, 0700);
- ASSERT3U(fd, ==, ZTEST_FD_DATA);
- (void) unlink(tmp);
- free(tmp);
-#else
- char tmp[MAXPATHLEN];
-
- strlcpy(tmp, ztest_opts.zo_dir, MAXPATHLEN);
- strlcat(tmp, "/ztest.XXXXXX", MAXPATHLEN);
- fd = mkstemp(tmp);
- ASSERT3U(fd, ==, ZTEST_FD_DATA);
-#endif
+ static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
- fd = open("/dev/urandom", O_RDONLY);
- ASSERT3U(fd, ==, ZTEST_FD_RAND);
+ ztest_fd_data = mkstemp(ztest_name_data);
+ ASSERT3S(ztest_fd_data, >=, 0);
+ (void) unlink(ztest_name_data);
}
+
static int
shared_data_size(ztest_shared_hdr_t *hdr)
{
@@ -5703,15 +5709,11 @@ setup_hdr(void)
int size;
ztest_shared_hdr_t *hdr;
-#ifndef illumos
- pwrite(ZTEST_FD_DATA, "", 1, 0);
-#endif
-
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
- PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
+ PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT(hdr != MAP_FAILED);
- VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, sizeof (ztest_shared_hdr_t)));
+ VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
@@ -5722,7 +5724,7 @@ setup_hdr(void)
hdr->zh_ds_count = ztest_opts.zo_datasets;
size = shared_data_size(hdr);
- VERIFY3U(0, ==, ftruncate(ZTEST_FD_DATA, size));
+ VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
}
@@ -5735,14 +5737,14 @@ setup_data(void)
uint8_t *buf;
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
- PROT_READ, MAP_SHARED, ZTEST_FD_DATA, 0);
+ PROT_READ, MAP_SHARED, ztest_fd_data, 0);
ASSERT(hdr != MAP_FAILED);
size = shared_data_size(hdr);
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
- PROT_READ | PROT_WRITE, MAP_SHARED, ZTEST_FD_DATA, 0);
+ PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT(hdr != MAP_FAILED);
buf = (uint8_t *)hdr;
@@ -5761,12 +5763,13 @@ exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
pid_t pid;
int status;
- char cmdbuf[MAXPATHLEN];
+ char *cmdbuf = NULL;
pid = fork();
if (cmd == NULL) {
- (void) strlcpy(cmdbuf, getexecname(), sizeof (cmdbuf));
+ cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
+ (void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
cmd = cmdbuf;
}
@@ -5775,9 +5778,16 @@ exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
if (pid == 0) { /* child */
char *emptyargv[2] = { cmd, NULL };
+ char fd_data_str[12];
struct rlimit rl = { 1024, 1024 };
(void) setrlimit(RLIMIT_NOFILE, &rl);
+
+ (void) close(ztest_fd_rand);
+ VERIFY3U(11, >=,
+ snprintf(fd_data_str, 12, "%d", ztest_fd_data));
+ VERIFY0(setenv("ZTEST_FD_DATA", fd_data_str, 1));
+
(void) enable_extended_FILE_stdio(-1, -1);
if (libpath != NULL)
VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
@@ -5790,6 +5800,11 @@ exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
fatal(B_TRUE, "exec failed: %s", cmd);
}
+ if (cmdbuf != NULL) {
+ umem_free(cmdbuf, MAXPATHLEN);
+ cmd = NULL;
+ }
+
while (waitpid(pid, &status, 0) != pid)
continue;
if (statusp != NULL)
@@ -5854,39 +5869,41 @@ main(int argc, char **argv)
char timebuf[100];
char numbuf[6];
spa_t *spa;
- char cmd[MAXNAMELEN];
+ char *cmd;
boolean_t hasalt;
-
- boolean_t ischild = (0 == lseek(ZTEST_FD_DATA, 0, SEEK_CUR));
- ASSERT(ischild || errno == EBADF);
+ char *fd_data_str = getenv("ZTEST_FD_DATA");
(void) setvbuf(stdout, NULL, _IOLBF, 0);
dprintf_setup(&argc, argv);
- if (!ischild) {
+ ztest_fd_rand = open("/dev/urandom", O_RDONLY);
+ ASSERT3S(ztest_fd_rand, >=, 0);
+
+ if (!fd_data_str) {
process_options(argc, argv);
- setup_fds();
+ setup_data_fd();
setup_hdr();
setup_data();
bcopy(&ztest_opts, ztest_shared_opts,
sizeof (*ztest_shared_opts));
} else {
+ ztest_fd_data = atoi(fd_data_str);
setup_data();
bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
}
ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
/* Override location of zpool.cache */
- (void) asprintf((char **)&spa_config_path, "%s/zpool.cache",
- ztest_opts.zo_dir);
+ VERIFY3U(asprintf((char **)&spa_config_path, "%s/zpool.cache",
+ ztest_opts.zo_dir), !=, -1);
ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
UMEM_NOFAIL);
zs = ztest_shared;
- if (ischild) {
+ if (fd_data_str) {
metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
metaslab_df_alloc_threshold =
zs->zs_metaslab_df_alloc_threshold;
@@ -5909,7 +5926,8 @@ main(int argc, char **argv)
(u_longlong_t)ztest_opts.zo_time);
}
- (void) strlcpy(cmd, getexecname(), sizeof (cmd));
+ cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
+ (void) strlcpy(cmd, getexecname(), MAXNAMELEN);
zs->zs_do_init = B_TRUE;
if (strlen(ztest_opts.zo_alt_ztest) != 0) {
@@ -6050,5 +6068,7 @@ main(int argc, char **argv)
kills, iters - kills, (100.0 * kills) / MAX(1, iters));
}
+ umem_free(cmd, MAXNAMELEN);
+
return (0);
}
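
The ztest rework above drops the hard-coded ZTEST_FD_DATA/ZTEST_FD_RAND descriptor numbers in favour of a descriptor obtained from mkstemp(3), unlinked immediately, and advertised to re-executed children through the ZTEST_FD_DATA environment variable; because descriptors without FD_CLOEXEC survive exec, the child can simply convert the string back to a number and keep using the open file. A stripped-down sketch of that pattern follows (names are illustrative, not the ztest code):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Parent: create an anonymous temp file, publish its descriptor number
 * in the environment, then re-execute ourselves.  Child: detect the
 * variable and reuse the still-open descriptor.
 */
int
main(int argc, char *argv[])
{
	const char *fdstr = getenv("SHARED_FD");

	(void)argc;
	if (fdstr != NULL) {			/* child after exec */
		char buf[16] = { 0 };

		(void)pread(atoi(fdstr), buf, sizeof(buf) - 1, 0);
		printf("child read: %s\n", buf);
		return (0);
	}

	char path[] = "/tmp/shared.XXXXXX";
	char numbuf[12];
	int fd;

	if ((fd = mkstemp(path)) < 0)
		return (1);
	(void)unlink(path);			/* file is now anonymous */
	(void)pwrite(fd, "hello", 5, 0);
	(void)snprintf(numbuf, sizeof(numbuf), "%d", fd);
	(void)setenv("SHARED_FD", numbuf, 1);
	(void)execv(argv[0], argv);		/* fd survives the exec */
	return (1);				/* reached only if exec fails */
}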
diff --git a/cddl/contrib/opensolaris/lib/libdtrace/powerpc/dt_isadep.c b/cddl/contrib/opensolaris/lib/libdtrace/powerpc/dt_isadep.c
new file mode 100644
index 0000000..1aeb95f
--- /dev/null
+++ b/cddl/contrib/opensolaris/lib/libdtrace/powerpc/dt_isadep.c
@@ -0,0 +1,75 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <stdlib.h>
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+#include <libgen.h>
+
+#include <dt_impl.h>
+#include <dt_pid.h>
+
+/*ARGSUSED*/
+int
+dt_pid_create_entry_probe(struct ps_prochandle *P, dtrace_hdl_t *dtp,
+ fasttrap_probe_spec_t *ftp, const GElf_Sym *symp)
+{
+
+ dt_dprintf("%s: unimplemented\n", __func__);
+ return (DT_PROC_ERR);
+}
+
+int
+dt_pid_create_return_probe(struct ps_prochandle *P, dtrace_hdl_t *dtp,
+ fasttrap_probe_spec_t *ftp, const GElf_Sym *symp, uint64_t *stret)
+{
+
+ dt_dprintf("%s: unimplemented\n", __func__);
+ return (DT_PROC_ERR);
+}
+
+/*ARGSUSED*/
+int
+dt_pid_create_offset_probe(struct ps_prochandle *P, dtrace_hdl_t *dtp,
+ fasttrap_probe_spec_t *ftp, const GElf_Sym *symp, ulong_t off)
+{
+
+ dt_dprintf("%s: unimplemented\n", __func__);
+ return (DT_PROC_ERR);
+}
+
+/*ARGSUSED*/
+int
+dt_pid_create_glob_offset_probes(struct ps_prochandle *P, dtrace_hdl_t *dtp,
+ fasttrap_probe_spec_t *ftp, const GElf_Sym *symp, const char *pattern)
+{
+
+ dt_dprintf("%s: unimplemented\n", __func__);
+ return (DT_PROC_ERR);
+}
diff --git a/cddl/lib/Makefile b/cddl/lib/Makefile
index 088ceb7..14c73b5 100644
--- a/cddl/lib/Makefile
+++ b/cddl/lib/Makefile
@@ -19,7 +19,8 @@ _libzpool= libzpool
.endif
.endif
-.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386" || ${MACHINE_CPUARCH} == "mips"
+.if ${MACHINE_ARCH} == "amd64" || ${MACHINE_ARCH} == "i386" || \
+ ${MACHINE_CPUARCH} == "mips" || ${MACHINE_CPUARCH} == "powerpc"
_drti= drti
_libdtrace= libdtrace
.endif
diff --git a/cddl/lib/libdtrace/Makefile b/cddl/lib/libdtrace/Makefile
index bdd8acc..3a79e87 100644
--- a/cddl/lib/libdtrace/Makefile
+++ b/cddl/lib/libdtrace/Makefile
@@ -74,6 +74,10 @@ CFLAGS+= -I${OPENSOLARIS_SYS_DISTDIR}/uts/sparc
CFLAGS+= -I${OPENSOLARIS_SYS_DISTDIR}/uts/mips
.PATH: ${.CURDIR}/../../../cddl/contrib/opensolaris/lib/libdtrace/mips
.PATH: ${.CURDIR}/../../../sys/cddl/dev/dtrace/mips
+.elif ${MACHINE_CPUARCH} == "powerpc"
+CFLAGS+= -I${OPENSOLARIS_SYS_DISTDIR}/uts/powerpc
+.PATH: ${.CURDIR}/../../../cddl/contrib/opensolaris/lib/libdtrace/powerpc
+.PATH: ${.CURDIR}/../../../sys/cddl/dev/dtrace/powerpc
.else
# temporary hack
CFLAGS+= -I${OPENSOLARIS_SYS_DISTDIR}/uts/intel
diff --git a/cddl/usr.sbin/Makefile b/cddl/usr.sbin/Makefile
index 42d124f..fb2c437 100644
--- a/cddl/usr.sbin/Makefile
+++ b/cddl/usr.sbin/Makefile
@@ -25,4 +25,10 @@ _lockstat= lockstat
_dtrace= dtrace
.endif
+.if ${MACHINE_CPUARCH} == "powerpc"
+_dtrace= dtrace
+_dtruss= dtruss
+_lockstat= lockstat
+.endif
+
.include <bsd.subdir.mk>
diff --git a/contrib/gdb/gdb/c-valprint.c b/contrib/gdb/gdb/c-valprint.c
index fd42ae0..999aa82 100644
--- a/contrib/gdb/gdb/c-valprint.c
+++ b/contrib/gdb/gdb/c-valprint.c
@@ -232,9 +232,8 @@ c_val_print (struct type *type, char *valaddr, int embedded_offset,
wtype = TYPE_TARGET_TYPE (type);
}
vt_val = value_at (wtype, vt_address, NULL);
- val_print (VALUE_TYPE (vt_val), VALUE_CONTENTS (vt_val), 0,
- VALUE_ADDRESS (vt_val), stream, format,
- deref_ref, recurse + 1, pretty);
+ common_val_print (vt_val, stream, format,
+ deref_ref, recurse + 1, pretty);
if (pretty)
{
fprintf_filtered (stream, "\n");
@@ -283,15 +282,8 @@ c_val_print (struct type *type, char *valaddr, int embedded_offset,
unpack_pointer (lookup_pointer_type (builtin_type_void),
valaddr + embedded_offset),
NULL);
- val_print (VALUE_TYPE (deref_val),
- VALUE_CONTENTS (deref_val),
- 0,
- VALUE_ADDRESS (deref_val),
- stream,
- format,
- deref_ref,
- recurse,
- pretty);
+ common_val_print (deref_val, stream, format, deref_ref,
+ recurse, pretty);
}
else
fputs_filtered ("???", stream);
diff --git a/contrib/gdb/gdb/cp-valprint.c b/contrib/gdb/gdb/cp-valprint.c
index 42a48f5..b5aff5d 100644
--- a/contrib/gdb/gdb/cp-valprint.c
+++ b/contrib/gdb/gdb/cp-valprint.c
@@ -361,8 +361,7 @@ cp_print_value_fields (struct type *type, struct type *real_type, char *valaddr,
(TYPE_FIELD_TYPE (type, i),
unpack_field_as_long (type, valaddr + offset, i));
- val_print (TYPE_FIELD_TYPE (type, i), VALUE_CONTENTS (v),
- 0, 0, stream, format, 0, recurse + 1, pretty);
+ common_val_print (v, stream, format, 0, recurse + 1, pretty);
}
}
else
@@ -426,8 +425,7 @@ cp_print_value_fields (struct type *type, struct type *real_type, char *valaddr,
v = value_from_pointer (lookup_pointer_type (builtin_type_unsigned_long),
*(unsigned long *) (valaddr + offset));
- val_print (VALUE_TYPE (v), VALUE_CONTENTS (v), 0, 0,
- stream, format, 0, recurse + 1, pretty);
+ common_val_print (v, stream, format, 0, recurse + 1, pretty);
fields_seen = 1;
if (vtblprint)
@@ -791,8 +789,7 @@ cp_print_hpacc_virtual_table_entries (struct type *type, int *vfuncs,
VALUE_TYPE (vf) = VALUE_TYPE (v); /* make it a pointer */
/* print out the entry */
- val_print (VALUE_TYPE (vf), VALUE_CONTENTS (vf), 0, 0,
- stream, format, 0, recurse + 1, pretty);
+ common_val_print (vf, stream, format, 0, recurse + 1, pretty);
field_physname
= TYPE_FN_FIELD_PHYSNAME (TYPE_FN_FIELDLIST1 (type, fn), oi);
/* pai: (temp) FIXME Maybe this should be DMGL_ANSI */
diff --git a/contrib/gdb/gdb/dwarf2loc.c b/contrib/gdb/gdb/dwarf2loc.c
index cdbeb10..208e9d1 100644
--- a/contrib/gdb/gdb/dwarf2loc.c
+++ b/contrib/gdb/gdb/dwarf2loc.c
@@ -492,9 +492,14 @@ loclist_read_variable (struct symbol *symbol, struct frame_info *frame)
data = find_location_expression (dlbaton, &size,
frame ? get_frame_pc (frame) : 0);
if (data == NULL)
- error ("Variable \"%s\" is not available.", SYMBOL_NATURAL_NAME (symbol));
-
- val = dwarf2_evaluate_loc_desc (symbol, frame, data, size, dlbaton->objfile);
+ {
+ val = allocate_value (SYMBOL_TYPE (symbol));
+ VALUE_LVAL (val) = not_lval;
+ VALUE_OPTIMIZED_OUT (val) = 1;
+ }
+ else
+ val = dwarf2_evaluate_loc_desc (symbol, frame, data, size,
+ dlbaton->objfile);
return val;
}
diff --git a/contrib/gdb/gdb/f-valprint.c b/contrib/gdb/gdb/f-valprint.c
index 805590f..d0bf016 100644
--- a/contrib/gdb/gdb/f-valprint.c
+++ b/contrib/gdb/gdb/f-valprint.c
@@ -444,15 +444,8 @@ f_val_print (struct type *type, char *valaddr, int embedded_offset,
unpack_pointer (lookup_pointer_type (builtin_type_void),
valaddr + embedded_offset),
NULL);
- val_print (VALUE_TYPE (deref_val),
- VALUE_CONTENTS (deref_val),
- 0,
- VALUE_ADDRESS (deref_val),
- stream,
- format,
- deref_ref,
- recurse,
- pretty);
+ common_val_print (deref_val, stream, format, deref_ref, recurse,
+ pretty);
}
else
fputs_filtered ("???", stream);
diff --git a/contrib/gdb/gdb/jv-valprint.c b/contrib/gdb/gdb/jv-valprint.c
index 8715257..a30377c 100644
--- a/contrib/gdb/gdb/jv-valprint.c
+++ b/contrib/gdb/gdb/jv-valprint.c
@@ -189,8 +189,7 @@ java_value_print (struct value *val, struct ui_file *stream, int format,
else
fprintf_filtered (stream, "%d..%d: ", i, i + reps - 1);
- val_print (VALUE_TYPE (v), VALUE_CONTENTS (v), 0, 0,
- stream, format, 2, 1, pretty);
+ common_val_print (v, stream, format, 2, 1, pretty);
things_printed++;
i += reps;
@@ -242,8 +241,7 @@ java_value_print (struct value *val, struct ui_file *stream, int format,
return 0;
}
- return (val_print (type, VALUE_CONTENTS (val), 0, address,
- stream, format, 1, 0, pretty));
+ return common_val_print (val, stream, format, 1, 0, pretty);
}
/* TYPE, VALADDR, ADDRESS, STREAM, RECURSE, and PRETTY have the
@@ -391,8 +389,7 @@ java_print_value_fields (struct type *type, char *valaddr, CORE_ADDR address,
v = value_from_longest (TYPE_FIELD_TYPE (type, i),
unpack_field_as_long (type, valaddr, i));
- val_print (TYPE_FIELD_TYPE (type, i), VALUE_CONTENTS (v), 0,
- 0, stream, format, 0, recurse + 1, pretty);
+ common_val_print (v, stream, format, 0, recurse + 1, pretty);
}
}
else
@@ -411,9 +408,8 @@ java_print_value_fields (struct type *type, char *valaddr, CORE_ADDR address,
struct type *t = check_typedef (VALUE_TYPE (v));
if (TYPE_CODE (t) == TYPE_CODE_STRUCT)
v = value_addr (v);
- val_print (VALUE_TYPE (v),
- VALUE_CONTENTS (v), 0, VALUE_ADDRESS (v),
- stream, format, 0, recurse + 1, pretty);
+ common_val_print (v, stream, format, 0, recurse + 1,
+ pretty);
}
}
else if (TYPE_FIELD_TYPE (type, i) == NULL)
diff --git a/contrib/gdb/gdb/p-valprint.c b/contrib/gdb/gdb/p-valprint.c
index eb92f77..6ac4c9e 100644
--- a/contrib/gdb/gdb/p-valprint.c
+++ b/contrib/gdb/gdb/p-valprint.c
@@ -238,9 +238,8 @@ pascal_val_print (struct type *type, char *valaddr, int embedded_offset,
wtype = TYPE_TARGET_TYPE (type);
}
vt_val = value_at (wtype, vt_address, NULL);
- val_print (VALUE_TYPE (vt_val), VALUE_CONTENTS (vt_val), 0,
- VALUE_ADDRESS (vt_val), stream, format,
- deref_ref, recurse + 1, pretty);
+ common_val_print (vt_val, stream, format, deref_ref,
+ recurse + 1, pretty);
if (pretty)
{
fprintf_filtered (stream, "\n");
@@ -291,10 +290,8 @@ pascal_val_print (struct type *type, char *valaddr, int embedded_offset,
unpack_pointer (lookup_pointer_type (builtin_type_void),
valaddr + embedded_offset),
NULL);
- val_print (VALUE_TYPE (deref_val),
- VALUE_CONTENTS (deref_val), 0,
- VALUE_ADDRESS (deref_val), stream, format,
- deref_ref, recurse + 1, pretty);
+ common_val_print (deref_val, stream, format, deref_ref,
+ recurse + 1, pretty);
}
else
fputs_filtered ("???", stream);
@@ -565,9 +562,7 @@ pascal_value_print (struct value *val, struct ui_file *stream, int format,
fprintf_filtered (stream, ") ");
}
}
- return val_print (type, VALUE_CONTENTS (val), VALUE_EMBEDDED_OFFSET (val),
- VALUE_ADDRESS (val) + VALUE_OFFSET (val),
- stream, format, 1, 0, pretty);
+ return common_val_print (val, stream, format, 1, 0, pretty);
}
@@ -583,7 +578,7 @@ static int pascal_static_field_print; /* Controls printing of static fields. */
static struct obstack dont_print_vb_obstack;
static struct obstack dont_print_statmem_obstack;
-static void pascal_object_print_static_field (struct type *, struct value *,
+static void pascal_object_print_static_field (struct value *,
struct ui_file *, int, int,
enum val_prettyprint);
@@ -844,8 +839,7 @@ pascal_object_print_value_fields (struct type *type, char *valaddr,
v = value_from_longest (TYPE_FIELD_TYPE (type, i),
unpack_field_as_long (type, valaddr, i));
- val_print (TYPE_FIELD_TYPE (type, i), VALUE_CONTENTS (v), 0, 0,
- stream, format, 0, recurse + 1, pretty);
+ common_val_print (v, stream, format, 0, recurse + 1, pretty);
}
}
else
@@ -864,9 +858,8 @@ pascal_object_print_value_fields (struct type *type, char *valaddr,
if (v == NULL)
fputs_filtered ("<optimized out>", stream);
else
- pascal_object_print_static_field (TYPE_FIELD_TYPE (type, i), v,
- stream, format, recurse + 1,
- pretty);
+ pascal_object_print_static_field (v, stream, format,
+ recurse + 1, pretty);
}
else
{
@@ -1005,14 +998,16 @@ pascal_object_print_value (struct type *type, char *valaddr, CORE_ADDR address,
static member classes in an obstack and refuse to print them more
than once.
- VAL contains the value to print, TYPE, STREAM, RECURSE, and PRETTY
+ VAL contains the value to print, STREAM, RECURSE, and PRETTY
have the same meanings as in c_val_print. */
static void
-pascal_object_print_static_field (struct type *type, struct value *val,
+pascal_object_print_static_field (struct value *val,
struct ui_file *stream, int format,
int recurse, enum val_prettyprint pretty)
{
+ struct type *type = VALUE_TYPE (val);
+
if (TYPE_CODE (type) == TYPE_CODE_STRUCT)
{
CORE_ADDR *first_dont_print;
@@ -1041,8 +1036,7 @@ pascal_object_print_static_field (struct type *type, struct value *val,
stream, format, recurse, pretty, NULL, 1);
return;
}
- val_print (type, VALUE_CONTENTS (val), 0, VALUE_ADDRESS (val),
- stream, format, 0, recurse, pretty);
+ common_val_print (val, stream, format, 0, recurse, pretty);
}
void
diff --git a/contrib/gdb/gdb/scm-valprint.c b/contrib/gdb/gdb/scm-valprint.c
index 737bafa..97972f2 100644
--- a/contrib/gdb/gdb/scm-valprint.c
+++ b/contrib/gdb/gdb/scm-valprint.c
@@ -390,6 +390,5 @@ int
scm_value_print (struct value *val, struct ui_file *stream, int format,
enum val_prettyprint pretty)
{
- return (val_print (VALUE_TYPE (val), VALUE_CONTENTS (val), 0,
- VALUE_ADDRESS (val), stream, format, 1, 0, pretty));
+ return (common_val_print (val, stream, format, 1, 0, pretty));
}
diff --git a/contrib/gdb/gdb/stack.c b/contrib/gdb/gdb/stack.c
index d42af9a..f3b1e02 100644
--- a/contrib/gdb/gdb/stack.c
+++ b/contrib/gdb/gdb/stack.c
@@ -354,9 +354,7 @@ print_frame_args (struct symbol *func, struct frame_info *fi, int num,
if (val)
{
- val_print (VALUE_TYPE (val), VALUE_CONTENTS (val), 0,
- VALUE_ADDRESS (val),
- stb->stream, 0, 0, 2, Val_no_prettyprint);
+ common_val_print (val, stb->stream, 0, 0, 2, Val_no_prettyprint);
ui_out_field_stream (uiout, "value", stb);
}
else
diff --git a/contrib/gdb/gdb/valprint.c b/contrib/gdb/gdb/valprint.c
index 294e09f..2df49f8 100644
--- a/contrib/gdb/gdb/valprint.c
+++ b/contrib/gdb/gdb/valprint.c
@@ -150,25 +150,66 @@ val_print (struct type *type, char *valaddr, int embedded_offset,
stream, format, deref_ref, recurse, pretty));
}
-/* Print the value VAL in C-ish syntax on stream STREAM.
- FORMAT is a format-letter, or 0 for print in natural format of data type.
- If the object printed is a string pointer, returns
- the number of string bytes printed. */
+/* Check whether the value VAL is printable. Return 1 if it is;
+ return 0 and print an appropriate error message to STREAM if it
+ is not. */
-int
-value_print (struct value *val, struct ui_file *stream, int format,
- enum val_prettyprint pretty)
+static int
+value_check_printable (struct value *val, struct ui_file *stream)
{
if (val == 0)
{
- printf_filtered ("<address of value unknown>");
+ fprintf_filtered (stream, "<address of value unknown>");
return 0;
}
+
if (VALUE_OPTIMIZED_OUT (val))
{
- printf_filtered ("<value optimized out>");
+ fprintf_filtered (stream, "<value optimized out>");
return 0;
}
+
+ return 1;
+}
+
+/* Print the value VAL onto stream STREAM according to FORMAT (a
+ letter, or 0 for natural format using TYPE).
+
+ If DEREF_REF is nonzero, then dereference references, otherwise just print
+ them like pointers.
+
+ The PRETTY parameter controls prettyprinting.
+
+ If the data are a string pointer, returns the number of string characters
+ printed.
+
+ This is a preferable interface to val_print, above, because it uses
+ GDB's value mechanism. */
+
+int
+common_val_print (struct value *val, struct ui_file *stream, int format,
+ int deref_ref, int recurse, enum val_prettyprint pretty)
+{
+ if (!value_check_printable (val, stream))
+ return 0;
+
+ return val_print (VALUE_TYPE(val), VALUE_CONTENTS_ALL (val),
+ VALUE_EMBEDDED_OFFSET (val), VALUE_ADDRESS (val),
+ stream, format, deref_ref, recurse, pretty);
+}
+
+/* Print the value VAL in C-ish syntax on stream STREAM.
+ FORMAT is a format-letter, or 0 for print in natural format of data type.
+ If the object printed is a string pointer, returns
+ the number of string bytes printed. */
+
+int
+value_print (struct value *val, struct ui_file *stream, int format,
+ enum val_prettyprint pretty)
+{
+ if (!value_check_printable (val, stream))
+ return 0;
+
return LA_VALUE_PRINT (val, stream, format, pretty);
}
diff --git a/contrib/gdb/gdb/value.h b/contrib/gdb/gdb/value.h
index 690edb9..a01b438 100644
--- a/contrib/gdb/gdb/value.h
+++ b/contrib/gdb/gdb/value.h
@@ -523,6 +523,11 @@ extern int val_print (struct type * type, char *valaddr,
int deref_ref, int recurse,
enum val_prettyprint pretty);
+extern int common_val_print (struct value *val,
+ struct ui_file *stream, int format,
+ int deref_ref, int recurse,
+ enum val_prettyprint pretty);
+
extern int val_print_string (CORE_ADDR addr, int len, int width, struct ui_file *stream);
extern void print_variable_value (struct symbol * var,
diff --git a/contrib/gdb/gdb/varobj.c b/contrib/gdb/gdb/varobj.c
index c662518..4821cf4 100644
--- a/contrib/gdb/gdb/varobj.c
+++ b/contrib/gdb/gdb/varobj.c
@@ -2093,10 +2093,8 @@ c_value_of_variable (struct varobj *var)
if (VALUE_LAZY (var->value))
gdb_value_fetch_lazy (var->value);
- val_print (VALUE_TYPE (var->value),
- VALUE_CONTENTS_RAW (var->value), 0,
- VALUE_ADDRESS (var->value), stb,
- format_code[(int) var->format], 1, 0, 0);
+ common_val_print (var->value, stb,
+ format_code[(int) var->format], 1, 0, 0);
thevalue = ui_file_xstrdup (stb, &dummy);
do_cleanups (old_chain);
return thevalue;
diff --git a/contrib/jemalloc/ChangeLog b/contrib/jemalloc/ChangeLog
index 231dd6d..ab3476c 100644
--- a/contrib/jemalloc/ChangeLog
+++ b/contrib/jemalloc/ChangeLog
@@ -6,6 +6,47 @@ found in the git revision history:
http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
git://canonware.com/jemalloc.git
+* 3.2.0 (November 9, 2012)
+
+ In addition to a couple of bug fixes, this version modifies page run
+ allocation and dirty page purging algorithms in order to better control
+ page-level virtual memory fragmentation.
+
+ Incompatible changes:
+ - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).
+
+ Bug fixes:
+ - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
+ after primary dss allocation fails.
+ - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced
+ in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.
+
+* 3.1.0 (October 16, 2012)
+
+ New features:
+ - Auto-detect whether running inside Valgrind, thus removing the need to
+ manually specify MALLOC_CONF=valgrind:true.
+ - Add the "arenas.extend" mallctl, which allows applications to create
+ manually managed arenas.
+ - Add the ALLOCM_ARENA() flag for {,r,d}allocm().
+ - Add the "opt.dss", "arena.<i>.dss", and "stats.arenas.<i>.dss" mallctls,
+ which provide control over dss/mmap precedence.
+ - Add the "arena.<i>.purge" mallctl, which obsoletes "arenas.purge".
+ - Define LG_QUANTUM for hppa.
+
+ Incompatible changes:
+ - Disable tcache by default if running inside Valgrind, in order to avoid
+ making unallocated objects appear reachable to Valgrind.
+ - Drop const from malloc_usable_size() argument on Linux.
+
+ Bug fixes:
+ - Fix heap profiling crash if sampled object is freed via realloc(p, 0).
+ - Remove const from __*_hook variable declarations, so that glibc can modify
+ them during process forking.
+ - Fix mlockall(2)/madvise(2) interaction.
+ - Fix fork(2)-related deadlocks.
+ - Fix error return value for "thread.tcache.enabled" mallctl.
+
* 3.0.0 (May 11, 2012)
Although this version adds some major new features, the primary focus is on
diff --git a/contrib/jemalloc/FREEBSD-diffs b/contrib/jemalloc/FREEBSD-diffs
index 8ca4995..0372f91 100644
--- a/contrib/jemalloc/FREEBSD-diffs
+++ b/contrib/jemalloc/FREEBSD-diffs
@@ -1,5 +1,5 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index 877c500..7d659a7 100644
+index 54b8747..91c4a4e 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -51,12 +51,23 @@
@@ -27,7 +27,7 @@ index 877c500..7d659a7 100644
<refsect2>
<title>Standard API</title>
<funcprototype>
-@@ -2101,4 +2112,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2170,4 +2181,16 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
<para>The <function>posix_memalign<parameter/></function> function conforms
to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
</refsect1>
@@ -45,7 +45,7 @@ index 877c500..7d659a7 100644
+ </refsect1>
</refentry>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index 268cd14..2acd2eb 100644
+index 475821a..73306ac 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -1,5 +1,8 @@
@@ -97,19 +97,19 @@ index de44e14..564d604 100644
bool malloc_mutex_init(malloc_mutex_t *mutex);
diff --git a/include/jemalloc/internal/private_namespace.h b/include/jemalloc/internal/private_namespace.h
-index b816647..b8ce6b1 100644
+index 06241cd..7b19906 100644
--- a/include/jemalloc/internal/private_namespace.h
+++ b/include/jemalloc/internal/private_namespace.h
-@@ -186,7 +186,6 @@
- #define iqalloc JEMALLOC_N(iqalloc)
+@@ -204,7 +204,6 @@
#define iralloc JEMALLOC_N(iralloc)
+ #define irallocx JEMALLOC_N(irallocx)
#define isalloc JEMALLOC_N(isalloc)
-#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
diff --git a/include/jemalloc/jemalloc.h.in b/include/jemalloc/jemalloc.h.in
-index ad06948..505dd38 100644
+index 31b1304..c3ef2f5 100644
--- a/include/jemalloc/jemalloc.h.in
+++ b/include/jemalloc/jemalloc.h.in
@@ -15,6 +15,7 @@ extern "C" {
@@ -122,7 +122,7 @@ index ad06948..505dd38 100644
#define ALLOCM_LG_ALIGN(la) (la)
diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
new file mode 100644
-index 0000000..9efab93
+index 0000000..9c97a13
--- /dev/null
+++ b/include/jemalloc/jemalloc_FreeBSD.h
@@ -0,0 +1,76 @@
@@ -203,7 +203,7 @@ index 0000000..9efab93
+#define pthread_mutex_lock _pthread_mutex_lock
+#define pthread_mutex_unlock _pthread_mutex_unlock
diff --git a/src/jemalloc.c b/src/jemalloc.c
-index bc54cd7..fa9fcf0 100644
+index 8a667b6..aaf5012 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -8,6 +8,10 @@ malloc_tsd_data(, arenas, arena_t *, NULL)
@@ -217,7 +217,7 @@ index bc54cd7..fa9fcf0 100644
/* Runtime configuration options. */
const char *je_malloc_conf;
#ifdef JEMALLOC_DEBUG
-@@ -429,7 +433,8 @@ malloc_conf_init(void)
+@@ -448,7 +452,8 @@ malloc_conf_init(void)
#endif
;
@@ -228,12 +228,12 @@ index bc54cd7..fa9fcf0 100644
* Do nothing; opts is already initialized to
* the value of the MALLOC_CONF environment
diff --git a/src/mutex.c b/src/mutex.c
-index 37a843e..4a90a05 100644
+index 55e18c2..6b6f438 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,
#ifdef JEMALLOC_MUTEX_INIT_CB
- int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
+
+__weak_reference(_pthread_mutex_init_calloc_cb_stub,
@@ -250,7 +250,7 @@ index 37a843e..4a90a05 100644
bool
diff --git a/src/util.c b/src/util.c
-index 9b73c3e..f94799f 100644
+index b3a0114..df1c5d5 100644
--- a/src/util.c
+++ b/src/util.c
@@ -58,6 +58,22 @@ wrtmessage(void *cbopaque, const char *s)
diff --git a/contrib/jemalloc/VERSION b/contrib/jemalloc/VERSION
index c0f4e74..5e64fc9 100644
--- a/contrib/jemalloc/VERSION
+++ b/contrib/jemalloc/VERSION
@@ -1 +1 @@
-3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046
+3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9
diff --git a/contrib/jemalloc/doc/jemalloc.3 b/contrib/jemalloc/doc/jemalloc.3
index f4a9282..eada516 100644
--- a/contrib/jemalloc/doc/jemalloc.3
+++ b/contrib/jemalloc/doc/jemalloc.3
@@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: 05/12/2012
+.\" Date: 11/09/2012
.\" Manual: User Manual
-.\" Source: jemalloc 3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046
+.\" Source: jemalloc 3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "05/12/2012" "jemalloc 3.0.0-0-gfc9b1dbf69f5" "User Manual"
+.TH "JEMALLOC" "3" "11/09/2012" "jemalloc 3.2.0-0-g87499f6748eb" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 3\&.0\&.0\-0\-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046\&. More information can be found at the
+This manual describes jemalloc 3\&.2\&.0\-0\-g87499f6748ebe4817571e817e9f680ccb5bf54a9\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.PP
The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -310,6 +310,14 @@ Initialize newly allocated memory to contain zero bytes\&. In the growing reallo
For reallocation, fail rather than moving the object\&. This constraint can apply to both growth and shrinkage\&.
.RE
.PP
+\fBALLOCM_ARENA(\fR\fB\fIa\fR\fR\fB) \fR
+.RS 4
+Use the arena specified by the index
+\fIa\fR\&. This macro does not validate that
+\fIa\fR
+specifies an arena in the valid range\&.
+.RE
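As a rough usage sketch (editorial illustration, not manual text): an application can pin an allocation to a particular arena by passing ALLOCM_ARENA() to the experimental allocm() interface. The header name and error handling below are assumptions; the arena index would normally come from the "arenas.extend" mallctl described later in this page.

#include <malloc_np.h>	/* FreeBSD header; <jemalloc/jemalloc.h> for standalone jemalloc */

/* Allocate 4 KiB from the arena with index "ind"; NULL on failure. */
static void *
alloc_from_arena(unsigned ind)
{
	void *p;

	if (allocm(&p, NULL, 4096, ALLOCM_ARENA(ind)) != ALLOCM_SUCCESS)
		return (NULL);
	return (p);
}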
+.PP
The
\fBallocm\fR\fB\fR
function allocates at least
@@ -647,16 +655,23 @@ is specified during configuration, in which case it is enabled by default\&.
Virtual memory chunk size (log base 2)\&. The default chunk size is 4 MiB (2^22)\&.
.RE
.PP
+"opt\&.dss" (\fBconst char *\fR) r\-
+.RS 4
+dss (\fBsbrk\fR(2)) allocation precedence as related to
+\fBmmap\fR(2)
+allocation\&. The following settings are supported: \(lqdisabled\(rq, \(lqprimary\(rq, and \(lqsecondary\(rq (default)\&.
+.RE
+.PP
"opt\&.narenas" (\fBsize_t\fR) r\-
.RS 4
-Maximum number of arenas to use\&. The default maximum number of arenas is four times the number of CPUs, or one if there is a single CPU\&.
+Maximum number of arenas to use for automatic multiplexing of threads and arenas\&. The default is four times the number of CPUs, or one if there is a single CPU\&.
.RE
.PP
"opt\&.lg_dirty_mult" (\fBssize_t\fR) r\-
.RS 4
Per\-arena minimum ratio (log base 2) of active to dirty pages\&. Some dirty unused pages may be allowed to accumulate, within the limit set by the ratio (or one chunk worth of dirty pages, whichever is greater), before informing the kernel about some of those pages via
\fBmadvise\fR(2)
-or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 32:1 (2^5:1); an option value of \-1 will disable dirty page purging\&.
+or a similar system call\&. This provides the kernel with sufficient information to recycle dirty pages if physical memory becomes scarce and the pages remain unused\&. The default minimum ratio is 8:1 (2^3:1); an option value of \-1 will disable dirty page purging\&.
.RE
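To make the ratio concrete, a minimal sketch of the threshold this option implies (names and numbers invented for the example): the default value of 3 means an arena starts considering purging once dirty pages exceed one eighth of its active pages.

#include <stddef.h>

/* Illustrative only: the kind of check implied by opt.lg_dirty_mult. */
static int
should_purge(size_t nactive, size_t ndirty)
{
	size_t threshold = nactive >> 3;	/* lg_dirty_mult = 3 -> 8:1 ratio */

	return (ndirty > threshold);		/* e.g. 5000 > (32768 >> 3) == 4096 */
}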
.PP
"opt\&.stats_print" (\fBbool\fR) r\-
@@ -676,7 +691,8 @@ Junk filling enabled/disabled\&. If enabled, each byte of uninitialized allocate
0xa5\&. All deallocated memory will be initialized to
0x5a\&. This is intended for debugging and will impact performance negatively\&. This option is disabled by default unless
\fB\-\-enable\-debug\fR
-is specified during configuration, in which case it is enabled by default\&.
+is specified during configuration, in which case it is enabled by default unless running inside
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
.RE
.PP
"opt\&.quarantine" (\fBsize_t\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -684,7 +700,7 @@ is specified during configuration, in which case it is enabled by default\&.
Per thread quarantine size in bytes\&. If non\-zero, each thread maintains a FIFO object quarantine that stores up to the specified number of bytes of memory\&. The quarantined memory is not freed until it is released from quarantine, though it is immediately junk\-filled if the
"opt\&.junk"
option is enabled\&. This feature is of particular use in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which can detect attempts to access quarantined objects\&. This is intended for debugging and will impact performance negatively\&. The default quarantine size is 0 unless running inside Valgrind, in which case the default is 16 MiB\&.
.RE
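As a hedged configuration sketch (assumes a build with --enable-fill, which brackets this entry): the fill-related options can be set at program start through the malloc_conf symbol or the MALLOC_CONF environment variable, for example:

/* Junk-fill all allocations and keep a 16 MiB per-thread quarantine. */
const char	*malloc_conf = "junk:true,quarantine:16777216";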
.PP
"opt\&.redzone" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -692,7 +708,7 @@ option is enabled\&. This feature is of particular use in combination with
Redzones enabled/disabled\&. If enabled, small allocations have redzones before and after them\&. Furthermore, if the
"opt\&.junk"
option is enabled, the redzones are checked for corruption during deallocation\&. However, the primary intended purpose of this feature is to be used in combination with
-\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default\&.
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2, which needs redzones in order to do effective buffer overflow/underflow detection\&. This option is intended for debugging and will impact performance negatively\&. This option is disabled by default unless running inside Valgrind\&.
.RE
.PP
"opt\&.zero" (\fBbool\fR) r\- [\fB\-\-enable\-fill\fR]
@@ -714,15 +730,7 @@ enabled/disabled\&. This option is disabled by default\&.
"opt\&.valgrind" (\fBbool\fR) r\- [\fB\-\-enable\-valgrind\fR]
.RS 4
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
-support enabled/disabled\&. If enabled, several other options are automatically modified during options processing to work well with Valgrind:
-"opt\&.junk"
-and
-"opt\&.zero"
-are set to false,
-"opt\&.quarantine"
-is set to 16 MiB, and
-"opt\&.redzone"
-is set to true\&. This option is disabled by default\&.
+support enabled/disabled\&. This option is vestigial because jemalloc auto\-detects whether it is running inside Valgrind\&. This option is disabled by default, unless running inside Valgrind\&.
.RE
.PP
"opt\&.xmalloc" (\fBbool\fR) r\- [\fB\-\-enable\-xmalloc\fR]
@@ -749,7 +757,8 @@ This option is disabled by default\&.
.RS 4
Thread\-specific caching enabled/disabled\&. When there are multiple threads, each thread uses a thread\-specific cache for objects up to a certain size\&. Thread\-specific caching allows many allocations to be satisfied without performing any thread synchronization, at the cost of increased memory use\&. See the
"opt\&.lg_tcache_max"
-option for related tuning information\&. This option is enabled by default\&.
+option for related tuning information\&. This option is enabled by default unless running inside
+\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2\&.
.RE
.PP
"opt\&.lg_tcache_max" (\fBsize_t\fR) r\- [\fB\-\-enable\-tcache\fR]
@@ -845,9 +854,7 @@ option for information on analyzing heap profile output\&. This option is disabl
.PP
"thread\&.arena" (\fBunsigned\fR) rw
.RS 4
-Get or set the arena associated with the calling thread\&. The arena index must be less than the maximum number of arenas (see the
-"arenas\&.narenas"
-mallctl)\&. If the specified arena was not initialized beforehand (see the
+Get or set the arena associated with the calling thread\&. If the specified arena was not initialized beforehand (see the
"arenas\&.initialized"
mallctl), it will be automatically initialized as a side effect of calling this interface\&.
.RE
@@ -891,9 +898,23 @@ Enable/disable calling thread\*(Aqs tcache\&. The tcache is implicitly flushed a
Flush calling thread\*(Aqs tcache\&. This interface releases all cached objects and internal data structures associated with the calling thread\*(Aqs thread\-specific cache\&. Ordinarily, this interface need not be called, since automatic periodic incremental garbage collection occurs, and the thread cache is automatically discarded when a thread exits\&. However, garbage collection is triggered by allocation activity, so it is possible for a thread that stops allocating/deallocating to retain its cache indefinitely, in which case the developer may find manual flushing useful\&.
.RE
.PP
+"arena\&.<i>\&.purge" (\fBunsigned\fR) \-\-
+.RS 4
+Purge unused dirty pages for arena <i>, or for all arenas if <i> equals
+"arenas\&.narenas"\&.
+.RE
+.PP
+"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
+.RS 4
+Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
+"arenas\&.narenas"\&. See
+"opt\&.dss"
+for supported settings\&.
+.RE
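A small mallctl sketch tying these entries together (illustrative; error handling trimmed): both "arena.<i>.purge" and "arena.<i>.dss" accept "arenas.narenas" as <i> to address every arena at once.

#include <stdio.h>
#include <malloc_np.h>

/* Purge unused dirty pages in all arenas. */
static int
purge_all_arenas(void)
{
	unsigned narenas;
	size_t sz = sizeof(narenas);
	char name[32];

	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0)
		return (-1);
	snprintf(name, sizeof(name), "arena.%u.purge", narenas);
	return (mallctl(name, NULL, NULL, NULL, 0));
}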
+.PP
"arenas\&.narenas" (\fBunsigned\fR) r\-
.RS 4
-Maximum number of arenas\&.
+Current limit on number of arenas\&.
.RE
.PP
"arenas\&.initialized" (\fBbool *\fR) r\-
@@ -958,6 +979,11 @@ Maximum size supported by this large size class\&.
Purge unused dirty pages for the specified arena, or for all arenas if none is specified\&.
.RE
.PP
+"arenas\&.extend" (\fBunsigned\fR) r\-
+.RS 4
+Extend the array of arenas by appending a new arena, and return the new arena index\&.
+.RE
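For illustration (a sketch, not manual text): combining "arenas.extend" with the "thread.arena" control described earlier gives a thread a private arena for its subsequent allocations.

#include <malloc_np.h>

/* Create a fresh arena and bind the calling thread to it. */
static int
use_private_arena(void)
{
	unsigned ind;
	size_t sz = sizeof(ind);

	if (mallctl("arenas.extend", &ind, &sz, NULL, 0) != 0)
		return (-1);
	return (mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind)));
}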
+.PP
"prof\&.active" (\fBbool\fR) rw [\fB\-\-enable\-prof\fR]
.RS 4
Control whether sampling is currently active\&. See the
@@ -997,7 +1023,9 @@ Total number of bytes allocated by the application\&.
"stats\&.active" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Total number of bytes in active pages allocated by the application\&. This is a multiple of the page size, and greater than or equal to
-"stats\&.allocated"\&.
+"stats\&.allocated"\&. This does not include
+"stats\&.arenas\&.<i>\&.pdirty"
+and pages entirely devoted to allocator metadata\&.
.RE
.PP
"stats\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
@@ -1036,6 +1064,15 @@ Cumulative number of huge allocation requests\&.
Cumulative number of huge deallocation requests\&.
.RE
.PP
+"stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\-
+.RS 4
+dss (\fBsbrk\fR(2)) allocation precedence as related to
+\fBmmap\fR(2)
+allocation\&. See
+"opt\&.dss"
+for details\&.
+.RE
+.PP
"stats\&.arenas\&.<i>\&.nthreads" (\fBunsigned\fR) r\-
.RS 4
Number of threads currently assigned to arena\&.
@@ -1197,9 +1234,7 @@ This implementation does not provide much detail about the problems it detects,
\m[blue]\fBValgrind\fR\m[]\&\s-2\u[2]\d\s+2
tool if the
\fB\-\-enable\-valgrind\fR
-configuration option is enabled and the
-"opt\&.valgrind"
-option is enabled\&.
+configuration option is enabled\&.
.SH "DIAGNOSTIC MESSAGES"
.PP
If any of the memory allocation/deallocation functions detect an error or warning condition, a message will be printed to file descriptor
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena.h b/contrib/jemalloc/include/jemalloc/internal/arena.h
index 0b0f640..561c9b6 100644
--- a/contrib/jemalloc/include/jemalloc/internal/arena.h
+++ b/contrib/jemalloc/include/jemalloc/internal/arena.h
@@ -38,10 +38,10 @@
*
* (nactive >> opt_lg_dirty_mult) >= ndirty
*
- * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
- * times as many active pages as dirty pages.
+ * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
+ * as many active pages as dirty pages.
*/
-#define LG_DIRTY_MULT_DEFAULT 5
+#define LG_DIRTY_MULT_DEFAULT 3
typedef struct arena_chunk_map_s arena_chunk_map_t;
typedef struct arena_chunk_s arena_chunk_t;
@@ -69,7 +69,7 @@ struct arena_chunk_map_s {
/*
* Linkage for run trees. There are two disjoint uses:
*
- * 1) arena_t's runs_avail_{clean,dirty} trees.
+ * 1) arena_t's runs_avail tree.
* 2) arena_run_t conceptually uses this linkage for in-use
* non-full runs, rather than directly embedding linkage.
*/
@@ -162,20 +162,24 @@ typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
/* Arena chunk header. */
struct arena_chunk_s {
/* Arena that owns the chunk. */
- arena_t *arena;
+ arena_t *arena;
- /* Linkage for the arena's chunks_dirty list. */
- ql_elm(arena_chunk_t) link_dirty;
-
- /*
- * True if the chunk is currently in the chunks_dirty list, due to
- * having at some point contained one or more dirty pages. Removal
- * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
- */
- bool dirtied;
+ /* Linkage for tree of arena chunks that contain dirty runs. */
+ rb_node(arena_chunk_t) dirty_link;
/* Number of dirty pages. */
- size_t ndirty;
+ size_t ndirty;
+
+ /* Number of available runs. */
+ size_t nruns_avail;
+
+ /*
+ * Number of available run adjacencies. Clean and dirty available runs
+ * are not coalesced, which causes virtual memory fragmentation. The
+ * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
+ * this fragmentation.
+	 */
+ size_t nruns_adjac;
/*
* Map of pages within chunk that keeps track of free/large/small. The
@@ -183,7 +187,7 @@ struct arena_chunk_s {
* need to be tracked in the map. This omission saves a header page
* for common chunk sizes (e.g. 4 MiB).
*/
- arena_chunk_map_t map[1]; /* Dynamically sized. */
+ arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
@@ -331,8 +335,10 @@ struct arena_s {
uint64_t prof_accumbytes;
- /* List of dirty-page-containing chunks this arena manages. */
- ql_head(arena_chunk_t) chunks_dirty;
+ dss_prec_t dss_prec;
+
+ /* Tree of dirty-page-containing chunks this arena manages. */
+ arena_chunk_tree_t chunks_dirty;
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
@@ -367,18 +373,9 @@ struct arena_s {
/*
* Size/address-ordered trees of this arena's available runs. The trees
- * are used for first-best-fit run allocation. The dirty tree contains
- * runs with dirty pages (i.e. very likely to have been touched and
- * therefore have associated physical pages), whereas the clean tree
- * contains runs with pages that either have no associated physical
- * pages, or have pages that the kernel may recycle at any time due to
- * previous madvise(2) calls. The dirty tree is used in preference to
- * the clean tree for allocations, because using dirty pages reduces
- * the amount of dirty purging necessary to keep the active:dirty page
- * ratio below the purge threshold.
+ * are used for first-best-fit run allocation.
*/
- arena_avail_tree_t runs_avail_clean;
- arena_avail_tree_t runs_avail_dirty;
+ arena_avail_tree_t runs_avail;
/* bins is used to store trees of free regions. */
arena_bin_t bins[NBINS];
@@ -422,13 +419,16 @@ void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-void arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
- arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats);
void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
-void *arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache);
+void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
+ bool try_tcache_dalloc);
+dss_prec_t arena_dss_prec_get(arena_t *arena);
+void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+ size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats);
bool arena_new(arena_t *arena, unsigned ind);
void arena_boot(void);
void arena_prefork(arena_t *arena);
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk.h b/contrib/jemalloc/include/jemalloc/internal/chunk.h
index 8fb1fe6..87d8700 100644
--- a/contrib/jemalloc/include/jemalloc/internal/chunk.h
+++ b/contrib/jemalloc/include/jemalloc/internal/chunk.h
@@ -28,6 +28,7 @@
#ifdef JEMALLOC_H_EXTERNS
extern size_t opt_lg_chunk;
+extern const char *opt_dss;
/* Protects stats_chunks; currently not used for any other purpose. */
extern malloc_mutex_t chunks_mtx;
@@ -42,9 +43,14 @@ extern size_t chunk_npages;
extern size_t map_bias; /* Number of arena chunk header pages. */
extern size_t arena_maxclass; /* Max size class for arenas. */
-void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
+void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+ dss_prec_t dss_prec);
+void chunk_unmap(void *chunk, size_t size);
void chunk_dealloc(void *chunk, size_t size, bool unmap);
bool chunk_boot(void);
+void chunk_prefork(void);
+void chunk_postfork_parent(void);
+void chunk_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h b/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
index 6e2643b..6585f07 100644
--- a/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -1,14 +1,28 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
+typedef enum {
+ dss_prec_disabled = 0,
+ dss_prec_primary = 1,
+ dss_prec_secondary = 2,
+
+ dss_prec_limit = 3
+} dss_prec_t;
+#define DSS_PREC_DEFAULT dss_prec_secondary
+#define DSS_DEFAULT "secondary"
+
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
+extern const char *dss_prec_names[];
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
+dss_prec_t chunk_dss_prec_get(void);
+bool chunk_dss_prec_set(dss_prec_t dss_prec);
void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero);
bool chunk_in_dss(void *chunk);
bool chunk_dss_boot(void);
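For illustration, a sketch of how the precedence strings map onto dss_prec_t; the contents and ordering of dss_prec_names[] (defined in src/chunk_dss.c) are assumed here to follow the enum above, and the helper name is hypothetical.

#include <string.h>

/* Hypothetical helper: translate "disabled"/"primary"/"secondary" to an enum value. */
static dss_prec_t
dss_prec_from_str(const char *s)
{
	unsigned i;

	for (i = 0; i < dss_prec_limit; i++) {
		if (strcmp(dss_prec_names[i], s) == 0)
			return ((dss_prec_t)i);
	}
	return (DSS_PREC_DEFAULT);
}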
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h b/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
index b29f39e..f24abac 100644
--- a/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
+++ b/contrib/jemalloc/include/jemalloc/internal/chunk_mmap.h
@@ -9,7 +9,7 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void pages_purge(void *addr, size_t length);
+bool pages_purge(void *addr, size_t length);
void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero);
bool chunk_dealloc_mmap(void *chunk, size_t size);
diff --git a/contrib/jemalloc/include/jemalloc/internal/ctl.h b/contrib/jemalloc/include/jemalloc/internal/ctl.h
index adf3827..0ffecc5 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ctl.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ctl.h
@@ -33,6 +33,7 @@ struct ctl_indexed_node_s {
struct ctl_arena_stats_s {
bool initialized;
unsigned nthreads;
+ const char *dss;
size_t pactive;
size_t pdirty;
arena_stats_t astats;
@@ -61,6 +62,7 @@ struct ctl_stats_s {
uint64_t nmalloc; /* huge_nmalloc */
uint64_t ndalloc; /* huge_ndalloc */
} huge;
+ unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
@@ -75,6 +77,9 @@ int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
bool ctl_boot(void);
+void ctl_prefork(void);
+void ctl_postfork_parent(void);
+void ctl_postfork_child(void);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent.h b/contrib/jemalloc/include/jemalloc/internal/extent.h
index 36af8be..ba95ca8 100644
--- a/contrib/jemalloc/include/jemalloc/internal/extent.h
+++ b/contrib/jemalloc/include/jemalloc/internal/extent.h
@@ -23,6 +23,9 @@ struct extent_node_s {
/* Total region size. */
size_t size;
+
+ /* True if zero-filled; used by chunk recycling code. */
+ bool zeroed;
};
typedef rb_tree(extent_node_t) extent_tree_t;
diff --git a/contrib/jemalloc/include/jemalloc/internal/huge.h b/contrib/jemalloc/include/jemalloc/internal/huge.h
index e8513c9..d987d37 100644
--- a/contrib/jemalloc/include/jemalloc/internal/huge.h
+++ b/contrib/jemalloc/include/jemalloc/internal/huge.h
@@ -22,7 +22,7 @@ void *huge_palloc(size_t size, size_t alignment, bool zero);
void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero);
+ size_t alignment, bool zero, bool try_tcache_dalloc);
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
index 32cdc6f..4214c53 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
@@ -270,6 +270,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
+# ifdef __hppa__
+# define LG_QUANTUM 4
+# endif
# ifdef __mips__
# define LG_QUANTUM 3
# endif
@@ -424,6 +427,7 @@ static const bool config_ivsalloc =
VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \
} while (0)
#else
+#define RUNNING_ON_VALGRIND ((unsigned)0)
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB)
@@ -510,13 +514,19 @@ extern size_t opt_narenas;
/* Number of CPUs. */
extern unsigned ncpus;
-extern malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
+/* Protects arenas initialization (arenas, arenas_total). */
+extern malloc_mutex_t arenas_lock;
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas. arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
*/
extern arena_t **arenas;
-extern unsigned narenas;
+extern unsigned narenas_total;
+extern unsigned narenas_auto; /* Read-only after initialization. */
arena_t *arenas_extend(unsigned ind);
void arenas_cleanup(void *arg);
@@ -571,6 +581,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *)
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
+unsigned narenas_total_get(void);
arena_t *choose_arena(arena_t *arena);
#endif
@@ -675,6 +686,18 @@ sa2u(size_t size, size_t alignment)
}
}
+JEMALLOC_INLINE unsigned
+narenas_total_get(void)
+{
+ unsigned narenas;
+
+ malloc_mutex_lock(&arenas_lock);
+ narenas = narenas_total;
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (narenas);
+}
+
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
choose_arena(arena_t *arena)
@@ -710,15 +733,24 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
+void *imallocx(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
+void *icallocx(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
+void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
+void idallocx(void *ptr, bool try_tcache);
void idalloc(void *ptr);
+void iqallocx(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
+void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
bool zero, bool no_move);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
@@ -726,29 +758,44 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE void *
-imalloc(size_t size)
+imallocx(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
if (size <= arena_maxclass)
- return (arena_malloc(NULL, size, false, true));
+ return (arena_malloc(arena, size, false, try_tcache));
else
return (huge_malloc(size, false));
}
JEMALLOC_INLINE void *
-icalloc(size_t size)
+imalloc(size_t size)
+{
+
+ return (imallocx(size, true, NULL));
+}
+
+JEMALLOC_INLINE void *
+icallocx(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
- return (arena_malloc(NULL, size, true, true));
+ return (arena_malloc(arena, size, true, try_tcache));
else
return (huge_malloc(size, true));
}
JEMALLOC_INLINE void *
-ipalloc(size_t usize, size_t alignment, bool zero)
+icalloc(size_t size)
+{
+
+ return (icallocx(size, true, NULL));
+}
+
+JEMALLOC_INLINE void *
+ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena)
{
void *ret;
@@ -756,11 +803,11 @@ ipalloc(size_t usize, size_t alignment, bool zero)
assert(usize == sa2u(usize, alignment));
if (usize <= arena_maxclass && alignment <= PAGE)
- ret = arena_malloc(NULL, usize, zero, true);
+ ret = arena_malloc(arena, usize, zero, try_tcache);
else {
if (usize <= arena_maxclass) {
- ret = arena_palloc(choose_arena(NULL), usize, alignment,
- zero);
+ ret = arena_palloc(choose_arena(arena), usize,
+ alignment, zero);
} else if (alignment <= chunksize)
ret = huge_malloc(usize, zero);
else
@@ -771,6 +818,13 @@ ipalloc(size_t usize, size_t alignment, bool zero)
return (ret);
}
+JEMALLOC_INLINE void *
+ipalloc(size_t usize, size_t alignment, bool zero)
+{
+
+ return (ipallocx(usize, alignment, zero, true, NULL));
+}
+
/*
* Typical usage:
* void *ptr = [...]
@@ -829,7 +883,7 @@ p2rz(const void *ptr)
}
JEMALLOC_INLINE void
-idalloc(void *ptr)
+idallocx(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
@@ -837,24 +891,38 @@ idalloc(void *ptr)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr, true);
+ arena_dalloc(chunk->arena, chunk, ptr, try_tcache);
else
huge_dalloc(ptr, true);
}
JEMALLOC_INLINE void
-iqalloc(void *ptr)
+idalloc(void *ptr)
+{
+
+ idallocx(ptr, true);
+}
+
+JEMALLOC_INLINE void
+iqallocx(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
- idalloc(ptr);
+ idallocx(ptr, try_tcache);
+}
+
+JEMALLOC_INLINE void
+iqalloc(void *ptr)
+{
+
+ iqallocx(ptr, true);
}
JEMALLOC_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move)
+irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+ bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
void *ret;
size_t oldsize;
@@ -877,7 +945,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
if (ret == NULL) {
if (extra == 0)
return (NULL);
@@ -885,7 +953,8 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ arena);
if (ret == NULL)
return (NULL);
}
@@ -896,7 +965,7 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(ret, ptr, copysize);
- iqalloc(ptr);
+ iqallocx(ptr, try_tcache_dalloc);
return (ret);
}
@@ -910,15 +979,25 @@ iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
}
} else {
if (size + extra <= arena_maxclass) {
- return (arena_ralloc(ptr, oldsize, size, extra,
- alignment, zero, true));
+ return (arena_ralloc(arena, ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc));
} else {
return (huge_ralloc(ptr, oldsize, size, extra,
- alignment, zero));
+ alignment, zero, try_tcache_dalloc));
}
}
}
+JEMALLOC_INLINE void *
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+ bool no_move)
+{
+
+ return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
+ NULL));
+}
+
malloc_tsd_externs(thread_allocated, thread_allocated_t)
malloc_tsd_funcs(JEMALLOC_INLINE, thread_allocated, thread_allocated_t,
THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup)
diff --git a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
index b8ce6b1..7b19906 100644
--- a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -12,6 +12,8 @@
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
+#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
+#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_malloc JEMALLOC_N(arena_malloc)
#define arena_malloc_large JEMALLOC_N(arena_malloc_large)
#define arena_malloc_small JEMALLOC_N(arena_malloc_small)
@@ -51,14 +53,13 @@
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
#define arenas JEMALLOC_N(arenas)
-#define arenas_bin_i_index JEMALLOC_N(arenas_bin_i_index)
#define arenas_booted JEMALLOC_N(arenas_booted)
#define arenas_cleanup JEMALLOC_N(arenas_cleanup)
#define arenas_extend JEMALLOC_N(arenas_extend)
#define arenas_initialized JEMALLOC_N(arenas_initialized)
#define arenas_lock JEMALLOC_N(arenas_lock)
-#define arenas_lrun_i_index JEMALLOC_N(arenas_lrun_i_index)
#define arenas_tls JEMALLOC_N(arenas_tls)
+#define arenas_tsd JEMALLOC_N(arenas_tsd)
#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot)
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
@@ -101,9 +102,15 @@
#define chunk_dss_boot JEMALLOC_N(chunk_dss_boot)
#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child)
#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent)
+#define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get)
+#define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set)
#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork)
#define chunk_in_dss JEMALLOC_N(chunk_in_dss)
#define chunk_npages JEMALLOC_N(chunk_npages)
+#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
+#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
+#define chunk_prefork JEMALLOC_N(chunk_prefork)
+#define chunk_unmap JEMALLOC_N(chunk_unmap)
#define chunks_mtx JEMALLOC_N(chunks_mtx)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
#define chunksize JEMALLOC_N(chunksize)
@@ -129,6 +136,10 @@
#define ctl_bymib JEMALLOC_N(ctl_bymib)
#define ctl_byname JEMALLOC_N(ctl_byname)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
+#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
+#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
+#define ctl_prefork JEMALLOC_N(ctl_prefork)
+#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
#define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter)
@@ -161,6 +172,7 @@
#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse)
#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start)
#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search)
+#define get_errno JEMALLOC_N(get_errno)
#define hash JEMALLOC_N(hash)
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
@@ -180,11 +192,17 @@
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
+#define icallocx JEMALLOC_N(icallocx)
#define idalloc JEMALLOC_N(idalloc)
+#define idallocx JEMALLOC_N(idallocx)
#define imalloc JEMALLOC_N(imalloc)
+#define imallocx JEMALLOC_N(imallocx)
#define ipalloc JEMALLOC_N(ipalloc)
+#define ipallocx JEMALLOC_N(ipallocx)
#define iqalloc JEMALLOC_N(iqalloc)
+#define iqallocx JEMALLOC_N(iqallocx)
#define iralloc JEMALLOC_N(iralloc)
+#define irallocx JEMALLOC_N(irallocx)
#define isalloc JEMALLOC_N(isalloc)
#define ivsalloc JEMALLOC_N(ivsalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
@@ -211,7 +229,9 @@
#define map_bias JEMALLOC_N(map_bias)
#define mb_write JEMALLOC_N(mb_write)
#define mutex_boot JEMALLOC_N(mutex_boot)
-#define narenas JEMALLOC_N(narenas)
+#define narenas_auto JEMALLOC_N(narenas_auto)
+#define narenas_total JEMALLOC_N(narenas_total)
+#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
@@ -253,6 +273,9 @@
#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc JEMALLOC_N(prof_malloc)
#define prof_mdump JEMALLOC_N(prof_mdump)
+#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
+#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
+#define prof_prefork JEMALLOC_N(prof_prefork)
#define prof_promote JEMALLOC_N(prof_promote)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
@@ -263,6 +286,7 @@
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized)
#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls)
+#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd)
#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot)
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
@@ -277,12 +301,13 @@
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
+#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child)
+#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent)
+#define rtree_prefork JEMALLOC_N(rtree_prefork)
#define rtree_set JEMALLOC_N(rtree_set)
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
-#define stats_arenas_i_bins_j_index JEMALLOC_N(stats_arenas_i_bins_j_index)
-#define stats_arenas_i_index JEMALLOC_N(stats_arenas_i_index)
-#define stats_arenas_i_lruns_j_index JEMALLOC_N(stats_arenas_i_lruns_j_index)
+#define set_errno JEMALLOC_N(set_errno)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
@@ -310,6 +335,7 @@
#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized)
#define tcache_enabled_set JEMALLOC_N(tcache_enabled_set)
#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls)
+#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd)
#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot)
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
@@ -324,6 +350,7 @@
#define tcache_stats_merge JEMALLOC_N(tcache_stats_merge)
#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup)
#define tcache_tls JEMALLOC_N(tcache_tls)
+#define tcache_tsd JEMALLOC_N(tcache_tsd)
#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot)
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
@@ -331,6 +358,7 @@
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls)
+#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd)
#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot)
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof.h b/contrib/jemalloc/include/jemalloc/internal/prof.h
index c3e3f9e..47f22ad 100644
--- a/contrib/jemalloc/include/jemalloc/internal/prof.h
+++ b/contrib/jemalloc/include/jemalloc/internal/prof.h
@@ -223,6 +223,9 @@ void prof_tdata_cleanup(void *arg);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
+void prof_prefork(void);
+void prof_postfork_parent(void);
+void prof_postfork_child(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -506,7 +509,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
if ((uintptr_t)cnt > (uintptr_t)1U) {
prof_ctx_set(ptr, cnt->ctx);
cnt->epoch++;
- } else
+ } else if (ptr != NULL)
prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
diff --git a/contrib/jemalloc/include/jemalloc/internal/rtree.h b/contrib/jemalloc/include/jemalloc/internal/rtree.h
index 95d6355..9bd9854 100644
--- a/contrib/jemalloc/include/jemalloc/internal/rtree.h
+++ b/contrib/jemalloc/include/jemalloc/internal/rtree.h
@@ -36,6 +36,9 @@ struct rtree_s {
#ifdef JEMALLOC_H_EXTERNS
rtree_t *rtree_new(unsigned bits);
+void rtree_prefork(rtree_t *rtree);
+void rtree_postfork_parent(rtree_t *rtree);
+void rtree_postfork_child(rtree_t *rtree);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc.h b/contrib/jemalloc/include/jemalloc/jemalloc.h
index 65440b3..629dbb4 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc.h
@@ -7,12 +7,12 @@ extern "C" {
#include <limits.h>
#include <strings.h>
-#define JEMALLOC_VERSION "3.0.0-0-gfc9b1dbf69f59d7ecfc4ac68da9847e017e1d046"
+#define JEMALLOC_VERSION "3.2.0-0-g87499f6748ebe4817571e817e9f680ccb5bf54a9"
#define JEMALLOC_VERSION_MAJOR 3
-#define JEMALLOC_VERSION_MINOR 0
+#define JEMALLOC_VERSION_MINOR 2
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "fc9b1dbf69f59d7ecfc4ac68da9847e017e1d046"
+#define JEMALLOC_VERSION_GID "87499f6748ebe4817571e817e9f680ccb5bf54a9"
#include "jemalloc_defs.h"
#include "jemalloc_FreeBSD.h"
@@ -26,6 +26,8 @@ extern "C" {
#endif
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)
+/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
+#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
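As a worked note on the bias (illustrative only; the decoding below is not a public macro): ALLOCM_ARENA(0) evaluates to 0x100 and ALLOCM_ARENA(3) to 0x400, so a flags word of 0 carries no arena request and the index can be recovered by undoing the shift and the +1 bias.

int flags = ALLOCM_ARENA(3) | ALLOCM_ZERO;	/* 0x400 | 0x40 */
unsigned ind = ((unsigned)flags >> 8) - 1;	/* 3 */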
@@ -60,7 +62,8 @@ JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
-JEMALLOC_EXPORT size_t je_malloc_usable_size(const void *ptr);
+JEMALLOC_EXPORT size_t je_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr);
JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
const char *), void *je_cbopaque, const char *opts);
JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_defs.h b/contrib/jemalloc/include/jemalloc/jemalloc_defs.h
index 85571b9..169078b 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc_defs.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_defs.h
@@ -223,6 +223,15 @@
#define JEMALLOC_OVERRIDE_VALLOC
/*
+ * At least Linux omits the "const" in:
+ *
+ * size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST const
+
+/*
* Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
*/
/* #undef JEMALLOC_ZONE */
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
index 2a6150f..0c53b07 100644
--- a/contrib/jemalloc/src/arena.c
+++ b/contrib/jemalloc/src/arena.c
@@ -40,6 +40,12 @@ const uint8_t small_size2bin[] = {
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
+static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
+ size_t pageind, size_t npages, bool maybe_adjac_pred,
+ bool maybe_adjac_succ);
+static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
+ size_t pageind, size_t npages, bool maybe_adjac_pred,
+ bool maybe_adjac_succ);
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
@@ -48,8 +54,11 @@ static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
size_t binind, bool zero);
+static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
+ arena_chunk_t *chunk, void *arg);
static void arena_purge(arena_t *arena, bool all);
-static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
+static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
+ bool cleaned);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
@@ -101,9 +110,6 @@ arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
size_t a_size = a->bits & ~PAGE_MASK;
size_t b_size = b->bits & ~PAGE_MASK;
- assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
- CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));
-
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
uintptr_t a_mapelm, b_mapelm;
@@ -129,6 +135,182 @@ arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
u.rb_link, arena_avail_comp)
+static inline int
+arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
+{
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ /*
+ * Short-circuit for self comparison. The following comparison code
+ * would come to the same result, but at the cost of executing the slow
+ * path.
+ */
+ if (a == b)
+ return (0);
+
+ /*
+ * Order such that chunks with higher fragmentation are "less than"
+ * those with lower fragmentation -- purging order is from "least" to
+ * "greatest". Fragmentation is measured as:
+ *
+ * mean current avail run size
+ * --------------------------------
+ * mean defragmented avail run size
+ *
+ * navail
+ * -----------
+ * nruns_avail nruns_avail-nruns_adjac
+ * = ========================= = -----------------------
+ * navail nruns_avail
+ * -----------------------
+ * nruns_avail-nruns_adjac
+ *
+ * The following code multiplies away the denominator prior to
+ * comparison, in order to avoid division.
+ *
+ */
+ {
+ size_t a_val = (a->nruns_avail - a->nruns_adjac) *
+ b->nruns_avail;
+ size_t b_val = (b->nruns_avail - b->nruns_adjac) *
+ a->nruns_avail;
+
+ if (a_val < b_val)
+ return (1);
+ if (a_val > b_val)
+ return (-1);
+ }
+ /*
+ * Break ties by chunk address. For fragmented chunks, report lower
+ * addresses as "lower", so that fragmentation reduction happens first
+ * at lower addresses. However, use the opposite ordering for
+ * unfragmented chunks, in order to increase the chances of
+ * re-allocating dirty runs.
+ */
+ {
+ uintptr_t a_chunk = (uintptr_t)a;
+ uintptr_t b_chunk = (uintptr_t)b;
+ int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
+ if (a->nruns_adjac == 0) {
+ assert(b->nruns_adjac == 0);
+ ret = -ret;
+ }
+ return (ret);
+ }
+}
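Worked instance of the metric above (values invented for illustration): a chunk with nruns_avail = 10 and nruns_adjac = 4 scores (10-4)/10 = 0.60, while one with nruns_avail = 8 and nruns_adjac = 1 scores 7/8 = 0.875, so the first chunk is the more fragmented of the two. Cross-multiplying as the code does, 6*8 = 48 versus 7*10 = 70 yields the same ordering without any division.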
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
+ dirty_link, arena_chunk_dirty_comp)
+
+static inline bool
+arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
+{
+ bool ret;
+
+ if (pageind-1 < map_bias)
+ ret = false;
+ else {
+ ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
+ assert(ret == false || arena_mapbits_dirty_get(chunk,
+ pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
+ }
+ return (ret);
+}
+
+static inline bool
+arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
+{
+ bool ret;
+
+ if (pageind+npages == chunk_npages)
+ ret = false;
+ else {
+ assert(pageind+npages < chunk_npages);
+ ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
+ assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
+ != arena_mapbits_dirty_get(chunk, pageind+npages));
+ }
+ return (ret);
+}
+
+static inline bool
+arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
+{
+
+ return (arena_avail_adjac_pred(chunk, pageind) ||
+ arena_avail_adjac_succ(chunk, pageind, npages));
+}
+
+static void
+arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+{
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+
+ /*
+ * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
+ * removed and reinserted even if the run to be inserted is clean.
+ */
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+
+ if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
+ chunk->nruns_adjac++;
+ if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
+ chunk->nruns_adjac++;
+ chunk->nruns_avail++;
+ assert(chunk->nruns_avail > chunk->nruns_adjac);
+
+ if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
+ arena->ndirty += npages;
+ chunk->ndirty += npages;
+ }
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+
+ arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
+ pageind));
+}
+
+static void
+arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+{
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+
+ /*
+ * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
+ * removed and reinserted even if the run to be removed is clean.
+ */
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+
+ if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
+ chunk->nruns_adjac--;
+ if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
+ chunk->nruns_adjac--;
+ chunk->nruns_avail--;
+ assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
+ == 0 && chunk->nruns_adjac == 0));
+
+ if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
+ arena->ndirty -= npages;
+ chunk->ndirty -= npages;
+ }
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+
+ arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
+ pageind));
+}
+
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
@@ -193,7 +375,6 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
arena_chunk_t *chunk;
size_t run_ind, total_pages, need_pages, rem_pages, i;
size_t flag_dirty;
- arena_avail_tree_t *runs_avail;
assert((large && binind == BININD_INVALID) || (large == false && binind
!= BININD_INVALID));
@@ -201,8 +382,6 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
- &arena->runs_avail_clean;
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE;
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
@@ -212,7 +391,7 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;
- arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind));
+ arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
if (config_stats) {
/*
* Update stats_cactive if nactive is crossing a chunk
@@ -244,14 +423,8 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
arena_mapbits_unzeroed_get(chunk,
run_ind+total_pages-1));
}
- arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
- run_ind+need_pages));
- }
-
- /* Update dirty page accounting. */
- if (flag_dirty != 0) {
- chunk->ndirty -= need_pages;
- arena->ndirty -= need_pages;
+ arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
+ false, true);
}
/*
@@ -344,8 +517,6 @@ arena_chunk_alloc(arena_t *arena)
size_t i;
if (arena->spare != NULL) {
- arena_avail_tree_t *runs_avail;
-
chunk = arena->spare;
arena->spare = NULL;
@@ -357,14 +528,6 @@ arena_chunk_alloc(arena_t *arena)
chunk_npages-1) == arena_maxclass);
assert(arena_mapbits_dirty_get(chunk, map_bias) ==
arena_mapbits_dirty_get(chunk, chunk_npages-1));
-
- /* Insert the run into the appropriate runs_avail_* tree. */
- if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
- runs_avail = &arena->runs_avail_clean;
- else
- runs_avail = &arena->runs_avail_dirty;
- arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
- map_bias));
} else {
bool zero;
size_t unzeroed;
@@ -372,7 +535,7 @@ arena_chunk_alloc(arena_t *arena)
zero = false;
malloc_mutex_unlock(&arena->lock);
chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
- false, &zero);
+ false, &zero, arena->dss_prec);
malloc_mutex_lock(&arena->lock);
if (chunk == NULL)
return (NULL);
@@ -380,8 +543,6 @@ arena_chunk_alloc(arena_t *arena)
arena->stats.mapped += chunksize;
chunk->arena = arena;
- ql_elm_new(chunk, link_dirty);
- chunk->dirtied = false;
/*
* Claim that no pages are in use, since the header is merely
@@ -389,6 +550,9 @@ arena_chunk_alloc(arena_t *arena)
*/
chunk->ndirty = 0;
+ chunk->nruns_avail = 0;
+ chunk->nruns_adjac = 0;
+
/*
* Initialize the map to contain one maximal free untouched run.
* Mark the pages as zeroed iff chunk_alloc() returned a zeroed
@@ -412,20 +576,18 @@ arena_chunk_alloc(arena_t *arena)
}
arena_mapbits_unallocated_set(chunk, chunk_npages-1,
arena_maxclass, unzeroed);
-
- /* Insert the run into the runs_avail_clean tree. */
- arena_avail_tree_insert(&arena->runs_avail_clean,
- arena_mapp_get(chunk, map_bias));
}
+ /* Insert the run into the runs_avail tree. */
+ arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
+ false, false);
+
return (chunk);
}
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
- arena_avail_tree_t *runs_avail;
-
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
@@ -436,24 +598,16 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
arena_mapbits_dirty_get(chunk, chunk_npages-1));
/*
- * Remove run from the appropriate runs_avail_* tree, so that the arena
- * does not use it.
+ * Remove run from the runs_avail tree, so that the arena does not use
+ * it.
*/
- if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
- runs_avail = &arena->runs_avail_clean;
- else
- runs_avail = &arena->runs_avail_dirty;
- arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, map_bias));
+ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
+ false, false);
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
arena->spare = chunk;
- if (spare->dirtied) {
- ql_remove(&chunk->arena->chunks_dirty, spare,
- link_dirty);
- arena->ndirty -= spare->ndirty;
- }
malloc_mutex_unlock(&arena->lock);
chunk_dealloc((void *)spare, chunksize, true);
malloc_mutex_lock(&arena->lock);
@@ -471,19 +625,7 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
arena_chunk_map_t *mapelm, key;
key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
-
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split(arena, run, size, large, binind, zero);
- return (run);
- }
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = (((uintptr_t)mapelm -
@@ -537,41 +679,40 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
static inline void
arena_maybe_purge(arena_t *arena)
{
+ size_t npurgeable, threshold;
+
+ /* Don't purge if the option is disabled. */
+ if (opt_lg_dirty_mult < 0)
+ return;
+ /* Don't purge if all dirty pages are already being purged. */
+ if (arena->ndirty <= arena->npurgatory)
+ return;
+ npurgeable = arena->ndirty - arena->npurgatory;
+ threshold = (arena->nactive >> opt_lg_dirty_mult);
+ /*
+ * Don't purge unless the number of purgeable pages exceeds the
+ * threshold.
+ */
+ if (npurgeable <= threshold)
+ return;
- /* Enforce opt_lg_dirty_mult. */
- if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
- (arena->ndirty - arena->npurgatory) > chunk_npages &&
- (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
- arena->npurgatory))
- arena_purge(arena, false);
+ arena_purge(arena, false);
}
-static inline void
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
+static inline size_t
+arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
+ size_t npurged;
ql_head(arena_chunk_map_t) mapelms;
arena_chunk_map_t *mapelm;
- size_t pageind, flag_unzeroed;
- size_t ndirty;
+ size_t pageind, npages;
size_t nmadvise;
ql_new(&mapelms);
- flag_unzeroed =
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
- /*
- * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
- * mappings, but not for file-backed mappings.
- */
- 0
-#else
- CHUNK_MAP_UNZEROED
-#endif
- ;
-
/*
* If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
+ * run is reinserted into runs_avail, and 2) so that it cannot be
* completely discarded by another thread while arena->lock is dropped
* by this thread. Note that the arena_run_dalloc() call will
* implicitly deallocate the chunk, so no explicit action is required
@@ -591,68 +732,50 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
arena_chunk_alloc(arena);
}
- /* Temporarily allocate all free dirty runs within chunk. */
- for (pageind = map_bias; pageind < chunk_npages;) {
+ if (config_stats)
+ arena->stats.purged += chunk->ndirty;
+
+ /*
+ * Operate on all dirty runs if there is no clean/dirty run
+ * fragmentation.
+ */
+ if (chunk->nruns_adjac == 0)
+ all = true;
+
+ /*
+ * Temporarily allocate free dirty runs within chunk. If all is false,
+ * only operate on dirty runs that are fragments; otherwise operate on
+ * all dirty runs.
+ */
+ for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
mapelm = arena_mapp_get(chunk, pageind);
if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
- size_t npages;
+ size_t run_size =
+ arena_mapbits_unallocated_size_get(chunk, pageind);
- npages = arena_mapbits_unallocated_size_get(chunk,
- pageind) >> LG_PAGE;
+ npages = run_size >> LG_PAGE;
assert(pageind + npages <= chunk_npages);
assert(arena_mapbits_dirty_get(chunk, pageind) ==
arena_mapbits_dirty_get(chunk, pageind+npages-1));
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- size_t i;
-
- arena_avail_tree_remove(
- &arena->runs_avail_dirty, mapelm);
- arena_mapbits_unzeroed_set(chunk, pageind,
- flag_unzeroed);
- arena_mapbits_large_set(chunk, pageind,
- (npages << LG_PAGE), 0);
- /*
- * Update internal elements in the page map, so
- * that CHUNK_MAP_UNZEROED is properly set.
- */
- for (i = 1; i < npages - 1; i++) {
- arena_mapbits_unzeroed_set(chunk,
- pageind+i, flag_unzeroed);
- }
- if (npages > 1) {
- arena_mapbits_unzeroed_set(chunk,
- pageind+npages-1, flag_unzeroed);
- arena_mapbits_large_set(chunk,
- pageind+npages-1, 0, 0);
- }
+ if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
+ (all || arena_avail_adjac(chunk, pageind,
+ npages))) {
+ arena_run_t *run = (arena_run_t *)((uintptr_t)
+ chunk + (uintptr_t)(pageind << LG_PAGE));
- if (config_stats) {
- /*
- * Update stats_cactive if nactive is
- * crossing a chunk multiple.
- */
- size_t cactive_diff =
- CHUNK_CEILING((arena->nactive +
- npages) << LG_PAGE) -
- CHUNK_CEILING(arena->nactive <<
- LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_add(cactive_diff);
- }
- arena->nactive += npages;
+ arena_run_split(arena, run, run_size, true,
+ BININD_INVALID, false);
/* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link);
ql_tail_insert(&mapelms, mapelm, u.ql_link);
}
-
- pageind += npages;
} else {
- /* Skip allocated run. */
- if (arena_mapbits_large_get(chunk, pageind))
- pageind += arena_mapbits_large_size_get(chunk,
+ /* Skip run. */
+ if (arena_mapbits_large_get(chunk, pageind) != 0) {
+ npages = arena_mapbits_large_size_get(chunk,
pageind) >> LG_PAGE;
- else {
+ } else {
size_t binind;
arena_bin_info_t *bin_info;
arena_run_t *run = (arena_run_t *)((uintptr_t)
@@ -662,41 +785,48 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
pageind) == 0);
binind = arena_bin_index(arena, run->bin);
bin_info = &arena_bin_info[binind];
- pageind += bin_info->run_size >> LG_PAGE;
+ npages = bin_info->run_size >> LG_PAGE;
}
}
}
assert(pageind == chunk_npages);
-
- if (config_debug)
- ndirty = chunk->ndirty;
- if (config_stats)
- arena->stats.purged += chunk->ndirty;
- arena->ndirty -= chunk->ndirty;
- chunk->ndirty = 0;
- ql_remove(&arena->chunks_dirty, chunk, link_dirty);
- chunk->dirtied = false;
+ assert(chunk->ndirty == 0 || all == false);
+ assert(chunk->nruns_adjac == 0);
malloc_mutex_unlock(&arena->lock);
if (config_stats)
nmadvise = 0;
+ npurged = 0;
ql_foreach(mapelm, &mapelms, u.ql_link) {
- size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+ bool unzeroed;
+ size_t flag_unzeroed, i;
+
+ pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias;
- size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
+ npages = arena_mapbits_large_size_get(chunk, pageind) >>
LG_PAGE;
-
assert(pageind + npages <= chunk_npages);
- assert(ndirty >= npages);
- if (config_debug)
- ndirty -= npages;
-
- pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
- (npages << LG_PAGE));
+ unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
+ LG_PAGE)), (npages << LG_PAGE));
+ flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
+ /*
+ * Set the unzeroed flag for all pages, now that pages_purge()
+ * has returned whether the pages were zeroed as a side effect
+ * of purging. This chunk map modification is safe even though
+ * the arena mutex isn't currently owned by this thread,
+ * because the run is marked as allocated, thus protecting it
+ * from being modified by any other thread. As long as these
+ * writes don't perturb the first and last elements'
+ * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+ */
+ for (i = 0; i < npages; i++) {
+ arena_mapbits_unzeroed_set(chunk, pageind+i,
+ flag_unzeroed);
+ }
+ npurged += npages;
if (config_stats)
nmadvise++;
}
- assert(ndirty == 0);
malloc_mutex_lock(&arena->lock);
if (config_stats)
arena->stats.nmadvise += nmadvise;
@@ -704,14 +834,27 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
/* Deallocate runs. */
for (mapelm = ql_first(&mapelms); mapelm != NULL;
mapelm = ql_first(&mapelms)) {
- size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)(pageind << LG_PAGE));
+ arena_run_t *run;
+ pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+ sizeof(arena_chunk_map_t)) + map_bias;
+ run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
+ LG_PAGE));
ql_remove(&mapelms, mapelm, u.ql_link);
- arena_run_dalloc(arena, run, false);
+ arena_run_dalloc(arena, run, false, true);
}
+
+ return (npurged);
+}
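
arena_chunk_purge() now distinguishes between purging every dirty run and purging only the dirty runs that cause clean/dirty fragmentation (tracked through chunk->nruns_adjac and arena_avail_adjac()). The selection rule reduces to a small predicate; this sketch restates it with plain booleans standing in for the chunk-map queries:

    #include <stdbool.h>

    /*
     * A dirty run is purged when everything is being purged, or when it sits
     * next to a clean/active neighbor (i.e. it is a fragment).  A chunk with
     * no such adjacency takes the "all = true" shortcut and is purged whole.
     */
    static bool
    purge_this_run(bool purge_all, bool run_is_dirty, bool run_is_adjacent)
    {
            return (run_is_dirty && (purge_all || run_is_adjacent));
    }
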
+
+static arena_chunk_t *
+chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
+{
+ size_t *ndirty = (size_t *)arg;
+
+ assert(chunk->ndirty != 0);
+ *ndirty += chunk->ndirty;
+ return (NULL);
}
static void
@@ -722,14 +865,11 @@ arena_purge(arena_t *arena, bool all)
if (config_debug) {
size_t ndirty = 0;
- ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
- assert(chunk->dirtied);
- ndirty += chunk->ndirty;
- }
+ arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
+ chunks_dirty_iter_cb, (void *)&ndirty);
assert(ndirty == arena->ndirty);
}
assert(arena->ndirty > arena->npurgatory || all);
- assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
arena->npurgatory) || all);
@@ -741,16 +881,24 @@ arena_purge(arena_t *arena, bool all)
* purge, and add the result to arena->npurgatory. This will keep
* multiple threads from racing to reduce ndirty below the threshold.
*/
- npurgatory = arena->ndirty - arena->npurgatory;
- if (all == false) {
- assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
- npurgatory -= arena->nactive >> opt_lg_dirty_mult;
+ {
+ size_t npurgeable = arena->ndirty - arena->npurgatory;
+
+ if (all == false) {
+ size_t threshold = (arena->nactive >>
+ opt_lg_dirty_mult);
+
+ npurgatory = npurgeable - threshold;
+ } else
+ npurgatory = npurgeable;
}
arena->npurgatory += npurgatory;
while (npurgatory > 0) {
+ size_t npurgeable, npurged, nunpurged;
+
/* Get next chunk with dirty pages. */
- chunk = ql_first(&arena->chunks_dirty);
+ chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
if (chunk == NULL) {
/*
* This thread was unable to purge as many pages as
@@ -761,23 +909,15 @@ arena_purge(arena_t *arena, bool all)
arena->npurgatory -= npurgatory;
return;
}
- while (chunk->ndirty == 0) {
- ql_remove(&arena->chunks_dirty, chunk, link_dirty);
- chunk->dirtied = false;
- chunk = ql_first(&arena->chunks_dirty);
- if (chunk == NULL) {
- /* Same logic as for above. */
- arena->npurgatory -= npurgatory;
- return;
- }
- }
+ npurgeable = chunk->ndirty;
+ assert(npurgeable != 0);
- if (chunk->ndirty > npurgatory) {
+ if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
/*
- * This thread will, at a minimum, purge all the dirty
- * pages in chunk, so set npurgatory to reflect this
- * thread's commitment to purge the pages. This tends
- * to reduce the chances of the following scenario:
+ * This thread will purge all the dirty pages in chunk,
+ * so set npurgatory to reflect this thread's intent to
+ * purge the pages. This tends to reduce the chances
+ * of the following scenario:
*
* 1) This thread sets arena->npurgatory such that
* (arena->ndirty - arena->npurgatory) is at the
@@ -791,13 +931,20 @@ arena_purge(arena_t *arena, bool all)
* because all of the purging work being done really
* needs to happen.
*/
- arena->npurgatory += chunk->ndirty - npurgatory;
- npurgatory = chunk->ndirty;
+ arena->npurgatory += npurgeable - npurgatory;
+ npurgatory = npurgeable;
}
- arena->npurgatory -= chunk->ndirty;
- npurgatory -= chunk->ndirty;
- arena_chunk_purge(arena, chunk);
+ /*
+ * Keep track of how many pages are purgeable, versus how many
+ * actually get purged, and adjust counters accordingly.
+ */
+ arena->npurgatory -= npurgeable;
+ npurgatory -= npurgeable;
+ npurged = arena_chunk_purge(arena, chunk, all);
+ nunpurged = npurgeable - npurged;
+ arena->npurgatory += nunpurged;
+ npurgatory += nunpurged;
}
}
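
Because arena_chunk_purge() may now skip non-fragment runs, it returns the number of pages actually purged, and arena_purge() credits the shortfall back into both its local quota and arena->npurgatory. The charge-then-refund sequence in the diff nets out to removing only what was purged; a simplified sketch of that net effect, with asserts stating the sketch's consistency assumptions:

    #include <assert.h>
    #include <stddef.h>

    static void
    settle_purge_quota(size_t *arena_npurgatory, size_t *quota, size_t npurged)
    {
            /* Assumed invariants for this simplified form. */
            assert(npurged <= *arena_npurgatory);
            assert(npurged <= *quota);

            *arena_npurgatory -= npurged;
            *quota -= npurged;
    }
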
@@ -811,11 +958,10 @@ arena_purge_all(arena_t *arena)
}
static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
arena_chunk_t *chunk;
size_t size, run_ind, run_pages, flag_dirty;
- arena_avail_tree_t *runs_avail;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
@@ -846,15 +992,14 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
/*
* The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated.
+ * if it was already dirty before being allocated and the caller
+ * doesn't claim to have cleaned it.
*/
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
+ if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
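
The new cleaned argument lets a purging caller assert that a previously dirty run no longer needs the dirty flag. The rule it implements is compact enough to state as a one-line predicate; this sketch uses plain booleans in place of the chunk-map reads:

    #include <stdbool.h>

    /* A freed run stays dirty if the caller dirtied it, or if it was already
     * dirty and the caller does not claim to have cleaned (purged) it. */
    static bool
    freed_run_dirty(bool caller_dirtied, bool was_dirty, bool cleaned)
    {
            return (caller_dirtied || (was_dirty && cleaned == false));
    }
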
- runs_avail = dirty ? &arena->runs_avail_dirty :
- &arena->runs_avail_clean;
/* Mark pages as unallocated in the chunk map. */
if (dirty) {
@@ -862,9 +1007,6 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
CHUNK_MAP_DIRTY);
arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
CHUNK_MAP_DIRTY);
-
- chunk->ndirty += run_pages;
- arena->ndirty += run_pages;
} else {
arena_mapbits_unallocated_set(chunk, run_ind, size,
arena_mapbits_unzeroed_get(chunk, run_ind));
@@ -888,8 +1030,8 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
run_ind+run_pages+nrun_pages-1) == nrun_size);
assert(arena_mapbits_dirty_get(chunk,
run_ind+run_pages+nrun_pages-1) == flag_dirty);
- arena_avail_tree_remove(runs_avail,
- arena_mapp_get(chunk, run_ind+run_pages));
+ arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
+ false, true);
size += nrun_size;
run_pages += nrun_pages;
@@ -915,8 +1057,8 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
prun_size);
assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
- arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk,
- run_ind));
+ arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
+ false);
size += prun_size;
run_pages += prun_pages;
@@ -931,19 +1073,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
assert(arena_mapbits_dirty_get(chunk, run_ind) ==
arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, run_ind));
-
- if (dirty) {
- /*
- * Insert into chunks_dirty before potentially calling
- * arena_chunk_dealloc(), so that chunks_dirty and
- * arena->ndirty are consistent.
- */
- if (chunk->dirtied == false) {
- ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
- chunk->dirtied = true;
- }
- }
+ arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
/* Deallocate chunk if it is now completely unused. */
if (size == arena_maxclass) {
@@ -992,7 +1122,7 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
flag_dirty);
- arena_run_dalloc(arena, run, false);
+ arena_run_dalloc(arena, run, false, false);
}
static void
@@ -1025,7 +1155,7 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty);
arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
- dirty);
+ dirty, false);
}
static arena_run_t *
@@ -1536,7 +1666,7 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
((past - run_ind) << LG_PAGE), false);
/* npages = past - run_ind; */
}
- arena_run_dalloc(arena, run, true);
+ arena_run_dalloc(arena, run, true, false);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
@@ -1629,52 +1759,6 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
mapelm = arena_mapp_get(chunk, pageind);
arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}
-void
-arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
- arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats)
-{
- unsigned i;
-
- malloc_mutex_lock(&arena->lock);
- *nactive += arena->nactive;
- *ndirty += arena->ndirty;
-
- astats->mapped += arena->stats.mapped;
- astats->npurge += arena->stats.npurge;
- astats->nmadvise += arena->stats.nmadvise;
- astats->purged += arena->stats.purged;
- astats->allocated_large += arena->stats.allocated_large;
- astats->nmalloc_large += arena->stats.nmalloc_large;
- astats->ndalloc_large += arena->stats.ndalloc_large;
- astats->nrequests_large += arena->stats.nrequests_large;
-
- for (i = 0; i < nlclasses; i++) {
- lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
- lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
- lstats[i].nrequests += arena->stats.lstats[i].nrequests;
- lstats[i].curruns += arena->stats.lstats[i].curruns;
- }
- malloc_mutex_unlock(&arena->lock);
-
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
-
- malloc_mutex_lock(&bin->lock);
- bstats[i].allocated += bin->stats.allocated;
- bstats[i].nmalloc += bin->stats.nmalloc;
- bstats[i].ndalloc += bin->stats.ndalloc;
- bstats[i].nrequests += bin->stats.nrequests;
- if (config_tcache) {
- bstats[i].nfills += bin->stats.nfills;
- bstats[i].nflushes += bin->stats.nflushes;
- }
- bstats[i].nruns += bin->stats.nruns;
- bstats[i].reruns += bin->stats.reruns;
- bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(&bin->lock);
- }
-}
void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
@@ -1694,7 +1778,7 @@ arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
}
}
- arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+ arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
}
void
@@ -1887,8 +1971,9 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
}
void *
-arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero, bool try_tcache)
+arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
+ bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
@@ -1907,9 +1992,9 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
} else
- ret = arena_malloc(NULL, size + extra, zero, try_tcache);
+ ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
if (ret == NULL) {
if (extra == 0)
@@ -1919,9 +2004,10 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- ret = ipalloc(usize, alignment, zero);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ arena);
} else
- ret = arena_malloc(NULL, size, zero, try_tcache);
+ ret = arena_malloc(arena, size, zero, try_tcache_alloc);
if (ret == NULL)
return (NULL);
@@ -1936,10 +2022,78 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
copysize = (size < oldsize) ? size : oldsize;
VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- iqalloc(ptr);
+ iqallocx(ptr, try_tcache_dalloc);
+ return (ret);
+}
+
+dss_prec_t
+arena_dss_prec_get(arena_t *arena)
+{
+ dss_prec_t ret;
+
+ malloc_mutex_lock(&arena->lock);
+ ret = arena->dss_prec;
+ malloc_mutex_unlock(&arena->lock);
return (ret);
}
+void
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
+{
+
+ malloc_mutex_lock(&arena->lock);
+ arena->dss_prec = dss_prec;
+ malloc_mutex_unlock(&arena->lock);
+}
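
arena_dss_prec_get()/arena_dss_prec_set() above follow jemalloc's usual pattern of guarding a per-arena setting with arena->lock so concurrent mallctl writers and allocation paths see a consistent value. A generic sketch of the same pattern using a pthread mutex (all names here are stand-ins, not jemalloc API):

    #include <pthread.h>

    typedef enum { PREC_DISABLED, PREC_PRIMARY, PREC_SECONDARY } prec_t;

    struct arena_like {
            pthread_mutex_t lock;
            prec_t          dss_prec;
    };

    static prec_t
    prec_get(struct arena_like *a)
    {
            prec_t ret;

            pthread_mutex_lock(&a->lock);
            ret = a->dss_prec;
            pthread_mutex_unlock(&a->lock);
            return (ret);
    }

    static void
    prec_set(struct arena_like *a, prec_t prec)
    {
            pthread_mutex_lock(&a->lock);
            a->dss_prec = prec;
            pthread_mutex_unlock(&a->lock);
    }
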
+
+void
+arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+ size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats)
+{
+ unsigned i;
+
+ malloc_mutex_lock(&arena->lock);
+ *dss = dss_prec_names[arena->dss_prec];
+ *nactive += arena->nactive;
+ *ndirty += arena->ndirty;
+
+ astats->mapped += arena->stats.mapped;
+ astats->npurge += arena->stats.npurge;
+ astats->nmadvise += arena->stats.nmadvise;
+ astats->purged += arena->stats.purged;
+ astats->allocated_large += arena->stats.allocated_large;
+ astats->nmalloc_large += arena->stats.nmalloc_large;
+ astats->ndalloc_large += arena->stats.ndalloc_large;
+ astats->nrequests_large += arena->stats.nrequests_large;
+
+ for (i = 0; i < nlclasses; i++) {
+ lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
+ lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
+ lstats[i].nrequests += arena->stats.lstats[i].nrequests;
+ lstats[i].curruns += arena->stats.lstats[i].curruns;
+ }
+ malloc_mutex_unlock(&arena->lock);
+
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+
+ malloc_mutex_lock(&bin->lock);
+ bstats[i].allocated += bin->stats.allocated;
+ bstats[i].nmalloc += bin->stats.nmalloc;
+ bstats[i].ndalloc += bin->stats.ndalloc;
+ bstats[i].nrequests += bin->stats.nrequests;
+ if (config_tcache) {
+ bstats[i].nfills += bin->stats.nfills;
+ bstats[i].nflushes += bin->stats.nflushes;
+ }
+ bstats[i].nruns += bin->stats.nruns;
+ bstats[i].reruns += bin->stats.reruns;
+ bstats[i].curruns += bin->stats.curruns;
+ malloc_mutex_unlock(&bin->lock);
+ }
+}
+
bool
arena_new(arena_t *arena, unsigned ind)
{
@@ -1968,16 +2122,17 @@ arena_new(arena_t *arena, unsigned ind)
if (config_prof)
arena->prof_accumbytes = 0;
+ arena->dss_prec = chunk_dss_prec_get();
+
/* Initialize chunks. */
- ql_new(&arena->chunks_dirty);
+ arena_chunk_dirty_new(&arena->chunks_dirty);
arena->spare = NULL;
arena->nactive = 0;
arena->ndirty = 0;
arena->npurgatory = 0;
- arena_avail_tree_new(&arena->runs_avail_clean);
- arena_avail_tree_new(&arena->runs_avail_dirty);
+ arena_avail_tree_new(&arena->runs_avail);
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
diff --git a/contrib/jemalloc/src/base.c b/contrib/jemalloc/src/base.c
index bafaa74..b1a5945 100644
--- a/contrib/jemalloc/src/base.c
+++ b/contrib/jemalloc/src/base.c
@@ -32,7 +32,8 @@ base_pages_alloc(size_t minsize)
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
zero = false;
- base_pages = chunk_alloc(csize, chunksize, true, &zero);
+ base_pages = chunk_alloc(csize, chunksize, true, &zero,
+ chunk_dss_prec_get());
if (base_pages == NULL)
return (true);
base_next_addr = base_pages;
diff --git a/contrib/jemalloc/src/chunk.c b/contrib/jemalloc/src/chunk.c
index 6bc2454..1a3bb4f6 100644
--- a/contrib/jemalloc/src/chunk.c
+++ b/contrib/jemalloc/src/chunk.c
@@ -4,7 +4,8 @@
/******************************************************************************/
/* Data. */
-size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
+const char *opt_dss = DSS_DEFAULT;
+size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
@@ -15,8 +16,10 @@ chunk_stats_t stats_chunks;
* address space. Depending on function, different tree orderings are needed,
* which is why there are two trees with the same contents.
*/
-static extent_tree_t chunks_szad;
-static extent_tree_t chunks_ad;
+static extent_tree_t chunks_szad_mmap;
+static extent_tree_t chunks_ad_mmap;
+static extent_tree_t chunks_szad_dss;
+static extent_tree_t chunks_ad_dss;
rtree_t *chunks_rtree;
@@ -30,19 +33,23 @@ size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static void *chunk_recycle(size_t size, size_t alignment, bool base,
+static void *chunk_recycle(extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
bool *zero);
-static void chunk_record(void *chunk, size_t size);
+static void chunk_record(extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, void *chunk, size_t size);
/******************************************************************************/
static void *
-chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
+chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
+ size_t alignment, bool base, bool *zero)
{
void *ret;
extent_node_t *node;
extent_node_t key;
size_t alloc_size, leadsize, trailsize;
+ bool zeroed;
if (base) {
/*
@@ -61,7 +68,7 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
key.addr = NULL;
key.size = alloc_size;
malloc_mutex_lock(&chunks_mtx);
- node = extent_tree_szad_nsearch(&chunks_szad, &key);
+ node = extent_tree_szad_nsearch(chunks_szad, &key);
if (node == NULL) {
malloc_mutex_unlock(&chunks_mtx);
return (NULL);
@@ -72,13 +79,13 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
trailsize = node->size - leadsize - size;
ret = (void *)((uintptr_t)node->addr + leadsize);
/* Remove node from the tree. */
- extent_tree_szad_remove(&chunks_szad, node);
- extent_tree_ad_remove(&chunks_ad, node);
+ extent_tree_szad_remove(chunks_szad, node);
+ extent_tree_ad_remove(chunks_ad, node);
if (leadsize != 0) {
/* Insert the leading space as a smaller chunk. */
node->size = leadsize;
- extent_tree_szad_insert(&chunks_szad, node);
- extent_tree_ad_insert(&chunks_ad, node);
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
if (trailsize != 0) {
@@ -101,23 +108,24 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
}
node->addr = (void *)((uintptr_t)(ret) + size);
node->size = trailsize;
- extent_tree_szad_insert(&chunks_szad, node);
- extent_tree_ad_insert(&chunks_ad, node);
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
node = NULL;
}
malloc_mutex_unlock(&chunks_mtx);
- if (node != NULL)
+ zeroed = false;
+ if (node != NULL) {
+ if (node->zeroed) {
+ zeroed = true;
+ *zero = true;
+ }
base_node_dealloc(node);
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
- /* Pages are zeroed as a side effect of pages_purge(). */
- *zero = true;
-#else
- if (*zero) {
+ }
+ if (zeroed == false && *zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
-#endif
return (ret);
}
@@ -128,7 +136,8 @@ chunk_recycle(size_t size, size_t alignment, bool base, bool *zero)
* advantage of them if they are returned.
*/
void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
+chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+ dss_prec_t dss_prec)
{
void *ret;
@@ -137,17 +146,26 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero)
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = chunk_recycle(size, alignment, base, zero);
- if (ret != NULL)
+ /* "primary" dss. */
+ if (config_dss && dss_prec == dss_prec_primary) {
+ if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
+ alignment, base, zero)) != NULL)
+ goto label_return;
+ if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
+ goto label_return;
+ }
+ /* mmap. */
+ if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
+ alignment, base, zero)) != NULL)
goto label_return;
-
- ret = chunk_alloc_mmap(size, alignment, zero);
- if (ret != NULL)
+ if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
goto label_return;
-
- if (config_dss) {
- ret = chunk_alloc_dss(size, alignment, zero);
- if (ret != NULL)
+ /* "secondary" dss. */
+ if (config_dss && dss_prec == dss_prec_secondary) {
+ if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
+ alignment, base, zero)) != NULL)
+ goto label_return;
+ if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
goto label_return;
}
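
The reworked chunk_alloc() above encodes the dss precedence as an ordering of allocation sources: with "primary" the dss (recycled extents first, then fresh sbrk space) is tried before mmap, with "secondary" after it, and with "disabled" not at all. A condensed sketch of that ordering, with hypothetical try_dss()/try_mmap() helpers standing in for the recycle/alloc pairs:

    #include <stddef.h>

    typedef enum { PREC_DISABLED, PREC_PRIMARY, PREC_SECONDARY } prec_t;

    void *try_dss(size_t size);     /* hypothetical: recycle, then sbrk */
    void *try_mmap(size_t size);    /* hypothetical: recycle, then mmap */

    static void *
    chunk_alloc_ordered(size_t size, prec_t prec)
    {
            void *ret;

            if (prec == PREC_PRIMARY && (ret = try_dss(size)) != NULL)
                    return (ret);
            if ((ret = try_mmap(size)) != NULL)
                    return (ret);
            if (prec == PREC_SECONDARY && (ret = try_dss(size)) != NULL)
                    return (ret);
            return (NULL);
    }
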
@@ -189,11 +207,13 @@ label_return:
}
static void
-chunk_record(void *chunk, size_t size)
+chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
+ size_t size)
{
+ bool unzeroed;
extent_node_t *xnode, *node, *prev, key;
- pages_purge(chunk, size);
+ unzeroed = pages_purge(chunk, size);
/*
* Allocate a node before acquiring chunks_mtx even though it might not
@@ -205,7 +225,7 @@ chunk_record(void *chunk, size_t size)
malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
- node = extent_tree_ad_nsearch(&chunks_ad, &key);
+ node = extent_tree_ad_nsearch(chunks_ad, &key);
/* Try to coalesce forward. */
if (node != NULL && node->addr == key.addr) {
/*
@@ -213,10 +233,11 @@ chunk_record(void *chunk, size_t size)
* not change the position within chunks_ad, so only
* remove/insert from/into chunks_szad.
*/
- extent_tree_szad_remove(&chunks_szad, node);
+ extent_tree_szad_remove(chunks_szad, node);
node->addr = chunk;
node->size += size;
- extent_tree_szad_insert(&chunks_szad, node);
+ node->zeroed = (node->zeroed && (unzeroed == false));
+ extent_tree_szad_insert(chunks_szad, node);
if (xnode != NULL)
base_node_dealloc(xnode);
} else {
@@ -234,12 +255,13 @@ chunk_record(void *chunk, size_t size)
node = xnode;
node->addr = chunk;
node->size = size;
- extent_tree_ad_insert(&chunks_ad, node);
- extent_tree_szad_insert(&chunks_szad, node);
+ node->zeroed = (unzeroed == false);
+ extent_tree_ad_insert(chunks_ad, node);
+ extent_tree_szad_insert(chunks_szad, node);
}
/* Try to coalesce backward. */
- prev = extent_tree_ad_prev(&chunks_ad, node);
+ prev = extent_tree_ad_prev(chunks_ad, node);
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
chunk) {
/*
@@ -247,13 +269,14 @@ chunk_record(void *chunk, size_t size)
* not change the position within chunks_ad, so only
* remove/insert node from/into chunks_szad.
*/
- extent_tree_szad_remove(&chunks_szad, prev);
- extent_tree_ad_remove(&chunks_ad, prev);
+ extent_tree_szad_remove(chunks_szad, prev);
+ extent_tree_ad_remove(chunks_ad, prev);
- extent_tree_szad_remove(&chunks_szad, node);
+ extent_tree_szad_remove(chunks_szad, node);
node->addr = prev->addr;
node->size += prev->size;
- extent_tree_szad_insert(&chunks_szad, node);
+ node->zeroed = (node->zeroed && prev->zeroed);
+ extent_tree_szad_insert(chunks_szad, node);
base_node_dealloc(prev);
}
@@ -261,6 +284,20 @@ chunk_record(void *chunk, size_t size)
}
void
+chunk_unmap(void *chunk, size_t size)
+{
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+ if (config_dss && chunk_in_dss(chunk))
+ chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
+ else if (chunk_dealloc_mmap(chunk, size))
+ chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
+}
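
chunk_unmap() above splits retained address space by origin: sbrk()-backed chunks cannot be returned to the kernel, so they always go back onto the dss extent trees, while mmap()ed chunks are only recorded when chunk_dealloc_mmap() reports that it did not unmap them. A sketch of the routing, with hypothetical helpers in place of jemalloc's functions:

    #include <stdbool.h>
    #include <stddef.h>

    bool from_dss(void *chunk);                     /* hypothetical */
    bool dealloc_mmap(void *chunk, size_t size);    /* true: not unmapped */
    void record_dss(void *chunk, size_t size);      /* hypothetical */
    void record_mmap(void *chunk, size_t size);     /* hypothetical */

    static void
    unmap_or_retain(void *chunk, size_t size)
    {
            if (from_dss(chunk))
                    record_dss(chunk, size);
            else if (dealloc_mmap(chunk, size))
                    record_mmap(chunk, size);
    }
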
+
+void
chunk_dealloc(void *chunk, size_t size, bool unmap)
{
@@ -273,15 +310,13 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
+ assert(stats_chunks.curchunks >= (size / chunksize));
stats_chunks.curchunks -= (size / chunksize);
malloc_mutex_unlock(&chunks_mtx);
}
- if (unmap) {
- if ((config_dss && chunk_in_dss(chunk)) ||
- chunk_dealloc_mmap(chunk, size))
- chunk_record(chunk, size);
- }
+ if (unmap)
+ chunk_unmap(chunk, size);
}
bool
@@ -301,8 +336,10 @@ chunk_boot(void)
}
if (config_dss && chunk_dss_boot())
return (true);
- extent_tree_szad_new(&chunks_szad);
- extent_tree_ad_new(&chunks_ad);
+ extent_tree_szad_new(&chunks_szad_mmap);
+ extent_tree_ad_new(&chunks_ad_mmap);
+ extent_tree_szad_new(&chunks_szad_dss);
+ extent_tree_ad_new(&chunks_ad_dss);
if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
opt_lg_chunk);
@@ -312,3 +349,33 @@ chunk_boot(void)
return (false);
}
+
+void
+chunk_prefork(void)
+{
+
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_ivsalloc)
+ rtree_prefork(chunks_rtree);
+ chunk_dss_prefork();
+}
+
+void
+chunk_postfork_parent(void)
+{
+
+ chunk_dss_postfork_parent();
+ if (config_ivsalloc)
+ rtree_postfork_parent(chunks_rtree);
+ malloc_mutex_postfork_parent(&chunks_mtx);
+}
+
+void
+chunk_postfork_child(void)
+{
+
+ chunk_dss_postfork_child();
+ if (config_ivsalloc)
+ rtree_postfork_child(chunks_rtree);
+ malloc_mutex_postfork_child(&chunks_mtx);
+}
diff --git a/contrib/jemalloc/src/chunk_dss.c b/contrib/jemalloc/src/chunk_dss.c
index 2d68e48..24781cc 100644
--- a/contrib/jemalloc/src/chunk_dss.c
+++ b/contrib/jemalloc/src/chunk_dss.c
@@ -3,6 +3,16 @@
/******************************************************************************/
/* Data. */
+const char *dss_prec_names[] = {
+ "disabled",
+ "primary",
+ "secondary",
+ "N/A"
+};
+
+/* Current dss precedence default, used when creating new arenas. */
+static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
+
/*
* Protects sbrk() calls. This avoids malloc races among threads, though it
* does not protect against races with threads that call sbrk() directly.
@@ -29,6 +39,31 @@ sbrk(intptr_t increment)
}
#endif
+dss_prec_t
+chunk_dss_prec_get(void)
+{
+ dss_prec_t ret;
+
+ if (config_dss == false)
+ return (dss_prec_disabled);
+ malloc_mutex_lock(&dss_mtx);
+ ret = dss_prec_default;
+ malloc_mutex_unlock(&dss_mtx);
+ return (ret);
+}
+
+bool
+chunk_dss_prec_set(dss_prec_t dss_prec)
+{
+
+ if (config_dss == false)
+ return (true);
+ malloc_mutex_lock(&dss_mtx);
+ dss_prec_default = dss_prec;
+ malloc_mutex_unlock(&dss_mtx);
+ return (false);
+}
+
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
@@ -88,7 +123,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
if (cpad_size != 0)
- chunk_dealloc(cpad, cpad_size, true);
+ chunk_unmap(cpad, cpad_size);
if (*zero) {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
diff --git a/contrib/jemalloc/src/chunk_mmap.c b/contrib/jemalloc/src/chunk_mmap.c
index c8da655..8a42e75 100644
--- a/contrib/jemalloc/src/chunk_mmap.c
+++ b/contrib/jemalloc/src/chunk_mmap.c
@@ -113,22 +113,30 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
#endif
}
-void
+bool
pages_purge(void *addr, size_t length)
{
+ bool unzeroed;
#ifdef _WIN32
VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
+ unzeroed = true;
#else
# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# define JEMALLOC_MADV_ZEROS true
# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
# else
# error "No method defined for purging unused dirty pages."
# endif
- madvise(addr, length, JEMALLOC_MADV_PURGE);
+ int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
+ unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
+# undef JEMALLOC_MADV_PURGE
+# undef JEMALLOC_MADV_ZEROS
#endif
+ return (unzeroed);
}
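
pages_purge() now returns whether the purged range may still contain old data, which is what lets chunk_recycle() and arena_chunk_purge() above skip redundant memset()s. A POSIX-only sketch of the same contract (the _WIN32 branch is omitted; the MADV constants mirror the ones used in the diff):

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/mman.h>

    static bool
    purge_pages(void *addr, size_t length)
    {
            bool unzeroed;

    #if defined(MADV_DONTNEED)
            /* Anonymous mappings come back zero-filled, so only an error
             * leaves old data behind. */
            unzeroed = (madvise(addr, length, MADV_DONTNEED) != 0);
    #elif defined(MADV_FREE)
            /* MADV_FREE makes no zeroing guarantee. */
            (void)madvise(addr, length, MADV_FREE);
            unzeroed = true;
    #else
    #  error "No method defined for purging unused dirty pages."
    #endif
            return (unzeroed);
    }
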
static void *
diff --git a/contrib/jemalloc/src/ctl.c b/contrib/jemalloc/src/ctl.c
index 55e7667..6e01b1e 100644
--- a/contrib/jemalloc/src/ctl.c
+++ b/contrib/jemalloc/src/ctl.c
@@ -48,8 +48,8 @@ static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
-const ctl_named_node_t *n##_index(const size_t *mib, size_t miblen, \
- size_t i);
+static const ctl_named_node_t *n##_index(const size_t *mib, \
+ size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
@@ -58,6 +58,7 @@ static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
static void ctl_arena_refresh(arena_t *arena, unsigned i);
+static bool ctl_grow(void);
static void ctl_refresh(void);
static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
@@ -88,6 +89,7 @@ CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
+CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
@@ -110,6 +112,10 @@ CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
+CTL_PROTO(arena_i_purge)
+static void arena_purge(unsigned arena_ind);
+CTL_PROTO(arena_i_dss)
+INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
@@ -125,6 +131,7 @@ CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
+CTL_PROTO(arenas_extend)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
@@ -158,6 +165,7 @@ CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
@@ -223,6 +231,7 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
+ {NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
@@ -247,6 +256,18 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_accum"), CTL(opt_prof_accum)}
};
+static const ctl_named_node_t arena_i_node[] = {
+ {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("dss"), CTL(arena_i_dss)}
+};
+static const ctl_named_node_t super_arena_i_node[] = {
+ {NAME(""), CHILD(named, arena_i)}
+};
+
+static const ctl_indexed_node_t arena_node[] = {
+ {INDEX(arena_i)}
+};
+
static const ctl_named_node_t arenas_bin_i_node[] = {
{NAME("size"), CTL(arenas_bin_i_size)},
{NAME("nregs"), CTL(arenas_bin_i_nregs)},
@@ -282,7 +303,8 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("bin"), CHILD(indexed, arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)},
{NAME("lrun"), CHILD(indexed, arenas_lrun)},
- {NAME("purge"), CTL(arenas_purge)}
+ {NAME("purge"), CTL(arenas_purge)},
+ {NAME("extend"), CTL(arenas_extend)}
};
static const ctl_named_node_t prof_node[] = {
@@ -352,6 +374,7 @@ static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
@@ -387,6 +410,7 @@ static const ctl_named_node_t root_node[] = {
{NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)},
+ {NAME("arena"), CHILD(indexed, arena)},
{NAME("arenas"), CHILD(named, arenas)},
{NAME("prof"), CHILD(named, prof)},
{NAME("stats"), CHILD(named, stats)}
@@ -420,6 +444,7 @@ static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
+ astats->dss = dss_prec_names[dss_prec_limit];
astats->pactive = 0;
astats->pdirty = 0;
if (config_stats) {
@@ -439,8 +464,8 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
- arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
- &cstats->astats, cstats->bstats, cstats->lstats);
+ arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
+ &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].allocated;
@@ -500,7 +525,7 @@ static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
- ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];
+ ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats);
@@ -518,11 +543,72 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
}
}
+static bool
+ctl_grow(void)
+{
+ size_t astats_size;
+ ctl_arena_stats_t *astats;
+ arena_t **tarenas;
+
+ /* Extend arena stats and arenas arrays. */
+ astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
+ if (ctl_stats.narenas == narenas_auto) {
+ /* ctl_stats.arenas and arenas came from base_alloc(). */
+ astats = (ctl_arena_stats_t *)imalloc(astats_size);
+ if (astats == NULL)
+ return (true);
+ memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+ sizeof(ctl_arena_stats_t));
+
+ tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
+ sizeof(arena_t *));
+ if (tarenas == NULL) {
+ idalloc(astats);
+ return (true);
+ }
+ memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
+ } else {
+ astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
+ astats_size, 0, 0, false, false);
+ if (astats == NULL)
+ return (true);
+
+ tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
+ sizeof(arena_t *), 0, 0, false, false);
+ if (tarenas == NULL)
+ return (true);
+ }
+ /* Initialize the new astats and arenas elements. */
+ memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
+ if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
+ return (true);
+ tarenas[ctl_stats.narenas] = NULL;
+ /* Swap merged stats to their new location. */
+ {
+ ctl_arena_stats_t tstats;
+ memcpy(&tstats, &astats[ctl_stats.narenas],
+ sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas],
+ &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas + 1], &tstats,
+ sizeof(ctl_arena_stats_t));
+ }
+ ctl_stats.arenas = astats;
+ ctl_stats.narenas++;
+ malloc_mutex_lock(&arenas_lock);
+ arenas = tarenas;
+ narenas_total++;
+ arenas_extend(narenas_total - 1);
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (false);
+}
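
ctl_grow() backs the new "arenas.extend" control. The subtle part is that ctl_stats.arenas keeps one extra summary element at index narenas, so growing by one arena means allocating narenas+2 elements and sliding the summary slot to the new end. A reduced sketch of that step with a hypothetical stats_t and plain realloc()/memcpy() in place of jemalloc's internal allocators:

    #include <stdlib.h>
    #include <string.h>

    typedef struct { unsigned long nmalloc; } stats_t;     /* hypothetical */

    static stats_t *
    grow_stats_array(stats_t *stats, size_t narenas)
    {
            stats_t *grown = realloc(stats, (narenas + 2) * sizeof(stats_t));

            if (grown == NULL)
                    return (NULL);
            /* Slide the merged-summary element from index narenas to narenas+1. */
            memcpy(&grown[narenas + 1], &grown[narenas], sizeof(stats_t));
            /* The freed-up slot becomes the new arena's per-arena stats. */
            memset(&grown[narenas], 0, sizeof(stats_t));
            return (grown);
    }
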
+
static void
ctl_refresh(void)
{
unsigned i;
- VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
if (config_stats) {
malloc_mutex_lock(&chunks_mtx);
@@ -542,19 +628,19 @@ ctl_refresh(void)
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
*/
- ctl_stats.arenas[narenas].nthreads = 0;
- ctl_arena_clear(&ctl_stats.arenas[narenas]);
+ ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
+ ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
- for (i = 0; i < narenas; i++) {
+ memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+ for (i = 0; i < ctl_stats.narenas; i++) {
if (arenas[i] != NULL)
ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
else
ctl_stats.arenas[i].nthreads = 0;
}
malloc_mutex_unlock(&arenas_lock);
- for (i = 0; i < narenas; i++) {
+ for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized;
@@ -563,11 +649,13 @@ ctl_refresh(void)
}
if (config_stats) {
- ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
- + ctl_stats.arenas[narenas].astats.allocated_large
+ ctl_stats.allocated =
+ ctl_stats.arenas[ctl_stats.narenas].allocated_small
+ + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
+ + ctl_stats.huge.allocated;
+ ctl_stats.active =
+ (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
+ ctl_stats.huge.allocated;
- ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
- LG_PAGE) + ctl_stats.huge.allocated;
ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
}
@@ -585,13 +673,15 @@ ctl_init(void)
* Allocate space for one extra arena stats element, which
* contains summed stats across all arenas.
*/
+ assert(narenas_auto == narenas_total_get());
+ ctl_stats.narenas = narenas_auto;
ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
- (narenas + 1) * sizeof(ctl_arena_stats_t));
+ (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
if (ctl_stats.arenas == NULL) {
ret = true;
goto label_return;
}
- memset(ctl_stats.arenas, 0, (narenas + 1) *
+ memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
/*
@@ -601,14 +691,14 @@ ctl_init(void)
*/
if (config_stats) {
unsigned i;
- for (i = 0; i <= narenas; i++) {
+ for (i = 0; i <= ctl_stats.narenas; i++) {
if (ctl_arena_init(&ctl_stats.arenas[i])) {
ret = true;
goto label_return;
}
}
}
- ctl_stats.arenas[narenas].initialized = true;
+ ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
ctl_refresh();
@@ -827,6 +917,27 @@ ctl_boot(void)
return (false);
}
+void
+ctl_prefork(void)
+{
+
+ malloc_mutex_lock(&ctl_mtx);
+}
+
+void
+ctl_postfork_parent(void)
+{
+
+ malloc_mutex_postfork_parent(&ctl_mtx);
+}
+
+void
+ctl_postfork_child(void)
+{
+
+ malloc_mutex_postfork_child(&ctl_mtx);
+}
+
/******************************************************************************/
/* *_ctl() functions. */
@@ -1032,8 +1143,8 @@ thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
}
READ(oldval, bool);
-label_return:
ret = 0;
+label_return:
return (ret);
}
@@ -1063,13 +1174,14 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
unsigned newind, oldind;
+ malloc_mutex_lock(&ctl_mtx);
newind = oldind = choose_arena(NULL)->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
if (newind != oldind) {
arena_t *arena;
- if (newind >= narenas) {
+ if (newind >= ctl_stats.narenas) {
/* New arena index is out of range. */
ret = EFAULT;
goto label_return;
@@ -1102,6 +1214,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
label_return:
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -1135,6 +1248,7 @@ CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
@@ -1160,10 +1274,121 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
/******************************************************************************/
+/* ctl_mutex must be held during execution of this function. */
+static void
+arena_purge(unsigned arena_ind)
+{
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+
+ malloc_mutex_lock(&arenas_lock);
+ memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+ malloc_mutex_unlock(&arenas_lock);
+
+ if (arena_ind == ctl_stats.narenas) {
+ unsigned i;
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ if (tarenas[i] != NULL)
+ arena_purge_all(tarenas[i]);
+ }
+ } else {
+ assert(arena_ind < ctl_stats.narenas);
+ if (tarenas[arena_ind] != NULL)
+ arena_purge_all(tarenas[arena_ind]);
+ }
+}
+
+static int
+arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+
+ READONLY();
+ WRITEONLY();
+ malloc_mutex_lock(&ctl_mtx);
+ arena_purge(mib[1]);
+ malloc_mutex_unlock(&ctl_mtx);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret, i;
+ bool match, err;
+ const char *dss;
+ unsigned arena_ind = mib[1];
+ dss_prec_t dss_prec_old = dss_prec_limit;
+ dss_prec_t dss_prec = dss_prec_limit;
+
+ malloc_mutex_lock(&ctl_mtx);
+ WRITE(dss, const char *);
+ match = false;
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strcmp(dss_prec_names[i], dss) == 0) {
+ dss_prec = i;
+ match = true;
+ break;
+ }
+ }
+ if (match == false) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ if (arena_ind < ctl_stats.narenas) {
+ arena_t *arena = arenas[arena_ind];
+ if (arena != NULL) {
+ dss_prec_old = arena_dss_prec_get(arena);
+ arena_dss_prec_set(arena, dss_prec);
+ err = false;
+ } else
+ err = true;
+ } else {
+ dss_prec_old = chunk_dss_prec_get();
+ err = chunk_dss_prec_set(dss_prec);
+ }
+ dss = dss_prec_names[dss_prec_old];
+ READ(dss, const char *);
+ if (err) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
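
arena_i_dss_ctl() accepts the setting as one of the strings in dss_prec_names[] and maps it back to a dss_prec_t, applying it either to one arena or, for the index equal to narenas, to the global default used for new arenas. The lookup itself is a linear scan; restated as a standalone helper with a local copy of the name table:

    #include <string.h>

    static const char *prec_names[] = { "disabled", "primary", "secondary" };

    /* Return the precedence index for name, or -1 if it is not recognized. */
    static int
    prec_from_name(const char *name)
    {
            int i;

            for (i = 0; i < (int)(sizeof(prec_names) / sizeof(prec_names[0])); i++) {
                    if (strcmp(prec_names[i], name) == 0)
                            return (i);
            }
            return (-1);
    }
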
+
+static const ctl_named_node_t *
+arena_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+ const ctl_named_node_t * ret;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (i > ctl_stats.narenas) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = super_arena_i_node;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+
+/******************************************************************************/
+
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1173,7 +1398,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
}
CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1182,7 +1407,27 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_lrun_i_node);
}
-CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
+static int
+arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned narenas;
+
+ malloc_mutex_lock(&ctl_mtx);
+ READONLY();
+ if (*oldlenp != sizeof(unsigned)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ narenas = ctl_stats.narenas;
+ READ(narenas, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1193,13 +1438,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
malloc_mutex_lock(&ctl_mtx);
READONLY();
- if (*oldlenp != narenas * sizeof(bool)) {
+ if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
- nread = (*oldlenp < narenas * sizeof(bool))
- ? (*oldlenp / sizeof(bool)) : narenas;
+ nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
+ ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else {
ret = 0;
- nread = narenas;
+ nread = ctl_stats.narenas;
}
for (i = 0; i < nread; i++)
@@ -1222,36 +1467,43 @@ arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
- unsigned arena;
+ unsigned arena_ind;
+ malloc_mutex_lock(&ctl_mtx);
WRITEONLY();
- arena = UINT_MAX;
- WRITE(arena, unsigned);
- if (newp != NULL && arena >= narenas) {
+ arena_ind = UINT_MAX;
+ WRITE(arena_ind, unsigned);
+ if (newp != NULL && arena_ind >= ctl_stats.narenas)
ret = EFAULT;
- goto label_return;
- } else {
- VARIABLE_ARRAY(arena_t *, tarenas, narenas);
+ else {
+ if (arena_ind == UINT_MAX)
+ arena_ind = ctl_stats.narenas;
+ arena_purge(arena_ind);
+ ret = 0;
+ }
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
- malloc_mutex_unlock(&arenas_lock);
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
- if (arena == UINT_MAX) {
- unsigned i;
- for (i = 0; i < narenas; i++) {
- if (tarenas[i] != NULL)
- arena_purge_all(tarenas[i]);
- }
- } else {
- assert(arena < narenas);
- if (tarenas[arena] != NULL)
- arena_purge_all(tarenas[arena]);
- }
+static int
+arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+
+ malloc_mutex_lock(&ctl_mtx);
+ READONLY();
+ if (ctl_grow()) {
+ ret = EAGAIN;
+ goto label_return;
}
+ READ(ctl_stats.narenas - 1, unsigned);
ret = 0;
label_return:
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -1356,7 +1608,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{
@@ -1374,7 +1626,7 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{
@@ -1384,6 +1636,7 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
}
CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
@@ -1395,13 +1648,13 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
-const ctl_named_node_t *
+static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx);
- if (ctl_stats.arenas[i].initialized == false) {
+ if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
ret = NULL;
goto label_return;
}
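
Taken together, the ctl changes add two externally visible controls: "arenas.extend", which creates an arena and reports its index, and "arena.<i>.dss", which reads or writes that arena's dss preference. The following usage sketch assumes jemalloc's public mallctl() entry point as exposed on FreeBSD through <malloc_np.h>; the control names come from the tables added above.

    #include <stdio.h>
    #include <malloc_np.h>  /* mallctl() on FreeBSD; <jemalloc/jemalloc.h> elsewhere */

    int
    main(void)
    {
            unsigned ind;
            size_t sz = sizeof(ind);
            const char *dss = "primary";
            char name[64];

            /* Create a new arena; its index comes back through oldp. */
            if (mallctl("arenas.extend", &ind, &sz, NULL, 0) != 0)
                    return (1);

            /* Prefer sbrk()-backed chunks for that arena. */
            snprintf(name, sizeof(name), "arena.%u.dss", ind);
            if (mallctl(name, NULL, NULL, &dss, sizeof(dss)) != 0)
                    return (1);

            printf("arena %u created with dss=%s\n", ind, dss);
            return (0);
    }
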
diff --git a/contrib/jemalloc/src/huge.c b/contrib/jemalloc/src/huge.c
index 8a4ec94..aa08d43 100644
--- a/contrib/jemalloc/src/huge.c
+++ b/contrib/jemalloc/src/huge.c
@@ -48,7 +48,8 @@ huge_palloc(size_t size, size_t alignment, bool zero)
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- ret = chunk_alloc(csize, alignment, false, &is_zeroed);
+ ret = chunk_alloc(csize, alignment, false, &is_zeroed,
+ chunk_dss_prec_get());
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@@ -101,7 +102,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero)
+ size_t alignment, bool zero, bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
@@ -180,7 +181,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
#endif
{
memcpy(ret, ptr, copysize);
- iqalloc(ptr);
+ iqallocx(ptr, try_tcache_dalloc);
}
return (ret);
}
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index fa9fcf0..aaf5012 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -37,7 +37,8 @@ unsigned ncpus;
malloc_mutex_t arenas_lock;
arena_t **arenas;
-unsigned narenas;
+unsigned narenas_total;
+unsigned narenas_auto;
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
@@ -148,14 +149,14 @@ choose_arena_hard(void)
{
arena_t *ret;
- if (narenas > 1) {
+ if (narenas_auto > 1) {
unsigned i, choose, first_null;
choose = 0;
- first_null = narenas;
+ first_null = narenas_auto;
malloc_mutex_lock(&arenas_lock);
assert(arenas[0] != NULL);
- for (i = 1; i < narenas; i++) {
+ for (i = 1; i < narenas_auto; i++) {
if (arenas[i] != NULL) {
/*
* Choose the first arena that has the lowest
@@ -164,7 +165,7 @@ choose_arena_hard(void)
if (arenas[i]->nthreads <
arenas[choose]->nthreads)
choose = i;
- } else if (first_null == narenas) {
+ } else if (first_null == narenas_auto) {
/*
* Record the index of the first uninitialized
* arena, in case all extant arenas are in use.
@@ -178,7 +179,8 @@ choose_arena_hard(void)
}
}
- if (arenas[choose]->nthreads == 0 || first_null == narenas) {
+ if (arenas[choose]->nthreads == 0
+ || first_null == narenas_auto) {
/*
* Use an unloaded arena, or the least loaded arena if
* all arenas are already initialized.
@@ -207,7 +209,7 @@ stats_print_atexit(void)
{
if (config_tcache && config_stats) {
- unsigned i;
+ unsigned narenas, i;
/*
* Merge stats from extant threads. This is racy, since
@@ -216,7 +218,7 @@ stats_print_atexit(void)
* out of date by the time they are reported, if other threads
* continue to allocate.
*/
- for (i = 0; i < narenas; i++) {
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena = arenas[i];
if (arena != NULL) {
tcache_t *tcache;
@@ -258,12 +260,13 @@ malloc_ncpus(void)
result = si.dwNumberOfProcessors;
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
+#endif
if (result == -1) {
/* Error. */
ret = 1;
- }
-#endif
- ret = (unsigned)result;
+ } else {
+ ret = (unsigned)result;
+ }
return (ret);
}
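
The malloc_ncpus() change fixes a case where a failing sysconf(3) call first set ret to 1 and then immediately overwrote it with (unsigned)-1; the error path now short-circuits. A condensed POSIX-only sketch of the corrected logic:

    #include <unistd.h>

    static unsigned
    cpu_count_or_one(void)
    {
            long result = sysconf(_SC_NPROCESSORS_ONLN);

            /* On error, fall back to a single CPU instead of (unsigned)-1. */
            return ((result == -1) ? 1 : (unsigned)result);
    }
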
@@ -381,6 +384,22 @@ malloc_conf_init(void)
const char *opts, *k, *v;
size_t klen, vlen;
+ /*
+ * Automatically configure valgrind before processing options. The
+ * valgrind option remains in jemalloc 3.x for compatibility reasons.
+ */
+ if (config_valgrind) {
+ opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
+ if (config_fill && opt_valgrind) {
+ opt_junk = false;
+ assert(opt_zero == false);
+ opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+ opt_redzone = true;
+ }
+ if (config_tcache && opt_valgrind)
+ opt_tcache = false;
+ }
+
for (i = 0; i < 3; i++) {
/* Get runtime configuration. */
switch (i) {
@@ -542,6 +561,30 @@ malloc_conf_init(void)
*/
CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
(config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
+ if (strncmp("dss", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strncmp(dss_prec_names[i], v, vlen)
+ == 0) {
+ if (chunk_dss_prec_set(i)) {
+ malloc_conf_error(
+ "Error setting dss",
+ k, klen, v, vlen);
+ } else {
+ opt_dss =
+ dss_prec_names[i];
+ match = true;
+ break;
+ }
+ }
+ }
+ if (match == false) {
+ malloc_conf_error("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
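
The new "dss" key is parsed like the other MALLOC_CONF options, validating the value against dss_prec_names[] and recording both the global default and opt_dss. As a usage note, the option can be supplied through the MALLOC_CONF environment variable or, as sketched below, through the application-provided configuration string that jemalloc consults during initialization (assuming the stock unprefixed malloc_conf symbol):

    /* Prefer sbrk()-backed chunks process-wide, set at program scope. */
    const char *malloc_conf = "dss:primary";
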
CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
SIZE_T_MAX)
CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
@@ -558,20 +601,7 @@ malloc_conf_init(void)
CONF_HANDLE_BOOL(opt_utrace, "utrace")
}
if (config_valgrind) {
- bool hit;
- CONF_HANDLE_BOOL_HIT(opt_valgrind,
- "valgrind", hit)
- if (config_fill && opt_valgrind && hit) {
- opt_junk = false;
- opt_zero = false;
- if (opt_quarantine == 0) {
- opt_quarantine =
- JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
- }
- opt_redzone = true;
- }
- if (hit)
- continue;
+ CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
}
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
@@ -700,9 +730,9 @@ malloc_init_hard(void)
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
- narenas = 1;
+ narenas_total = narenas_auto = 1;
arenas = init_arenas;
- memset(arenas, 0, sizeof(arena_t *) * narenas);
+ memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/*
* Initialize one arena here. The rest are lazily created in
@@ -760,20 +790,21 @@ malloc_init_hard(void)
else
opt_narenas = 1;
}
- narenas = opt_narenas;
+ narenas_auto = opt_narenas;
/*
* Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/
- if (narenas > chunksize / sizeof(arena_t *)) {
- narenas = chunksize / sizeof(arena_t *);
+ if (narenas_auto > chunksize / sizeof(arena_t *)) {
+ narenas_auto = chunksize / sizeof(arena_t *);
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
- narenas);
+ narenas_auto);
}
+ narenas_total = narenas_auto;
/* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
if (arenas == NULL) {
malloc_mutex_unlock(&init_lock);
return (true);
@@ -782,7 +813,7 @@ malloc_init_hard(void)
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
*/
- memset(arenas, 0, sizeof(arena_t *) * narenas);
+ memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */
arenas[0] = init_arenas[0];
@@ -1267,11 +1298,10 @@ je_valloc(size_t size)
* passed an extra argument for the caller return address, which will be
* ignored.
*/
-JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
- je_realloc;
-JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
+JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
+JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
je_memalign;
#endif
@@ -1284,7 +1314,7 @@ JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
*/
size_t
-je_malloc_usable_size(const void *ptr)
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
@@ -1348,18 +1378,19 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_INLINE void *
-iallocm(size_t usize, size_t alignment, bool zero)
+iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena)
{
assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
alignment)));
if (alignment != 0)
- return (ipalloc(usize, alignment, zero));
+ return (ipallocx(usize, alignment, zero, try_tcache, arena));
else if (zero)
- return (icalloc(usize));
+ return (icallocx(usize, try_tcache, arena));
else
- return (imalloc(usize));
+ return (imallocx(usize, try_tcache, arena));
}
int
@@ -1370,6 +1401,9 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ arena_t *arena;
+ bool try_tcache;
assert(ptr != NULL);
assert(size != 0);
@@ -1377,6 +1411,14 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
if (malloc_init())
goto label_oom;
+ if (arena_ind != UINT_MAX) {
+ arena = arenas[arena_ind];
+ try_tcache = false;
+ } else {
+ arena = NULL;
+ try_tcache = true;
+ }
+
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
if (usize == 0)
goto label_oom;
@@ -1393,18 +1435,19 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
alignment);
assert(usize_promoted != 0);
- p = iallocm(usize_promoted, alignment, zero);
+ p = iallocm(usize_promoted, alignment, zero,
+ try_tcache, arena);
if (p == NULL)
goto label_oom;
arena_prof_promoted(p, usize);
} else {
- p = iallocm(usize, alignment, zero);
+ p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
}
prof_malloc(p, usize, cnt);
} else {
- p = iallocm(usize, alignment, zero);
+ p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
goto label_oom;
}
@@ -1441,6 +1484,9 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
bool no_move = flags & ALLOCM_NO_MOVE;
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ bool try_tcache_alloc, try_tcache_dalloc;
+ arena_t *arena;
assert(ptr != NULL);
assert(*ptr != NULL);
@@ -1448,6 +1494,19 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
+ if (arena_ind != UINT_MAX) {
+ arena_chunk_t *chunk;
+ try_tcache_alloc = true;
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
+ try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
+ arenas[arena_ind]);
+ arena = arenas[arena_ind];
+ } else {
+ try_tcache_alloc = true;
+ try_tcache_dalloc = true;
+ arena = NULL;
+ }
+
p = *ptr;
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
@@ -1474,9 +1533,10 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
&& ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
<= SMALL_MAXCLASS) {
- q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero, no_move);
+ alignment, zero, no_move, try_tcache_alloc,
+ try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
if (max_usize < PAGE) {
@@ -1485,7 +1545,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
} else
usize = isalloc(q, config_prof);
} else {
- q = iralloc(p, size, extra, alignment, zero, no_move);
+ q = irallocx(p, size, extra, alignment, zero, no_move,
+ try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
usize = isalloc(q, config_prof);
@@ -1502,7 +1563,8 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
old_size = isalloc(p, false);
old_rzsize = u2rz(old_size);
}
- q = iralloc(p, size, extra, alignment, zero, no_move);
+ q = irallocx(p, size, extra, alignment, zero, no_move,
+ try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
goto label_err;
if (config_stats)
@@ -1563,10 +1625,19 @@ je_dallocm(void *ptr, int flags)
{
size_t usize;
size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ bool try_tcache;
assert(ptr != NULL);
assert(malloc_initialized || IS_INITIALIZER);
+ if (arena_ind != UINT_MAX) {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ try_tcache = (chunk == ptr || chunk->arena !=
+ arenas[arena_ind]);
+ } else
+ try_tcache = true;
+
UTRACE(ptr, 0, 0);
if (config_stats || config_valgrind)
usize = isalloc(ptr, config_prof);
@@ -1579,7 +1650,7 @@ je_dallocm(void *ptr, int flags)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr);
- iqalloc(ptr);
+ iqallocx(ptr, try_tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
return (ALLOCM_SUCCESS);
@@ -1616,6 +1687,27 @@ je_nallocm(size_t *rsize, size_t size, int flags)
* malloc during fork().
*/
+/*
+ * If an application creates a thread before doing any allocation in the main
+ * thread, then calls fork(2) in the main thread followed by memory allocation
+ * in the child process, a race can occur that results in deadlock within the
+ * child: the main thread may have forked while the created thread had
+ * partially initialized the allocator. Ordinarily jemalloc prevents
+ * fork/malloc races via the following functions it registers during
+ * initialization using pthread_atfork(), but of course that does no good if
+ * the allocator isn't fully initialized at fork time. The following library
+ * constructor is a partial solution to this problem. It may still be possible to
+ * trigger the deadlock described above, but doing so would involve forking via
+ * a library constructor that runs before jemalloc's runs.
+ */
+JEMALLOC_ATTR(constructor)
+static void
+jemalloc_constructor(void)
+{
+
+ malloc_init();
+}
+
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
@@ -1633,14 +1725,16 @@ _malloc_prefork(void)
assert(malloc_initialized);
/* Acquire all mutexes in a safe order. */
+ ctl_prefork();
malloc_mutex_prefork(&arenas_lock);
- for (i = 0; i < narenas; i++) {
+ for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_prefork(arenas[i]);
}
+ prof_prefork();
+ chunk_prefork();
base_prefork();
huge_prefork();
- chunk_dss_prefork();
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -1660,14 +1754,16 @@ _malloc_postfork(void)
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
- chunk_dss_postfork_parent();
huge_postfork_parent();
base_postfork_parent();
- for (i = 0; i < narenas; i++) {
+ chunk_postfork_parent();
+ prof_postfork_parent();
+ for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_parent(arenas[i]);
}
malloc_mutex_postfork_parent(&arenas_lock);
+ ctl_postfork_parent();
}
void
@@ -1678,14 +1774,16 @@ jemalloc_postfork_child(void)
assert(malloc_initialized);
/* Release all mutexes, now that fork() has completed. */
- chunk_dss_postfork_child();
huge_postfork_child();
base_postfork_child();
- for (i = 0; i < narenas; i++) {
+ chunk_postfork_child();
+ prof_postfork_child();
+ for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
arena_postfork_child(arenas[i]);
}
malloc_mutex_postfork_child(&arenas_lock);
+ ctl_postfork_child();
}
/******************************************************************************/
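The je_allocm()/je_rallocm()/je_dallocm() changes above thread an explicit arena index and a tcache on/off decision through the experimental allocation path; the arena is encoded in the upper bits of the flags word and decoded as ((flags >> 8) - 1). A minimal caller-side sketch, assuming the experimental allocm()/dallocm() interface and an ALLOCM_ARENA()-style flag macro as declared in <malloc_np.h> for this jemalloc import (the macro name is jemalloc's and is not re-verified against this exact revision):

	#include <stdio.h>
	#include <malloc_np.h>		/* mallctl(), experimental allocm()/dallocm() */

	int
	main(void)
	{
		unsigned narenas, arena;
		size_t sz = sizeof(narenas);
		size_t rsize;
		void *p;

		/* Ask jemalloc how many arenas were created automatically. */
		if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0)
			return (1);
		arena = 0;	/* arena 0 always exists; any index < narenas is accepted */
		printf("narenas=%u, allocating from arena %u\n", narenas, arena);

		/*
		 * ALLOCM_ARENA(a) encodes (a + 1) << 8 into the flags, matching the
		 * "arena_ind = (flags >> 8) - 1" decoding added to je_allocm() above.
		 */
		if (allocm(&p, &rsize, 4096, ALLOCM_ARENA(arena)) != ALLOCM_SUCCESS)
			return (1);
		printf("got %zu usable bytes\n", rsize);

		/* The same hint on free lets je_dallocm() decide whether to use the tcache. */
		dallocm(p, ALLOCM_ARENA(arena));
		return (0);
	}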
diff --git a/contrib/jemalloc/src/mutex.c b/contrib/jemalloc/src/mutex.c
index 4a90a05..6b6f438 100644
--- a/contrib/jemalloc/src/mutex.c
+++ b/contrib/jemalloc/src/mutex.c
@@ -64,7 +64,7 @@ pthread_create(pthread_t *__restrict thread,
/******************************************************************************/
#ifdef JEMALLOC_MUTEX_INIT_CB
-int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
__weak_reference(_pthread_mutex_init_calloc_cb_stub,
diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
index de1d392..04964ef 100644
--- a/contrib/jemalloc/src/prof.c
+++ b/contrib/jemalloc/src/prof.c
@@ -1270,4 +1270,46 @@ prof_boot2(void)
return (false);
}
+void
+prof_prefork(void)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ malloc_mutex_lock(&bt2ctx_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_lock(&ctx_locks[i]);
+ }
+}
+
+void
+prof_postfork_parent(void)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_parent(&ctx_locks[i]);
+ malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(&bt2ctx_mtx);
+ }
+}
+
+void
+prof_postfork_child(void)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_child(&ctx_locks[i]);
+ malloc_mutex_postfork_child(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(&bt2ctx_mtx);
+ }
+}
+
/******************************************************************************/
diff --git a/contrib/jemalloc/src/rtree.c b/contrib/jemalloc/src/rtree.c
index eb0ff1e2..90c6935 100644
--- a/contrib/jemalloc/src/rtree.c
+++ b/contrib/jemalloc/src/rtree.c
@@ -44,3 +44,24 @@ rtree_new(unsigned bits)
return (ret);
}
+
+void
+rtree_prefork(rtree_t *rtree)
+{
+
+ malloc_mutex_prefork(&rtree->mutex);
+}
+
+void
+rtree_postfork_parent(rtree_t *rtree)
+{
+
+ malloc_mutex_postfork_parent(&rtree->mutex);
+}
+
+void
+rtree_postfork_child(rtree_t *rtree)
+{
+
+ malloc_mutex_postfork_child(&rtree->mutex);
+}
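The prof_prefork()/prof_postfork_*() functions above and the rtree hooks just added follow the pattern jemalloc applies to all of its locks around fork(2): acquire every mutex in a fixed order before the fork, then release or re-initialize them in the parent and child afterwards, wired up through pthread_atfork() during initialization. A generic sketch of that pattern, with hypothetical handler names (an illustration of the mechanism, not jemalloc's code):

	#include <pthread.h>

	static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

	static void
	lib_prefork(void)
	{
		pthread_mutex_lock(&state_lock);	/* quiesce the library before fork() */
	}

	static void
	lib_postfork_parent(void)
	{
		pthread_mutex_unlock(&state_lock);	/* parent: simply release */
	}

	static void
	lib_postfork_child(void)
	{
		/* Child: the former owner thread no longer exists, so re-initialize. */
		pthread_mutex_init(&state_lock, NULL);
	}

	static void __attribute__((constructor))
	lib_init(void)
	{
		pthread_atfork(lib_prefork, lib_postfork_parent, lib_postfork_child);
	}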
diff --git a/contrib/jemalloc/src/stats.c b/contrib/jemalloc/src/stats.c
index 433b80d..43f87af 100644
--- a/contrib/jemalloc/src/stats.c
+++ b/contrib/jemalloc/src/stats.c
@@ -206,6 +206,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i, bool bins, bool large)
{
unsigned nthreads;
+ const char *dss;
size_t page, pactive, pdirty, mapped;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
@@ -218,6 +219,9 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
malloc_cprintf(write_cb, cbopaque,
"assigned threads: %u\n", nthreads);
+ CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
+ malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
+ dss);
CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
@@ -370,6 +374,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
"Run-time option settings:\n");
OPT_WRITE_BOOL(abort)
OPT_WRITE_SIZE_T(lg_chunk)
+ OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
@@ -400,7 +405,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
CTL_GET("arenas.narenas", &uv, unsigned);
- malloc_cprintf(write_cb, cbopaque, "Max arenas: %u\n", uv);
+ malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
sizeof(void *));
@@ -472,7 +477,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("stats.chunks.current", &chunks_current, size_t);
malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
"highchunks curchunks\n");
- malloc_cprintf(write_cb, cbopaque, " %13"PRIu64"%13zu%13zu\n",
+ malloc_cprintf(write_cb, cbopaque,
+ " %13"PRIu64" %12zu %12zu\n",
chunks_total, chunks_high, chunks_current);
/* Print huge stats. */
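The "dss" lines added to the stats output correspond to new mallctl names, so the same information can be read programmatically. A short sketch, assuming the mallctl() interface from <malloc_np.h> and the "opt.dss" / "stats.arenas.<i>.dss" names implied by the stats code above:

	#include <stdio.h>
	#include <malloc_np.h>

	int
	main(void)
	{
		const char *dss;
		size_t sz = sizeof(dss);

		/* Global default precedence for sbrk(2)/dss-backed chunks. */
		if (mallctl("opt.dss", &dss, &sz, NULL, 0) == 0)
			printf("opt.dss: %s\n", dss);

		/* Per-arena value, as printed for arena 0 by stats_arena_print(). */
		sz = sizeof(dss);
		if (mallctl("stats.arenas.0.dss", &dss, &sz, NULL, 0) == 0)
			printf("arena 0 dss precedence: %s\n", dss);
		return (0);
	}

At start-up the default can be chosen through jemalloc's usual configuration mechanisms (for example MALLOC_CONF=dss:primary), with values matching dss_prec_names as parsed by the malloc_conf_init() change above; the exact value strings are jemalloc's and are not re-verified here.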
diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c
index 60244c4..47e14f3 100644
--- a/contrib/jemalloc/src/tcache.c
+++ b/contrib/jemalloc/src/tcache.c
@@ -288,7 +288,7 @@ tcache_create(arena_t *arena)
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
- tcache = (tcache_t *)icalloc(size);
+ tcache = (tcache_t *)icallocx(size, false, arena);
if (tcache == NULL)
return (NULL);
@@ -364,7 +364,7 @@ tcache_destroy(tcache_t *tcache)
arena_dalloc_large(arena, chunk, tcache);
} else
- idalloc(tcache);
+ idallocx(tcache, false);
}
void
diff --git a/contrib/jemalloc/src/util.c b/contrib/jemalloc/src/util.c
index f94799f..df1c5d5 100644
--- a/contrib/jemalloc/src/util.c
+++ b/contrib/jemalloc/src/util.c
@@ -393,7 +393,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case '\0': goto label_out;
case '%': {
bool alt_form = false;
- bool zero_pad = false;
bool left_justify = false;
bool plus_space = false;
bool plus_plus = false;
@@ -414,10 +413,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(alt_form == false);
alt_form = true;
break;
- case '0':
- assert(zero_pad == false);
- zero_pad = true;
- break;
case '-':
assert(left_justify == false);
left_justify = true;
diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
index 9087852..c2db11a 100644
--- a/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/contrib/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -416,12 +416,12 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
assert((!In64BitMode || HasX86_64) &&
"64-bit code requested on a subtarget that doesn't support it!");
- // Stack alignment is 16 bytes on Darwin, FreeBSD, Linux and Solaris (both
- // 32 and 64 bit) and for all 64-bit targets.
+ // Stack alignment is 16 bytes on Darwin, Linux and Solaris (both 32 and 64
+ // bit) and for all 64-bit targets.
if (StackAlignOverride)
stackAlignment = StackAlignOverride;
- else if (isTargetDarwin() || isTargetFreeBSD() || isTargetLinux() ||
- isTargetSolaris() || In64BitMode)
+ else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
+ In64BitMode)
stackAlignment = 16;
}
diff --git a/contrib/top/commands.c b/contrib/top/commands.c
index 49472fd..4d987baa 100644
--- a/contrib/top/commands.c
+++ b/contrib/top/commands.c
@@ -80,11 +80,11 @@ n or # - change number of processes to display\n", stdout);
#ifdef ORDER
if (displaymode == DISP_CPU)
fputs("\
-o - specify sort order (pri, size, res, cpu, time, threads, jid)\n",
+o - specify sort order (pri, size, res, cpu, time, threads, jid, pid)\n",
stdout);
else
fputs("\
-o - specify sort order (vcsw, ivcsw, read, write, fault, total, jid)\n",
+o - specify sort order (vcsw, ivcsw, read, write, fault, total, jid, pid)\n",
stdout);
#endif
fputs("\
diff --git a/contrib/tzdata/asia b/contrib/tzdata/asia
index 7d12e8b..25d161d 100644
--- a/contrib/tzdata/asia
+++ b/contrib/tzdata/asia
@@ -1204,7 +1204,7 @@ Rule Zion 2012 only - Sep 23 2:00 0 S
# past, approved sending the proposed June 2011 changes to the Time
# Decree Law back to the Knesset for second and third (final) votes
# before the upcoming elections on Jan. 22, 2013. Hence, although the
-# changes are not yet law, they are expected to be so before Februray 2013.
+# changes are not yet law, they are expected to be so before February 2013.
#
# As of 2013, DST starts at 02:00 on the Friday before the last Sunday in March.
# DST ends at 02:00 on the first Sunday after October 1, unless it occurs on the
diff --git a/contrib/tzdata/northamerica b/contrib/tzdata/northamerica
index 1f784e0..18a4228 100644
--- a/contrib/tzdata/northamerica
+++ b/contrib/tzdata/northamerica
@@ -2797,6 +2797,13 @@ Zone America/Costa_Rica -5:36:20 - LMT 1890 # San Jose
# http://www.timeanddate.com/news/time/cuba-starts-dst-2012.html
# </a>
+# From Steffen Thorsen (2012-11-03):
+# Radio Reloj and many other sources report that Cuba is changing back
+# to standard time on 2012-11-04:
+# http://www.radioreloj.cu/index.php/noticias-radio-reloj/36-nacionales/9961-regira-horario-normal-en-cuba-desde-el-domingo-cuatro-de-noviembre
+# From Paul Eggert (2012-11-03):
+# For now, assume the future rule is first Sunday in November.
+
# Rule NAME FROM TO TYPE IN ON AT SAVE LETTER/S
Rule Cuba 1928 only - Jun 10 0:00 1:00 D
Rule Cuba 1928 only - Oct 10 0:00 0 S
@@ -2834,7 +2841,7 @@ Rule Cuba 2009 2010 - Mar Sun>=8 0:00s 1:00 D
Rule Cuba 2011 only - Mar Sun>=15 0:00s 1:00 D
Rule Cuba 2011 only - Nov 13 0:00s 0 S
Rule Cuba 2012 only - Apr 1 0:00s 1:00 D
-Rule Cuba 2012 max - Oct lastSun 0:00s 0 S
+Rule Cuba 2012 max - Nov Sun>=1 0:00s 0 S
Rule Cuba 2013 max - Mar Sun>=8 0:00s 1:00 D
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
diff --git a/etc/pccard_ether b/etc/pccard_ether
index 841c1a0..5bd9c33 100755
--- a/etc/pccard_ether
+++ b/etc/pccard_ether
@@ -123,4 +123,5 @@ else
fi
load_rc_config pccard_ether
+load_rc_config network
run_rc_command $args
diff --git a/lib/Makefile b/lib/Makefile
index d536ee0..d587574 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -212,6 +212,11 @@ _libproc= libproc
_librtld_db= librtld_db
.endif
+.if ${MACHINE_CPUARCH} == "powerpc"
+_libproc= libproc
+_librtld_db= librtld_db
+.endif
+
.if ${MK_OPENSSL} != "no"
_libmp= libmp
.endif
diff --git a/lib/libc/gen/isnan.c b/lib/libc/gen/isnan.c
index ec81362..72c2868 100644
--- a/lib/libc/gen/isnan.c
+++ b/lib/libc/gen/isnan.c
@@ -33,8 +33,14 @@
/*
* XXX These routines belong in libm, but they must remain in libc for
* binary compat until we can bump libm's major version number.
+ *
+ * Note this only applies to the dynamic versions of libm and libc, so
+ * for the static and profiled versions we stub out the definitions.
+ * Otherwise you cannot link statically to libm and libc at the same
+ * time when calling both functions.
*/
+#ifdef PIC
__weak_reference(__isnan, isnan);
__weak_reference(__isnanf, isnanf);
@@ -55,3 +61,4 @@ __isnanf(float f)
u.f = f;
return (u.bits.exp == 255 && u.bits.man != 0);
}
+#endif /* PIC */
diff --git a/lib/libc/stdio/printf.3 b/lib/libc/stdio/printf.3
index 0d9339e..05c30dc 100644
--- a/lib/libc/stdio/printf.3
+++ b/lib/libc/stdio/printf.3
@@ -267,7 +267,7 @@ number produced by a signed conversion.
A
.Cm +
overrides a space if both are used.
-.It Sq Cm '
+.It So "'" Sc (apostrophe)
Decimal conversions
.Cm ( d , u ,
or
diff --git a/lib/libcrypt/tests/Makefile b/lib/libcrypt/tests/Makefile
new file mode 100644
index 0000000..3190dbe
--- /dev/null
+++ b/lib/libcrypt/tests/Makefile
@@ -0,0 +1,10 @@
+# $FreeBSD$
+
+# exercise libcrypt
+
+TESTS_C= crypt_tests
+
+CFLAGS+= -I${.CURDIR:H}
+LDADD+= -L${.OBJDIR:H} -lcrypt
+
+.include <atf.test.mk>
diff --git a/lib/libcrypt/tests/crypt_tests.c b/lib/libcrypt/tests/crypt_tests.c
new file mode 100644
index 0000000..3331d12
--- /dev/null
+++ b/lib/libcrypt/tests/crypt_tests.c
@@ -0,0 +1,54 @@
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <crypt.h>
+#include <unistd.h>
+
+#include <atf-c.h>
+
+#define LEET "0.s0.l33t"
+
+ATF_TC(md5);
+ATF_TC_HEAD(md5, tc)
+{
+
+ atf_tc_set_md_var(tc, "descr", "Tests the MD5 based password hash");
+}
+
+ATF_TC_BODY(md5, tc)
+{
+ const char want[] = "$1$deadbeef$0Huu6KHrKLVWfqa4WljDE0";
+ char *pw;
+
+ pw = crypt(LEET, want);
+ ATF_CHECK_STREQ(pw, want);
+}
+
+ATF_TC(invalid);
+ATF_TC_HEAD(invalid, tc)
+{
+
+ atf_tc_set_md_var(tc, "descr", "Tests that invalid password fails");
+}
+
+ATF_TC_BODY(invalid, tc)
+{
+ const char want[] = "$1$cafebabe$0Huu6KHrKLVWfqa4WljDE0";
+ char *pw;
+
+ pw = crypt(LEET, want);
+ ATF_CHECK(strcmp(pw, want) != 0);
+}
+
+/*
+ * This function must not do anything except enumerate
+ * the test cases, else atf-run is likely to be upset.
+ */
+ATF_TP_ADD_TCS(tp)
+{
+
+ ATF_TP_ADD_TC(tp, md5);
+ ATF_TP_ADD_TC(tp, invalid);
+ return atf_no_error();
+}
diff --git a/lib/libproc/proc_bkpt.c b/lib/libproc/proc_bkpt.c
index e16b0fc..c15e53c 100644
--- a/lib/libproc/proc_bkpt.c
+++ b/lib/libproc/proc_bkpt.c
@@ -47,6 +47,9 @@ __FBSDID("$FreeBSD$");
#elif defined(__mips__)
#define BREAKPOINT_INSTR 0xd /* break */
#define BREAKPOINT_INSTR_SZ 4
+#elif defined(__powerpc__)
+#define BREAKPOINT_INSTR 0x7fe00008 /* trap */
+#define BREAKPOINT_INSTR_SZ 4
#else
#error "Add support for your architecture"
#endif
diff --git a/lib/libproc/proc_regs.c b/lib/libproc/proc_regs.c
index c299b9b..aac0125 100644
--- a/lib/libproc/proc_regs.c
+++ b/lib/libproc/proc_regs.c
@@ -60,6 +60,8 @@ proc_regget(struct proc_handle *phdl, proc_reg_t reg, unsigned long *regvalue)
*regvalue = regs.r_eip;
#elif defined(__mips__)
*regvalue = regs.r_regs[PC];
+#elif defined(__powerpc__)
+ *regvalue = regs.pc;
#endif
break;
case REG_SP:
@@ -69,6 +71,8 @@ proc_regget(struct proc_handle *phdl, proc_reg_t reg, unsigned long *regvalue)
*regvalue = regs.r_esp;
#elif defined(__mips__)
*regvalue = regs.r_regs[SP];
+#elif defined(__powerpc__)
+ *regvalue = regs.fixreg[1];
#endif
break;
default:
@@ -99,6 +103,8 @@ proc_regset(struct proc_handle *phdl, proc_reg_t reg, unsigned long regvalue)
regs.r_eip = regvalue;
#elif defined(__mips__)
regs.r_regs[PC] = regvalue;
+#elif defined(__powerpc__)
+ regs.pc = regvalue;
#endif
break;
case REG_SP:
@@ -108,6 +114,8 @@ proc_regset(struct proc_handle *phdl, proc_reg_t reg, unsigned long regvalue)
regs.r_esp = regvalue;
#elif defined(__mips__)
regs.r_regs[PC] = regvalue;
+#elif defined(__powerpc__)
+ regs.fixreg[1] = regvalue;
#endif
break;
default:
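With the powerpc cases added, proc_regget() and proc_regset() expose the program counter and stack pointer uniformly on all supported architectures. A hedged sketch against the signatures shown in this diff; the struct proc_handle is assumed to be already attached and stopped, and the REG_PC/REG_SP names are taken from libproc's proc_reg_t:

	#include <sys/types.h>
	#include <stdio.h>
	#include <libproc.h>

	/* Print the stopped target's PC and SP, then write the PC back unchanged. */
	int
	dump_pc_sp(struct proc_handle *phdl)
	{
		unsigned long pc, sp;

		if (proc_regget(phdl, REG_PC, &pc) != 0 ||
		    proc_regget(phdl, REG_SP, &sp) != 0)
			return (-1);
		printf("pc=%#lx sp=%#lx\n", pc, sp);
		return (proc_regset(phdl, REG_PC, pc));
	}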
diff --git a/lib/msun/src/k_rem_pio2.c b/lib/msun/src/k_rem_pio2.c
index a2ffca6..3942441 100644
--- a/lib/msun/src/k_rem_pio2.c
+++ b/lib/msun/src/k_rem_pio2.c
@@ -45,7 +45,7 @@ __FBSDID("$FreeBSD$");
* z = (z-x[i])*2**24
*
*
- * y[] ouput result in an array of double precision numbers.
+ * y[] output result in an array of double precision numbers.
* The dimension of y[] is:
* 24-bit precision 1
* 53-bit precision 2
diff --git a/lib/msun/src/s_isnan.c b/lib/msun/src/s_isnan.c
index 0f544db..a54ded3 100644
--- a/lib/msun/src/s_isnan.c
+++ b/lib/msun/src/s_isnan.c
@@ -30,8 +30,9 @@
#include "fpmath.h"
-/* Provided by libc */
-#if 0
+/* Provided by libc.so */
+#ifndef PIC
+#undef isnan
int
isnan(double d)
{
@@ -40,7 +41,7 @@ isnan(double d)
u.d = d;
return (u.bits.exp == 2047 && (u.bits.manl != 0 || u.bits.manh != 0));
}
-#endif
+#endif /* !PIC */
int
__isnanf(float f)
diff --git a/release/Makefile b/release/Makefile
index 104670f..79a3e3f 100644
--- a/release/Makefile
+++ b/release/Makefile
@@ -83,7 +83,7 @@ kernel.txz:
src.txz:
mkdir -p ${DISTDIR}/usr
ln -fs ${WORLDDIR} ${DISTDIR}/usr/src
- cd ${DISTDIR} && tar cLvJf ${.OBJDIR}/src.txz --exclude .svn \
+ cd ${DISTDIR} && tar cLvJf ${.OBJDIR}/src.txz --exclude .svn --exclude .zfs \
--exclude CVS --exclude @ --exclude usr/src/release/dist usr/src
ports.txz:
diff --git a/release/doc/share/xml/release.ent b/release/doc/share/xml/release.ent
index 60b7130..dcfb897 100644
--- a/release/doc/share/xml/release.ent
+++ b/release/doc/share/xml/release.ent
@@ -6,23 +6,23 @@
<!-- Version of the OS we're describing. This needs to be updated
with each new release. -->
-<!ENTITY release.current "9.0-CURRENT">
+<!ENTITY release.current "10.0-CURRENT">
<!-- The previous version used for comparison in the "What's New"
section. For -CURRENT, we might point back to the last
branchpoint. -->
-<!ENTITY release.prev "8.0-RELEASE">
+<!ENTITY release.prev "9.0-RELEASE">
<!-- The previous stable release, useful for pointing user's at the
release they SHOULD be running if they don't want the bleeding
edge. -->
-<!ENTITY release.prev.stable "8.0-RELEASE">
+<!ENTITY release.prev.stable "8.3-RELEASE">
<!-- The next version to be released, usually used for snapshots. -->
-<!ENTITY release.next "9.0-RELEASE">
+<!ENTITY release.next "9.1-RELEASE">
<!-- The name of this branch. -->
-<!ENTITY release.branch "9-CURRENT">
+<!ENTITY release.branch "10-CURRENT">
<!-- The URL for obtaining this version of FreeBSD. -->
<!ENTITY release.url "http://www.FreeBSD.org/snapshots/">
@@ -39,7 +39,7 @@
<!ENTITY release.manpath.xorg "7.5.1">
<!ENTITY release.manpath.netbsd "5.1">
<!ENTITY release.manpath.freebsd-ports "Ports">
-<!ENTITY release.manpath.freebsd "9-current">
+<!ENTITY release.manpath.freebsd "10-current">
<!-- Text constants which probably don't need to be changed.-->
diff --git a/sbin/ifconfig/ifconfig.8 b/sbin/ifconfig/ifconfig.8
index f22cc50..f4c6daa 100644
--- a/sbin/ifconfig/ifconfig.8
+++ b/sbin/ifconfig/ifconfig.8
@@ -28,7 +28,7 @@
.\" From: @(#)ifconfig.8 8.3 (Berkeley) 1/5/94
.\" $FreeBSD$
.\"
-.Dd July 9, 2012
+.Dd November 7, 2012
.Dt IFCONFIG 8
.Os
.Sh NAME
@@ -142,7 +142,7 @@ The link-level
address
is specified as a series of colon-separated hex digits.
This can be used to
-e.g.\& set a new MAC address on an ethernet interface, though the
+e.g.,\& set a new MAC address on an ethernet interface, though the
mechanism used is not ethernet-specific.
If the interface is already
up when this option is used, it will be briefly brought down and
@@ -301,7 +301,7 @@ Specify interface FIB.
A FIB
.Ar fib_number
is assigned to all frames or packets received on that interface.
-The FIB is not inherited, e.g. vlans or other sub-interfaces will use
+The FIB is not inherited, e.g., vlans or other sub-interfaces will use
the default FIB (0) irrespective of the parent interface's FIB.
The kernel needs to be tuned to support more than the default FIB
using the
@@ -1003,7 +1003,7 @@ For example, if a device is capable of operating on channel 6
with 802.11n and 802.11g then one can specify that g-only use
should be used by specifying ``6:g''.
Similarly the channel width can be specified by appending it
-with ``/''; e.g. ``6/40'' specifies a 40MHz wide channel,
+with ``/''; e.g., ``6/40'' specifies a 40MHz wide channel,
These attributes can be combined as in: ``6:ht/40''.
The full set of flags specified following a ``:'' are:
.Cm a
@@ -1036,7 +1036,7 @@ and
In addition,
a 40MHz HT channel specification may include the location
of the extension channel by appending ``+'' or ``-'' for above and below,
-respectively; e.g. ``2437:ht/40+'' specifies 40MHz wide HT operation
+respectively; e.g., ``2437:ht/40+'' specifies 40MHz wide HT operation
with the center channel at frequency 2437 and the extension channel above.
.It Cm country Ar name
Set the country code to use in calculating the regulatory constraints
@@ -1046,7 +1046,7 @@ will operation on the channels, and the maximum transmit power that
can be used on a channel are defined by this setting.
Country/Region codes are specified as a 2-character abbreviation
defined by ISO 3166 or using a longer, but possibly ambiguous, spelling;
-e.g. "ES" and "Spain".
+e.g., "ES" and "Spain".
The set of country codes are taken from /etc/regdomain.xml and can also
be viewed with the ``list countries'' request.
Note that not all devices support changing the country code from a default
@@ -1063,7 +1063,7 @@ DFS embodies several facilities including detection of overlapping
radar signals, dynamic transmit power control, and channel selection
according to a least-congested criteria.
DFS support is mandatory for some 5GHz frequencies in certain
-locales (e.g. ETSI).
+locales (e.g., ETSI).
By default DFS is enabled according to the regulatory definitions
specified in /etc/regdomain.xml and the current country code, regdomain,
and channel.
@@ -1115,7 +1115,7 @@ specifies the number of beacon intervals between DTIM
and must be in the range 1 to 15.
By default DTIM is 1 (i.e., DTIM occurs at each beacon).
.It Cm quiet
-Enable the use of quiet IE. Hostap will use this to silent other
+Enable the use of quiet IE. Hostap will use this to silence other
stations to reduce interference for radar detection when
operating on 5GHz frequency and doth support is enabled.
Use
@@ -1168,7 +1168,7 @@ Enable Dynamic WDS (DWDS) support.
DWDS is a facility by which 4-address traffic can be carried between
stations operating in infrastructure mode.
A station first associates to an access point and authenticates using
-normal procedures (e.g. WPA).
+normal procedures (e.g., WPA).
Then 4-address frames are passed to carry traffic for stations
operating on either side of the wireless link.
DWDS extends the normal WDS mechanism by leveraging existing security
@@ -1186,7 +1186,7 @@ When DWDS is enabled on a station, traffic with a destination address
different from the peer station are encapsulated in a 4-address frame
and transmitted to the peer.
All 4-address traffic uses the security information of the stations
-(e.g. cryptographic keys).
+(e.g., cryptographic keys).
A station is associated using 802.11n facilities may transport
4-address traffic using these same mechanisms; this depends on available
resources and capabilities of the device.
@@ -1236,7 +1236,7 @@ Stations negotiate use of these facilities, termed HT20 and HT40,
when they associate.
To disable all use of 802.11n use
.Fl ht .
-To disable use of HT20 (e.g. to force only HT40 use) use
+To disable use of HT20 (e.g., to force only HT40 use) use
.Fl ht20 .
To disable use of HT40 use
.Fl ht40 .
@@ -1250,7 +1250,7 @@ Auto Channel Selection is used to locate a channel to operate on,
HT configuration controls whether legacy, HT20, or HT40 operation is setup
on the selected channel.
If a fixed channel is specified for a station then HT configuration can
-be given as part of the channel specification; e.g. 6:ht/20 to setup
+be given as part of the channel specification; e.g., 6:ht/20 to setup
HT20 operation on channel 6.
.It Cm htcompat
Enable use of compatibility support for pre-802.11n devices (default).
@@ -1506,13 +1506,13 @@ The default setting is 6 but drivers may override this with a value
they choose.
.It Cm mcastrate Ar rate
Set the rate for transmitting multicast/broadcast frames.
-Rates are specified as megabits/second in decimal; e.g.\& 5.5 for 5.5 Mb/s.
+Rates are specified as megabits/second in decimal; e.g.,\& 5.5 for 5.5 Mb/s.
This rate should be valid for the current operating conditions;
if an invalid rate is specified drivers are free to chose an
appropriate rate.
.It Cm mgtrate Ar rate
Set the rate for transmitting management and/or control frames.
-Rates are specified as megabits/second in decimal; e.g.\& 5.5 for 5.5 Mb/s.
+Rates are specified as megabits/second in decimal; e.g.,\& 5.5 for 5.5 Mb/s.
.It Cm outdoor
Set the location to use in calculating regulatory constraints.
The location is also advertised in beacon and probe response frames
@@ -1672,7 +1672,7 @@ request can be used to show recent scan results without
initiating a new scan.
.It Cm scanvalid Ar threshold
Set the maximum time the scan cache contents are considered valid;
-i.e. will be used without first triggering a scan operation to
+i.e., will be used without first triggering a scan operation to
refresh the data.
The
.Ar threshold
@@ -1734,7 +1734,7 @@ When operating with TDMA, setup a BSS with
slots.
The slot count may be at most 8.
The current implementation is only tested with two stations
-(i.e. point to point applications).
+(i.e., point to point applications).
This setting is only meaningful when a station is configured as slot 0;
other stations adopt this setting from the BSS they join.
By default
@@ -1758,7 +1758,7 @@ is set to 10 milliseconds.
When operating with TDMA, setup a BSS such that beacons are transmitted every
.Ar intval
superframes to synchronize the TDMA slot timing.
-A superframe is defined as the number of slots times the slot length; e.g.
+A superframe is defined as the number of slots times the slot length; e.g.,
a BSS with two slots of 10 milliseconds has a 20 millisecond superframe.
The beacon interval may not be zero.
A lower setting of
@@ -1784,7 +1784,7 @@ the driver will use the setting closest to the specified value.
Not all adapters support changing the transmit power.
.It Cm ucastrate Ar rate
Set a fixed rate for transmitting unicast frames.
-Rates are specified as megabits/second in decimal; e.g.\& 5.5 for 5.5 Mb/s.
+Rates are specified as megabits/second in decimal; e.g.,\& 5.5 for 5.5 Mb/s.
This rate should be valid for the current operating conditions;
if an invalid rate is specified drivers are free to chose an
appropriate rate.
@@ -2519,7 +2519,7 @@ protocol on an interface:
Set the virtual host ID.
This is a required setting to initiate
.Xr carp 4 .
-If the virtual host ID doesn't exist yet, it is created and attached to the
+If the virtual host ID does not exist yet, it is created and attached to the
interface, otherwise configuration of an existing vhid is adjusted.
If the
.Cm vhid
@@ -2628,9 +2628,6 @@ The
flag disables this behavior.
.Pp
Only the super-user may modify the configuration of a network interface.
-.Sh NOTES
-The media selection system is relatively new and only some drivers support
-it (or have need for it).
.Sh EXAMPLES
Assign the IPv4 address
.Li 192.0.2.10 ,
@@ -2714,7 +2711,9 @@ tried to alter an interface's configuration.
.Xr pfsync 4 ,
.Xr polling 4 ,
.Xr vlan 4 ,
+.Xr devd.conf 5 ,
.\" .Xr eon 5 ,
+.Xr devd 8 ,
.Xr rc 8 ,
.Xr routed 8 ,
.Xr jail 8 ,
diff --git a/secure/usr.bin/bdes/bdes.1 b/secure/usr.bin/bdes/bdes.1
index 367d32d..8863a84 100644
--- a/secure/usr.bin/bdes/bdes.1
+++ b/secure/usr.bin/bdes/bdes.1
@@ -141,7 +141,7 @@ or in CFB mode.
.It Fl o Ar N
Use
.Ar N Ns \-bit
-ouput feedback (OFB) mode.
+output feedback (OFB) mode.
Currently
.Ar N
must be a multiple of 8 between 8 and 64 inclusive (this does not conform
diff --git a/share/man/man4/icmp6.4 b/share/man/man4/icmp6.4
index 1c6026f..b052fc9 100644
--- a/share/man/man4/icmp6.4
+++ b/share/man/man4/icmp6.4
@@ -30,7 +30,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd December 20, 2004
+.Dd November 7, 2012
.Dt ICMP6 4
.Os
.Sh NAME
@@ -234,7 +234,7 @@ calls may be used to obtain and install the filter on ICMPv6 sockets at
option level
.Dv IPPROTO_ICMPV6
and name
-.Dv ICMPV6_FILTER
+.Dv ICMP6_FILTER
with a pointer to the
.Vt icmp6_filter
structure as the option value.
diff --git a/share/man/man4/ipsec.4 b/share/man/man4/ipsec.4
index d5f8b8f..c6a3f24 100644
--- a/share/man/man4/ipsec.4
+++ b/share/man/man4/ipsec.4
@@ -33,7 +33,7 @@
.Dt IPSEC 4
.Os
.Sh NAME
-.Nm IPsec
+.Nm ipsec
.Nd Internet Protocol Security protocol
.Sh SYNOPSIS
.Cd "options IPSEC"
diff --git a/share/mk/Makefile b/share/mk/Makefile
index 5bab862..fef17ec 100644
--- a/share/mk/Makefile
+++ b/share/mk/Makefile
@@ -1,20 +1,50 @@
# $FreeBSD$
# @(#)Makefile 8.1 (Berkeley) 6/8/93
-FILES= bsd.README
-FILES+= bsd.arch.inc.mk
-FILES+= bsd.compat.mk bsd.compiler.mk bsd.cpu.mk
-FILES+= bsd.dep.mk bsd.doc.mk bsd.dtrace.mk
-FILES+= bsd.endian.mk
-FILES+= bsd.files.mk bsd.crunchgen.mk bsd.incs.mk bsd.info.mk bsd.init.mk
-FILES+= bsd.kmod.mk
-FILES+= bsd.lib.mk bsd.libnames.mk bsd.links.mk bsd.man.mk bsd.nls.mk
-FILES+= bsd.obj.mk bsd.own.mk
-FILES+= bsd.port.mk bsd.port.options.mk bsd.port.post.mk
-FILES+= bsd.port.pre.mk bsd.port.subdir.mk bsd.prog.mk
-FILES+= bsd.snmpmod.mk bsd.subdir.mk bsd.sys.mk bsd.symver.mk
-FILES+= sys.mk version_gen.awk
+.include <bsd.own.mk>
+
+FILES= \
+ bsd.README \
+ bsd.arch.inc.mk \
+ bsd.compat.mk \
+ bsd.compiler.mk \
+ bsd.cpu.mk \
+ bsd.crunchgen.mk \
+ bsd.dep.mk \
+ bsd.doc.mk \
+ bsd.dtrace.mk \
+ bsd.endian.mk \
+ bsd.files.mk \
+ bsd.incs.mk \
+ bsd.info.mk \
+ bsd.init.mk \
+ bsd.kmod.mk \
+ bsd.lib.mk \
+ bsd.libnames.mk \
+ bsd.links.mk \
+ bsd.man.mk \
+ bsd.nls.mk \
+ bsd.obj.mk \
+ bsd.own.mk \
+ bsd.port.mk \
+ bsd.port.options.mk \
+ bsd.port.post.mk \
+ bsd.port.pre.mk \
+ bsd.port.subdir.mk \
+ bsd.prog.mk \
+ bsd.snmpmod.mk \
+ bsd.subdir.mk \
+ bsd.symver.mk \
+ bsd.sys.mk \
+ bsd.test.mk \
+ sys.mk \
+ version_gen.awk
+
NO_OBJ=
FILESDIR= ${BINDIR}/mk
+.if ${MK_ATF} != "no"
+FILES+= atf.test.mk
+.endif
+
.include <bsd.prog.mk>
diff --git a/share/mk/atf.test.mk b/share/mk/atf.test.mk
new file mode 100644
index 0000000..7e19087
--- /dev/null
+++ b/share/mk/atf.test.mk
@@ -0,0 +1,148 @@
+# $NetBSD$
+# $FreeBSD$
+#
+
+.include <bsd.init.mk>
+
+ATF_TESTS:=
+
+.if make(*test)
+TESTSDIR?= .
+.endif
+
+.if defined(ATF_TESTS_SUBDIRS)
+# Only visit subdirs when building, etc., because ATF does this on its own.
+.if !make(atf-test)
+SUBDIR+= ${ATF_TESTS_SUBDIRS}
+.endif
+ATF_TESTS+= ${ATF_TESTS_SUBDIRS}
+
+.include <bsd.subdir.mk>
+.endif
+
+.if defined(TESTS_C)
+ATF_TESTS+= ${TESTS_C}
+.for _T in ${TESTS_C}
+SRCS.${_T}?= ${_T}.c
+DPADD.${_T}+= ${LIBATF_C}
+LDADD.${_T}+= -latf-c
+.endfor
+.endif
+
+.if defined(TESTS_CXX)
+ATF_TESTS+= ${TESTS_CXX}
+.for _T in ${TESTS_CXX}
+SRCS.${_T}?= ${_T}${CXX_SUFFIX:U.cc}
+DPADD.${_T}+= ${LIBATF_CXX} ${LIBATF_C}
+LDADD.${_T}+= -latf-c++ -latf-c
+.endfor
+.endif
+
+.if defined(TESTS_SH)
+ATF_TESTS+= ${TESTS_SH}
+.for _T in ${TESTS_SH}
+CLEANFILES+= ${_T} ${_T}.tmp
+TESTS_SH_SRC_${_T}?= ${_T}.sh
+${_T}: ${TESTS_SH_SRC_${_T}}
+ echo '#! /usr/bin/atf-sh' > ${.TARGET}.tmp
+ cat ${.ALLSRC} >> ${.TARGET}.tmp
+ chmod +x ${.TARGET}.tmp
+ mv ${.TARGET}.tmp ${.TARGET}
+.endfor
+.endif
+
+ATFFILE?= auto
+
+.if ${ATFFILE:tl} != "no"
+FILES+= Atffile
+FILESDIR_Atffile= ${TESTSDIR}
+
+.if ${ATFFILE:tl} == "auto"
+CLEANFILES+= Atffile Atffile.tmp
+
+Atffile: Makefile
+ @{ echo 'Content-Type: application/X-atf-atffile; version="1"'; \
+ echo; \
+ echo '# Automatically generated by atf-test.mk.'; \
+ echo; \
+ echo 'prop: test-suite = "'`uname -o`'"'; \
+ echo; \
+ for tp in ${ATF_TESTS}; do \
+ echo "tp: $${tp}"; \
+ done; } >Atffile.tmp
+ @mv Atffile.tmp Atffile
+.endif
+.endif
+
+# Generate support variables for atf-test.
+#
+# atf-test can only work for native builds, i.e. a build host of a particular
+# OS building a release for the same OS version and architecture. The target
+# runs ATF, which is on the build host, and the tests execute code built for
+# the target host.
+#
+# Due to the dependencies of the binaries built by the source tree and how they
+# are used by tests, it is highly possible for an execution of "make test" to
+# report bogus results unless the new binaries are put in place.
+
+# XXX (gcooper): Executing ATF from outside the source tree is improper; it
+# should be built as part of the OS toolchain build for the host OS and
+# executed from there.
+ATF_PATH+= ${DESTDIR}/bin ${DESTDIR}/sbin ${DESTDIR}/usr/bin ${DESTDIR}/usr/sbin
+TESTS_ENV+= PATH=${ATF_PATH:ts:}:${PATH}
+
+ATF_BUILD_CC?= ${DESTDIR}/usr/bin/cc
+ATF_BUILD_CPP?= ${DESTDIR}/usr/bin/cpp
+ATF_BUILD_CXX?= ${DESTDIR}/usr/bin/c++
+ATF_CONFDIR?= ${DESTDIR}/etc
+ATF_INCLUDEDIR?= ${DESTDIR}/usr/include
+ATF_LIBDIR?= ${DESTDIR}/usr/lib
+ATF_LIBEXECDIR?= ${DESTDIR}/usr/libexec
+ATF_PKGDATADIR?= ${DESTDIR}/usr/share/atf
+ATF_SHELL?= ${DESTDIR}/bin/sh
+LD_LIBRARY_PATH?= ${TESTS_LD_LIBRARY_PATH:tW:S/ /:/g}
+
+ATF_ENV_VARS= \
+ ATF_BUILD_CC \
+ ATF_BUILD_CPP \
+ ATF_BUILD_CXX \
+ ATF_CONFDIR \
+ ATF_INCLUDEDIR \
+ ATF_LIBDIR \
+ ATF_LIBEXECDIR \
+ ATF_PKGDATADIR \
+ ATF_SHELL \
+
+.for v in ${ATF_ENV_VARS}
+.if !empty($v)
+TESTS_ENV+= $v=${$v}
+.endif
+.endfor
+
+_TESTS_FIFO= ${.OBJDIR}/atf-run.fifo
+_TESTS_LOG= ${.OBJDIR}/atf-run.log
+CLEANFILES+= ${_TESTS_FIFO} ${_TESTS_LOG}
+
+ATF_BIN?= ${DESTDIR}/usr/bin
+ATF_REPORT?= ${ATF_BIN}/atf-report
+ATF_RUN?= ${ATF_BIN}/atf-run
+
+.PHONY: realtest
+realtest:
+.if defined(TESTSDIR)
+ @set -e; \
+ cd ${DESTDIR}${TESTSDIR}; \
+ rm -f ${_TESTS_FIFO}; \
+ mkfifo ${_TESTS_FIFO}; \
+ tee ${_TESTS_LOG} < ${_TESTS_FIFO} | ${TESTS_ENV} ${ATF_REPORT} & \
+ set +e; \
+ ${TESTS_ENV} ${ATF_RUN} >> ${_TESTS_FIFO}; \
+ result=$${?}; \
+ wait; \
+ rm -f ${_TESTS_FIFO}; \
+ echo; \
+ echo "*** The verbatim output of atf-run has been saved to ${_TESTS_LOG}"; \
+ exit $${result}
+.endif
+
+.include <bsd.test.mk>
diff --git a/share/mk/bsd.progs.mk b/share/mk/bsd.progs.mk
index 531c2ef..b6236fe 100644
--- a/share/mk/bsd.progs.mk
+++ b/share/mk/bsd.progs.mk
@@ -1,350 +1,88 @@
-# from: @(#)bsd.prog.mk 5.26 (Berkeley) 6/25/91
# $FreeBSD$
-
-.include <bsd.init.mk>
-
-.SUFFIXES: .out .o .c .cc .cpp .cxx .C .m .y .l .ln .s .S .asm
-
-.if ${MK_MAN} == "no"
-NO_MAN=
+# $Id: progs.mk,v 1.11 2012/11/06 17:18:54 sjg Exp $
+#
+# @(#) Copyright (c) 2006, Simon J. Gerraty
+#
+# This file is provided in the hope that it will
+# be of use. There is absolutely NO WARRANTY.
+# Permission to copy, redistribute or otherwise
+# use this file is hereby granted provided that
+# the above copyright notice and this notice are
+# left intact.
+#
+# Please send copies of changes and bug-fixes to:
+# sjg@crufty.net
+#
+
+.MAIN: all
+
+.if defined(PROGS)
+
+# In meta mode, we can capture dependencies for _one_ of the progs.
+# If the makefile doesn't nominate one, we use the first.
+.ifndef UPDATE_DEPENDFILE_PROG
+UPDATE_DEPENDFILE_PROG = ${PROGS:[1]}
+.export UPDATE_DEPENDFILE_PROG
.endif
-# Legacy knobs
-.if defined(PROG) || defined(PROG_CXX)
-. if defined(PROG)
-PROGS= ${PROG}
-. endif
-. if defined(PROG_CXX)
-PROGS= ${PROG_CXX}
-PROGS_CXX= ${PROG_CXX}
-. endif
-# Loop once to keep pattern and avoid namespace pollution
-. for _P in ${PROGS}
-. if defined(INTERNALPROG)
-INTERNALPROG.${_P}=
-. endif
-. if !defined(NO_MAN)
-. if defined(MAN)
-MAN.${_P}= ${MAN}
-. else
-. for sect in 1 1aout 2 3 4 5 6 7 8 9
-. if defined(MAN${sect})
-MAN.${_P}= ${MAN${sect}}
-. endif
-. endfor
-. endif
-. endif # defined(NO_MAN)
-. if defined(NLSNAME) && !empty(NLSNAME)
-NLSNAME.${P}:= ${NLSNAME}
-. endif
-. if defined(OBJS)
-OBJS.${_P}:= ${OBJS}
-. endif
-. if defined(PRECIOUSPROG)
-PRECIOUSPROG.${_P}=
-. endif
-. if defined(PROGNAME)
-PROGNAME.${_P}= ${PROGNAME}
-. endif
-. if defined(SRCS)
-SRCS.${_P}:= ${SRCS}
-. endif
-. endfor
-.else # !defined(PROG) && !defined(PROG_CXX)
-. if defined(PROGS_CXX) && !empty(PROGS_CXX)
-PROGS+= ${PROGS_CXX}
-. endif
-.endif # defined(PROG) || defined(PROG_CXX)
-
-.if defined(PROGS_CXX) && !empty(PROGS_CXX)
-. for _P in ${PROGS_CXX}
-PROG_CXX.${_P}=
-. endfor
+.ifndef PROG
+# They may have asked us to build just one
+.for t in ${PROGS}
+.if make($t)
+PROG ?= $t
+.endif
+.endfor
.endif
-# Avoid recursive variables
-.undef NLSNAME
+.if defined(PROG)
+# just one of many
+PROG_VARS += CFLAGS CPPFLAGS CXXFLAGS DPADD DPLIBS LDADD MAN SRCS
+.for v in ${PROG_VARS:O:u}
+$v += ${${v}_${PROG}:U${${v}.${PROG}}}
+.endfor
-.if defined(COPTS)
-CFLAGS+=${COPTS}
+# for meta mode, there can be only one!
+.if ${PROG} == ${UPDATE_DEPENDFILE_PROG:Uno}
+UPDATE_DEPENDFILE ?= yes
.endif
-
-.if defined(DEBUG_FLAGS)
-. if ${MK_CTF} != "no" && ${DEBUG_FLAGS:M-g} != ""
-CTFFLAGS+= -g
-. endif
-CFLAGS+=${DEBUG_FLAGS}
-CXXFLAGS+=${DEBUG_FLAGS}
+UPDATE_DEPENDFILE ?= NO
+
+# ensure that we don't clobber each other's dependencies
+DEPENDFILE?= .depend.${PROG}
+# prog.mk will do the rest
+.else
+all: ${PROGS}
+
+# We cannot capture dependencies for meta mode here
+UPDATE_DEPENDFILE = NO
+# nor can we safely run in parallel.
+.NOTPARALLEL:
.endif
-
-STRIP?= -s
-
-.if ${MK_ASSERT_DEBUG} == "no"
-CFLAGS+= -DNDEBUG
-NO_WERROR=
.endif
-.for _P in ${PROGS}
-
-BINDIR.${_P}?= ${BINDIR}
-BINGRP.${_P}?= ${BINGRP}
-BINMODE.${_P}?= ${BINMODE}
-BINOWN.${_P}?= ${BINOWN}
-
-CFLAGS.${_P}+= ${CFLAGS}
-CXXFLAGS.${_P}+= ${CXXFLAGS}
-DPADD.${_P}+= ${DPADD}
-LDADD.${_P}+= ${LDADD}
-LDFLAGS.${_P}+= ${LDFLAGS}
-
-INSTALLFLAGS.${_P}?= ${INSTALLFLAGS}
-
-. if defined(PRECIOUSPROG.${_P})
-. if !defined(NO_FSCHG) && !defined(NO_FSCHG.${_P})
-INSTALLFLAGS.${_P}+= -fschg
-. endif
-INSTALLFLAGS.${_P}+= -S
-. endif
-
-NO_SHARED.${_P}?= ${NO_SHARED}
+# handle being called [bsd.]progs.mk
+.include <${.PARSEFILE:S,progs,prog,}>
-. if !defined(NLSDIR.${_P})
-NLSDIR.${_P}:= ${NLSDIR}
-. endif
-. undef NLSDIR
+.ifndef PROG
+PROGS_TARGETS += clean
-. if !empty(NO_SHARED.${_P}) && ${NO_SHARED.${_P}:tl} != "no"
-LDFLAGS.${_P}+= -static
-. endif
-
-. if defined(SRCS.${_P})
-
-_SRCS:= ${SRCS.${_P}}
-OBJS.${_P}+= ${_SRCS:N*.h:R:S/$/.o/g}
-
-. if target(beforelinking)
-${_P}: ${OBJS.${_P}} beforelinking
-. else
-${_P}: ${OBJS.${_P}}
-. endif
-. if defined(PROG_CXX.${_P})
- ${CXX} ${CXXFLAGS.${_P}} ${LDFLAGS.${_P}} -o ${.TARGET} ${OBJS.${_P}} \
- ${LDADD.${_P}}
-. else
- ${CC} ${CFLAGS.${_P}} ${LDFLAGS.${_P}} -o ${.TARGET} ${OBJS.${_P}} \
- ${LDADD.${_P}}
-. endif
-. if ${MK_CTF} != "no"
- ${CTFMERGE} ${CTFFLAGS} -o ${.TARGET} ${OBJS.${_P}}
-. endif
-
-. else # !defined(SRCS.${_P})
-
-. if !target(${_P})
-. if defined(PROG_CXX.${_P})
-SRCS.${_P}?= ${_P}.cc
-. else
-SRCS.${_P}?= ${_P}.c
-. endif
-
-# Always make an intermediate object file because:
-# - it saves time rebuilding when only the library has changed
-# - the name of the object gets put into the executable symbol table instead of
-# the name of a variable temporary object.
-# - it's useful to keep objects around for crunching.
-OBJS.${_P}:= ${_P}.o
-
-. if target(beforelinking)
-${_P}: ${OBJS.${_P}} beforelinking
-. else
-${_P}: ${OBJS.${_P}}
-. endif # target(beforelinking)
-. if defined(PROG_CXX.${_P})
- ${CXX} ${CXXFLAGS.${_P}} ${LDFLAGS.${_P}} -o ${.TARGET} ${OBJS.${_P}} \
- ${LDADD.${_P}}
-. else
- ${CC} ${CFLAGS.${_P}} ${LDFLAGS.${_P}} -o ${.TARGET} ${OBJS.${_P}} \
- ${LDADD.${_P}}
-. endif
-. if ${MK_CTF} != "no"
- ${CTFMERGE} ${CTFFLAGS} -o ${.TARGET} ${OBJS.${_P}}
-. endif
-
-. endif # !target(${_P})
-
-. endif # defined(SRCS.${_P})
-
-CLEANFILES+= ${OBJS.${_P}}
-
-.endfor # for _P in ${PROGS}
-
-all: objwarn ${PROGS} ${SCRIPTS}
-
-.if !defined(NO_MAN)
-. for _P in ${PROGS}
-MAN.${_P}?= ${_P}.1
-MAN:= ${MAN.${_P}}
-. include <bsd.man.mk>
-. endfor
-. if target(_manpages) # bsd.man.mk was included
-all: _manpages
-. endif
+.for p in ${PROGS}
+.if defined(PROGS_CXX) && !empty(PROGS_CXX:M$p)
+# bsd.prog.mk may need to know this
+x.$p= PROG_CXX=$p
.endif
-CLEANFILES+= ${PROGS}
-
-.include <bsd.libnames.mk>
+$p ${p}_p: .PHONY .MAKE
+ (cd ${.CURDIR} && ${.MAKE} -f ${MAKEFILE} PROG=$p ${x.$p})
-_EXTRADEPEND:
-.for _P in ${PROGS}
-. if !empty(LDFLAGS.${P}:M-nostdlib)
-. if !empty(DPADD.${_P})
- echo ${_P}: ${DPADD.${_P}} >> ${DEPENDFILE}
-. endif
-. else
- echo ${_P}: ${LIBC} ${DPADD.${_P}} >> ${DEPENDFILE}
-. if defined(PROG_CXX.${_P})
-. if !empty(CXXFLAGS.${P}:M-stdlib=libc++)
- echo ${_P}: ${LIBCPLUSPLUS} >> ${DEPENDFILE}
-. else
- echo ${_P}: ${LIBSTDCPLUSPLUS} >> ${DEPENDFILE}
-. endif
-. endif
-. endif
+.for t in ${PROGS_TARGETS:O:u}
+$p.$t: .PHONY .MAKE
+ (cd ${.CURDIR} && ${.MAKE} -f ${MAKEFILE} PROG=$p ${x.$p} ${@:E})
.endfor
-
-.if !target(install)
-
-. if !target(realinstall)
-
-. for _P in ${PROGS}
-
-. if !defined(INTERNALPROG.${_P})
-
-.ORDER: beforeinstall _proginstall.${_P}
-_proginstall.${_P}:
-. if defined(PROGNAME.${_P})
- ${INSTALL} ${STRIP} -o ${BINOWN.${_P}} -g ${BINGRP.${_P}} \
- -m ${BINMODE.${_P}} ${INSTALLFLAGS.${_P}} ${_P} \
- ${DESTDIR}${BINDIR.${_P}}/${PROGNAME.${_P}}
-. else
- ${INSTALL} ${STRIP} -o ${BINOWN.${_P}} -g ${BINGRP.${_P}} \
- -m ${BINMODE.${_P}} ${INSTALLFLAGS.${_P}} ${_P} \
- ${DESTDIR}${BINDIR.${_P}}
-. endif
-
-realinstall: _proginstall.${_P}
-
-. endif # !defined(INTERNALPROG.${_P})
-
-. endfor # for _P in ${PROGS}
-
-. endif # !target(realinstall)
-
-. if defined(SCRIPTS) && !empty(SCRIPTS)
-SCRIPTSDIR?= ${BINDIR}
-SCRIPTSOWN?= ${BINOWN}
-SCRIPTSGRP?= ${BINGRP}
-SCRIPTSMODE?= ${BINMODE}
-
-. for S in ${SCRIPTS}
-
-realinstall: scriptsinstall
-.ORDER: beforeinstall scriptsinstall
-
-. if defined(SCRIPTSNAME)
-SCRIPTSNAME_${S}?= ${SCRIPTSNAME}
-. else
-SCRIPTSNAME_${S}?= ${S:T:R}
-. endif
-
-SCRIPTSDIR_${S}?= ${SCRIPTSDIR}
-SCRIPTSOWN_${S}?= ${SCRIPTSOWN}
-SCRIPTSGRP_${S}?= ${SCRIPTSGRP}
-SCRIPTSMODE_${S}?= ${SCRIPTSMODE}
-
-scriptsinstall: ${DESTDIR}${SCRIPTSDIR_${S}}/${SCRIPTSNAME_${S}}
-
-${DESTDIR}${SCRIPTSDIR_${S}}/${SCRIPTSNAME_${S}}: ${S}
- ${INSTALL} -o ${SCRIPTSOWN_${S}} \
- -g ${SCRIPTSGRP_${S}} \
- -m ${SCRIPTSMODE_${S}} \
- ${.ALLSRC} \
- ${.TARGET}
-
-. endfor # for S in ${SCRIPTS}
-
-. endif # defined(SCRIPTS) && !empty(SCRIPTS)
-
-.endif # !target(install)
-
-.if !defined(NO_MAN)
-. if target(_manpages) # bsd.man.mk was included
-realinstall: _maninstall
-. endif
-.endif
-
-# Wrap bsd.nls.mk because I can't force that Makefile snippet to work only with
-# ${PROGS}.
-.for _P in ${PROGS}
-NLSNAME.${_P}?= ${_P}
-NLS:= ${NLS.${_P}}
-NLSDIR:= ${NLSDIR.${_P}}
-NLSNAME:= ${NLSNAME.${_P}}
-.include <bsd.nls.mk>
.endfor
-.include <bsd.files.mk>
-.include <bsd.incs.mk>
-.include <bsd.links.mk>
-
-.if !target(lint)
-. for _P in ${PROGS}
-. if !target(lint.${_P})
-. if defined(PROG_CXX.${_P})
-lint.${_P}:
-. else
-_CFLAGS:= ${CFLAGS.${_P}}
-_SRCS:= ${SRCS.${_P}}
-lint.${_P}: ${_SRCS:M*.c}
- ${LINT} ${LINTFLAGS} ${_CFLAGS:M-[DIU]*} ${.ALLSRC}
-. endif
-. endif
-lint: lint.${_P}
-
-. endfor
-.endif # !target(lint)
-
-.for _P in ${PROGS}
-CFLAGS:= ${CFLAGS.${_P}}
-CXXFLAGS:= ${CXXFLAGS.${_P}}
-# XXX: Pollutes DPADD.${_P} and LDADD.${_P} above
-#DPADD:= ${DPADD.${_P}}
-#LDADD:= ${LDADD.${_P}}
-SRCS:= ${SRCS.${_P}}
-. include <bsd.dep.mk>
-# bsd.dep.mk mangles SRCS
-SRCS.${_P}:= ${SRCS}
-. undef DPADD LDADD
+.for t in ${PROGS_TARGETS:O:u}
+$t: ${PROGS:%=%.$t}
.endfor
-# XXX: emulate the old bsd.prog.mk by allowing Makefiles that didn't set
-# ${PROG*} to function with this Makefile snippet.
-.if empty(PROGS)
-. include <bsd.dep.mk>
-.endif
-
-.if !exists(${.OBJDIR}/${DEPENDFILE})
-. for _P in ${PROGS}
-_SRCS:= ${SRCS.${_P}}
-${OBJS.${_P}}: ${_SRCS:M*.h}
-. endfor
-.endif
-
-.include <bsd.obj.mk>
-
-.include <bsd.sys.mk>
-
-.if defined(PORTNAME)
-.include <bsd.pkg.mk>
.endif
diff --git a/share/mk/bsd.subdir.mk b/share/mk/bsd.subdir.mk
index a66ab9b..3d5fb61 100644
--- a/share/mk/bsd.subdir.mk
+++ b/share/mk/bsd.subdir.mk
@@ -29,6 +29,9 @@
# maninstall, manlint, obj, objlink, realinstall, regress, tags
#
+.if !target(__<bsd.subdir.mk>__)
+__<bsd.subdir.mk>__:
+
.include <bsd.init.mk>
DISTRIBUTION?= base
@@ -92,3 +95,5 @@ afterinstall:
install: beforeinstall realinstall afterinstall
.ORDER: beforeinstall realinstall afterinstall
.endif
+
+.endif
diff --git a/share/mk/bsd.test.mk b/share/mk/bsd.test.mk
new file mode 100644
index 0000000..a75298f
--- /dev/null
+++ b/share/mk/bsd.test.mk
@@ -0,0 +1,79 @@
+# $NetBSD: bsd.test.mk,v 1.21 2012/08/25 22:21:16 jmmv Exp $
+# $FreeBSD$
+
+.include <bsd.init.mk>
+
+.if defined(TESTS_C)
+PROGS+= ${TESTS_C}
+.for _T in ${TESTS_C}
+BINDIR.${_T}= ${TESTSDIR}
+MAN.${_T}?= # empty
+.endfor
+.endif
+
+.if defined(TESTS_CXX)
+PROGS_CXX+= ${TESTS_CXX}
+PROGS+= ${TESTS_CXX}
+.for _T in ${TESTS_CXX}
+BINDIR.${_T}= ${TESTSDIR}
+MAN.${_T}?= # empty
+.endfor
+.endif
+
+.if defined(TESTS_SH)
+SCRIPTS+= ${TESTS_SH}
+.for _T in ${TESTS_SH}
+SCRIPTSDIR_${_T}= ${TESTSDIR}
+.endfor
+.endif
+
+TESTSBASE?= ${DESTDIR}/usr/tests
+
+# it is rare for test cases to have man pages
+.if !defined(MAN)
+WITHOUT_MAN=yes
+.export WITHOUT_MAN
+.endif
+
+# tell progs.mk we might want to install things
+PROG_VARS+= BINDIR
+PROGS_TARGETS+= install
+
+.if !empty(PROGS) || !empty(PROGS_CXX) || !empty(SCRIPTS)
+.include <bsd.progs.mk>
+.endif
+
+beforetest: .PHONY
+.if defined(TESTSDIR)
+.if ${TESTSDIR} == ${TESTSBASE}
+# Forbid running from ${TESTSBASE}. It can cause false positives/negatives and
+# it does not cover all the tests (e.g. it misses testing software in external).
+ @echo "*** Sorry, you cannot use make test from src/tests. Install the"
+ @echo "*** tests into their final location and run them from ${TESTSBASE}"
+ @false
+.else
+ @echo "*** Using this test does not preclude you from running the tests"
+ @echo "*** installed in ${TESTSBASE}. This test run may raise false"
+ @echo "*** positives and/or false negatives."
+.endif
+.else
+ @echo "*** No TESTSDIR defined; nothing to do."
+ @false
+.endif
+ @echo
+
+.if !target(realtest)
+realtest: .PHONY
+ @echo "$@ not defined; skipping"
+.endif
+
+test: .PHONY
+.ORDER: beforetest realtest
+test: beforetest realtest
+
+.if target(aftertest)
+.ORDER: realtest aftertest
+test: aftertest
+.endif
+
+.include <bsd.obj.mk>
diff --git a/sys/amd64/amd64/identcpu.c b/sys/amd64/amd64/identcpu.c
index 465316a..2517498 100644
--- a/sys/amd64/amd64/identcpu.c
+++ b/sys/amd64/amd64/identcpu.c
@@ -481,7 +481,7 @@ SYSINIT(hook_tsc_freq, SI_SUB_CONFIGURE, SI_ORDER_ANY, hook_tsc_freq, NULL);
void
identify_cpu(void)
{
- u_int regs[4];
+ u_int regs[4], cpu_stdext_disable;
do_cpuid(0, regs);
cpu_high = regs[0];
@@ -516,6 +516,20 @@ identify_cpu(void)
if (cpu_high >= 7) {
cpuid_count(7, 0, regs);
cpu_stdext_feature = regs[1];
+
+ /*
+ * Some hypervisors fail to filter out unsupported
+ * extended features. For now, disable the
+ * extensions, activation of which requires setting a
+ * bit in CR4, and which VM monitors do not support.
+ */
+ if (cpu_feature2 & CPUID2_HV) {
+ cpu_stdext_disable = CPUID_STDEXT_FSGSBASE |
+ CPUID_STDEXT_SMEP;
+ } else
+ cpu_stdext_disable = 0;
+ TUNABLE_INT_FETCH("hw.cpu_stdext_disable", &cpu_stdext_disable);
+ cpu_stdext_feature &= ~cpu_stdext_disable;
}
if (cpu_vendor_id == CPU_VENDOR_INTEL ||
diff --git a/sys/arm/arm/machdep.c b/sys/arm/arm/machdep.c
index 06c702a..17a60c2 100644
--- a/sys/arm/arm/machdep.c
+++ b/sys/arm/arm/machdep.c
@@ -1206,6 +1206,9 @@ initarm(struct arm_boot_params *abp)
pcpu0_init();
+ /* Do basic tuning, hz etc */
+ init_param1();
+
/* Calculate number of L2 tables needed for mapping vm_page_array */
l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page);
l2size = (l2size >> L1_S_SHIFT) + 1;
@@ -1219,17 +1222,16 @@ initarm(struct arm_boot_params *abp)
/* Make it divisible by 4 */
l2size = (l2size + 3) & ~3;
-#define KERNEL_TEXT_BASE (KERNBASE)
freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK;
/* Define a macro to simplify memory allocation */
-#define valloc_pages(var, np) \
- alloc_pages((var).pv_va, (np)); \
+#define valloc_pages(var, np) \
+ alloc_pages((var).pv_va, (np)); \
(var).pv_pa = (var).pv_va + (KERNPHYSADDR - KERNVIRTADDR);
-#define alloc_pages(var, np) \
- (var) = freemempos; \
- freemempos += (np * PAGE_SIZE); \
+#define alloc_pages(var, np) \
+ (var) = freemempos; \
+ freemempos += (np * PAGE_SIZE); \
memset((char *)(var), 0, ((np) * PAGE_SIZE));
while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
@@ -1262,13 +1264,10 @@ initarm(struct arm_boot_params *abp)
dpcpu_init((void *)dpcpu.pv_va, 0);
/* Allocate stacks for all modes */
- valloc_pages(irqstack, (IRQ_STACK_SIZE * MAXCPU));
- valloc_pages(abtstack, (ABT_STACK_SIZE * MAXCPU));
- valloc_pages(undstack, (UND_STACK_SIZE * MAXCPU));
- valloc_pages(kernelstack, (KSTACK_PAGES * MAXCPU));
-
- init_param1();
-
+ valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
+ valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
+ valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
+ valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
/*
@@ -1323,8 +1322,7 @@ initarm(struct arm_boot_params *abp)
err_devmap = platform_devmap_init();
pmap_devmap_bootstrap(l1pagetable, pmap_devmap_bootstrap_table);
- cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
- DOMAIN_CLIENT);
+ cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
pmap_pa = kernel_l1pt.pv_pa;
setttb(kernel_l1pt.pv_pa);
cpu_tlb_flushID();
@@ -1403,7 +1401,6 @@ initarm(struct arm_boot_params *abp)
*/
physmap_init(availmem_regions, availmem_regions_sz);
- /* Do basic tuning, hz etc */
init_param2(physmem);
kdb_init();
@@ -1411,4 +1408,3 @@ initarm(struct arm_boot_params *abp)
sizeof(struct pcb)));
}
#endif
-
diff --git a/sys/arm/at91/at91_machdep.c b/sys/arm/at91/at91_machdep.c
index 51e7b81..f2b63cbd 100644
--- a/sys/arm/at91/at91_machdep.c
+++ b/sys/arm/at91/at91_machdep.c
@@ -96,6 +96,10 @@ __FBSDID("$FreeBSD$");
#include <arm/at91/at91sam9g20reg.h>
#include <arm/at91/at91sam9g45reg.h>
+#ifndef MAXCPU
+#define MAXCPU 1
+#endif
+
/* Page table for mapping proc0 zero page */
#define KERNEL_PT_SYS 0
#define KERNEL_PT_KERN 1
@@ -454,7 +458,7 @@ initarm(struct arm_boot_params *abp)
{
struct pv_addr kernel_l1pt;
struct pv_addr dpcpu;
- int loop, i;
+ int i;
u_int l1pagetable;
vm_offset_t freemempos;
vm_offset_t afterkern;
@@ -482,23 +486,23 @@ initarm(struct arm_boot_params *abp)
while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
freemempos += PAGE_SIZE;
valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
- for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
- if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
- valloc_pages(kernel_pt_table[loop],
+ for (i = 0; i < NUM_KERNEL_PTS; ++i) {
+ if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
+ valloc_pages(kernel_pt_table[i],
L2_TABLE_SIZE / PAGE_SIZE);
} else {
- kernel_pt_table[loop].pv_va = freemempos -
- (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
+ kernel_pt_table[i].pv_va = freemempos -
+ (i % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
L2_TABLE_SIZE_REAL;
- kernel_pt_table[loop].pv_pa =
- kernel_pt_table[loop].pv_va - KERNVIRTADDR +
+ kernel_pt_table[i].pv_pa =
+ kernel_pt_table[i].pv_va - KERNVIRTADDR +
KERNPHYSADDR;
}
}
/*
- * Allocate a page for the system page mapped to V0x00000000
- * This page will just contain the system vectors and can be
- * shared by all processes.
+ * Allocate a page for the system page mapped to 0x00000000
+ * or 0xffff0000. This page will just contain the system vectors
+ * and can be shared by all processes.
*/
valloc_pages(systempage, 1);
@@ -507,10 +511,10 @@ initarm(struct arm_boot_params *abp)
dpcpu_init((void *)dpcpu.pv_va, 0);
/* Allocate stacks for all modes */
- valloc_pages(irqstack, IRQ_STACK_SIZE);
- valloc_pages(abtstack, ABT_STACK_SIZE);
- valloc_pages(undstack, UND_STACK_SIZE);
- valloc_pages(kernelstack, KSTACK_PAGES);
+ valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU);
+ valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU);
+ valloc_pages(undstack, UND_STACK_SIZE * MAXCPU);
+ valloc_pages(kernelstack, KSTACK_PAGES * MAXCPU);
valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
/*
@@ -558,17 +562,17 @@ initarm(struct arm_boot_params *abp)
pmap_map_chunk(l1pagetable, msgbufpv.pv_va, msgbufpv.pv_pa,
msgbufsize, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
- for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
- pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
- kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
+ for (i = 0; i < NUM_KERNEL_PTS; ++i) {
+ pmap_map_chunk(l1pagetable, kernel_pt_table[i].pv_va,
+ kernel_pt_table[i].pv_pa, L2_TABLE_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
}
pmap_devmap_bootstrap(l1pagetable, at91_devmap);
- cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
+ cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT);
setttb(kernel_l1pt.pv_pa);
cpu_tlb_flushID();
- cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));
+ cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2));
at91_soc_id();
diff --git a/sys/arm/lpc/lpc_gpio.c b/sys/arm/lpc/lpc_gpio.c
index 4c06302..a7ef028 100644
--- a/sys/arm/lpc/lpc_gpio.c
+++ b/sys/arm/lpc/lpc_gpio.c
@@ -33,7 +33,7 @@
* - Port3 with:
* - 26 input pins (GPI_00..GPI_09 + GPI_15..GPI_23 + GPI_25 + GPI_27..GPI_28)
* - 24 output pins (GPO_00..GPO_23)
- * - 6 input/ouput pins (GPIO_00..GPIO_05)
+ * - 6 input/output pins (GPIO_00..GPIO_05)
*
* Pins are mapped to logical pin number as follows:
* [0..9] -> GPI_00..GPI_09 (port 3)
diff --git a/sys/boot/common/Makefile.inc b/sys/boot/common/Makefile.inc
index 935519c..fab1630 100644
--- a/sys/boot/common/Makefile.inc
+++ b/sys/boot/common/Makefile.inc
@@ -64,6 +64,7 @@ MAN+= ../forth/delay.4th.8
MAN+= ../forth/loader.conf.5
MAN+= ../forth/loader.4th.8
MAN+= ../forth/menu.4th.8
+MAN+= ../forth/menusets.4th.8
MAN+= ../forth/version.4th.8
.endif
diff --git a/sys/boot/forth/menu.4th b/sys/boot/forth/menu.4th
index 7d3de94..c98b4f8 100644
--- a/sys/boot/forth/menu.4th
+++ b/sys/boot/forth/menu.4th
@@ -342,6 +342,7 @@ create init_text8 255 allot
\ sure that things move along smoothly, allocate
\ a temporary NULL string
+ drop ( getenv cruft )
s" "
then
then
diff --git a/sys/boot/i386/boot2/sio.S b/sys/boot/i386/boot2/sio.S
index f2cd5c7..ca9d0a2 100644
--- a/sys/boot/i386/boot2/sio.S
+++ b/sys/boot/i386/boot2/sio.S
@@ -40,13 +40,11 @@ sio_init: pushl %eax
movb $0x3,%al # Set RTS,
outb %al,(%dx) # DTR
incl %edx # Line status reg
- call sio_flush
- ret
+ # Fallthrough
/* int sio_flush(void) */
-sio_flush: xorl %eax,%eax # Return value
- xorl %ecx,%ecx # Timeout
+sio_flush: xorl %ecx,%ecx # Timeout
movb $0x80,%ch # counter
sio_flush.1: call sio_ischar # Check for character
jz sio_flush.2 # Till none
diff --git a/sys/boot/i386/loader/Makefile b/sys/boot/i386/loader/Makefile
index f91715c..0ee8bd8 100644
--- a/sys/boot/i386/loader/Makefile
+++ b/sys/boot/i386/loader/Makefile
@@ -103,7 +103,7 @@ FILESMODE_${LOADER}= ${BINMODE} -b
FILES+= loader.help loader.4th support.4th loader.conf
FILES+= screen.4th frames.4th beastie.4th
FILES+= brand.4th check-password.4th color.4th delay.4th
-FILES+= menu.4th menu-commands.4th shortcuts.4th version.4th
+FILES+= menu.4th menu-commands.4th menusets.4th shortcuts.4th version.4th
FILESDIR_loader.conf= /boot/defaults
.if !exists(${DESTDIR}/boot/loader.rc)
diff --git a/sys/boot/ia64/common/Makefile b/sys/boot/ia64/common/Makefile
index d90898f..5740070 100644
--- a/sys/boot/ia64/common/Makefile
+++ b/sys/boot/ia64/common/Makefile
@@ -35,7 +35,7 @@ loader.help: help.common
FILES+= loader.4th support.4th loader.conf
FILES+= screen.4th frames.4th
FILES+= beastie.4th brand.4th check-password.4th color.4th delay.4th
-FILES+= menu.4th menu-commands.4th shortcuts.4th version.4th
+FILES+= menu.4th menu-commands.4th menusets.4th shortcuts.4th version.4th
.if !exists(${DESTDIR}/boot/loader.rc)
FILES+= loader.rc
.endif
diff --git a/sys/boot/pc98/Makefile.inc b/sys/boot/pc98/Makefile.inc
index 62cfc1d..857c8bc 100644
--- a/sys/boot/pc98/Makefile.inc
+++ b/sys/boot/pc98/Makefile.inc
@@ -5,9 +5,9 @@
BINDIR?= /boot
LOADER_ADDRESS?=0x200000
-CFLAGS+= -ffreestanding -mpreferred-stack-boundary=2 \
- -mno-mmx -mno-3dnow -mno-sse -mno-sse2 -mno-sse3 -msoft-float \
- -Os -DPC98
+CFLAGS+= -march=i386 -ffreestanding -mpreferred-stack-boundary=2 \
+ -mno-mmx -mno-3dnow -mno-sse -mno-sse2 -mno-sse3 -msoft-float
+CFLAGS+= -Os -DPC98
LDFLAGS+= -nostdlib
# BTX components
diff --git a/sys/boot/pc98/boot2/Makefile b/sys/boot/pc98/boot2/Makefile
index 18bf251..00a28de 100644
--- a/sys/boot/pc98/boot2/Makefile
+++ b/sys/boot/pc98/boot2/Makefile
@@ -3,7 +3,7 @@
.include <bsd.own.mk>
# XXX: clang can compile the boot code just fine, but boot2 gets too big
-CC:=${CC:C/^(.*\/)?clang$/gcc/1}
+CC:= gcc
FILES= boot boot1 boot2
diff --git a/sys/boot/pc98/boot2/boot2.c b/sys/boot/pc98/boot2/boot2.c
index e3e0b86..296ca55 100644
--- a/sys/boot/pc98/boot2/boot2.c
+++ b/sys/boot/pc98/boot2/boot2.c
@@ -554,8 +554,10 @@ parse()
}
ioctrl = OPT_CHECK(RBX_DUAL) ? (IO_SERIAL|IO_KEYBOARD) :
OPT_CHECK(RBX_SERIAL) ? IO_SERIAL : IO_KEYBOARD;
- if (ioctrl & IO_SERIAL)
- sio_init(115200 / comspeed);
+ if (ioctrl & IO_SERIAL) {
+ if (sio_init(115200 / comspeed) != 0)
+ ioctrl &= ~IO_SERIAL;
+ }
} else {
for (q = arg--; *q && *q != '('; q++);
if (*q) {
diff --git a/sys/boot/pc98/btx/btx/btx.S b/sys/boot/pc98/btx/btx/btx.S
index ceed5a6..1f8ff80 100644
--- a/sys/boot/pc98/btx/btx/btx.S
+++ b/sys/boot/pc98/btx/btx/btx.S
@@ -840,7 +840,7 @@ putstr: lodsb # Load char
.set SIO_DIV,(115200/SIOSPD) # 115200 / SPD
/*
- * void sio_init(void)
+ * int sio_init(void)
*/
sio_init: movw $SIO_PRT+0x3,%dx # Data format reg
movb $SIO_FMT|0x80,%al # Set format
@@ -856,14 +856,19 @@ sio_init: movw $SIO_PRT+0x3,%dx # Data format reg
movb $0x3,%al # Set RTS,
outb %al,(%dx) # DTR
incl %edx # Line status reg
+ call sio_getc.1 # Get character
/*
- * void sio_flush(void)
+ * int sio_flush(void)
*/
-sio_flush.0: call sio_getc.1 # Get character
-sio_flush: call sio_ischar # Check for character
- jnz sio_flush.0 # Till none
- ret # To caller
+sio_flush: xorl %eax,%eax # Return value
+ xorl %ecx,%ecx # Timeout
+ movb $0x80,%ch # counter
+sio_flush.1: call sio_ischar # Check for character
+ jz sio_flush.2 # Till none
+ loop sio_flush.1 # or counter is zero
+ movb $1, %al # Exhausted all tries
+sio_flush.2: ret # To caller
/*
* void sio_putc(int c)
diff --git a/sys/boot/pc98/cdboot/Makefile b/sys/boot/pc98/cdboot/Makefile
index eec1328..bcce0ef 100644
--- a/sys/boot/pc98/cdboot/Makefile
+++ b/sys/boot/pc98/cdboot/Makefile
@@ -13,3 +13,7 @@ ORG= 0x0000
LDFLAGS=-e start -Ttext ${ORG} -Wl,-N,-S,--oformat,binary
.include <bsd.prog.mk>
+
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+CFLAGS.cdboot.S= ${CLANG_NO_IAS}
+CFLAGS+= ${CFLAGS.${.IMPSRC:T}}
diff --git a/sys/boot/pc98/libpc98/comconsole.c b/sys/boot/pc98/libpc98/comconsole.c
index c4028db..825de8f 100644
--- a/sys/boot/pc98/libpc98/comconsole.c
+++ b/sys/boot/pc98/libpc98/comconsole.c
@@ -63,7 +63,6 @@ static void comc_setup(int speed, int port);
static int comc_speed_set(struct env_var *ev, int flags,
const void *value);
-static int comc_started;
static int comc_curspeed;
static int comc_port = COMPORT;
static uint32_t comc_locator;
@@ -87,9 +86,6 @@ comc_probe(struct console *cp)
int speed, port;
uint32_t locator;
- /* XXX check the BIOS equipment list? */
- cp->c_flags |= (C_PRESENTIN | C_PRESENTOUT);
-
if (comc_curspeed == 0) {
comc_curspeed = COMSPEED;
/*
@@ -137,18 +133,19 @@ comc_probe(struct console *cp)
env_setenv("comconsole_pcidev", EV_VOLATILE, env, comc_pcidev_set,
env_nounset);
}
+ comc_setup(comc_curspeed, comc_port);
}
static int
comc_init(int arg)
{
- if (comc_started && arg == 0)
- return 0;
- comc_started = 1;
comc_setup(comc_curspeed, comc_port);
- return(0);
+ if ((comconsole.c_flags & (C_PRESENTIN | C_PRESENTOUT)) ==
+ (C_PRESENTIN | C_PRESENTOUT))
+ return (CMD_OK);
+ return (CMD_ERROR);
}
static void
@@ -166,13 +163,13 @@ comc_putchar(int c)
static int
comc_getchar(void)
{
- return(comc_ischar() ? inb(comc_port + com_data) : -1);
+ return (comc_ischar() ? inb(comc_port + com_data) : -1);
}
static int
comc_ischar(void)
{
- return(inb(comc_port + com_lsr) & LSR_RXRDY);
+ return (inb(comc_port + com_lsr) & LSR_RXRDY);
}
static int
@@ -185,7 +182,8 @@ comc_speed_set(struct env_var *ev, int flags, const void *value)
return (CMD_ERROR);
}
- if (comc_started && comc_curspeed != speed)
+ if ((comconsole.c_flags & (C_ACTIVEIN | C_ACTIVEOUT)) != 0 &&
+ comc_curspeed != speed)
comc_setup(speed, comc_port);
env_setenv(ev->ev_name, flags | EV_NOHOOK, value, NULL, NULL);
@@ -203,7 +201,8 @@ comc_port_set(struct env_var *ev, int flags, const void *value)
return (CMD_ERROR);
}
- if (comc_started && comc_port != port) {
+ if ((comconsole.c_flags & (C_ACTIVEIN | C_ACTIVEOUT)) != 0 &&
+ comc_port != port) {
comc_setup(comc_curspeed, port);
set_hw_console_hint();
}
@@ -305,7 +304,8 @@ comc_pcidev_set(struct env_var *ev, int flags, const void *value)
printf("Invalid pcidev\n");
return (CMD_ERROR);
}
- if (comc_started && comc_locator != locator) {
+ if ((comconsole.c_flags & (C_ACTIVEIN | C_ACTIVEOUT)) != 0 &&
+ comc_locator != locator) {
error = comc_pcidev_handle(locator);
if (error != CMD_OK)
return (error);
@@ -317,6 +317,8 @@ comc_pcidev_set(struct env_var *ev, int flags, const void *value)
static void
comc_setup(int speed, int port)
{
+ static int TRY_COUNT = 1000000;
+ int tries;
comc_curspeed = speed;
comc_port = port;
@@ -327,9 +329,15 @@ comc_setup(int speed, int port)
outb(comc_port + com_cfcr, COMC_FMT);
outb(comc_port + com_mcr, MCR_RTS | MCR_DTR);
+ tries = 0;
do
inb(comc_port + com_data);
- while (inb(comc_port + com_lsr) & LSR_RXRDY);
+ while (inb(comc_port + com_lsr) & LSR_RXRDY && ++tries < TRY_COUNT);
+
+ if (tries < TRY_COUNT)
+ comconsole.c_flags |= (C_PRESENTIN | C_PRESENTOUT);
+ else
+ comconsole.c_flags &= ~(C_PRESENTIN | C_PRESENTOUT);
}
static int
diff --git a/sys/boot/pc98/loader/Makefile b/sys/boot/pc98/loader/Makefile
index 48db11b..489eeac 100644
--- a/sys/boot/pc98/loader/Makefile
+++ b/sys/boot/pc98/loader/Makefile
@@ -88,7 +88,7 @@ FILESMODE_${LOADER}= ${BINMODE} -b
FILES+= loader.help loader.4th support.4th loader.conf
FILES+= screen.4th frames.4th beastie.4th
FILES+= brand.4th check-password.4th color.4th delay.4th
-FILES+= menu.4th menu-commands.4th shortcuts.4th version.4th
+FILES+= menu.4th menu-commands.4th menusets.4th shortcuts.4th version.4th
FILESDIR_loader.conf= /boot/defaults
.if !exists(${DESTDIR}/boot/loader.rc)
diff --git a/sys/boot/powerpc/ofw/Makefile b/sys/boot/powerpc/ofw/Makefile
index 079f552..d3d3523 100644
--- a/sys/boot/powerpc/ofw/Makefile
+++ b/sys/boot/powerpc/ofw/Makefile
@@ -105,7 +105,7 @@ loader.help: help.common help.ofw
FILES= loader.help loader.4th support.4th loader.conf
FILES+= screen.4th frames.4th
FILES+= beastie.4th brand.4th check-password.4th color.4th delay.4th
-FILES+= menu.4th menu-commands.4th shortcuts.4th version.4th
+FILES+= menu.4th menu-commands.4th menusets.4th shortcuts.4th version.4th
FILESDIR_loader.conf= /boot/defaults
.if !exists(${DESTDIR}/boot/loader.rc)
diff --git a/sys/boot/powerpc/ps3/Makefile b/sys/boot/powerpc/ps3/Makefile
index 0dee5f4..810e480 100644
--- a/sys/boot/powerpc/ps3/Makefile
+++ b/sys/boot/powerpc/ps3/Makefile
@@ -116,7 +116,7 @@ loader.help: help.common help.ps3
FILES= loader.help loader.4th support.4th loader.conf
FILES+= screen.4th frames.4th
FILES+= beastie.4th brand.4th check-password.4th color.4th delay.4th
-FILES+= menu.4th menu-commands.4th shortcuts.4th version.4th
+FILES+= menu.4th menu-commands.4th menusets.4th shortcuts.4th version.4th
FILESDIR_loader.conf= /boot/defaults
.if !exists(${DESTDIR}/boot/loader.rc)
diff --git a/sys/boot/sparc64/loader/Makefile b/sys/boot/sparc64/loader/Makefile
index 0a3b753..4624b6f 100644
--- a/sys/boot/sparc64/loader/Makefile
+++ b/sys/boot/sparc64/loader/Makefile
@@ -99,7 +99,7 @@ loader.help: help.common help.sparc64
FILES= loader.help loader.4th support.4th loader.conf
FILES+= screen.4th frames.4th
FILES+= beastie.4th brand.4th check-password.4th color.4th delay.4th
-FILES+= menu.4th menu-commands.4th shortcuts.4th version.4th
+FILES+= menu.4th menu-commands.4th menusets.4th shortcuts.4th version.4th
FILESDIR_loader.conf= /boot/defaults
.if !exists(${DESTDIR}/boot/loader.rc)
diff --git a/sys/cam/scsi/scsi_enc_ses.c b/sys/cam/scsi/scsi_enc_ses.c
index 1e632ca..cbc9b90 100644
--- a/sys/cam/scsi/scsi_enc_ses.c
+++ b/sys/cam/scsi/scsi_enc_ses.c
@@ -363,6 +363,7 @@ typedef struct ses_softc {
uint32_t ses_flags;
#define SES_FLAG_TIMEDCOMP 0x01
#define SES_FLAG_ADDLSTATUS 0x02
+#define SES_FLAG_DESC 0x04
ses_control_reqlist_t ses_requests;
ses_control_reqlist_t ses_pending_requests;
@@ -1271,7 +1272,10 @@ ses_process_pages(enc_softc_t *enc, struct enc_fsm_state *state,
err = 0;
for (i = 0; i < length; i++) {
- if (page->params[i] == SesAddlElementStatus) {
+ if (page->params[i] == SesElementDescriptor) {
+ ses->ses_flags |= SES_FLAG_DESC;
+ break;
+ } else if (page->params[i] == SesAddlElementStatus) {
ses->ses_flags |= SES_FLAG_ADDLSTATUS;
break;
}
@@ -1486,7 +1490,8 @@ out:
ses_cache_free(enc, enc_cache);
else {
enc_update_request(enc, SES_UPDATE_GETSTATUS);
- enc_update_request(enc, SES_UPDATE_GETELMDESCS);
+ if (ses->ses_flags & SES_FLAG_DESC)
+ enc_update_request(enc, SES_UPDATE_GETELMDESCS);
if (ses->ses_flags & SES_FLAG_ADDLSTATUS)
enc_update_request(enc, SES_UPDATE_GETELMADDLSTATUS);
enc_update_request(enc, SES_PUBLISH_CACHE);
diff --git a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
index 86f401f..79edb7c 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
@@ -235,7 +235,7 @@ static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
static struct mtx dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int dtrace_in_probe; /* non-zero if executing a probe */
-#if defined(__i386__) || defined(__amd64__) || defined(__mips__)
+#if defined(__i386__) || defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
uintptr_t dtrace_in_probe_addr; /* Address of invop when already in probe */
#endif
#endif
@@ -10762,7 +10762,7 @@ err:
#else
int i;
-#if defined(__amd64__) || defined(__mips__)
+#if defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
/*
* FreeBSD isn't good at limiting the amount of memory we
* ask to malloc, so let's place a limit here before trying
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
index 8e08927..607364f 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
@@ -191,6 +191,7 @@ uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
+int zfs_disable_dup_eviction = 0;
TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
@@ -321,7 +322,6 @@ typedef struct arc_stats {
kstat_named_t arcstat_l2_io_error;
kstat_named_t arcstat_l2_size;
kstat_named_t arcstat_l2_hdr_size;
- kstat_named_t arcstat_memory_throttle_count;
kstat_named_t arcstat_l2_write_trylock_fail;
kstat_named_t arcstat_l2_write_passed_headroom;
kstat_named_t arcstat_l2_write_spa_mismatch;
@@ -334,6 +334,10 @@ typedef struct arc_stats {
kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
kstat_named_t arcstat_l2_write_buffer_list_iter;
kstat_named_t arcstat_l2_write_buffer_list_null_iter;
+ kstat_named_t arcstat_memory_throttle_count;
+ kstat_named_t arcstat_duplicate_buffers;
+ kstat_named_t arcstat_duplicate_buffers_size;
+ kstat_named_t arcstat_duplicate_reads;
} arc_stats_t;
static arc_stats_t arc_stats = {
@@ -391,7 +395,6 @@ static arc_stats_t arc_stats = {
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
- { "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "l2_write_trylock_fail", KSTAT_DATA_UINT64 },
{ "l2_write_passed_headroom", KSTAT_DATA_UINT64 },
{ "l2_write_spa_mismatch", KSTAT_DATA_UINT64 },
@@ -403,7 +406,11 @@ static arc_stats_t arc_stats = {
{ "l2_write_pios", KSTAT_DATA_UINT64 },
{ "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
{ "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 },
- { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 }
+ { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
+ { "memory_throttle_count", KSTAT_DATA_UINT64 },
+ { "duplicate_buffers", KSTAT_DATA_UINT64 },
+ { "duplicate_buffers_size", KSTAT_DATA_UINT64 },
+ { "duplicate_reads", KSTAT_DATA_UINT64 }
};
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
@@ -1518,6 +1525,17 @@ arc_buf_clone(arc_buf_t *from)
hdr->b_buf = buf;
arc_get_data_buf(buf);
bcopy(from->b_data, buf->b_data, size);
+
+ /*
+ * This buffer already exists in the arc so create a duplicate
+ * copy for the caller. If the buffer is associated with user data
+ * then track the size and number of duplicates. These stats will be
+ * updated as duplicate buffers are created and destroyed.
+ */
+ if (hdr->b_type == ARC_BUFC_DATA) {
+ ARCSTAT_BUMP(arcstat_duplicate_buffers);
+ ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
+ }
hdr->b_datacnt += 1;
return (buf);
}
@@ -1618,6 +1636,16 @@ arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
ASSERT3U(state->arcs_size, >=, size);
atomic_add_64(&state->arcs_size, -size);
buf->b_data = NULL;
+
+ /*
+ * If we're destroying a duplicate buffer make sure
+ * that the appropriate statistics are updated.
+ */
+ if (buf->b_hdr->b_datacnt > 1 &&
+ buf->b_hdr->b_type == ARC_BUFC_DATA) {
+ ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
+ ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
+ }
ASSERT(buf->b_hdr->b_datacnt > 0);
buf->b_hdr->b_datacnt -= 1;
}
@@ -1802,6 +1830,48 @@ arc_buf_size(arc_buf_t *buf)
}
/*
+ * Called from the DMU to determine if the current buffer should be
+ * evicted. In order to ensure proper locking, the eviction must be initiated
+ * from the DMU. Return true if the buffer is associated with user data and
+ * duplicate buffers still exist.
+ */
+boolean_t
+arc_buf_eviction_needed(arc_buf_t *buf)
+{
+ arc_buf_hdr_t *hdr;
+ boolean_t evict_needed = B_FALSE;
+
+ if (zfs_disable_dup_eviction)
+ return (B_FALSE);
+
+ mutex_enter(&buf->b_evict_lock);
+ hdr = buf->b_hdr;
+ if (hdr == NULL) {
+ /*
+ * We are in arc_do_user_evicts(); let that function
+ * perform the eviction.
+ */
+ ASSERT(buf->b_data == NULL);
+ mutex_exit(&buf->b_evict_lock);
+ return (B_FALSE);
+ } else if (buf->b_data == NULL) {
+ /*
+ * We have already been added to the arc eviction list;
+ * recommend eviction.
+ */
+ ASSERT3P(hdr, ==, &arc_eviction_hdr);
+ mutex_exit(&buf->b_evict_lock);
+ return (B_TRUE);
+ }
+
+ if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
+ evict_needed = B_TRUE;
+
+ mutex_exit(&buf->b_evict_lock);
+ return (evict_needed);
+}
+
+/*
* Evict buffers from list until we've removed the specified number of
* bytes. Move the removed buffers to the appropriate evict state.
* If the recycle flag is set, then attempt to "recycle" a buffer:
@@ -2887,8 +2957,10 @@ arc_read_done(zio_t *zio)
abuf = buf;
for (acb = callback_list; acb; acb = acb->acb_next) {
if (acb->acb_done) {
- if (abuf == NULL)
+ if (abuf == NULL) {
+ ARCSTAT_BUMP(arcstat_duplicate_reads);
abuf = arc_buf_clone(buf);
+ }
acb->acb_buf = abuf;
abuf = NULL;
}
@@ -3434,6 +3506,16 @@ arc_release(arc_buf_t *buf, void *tag)
ASSERT3U(*size, >=, hdr->b_size);
atomic_add_64(size, -hdr->b_size);
}
+
+ /*
+ * We're releasing a duplicate user data buffer, update
+ * our statistics accordingly.
+ */
+ if (hdr->b_type == ARC_BUFC_DATA) {
+ ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
+ ARCSTAT_INCR(arcstat_duplicate_buffers_size,
+ -hdr->b_size);
+ }
hdr->b_datacnt -= 1;
arc_cksum_verify(buf);
#ifdef illumos
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
index 571a5a3..50f1bea 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
@@ -2089,7 +2089,24 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
dbuf_evict(db);
} else {
VERIFY(arc_buf_remove_ref(db->db_buf, db) == 0);
- if (!DBUF_IS_CACHEABLE(db))
+
+ /*
+ * A dbuf will be eligible for eviction if either the
+ * 'primarycache' property is set or a duplicate
+ * copy of this buffer is already cached in the arc.
+ *
+	 * In the case of the 'primarycache' property, a buffer
+ * is considered for eviction if it matches the
+ * criteria set in the property.
+ *
+ * To decide if our buffer is considered a
+ * duplicate, we must call into the arc to determine
+ * if multiple buffers are referencing the same
+ * block on-disk. If so, then we simply evict
+ * ourselves.
+ */
+ if (!DBUF_IS_CACHEABLE(db) ||
+ arc_buf_eviction_needed(db->db_buf))
dbuf_clear(db);
else
mutex_exit(&db->db_mtx);
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h
index 9ca8ed6..857cb91 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h
@@ -99,6 +99,7 @@ int arc_released(arc_buf_t *buf);
int arc_has_callback(arc_buf_t *buf);
void arc_buf_freeze(arc_buf_t *buf);
void arc_buf_thaw(arc_buf_t *buf);
+boolean_t arc_buf_eviction_needed(arc_buf_t *buf);
#ifdef ZFS_DEBUG
int arc_referenced(arc_buf_t *buf);
#endif
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c
index 1b4cb78..f110d8c 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ioctl.c
@@ -3245,6 +3245,7 @@ zfs_ioc_destroy_snaps_nvl(zfs_cmd_t *zc)
}
(void) zfs_unmount_snap(name, NULL);
+ (void) zvol_remove_minor(name);
}
err = dmu_snapshots_destroy_nvl(nvl, zc->zc_defer_destroy,
diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c
index 0a8f135..2c7f423 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vfsops.c
@@ -1132,7 +1132,6 @@ zfs_domount(vfs_t *vfsp, char *osname)
vfsp->vfs_data = zfsvfs;
vfsp->mnt_flag |= MNT_LOCAL;
- vfsp->mnt_kern_flag |= MNTK_MPSAFE;
vfsp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
vfsp->mnt_kern_flag |= MNTK_SHARED_WRITES;
vfsp->mnt_kern_flag |= MNTK_EXTENDED_SHARED;
diff --git a/sys/cddl/contrib/opensolaris/uts/powerpc/dtrace/fasttrap_isa.c b/sys/cddl/contrib/opensolaris/uts/powerpc/dtrace/fasttrap_isa.c
new file mode 100644
index 0000000..36115ea
--- /dev/null
+++ b/sys/cddl/contrib/opensolaris/uts/powerpc/dtrace/fasttrap_isa.c
@@ -0,0 +1,30 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+
+/*
+ * XXX: Placeholder for PowerPC fasttrap code
+ */
diff --git a/sys/cddl/contrib/opensolaris/uts/powerpc/sys/fasttrap_isa.h b/sys/cddl/contrib/opensolaris/uts/powerpc/sys/fasttrap_isa.h
new file mode 100644
index 0000000..0f21899
--- /dev/null
+++ b/sys/cddl/contrib/opensolaris/uts/powerpc/sys/fasttrap_isa.h
@@ -0,0 +1,49 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#ifndef _FASTTRAP_ISA_H
+#define _FASTTRAP_ISA_H
+
+#pragma ident "%Z%%M% %I% %E% SMI"
+
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * XXXDTRACE: placeholder for PowerPC fasttrap stuff
+ */
+
+typedef uint32_t fasttrap_instr_t;
+#define FASTTRAP_SUNWDTRACE_SIZE 64
+#define FASTTRAP_INSTR 0x0FFFDDDD
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _FASTTRAP_ISA_H */
diff --git a/sys/cddl/dev/dtrace/powerpc/dtrace_asm.S b/sys/cddl/dev/dtrace/powerpc/dtrace_asm.S
new file mode 100644
index 0000000..9ff83db
--- /dev/null
+++ b/sys/cddl/dev/dtrace/powerpc/dtrace_asm.S
@@ -0,0 +1,269 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include "assym.s"
+
+#define _ASM
+
+#include <sys/cpuvar_defs.h>
+#include <sys/dtrace.h>
+
+#include <machine/asm.h>
+/*
+#include <machine/cpu.h>
+*/
+
+/*
+ * Primitives
+ */
+
+ .text
+
+/*
+void dtrace_membar_producer(void)
+*/
+ASENTRY_NOPROF(dtrace_membar_producer)
+ blr
+END(dtrace_membar_producer)
+
+/*
+void dtrace_membar_consumer(void)
+*/
+ASENTRY_NOPROF(dtrace_membar_consumer)
+ blr
+END(dtrace_membar_consumer)
+
+/*
+dtrace_icookie_t dtrace_interrupt_disable(void)
+*/
+ASENTRY_NOPROF(dtrace_interrupt_disable)
+ mfmsr %r3
+ andi. %r0,%r3,~PSL_EE@l
+ mtmsr %r0
+ blr
+END(dtrace_interrupt_disable)
+
+/*
+void dtrace_interrupt_enable(dtrace_icookie_t cookie)
+*/
+ASENTRY_NOPROF(dtrace_interrupt_enable)
+ mtmsr %r3
+ blr
+END(dtrace_interrupt_enable)
+
+/*
+uint32_t dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
+*/
+ASENTRY_NOPROF(dtrace_cas32)
+1:
+ lwarx %r0,0,%r3
+ cmpw %r4,%r0
+ bne 2f
+ stwcx. %r5,0,%r3
+ bne 1b
+2: mr %r3,%r0
+ blr
+END(dtrace_cas32)
+
+/*
+void *
+dtrace_casptr(void *target, void *cmp, void *new)
+*/
+ASENTRY_NOPROF(dtrace_casptr)
+1:
+ lwarx %r0,0,%r3
+ cmpw %r4,%r0
+ bne 2f
+ stwcx. %r5,0,%r3
+ bne 1b
+2: mr %r3,%r0
+ blr
+END(dtrace_casptr)
+
+
+/*
+uintptr_t
+dtrace_fulword(void *addr)
+*/
+ASENTRY_NOPROF(dtrace_fulword)
+END(dtrace_fulword)
+
+/*
+uint8_t
+dtrace_fuword8_nocheck(void *addr)
+*/
+ASENTRY_NOPROF(dtrace_fuword8_nocheck)
+ lbz %r3,0(%r3)
+ blr
+END(dtrace_fuword8_nocheck)
+
+/*
+uint16_t
+dtrace_fuword16_nocheck(void *addr)
+*/
+ASENTRY_NOPROF(dtrace_fuword16_nocheck)
+ lhz %r3,0(%r3)
+ blr
+END(dtrace_fuword16_nocheck)
+
+/*
+uint32_t
+dtrace_fuword32_nocheck(void *addr)
+*/
+ASENTRY_NOPROF(dtrace_fuword32_nocheck)
+ lwz %r3,0(%r3)
+ blr
+END(dtrace_fuword32_nocheck)
+
+/*
+uint64_t
+dtrace_fuword64_nocheck(void *addr)
+*/
+ASENTRY_NOPROF(dtrace_fuword64_nocheck)
+#if defined(__powerpc64__)
+ ld %r3,0(%r3)
+#else
+ lwz %r5,0(%r3)
+ lwz %r4,4(%r3)
+ mr %r3,%r5
+#endif
+ blr
+END(dtrace_fuword64_nocheck)
+
+/*
+XXX: unoptimized
+void
+dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
+*/
+ASENTRY_NOPROF(dtrace_copy)
+ addme %r7,%r3
+ addme %r8,%r4
+1:
+ lbzu %r3,1(%r7)
+ stbu %r3,1(%r8)
+ addme %r5,%r5
+ beq 2f
+2:
+ blr
+END(dtrace_copy)
+
+/*
+void
+dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
+ volatile uint16_t *flags)
+*/
+ASENTRY_NOPROF(dtrace_copystr)
+ addme %r7,%r3
+ addme %r8,%r4
+1:
+ lbzu %r3,1(%r7)
+ stbu %r3,1(%r8)
+ addme %r5,%r5
+ beq 2f
+ or %r3,%r3,%r3
+ beq 2f
+ andi. %r0,%r5,0x0fff
+ beq 2f
+ lwz %r0,0(%r6)
+ andi. %r0,%r0,CPU_DTRACE_BADADDR
+ beq 1b
+2:
+ blr
+END(dtrace_copystr)
+
+/*
+void dtrace_invop_init(void)
+*/
+ASENTRY_NOPROF(dtrace_invop_init)
+	/* XXX: implement it properly -- implement dtrace_invop_start */
+ li %r0,0
+ li %r3,dtrace_invop_jump_addr@l
+ addis %r3,%r3,dtrace_invop_jump_addr@ha
+ stw %r0,0(%r3)
+ blr
+END(dtrace_invop_init)
+
+/*
+void dtrace_invop_uninit(void)
+*/
+ASENTRY_NOPROF(dtrace_invop_uninit)
+ li %r0,0
+ li %r3,dtrace_invop_jump_addr@l
+ addis %r3,%r3,dtrace_invop_jump_addr@ha
+ stw %r0,0(%r3)
+ blr
+END(dtrace_invop_uninit)
+
+/*
+ * The panic() and cmn_err() functions invoke vpanic() as a common entry point
+ * into the panic code implemented in panicsys(). vpanic() is responsible
+ * for passing through the format string and arguments, and constructing a
+ * regs structure on the stack into which it saves the current register
+ * values. If we are not dying due to a fatal trap, these registers will
+ * then be preserved in panicbuf as the current processor state. Before
+ * invoking panicsys(), vpanic() activates the first panic trigger (see
+ * common/os/panic.c) and switches to the panic_stack if successful. Note that
+ * DTrace takes a slightly different panic path if it must panic from probe
+ * context. Instead of calling panic, it calls into dtrace_vpanic(), which
+ * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
+ * branches back into vpanic().
+ */
+
+/*
+void
+vpanic(const char *format, va_list alist)
+*/
+ASENTRY_NOPROF(vpanic) /* Initial stack layout: */
+
+vpanic_common:
+ blr
+END(vpanic)
+
+
+
+/*
+void
+dtrace_vpanic(const char *format, va_list alist)
+*/
+ASENTRY_NOPROF(dtrace_vpanic) /* Initial stack layout: */
+
+#if 0
+ bl dtrace_panic_trigger /* %eax = dtrace_panic_trigger() */
+#endif
+ b vpanic_common
+END(dtrace_vpanic)
+
+/*
+uintptr_t
+dtrace_caller(int aframes)
+*/
+ASENTRY_NOPROF(dtrace_caller)
+ li %r3, -1
+ blr
+END(dtrace_caller)
+
diff --git a/sys/cddl/dev/dtrace/powerpc/dtrace_isa.c b/sys/cddl/dev/dtrace/powerpc/dtrace_isa.c
new file mode 100644
index 0000000..a697816
--- /dev/null
+++ b/sys/cddl/dev/dtrace/powerpc/dtrace_isa.c
@@ -0,0 +1,534 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/stack.h>
+#include <sys/sysent.h>
+#include <sys/pcpu.h>
+
+#include <machine/frame.h>
+#include <machine/md_var.h>
+#include <machine/reg.h>
+#include <machine/stack.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
+#include "regset.h"
+
+uint8_t dtrace_fuword8_nocheck(void *);
+uint16_t dtrace_fuword16_nocheck(void *);
+uint32_t dtrace_fuword32_nocheck(void *);
+uint64_t dtrace_fuword64_nocheck(void *);
+
+/* Offset to the LR Save word (ppc32) */
+#define RETURN_OFFSET 4
+#define RETURN_OFFSET64 8
+
+#define INKERNEL(x) ((x) <= VM_MAX_KERNEL_ADDRESS && \
+ (x) >= VM_MIN_KERNEL_ADDRESS)
+
+greg_t
+dtrace_getfp(void)
+{
+ return (greg_t)__builtin_frame_address(0);
+}
+
+void
+dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
+ uint32_t *intrpc)
+{
+ int depth = 0;
+ register_t sp;
+ vm_offset_t callpc;
+ pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
+
+ if (intrpc != 0)
+ pcstack[depth++] = (pc_t) intrpc;
+
+ aframes++;
+
+ sp = dtrace_getfp();
+
+ while (depth < pcstack_limit) {
+ if (!INKERNEL((long) sp))
+ break;
+
+ callpc = *(uintptr_t *)(sp + RETURN_OFFSET);
+
+ if (!INKERNEL(callpc))
+ break;
+
+ if (aframes > 0) {
+ aframes--;
+ if ((aframes == 0) && (caller != 0)) {
+ pcstack[depth++] = caller;
+ }
+ }
+ else {
+ pcstack[depth++] = callpc;
+ }
+
+ sp = *(uintptr_t*)sp;
+ }
+
+ for (; depth < pcstack_limit; depth++) {
+ pcstack[depth] = 0;
+ }
+}
+
+static int
+dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
+ uintptr_t sp)
+{
+ proc_t *p = curproc;
+ int ret = 0;
+
+ ASSERT(pcstack == NULL || pcstack_limit > 0);
+
+ while (pc != 0) {
+ ret++;
+ if (pcstack != NULL) {
+ *pcstack++ = (uint64_t)pc;
+ pcstack_limit--;
+ if (pcstack_limit <= 0)
+ break;
+ }
+
+ if (sp == 0)
+ break;
+
+ if (SV_PROC_FLAG(p, SV_ILP32)) {
+ pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
+ sp = dtrace_fuword32((void *)sp);
+ }
+ else {
+ pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
+ sp = dtrace_fuword64((void *)sp);
+ }
+ }
+
+ return (ret);
+}
+
+void
+dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
+{
+ proc_t *p = curproc;
+ struct trapframe *tf;
+ uintptr_t pc, sp;
+ volatile uint16_t *flags =
+ (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
+ int n;
+
+ if (*flags & CPU_DTRACE_FAULT)
+ return;
+
+ if (pcstack_limit <= 0)
+ return;
+
+ /*
+ * If there's no user context we still need to zero the stack.
+ */
+ if (p == NULL || (tf = curthread->td_frame) == NULL)
+ goto zero;
+
+ *pcstack++ = (uint64_t)p->p_pid;
+ pcstack_limit--;
+
+ if (pcstack_limit <= 0)
+ return;
+
+ pc = tf->srr0;
+ sp = tf->fixreg[1];
+
+ if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
+ /*
+ * In an entry probe. The frame pointer has not yet been
+ * pushed (that happens in the function prologue). The
+ * best approach is to add the current pc as a missing top
+ * of stack and back the pc up to the caller, which is stored
+ * at the current stack pointer address since the call
+ * instruction puts it there right before the branch.
+ */
+
+ *pcstack++ = (uint64_t)pc;
+ pcstack_limit--;
+ if (pcstack_limit <= 0)
+ return;
+
+ pc = tf->lr;
+ }
+
+ n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
+ ASSERT(n >= 0);
+ ASSERT(n <= pcstack_limit);
+
+ pcstack += n;
+ pcstack_limit -= n;
+
+zero:
+ while (pcstack_limit-- > 0)
+ *pcstack++ = 0;
+}
+
+int
+dtrace_getustackdepth(void)
+{
+ proc_t *p = curproc;
+ struct trapframe *tf;
+ uintptr_t pc, sp;
+ int n = 0;
+
+ if (p == NULL || (tf = curthread->td_frame) == NULL)
+ return (0);
+
+ if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
+ return (-1);
+
+ pc = tf->srr0;
+ sp = tf->fixreg[1];
+
+ if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
+ /*
+ * In an entry probe. The frame pointer has not yet been
+ * pushed (that happens in the function prologue). The
+ * best approach is to add the current pc as a missing top
+ * of stack and back the pc up to the caller, which is stored
+ * at the current stack pointer address since the call
+ * instruction puts it there right before the branch.
+ */
+
+ if (SV_PROC_FLAG(p, SV_ILP32)) {
+ pc = dtrace_fuword32((void *) sp);
+ }
+ else
+ pc = dtrace_fuword64((void *) sp);
+ n++;
+ }
+
+ n += dtrace_getustack_common(NULL, 0, pc, sp);
+
+ return (n);
+}
+
+void
+dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
+{
+ proc_t *p = curproc;
+ struct trapframe *tf;
+ uintptr_t pc, sp;
+ volatile uint16_t *flags =
+ (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
+#ifdef notyet /* XXX signal stack */
+ uintptr_t oldcontext;
+ size_t s1, s2;
+#endif
+
+ if (*flags & CPU_DTRACE_FAULT)
+ return;
+
+ if (pcstack_limit <= 0)
+ return;
+
+ /*
+ * If there's no user context we still need to zero the stack.
+ */
+ if (p == NULL || (tf = curthread->td_frame) == NULL)
+ goto zero;
+
+ *pcstack++ = (uint64_t)p->p_pid;
+ pcstack_limit--;
+
+ if (pcstack_limit <= 0)
+ return;
+
+ pc = tf->srr0;
+ sp = tf->fixreg[1];
+
+#ifdef notyet /* XXX signal stack */
+ oldcontext = lwp->lwp_oldcontext;
+ s1 = sizeof (struct xframe) + 2 * sizeof (long);
+ s2 = s1 + sizeof (siginfo_t);
+#endif
+
+ if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
+ *pcstack++ = (uint64_t)pc;
+ *fpstack++ = 0;
+ pcstack_limit--;
+ if (pcstack_limit <= 0)
+ return;
+
+ if (SV_PROC_FLAG(p, SV_ILP32)) {
+ pc = dtrace_fuword32((void *)sp);
+ }
+ else {
+ pc = dtrace_fuword64((void *)sp);
+ }
+ }
+
+ while (pc != 0) {
+ *pcstack++ = (uint64_t)pc;
+ *fpstack++ = sp;
+ pcstack_limit--;
+ if (pcstack_limit <= 0)
+ break;
+
+ if (sp == 0)
+ break;
+
+#ifdef notyet /* XXX signal stack */
+ if (oldcontext == sp + s1 || oldcontext == sp + s2) {
+ ucontext_t *ucp = (ucontext_t *)oldcontext;
+ greg_t *gregs = ucp->uc_mcontext.gregs;
+
+ sp = dtrace_fulword(&gregs[REG_FP]);
+ pc = dtrace_fulword(&gregs[REG_PC]);
+
+ oldcontext = dtrace_fulword(&ucp->uc_link);
+ } else
+#endif /* XXX */
+ {
+ if (SV_PROC_FLAG(p, SV_ILP32)) {
+ pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
+ sp = dtrace_fuword32((void *)sp);
+ }
+ else {
+ pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
+ sp = dtrace_fuword64((void *)sp);
+ }
+ }
+
+ /*
+ * This is totally bogus: if we faulted, we're going to clear
+ * the fault and break. This is to deal with the apparently
+ * broken Java stacks on x86.
+ */
+ if (*flags & CPU_DTRACE_FAULT) {
+ *flags &= ~CPU_DTRACE_FAULT;
+ break;
+ }
+ }
+
+zero:
+ while (pcstack_limit-- > 0)
+ *pcstack++ = 0;
+}
+
+/*ARGSUSED*/
+uint64_t
+dtrace_getarg(int arg, int aframes)
+{
+ return (0);
+}
+
+#ifdef notyet
+{
+ int depth = 0;
+ register_t sp;
+ vm_offset_t callpc;
+ pc_t caller = (pc_t) solaris_cpu[curcpu].cpu_dtrace_caller;
+
+ if (intrpc != 0)
+ pcstack[depth++] = (pc_t) intrpc;
+
+ aframes++;
+
+ sp = dtrace_getfp();
+
+ while (depth < pcstack_limit) {
+ if (!INKERNEL((long) frame))
+ break;
+
+ callpc = *(void **)(sp + RETURN_OFFSET);
+
+ if (!INKERNEL(callpc))
+ break;
+
+ if (aframes > 0) {
+ aframes--;
+ if ((aframes == 0) && (caller != 0)) {
+ pcstack[depth++] = caller;
+ }
+ }
+ else {
+ pcstack[depth++] = callpc;
+ }
+
+ sp = *(void **)sp;
+ }
+
+ for (; depth < pcstack_limit; depth++) {
+ pcstack[depth] = 0;
+ }
+}
+#endif
+
+int
+dtrace_getstackdepth(int aframes)
+{
+ int depth = 0;
+ register_t sp;
+
+ aframes++;
+ sp = dtrace_getfp();
+ depth++;
+ for(;;) {
+ if (!INKERNEL((long) sp))
+ break;
+ if (!INKERNEL((long) *(void **)sp))
+ break;
+ depth++;
+ sp = *(uintptr_t *)sp;
+ }
+ if (depth < aframes)
+ return 0;
+ else
+ return depth - aframes;
+}
+
+ulong_t
+dtrace_getreg(struct trapframe *rp, uint_t reg)
+{
+ if (reg < 32)
+ return (rp->fixreg[reg]);
+
+ switch (reg) {
+ case 33:
+ return (rp->lr);
+ case 34:
+ return (rp->cr);
+ case 35:
+ return (rp->xer);
+ case 36:
+ return (rp->ctr);
+ case 37:
+ return (rp->srr0);
+ case 38:
+ return (rp->srr1);
+ case 39:
+ return (rp->exc);
+ default:
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
+ return (0);
+ }
+}
+
+static int
+dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
+{
+ ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);
+
+ if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
+ return (0);
+ }
+
+ return (1);
+}
+
+void
+dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
+ volatile uint16_t *flags)
+{
+ if (dtrace_copycheck(uaddr, kaddr, size))
+ dtrace_copy(uaddr, kaddr, size);
+}
+
+void
+dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
+ volatile uint16_t *flags)
+{
+ if (dtrace_copycheck(uaddr, kaddr, size))
+ dtrace_copy(kaddr, uaddr, size);
+}
+
+void
+dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
+ volatile uint16_t *flags)
+{
+ if (dtrace_copycheck(uaddr, kaddr, size))
+ dtrace_copystr(uaddr, kaddr, size, flags);
+}
+
+void
+dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
+ volatile uint16_t *flags)
+{
+ if (dtrace_copycheck(uaddr, kaddr, size))
+ dtrace_copystr(kaddr, uaddr, size, flags);
+}
+
+uint8_t
+dtrace_fuword8(void *uaddr)
+{
+ if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
+ return (0);
+ }
+ return (dtrace_fuword8_nocheck(uaddr));
+}
+
+uint16_t
+dtrace_fuword16(void *uaddr)
+{
+ if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
+ return (0);
+ }
+ return (dtrace_fuword16_nocheck(uaddr));
+}
+
+uint32_t
+dtrace_fuword32(void *uaddr)
+{
+ if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
+ return (0);
+ }
+ return (dtrace_fuword32_nocheck(uaddr));
+}
+
+uint64_t
+dtrace_fuword64(void *uaddr)
+{
+ if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
+ DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
+ cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
+ return (0);
+ }
+ return (dtrace_fuword64_nocheck(uaddr));
+}
diff --git a/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c b/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
new file mode 100644
index 0000000..22fb442
--- /dev/null
+++ b/sys/cddl/dev/dtrace/powerpc/dtrace_subr.c
@@ -0,0 +1,201 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ *
+ */
+/*
+ * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/kmem.h>
+#include <sys/smp.h>
+#include <sys/dtrace_impl.h>
+#include <sys/dtrace_bsd.h>
+#include <machine/clock.h>
+#include <machine/frame.h>
+#include <machine/trap.h>
+#include <vm/pmap.h>
+
+#define DELAYBRANCH(x) ((int)(x) < 0)
+
+extern uintptr_t dtrace_in_probe_addr;
+extern int dtrace_in_probe;
+extern dtrace_id_t dtrace_probeid_error;
+
+int dtrace_invop(uintptr_t, uintptr_t *, uintptr_t);
+
+typedef struct dtrace_invop_hdlr {
+ int (*dtih_func)(uintptr_t, uintptr_t *, uintptr_t);
+ struct dtrace_invop_hdlr *dtih_next;
+} dtrace_invop_hdlr_t;
+
+dtrace_invop_hdlr_t *dtrace_invop_hdlr;
+
+int
+dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
+{
+ dtrace_invop_hdlr_t *hdlr;
+ int rval;
+
+ for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
+ if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)
+ return (rval);
+
+ return (0);
+}
+
+
+/*ARGSUSED*/
+void
+dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
+{
+ /*
+ * No toxic regions?
+ */
+}
+
+void
+dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
+{
+ cpuset_t cpus;
+
+ if (cpu == DTRACE_CPUALL)
+ cpus = all_cpus;
+ else
+ CPU_SETOF(cpu, &cpus);
+
+ smp_rendezvous_cpus(cpus, smp_no_rendevous_barrier, func,
+ smp_no_rendevous_barrier, arg);
+}
+
+static void
+dtrace_sync_func(void)
+{
+}
+
+void
+dtrace_sync(void)
+{
+ dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
+}
+
+/*
+ * DTrace needs a high resolution time function which can
+ * be called from a probe context and guaranteed not to have
+ * instrumented with probes itself.
+ *
+ * Returns nanoseconds since boot.
+ */
+uint64_t
+dtrace_gethrtime()
+{
+ struct timespec curtime;
+
+ nanouptime(&curtime);
+
+ return (curtime.tv_sec * 1000000000UL + curtime.tv_nsec);
+
+}
+
+uint64_t
+dtrace_gethrestime(void)
+{
+ struct timespec curtime;
+
+ getnanotime(&curtime);
+
+ return (curtime.tv_sec * 1000000000UL + curtime.tv_nsec);
+}
+
+/* Function to handle DTrace traps during probes. See amd64/amd64/trap.c */
+int
+dtrace_trap(struct trapframe *frame, u_int type)
+{
+ /*
+ * A trap can occur while DTrace executes a probe. Before
+ * executing the probe, DTrace blocks re-scheduling and sets
+	 * a flag in its per-cpu flags to indicate that it doesn't
+ * want to fault. On returning from the probe, the no-fault
+ * flag is cleared and finally re-scheduling is enabled.
+ *
+ * Check if DTrace has enabled 'no-fault' mode:
+ *
+ */
+ if ((cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT) != 0) {
+ /*
+ * There are only a couple of trap types that are expected.
+ * All the rest will be handled in the usual way.
+ */
+ switch (type) {
+ /* Page fault. */
+ case EXC_DSI:
+ case EXC_DSE:
+ /* Flag a bad address. */
+ cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
+ cpu_core[curcpu].cpuc_dtrace_illval = frame->cpu.aim.dar;
+
+ /*
+ * Offset the instruction pointer to the instruction
+ * following the one causing the fault.
+ */
+ frame->srr0 += sizeof(int);
+ return (1);
+ case EXC_ISI:
+ case EXC_ISE:
+ /* Flag a bad address. */
+ cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
+ cpu_core[curcpu].cpuc_dtrace_illval = frame->srr0;
+
+ /*
+ * Offset the instruction pointer to the instruction
+ * following the one causing the fault.
+ */
+ frame->srr0 += sizeof(int);
+ return (1);
+ default:
+ /* Handle all other traps in the usual way. */
+ break;
+ }
+ }
+
+ /* Handle the trap in the usual way. */
+ return (0);
+}
+
+void
+dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
+ int fault, int fltoffs, uintptr_t illval)
+{
+
+ dtrace_probe(dtrace_probeid_error, (uint64_t)(uintptr_t)state,
+ (uintptr_t)epid,
+ (uintptr_t)which, (uintptr_t)fault, (uintptr_t)fltoffs);
+}
diff --git a/sys/cddl/dev/dtrace/powerpc/regset.h b/sys/cddl/dev/dtrace/powerpc/regset.h
new file mode 100644
index 0000000..6497388
--- /dev/null
+++ b/sys/cddl/dev/dtrace/powerpc/regset.h
@@ -0,0 +1,63 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License"). You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ * $FreeBSD$
+ */
+/*
+ * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/* Copyright (c) 1990, 1991 UNIX System Laboratories, Inc. */
+
+/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
+/* All Rights Reserved */
+
+#ifndef _REGSET_H
+#define _REGSET_H
+
+/*
+ * #pragma ident "@(#)regset.h 1.11 05/06/08 SMI"
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * XXXDTRACE: define registers properly
+ */
+
+#if 0
+#define REG_PC PC
+#define REG_FP EBP
+#define REG_SP SP
+#define REG_PS EFL
+#define REG_R0 EAX
+#define REG_R1 EDX
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _REGSET_H */
+
diff --git a/sys/cddl/dev/lockstat/lockstat.c b/sys/cddl/dev/lockstat/lockstat.c
index f16c813..9b3f7d7 100644
--- a/sys/cddl/dev/lockstat/lockstat.c
+++ b/sys/cddl/dev/lockstat/lockstat.c
@@ -45,7 +45,8 @@
#include <sys/dtrace.h>
#include <sys/lockstat.h>
-#if defined(__i386__) || defined(__amd64__) || defined(__mips__)
+#if defined(__i386__) || defined(__amd64__) || \
+ defined(__mips__) || defined(__powerpc__)
#define LOCKSTAT_AFRAMES 1
#else
#error "architecture not supported"
diff --git a/sys/cddl/dev/profile/profile.c b/sys/cddl/dev/profile/profile.c
index 44dea7b..051ffa1 100644
--- a/sys/cddl/dev/profile/profile.c
+++ b/sys/cddl/dev/profile/profile.c
@@ -119,6 +119,13 @@
#define PROF_ARTIFICIAL_FRAMES 3
#endif
+#ifdef __powerpc__
+/*
+ * This value is bogus, just to make the module compilable on powerpc.
+ */
+#define PROF_ARTIFICIAL_FRAMES 3
+#endif
+
typedef struct profile_probe {
char prof_name[PROF_NAMELEN];
dtrace_id_t prof_id;
diff --git a/sys/conf/Makefile.pc98 b/sys/conf/Makefile.pc98
index 18ee498..e3a265d 100644
--- a/sys/conf/Makefile.pc98
+++ b/sys/conf/Makefile.pc98
@@ -34,6 +34,10 @@ MACHINE=pc98
MKMODULESENV+= MACHINE=${MACHINE}
+# XXX: clang integrated-as doesn't grok .codeNN directives yet
+ASM_CFLAGS.mpboot.s= ${CLANG_NO_IAS}
+ASM_CFLAGS+= ${ASM_CFLAGS.${.IMPSRC:T}}
+
%BEFORE_DEPEND
%OBJS
diff --git a/sys/conf/files b/sys/conf/files
index 5120310..134935e 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1163,15 +1163,14 @@ dev/drm/mach64_state.c optional mach64drm
dev/drm/mga_dma.c optional mgadrm
dev/drm/mga_drv.c optional mgadrm
dev/drm/mga_irq.c optional mgadrm
-dev/drm/mga_state.c optional mgadrm \
- compile-with "${NORMAL_C} -finline-limit=13500"
+dev/drm/mga_state.c optional mgadrm
dev/drm/mga_warp.c optional mgadrm
dev/drm/r128_cce.c optional r128drm \
compile-with "${NORMAL_C} ${NO_WUNUSED_VALUE} ${NO_WCONSTANT_CONVERSION}"
dev/drm/r128_drv.c optional r128drm
dev/drm/r128_irq.c optional r128drm
dev/drm/r128_state.c optional r128drm \
- compile-with "${NORMAL_C} ${NO_WUNUSED_VALUE} -finline-limit=13500"
+ compile-with "${NORMAL_C} ${NO_WUNUSED_VALUE}"
dev/drm/r300_cmdbuf.c optional radeondrm
dev/drm/r600_blit.c optional radeondrm
dev/drm/r600_cp.c optional radeondrm \
diff --git a/sys/conf/kern.pre.mk b/sys/conf/kern.pre.mk
index 7e6e20a..442eac9 100644
--- a/sys/conf/kern.pre.mk
+++ b/sys/conf/kern.pre.mk
@@ -102,12 +102,18 @@ CLANG_NO_IAS= -no-integrated-as
.endif
.if defined(PROFLEVEL) && ${PROFLEVEL} >= 1
-CFLAGS+= -DGPROF -falign-functions=16
+CFLAGS+= -DGPROF
+.if ${COMPILER_TYPE} != "clang"
+CFLAGS+= -falign-functions=16
+.endif
.if ${PROFLEVEL} >= 2
CFLAGS+= -DGPROF4 -DGUPROF
-PROF= -pg -mprofiler-epilogue
+PROF= -pg
+.if ${COMPILER_TYPE} != "clang"
+PROF+= -mprofiler-epilogue
+.endif
.else
-PROF= -pg
+PROF= -pg
.endif
.endif
DEFINED_PROF= ${PROF}
diff --git a/sys/dev/aac/aac_debug.c b/sys/dev/aac/aac_debug.c
index 9cdfd3f..353c6fa 100644
--- a/sys/dev/aac/aac_debug.c
+++ b/sys/dev/aac/aac_debug.c
@@ -160,7 +160,7 @@ void
aac_panic(struct aac_softc *sc, char *reason)
{
aac_print_queues(sc);
- panic(reason);
+ panic("%s", reason);
}
/*
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 81cff16..56635e5 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -476,7 +476,7 @@ ACPI_HANDLE acpi_GetReference(ACPI_HANDLE scope, ACPI_OBJECT *obj);
/* Default maximum number of tasks to enqueue. */
#ifndef ACPI_MAX_TASKS
-#define ACPI_MAX_TASKS 32
+#define ACPI_MAX_TASKS MAX(32, MAXCPU * 2)
#endif
/* Default number of task queue threads to start. */
diff --git a/sys/dev/ahci/ahciem.c b/sys/dev/ahci/ahciem.c
index a0978d1..389da3f 100644
--- a/sys/dev/ahci/ahciem.c
+++ b/sys/dev/ahci/ahciem.c
@@ -399,10 +399,11 @@ ahci_em_emulate_ses_on_led(device_t dev, union ccb *ccb)
ccb->ataio.cmd.sector_count >= 2) {
bzero(buf, ccb->ataio.dxfer_len);
page->hdr.page_code = 0;
- scsi_ulto2b(3, page->hdr.length);
+ scsi_ulto2b(4, page->hdr.length);
buf[4] = 0;
buf[5] = 1;
buf[6] = 2;
+ buf[7] = 7;
ccb->ccb_h.status = CAM_REQ_CMP;
goto out;
}
diff --git a/sys/dev/asmc/asmc.c b/sys/dev/asmc/asmc.c
index 5fc8c41..bc4c4e9 100644
--- a/sys/dev/asmc/asmc.c
+++ b/sys/dev/asmc/asmc.c
@@ -839,7 +839,7 @@ out:
strlcat(buf, buf2, sizeof(buf));
}
strlcat(buf, " \n", sizeof(buf));
- device_printf(dev, buf);
+ device_printf(dev, "%s", buf);
}
return (error);
diff --git a/sys/dev/ath/ath_hal/ah.h b/sys/dev/ath/ath_hal/ah.h
index bce46c9..12e9204 100644
--- a/sys/dev/ath/ath_hal/ah.h
+++ b/sys/dev/ath/ath_hal/ah.h
@@ -1215,6 +1215,9 @@ typedef struct
int ath_hal_enable_ani; /* should set this.. */
int ath_hal_cwm_ignore_ext_cca;
int ath_hal_show_bb_panic;
+ int ath_hal_ant_ctrl_comm2g_switch_enable;
+ int ath_hal_ext_atten_margin_cfg;
+ int ath_hal_war70c;
} HAL_OPS_CONFIG;
/*
diff --git a/sys/dev/ath/ath_hal/ah_debug.h b/sys/dev/ath/ath_hal/ah_debug.h
index 6cd2627..c5aac60 100644
--- a/sys/dev/ath/ath_hal/ah_debug.h
+++ b/sys/dev/ath/ath_hal/ah_debug.h
@@ -52,6 +52,7 @@ enum {
HAL_DEBUG_CHANNEL = 0x02000000,
HAL_DEBUG_QUEUE = 0x04000000,
HAL_DEBUG_PRINT_REG = 0x08000000,
+ HAL_DEBUG_FCS_RTT = 0x10000000,
HAL_DEBUG_UNMASKABLE = 0x80000000, /* always printed */
HAL_DEBUG_ANY = 0xffffffff
diff --git a/sys/dev/ath/ath_hal/ah_internal.h b/sys/dev/ath/ath_hal/ah_internal.h
index 416130c..2d1087a 100644
--- a/sys/dev/ath/ath_hal/ah_internal.h
+++ b/sys/dev/ath/ath_hal/ah_internal.h
@@ -276,7 +276,9 @@ typedef struct {
halApmEnable : 1,
halIntrMitigation : 1,
hal49GhzSupport : 1,
- halAntDivCombSupport : 1;
+ halAntDivCombSupport : 1,
+ halAntDivCombSupportOrg : 1,
+ halRadioRetentionSupport : 1;
uint32_t halWirelessModes;
uint16_t halTotalQueues;
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index cfd0559..4ef23a2 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -117,6 +117,15 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
+#ifdef ATH_DEBUG_ALQ
+#include <dev/ath/if_ath_alq.h>
+#endif
+
+/*
+ * Only enable this if you're working on PS-POLL support.
+ */
+#undef ATH_SW_PSQ
+
/*
* ATH_BCBUF determines the number of vap's that can transmit
* beacons and also (currently) the number of vap's that can
@@ -877,6 +886,13 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
#endif /* ATH_ENABLE_RADIOTAP_VENDOR_EXT */
/*
+ * Setup the ALQ logging if required
+ */
+#ifdef ATH_DEBUG_ALQ
+ if_ath_alq_init(&sc->sc_alq, device_get_nameunit(sc->sc_dev));
+#endif
+
+ /*
* Setup dynamic sysctl's now that country code and
* regdomain are available from the hal.
*/
@@ -936,6 +952,10 @@ ath_detach(struct ath_softc *sc)
#endif
ath_rate_detach(sc->sc_rc);
+#ifdef ATH_DEBUG_ALQ
+ if_ath_alq_tidyup(&sc->sc_alq);
+#endif
+
ath_dfs_detach(sc);
ath_desc_free(sc);
ath_txdma_teardown(sc);
@@ -5424,6 +5444,7 @@ ath_dfs_tasklet(void *p, int npending)
static void
ath_node_powersave(struct ieee80211_node *ni, int enable)
{
+#ifdef ATH_SW_PSQ
struct ath_node *an = ATH_NODE(ni);
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_ifp->if_softc;
@@ -5443,6 +5464,12 @@ ath_node_powersave(struct ieee80211_node *ni, int enable)
/* Update net80211 state */
avp->av_node_ps(ni, enable);
+#else
+ struct ath_vap *avp = ATH_VAP(ni->ni_vap);
+
+ /* Update net80211 state */
+ avp->av_node_ps(ni, enable);
+#endif /* ATH_SW_PSQ */
}
/*
@@ -5483,6 +5510,7 @@ ath_node_powersave(struct ieee80211_node *ni, int enable)
static int
ath_node_set_tim(struct ieee80211_node *ni, int enable)
{
+#ifdef ATH_SW_PSQ
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_ifp->if_softc;
struct ath_node *an = ATH_NODE(ni);
@@ -5567,6 +5595,18 @@ ath_node_set_tim(struct ieee80211_node *ni, int enable)
}
return (changed);
+#else
+ struct ath_vap *avp = ATH_VAP(ni->ni_vap);
+
+ /*
+ * Some operating modes don't set av_set_tim(), so don't
+ * update it here.
+ */
+ if (avp->av_set_tim == NULL)
+ return (0);
+
+ return (avp->av_set_tim(ni, enable));
+#endif /* ATH_SW_PSQ */
}
/*
@@ -5594,6 +5634,7 @@ void
ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
int enable)
{
+#ifdef ATH_SW_PSQ
struct ath_node *an;
struct ath_vap *avp;
@@ -5657,6 +5698,9 @@ ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
ATH_NODE_UNLOCK(an);
}
}
+#else
+ return;
+#endif /* ATH_SW_PSQ */
}
MODULE_VERSION(if_ath, 1);
diff --git a/sys/dev/ath/if_ath_alq.c b/sys/dev/ath/if_ath_alq.c
new file mode 100644
index 0000000..b838772
--- /dev/null
+++ b/sys/dev/ath/if_ath_alq.c
@@ -0,0 +1,172 @@
+/*-
+ * Copyright (c) 2012 Adrian Chadd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+#include "opt_ah.h"
+#include "opt_ath.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+#include <sys/bus.h>
+#include <sys/malloc.h>
+#include <sys/proc.h>
+#include <sys/pcpu.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/alq.h>
+
+#include <dev/ath/if_ath_alq.h>
+
+#ifdef ATH_DEBUG_ALQ
+static struct ale *
+if_ath_alq_get(struct if_ath_alq *alq, int len)
+{
+ struct ale *ale;
+
+ if (alq->sc_alq_isactive == 0)
+ return (NULL);
+
+ ale = alq_getn(alq->sc_alq_alq, len, ALQ_NOWAIT);
+ if (! ale)
+ alq->sc_alq_numlost++;
+ return (ale);
+}
+
+void
+if_ath_alq_init(struct if_ath_alq *alq, const char *devname)
+{
+
+ bzero(alq, sizeof(*alq));
+
+ strncpy(alq->sc_alq_devname, devname, ATH_ALQ_DEVNAME_LEN);
+ printf("%s (%s): attached\n", __func__, alq->sc_alq_devname);
+ snprintf(alq->sc_alq_filename, ATH_ALQ_FILENAME_LEN,
+ "/tmp/ath_%s_alq.log", alq->sc_alq_devname);
+
+ /* XXX too conservative, right? */
+ alq->sc_alq_qsize = (64*1024);
+}
+
+void
+if_ath_alq_tidyup(struct if_ath_alq *alq)
+{
+
+ if_ath_alq_stop(alq);
+ printf("%s (%s): detached\n", __func__, alq->sc_alq_devname);
+ bzero(alq, sizeof(*alq));
+}
+
+int
+if_ath_alq_start(struct if_ath_alq *alq)
+{
+ int error;
+
+ if (alq->sc_alq_isactive)
+ return (0);
+
+ /*
+ * Create a variable-length ALQ.
+ */
+ error = alq_open(&alq->sc_alq_alq, alq->sc_alq_filename,
+ curthread->td_ucred, ALQ_DEFAULT_CMODE,
+ alq->sc_alq_qsize, 0);
+
+ if (error != 0) {
+ printf("%s (%s): failed, err=%d\n", __func__,
+ alq->sc_alq_devname, error);
+ } else {
+ printf("%s (%s): opened\n", __func__, alq->sc_alq_devname);
+ alq->sc_alq_isactive = 1;
+ }
+ return (error);
+}
+
+int
+if_ath_alq_stop(struct if_ath_alq *alq)
+{
+
+ if (alq->sc_alq_isactive == 0)
+ return (0);
+
+ printf("%s (%s): closed\n", __func__, alq->sc_alq_devname);
+
+ alq->sc_alq_isactive = 0;
+ alq_close(alq->sc_alq_alq);
+ alq->sc_alq_alq = NULL;
+
+ return (0);
+}
+
+/*
+ * Post a debug message to the ALQ.
+ *
+ * "len" is the size of the buf payload in bytes.
+ */
+void
+if_ath_alq_post(struct if_ath_alq *alq, uint16_t op, uint16_t len,
+ const char *buf)
+{
+ struct if_ath_alq_hdr *ap;
+ struct ale *ale;
+
+ if (! if_ath_alq_checkdebug(alq, op))
+ return;
+
+ /*
+ * Enforce some semblance of sanity on 'len'.
+ * Although strictly speaking, any length is possible -
+ * just be conservative so things don't get out of hand.
+ */
+ if (len > ATH_ALQ_PAYLOAD_LEN)
+ len = ATH_ALQ_PAYLOAD_LEN;
+
+ ale = if_ath_alq_get(alq, len + sizeof(struct if_ath_alq_hdr));
+
+ if (ale == NULL)
+ return;
+
+ ap = (struct if_ath_alq_hdr *) ale->ae_data;
+ ap->threadid = (uint64_t) curthread->td_tid;
+ ap->tstamp = (uint32_t) ticks;
+ ap->op = op;
+ ap->len = len;
+
+ /*
+ * Copy the payload _after_ the header field.
+ */
+ memcpy(((char *) ap) + sizeof(struct if_ath_alq_hdr),
+ buf,
+ ap->len);
+
+ alq_post(alq->sc_alq_alq, ale);
+}
+#endif /* ATH_DEBUG_ALQ */
diff --git a/sys/dev/ath/if_ath_alq.h b/sys/dev/ath/if_ath_alq.h
new file mode 100644
index 0000000..aeee12c
--- /dev/null
+++ b/sys/dev/ath/if_ath_alq.h
@@ -0,0 +1,82 @@
+/*-
+ * Copyright (c) 2012 Adrian Chadd
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
+ * redistribution must be conditioned upon including a substantially
+ * similar Disclaimer requirement for further binary redistribution.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
+ * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
+ * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGES.
+ *
+ * $FreeBSD$
+ */
+#ifndef __IF_ATH_ALQ_H__
+#define __IF_ATH_ALQ_H__
+
+#define ATH_ALQ_FILENAME_LEN 128
+#define ATH_ALQ_DEVNAME_LEN 32
+
+struct if_ath_alq {
+ uint32_t sc_alq_debug; /* Debug flags to report */
+ struct alq * sc_alq_alq; /* alq state */
+ unsigned int sc_alq_qsize; /* queue size */
+ unsigned int sc_alq_numlost; /* number of "lost" entries */
+ int sc_alq_isactive;
+ char sc_alq_devname[ATH_ALQ_DEVNAME_LEN];
+ char sc_alq_filename[ATH_ALQ_FILENAME_LEN];
+};
+
+#define ATH_ALQ_EDMA_TXSTATUS 1
+#define ATH_ALQ_EDMA_RXSTATUS 2
+#define ATH_ALQ_EDMA_TXDESC 3
+
+/* 128 bytes in total */
+#define ATH_ALQ_PAYLOAD_LEN 112
+
+struct if_ath_alq_hdr {
+ uint64_t threadid;
+ uint32_t tstamp;
+ uint16_t op;
+ uint16_t len; /* Length of (optional) payload */
+};
+
+struct if_ath_alq_payload {
+ struct if_ath_alq_hdr hdr;
+ char payload[];
+};
+
+#ifdef _KERNEL
+static inline int
+if_ath_alq_checkdebug(struct if_ath_alq *alq, uint16_t op)
+{
+
+ return (alq->sc_alq_debug & (1 << (op - 1)));
+}
+
+extern void if_ath_alq_init(struct if_ath_alq *alq, const char *devname);
+extern void if_ath_alq_tidyup(struct if_ath_alq *alq);
+extern int if_ath_alq_start(struct if_ath_alq *alq);
+extern int if_ath_alq_stop(struct if_ath_alq *alq);
+extern void if_ath_alq_post(struct if_ath_alq *alq, uint16_t op,
+ uint16_t len, const char *buf);
+#endif /* _KERNEL */
+
+#endif
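As a rough usage sketch (not part of this commit), a driver path that has access to a struct if_ath_alq instance — assumed here to be embedded in a hypothetical softc as sc_alq, following the pattern the patch itself uses in if_ath_rx_edma.c and if_ath_tx_edma.c — gates on the debug mask and then posts a fixed-size record:

#ifdef ATH_DEBUG_ALQ
	/*
	 * Hypothetical example: log a raw EDMA TX status blob.  Assumes
	 * "sc" is a softc embedding a struct if_ath_alq as sc_alq, that
	 * "txstatus" is a local buffer holding the raw status descriptor
	 * of "len" bytes, and that logging was started via
	 * if_ath_alq_start() (the "enable" sysctl).  "len" should not
	 * exceed ATH_ALQ_PAYLOAD_LEN; if_ath_alq_post() clamps it anyway.
	 */
	if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
		if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
		    len, (const char *) txstatus);
#endif /* ATH_DEBUG_ALQ */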
diff --git a/sys/dev/ath/if_ath_rx.c b/sys/dev/ath/if_ath_rx.c
index d614d5b..ff160b9 100644
--- a/sys/dev/ath/if_ath_rx.c
+++ b/sys/dev/ath/if_ath_rx.c
@@ -115,6 +115,10 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
+#ifdef ATH_DEBUG_ALQ
+#include <dev/ath/if_ath_alq.h>
+#endif
+
/*
* Calculate the receive filter according to the
* operating mode and state:
diff --git a/sys/dev/ath/if_ath_rx_edma.c b/sys/dev/ath/if_ath_rx_edma.c
index bb5f082..dc7fd9a 100644
--- a/sys/dev/ath/if_ath_rx_edma.c
+++ b/sys/dev/ath/if_ath_rx_edma.c
@@ -117,6 +117,10 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/if_ath_rx_edma.h>
+#ifdef ATH_DEBUG_ALQ
+#include <dev/ath/if_ath_alq.h>
+#endif
+
/*
* some general macros
*/
@@ -282,7 +286,7 @@ static void
ath_edma_recv_flush(struct ath_softc *sc)
{
- device_printf(sc->sc_dev, "%s: called\n", __func__);
+ DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__);
ATH_PCU_LOCK(sc);
sc->sc_rxproc_cnt++;
@@ -357,7 +361,12 @@ ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype,
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK);
-#endif
+#endif /* ATH_DEBUG */
+#ifdef ATH_DEBUG_ALQ
+ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS))
+ if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS,
+ sc->sc_rx_statuslen, (char *) ds);
+#endif /* ATH_DEBUG_ALQ */
if (bf->bf_rxstatus == HAL_EINPROGRESS)
break;
diff --git a/sys/dev/ath/if_ath_sysctl.c b/sys/dev/ath/if_ath_sysctl.c
index 9c9ce52..1ab627f 100644
--- a/sys/dev/ath/if_ath_sysctl.c
+++ b/sys/dev/ath/if_ath_sysctl.c
@@ -99,6 +99,10 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/ath_tx99/ath_tx99.h>
#endif
+#ifdef ATH_DEBUG_ALQ
+#include <dev/ath/if_ath_alq.h>
+#endif
+
static int
ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
{
@@ -501,6 +505,57 @@ ath_sysctl_forcebstuck(SYSCTL_HANDLER_ARGS)
return 0;
}
+
+#ifdef ATH_DEBUG_ALQ
+static int
+ath_sysctl_alq_log(SYSCTL_HANDLER_ARGS)
+{
+ struct ath_softc *sc = arg1;
+ int error, enable;
+
+ enable = (sc->sc_alq.sc_alq_isactive);
+
+ error = sysctl_handle_int(oidp, &enable, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ else if (enable)
+ error = if_ath_alq_start(&sc->sc_alq);
+ else
+ error = if_ath_alq_stop(&sc->sc_alq);
+ return (error);
+}
+
+/*
+ * Attach the ALQ debugging if required.
+ */
+static void
+ath_sysctl_alq_attach(struct ath_softc *sc)
+{
+ struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+
+ tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "alq", CTLFLAG_RD,
+ NULL, "Atheros ALQ logging parameters");
+ child = SYSCTL_CHILDREN(tree);
+
+ SYSCTL_ADD_STRING(ctx, child, OID_AUTO, "filename",
+ CTLFLAG_RW, sc->sc_alq.sc_alq_filename, 0, "ALQ filename");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "enable", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
+ ath_sysctl_alq_log, "I", "");
+
+ SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "debugmask", CTLFLAG_RW, &sc->sc_alq.sc_alq_debug, 0,
+ "ALQ debug mask");
+
+ SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "numlost", CTLFLAG_RW, &sc->sc_alq.sc_alq_numlost, 0,
+ "number lost");
+}
+#endif /* ATH_DEBUG_ALQ */
+
void
ath_sysctlattach(struct ath_softc *sc)
{
@@ -514,14 +569,14 @@ ath_sysctlattach(struct ath_softc *sc)
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"regdomain", CTLFLAG_RD, &sc->sc_eerd, 0,
"EEPROM regdomain code");
-#ifdef ATH_DEBUG
+#ifdef ATH_DEBUG_ALQ
SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"debug", CTLFLAG_RW, &sc->sc_debug,
"control debugging printfs");
SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ktrdebug", CTLFLAG_RW, &sc->sc_ktrdebug,
"control debugging KTR");
-#endif
+#endif /* ATH_DEBUG_ALQ */
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_slottime, "I", "802.11 slot time (us)");
@@ -655,6 +710,10 @@ ath_sysctlattach(struct ath_softc *sc)
ath_sysctl_setcca, "I", "enable CCA control");
}
#endif
+
+#ifdef ATH_DEBUG_ALQ
+ ath_sysctl_alq_attach(sc);
+#endif
}
static int
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index cf465c4..e18785e 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -101,6 +101,10 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/if_ath_tx.h>
#include <dev/ath/if_ath_tx_ht.h>
+#ifdef ATH_DEBUG_ALQ
+#include <dev/ath/if_ath_alq.h>
+#endif
+
/*
* How many retries to perform in software
*/
diff --git a/sys/dev/ath/if_ath_tx_edma.c b/sys/dev/ath/if_ath_tx_edma.c
index f2fe2ce..8f35907 100644
--- a/sys/dev/ath/if_ath_tx_edma.c
+++ b/sys/dev/ath/if_ath_tx_edma.c
@@ -117,6 +117,10 @@ __FBSDID("$FreeBSD$");
#include <dev/ath/if_ath_tx_edma.h>
+#ifdef ATH_DEBUG_ALQ
+#include <dev/ath/if_ath_alq.h>
+#endif
+
/*
* some general macros
*/
@@ -130,6 +134,39 @@ __FBSDID("$FreeBSD$");
MALLOC_DECLARE(M_ATHDEV);
+static void ath_edma_tx_processq(struct ath_softc *sc, int dosched);
+
+#ifdef ATH_DEBUG_ALQ
+static void
+ath_edma_tx_alq_post(struct ath_softc *sc, struct ath_buf *bf_first)
+{
+ struct ath_buf *bf;
+ int i, n;
+ const char *ds;
+
+ /* XXX we should skip out early if debugging isn't enabled! */
+ bf = bf_first;
+
+ while (bf != NULL) {
+ /* XXX assume nmaps = 4! */
+ /* XXX should ensure bf_nseg > 0! */
+ if (bf->bf_nseg == 0)
+ break;
+ n = ((bf->bf_nseg - 1) / 4) + 1;
+ for (i = 0, ds = (const char *) bf->bf_desc;
+ i < n;
+ i++, ds += sc->sc_tx_desclen) {
+ if_ath_alq_post(&sc->sc_alq,
+ ATH_ALQ_EDMA_TXDESC,
+ 96,
+ ds);
+ }
+
+ bf = bf->bf_next;
+ }
+}
+#endif /* ATH_DEBUG_ALQ */
+
static void
ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
{
@@ -147,7 +184,11 @@ ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
ath_printtxbuf(sc, bf, txq->axq_qnum, i, 0);
-#endif
+#endif /* ATH_DEBUG */
+#ifdef ATH_DEBUG_ALQ
+ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
+ ath_edma_tx_alq_post(sc, bf);
+#endif /* ATH_DEBUG_ALQ */
txq->axq_fifo_depth++;
i++;
}
@@ -161,16 +202,12 @@ ath_edma_tx_fifo_fill(struct ath_softc *sc, struct ath_txq *txq)
*
* This should only be called as part of the chip reset path, as it
* assumes the FIFO is currently empty.
- *
- * TODO: verify that a cold/warm reset does clear the TX FIFO, so
- * writing in a partially-filled FIFO will not cause double-entries
- * to appear.
*/
static void
ath_edma_dma_restart(struct ath_softc *sc, struct ath_txq *txq)
{
- device_printf(sc->sc_dev, "%s: called: txq=%p, qnum=%d\n",
+ DPRINTF(sc, ATH_DEBUG_RESET, "%s: called: txq=%p, qnum=%d\n",
__func__,
txq,
txq->axq_qnum);
@@ -220,7 +257,11 @@ ath_edma_xmit_handoff_hw(struct ath_softc *sc, struct ath_txq *txq,
#ifdef ATH_DEBUG
if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
ath_printtxbuf(sc, bf, txq->axq_qnum, 0, 0);
-#endif /* ATH_DEBUG */
+#endif /* ATH_DEBUG */
+#ifdef ATH_DEBUG_ALQ
+ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC))
+ ath_edma_tx_alq_post(sc, bf);
+#endif /* ATH_DEBUG_ALQ */
ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
txq->axq_fifo_depth++;
ath_hal_txstart(ah, txq->axq_qnum);
@@ -366,7 +407,6 @@ ath_edma_dma_txsetup(struct ath_softc *sc)
ath_edma_setup_txfifo(sc, i);
}
-
return (0);
}
@@ -393,7 +433,7 @@ ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
struct ifnet *ifp = sc->sc_ifp;
int i;
- device_printf(sc->sc_dev, "%s: called\n", __func__);
+ DPRINTF(sc, ATH_DEBUG_RESET, "%s: called\n", __func__);
(void) ath_stoptxdma(sc);
@@ -403,17 +443,19 @@ ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
*
* Otherwise, just toss everything in each TX queue.
*/
+ if (reset_type == ATH_RESET_NOLOSS) {
+ ath_edma_tx_processq(sc, 0);
+ } else {
+ for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_draintxq(sc, &sc->sc_txq[i]);
+ }
+ }
/* XXX dump out the TX completion FIFO contents */
/* XXX dump out the frames */
- /* XXX for now, just drain */
- for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_draintxq(sc, &sc->sc_txq[i]);
- }
-
IF_LOCK(&ifp->if_snd);
ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
IF_UNLOCK(&ifp->if_snd);
@@ -421,12 +463,25 @@ ath_edma_tx_drain(struct ath_softc *sc, ATH_RESET_TYPE reset_type)
}
/*
- * Process the TX status queue.
+ * TX completion tasklet.
*/
+
static void
ath_edma_tx_proc(void *arg, int npending)
{
struct ath_softc *sc = (struct ath_softc *) arg;
+
+ DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
+ __func__, npending);
+ ath_edma_tx_processq(sc, 1);
+}
+
+/*
+ * Process the TX status queue.
+ */
+static void
+ath_edma_tx_processq(struct ath_softc *sc, int dosched)
+{
struct ath_hal *ah = sc->sc_ah;
HAL_STATUS status;
struct ath_tx_status ts;
@@ -441,17 +496,14 @@ ath_edma_tx_proc(void *arg, int npending)
uint32_t txstatus[32];
#endif
- DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: called, npending=%d\n",
- __func__, npending);
-
for (idx = 0; ; idx++) {
bzero(&ts, sizeof(ts));
ATH_TXSTATUS_LOCK(sc);
- status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
#ifdef ATH_DEBUG
ath_hal_gettxrawtxdesc(ah, txstatus);
#endif
+ status = ath_hal_txprocdesc(ah, NULL, (void *) &ts);
ATH_TXSTATUS_UNLOCK(sc);
#ifdef ATH_DEBUG
@@ -475,6 +527,13 @@ ath_edma_tx_proc(void *arg, int npending)
continue;
}
+#ifdef ATH_DEBUG_ALQ
+ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS))
+ if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_TXSTATUS,
+ sc->sc_tx_statuslen,
+ (char *) txstatus);
+#endif /* ATH_DEBUG_ALQ */
+
/*
* At this point we have a valid status descriptor.
* The QID and descriptor ID (which currently isn't set)
@@ -535,13 +594,26 @@ ath_edma_tx_proc(void *arg, int npending)
* in the TX descriptor. However the TX completion
* FIFO doesn't have this information. So here we
* do a separate HAL call to populate that information.
+ *
+ * The same problem exists with ts_longretry.
+ * The FreeBSD HAL corrects ts_longretry in the HAL layer;
+ * the AR9380 HAL currently doesn't. So until the HAL
+ * is imported and this can be added, we correct for it
+ * here.
*/
-
/* XXX TODO */
/* XXX faked for now. Ew. */
if (ts.ts_finaltsi < 4) {
ts.ts_rate =
bf->bf_state.bfs_rc[ts.ts_finaltsi].ratecode;
+ switch (ts.ts_finaltsi) {
+ case 3: ts.ts_longretry +=
+ bf->bf_state.bfs_rc[2].tries;
+ case 2: ts.ts_longretry +=
+ bf->bf_state.bfs_rc[1].tries;
+ case 1: ts.ts_longretry +=
+ bf->bf_state.bfs_rc[0].tries;
+ }
} else {
device_printf(sc->sc_dev, "%s: finaltsi=%d\n",
__func__,
@@ -594,7 +666,7 @@ ath_edma_tx_proc(void *arg, int npending)
* working.
*/
ATH_TXQ_LOCK(txq);
- if (txq->axq_fifo_depth == 0) {
+ if (dosched && txq->axq_fifo_depth == 0) {
ath_edma_tx_fifo_fill(sc, txq);
}
ATH_TXQ_UNLOCK(txq);
@@ -614,7 +686,8 @@ ath_edma_tx_proc(void *arg, int npending)
* but there's no easy way right now to only populate
* the txq task for _one_ TXQ. This should be fixed.
*/
- taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
+ if (dosched)
+ taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
}
static void
diff --git a/sys/dev/ath/if_athvar.h b/sys/dev/ath/if_athvar.h
index 974db28..38fd484 100644
--- a/sys/dev/ath/if_athvar.h
+++ b/sys/dev/ath/if_athvar.h
@@ -42,6 +42,9 @@
#include <net80211/ieee80211_radiotap.h>
#include <dev/ath/if_athioctl.h>
#include <dev/ath/if_athrate.h>
+#ifdef ATH_DEBUG_ALQ
+#include <dev/ath/if_ath_alq.h>
+#endif
#define ATH_TIMEOUT 1000
@@ -770,6 +773,11 @@ struct ath_softc {
int sc_dodfs; /* Whether to enable DFS rx filter bits */
struct task sc_dfstask; /* DFS processing task */
+ /* ALQ */
+#ifdef ATH_DEBUG_ALQ
+ struct if_ath_alq sc_alq;
+#endif
+
/* TX AMPDU handling */
int (*sc_addba_request)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int, int, int);
diff --git a/sys/dev/bktr/bktr_audio.c b/sys/dev/bktr/bktr_audio.c
index efc63da..6cfe576 100644
--- a/sys/dev/bktr/bktr_audio.c
+++ b/sys/dev/bktr/bktr_audio.c
@@ -498,7 +498,7 @@ void msp_autodetect( bktr_ptr_t bktr ) {
}
- /* MSP3415D SPECIAL CASE Use the Tuner's Mono audio ouput for the MSP */
+ /* MSP3415D SPECIAL CASE Use the Tuner's Mono audio output for the MSP */
/* (for Hauppauge 44xxx card with Tuner Type 0x2a) */
else if ( ( (strncmp("3415D", bktr->msp_version_string, 5) == 0)
&&(bktr->msp_use_mono_source == 1)
diff --git a/sys/dev/ct/bshw_machdep.c b/sys/dev/ct/bshw_machdep.c
index 674291e..ba89e55 100644
--- a/sys/dev/ct/bshw_machdep.c
+++ b/sys/dev/ct/bshw_machdep.c
@@ -78,9 +78,7 @@ typedef unsigned long vaddr_t;
* GENERIC MACHDEP FUNCTIONS
*********************************************************/
void
-bshw_synch_setup(ct, ti)
- struct ct_softc *ct;
- struct targ_info *ti;
+bshw_synch_setup(struct ct_softc *ct, struct targ_info *ti)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
struct ct_targ_info *cti = (void *) ti;
@@ -99,8 +97,7 @@ bshw_synch_setup(ct, ti)
}
void
-bshw_bus_reset(ct)
- struct ct_softc *ct;
+bshw_bus_reset(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -149,9 +146,7 @@ bshw_bus_reset(ct)
/* probe */
int
-bshw_read_settings(chp, bs)
- struct ct_bus_access_handle *chp;
- struct bshw_softc *bs;
+bshw_read_settings(struct ct_bus_access_handle *chp, struct bshw_softc *bs)
{
static int irq_tbl[] = { 3, 5, 6, 9, 12, 13 };
@@ -183,8 +178,7 @@ static __inline void bshw_lc_smit_stop(struct ct_softc *);
static int bshw_lc_smit_fstat(struct ct_softc *, int, int);
static __inline void
-bshw_lc_smit_stop(ct)
- struct ct_softc *ct;
+bshw_lc_smit_stop(struct ct_softc *ct)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -193,10 +187,7 @@ bshw_lc_smit_stop(ct)
}
static __inline void
-bshw_lc_smit_start(ct, count, direction)
- struct ct_softc *ct;
- int count;
- u_int direction;
+bshw_lc_smit_start(struct ct_softc *ct, int count, u_int direction)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
u_int8_t pval, val;
@@ -212,9 +203,7 @@ bshw_lc_smit_start(ct, count, direction)
}
static int
-bshw_lc_smit_fstat(ct, wc, read)
- struct ct_softc *ct;
- int wc, read;
+bshw_lc_smit_fstat(struct ct_softc *ct, int wc, int read)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
u_int8_t stat;
@@ -244,8 +233,7 @@ bshw_lc_smit_fstat(ct, wc, read)
}
void
-bshw_smit_xfer_stop(ct)
- struct ct_softc *ct;
+bshw_smit_xfer_stop(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct bshw_softc *bs = ct->ct_hw;
@@ -292,8 +280,7 @@ bad:
}
int
-bshw_smit_xfer_start(ct)
- struct ct_softc *ct;
+bshw_smit_xfer_start(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -396,8 +383,7 @@ static void bshw_dmastart(struct ct_softc *);
static void bshw_dmadone(struct ct_softc *);
int
-bshw_dma_xfer_start(ct)
- struct ct_softc *ct;
+bshw_dma_xfer_start(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct sc_p *sp = &slp->sl_scp;
@@ -458,8 +444,7 @@ bshw_dma_xfer_start(ct)
}
void
-bshw_dma_xfer_stop(ct)
- struct ct_softc *ct;
+bshw_dma_xfer_stop(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct sc_p *sp = &slp->sl_scp;
@@ -523,10 +508,8 @@ bshw_dma_xfer_stop(ct)
static bus_addr_t dmapageport[4] = { 0x27, 0x21, 0x23, 0x25 };
static __inline void
-bshw_dma_write_1(chp, port, val)
- struct ct_bus_access_handle *chp;
- bus_addr_t port;
- u_int8_t val;
+bshw_dma_write_1(struct ct_bus_access_handle *chp, bus_addr_t port,
+ u_int8_t val)
{
CT_BUS_WEIGHT(chp);
@@ -534,8 +517,7 @@ bshw_dma_write_1(chp, port, val)
}
static void
-bshw_dmastart(ct)
- struct ct_softc *ct;
+bshw_dmastart(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct bshw_softc *bs = ct->ct_hw;
@@ -581,8 +563,7 @@ bshw_dmastart(ct)
}
static void
-bshw_dmadone(ct)
- struct ct_softc *ct;
+bshw_dmadone(struct ct_softc *ct)
{
struct bshw_softc *bs = ct->ct_hw;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -609,8 +590,7 @@ static void bshw_dma_start_elecom(struct ct_softc *);
static void bshw_dma_stop_elecom(struct ct_softc *);
static int
-bshw_dma_init_texa(ct)
- struct ct_softc *ct;
+bshw_dma_init_texa(struct ct_softc *ct)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
u_int8_t regval;
@@ -625,8 +605,7 @@ bshw_dma_init_texa(ct)
}
static int
-bshw_dma_init_sc98(ct)
- struct ct_softc *ct;
+bshw_dma_init_sc98(struct ct_softc *ct)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -652,8 +631,7 @@ bshw_dma_init_sc98(ct)
}
static void
-bshw_dma_start_sc98(ct)
- struct ct_softc *ct;
+bshw_dma_start_sc98(struct ct_softc *ct)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -662,8 +640,7 @@ bshw_dma_start_sc98(ct)
}
static void
-bshw_dma_stop_sc98(ct)
- struct ct_softc *ct;
+bshw_dma_stop_sc98(struct ct_softc *ct)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -672,8 +649,7 @@ bshw_dma_stop_sc98(ct)
}
static void
-bshw_dma_start_elecom(ct)
- struct ct_softc *ct;
+bshw_dma_start_elecom(struct ct_softc *ct)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
u_int8_t tmp = ct_cr_read_1(chp, 0x4c);
@@ -682,8 +658,7 @@ bshw_dma_start_elecom(ct)
}
static void
-bshw_dma_stop_elecom(ct)
- struct ct_softc *ct;
+bshw_dma_stop_elecom(struct ct_softc *ct)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
u_int8_t tmp = ct_cr_read_1(chp, 0x4c);
diff --git a/sys/dev/ct/ct.c b/sys/dev/ct/ct.c
index 89fe351..1d3f0db 100644
--- a/sys/dev/ct/ct.c
+++ b/sys/dev/ct/ct.c
@@ -164,9 +164,7 @@ struct scsi_low_funcs ct_funcs = {
* HW functions
**************************************************/
static __inline void
-cthw_phase_bypass(ct, ph)
- struct ct_softc *ct;
- u_int8_t ph;
+cthw_phase_bypass(struct ct_softc *ct, u_int8_t ph)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -175,8 +173,7 @@ cthw_phase_bypass(ct, ph)
}
static void
-cthw_bus_reset(ct)
- struct ct_softc *ct;
+cthw_bus_reset(struct ct_softc *ct)
{
/*
@@ -187,10 +184,8 @@ cthw_bus_reset(ct)
}
static int
-cthw_chip_reset(chp, chiprevp, chipclk, hostid)
- struct ct_bus_access_handle *chp;
- int *chiprevp;
- int chipclk, hostid;
+cthw_chip_reset(struct ct_bus_access_handle *chp, int *chiprevp, int chipclk,
+ int hostid)
{
#define CT_SELTIMEOUT_20MHz_REGV (0x80)
u_int8_t aux, regv;
@@ -285,8 +280,7 @@ out:
}
static struct ct_synch_data *
-ct_make_synch_table(ct)
- struct ct_softc *ct;
+ct_make_synch_table(struct ct_softc *ct)
{
struct ct_synch_data *sdtp, *sdp;
u_int base, i, period;
@@ -329,11 +323,8 @@ ct_make_synch_table(ct)
* Attach & Probe
**************************************************/
int
-ctprobesubr(chp, dvcfg, hsid, chipclk, chiprevp)
- struct ct_bus_access_handle *chp;
- u_int dvcfg, chipclk;
- int hsid;
- int *chiprevp;
+ctprobesubr(struct ct_bus_access_handle *chp, u_int dvcfg, int hsid,
+ u_int chipclk, int *chiprevp)
{
#if 0
@@ -346,8 +337,7 @@ ctprobesubr(chp, dvcfg, hsid, chipclk, chiprevp)
}
void
-ctattachsubr(ct)
- struct ct_softc *ct;
+ctattachsubr(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
@@ -362,8 +352,7 @@ ctattachsubr(ct)
* SCSI LOW interface functions
**************************************************/
static void
-cthw_attention(ct)
- struct ct_softc *ct;
+cthw_attention(struct ct_softc *ct)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -380,8 +369,7 @@ cthw_attention(ct)
}
static void
-ct_attention(ct)
- struct ct_softc *ct;
+ct_attention(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
@@ -398,10 +386,7 @@ ct_attention(ct)
}
static int
-ct_targ_init(ct, ti, action)
- struct ct_softc *ct;
- struct targ_info *ti;
- int action;
+ct_targ_init(struct ct_softc *ct, struct targ_info *ti, int action)
{
struct ct_targ_info *cti = (void *) ti;
@@ -438,9 +423,7 @@ ct_targ_init(ct, ti, action)
}
static int
-ct_world_start(ct, fdone)
- struct ct_softc *ct;
- int fdone;
+ct_world_start(struct ct_softc *ct, int fdone)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -470,9 +453,7 @@ ct_world_start(ct, fdone)
}
static int
-ct_start_selection(ct, cb)
- struct ct_softc *ct;
- struct slccb *cb;
+ct_start_selection(struct ct_softc *ct, struct slccb *cb)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -544,10 +525,7 @@ ct_start_selection(ct, cb)
}
static int
-ct_msg(ct, ti, msg)
- struct ct_softc *ct;
- struct targ_info *ti;
- u_int msg;
+ct_msg(struct ct_softc *ct, struct targ_info *ti, u_int msg)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
struct ct_targ_info *cti = (void *) ti;
@@ -599,11 +577,8 @@ ct_msg(ct, ti, msg)
* <DATA PHASE>
*************************************************/
static int
-ct_xfer(ct, data, len, direction, statp)
- struct ct_softc *ct;
- u_int8_t *data;
- int len, direction;
- u_int *statp;
+ct_xfer(struct ct_softc *ct, u_int8_t *data, int len, int direction,
+ u_int *statp)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
int wc;
@@ -663,8 +638,7 @@ ct_xfer(ct, data, len, direction, statp)
#define CT_PADDING_BUF_SIZE 32
static void
-ct_io_xfer(ct)
- struct ct_softc *ct;
+ct_io_xfer(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -716,9 +690,7 @@ struct ct_err ct_cmderr[] = {
};
static void
-ct_phase_error(ct, scsi_status)
- struct ct_softc *ct;
- u_int8_t scsi_status;
+ct_phase_error(struct ct_softc *ct, u_int8_t scsi_status)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct targ_info *ti = slp->sl_Tnexus;
@@ -764,9 +736,7 @@ ct_phase_error(ct, scsi_status)
* ### SCSI PHASE SEQUENCER ###
**************************************************/
static int
-ct_reselected(ct, scsi_status)
- struct ct_softc *ct;
- u_int8_t scsi_status;
+ct_reselected(struct ct_softc *ct, u_int8_t scsi_status)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -809,9 +779,7 @@ ct_reselected(ct, scsi_status)
}
static int
-ct_target_nexus_establish(ct, lun, dir)
- struct ct_softc *ct;
- int lun, dir;
+ct_target_nexus_establish(struct ct_softc *ct, int lun, int dir)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -831,8 +799,7 @@ ct_target_nexus_establish(ct, lun, dir)
}
static int
-ct_lun_nexus_establish(ct)
- struct ct_softc *ct;
+ct_lun_nexus_establish(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -843,8 +810,7 @@ ct_lun_nexus_establish(ct)
}
static int
-ct_ccb_nexus_establish(ct)
- struct ct_softc *ct;
+ct_ccb_nexus_establish(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -870,8 +836,7 @@ ct_ccb_nexus_establish(ct)
}
static int
-ct_unbusy(ct)
- struct ct_softc *ct;
+ct_unbusy(struct ct_softc *ct)
{
struct scsi_low_softc *slp = &ct->sc_sclow;
struct ct_bus_access_handle *chp = &ct->sc_ch;
@@ -894,8 +859,7 @@ ct_unbusy(ct)
}
static int
-ct_catch_intr(ct)
- struct ct_softc *ct;
+ct_catch_intr(struct ct_softc *ct)
{
struct ct_bus_access_handle *chp = &ct->sc_ch;
int wc;
@@ -913,8 +877,7 @@ ct_catch_intr(ct)
}
int
-ctintr(arg)
- void *arg;
+ctintr(void *arg)
{
struct ct_softc *ct = arg;
struct scsi_low_softc *slp = &ct->sc_sclow;
diff --git a/sys/dev/ct/ct_isa.c b/sys/dev/ct/ct_isa.c
index ac93759..d17af23 100644
--- a/sys/dev/ct/ct_isa.c
+++ b/sys/dev/ct/ct_isa.c
@@ -367,16 +367,14 @@ ct_dmamap(void *arg, bus_dma_segment_t *seg, int nseg, int error)
}
static void
-ct_isa_bus_access_weight(chp)
- struct ct_bus_access_handle *chp;
+ct_isa_bus_access_weight(struct ct_bus_access_handle *chp)
{
outb(0x5f, 0);
}
static void
-ct_isa_dmasync_before(ct)
- struct ct_softc *ct;
+ct_isa_dmasync_before(struct ct_softc *ct)
{
if (need_pre_dma_flush)
@@ -384,8 +382,7 @@ ct_isa_dmasync_before(ct)
}
static void
-ct_isa_dmasync_after(ct)
- struct ct_softc *ct;
+ct_isa_dmasync_after(struct ct_softc *ct)
{
if (need_post_dma_flush)
diff --git a/sys/dev/ct/ct_machdep.h b/sys/dev/ct/ct_machdep.h
index ceba654..a6b8b15 100644
--- a/sys/dev/ct/ct_machdep.h
+++ b/sys/dev/ct/ct_machdep.h
@@ -90,8 +90,7 @@ static __inline void cthw_set_count
(struct ct_bus_access_handle *, u_int);
static __inline u_int8_t
-ct_stat_read_1(chp)
- struct ct_bus_access_handle *chp;
+ct_stat_read_1(struct ct_bus_access_handle *chp)
{
u_int8_t regv;
@@ -101,9 +100,7 @@ ct_stat_read_1(chp)
}
static __inline void
-cthw_set_count(chp, count)
- struct ct_bus_access_handle *chp;
- u_int count;
+cthw_set_count(struct ct_bus_access_handle *chp, u_int count)
{
bus_space_tag_t bst = chp->ch_iot;
bus_space_handle_t bsh = chp->ch_ioh;
@@ -119,8 +116,7 @@ cthw_set_count(chp, count)
}
static __inline u_int
-cthw_get_count(chp)
- struct ct_bus_access_handle *chp;
+cthw_get_count(struct ct_bus_access_handle *chp)
{
bus_space_tag_t bst = chp->ch_iot;
bus_space_handle_t bsh = chp->ch_ioh;
@@ -138,10 +134,7 @@ cthw_get_count(chp)
}
static __inline void
-ct_write_cmds(chp, cmd, len)
- struct ct_bus_access_handle *chp;
- u_int8_t *cmd;
- int len;
+ct_write_cmds(struct ct_bus_access_handle *chp, u_int8_t *cmd, int len)
{
bus_space_tag_t bst = chp->ch_iot;
bus_space_handle_t bsh = chp->ch_ioh;
@@ -157,9 +150,7 @@ ct_write_cmds(chp, cmd, len)
}
static __inline u_int8_t
-ct_cr_read_1(chp, offs)
- struct ct_bus_access_handle *chp;
- bus_addr_t offs;
+ct_cr_read_1(struct ct_bus_access_handle *chp, bus_addr_t offs)
{
bus_space_tag_t bst = chp->ch_iot;
bus_space_handle_t bsh = chp->ch_ioh;
@@ -173,10 +164,7 @@ ct_cr_read_1(chp, offs)
}
static __inline void
-ct_cr_write_1(chp, offs, val)
- struct ct_bus_access_handle *chp;
- bus_addr_t offs;
- u_int8_t val;
+ct_cr_write_1(struct ct_bus_access_handle *chp, bus_addr_t offs, u_int8_t val)
{
bus_space_tag_t bst = chp->ch_iot;
bus_space_handle_t bsh = chp->ch_ioh;
@@ -188,8 +176,7 @@ ct_cr_write_1(chp, offs, val)
}
static __inline u_int8_t
-ct_cmdp_read_1(chp)
- struct ct_bus_access_handle *chp;
+ct_cmdp_read_1(struct ct_bus_access_handle *chp)
{
u_int8_t regv;
@@ -199,9 +186,7 @@ ct_cmdp_read_1(chp)
}
static __inline void
-ct_cmdp_write_1(chp, val)
- struct ct_bus_access_handle *chp;
- u_int8_t val;
+ct_cmdp_write_1(struct ct_bus_access_handle *chp, u_int8_t val)
{
bus_space_write_1(chp->ch_iot, chp->ch_ioh, cmd_port, val);
diff --git a/sys/dev/drm/via_dma.c b/sys/dev/drm/via_dma.c
index 6f435fe..53f806c 100644
--- a/sys/dev/drm/via_dma.c
+++ b/sys/dev/drm/via_dma.c
@@ -481,7 +481,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
- VIA_READ(VIA_REG_TRANSPACE);
+ (void)VIA_READ(VIA_REG_TRANSPACE);
}
}
return paused;
@@ -569,7 +569,7 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
DRM_WRITEMEMORYBARRIER();
VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
- VIA_READ(VIA_REG_TRANSPACE);
+ (void)VIA_READ(VIA_REG_TRANSPACE);
dev_priv->dma_diff = 0;
diff --git a/sys/dev/drm/via_dmablit.c b/sys/dev/drm/via_dmablit.c
index 11dc2aa..70ba9d9 100644
--- a/sys/dev/drm/via_dmablit.c
+++ b/sys/dev/drm/via_dmablit.c
@@ -210,7 +210,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
DRM_WRITEMEMORYBARRIER();
VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
- VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
+ (void)VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}
diff --git a/sys/dev/etherswitch/arswitch/arswitch.c b/sys/dev/etherswitch/arswitch/arswitch.c
index 07c7d4e..001b54e 100644
--- a/sys/dev/etherswitch/arswitch/arswitch.c
+++ b/sys/dev/etherswitch/arswitch/arswitch.c
@@ -77,16 +77,6 @@ static void arswitch_tick(void *arg);
static int arswitch_ifmedia_upd(struct ifnet *);
static void arswitch_ifmedia_sts(struct ifnet *, struct ifmediareq *);
-static void
-arswitch_identify(driver_t *driver, device_t parent)
-{
- device_t child;
-
- if (device_find_child(parent, driver->name, -1) == NULL) {
- child = BUS_ADD_CHILD(parent, 0, driver->name, -1);
- }
-}
-
static int
arswitch_probe(device_t dev)
{
@@ -557,7 +547,6 @@ arswitch_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
static device_method_t arswitch_methods[] = {
/* Device interface */
- DEVMETHOD(device_identify, arswitch_identify),
DEVMETHOD(device_probe, arswitch_probe),
DEVMETHOD(device_attach, arswitch_attach),
DEVMETHOD(device_detach, arswitch_detach),
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index a86c26a..443c127 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -677,6 +677,15 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
sched_unpin();
vm_page_wakeup(m);
break;
+ } else if (rv == VM_PAGER_FAIL) {
+ /*
+ * Pager does not have the page. Zero
+ * the allocated page, and mark it as
+ * valid. Do not set dirty, the page
+ * can be recreated if thrown out.
+ */
+ bzero((void *)sf_buf_kva(sf), PAGE_SIZE);
+ m->valid = VM_PAGE_BITS_ALL;
}
bcopy((void *)(sf_buf_kva(sf) + offs), p, len);
cpu_flush_dcache(p, len);
diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c
index 07b9915..7cb19d2 100644
--- a/sys/dev/mfi/mfi.c
+++ b/sys/dev/mfi/mfi.c
@@ -1577,6 +1577,11 @@ mfi_decode_evt(struct mfi_softc *sc, struct mfi_evt_detail *detail)
sx_xunlock(&sc->mfi_config_lock);
}
}
+ if (sc->mfi_cam_rescan_cb != NULL &&
+ (detail->code == MR_EVT_PD_INSERTED ||
+ detail->code == MR_EVT_PD_REMOVED)) {
+ sc->mfi_cam_rescan_cb(sc, detail->args.pd.device_id);
+ }
break;
}
}
diff --git a/sys/dev/mfi/mfi_cam.c b/sys/dev/mfi/mfi_cam.c
index ce1e415..599ff3b 100644
--- a/sys/dev/mfi/mfi_cam.c
+++ b/sys/dev/mfi/mfi_cam.c
@@ -50,7 +50,9 @@ __FBSDID("$FreeBSD$");
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
+#include <cam/cam_periph.h>
#include <cam/cam_sim.h>
+#include <cam/cam_xpt_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
@@ -63,12 +65,19 @@ __FBSDID("$FreeBSD$");
#include <dev/mfi/mfi_ioctl.h>
#include <dev/mfi/mfivar.h>
+enum mfip_state {
+ MFIP_STATE_NONE,
+ MFIP_STATE_DETACH,
+ MFIP_STATE_RESCAN
+};
+
struct mfip_softc {
device_t dev;
struct mfi_softc *mfi_sc;
struct cam_devq *devq;
struct cam_sim *sim;
struct cam_path *path;
+ enum mfip_state state;
};
static int mfip_probe(device_t);
@@ -76,6 +85,7 @@ static int mfip_attach(device_t);
static int mfip_detach(device_t);
static void mfip_cam_action(struct cam_sim *, union ccb *);
static void mfip_cam_poll(struct cam_sim *);
+static void mfip_cam_rescan(struct mfi_softc *, uint32_t tid);
static struct mfi_command * mfip_start(void *);
static void mfip_done(struct mfi_command *cm);
@@ -122,6 +132,7 @@ mfip_attach(device_t dev)
mfisc = device_get_softc(device_get_parent(dev));
sc->dev = dev;
+ sc->state = MFIP_STATE_NONE;
sc->mfi_sc = mfisc;
mfisc->mfi_cam_start = mfip_start;
@@ -137,6 +148,8 @@ mfip_attach(device_t dev)
return (EINVAL);
}
+ mfisc->mfi_cam_rescan_cb = mfip_cam_rescan;
+
mtx_lock(&mfisc->mfi_io_lock);
if (xpt_bus_register(sc->sim, dev, 0) != 0) {
device_printf(dev, "XPT bus registration failed\n");
@@ -159,6 +172,16 @@ mfip_detach(device_t dev)
if (sc == NULL)
return (EINVAL);
+ mtx_lock(&sc->mfi_sc->mfi_io_lock);
+ if (sc->state == MFIP_STATE_RESCAN) {
+ mtx_unlock(&sc->mfi_sc->mfi_io_lock);
+ return (EBUSY);
+ }
+ sc->state = MFIP_STATE_DETACH;
+ mtx_unlock(&sc->mfi_sc->mfi_io_lock);
+
+ sc->mfi_sc->mfi_cam_rescan_cb = NULL;
+
if (sc->sim != NULL) {
mtx_lock(&sc->mfi_sc->mfi_io_lock);
xpt_bus_deregister(cam_sim_path(sc->sim));
@@ -266,6 +289,54 @@ mfip_cam_action(struct cam_sim *sim, union ccb *ccb)
return;
}
+static void
+mfip_cam_rescan(struct mfi_softc *sc, uint32_t tid)
+{
+ union ccb *ccb;
+ struct mfip_softc *camsc;
+ struct cam_sim *sim;
+ device_t mfip_dev;
+
+ mtx_lock(&Giant);
+ mfip_dev = device_find_child(sc->mfi_dev, "mfip", -1);
+ mtx_unlock(&Giant);
+ if (mfip_dev == NULL) {
+ device_printf(sc->mfi_dev, "Couldn't find mfip child device!\n");
+ return;
+ }
+
+ mtx_lock(&sc->mfi_io_lock);
+ camsc = device_get_softc(mfip_dev);
+ if (camsc->state == MFIP_STATE_DETACH) {
+ mtx_unlock(&sc->mfi_io_lock);
+ return;
+ }
+ camsc->state = MFIP_STATE_RESCAN;
+ mtx_unlock(&sc->mfi_io_lock);
+
+ ccb = xpt_alloc_ccb_nowait();
+ if (ccb == NULL) {
+ device_printf(sc->mfi_dev,
+ "Cannot allocate ccb for bus rescan.\n");
+ return;
+ }
+
+ sim = camsc->sim;
+ if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
+ tid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_free_ccb(ccb);
+ device_printf(sc->mfi_dev,
+ "Cannot create path for bus rescan.\n");
+ return;
+ }
+
+ xpt_rescan(ccb);
+
+ mtx_lock(&sc->mfi_io_lock);
+ camsc->state = MFIP_STATE_NONE;
+ mtx_unlock(&sc->mfi_io_lock);
+}
+
static struct mfi_command *
mfip_start(void *data)
{
diff --git a/sys/dev/mfi/mfivar.h b/sys/dev/mfi/mfivar.h
index 46b7c7e..435ca8d 100644
--- a/sys/dev/mfi/mfivar.h
+++ b/sys/dev/mfi/mfivar.h
@@ -303,6 +303,8 @@ struct mfi_softc {
TAILQ_HEAD(, ccb_hdr) mfi_cam_ccbq;
struct mfi_command * (* mfi_cam_start)(void *);
+ void (*mfi_cam_rescan_cb)(struct mfi_softc *,
+ uint32_t);
struct callout mfi_watchdog_callout;
struct mtx mfi_io_lock;
struct sx mfi_config_lock;
diff --git a/sys/dev/mn/if_mn.c b/sys/dev/mn/if_mn.c
index e8b3dc6..07c2a98 100644
--- a/sys/dev/mn/if_mn.c
+++ b/sys/dev/mn/if_mn.c
@@ -1254,24 +1254,6 @@ mn_intr(void *xsc)
sc->m32x->stat = stat;
}
-static void
-mn_timeout(void *xsc)
-{
- static int round = 0;
- struct mn_softc *sc;
-
- mn_intr(xsc);
- sc = xsc;
- timeout(mn_timeout, xsc, 10 * hz);
- round++;
- if (round == 2) {
- sc->m32_mem.ccb = 0x00008004;
- sc->m32x->cmd = 0x1;
- } else if (round > 2) {
- printf("%s: timeout\n", sc->name);
- }
-}
-
/*
* PCI initialization stuff
*/
diff --git a/sys/dev/nve/if_nve.c b/sys/dev/nve/if_nve.c
index a9b8be8..fe26666 100644
--- a/sys/dev/nve/if_nve.c
+++ b/sys/dev/nve/if_nve.c
@@ -152,30 +152,30 @@ static int nve_miibus_writereg(device_t, int, int, int);
static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int);
-static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
-static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
-static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
-static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
-static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
-static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
-static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
-static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
-static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
-static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8);
-static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
-static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
-static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
-static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
-static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
-static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);
-
-static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8);
-static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
-static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
-static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
-static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
-static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
-static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID);
+static NV_API_CALL NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK);
+static NV_API_CALL NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK);
+static NV_API_CALL NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX);
+static NV_API_CALL NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX);
+static NV_API_CALL NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32);
+static NV_API_CALL NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32);
+static NV_API_CALL NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *);
+static NV_API_CALL NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID);
+static NV_API_CALL NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32);
+static NV_API_CALL NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8);
+static NV_API_CALL NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32);
+static NV_API_CALL NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *);
+static NV_API_CALL NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID);
+static NV_API_CALL NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID);
+static NV_API_CALL NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32);
+static NV_API_CALL NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID);
+
+static NV_API_CALL NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8);
+static NV_API_CALL PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID);
+static NV_API_CALL NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32);
+static NV_API_CALL NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *);
+static NV_API_CALL NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID);
+static NV_API_CALL NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID);
+static NV_API_CALL PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID);
static device_method_t nve_methods[] = {
/* Device interface */
@@ -1342,7 +1342,7 @@ nve_watchdog(struct nve_softc *sc)
/* --- Start of NVOSAPI interface --- */
/* Allocate DMA enabled general use memory for API */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
struct nve_softc *sc;
@@ -1370,7 +1370,7 @@ nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem)
}
/* Free allocated memory */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem)
{
DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n",
@@ -1381,7 +1381,7 @@ nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem)
}
/* Copied directly from nvnet.c */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
MEMORY_BLOCK mem_block;
@@ -1427,7 +1427,7 @@ nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
}
/* Copied directly from nvnet.c */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
{
MEMORY_BLOCK mem_block;
@@ -1442,7 +1442,7 @@ nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex)
}
/* Clear memory region */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length)
{
DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n");
@@ -1451,7 +1451,7 @@ nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length)
}
/* Sleep for a tick */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osdelay(PNV_VOID ctx, NV_UINT32 usec)
{
DELAY(usec);
@@ -1459,7 +1459,7 @@ nve_osdelay(PNV_VOID ctx, NV_UINT32 usec)
}
/* Allocate memory for rx buffer */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id)
{
struct nve_softc *sc = ctx;
@@ -1515,7 +1515,7 @@ fail:
}
/* Free the rx buffer */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id)
{
struct nve_softc *sc = ctx;
@@ -1539,7 +1539,7 @@ nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id)
}
/* This gets called by the Nvidia API after our TX packet has been sent */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success)
{
struct nve_softc *sc = ctx;
@@ -1578,7 +1578,7 @@ fail:
/* This gets called by the Nvidia API when a new packet has been received */
/* XXX What is newbuf used for? XXX */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf,
NV_UINT8 priority)
{
@@ -1636,7 +1636,7 @@ nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf,
}
/* This gets called by NVIDIA API when the PHY link state changes */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
{
@@ -1646,7 +1646,7 @@ nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
}
/* Setup a watchdog timer */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
{
struct nve_softc *sc = (struct nve_softc *)ctx;
@@ -1660,7 +1660,7 @@ nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
}
/* Free the timer */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
{
@@ -1672,7 +1672,7 @@ nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
}
/* Setup timer parameters */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters)
{
struct nve_softc *sc = (struct nve_softc *)ctx;
@@ -1686,7 +1686,7 @@ nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID paramet
}
/* Set the timer to go off */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
{
struct nve_softc *sc = ctx;
@@ -1700,7 +1700,7 @@ nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
}
/* Cancel the timer */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
{
@@ -1711,7 +1711,7 @@ nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
return (1);
}
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
NV_UINT8 *newbuffer, NV_UINT8 priority)
{
@@ -1722,7 +1722,7 @@ nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id,
return (1);
}
-static PNV_VOID
+static NV_API_CALL PNV_VOID
nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
{
@@ -1732,7 +1732,7 @@ nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata)
return (NULL);
}
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
{
@@ -1743,7 +1743,7 @@ nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno)
}
/* Allocate mutex context (already done in nve_attach) */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
{
struct nve_softc *sc = (struct nve_softc *)ctx;
@@ -1756,7 +1756,7 @@ nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock)
}
/* Obtain a spin lock */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{
@@ -1766,7 +1766,7 @@ nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
}
/* Release lock */
-static NV_SINT32
+static NV_API_CALL NV_SINT32
nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
{
@@ -1776,7 +1776,7 @@ nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock)
}
/* I have no idea what this is for */
-static PNV_VOID
+static NV_API_CALL PNV_VOID
nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata)
{
diff --git a/sys/dev/nxge/xgehal/xgehal-device.c b/sys/dev/nxge/xgehal/xgehal-device.c
index 43f944a..2f52a7e 100644
--- a/sys/dev/nxge/xgehal/xgehal-device.c
+++ b/sys/dev/nxge/xgehal/xgehal-device.c
@@ -6793,7 +6793,7 @@ _again:
} else {
/*
* Logging Error messages in the excess temperature,
- * Bias current, laser ouput for three cycle
+ * Bias current, laser output for three cycle
*/
__hal_updt_stats_xpak(hldev);
hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
diff --git a/sys/dev/puc/pucdata.c b/sys/dev/puc/pucdata.c
index 5e3f376..216a30b 100644
--- a/sys/dev/puc/pucdata.c
+++ b/sys/dev/puc/pucdata.c
@@ -630,6 +630,12 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_8S, 0x10, 0, -1,
},
+ { 0x13fe, 0x1600, 0x1602, 0x0002,
+ "Advantech PCI-1602",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_2S, 0x10, 0, 8,
+ },
+
{ 0x1407, 0x0100, 0xffff, 0,
"Lava Computers Dual Serial",
DEFAULT_RCLK,
diff --git a/sys/dev/snc/dp83932.c b/sys/dev/snc/dp83932.c
index f86ad7e..17b67cd 100644
--- a/sys/dev/snc/dp83932.c
+++ b/sys/dev/snc/dp83932.c
@@ -142,10 +142,8 @@ int sncdebug = 0;
int
-sncconfig(sc, media, nmedia, defmedia, myea)
- struct snc_softc *sc;
- int *media, nmedia, defmedia;
- u_int8_t *myea;
+sncconfig(struct snc_softc *sc, int *media, int nmedia, int defmedia,
+ u_int8_t *myea)
{
struct ifnet *ifp;
int i;
@@ -195,8 +193,7 @@ sncconfig(sc, media, nmedia, defmedia, myea)
}
void
-sncshutdown(arg)
- void *arg;
+sncshutdown(void *arg)
{
struct snc_softc *sc = arg;
@@ -208,8 +205,7 @@ sncshutdown(arg)
* Media change callback.
*/
int
-snc_mediachange(ifp)
- struct ifnet *ifp;
+snc_mediachange(struct ifnet *ifp)
{
struct snc_softc *sc = ifp->if_softc;
int error;
@@ -227,9 +223,7 @@ snc_mediachange(ifp)
* Media status callback.
*/
void
-snc_mediastatus(ifp, ifmr)
- struct ifnet *ifp;
- struct ifmediareq *ifmr;
+snc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
struct snc_softc *sc = ifp->if_softc;
@@ -248,10 +242,7 @@ snc_mediastatus(ifp, ifmr)
static int
-sncioctl(ifp, cmd, data)
- struct ifnet *ifp;
- u_long cmd;
- caddr_t data;
+sncioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
struct ifreq *ifr;
struct snc_softc *sc = ifp->if_softc;
@@ -317,8 +308,7 @@ sncioctl(ifp, cmd, data)
* Encapsulate a packet of type family for the local net.
*/
static void
-sncstart(ifp)
- struct ifnet *ifp;
+sncstart(struct ifnet *ifp)
{
struct snc_softc *sc = ifp->if_softc;
@@ -328,8 +318,7 @@ sncstart(ifp)
}
static void
-sncstart_locked(ifp)
- struct ifnet *ifp;
+sncstart_locked(struct ifnet *ifp)
{
struct snc_softc *sc = ifp->if_softc;
struct mbuf *m;
@@ -390,16 +379,14 @@ outloop:
* hardware/software errors.
*/
static void
-sncreset(sc)
- struct snc_softc *sc;
+sncreset(struct snc_softc *sc)
{
sncstop(sc);
sncinit_locked(sc);
}
static void
-sncinit(xsc)
- void *xsc;
+sncinit(void *xsc)
{
struct snc_softc *sc = xsc;
@@ -476,8 +463,7 @@ sncinit_locked(struct snc_softc *sc)
* part way through.
*/
static int
-sncstop(sc)
- struct snc_softc *sc;
+sncstop(struct snc_softc *sc)
{
struct mtd *mtd;
@@ -536,10 +522,7 @@ sncwatchdog(void *arg)
* stuff packet into sonic
*/
static u_int
-sonicput(sc, m0, mtd_next)
- struct snc_softc *sc;
- struct mbuf *m0;
- int mtd_next;
+sonicput(struct snc_softc *sc, struct mbuf *m0, int mtd_next)
{
struct mtd *mtdp;
struct mbuf *m;
@@ -629,8 +612,7 @@ sonicput(sc, m0, mtd_next)
* CAM support
*/
static void
-caminitialise(sc)
- struct snc_softc *sc;
+caminitialise(struct snc_softc *sc)
{
u_int32_t v_cda = sc->v_cda;
int i;
@@ -653,10 +635,7 @@ caminitialise(sc)
}
static void
-camentry(sc, entry, ea)
- int entry;
- u_char *ea;
- struct snc_softc *sc;
+camentry(struct snc_softc *sc, int entry, u_char *ea)
{
u_int32_t v_cda = sc->v_cda;
int camoffset = entry * CDA_CAMDESC;
@@ -670,8 +649,7 @@ camentry(sc, entry, ea)
}
static void
-camprogram(sc)
- struct snc_softc *sc;
+camprogram(struct snc_softc *sc)
{
struct ifmultiaddr *ifma;
struct ifnet *ifp;
@@ -732,8 +710,7 @@ camprogram(sc)
#ifdef SNCDEBUG
static void
-camdump(sc)
- struct snc_softc *sc;
+camdump(struct snc_softc *sc)
{
int i;
@@ -758,8 +735,7 @@ camdump(sc)
#endif
static void
-initialise_tda(sc)
- struct snc_softc *sc;
+initialise_tda(struct snc_softc *sc)
{
struct mtd *mtd;
int i;
@@ -780,8 +756,7 @@ initialise_tda(sc)
}
static void
-initialise_rda(sc)
- struct snc_softc *sc;
+initialise_rda(struct snc_softc *sc)
{
int i;
u_int32_t vv_rda = 0;
@@ -809,8 +784,7 @@ initialise_rda(sc)
}
static void
-initialise_rra(sc)
- struct snc_softc *sc;
+initialise_rra(struct snc_softc *sc)
{
int i;
u_int v;
@@ -842,8 +816,7 @@ initialise_rra(sc)
}
void
-sncintr(arg)
- void *arg;
+sncintr(void *arg)
{
struct snc_softc *sc = (struct snc_softc *)arg;
int isr;
@@ -912,8 +885,7 @@ sncintr(arg)
* Transmit interrupt routine
*/
static void
-sonictxint(sc)
- struct snc_softc *sc;
+sonictxint(struct snc_softc *sc)
{
struct mtd *mtd;
u_int32_t txp;
@@ -990,8 +962,7 @@ sonictxint(sc)
* Receive interrupt routine
*/
static void
-sonicrxint(sc)
- struct snc_softc *sc;
+sonicrxint(struct snc_softc *sc)
{
u_int32_t rda;
int orra;
@@ -1084,10 +1055,7 @@ sonicrxint(sc)
* appropriate protocol handler
*/
static int
-sonic_read(sc, pkt, len)
- struct snc_softc *sc;
- u_int32_t pkt;
- int len;
+sonic_read(struct snc_softc *sc, u_int32_t pkt, int len)
{
struct ifnet *ifp = sc->sc_ifp;
struct ether_header *et;
@@ -1131,10 +1099,7 @@ sonic_read(sc, pkt, len)
* munge the received packet into an mbuf chain
*/
static struct mbuf *
-sonic_get(sc, pkt, datalen)
- struct snc_softc *sc;
- u_int32_t pkt;
- int datalen;
+sonic_get(struct snc_softc *sc, u_int32_t pkt, int datalen)
{
struct mbuf *m, *top, **mp;
int len;
@@ -1194,8 +1159,7 @@ sonic_get(sc, pkt, datalen)
* Enable power on the interface.
*/
int
-snc_enable(sc)
- struct snc_softc *sc;
+snc_enable(struct snc_softc *sc)
{
#ifdef SNCDEBUG
@@ -1217,8 +1181,7 @@ snc_enable(sc)
* Disable power on the interface.
*/
void
-snc_disable(sc)
- struct snc_softc *sc;
+snc_disable(struct snc_softc *sc)
{
#ifdef SNCDEBUG
diff --git a/sys/dev/snc/dp83932subr.c b/sys/dev/snc/dp83932subr.c
index 35c94a8..a34c554 100644
--- a/sys/dev/snc/dp83932subr.c
+++ b/sys/dev/snc/dp83932subr.c
@@ -73,9 +73,7 @@ static __inline u_int16_t snc_nec16_select_bank
* to accept packets.
*/
int
-sncsetup(sc, lladdr)
- struct snc_softc *sc;
- u_int8_t *lladdr;
+sncsetup(struct snc_softc *sc, u_int8_t *lladdr)
{
u_int32_t p, pp;
int i;
@@ -172,8 +170,7 @@ sncsetup(sc, lladdr)
* check if a specified irq is acceptable.
*/
u_int8_t
-snc_nec16_validate_irq(irq)
- int irq;
+snc_nec16_validate_irq(int irq)
{
const u_int8_t encoded_irq[16] = {
-1, -1, -1, 0, -1, 1, 2, -1, -1, 3, 4, -1, 5, 6, -1, -1
@@ -186,9 +183,7 @@ snc_nec16_validate_irq(irq)
* specify irq to board.
*/
int
-snc_nec16_register_irq(sc, irq)
- struct snc_softc *sc;
- int irq;
+snc_nec16_register_irq(struct snc_softc *sc, int irq)
{
bus_space_tag_t iot = sc->sc_iot;
bus_space_handle_t ioh = sc->sc_ioh;
@@ -212,8 +207,7 @@ snc_nec16_register_irq(sc, irq)
* check if a specified memory base address is acceptable.
*/
int
-snc_nec16_validate_mem(maddr)
- int maddr;
+snc_nec16_validate_mem(int maddr)
{
/* Check on Normal mode with max range, only */
@@ -230,9 +224,7 @@ snc_nec16_validate_mem(maddr)
* specify memory base address to board and map to first bank.
*/
int
-snc_nec16_register_mem(sc, maddr)
- struct snc_softc *sc;
- int maddr;
+snc_nec16_register_mem(struct snc_softc *sc, int maddr)
{
bus_space_tag_t iot = sc->sc_iot;
bus_space_handle_t ioh = sc->sc_ioh;
@@ -260,11 +252,8 @@ snc_nec16_register_mem(sc, maddr)
}
int
-snc_nec16_check_memory(iot, ioh, memt, memh)
- bus_space_tag_t iot;
- bus_space_handle_t ioh;
- bus_space_tag_t memt;
- bus_space_handle_t memh;
+snc_nec16_check_memory(bus_space_tag_t iot, bus_space_handle_t ioh,
+ bus_space_tag_t memt, bus_space_handle_t memh)
{
u_int16_t val;
int i, j;
@@ -341,14 +330,9 @@ snc_nec16_check_memory(iot, ioh, memt, memh)
}
int
-snc_nec16_detectsubr(iot, ioh, memt, memh, irq, maddr, type)
- bus_space_tag_t iot;
- bus_space_handle_t ioh;
- bus_space_tag_t memt;
- bus_space_handle_t memh;
- int irq;
- int maddr;
- u_int8_t type;
+snc_nec16_detectsubr(bus_space_tag_t iot, bus_space_handle_t ioh,
+ bus_space_tag_t memt, bus_space_handle_t memh, int irq, int maddr,
+ u_int8_t type)
{
u_int16_t cr;
u_int8_t ident;
@@ -413,8 +397,7 @@ snc_nec16_detectsubr(iot, ioh, memt, memh, irq, maddr, type)
#define SNC_NEC_SERIES_PNP_CBUS2 0x3d
u_int8_t *
-snc_nec16_detect_type(myea)
- u_int8_t *myea;
+snc_nec16_detect_type(u_int8_t *myea)
{
u_int32_t vendor = (myea[0] << 16) | (myea[1] << 8) | myea[2];
u_int8_t series = myea[3];
@@ -459,10 +442,8 @@ snc_nec16_detect_type(myea)
}
int
-snc_nec16_get_enaddr(iot, ioh, myea)
- bus_space_tag_t iot;
- bus_space_handle_t ioh;
- u_int8_t *myea;
+snc_nec16_get_enaddr(bus_space_tag_t iot, bus_space_handle_t ioh,
+ u_int8_t *myea)
{
u_int8_t eeprom[SNEC_EEPROM_SIZE];
u_int8_t rom_sum, sum = 0x00;
@@ -493,9 +474,7 @@ snc_nec16_get_enaddr(iot, ioh, myea)
* read from NEC/SONIC NIC register.
*/
u_int16_t
-snc_nec16_nic_get(sc, reg)
- struct snc_softc *sc;
- u_int8_t reg;
+snc_nec16_nic_get(struct snc_softc *sc, u_int8_t reg)
{
u_int16_t val;
@@ -510,10 +489,7 @@ snc_nec16_nic_get(sc, reg)
* write to NEC/SONIC NIC register.
*/
void
-snc_nec16_nic_put(sc, reg, val)
- struct snc_softc *sc;
- u_int8_t reg;
- u_int16_t val;
+snc_nec16_nic_put(struct snc_softc *sc, u_int8_t reg, u_int16_t val)
{
/* select SONIC register */
@@ -527,10 +503,7 @@ snc_nec16_nic_put(sc, reg, val)
* where exists specified (internal buffer memory) offset.
*/
static __inline u_int16_t
-snc_nec16_select_bank(sc, base, offset)
- struct snc_softc *sc;
- u_int32_t base;
- u_int32_t offset;
+snc_nec16_select_bank(struct snc_softc *sc, u_int32_t base, u_int32_t offset)
{
bus_space_tag_t iot = sc->sc_iot;
bus_space_handle_t ioh = sc->sc_ioh;
@@ -565,11 +538,8 @@ snc_nec16_select_bank(sc, base, offset)
* write to SONIC descriptors.
*/
void
-snc_nec16_writetodesc(sc, base, offset, val)
- struct snc_softc *sc;
- u_int32_t base;
- u_int32_t offset;
- u_int16_t val;
+snc_nec16_writetodesc(struct snc_softc *sc, u_int32_t base, u_int32_t offset,
+ u_int16_t val)
{
bus_space_tag_t memt = sc->sc_memt;
bus_space_handle_t memh = sc->sc_memh;
@@ -584,10 +554,7 @@ snc_nec16_writetodesc(sc, base, offset, val)
* read from SONIC descriptors.
*/
u_int16_t
-snc_nec16_readfromdesc(sc, base, offset)
- struct snc_softc *sc;
- u_int32_t base;
- u_int32_t offset;
+snc_nec16_readfromdesc(struct snc_softc *sc, u_int32_t base, u_int32_t offset)
{
bus_space_tag_t memt = sc->sc_memt;
bus_space_handle_t memh = sc->sc_memh;
@@ -602,11 +569,8 @@ snc_nec16_readfromdesc(sc, base, offset)
* read from SONIC data buffer.
*/
void
-snc_nec16_copyfrombuf(sc, dst, offset, size)
- struct snc_softc *sc;
- void *dst;
- u_int32_t offset;
- size_t size;
+snc_nec16_copyfrombuf(struct snc_softc *sc, void *dst, u_int32_t offset,
+ size_t size)
{
bus_space_tag_t memt = sc->sc_memt;
bus_space_handle_t memh = sc->sc_memh;
@@ -644,11 +608,8 @@ snc_nec16_copyfrombuf(sc, dst, offset, size)
* write to SONIC data buffer.
*/
void
-snc_nec16_copytobuf(sc, src, offset, size)
- struct snc_softc *sc;
- void *src;
- u_int32_t offset;
- size_t size;
+snc_nec16_copytobuf(struct snc_softc *sc, void *src, u_int32_t offset,
+ size_t size)
{
bus_space_tag_t memt = sc->sc_memt;
bus_space_handle_t memh = sc->sc_memh;
@@ -688,10 +649,7 @@ snc_nec16_copytobuf(sc, src, offset, size)
* write (fill) 0 to SONIC data buffer.
*/
void
-snc_nec16_zerobuf(sc, offset, size)
- struct snc_softc *sc;
- u_int32_t offset;
- size_t size;
+snc_nec16_zerobuf(struct snc_softc *sc, u_int32_t offset, size_t size)
{
bus_space_tag_t memt = sc->sc_memt;
bus_space_handle_t memh = sc->sc_memh;
@@ -735,10 +693,8 @@ snc_nec16_zerobuf(sc, offset, size)
#define SNEC_EEP_DELAY 1000
void
-snc_nec16_read_eeprom(iot, ioh, data)
- bus_space_tag_t iot;
- bus_space_handle_t ioh;
- u_int8_t *data;
+snc_nec16_read_eeprom(bus_space_tag_t iot, bus_space_handle_t ioh,
+ u_int8_t *data)
{
u_int8_t n, val, bit;
@@ -851,9 +807,7 @@ snc_nec16_read_eeprom(iot, ioh, data)
#ifdef SNCDEBUG
void
-snc_nec16_dump_reg(iot, ioh)
- bus_space_tag_t iot;
- bus_space_handle_t ioh;
+snc_nec16_dump_reg(bus_space_tag_t iot, bus_space_handle_t ioh)
{
u_int8_t n;
u_int16_t val;
diff --git a/sys/dev/snc/if_snc.c b/sys/dev/snc/if_snc.c
index 02ac7eb..914a406 100644
--- a/sys/dev/snc/if_snc.c
+++ b/sys/dev/snc/if_snc.c
@@ -66,9 +66,7 @@ devclass_t snc_devclass;
* Allocate a port resource with the given resource id.
*/
int
-snc_alloc_port(dev, rid)
- device_t dev;
- int rid;
+snc_alloc_port(device_t dev, int rid)
{
struct snc_softc *sc = device_get_softc(dev);
struct resource *res;
@@ -91,9 +89,7 @@ snc_alloc_port(dev, rid)
* Allocate a memory resource with the given resource id.
*/
int
-snc_alloc_memory(dev, rid)
- device_t dev;
- int rid;
+snc_alloc_memory(device_t dev, int rid)
{
struct snc_softc *sc = device_get_softc(dev);
struct resource *res;
@@ -116,10 +112,7 @@ snc_alloc_memory(dev, rid)
* Allocate an irq resource with the given resource id.
*/
int
-snc_alloc_irq(dev, rid, flags)
- device_t dev;
- int rid;
- int flags;
+snc_alloc_irq(device_t dev, int rid, int flags)
{
struct snc_softc *sc = device_get_softc(dev);
struct resource *res;
@@ -139,8 +132,7 @@ snc_alloc_irq(dev, rid, flags)
* Release all resources
*/
void
-snc_release_resources(dev)
- device_t dev;
+snc_release_resources(device_t dev)
{
struct snc_softc *sc = device_get_softc(dev);
@@ -170,9 +162,7 @@ snc_release_resources(dev)
****************************************************************/
int
-snc_probe(dev, type)
- device_t dev;
- int type;
+snc_probe(device_t dev, int type)
{
struct snc_softc *sc = device_get_softc(dev);
@@ -188,8 +178,7 @@ snc_probe(dev, type)
****************************************************************/
int
-snc_attach(dev)
- device_t dev;
+snc_attach(device_t dev)
{
struct snc_softc *sc = device_get_softc(dev);
u_int8_t myea[ETHER_ADDR_LEN];
@@ -253,8 +242,7 @@ snc_attach(dev)
****************************************************************/
int
-snc_shutdown(dev)
- device_t dev;
+snc_shutdown(device_t dev)
{
struct snc_softc *sc = device_get_softc(dev);
diff --git a/sys/dev/snc/if_snc_cbus.c b/sys/dev/snc/if_snc_cbus.c
index e859b7a..c3c7946 100644
--- a/sys/dev/snc/if_snc_cbus.c
+++ b/sys/dev/snc/if_snc_cbus.c
@@ -68,8 +68,7 @@ static struct isa_pnp_id snc_ids[] = {
};
static void
-snc_isapnp_reconfig(dev)
- device_t dev;
+snc_isapnp_reconfig(device_t dev)
{
struct isa_device *idev = DEVTOISA(dev);
struct isa_config config;
@@ -111,8 +110,7 @@ snc_isapnp_reconfig(dev)
}
static int
-snc_isa_probe(dev)
- device_t dev;
+snc_isa_probe(device_t dev)
{
struct snc_softc *sc = device_get_softc(dev);
int type;
@@ -177,8 +175,7 @@ snc_isa_probe(dev)
}
static int
-snc_isa_attach(dev)
- device_t dev;
+snc_isa_attach(device_t dev)
{
struct snc_softc *sc = device_get_softc(dev);
diff --git a/sys/dev/sound/pci/emu10kx.c b/sys/dev/sound/pci/emu10kx.c
index c15a03b..5f217c1 100644
--- a/sys/dev/sound/pci/emu10kx.c
+++ b/sys/dev/sound/pci/emu10kx.c
@@ -3168,7 +3168,7 @@ emu_pci_attach(device_t dev)
sc->output_base = 0x20;
/*
* XXX 5.1 Analog outputs are inside efxc address space!
- * They use ouput+0x11/+0x12 (=efxc+1/+2).
+ * They use output+0x11/+0x12 (=efxc+1/+2).
* Don't use this efx registers for recording on SB Live! 5.1!
*/
sc->efxc_base = 0x30;
diff --git a/sys/dev/twa/tw_cl_misc.c b/sys/dev/twa/tw_cl_misc.c
index b6b5333..b0bf8e4 100644
--- a/sys/dev/twa/tw_cl_misc.c
+++ b/sys/dev/twa/tw_cl_misc.c
@@ -641,7 +641,7 @@ tw_cli_dbg_printf(TW_UINT8 dbg_level,
va_end(ap);
tw_osl_strcpy(print_str + tw_osl_strlen(print_str), "\n");
- tw_osl_dbg_printf(ctlr_handle, print_str);
+ tw_osl_dbg_printf(ctlr_handle, "%s", print_str);
}
#endif /* TW_OSL_DEBUG */
}
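The tw_cli_dbg_printf() hunk above is a standard format-string hardening fix: print_str is assembled from caller-supplied text, so passing it directly as the format argument lets any stray '%' conversion in the message be interpreted by the printf engine. A minimal userland sketch of the same hazard and the fix, with illustrative names that are not part of the driver:

#include <stdio.h>

static void
dbg_print(const char *msg)
{
	/*
	 * Unsafe: if msg happened to contain "%s" or "%n", printf would
	 * try to consume arguments that were never passed.
	 *
	 *	printf(msg);
	 */

	/* Safe: the message is data, the format string is a constant. */
	printf("%s", msg);
}

int
main(void)
{
	dbg_print("controller reset (error %d seen earlier)\n");
	return (0);
}

The same one-argument-to-two-argument change is what the patch applies to tw_osl_dbg_printf().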
diff --git a/sys/dev/usb/controller/dwc_otg.c b/sys/dev/usb/controller/dwc_otg.c
index bb19e40..89e312a 100644
--- a/sys/dev/usb/controller/dwc_otg.c
+++ b/sys/dev/usb/controller/dwc_otg.c
@@ -372,6 +372,15 @@ dwc_otg_pull_down(struct dwc_otg_softc *sc)
}
static void
+dwc_otg_enable_sof_irq(struct dwc_otg_softc *sc)
+{
+ if (sc->sc_irq_mask & GINTSTS_SOF)
+ return;
+ sc->sc_irq_mask |= GINTSTS_SOF;
+ DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
+}
+
+static void
dwc_otg_resume_irq(struct dwc_otg_softc *sc)
{
if (sc->sc_flags.status_suspend) {
@@ -523,10 +532,6 @@ dwc_otg_host_channel_wait(struct dwc_otg_td *td)
if (x == 0)
return (0); /* wait */
- /* assume NAK-ing is next */
- if (sc->sc_chan_state[x].hcint & HCINT_NYET)
- return (0); /* wait */
-
/* find new disabled channel */
for (x = 1; x != sc->sc_host_ch_max; x++) {
@@ -629,8 +634,7 @@ dwc_otg_host_channel_disable(struct dwc_otg_softc *sc, uint8_t x)
/* don't re-use channel until next SOF is transmitted */
sc->sc_chan_state[x].wait_sof = 2;
/* enable SOF interrupt */
- sc->sc_irq_mask |= GINTMSK_SOFMSK;
- DWC_OTG_WRITE_4(sc, DOTG_GINTMSK, sc->sc_irq_mask);
+ dwc_otg_enable_sof_irq(sc);
}
}
@@ -688,14 +692,15 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
DWC_OTG_READ_4(sc, DOTG_HCCHAR(td->channel)),
DWC_OTG_READ_4(sc, DOTG_HCTSIZ(td->channel)));
- if (hcint & HCINT_STALL) {
+ if (hcint & (HCINT_RETRY |
+ HCINT_ACK | HCINT_NYET)) {
+ /* give success bits priority over failure bits */
+ } else if (hcint & HCINT_STALL) {
DPRINTF("CH=%d STALL\n", td->channel);
td->error_stall = 1;
td->error_any = 1;
return (0); /* complete */
- }
-
- if (hcint & HCINT_ERRORS) {
+ } else if (hcint & HCINT_ERRORS) {
DPRINTF("CH=%d ERROR\n", td->channel);
td->errcnt++;
if (td->hcsplt != 0 || td->errcnt >= 3) {
@@ -769,6 +774,8 @@ dwc_otg_host_setup_tx(struct dwc_otg_td *td)
return (0); /* complete */
}
break;
+ case DWC_CHAN_ST_TX_PKT_SYNC:
+ goto send_pkt_sync;
default:
break;
}
@@ -780,7 +787,21 @@ send_pkt:
return (0); /* complete */
}
+send_pkt_sync:
if (td->hcsplt != 0) {
+ uint32_t count;
+
+ count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
+ /* check for not first microframe */
+ if (count != 0) {
+ /* enable SOF interrupt */
+ dwc_otg_enable_sof_irq(sc);
+ /* set state */
+ td->state = DWC_CHAN_ST_TX_PKT_SYNC;
+ dwc_otg_host_channel_free(td);
+ return (1); /* busy */
+ }
+
td->hcsplt &= ~HCSPLT_COMPSPLT;
td->state = DWC_CHAN_ST_WAIT_S_ANE;
} else {
@@ -961,8 +982,7 @@ not_complete:
}
static uint8_t
-dwc_otg_host_rate_check(struct dwc_otg_td *td,
- uint8_t do_inc)
+dwc_otg_host_rate_check(struct dwc_otg_td *td)
{
struct dwc_otg_softc *sc;
uint8_t ep_type;
@@ -981,13 +1001,14 @@ dwc_otg_host_rate_check(struct dwc_otg_td *td,
td->hcchar |= HCCHAR_ODDFRM;
else
td->hcchar &= ~HCCHAR_ODDFRM;
- if (do_inc)
- td->tmr_val += td->tmr_res;
+ td->tmr_val += td->tmr_res;
} else if (ep_type == UE_INTERRUPT) {
- if ((sc->sc_tmr_val & 0xFF) != td->tmr_val)
+ uint8_t delta;
+
+ delta = sc->sc_tmr_val - td->tmr_val;
+ if (delta >= 128)
goto busy;
- if (do_inc)
- td->tmr_val += td->tmr_res;
+ td->tmr_val = sc->sc_tmr_val + td->tmr_res;
} else if (td->did_nak != 0) {
goto busy;
}
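For interrupt endpoints the rate check above no longer requires an exact timer match; it treats the 8-bit timer as a wrapping counter and lets the transfer run once the deadline is no longer in the future, i.e. once (uint8_t)(now - deadline) < 128. The later hunks that clamp the polling interval from 255 to 127 keep every deadline within half of the 8-bit ring so this comparison stays unambiguous. A small sketch of the arithmetic, with illustrative names only:

#include <stdint.h>
#include <stdio.h>

/*
 * Return non-zero once "deadline" has been reached, treating the 8-bit
 * timer as a modular counter.  This is only unambiguous while deadlines
 * are scheduled at most 127 ticks ahead, which is why the driver clamps
 * the interval to 127.
 */
static int
deadline_reached(uint8_t now, uint8_t deadline)
{
	uint8_t delta = now - deadline;		/* wraps modulo 256 */

	return (delta < 128);
}

int
main(void)
{
	printf("%d\n", deadline_reached(10, 250));	/* 1: deadline passed 16 ticks ago */
	printf("%d\n", deadline_reached(10, 20));	/* 0: deadline still 10 ticks away */
	return (0);
}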
@@ -1010,6 +1031,7 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
uint32_t hcint;
uint32_t hcchar;
uint32_t count;
+ uint8_t ep_type;
if (dwc_otg_host_channel_alloc(td))
return (1); /* busy */
@@ -1017,6 +1039,9 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
/* get pointer to softc */
sc = DWC_OTG_PC2SC(td->pc);
+ ep_type = ((td->hcchar &
+ HCCHAR_EPTYPE_MASK) >> HCCHAR_EPTYPE_SHIFT);
+
hcint = sc->sc_chan_state[td->channel].hcint;
DPRINTF("CH=%d ST=%d HCINT=0x%08x HCCHAR=0x%08x HCTSIZ=0x%08x\n",
@@ -1026,14 +1051,15 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
/* check interrupt bits */
- if (hcint & HCINT_STALL) {
+ if (hcint & (HCINT_RETRY |
+ HCINT_ACK | HCINT_NYET)) {
+ /* give success bits priority over failure bits */
+ } else if (hcint & HCINT_STALL) {
DPRINTF("CH=%d STALL\n", td->channel);
td->error_stall = 1;
td->error_any = 1;
return (0); /* complete */
- }
-
- if (hcint & HCINT_ERRORS) {
+ } else if (hcint & HCINT_ERRORS) {
DPRINTF("CH=%d ERROR\n", td->channel);
td->errcnt++;
if (td->hcsplt != 0 || td->errcnt >= 3) {
@@ -1063,7 +1089,17 @@ dwc_otg_host_data_rx(struct dwc_otg_td *td)
switch (sc->sc_last_rx_status & GRXSTSRD_PKTSTS_MASK) {
case GRXSTSRH_IN_DATA:
- DPRINTF("DATA\n");
+ DPRINTF("DATA ST=%d STATUS=0x%08x\n",
+ (int)td->state, (int)sc->sc_last_rx_status);
+
+ if (hcint & HCINT_SOFTWARE_ONLY) {
+ /*
+ * When using SPLIT transactions on interrupt
+ * endpoints, sometimes data occurs twice.
+ */
+ DPRINTF("Data already received\n");
+ break;
+ }
td->toggle ^= 1;
@@ -1131,12 +1167,16 @@ check_state:
else
goto receive_pkt;
}
- if (hcint & HCINT_NYET) {
- if (td->hcsplt != 0)
- goto receive_pkt;
- }
- if (!(hcint & HCINT_SOFTWARE_ONLY))
+ if (!(hcint & HCINT_SOFTWARE_ONLY)) {
+ if (hcint & HCINT_NYET) {
+ if (td->hcsplt != 0) {
+ if (!dwc_otg_host_channel_wait(td))
+ break;
+ goto receive_pkt;
+ }
+ }
break;
+ }
if (hcint & (HCINT_ACK | HCINT_NYET)) {
if (!dwc_otg_host_channel_wait(td))
break;
@@ -1179,20 +1219,38 @@ check_state:
case DWC_CHAN_ST_RX_SPKT:
goto receive_spkt;
+ case DWC_CHAN_ST_RX_SPKT_SYNC:
+ goto receive_spkt_sync;
+
default:
break;
}
goto busy;
receive_pkt:
- if (dwc_otg_host_rate_check(td, 1)) {
+ if (td->hcsplt != 0) {
+ count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
+
+ /* check for even microframes */
+ if (count == td->curr_frame) {
+ td->state = DWC_CHAN_ST_RX_PKT;
+ dwc_otg_host_channel_free(td);
+ /* enable SOF interrupt */
+ dwc_otg_enable_sof_irq(sc);
+ goto busy;
+ } else if (count == 0) {
+ /* check for start split timeout */
+ goto receive_spkt;
+ }
+
+ td->curr_frame = count;
+ td->hcsplt |= HCSPLT_COMPSPLT;
+ } else if (dwc_otg_host_rate_check(td)) {
td->state = DWC_CHAN_ST_RX_PKT;
dwc_otg_host_channel_free(td);
goto busy;
}
- if (td->hcsplt != 0)
- td->hcsplt |= HCSPLT_COMPSPLT;
td->state = DWC_CHAN_ST_WAIT_ANE;
/* receive one packet */
@@ -1213,12 +1271,42 @@ receive_pkt:
goto busy;
receive_spkt:
- if (dwc_otg_host_rate_check(td, 0)) {
+ if (dwc_otg_host_rate_check(td)) {
td->state = DWC_CHAN_ST_RX_SPKT;
dwc_otg_host_channel_free(td);
goto busy;
}
+receive_spkt_sync:
+ if (ep_type == UE_INTERRUPT ||
+ ep_type == UE_ISOCHRONOUS) {
+ count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
+ td->curr_frame = count;
+
+ /* check for non-zero microframe */
+ if (count != 0) {
+ /* enable SOF interrupt */
+ dwc_otg_enable_sof_irq(sc);
+ /* set state */
+ td->state = DWC_CHAN_ST_RX_SPKT_SYNC;
+ dwc_otg_host_channel_free(td);
+ goto busy;
+ }
+ } else {
+ count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
+ td->curr_frame = count;
+
+ /* check for two last frames */
+ if (count >= 6) {
+ /* enable SOF interrupt */
+ dwc_otg_enable_sof_irq(sc);
+ /* set state */
+ td->state = DWC_CHAN_ST_RX_SPKT_SYNC;
+ dwc_otg_host_channel_free(td);
+ goto busy;
+ }
+ }
+
td->hcsplt &= ~HCSPLT_COMPSPLT;
td->state = DWC_CHAN_ST_WAIT_S_ANE;
@@ -1377,14 +1465,15 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
DWC_OTG_READ_4(sc, DOTG_HCCHAR(td->channel)),
DWC_OTG_READ_4(sc, DOTG_HCTSIZ(td->channel)));
- if (hcint & HCINT_STALL) {
+ if (hcint & (HCINT_RETRY |
+ HCINT_ACK | HCINT_NYET)) {
+ /* give success bits priority over failure bits */
+ } else if (hcint & HCINT_STALL) {
DPRINTF("CH=%d STALL\n", td->channel);
td->error_stall = 1;
td->error_any = 1;
return (0); /* complete */
- }
-
- if (hcint & HCINT_ERRORS) {
+ } else if (hcint & HCINT_ERRORS) {
DPRINTF("CH=%d ERROR\n", td->channel);
td->errcnt++;
if (td->hcsplt != 0 || td->errcnt >= 3) {
@@ -1482,6 +1571,9 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
case DWC_CHAN_ST_TX_PKT:
goto send_pkt;
+ case DWC_CHAN_ST_TX_PKT_SYNC:
+ goto send_pkt_sync;
+
case DWC_CHAN_ST_TX_CPKT:
goto send_cpkt;
@@ -1491,13 +1583,25 @@ dwc_otg_host_data_tx(struct dwc_otg_td *td)
goto busy;
send_pkt:
- if (dwc_otg_host_rate_check(td, 1)) {
+ if (dwc_otg_host_rate_check(td)) {
td->state = DWC_CHAN_ST_TX_PKT;
dwc_otg_host_channel_free(td);
goto busy;
}
+send_pkt_sync:
if (td->hcsplt != 0) {
+ count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
+ /* check for first or last microframe */
+ if (count == 7 || count == 0) {
+ /* enable SOF interrupt */
+ dwc_otg_enable_sof_irq(sc);
+ /* set state */
+ td->state = DWC_CHAN_ST_TX_PKT_SYNC;
+ dwc_otg_host_channel_free(td);
+ goto busy;
+ }
+
td->hcsplt &= ~HCSPLT_COMPSPLT;
td->state = DWC_CHAN_ST_WAIT_S_ANE;
} else {
@@ -1549,6 +1653,13 @@ send_pkt:
goto busy;
send_cpkt:
+ count = DWC_OTG_READ_4(sc, DOTG_HFNUM) & 7;
+ /* check for first microframe */
+ if (count == 0) {
+ /* send packet again */
+ goto send_pkt;
+ }
+
td->hcsplt |= HCSPLT_COMPSPLT;
td->state = DWC_CHAN_ST_WAIT_C_ANE;
@@ -2242,6 +2353,9 @@ dwc_otg_interrupt(struct dwc_otg_softc *sc)
if (sc->sc_irq_mask & GINTMSK_SOFMSK) {
uint8_t x;
uint8_t y;
+
+ DPRINTFN(12, "SOF interrupt\n");
+
for (x = y = 0; x != sc->sc_host_ch_max; x++) {
if (sc->sc_chan_state[x].wait_sof != 0) {
if (--(sc->sc_chan_state[x].wait_sof) != 0)
@@ -2280,6 +2394,7 @@ dwc_otg_setup_standard_chain_sub(struct dwc_otg_std_temp *temp)
td->remainder = temp->len;
td->tx_bytes = 0;
td->error_any = 0;
+ td->error_stall = 0;
td->npkt = 0;
td->did_stall = temp->did_stall;
td->short_pkt = temp->short_pkt;
@@ -2531,8 +2646,8 @@ dwc_otg_setup_standard_chain(struct usb_xfer *xfer)
ival = xfer->interval / DWC_OTG_HOST_TIMER_RATE;
if (ival == 0)
ival = 1;
- else if (ival > 255)
- ival = 255;
+ else if (ival > 127)
+ ival = 127;
td->tmr_val = sc->sc_tmr_val + ival;
td->tmr_res = ival;
}
@@ -2549,8 +2664,8 @@ dwc_otg_setup_standard_chain(struct usb_xfer *xfer)
ival = xfer->interval / DWC_OTG_HOST_TIMER_RATE;
if (ival == 0)
ival = 1;
- else if (ival > 255)
- ival = 255;
+ else if (ival > 127)
+ ival = 127;
td->tmr_val = sc->sc_tmr_val + ival;
td->tmr_res = ival;
}
diff --git a/sys/dev/usb/controller/dwc_otg.h b/sys/dev/usb/controller/dwc_otg.h
index 1ff2583..a301698 100644
--- a/sys/dev/usb/controller/dwc_otg.h
+++ b/sys/dev/usb/controller/dwc_otg.h
@@ -60,6 +60,7 @@ struct dwc_otg_td {
uint8_t errcnt;
uint8_t tmr_res;
uint8_t tmr_val;
+ uint8_t curr_frame;
uint8_t ep_no;
uint8_t channel;
uint8_t state;
@@ -69,8 +70,10 @@ struct dwc_otg_td {
#define DWC_CHAN_ST_WAIT_C_ANE 3
#define DWC_CHAN_ST_RX_PKT 4
#define DWC_CHAN_ST_RX_SPKT 5
+#define DWC_CHAN_ST_RX_SPKT_SYNC 6
#define DWC_CHAN_ST_TX_PKT 4
#define DWC_CHAN_ST_TX_CPKT 5
+#define DWC_CHAN_ST_TX_PKT_SYNC 6
uint8_t error:1;
uint8_t error_any:1;
uint8_t error_stall:1;
diff --git a/sys/dev/usb/controller/dwc_otgreg.h b/sys/dev/usb/controller/dwc_otgreg.h
index a91f7dc..f59f48c 100644
--- a/sys/dev/usb/controller/dwc_otgreg.h
+++ b/sys/dev/usb/controller/dwc_otgreg.h
@@ -299,7 +299,7 @@
#define GRXSTSRD_DPID_DATA0 (0<<15)
#define GRXSTSRD_DPID_DATA1 (2<<15)
#define GRXSTSRD_DPID_DATA2 (1<<15)
-#define GRXSTSRD_PID_MDATA (3<<15)
+#define GRXSTSRD_DPID_MDATA (3<<15)
#define GRXSTSRD_BCNT_MASK 0x00007ff0
#define GRXSTSRD_BCNT_GET(x) (((x) >> 4) & 0x7FF)
#define GRXSTSRD_BCNT_SHIFT 4
diff --git a/sys/dev/usb/net/if_udav.c b/sys/dev/usb/net/if_udav.c
index c6f0811..ff57e25 100644
--- a/sys/dev/usb/net/if_udav.c
+++ b/sys/dev/usb/net/if_udav.c
@@ -169,7 +169,7 @@ MODULE_DEPEND(udav, ether, 1, 1, 1);
MODULE_DEPEND(udav, miibus, 1, 1, 1);
MODULE_VERSION(udav, 1);
-static struct usb_ether_methods udav_ue_methods = {
+static const struct usb_ether_methods udav_ue_methods = {
.ue_attach_post = udav_attach_post,
.ue_start = udav_start,
.ue_init = udav_init,
@@ -181,6 +181,15 @@ static struct usb_ether_methods udav_ue_methods = {
.ue_mii_sts = udav_ifmedia_status,
};
+static const struct usb_ether_methods udav_ue_methods_nophy = {
+ .ue_attach_post = udav_attach_post,
+ .ue_start = udav_start,
+ .ue_init = udav_init,
+ .ue_stop = udav_stop,
+ .ue_setmulti = udav_setmulti,
+ .ue_setpromisc = udav_setpromisc,
+};
+
#ifdef USB_DEBUG
static int udav_debug = 0;
@@ -264,17 +273,16 @@ udav_attach(device_t dev)
* The JP1082 has an unusable PHY and provides no link information.
*/
if (sc->sc_flags & UDAV_FLAG_NO_PHY) {
- udav_ue_methods.ue_tick = NULL;
- udav_ue_methods.ue_mii_upd = NULL;
- udav_ue_methods.ue_mii_sts = NULL;
+ ue->ue_methods = &udav_ue_methods_nophy;
sc->sc_flags |= UDAV_FLAG_LINK;
+ } else {
+ ue->ue_methods = &udav_ue_methods;
}
ue->ue_sc = sc;
ue->ue_dev = dev;
ue->ue_udev = uaa->device;
ue->ue_mtx = &sc->sc_mtx;
- ue->ue_methods = &udav_ue_methods;
error = uether_ifattach(ue);
if (error) {
diff --git a/sys/dev/usb/serial/u3g.c b/sys/dev/usb/serial/u3g.c
index c61c887..193c71b 100644
--- a/sys/dev/usb/serial/u3g.c
+++ b/sys/dev/usb/serial/u3g.c
@@ -319,6 +319,10 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(HUAWEI, E1820, U3GINIT_HUAWEISCSI),
U3G_DEV(HUAWEI, K3765, U3GINIT_HUAWEI),
U3G_DEV(HUAWEI, K3765_INIT, U3GINIT_HUAWEISCSI),
+ U3G_DEV(HUAWEI, K3770, U3GINIT_HUAWEI),
+ U3G_DEV(HUAWEI, K3770_INIT, U3GINIT_HUAWEISCSI),
+ U3G_DEV(HUAWEI, K4505, U3GINIT_HUAWEI),
+ U3G_DEV(HUAWEI, K4505_INIT, U3GINIT_HUAWEISCSI),
U3G_DEV(HUAWEI, ETS2055, U3GINIT_HUAWEI),
U3G_DEV(KYOCERA2, CDMA_MSM_K, 0),
U3G_DEV(KYOCERA2, KPC680, 0),
diff --git a/sys/dev/usb/serial/usb_serial.c b/sys/dev/usb/serial/usb_serial.c
index 02f75fb..2047bc3 100644
--- a/sys/dev/usb/serial/usb_serial.c
+++ b/sys/dev/usb/serial/usb_serial.c
@@ -425,16 +425,13 @@ ucom_attach_tty(struct ucom_super_softc *ssc, struct ucom_softc *sc)
if ((ucom_cons_softc == NULL) &&
(ssc->sc_unit == ucom_cons_unit) &&
(sc->sc_subunit == ucom_cons_subunit)) {
- struct termios t;
- DPRINTF("unit %d subunit %d is console", ssc->sc_unit, sc->sc_subunit);
+ DPRINTF("unit %d subunit %d is console",
+ ssc->sc_unit, sc->sc_subunit);
ucom_cons_softc = sc;
- memset(&t, 0, sizeof(t));
- t.c_ispeed = ucom_cons_baud;
- t.c_ospeed = t.c_ispeed;
- t.c_cflag = CS8;
+ tty_init_console(tp, ucom_cons_baud);
UCOM_MTX_LOCK(ucom_cons_softc);
ucom_cons_rx_low = 0;
@@ -443,7 +440,7 @@ ucom_attach_tty(struct ucom_super_softc *ssc, struct ucom_softc *sc)
ucom_cons_tx_high = 0;
sc->sc_flag |= UCOM_FLAG_CONSOLE;
ucom_open(ucom_cons_softc->sc_tty);
- ucom_param(ucom_cons_softc->sc_tty, &t);
+ ucom_param(ucom_cons_softc->sc_tty, &tp->t_termios_init_in);
UCOM_MTX_UNLOCK(ucom_cons_softc);
}
@@ -795,11 +792,12 @@ ucom_inwakeup(struct tty *tp)
if (sc == NULL)
return;
- tty_lock(tp);
+ UCOM_MTX_ASSERT(sc, MA_OWNED);
+
+ DPRINTF("tp=%p\n", tp);
if (ttydisc_can_bypass(tp) != 0 ||
(sc->sc_flag & UCOM_FLAG_HL_READY) == 0) {
- tty_unlock(tp);
return;
}
@@ -823,8 +821,6 @@ ucom_inwakeup(struct tty *tp)
if ((sc->sc_jitterbuf_in == pos) &&
(sc->sc_flag & UCOM_FLAG_RTS_IFLOW))
ucom_rts(sc, 0);
-
- tty_unlock(tp);
}
static int
@@ -1186,17 +1182,22 @@ ucom_param(struct tty *tp, struct termios *t)
if (!(sc->sc_flag & UCOM_FLAG_HL_READY)) {
/* XXX the TTY layer should call "open()" first! */
-
+ /*
+ * Not quite: Its ordering is partly backwards, but
+ * some parameters must be set early in ttydev_open(),
+ * possibly before calling ttydevsw_open().
+ */
error = ucom_open(tp);
- if (error) {
+ if (error)
goto done;
- }
+
opened = 1;
}
DPRINTF("sc = %p\n", sc);
/* Check requested parameters. */
if (t->c_ispeed && (t->c_ispeed != t->c_ospeed)) {
+ /* XXX c_ospeed == 0 is perfectly valid. */
DPRINTF("mismatch ispeed and ospeed\n");
error = EINVAL;
goto done;
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index 2761e8e..4112c9c 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -1545,6 +1545,7 @@ product EDIMAX EW7318USG 0x7318 USB Wireless dongle
product EDIMAX RT2870_1 0x7711 RT2870
product EDIMAX EW7717 0x7717 EW-7717
product EDIMAX EW7718 0x7718 EW-7718
+product EDIMAX EW7811UN 0x7811 EW-7811Un
/* eGalax Products */
product EGALAX TPANEL 0x0001 Touch Panel
@@ -2188,12 +2189,16 @@ product HUAWEI E143D 0x143d 3G modem
product HUAWEI E143E 0x143e 3G modem
product HUAWEI E143F 0x143f 3G modem
product HUAWEI E1752 0x1446 3G modem
+product HUAWEI K4505 0x1464 3G modem
product HUAWEI K3765 0x1465 3G modem
product HUAWEI E1820 0x14ac E1820 HSPA+ USB Slider
+product HUAWEI K3770 0x14c9 3G modem
+product HUAWEI K3770_INIT 0x14d1 K3770 Initial
product HUAWEI E3131_INIT 0x14fe 3G modem initial
product HUAWEI E392 0x1505 LTE modem
product HUAWEI E3131 0x1506 3G modem
product HUAWEI K3765_INIT 0x1520 K3765 Initial
+product HUAWEI K4505_INIT 0x1521 K4505 Initial
product HUAWEI ETS2055 0x1803 CDMA modem
product HUAWEI E173 0x1c05 3G modem
product HUAWEI E173_INIT 0x1c0b 3G modem initial
diff --git a/sys/dev/xen/netback/netback_unit_tests.c b/sys/dev/xen/netback/netback_unit_tests.c
index e4789f9..92e2b68 100644
--- a/sys/dev/xen/netback/netback_unit_tests.c
+++ b/sys/dev/xen/netback/netback_unit_tests.c
@@ -58,8 +58,8 @@ __FBSDID("$FreeBSD$");
#define TOSTRING(x) STRINGIFY(x)
/**
- * Writes an error message to buffer if cond is false, and returns true
- * iff the assertion failed. Note the implied parameters buffer and
+ * Writes an error message to buffer if cond is false
+ * Note the implied parameters buffer and
* buflen
*/
#define XNB_ASSERT(cond) ({ \
@@ -71,7 +71,7 @@ __FBSDID("$FreeBSD$");
strlcat(_buffer, ":" TOSTRING(__LINE__) \
" Assertion Error: " #cond "\n", _buflen); \
} \
- ! passed; })
+ })
/**
diff --git a/sys/fs/cd9660/cd9660_vfsops.c b/sys/fs/cd9660/cd9660_vfsops.c
index 7780b04..1865202 100644
--- a/sys/fs/cd9660/cd9660_vfsops.c
+++ b/sys/fs/cd9660/cd9660_vfsops.c
@@ -376,8 +376,7 @@ iso_mountfs(devvp, mp)
mp->mnt_maxsymlinklen = 0;
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
- MNTK_EXTENDED_SHARED;
+ mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
MNT_IUNLOCK(mp);
isomp->im_mountp = mp;
isomp->im_dev = dev;
diff --git a/sys/fs/devfs/devfs_vfsops.c b/sys/fs/devfs/devfs_vfsops.c
index c0407eb..85ff158 100644
--- a/sys/fs/devfs/devfs_vfsops.c
+++ b/sys/fs/devfs/devfs_vfsops.c
@@ -131,8 +131,7 @@ devfs_mount(struct mount *mp)
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
- MNTK_EXTENDED_SHARED;
+ mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
#ifdef MAC
mp->mnt_flag |= MNT_MULTILABEL;
#endif
diff --git a/sys/fs/ext2fs/ext2_vfsops.c b/sys/fs/ext2fs/ext2_vfsops.c
index 576a1f59..147b9b8 100644
--- a/sys/fs/ext2fs/ext2_vfsops.c
+++ b/sys/fs/ext2fs/ext2_vfsops.c
@@ -665,8 +665,7 @@ ext2_mountfs(struct vnode *devvp, struct mount *mp)
* Initialize filesystem stat information in mount struct.
*/
MNT_ILOCK(mp);
- mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
- MNTK_EXTENDED_SHARED;
+ mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
MNT_IUNLOCK(mp);
return (0);
out:
diff --git a/sys/fs/fdescfs/fdesc_vfsops.c b/sys/fs/fdescfs/fdesc_vfsops.c
index dc88278..c3dbccf 100644
--- a/sys/fs/fdescfs/fdesc_vfsops.c
+++ b/sys/fs/fdescfs/fdesc_vfsops.c
@@ -107,9 +107,6 @@ fdesc_mount(struct mount *mp)
VOP_UNLOCK(rvp, 0);
/* XXX -- don't mark as local to work around fts() problems */
/*mp->mnt_flag |= MNT_LOCAL;*/
- MNT_ILOCK(mp);
- mp->mnt_kern_flag |= MNTK_MPSAFE;
- MNT_IUNLOCK(mp);
vfs_getnewfsid(mp);
vfs_mountedfrom(mp, "fdescfs");
diff --git a/sys/fs/fuse/fuse_file.c b/sys/fs/fuse/fuse_file.c
index 4e96edd..d9fb67b 100644
--- a/sys/fs/fuse/fuse_file.c
+++ b/sys/fs/fuse/fuse_file.c
@@ -192,7 +192,6 @@ out:
atomic_subtract_acq_int(&fuse_fh_count, 1);
fufh->fh_id = (uint64_t)-1;
fufh->fh_type = FUFH_INVALID;
- fuse_invalidate_attr(vp);
return err;
}
diff --git a/sys/fs/fuse/fuse_internal.c b/sys/fs/fuse/fuse_internal.c
index 96496c7..4e0a163 100644
--- a/sys/fs/fuse/fuse_internal.c
+++ b/sys/fs/fuse/fuse_internal.c
@@ -371,26 +371,6 @@ fuse_internal_readdir_processdata(struct uio *uio,
/* remove */
-#ifdef XXXIP
-static int
-fuse_internal_remove_callback(struct vnode *vp, void *cargs)
-{
- struct vattr *vap;
- uint64_t target_nlink;
-
- vap = VTOVA(vp);
-
- target_nlink = *(uint64_t *)cargs;
-
- /* somewhat lame "heuristics", but you got better ideas? */
- if ((vap->va_nlink == target_nlink) && vnode_isreg(vp)) {
- fuse_invalidate_attr(vp);
- }
- return 0;
-}
-
-#endif
-
#define INVALIDATE_CACHED_VATTRS_UPON_UNLINK 1
int
fuse_internal_remove(struct vnode *dvp,
@@ -426,27 +406,6 @@ fuse_internal_remove(struct vnode *dvp,
err = fdisp_wait_answ(&fdi);
fdisp_destroy(&fdi);
-
- fuse_invalidate_attr(dvp);
- fuse_invalidate_attr(vp);
-
-#ifdef XXXIP
- /*
- * XXX: INVALIDATE_CACHED_VATTRS_UPON_UNLINK
- *
- * Consider the case where vap->va_nlink > 1 for the entity being
- * removed. In our world, other in-memory vnodes that share a link
- * count each with this one may not know right way that this one just
- * got deleted. We should let them know, say, through a vnode_iterate()
- * here and a callback that does fuse_invalidate_attr(vp) on each
- * relevant vnode.
- */
- if (need_invalidate && !err) {
- vnode_iterate(vnode_mount(vp), 0, fuse_internal_remove_callback,
- (void *)&target_nlink);
- }
-#endif
-
return err;
}
@@ -477,11 +436,6 @@ fuse_internal_rename(struct vnode *fdvp,
err = fdisp_wait_answ(&fdi);
fdisp_destroy(&fdi);
-
- fuse_invalidate_attr(fdvp);
- if (tdvp != fdvp) {
- fuse_invalidate_attr(tdvp);
- }
return err;
}
@@ -556,7 +510,6 @@ fuse_internal_newentry(struct vnode *dvp,
bufsize, &fdi);
err = fuse_internal_newentry_core(dvp, vpp, cnp, vtype, &fdi);
fdisp_destroy(&fdi);
- fuse_invalidate_attr(dvp);
return err;
}
diff --git a/sys/fs/fuse/fuse_internal.h b/sys/fs/fuse/fuse_internal.h
index 9cf20e9..187245c 100644
--- a/sys/fs/fuse/fuse_internal.h
+++ b/sys/fs/fuse/fuse_internal.h
@@ -138,23 +138,6 @@ uio_setresid(struct uio *uio, ssize_t resid)
uio->uio_resid = resid;
}
-/* time */
-
-#define fuse_timespec_add(vvp, uvp) \
- do { \
- (vvp)->tv_sec += (uvp)->tv_sec; \
- (vvp)->tv_nsec += (uvp)->tv_nsec; \
- if ((vvp)->tv_nsec >= 1000000000) { \
- (vvp)->tv_sec++; \
- (vvp)->tv_nsec -= 1000000000; \
- } \
- } while (0)
-
-#define fuse_timespec_cmp(tvp, uvp, cmp) \
- (((tvp)->tv_sec == (uvp)->tv_sec) ? \
- ((tvp)->tv_nsec cmp (uvp)->tv_nsec) : \
- ((tvp)->tv_sec cmp (uvp)->tv_sec))
-
/* miscellaneous */
static __inline__
@@ -254,17 +237,9 @@ fuse_internal_attr_fat2vat(struct mount *mp,
}
-#define cache_attrs(vp, fuse_out) do { \
- struct timespec uptsp_ ## __func__; \
- \
- VTOFUD(vp)->cached_attrs_valid.tv_sec = (fuse_out)->attr_valid; \
- VTOFUD(vp)->cached_attrs_valid.tv_nsec = (fuse_out)->attr_valid_nsec; \
- nanouptime(&uptsp_ ## __func__); \
- \
- fuse_timespec_add(&VTOFUD(vp)->cached_attrs_valid, &uptsp_ ## __func__); \
- \
- fuse_internal_attr_fat2vat(vnode_mount(vp), &(fuse_out)->attr, VTOVA(vp)); \
-} while (0)
+#define cache_attrs(vp, fuse_out) \
+ fuse_internal_attr_fat2vat(vnode_mount(vp), &(fuse_out)->attr, \
+ VTOVA(vp));
/* fsync */
diff --git a/sys/fs/fuse/fuse_io.c b/sys/fs/fuse/fuse_io.c
index 299aa75..5b71a6b 100644
--- a/sys/fs/fuse/fuse_io.c
+++ b/sys/fs/fuse/fuse_io.c
@@ -159,7 +159,6 @@ fuse_io_dispatch(struct vnode *vp, struct uio *uio, int ioflag,
FS_DEBUG("direct write of vnode %ju via file handle %ju\n",
(uintmax_t)VTOILLU(vp), (uintmax_t)fufh->fh_id);
err = fuse_write_directbackend(vp, uio, cred, fufh);
- fuse_invalidate_attr(vp);
} else {
FS_DEBUG("buffered write of vnode %ju\n",
(uintmax_t)VTOILLU(vp));
diff --git a/sys/fs/fuse/fuse_node.c b/sys/fs/fuse/fuse_node.c
index a77082e..ea10c96 100644
--- a/sys/fs/fuse/fuse_node.c
+++ b/sys/fs/fuse/fuse_node.c
@@ -283,16 +283,6 @@ fuse_vnode_open(struct vnode *vp, int32_t fuse_open_flags, struct thread *td)
}
int
-fuse_isvalid_attr(struct vnode *vp)
-{
- struct fuse_vnode_data *fvdat = VTOFUD(vp);
- struct timespec uptsp;
-
- nanouptime(&uptsp);
- return fuse_timespec_cmp(&uptsp, &fvdat->cached_attrs_valid, <=);
-}
-
-int
fuse_vnode_savesize(struct vnode *vp, struct ucred *cred)
{
struct fuse_vnode_data *fvdat = VTOFUD(vp);
@@ -337,8 +327,6 @@ fuse_vnode_savesize(struct vnode *vp, struct ucred *cred)
if (err == 0)
fvdat->flag &= ~FN_SIZECHANGE;
- fuse_invalidate_attr(vp);
-
return err;
}
@@ -350,8 +338,7 @@ fuse_vnode_refreshsize(struct vnode *vp, struct ucred *cred)
struct vattr va;
if ((fvdat->flag & FN_SIZECHANGE) != 0 ||
- (fuse_refresh_size == 0 && fvdat->filesize != 0) ||
- fuse_isvalid_attr(vp))
+ (fuse_refresh_size == 0 && fvdat->filesize != 0))
return;
VOP_GETATTR(vp, &va, cred);
@@ -378,7 +365,5 @@ fuse_vnode_setsize(struct vnode *vp, struct ucred *cred, off_t newsize)
err = vtruncbuf(vp, cred, newsize, fuse_iosize(vp));
}
vnode_pager_setsize(vp, newsize);
- fuse_invalidate_attr(vp);
-
return err;
}
diff --git a/sys/fs/fuse/fuse_node.h b/sys/fs/fuse/fuse_node.h
index 45b15a4..2f1434f 100644
--- a/sys/fs/fuse/fuse_node.h
+++ b/sys/fs/fuse/fuse_node.h
@@ -83,7 +83,6 @@ struct fuse_vnode_data {
uint32_t flag;
/** meta **/
- struct timespec cached_attrs_valid;
struct vattr cached_attrs;
off_t filesize;
uint64_t nlookup;
@@ -100,15 +99,6 @@ struct fuse_vnode_data {
extern struct vop_vector fuse_vnops;
-static __inline__
-void
-fuse_invalidate_attr(struct vnode *vp)
-{
- if (VTOFUD(vp)) {
- bzero(&VTOFUD(vp)->cached_attrs_valid, sizeof(struct timespec));
- }
-}
-
static __inline void
fuse_vnode_setparent(struct vnode *vp, struct vnode *dvp)
{
@@ -118,8 +108,6 @@ fuse_vnode_setparent(struct vnode *vp, struct vnode *dvp)
}
}
-int fuse_isvalid_attr(struct vnode *vp);
-
void fuse_vnode_destroy(struct vnode *vp);
int fuse_vnode_get(struct mount *mp,
diff --git a/sys/fs/fuse/fuse_vfsops.c b/sys/fs/fuse/fuse_vfsops.c
index 74e43f0..639550a 100644
--- a/sys/fs/fuse/fuse_vfsops.c
+++ b/sys/fs/fuse/fuse_vfsops.c
@@ -234,8 +234,10 @@ fuse_vfsop_mount(struct mount *mp)
if (mp->mnt_flag & MNT_UPDATE)
return EOPNOTSUPP;
+ MNT_ILOCK(mp);
mp->mnt_flag |= MNT_SYNCHRONOUS;
mp->mnt_data = NULL;
+ MNT_IUNLOCK(mp);
/* Get the new options passed to mount */
opts = mp->mnt_optnew;
@@ -323,23 +325,20 @@ fuse_vfsop_mount(struct mount *mp)
FUSE_UNLOCK();
goto out;
}
- /* We need this here as this slot is used by getnewvnode() */
- mp->mnt_stat.f_iosize = PAGE_SIZE;
- mp->mnt_data = data;
data->ref++;
data->mp = mp;
data->dataflags |= mntopts;
data->max_read = max_read;
data->daemon_timeout = daemon_timeout;
-#ifdef XXXIP
- if (!priv_check(td, PRIV_VFS_FUSE_SYNC_UNMOUNT))
- data->dataflags |= FSESS_CAN_SYNC_UNMOUNT;
-#endif
FUSE_UNLOCK();
vfs_getnewfsid(mp);
+ MNT_ILOCK(mp);
+ mp->mnt_data = data;
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE;
+ MNT_IUNLOCK(mp);
+ /* We need this here as this slot is used by getnewvnode() */
+ mp->mnt_stat.f_iosize = PAGE_SIZE;
if (subtype) {
strlcat(mp->mnt_stat.f_fstypename, ".", MFSNAMELEN);
strlcat(mp->mnt_stat.f_fstypename, subtype, MFSNAMELEN);
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
index 1774e0a..cc9733d 100644
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -482,17 +482,6 @@ fuse_vnop_getattr(struct vop_getattr_args *ap)
/* Note that we are not bailing out on a dead file system just yet. */
- /* look for cached attributes */
- if (fuse_isvalid_attr(vp)) {
- if (vap != VTOVA(vp)) {
- memcpy(vap, VTOVA(vp), sizeof(*vap));
- }
- if ((fvdat->flag & FN_SIZECHANGE) != 0) {
- vap->va_size = fvdat->filesize;
- }
- debug_printf("return cached: inode=%ju\n", (uintmax_t)VTOI(vp));
- return 0;
- }
if (!(dataflags & FSESS_INITED)) {
if (!vnode_isvroot(vp)) {
fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
@@ -519,6 +508,11 @@ fuse_vnop_getattr(struct vop_getattr_args *ap)
if (vap != VTOVA(vp)) {
memcpy(vap, VTOVA(vp), sizeof(*vap));
}
+ if (vap->va_type != vnode_vtype(vp)) {
+ fuse_internal_vnode_disappear(vp);
+ err = ENOENT;
+ goto out;
+ }
if ((fvdat->flag & FN_SIZECHANGE) != 0)
vap->va_size = fvdat->filesize;
@@ -534,7 +528,6 @@ fuse_vnop_getattr(struct vop_getattr_args *ap)
fuse_vnode_setsize(vp, cred, new_filesize);
}
}
- KASSERT(vnode_vtype(vp) == vap->va_type, ("stale vnode"));
debug_printf("fuse_getattr e: returning 0\n");
out:
@@ -635,9 +628,6 @@ fuse_vnop_link(struct vop_link_args *ap)
feo = fdi.answ;
err = fuse_internal_checkentry(feo, vnode_vtype(vp));
- fuse_invalidate_attr(tdvp);
- fuse_invalidate_attr(vp);
-
out:
fdisp_destroy(&fdi);
return err;
@@ -1085,8 +1075,6 @@ fuse_vnop_mkdir(struct vop_mkdir_args *ap)
struct componentname *cnp = ap->a_cnp;
struct vattr *vap = ap->a_vap;
- int err = 0;
-
struct fuse_mkdir_in fmdi;
fuse_trace_printf_vnop();
@@ -1096,13 +1084,8 @@ fuse_vnop_mkdir(struct vop_mkdir_args *ap)
}
fmdi.mode = MAKEIMODE(vap->va_type, vap->va_mode);
- err = fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKDIR, &fmdi,
- sizeof(fmdi), VDIR);
-
- if (err == 0) {
- fuse_invalidate_attr(dvp);
- }
- return err;
+ return (fuse_internal_newentry(dvp, vpp, cnp, FUSE_MKDIR, &fmdi,
+ sizeof(fmdi), VDIR));
}
/*
@@ -1367,10 +1350,8 @@ fuse_vnop_remove(struct vop_remove_args *ap)
err = fuse_internal_remove(dvp, vp, cnp, FUSE_UNLINK);
- if (err == 0) {
+ if (err == 0)
fuse_internal_vnode_disappear(vp);
- fuse_invalidate_attr(dvp);
- }
return err;
}
@@ -1423,11 +1404,8 @@ fuse_vnop_rename(struct vop_rename_args *ap)
sx_xlock(&data->rename_lock);
err = fuse_internal_rename(fdvp, fcnp, tdvp, tcnp);
if (err == 0) {
- fuse_invalidate_attr(fdvp);
- if (tdvp != fdvp) {
+ if (tdvp != fdvp)
fuse_vnode_setparent(fvp, tdvp);
- fuse_invalidate_attr(tdvp);
- }
if (tvp != NULL)
fuse_vnode_setparent(tvp, NULL);
}
@@ -1482,10 +1460,8 @@ fuse_vnop_rmdir(struct vop_rmdir_args *ap)
}
err = fuse_internal_remove(dvp, vp, ap->a_cnp, FUSE_RMDIR);
- if (err == 0) {
+ if (err == 0)
fuse_internal_vnode_disappear(vp);
- fuse_invalidate_attr(dvp);
- }
return err;
}
@@ -1593,14 +1569,10 @@ fuse_vnop_setattr(struct vop_setattr_args *ap)
vap->va_vaflags & VA_UTIMES_NULL) {
err = fuse_internal_access(vp, VWRITE, &facp, td, cred);
}
- if (err) {
- fuse_invalidate_attr(vp);
+ if (err)
goto out;
- }
- if ((err = fdisp_wait_answ(&fdi))) {
- fuse_invalidate_attr(vp);
+ if ((err = fdisp_wait_answ(&fdi)))
goto out;
- }
vtyp = IFTOVT(((struct fuse_attr_out *)fdi.answ)->attr.mode);
if (vnode_vtype(vp) != vtyp) {
@@ -1624,7 +1596,6 @@ fuse_vnop_setattr(struct vop_setattr_args *ap)
out:
fdisp_destroy(&fdi);
if (!err && sizechanged) {
- fuse_invalidate_attr(vp);
fuse_vnode_setsize(vp, cred, newsize);
VTOFUD(vp)->flag &= ~FN_SIZECHANGE;
}
@@ -1715,10 +1686,6 @@ fuse_vnop_symlink(struct vop_symlink_args *ap)
err = fuse_internal_newentry_core(dvp, vpp, cnp, VLNK, &fdi);
fdisp_destroy(&fdi);
-
- if (err == 0) {
- fuse_invalidate_attr(dvp);
- }
return err;
}
diff --git a/sys/fs/msdosfs/msdosfs_vfsops.c b/sys/fs/msdosfs/msdosfs_vfsops.c
index 7bdd2fa..e99fff3 100644
--- a/sys/fs/msdosfs/msdosfs_vfsops.c
+++ b/sys/fs/msdosfs/msdosfs_vfsops.c
@@ -760,7 +760,6 @@ mountmsdosfs(struct vnode *devvp, struct mount *mp)
mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE;
MNT_IUNLOCK(mp);
if (pmp->pm_flags & MSDOSFS_LARGEFS)
diff --git a/sys/fs/nandfs/nandfs_vfsops.c b/sys/fs/nandfs/nandfs_vfsops.c
index ba53546..9e3196e 100644
--- a/sys/fs/nandfs/nandfs_vfsops.c
+++ b/sys/fs/nandfs/nandfs_vfsops.c
@@ -1383,7 +1383,6 @@ nandfs_mountfs(struct vnode *devvp, struct mount *mp)
nmp->nm_ronly = ronly;
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE;
MNT_IUNLOCK(mp);
nmp->nm_nandfsdev = nandfsdev;
/* Add our mountpoint */
diff --git a/sys/fs/nfsclient/nfs_clvfsops.c b/sys/fs/nfsclient/nfs_clvfsops.c
index 966688f..71791fa 100644
--- a/sys/fs/nfsclient/nfs_clvfsops.c
+++ b/sys/fs/nfsclient/nfs_clvfsops.c
@@ -1136,8 +1136,7 @@ nfs_mount(struct mount *mp)
out:
if (!error) {
MNT_ILOCK(mp);
- mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
- MNTK_NO_IOPF;
+ mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_NO_IOPF;
MNT_IUNLOCK(mp);
}
return (error);
diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c
index 9c2851b..7d84d51 100644
--- a/sys/fs/nullfs/null_vfsops.c
+++ b/sys/fs/nullfs/null_vfsops.c
@@ -189,8 +189,7 @@ nullfs_mount(struct mount *mp)
}
MNT_ILOCK(mp);
mp->mnt_kern_flag |= lowerrootvp->v_mount->mnt_kern_flag &
- (MNTK_MPSAFE | MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED |
- MNTK_EXTENDED_SHARED);
+ (MNTK_SHARED_WRITES | MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED);
mp->mnt_kern_flag |= MNTK_LOOKUP_EXCL_DOTDOT;
MNT_IUNLOCK(mp);
mp->mnt_data = xmp;
diff --git a/sys/fs/pseudofs/pseudofs.c b/sys/fs/pseudofs/pseudofs.c
index 145aaef..d7894af 100644
--- a/sys/fs/pseudofs/pseudofs.c
+++ b/sys/fs/pseudofs/pseudofs.c
@@ -308,7 +308,6 @@ pfs_mount(struct pfs_info *pi, struct mount *mp)
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE;
MNT_IUNLOCK(mp);
mp->mnt_data = pi;
vfs_getnewfsid(mp);
diff --git a/sys/fs/tmpfs/tmpfs_vfsops.c b/sys/fs/tmpfs/tmpfs_vfsops.c
index 445112d..b2aa786 100644
--- a/sys/fs/tmpfs/tmpfs_vfsops.c
+++ b/sys/fs/tmpfs/tmpfs_vfsops.c
@@ -253,7 +253,6 @@ tmpfs_mount(struct mount *mp)
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE;
MNT_IUNLOCK(mp);
mp->mnt_data = tmp;
diff --git a/sys/fs/udf/udf_vfsops.c b/sys/fs/udf/udf_vfsops.c
index ddf3b99..8c44f48 100644
--- a/sys/fs/udf/udf_vfsops.c
+++ b/sys/fs/udf/udf_vfsops.c
@@ -355,8 +355,7 @@ udf_mountfs(struct vnode *devvp, struct mount *mp)
mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
- MNTK_EXTENDED_SHARED;
+ mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED;
MNT_IUNLOCK(mp);
udfmp->im_mountp = mp;
udfmp->im_dev = dev;
diff --git a/sys/fs/unionfs/union_vfsops.c b/sys/fs/unionfs/union_vfsops.c
index 936df4c..c6586e2 100644
--- a/sys/fs/unionfs/union_vfsops.c
+++ b/sys/fs/unionfs/union_vfsops.c
@@ -266,11 +266,6 @@ unionfs_domount(struct mount *mp)
ump->um_copymode = copymode;
ump->um_whitemode = whitemode;
- MNT_ILOCK(mp);
- if ((lowerrootvp->v_mount->mnt_kern_flag & MNTK_MPSAFE) &&
- (upperrootvp->v_mount->mnt_kern_flag & MNTK_MPSAFE))
- mp->mnt_kern_flag |= MNTK_MPSAFE;
- MNT_IUNLOCK(mp);
mp->mnt_data = ump;
/*
diff --git a/sys/fs/unionfs/union_vnops.c b/sys/fs/unionfs/union_vnops.c
index 36ca302..5076f16 100644
--- a/sys/fs/unionfs/union_vnops.c
+++ b/sys/fs/unionfs/union_vnops.c
@@ -1867,8 +1867,7 @@ unionfs_lock(struct vop_lock1_args *ap)
if ((revlock = unionfs_get_llt_revlock(vp, flags)) == 0)
panic("unknown lock type: 0x%x", flags & LK_TYPE_MASK);
- if ((mp->mnt_kern_flag & MNTK_MPSAFE) != 0 &&
- (vp->v_iflag & VI_OWEINACT) != 0)
+ if ((vp->v_iflag & VI_OWEINACT) != 0)
flags |= LK_NOWAIT;
/*
diff --git a/sys/gnu/fs/reiserfs/reiserfs_vfsops.c b/sys/gnu/fs/reiserfs/reiserfs_vfsops.c
index 825111e..496a6ad 100644
--- a/sys/gnu/fs/reiserfs/reiserfs_vfsops.c
+++ b/sys/gnu/fs/reiserfs/reiserfs_vfsops.c
@@ -580,7 +580,6 @@ reiserfs_mountfs(struct vnode *devvp, struct mount *mp, struct thread *td)
mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE;
MNT_IUNLOCK(mp);
#if defined(si_mountpoint)
devvp->v_rdev->si_mountpoint = mp;
diff --git a/sys/i386/include/vmparam.h b/sys/i386/include/vmparam.h
index ce6672d..9cfd692 100644
--- a/sys/i386/include/vmparam.h
+++ b/sys/i386/include/vmparam.h
@@ -202,4 +202,13 @@
#define ZERO_REGION_SIZE (64 * 1024) /* 64KB */
+#ifndef VM_MAX_AUTOTUNE_MAXUSERS
+#define VM_MAX_AUTOTUNE_MAXUSERS 384
+#endif
+
+#ifndef VM_MAX_AUTOTUNE_NMBCLUSTERS
+/* old maxusers max value. */
+#define VM_MAX_AUTOTUNE_NMBCLUSTERS (1024 + VM_MAX_AUTOTUNE_MAXUSERS * 64)
+#endif
+
#endif /* _MACHINE_VMPARAM_H_ */
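With the defaults above, the i386 ceiling works out to a fixed number: 1024 + 384 * 64 = 25600 clusters, i.e. what the old maxusers cap of 384 would have produced. Assuming the usual 2048-byte cluster size, that bounds the autotuned cluster pool at 50 MB of kernel address space. A quick check of the arithmetic:

#include <stdio.h>

int
main(void)
{
	int maxusers = 384;			/* VM_MAX_AUTOTUNE_MAXUSERS */
	int nmbclusters = 1024 + maxusers * 64;	/* VM_MAX_AUTOTUNE_NMBCLUSTERS */

	/* 25600 clusters; at 2048 bytes each that is 51200 KB = 50 MB. */
	printf("%d clusters, %d KB\n", nmbclusters, nmbclusters * 2);
	return (0);
}

The kern_mbuf.c hunk further down applies exactly this value as the upper bound when nmbclusters is autotuned.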
diff --git a/sys/i386/xen/clock.c b/sys/i386/xen/clock.c
index f5965bf..4e43a12 100644
--- a/sys/i386/xen/clock.c
+++ b/sys/i386/xen/clock.c
@@ -516,7 +516,7 @@ startrtclock()
__cpu_khz = 1000000ULL << 32;
info = &HYPERVISOR_shared_info->vcpu_info[0].time;
- do_div(__cpu_khz, info->tsc_to_system_mul);
+ (void)do_div(__cpu_khz, info->tsc_to_system_mul);
if ( info->tsc_shift < 0 )
cpu_khz = __cpu_khz << -info->tsc_shift;
else
diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c
index a09823a..1186e36 100644
--- a/sys/kern/kern_malloc.c
+++ b/sys/kern/kern_malloc.c
@@ -186,11 +186,13 @@ struct {
*/
static uma_zone_t mt_zone;
-static u_long vm_min_kernel_address = VM_MIN_KERNEL_ADDRESS;
+static vm_offset_t vm_min_kernel_address = VM_MIN_KERNEL_ADDRESS;
SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
&vm_min_kernel_address, 0, "Min kernel address");
-static u_long vm_max_kernel_address = VM_MAX_KERNEL_ADDRESS;
+#ifndef __sparc64__
+static vm_offset_t vm_max_kernel_address = VM_MAX_KERNEL_ADDRESS;
+#endif
SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
&vm_max_kernel_address, 0, "Max kernel address");
diff --git a/sys/kern/kern_mbuf.c b/sys/kern/kern_mbuf.c
index de2998b..f8fd0d7 100644
--- a/sys/kern/kern_mbuf.c
+++ b/sys/kern/kern_mbuf.c
@@ -113,8 +113,17 @@ tunable_mbinit(void *dummy)
/* This has to be done before VM init. */
TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
- if (nmbclusters == 0)
+ if (nmbclusters == 0) {
+#ifdef VM_AUTOTUNE_NMBCLUSTERS
+ nmbclusters = VM_AUTOTUNE_NMBCLUSTERS;
+#else
nmbclusters = 1024 + maxusers * 64;
+#endif
+#ifdef VM_MAX_AUTOTUNE_NMBCLUSTERS
+ if (nmbclusters > VM_MAX_AUTOTUNE_NMBCLUSTERS)
+ nmbclusters = VM_MAX_AUTOTUNE_NMBCLUSTERS;
+#endif
+ }
TUNABLE_INT_FETCH("kern.ipc.nmbjumbop", &nmbjumbop);
if (nmbjumbop == 0)
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index d078ea1..7626de6 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -189,6 +189,12 @@ static struct td_sched td_sched0;
#define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2)
#define SCHED_INTERACT_THRESH (30)
+/*
+ * These parameters determine the slice behavior for batch work.
+ */
+#define SCHED_SLICE_DEFAULT_DIVISOR 10 /* ~94 ms, 12 stathz ticks. */
+#define SCHED_SLICE_MIN_DIVISOR 6 /* DEFAULT/MIN = ~16 ms. */
+
/* Flags kept in td_flags. */
#define TDF_SLICEEND TDF_SCHED2 /* Thread time slice is over. */
@@ -201,9 +207,10 @@ static struct td_sched td_sched0;
* preempt_thresh: Priority threshold for preemption and remote IPIs.
*/
static int sched_interact = SCHED_INTERACT_THRESH;
-static int realstathz = 127;
static int tickincr = 8 << SCHED_TICK_SHIFT;
-static int sched_slice = 12;
+static int realstathz = 127; /* reset during boot. */
+static int sched_slice = 10; /* reset during boot. */
+static int sched_slice_min = 1; /* reset during boot. */
#ifdef PREEMPTION
#ifdef FULL_PREEMPTION
static int preempt_thresh = PRI_MAX_IDLE;
@@ -559,6 +566,30 @@ tdq_load_rem(struct tdq *tdq, struct thread *td)
}
/*
+ * Bound timeshare latency by decreasing slice size as load increases. We
+ * consider the maximum latency as the sum of the threads waiting to run
+ * aside from curthread and target no more than sched_slice latency but
+ * no less than sched_slice_min runtime.
+ */
+static inline int
+tdq_slice(struct tdq *tdq)
+{
+ int load;
+
+ /*
+ * It is safe to use sys_load here because this is called from
+ * contexts where timeshare threads are running and so there
+ * cannot be higher priority load in the system.
+ */
+ load = tdq->tdq_sysload - 1;
+ if (load >= SCHED_SLICE_MIN_DIVISOR)
+ return (sched_slice_min);
+ if (load <= 1)
+ return (sched_slice);
+ return (sched_slice / load);
+}
+
+/*
* Set lowpri to its exact value by searching the run-queue and
* evaluating curthread. curthread may be passed as an optimization.
*/
@@ -1384,7 +1415,8 @@ sched_initticks(void *dummy)
int incr;
realstathz = stathz ? stathz : hz;
- sched_slice = realstathz / 10; /* ~100ms */
+ sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR;
+ sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
realstathz);
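Plugging in the usual stathz of 127 makes the new constants concrete: sched_slice = 127 / 10 = 12 ticks (about 94 ms, matching the divisor comment above) and sched_slice_min = 12 / 6 = 2 ticks (about 16 ms). tdq_slice() then returns the full 12 ticks while its load value (tdq_sysload - 1) is at most 1, 12/load ticks for loads 2 through 5, and the 2-tick floor from a load of 6 upward. A small standalone sketch of that schedule, assuming those defaults:

#include <stdio.h>

#define SCHED_SLICE_DEFAULT_DIVISOR	10
#define SCHED_SLICE_MIN_DIVISOR		6

static int sched_slice;
static int sched_slice_min;

/* Same shape as the kernel's tdq_slice(), with the load passed in. */
static int
slice_for_load(int load)
{
	if (load >= SCHED_SLICE_MIN_DIVISOR)
		return (sched_slice_min);
	if (load <= 1)
		return (sched_slice);
	return (sched_slice / load);
}

int
main(void)
{
	int realstathz = 127, load;

	sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR;		/* 12 */
	sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;	/* 2 */
	for (load = 0; load <= 8; load++)
		printf("load %d -> %d ticks (~%d ms)\n", load,
		    slice_for_load(load),
		    slice_for_load(load) * 1000 / realstathz);
	return (0);
}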
@@ -1409,7 +1441,7 @@ sched_initticks(void *dummy)
affinity = SCHED_AFFINITY_DEFAULT;
#endif
if (sched_idlespinthresh < 0)
- sched_idlespinthresh = imax(16, 2 * hz / realstathz);
+ sched_idlespinthresh = 2 * max(10000, 6 * hz) / realstathz;
}
@@ -1585,7 +1617,7 @@ schedinit(void)
thread0.td_sched = &td_sched0;
td_sched0.ts_ltick = ticks;
td_sched0.ts_ftick = ticks;
- td_sched0.ts_slice = sched_slice;
+ td_sched0.ts_slice = 0;
}
/*
@@ -1851,7 +1883,8 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
preempted = !(td->td_flags & TDF_SLICEEND);
td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
td->td_owepreempt = 0;
- tdq->tdq_switchcnt++;
+ if (!TD_IS_IDLETHREAD(td))
+ tdq->tdq_switchcnt++;
/*
* The lock pointer in an idle thread should never change. Reset it
* to CAN_RUN as well.
@@ -2003,8 +2036,10 @@ sched_wakeup(struct thread *td)
sched_interact_update(td);
sched_pctcpu_update(ts, 0);
}
- /* Reset the slice value after we sleep. */
- ts->ts_slice = sched_slice;
+ /*
+ * Reset the slice value since we slept and advanced the round-robin.
+ */
+ ts->ts_slice = 0;
sched_add(td, SRQ_BORING);
}
@@ -2036,14 +2071,16 @@ sched_fork_thread(struct thread *td, struct thread *child)
{
struct td_sched *ts;
struct td_sched *ts2;
+ struct tdq *tdq;
+ tdq = TDQ_SELF();
THREAD_LOCK_ASSERT(td, MA_OWNED);
/*
* Initialize child.
*/
ts = td->td_sched;
ts2 = child->td_sched;
- child->td_lock = TDQ_LOCKPTR(TDQ_SELF());
+ child->td_lock = TDQ_LOCKPTR(tdq);
child->td_cpuset = cpuset_ref(td->td_cpuset);
ts2->ts_cpu = ts->ts_cpu;
ts2->ts_flags = 0;
@@ -2062,7 +2099,8 @@ sched_fork_thread(struct thread *td, struct thread *child)
*/
ts2->ts_slptime = ts->ts_slptime;
ts2->ts_runtime = ts->ts_runtime;
- ts2->ts_slice = 1; /* Attempt to quickly learn interactivity. */
+ /* Attempt to quickly learn interactivity. */
+ ts2->ts_slice = tdq_slice(tdq) - sched_slice_min;
#ifdef KTR
bzero(ts2->ts_name, sizeof(ts2->ts_name));
#endif
@@ -2227,8 +2265,8 @@ sched_clock(struct thread *td)
* Force a context switch if the current thread has used up a full
* time slice (default is 100ms).
*/
- if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
- ts->ts_slice = sched_slice;
+ if (!TD_IS_IDLETHREAD(td) && ++ts->ts_slice >= tdq_slice(tdq)) {
+ ts->ts_slice = 0;
td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
}
}
@@ -2577,17 +2615,27 @@ sched_idletd(void *dummy)
{
struct thread *td;
struct tdq *tdq;
- int switchcnt;
+ int oldswitchcnt, switchcnt;
int i;
mtx_assert(&Giant, MA_NOTOWNED);
td = curthread;
tdq = TDQ_SELF();
THREAD_NO_SLEEPING();
+ oldswitchcnt = -1;
for (;;) {
+ if (tdq->tdq_load) {
+ thread_lock(td);
+ mi_switch(SW_VOL | SWT_IDLE, NULL);
+ thread_unlock(td);
+ }
+ switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
#ifdef SMP
- if (tdq_idled(tdq) == 0)
- continue;
+ if (switchcnt != oldswitchcnt) {
+ oldswitchcnt = switchcnt;
+ if (tdq_idled(tdq) == 0)
+ continue;
+ }
#endif
switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
/*
@@ -2604,20 +2652,26 @@ sched_idletd(void *dummy)
cpu_spinwait();
}
}
+
+ /* If there was context switch during spin, restart it. */
switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
- if (tdq->tdq_load == 0) {
- tdq->tdq_cpu_idle = 1;
- if (tdq->tdq_load == 0) {
- cpu_idle(switchcnt > sched_idlespinthresh * 4);
- tdq->tdq_switchcnt++;
- }
- tdq->tdq_cpu_idle = 0;
- }
- if (tdq->tdq_load) {
- thread_lock(td);
- mi_switch(SW_VOL | SWT_IDLE, NULL);
- thread_unlock(td);
- }
+ if (tdq->tdq_load != 0 || switchcnt != oldswitchcnt)
+ continue;
+
+ /* Run main MD idle handler. */
+ tdq->tdq_cpu_idle = 1;
+ cpu_idle(switchcnt * 4 > sched_idlespinthresh);
+ tdq->tdq_cpu_idle = 0;
+
+ /*
+ * Account thread-less hardware interrupts and
+ * other wakeup reasons equal to context switches.
+ */
+ switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
+ if (switchcnt != oldswitchcnt)
+ continue;
+ tdq->tdq_switchcnt++;
+ oldswitchcnt++;
}
}
@@ -2799,6 +2853,7 @@ sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
if (new_val <= 0)
return (EINVAL);
sched_slice = imax(1, (new_val + period / 2) / period);
+ sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
realstathz);
return (0);
diff --git a/sys/kern/subr_param.c b/sys/kern/subr_param.c
index 6ff54af..421bd37 100644
--- a/sys/kern/subr_param.c
+++ b/sys/kern/subr_param.c
@@ -279,17 +279,17 @@ init_param2(long physpages)
maxusers = physpages / (2 * 1024 * 1024 / PAGE_SIZE);
if (maxusers < 32)
maxusers = 32;
- /*
- * Clips maxusers to 384 on machines with <= 4GB RAM or 32bit.
- * Scales it down 6x for large memory machines.
- */
- if (maxusers > 384) {
- if (sizeof(void *) <= 4)
- maxusers = 384;
- else
- maxusers = 384 + ((maxusers - 384) / 6);
- }
- }
+#ifdef VM_MAX_AUTOTUNE_MAXUSERS
+ if (maxusers > VM_MAX_AUTOTUNE_MAXUSERS)
+ maxusers = VM_MAX_AUTOTUNE_MAXUSERS;
+#endif
+ /*
+ * Scales down the function in which maxusers grows once
+ * we hit 384.
+ */
+ if (maxusers > 384)
+ maxusers = 384 + ((maxusers - 384) / 8);
+ }
/*
* The following can be overridden after boot via sysctl. Note:
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index e6d0d80..5c7b753 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -114,7 +114,7 @@ tty_watermarks(struct tty *tp)
/* Set low watermark at 10% (when 90% is available). */
tp->t_inlow = (ttyinq_getallocatedsize(&tp->t_inq) * 9) / 10;
- /* Provide an ouput buffer for 0.2 seconds of data. */
+ /* Provide an output buffer for 0.2 seconds of data. */
bs = MIN(tp->t_termios.c_ospeed / 5, TTYBUF_MAX);
ttyoutq_setsize(&tp->t_outq, tp, bs);
diff --git a/sys/kern/uipc_mqueue.c b/sys/kern/uipc_mqueue.c
index 81dec87..9da464c 100644
--- a/sys/kern/uipc_mqueue.c
+++ b/sys/kern/uipc_mqueue.c
@@ -582,7 +582,6 @@ mqfs_mount(struct mount *mp)
mp->mnt_data = &mqfs_data;
MNT_ILOCK(mp);
mp->mnt_flag |= MNT_LOCAL;
- mp->mnt_kern_flag |= MNTK_MPSAFE;
MNT_IUNLOCK(mp);
vfs_getnewfsid(mp);
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index ac600de..454caee 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -3110,7 +3110,6 @@ DB_SHOW_COMMAND(mount, db_show_mount)
MNT_KERN_FLAG(MNTK_SUSPEND);
MNT_KERN_FLAG(MNTK_SUSPEND2);
MNT_KERN_FLAG(MNTK_SUSPENDED);
- MNT_KERN_FLAG(MNTK_MPSAFE);
MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
MNT_KERN_FLAG(MNTK_NOKNOTE);
#undef MNT_KERN_FLAG
diff --git a/sys/mips/conf/AP91.hints b/sys/mips/conf/AP91.hints
index 803bbc2..6c0cc43 100644
--- a/sys/mips/conf/AP91.hints
+++ b/sys/mips/conf/AP91.hints
@@ -30,6 +30,7 @@ hint.arge.1.fduplex=1
#
# AR7240 switch config
#
+hint.arswitch.0.at="mdio0"
hint.arswitch.0.is_7240=1 # We need to be explicitly told this
hint.arswitch.0.numphys=4 # 4 active switch PHYs (PHY 0 -> 3)
hint.arswitch.0.phy4cpu=1 # Yes, PHY 4 == dedicated PHY
diff --git a/sys/mips/conf/AP93.hints b/sys/mips/conf/AP93.hints
index 91f2a04..22be50f 100644
--- a/sys/mips/conf/AP93.hints
+++ b/sys/mips/conf/AP93.hints
@@ -25,6 +25,7 @@ hint.arge.1.fduplex=1
#
# AR7240 switch config
#
+hint.arswitch.0.at="mdio0"
hint.arswitch.0.is_7240=1 # We need to be explicitly told this
hint.arswitch.0.numphys=4 # 4 active switch PHYs (PHY 0 -> 3)
hint.arswitch.0.phy4cpu=1 # Yes, PHY 4 == dedicated PHY
diff --git a/sys/mips/conf/AP96.hints b/sys/mips/conf/AP96.hints
index c0778bc..6067a4b 100644
--- a/sys/mips/conf/AP96.hints
+++ b/sys/mips/conf/AP96.hints
@@ -25,6 +25,14 @@ hint.arge.1.phymask=0x10
hint.arge.1.miimode=3 # RGMII
hint.arge.1.mdio=mdioproxy1 # off the switch mdiobus
+# AR8316 switch on MDIO0
+hint.arswitch.0.at="mdio0"
+hint.arswitch.0.is_7240=0
+hint.arswitch.0.numphys=4
+hint.arswitch.0.phy4cpu=1
+hint.arswitch.0.is_rgmii=1
+hint.arswitch.0.is_gmii=0
+
# ath0 - slot 17
hint.pcib.0.bus.0.17.0.ath_fixup_addr=0x1fff1000
hint.pcib.0.bus.0.17.0.ath_fixup_size=4096
diff --git a/sys/mips/conf/RSPRO.hints b/sys/mips/conf/RSPRO.hints
index 3be5d13..a802328 100644
--- a/sys/mips/conf/RSPRO.hints
+++ b/sys/mips/conf/RSPRO.hints
@@ -17,6 +17,7 @@ hint.arge.1.fduplex=1 #
hint.arge.1.phymask=0x0 # no directly mapped PHYs
hint.arge.1.miimode=3 # RGMII
+hint.arswitch.0.at="mdio0"
hint.arswitch.0.is_7240=0
hint.arswitch.0.numphys=4
hint.arswitch.0.phy4cpu=1
diff --git a/sys/modules/Makefile b/sys/modules/Makefile
index 676ab8a..dabec69 100644
--- a/sys/modules/Makefile
+++ b/sys/modules/Makefile
@@ -792,10 +792,15 @@ _exca= exca
_nvram= powermac_nvram
_pccard= pccard
_sound= sound
+_cyclic= cyclic
+_dtrace= dtrace
+_opensolaris= opensolaris
.endif
.if ${MACHINE_ARCH} == "powerpc64"
.if ${MK_CDDL} != "no" || defined(ALL_MODULES)
+_cyclic= cyclic
+_dtrace= dtrace
_opensolaris= opensolaris
.endif
.if ${MK_ZFS} != "no" || defined(ALL_MODULES)
diff --git a/sys/modules/dtrace/Makefile b/sys/modules/dtrace/Makefile
index 02423e9..6d18143 100644
--- a/sys/modules/dtrace/Makefile
+++ b/sys/modules/dtrace/Makefile
@@ -10,12 +10,14 @@ SUBDIR= dtmalloc \
dtraceall \
dtrace_test \
dtio \
+ lockstat \
+ profile \
prototype \
sdt \
systrace
.if ${MACHINE_CPUARCH} == "amd64" || ${MACHINE_CPUARCH} == "i386"
-SUBDIR+= fasttrap fbt lockstat profile systrace_linux32
+SUBDIR+= fasttrap fbt systrace_linux32
.endif
.if ${MACHINE_CPUARCH} == "amd64"
SUBDIR+= systrace_freebsd32
diff --git a/sys/modules/nxge/Makefile b/sys/modules/nxge/Makefile
index a21239e..98119c9 100644
--- a/sys/modules/nxge/Makefile
+++ b/sys/modules/nxge/Makefile
@@ -1,7 +1,7 @@
# $FreeBSD$
.PATH: ${.CURDIR}/../../dev/nxge
-VPATH = ${.CURDIR}/../../dev/nxge/xgehal
+.PATH: ${.CURDIR}/../../dev/nxge/xgehal
CFLAGS_NXGE =
diff --git a/sys/netinet/sctp_constants.h b/sys/netinet/sctp_constants.h
index dd70bcb..07f9612 100644
--- a/sys/netinet/sctp_constants.h
+++ b/sys/netinet/sctp_constants.h
@@ -370,6 +370,7 @@ __FBSDID("$FreeBSD$");
#define SCTP_DATAGRAM_ACKED 10010
#define SCTP_DATAGRAM_MARKED 20010
#define SCTP_FORWARD_TSN_SKIP 30010
+#define SCTP_DATAGRAM_NR_MARKED 40010
/* chunk output send from locations */
#define SCTP_OUTPUT_FROM_USR_SEND 0
diff --git a/sys/netinet/sctp_indata.c b/sys/netinet/sctp_indata.c
index ac776a8..7701780 100644
--- a/sys/netinet/sctp_indata.c
+++ b/sys/netinet/sctp_indata.c
@@ -2975,9 +2975,10 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
* All chunks NOT UNSENT fall through here and are marked
* (leave PR-SCTP ones that are to skip alone though)
*/
- if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
+ (tp1->sent != SCTP_DATAGRAM_NR_MARKED)) {
tp1->sent = SCTP_DATAGRAM_MARKED;
-
+ }
if (tp1->rec.data.chunk_was_revoked) {
/* deflate the cwnd */
tp1->whoTo->cwnd -= tp1->book_size;
@@ -2985,6 +2986,7 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
}
/* NR Sack code here */
if (nr_sacking) {
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
if (tp1->data) {
/*
* sa_ignore
@@ -3600,12 +3602,14 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
}
TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
- tp1->sent != SCTP_DATAGRAM_RESEND) {
+ tp1->sent != SCTP_DATAGRAM_RESEND &&
+ tp1->sent != SCTP_DATAGRAM_NR_MARKED) {
/* no chance to advance, out of here */
break;
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
- if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
+ if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
+ (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
sctp_misc_ints(SCTP_FWD_TSN_CHECK,
asoc->advanced_peer_ack_point,
tp1->rec.data.TSN_seq, 0, 0);
@@ -3653,7 +3657,8 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
* the chunk, advance our peer ack point and we can check
* the next chunk.
*/
- if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
+ if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
+ (tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
/* advance PeerAckPoint goes forward */
if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
@@ -3958,7 +3963,15 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
tp1->whoTo->cwnd -= tp1->book_size;
tp1->rec.data.chunk_was_revoked = 0;
}
- tp1->sent = SCTP_DATAGRAM_ACKED;
+ if (tp1->sent != SCTP_DATAGRAM_NR_MARKED) {
+ if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
+ asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
+#endif
+ }
+ }
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
if (tp1->data) {
/* sa_ignore NO_NULL_CHK */
@@ -4694,10 +4707,14 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
break;
}
- if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
- /* no more sent on list */
- SCTP_PRINTF("Warning, tp1->sent == %d and its now acked?\n",
- tp1->sent);
+ if (tp1->sent != SCTP_DATAGRAM_NR_MARKED) {
+ if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
+ asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
+#endif
+ }
}
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
if (tp1->pr_sctp_on) {
diff --git a/sys/netinet/sctp_input.c b/sys/netinet/sctp_input.c
index 2371ecb..d1a0ef6 100644
--- a/sys/netinet/sctp_input.c
+++ b/sys/netinet/sctp_input.c
@@ -311,6 +311,13 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
if (chk->rec.data.stream_number >= newcnt) {
TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
asoc->send_queue_cnt--;
+ if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
+#endif
+ }
if (chk->data != NULL) {
sctp_free_bufspace(stcb, asoc, chk, 1);
sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c
index cc97fd1..4a77b34 100644
--- a/sys/netinet/sctp_output.c
+++ b/sys/netinet/sctp_output.c
@@ -10152,7 +10152,8 @@ sctp_fill_in_rest:
unsigned int cnt_of_skipped = 0;
TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
- if (at->sent != SCTP_FORWARD_TSN_SKIP) {
+ if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
+ (at->sent != SCTP_DATAGRAM_NR_MARKED)) {
/* no more to look at */
break;
}
diff --git a/sys/netinet/sctp_pcb.c b/sys/netinet/sctp_pcb.c
index 4a40b91..93115f8 100644
--- a/sys/netinet/sctp_pcb.c
+++ b/sys/netinet/sctp_pcb.c
@@ -4949,6 +4949,15 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
}
/* sent queue SHOULD be empty */
TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
+ if (chk->sent != SCTP_DATAGRAM_NR_MARKED) {
+ if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
+#endif
+ }
+ }
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
if (chk->data) {
if (so) {
diff --git a/sys/netinet/sctp_structs.h b/sys/netinet/sctp_structs.h
index d899ee5..05d5fd1 100644
--- a/sys/netinet/sctp_structs.h
+++ b/sys/netinet/sctp_structs.h
@@ -588,6 +588,7 @@ union scheduling_parameters {
struct sctp_stream_out {
struct sctp_streamhead outqueue;
union scheduling_parameters ss_params;
+ uint32_t chunks_on_queues;
uint16_t stream_no;
uint16_t next_sequence_send; /* next one I expect to send out */
uint8_t last_msg_incomplete;
diff --git a/sys/netinet/sctp_timer.c b/sys/netinet/sctp_timer.c
index d422350..fe7f754 100644
--- a/sys/netinet/sctp_timer.c
+++ b/sys/netinet/sctp_timer.c
@@ -440,6 +440,11 @@ sctp_recover_sent_list(struct sctp_tcb *stcb)
if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.TSN_seq)) {
SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
(void *)chk, chk->rec.data.TSN_seq, asoc->last_acked_seq);
+ if (chk->sent != SCTP_DATAGRAM_NR_MARKED) {
+ if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+ }
+ }
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
if (chk->pr_sctp_on) {
if (asoc->pr_sctp_cnt != 0)
diff --git a/sys/netinet/sctputil.c b/sys/netinet/sctputil.c
index d636c28..72dc34e 100644
--- a/sys/netinet/sctputil.c
+++ b/sys/netinet/sctputil.c
@@ -3727,6 +3727,15 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock,
TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) {
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
asoc->sent_queue_cnt--;
+ if (chk->sent != SCTP_DATAGRAM_NR_MARKED) {
+ if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
+#endif
+ }
+ }
if (chk->data != NULL) {
sctp_free_bufspace(stcb, asoc, chk, 1);
sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb,
@@ -3743,6 +3752,13 @@ sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock,
TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
asoc->send_queue_cnt--;
+ if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
+ asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
+#ifdef INVARIANTS
+ } else {
+ panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
+#endif
+ }
if (chk->data != NULL) {
sctp_free_bufspace(stcb, asoc, chk, 1);
sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
diff --git a/sys/netinet/tcp_output.c b/sys/netinet/tcp_output.c
index d367674..b050fcf 100644
--- a/sys/netinet/tcp_output.c
+++ b/sys/netinet/tcp_output.c
@@ -127,7 +127,7 @@ static void inline hhook_run_tcp_est_out(struct tcpcb *tp,
static void inline cc_after_idle(struct tcpcb *tp);
/*
- * Wrapper for the TCP established ouput helper hook.
+ * Wrapper for the TCP established output helper hook.
*/
static void inline
hhook_run_tcp_est_out(struct tcpcb *tp, struct tcphdr *th,
diff --git a/sys/netinet/tcp_subr.c b/sys/netinet/tcp_subr.c
index c566ec3..5c2a115 100644
--- a/sys/netinet/tcp_subr.c
+++ b/sys/netinet/tcp_subr.c
@@ -529,11 +529,11 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
nth = (struct tcphdr *)(ip6 + 1);
} else
#endif /* INET6 */
- {
- bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
- ip = mtod(m, struct ip *);
- nth = (struct tcphdr *)(ip + 1);
- }
+ {
+ bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
+ ip = mtod(m, struct ip *);
+ nth = (struct tcphdr *)(ip + 1);
+ }
bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
flags = TH_ACK;
} else {
@@ -553,10 +553,10 @@ tcp_respond(struct tcpcb *tp, void *ipgen, struct tcphdr *th, struct mbuf *m,
nth = (struct tcphdr *)(ip6 + 1);
} else
#endif /* INET6 */
- {
- xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
- nth = (struct tcphdr *)(ip + 1);
- }
+ {
+ xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, uint32_t);
+ nth = (struct tcphdr *)(ip + 1);
+ }
if (th != nth) {
/*
* this is usually a case when an extension header
@@ -902,14 +902,14 @@ tcp_discardcb(struct tcpcb *tp)
ssthresh = 2;
ssthresh *= (u_long)(tp->t_maxseg +
#ifdef INET6
- (isipv6 ? sizeof (struct ip6_hdr) +
- sizeof (struct tcphdr) :
+ (isipv6 ? sizeof (struct ip6_hdr) +
+ sizeof (struct tcphdr) :
#endif
- sizeof (struct tcpiphdr)
+ sizeof (struct tcpiphdr)
#ifdef INET6
- )
+ )
#endif
- );
+ );
} else
ssthresh = 0;
metrics.rmx_ssthresh = ssthresh;
diff --git a/sys/netinet/tcp_timewait.c b/sys/netinet/tcp_timewait.c
index cdfad7e..c7076d7 100644
--- a/sys/netinet/tcp_timewait.c
+++ b/sys/netinet/tcp_timewait.c
@@ -519,6 +519,7 @@ tcp_twrespond(struct tcptw *tw, int flags)
struct ip6_hdr *ip6 = NULL;
int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif
+ hdrlen = 0; /* Keep compiler happy */
INP_WLOCK_ASSERT(inp);
diff --git a/sys/netinet6/in6_src.c b/sys/netinet6/in6_src.c
index 85392a8..e028517 100644
--- a/sys/netinet6/in6_src.c
+++ b/sys/netinet6/in6_src.c
@@ -140,7 +140,7 @@ static void init_policy_queue(void);
static int add_addrsel_policyent(struct in6_addrpolicy *);
static int delete_addrsel_policyent(struct in6_addrpolicy *);
static int walk_addrsel_policy(int (*)(struct in6_addrpolicy *, void *),
- void *);
+ void *);
static int dump_addrsel_policyent(struct in6_addrpolicy *, void *);
static struct in6_addrpolicy *match_addrsel_policy(struct sockaddr_in6 *);
@@ -1103,8 +1103,7 @@ delete_addrsel_policyent(struct in6_addrpolicy *key)
}
static int
-walk_addrsel_policy(int (*callback)(struct in6_addrpolicy *, void *),
- void *w)
+walk_addrsel_policy(int (*callback)(struct in6_addrpolicy *, void *), void *w)
{
struct addrsel_policyent *pol;
int error = 0;
diff --git a/sys/netpfil/ipfw/ip_fw_dynamic.c b/sys/netpfil/ipfw/ip_fw_dynamic.c
index 859df19..57b5731 100644
--- a/sys/netpfil/ipfw/ip_fw_dynamic.c
+++ b/sys/netpfil/ipfw/ip_fw_dynamic.c
@@ -971,6 +971,31 @@ ipfw_send_pkt(struct mbuf *replyto, struct ipfw_flow_id *id, u_int32_t seq,
}
/*
+ * Queue keepalive packets for a given dynamic rule
+ */
+static struct mbuf **
+ipfw_dyn_send_ka(struct mbuf **mtailp, ipfw_dyn_rule *q)
+{
+ struct mbuf *m_rev, *m_fwd;
+
+ m_rev = (q->state & ACK_REV) ? NULL :
+ ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1, q->ack_fwd, TH_SYN);
+ m_fwd = (q->state & ACK_FWD) ? NULL :
+ ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1, q->ack_rev, 0);
+
+ if (m_rev != NULL) {
+ *mtailp = m_rev;
+ mtailp = &(*mtailp)->m_nextpkt;
+ }
+ if (m_fwd != NULL) {
+ *mtailp = m_fwd;
+ mtailp = &(*mtailp)->m_nextpkt;
+ }
+
+ return (mtailp);
+}
+
+/*
* This procedure is only used to handle keepalives. It is invoked
* every dyn_keepalive_period
*/
@@ -978,9 +1003,7 @@ static void
ipfw_tick(void * vnetx)
{
struct mbuf *m0, *m, *mnext, **mtailp;
-#ifdef INET6
- struct mbuf *m6, **m6_tailp;
-#endif
+ struct ip *h;
int i;
ipfw_dyn_rule *q;
#ifdef VIMAGE
@@ -999,15 +1022,14 @@ ipfw_tick(void * vnetx)
*/
m0 = NULL;
mtailp = &m0;
-#ifdef INET6
- m6 = NULL;
- m6_tailp = &m6;
-#endif
IPFW_DYN_LOCK();
for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
for (q = V_ipfw_dyn_v[i] ; q ; q = q->next ) {
if (q->dyn_type == O_LIMIT_PARENT)
continue;
+ if (TIME_LEQ(q->expire, time_uptime))
+ continue; /* too late, rule expired */
+
if (q->id.proto != IPPROTO_TCP)
continue;
if ( (q->state & BOTH_SYN) != BOTH_SYN)
@@ -1015,55 +1037,24 @@ ipfw_tick(void * vnetx)
if (TIME_LEQ(time_uptime + V_dyn_keepalive_interval,
q->expire))
continue; /* too early */
- if (TIME_LEQ(q->expire, time_uptime))
- continue; /* too late, rule expired */
- m = (q->state & ACK_REV) ? NULL :
- ipfw_send_pkt(NULL, &(q->id), q->ack_rev - 1,
- q->ack_fwd, TH_SYN);
- mnext = (q->state & ACK_FWD) ? NULL :
- ipfw_send_pkt(NULL, &(q->id), q->ack_fwd - 1,
- q->ack_rev, 0);
-
- switch (q->id.addr_type) {
- case 4:
- if (m != NULL) {
- *mtailp = m;
- mtailp = &(*mtailp)->m_nextpkt;
- }
- if (mnext != NULL) {
- *mtailp = mnext;
- mtailp = &(*mtailp)->m_nextpkt;
- }
- break;
-#ifdef INET6
- case 6:
- if (m != NULL) {
- *m6_tailp = m;
- m6_tailp = &(*m6_tailp)->m_nextpkt;
- }
- if (mnext != NULL) {
- *m6_tailp = mnext;
- m6_tailp = &(*m6_tailp)->m_nextpkt;
- }
- break;
-#endif
- }
+ mtailp = ipfw_dyn_send_ka(mtailp, q);
}
}
IPFW_DYN_UNLOCK();
+
+ /* Send keepalive packets if any */
for (m = m0; m != NULL; m = mnext) {
mnext = m->m_nextpkt;
m->m_nextpkt = NULL;
- ip_output(m, NULL, NULL, 0, NULL, NULL);
- }
+ h = mtod(m, struct ip *);
+ if (h->ip_v == 4)
+ ip_output(m, NULL, NULL, 0, NULL, NULL);
#ifdef INET6
- for (m = m6; m != NULL; m = mnext) {
- mnext = m->m_nextpkt;
- m->m_nextpkt = NULL;
- ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
- }
+ else
+ ip6_output(m, NULL, NULL, 0, NULL, NULL, NULL);
#endif
+ }
done:
callout_reset_on(&V_ipfw_timeout, V_dyn_keepalive_period * hz,
ipfw_tick, vnetx, 0);
diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c
index bd1e5c4..f5f8a33 100644
--- a/sys/netpfil/pf/if_pfsync.c
+++ b/sys/netpfil/pf/if_pfsync.c
@@ -47,6 +47,7 @@
* 1.118, 1.124, 1.148, 1.149, 1.151, 1.171 - fixes to bulk updates
* 1.120, 1.175 - use monotonic time_uptime
* 1.122 - reduce number of updates for non-TCP sessions
+ * 1.125 - rewrite merge or stale processing
* 1.128 - cleanups
* 1.146 - bzero() mbuf before sparsely filling it with data
* 1.170 - SIOCSIFMTU checks
@@ -774,7 +775,7 @@ static int
pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
struct pfsync_state_peer *dst)
{
- int sfail = 0;
+ int sync = 0;
PF_STATE_LOCK_ASSERT(st);
@@ -783,27 +784,22 @@ pfsync_upd_tcp(struct pf_state *st, struct pfsync_state_peer *src,
* for syn-proxy states. Neither should the
* sequence window slide backwards.
*/
- if (st->src.state > src->state &&
+ if ((st->src.state > src->state &&
(st->src.state < PF_TCPS_PROXY_SRC ||
- src->state >= PF_TCPS_PROXY_SRC))
- sfail = 1;
- else if (SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
- sfail = 3;
- else if (st->dst.state > dst->state) {
- /* There might still be useful
- * information about the src state here,
- * so import that part of the update,
- * then "fail" so we send the updated
- * state back to the peer who is missing
- * our what we know. */
+ src->state >= PF_TCPS_PROXY_SRC)) ||
+ SEQ_GT(st->src.seqlo, ntohl(src->seqlo)))
+ sync++;
+ else
pf_state_peer_ntoh(src, &st->src);
- /* XXX do anything with timeouts? */
- sfail = 7;
- } else if (st->dst.state >= TCPS_SYN_SENT &&
- SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo)))
- sfail = 4;
- return (sfail);
+ if (st->dst.state > dst->state ||
+ (st->dst.state >= TCPS_SYN_SENT &&
+ SEQ_GT(st->dst.seqlo, ntohl(dst->seqlo))))
+ sync++;
+ else
+ pf_state_peer_ntoh(dst, &st->dst);
+
+ return (sync);
}
static int
@@ -811,9 +807,8 @@ pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
struct pfsync_softc *sc = V_pfsyncif;
struct pfsync_state *sa, *sp;
- struct pf_state_key *sk;
struct pf_state *st;
- int sfail;
+ int sync;
struct mbuf *mp;
int len = count * sizeof(*sp);
@@ -855,29 +850,33 @@ pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
PFSYNC_UNLOCK(sc);
}
- sk = st->key[PF_SK_WIRE]; /* XXX right one? */
- sfail = 0;
- if (sk->proto == IPPROTO_TCP)
- sfail = pfsync_upd_tcp(st, &sp->src, &sp->dst);
+ if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
+ sync = pfsync_upd_tcp(st, &sp->src, &sp->dst);
else {
+ sync = 0;
+
/*
* Non-TCP protocol state machine always go
* forwards
*/
if (st->src.state > sp->src.state)
- sfail = 5;
- else if (st->dst.state > sp->dst.state)
- sfail = 6;
+ sync++;
+ else
+ pf_state_peer_ntoh(&sp->src, &st->src);
+ if (st->dst.state > sp->dst.state)
+ sync++;
+ else
+ pf_state_peer_ntoh(&sp->dst, &st->dst);
+ }
+ if (sync < 2) {
+ pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
+ pf_state_peer_ntoh(&sp->dst, &st->dst);
+ st->expire = time_uptime;
+ st->timeout = sp->timeout;
}
+ st->pfsync_time = time_uptime;
- if (sfail) {
- if (V_pf_status.debug >= PF_DEBUG_MISC) {
- printf("pfsync: %s stale update (%d)"
- " id: %016llx creatorid: %08x\n",
- (sfail < 7 ? "ignoring" : "partial"),
- sfail, (unsigned long long)be64toh(st->id),
- ntohl(st->creatorid));
- }
+ if (sync) {
V_pfsyncstats.pfsyncs_stale++;
pfsync_update_state(st);
@@ -887,12 +886,6 @@ pfsync_in_upd(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
PFSYNC_UNLOCK(sc);
continue;
}
- pfsync_alloc_scrub_memory(&sp->dst, &st->dst);
- pf_state_peer_ntoh(&sp->src, &st->src);
- pf_state_peer_ntoh(&sp->dst, &st->dst);
- st->expire = time_uptime;
- st->timeout = sp->timeout;
- st->pfsync_time = time_uptime;
PF_STATE_UNLOCK(st);
}
@@ -904,12 +897,9 @@ pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
{
struct pfsync_softc *sc = V_pfsyncif;
struct pfsync_upd_c *ua, *up;
- struct pf_state_key *sk;
struct pf_state *st;
-
int len = count * sizeof(*up);
- int sfail;
-
+ int sync;
struct mbuf *mp;
int offp, i;
@@ -951,28 +941,33 @@ pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
PFSYNC_UNLOCK(sc);
}
- sk = st->key[PF_SK_WIRE]; /* XXX right one? */
- sfail = 0;
- if (sk->proto == IPPROTO_TCP)
- sfail = pfsync_upd_tcp(st, &up->src, &up->dst);
+ if (st->key[PF_SK_WIRE]->proto == IPPROTO_TCP)
+ sync = pfsync_upd_tcp(st, &up->src, &up->dst);
else {
+ sync = 0;
+
/*
- * Non-TCP protocol state machine always go forwards
+ * Non-TCP protocol state machine always go
+ * forwards
*/
if (st->src.state > up->src.state)
- sfail = 5;
- else if (st->dst.state > up->dst.state)
- sfail = 6;
+ sync++;
+ else
+ pf_state_peer_ntoh(&up->src, &st->src);
+ if (st->dst.state > up->dst.state)
+ sync++;
+ else
+ pf_state_peer_ntoh(&up->dst, &st->dst);
+ }
+ if (sync < 2) {
+ pfsync_alloc_scrub_memory(&up->dst, &st->dst);
+ pf_state_peer_ntoh(&up->dst, &st->dst);
+ st->expire = time_uptime;
+ st->timeout = up->timeout;
}
+ st->pfsync_time = time_uptime;
- if (sfail) {
- if (V_pf_status.debug >= PF_DEBUG_MISC) {
- printf("pfsync: ignoring stale update "
- "(%d) id: %016llx "
- "creatorid: %08x\n", sfail,
- (unsigned long long)be64toh(st->id),
- ntohl(st->creatorid));
- }
+ if (sync) {
V_pfsyncstats.pfsyncs_stale++;
pfsync_update_state(st);
@@ -982,12 +977,6 @@ pfsync_in_upd_c(struct pfsync_pkt *pkt, struct mbuf *m, int offset, int count)
PFSYNC_UNLOCK(sc);
continue;
}
- pfsync_alloc_scrub_memory(&up->dst, &st->dst);
- pf_state_peer_ntoh(&up->src, &st->src);
- pf_state_peer_ntoh(&up->dst, &st->dst);
- st->expire = time_uptime;
- st->timeout = up->timeout;
- st->pfsync_time = time_uptime;
PF_STATE_UNLOCK(st);
}
@@ -1545,6 +1534,16 @@ pfsync_sendout(int schedswi)
KASSERT(st->sync_state == q,
("%s: st->sync_state == q",
__func__));
+ if (st->timeout == PFTM_UNLINKED) {
+ /*
+ * This happens if pfsync was once
+ * stopped, and then re-enabled
+				 * after a long time. Theoretically it
+				 * may happen at normal runtime, too.
+ */
+ pf_release_state(st);
+ continue;
+ }
/*
* XXXGL: some of write methods do unlocked reads
* of state data :(
diff --git a/sys/nfsclient/nfs_vfsops.c b/sys/nfsclient/nfs_vfsops.c
index 9172703..63b5772 100644
--- a/sys/nfsclient/nfs_vfsops.c
+++ b/sys/nfsclient/nfs_vfsops.c
@@ -1193,7 +1193,7 @@ nfs_mount(struct mount *mp)
out:
if (!error) {
MNT_ILOCK(mp);
- mp->mnt_kern_flag |= (MNTK_MPSAFE|MNTK_LOOKUP_SHARED);
+ mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED;
MNT_IUNLOCK(mp);
}
return (error);
diff --git a/sys/ofed/drivers/infiniband/core/cma.c b/sys/ofed/drivers/infiniband/core/cma.c
index 9867f10..34419f3 100644
--- a/sys/ofed/drivers/infiniband/core/cma.c
+++ b/sys/ofed/drivers/infiniband/core/cma.c
@@ -1312,7 +1312,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
*sin = iw_event->local_addr;
sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
*sin = iw_event->remote_addr;
- switch (iw_event->status) {
+ switch ((int)iw_event->status) {
case 0:
event.event = RDMA_CM_EVENT_ESTABLISHED;
break;
diff --git a/sys/ofed/drivers/infiniband/core/ud_header.c b/sys/ofed/drivers/infiniband/core/ud_header.c
index e095a12..09fc1ff 100644
--- a/sys/ofed/drivers/infiniband/core/ud_header.c
+++ b/sys/ofed/drivers/infiniband/core/ud_header.c
@@ -230,7 +230,7 @@ void ib_ud_header_init(int payload_bytes,
int immediate_present,
struct ib_ud_header *header)
{
- u16 packet_length;
+ u16 packet_length = 0;
memset(header, 0, sizeof *header);
diff --git a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
index de4b80b..f8d6181 100644
--- a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
+++ b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_rx.c
@@ -590,7 +590,7 @@ sdp_rx_comp_work(struct work_struct *work)
if (unlikely(!ssk->poll_cq)) {
struct rdma_cm_id *id = ssk->id;
if (id && id->qp)
- rdma_notify(id, RDMA_CM_EVENT_ESTABLISHED);
+ rdma_notify(id, IB_EVENT_COMM_EST);
goto out;
}
diff --git a/sys/ofed/include/linux/cdev.h b/sys/ofed/include/linux/cdev.h
index cc77495..ea48334 100644
--- a/sys/ofed/include/linux/cdev.h
+++ b/sys/ofed/include/linux/cdev.h
@@ -107,7 +107,7 @@ cdev_add(struct linux_cdev *cdev, dev_t dev, unsigned count)
if (count != 1)
panic("cdev_add: Unsupported count: %d", count);
cdev->cdev = make_dev(&linuxcdevsw, MINOR(dev), 0, 0, 0700,
- kobject_name(&cdev->kobj));
+ "%s", kobject_name(&cdev->kobj));
cdev->dev = dev;
cdev->cdev->si_drv1 = cdev;
diff --git a/sys/ofed/include/linux/pci.h b/sys/ofed/include/linux/pci.h
index b05f6f2..5d91e2d 100644
--- a/sys/ofed/include/linux/pci.h
+++ b/sys/ofed/include/linux/pci.h
@@ -73,10 +73,12 @@ struct pci_device_id {
#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
-#define PCI_VDEVICE(vendor, device) \
- PCI_VENDOR_ID_##vendor, (device), PCI_ANY_ID, PCI_ANY_ID, 0, 0
-#define PCI_DEVICE(vendor, device) \
- (vendor), (device), PCI_ANY_ID, PCI_ANY_ID, 0, 0
+#define PCI_VDEVICE(_vendor, _device) \
+ .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#define PCI_DEVICE(_vendor, _device) \
+ .vendor = (_vendor), .device = (_device), \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
#define to_pci_dev(n) container_of(n, struct pci_dev, dev)
diff --git a/sys/pc98/include/bus.h b/sys/pc98/include/bus.h
index 46d1a1b..3292474 100644
--- a/sys/pc98/include/bus.h
+++ b/sys/pc98/include/bus.h
@@ -317,24 +317,22 @@ _BUS_ACCESS_METHODS_PROTO(u_int32_t,4)
/*
* read methods
*/
-#define _BUS_SPACE_READ(TYPE,BWN) \
-static __inline TYPE \
-bus_space_read_##BWN (tag, bsh, offset) \
- bus_space_tag_t tag; \
- bus_space_handle_t bsh; \
- bus_size_t offset; \
-{ \
- register TYPE result; \
- \
- __asm __volatile("call *%2" \
- :"=a" (result), \
- "=d" (offset) \
- :"o" (bsh->bsh_bam.bs_read_##BWN), \
- "b" (bsh), \
- "1" (offset) \
- ); \
- \
- return result; \
+#define _BUS_SPACE_READ(TYPE,BWN) \
+static __inline TYPE \
+bus_space_read_##BWN (bus_space_tag_t tag, bus_space_handle_t bsh, \
+ bus_size_t offset) \
+{ \
+ register TYPE result; \
+ \
+ __asm __volatile("call *%2" \
+ :"=a" (result), \
+ "=d" (offset) \
+ :"o" (bsh->bsh_bam.bs_read_##BWN), \
+ "b" (bsh), \
+ "1" (offset) \
+ ); \
+ \
+ return result; \
}
_BUS_SPACE_READ(u_int8_t,1)
@@ -344,22 +342,19 @@ _BUS_SPACE_READ(u_int32_t,4)
/*
* write methods
*/
-#define _BUS_SPACE_WRITE(TYPE,BWN) \
-static __inline void \
-bus_space_write_##BWN (tag, bsh, offset, val) \
- bus_space_tag_t tag; \
- bus_space_handle_t bsh; \
- bus_size_t offset; \
- TYPE val; \
-{ \
- \
- __asm __volatile("call *%1" \
- :"=d" (offset) \
- :"o" (bsh->bsh_bam.bs_write_##BWN), \
- "a" (val), \
- "b" (bsh), \
- "0" (offset) \
- ); \
+#define _BUS_SPACE_WRITE(TYPE,BWN) \
+static __inline void \
+bus_space_write_##BWN (bus_space_tag_t tag, bus_space_handle_t bsh, \
+ bus_size_t offset, TYPE val) \
+{ \
+ \
+ __asm __volatile("call *%1" \
+ :"=d" (offset) \
+ :"o" (bsh->bsh_bam.bs_write_##BWN), \
+ "a" (val), \
+ "b" (bsh), \
+ "0" (offset) \
+ ); \
}
_BUS_SPACE_WRITE(u_int8_t,1)
@@ -371,12 +366,8 @@ _BUS_SPACE_WRITE(u_int32_t,4)
*/
#define _BUS_SPACE_READ_MULTI(TYPE,BWN) \
static __inline void \
-bus_space_read_multi_##BWN (tag, bsh, offset, buf, cnt) \
- bus_space_tag_t tag; \
- bus_space_handle_t bsh; \
- bus_size_t offset; \
- TYPE *buf; \
- size_t cnt; \
+bus_space_read_multi_##BWN (bus_space_tag_t tag, bus_space_handle_t bsh, \
+ bus_size_t offset, TYPE *buf, size_t cnt) \
{ \
\
__asm __volatile("call *%3" \
@@ -400,12 +391,8 @@ _BUS_SPACE_READ_MULTI(u_int32_t,4)
*/
#define _BUS_SPACE_WRITE_MULTI(TYPE,BWN) \
static __inline void \
-bus_space_write_multi_##BWN (tag, bsh, offset, buf, cnt) \
- bus_space_tag_t tag; \
- bus_space_handle_t bsh; \
- bus_size_t offset; \
- const TYPE *buf; \
- size_t cnt; \
+bus_space_write_multi_##BWN (bus_space_tag_t tag, bus_space_handle_t bsh, \
+ bus_size_t offset, const TYPE *buf, size_t cnt) \
{ \
\
__asm __volatile("call *%3" \
@@ -429,12 +416,8 @@ _BUS_SPACE_WRITE_MULTI(u_int32_t,4)
*/
#define _BUS_SPACE_READ_REGION(TYPE,BWN) \
static __inline void \
-bus_space_read_region_##BWN (tag, bsh, offset, buf, cnt) \
- bus_space_tag_t tag; \
- bus_space_handle_t bsh; \
- bus_size_t offset; \
- TYPE *buf; \
- size_t cnt; \
+bus_space_read_region_##BWN (bus_space_tag_t tag, bus_space_handle_t bsh, \
+ bus_size_t offset, TYPE *buf, size_t cnt) \
{ \
\
__asm __volatile("call *%3" \
@@ -458,12 +441,8 @@ _BUS_SPACE_READ_REGION(u_int32_t,4)
*/
#define _BUS_SPACE_WRITE_REGION(TYPE,BWN) \
static __inline void \
-bus_space_write_region_##BWN (tag, bsh, offset, buf, cnt) \
- bus_space_tag_t tag; \
- bus_space_handle_t bsh; \
- bus_size_t offset; \
- const TYPE *buf; \
- size_t cnt; \
+bus_space_write_region_##BWN (bus_space_tag_t tag, bus_space_handle_t bsh, \
+ bus_size_t offset, const TYPE *buf, size_t cnt) \
{ \
\
__asm __volatile("call *%3" \
@@ -487,12 +466,8 @@ _BUS_SPACE_WRITE_REGION(u_int32_t,4)
*/
#define _BUS_SPACE_SET_MULTI(TYPE,BWN) \
static __inline void \
-bus_space_set_multi_##BWN (tag, bsh, offset, val, cnt) \
- bus_space_tag_t tag; \
- bus_space_handle_t bsh; \
- bus_size_t offset; \
- TYPE val; \
- size_t cnt; \
+bus_space_set_multi_##BWN (bus_space_tag_t tag, bus_space_handle_t bsh, \
+ bus_size_t offset, TYPE val, size_t cnt) \
{ \
\
__asm __volatile("call *%2" \
@@ -515,12 +490,8 @@ _BUS_SPACE_SET_MULTI(u_int32_t,4)
*/
#define _BUS_SPACE_SET_REGION(TYPE,BWN) \
static __inline void \
-bus_space_set_region_##BWN (tag, bsh, offset, val, cnt) \
- bus_space_tag_t tag; \
- bus_space_handle_t bsh; \
- bus_size_t offset; \
- TYPE val; \
- size_t cnt; \
+bus_space_set_region_##BWN (bus_space_tag_t tag, bus_space_handle_t bsh, \
+ bus_size_t offset, TYPE val, size_t cnt) \
{ \
\
__asm __volatile("call *%2" \
@@ -543,13 +514,8 @@ _BUS_SPACE_SET_REGION(u_int32_t,4)
*/
#define _BUS_SPACE_COPY_REGION(BWN) \
static __inline void \
-bus_space_copy_region_##BWN (tag, sbsh, src, dbsh, dst, cnt) \
- bus_space_tag_t tag; \
- bus_space_handle_t sbsh; \
- bus_size_t src; \
- bus_space_handle_t dbsh; \
- bus_size_t dst; \
- size_t cnt; \
+bus_space_copy_region_##BWN (bus_space_tag_t tag, bus_space_handle_t sbsh, \
+ bus_size_t src, bus_space_handle_t dbsh, bus_size_t dst, size_t cnt) \
{ \
\
if (dbsh->bsh_bam.bs_copy_region_1 != sbsh->bsh_bam.bs_copy_region_1) \
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index b34966a..3133d1b 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -54,6 +54,7 @@ __FBSDID("$FreeBSD$");
#include "opt_mp_watchdog.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
+#include "opt_kdtrace.h"
#include <sys/param.h>
#include <sys/proc.h>
@@ -1236,7 +1237,7 @@ cpu_idle(int busy)
CTR2(KTR_SPARE2, "cpu_idle(%d) at %d",
busy, curcpu);
-#ifdef MP_WATCHDOG
+#if defined(MP_WATCHDOG)
ap_watchdog(PCPU_GET(cpuid));
#endif
/* If we are busy - try to use fast methods. */
@@ -1772,7 +1773,11 @@ extern inthand_t
IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
- IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
+ IDTVEC(xmm),
+#ifdef KDTRACE_HOOKS
+ IDTVEC(dtrace_ret),
+#endif
+ IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);
#ifdef DDB
/*
@@ -2152,6 +2157,8 @@ do_next:
for (off = 0; off < round_page(msgbufsize); off += PAGE_SIZE)
pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
off);
+
+ PT_UPDATES_FLUSH();
}
void
@@ -2291,6 +2298,10 @@ init386(first)
GSEL(GCODE_SEL, SEL_KPL));
setidt(IDT_SYSCALL, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
GSEL(GCODE_SEL, SEL_KPL));
+#ifdef KDTRACE_HOOKS
+ setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYS386TGT, SEL_UPL,
+ GSEL(GCODE_SEL, SEL_KPL));
+#endif
r_idt.rd_limit = sizeof(idt0) - 1;
r_idt.rd_base = (int) idt;
@@ -2643,7 +2654,8 @@ int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
- KASSERT(td == curthread || TD_IS_SUSPENDED(td),
+ KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
+ P_SHOULDSTOP(td->td_proc),
("not suspended thread %p", td));
#ifdef DEV_NPX
npxgetregs(td);
@@ -2812,6 +2824,7 @@ static void
fpstate_drop(struct thread *td)
{
+ KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
critical_enter();
#ifdef DEV_NPX
if (PCPU_GET(fpcurthread) == td)
diff --git a/sys/powerpc/aim/locore32.S b/sys/powerpc/aim/locore32.S
index 80c3c08..f039db9 100644
--- a/sys/powerpc/aim/locore32.S
+++ b/sys/powerpc/aim/locore32.S
@@ -65,6 +65,8 @@
#include <machine/spr.h>
#include <machine/asm.h>
+#include "opt_kdtrace.h"
+
/* Locate the per-CPU data structure */
#define GET_CPUINFO(r) \
mfsprg0 r
diff --git a/sys/powerpc/aim/locore64.S b/sys/powerpc/aim/locore64.S
index 65c4999..3b3c8f3 100644
--- a/sys/powerpc/aim/locore64.S
+++ b/sys/powerpc/aim/locore64.S
@@ -65,6 +65,8 @@
#include <machine/spr.h>
#include <machine/asm.h>
+#include "opt_kdtrace.h"
+
/* Locate the per-CPU data structure */
#define GET_CPUINFO(r) \
mfsprg0 r
diff --git a/sys/powerpc/aim/trap.c b/sys/powerpc/aim/trap.c
index b55d34b..d30aded 100644
--- a/sys/powerpc/aim/trap.c
+++ b/sys/powerpc/aim/trap.c
@@ -35,6 +35,7 @@
__FBSDID("$FreeBSD$");
#include "opt_hwpmc_hooks.h"
+#include "opt_kdtrace.h"
#include <sys/param.h>
#include <sys/kdb.h>
@@ -104,6 +105,33 @@ struct powerpc_exception {
char *name;
};
+#ifdef KDTRACE_HOOKS
+#include <sys/dtrace_bsd.h>
+
+/*
+ * This is a hook which is initialised by the dtrace module
+ * to handle traps which might occur during DTrace probe
+ * execution.
+ */
+dtrace_trap_func_t dtrace_trap_func;
+
+dtrace_doubletrap_func_t dtrace_doubletrap_func;
+
+/*
+ * This is a hook which is initialised by the systrace module
+ * when it is loaded. This keeps the DTrace syscall provider
+ * implementation opaque.
+ */
+systrace_probe_func_t systrace_probe_func;
+
+/*
+ * These hooks are necessary for the pid, usdt and fasttrap providers.
+ */
+dtrace_fasttrap_probe_ptr_t dtrace_fasttrap_probe_ptr;
+dtrace_pid_probe_ptr_t dtrace_pid_probe_ptr;
+dtrace_return_probe_ptr_t dtrace_return_probe_ptr;
+#endif
+
static struct powerpc_exception powerpc_exceptions[] = {
{ 0x0100, "system reset" },
{ 0x0200, "machine check" },
@@ -176,6 +204,28 @@ trap(struct trapframe *frame)
}
else
#endif
+#ifdef KDTRACE_HOOKS
+ /*
+ * A trap can occur while DTrace executes a probe. Before
+ * executing the probe, DTrace blocks re-scheduling and sets
+	 * a flag in its per-cpu flags to indicate that it doesn't
+ * want to fault. On returning from the probe, the no-fault
+ * flag is cleared and finally re-scheduling is enabled.
+ *
+ * If the DTrace kernel module has registered a trap handler,
+ * call it and if it returns non-zero, assume that it has
+ * handled the trap and modified the trap frame so that this
+ * function can return normally.
+ */
+ /*
+ * XXXDTRACE: add fasttrap and pid probes handlers here (if ever)
+ */
+ if (!user) {
+ if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
+ return;
+ }
+#endif
+
if (user) {
td->td_pticks = 0;
td->td_frame = frame;
@@ -617,6 +667,9 @@ trap_pfault(struct trapframe *frame, int user)
PROC_LOCK(p);
--p->p_lock;
PROC_UNLOCK(p);
+ /*
+ * XXXDTRACE: add dtrace_doubletrap_func here?
+ */
} else {
/*
* Don't have to worry about process locking or stacks in the
diff --git a/sys/powerpc/aim/trap_subr32.S b/sys/powerpc/aim/trap_subr32.S
index cf8d03a..a00cc3d 100644
--- a/sys/powerpc/aim/trap_subr32.S
+++ b/sys/powerpc/aim/trap_subr32.S
@@ -240,6 +240,26 @@
mfsprg2 %r2; /* restore r2 & r3 */ \
mfsprg3 %r3
+#ifdef KDTRACE_HOOKS
+ .data
+ .globl dtrace_invop_jump_addr
+ .align 4
+ .type dtrace_invop_jump_addr, @object
+ .size dtrace_invop_jump_addr, 4
+dtrace_invop_jump_addr:
+ .word 0
+ .word 0
+ .globl dtrace_invop_calltrap_addr
+ .align 4
+ .type dtrace_invop_calltrap_addr, @object
+ .size dtrace_invop_calltrap_addr, 4
+dtrace_invop_calltrap_addr:
+ .word 0
+ .word 0
+
+ .text
+#endif
+
/*
* The next two routines are 64-bit glue code. The first is used to test if
* we are on a 64-bit system. By copying it to the illegal instruction
diff --git a/sys/powerpc/aim/trap_subr64.S b/sys/powerpc/aim/trap_subr64.S
index 8243dc7..0a12753 100644
--- a/sys/powerpc/aim/trap_subr64.S
+++ b/sys/powerpc/aim/trap_subr64.S
@@ -274,6 +274,26 @@ restore_kernsrs:
mtsrr1 %r3; \
mfsprg3 %r3 /* restore r3 */
+#ifdef KDTRACE_HOOKS
+ .data
+ .globl dtrace_invop_jump_addr
+ .align 8
+ .type dtrace_invop_jump_addr, @object
+ .size dtrace_invop_jump_addr, 8
+dtrace_invop_jump_addr:
+ .word 0
+ .word 0
+ .globl dtrace_invop_calltrap_addr
+ .align 8
+ .type dtrace_invop_calltrap_addr, @object
+ .size dtrace_invop_calltrap_addr, 8
+dtrace_invop_calltrap_addr:
+ .word 0
+ .word 0
+
+ .text
+#endif
+
#ifdef SMP
/*
* Processor reset exception handler. These are typically
diff --git a/sys/powerpc/conf/GENERIC b/sys/powerpc/conf/GENERIC
index 91a49bb..7f31d43 100644
--- a/sys/powerpc/conf/GENERIC
+++ b/sys/powerpc/conf/GENERIC
@@ -24,6 +24,7 @@ ident GENERIC
machine powerpc powerpc
makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols
+makeoptions WITH_CTF=1
# Platform support
options POWERMAC #NewWorld Apple PowerMacs
@@ -68,6 +69,8 @@ options AUDIT # Security event auditing
options CAPABILITY_MODE # Capsicum capability mode
options CAPABILITIES # Capsicum capabilities
options MAC # TrustedBSD MAC Framework
+options KDTRACE_HOOKS # Kernel DTrace hooks
+options DDB_CTF # Kernel ELF linker loads CTF data
options INCLUDE_CONFIG_FILE # Include this file in kernel
# Debugging support. Always need this:
diff --git a/sys/powerpc/include/bat.h b/sys/powerpc/include/bat.h
index 27c6d5a..c674fb7 100644
--- a/sys/powerpc/include/bat.h
+++ b/sys/powerpc/include/bat.h
@@ -71,10 +71,12 @@
#ifndef _MACHINE_BAT_H_
#define _MACHINE_BAT_H_
+#ifndef LOCORE
struct bat {
u_int32_t batu;
u_int32_t batl;
};
+#endif
/* Lower BAT bits (all but PowerPC 601): */
#define BAT_PBS 0xfffe0000 /* physical block start */
@@ -165,7 +167,7 @@ struct bat {
#define BATL601(pa, size, v) \
(((pa) & BAT601_PBN) | (v) | (size))
-#ifdef _KERNEL
+#if defined(_KERNEL) && !defined(LOCORE)
extern struct bat battable[16];
#endif
diff --git a/sys/sys/_mutex.h b/sys/sys/_mutex.h
index 8670485..5da137a 100644
--- a/sys/sys/_mutex.h
+++ b/sys/sys/_mutex.h
@@ -36,12 +36,11 @@
/*
* Sleep/spin mutex.
*
- * The layout of the first 2 members of struct mtx* is considered fixed.
- * More specifically, it is assumed that there is a member called mtx_lock
- * for every struct mtx* and that other locking primitive structures are
- * not allowed to use such name for their members.
- * If this needs to change, the bits in the mutex implementation might be
- * modified appropriately.
+ * All mutex implementations must always have a member called mtx_lock.
+ * Other locking primitive structures are not allowed to use this name
+ * for their members.
+ * If this rule needs to change, the bits in the mutex implementation must
+ * be modified appropriately.
*/
struct mtx {
struct lock_object lock_object; /* Common lock properties. */
@@ -50,11 +49,12 @@ struct mtx {
/*
* Members of struct mtx_padalign must mirror members of struct mtx.
- * mtx_padalign mutexes can use mtx(9) KPI transparently, without modifies.
- * When using pad-aligned mutexes within structures, they should generally
- * stay as the first member of the struct. This is because otherwise the
- * compiler can generate ever more padding for the struct to keep a correct
- * alignment for the mutex.
+ * mtx_padalign mutexes can use the mtx(9) API transparently without
+ * modification.
+ * Pad-aligned mutexes used within structures should generally be the
+ * first member of the struct. Otherwise, the compiler can generate
+ * additional padding for the struct to keep a correct alignment for
+ * the mutex.
*/
struct mtx_padalign {
struct lock_object lock_object; /* Common lock properties. */
diff --git a/sys/sys/_rwlock.h b/sys/sys/_rwlock.h
index 7b8c6c0..c7cd572 100644
--- a/sys/sys/_rwlock.h
+++ b/sys/sys/_rwlock.h
@@ -37,12 +37,11 @@
/*
* Reader/writer lock.
*
- * The layout of the first 2 members of struct rwlock* is considered fixed.
- * More specifically, it is assumed that there is a member called rw_lock
- * for every struct rwlock* and that other locking primitive structures are
- * not allowed to use such name for their members.
- * If this needs to change, the bits in the rwlock implementation might be
- * modified appropriately.
+ * All reader/writer lock implementations must always have a member
+ * called rw_lock. Other locking primitive structures are not allowed to
+ * use this name for their members.
+ * If this rule needs to change, the bits in the reader/writer lock
+ * implementation must be modified appropriately.
*/
struct rwlock {
struct lock_object lock_object;
@@ -51,12 +50,12 @@ struct rwlock {
/*
* Members of struct rwlock_padalign must mirror members of struct rwlock.
- * rwlock_padalign rwlocks can use rwlock(9) KPI transparently, without
- * modifies.
- * When using pad-aligned rwlocks within structures, they should generally
- * stay as the first member of the struct. This is because otherwise the
- * compiler can generate ever more padding for the struct to keep a correct
- * alignment for the rwlock.
+ * rwlock_padalign rwlocks can use the rwlock(9) API transparently without
+ * modification.
+ * Pad-aligned rwlocks used within structures should generally be the
+ * first member of the struct. Otherwise, the compiler can generate
+ * additional padding for the struct to keep a correct alignment for
+ * the rwlock.
*/
struct rwlock_padalign {
struct lock_object lock_object;
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index 438a6a4..4d010ec 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -385,7 +385,7 @@ void __mnt_vnode_markerfree(struct vnode **mvp, struct mount *mp);
#define MNTK_SUSPEND 0x08000000 /* request write suspension */
#define MNTK_SUSPEND2 0x04000000 /* block secondary writes */
#define MNTK_SUSPENDED 0x10000000 /* write operations are suspended */
-#define MNTK_MPSAFE 0x20000000 /* Filesystem is MPSAFE. */
+#define MNTK_UNUSED25 0x20000000 /* --available-- */
#define MNTK_LOOKUP_SHARED 0x40000000 /* FS supports shared lock lookups */
#define MNTK_NOKNOTE 0x80000000 /* Don't send KNOTEs from VOP hooks */
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 4e65e63..d188761 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -88,6 +88,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
+#include <geom/geom.h>
+
#include <ddb/ddb.h>
#ifndef SOFTUPDATES
@@ -802,6 +804,7 @@ static void handle_written_jnewblk(struct jnewblk *);
static void handle_written_jblkdep(struct jblkdep *);
static void handle_written_jfreefrag(struct jfreefrag *);
static void complete_jseg(struct jseg *);
+static void complete_jsegs(struct jseg *);
static void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
static void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
static void jremref_write(struct jremref *, struct jseg *, uint8_t *);
@@ -974,7 +977,7 @@ static struct freework *newfreework(struct ufsmount *, struct freeblks *,
struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
static int jwait(struct worklist *, int);
static struct inodedep *inodedep_lookup_ip(struct inode *);
-static int bmsafemap_rollbacks(struct bmsafemap *);
+static int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
static struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
static void handle_jwork(struct workhead *);
static struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
@@ -1227,6 +1230,7 @@ static struct callout softdep_callout;
static int req_pending;
static int req_clear_inodedeps; /* syncer process flush some inodedeps */
static int req_clear_remove; /* syncer process flush some freeblks */
+static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */
/*
* runtime statistics
@@ -1310,6 +1314,8 @@ SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
&stat_cleanup_retries, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
&stat_cleanup_failures, 0, "");
+SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
+ &softdep_flushcache, 0, "");
SYSCTL_DECL(_vfs_ffs);
@@ -1789,7 +1795,7 @@ softdep_move_dependencies(oldbp, newbp)
while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
LIST_REMOVE(wk, wk_list);
if (wk->wk_type == D_BMSAFEMAP &&
- bmsafemap_rollbacks(WK_BMSAFEMAP(wk)))
+ bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
dirty = 1;
if (wktail == 0)
LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
@@ -3078,6 +3084,67 @@ softdep_flushjournal(mp)
FREE_LOCK(&lk);
}
+static void softdep_synchronize_completed(struct bio *);
+static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
+
+static void
+softdep_synchronize_completed(bp)
+ struct bio *bp;
+{
+ struct jseg *oldest;
+ struct jseg *jseg;
+
+ /*
+ * caller1 marks the last segment written before we issued the
+ * synchronize cache.
+ */
+ jseg = bp->bio_caller1;
+ oldest = NULL;
+ ACQUIRE_LOCK(&lk);
+ /*
+ * Mark all the journal entries waiting on the synchronize cache
+ * as completed so they may continue on.
+ */
+ while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
+ jseg->js_state |= COMPLETE;
+ oldest = jseg;
+ jseg = TAILQ_PREV(jseg, jseglst, js_next);
+ }
+ /*
+ * Restart deferred journal entry processing from the oldest
+ * completed jseg.
+ */
+ if (oldest)
+ complete_jsegs(oldest);
+
+ FREE_LOCK(&lk);
+ g_destroy_bio(bp);
+}
+
+/*
+ * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
+ * barriers. The journal must be written prior to any blocks that depend
+ * on it and the journal can not be released until the blocks have been
+ * written. This code handles both barriers simultaneously.
+ */
+static void
+softdep_synchronize(bp, ump, caller1)
+ struct bio *bp;
+ struct ufsmount *ump;
+ void *caller1;
+{
+
+ bp->bio_cmd = BIO_FLUSH;
+ bp->bio_flags |= BIO_ORDERED;
+ bp->bio_data = NULL;
+ bp->bio_offset = ump->um_cp->provider->mediasize;
+ bp->bio_length = 0;
+ bp->bio_done = softdep_synchronize_completed;
+ bp->bio_caller1 = caller1;
+ g_io_request(bp,
+ (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private);
+}
+
/*
* Flush some journal records to disk.
*/
@@ -3092,8 +3159,10 @@ softdep_process_journal(mp, needwk, flags)
struct worklist *wk;
struct jseg *jseg;
struct buf *bp;
+ struct bio *bio;
uint8_t *data;
struct fs *fs;
+ int shouldflush;
int segwritten;
int jrecmin; /* Minimum records per block. */
int jrecmax; /* Maximum records per block. */
@@ -3104,6 +3173,9 @@ softdep_process_journal(mp, needwk, flags)
if (MOUNTEDSUJ(mp) == 0)
return;
+ shouldflush = softdep_flushcache;
+ bio = NULL;
+ jseg = NULL;
ump = VFSTOUFS(mp);
fs = ump->um_fs;
jblocks = ump->softdep_jblocks;
@@ -3152,6 +3224,10 @@ softdep_process_journal(mp, needwk, flags)
LIST_INIT(&jseg->js_entries);
LIST_INIT(&jseg->js_indirs);
jseg->js_state = ATTACHED;
+ if (shouldflush == 0)
+ jseg->js_state |= COMPLETE;
+ else if (bio == NULL)
+ bio = g_alloc_bio();
jseg->js_jblocks = jblocks;
bp = geteblk(fs->fs_bsize, 0);
ACQUIRE_LOCK(&lk);
@@ -3284,6 +3360,17 @@ softdep_process_journal(mp, needwk, flags)
ACQUIRE_LOCK(&lk);
}
/*
+ * If we wrote a segment issue a synchronize cache so the journal
+ * is reflected on disk before the data is written. Since reclaiming
+ * journal space also requires writing a journal record this
+ * process also enforces a barrier before reclamation.
+ */
+ if (segwritten && shouldflush) {
+ softdep_synchronize(bio, ump,
+ TAILQ_LAST(&jblocks->jb_segs, jseglst));
+ } else if (bio)
+ g_destroy_bio(bio);
+ /*
* If we've suspended the filesystem because we ran out of journal
* space either try to sync it here to make some progress or
* unsuspend it if we already have.
@@ -3366,25 +3453,17 @@ complete_jseg(jseg)
}
/*
- * Mark a jseg as DEPCOMPLETE and throw away the buffer. Handle jseg
- * completions in order only.
+ * Determine which jsegs are ready for completion processing. Waits for
+ * synchronize cache to complete as well as forcing in-order completion
+ * of journal entries.
*/
static void
-handle_written_jseg(jseg, bp)
+complete_jsegs(jseg)
struct jseg *jseg;
- struct buf *bp;
{
struct jblocks *jblocks;
struct jseg *jsegn;
- if (jseg->js_refs == 0)
- panic("handle_written_jseg: No self-reference on %p", jseg);
- jseg->js_state |= DEPCOMPLETE;
- /*
- * We'll never need this buffer again, set flags so it will be
- * discarded.
- */
- bp->b_flags |= B_INVAL | B_NOCACHE;
jblocks = jseg->js_jblocks;
/*
* Don't allow out of order completions. If this isn't the first
@@ -3393,12 +3472,12 @@ handle_written_jseg(jseg, bp)
if (jseg != jblocks->jb_writeseg)
return;
/* Iterate through available jsegs processing their entries. */
- do {
+ while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
jblocks->jb_oldestwrseq = jseg->js_oldseq;
jsegn = TAILQ_NEXT(jseg, js_next);
complete_jseg(jseg);
jseg = jsegn;
- } while (jseg && jseg->js_state & DEPCOMPLETE);
+ }
jblocks->jb_writeseg = jseg;
/*
* Attempt to free jsegs now that oldestwrseq may have advanced.
@@ -3406,6 +3485,27 @@ handle_written_jseg(jseg, bp)
free_jsegs(jblocks);
}
+/*
+ * Mark a jseg as DEPCOMPLETE and throw away the buffer. Attempt to handle
+ * the final completions.
+ */
+static void
+handle_written_jseg(jseg, bp)
+ struct jseg *jseg;
+ struct buf *bp;
+{
+
+ if (jseg->js_refs == 0)
+ panic("handle_written_jseg: No self-reference on %p", jseg);
+ jseg->js_state |= DEPCOMPLETE;
+ /*
+ * We'll never need this buffer again, set flags so it will be
+ * discarded.
+ */
+ bp->b_flags |= B_INVAL | B_NOCACHE;
+ complete_jsegs(jseg);
+}
+
static inline struct jsegdep *
inoref_jseg(inoref)
struct inoref *inoref;
@@ -4191,8 +4291,16 @@ free_jsegs(jblocks)
jblocks->jb_oldestseg = jseg;
return;
}
- if (!LIST_EMPTY(&jseg->js_indirs) &&
- jseg->js_seq >= jblocks->jb_oldestwrseq)
+ if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
+ break;
+ if (jseg->js_seq > jblocks->jb_oldestwrseq)
+ break;
+ /*
+ * We can free jsegs that didn't write entries when
+ * oldestwrseq == js_seq.
+ */
+ if (jseg->js_seq == jblocks->jb_oldestwrseq &&
+ jseg->js_cnt != 0)
break;
free_jseg(jseg, jblocks);
}
@@ -5065,9 +5173,15 @@ jnewblk_merge(new, old, wkhd)
return (new);
/* Replace a jfreefrag with a jnewblk. */
if (new->wk_type == D_JFREEFRAG) {
+ if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno)
+ panic("jnewblk_merge: blkno mismatch: %p, %p",
+ old, new);
cancel_jfreefrag(WK_JFREEFRAG(new));
return (old);
}
+ if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK)
+ panic("jnewblk_merge: Bad type: old %d new %d\n",
+ old->wk_type, new->wk_type);
/*
* Handle merging of two jnewblk records that describe
* different sets of fragments in the same block.
@@ -10396,7 +10510,7 @@ initiate_write_bmsafemap(bmsafemap, bp)
ino_t ino;
if (bmsafemap->sm_state & IOSTARTED)
- panic("initiate_write_bmsafemap: Already started\n");
+ return;
bmsafemap->sm_state |= IOSTARTED;
/*
* Clear any inode allocations which are pending journal writes.
@@ -10407,10 +10521,6 @@ initiate_write_bmsafemap(bmsafemap, bp)
inosused = cg_inosused(cgp);
LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) {
ino = jaddref->ja_ino % fs->fs_ipg;
- /*
- * If this is a background copy the inode may not
- * be marked used yet.
- */
if (isset(inosused, ino)) {
if ((jaddref->ja_mode & IFMT) == IFDIR)
cgp->cg_cs.cs_ndir--;
@@ -10419,7 +10529,7 @@ initiate_write_bmsafemap(bmsafemap, bp)
jaddref->ja_state &= ~ATTACHED;
jaddref->ja_state |= UNDONE;
stat_jaddref++;
- } else if ((bp->b_xflags & BX_BKGRDMARKER) == 0)
+ } else
panic("initiate_write_bmsafemap: inode %ju "
"marked free", (uintmax_t)jaddref->ja_ino);
}
@@ -10434,9 +10544,8 @@ initiate_write_bmsafemap(bmsafemap, bp)
LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
if (jnewblk_rollback(jnewblk, fs, cgp, blksfree))
continue;
- if ((bp->b_xflags & BX_BKGRDMARKER) == 0)
- panic("initiate_write_bmsafemap: block %jd "
- "marked free", jnewblk->jn_blkno);
+ panic("initiate_write_bmsafemap: block %jd "
+ "marked free", jnewblk->jn_blkno);
}
}
/*
@@ -11171,12 +11280,24 @@ diradd_inode_written(dap, inodedep)
* only be called with lk and the buf lock on the cg held.
*/
static int
-bmsafemap_rollbacks(bmsafemap)
+bmsafemap_backgroundwrite(bmsafemap, bp)
struct bmsafemap *bmsafemap;
+ struct buf *bp;
{
+ int dirty;
- return (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
- !LIST_EMPTY(&bmsafemap->sm_jnewblkhd));
+ dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
+ !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
+ /*
+ * If we're initiating a background write we need to process the
+ * rollbacks as they exist now, not as they exist when IO starts.
+ * No other consumers will look at the contents of the shadowed
+ * buf so this is safe to do here.
+ */
+ if (bp->b_xflags & BX_BKGRDMARKER)
+ initiate_write_bmsafemap(bmsafemap, bp);
+
+ return (dirty);
}
/*
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index cc8d826..cc54ece 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -1063,8 +1063,8 @@ ffs_mountfs(devvp, mp, td)
* Initialize filesystem stat information in mount struct.
*/
MNT_ILOCK(mp);
- mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
- MNTK_EXTENDED_SHARED | MNTK_NO_IOPF;
+ mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
+ MNTK_NO_IOPF;
MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index da96bab..034754a 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -3975,32 +3975,20 @@ vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
#include <ddb/ddb.h>
-/*
- * vm_map_print: [ debug ]
- */
-DB_SHOW_COMMAND(map, vm_map_print)
+static void
+vm_map_print(vm_map_t map)
{
- static int nlines;
- /* XXX convert args. */
- vm_map_t map = (vm_map_t)addr;
- boolean_t full = have_addr;
-
vm_map_entry_t entry;
db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
(void *)map,
(void *)map->pmap, map->nentries, map->timestamp);
- nlines++;
-
- if (!full && db_indent)
- return;
db_indent += 2;
for (entry = map->header.next; entry != &map->header;
entry = entry->next) {
db_iprintf("map entry %p: start=%p, end=%p\n",
(void *)entry, (void *)entry->start, (void *)entry->end);
- nlines++;
{
static char *inheritance_name[4] =
{"share", "copy", "none", "donate_copy"};
@@ -4016,14 +4004,11 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_printf(", share=%p, offset=0x%jx\n",
(void *)entry->object.sub_map,
(uintmax_t)entry->offset);
- nlines++;
if ((entry->prev == &map->header) ||
(entry->prev->object.sub_map !=
entry->object.sub_map)) {
db_indent += 2;
- vm_map_print((db_expr_t)(intptr_t)
- entry->object.sub_map,
- full, 0, (char *)0);
+ vm_map_print((vm_map_t)entry->object.sub_map);
db_indent -= 2;
}
} else {
@@ -4040,7 +4025,6 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_printf(", copy (%s)",
(entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
db_printf("\n");
- nlines++;
if ((entry->prev == &map->header) ||
(entry->prev->object.vm_object !=
@@ -4048,17 +4032,23 @@ DB_SHOW_COMMAND(map, vm_map_print)
db_indent += 2;
vm_object_print((db_expr_t)(intptr_t)
entry->object.vm_object,
- full, 0, (char *)0);
- nlines += 4;
+ 1, 0, (char *)0);
db_indent -= 2;
}
}
}
db_indent -= 2;
- if (db_indent == 0)
- nlines = 0;
}
+DB_SHOW_COMMAND(map, map)
+{
+
+ if (!have_addr) {
+ db_printf("usage: show map <addr>\n");
+ return;
+ }
+ vm_map_print((vm_map_t)addr);
+}
DB_SHOW_COMMAND(procvm, procvm)
{
@@ -4074,7 +4064,7 @@ DB_SHOW_COMMAND(procvm, procvm)
(void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
(void *)vmspace_pmap(p->p_vmspace));
- vm_map_print((db_expr_t)(intptr_t)&p->p_vmspace->vm_map, 1, 0, NULL);
+ vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
}
#endif /* DDB */
diff --git a/tools/regression/bin/sh/builtins/alias3.0 b/tools/regression/bin/sh/builtins/alias3.0
new file mode 100644
index 0000000..fe65e31f
--- /dev/null
+++ b/tools/regression/bin/sh/builtins/alias3.0
@@ -0,0 +1,12 @@
+# $FreeBSD$
+set -e
+
+unalias -a
+alias foo=bar
+alias bar=
+alias quux="1 2 3"
+alias foo=bar
+alias bar=
+alias quux="1 2 3"
+alias
+alias foo
diff --git a/tools/regression/bin/sh/builtins/alias3.0.stdout b/tools/regression/bin/sh/builtins/alias3.0.stdout
new file mode 100644
index 0000000..52efaf0
--- /dev/null
+++ b/tools/regression/bin/sh/builtins/alias3.0.stdout
@@ -0,0 +1,4 @@
+bar=''
+foo=bar
+quux='1 2 3'
+foo=bar
diff --git a/tools/regression/bin/sh/parser/alias10.0 b/tools/regression/bin/sh/parser/alias10.0
new file mode 100644
index 0000000..264f5c5
--- /dev/null
+++ b/tools/regression/bin/sh/parser/alias10.0
@@ -0,0 +1,5 @@
+# $FreeBSD$
+
+alias echo='echo'
+alias echo='echo'
+[ "`eval echo b`" = b ]
diff --git a/tools/regression/bin/sh/parser/alias9.0 b/tools/regression/bin/sh/parser/alias9.0
new file mode 100644
index 0000000..6bd8808
--- /dev/null
+++ b/tools/regression/bin/sh/parser/alias9.0
@@ -0,0 +1,6 @@
+# $FreeBSD$
+
+alias alias0=:
+alias alias0=exit
+eval 'alias0 0'
+exit 1
diff --git a/usr.bin/calendar/calendars/calendar.history b/usr.bin/calendar/calendars/calendar.history
index de8fb00..a05a6ff 100644
--- a/usr.bin/calendar/calendars/calendar.history
+++ b/usr.bin/calendar/calendars/calendar.history
@@ -411,7 +411,7 @@
11/09 Roosevelt establishes the Civil Works Administration, 1933
11/10 41 Women arrested in suffragette demonstrations near White House, 1917
11/10 Cpt. Wirz, commandant of Andersonville Prison hanged, 1865
-11/10 Henry Stanley asks David Livingston, "Dr. Livingston, I presume?", 1871
+11/10 Henry Stanley asks David Livingstone, "Dr. Livingstone, I presume?", 1871
11/11 Washington becomes the 42nd state, 1889
11/12 Dr. Sun Yat-sen's Birthday in Taiwan
11/12 USA first exports oil to Europe, 1861
diff --git a/usr.bin/clang/clang/Makefile b/usr.bin/clang/clang/Makefile
index 7c8f0e4..0c3a02a 100644
--- a/usr.bin/clang/clang/Makefile
+++ b/usr.bin/clang/clang/Makefile
@@ -20,9 +20,11 @@ MLINKS= clang.1 clang++.1 \
.if ${MK_CLANG_IS_CC} != "no"
LINKS+= ${BINDIR}/clang ${BINDIR}/cc \
${BINDIR}/clang ${BINDIR}/c++ \
+ ${BINDIR}/clang ${BINDIR}/CC \
${BINDIR}/clang ${BINDIR}/cpp
MLINKS+= clang.1 cc.1 \
clang.1 c++.1 \
+ clang.1 CC.1 \
clang.1 cpp.1
.endif
diff --git a/usr.bin/locale/locale.1 b/usr.bin/locale/locale.1
index 144f280..6e6bf29 100644
--- a/usr.bin/locale/locale.1
+++ b/usr.bin/locale/locale.1
@@ -40,7 +40,7 @@
.Op Ar prefix
.Nm
.Op Fl ck
-.Ar keyword ...
+.Op Ar keyword ...
.Sh DESCRIPTION
The
.Nm
@@ -74,8 +74,12 @@ directory.
Print names of all available charmaps.
.It Fl k
Print the names and values of all selected keywords.
+If no keywords are selected, print the names and values of all defined
+keywords.
.It Fl c
Print the category name for all selected keywords.
+If no keywords are selected, print the category name for all defined
+keywords.
.El
.Sh IMPLEMENTATION NOTES
The special
diff --git a/usr.bin/locale/locale.c b/usr.bin/locale/locale.c
index cad3afe..85e6d7f 100644
--- a/usr.bin/locale/locale.c
+++ b/usr.bin/locale/locale.c
@@ -31,7 +31,7 @@
* nl_langinfo(3) extensions)
*
* XXX: correctly handle reserved 'charmap' keyword and '-m' option (require
- * localedef(1) implementation). Currently it's handled via
+ * localedef(1) implementation). Currently it's handled via
* nl_langinfo(CODESET).
*/
@@ -79,32 +79,32 @@ struct _lcinfo {
{ "LC_MONETARY", LC_MONETARY },
{ "LC_MESSAGES", LC_MESSAGES }
};
-#define NLCINFO (sizeof(lcinfo)/sizeof(lcinfo[0]))
+#define NLCINFO (sizeof(lcinfo)/sizeof(lcinfo[0]))
/* ids for values not referenced by nl_langinfo() */
#define KW_ZERO 10000
#define KW_GROUPING (KW_ZERO+1)
-#define KW_INT_CURR_SYMBOL (KW_ZERO+2)
-#define KW_CURRENCY_SYMBOL (KW_ZERO+3)
-#define KW_MON_DECIMAL_POINT (KW_ZERO+4)
-#define KW_MON_THOUSANDS_SEP (KW_ZERO+5)
-#define KW_MON_GROUPING (KW_ZERO+6)
-#define KW_POSITIVE_SIGN (KW_ZERO+7)
-#define KW_NEGATIVE_SIGN (KW_ZERO+8)
-#define KW_INT_FRAC_DIGITS (KW_ZERO+9)
-#define KW_FRAC_DIGITS (KW_ZERO+10)
-#define KW_P_CS_PRECEDES (KW_ZERO+11)
-#define KW_P_SEP_BY_SPACE (KW_ZERO+12)
-#define KW_N_CS_PRECEDES (KW_ZERO+13)
-#define KW_N_SEP_BY_SPACE (KW_ZERO+14)
-#define KW_P_SIGN_POSN (KW_ZERO+15)
-#define KW_N_SIGN_POSN (KW_ZERO+16)
-#define KW_INT_P_CS_PRECEDES (KW_ZERO+17)
-#define KW_INT_P_SEP_BY_SPACE (KW_ZERO+18)
-#define KW_INT_N_CS_PRECEDES (KW_ZERO+19)
-#define KW_INT_N_SEP_BY_SPACE (KW_ZERO+20)
-#define KW_INT_P_SIGN_POSN (KW_ZERO+21)
-#define KW_INT_N_SIGN_POSN (KW_ZERO+22)
+#define KW_INT_CURR_SYMBOL (KW_ZERO+2)
+#define KW_CURRENCY_SYMBOL (KW_ZERO+3)
+#define KW_MON_DECIMAL_POINT (KW_ZERO+4)
+#define KW_MON_THOUSANDS_SEP (KW_ZERO+5)
+#define KW_MON_GROUPING (KW_ZERO+6)
+#define KW_POSITIVE_SIGN (KW_ZERO+7)
+#define KW_NEGATIVE_SIGN (KW_ZERO+8)
+#define KW_INT_FRAC_DIGITS (KW_ZERO+9)
+#define KW_FRAC_DIGITS (KW_ZERO+10)
+#define KW_P_CS_PRECEDES (KW_ZERO+11)
+#define KW_P_SEP_BY_SPACE (KW_ZERO+12)
+#define KW_N_CS_PRECEDES (KW_ZERO+13)
+#define KW_N_SEP_BY_SPACE (KW_ZERO+14)
+#define KW_P_SIGN_POSN (KW_ZERO+15)
+#define KW_N_SIGN_POSN (KW_ZERO+16)
+#define KW_INT_P_CS_PRECEDES (KW_ZERO+17)
+#define KW_INT_P_SEP_BY_SPACE (KW_ZERO+18)
+#define KW_INT_N_CS_PRECEDES (KW_ZERO+19)
+#define KW_INT_N_SEP_BY_SPACE (KW_ZERO+20)
+#define KW_INT_P_SIGN_POSN (KW_ZERO+21)
+#define KW_INT_N_SIGN_POSN (KW_ZERO+22)
struct _kwinfo {
const char *name;
@@ -218,7 +218,7 @@ struct _kwinfo {
"(POSIX legacy)" } /* compat */
};
-#define NKWINFO (sizeof(kwinfo)/sizeof(kwinfo[0]))
+#define NKWINFO (sizeof(kwinfo)/sizeof(kwinfo[0]))
const char *boguslocales[] = { "UTF-8" };
#define NBOGUS (sizeof(boguslocales)/sizeof(boguslocales[0]))
@@ -253,12 +253,10 @@ main(int argc, char *argv[])
/* validate arguments */
if (all_locales && all_charmaps)
usage();
- if ((all_locales || all_charmaps) && argc > 0)
+ if ((all_locales || all_charmaps) && argc > 0)
usage();
if ((all_locales || all_charmaps) && (prt_categories || prt_keywords))
usage();
- if ((prt_categories || prt_keywords) && argc <= 0)
- usage();
/* process '-a' */
if (all_locales) {
@@ -282,12 +280,18 @@ main(int argc, char *argv[])
}
/* process '-c' and/or '-k' */
- if (prt_categories || prt_keywords || argc > 0) {
- setlocale(LC_ALL, "");
- while (argc > 0) {
- showdetails(*argv);
- argv++;
- argc--;
+ if (prt_categories || prt_keywords) {
+ if (argc > 0) {
+ setlocale(LC_ALL, "");
+ while (argc > 0) {
+ showdetails(*argv);
+ argv++;
+ argc--;
+ }
+ } else {
+ uint i;
+ for (i = 0; i < sizeof (kwinfo) / sizeof (struct _kwinfo); i++)
+ showdetails ((char *)kwinfo [i].name);
}
exit(0);
}
@@ -302,8 +306,8 @@ void
usage(void)
{
printf("Usage: locale [ -a | -m ]\n"
- " locale -k list [prefix]\n"
- " locale [ -ck ] keyword ...\n");
+ " locale -k list [prefix]\n"
+ " locale [ -ck ] [keyword ...]\n");
exit(1);
}
@@ -423,10 +427,10 @@ init_locales_list(void)
}
closedir(dirp);
- /* make sure that 'POSIX' and 'C' locales are present in the list.
+ /* make sure that 'POSIX' and 'C' locales are present in the list.
* POSIX 1003.1-2001 requires presence of 'POSIX' name only here, but
- * we also list 'C' for constistency
- */
+ * we also list 'C' for constistency
+ */
if (sl_find(locales, "POSIX") == NULL)
sl_add(locales, "POSIX");
@@ -612,7 +616,10 @@ showdetails(char *kw)
}
if (prt_categories) {
- printf("%s\n", lookup_localecat(cat));
+ if (prt_keywords)
+ printf("%-20s ", lookup_localecat(cat));
+ else
+ printf("%-20s\t%s\n", kw, lookup_localecat(cat));
}
if (prt_keywords) {
@@ -657,7 +664,7 @@ showkeywordslist(char *substring)
{
size_t i;
-#define FMT "%-20s %-12s %-7s %-20s\n"
+#define FMT "%-20s %-12s %-7s %-20s\n"
if (substring == NULL)
printf("List of available keywords\n\n");
diff --git a/usr.bin/ssh-copy-id/ssh-copy-id.1 b/usr.bin/ssh-copy-id/ssh-copy-id.1
index 2b2bbc2..caefe5c 100644
--- a/usr.bin/ssh-copy-id/ssh-copy-id.1
+++ b/usr.bin/ssh-copy-id/ssh-copy-id.1
@@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd October 3, 2012
+.Dd November 11, 2012
.Dt SSH-COPY-ID 1
.Os
.Sh NAME
@@ -42,8 +42,8 @@
The
.Nm
utility copies public keys to a remote host's
-.Pa authorized_keys
-file.
+.Pa ~/.ssh/authorized_keys
+file (creating the file and directory, if required).
.Pp
The following options are available:
.Bl -tag -width indent
@@ -71,6 +71,8 @@ default.
.Pp
The remaining arguments are a list of remote hosts to connect to,
each one optionally qualified by a user name.
+.Sh EXIT STATUS
+.Ex -std
.Sh HISTORY
The
.Nm
diff --git a/usr.bin/ssh-copy-id/ssh-copy-id.sh b/usr.bin/ssh-copy-id/ssh-copy-id.sh
index 8f087d3..94429de 100755
--- a/usr.bin/ssh-copy-id/ssh-copy-id.sh
+++ b/usr.bin/ssh-copy-id/ssh-copy-id.sh
@@ -34,19 +34,18 @@ usage() {
sendkey() {
local h="$1"
- shift 1
- local k="$@"
- echo "$k" | ssh $port -S none $options "$user$h" /bin/sh -c \''
- set -e;
- umask 077;
- keyfile=$HOME/.ssh/authorized_keys ;
- mkdir -p $HOME/.ssh/ ;
- while read alg key comment ; do
- if ! grep -sqwF "$key" "$keyfile"; then
- echo "$alg $key $comment" |
- tee -a "$keyfile" >/dev/null ;
- fi ;
- done
+ local k="$2"
+ printf "%s\n" "$k" | ssh $port -S none $options "$user$h" /bin/sh -c \'' \
+ set -e; \
+ umask 077; \
+ keyfile=$HOME/.ssh/authorized_keys ; \
+ mkdir -p -- "$HOME/.ssh/" ; \
+ while read alg key comment ; do \
+ [ -n "$key" ] || continue; \
+ if ! grep -sqwF "$key" "$keyfile"; then \
+ printf "$alg $key $comment\n" >> "$keyfile" ; \
+ fi ; \
+ done \
'\'
}
@@ -63,12 +62,17 @@ nl="
"
options=""
+IFS=$nl
+
while getopts 'i:lo:p:' arg; do
case $arg in
i)
hasarg="x"
- if [ -f "$OPTARG" ]; then
- keys="$(cat $OPTARG)$nl$keys"
+ if [ -r "$OPTARG" ]; then
+ keys="$(cat -- "$OPTARG")$nl$keys"
+ else
+ echo "File $OPTARG not found" >&2
+ exit 1
fi
;;
l)
@@ -76,10 +80,10 @@ while getopts 'i:lo:p:' arg; do
agentKeys
;;
p)
- port="-p $OPTARG"
+ port=-p$nl$OPTARG
;;
o)
- options="$options -o '$OPTARG'"
+ options=$options$nl-o$nl$OPTARG
;;
*)
usage
@@ -92,11 +96,11 @@ shift $((OPTIND-1))
if [ -z "$hasarg" ]; then
agentKeys
fi
-if [ -z "$keys" -o "$keys" = "$nl" ]; then
+if [ -z "$keys" ] || [ "$keys" = "$nl" ]; then
echo "no keys found" >&2
exit 1
fi
-if [ -z "$@" ]; then
+if [ "$#" -eq 0 ]; then
usage
fi
diff --git a/usr.bin/top/machine.c b/usr.bin/top/machine.c
index 236cb554..4284838 100644
--- a/usr.bin/top/machine.c
+++ b/usr.bin/top/machine.c
@@ -225,7 +225,7 @@ long percentages();
char *ordernames[] = {
"cpu", "size", "res", "time", "pri", "threads",
"total", "read", "write", "fault", "vcsw", "ivcsw",
- "jid", NULL
+ "jid", "pid", NULL
};
#endif
diff --git a/usr.sbin/wpa/wpa_supplicant/wpa_supplicant.8 b/usr.sbin/wpa/wpa_supplicant/wpa_supplicant.8
index 4858410..466759d 100644
--- a/usr.sbin/wpa/wpa_supplicant/wpa_supplicant.8
+++ b/usr.sbin/wpa/wpa_supplicant/wpa_supplicant.8
@@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
-.Dd March 24, 2008
+.Dd November 7, 2012
.Dt WPA_SUPPLICANT 8
.Os
.Sh NAME
@@ -32,9 +32,26 @@
.Nd "WPA/802.11i Supplicant for wireless network devices"
.Sh SYNOPSIS
.Nm
-.Op Fl BdehLqsvw
+.Op Fl BdhKLqstuvW
+.Op Fl b Ar br_ifname
+.Fl c Ar config-file
+.Op Fl C Ar ctrl
+.Op Fl D Ar driver
+.Op Fl f Ar debug file
+.Op Fl g Ar global ctrl
+.Fl i Ar ifname
+.Op Fl o Ar override driver
+.Op Fl O Ar override ctrl
+.Op Fl P Ar pid file
+.Oo Fl N
.Fl i Ar ifname
.Fl c Ar config-file
+.Op Fl C Ar ctrl
+.Op Fl D driver
+.Op Fl p Ar driver_param
+.Op Fl b Ar br_ifname
+.No ...
+.Oc
.Sh DESCRIPTION
The
.Nm
@@ -87,15 +104,36 @@ utility, using
.Sh OPTIONS
The following options are available:
.Bl -tag -width indent
+.It Fl b
+Optional bridge interface name.
+.It Fl B
+Detach from the controlling terminal and run as a daemon process
+in the background.
.It Fl d
Enable debugging messages.
If this option is supplied twice, more verbose messages are displayed.
-.It Fl e
-Use an external IEEE 802.1X Supplicant program and disable the
-internal Supplicant.
-This option is not normally used.
+.It Fl D
+Driver name (can be multiple drivers: nl80211,wext).
+.It Fl f
+Log output to debug file instead of stdout.
+.It Fl g
+Global ctrl_interface.
.It Fl h
Show help text.
+.It Fl K
+Include key information in debugging output.
+.It Fl L
+Display the license for this program on the terminal and exit.
+.It Fl N
+Start describing a new interface.
+.It Fl o
+Overrides driver parameter for new interfaces.
+.It Fl O
+Override ctrl_interface parameter for new interfaces.
+.It Fl p
+Specify driver parameters.
+.It Fl P
+File in which to save the process PID.
.It Fl q
Decrease debugging verbosity (i.e., counteract the use of the
.Fl d
@@ -104,23 +142,14 @@ flag).
Send log messages through
.Xr syslog 3
instead of to the terminal.
+.It Fl t
+Include timestamp in debug messages.
+.It Fl u
+Enable DBus control interface.
.It Fl v
Display version information on the terminal and exit.
-.It Fl w
-If the specified interface is not present, wait for it to be
-added; e.g.\& a cardbus device to be inserted.
-This option is not normally used; instead,
-.Xr devd 8
-should be configured to launch
-.Nm
-when a device is created.
-.It Fl B
-Detach from the controlling terminal and run as a daemon process
-in the background.
-.It Fl K
-Include key information in debugging output.
-.It Fl L
-Display the license for this program on the terminal and exit.
+.It Fl W
+Wait for a control interface monitor before starting.
.El
.Sh SEE ALSO
.Xr an 4 ,