author     Renato Botelho <renato@netgate.com>  2016-05-13 14:13:03 -0300
committer  Renato Botelho <renato@netgate.com>  2016-05-13 14:13:03 -0300
commit     20eb25b64d4b00d8a767f0be4b5a66c82ee1278b (patch)
tree       fc6b544ee4a17bb2ffe27d7e62feda1a259b6229
parent     59c1206f1c28817ab7b7333b0ccec93dbf82ae5d (diff)
parent     ee023106504311121dda8696b3ebe0aaea736251 (diff)
Merge remote-tracking branch 'origin/master' into devel-11
-rw-r--r--  contrib/jemalloc/ChangeLog  | 48
-rw-r--r--  contrib/jemalloc/FREEBSD-diffs  | 57
-rw-r--r--  contrib/jemalloc/VERSION  | 2
-rw-r--r--  contrib/jemalloc/doc/jemalloc.3  | 53
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/arena.h  | 387
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/base.h  | 11
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/bitmap.h  | 6
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk.h  | 40
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/chunk_dss.h  | 16
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ckh.h  | 8
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ctl.h  | 25
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/extent.h  | 2
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/hash.h  | 4
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/huge.h  | 21
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h  | 194
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h  | 9
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/mb.h  | 10
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/mutex.h  | 53
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/nstime.h  | 2
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/pages.h  | 5
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/ph.h  | 345
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/private_namespace.h  | 103
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/prof.h  | 85
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/rtree.h  | 160
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/stats.h  | 8
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tcache.h  | 45
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/tsd.h  | 86
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/util.h  | 12
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/valgrind.h  | 16
-rw-r--r--  contrib/jemalloc/include/jemalloc/internal/witness.h  | 249
-rw-r--r--  contrib/jemalloc/include/jemalloc/jemalloc.h  | 14
-rw-r--r--  contrib/jemalloc/src/arena.c  | 1158
-rw-r--r--  contrib/jemalloc/src/base.c  | 49
-rw-r--r--  contrib/jemalloc/src/bitmap.c  | 11
-rw-r--r--  contrib/jemalloc/src/chunk.c  | 238
-rw-r--r--  contrib/jemalloc/src/chunk_dss.c  | 48
-rw-r--r--  contrib/jemalloc/src/chunk_mmap.c  | 12
-rw-r--r--  contrib/jemalloc/src/ckh.c  | 44
-rw-r--r--  contrib/jemalloc/src/ctl.c  | 442
-rw-r--r--  contrib/jemalloc/src/huge.c  | 201
-rw-r--r--  contrib/jemalloc/src/jemalloc.c  | 863
-rw-r--r--  contrib/jemalloc/src/mutex.c  | 21
-rw-r--r--  contrib/jemalloc/src/nstime.c  | 4
-rw-r--r--  contrib/jemalloc/src/pages.c  | 116
-rw-r--r--  contrib/jemalloc/src/prof.c  | 674
-rw-r--r--  contrib/jemalloc/src/quarantine.c  | 46
-rw-r--r--  contrib/jemalloc/src/rtree.c  | 2
-rw-r--r--  contrib/jemalloc/src/stats.c  | 14
-rw-r--r--  contrib/jemalloc/src/tcache.c  | 131
-rw-r--r--  contrib/jemalloc/src/tsd.c  | 22
-rw-r--r--  contrib/jemalloc/src/util.c  | 16
-rw-r--r--  contrib/jemalloc/src/witness.c  | 136
-rw-r--r--  contrib/libarchive/cpio/bsdcpio.1  | 3
-rw-r--r--  contrib/libarchive/cpio/cpio.c  | 2
-rw-r--r--  include/stdio.h  | 10
-rw-r--r--  lib/libc/stdio/Symbol.map  | 1
-rw-r--r--  lib/libc/stdlib/jemalloc/Makefile.inc  | 2
-rw-r--r--  lib/libc/tests/nss/testutil.h  | 33
-rw-r--r--  lib/libutil/quotafile.c  | 2
-rw-r--r--  libexec/ftpd/ftpd.c  | 6
-rw-r--r--  release/doc/en_US.ISO8859-1/relnotes/article.xml  | 4
-rw-r--r--  share/misc/committers-src.dot  | 1
-rw-r--r--  sys/arm64/arm64/busdma_bounce.c  | 221
-rw-r--r--  sys/arm64/include/cpufunc.h  | 5
-rw-r--r--  sys/boot/geli/geliboot.c  | 2
-rw-r--r--  sys/boot/geli/geliboot.h  | 3
-rw-r--r--  sys/boot/i386/common/cons.h  | 1
-rw-r--r--  sys/boot/i386/zfsboot/zfsboot.c  | 3
-rw-r--r--  sys/cddl/boot/zfs/lz4.c  | 2
-rw-r--r--  sys/compat/linuxkpi/common/include/asm/pgtable.h  | 2
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/compiler.h  | 14
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/device.h  | 10
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/err.h  | 6
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/errno.h  | 19
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/ioctl.h  | 4
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/jiffies.h  | 34
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/kdev_t.h  | 6
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/kernel.h  | 100
-rw-r--r--  sys/compat/linuxkpi/common/include/linux/page.h  | 25
-rw-r--r--  sys/conf/files  | 8
-rw-r--r--  sys/conf/files.amd64  | 1
-rw-r--r--  sys/conf/kmod.mk  | 4
-rw-r--r--  sys/dev/bxe/bxe.c  | 7
-rw-r--r--  sys/dev/gpio/gpiobus.c  | 24
-rw-r--r--  sys/dev/gpio/gpiobusvar.h  | 1
-rw-r--r--  sys/dev/gpio/gpiokeys.c  | 17
-rw-r--r--  sys/dev/gpio/ofw_gpiobus.c  | 7
-rw-r--r--  sys/dev/ixl/i40e_adminq.c  | 63
-rw-r--r--  sys/dev/ixl/i40e_adminq.h  | 4
-rw-r--r--  sys/dev/ixl/i40e_adminq_cmd.h  | 38
-rw-r--r--  sys/dev/ixl/i40e_common.c  | 576
-rw-r--r--  sys/dev/ixl/i40e_osdep.c  | 22
-rw-r--r--  sys/dev/ixl/i40e_osdep.h  | 23
-rw-r--r--  sys/dev/ixl/i40e_prototype.h  | 22
-rw-r--r--  sys/dev/ixl/i40e_register.h  | 16
-rw-r--r--  sys/dev/ixl/i40e_type.h  | 17
-rw-r--r--  sys/dev/ixl/if_ixl.c  | 792
-rw-r--r--  sys/dev/ixl/if_ixlv.c  | 11
-rw-r--r--  sys/dev/ixl/ixl.h  | 26
-rw-r--r--  sys/dev/ixl/ixl_txrx.c  | 13
-rw-r--r--  sys/dev/ixl/ixlv.h  | 2
-rw-r--r--  sys/dev/ixl/ixlvc.c  | 48
-rw-r--r--  sys/dev/mmc/host/dwmmc.c  | 7
-rw-r--r--  sys/dev/mrsas/mrsas.c  | 690
-rw-r--r--  sys/dev/mrsas/mrsas.h  | 63
-rw-r--r--  sys/dev/mrsas/mrsas_cam.c  | 282
-rw-r--r--  sys/dev/mrsas/mrsas_fp.c  | 21
-rw-r--r--  sys/dev/mrsas/mrsas_ioctl.c  | 25
-rw-r--r--  sys/dev/pci/pci_host_generic.c  | 1
-rw-r--r--  sys/dev/sfxge/common/ef10_ev.c (renamed from sys/dev/sfxge/common/hunt_ev.c)  | 4
-rw-r--r--  sys/dev/sfxge/common/ef10_filter.c (renamed from sys/dev/sfxge/common/hunt_filter.c)  | 68
-rw-r--r--  sys/dev/sfxge/common/ef10_intr.c (renamed from sys/dev/sfxge/common/hunt_intr.c)  | 4
-rw-r--r--  sys/dev/sfxge/common/ef10_mac.c (renamed from sys/dev/sfxge/common/hunt_mac.c)  | 4
-rw-r--r--  sys/dev/sfxge/common/ef10_mcdi.c (renamed from sys/dev/sfxge/common/hunt_mcdi.c)  | 0
-rw-r--r--  sys/dev/sfxge/common/ef10_nic.c  | 1616
-rw-r--r--  sys/dev/sfxge/common/ef10_nvram.c (renamed from sys/dev/sfxge/common/hunt_nvram.c)  | 4
-rw-r--r--  sys/dev/sfxge/common/ef10_phy.c  | 518
-rw-r--r--  sys/dev/sfxge/common/ef10_rx.c (renamed from sys/dev/sfxge/common/hunt_rx.c)  | 4
-rwxr-xr-x  sys/dev/sfxge/common/ef10_tx.c (renamed from sys/dev/sfxge/common/hunt_tx.c)  | 6
-rw-r--r--  sys/dev/sfxge/common/ef10_vpd.c (renamed from sys/dev/sfxge/common/hunt_vpd.c)  | 4
-rw-r--r--  sys/dev/sfxge/common/efx_ev.c  | 82
-rw-r--r--  sys/dev/sfxge/common/efx_filter.c  | 148
-rw-r--r--  sys/dev/sfxge/common/efx_impl.h  | 2
-rw-r--r--  sys/dev/sfxge/common/efx_intr.c  | 62
-rw-r--r--  sys/dev/sfxge/common/efx_mac.c  | 17
-rw-r--r--  sys/dev/sfxge/common/efx_rx.c  | 84
-rw-r--r--  sys/dev/sfxge/common/efx_tx.c  | 74
-rw-r--r--  sys/dev/sfxge/common/hunt_impl.h  | 3
-rw-r--r--  sys/dev/sfxge/common/hunt_nic.c  | 1573
-rw-r--r--  sys/dev/sfxge/common/hunt_phy.c  | 479
-rw-r--r--  sys/modules/gpio/Makefile  | 2
-rw-r--r--  sys/modules/gpio/gpiokeys/Makefile  | 14
-rw-r--r--  sys/modules/sfxge/Makefile  | 6
-rw-r--r--  sys/net/if.c  | 18
-rw-r--r--  sys/net80211/ieee80211_adhoc.c  | 3
-rw-r--r--  sys/net80211/ieee80211_hostap.c  | 13
-rw-r--r--  sys/net80211/ieee80211_hwmp.c  | 5
-rw-r--r--  sys/net80211/ieee80211_mesh.c  | 11
-rw-r--r--  sys/net80211/ieee80211_node.c  | 6
-rw-r--r--  sys/net80211/ieee80211_output.c  | 7
-rw-r--r--  sys/net80211/ieee80211_phy.c  | 7
-rw-r--r--  sys/net80211/ieee80211_scan_sw.c  | 17
-rw-r--r--  sys/net80211/ieee80211_sta.c  | 8
-rw-r--r--  sys/net80211/ieee80211_wds.c  | 2
-rw-r--r--  sys/netinet/sctp_structs.h  | 5
-rw-r--r--  sys/netinet/sctputil.c  | 12
-rw-r--r--  sys/sys/_types.h  | 1
-rw-r--r--  sys/sys/types.h  | 5
-rw-r--r--  sys/x86/x86/mca.c  | 3
-rw-r--r--  usr.bin/catman/catman.c  | 15
-rw-r--r--  usr.bin/mail/fio.c  | 4
-rw-r--r--  usr.bin/rpcinfo/rpcinfo.c  | 11
-rw-r--r--  usr.sbin/bhyve/pci_emul.c  | 4
-rw-r--r--  usr.sbin/bhyve/pci_virtio_block.c  | 4
-rw-r--r--  usr.sbin/bsnmpd/tools/libbsnmptools/bsnmptools.c  | 2
-rw-r--r--  usr.sbin/edquota/edquota.c  | 2
-rw-r--r--  usr.sbin/i2c/i2c.c  | 3
-rw-r--r--  usr.sbin/mptutil/mpt_cam.c  | 2
-rw-r--r--  usr.sbin/timed/timed/master.c  | 2
-rw-r--r--  usr.sbin/ypbind/ypbind.c  | 18
-rw-r--r--  usr.sbin/ypldap/ber.c  | 2
161 files changed, 9198 insertions, 5448 deletions
diff --git a/contrib/jemalloc/ChangeLog b/contrib/jemalloc/ChangeLog
index 9cbfbf9..c9ce7c4 100644
--- a/contrib/jemalloc/ChangeLog
+++ b/contrib/jemalloc/ChangeLog
@@ -4,6 +4,50 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
+* 4.2.0 (May 12, 2016)
+
+ New features:
+ - Add the arena.<i>.reset mallctl, which makes it possible to discard all of
+ an arena's allocations in a single operation. (@jasone)
+ - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone)
+ - Add the --with-version configure option. (@jasone)
+ - Support --with-lg-page values larger than actual page size. (@jasone)
+
+ Optimizations:
+ - Use pairing heaps rather than red-black trees for various hot data
+ structures. (@djwatson, @jasone)
+ - Streamline fast paths of rtree operations. (@jasone)
+ - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone)
+ - Decommit unused virtual memory if the OS does not overcommit. (@jasone)
+ - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order
+ to avoid unfortunate interactions during fork(2). (@jasone)
+
+ Bug fixes:
+ - Fix chunk accounting related to triggering gdump profiles. (@jasone)
+ - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone)
+ - Scale leak report summary according to sampling probability. (@jasone)
+
+* 4.1.1 (May 3, 2016)
+
+ This bugfix release resolves a variety of mostly minor issues, though the
+ bitmap fix is critical for 64-bit Windows.
+
+ Bug fixes:
+ - Fix the linear scan version of bitmap_sfu() to shift by the proper amount
+ even when sizeof(long) is not the same as sizeof(void *), as on 64-bit
+ Windows. (@jasone)
+ - Fix hashing functions to avoid unaligned memory accesses (and resulting
+ crashes). This is relevant at least to some ARM-based platforms.
+ (@rkmisra)
+ - Fix fork()-related lock rank ordering reversals. These reversals were
+ unlikely to cause deadlocks in practice except when heap profiling was
+ enabled and active. (@jasone)
+ - Fix various chunk leaks in OOM code paths. (@jasone)
+ - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone)
+ - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin)
+ - Fix a variety of test failures that were due to test fragility rather than
+ core bugs. (@jasone)
+
* 4.1.0 (February 28, 2016)
This release is primarily about optimizations, but it also incorporates a lot
@@ -59,14 +103,14 @@ brevity. Much more detail can be found in the git revision history:
Bug fixes:
- Fix stats.cactive accounting regression. (@rustyx, @jasone)
- Handle unaligned keys in hash(). This caused problems for some ARM systems.
- (@jasone, Christopher Ferris)
+ (@jasone, @cferris1000)
- Refactor arenas array. In addition to fixing a fork-related deadlock, this
makes arena lookups faster and simpler. (@jasone)
- Move retained memory allocation out of the default chunk allocation
function, to a location that gets executed even if the application installs
a custom chunk allocation function. This resolves a virtual memory leak.
(@buchgr)
- - Fix a potential tsd cleanup leak. (Christopher Ferris, @jasone)
+ - Fix a potential tsd cleanup leak. (@cferris1000, @jasone)
- Fix run quantization. In practice this bug had no impact unless
applications requested memory with alignment exceeding one page.
(@jasone, @djwatson)
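
The arena.<i>.reset control and the retained-memory statistics announced in the 4.2.0 ChangeLog entry above are driven through jemalloc's mallctl() interface. The following is a minimal sketch of how a program might exercise the new control; it is illustrative only (not part of this commit), and the error handling and the MALLOCX_ARENA usage in the comment are assumptions.

	#include <stdio.h>
	#include <malloc_np.h>	/* FreeBSD declares mallctl() here */

	int
	main(void)
	{
		unsigned arena;
		size_t sz = sizeof(arena);
		char ctl[32];

		/* arena.<i>.reset may only be used on arenas created via arenas.extend. */
		if (mallctl("arenas.extend", &arena, &sz, NULL, 0) != 0)
			return (1);

		/* ... allocate/free in this arena, e.g. via mallocx(size, MALLOCX_ARENA(arena)) ... */

		/* Discard every extant allocation in the arena in a single operation. */
		snprintf(ctl, sizeof(ctl), "arena.%u.reset", arena);
		if (mallctl(ctl, NULL, NULL, NULL, 0) != 0)
			return (1);
		return (0);
	}

Per the updated manual page further below, any thread caches used to allocate from the arena must be flushed (for example via the thread.tcache.flush mallctl) before the reset is issued.
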
diff --git a/contrib/jemalloc/FREEBSD-diffs b/contrib/jemalloc/FREEBSD-diffs
index e7484ea..6c20441 100644
--- a/contrib/jemalloc/FREEBSD-diffs
+++ b/contrib/jemalloc/FREEBSD-diffs
@@ -1,5 +1,5 @@
diff --git a/doc/jemalloc.xml.in b/doc/jemalloc.xml.in
-index bc5dbd1..ba182da 100644
+index c4a44e3..4626e9b 100644
--- a/doc/jemalloc.xml.in
+++ b/doc/jemalloc.xml.in
@@ -53,11 +53,23 @@
@@ -27,7 +27,7 @@ index bc5dbd1..ba182da 100644
<refsect2>
<title>Standard API</title>
<funcprototype>
-@@ -2905,4 +2917,18 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
+@@ -2961,4 +2973,18 @@ malloc_conf = "lg_chunk:24";]]></programlisting></para>
<para>The <function>posix_memalign<parameter/></function> function conforms
to IEEE Std 1003.1-2001 (&ldquo;POSIX.1&rdquo;).</para>
</refsect1>
@@ -47,7 +47,7 @@ index bc5dbd1..ba182da 100644
+ </refsect1>
</refentry>
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
-index 3f54391..d240256 100644
+index 51bf897..7de22ea 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -8,6 +8,9 @@
@@ -90,10 +90,10 @@ index 2b8ca5d..42d97f2 100644
#ifdef _WIN32
# include <windows.h>
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
-index f051f29..561378f 100644
+index 5221799..60ab041 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
-@@ -47,15 +47,13 @@ struct malloc_mutex_s {
+@@ -52,9 +52,6 @@ struct malloc_mutex_s {
#ifdef JEMALLOC_LAZY_LOCK
extern bool isthreaded;
@@ -102,19 +102,20 @@ index f051f29..561378f 100644
-# define isthreaded true
#endif
- bool malloc_mutex_init(malloc_mutex_t *mutex);
- void malloc_mutex_prefork(malloc_mutex_t *mutex);
- void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
- void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
+ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+@@ -62,6 +59,7 @@ bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
+ void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
+ void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
+bool malloc_mutex_first_thread(void);
- bool mutex_boot(void);
+ bool malloc_mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
-index 5880996..6e94e03 100644
+index f2b6a55..69369c9 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
-@@ -296,7 +296,6 @@ iralloct_realign
+@@ -311,7 +311,6 @@ iralloct_realign
isalloc
isdalloct
isqalloc
@@ -124,10 +125,10 @@ index 5880996..6e94e03 100644
jemalloc_postfork_child
diff --git a/include/jemalloc/jemalloc_FreeBSD.h b/include/jemalloc/jemalloc_FreeBSD.h
new file mode 100644
-index 0000000..433dab5
+index 0000000..c58a8f3
--- /dev/null
+++ b/include/jemalloc/jemalloc_FreeBSD.h
-@@ -0,0 +1,160 @@
+@@ -0,0 +1,162 @@
+/*
+ * Override settings that were generated in jemalloc_defs.h as necessary.
+ */
@@ -138,6 +139,8 @@ index 0000000..433dab5
+#define JEMALLOC_DEBUG
+#endif
+
++#undef JEMALLOC_DSS
++
+/*
+ * The following are architecture-dependent, so conditionally define them for
+ * each supported architecture.
@@ -300,7 +303,7 @@ index f943891..47d032c 100755
+#include "jemalloc_FreeBSD.h"
EOF
diff --git a/src/jemalloc.c b/src/jemalloc.c
-index 0735376..a34b85c 100644
+index 40eb2ea..666c49d 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -4,6 +4,10 @@
@@ -314,7 +317,7 @@ index 0735376..a34b85c 100644
/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
-@@ -2611,6 +2615,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+@@ -2673,6 +2677,107 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
*/
/******************************************************************************/
/*
@@ -341,7 +344,7 @@ index 0735376..a34b85c 100644
+ if (p == NULL)
+ return (ALLOCM_ERR_OOM);
+ if (rsize != NULL)
-+ *rsize = isalloc(p, config_prof);
++ *rsize = isalloc(tsdn_fetch(), p, config_prof);
+ *ptr = p;
+ return (ALLOCM_SUCCESS);
+}
@@ -370,7 +373,7 @@ index 0735376..a34b85c 100644
+ } else
+ ret = ALLOCM_ERR_OOM;
+ if (rsize != NULL)
-+ *rsize = isalloc(*ptr, config_prof);
++ *rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
+ }
+ return (ret);
+}
@@ -422,8 +425,8 @@ index 0735376..a34b85c 100644
* The following functions are used by threading libraries for protection of
* malloc during fork().
*/
-@@ -2717,4 +2822,11 @@ jemalloc_postfork_child(void)
- ctl_postfork_child();
+@@ -2814,4 +2919,11 @@ jemalloc_postfork_child(void)
+ ctl_postfork_child(tsd_tsdn(tsd));
}
+void
@@ -435,7 +438,7 @@ index 0735376..a34b85c 100644
+
/******************************************************************************/
diff --git a/src/mutex.c b/src/mutex.c
-index 2d47af9..934d5aa 100644
+index a1fac34..a24e420 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -66,6 +66,17 @@ pthread_create(pthread_t *__restrict thread,
@@ -456,22 +459,22 @@ index 2d47af9..934d5aa 100644
#endif
bool
-@@ -137,7 +148,7 @@ malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+@@ -140,7 +151,7 @@ malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
}
bool
--mutex_boot(void)
+-malloc_mutex_boot(void)
+malloc_mutex_first_thread(void)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
-@@ -151,3 +162,14 @@ mutex_boot(void)
+@@ -154,3 +165,14 @@ malloc_mutex_boot(void)
#endif
return (false);
}
+
+bool
-+mutex_boot(void)
++malloc_mutex_boot(void)
+{
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -481,10 +484,10 @@ index 2d47af9..934d5aa 100644
+#endif
+}
diff --git a/src/util.c b/src/util.c
-index 02673c7..116e981 100644
+index a1c4a2a..04f9153 100644
--- a/src/util.c
+++ b/src/util.c
-@@ -66,6 +66,22 @@ wrtmessage(void *cbopaque, const char *s)
+@@ -67,6 +67,22 @@ wrtmessage(void *cbopaque, const char *s)
JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
diff --git a/contrib/jemalloc/VERSION b/contrib/jemalloc/VERSION
index fd7c988..b4ecbbf 100644
--- a/contrib/jemalloc/VERSION
+++ b/contrib/jemalloc/VERSION
@@ -1 +1 @@
-4.1.0-1-g994da4232621dd1210fcf39bdf0d6454cefda473
+4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc
diff --git a/contrib/jemalloc/doc/jemalloc.3 b/contrib/jemalloc/doc/jemalloc.3
index c47f417..21d40c1 100644
--- a/contrib/jemalloc/doc/jemalloc.3
+++ b/contrib/jemalloc/doc/jemalloc.3
@@ -2,12 +2,12 @@
.\" Title: JEMALLOC
.\" Author: Jason Evans
.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\" Date: 02/28/2016
+.\" Date: 05/12/2016
.\" Manual: User Manual
-.\" Source: jemalloc 4.1.0-1-g994da4232621dd1210fcf39bdf0d6454cefda473
+.\" Source: jemalloc 4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc
.\" Language: English
.\"
-.TH "JEMALLOC" "3" "02/28/2016" "jemalloc 4.1.0-1-g994da4232621" "User Manual"
+.TH "JEMALLOC" "3" "05/12/2016" "jemalloc 4.2.0-1-gdc7ff6306d7a" "User Manual"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -31,7 +31,7 @@
jemalloc \- general purpose memory allocation functions
.SH "LIBRARY"
.PP
-This manual describes jemalloc 4\&.1\&.0\-1\-g994da4232621dd1210fcf39bdf0d6454cefda473\&. More information can be found at the
+This manual describes jemalloc 4\&.2\&.0\-1\-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc\&. More information can be found at the
\m[blue]\fBjemalloc website\fR\m[]\&\s-2\u[1]\d\s+2\&.
.PP
The following configuration options are enabled in libc\*(Aqs built\-in jemalloc:
@@ -461,7 +461,8 @@ Memory is conceptually broken into equal\-sized chunks, where the chunk size is
Small objects are managed in groups by page runs\&. Each run maintains a bitmap to track which regions are in use\&. Allocation requests that are no more than half the quantum (8 or 16, depending on architecture) are rounded up to the nearest power of two that is at least
sizeof(\fBdouble\fR)\&. All other object size classes are multiples of the quantum, spaced such that there are four size classes for each doubling in size, which limits internal fragmentation to approximately 20% for all but the smallest size classes\&. Small size classes are smaller than four times the page size, large size classes are smaller than the chunk size (see the
"opt\&.lg_chunk"
-option), and huge size classes extend from the chunk size up to one size class less than the full address space size\&.
+option), and huge size classes extend from the chunk size up to the largest size class that does not exceed
+\fBPTRDIFF_MAX\fR\&.
.PP
Allocations are packed tightly together, which can be an issue for multi\-threaded applications\&. If you need to assure that allocations do not suffer from cacheline sharing, round your allocation requests up to the nearest multiple of the cacheline size, or specify cacheline alignment when allocating\&.
.PP
@@ -518,6 +519,8 @@ l r l
^ r l
^ r l
^ r l
+^ r l
+^ r l
^ r l.
T{
Small
@@ -645,6 +648,16 @@ T}
T}:T{
\&.\&.\&.
T}
+:T{
+512 PiB
+T}:T{
+[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]
+T}
+:T{
+1 EiB
+T}:T{
+[5 EiB, 6 EiB, 7 EiB]
+T}
.TE
.sp 1
.SH "MALLCTL NAMESPACE"
@@ -841,7 +854,7 @@ function\&. If
is specified during configuration, this has the potential to cause deadlock for a multi\-threaded process that exits while one or more threads are executing in the memory allocation functions\&. Furthermore,
\fBatexit\fR\fB\fR
may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
-\fBatexit\fR\fB\fR, so this option is not univerally usable (though the application can register its own
+\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
\fBatexit\fR\fB\fR
function with equivalent functionality)\&. Therefore, this option should only be used with care; it is primarily intended as a performance tuning aid during application development\&. This option is disabled by default\&.
.RE
@@ -1007,7 +1020,7 @@ is controlled by the
option\&. Note that
\fBatexit\fR\fB\fR
may allocate memory during application initialization and then deadlock internally when jemalloc in turn calls
-\fBatexit\fR\fB\fR, so this option is not univerally usable (though the application can register its own
+\fBatexit\fR\fB\fR, so this option is not universally usable (though the application can register its own
\fBatexit\fR\fB\fR
function with equivalent functionality)\&. This option is disabled by default\&.
.RE
@@ -1113,6 +1126,14 @@ Trigger decay\-based purging of unused dirty pages for arena <i>, or for all are
for details\&.
.RE
.PP
+"arena\&.<i>\&.reset" (\fBvoid\fR) \-\-
+.RS 4
+Discard all of the arena\*(Aqs extant allocations\&. This interface can only be used with arenas created via
+"arenas\&.extend"\&. None of the arena\*(Aqs discarded/cached allocations may accessed afterward\&. As part of this requirement, all thread caches which were used to allocate/deallocate in conjunction with the arena must be flushed beforehand\&. This interface cannot be used if running inside Valgrind, nor if the
+quarantine
+size is non\-zero\&.
+.RE
+.PP
"arena\&.<i>\&.dss" (\fBconst char *\fR) rw
.RS 4
Set the precedence of dss allocation as related to mmap allocation for arena <i>, or for all arenas if <i> equals
@@ -1503,7 +1524,7 @@ Get the current sample rate (see
.PP
"prof\&.interval" (\fBuint64_t\fR) r\- [\fB\-\-enable\-prof\fR]
.RS 4
-Average number of bytes allocated between inverval\-based profile dumps\&. See the
+Average number of bytes allocated between interval\-based profile dumps\&. See the
"opt\&.lg_prof_interval"
option for additional information\&.
.RE
@@ -1547,6 +1568,15 @@ Total number of bytes in active chunks mapped by the allocator\&. This is a mult
"stats\&.resident"\&.
.RE
.PP
+"stats\&.retained" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Total number of bytes in virtual memory mappings that were retained rather than being returned to the operating system via e\&.g\&.
+\fBmunmap\fR(2)\&. Retained virtual memory is typically untouched, decommitted, or purged, so it has no strongly associated physical memory (see
+chunk hooks
+for details)\&. Retained memory is excluded from mapped memory statistics, e\&.g\&.
+"stats\&.mapped"\&.
+.RE
+.PP
"stats\&.arenas\&.<i>\&.dss" (\fBconst char *\fR) r\-
.RS 4
dss (\fBsbrk\fR(2)) allocation precedence as related to
@@ -1592,6 +1622,13 @@ or similar has not been called\&.
Number of mapped bytes\&.
.RE
.PP
+"stats\&.arenas\&.<i>\&.retained" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
+.RS 4
+Number of retained bytes\&. See
+"stats\&.retained"
+for details\&.
+.RE
+.PP
"stats\&.arenas\&.<i>\&.metadata\&.mapped" (\fBsize_t\fR) r\- [\fB\-\-enable\-stats\fR]
.RS 4
Number of mapped bytes in arena chunk headers, which track the states of the non\-metadata pages\&.
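
The stats.retained and stats.arenas.<i>.retained counters documented above are read like the existing statistics mallctls: write to the epoch control to refresh the snapshot, then query the counters. A minimal sketch follows, assuming a jemalloc built with --enable-stats; it is not part of this commit.

	#include <stdint.h>
	#include <stdio.h>
	#include <malloc_np.h>

	int
	main(void)
	{
		uint64_t epoch = 1;
		size_t mapped, retained, sz;

		/* Writing any value to "epoch" refreshes the cached statistics. */
		sz = sizeof(epoch);
		if (mallctl("epoch", &epoch, &sz, &epoch, sz) != 0)
			return (1);

		sz = sizeof(retained);
		if (mallctl("stats.retained", &retained, &sz, NULL, 0) != 0)
			return (1);
		sz = sizeof(mapped);
		if (mallctl("stats.mapped", &mapped, &sz, NULL, 0) != 0)
			return (1);

		/* Retained bytes are excluded from the mapped statistic. */
		printf("mapped %zu bytes, retained %zu bytes\n", mapped, retained);
		return (0);
	}

Because retained memory is excluded from stats.mapped, the two numbers can be compared directly to gauge how much virtual memory is being held back rather than returned to the operating system.
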
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena.h b/contrib/jemalloc/include/jemalloc/internal/arena.h
index 3519873..b1de2b61 100644
--- a/contrib/jemalloc/include/jemalloc/internal/arena.h
+++ b/contrib/jemalloc/include/jemalloc/internal/arena.h
@@ -36,6 +36,7 @@ typedef enum {
#define DECAY_NTICKS_PER_UPDATE 1000
typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
+typedef struct arena_avail_links_s arena_avail_links_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
@@ -153,13 +154,13 @@ struct arena_runs_dirty_link_s {
*/
struct arena_chunk_map_misc_s {
/*
- * Linkage for run trees. There are two disjoint uses:
+ * Linkage for run heaps. There are two disjoint uses:
*
- * 1) arena_t's runs_avail tree.
+ * 1) arena_t's runs_avail heaps.
* 2) arena_run_t conceptually uses this linkage for in-use non-full
* runs, rather than directly embedding linkage.
*/
- rb_node(arena_chunk_map_misc_t) rb_link;
+ phn(arena_chunk_map_misc_t) ph_link;
union {
/* Linkage for list of dirty runs. */
@@ -175,7 +176,7 @@ struct arena_chunk_map_misc_s {
arena_run_t run;
};
};
-typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
+typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */
#ifdef JEMALLOC_ARENA_STRUCTS_B
@@ -272,13 +273,13 @@ struct arena_bin_s {
arena_run_t *runcur;
/*
- * Tree of non-full runs. This tree is used when looking for an
+ * Heap of non-full runs. This heap is used when looking for an
* existing run when runcur is no longer usable. We choose the
* non-full run that is lowest in memory; this policy tends to keep
* objects packed well, and it can also help reduce the number of
* almost-empty chunks.
*/
- arena_run_tree_t runs;
+ arena_run_heap_t runs;
/* Bin statistics. */
malloc_bin_stats_t stats;
@@ -289,10 +290,18 @@ struct arena_s {
unsigned ind;
/*
- * Number of threads currently assigned to this arena. This field is
- * synchronized via atomic operations.
+ * Number of threads currently assigned to this arena, synchronized via
+ * atomic operations. Each thread has two distinct assignments, one for
+ * application-serving allocation, and the other for internal metadata
+ * allocation. Internal metadata must not be allocated from arenas
+ * created via the arenas.extend mallctl, because the arena.<i>.reset
+ * mallctl indiscriminately discards all allocations for the affected
+ * arena.
+ *
+ * 0: Application allocation.
+ * 1: Internal metadata allocation.
*/
- unsigned nthreads;
+ unsigned nthreads[2];
/*
* There are three classes of arena operations from a locking
@@ -321,6 +330,10 @@ struct arena_s {
dss_prec_t dss_prec;
+
+ /* Extant arena chunks. */
+ ql_head(extent_node_t) achunks;
+
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
@@ -457,10 +470,10 @@ struct arena_s {
arena_bin_t bins[NBINS];
/*
- * Quantized address-ordered trees of this arena's available runs. The
- * trees are used for first-best-fit run allocation.
+ * Quantized address-ordered heaps of this arena's available runs. The
+ * heaps are used for first-best-fit run allocation.
*/
- arena_run_tree_t runs_avail[1]; /* Dynamically sized. */
+ arena_run_heap_t runs_avail[1]; /* Dynamically sized. */
};
/* Used in conjunction with tsd for fast arena-related context lookup. */
@@ -505,25 +518,28 @@ void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
bool cache);
-extent_node_t *arena_node_alloc(arena_t *arena);
-void arena_node_dalloc(arena_t *arena, extent_node_t *node);
-void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
- bool *zero);
-void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
-void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
- size_t oldsize, size_t usize);
-void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
- size_t oldsize, size_t usize);
-bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
- size_t oldsize, size_t usize, bool *zero);
-ssize_t arena_lg_dirty_mult_get(arena_t *arena);
-bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_get(arena_t *arena);
-bool arena_decay_time_set(arena_t *arena, ssize_t decay_time);
-void arena_maybe_purge(arena_t *arena);
-void arena_purge(arena_t *arena, bool all);
-void arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
- szind_t binind, uint64_t prof_accumbytes);
+extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
+void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
+void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero);
+void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t usize);
+void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
+ void *chunk, size_t oldsize, size_t usize);
+void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
+ void *chunk, size_t oldsize, size_t usize);
+bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
+ void *chunk, size_t oldsize, size_t usize, bool *zero);
+ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t lg_dirty_mult);
+ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
+void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
+void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
+void arena_reset(tsd_t *tsd, arena_t *arena);
+void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
+ tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@@ -536,17 +552,18 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
-void *arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
-void *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
- bool zero, tcache_t *tcache);
-void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
+void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
+ bool zero);
+void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
+ szind_t ind, bool zero);
+void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promoted(const void *ptr, size_t size);
-void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
+void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
@@ -554,67 +571,80 @@ extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
-void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
- void *ptr);
-void arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr);
+void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
-bool arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero);
+bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t size, size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t arena_dss_prec_get(arena_t *arena);
-bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
ssize_t arena_decay_time_default_get(void);
bool arena_decay_time_default_set(ssize_t decay_time);
-void arena_basic_stats_merge(arena_t *arena, unsigned *nthreads,
+void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
+ unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
+ ssize_t *decay_time, size_t *nactive, size_t *ndirty);
+void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty);
-void arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
- ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
-unsigned arena_nthreads_get(arena_t *arena);
-void arena_nthreads_inc(arena_t *arena);
-void arena_nthreads_dec(arena_t *arena);
-arena_t *arena_new(unsigned ind);
+ size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+ malloc_huge_stats_t *hstats);
+unsigned arena_nthreads_get(arena_t *arena, bool internal);
+void arena_nthreads_inc(arena_t *arena, bool internal);
+void arena_nthreads_dec(arena_t *arena, bool internal);
+arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
bool arena_boot(void);
-void arena_prefork(arena_t *arena);
-void arena_postfork_parent(arena_t *arena);
-void arena_postfork_child(arena_t *arena);
+void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
+arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
size_t pageind);
-arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
+const arena_chunk_map_bits_t *arena_bitselm_get_const(
+ const arena_chunk_t *chunk, size_t pageind);
+arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
size_t pageind);
+const arena_chunk_map_misc_t *arena_miscelm_get_const(
+ const arena_chunk_t *chunk, size_t pageind);
size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
-void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
+void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
-size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbitsp_read(size_t *mapbitsp);
-size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
+const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbitsp_read(const size_t *mapbitsp);
+size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
-size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
+size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
size_t pageind);
-size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
+ size_t pageind);
+szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
@@ -634,29 +664,31 @@ void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
-prof_tctx_t *arena_prof_tctx_get(const void *ptr);
-void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
-void arena_prof_tctx_reset(const void *ptr, size_t usize,
+prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *old_tctx);
-void arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
-void arena_decay_tick(tsd_t *tsd, arena_t *arena);
-void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
+void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
+void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
+void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(const void *ptr);
-size_t arena_salloc(const void *ptr, bool demote);
-void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
+void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
+void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
+arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@@ -665,8 +697,15 @@ arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
return (&chunk->map_bits[pageind-map_bias]);
}
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
+arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
+arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@@ -676,6 +715,13 @@ arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
(uintptr_t)map_misc_offset) + pageind-map_bias);
}
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
+arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
{
@@ -690,7 +736,7 @@ arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
}
JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -723,24 +769,31 @@ arena_run_to_miscelm(arena_run_t *run)
}
JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
- return (&arena_bitselm_get(chunk, pageind)->bits);
+ return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
+}
+
+JEMALLOC_ALWAYS_INLINE const size_t *
+arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(size_t *mapbitsp)
+arena_mapbitsp_read(const size_t *mapbitsp)
{
return (*mapbitsp);
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
{
- return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
+ return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -760,7 +813,7 @@ arena_mapbits_size_decode(size_t mapbits)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -770,7 +823,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -781,7 +834,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -792,7 +845,7 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
szind_t binind;
@@ -804,7 +857,7 @@ arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -815,7 +868,7 @@ arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -826,7 +879,7 @@ arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -837,7 +890,7 @@ arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -846,7 +899,7 @@ arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -882,7 +935,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -896,7 +949,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert((size & PAGE_MASK) == 0);
@@ -908,7 +961,7 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert((flags & CHUNK_MAP_UNZEROED) == flags);
arena_mapbitsp_write(mapbitsp, flags);
@@ -918,7 +971,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -933,7 +986,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
szind_t binind)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
@@ -947,7 +1000,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
szind_t binind, size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert(binind < BININD_INVALID);
assert(pageind - runind >= map_bias);
@@ -1004,7 +1057,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
}
JEMALLOC_INLINE bool
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
@@ -1015,9 +1068,9 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
bool ret;
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
ret = arena_prof_accum_impl(arena, accumbytes);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (ret);
}
}
@@ -1035,12 +1088,12 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t pageind;
size_t actual_mapbits;
size_t rpages_ind;
- arena_run_t *run;
+ const arena_run_t *run;
arena_bin_t *bin;
szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
- arena_chunk_map_misc_t *miscelm;
- void *rpages;
+ const arena_chunk_map_misc_t *miscelm;
+ const void *rpages;
assert(binind != BININD_INVALID);
assert(binind < NBINS);
@@ -1053,7 +1106,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
pageind);
- miscelm = arena_miscelm_get(chunk, rpages_ind);
+ miscelm = arena_miscelm_get_const(chunk, rpages_ind);
run = &miscelm->run;
run_binind = run->binind;
bin = &arena->bins[run_binind];
@@ -1153,7 +1206,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
}
JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(const void *ptr)
+arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
prof_tctx_t *ret;
arena_chunk_t *chunk;
@@ -1169,18 +1222,19 @@ arena_prof_tctx_get(const void *ptr)
if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
- arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
- pageind);
+ arena_chunk_map_misc_t *elm =
+ arena_miscelm_get_mutable(chunk, pageind);
ret = atomic_read_p(&elm->prof_tctx_pun);
}
} else
- ret = huge_prof_tctx_get(ptr);
+ ret = huge_prof_tctx_get(tsdn, ptr);
return (ret);
}
JEMALLOC_INLINE void
-arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
@@ -1199,7 +1253,7 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
assert(arena_mapbits_large_get(chunk, pageind) != 0);
- elm = arena_miscelm_get(chunk, pageind);
+ elm = arena_miscelm_get_mutable(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun, tctx);
} else {
/*
@@ -1211,12 +1265,12 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
- huge_prof_tctx_set(ptr, tctx);
+ huge_prof_tctx_set(tsdn, ptr, tctx);
}
JEMALLOC_INLINE void
-arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
- prof_tctx_t *old_tctx)
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+ const void *old_ptr, prof_tctx_t *old_tctx)
{
cassert(config_prof);
@@ -1235,56 +1289,59 @@ arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
0);
assert(arena_mapbits_large_get(chunk, pageind) != 0);
- elm = arena_miscelm_get(chunk, pageind);
+ elm = arena_miscelm_get_mutable(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun,
(prof_tctx_t *)(uintptr_t)1U);
} else
- huge_prof_tctx_reset(ptr);
+ huge_prof_tctx_reset(tsdn, ptr);
}
}
JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
{
+ tsd_t *tsd;
ticker_t *decay_ticker;
- if (unlikely(tsd == NULL))
+ if (unlikely(tsdn_null(tsdn)))
return;
+ tsd = tsdn_tsd(tsdn);
decay_ticker = decay_ticker_get(tsd, arena->ind);
if (unlikely(decay_ticker == NULL))
return;
if (unlikely(ticker_ticks(decay_ticker, nticks)))
- arena_purge(arena, false);
+ arena_purge(tsdn, arena, false);
}
JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsd_t *tsd, arena_t *arena)
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
{
- arena_decay_ticks(tsd, arena, 1);
+ arena_decay_ticks(tsdn, arena, 1);
}
JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
+arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path)
{
+ assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
if (likely(tcache != NULL)) {
if (likely(size <= SMALL_MAXCLASS)) {
- return (tcache_alloc_small(tsd, arena, tcache, size,
- ind, zero, slow_path));
+ return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path));
}
if (likely(size <= tcache_maxclass)) {
- return (tcache_alloc_large(tsd, arena, tcache, size,
- ind, zero, slow_path));
+ return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path));
}
/* (size > tcache_maxclass) case falls through. */
assert(size > tcache_maxclass);
}
- return (arena_malloc_hard(tsd, arena, size, ind, zero, tcache));
+ return (arena_malloc_hard(tsdn, arena, size, ind, zero));
}
JEMALLOC_ALWAYS_INLINE arena_t *
@@ -1301,7 +1358,7 @@ arena_aalloc(const void *ptr)
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(const void *ptr, bool demote)
+arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
@@ -1344,17 +1401,18 @@ arena_salloc(const void *ptr, bool demote)
ret = index2size(binind);
}
} else
- ret = huge_salloc(ptr);
+ ret = huge_salloc(tsdn, ptr);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
+ assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -1367,11 +1425,12 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL)) {
szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
- tcache_dalloc_small(tsd, tcache, ptr, binind,
- slow_path);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ binind, slow_path);
} else {
- arena_dalloc_small(tsd, extent_node_arena_get(
- &chunk->node), chunk, ptr, pageind);
+ arena_dalloc_small(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr, pageind);
}
} else {
size_t size = arena_mapbits_large_size_get(chunk,
@@ -1382,22 +1441,26 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL) && size - large_pad <=
tcache_maxclass) {
- tcache_dalloc_large(tsd, tcache, ptr, size -
- large_pad, slow_path);
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ size - large_pad, slow_path);
} else {
- arena_dalloc_large(tsd, extent_node_arena_get(
- &chunk->node), chunk, ptr);
+ arena_dalloc_large(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr);
}
}
} else
- huge_dalloc(tsd, ptr, tcache);
+ huge_dalloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path)
{
arena_chunk_t *chunk;
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
if (config_prof && opt_prof) {
@@ -1414,34 +1477,36 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
pageind) - large_pad;
}
}
- assert(s2u(size) == s2u(arena_salloc(ptr, false)));
+ assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = size2index(size);
- tcache_dalloc_small(tsd, tcache, ptr, binind,
- true);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ binind, slow_path);
} else {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
- arena_dalloc_small(tsd, extent_node_arena_get(
- &chunk->node), chunk, ptr, pageind);
+ arena_dalloc_small(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr, pageind);
}
} else {
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
if (likely(tcache != NULL) && size <= tcache_maxclass) {
- tcache_dalloc_large(tsd, tcache, ptr, size,
- true);
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ size, slow_path);
} else {
- arena_dalloc_large(tsd, extent_node_arena_get(
- &chunk->node), chunk, ptr);
+ arena_dalloc_large(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr);
}
}
} else
- huge_dalloc(tsd, ptr, tcache);
+ huge_dalloc(tsdn, ptr);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
diff --git a/contrib/jemalloc/include/jemalloc/internal/base.h b/contrib/jemalloc/include/jemalloc/internal/base.h
index 39e46ee..d6b81e1 100644
--- a/contrib/jemalloc/include/jemalloc/internal/base.h
+++ b/contrib/jemalloc/include/jemalloc/internal/base.h
@@ -9,12 +9,13 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *base_alloc(size_t size);
-void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
+void *base_alloc(tsdn_t *tsdn, size_t size);
+void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+ size_t *mapped);
bool base_boot(void);
-void base_prefork(void);
-void base_postfork_parent(void);
-void base_postfork_child(void);
+void base_prefork(tsdn_t *tsdn);
+void base_postfork_parent(tsdn_t *tsdn);
+void base_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/bitmap.h b/contrib/jemalloc/include/jemalloc/internal/bitmap.h
index 2594e3a..36f38b5 100644
--- a/contrib/jemalloc/include/jemalloc/internal/bitmap.h
+++ b/contrib/jemalloc/include/jemalloc/internal/bitmap.h
@@ -17,8 +17,8 @@ typedef unsigned long bitmap_t;
/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
- * force linear search, if we would have to call ffsl more than 2^3 times, use a
- * tree instead.
+ * force linear search, if we would have to call ffs_lu() more than 2^3 times,
+ * use a tree instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define USE_TREE
@@ -223,7 +223,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
i++;
g = bitmap[i];
}
- bit = (bit - 1) + (i << 6);
+ bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
bitmap_set(bitmap, binfo, bit);
return (bit);
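
The last hunk above replaces a hard-coded shift of 6, which silently assumed 64-bit bitmap groups, with LG_BITMAP_GROUP_NBITS, so bitmap_sfu() composes the correct global bit index whatever the group width. A small sketch of the computation, assuming (for illustration) 32-bit groups:

#include <assert.h>

#define LG_BITMAP_GROUP_NBITS	5		/* illustrative: 32-bit groups */
#define BITMAP_GROUP_NBITS	(1U << LG_BITMAP_GROUP_NBITS)

/* ffs_result is 1-based, as returned by ffs()-style primitives. */
static unsigned
global_bit_index(unsigned group, unsigned ffs_result)
{
	assert(ffs_result >= 1 && ffs_result <= BITMAP_GROUP_NBITS);
	/* The old "group << 6" would credit each 32-bit group with 64 bits. */
	return ((group << LG_BITMAP_GROUP_NBITS) + (ffs_result - 1));
}
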
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk.h b/contrib/jemalloc/include/jemalloc/internal/chunk.h
index 5d19383..c9fd4ec 100644
--- a/contrib/jemalloc/include/jemalloc/internal/chunk.h
+++ b/contrib/jemalloc/include/jemalloc/internal/chunk.h
@@ -48,32 +48,32 @@ extern size_t chunk_npages;
extern const chunk_hooks_t chunk_hooks_default;
-chunk_hooks_t chunk_hooks_get(arena_t *arena);
-chunk_hooks_t chunk_hooks_set(arena_t *arena,
+chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
+chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
-bool chunk_register(const void *chunk, const extent_node_t *node);
+bool chunk_register(tsdn_t *tsdn, const void *chunk,
+ const extent_node_t *node);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
-void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, bool *zero,
- bool dalloc_node);
-void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
-void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, bool committed);
-void chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, bool zeroed, bool committed);
-void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, bool committed);
-bool chunk_purge_arena(arena_t *arena, void *chunk, size_t offset,
+void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool dalloc_node);
+void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit);
+void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
+void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
+ bool committed);
+bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
size_t length);
-bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t offset, size_t length);
bool chunk_boot(void);
-void chunk_prefork(void);
-void chunk_postfork_parent(void);
-void chunk_postfork_child(void);
+void chunk_prefork(tsdn_t *tsdn);
+void chunk_postfork_parent(tsdn_t *tsdn);
+void chunk_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h b/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
index 388f46b..724fa57 100644
--- a/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/contrib/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -21,15 +21,15 @@ extern const char *dss_prec_names[];
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-dss_prec_t chunk_dss_prec_get(void);
-bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit);
-bool chunk_in_dss(void *chunk);
+dss_prec_t chunk_dss_prec_get(tsdn_t *tsdn);
+bool chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec);
+void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit);
+bool chunk_in_dss(tsdn_t *tsdn, void *chunk);
bool chunk_dss_boot(void);
-void chunk_dss_prefork(void);
-void chunk_dss_postfork_parent(void);
-void chunk_dss_postfork_child(void);
+void chunk_dss_prefork(tsdn_t *tsdn);
+void chunk_dss_postfork_parent(tsdn_t *tsdn);
+void chunk_dss_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/ckh.h b/contrib/jemalloc/include/jemalloc/internal/ckh.h
index f75ad90..46e151c 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ckh.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ckh.h
@@ -64,13 +64,13 @@ struct ckh_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+bool ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
-void ckh_delete(tsd_t *tsd, ckh_t *ckh);
+void ckh_delete(tsdn_t *tsdn, ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
-bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+bool ckh_insert(tsdn_t *tsdn, ckh_t *ckh, const void *key, const void *data);
+bool ckh_remove(tsdn_t *tsdn, ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
diff --git a/contrib/jemalloc/include/jemalloc/internal/ctl.h b/contrib/jemalloc/include/jemalloc/internal/ctl.h
index 9c5e932..af0f6d7 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ctl.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ctl.h
@@ -21,13 +21,14 @@ struct ctl_named_node_s {
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
- int (*ctl)(const size_t *, size_t, void *, size_t *,
- void *, size_t);
+ int (*ctl)(tsd_t *, const size_t *, size_t, void *,
+ size_t *, void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
- const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
+ const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
+ size_t);
};
struct ctl_arena_stats_s {
@@ -60,6 +61,7 @@ struct ctl_stats_s {
size_t metadata;
size_t resident;
size_t mapped;
+ size_t retained;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
@@ -68,16 +70,17 @@ struct ctl_stats_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen);
-int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
-
-int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
+int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
+ size_t *miblenp);
+
+int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
-void ctl_prefork(void);
-void ctl_postfork_parent(void);
-void ctl_postfork_child(void);
+void ctl_prefork(tsdn_t *tsdn);
+void ctl_postfork_parent(tsdn_t *tsdn);
+void ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent.h b/contrib/jemalloc/include/jemalloc/internal/extent.h
index 386d50e..49d76a5 100644
--- a/contrib/jemalloc/include/jemalloc/internal/extent.h
+++ b/contrib/jemalloc/include/jemalloc/internal/extent.h
@@ -48,7 +48,7 @@ struct extent_node_s {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) szad_link;
- /* Linkage for arena's huge and node_cache lists. */
+ /* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};
diff --git a/contrib/jemalloc/include/jemalloc/internal/hash.h b/contrib/jemalloc/include/jemalloc/internal/hash.h
index 864fda8..1ff2d9a 100644
--- a/contrib/jemalloc/include/jemalloc/internal/hash.h
+++ b/contrib/jemalloc/include/jemalloc/internal/hash.h
@@ -53,7 +53,7 @@ hash_get_block_32(const uint32_t *p, int i)
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
- memcpy(&ret, &p[i], sizeof(uint32_t));
+ memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return (ret);
}
@@ -68,7 +68,7 @@ hash_get_block_64(const uint64_t *p, int i)
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
- memcpy(&ret, &p[i], sizeof(uint64_t));
+ memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return (ret);
}
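
hash_get_block_32()/hash_get_block_64() read potentially misaligned input through memcpy() rather than by dereferencing a misaligned pointer, which would be undefined behaviour on strict-alignment targets. A self-contained sketch of that load pattern:

#include <stdint.h>
#include <string.h>

/* Safely load a 32-bit value from an address with unknown alignment.
 * memcpy() lets the compiler emit whatever unaligned-load sequence the
 * target permits, without ever forming a misaligned uint32_t access. */
static uint32_t
load_u32_any_align(const void *src)
{
	uint32_t v;

	memcpy(&v, src, sizeof(v));
	return (v);
}
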
diff --git a/contrib/jemalloc/include/jemalloc/internal/huge.h b/contrib/jemalloc/include/jemalloc/internal/huge.h
index cb6f69e..b5fa9e6 100644
--- a/contrib/jemalloc/include/jemalloc/internal/huge.h
+++ b/contrib/jemalloc/include/jemalloc/internal/huge.h
@@ -9,24 +9,23 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
- tcache_t *tcache);
-void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
- bool zero, tcache_t *tcache);
-bool huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize,
+void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
+void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero);
+bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
-typedef void (huge_dalloc_junk_t)(void *, size_t);
+typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
-void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void huge_dalloc(tsdn_t *tsdn, void *ptr);
arena_t *huge_aalloc(const void *ptr);
-size_t huge_salloc(const void *ptr);
-prof_tctx_t *huge_prof_tctx_get(const void *ptr);
-void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
-void huge_prof_tctx_reset(const void *ptr);
+size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
+prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
index 7a60806..a694869 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal.h
@@ -158,6 +158,7 @@ static const bool config_cache_oblivious =
#include <malloc/malloc.h>
#endif
+#include "jemalloc/internal/ph.h"
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
@@ -367,6 +368,7 @@ typedef unsigned szind_t;
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
@@ -398,6 +400,7 @@ typedef unsigned szind_t;
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@@ -440,6 +443,9 @@ extern bool in_valgrind;
/* Number of CPUs. */
extern unsigned ncpus;
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
@@ -463,14 +469,14 @@ void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
-arena_t *arenas_extend(unsigned ind);
unsigned narenas_total_get(void);
-arena_t *arena_init(unsigned ind);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t *arena_choose_hard(tsd_t *tsd);
+arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
+void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void narenas_tdata_cleanup(tsd_t *tsd);
@@ -490,6 +496,7 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@@ -521,8 +528,9 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
@@ -542,10 +550,12 @@ size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
+arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
+arena_t *arena_ichoose(tsdn_t *tsdn, arena_t *arena);
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
bool refresh_if_missing);
-arena_t *arena_get(unsigned ind, bool init_if_missing);
+arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif
@@ -741,7 +751,7 @@ sa2u(size_t size, size_t alignment)
* Calculate the size of the over-size run that arena_palloc()
* would need to allocate in order to guarantee the alignment.
*/
- if (usize + large_pad + alignment - PAGE <= arena_maxrun)
+ if (usize + large_pad + alignment <= arena_maxrun)
return (usize);
}
@@ -771,7 +781,7 @@ sa2u(size_t size, size_t alignment)
* Calculate the multi-chunk mapping that huge_palloc() would need in
* order to guarantee the alignment.
*/
- if (usize + alignment - PAGE < usize) {
+ if (usize + alignment < usize) {
/* size_t overflow. */
return (0);
}
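
The check just above detects size_t overflow in the multi-chunk size computation by testing whether the unsigned sum wrapped around. The idiom in isolation, as a sketch:

#include <stdbool.h>
#include <stddef.h>

/* Unsigned addition wraps modulo SIZE_MAX + 1, so the sum is smaller than
 * either operand exactly when the mathematical result does not fit. */
static bool
size_add_overflows(size_t a, size_t b)
{
	return (a + b < a);
}
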
@@ -780,19 +790,38 @@ sa2u(size_t size, size_t alignment)
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
+arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
{
arena_t *ret;
if (arena != NULL)
return (arena);
- if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
- ret = arena_choose_hard(tsd);
+ ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
+ if (unlikely(ret == NULL))
+ ret = arena_choose_hard(tsd, internal);
return (ret);
}
+JEMALLOC_INLINE arena_t *
+arena_choose(tsd_t *tsd, arena_t *arena)
+{
+
+ return (arena_choose_impl(tsd, arena, false));
+}
+
+JEMALLOC_INLINE arena_t *
+arena_ichoose(tsdn_t *tsdn, arena_t *arena)
+{
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ if (!tsdn_null(tsdn))
+ return (arena_choose_impl(tsdn_tsd(tsdn), NULL, true));
+ return (arena);
+}
+
JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
@@ -819,7 +848,7 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
}
JEMALLOC_INLINE arena_t *
-arena_get(unsigned ind, bool init_if_missing)
+arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
{
arena_t *ret;
@@ -829,7 +858,7 @@ arena_get(unsigned ind, bool init_if_missing)
if (unlikely(ret == NULL)) {
ret = atomic_read_p((void *)&arenas[ind]);
if (init_if_missing && unlikely(ret == NULL))
- ret = arena_init(ind);
+ ret = arena_init(tsdn, ind);
}
return (ret);
}
@@ -863,30 +892,27 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(const void *ptr);
-size_t isalloc(const void *ptr, bool demote);
-void *iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero,
+size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
+void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
-void *imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
- arena_t *arena);
-void *imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path);
-void *icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
- arena_t *arena);
-void *icalloc(tsd_t *tsd, size_t size, szind_t ind);
-void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
+ bool slow_path);
+void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena);
-void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t ivsalloc(const void *ptr, bool demote);
+size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
size_t u2rz(size_t usize);
-size_t p2rz(const void *ptr);
-void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
+size_t p2rz(tsdn_t *tsdn, const void *ptr);
+void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
bool slow_path);
-void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
-void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path);
+void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena);
@@ -894,7 +920,7 @@ void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
-bool ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero);
#endif
@@ -910,102 +936,85 @@ iaalloc(const void *ptr)
/*
* Typical usage:
+ * tsdn_t *tsdn = [...]
* void *ptr = [...]
- * size_t sz = isalloc(ptr, config_prof);
+ * size_t sz = isalloc(tsdn, ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
-isalloc(const void *ptr, bool demote)
+isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
- return (arena_salloc(ptr, demote));
+ return (arena_salloc(tsdn, ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache,
+iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
bool is_metadata, arena_t *arena, bool slow_path)
{
void *ret;
assert(size != 0);
+ assert(!is_metadata || tcache == NULL);
+ assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
- ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path);
+ ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
- config_prof));
+ arena_metadata_allocated_add(iaalloc(ret),
+ isalloc(tsdn, ret, config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
+ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
{
- return (iallocztm(tsd, size, ind, false, tcache, false, arena, true));
+ return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
+ false, NULL, slow_path));
}
JEMALLOC_ALWAYS_INLINE void *
-imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path)
-{
-
- return (iallocztm(tsd, size, ind, false, tcache_get(tsd, true), false,
- NULL, slow_path));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
-{
-
- return (iallocztm(tsd, size, ind, true, tcache, false, arena, true));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-icalloc(tsd_t *tsd, size_t size, szind_t ind)
-{
-
- return (iallocztm(tsd, size, ind, true, tcache_get(tsd, true), false,
- NULL, true));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
+ assert(!is_metadata || tcache == NULL);
+ assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
- ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
+ ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+ arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{
- return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
+ return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{
- return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, true),
- false, NULL));
+ return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
+ tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(const void *ptr, bool demote)
+ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
extent_node_t *node;
@@ -1017,7 +1026,7 @@ ivsalloc(const void *ptr, bool demote)
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
- return (isalloc(ptr, demote));
+ return (isalloc(tsdn, ptr, demote));
}
JEMALLOC_INLINE size_t
@@ -1035,39 +1044,34 @@ u2rz(size_t usize)
}
JEMALLOC_INLINE size_t
-p2rz(const void *ptr)
+p2rz(tsdn_t *tsdn, const void *ptr)
{
- size_t usize = isalloc(ptr, false);
+ size_t usize = isalloc(tsdn, ptr, false);
return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
+idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
bool slow_path)
{
assert(ptr != NULL);
+ assert(!is_metadata || tcache == NULL);
+ assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
- arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
+ arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
config_prof));
}
- arena_dalloc(tsd, ptr, tcache, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
-{
-
- idalloctm(tsd, ptr, tcache, false, true);
+ arena_dalloc(tsdn, ptr, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{
- idalloctm(tsd, ptr, tcache_get(tsd, false), false, true);
+ idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
}
JEMALLOC_ALWAYS_INLINE void
@@ -1077,24 +1081,25 @@ iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (slow_path && config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
- idalloctm(tsd, ptr, tcache, false, slow_path);
+ idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path)
{
- arena_sdalloc(tsd, ptr, size, tcache);
+ arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
-isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
{
- if (config_fill && unlikely(opt_quarantine))
+ if (slow_path && config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
- isdalloct(tsd, ptr, size, tcache);
+ isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
@@ -1107,7 +1112,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
usize = sa2u(size + extra, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
- p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
+ p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
@@ -1115,7 +1120,8 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
usize = sa2u(size, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
- p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
+ p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
+ arena);
if (p == NULL)
return (NULL);
}
@@ -1125,7 +1131,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
return (p);
}
@@ -1161,7 +1167,7 @@ iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
}
JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
+ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero)
{
@@ -1174,7 +1180,7 @@ ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
return (true);
}
- return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero));
+ return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
}
#endif
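
Several of the inline helpers above (iqalloc(), isqalloc()) now gate quarantine behind slow_path as well as config_fill. Because these helpers are JEMALLOC_ALWAYS_INLINE and the flags are compile-time constants or literal arguments at the call sites, the compiler can fold the whole branch away on the fast path. A sketch of that constant-propagation pattern, with illustrative names:

#include <stdbool.h>

static const bool config_fill_sketch = false;	/* compile-time feature flag */

static inline void
dalloc_sketch(void *ptr, bool slow_path)
{
	if (slow_path && config_fill_sketch) {
		/* Dead when the flag is false; the optimizer removes it. */
		/* ... quarantine ptr ... */
	} else {
		/* ... ordinary deallocation ... */
	}
	(void)ptr;
}

/* dalloc_sketch(p, false) compiles down to just the fast path. */
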
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
index 89c3c52..32f6f15 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
@@ -216,6 +216,15 @@
/* #undef JEMALLOC_ZONE_VERSION */
/*
+ * Methods for determining whether the OS overcommits.
+ * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
+ * /proc/sys/vm.overcommit_memory file.
+ * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
+ */
+#define JEMALLOC_SYSCTL_VM_OVERCOMMIT
+/* #undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY */
+
+/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
diff --git a/contrib/jemalloc/include/jemalloc/internal/mb.h b/contrib/jemalloc/include/jemalloc/internal/mb.h
index 3cfa787..437c86f 100644
--- a/contrib/jemalloc/include/jemalloc/internal/mb.h
+++ b/contrib/jemalloc/include/jemalloc/internal/mb.h
@@ -42,7 +42,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
-#else
+# else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
@@ -52,7 +52,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
-#endif
+# endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
@@ -104,9 +104,9 @@ mb_write(void)
{
malloc_mutex_t mtx;
- malloc_mutex_init(&mtx);
- malloc_mutex_lock(&mtx);
- malloc_mutex_unlock(&mtx);
+ malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
+ malloc_mutex_lock(NULL, &mtx);
+ malloc_mutex_unlock(NULL, &mtx);
}
#endif
#endif
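
The fallback mb_write() above synthesizes a write barrier from a mutex lock/unlock round trip, now expressed through the witness-aware malloc_mutex API with a NULL tsdn. The same idea in plain POSIX threads, as an illustrative sketch only (a dedicated fence instruction is far cheaper where available):

#include <pthread.h>

static void
mb_write_sketch(void)
{
	pthread_mutex_t mtx;

	/* POSIX requires lock/unlock to synchronize memory, so the pair
	 * acts as a barrier even though the mutex is never contended. */
	pthread_mutex_init(&mtx, NULL);
	pthread_mutex_lock(&mtx);
	pthread_mutex_unlock(&mtx);
	pthread_mutex_destroy(&mtx);
}
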
diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex.h b/contrib/jemalloc/include/jemalloc/internal/mutex.h
index 561378f..60ab041 100644
--- a/contrib/jemalloc/include/jemalloc/internal/mutex.h
+++ b/contrib/jemalloc/include/jemalloc/internal/mutex.h
@@ -6,17 +6,21 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OSSPIN))
-# define MALLOC_MUTEX_INITIALIZER {0}
+# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
+# define MALLOC_MUTEX_INITIALIZER \
+ {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
+# define MALLOC_MUTEX_INITIALIZER \
+ {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
+ WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
+# define MALLOC_MUTEX_INITIALIZER \
+ {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# endif
#endif
@@ -39,6 +43,7 @@ struct malloc_mutex_s {
#else
pthread_mutex_t lock;
#endif
+ witness_t witness;
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -49,28 +54,32 @@ struct malloc_mutex_s {
extern bool isthreaded;
#endif
-bool malloc_mutex_init(malloc_mutex_t *mutex);
-void malloc_mutex_prefork(malloc_mutex_t *mutex);
-void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
-void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
+bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank);
+void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
bool malloc_mutex_first_thread(void);
-bool mutex_boot(void);
+bool malloc_mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-void malloc_mutex_lock(malloc_mutex_t *mutex);
-void malloc_mutex_unlock(malloc_mutex_t *mutex);
+void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
-malloc_mutex_lock(malloc_mutex_t *mutex)
+malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
+ witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
@@ -82,14 +91,16 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
#else
pthread_mutex_lock(&mutex->lock);
#endif
+ witness_lock(tsdn, &mutex->witness);
}
}
JEMALLOC_INLINE void
-malloc_mutex_unlock(malloc_mutex_t *mutex)
+malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
+ witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
@@ -103,6 +114,22 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
#endif
}
}
+
+JEMALLOC_INLINE void
+malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ if (isthreaded)
+ witness_assert_owner(tsdn, &mutex->witness);
+}
+
+JEMALLOC_INLINE void
+malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ if (isthreaded)
+ witness_assert_not_owner(tsdn, &mutex->witness);
+}
#endif
#endif /* JEMALLOC_H_INLINES */
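
malloc_mutex_lock()/unlock() above now thread a witness through each acquisition, asserting that the lock is not already held and recording ownership so that lock-ordering problems can be caught in debug builds; WITNESS_RANK_OMIT in the static initializers opts a mutex out of that checking. A deliberately simplified, hypothetical sketch of rank-based order checking (not jemalloc's actual witness API; __thread is used here for brevity):

#include <assert.h>

#define MAX_HELD 16

typedef struct {
	const char	*name;
	unsigned	rank;	/* locks must be taken in increasing rank */
} dbg_lock_t;

static __thread dbg_lock_t *held[MAX_HELD];
static __thread unsigned nheld;

static void
dbg_lock_acquire(dbg_lock_t *lock)
{
	/* A lock-order reversal shows up as a non-increasing rank. */
	assert(nheld == 0 || lock->rank > held[nheld - 1]->rank);
	assert(nheld < MAX_HELD);
	held[nheld++] = lock;
	/* ... acquire the underlying mutex ... */
}

static void
dbg_lock_release(dbg_lock_t *lock)
{
	assert(nheld > 0 && held[nheld - 1] == lock);
	nheld--;
	/* ... release the underlying mutex ... */
}
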
diff --git a/contrib/jemalloc/include/jemalloc/internal/nstime.h b/contrib/jemalloc/include/jemalloc/internal/nstime.h
index dcb4b47..dc293b7 100644
--- a/contrib/jemalloc/include/jemalloc/internal/nstime.h
+++ b/contrib/jemalloc/include/jemalloc/internal/nstime.h
@@ -1,7 +1,7 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
-#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
+#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
&& _POSIX_MONOTONIC_CLOCK >= 0
typedef struct nstime_s nstime_t;
diff --git a/contrib/jemalloc/include/jemalloc/internal/pages.h b/contrib/jemalloc/include/jemalloc/internal/pages.h
index da7eb96..e21effd 100644
--- a/contrib/jemalloc/include/jemalloc/internal/pages.h
+++ b/contrib/jemalloc/include/jemalloc/internal/pages.h
@@ -9,13 +9,14 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *pages_map(void *addr, size_t size);
+void *pages_map(void *addr, size_t size, bool *commit);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
- size_t size);
+ size_t size, bool *commit);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
+void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/contrib/jemalloc/include/jemalloc/internal/ph.h b/contrib/jemalloc/include/jemalloc/internal/ph.h
new file mode 100644
index 0000000..4f91c33
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/ph.h
@@ -0,0 +1,345 @@
+/*
+ * A Pairing Heap implementation.
+ *
+ * "The Pairing Heap: A New Form of Self-Adjusting Heap"
+ * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
+ *
+ * With auxiliary twopass list, described in a follow on paper.
+ *
+ * "Pairing Heaps: Experiments and Analysis"
+ * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
+ *
+ *******************************************************************************
+ */
+
+#ifndef PH_H_
+#define PH_H_
+
+/* Node structure. */
+#define phn(a_type) \
+struct { \
+ a_type *phn_prev; \
+ a_type *phn_next; \
+ a_type *phn_lchild; \
+}
+
+/* Root structure. */
+#define ph(a_type) \
+struct { \
+ a_type *ph_root; \
+}
+
+/* Internal utility macros. */
+#define phn_lchild_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_lchild)
+#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
+ a_phn->a_field.phn_lchild = a_lchild; \
+} while (0)
+
+#define phn_next_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_next)
+#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
+ a_phn->a_field.phn_prev = a_prev; \
+} while (0)
+
+#define phn_prev_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_prev)
+#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
+ a_phn->a_field.phn_next = a_next; \
+} while (0)
+
+#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
+ a_type *phn0child; \
+ \
+ assert(a_phn0 != NULL); \
+ assert(a_phn1 != NULL); \
+ assert(a_cmp(a_phn0, a_phn1) <= 0); \
+ \
+ phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
+ phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
+ phn_next_set(a_type, a_field, a_phn1, phn0child); \
+ if (phn0child != NULL) \
+ phn_prev_set(a_type, a_field, phn0child, a_phn1); \
+ phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
+} while (0)
+
+#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
+ if (a_phn0 == NULL) \
+ r_phn = a_phn1; \
+ else if (a_phn1 == NULL) \
+ r_phn = a_phn0; \
+ else if (a_cmp(a_phn0, a_phn1) < 0) { \
+ phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
+ a_cmp); \
+ r_phn = a_phn0; \
+ } else { \
+ phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
+ a_cmp); \
+ r_phn = a_phn1; \
+ } \
+} while (0)
+
+#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+ a_type *head = NULL; \
+ a_type *tail = NULL; \
+ a_type *phn0 = a_phn; \
+ a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
+ \
+ /* \
+ * Multipass merge, wherein the first two elements of a FIFO \
+ * are repeatedly merged, and each result is appended to the \
+ * singly linked FIFO, until the FIFO contains only a single \
+ * element. We start with a sibling list but no reference to \
+ * its tail, so we do a single pass over the sibling list to \
+ * populate the FIFO. \
+ */ \
+ if (phn1 != NULL) { \
+ a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
+ if (phnrest != NULL) \
+ phn_prev_set(a_type, a_field, phnrest, NULL); \
+ phn_prev_set(a_type, a_field, phn0, NULL); \
+ phn_next_set(a_type, a_field, phn0, NULL); \
+ phn_prev_set(a_type, a_field, phn1, NULL); \
+ phn_next_set(a_type, a_field, phn1, NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
+ head = tail = phn0; \
+ phn0 = phnrest; \
+ while (phn0 != NULL) { \
+ phn1 = phn_next_get(a_type, a_field, phn0); \
+ if (phn1 != NULL) { \
+ phnrest = phn_next_get(a_type, a_field, \
+ phn1); \
+ if (phnrest != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phnrest, NULL); \
+ } \
+ phn_prev_set(a_type, a_field, phn0, \
+ NULL); \
+ phn_next_set(a_type, a_field, phn0, \
+ NULL); \
+ phn_prev_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_next_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, \
+ a_cmp, phn0); \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = phnrest; \
+ } else { \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = NULL; \
+ } \
+ } \
+ phn0 = head; \
+ phn1 = phn_next_get(a_type, a_field, phn0); \
+ if (phn1 != NULL) { \
+ while (true) { \
+ head = phn_next_get(a_type, a_field, \
+ phn1); \
+ assert(phn_prev_get(a_type, a_field, \
+ phn0) == NULL); \
+ phn_next_set(a_type, a_field, phn0, \
+ NULL); \
+ assert(phn_prev_get(a_type, a_field, \
+ phn1) == NULL); \
+ phn_next_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, \
+ a_cmp, phn0); \
+ if (head == NULL) \
+ break; \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = head; \
+ phn1 = phn_next_get(a_type, a_field, \
+ phn0); \
+ } \
+ } \
+ } \
+ r_phn = phn0; \
+} while (0)
+
+#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
+ a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
+ if (phn != NULL) { \
+ phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
+ phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
+ phn_prev_set(a_type, a_field, phn, NULL); \
+ ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
+ assert(phn_next_get(a_type, a_field, phn) == NULL); \
+ phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
+ a_ph->ph_root); \
+ } \
+} while (0)
+
+#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+ a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
+ if (lchild == NULL) \
+ r_phn = NULL; \
+ else { \
+ ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
+ r_phn); \
+ } \
+} while (0)
+
+/*
+ * The ph_proto() macro generates function prototypes that correspond to the
+ * functions generated by an equivalently parameterized call to ph_gen().
+ */
+#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
+a_attr void a_prefix##new(a_ph_type *ph); \
+a_attr bool a_prefix##empty(a_ph_type *ph); \
+a_attr a_type *a_prefix##first(a_ph_type *ph); \
+a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
+a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
+a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
+
+/*
+ * The ph_gen() macro generates a type-specific pairing heap implementation,
+ * based on the above cpp macros.
+ */
+#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
+a_attr void \
+a_prefix##new(a_ph_type *ph) \
+{ \
+ \
+ memset(ph, 0, sizeof(ph(a_type))); \
+} \
+a_attr bool \
+a_prefix##empty(a_ph_type *ph) \
+{ \
+ \
+ return (ph->ph_root == NULL); \
+} \
+a_attr a_type * \
+a_prefix##first(a_ph_type *ph) \
+{ \
+ \
+ if (ph->ph_root == NULL) \
+ return (NULL); \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ return (ph->ph_root); \
+} \
+a_attr void \
+a_prefix##insert(a_ph_type *ph, a_type *phn) \
+{ \
+ \
+ memset(&phn->a_field, 0, sizeof(phn(a_type))); \
+ \
+ /* \
+ * Treat the root as an aux list during insertion, and lazily \
+ * merge during a_prefix##remove_first(). For elements that \
+ * are inserted, then removed via a_prefix##remove() before the \
+ * aux list is ever processed, this makes insert/remove \
+ * constant-time, whereas eager merging would make insert \
+ * O(log n). \
+ */ \
+ if (ph->ph_root == NULL) \
+ ph->ph_root = phn; \
+ else { \
+ phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
+ a_field, ph->ph_root)); \
+ if (phn_next_get(a_type, a_field, ph->ph_root) != \
+ NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, ph->ph_root), \
+ phn); \
+ } \
+ phn_prev_set(a_type, a_field, phn, ph->ph_root); \
+ phn_next_set(a_type, a_field, ph->ph_root, phn); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##remove_first(a_ph_type *ph) \
+{ \
+ a_type *ret; \
+ \
+ if (ph->ph_root == NULL) \
+ return (NULL); \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ \
+ ret = ph->ph_root; \
+ \
+ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
+ ph->ph_root); \
+ \
+ return (ret); \
+} \
+a_attr void \
+a_prefix##remove(a_ph_type *ph, a_type *phn) \
+{ \
+ a_type *replace, *parent; \
+ \
+ /* \
+ * We can delete from aux list without merging it, but we need \
+ * to merge if we are dealing with the root node. \
+ */ \
+ if (ph->ph_root == phn) { \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ if (ph->ph_root == phn) { \
+ ph_merge_children(a_type, a_field, ph->ph_root, \
+ a_cmp, ph->ph_root); \
+ return; \
+ } \
+ } \
+ \
+ /* Get parent (if phn is leftmost child) before mutating. */ \
+ if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
+ if (phn_lchild_get(a_type, a_field, parent) != phn) \
+ parent = NULL; \
+ } \
+ /* Find a possible replacement node, and link to parent. */ \
+ ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
+ /* Set next/prev for sibling linked list. */ \
+ if (replace != NULL) { \
+ if (parent != NULL) { \
+ phn_prev_set(a_type, a_field, replace, parent); \
+ phn_lchild_set(a_type, a_field, parent, \
+ replace); \
+ } else { \
+ phn_prev_set(a_type, a_field, replace, \
+ phn_prev_get(a_type, a_field, phn)); \
+ if (phn_prev_get(a_type, a_field, phn) != \
+ NULL) { \
+ phn_next_set(a_type, a_field, \
+ phn_prev_get(a_type, a_field, phn), \
+ replace); \
+ } \
+ } \
+ phn_next_set(a_type, a_field, replace, \
+ phn_next_get(a_type, a_field, phn)); \
+ if (phn_next_get(a_type, a_field, phn) != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, phn), \
+ replace); \
+ } \
+ } else { \
+ if (parent != NULL) { \
+ a_type *next = phn_next_get(a_type, a_field, \
+ phn); \
+ phn_lchild_set(a_type, a_field, parent, next); \
+ if (next != NULL) { \
+ phn_prev_set(a_type, a_field, next, \
+ parent); \
+ } \
+ } else { \
+ assert(phn_prev_get(a_type, a_field, phn) != \
+ NULL); \
+ phn_next_set(a_type, a_field, \
+ phn_prev_get(a_type, a_field, phn), \
+ phn_next_get(a_type, a_field, phn)); \
+ } \
+ if (phn_next_get(a_type, a_field, phn) != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, phn), \
+ phn_prev_get(a_type, a_field, phn)); \
+ } \
+ } \
+}
+
+#endif /* PH_H_ */
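
The new ph.h provides intrusive pairing-heap macros analogous to rb.h: phn() embeds the linkage in a node type, ph() declares the heap head, and ph_gen() emits a typed implementation. A hypothetical standalone usage sketch (the node type, field name, and comparator are illustrative, not part of jemalloc; ph.h expects assert() and memset() to be in scope):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#include "ph.h"

typedef struct node_s node_t;
struct node_s {
	uint64_t	key;
	phn(node_t)	link;	/* intrusive pairing-heap linkage */
};

typedef ph(node_t) node_heap_t;

static int
node_cmp(const node_t *a, const node_t *b)
{
	/* Smaller keys come out first (min-heap). */
	return ((a->key > b->key) - (a->key < b->key));
}

ph_gen(static inline, node_heap_, node_heap_t, node_t, link, node_cmp)

/* Typical use: node_heap_new(&heap); node_heap_insert(&heap, &n);
 * node_t *min = node_heap_remove_first(&heap); */
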
diff --git a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
index fb43a6b..00bedba 100644
--- a/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/contrib/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -5,10 +5,12 @@
#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
#define arena_bin_index JEMALLOC_N(arena_bin_index)
#define arena_bin_info JEMALLOC_N(arena_bin_info)
-#define arena_bitselm_get JEMALLOC_N(arena_bitselm_get)
+#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const)
+#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable)
#define arena_boot JEMALLOC_N(arena_boot)
#define arena_choose JEMALLOC_N(arena_choose)
#define arena_choose_hard JEMALLOC_N(arena_choose_hard)
+#define arena_choose_impl JEMALLOC_N(arena_choose_impl)
#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge)
#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert)
#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove)
@@ -34,6 +36,7 @@
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
#define arena_get JEMALLOC_N(arena_get)
+#define arena_ichoose JEMALLOC_N(arena_ichoose)
#define arena_init JEMALLOC_N(arena_init)
#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get)
#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set)
@@ -60,7 +63,8 @@
#define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get)
#define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set)
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
-#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
+#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const)
+#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable)
#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_maxrun JEMALLOC_N(arena_maxrun)
@@ -69,7 +73,8 @@
#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get)
#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub)
#define arena_migrate JEMALLOC_N(arena_migrate)
-#define arena_miscelm_get JEMALLOC_N(arena_miscelm_get)
+#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const)
+#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable)
#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind)
#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages)
#define arena_new JEMALLOC_N(arena_new)
@@ -81,7 +86,10 @@
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
-#define arena_prefork JEMALLOC_N(arena_prefork)
+#define arena_prefork0 JEMALLOC_N(arena_prefork0)
+#define arena_prefork1 JEMALLOC_N(arena_prefork1)
+#define arena_prefork2 JEMALLOC_N(arena_prefork2)
+#define arena_prefork3 JEMALLOC_N(arena_prefork3)
#define arena_prof_accum JEMALLOC_N(arena_prof_accum)
#define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl)
#define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked)
@@ -97,6 +105,7 @@
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm)
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
+#define arena_reset JEMALLOC_N(arena_reset)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm)
#define arena_salloc JEMALLOC_N(arena_salloc)
@@ -123,6 +132,11 @@
#define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32)
#define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64)
#define atomic_sub_z JEMALLOC_N(atomic_sub_z)
+#define atomic_write_p JEMALLOC_N(atomic_write_p)
+#define atomic_write_u JEMALLOC_N(atomic_write_u)
+#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32)
+#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64)
+#define atomic_write_z JEMALLOC_N(atomic_write_z)
#define base_alloc JEMALLOC_N(base_alloc)
#define base_boot JEMALLOC_N(base_boot)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
@@ -148,7 +162,6 @@
#define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap)
#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper)
#define chunk_boot JEMALLOC_N(chunk_boot)
-#define chunk_dalloc_arena JEMALLOC_N(chunk_dalloc_arena)
#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache)
#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap)
#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper)
@@ -168,7 +181,6 @@
#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child)
#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent)
#define chunk_prefork JEMALLOC_N(chunk_prefork)
-#define chunk_purge_arena JEMALLOC_N(chunk_purge_arena)
#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper)
#define chunk_register JEMALLOC_N(chunk_register)
#define chunks_rtree JEMALLOC_N(chunks_rtree)
@@ -200,6 +212,8 @@
#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set)
#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get)
#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set)
+#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get)
+#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set)
#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert)
#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init)
#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove)
@@ -210,6 +224,8 @@
#define extent_node_size_set JEMALLOC_N(extent_node_size_set)
#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get)
#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set)
+#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy)
+#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse)
#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty)
#define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first)
#define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert)
@@ -227,6 +243,8 @@
#define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse)
#define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start)
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search)
+#define extent_tree_szad_destroy JEMALLOC_N(extent_tree_szad_destroy)
+#define extent_tree_szad_destroy_recurse JEMALLOC_N(extent_tree_szad_destroy_recurse)
#define extent_tree_szad_empty JEMALLOC_N(extent_tree_szad_empty)
#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first)
#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert)
@@ -273,14 +291,11 @@
#define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move)
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iaalloc JEMALLOC_N(iaalloc)
+#define ialloc JEMALLOC_N(ialloc)
#define iallocztm JEMALLOC_N(iallocztm)
-#define icalloc JEMALLOC_N(icalloc)
-#define icalloct JEMALLOC_N(icalloct)
+#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
#define idalloc JEMALLOC_N(idalloc)
-#define idalloct JEMALLOC_N(idalloct)
#define idalloctm JEMALLOC_N(idalloctm)
-#define imalloc JEMALLOC_N(imalloc)
-#define imalloct JEMALLOC_N(imalloct)
#define in_valgrind JEMALLOC_N(in_valgrind)
#define index2size JEMALLOC_N(index2size)
#define index2size_compute JEMALLOC_N(index2size_compute)
@@ -303,7 +318,11 @@
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
#define large_maxclass JEMALLOC_N(large_maxclass)
#define lg_floor JEMALLOC_N(lg_floor)
+#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
#define malloc_cprintf JEMALLOC_N(malloc_cprintf)
+#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner)
+#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner)
+#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
@@ -325,11 +344,13 @@
#define map_bias JEMALLOC_N(map_bias)
#define map_misc_offset JEMALLOC_N(map_misc_offset)
#define mb_write JEMALLOC_N(mb_write)
-#define mutex_boot JEMALLOC_N(mutex_boot)
+#define narenas_auto JEMALLOC_N(narenas_auto)
#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup)
#define narenas_total_get JEMALLOC_N(narenas_total_get)
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
+#define nhclasses JEMALLOC_N(nhclasses)
+#define nlclasses JEMALLOC_N(nlclasses)
#define nstime_add JEMALLOC_N(nstime_add)
#define nstime_compare JEMALLOC_N(nstime_compare)
#define nstime_copy JEMALLOC_N(nstime_copy)
@@ -372,6 +393,7 @@
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
#define p2rz JEMALLOC_N(p2rz)
+#define pages_boot JEMALLOC_N(pages_boot)
#define pages_commit JEMALLOC_N(pages_commit)
#define pages_decommit JEMALLOC_N(pages_decommit)
#define pages_map JEMALLOC_N(pages_map)
@@ -383,6 +405,7 @@
#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu)
#define prng_lg_range JEMALLOC_N(prng_lg_range)
#define prng_range JEMALLOC_N(prng_range)
+#define prof_active JEMALLOC_N(prof_active)
#define prof_active_get JEMALLOC_N(prof_active_get)
#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked)
#define prof_active_set JEMALLOC_N(prof_active_set)
@@ -392,6 +415,7 @@
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
+#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_dump_header JEMALLOC_N(prof_dump_header)
#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
@@ -409,7 +433,8 @@
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
-#define prof_prefork JEMALLOC_N(prof_prefork)
+#define prof_prefork0 JEMALLOC_N(prof_prefork0)
+#define prof_prefork1 JEMALLOC_N(prof_prefork1)
#define prof_realloc JEMALLOC_N(prof_realloc)
#define prof_reset JEMALLOC_N(prof_reset)
#define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update)
@@ -418,6 +443,7 @@
#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset)
#define prof_tctx_set JEMALLOC_N(prof_tctx_set)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
+#define prof_tdata_count JEMALLOC_N(prof_tdata_count)
#define prof_tdata_get JEMALLOC_N(prof_tdata_get)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
@@ -469,8 +495,6 @@
#define tcache_alloc_large JEMALLOC_N(tcache_alloc_large)
#define tcache_alloc_small JEMALLOC_N(tcache_alloc_small)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
-#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
-#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate)
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
@@ -505,38 +529,83 @@
#define ticker_ticks JEMALLOC_N(ticker_ticks)
#define tsd_arena_get JEMALLOC_N(tsd_arena_get)
#define tsd_arena_set JEMALLOC_N(tsd_arena_set)
+#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get)
+#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get)
+#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set)
+#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get)
+#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get)
+#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set)
+#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get)
#define tsd_boot JEMALLOC_N(tsd_boot)
#define tsd_boot0 JEMALLOC_N(tsd_boot0)
#define tsd_boot1 JEMALLOC_N(tsd_boot1)
#define tsd_booted JEMALLOC_N(tsd_booted)
+#define tsd_booted_get JEMALLOC_N(tsd_booted_get)
#define tsd_cleanup JEMALLOC_N(tsd_cleanup)
#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper)
#define tsd_fetch JEMALLOC_N(tsd_fetch)
#define tsd_get JEMALLOC_N(tsd_get)
-#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
-#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
+#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get)
+#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set)
+#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get)
#define tsd_initialized JEMALLOC_N(tsd_initialized)
#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define tsd_init_head JEMALLOC_N(tsd_init_head)
+#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get)
+#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set)
+#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get)
+#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get)
+#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set)
#define tsd_nominal JEMALLOC_N(tsd_nominal)
#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get)
#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set)
+#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get)
#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get)
#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set)
+#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get)
#define tsd_set JEMALLOC_N(tsd_set)
#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get)
#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set)
+#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get)
#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get)
#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set)
+#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get)
#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get)
#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set)
+#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get)
#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get)
#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set)
+#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get)
#define tsd_tls JEMALLOC_N(tsd_tls)
#define tsd_tsd JEMALLOC_N(tsd_tsd)
+#define tsd_tsdn JEMALLOC_N(tsd_tsdn)
+#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get)
+#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set)
+#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get)
+#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get)
+#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set)
+#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get)
+#define tsdn_fetch JEMALLOC_N(tsdn_fetch)
+#define tsdn_null JEMALLOC_N(tsdn_null)
+#define tsdn_tsd JEMALLOC_N(tsdn_tsd)
#define u2rz JEMALLOC_N(u2rz)
#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block)
#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined)
#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess)
#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined)
+#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless)
+#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner)
+#define witness_assert_owner JEMALLOC_N(witness_assert_owner)
+#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup)
+#define witness_init JEMALLOC_N(witness_init)
+#define witness_lock JEMALLOC_N(witness_lock)
+#define witness_lock_error JEMALLOC_N(witness_lock_error)
+#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
+#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
+#define witness_owner_error JEMALLOC_N(witness_owner_error)
+#define witness_postfork_child JEMALLOC_N(witness_postfork_child)
+#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent)
+#define witness_prefork JEMALLOC_N(witness_prefork)
+#define witness_unlock JEMALLOC_N(witness_unlock)
+#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup)
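The private_namespace.h changes above track jemalloc's symbol-mangling scheme: every internal symbol is routed through JEMALLOC_N() so it is emitted with a library-private prefix, and the list has to mirror the exact set of internal symbols, hence the added witness_*, tsdn_* and malloc_mutex_assert_* entries and the removal of names that no longer exist. A symbol missing from the list would leak into the global namespace unprefixed. A minimal, self-contained sketch of the same technique (the demo_ prefix and DEMO_N macro are illustrative stand-ins, not jemalloc's actual definitions):

#include <stdio.h>

/* Stand-in for JEMALLOC_N(): paste a private prefix onto an internal name. */
#define DEMO_N(internal_name)	demo_internal_##internal_name

/* Every later use of pages_boot really refers to demo_internal_pages_boot. */
#define pages_boot	DEMO_N(pages_boot)

static void
pages_boot(void)
{

	printf("running %s\n", "demo_internal_pages_boot");
}

int
main(void)
{

	pages_boot();
	return (0);
}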
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof.h b/contrib/jemalloc/include/jemalloc/internal/prof.h
index a25502a..691e153 100644
--- a/contrib/jemalloc/include/jemalloc/internal/prof.h
+++ b/contrib/jemalloc/include/jemalloc/internal/prof.h
@@ -281,7 +281,7 @@ extern uint64_t prof_interval;
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void prof_malloc_sample_object(const void *ptr, size_t usize,
+void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
@@ -293,32 +293,33 @@ size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
-typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
+typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
-void prof_idump(void);
-bool prof_mdump(const char *filename);
-void prof_gdump(void);
-prof_tdata_t *prof_tdata_init(tsd_t *tsd);
+void prof_idump(tsdn_t *tsdn);
+bool prof_mdump(tsd_t *tsd, const char *filename);
+void prof_gdump(tsdn_t *tsdn);
+prof_tdata_t *prof_tdata_init(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
-void prof_reset(tsd_t *tsd, size_t lg_sample);
+void prof_reset(tsdn_t *tsdn, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
-const char *prof_thread_name_get(void);
-bool prof_active_get(void);
-bool prof_active_set(bool active);
+bool prof_active_get(tsdn_t *tsdn);
+bool prof_active_set(tsdn_t *tsdn, bool active);
+const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
-bool prof_thread_active_get(void);
-bool prof_thread_active_set(bool active);
-bool prof_thread_active_init_get(void);
-bool prof_thread_active_init_set(bool active_init);
-bool prof_gdump_get(void);
-bool prof_gdump_set(bool active);
+bool prof_thread_active_get(tsd_t *tsd);
+bool prof_thread_active_set(tsd_t *tsd, bool active);
+bool prof_thread_active_init_get(tsdn_t *tsdn);
+bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
+bool prof_gdump_get(tsdn_t *tsdn);
+bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
-bool prof_boot2(void);
-void prof_prefork(void);
-void prof_postfork_parent(void);
-void prof_postfork_child(void);
+bool prof_boot2(tsdn_t *tsdn);
+void prof_prefork0(tsdn_t *tsdn);
+void prof_prefork1(tsdn_t *tsdn);
+void prof_postfork_parent(tsdn_t *tsdn);
+void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
@@ -329,17 +330,17 @@ void prof_sample_threshold_update(prof_tdata_t *tdata);
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
+prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+ const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
-prof_tctx_t *prof_tctx_get(const void *ptr);
-void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
-void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
- prof_tctx_t *tctx);
-void prof_malloc_sample_object(const void *ptr, size_t usize,
+void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
-void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
@@ -383,7 +384,7 @@ prof_tdata_get(tsd_t *tsd, bool create)
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
- tdata = prof_tdata_init(tsd);
+ tdata = prof_tdata_init(tsd_tsdn(tsd));
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
@@ -397,34 +398,34 @@ prof_tdata_get(tsd_t *tsd, bool create)
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(const void *ptr)
+prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
- return (arena_prof_tctx_get(ptr));
+ return (arena_prof_tctx_get(tsdn, ptr));
}
JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- arena_prof_tctx_set(ptr, usize, tctx);
+ arena_prof_tctx_set(tsdn, ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+ arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
@@ -479,17 +480,17 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
}
JEMALLOC_ALWAYS_INLINE void
-prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- assert(usize == isalloc(ptr, true));
+ assert(usize == isalloc(tsdn, ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
- prof_malloc_sample_object(ptr, usize, tctx);
+ prof_malloc_sample_object(tsdn, ptr, usize, tctx);
else
- prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
+ prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
@@ -503,7 +504,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
- assert(usize == isalloc(ptr, true));
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
@@ -520,9 +521,9 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (unlikely(sampled))
- prof_malloc_sample_object(ptr, usize, tctx);
+ prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
else
- prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+ prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
if (unlikely(old_sampled))
prof_free_sampled_object(tsd, old_usize, old_tctx);
@@ -531,10 +532,10 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
- prof_tctx_t *tctx = prof_tctx_get(ptr);
+ prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
cassert(config_prof);
- assert(usize == isalloc(ptr, true));
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
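The prof.h changes thread an explicit tsd_t */tsdn_t * context through the profiling entry points, and split prof_prefork() into prof_prefork0()/prof_prefork1() so fork-time lock acquisition can be staged. From application code the profiler is still driven through the public mallctl namespace; the sketch below requests a heap dump. It assumes FreeBSD's <malloc_np.h> prototypes and is only meaningful when jemalloc was built with profiling and the process runs with MALLOC_CONF=prof:true.

#include <stdio.h>
#include <malloc_np.h>

int
main(void)
{
	const char *filename = NULL;	/* NULL lets jemalloc pick a name. */

	if (mallctl("prof.dump", NULL, NULL, &filename, sizeof(filename)) != 0)
		fprintf(stderr, "prof.dump unavailable in this build\n");
	return (0);
}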
diff --git a/contrib/jemalloc/include/jemalloc/internal/rtree.h b/contrib/jemalloc/include/jemalloc/internal/rtree.h
index 28ae9d1..8d0c584 100644
--- a/contrib/jemalloc/include/jemalloc/internal/rtree.h
+++ b/contrib/jemalloc/include/jemalloc/internal/rtree.h
@@ -15,9 +15,10 @@ typedef struct rtree_s rtree_t;
* machine address width.
*/
#define LG_RTREE_BITS_PER_LEVEL 4
-#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
+#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
+/* Maximum rtree height. */
#define RTREE_HEIGHT_MAX \
- ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
+ ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
@@ -111,22 +112,25 @@ unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
-rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
+rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
+ bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
- unsigned level);
+ unsigned level, bool dependent);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
const extent_node_t *val);
-rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
-rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);
+rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
+ bool dependent);
+rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
+ bool dependent);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
-JEMALLOC_INLINE unsigned
+JEMALLOC_ALWAYS_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
unsigned start_level;
@@ -140,7 +144,7 @@ rtree_start_level(rtree_t *rtree, uintptr_t key)
return (start_level);
}
-JEMALLOC_INLINE uintptr_t
+JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
@@ -149,37 +153,40 @@ rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
rtree->levels[level].bits) - 1));
}
-JEMALLOC_INLINE bool
+JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{
return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}
-JEMALLOC_INLINE rtree_node_elm_t *
-rtree_child_tryread(rtree_node_elm_t *elm)
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
{
rtree_node_elm_t *child;
	/* Double-checked read (first read may be stale). */
child = elm->child;
- if (!rtree_node_valid(child))
+ if (!dependent && !rtree_node_valid(child))
child = atomic_read_p(&elm->pun);
+ assert(!dependent || child != NULL);
return (child);
}
-JEMALLOC_INLINE rtree_node_elm_t *
-rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
+ bool dependent)
{
rtree_node_elm_t *child;
- child = rtree_child_tryread(elm);
- if (unlikely(!rtree_node_valid(child)))
+ child = rtree_child_tryread(elm, dependent);
+ if (!dependent && unlikely(!rtree_node_valid(child)))
child = rtree_child_read_hard(rtree, elm, level);
+ assert(!dependent || child != NULL);
return (child);
}
-JEMALLOC_INLINE extent_node_t *
+JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
@@ -208,54 +215,119 @@ rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
atomic_write_p(&elm->pun, val);
}
-JEMALLOC_INLINE rtree_node_elm_t *
-rtree_subtree_tryread(rtree_t *rtree, unsigned level)
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
	/* Double-checked read (first read may be stale). */
subtree = rtree->levels[level].subtree;
- if (!rtree_node_valid(subtree))
+ if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
+ assert(!dependent || subtree != NULL);
return (subtree);
}
-JEMALLOC_INLINE rtree_node_elm_t *
-rtree_subtree_read(rtree_t *rtree, unsigned level)
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
- subtree = rtree_subtree_tryread(rtree, level);
- if (unlikely(!rtree_node_valid(subtree)))
+ subtree = rtree_subtree_tryread(rtree, level, dependent);
+ if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = rtree_subtree_read_hard(rtree, level);
+ assert(!dependent || subtree != NULL);
return (subtree);
}
-JEMALLOC_INLINE extent_node_t *
+JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
- unsigned i, start_level;
- rtree_node_elm_t *node, *child;
+ unsigned start_level;
+ rtree_node_elm_t *node;
start_level = rtree_start_level(rtree, key);
- for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
- /**/; i++, node = child) {
- if (!dependent && unlikely(!rtree_node_valid(node)))
- return (NULL);
- subkey = rtree_subkey(rtree, key, i);
- if (i == rtree->height - 1) {
- /*
- * node is a leaf, so it contains values rather than
- * child pointers.
- */
- return (rtree_val_read(rtree, &node[subkey],
- dependent));
- }
- assert(i < rtree->height - 1);
- child = rtree_child_tryread(&node[subkey]);
+ node = rtree_subtree_tryread(rtree, start_level, dependent);
+#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
+ switch (start_level + RTREE_GET_BIAS) {
+#define RTREE_GET_SUBTREE(level) \
+ case level: \
+ assert(level < (RTREE_HEIGHT_MAX-1)); \
+ if (!dependent && unlikely(!rtree_node_valid(node))) \
+ return (NULL); \
+ subkey = rtree_subkey(rtree, key, level - \
+ RTREE_GET_BIAS); \
+ node = rtree_child_tryread(&node[subkey], dependent); \
+ /* Fall through. */
+#define RTREE_GET_LEAF(level) \
+ case level: \
+ assert(level == (RTREE_HEIGHT_MAX-1)); \
+ if (!dependent && unlikely(!rtree_node_valid(node))) \
+ return (NULL); \
+ subkey = rtree_subkey(rtree, key, level - \
+ RTREE_GET_BIAS); \
+ /* \
+ * node is a leaf, so it contains values rather than \
+ * child pointers. \
+ */ \
+ return (rtree_val_read(rtree, &node[subkey], \
+ dependent));
+#if RTREE_HEIGHT_MAX > 1
+ RTREE_GET_SUBTREE(0)
+#endif
+#if RTREE_HEIGHT_MAX > 2
+ RTREE_GET_SUBTREE(1)
+#endif
+#if RTREE_HEIGHT_MAX > 3
+ RTREE_GET_SUBTREE(2)
+#endif
+#if RTREE_HEIGHT_MAX > 4
+ RTREE_GET_SUBTREE(3)
+#endif
+#if RTREE_HEIGHT_MAX > 5
+ RTREE_GET_SUBTREE(4)
+#endif
+#if RTREE_HEIGHT_MAX > 6
+ RTREE_GET_SUBTREE(5)
+#endif
+#if RTREE_HEIGHT_MAX > 7
+ RTREE_GET_SUBTREE(6)
+#endif
+#if RTREE_HEIGHT_MAX > 8
+ RTREE_GET_SUBTREE(7)
+#endif
+#if RTREE_HEIGHT_MAX > 9
+ RTREE_GET_SUBTREE(8)
+#endif
+#if RTREE_HEIGHT_MAX > 10
+ RTREE_GET_SUBTREE(9)
+#endif
+#if RTREE_HEIGHT_MAX > 11
+ RTREE_GET_SUBTREE(10)
+#endif
+#if RTREE_HEIGHT_MAX > 12
+ RTREE_GET_SUBTREE(11)
+#endif
+#if RTREE_HEIGHT_MAX > 13
+ RTREE_GET_SUBTREE(12)
+#endif
+#if RTREE_HEIGHT_MAX > 14
+ RTREE_GET_SUBTREE(13)
+#endif
+#if RTREE_HEIGHT_MAX > 15
+ RTREE_GET_SUBTREE(14)
+#endif
+#if RTREE_HEIGHT_MAX > 16
+# error Unsupported RTREE_HEIGHT_MAX
+#endif
+ RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
+#undef RTREE_GET_SUBTREE
+#undef RTREE_GET_LEAF
+ default: not_reached();
}
+#undef RTREE_GET_BIAS
not_reached();
}
@@ -268,7 +340,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
start_level = rtree_start_level(rtree, key);
- node = rtree_subtree_read(rtree, start_level);
+ node = rtree_subtree_read(rtree, start_level, false);
if (node == NULL)
return (true);
for (i = start_level; /**/; i++, node = child) {
@@ -282,7 +354,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
return (false);
}
assert(i + 1 < rtree->height);
- child = rtree_child_read(rtree, &node[subkey], i);
+ child = rtree_child_read(rtree, &node[subkey], i, false);
if (child == NULL)
return (true);
}
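The rtree maps chunk addresses to extent nodes by consuming RTREE_BITS_PER_LEVEL key bits per level; the rewritten rtree_get() unrolls that walk into a switch so the per-level arithmetic is resolved at compile time, and the new dependent flag lets lookups that are known to have a valid mapping skip the validity checks. The subkey computation itself is plain shift-and-mask arithmetic; a self-contained sketch with a fixed four bits per level, simplified relative to the real level layout:

#include <stdint.h>
#include <stdio.h>

#define DEMO_BITS_PER_LEVEL	4	/* cf. 1U << LG_RTREE_BITS_PER_LEVEL */
#define DEMO_KEY_BITS		64
#define DEMO_HEIGHT		(DEMO_KEY_BITS / DEMO_BITS_PER_LEVEL)

/* Extract the subkey that indexes level 'level' of the tree. */
static unsigned
demo_subkey(uint64_t key, unsigned level)
{
	unsigned shift = DEMO_KEY_BITS - ((level + 1) * DEMO_BITS_PER_LEVEL);

	return ((unsigned)((key >> shift) & ((1U << DEMO_BITS_PER_LEVEL) - 1)));
}

int
main(void)
{
	uint64_t key = 0x7fffdeadbeef0000ULL;
	unsigned level;

	for (level = 0; level < DEMO_HEIGHT; level++)
		printf("level %2u -> subkey 0x%x\n", level,
		    demo_subkey(key, level));
	return (0);
}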
diff --git a/contrib/jemalloc/include/jemalloc/internal/stats.h b/contrib/jemalloc/include/jemalloc/internal/stats.h
index 705903a..b621817 100644
--- a/contrib/jemalloc/include/jemalloc/internal/stats.h
+++ b/contrib/jemalloc/include/jemalloc/internal/stats.h
@@ -103,6 +103,14 @@ struct arena_stats_s {
size_t mapped;
/*
+ * Number of bytes currently retained as a side effect of munmap() being
+ * disabled/bypassed. Retained bytes are technically mapped (though
+ * always decommitted or purged), but they are excluded from the mapped
+ * statistic (above).
+ */
+ size_t retained;
+
+ /*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
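The new retained counter accounts for virtual memory that jemalloc keeps mapped, but decommitted or purged, rather than returning to the kernel when munmap() is disabled or bypassed; it is deliberately excluded from the mapped statistic so the two can be compared. The counter is exposed through the stats mallctl namespace; the sketch below assumes the stats.retained / stats.arenas.<i>.retained names introduced together with this field, FreeBSD's <malloc_np.h> prototype for mallctl(), and a build with statistics enabled.

#include <stdio.h>
#include <malloc_np.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t mapped, retained, sz;

	/* Refresh the stats snapshot before reading from it. */
	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);

	sz = sizeof(size_t);
	if (mallctl("stats.mapped", &mapped, &sz, NULL, 0) == 0 &&
	    mallctl("stats.retained", &retained, &sz, NULL, 0) == 0)
		printf("mapped=%zu retained=%zu\n", mapped, retained);
	return (0);
}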
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache.h b/contrib/jemalloc/include/jemalloc/internal/tcache.h
index 8357820..70883b1 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tcache.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache.h
@@ -130,27 +130,25 @@ extern size_t tcache_maxclass;
*/
extern tcaches_t *tcaches;
-size_t tcache_salloc(const void *ptr);
+size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
-void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
-void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
- arena_t *newarena);
-void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
+void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
+ arena_t *oldarena, arena_t *newarena);
tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
+tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
-bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
+void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+bool tcaches_create(tsdn_t *tsdn, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(void);
+bool tcache_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -297,8 +295,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(arena == NULL))
return (NULL);
- ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind,
- &tcache_hard_success);
+ ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
+ tbin, binind, &tcache_hard_success);
if (tcache_hard_success == false)
return (NULL);
}
@@ -310,7 +308,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
*/
if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
usize = index2size(binind);
- assert(tcache_salloc(ret) == usize);
+ assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
}
if (likely(!zero)) {
@@ -358,7 +356,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(arena == NULL))
return (NULL);
- ret = arena_malloc_large(tsd, arena, binind, zero);
+ ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
if (ret == NULL)
return (NULL);
} else {
@@ -381,9 +379,10 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
if (likely(!zero)) {
if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc))
- memset(ret, 0xa5, usize);
- else if (unlikely(opt_zero))
+ if (unlikely(opt_junk_alloc)) {
+ memset(ret, JEMALLOC_ALLOC_JUNK,
+ usize);
+ } else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
} else
@@ -406,7 +405,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
- assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
if (slow_path && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
@@ -433,8 +432,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
- assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
- assert(tcache_salloc(ptr) <= tcache_maxclass);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
binind = size2index(size);
@@ -458,8 +457,10 @@ JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
tcaches_t *elm = &tcaches[ind];
- if (unlikely(elm->tcache == NULL))
- elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
+ if (unlikely(elm->tcache == NULL)) {
+ elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
+ NULL));
+ }
return (elm->tcache);
}
#endif
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd.h b/contrib/jemalloc/include/jemalloc/internal/tsd.h
index 16cc2f1..bf11341 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tsd.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd.h
@@ -13,6 +13,9 @@ typedef struct tsd_init_head_s tsd_init_head_t;
#endif
typedef struct tsd_s tsd_t;
+typedef struct tsdn_s tsdn_t;
+
+#define TSDN_NULL ((tsdn_t *)0)
typedef enum {
tsd_state_uninitialized,
@@ -44,6 +47,7 @@ typedef enum {
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
+ * bool example_tsd_booted_get(void) {...}
* example_t *example_tsd_get() {...}
* void example_tsd_set(example_t *val) {...}
*
@@ -98,6 +102,8 @@ a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
+a_attr bool \
+a_name##tsd_booted_get(void); \
a_attr a_type * \
a_name##tsd_get(void); \
a_attr void \
@@ -201,6 +207,12 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@@ -246,6 +258,12 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@@ -368,6 +386,12 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@@ -490,6 +514,12 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@@ -536,12 +566,15 @@ struct tsd_init_head_s {
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
+ O(iarena, arena_t *) \
O(arena, arena_t *) \
O(arenas_tdata, arena_tdata_t *) \
O(narenas_tdata, unsigned) \
O(arenas_tdata_bypass, bool) \
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
+ O(witnesses, witness_list_t) \
+ O(witness_fork, bool) \
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
@@ -551,10 +584,13 @@ struct tsd_init_head_s {
NULL, \
NULL, \
NULL, \
+ NULL, \
0, \
false, \
tcache_enabled_default, \
- NULL \
+ NULL, \
+ ql_head_initializer(witnesses), \
+ false \
}
struct tsd_s {
@@ -565,6 +601,15 @@ MALLOC_TSD
#undef O
};
+/*
+ * Wrapper around tsd_t that makes it possible to avoid implicit conversion
+ * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
+ * explicitly converted to tsd_t, which is non-nullable.
+ */
+struct tsdn_s {
+ tsd_t tsd;
+};
+
static const tsd_t tsd_initializer = TSD_INITIALIZER;
malloc_tsd_types(, tsd_t)
@@ -577,7 +622,7 @@ void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
-bool malloc_tsd_boot0(void);
+tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
@@ -595,6 +640,7 @@ void tsd_cleanup(void *arg);
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
tsd_t *tsd_fetch(void);
+tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
@@ -602,6 +648,9 @@ t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
+tsdn_t *tsdn_fetch(void);
+bool tsdn_null(const tsdn_t *tsdn);
+tsd_t *tsdn_tsd(tsdn_t *tsdn);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
@@ -628,6 +677,13 @@ tsd_fetch(void)
return (tsd);
}
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsd_tsdn(tsd_t *tsd)
+{
+
+ return ((tsdn_t *)tsd);
+}
+
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
@@ -659,6 +715,32 @@ tsd_##n##_set(tsd_t *tsd, t n) \
}
MALLOC_TSD
#undef O
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsdn_fetch(void)
+{
+
+ if (!tsd_booted_get())
+ return (NULL);
+
+ return (tsd_tsdn(tsd_fetch()));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsdn_null(const tsdn_t *tsdn)
+{
+
+ return (tsdn == NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsdn_tsd(tsdn_t *tsdn)
+{
+
+ assert(!tsdn_null(tsdn));
+
+ return (&tsdn->tsd);
+}
#endif
#endif /* JEMALLOC_H_INLINES */
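The tsdn_t wrapper added here formalizes "thread-specific data that may not exist yet": tsdn_fetch() returns NULL until TSD has booted, tsd_tsdn() converts a known-good tsd_t, and tsdn_tsd() asserts before converting back, so code lower in the stack takes tsdn_t * and handles the NULL case explicitly instead of silently assuming initialization. A self-contained miniature of the same nullable/non-nullable split (all demo_ names are invented for illustration):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct { unsigned allocs; } demo_ctx_t;	/* plays the role of tsd_t */
typedef struct { demo_ctx_t ctx; } demo_ctxn_t;	/* nullable wrapper, cf. tsdn_t */

static bool demo_booted;
static demo_ctxn_t demo_ctx_global;

static demo_ctxn_t *
demo_ctxn_fetch(void)
{

	/* Before boot there is no context; callers must tolerate NULL. */
	return (demo_booted ? &demo_ctx_global : NULL);
}

static demo_ctx_t *
demo_ctxn_ctx(demo_ctxn_t *ctxn)
{

	assert(ctxn != NULL);	/* explicit, checked unwrap */
	return (&ctxn->ctx);
}

static void
demo_account(demo_ctxn_t *ctxn)
{

	if (ctxn == NULL)
		return;		/* bootstrap path: nothing to update */
	demo_ctxn_ctx(ctxn)->allocs++;
}

int
main(void)
{

	demo_account(demo_ctxn_fetch());	/* pre-boot call is skipped */
	demo_booted = true;
	demo_account(demo_ctxn_fetch());
	printf("allocs=%u\n", demo_ctx_global.ctx.allocs);
	return (0);
}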
diff --git a/contrib/jemalloc/include/jemalloc/internal/util.h b/contrib/jemalloc/include/jemalloc/internal/util.h
index b8885bf..a0c2203 100644
--- a/contrib/jemalloc/include/jemalloc/internal/util.h
+++ b/contrib/jemalloc/include/jemalloc/internal/util.h
@@ -40,6 +40,10 @@
*/
#define MALLOC_PRINTF_BUFSIZE 4096
+/* Junk fill patterns. */
+#define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
+#define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
+
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
@@ -73,12 +77,12 @@
JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
# define unreachable() __builtin_unreachable()
# else
-# define unreachable()
+# define unreachable() abort()
# endif
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
-# define unreachable()
+# define unreachable() abort()
#endif
#include "jemalloc/internal/assert.h"
@@ -106,9 +110,9 @@ void malloc_write(const char *s);
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
-int malloc_vsnprintf(char *str, size_t size, const char *format,
+size_t malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
-int malloc_snprintf(char *str, size_t size, const char *format, ...)
+size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
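The named junk-fill constants replace the bare 0xa5/0x5a magic numbers used throughout the allocator: with junk filling enabled, new allocations are filled with JEMALLOC_ALLOC_JUNK and freed memory with JEMALLOC_FREE_JUNK, so reads of uninitialized or freed memory surface as recognizable byte patterns (the unreachable() change similarly turns an impossible code path into a hard abort() instead of undefined behavior). A tiny illustration of the pattern idea, using invented demo_ names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_ALLOC_JUNK	((uint8_t)0xa5)
#define DEMO_FREE_JUNK	((uint8_t)0x5a)

int
main(void)
{
	uint8_t buf[16];

	memset(buf, DEMO_ALLOC_JUNK, sizeof(buf));	/* "allocated" */
	memset(buf, DEMO_FREE_JUNK, sizeof(buf));	/* "freed" */

	/* A stray read after free now yields the telltale 0x5a pattern. */
	printf("buf[0]=0x%02x%s\n", buf[0],
	    buf[0] == DEMO_FREE_JUNK ? " (looks freed)" : "");
	return (0);
}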
diff --git a/contrib/jemalloc/include/jemalloc/internal/valgrind.h b/contrib/jemalloc/include/jemalloc/internal/valgrind.h
index a3380df..1a86808 100644
--- a/contrib/jemalloc/include/jemalloc/internal/valgrind.h
+++ b/contrib/jemalloc/include/jemalloc/internal/valgrind.h
@@ -30,15 +30,17 @@
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
- if (unlikely(in_valgrind && cond)) \
- VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
+#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
+ if (unlikely(in_valgrind && cond)) { \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
+ zero); \
+ } \
} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do { \
if (unlikely(in_valgrind)) { \
- size_t rzsize = p2rz(ptr); \
+ size_t rzsize = p2rz(tsdn, ptr); \
\
if (!maybe_moved || ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
@@ -81,8 +83,8 @@
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
+#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
diff --git a/contrib/jemalloc/include/jemalloc/internal/witness.h b/contrib/jemalloc/include/jemalloc/internal/witness.h
new file mode 100644
index 0000000..d78dca2
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/witness.h
@@ -0,0 +1,249 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct witness_s witness_t;
+typedef unsigned witness_rank_t;
+typedef ql_head(witness_t) witness_list_t;
+typedef int witness_comp_t (const witness_t *, const witness_t *);
+
+/*
+ * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
+ * the witness machinery.
+ */
+#define WITNESS_RANK_OMIT 0U
+
+#define WITNESS_RANK_INIT 1U
+#define WITNESS_RANK_CTL 1U
+#define WITNESS_RANK_ARENAS 2U
+
+#define WITNESS_RANK_PROF_DUMP 3U
+#define WITNESS_RANK_PROF_BT2GCTX 4U
+#define WITNESS_RANK_PROF_TDATAS 5U
+#define WITNESS_RANK_PROF_TDATA 6U
+#define WITNESS_RANK_PROF_GCTX 7U
+
+#define WITNESS_RANK_ARENA 8U
+#define WITNESS_RANK_ARENA_CHUNKS 9U
+#define WITNESS_RANK_ARENA_NODE_CACHE 10
+
+#define WITNESS_RANK_BASE 11U
+
+#define WITNESS_RANK_LEAF 0xffffffffU
+#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
+#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
+#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
+
+#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct witness_s {
+ /* Name, used for printing lock order reversal messages. */
+ const char *name;
+
+ /*
+ * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
+ * must be acquired in order of increasing rank.
+ */
+ witness_rank_t rank;
+
+ /*
+	 * If two witnesses are of equal rank and they have the same comp
+ * function pointer, it is called as a last attempt to differentiate
+ * between witnesses of equal rank.
+ */
+ witness_comp_t *comp;
+
+ /* Linkage for thread's currently owned locks. */
+ ql_elm(witness_t) link;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp);
+#ifdef JEMALLOC_JET
+typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
+extern witness_lock_error_t *witness_lock_error;
+#else
+void witness_lock_error(const witness_list_t *witnesses,
+ const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_owner_error_t)(const witness_t *);
+extern witness_owner_error_t *witness_owner_error;
+#else
+void witness_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_not_owner_error_t)(const witness_t *);
+extern witness_not_owner_error_t *witness_not_owner_error;
+#else
+void witness_not_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_lockless_error_t)(const witness_list_t *);
+extern witness_lockless_error_t *witness_lockless_error;
+#else
+void witness_lockless_error(const witness_list_t *witnesses);
+#endif
+
+void witnesses_cleanup(tsd_t *tsd);
+void witness_fork_cleanup(tsd_t *tsd);
+void witness_prefork(tsd_t *tsd);
+void witness_postfork_parent(tsd_t *tsd);
+void witness_postfork_child(tsd_t *tsd);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_lockless(tsdn_t *tsdn);
+void witness_lock(tsdn_t *tsdn, witness_t *witness);
+void witness_unlock(tsdn_t *tsdn, witness_t *witness);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
+JEMALLOC_INLINE void
+witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_foreach(w, witnesses, link) {
+ if (w == witness)
+ return;
+ }
+ witness_owner_error(witness);
+}
+
+JEMALLOC_INLINE void
+witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_foreach(w, witnesses, link) {
+ if (w == witness)
+ witness_not_owner_error(witness);
+ }
+}
+
+JEMALLOC_INLINE void
+witness_assert_lockless(tsdn_t *tsdn)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+
+ witnesses = tsd_witnessesp_get(tsd);
+ w = ql_last(witnesses, link);
+ if (w != NULL)
+ witness_lockless_error(witnesses);
+}
+
+JEMALLOC_INLINE void
+witness_lock(tsdn_t *tsdn, witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witness_assert_not_owner(tsdn, witness);
+
+ witnesses = tsd_witnessesp_get(tsd);
+ w = ql_last(witnesses, link);
+ if (w == NULL) {
+ /* No other locks; do nothing. */
+ } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
+ /* Forking, and relaxed ranking satisfied. */
+ } else if (w->rank > witness->rank) {
+ /* Not forking, rank order reversal. */
+ witness_lock_error(witnesses, witness);
+ } else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
+ witness->comp || w->comp(w, witness) > 0)) {
+ /*
+ * Missing/incompatible comparison function, or comparison
+ * function indicates rank order reversal.
+ */
+ witness_lock_error(witnesses, witness);
+ }
+
+ ql_elm_new(witness, link);
+ ql_tail_insert(witnesses, witness, link);
+}
+
+JEMALLOC_INLINE void
+witness_unlock(tsdn_t *tsdn, witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witness_assert_owner(tsdn, witness);
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_remove(witnesses, witness, link);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
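witness.h is new in this import. Every internal mutex is paired with a witness carrying a numeric rank; each thread tracks the witnesses it currently owns, and acquiring a lock whose rank is below the most recently acquired one (or equal to it without a tie-breaking comparison function) raises a lock-order-reversal error, with relaxed rules while forking. The self-contained miniature below captures just the core rank check with a simple stack of held locks; it omits the per-thread storage, comparison functions, and fork handling of the real code.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	const char	*name;
	unsigned	rank;	/* acquire in non-decreasing rank order */
} demo_witness_t;

#define DEMO_MAX_HELD	8
static demo_witness_t *demo_held[DEMO_MAX_HELD];
static unsigned demo_nheld;

static void
demo_witness_lock(demo_witness_t *w)
{

	if (demo_nheld > 0 && demo_held[demo_nheld - 1]->rank > w->rank) {
		fprintf(stderr, "lock order reversal: %s (rank %u) after %s "
		    "(rank %u)\n", w->name, w->rank,
		    demo_held[demo_nheld - 1]->name,
		    demo_held[demo_nheld - 1]->rank);
		abort();
	}
	assert(demo_nheld < DEMO_MAX_HELD);
	demo_held[demo_nheld++] = w;
}

static void
demo_witness_unlock(demo_witness_t *w)
{

	assert(demo_nheld > 0 && demo_held[demo_nheld - 1] == w);
	demo_nheld--;
}

int
main(void)
{
	demo_witness_t arenas = { "arenas", 2 };	/* cf. WITNESS_RANK_ARENAS */
	demo_witness_t arena = { "arena", 8 };		/* cf. WITNESS_RANK_ARENA */

	demo_witness_lock(&arenas);	/* lower rank first: accepted */
	demo_witness_lock(&arena);	/* higher rank second: accepted */
	demo_witness_unlock(&arena);
	demo_witness_unlock(&arenas);
	printf("rank-ordered acquisition OK\n");
	return (0);
}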
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc.h b/contrib/jemalloc/include/jemalloc/jemalloc.h
index 2d0825a..394699f 100644
--- a/contrib/jemalloc/include/jemalloc/jemalloc.h
+++ b/contrib/jemalloc/include/jemalloc/jemalloc.h
@@ -87,20 +87,20 @@ extern "C" {
#include <limits.h>
#include <strings.h>
-#define JEMALLOC_VERSION "4.1.0-1-g994da4232621dd1210fcf39bdf0d6454cefda473"
+#define JEMALLOC_VERSION "4.2.0-1-gdc7ff6306d7a15b53479e2fb8e5546404b82e6fc"
#define JEMALLOC_VERSION_MAJOR 4
-#define JEMALLOC_VERSION_MINOR 1
+#define JEMALLOC_VERSION_MINOR 2
#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 1
-#define JEMALLOC_VERSION_GID "994da4232621dd1210fcf39bdf0d6454cefda473"
+#define JEMALLOC_VERSION_GID "dc7ff6306d7a15b53479e2fb8e5546404b82e6fc"
# define MALLOCX_LG_ALIGN(la) ((int)(la))
# if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs(a)-1))
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
# else
# define MALLOCX_ALIGN(a) \
- ((int)(((a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)((a)>>32))+31))
+ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+ ffs((int)(((size_t)(a))>>32))+31))
# endif
# define MALLOCX_ZERO ((int)0x40)
/*
@@ -112,7 +112,7 @@ extern "C" {
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
-# define MALLOCX_ARENA(a) ((int)(((a)+1) << 20))
+# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
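MALLOCX_ALIGN() and MALLOCX_ARENA() are flag builders for the public mallocx() family; the added casts let both macros accept arguments of any integer type without relying on implicit conversions. A short usage sketch, assuming FreeBSD's <malloc_np.h> declarations of mallocx(), sallocx() and dallocx():

#include <stdio.h>
#include <malloc_np.h>

int
main(void)
{
	/* 4 KiB of zeroed memory, aligned to a 64-byte boundary. */
	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);

	if (p == NULL)
		return (1);
	printf("p=%p usable size=%zu\n", p, sallocx(p, 0));
	dallocx(p, 0);
	return (0);
}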
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
index 8e4b042..c605bcd 100644
--- a/contrib/jemalloc/src/arena.c
+++ b/contrib/jemalloc/src/arena.c
@@ -37,11 +37,12 @@ static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */
* definition.
*/
-static void arena_purge_to_limit(arena_t *arena, size_t ndirty_limit);
-static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned, bool decommitted);
-static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
+static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
+ size_t ndirty_limit);
+static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
+ bool dirty, bool cleaned, bool decommitted);
+static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
@@ -72,9 +73,9 @@ arena_run_addr_comp(const arena_chunk_map_misc_t *a,
return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
- rb_link, arena_run_addr_comp)
+/* Generate pairing heap functions. */
+ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
+ ph_link, arena_run_addr_comp)
static size_t
run_quantize_floor_compute(size_t size)
@@ -155,7 +156,7 @@ run_quantize_ceil_compute(size_t size)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
-#define run_quantize_floor JEMALLOC_N(run_quantize_floor_impl)
+#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
@@ -173,12 +174,12 @@ run_quantize_floor(size_t size)
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
-run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl);
+run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
-#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl)
+#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
@@ -196,10 +197,10 @@ run_quantize_ceil(size_t size)
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
-run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl);
+run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif
-static arena_run_tree_t *
+static arena_run_heap_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{
@@ -214,11 +215,11 @@ arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get(chunk, pageind))));
+ arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
- arena_run_tree_insert(arena_runs_avail_get(arena, ind),
- arena_miscelm_get(chunk, pageind));
+ arena_run_heap_insert(arena_runs_avail_get(arena, ind),
+ arena_miscelm_get_mutable(chunk, pageind));
}
static void
@@ -226,18 +227,19 @@ arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
- arena_miscelm_get(chunk, pageind))));
+ arena_miscelm_get_const(chunk, pageind))));
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
- arena_run_tree_remove(arena_runs_avail_get(arena, ind),
- arena_miscelm_get(chunk, pageind));
+ arena_run_heap_remove(arena_runs_avail_get(arena, ind),
+ arena_miscelm_get_mutable(chunk, pageind));
}
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
@@ -254,7 +256,8 @@ static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
size_t npages)
{
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
LG_PAGE));
@@ -589,7 +592,8 @@ arena_chunk_init_spare(arena_t *arena)
}
static bool
-arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
+arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ bool zero)
{
/*
@@ -600,62 +604,63 @@ arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
*/
extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
extent_node_achunk_set(&chunk->node, true);
- return (chunk_register(chunk, &chunk->node));
+ return (chunk_register(tsdn, chunk, &chunk->node));
}
static arena_chunk_t *
-arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
- bool *zero, bool *commit)
+arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
arena_chunk_t *chunk;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
- chunksize, chunksize, zero, commit);
+ chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
+ NULL, chunksize, chunksize, zero, commit);
if (chunk != NULL && !*commit) {
/* Commit header. */
if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind)) {
- chunk_dalloc_wrapper(arena, chunk_hooks,
- (void *)chunk, chunksize, *commit);
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
+ (void *)chunk, chunksize, *zero, *commit);
chunk = NULL;
}
}
- if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
+ if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
if (!*commit) {
/* Undo commit of header. */
chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
LG_PAGE, arena->ind);
}
- chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
- chunksize, *commit);
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
+ chunksize, *zero, *commit);
chunk = NULL;
}
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
return (chunk);
}
static arena_chunk_t *
-arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
+arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
+ bool *commit)
{
arena_chunk_t *chunk;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
+ chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
chunksize, zero, true);
if (chunk != NULL) {
- if (arena_chunk_register(arena, chunk, *zero)) {
- chunk_dalloc_cache(arena, &chunk_hooks, chunk,
+ if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
chunksize, true);
return (NULL);
}
*commit = true;
}
if (chunk == NULL) {
- chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
- zero, commit);
+ chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
+ &chunk_hooks, zero, commit);
}
if (config_stats && chunk != NULL) {
@@ -667,7 +672,7 @@ arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
}
static arena_chunk_t *
-arena_chunk_init_hard(arena_t *arena)
+arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
arena_chunk_t *chunk;
bool zero, commit;
@@ -677,14 +682,14 @@ arena_chunk_init_hard(arena_t *arena)
zero = false;
commit = false;
- chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
+ chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
if (chunk == NULL)
return (NULL);
/*
* Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
- * chunk.
+ * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
+ * or decommitted chunk.
*/
flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
@@ -696,17 +701,18 @@ arena_chunk_init_hard(arena_t *arena)
*/
if (!zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
- (void *)arena_bitselm_get(chunk, map_bias+1),
- (size_t)((uintptr_t) arena_bitselm_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
- map_bias+1)));
+ (void *)arena_bitselm_get_const(chunk, map_bias+1),
+ (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
+ chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
for (i = map_bias+1; i < chunk_npages-1; i++)
arena_mapbits_internal_set(chunk, i, flag_unzeroed);
} else {
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
- *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
- arena_bitselm_get(chunk, chunk_npages-1) -
- (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
+ *)arena_bitselm_get_const(chunk, map_bias+1),
+ (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
+ chunk_npages-1) -
+ (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
@@ -721,26 +727,73 @@ arena_chunk_init_hard(arena_t *arena)
}
static arena_chunk_t *
-arena_chunk_alloc(arena_t *arena)
+arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
arena_chunk_t *chunk;
if (arena->spare != NULL)
chunk = arena_chunk_init_spare(arena);
else {
- chunk = arena_chunk_init_hard(arena);
+ chunk = arena_chunk_init_hard(tsdn, arena);
if (chunk == NULL)
return (NULL);
}
+ ql_elm_new(&chunk->node, ql_link);
+ ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
return (chunk);
}
static void
-arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
+arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
+ bool committed;
+ chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
+
+ chunk_deregister(chunk, &chunk->node);
+
+ committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
+ if (!committed) {
+ /*
+ * Decommit the header. Mark the chunk as decommitted even if
+ * header decommit fails, since treating a partially committed
+ * chunk as committed has a high potential for causing later
+ * access of decommitted memory.
+ */
+ chunk_hooks = chunk_hooks_get(tsdn, arena);
+ chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
+ arena->ind);
+ }
+
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
+ committed);
+
+ if (config_stats) {
+ arena->stats.mapped -= chunksize;
+ arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
+ }
+}
+
+static void
+arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
+{
+
+ assert(arena->spare != spare);
+
+ if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
+ arena_run_dirty_remove(arena, spare, map_bias,
+ chunk_npages-map_bias);
+ }
+
+ arena_chunk_discard(tsdn, arena, spare);
+}
+
+static void
+arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
+{
+ arena_chunk_t *spare;
assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
@@ -756,43 +809,11 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
/* Remove run from runs_avail, so that the arena does not use it. */
arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
- if (arena->spare != NULL) {
- arena_chunk_t *spare = arena->spare;
- chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
- bool committed;
-
- arena->spare = chunk;
- if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
- arena_run_dirty_remove(arena, spare, map_bias,
- chunk_npages-map_bias);
- }
-
- chunk_deregister(spare, &spare->node);
-
- committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
- 0);
- if (!committed) {
- /*
- * Decommit the header. Mark the chunk as decommitted
- * even if header decommit fails, since treating a
- * partially committed chunk as committed has a high
- * potential for causing later access of decommitted
- * memory.
- */
- chunk_hooks = chunk_hooks_get(arena);
- chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
- LG_PAGE, arena->ind);
- }
-
- chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
- chunksize, committed);
-
- if (config_stats) {
- arena->stats.mapped -= chunksize;
- arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
- }
- } else
- arena->spare = chunk;
+ ql_remove(&arena->achunks, &chunk->node, ql_link);
+ spare = arena->spare;
+ arena->spare = chunk;
+ if (spare != NULL)
+ arena_spare_discard(tsdn, arena, spare);
}
static void
@@ -835,6 +856,17 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
}
static void
+arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
+{
+ szind_t index = size2index(usize) - nlclasses - NBINS;
+
+ cassert(config_stats);
+
+ arena->stats.ndalloc_huge++;
+ arena->stats.hstats[index].ndalloc--;
+}
+
+static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
szind_t index = size2index(usize) - nlclasses - NBINS;
@@ -865,63 +897,64 @@ arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
}
extent_node_t *
-arena_node_alloc(arena_t *arena)
+arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
extent_node_t *node;
- malloc_mutex_lock(&arena->node_cache_mtx);
+ malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
node = ql_last(&arena->node_cache, ql_link);
if (node == NULL) {
- malloc_mutex_unlock(&arena->node_cache_mtx);
- return (base_alloc(sizeof(extent_node_t)));
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
+ return (base_alloc(tsdn, sizeof(extent_node_t)));
}
ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
- malloc_mutex_unlock(&arena->node_cache_mtx);
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
return (node);
}
void
-arena_node_dalloc(arena_t *arena, extent_node_t *node)
+arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{
- malloc_mutex_lock(&arena->node_cache_mtx);
+ malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->node_cache, node, ql_link);
- malloc_mutex_unlock(&arena->node_cache_mtx);
+ malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}
static void *
-arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
- size_t usize, size_t alignment, bool *zero, size_t csize)
+arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
+ size_t csize)
{
void *ret;
bool commit = true;
- ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
- zero, &commit);
+ ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
+ alignment, zero, &commit);
if (ret == NULL) {
/* Revert optimistic stats updates. */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_malloc_stats_update_undo(arena, usize);
arena->stats.mapped -= usize;
}
arena_nactive_sub(arena, usize >> LG_PAGE);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
return (ret);
}
void *
-arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
- bool *zero)
+arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero)
{
void *ret;
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize = CHUNK_CEILING(usize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
/* Optimistically update stats. */
if (config_stats) {
@@ -930,61 +963,61 @@ arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
}
arena_nactive_add(arena, usize >> LG_PAGE);
- ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
- zero, true);
- malloc_mutex_unlock(&arena->lock);
+ ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
+ alignment, zero, true);
+ malloc_mutex_unlock(tsdn, &arena->lock);
if (ret == NULL) {
- ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
- alignment, zero, csize);
+ ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
+ usize, alignment, zero, csize);
}
return (ret);
}
void
-arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
+arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
{
chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
size_t csize;
csize = CHUNK_CEILING(usize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_dalloc_stats_update(arena, usize);
arena->stats.mapped -= usize;
}
arena_nactive_sub(arena, usize >> LG_PAGE);
- chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
- malloc_mutex_unlock(&arena->lock);
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
void
-arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
- size_t usize)
+arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize)
{
assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
assert(oldsize != usize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats)
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (oldsize < usize)
arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
else
arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
void
-arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
- size_t usize)
+arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize)
{
size_t udiff = oldsize - usize;
size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update(arena, oldsize, usize);
if (cdiff != 0)
@@ -997,51 +1030,52 @@ arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
void *nchunk = (void *)((uintptr_t)chunk +
CHUNK_CEILING(usize));
- chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
+ chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ true);
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
static bool
-arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
- size_t udiff, size_t cdiff)
+arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
+ bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
bool err;
bool commit = true;
- err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
- zero, &commit) == NULL);
+ err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
+ chunksize, zero, &commit) == NULL);
if (err) {
/* Revert optimistic stats updates. */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena_huge_ralloc_stats_update_undo(arena, oldsize,
usize);
arena->stats.mapped -= cdiff;
}
arena_nactive_sub(arena, udiff >> LG_PAGE);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
- chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
- true);
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
+ *zero, true);
err = true;
}
return (err);
}
bool
-arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
- size_t usize, bool *zero)
+arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t oldsize, size_t usize, bool *zero)
{
bool err;
- chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
size_t udiff = usize - oldsize;
size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
/* Optimistically update stats. */
if (config_stats) {
@@ -1050,17 +1084,17 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
}
arena_nactive_add(arena, udiff >> LG_PAGE);
- err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
+ err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
chunksize, zero, true) == NULL);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
if (err) {
- err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
- chunk, oldsize, usize, zero, nchunk, udiff,
+ err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
+ &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
cdiff);
} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
cdiff, true, arena->ind)) {
- chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
- true);
+ chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
+ *zero, true);
err = true;
}
@@ -1079,7 +1113,7 @@ arena_run_first_best_fit(arena_t *arena, size_t size)
ind = size2index(run_quantize_ceil(size));
for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
- arena_chunk_map_misc_t *miscelm = arena_run_tree_first(
+ arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
arena_runs_avail_get(arena, i));
if (miscelm != NULL)
return (&miscelm->run);
@@ -1100,7 +1134,7 @@ arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
}
static arena_run_t *
-arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
+arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1116,9 +1150,9 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(arena);
+ chunk = arena_chunk_alloc(tsdn, arena);
if (chunk != NULL) {
- run = &arena_miscelm_get(chunk, map_bias)->run;
+ run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
if (arena_run_split_large(arena, run, size, zero))
run = NULL;
return (run);
@@ -1144,7 +1178,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
}
static arena_run_t *
-arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
+arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1161,9 +1195,9 @@ arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
- chunk = arena_chunk_alloc(arena);
+ chunk = arena_chunk_alloc(tsdn, arena);
if (chunk != NULL) {
- run = &arena_miscelm_get(chunk, map_bias)->run;
+ run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
if (arena_run_split_small(arena, run, size, binind))
run = NULL;
return (run);
@@ -1186,28 +1220,28 @@ arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
}
ssize_t
-arena_lg_dirty_mult_get(arena_t *arena)
+arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
ssize_t lg_dirty_mult;
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
lg_dirty_mult = arena->lg_dirty_mult;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (lg_dirty_mult);
}
bool
-arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
+arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{
if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
return (true);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
arena->lg_dirty_mult = lg_dirty_mult;
- arena_maybe_purge(arena);
- malloc_mutex_unlock(&arena->lock);
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
@@ -1265,7 +1299,7 @@ arena_decay_backlog_npages_limit(const arena_t *arena)
sum = 0;
for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
sum += arena->decay_backlog[i] * h_steps[i];
- npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);
+ npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
return (npages_limit_backlog);
}
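/*
 * Illustrative sketch (not part of this patch): the backlog limit above is a
 * fixed-point weighted sum over the per-epoch dirty-page backlog.  The names
 * DEMO_* and demo_* below are hypothetical stand-ins for SMOOTHSTEP_NSTEPS,
 * SMOOTHSTEP_BFP and h_steps[]; the arithmetic mirrors the code above.
 */
#include <stddef.h>
#include <stdint.h>

#define DEMO_NSTEPS	4	/* stand-in for SMOOTHSTEP_NSTEPS */
#define DEMO_BFP	24	/* stand-in for SMOOTHSTEP_BFP (fixed-point bits) */

/* Stand-in coefficients, scaled by 2^DEMO_BFP. */
static const uint64_t demo_h_steps[DEMO_NSTEPS] = {
	1 << 22, 1 << 23, 3 << 22, 1 << 24
};

static size_t
demo_backlog_npages_limit(const size_t backlog[DEMO_NSTEPS])
{
	uint64_t sum = 0;
	unsigned i;

	/* Weight each epoch's dirty-page count by its coefficient. */
	for (i = 0; i < DEMO_NSTEPS; i++)
		sum += (uint64_t)backlog[i] * demo_h_steps[i];
	/* Drop the fixed-point scale to obtain a page count. */
	return ((size_t)(sum >> DEMO_BFP));
}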
@@ -1273,7 +1307,7 @@ arena_decay_backlog_npages_limit(const arena_t *arena)
static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
- uint64_t nadvance;
+ uint64_t nadvance_u64;
nstime_t delta;
size_t ndirty_delta;
@@ -1282,27 +1316,31 @@ arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
nstime_copy(&delta, time);
nstime_subtract(&delta, &arena->decay_epoch);
- nadvance = nstime_divide(&delta, &arena->decay_interval);
- assert(nadvance > 0);
+ nadvance_u64 = nstime_divide(&delta, &arena->decay_interval);
+ assert(nadvance_u64 > 0);
- /* Add nadvance decay intervals to epoch. */
+ /* Add nadvance_u64 decay intervals to epoch. */
nstime_copy(&delta, &arena->decay_interval);
- nstime_imultiply(&delta, nadvance);
+ nstime_imultiply(&delta, nadvance_u64);
nstime_add(&arena->decay_epoch, &delta);
/* Set a new deadline. */
arena_decay_deadline_init(arena);
/* Update the backlog. */
- if (nadvance >= SMOOTHSTEP_NSTEPS) {
+ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
sizeof(size_t));
} else {
- memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
- (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
- if (nadvance > 1) {
+ size_t nadvance_z = (size_t)nadvance_u64;
+
+ assert((uint64_t)nadvance_z == nadvance_u64);
+
+ memmove(arena->decay_backlog, &arena->decay_backlog[nadvance_z],
+ (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+ if (nadvance_z > 1) {
memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
- nadvance], 0, (nadvance-1) * sizeof(size_t));
+ nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
}
}
ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
@@ -1360,25 +1398,25 @@ arena_decay_time_valid(ssize_t decay_time)
}
ssize_t
-arena_decay_time_get(arena_t *arena)
+arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
ssize_t decay_time;
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
decay_time = arena->decay_time;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (decay_time);
}
bool
-arena_decay_time_set(arena_t *arena, ssize_t decay_time)
+arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{
if (!arena_decay_time_valid(decay_time))
return (true);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
/*
* Restart decay backlog from scratch, which may cause many dirty pages
* to be immediately purged. It would conceptually be possible to map
@@ -1388,14 +1426,14 @@ arena_decay_time_set(arena_t *arena, ssize_t decay_time)
* arbitrary change during initial arena configuration.
*/
arena_decay_init(arena, decay_time);
- arena_maybe_purge(arena);
- malloc_mutex_unlock(&arena->lock);
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
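/*
 * Usage sketch (assumed mallctl name, not part of this patch): the decay
 * time installed by arena_decay_time_set() is typically driven from an
 * "arena.<i>.decay_time" control, along the lines of:
 */
#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

static int
demo_set_decay_time(unsigned arena_ind, ssize_t decay_time)
{
	char name[64];

	/* Seconds before dirty pages decay; 0 purges eagerly, -1 disables. */
	snprintf(name, sizeof(name), "arena.%u.decay_time", arena_ind);
	return (mallctl(name, NULL, NULL, &decay_time, sizeof(decay_time)));
}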
static void
-arena_maybe_purge_ratio(arena_t *arena)
+arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{
assert(opt_purge == purge_mode_ratio);
@@ -1418,12 +1456,12 @@ arena_maybe_purge_ratio(arena_t *arena)
*/
if (arena->ndirty <= threshold)
return;
- arena_purge_to_limit(arena, threshold);
+ arena_purge_to_limit(tsdn, arena, threshold);
}
}
static void
-arena_maybe_purge_decay(arena_t *arena)
+arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
nstime_t time;
size_t ndirty_limit;
@@ -1433,7 +1471,7 @@ arena_maybe_purge_decay(arena_t *arena)
/* Purge all or nothing if the option is disabled. */
if (arena->decay_time <= 0) {
if (arena->decay_time == 0)
- arena_purge_to_limit(arena, 0);
+ arena_purge_to_limit(tsdn, arena, 0);
return;
}
@@ -1454,11 +1492,11 @@ arena_maybe_purge_decay(arena_t *arena)
*/
if (arena->ndirty <= ndirty_limit)
return;
- arena_purge_to_limit(arena, ndirty_limit);
+ arena_purge_to_limit(tsdn, arena, ndirty_limit);
}
void
-arena_maybe_purge(arena_t *arena)
+arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{
/* Don't recursively purge. */
@@ -1466,9 +1504,9 @@ arena_maybe_purge(arena_t *arena)
return;
if (opt_purge == purge_mode_ratio)
- arena_maybe_purge_ratio(arena);
+ arena_maybe_purge_ratio(tsdn, arena);
else
- arena_maybe_purge_decay(arena);
+ arena_maybe_purge_decay(tsdn, arena);
}
static size_t
@@ -1506,7 +1544,7 @@ arena_dirty_count(arena_t *arena)
}
static size_t
-arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1537,7 +1575,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
* dalloc_node=false argument to chunk_alloc_cache().
*/
zero = false;
- chunk = chunk_alloc_cache(arena, chunk_hooks,
+ chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
extent_node_addr_get(chunkselm),
extent_node_size_get(chunkselm), chunksize, &zero,
false);
@@ -1572,7 +1610,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
* prior to allocation.
*/
if (chunk == arena->spare)
- arena_chunk_alloc(arena);
+ arena_chunk_alloc(tsdn, arena);
/* Temporarily allocate the free dirty run. */
arena_run_split_large(arena, run, run_size, false);
@@ -1596,7 +1634,7 @@ arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static size_t
-arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1608,7 +1646,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
nmadvise = 0;
npurged = 0;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
for (rdelm = qr_next(purge_runs_sentinel, rd_link),
chunkselm = qr_next(purge_chunks_sentinel, cc_link);
rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
@@ -1647,7 +1685,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
flag_unzeroed = 0;
flags = CHUNK_MAP_DECOMMITTED;
} else {
- flag_unzeroed = chunk_purge_wrapper(arena,
+ flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
chunk_hooks, chunk, chunksize, pageind <<
LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
flags = flag_unzeroed;
@@ -1678,7 +1716,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (config_stats)
nmadvise++;
}
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_stats) {
arena->stats.nmadvise += nmadvise;
@@ -1689,7 +1727,7 @@ arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
static void
-arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
+arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_runs_dirty_link_t *purge_runs_sentinel,
extent_node_t *purge_chunks_sentinel)
{
@@ -1709,10 +1747,10 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
bool zeroed = extent_node_zeroed_get(chunkselm);
bool committed = extent_node_committed_get(chunkselm);
extent_node_dirty_remove(chunkselm);
- arena_node_dalloc(arena, chunkselm);
+ arena_node_dalloc(tsdn, arena, chunkselm);
chunkselm = chunkselm_next;
- chunk_dalloc_arena(arena, chunk_hooks, addr, size,
- zeroed, committed);
+ chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
+ size, zeroed, committed);
} else {
arena_chunk_t *chunk =
(arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
@@ -1723,7 +1761,8 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
pageind) != 0);
arena_run_t *run = &miscelm->run;
qr_remove(rdelm, rd_link);
- arena_run_dalloc(arena, run, false, true, decommitted);
+ arena_run_dalloc(tsdn, arena, run, false, true,
+ decommitted);
}
}
}
@@ -1738,9 +1777,9 @@ arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
* (arena->ndirty >= ndirty_limit)
*/
static void
-arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
+arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
{
- chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
size_t npurge, npurged;
arena_runs_dirty_link_t purge_runs_sentinel;
extent_node_t purge_chunks_sentinel;
@@ -1761,14 +1800,14 @@ arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
qr_new(&purge_runs_sentinel, rd_link);
extent_node_dirty_linkage_init(&purge_chunks_sentinel);
- npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit,
+ npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
&purge_runs_sentinel, &purge_chunks_sentinel);
if (npurge == 0)
goto label_return;
- npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
- &purge_chunks_sentinel);
+ npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
+ &purge_runs_sentinel, &purge_chunks_sentinel);
assert(npurged == npurge);
- arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
+ arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
&purge_chunks_sentinel);
if (config_stats)
@@ -1779,15 +1818,159 @@ label_return:
}
void
-arena_purge(arena_t *arena, bool all)
+arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
{
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (all)
- arena_purge_to_limit(arena, 0);
+ arena_purge_to_limit(tsdn, arena, 0);
else
- arena_maybe_purge(arena);
- malloc_mutex_unlock(&arena->lock);
+ arena_maybe_purge(tsdn, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+}
+
+static void
+arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
+{
+ size_t pageind, npages;
+
+ cassert(config_prof);
+ assert(opt_prof);
+
+ /*
+ * Iterate over the allocated runs and remove profiled allocations from
+ * the sample set.
+ */
+ for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
+ if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
+ if (arena_mapbits_large_get(chunk, pageind) != 0) {
+ void *ptr = (void *)((uintptr_t)chunk + (pageind
+ << LG_PAGE));
+ size_t usize = isalloc(tsd_tsdn(tsd), ptr,
+ config_prof);
+
+ prof_free(tsd, ptr, usize);
+ npages = arena_mapbits_large_size_get(chunk,
+ pageind) >> LG_PAGE;
+ } else {
+ /* Skip small run. */
+ size_t binind = arena_mapbits_binind_get(chunk,
+ pageind);
+ arena_bin_info_t *bin_info =
+ &arena_bin_info[binind];
+ npages = bin_info->run_size >> LG_PAGE;
+ }
+ } else {
+ /* Skip unallocated run. */
+ npages = arena_mapbits_unallocated_size_get(chunk,
+ pageind) >> LG_PAGE;
+ }
+ assert(pageind + npages <= chunk_npages);
+ }
+}
+
+void
+arena_reset(tsd_t *tsd, arena_t *arena)
+{
+ unsigned i;
+ extent_node_t *node;
+
+ /*
+ * Locking in this function is unintuitive. The caller guarantees that
+ * no concurrent operations are happening in this arena, but there are
+ * still reasons that some locking is necessary:
+ *
+ * - Some of the functions in the transitive closure of calls assume
+ * appropriate locks are held, and in some cases these locks are
+ * temporarily dropped to avoid lock order reversal or deadlock due to
+ * reentry.
+ * - mallctl("epoch", ...) may concurrently refresh stats. While
+ * strictly speaking this is a "concurrent operation", disallowing
+ * stats refreshes would impose an inconvenient burden.
+ */
+
+ /* Remove large allocations from prof sample set. */
+ if (config_prof && opt_prof) {
+ ql_foreach(node, &arena->achunks, ql_link) {
+ arena_achunk_prof_reset(tsd, arena,
+ extent_node_addr_get(node));
+ }
+ }
+
+ /* Reset curruns for large size classes. */
+ if (config_stats) {
+ for (i = 0; i < nlclasses; i++)
+ arena->stats.lstats[i].curruns = 0;
+ }
+
+ /* Huge allocations. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
+ for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
+ ql_last(&arena->huge, ql_link)) {
+ void *ptr = extent_node_addr_get(node);
+ size_t usize;
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+ if (config_stats || (config_prof && opt_prof))
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ /* Remove huge allocation from prof sample set. */
+ if (config_prof && opt_prof)
+ prof_free(tsd, ptr, usize);
+ huge_dalloc(tsd_tsdn(tsd), ptr);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
+ /* Cancel out unwanted effects on stats. */
+ if (config_stats)
+ arena_huge_reset_stats_cancel(arena, usize);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
+
+ /* Bins. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ bin->runcur = NULL;
+ arena_run_heap_new(&bin->runs);
+ if (config_stats) {
+ bin->stats.curregs = 0;
+ bin->stats.curruns = 0;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ }
+
+ /*
+ * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
+ * chains directly correspond.
+ */
+ qr_new(&arena->runs_dirty, rd_link);
+ for (node = qr_next(&arena->chunks_cache, cc_link);
+ node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
+ qr_new(&node->rd, rd_link);
+ qr_meld(&arena->runs_dirty, &node->rd, rd_link);
+ }
+
+ /* Arena chunks. */
+ for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
+ ql_last(&arena->achunks, ql_link)) {
+ ql_remove(&arena->achunks, node, ql_link);
+ arena_chunk_discard(tsd_tsdn(tsd), arena,
+ extent_node_addr_get(node));
+ }
+
+ /* Spare. */
+ if (arena->spare != NULL) {
+ arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
+ arena->spare = NULL;
+ }
+
+ assert(!arena->purging);
+ arena->nactive = 0;
+
+ for (i = 0; i < runs_avail_nclasses; i++)
+ arena_run_heap_new(&arena->runs_avail[i]);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
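/*
 * Usage sketch (assumed mallctl name, not part of this patch): arena_reset()
 * is meant for callers that own every allocation in a dedicated arena and
 * want to discard them in bulk, typically via an "arena.<i>.reset" control:
 */
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
demo_reset_arena(unsigned arena_ind)
{
	char name[64];

	snprintf(name, sizeof(name), "arena.%u.reset", arena_ind);
	/* No old or new value; the operation is a pure side effect. */
	if (mallctl(name, NULL, NULL, NULL, 0) != 0)
		fprintf(stderr, "arena.%u.reset failed\n", arena_ind);
}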
static void
@@ -1904,8 +2087,8 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
}
static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
- bool decommitted)
+arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
+ bool cleaned, bool decommitted)
{
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
@@ -1965,7 +2148,7 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
if (size == arena_maxrun) {
assert(run_ind == map_bias);
assert(run_pages == (arena_maxrun >> LG_PAGE));
- arena_chunk_dalloc(arena, chunk);
+ arena_chunk_dalloc(tsdn, arena, chunk);
}
/*
@@ -1976,12 +2159,12 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
* chances of spuriously crossing the dirty page purging threshold.
*/
if (dirty)
- arena_maybe_purge(arena);
+ arena_maybe_purge(tsdn, arena);
}
static void
-arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize)
+arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2016,12 +2199,13 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
- arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0));
+ arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
+ 0));
}
static void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize, bool dirty)
+arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -2058,20 +2242,10 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
pageind+head_npages)));
- tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages);
+ tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
tail_run = &tail_miscelm->run;
- arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted !=
- 0));
-}
-
-static arena_run_t *
-arena_bin_runs_first(arena_bin_t *bin)
-{
- arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs);
- if (miscelm != NULL)
- return (&miscelm->run);
-
- return (NULL);
+ arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
+ != 0));
}
static void
@@ -2079,35 +2253,25 @@ arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
- assert(arena_run_tree_search(&bin->runs, miscelm) == NULL);
-
- arena_run_tree_insert(&bin->runs, miscelm);
-}
-
-static void
-arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
-{
- arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
-
- assert(arena_run_tree_search(&bin->runs, miscelm) != NULL);
-
- arena_run_tree_remove(&bin->runs, miscelm);
+ arena_run_heap_insert(&bin->runs, miscelm);
}
static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
- arena_run_t *run = arena_bin_runs_first(bin);
- if (run != NULL) {
- arena_bin_runs_remove(bin, run);
- if (config_stats)
- bin->stats.reruns++;
- }
- return (run);
+ arena_chunk_map_misc_t *miscelm;
+
+ miscelm = arena_run_heap_remove_first(&bin->runs);
+ if (miscelm == NULL)
+ return (NULL);
+ if (config_stats)
+ bin->stats.reruns++;
+
+ return (&miscelm->run);
}
static arena_run_t *
-arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{
arena_run_t *run;
szind_t binind;
@@ -2123,19 +2287,19 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
bin_info = &arena_bin_info[binind];
/* Allocate a new run. */
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc_small(arena, bin_info->run_size, binind);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
if (run != NULL) {
/* Initialize run internals. */
run->binind = binind;
run->nfree = bin_info->nregs;
bitmap_init(run->bitmap, &bin_info->bitmap_info);
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
/********************************/
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
if (run != NULL) {
if (config_stats) {
bin->stats.nruns++;
@@ -2158,7 +2322,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
-arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
{
szind_t binind;
arena_bin_info_t *bin_info;
@@ -2167,7 +2331,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
bin->runcur = NULL;
- run = arena_bin_nonfull_run_get(arena, bin);
+ run = arena_bin_nonfull_run_get(tsdn, arena, bin);
if (bin->runcur != NULL && bin->runcur->nfree > 0) {
/*
* Another thread updated runcur while this one ran without the
@@ -2188,9 +2352,10 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
* were just deallocated from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin_info->nregs)
- arena_dalloc_bin_run(arena, chunk, run, bin);
- else
+ if (run->nfree == bin_info->nregs) {
+ arena_dalloc_bin_run(tsdn, arena, chunk, run,
+ bin);
+ } else
arena_bin_lower_run(arena, chunk, run, bin);
}
return (ret);
@@ -2207,7 +2372,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
}
void
-arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
+arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
szind_t binind, uint64_t prof_accumbytes)
{
unsigned i, nfill;
@@ -2215,10 +2380,10 @@ arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
assert(tbin->ncached == 0);
- if (config_prof && arena_prof_accum(arena, prof_accumbytes))
- prof_idump();
+ if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
+ prof_idump(tsdn);
bin = &arena->bins[binind];
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
tbin->lg_fill_div); i < nfill; i++) {
arena_run_t *run;
@@ -2226,7 +2391,7 @@ arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
if ((run = bin->runcur) != NULL && run->nfree > 0)
ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
- ptr = arena_bin_malloc_hard(arena, bin);
+ ptr = arena_bin_malloc_hard(tsdn, arena, bin);
if (ptr == NULL) {
/*
* OOM. tbin->avail isn't yet filled down to its first
@@ -2253,30 +2418,31 @@ arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
tbin->ncached = i;
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
}
void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{
+ size_t redzone_size = bin_info->redzone_size;
+
if (zero) {
- size_t redzone_size = bin_info->redzone_size;
- memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
- redzone_size);
- memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
- redzone_size);
+ memset((void *)((uintptr_t)ptr - redzone_size),
+ JEMALLOC_ALLOC_JUNK, redzone_size);
+ memset((void *)((uintptr_t)ptr + bin_info->reg_size),
+ JEMALLOC_ALLOC_JUNK, redzone_size);
} else {
- memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
- bin_info->reg_interval);
+ memset((void *)((uintptr_t)ptr - redzone_size),
+ JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
}
}
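/*
 * Layout sketch (illustrative, derived from the fills above): each small
 * region of reg_size bytes sits in a reg_interval-sized slot whose leading
 * and trailing redzone_size bytes act as redzones:
 *
 *   ptr - redzone_size        ptr          ptr + reg_size
 *        |---- redzone ----|-- region --|---- redzone ----|
 *
 * On allocation the redzones (or, for non-zeroed allocations, the whole
 * slot) are filled with JEMALLOC_ALLOC_JUNK; arena_redzones_validate()
 * later checks that those bytes are still intact.
 */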
#ifdef JEMALLOC_JET
#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
+#define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
#endif
static void
arena_redzone_corruption(void *ptr, size_t usize, bool after,
@@ -2291,7 +2457,7 @@ arena_redzone_corruption(void *ptr, size_t usize, bool after,
#undef arena_redzone_corruption
#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
arena_redzone_corruption_t *arena_redzone_corruption =
- JEMALLOC_N(arena_redzone_corruption_impl);
+ JEMALLOC_N(n_arena_redzone_corruption);
#endif
static void
@@ -2306,22 +2472,22 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
for (i = 1; i <= redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
- if (*byte != 0xa5) {
+ if (*byte != JEMALLOC_ALLOC_JUNK) {
error = true;
arena_redzone_corruption(ptr, size, false, i,
*byte);
if (reset)
- *byte = 0xa5;
+ *byte = JEMALLOC_ALLOC_JUNK;
}
}
for (i = 0; i < redzone_size; i++) {
uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
- if (*byte != 0xa5) {
+ if (*byte != JEMALLOC_ALLOC_JUNK) {
error = true;
arena_redzone_corruption(ptr, size, true, i,
*byte);
if (reset)
- *byte = 0xa5;
+ *byte = JEMALLOC_ALLOC_JUNK;
}
}
}
@@ -2332,7 +2498,7 @@ arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
+#define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
#endif
void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
@@ -2340,14 +2506,14 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
size_t redzone_size = bin_info->redzone_size;
arena_redzones_validate(ptr, bin_info, false);
- memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
+ memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
bin_info->reg_interval);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_small
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
arena_dalloc_junk_small_t *arena_dalloc_junk_small =
- JEMALLOC_N(arena_dalloc_junk_small_impl);
+ JEMALLOC_N(n_arena_dalloc_junk_small);
#endif
void
@@ -2366,7 +2532,7 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
}
static void *
-arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
+arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
void *ret;
arena_bin_t *bin;
@@ -2377,14 +2543,14 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
bin = &arena->bins[binind];
usize = index2size(binind);
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
- ret = arena_bin_malloc_hard(arena, bin);
+ ret = arena_bin_malloc_hard(tsdn, arena, bin);
if (ret == NULL) {
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
return (NULL);
}
@@ -2393,9 +2559,9 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
bin->stats.nrequests++;
bin->stats.curregs++;
}
- malloc_mutex_unlock(&bin->lock);
- if (config_prof && !isthreaded && arena_prof_accum(arena, usize))
- prof_idump();
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
+ prof_idump(tsdn);
if (!zero) {
if (config_fill) {
@@ -2415,23 +2581,23 @@ arena_malloc_small(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
memset(ret, 0, usize);
}
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
return (ret);
}
void *
-arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
+arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
{
void *ret;
size_t usize;
uintptr_t random_offset;
arena_run_t *run;
arena_chunk_map_misc_t *miscelm;
- UNUSED bool idump;
+ UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
/* Large allocation. */
usize = index2size(binind);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_cache_oblivious) {
uint64_t r;
@@ -2444,9 +2610,9 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
random_offset = ((uintptr_t)r) << LG_CACHELINE;
} else
random_offset = 0;
- run = arena_run_alloc_large(arena, usize + large_pad, zero);
+ run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
if (run == NULL) {
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL);
}
miscelm = arena_run_to_miscelm(run);
@@ -2464,42 +2630,45 @@ arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero)
}
if (config_prof)
idump = arena_prof_accum_locked(arena, usize);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
if (config_prof && idump)
- prof_idump();
+ prof_idump(tsdn);
if (!zero) {
if (config_fill) {
if (unlikely(opt_junk_alloc))
- memset(ret, 0xa5, usize);
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
}
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
return (ret);
}
void *
-arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
- bool zero, tcache_t *tcache)
+arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
+ bool zero)
{
- arena = arena_choose(tsd, arena);
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL))
return (NULL);
if (likely(size <= SMALL_MAXCLASS))
- return (arena_malloc_small(tsd, arena, ind, zero));
+ return (arena_malloc_small(tsdn, arena, ind, zero));
if (likely(size <= large_maxclass))
- return (arena_malloc_large(tsd, arena, ind, zero));
- return (huge_malloc(tsd, arena, index2size(ind), zero, tcache));
+ return (arena_malloc_large(tsdn, arena, ind, zero));
+ return (huge_malloc(tsdn, arena, index2size(ind), zero));
}
/* Only handles large allocations that require more than page alignment. */
static void *
-arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero)
{
void *ret;
@@ -2509,19 +2678,21 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *miscelm;
void *rpages;
+ assert(!tsdn_null(tsdn) || arena != NULL);
assert(usize == PAGE_CEILING(usize));
- arena = arena_choose(tsd, arena);
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
if (unlikely(arena == NULL))
return (NULL);
alignment = PAGE_CEILING(alignment);
- alloc_size = usize + large_pad + alignment - PAGE;
+ alloc_size = usize + large_pad + alignment;
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc_large(arena, alloc_size, false);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
if (run == NULL) {
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
@@ -2536,16 +2707,16 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena_chunk_map_misc_t *head_miscelm = miscelm;
arena_run_t *head_run = run;
- miscelm = arena_miscelm_get(chunk,
+ miscelm = arena_miscelm_get_mutable(chunk,
arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
LG_PAGE));
run = &miscelm->run;
- arena_run_trim_head(arena, chunk, head_run, alloc_size,
+ arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
alloc_size - leadsize);
}
if (trailsize != 0) {
- arena_run_trim_tail(arena, chunk, run, usize + large_pad +
+ arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
trailsize, usize + large_pad, false);
}
if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
@@ -2556,8 +2727,8 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
run_ind) != 0);
assert(decommitted); /* Cause of OOM. */
- arena_run_dalloc(arena, run, dirty, false, decommitted);
- malloc_mutex_unlock(&arena->lock);
+ arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (NULL);
}
ret = arena_miscelm_to_rpages(miscelm);
@@ -2572,20 +2743,20 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
if (config_fill && !zero) {
if (unlikely(opt_junk_alloc))
- memset(ret, 0xa5, usize);
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
return (ret);
}
void *
-arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
+arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache)
{
void *ret;
@@ -2593,7 +2764,7 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
&& (usize & PAGE_MASK) == 0))) {
/* Small; alignment doesn't require special run placement. */
- ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
+ ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
} else if (usize <= large_maxclass && alignment <= PAGE) {
/*
@@ -2602,26 +2773,25 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
* the base of the run, so do some bit manipulation to retrieve
* the base.
*/
- ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
+ ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
tcache, true);
if (config_cache_oblivious)
ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
} else {
if (likely(usize <= large_maxclass)) {
- ret = arena_palloc_large(tsd, arena, usize, alignment,
+ ret = arena_palloc_large(tsdn, arena, usize, alignment,
zero);
} else if (likely(alignment <= chunksize))
- ret = huge_malloc(tsd, arena, usize, zero, tcache);
+ ret = huge_malloc(tsdn, arena, usize, zero);
else {
- ret = huge_palloc(tsd, arena, usize, alignment, zero,
- tcache);
+ ret = huge_palloc(tsdn, arena, usize, alignment, zero);
}
}
return (ret);
}
void
-arena_prof_promoted(const void *ptr, size_t size)
+arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind;
@@ -2630,8 +2800,8 @@ arena_prof_promoted(const void *ptr, size_t size)
cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(ptr, false) == LARGE_MINCLASS);
- assert(isalloc(ptr, true) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -2640,8 +2810,8 @@ arena_prof_promoted(const void *ptr, size_t size)
assert(binind < NBINS);
arena_mapbits_large_binind_set(chunk, pageind, binind);
- assert(isalloc(ptr, false) == LARGE_MINCLASS);
- assert(isalloc(ptr, true) == size);
+ assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
+ assert(isalloc(tsdn, ptr, true) == size);
}
static void
@@ -2657,33 +2827,34 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
&chunk->node), bin);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ /*
+ * The following block's conditional is necessary because if the
+ * run only contains one region, then it never gets inserted
+ * into the non-full runs heap.
+ */
if (bin_info->nregs != 1) {
- /*
- * This block's conditional is necessary because if the
- * run only contains one region, then it never gets
- * inserted into the non-full runs tree.
- */
- arena_bin_runs_remove(bin, run);
+ arena_chunk_map_misc_t *miscelm =
+ arena_run_to_miscelm(run);
+
+ arena_run_heap_remove(&bin->runs, miscelm);
}
}
}
static void
-arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
+arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ arena_run_t *run, arena_bin_t *bin)
{
assert(run != bin->runcur);
- assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) ==
- NULL);
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- malloc_mutex_lock(&arena->lock);
- arena_run_dalloc(arena, run, true, false, false);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_run_dalloc(tsdn, arena, run, true, false, false);
+ malloc_mutex_unlock(tsdn, &arena->lock);
/****************************/
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
if (config_stats)
bin->stats.curruns--;
}
@@ -2710,8 +2881,8 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
}
static void
-arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_bits_t *bitselm, bool junked)
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
{
size_t pageind, rpages_ind;
arena_run_t *run;
@@ -2721,7 +2892,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get(chunk, rpages_ind)->run;
+ run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
binind = run->binind;
bin = &arena->bins[binind];
bin_info = &arena_bin_info[binind];
@@ -2732,7 +2903,7 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_run_reg_dalloc(run, ptr);
if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin);
- arena_dalloc_bin_run(arena, chunk, run, bin);
+ arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin);
@@ -2743,15 +2914,15 @@ arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr,
}
void
-arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_bits_t *bitselm)
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
{
- arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true);
+ arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
}
void
-arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_bits_t *bitselm)
{
arena_run_t *run;
@@ -2759,16 +2930,16 @@ arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t rpages_ind;
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
- run = &arena_miscelm_get(chunk, rpages_ind)->run;
+ run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
bin = &arena->bins[run->binind];
- malloc_mutex_lock(&bin->lock);
- arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false);
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
+ malloc_mutex_unlock(tsdn, &bin->lock);
}
void
-arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind)
+arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t pageind)
{
arena_chunk_map_bits_t *bitselm;
@@ -2777,35 +2948,36 @@ arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr,
assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
pageind)) != BININD_INVALID);
}
- bitselm = arena_bitselm_get(chunk, pageind);
- arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm);
- arena_decay_tick(tsd, arena);
+ bitselm = arena_bitselm_get_mutable(chunk, pageind);
+ arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
+ arena_decay_tick(tsdn, arena);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
+#define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
#endif
void
arena_dalloc_junk_large(void *ptr, size_t usize)
{
if (config_fill && unlikely(opt_junk_free))
- memset(ptr, 0x5a, usize);
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
- JEMALLOC_N(arena_dalloc_junk_large_impl);
+ JEMALLOC_N(n_arena_dalloc_junk_large);
#endif
static void
-arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, bool junked)
+arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, bool junked)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
arena_run_t *run = &miscelm->run;
if (config_fill || config_stats) {
@@ -2824,33 +2996,35 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
}
}
- arena_run_dalloc(arena, run, true, false, false);
+ arena_run_dalloc(tsdn, arena, run, true, false, false);
}
void
-arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
- void *ptr)
+arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr)
{
- arena_dalloc_large_locked_impl(arena, chunk, ptr, true);
+ arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
}
void
-arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr)
+arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr)
{
- malloc_mutex_lock(&arena->lock);
- arena_dalloc_large_locked_impl(arena, chunk, ptr, false);
- malloc_mutex_unlock(&arena->lock);
- arena_decay_tick(tsd, arena);
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
+ malloc_mutex_unlock(tsdn, &arena->lock);
+ arena_decay_tick(tsdn, arena);
}
static void
-arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size)
+arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t size)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);
+ arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
+ pageind);
arena_run_t *run = &miscelm->run;
assert(size < oldsize);
@@ -2859,8 +3033,8 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* Shrink the run, and make trailing pages available for other
* allocations.
*/
- malloc_mutex_lock(&arena->lock);
- arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size +
+ malloc_mutex_lock(tsdn, &arena->lock);
+ arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
large_pad, true);
if (config_stats) {
szind_t oldindex = size2index(oldsize) - NBINS;
@@ -2878,12 +3052,12 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
static bool
-arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
+arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = (oldsize + large_pad) >> LG_PAGE;
@@ -2893,7 +3067,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
large_pad);
/* Try to extend the run. */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
pageind+npages) != 0)
goto label_fail;
@@ -2916,7 +3090,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
if (splitsize == 0)
goto label_fail;
- run = &arena_miscelm_get(chunk, pageind+npages)->run;
+ run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
if (arena_run_split_large(arena, run, splitsize, zero))
goto label_fail;
@@ -2973,24 +3147,24 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena->stats.lstats[index].nrequests++;
arena->stats.lstats[index].curruns++;
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
label_fail:
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (true);
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
+#define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{
if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize), 0x5a,
+ memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
old_usize - usize);
}
}
@@ -2998,7 +3172,7 @@ arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
#undef arena_ralloc_junk_large
#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
- JEMALLOC_N(arena_ralloc_junk_large_impl);
+ JEMALLOC_N(n_arena_ralloc_junk_large);
#endif
/*
@@ -3006,7 +3180,7 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
* always fail if growing an object, and the following run is already in use.
*/
static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
+arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
arena_chunk_t *chunk;
@@ -3021,15 +3195,16 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
arena = extent_node_arena_get(&chunk->node);
if (oldsize < usize_max) {
- bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
- usize_min, usize_max, zero);
+ bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
+ oldsize, usize_min, usize_max, zero);
if (config_fill && !ret && !zero) {
if (unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
- isalloc(ptr, config_prof) - oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize),
+ JEMALLOC_ALLOC_JUNK,
+ isalloc(tsdn, ptr, config_prof) - oldsize);
} else if (unlikely(opt_zero)) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
- isalloc(ptr, config_prof) - oldsize);
+ isalloc(tsdn, ptr, config_prof) - oldsize);
}
}
return (ret);
@@ -3038,12 +3213,12 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
assert(oldsize > usize_max);
/* Fill before shrinking in order to avoid a race. */
arena_ralloc_junk_large(ptr, oldsize, usize_max);
- arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
+ arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
return (false);
}
bool
-arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero)
{
size_t usize_min, usize_max;
@@ -3073,32 +3248,32 @@ arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
} else {
if (usize_max <= SMALL_MAXCLASS)
return (true);
- if (arena_ralloc_large(ptr, oldsize, usize_min,
+ if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
usize_max, zero))
return (true);
}
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena_decay_tick(tsd, extent_node_arena_get(&chunk->node));
+ arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
return (false);
} else {
- return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min,
+ return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
usize_max, zero));
}
}
static void *
-arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache)
{
if (alignment == 0)
- return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
- tcache, true));
+ return (arena_malloc(tsdn, arena, usize, size2index(usize),
+ zero, tcache, true));
usize = sa2u(usize, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
- return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
+ return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
}
void *
@@ -3116,7 +3291,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t copysize;
/* Try to avoid moving the allocation. */
- if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero))
+ if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
+ zero))
return (ptr);
/*
@@ -3124,8 +3300,8 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
* the object. In that case, fall back to allocating new space
* and copying.
*/
- ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
- zero, tcache);
+ ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
+ alignment, zero, tcache);
if (ret == NULL)
return (NULL);
@@ -3137,7 +3313,7 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (usize < oldsize) ? usize : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
} else {
ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
zero, tcache);
@@ -3146,25 +3322,25 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
}
dss_prec_t
-arena_dss_prec_get(arena_t *arena)
+arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
{
dss_prec_t ret;
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
ret = arena->dss_prec;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (ret);
}
bool
-arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
+arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
{
if (!have_dss)
return (dss_prec != dss_prec_disabled);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
arena->dss_prec = dss_prec;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (false);
}
@@ -3212,7 +3388,7 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
size_t *nactive, size_t *ndirty)
{
- *nthreads += arena_nthreads_get(arena);
+ *nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena->dss_prec];
*lg_dirty_mult = arena->lg_dirty_mult;
*decay_time = arena->decay_time;
@@ -3221,32 +3397,34 @@ arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
}
void
-arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
- ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
- size_t *ndirty)
+arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty)
{
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
decay_time, nactive, ndirty);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
void
-arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
- ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats)
+arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
+ size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+ malloc_huge_stats_t *hstats)
{
unsigned i;
cassert(config_stats);
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
decay_time, nactive, ndirty);
astats->mapped += arena->stats.mapped;
+ astats->retained += arena->stats.retained;
astats->npurge += arena->stats.npurge;
astats->nmadvise += arena->stats.nmadvise;
astats->purged += arena->stats.purged;
@@ -3272,12 +3450,12 @@ arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
bstats[i].nmalloc += bin->stats.nmalloc;
bstats[i].ndalloc += bin->stats.ndalloc;
bstats[i].nrequests += bin->stats.nrequests;
@@ -3289,58 +3467,58 @@ arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
bstats[i].nruns += bin->stats.nruns;
bstats[i].reruns += bin->stats.reruns;
bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
}
}
unsigned
-arena_nthreads_get(arena_t *arena)
+arena_nthreads_get(arena_t *arena, bool internal)
{
- return (atomic_read_u(&arena->nthreads));
+ return (atomic_read_u(&arena->nthreads[internal]));
}
void
-arena_nthreads_inc(arena_t *arena)
+arena_nthreads_inc(arena_t *arena, bool internal)
{
- atomic_add_u(&arena->nthreads, 1);
+ atomic_add_u(&arena->nthreads[internal], 1);
}
void
-arena_nthreads_dec(arena_t *arena)
+arena_nthreads_dec(arena_t *arena, bool internal)
{
- atomic_sub_u(&arena->nthreads, 1);
+ atomic_sub_u(&arena->nthreads[internal], 1);
}
arena_t *
-arena_new(unsigned ind)
+arena_new(tsdn_t *tsdn, unsigned ind)
{
arena_t *arena;
size_t arena_size;
unsigned i;
- arena_bin_t *bin;
/* Compute arena size to incorporate sufficient runs_avail elements. */
- arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_tree_t) *
+ arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_heap_t) *
runs_avail_nclasses);
/*
* Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
* because there is no way to clean up if base_alloc() OOMs.
*/
if (config_stats) {
- arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) +
- QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) +
- nhclasses) * sizeof(malloc_huge_stats_t));
+ arena = (arena_t *)base_alloc(tsdn,
+ CACHELINE_CEILING(arena_size) + QUANTUM_CEILING(nlclasses *
+ sizeof(malloc_large_stats_t) + nhclasses) *
+ sizeof(malloc_huge_stats_t));
} else
- arena = (arena_t *)base_alloc(arena_size);
+ arena = (arena_t *)base_alloc(tsdn, arena_size);
if (arena == NULL)
return (NULL);
arena->ind = ind;
- arena->nthreads = 0;
- if (malloc_mutex_init(&arena->lock))
+ arena->nthreads[0] = arena->nthreads[1] = 0;
+ if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
return (NULL);
if (config_stats) {
@@ -3373,7 +3551,9 @@ arena_new(unsigned ind)
(uint64_t)(uintptr_t)arena;
}
- arena->dss_prec = chunk_dss_prec_get();
+ arena->dss_prec = chunk_dss_prec_get(tsdn);
+
+ ql_new(&arena->achunks);
arena->spare = NULL;
@@ -3383,7 +3563,7 @@ arena_new(unsigned ind)
arena->ndirty = 0;
for(i = 0; i < runs_avail_nclasses; i++)
- arena_run_tree_new(&arena->runs_avail[i]);
+ arena_run_heap_new(&arena->runs_avail[i]);
qr_new(&arena->runs_dirty, rd_link);
qr_new(&arena->chunks_cache, cc_link);
@@ -3391,28 +3571,32 @@ arena_new(unsigned ind)
arena_decay_init(arena, arena_decay_time_default_get());
ql_new(&arena->huge);
- if (malloc_mutex_init(&arena->huge_mtx))
+ if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
+ WITNESS_RANK_ARENA_HUGE))
return (NULL);
extent_tree_szad_new(&arena->chunks_szad_cached);
extent_tree_ad_new(&arena->chunks_ad_cached);
extent_tree_szad_new(&arena->chunks_szad_retained);
extent_tree_ad_new(&arena->chunks_ad_retained);
- if (malloc_mutex_init(&arena->chunks_mtx))
+ if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
+ WITNESS_RANK_ARENA_CHUNKS))
return (NULL);
ql_new(&arena->node_cache);
- if (malloc_mutex_init(&arena->node_cache_mtx))
+ if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
+ WITNESS_RANK_ARENA_NODE_CACHE))
return (NULL);
arena->chunk_hooks = chunk_hooks_default;
/* Initialize bins. */
for (i = 0; i < NBINS; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
+ arena_bin_t *bin = &arena->bins[i];
+ if (malloc_mutex_init(&bin->lock, "arena_bin",
+ WITNESS_RANK_ARENA_BIN))
return (NULL);
bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
+ arena_run_heap_new(&bin->runs);
if (config_stats)
memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
@@ -3541,7 +3725,7 @@ small_run_size_init(void)
assert(small_maxrun != 0);
- small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >>
+ small_run_tab = (bool *)base_alloc(NULL, sizeof(bool) * (small_maxrun >>
LG_PAGE));
if (small_run_tab == NULL)
return (true);
@@ -3568,12 +3752,12 @@ run_quantize_init(void)
run_quantize_max = chunksize + large_pad;
- run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) *
+ run_quantize_floor_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
(run_quantize_max >> LG_PAGE));
if (run_quantize_floor_tab == NULL)
return (true);
- run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) *
+ run_quantize_ceil_tab = (size_t *)base_alloc(NULL, sizeof(size_t) *
(run_quantize_max >> LG_PAGE));
if (run_quantize_ceil_tab == NULL)
return (true);
@@ -3650,40 +3834,58 @@ arena_boot(void)
}
void
-arena_prefork(arena_t *arena)
+arena_prefork0(tsdn_t *tsdn, arena_t *arena)
+{
+
+ malloc_mutex_prefork(tsdn, &arena->lock);
+}
+
+void
+arena_prefork1(tsdn_t *tsdn, arena_t *arena)
+{
+
+ malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
+}
+
+void
+arena_prefork2(tsdn_t *tsdn, arena_t *arena)
+{
+
+ malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
+}
+
+void
+arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
- malloc_mutex_prefork(&arena->lock);
- malloc_mutex_prefork(&arena->huge_mtx);
- malloc_mutex_prefork(&arena->chunks_mtx);
- malloc_mutex_prefork(&arena->node_cache_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_prefork(&arena->bins[i].lock);
+ malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
+ malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}
void
-arena_postfork_parent(arena_t *arena)
+arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
+ malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_parent(&arena->bins[i].lock);
- malloc_mutex_postfork_parent(&arena->node_cache_mtx);
- malloc_mutex_postfork_parent(&arena->chunks_mtx);
- malloc_mutex_postfork_parent(&arena->huge_mtx);
- malloc_mutex_postfork_parent(&arena->lock);
+ malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
+ malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->lock);
}
void
-arena_postfork_child(arena_t *arena)
+arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
unsigned i;
+ malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_child(&arena->bins[i].lock);
- malloc_mutex_postfork_child(&arena->node_cache_mtx);
- malloc_mutex_postfork_child(&arena->chunks_mtx);
- malloc_mutex_postfork_child(&arena->huge_mtx);
- malloc_mutex_postfork_child(&arena->lock);
+ malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
+ malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->lock);
}
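
The arena.c changes above thread a tsdn_t through nearly every locking path and split arena_prefork() into arena_prefork0() through arena_prefork3(), so the fork handler can take the arena, chunks, node-cache, bin, and huge mutexes in a fixed order and release them in reverse. A minimal, self-contained sketch of that fork-safety pattern is below; the two mutex names are illustrative, not jemalloc's, and the child handler simply unlocks where the real code may reinitialize.

/*
 * Simplified model of the prefork/postfork pattern: acquire all allocator
 * locks in a fixed order before fork(), then release them in reverse in
 * both the parent and the child.
 */
#include <pthread.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t arena_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t chunks_lock = PTHREAD_MUTEX_INITIALIZER;

static void
prefork(void)
{
	/* Phase 0, then phase 1: always the same acquisition order. */
	pthread_mutex_lock(&arena_lock);
	pthread_mutex_lock(&chunks_lock);
}

static void
postfork(void)
{
	/* Release in reverse order. */
	pthread_mutex_unlock(&chunks_lock);
	pthread_mutex_unlock(&arena_lock);
}

int
main(void)
{
	pthread_atfork(prefork, postfork, postfork);
	pid_t pid = fork();
	if (pid == 0) {
		/* The child inherits consistent, unlocked mutexes. */
		pthread_mutex_lock(&arena_lock);
		printf("child can still lock safely\n");
		pthread_mutex_unlock(&arena_lock);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return (0);
}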
diff --git a/contrib/jemalloc/src/base.c b/contrib/jemalloc/src/base.c
index 7cdcfed..81b0801 100644
--- a/contrib/jemalloc/src/base.c
+++ b/contrib/jemalloc/src/base.c
@@ -13,12 +13,13 @@ static size_t base_mapped;
/******************************************************************************/
-/* base_mtx must be held. */
static extent_node_t *
-base_node_try_alloc(void)
+base_node_try_alloc(tsdn_t *tsdn)
{
extent_node_t *node;
+ malloc_mutex_assert_owner(tsdn, &base_mtx);
+
if (base_nodes == NULL)
return (NULL);
node = base_nodes;
@@ -27,33 +28,34 @@ base_node_try_alloc(void)
return (node);
}
-/* base_mtx must be held. */
static void
-base_node_dalloc(extent_node_t *node)
+base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{
+ malloc_mutex_assert_owner(tsdn, &base_mtx);
+
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
*(extent_node_t **)node = base_nodes;
base_nodes = node;
}
-/* base_mtx must be held. */
static extent_node_t *
-base_chunk_alloc(size_t minsize)
+base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
extent_node_t *node;
size_t csize, nsize;
void *addr;
+ malloc_mutex_assert_owner(tsdn, &base_mtx);
assert(minsize != 0);
- node = base_node_try_alloc();
+ node = base_node_try_alloc(tsdn);
/* Allocate enough space to also carve a node out if necessary. */
nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
csize = CHUNK_CEILING(minsize + nsize);
addr = chunk_alloc_base(csize);
if (addr == NULL) {
if (node != NULL)
- base_node_dalloc(node);
+ base_node_dalloc(tsdn, node);
return (NULL);
}
base_mapped += csize;
@@ -76,7 +78,7 @@ base_chunk_alloc(size_t minsize)
* physical memory usage.
*/
void *
-base_alloc(size_t size)
+base_alloc(tsdn_t *tsdn, size_t size)
{
void *ret;
size_t csize, usize;
@@ -91,14 +93,14 @@ base_alloc(size_t size)
usize = s2u(csize);
extent_node_init(&key, NULL, NULL, usize, false, false);
- malloc_mutex_lock(&base_mtx);
+ malloc_mutex_lock(tsdn, &base_mtx);
node = extent_tree_szad_nsearch(&base_avail_szad, &key);
if (node != NULL) {
/* Use existing space. */
extent_tree_szad_remove(&base_avail_szad, node);
} else {
/* Try to allocate more space. */
- node = base_chunk_alloc(csize);
+ node = base_chunk_alloc(tsdn, csize);
}
if (node == NULL) {
ret = NULL;
@@ -111,7 +113,7 @@ base_alloc(size_t size)
extent_node_size_set(node, extent_node_size_get(node) - csize);
extent_tree_szad_insert(&base_avail_szad, node);
} else
- base_node_dalloc(node);
+ base_node_dalloc(tsdn, node);
if (config_stats) {
base_allocated += csize;
/*
@@ -123,28 +125,29 @@ base_alloc(size_t size)
}
JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
- malloc_mutex_unlock(&base_mtx);
+ malloc_mutex_unlock(tsdn, &base_mtx);
return (ret);
}
void
-base_stats_get(size_t *allocated, size_t *resident, size_t *mapped)
+base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+ size_t *mapped)
{
- malloc_mutex_lock(&base_mtx);
+ malloc_mutex_lock(tsdn, &base_mtx);
assert(base_allocated <= base_resident);
assert(base_resident <= base_mapped);
*allocated = base_allocated;
*resident = base_resident;
*mapped = base_mapped;
- malloc_mutex_unlock(&base_mtx);
+ malloc_mutex_unlock(tsdn, &base_mtx);
}
bool
base_boot(void)
{
- if (malloc_mutex_init(&base_mtx))
+ if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
return (true);
extent_tree_szad_new(&base_avail_szad);
base_nodes = NULL;
@@ -153,22 +156,22 @@ base_boot(void)
}
void
-base_prefork(void)
+base_prefork(tsdn_t *tsdn)
{
- malloc_mutex_prefork(&base_mtx);
+ malloc_mutex_prefork(tsdn, &base_mtx);
}
void
-base_postfork_parent(void)
+base_postfork_parent(tsdn_t *tsdn)
{
- malloc_mutex_postfork_parent(&base_mtx);
+ malloc_mutex_postfork_parent(tsdn, &base_mtx);
}
void
-base_postfork_child(void)
+base_postfork_child(tsdn_t *tsdn)
{
- malloc_mutex_postfork_child(&base_mtx);
+ malloc_mutex_postfork_child(tsdn, &base_mtx);
}
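
In base.c the "base_mtx must be held" comments become malloc_mutex_assert_owner(tsdn, &base_mtx) calls, so the locking contract is checked at run time in debug builds instead of being documented only in comments. A sketch of the same idea with an owner-tracking wrapper follows; the wrapper type is an assumption for illustration and is not how malloc_mutex_t actually records ownership.

/*
 * Replacing "lock must be held" comments with an ownership assertion.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

typedef struct {
	pthread_mutex_t	lock;
	pthread_t	owner;
	bool		owned;
} owned_mutex_t;

static void
owned_mutex_lock(owned_mutex_t *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->owned = true;
}

static void
owned_mutex_unlock(owned_mutex_t *m)
{
	m->owned = false;
	pthread_mutex_unlock(&m->lock);
}

static void
owned_mutex_assert_owner(owned_mutex_t *m)
{
	/* Fires in debug builds if the caller forgot to take the lock. */
	assert(m->owned && pthread_equal(m->owner, pthread_self()));
}

int
main(void)
{
	owned_mutex_t m = { PTHREAD_MUTEX_INITIALIZER };

	owned_mutex_lock(&m);
	owned_mutex_assert_owner(&m);	/* stands in for the old comment */
	owned_mutex_unlock(&m);
	return (0);
}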
diff --git a/contrib/jemalloc/src/bitmap.c b/contrib/jemalloc/src/bitmap.c
index b1e6627..ac0f3b3 100644
--- a/contrib/jemalloc/src/bitmap.c
+++ b/contrib/jemalloc/src/bitmap.c
@@ -74,15 +74,11 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
void
bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
{
- size_t i;
assert(nbits > 0);
assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
- i = nbits >> LG_BITMAP_GROUP_NBITS;
- if (nbits % BITMAP_GROUP_NBITS != 0)
- i++;
- binfo->ngroups = i;
+ binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
binfo->nbits = nbits;
}
@@ -99,9 +95,10 @@ bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
size_t extra;
memset(bitmap, 0xffU, bitmap_size(binfo));
- extra = (binfo->nbits % (binfo->ngroups * BITMAP_GROUP_NBITS));
+ extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+ & BITMAP_GROUP_NBITS_MASK;
if (extra != 0)
- bitmap[binfo->ngroups - 1] >>= (BITMAP_GROUP_NBITS - extra);
+ bitmap[binfo->ngroups - 1] >>= extra;
}
#endif /* USE_TREE */
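
The bitmap.c hunks replace the open-coded round-up with BITMAP_BITS2GROUPS() and compute the count of unused trailing bits directly. Both reduce to ordinary power-of-two arithmetic, sketched below; the 64-bit group width stands in for 1 << LG_BITMAP_GROUP_NBITS and is an assumption of this example.

/*
 * Ceiling division used by BITMAP_BITS2GROUPS(): groups = ceil(nbits / 64).
 */
#include <assert.h>
#include <stddef.h>

#define GROUP_NBITS	64
#define BITS2GROUPS(nbits)	(((nbits) + GROUP_NBITS - 1) / GROUP_NBITS)

int
main(void)
{
	assert(BITS2GROUPS(1) == 1);
	assert(BITS2GROUPS(64) == 1);
	assert(BITS2GROUPS(65) == 2);

	/*
	 * The "extra" value in bitmap_init() is the number of unused high
	 * bits in the last group, masked back into range so that a full
	 * last group yields 0.
	 */
	size_t nbits = 65;
	size_t extra = (GROUP_NBITS - (nbits & (GROUP_NBITS - 1))) &
	    (GROUP_NBITS - 1);
	assert(extra == 63);
	return (0);
}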
diff --git a/contrib/jemalloc/src/chunk.c b/contrib/jemalloc/src/chunk.c
index b179d21..adc666f 100644
--- a/contrib/jemalloc/src/chunk.c
+++ b/contrib/jemalloc/src/chunk.c
@@ -49,9 +49,10 @@ const chunk_hooks_t chunk_hooks_default = {
* definition.
*/
-static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
- extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
- void *chunk, size_t size, bool zeroed, bool committed);
+static void chunk_record(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, bool zeroed,
+ bool committed);
/******************************************************************************/
@@ -63,23 +64,23 @@ chunk_hooks_get_locked(arena_t *arena)
}
chunk_hooks_t
-chunk_hooks_get(arena_t *arena)
+chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{
chunk_hooks_t chunk_hooks;
- malloc_mutex_lock(&arena->chunks_mtx);
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
chunk_hooks = chunk_hooks_get_locked(arena);
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (chunk_hooks);
}
chunk_hooks_t
-chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
chunk_hooks_t old_chunk_hooks;
- malloc_mutex_lock(&arena->chunks_mtx);
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
old_chunk_hooks = arena->chunk_hooks;
/*
* Copy each field atomically so that it is impossible for readers to
@@ -104,14 +105,14 @@ chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
ATOMIC_COPY_HOOK(split);
ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (old_chunk_hooks);
}
static void
-chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
- bool locked)
+chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, bool locked)
{
static const chunk_hooks_t uninitialized_hooks =
CHUNK_HOOKS_INITIALIZER;
@@ -119,27 +120,28 @@ chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
0) {
*chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
- chunk_hooks_get(arena);
+ chunk_hooks_get(tsdn, arena);
}
}
static void
-chunk_hooks_assure_initialized_locked(arena_t *arena,
+chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
chunk_hooks_t *chunk_hooks)
{
- chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+ chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
}
static void
-chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks)
{
- chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+ chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
}
bool
-chunk_register(const void *chunk, const extent_node_t *node)
+chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{
assert(extent_node_addr_get(node) == chunk);
@@ -159,7 +161,7 @@ chunk_register(const void *chunk, const extent_node_t *node)
high = atomic_read_z(&highchunks);
}
if (cur > high && prof_gdump_get_unlocked())
- prof_gdump();
+ prof_gdump(tsdn);
}
return (false);
@@ -197,7 +199,7 @@ chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
}
static void *
-chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
bool dalloc_node)
@@ -219,8 +221,8 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
- malloc_mutex_lock(&arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
if (new_addr != NULL) {
extent_node_t key;
extent_node_init(&key, arena, new_addr, alloc_size, false,
@@ -232,7 +234,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
size)) {
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
@@ -251,7 +253,7 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (leadsize != 0 &&
chunk_hooks->split(extent_node_addr_get(node),
extent_node_size_get(node), leadsize, size, false, arena->ind)) {
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
return (NULL);
}
/* Remove node from the tree. */
@@ -271,20 +273,21 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
if (chunk_hooks->split(ret, size + trailsize, size,
trailsize, false, arena->ind)) {
if (dalloc_node && node != NULL)
- arena_node_dalloc(arena, node);
- malloc_mutex_unlock(&arena->chunks_mtx);
- chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
- cache, ret, size + trailsize, zeroed, committed);
+ arena_node_dalloc(tsdn, arena, node);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ chunk_record(tsdn, arena, chunk_hooks, chunks_szad,
+ chunks_ad, cache, ret, size + trailsize, zeroed,
+ committed);
return (NULL);
}
/* Insert the trailing space as a smaller chunk. */
if (node == NULL) {
- node = arena_node_alloc(arena);
+ node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
- malloc_mutex_unlock(&arena->chunks_mtx);
- chunk_record(arena, chunk_hooks, chunks_szad,
- chunks_ad, cache, ret, size + trailsize,
- zeroed, committed);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ chunk_record(tsdn, arena, chunk_hooks,
+ chunks_szad, chunks_ad, cache, ret, size +
+ trailsize, zeroed, committed);
return (NULL);
}
}
@@ -296,16 +299,16 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
node = NULL;
}
if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
- malloc_mutex_unlock(&arena->chunks_mtx);
- chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
- ret, size, zeroed, committed);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
+ chunk_record(tsdn, arena, chunk_hooks, chunks_szad, chunks_ad,
+ cache, ret, size, zeroed, committed);
return (NULL);
}
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
assert(dalloc_node || node != NULL);
if (dalloc_node && node != NULL)
- arena_node_dalloc(arena, node);
+ arena_node_dalloc(tsdn, arena, node);
if (*zero) {
if (!zeroed)
memset(ret, 0, size);
@@ -328,8 +331,8 @@ chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
* them if they are returned.
*/
static void *
-chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
- bool *zero, bool *commit, dss_prec_t dss_prec)
+chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
void *ret;
@@ -340,8 +343,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
/* "primary" dss. */
if (have_dss && dss_prec == dss_prec_primary && (ret =
- chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
- NULL)
+ chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL)
return (ret);
/* mmap. */
if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit)) !=
@@ -349,8 +352,8 @@ chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
return (ret);
/* "secondary" dss. */
if (have_dss && dss_prec == dss_prec_secondary && (ret =
- chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
- NULL)
+ chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL)
return (ret);
/* All strategies for allocation failed. */
@@ -380,8 +383,8 @@ chunk_alloc_base(size_t size)
}
void *
-chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool dalloc_node)
+chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool dalloc_node)
{
void *ret;
bool commit;
@@ -392,9 +395,9 @@ chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
assert((alignment & chunksize_mask) == 0);
commit = true;
- ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
- &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
- &commit, dalloc_node);
+ ret = chunk_recycle(tsdn, arena, chunk_hooks,
+ &arena->chunks_szad_cached, &arena->chunks_ad_cached, true,
+ new_addr, size, alignment, zero, &commit, dalloc_node);
if (ret == NULL)
return (NULL);
assert(commit);
@@ -404,11 +407,11 @@ chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
}
static arena_t *
-chunk_arena_get(unsigned arena_ind)
+chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{
arena_t *arena;
- arena = arena_get(arena_ind, false);
+ arena = arena_get(tsdn, arena_ind, false);
/*
* The arena we're allocating on behalf of must have been initialized
* already.
@@ -422,10 +425,12 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
bool *commit, unsigned arena_ind)
{
void *ret;
+ tsdn_t *tsdn;
arena_t *arena;
- arena = chunk_arena_get(arena_ind);
- ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
+ tsdn = tsdn_fetch();
+ arena = chunk_arena_get(tsdn, arena_ind);
+ ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
commit, arena->dss_prec);
if (ret == NULL)
return (NULL);
@@ -436,29 +441,35 @@ chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
}
static void *
-chunk_alloc_retained(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
+ void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- return (chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_retained,
- &arena->chunks_ad_retained, false, new_addr, size, alignment, zero,
- commit, true));
+ ret = chunk_recycle(tsdn, arena, chunk_hooks,
+ &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+ new_addr, size, alignment, zero, commit, true);
+
+ if (config_stats && ret != NULL)
+ arena->stats.retained -= size;
+
+ return (ret);
}
void *
-chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit)
+chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit)
{
void *ret;
- chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
- ret = chunk_alloc_retained(arena, chunk_hooks, new_addr, size,
+ ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
alignment, zero, commit);
if (ret == NULL) {
ret = chunk_hooks->alloc(new_addr, size, alignment, zero,
@@ -473,7 +484,7 @@ chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
}
static void
-chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
void *chunk, size_t size, bool zeroed, bool committed)
{
@@ -485,8 +496,8 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
- malloc_mutex_lock(&arena->chunks_mtx);
- chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+ malloc_mutex_lock(tsdn, &arena->chunks_mtx);
+ chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
false, false);
node = extent_tree_ad_nsearch(chunks_ad, &key);
@@ -511,7 +522,7 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
arena_chunk_cache_maybe_insert(arena, node, cache);
} else {
/* Coalescing forward failed, so insert a new node. */
- node = arena_node_alloc(arena);
+ node = arena_node_alloc(tsdn, arena);
if (node == NULL) {
/*
* Node allocation failed, which is an exceedingly
@@ -520,8 +531,8 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
* a virtual memory leak.
*/
if (cache) {
- chunk_purge_wrapper(arena, chunk_hooks, chunk,
- size, 0, size);
+ chunk_purge_wrapper(tsdn, arena, chunk_hooks,
+ chunk, size, 0, size);
}
goto label_return;
}
@@ -557,16 +568,16 @@ chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
extent_tree_szad_insert(chunks_szad, node);
arena_chunk_cache_maybe_insert(arena, node, cache);
- arena_node_dalloc(arena, prev);
+ arena_node_dalloc(tsdn, arena, prev);
}
label_return:
- malloc_mutex_unlock(&arena->chunks_mtx);
+ malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
}
void
-chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
- size_t size, bool committed)
+chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool committed)
{
assert(chunk != NULL);
@@ -574,14 +585,24 @@ chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+ chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_cached,
&arena->chunks_ad_cached, true, chunk, size, false, committed);
- arena_maybe_purge(arena);
+ arena_maybe_purge(tsdn, arena);
+}
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+ unsigned arena_ind)
+{
+
+ if (!have_dss || !chunk_in_dss(tsdn_fetch(), chunk))
+ return (chunk_dalloc_mmap(chunk, size));
+ return (true);
}
void
-chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
- size_t size, bool zeroed, bool committed)
+chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, bool zeroed, bool committed)
{
assert(chunk != NULL);
@@ -589,7 +610,7 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
assert(size != 0);
assert((size & chunksize_mask) == 0);
- chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
/* Try to deallocate. */
if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
return;
@@ -600,29 +621,11 @@ chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
}
zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
arena->ind);
- chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+ chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szad_retained,
&arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
-}
-static bool
-chunk_dalloc_default(void *chunk, size_t size, bool committed,
- unsigned arena_ind)
-{
-
- if (!have_dss || !chunk_in_dss(chunk))
- return (chunk_dalloc_mmap(chunk, size));
- return (true);
-}
-
-void
-chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
- size_t size, bool committed)
-{
-
- chunk_hooks_assure_initialized(arena, chunk_hooks);
- chunk_hooks->dalloc(chunk, size, committed, arena->ind);
- if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
- JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+ if (config_stats)
+ arena->stats.retained += size;
}
static bool
@@ -643,8 +646,9 @@ chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
length));
}
-bool
-chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+ unsigned arena_ind)
{
assert(chunk != NULL);
@@ -657,21 +661,12 @@ chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
length));
}
-static bool
-chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
- unsigned arena_ind)
-{
-
- return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
- length));
-}
-
bool
-chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
- size_t size, size_t offset, size_t length)
+chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
+ void *chunk, size_t size, size_t offset, size_t length)
{
- chunk_hooks_assure_initialized(arena, chunk_hooks);
+ chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}
@@ -692,8 +687,11 @@ chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
if (!maps_coalesce)
return (true);
- if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
- return (true);
+ if (have_dss) {
+ tsdn_t *tsdn = tsdn_fetch();
+ if (chunk_in_dss(tsdn, chunk_a) != chunk_in_dss(tsdn, chunk_b))
+ return (true);
+ }
return (false);
}
@@ -702,7 +700,7 @@ static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
- return ((rtree_node_elm_t *)base_alloc(nelms *
+ return ((rtree_node_elm_t *)base_alloc(tsdn_fetch(), nelms *
sizeof(rtree_node_elm_t)));
}
@@ -749,22 +747,22 @@ chunk_boot(void)
}
void
-chunk_prefork(void)
+chunk_prefork(tsdn_t *tsdn)
{
- chunk_dss_prefork();
+ chunk_dss_prefork(tsdn);
}
void
-chunk_postfork_parent(void)
+chunk_postfork_parent(tsdn_t *tsdn)
{
- chunk_dss_postfork_parent();
+ chunk_dss_postfork_parent(tsdn);
}
void
-chunk_postfork_child(void)
+chunk_postfork_child(tsdn_t *tsdn)
{
- chunk_dss_postfork_child();
+ chunk_dss_postfork_child(tsdn);
}
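
chunk.c now accounts for "retained" virtual memory: chunk_dalloc_wrapper() adds a chunk's size to arena->stats.retained when the chunk ends up in the retained trees, and chunk_alloc_retained() subtracts it when such a chunk is recycled; the default hooks obtain a tsdn via tsdn_fetch() because the hook signatures carry no thread state. A toy model of the counter bookkeeping is below, with simplified stand-in types.

/*
 * Minimal model of the retained-memory accounting: a chunk that cannot be
 * unmapped is "retained", and recycling it later gives the bytes back.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
	size_t retained;	/* bytes held in the retained trees */
} arena_stats_model_t;

static void
chunk_retain(arena_stats_model_t *stats, size_t size)
{
	/* Deallocation failed or was refused; keep the mapping around. */
	stats->retained += size;
}

static bool
chunk_reuse_retained(arena_stats_model_t *stats, size_t size, bool found)
{
	/* Only adjust the counter when recycling actually succeeds. */
	if (found)
		stats->retained -= size;
	return (found);
}

int
main(void)
{
	arena_stats_model_t stats = {0};

	chunk_retain(&stats, 1 << 21);
	assert(stats.retained == (1 << 21));
	chunk_reuse_retained(&stats, 1 << 21, true);
	assert(stats.retained == 0);
	return (0);
}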
diff --git a/contrib/jemalloc/src/chunk_dss.c b/contrib/jemalloc/src/chunk_dss.c
index 61fc916..0b1f82b 100644
--- a/contrib/jemalloc/src/chunk_dss.c
+++ b/contrib/jemalloc/src/chunk_dss.c
@@ -41,33 +41,33 @@ chunk_dss_sbrk(intptr_t increment)
}
dss_prec_t
-chunk_dss_prec_get(void)
+chunk_dss_prec_get(tsdn_t *tsdn)
{
dss_prec_t ret;
if (!have_dss)
return (dss_prec_disabled);
- malloc_mutex_lock(&dss_mtx);
+ malloc_mutex_lock(tsdn, &dss_mtx);
ret = dss_prec_default;
- malloc_mutex_unlock(&dss_mtx);
+ malloc_mutex_unlock(tsdn, &dss_mtx);
return (ret);
}
bool
-chunk_dss_prec_set(dss_prec_t dss_prec)
+chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec)
{
if (!have_dss)
return (dss_prec != dss_prec_disabled);
- malloc_mutex_lock(&dss_mtx);
+ malloc_mutex_lock(tsdn, &dss_mtx);
dss_prec_default = dss_prec;
- malloc_mutex_unlock(&dss_mtx);
+ malloc_mutex_unlock(tsdn, &dss_mtx);
return (false);
}
void *
-chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
- bool *zero, bool *commit)
+chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit)
{
cassert(have_dss);
assert(size > 0 && (size & chunksize_mask) == 0);
@@ -80,7 +80,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
if ((intptr_t)size < 0)
return (NULL);
- malloc_mutex_lock(&dss_mtx);
+ malloc_mutex_lock(tsdn, &dss_mtx);
if (dss_prev != (void *)-1) {
/*
@@ -122,7 +122,7 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
if ((uintptr_t)ret < (uintptr_t)dss_max ||
(uintptr_t)dss_next < (uintptr_t)dss_max) {
/* Wrap-around. */
- malloc_mutex_unlock(&dss_mtx);
+ malloc_mutex_unlock(tsdn, &dss_mtx);
return (NULL);
}
incr = gap_size + cpad_size + size;
@@ -130,13 +130,13 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
- malloc_mutex_unlock(&dss_mtx);
+ malloc_mutex_unlock(tsdn, &dss_mtx);
if (cpad_size != 0) {
chunk_hooks_t chunk_hooks =
CHUNK_HOOKS_INITIALIZER;
- chunk_dalloc_wrapper(arena,
+ chunk_dalloc_wrapper(tsdn, arena,
&chunk_hooks, cpad, cpad_size,
- true);
+ false, true);
}
if (*zero) {
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
@@ -149,25 +149,25 @@ chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size, size_t alignment,
}
} while (dss_prev != (void *)-1);
}
- malloc_mutex_unlock(&dss_mtx);
+ malloc_mutex_unlock(tsdn, &dss_mtx);
return (NULL);
}
bool
-chunk_in_dss(void *chunk)
+chunk_in_dss(tsdn_t *tsdn, void *chunk)
{
bool ret;
cassert(have_dss);
- malloc_mutex_lock(&dss_mtx);
+ malloc_mutex_lock(tsdn, &dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
ret = true;
else
ret = false;
- malloc_mutex_unlock(&dss_mtx);
+ malloc_mutex_unlock(tsdn, &dss_mtx);
return (ret);
}
@@ -178,7 +178,7 @@ chunk_dss_boot(void)
cassert(have_dss);
- if (malloc_mutex_init(&dss_mtx))
+ if (malloc_mutex_init(&dss_mtx, "dss", WITNESS_RANK_DSS))
return (true);
dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
@@ -188,27 +188,27 @@ chunk_dss_boot(void)
}
void
-chunk_dss_prefork(void)
+chunk_dss_prefork(tsdn_t *tsdn)
{
if (have_dss)
- malloc_mutex_prefork(&dss_mtx);
+ malloc_mutex_prefork(tsdn, &dss_mtx);
}
void
-chunk_dss_postfork_parent(void)
+chunk_dss_postfork_parent(tsdn_t *tsdn)
{
if (have_dss)
- malloc_mutex_postfork_parent(&dss_mtx);
+ malloc_mutex_postfork_parent(tsdn, &dss_mtx);
}
void
-chunk_dss_postfork_child(void)
+chunk_dss_postfork_child(tsdn_t *tsdn)
{
if (have_dss)
- malloc_mutex_postfork_child(&dss_mtx);
+ malloc_mutex_postfork_child(tsdn, &dss_mtx);
}
/******************************************************************************/
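
chunk_dss.c gains tsdn parameters so dss_mtx participates in the witness machinery, and the mutex is now initialized with a name and a rank (WITNESS_RANK_DSS). The check that ranks enable is sketched below; the rank values and the single thread-local high-water mark are illustrative assumptions, whereas the real witness code keeps a per-thread list of held locks.

/*
 * Sketch of rank-ordered lock acquisition: a thread may only take a lock
 * whose rank is strictly greater than the highest rank it already holds,
 * which rules out lock-order inversions.
 */
#include <assert.h>

#define RANK_BASE	10
#define RANK_DSS	20

static __thread int highest_rank_held = 0;

static void
witness_lock(int rank)
{
	assert(rank > highest_rank_held);	/* aborts on inversion */
	highest_rank_held = rank;
}

static void
witness_unlock(int rank)
{
	(void)rank;
	highest_rank_held = 0;	/* simplified: real code pops a per-thread list */
}

int
main(void)
{
	witness_lock(RANK_BASE);
	witness_lock(RANK_DSS);		/* ok: increasing rank */
	witness_unlock(RANK_DSS);
	witness_unlock(RANK_BASE);
	return (0);
}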
diff --git a/contrib/jemalloc/src/chunk_mmap.c b/contrib/jemalloc/src/chunk_mmap.c
index 56b2ee4..f95ae75 100644
--- a/contrib/jemalloc/src/chunk_mmap.c
+++ b/contrib/jemalloc/src/chunk_mmap.c
@@ -9,25 +9,23 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
void *ret;
size_t alloc_size;
- alloc_size = size + alignment - PAGE;
+ alloc_size = size + alignment;
/* Beware size_t wrap-around. */
if (alloc_size < size)
return (NULL);
do {
void *pages;
size_t leadsize;
- pages = pages_map(NULL, alloc_size);
+ pages = pages_map(NULL, alloc_size, commit);
if (pages == NULL)
return (NULL);
leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
(uintptr_t)pages;
- ret = pages_trim(pages, alloc_size, leadsize, size);
+ ret = pages_trim(pages, alloc_size, leadsize, size, commit);
} while (ret == NULL);
assert(ret != NULL);
*zero = true;
- if (!*commit)
- *commit = pages_decommit(ret, size);
return (ret);
}
@@ -54,7 +52,7 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(alignment != 0);
assert((alignment & chunksize_mask) == 0);
- ret = pages_map(new_addr, size);
+ ret = pages_map(new_addr, size, commit);
if (ret == NULL || ret == new_addr)
return (ret);
assert(new_addr == NULL);
@@ -66,8 +64,6 @@ chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
assert(ret != NULL);
*zero = true;
- if (!*commit)
- *commit = pages_decommit(ret, size);
return (ret);
}
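
chunk_alloc_mmap_slow() now over-allocates size + alignment bytes (previously size + alignment - PAGE) and relies on pages_map()/pages_trim() honoring the *commit flag, dropping the separate pages_decommit() pass. The trimming arithmetic it depends on is shown below; ALIGNMENT_CEILING is reproduced here as the usual power-of-two round-up and the addresses are made up.

/*
 * Map alloc_size = size + alignment bytes, then trim "leadsize" from the
 * front so the returned region is alignment-aligned.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGNMENT_CEILING(s, align)	(((s) + ((align) - 1)) & ~((align) - 1))

int
main(void)
{
	uintptr_t pages = 0x40003000;		/* hypothetical mmap() result */
	size_t size = 2 << 20, alignment = 2 << 20;
	size_t alloc_size = size + alignment;
	uintptr_t ret = ALIGNMENT_CEILING(pages, alignment);
	size_t leadsize = ret - pages;

	/* The aligned region always fits inside the over-allocation. */
	assert(leadsize <= alignment);
	assert(ret + size <= pages + alloc_size);
	assert((ret & (alignment - 1)) == 0);
	return (0);
}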
diff --git a/contrib/jemalloc/src/ckh.c b/contrib/jemalloc/src/ckh.c
index 3b423aa..747c1c8 100644
--- a/contrib/jemalloc/src/ckh.c
+++ b/contrib/jemalloc/src/ckh.c
@@ -40,8 +40,8 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
-static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
+static bool ckh_grow(tsdn_t *tsdn, ckh_t *ckh);
+static void ckh_shrink(tsdn_t *tsdn, ckh_t *ckh);
/******************************************************************************/
@@ -244,7 +244,7 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
}
static bool
-ckh_grow(tsd_t *tsd, ckh_t *ckh)
+ckh_grow(tsdn_t *tsdn, ckh_t *ckh)
{
bool ret;
ckhc_t *tab, *ttab;
@@ -270,8 +270,8 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ret = true;
goto label_return;
}
- tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL,
- true, NULL);
+ tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL,
+ true, arena_ichoose(tsdn, NULL));
if (tab == NULL) {
ret = true;
goto label_return;
@@ -283,12 +283,12 @@ ckh_grow(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd, tab, tcache_get(tsd, false), true, true);
+ idalloctm(tsdn, tab, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true);
+ idalloctm(tsdn, ckh->tab, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
@@ -299,7 +299,7 @@ label_return:
}
static void
-ckh_shrink(tsd_t *tsd, ckh_t *ckh)
+ckh_shrink(tsdn_t *tsdn, ckh_t *ckh)
{
ckhc_t *tab, *ttab;
size_t usize;
@@ -314,8 +314,8 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return;
- tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
- NULL);
+ tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL, true,
+ arena_ichoose(tsdn, NULL));
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -330,7 +330,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
if (!ckh_rebuild(ckh, tab)) {
- idalloctm(tsd, tab, tcache_get(tsd, false), true, true);
+ idalloctm(tsdn, tab, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -338,7 +338,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true);
+ idalloctm(tsdn, ckh->tab, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -347,7 +347,7 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh)
}
bool
-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp)
{
bool ret;
@@ -391,8 +391,8 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ret = true;
goto label_return;
}
- ckh->tab = (ckhc_t *)ipallocztm(tsd, usize, CACHELINE, true, NULL, true,
- NULL);
+ ckh->tab = (ckhc_t *)ipallocztm(tsdn, usize, CACHELINE, true, NULL,
+ true, arena_ichoose(tsdn, NULL));
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -404,7 +404,7 @@ label_return:
}
void
-ckh_delete(tsd_t *tsd, ckh_t *ckh)
+ckh_delete(tsdn_t *tsdn, ckh_t *ckh)
{
assert(ckh != NULL);
@@ -421,9 +421,9 @@ ckh_delete(tsd_t *tsd, ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
- idalloctm(tsd, ckh->tab, tcache_get(tsd, false), true, true);
+ idalloctm(tsdn, ckh->tab, NULL, true, true);
if (config_debug)
- memset(ckh, 0x5a, sizeof(ckh_t));
+ memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
}
size_t
@@ -456,7 +456,7 @@ ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
}
bool
-ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
+ckh_insert(tsdn_t *tsdn, ckh_t *ckh, const void *key, const void *data)
{
bool ret;
@@ -468,7 +468,7 @@ ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data)
#endif
while (ckh_try_insert(ckh, &key, &data)) {
- if (ckh_grow(tsd, ckh)) {
+ if (ckh_grow(tsdn, ckh)) {
ret = true;
goto label_return;
}
@@ -480,7 +480,7 @@ label_return:
}
bool
-ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ckh_remove(tsdn_t *tsdn, ckh_t *ckh, const void *searchkey, void **key,
void **data)
{
size_t cell;
@@ -502,7 +502,7 @@ ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
- ckh_shrink(tsd, ckh);
+ ckh_shrink(tsdn, ckh);
}
return (false);
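
ckh.c switches from tsd_t to tsdn_t, allocates its tables through arena_ichoose() instead of a tcache, and junk-fills deleted tables with JEMALLOC_FREE_JUNK. The grow path keeps its swap-and-back-out structure: install the new table, attempt ckh_rebuild(), and restore the old table if rebuilding fails. A compressed sketch of that structure follows, with calloc()/free() standing in for ipallocztm()/idalloctm() and a fake rebuild step that always succeeds.

/*
 * Shape of ckh_grow(): allocate a larger table, swap it in, try to
 * rebuild, and on failure back out to the old table and retry bigger.
 */
#include <stdbool.h>
#include <stdlib.h>

typedef struct {
	void	**tab;
	size_t	ncells;
} ckh_model_t;

static bool
rebuild(ckh_model_t *ckh, void **old_tab, size_t old_ncells)
{
	(void)ckh; (void)old_tab; (void)old_ncells;
	return (false);		/* false == success, matching ckh_rebuild() */
}

static bool
grow(ckh_model_t *ckh)
{
	size_t ncells = ckh->ncells;

	for (;;) {
		void **tab, **old_tab;
		size_t old_ncells;

		ncells <<= 1;
		tab = calloc(ncells, sizeof(void *));
		if (tab == NULL)
			return (true);		/* OOM: report failure */
		/* Swap in the new table, keeping the old one for back-out. */
		old_tab = ckh->tab;
		old_ncells = ckh->ncells;
		ckh->tab = tab;
		ckh->ncells = ncells;
		if (!rebuild(ckh, old_tab, old_ncells)) {
			free(old_tab);		/* success: discard old table */
			return (false);
		}
		/* Rebuild failed: restore the old table and try again. */
		free(ckh->tab);
		ckh->tab = old_tab;
		ckh->ncells = old_ncells;
	}
}

int
main(void)
{
	ckh_model_t ckh = { calloc(8, sizeof(void *)), 8 };

	return (grow(&ckh) ? 1 : 0);
}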
diff --git a/contrib/jemalloc/src/ctl.c b/contrib/jemalloc/src/ctl.c
index 17bd071..dad8008 100644
--- a/contrib/jemalloc/src/ctl.c
+++ b/contrib/jemalloc/src/ctl.c
@@ -42,25 +42,25 @@ ctl_indexed_node(const ctl_node_t *node)
/* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \
-static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen);
+static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
-static const ctl_named_node_t *n##_index(const size_t *mib, \
- size_t miblen, size_t i);
+static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
+ const size_t *mib, size_t miblen, size_t i);
static bool ctl_arena_init(ctl_arena_stats_t *astats);
static void ctl_arena_clear(ctl_arena_stats_t *astats);
-static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
+static void ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
-static void ctl_arena_refresh(arena_t *arena, unsigned i);
-static bool ctl_grow(void);
-static void ctl_refresh(void);
-static bool ctl_init(void);
-static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp);
+static void ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i);
+static bool ctl_grow(tsdn_t *tsdn);
+static void ctl_refresh(tsdn_t *tsdn);
+static bool ctl_init(tsdn_t *tsdn);
+static int ctl_lookup(tsdn_t *tsdn, const char *name,
+ ctl_node_t const **nodesp, size_t *mibp, size_t *depthp);
CTL_PROTO(version)
CTL_PROTO(epoch)
@@ -117,9 +117,10 @@ CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
-static void arena_i_purge(unsigned arena_ind, bool all);
+static void arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all);
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_decay)
+CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_lg_dirty_mult)
CTL_PROTO(arena_i_decay_time)
@@ -191,6 +192,7 @@ CTL_PROTO(stats_arenas_i_decay_time)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
+CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
@@ -203,6 +205,7 @@ CTL_PROTO(stats_active)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
+CTL_PROTO(stats_retained)
/******************************************************************************/
/* mallctl tree. */
@@ -299,6 +302,7 @@ static const ctl_named_node_t tcache_node[] = {
static const ctl_named_node_t arena_i_node[] = {
{NAME("purge"), CTL(arena_i_purge)},
{NAME("decay"), CTL(arena_i_decay)},
+ {NAME("reset"), CTL(arena_i_reset)},
{NAME("dss"), CTL(arena_i_dss)},
{NAME("lg_dirty_mult"), CTL(arena_i_lg_dirty_mult)},
{NAME("decay_time"), CTL(arena_i_decay_time)},
@@ -456,6 +460,7 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
{NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
+ {NAME("retained"), CTL(stats_arenas_i_retained)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
@@ -482,6 +487,7 @@ static const ctl_named_node_t stats_node[] = {
{NAME("metadata"), CTL(stats_metadata)},
{NAME("resident"), CTL(stats_resident)},
{NAME("mapped"), CTL(stats_mapped)},
+ {NAME("retained"), CTL(stats_retained)},
{NAME("arenas"), CHILD(indexed, stats_arenas)}
};
@@ -554,12 +560,12 @@ ctl_arena_clear(ctl_arena_stats_t *astats)
}
static void
-ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
+ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
if (config_stats) {
- arena_stats_merge(arena, &cstats->nthreads, &cstats->dss,
+ arena_stats_merge(tsdn, arena, &cstats->nthreads, &cstats->dss,
&cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty, &cstats->astats,
cstats->bstats, cstats->lstats, cstats->hstats);
@@ -572,8 +578,8 @@ ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
cstats->nrequests_small += cstats->bstats[i].nrequests;
}
} else {
- arena_basic_stats_merge(arena, &cstats->nthreads, &cstats->dss,
- &cstats->lg_dirty_mult, &cstats->decay_time,
+ arena_basic_stats_merge(tsdn, arena, &cstats->nthreads,
+ &cstats->dss, &cstats->lg_dirty_mult, &cstats->decay_time,
&cstats->pactive, &cstats->pdirty);
}
}
@@ -589,6 +595,7 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
if (config_stats) {
sstats->astats.mapped += astats->astats.mapped;
+ sstats->astats.retained += astats->astats.retained;
sstats->astats.npurge += astats->astats.npurge;
sstats->astats.nmadvise += astats->astats.nmadvise;
sstats->astats.purged += astats->astats.purged;
@@ -649,24 +656,24 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
}
static void
-ctl_arena_refresh(arena_t *arena, unsigned i)
+ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, unsigned i)
{
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats);
- ctl_arena_stats_amerge(astats, arena);
+ ctl_arena_stats_amerge(tsdn, astats, arena);
/* Merge into sum stats as well. */
ctl_arena_stats_smerge(sstats, astats);
}
static bool
-ctl_grow(void)
+ctl_grow(tsdn_t *tsdn)
{
ctl_arena_stats_t *astats;
/* Initialize new arena. */
- if (arena_init(ctl_stats.narenas) == NULL)
+ if (arena_init(tsdn, ctl_stats.narenas) == NULL)
return (true);
/* Allocate extended arena stats. */
@@ -701,7 +708,7 @@ ctl_grow(void)
}
static void
-ctl_refresh(void)
+ctl_refresh(tsdn_t *tsdn)
{
unsigned i;
VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
@@ -713,19 +720,20 @@ ctl_refresh(void)
ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
for (i = 0; i < ctl_stats.narenas; i++)
- tarenas[i] = arena_get(i, false);
+ tarenas[i] = arena_get(tsdn, i, false);
for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized;
if (initialized)
- ctl_arena_refresh(tarenas[i], i);
+ ctl_arena_refresh(tsdn, tarenas[i], i);
}
if (config_stats) {
size_t base_allocated, base_resident, base_mapped;
- base_stats_get(&base_allocated, &base_resident, &base_mapped);
+ base_stats_get(tsdn, &base_allocated, &base_resident,
+ &base_mapped);
ctl_stats.allocated =
ctl_stats.arenas[ctl_stats.narenas].allocated_small +
ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large +
@@ -742,17 +750,19 @@ ctl_refresh(void)
ctl_stats.arenas[ctl_stats.narenas].pdirty) << LG_PAGE);
ctl_stats.mapped = base_mapped +
ctl_stats.arenas[ctl_stats.narenas].astats.mapped;
+ ctl_stats.retained =
+ ctl_stats.arenas[ctl_stats.narenas].astats.retained;
}
ctl_epoch++;
}
static bool
-ctl_init(void)
+ctl_init(tsdn_t *tsdn)
{
bool ret;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsdn, &ctl_mtx);
if (!ctl_initialized) {
/*
* Allocate space for one extra arena stats element, which
@@ -794,19 +804,19 @@ ctl_init(void)
ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
- ctl_refresh();
+ ctl_refresh(tsdn);
ctl_initialized = true;
}
ret = false;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
}
static int
-ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
- size_t *depthp)
+ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
+ size_t *mibp, size_t *depthp)
{
int ret;
const char *elm, *tdot, *dot;
@@ -858,7 +868,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
}
inode = ctl_indexed_node(node->children);
- node = inode->index(mibp, *depthp, (size_t)index);
+ node = inode->index(tsdn, mibp, *depthp, (size_t)index);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -902,8 +912,8 @@ label_return:
}
int
-ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
+ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
int ret;
size_t depth;
@@ -911,19 +921,19 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
- if (!ctl_initialized && ctl_init()) {
+ if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
ret = EAGAIN;
goto label_return;
}
depth = CTL_MAX_DEPTH;
- ret = ctl_lookup(name, nodes, mib, &depth);
+ ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
if (ret != 0)
goto label_return;
node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl)
- ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
+ ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
@@ -934,29 +944,29 @@ label_return:
}
int
-ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
+ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp)
{
int ret;
- if (!ctl_initialized && ctl_init()) {
+ if (!ctl_initialized && ctl_init(tsdn)) {
ret = EAGAIN;
goto label_return;
}
- ret = ctl_lookup(name, NULL, mibp, miblenp);
+ ret = ctl_lookup(tsdn, name, NULL, mibp, miblenp);
label_return:
return(ret);
}
int
-ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
const ctl_named_node_t *node;
size_t i;
- if (!ctl_initialized && ctl_init()) {
+ if (!ctl_initialized && ctl_init(tsd_tsdn(tsd))) {
ret = EAGAIN;
goto label_return;
}
@@ -978,7 +988,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
- node = inode->index(mib, miblen, mib[i]);
+ node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -988,7 +998,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Call the ctl function. */
if (node && node->ctl)
- ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
+ ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
else {
/* Partial MIB. */
ret = ENOENT;
@@ -1002,7 +1012,7 @@ bool
ctl_boot(void)
{
- if (malloc_mutex_init(&ctl_mtx))
+ if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL))
return (true);
ctl_initialized = false;
@@ -1011,24 +1021,24 @@ ctl_boot(void)
}
void
-ctl_prefork(void)
+ctl_prefork(tsdn_t *tsdn)
{
- malloc_mutex_prefork(&ctl_mtx);
+ malloc_mutex_prefork(tsdn, &ctl_mtx);
}
void
-ctl_postfork_parent(void)
+ctl_postfork_parent(tsdn_t *tsdn)
{
- malloc_mutex_postfork_parent(&ctl_mtx);
+ malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}
void
-ctl_postfork_child(void)
+ctl_postfork_child(tsdn_t *tsdn)
{
- malloc_mutex_postfork_child(&ctl_mtx);
+ malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
/******************************************************************************/
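
In ctl.c every handler now receives the caller's tsd_t (CTL_PROTO) and indexed nodes receive a tsdn_t (INDEX_PROTO), so ctl_mtx locking and arena lookups can go through the witness-aware paths; the CTL_RO_* generator macros are reworked to match, and CTL_TSD_RO_NL_CGEN no longer fetches tsd itself. A stripped-down version of the generator pattern is sketched below; the handler signature, the stand-in tsd_t, and the inlined READ step are simplifications relative to the real macros, which also take mib/miblen/newp/newlen and lock ctl_mtx.

/*
 * Sketch of a CTL_RO_GEN()-style macro: stamp out a read-only handler
 * that copies a value through oldp/oldlenp.
 */
#include <errno.h>
#include <stddef.h>
#include <string.h>

typedef struct { int dummy; } tsd_t;	/* stand-in for jemalloc's tsd_t */

#define CTL_RO_GEN(n, v, t)						\
static int								\
n##_ctl(tsd_t *tsd, void *oldp, size_t *oldlenp)			\
{									\
	t oldval = (v);							\
	(void)tsd;							\
	if (oldp == NULL || oldlenp == NULL || *oldlenp < sizeof(t))	\
		return (EINVAL);					\
	memcpy(oldp, &oldval, sizeof(t));				\
	*oldlenp = sizeof(t);						\
	return (0);							\
}

static unsigned ctl_epoch_value = 42;
CTL_RO_GEN(epoch_demo, ctl_epoch_value, unsigned)

int
main(void)
{
	tsd_t tsd;
	unsigned out;
	size_t outlen = sizeof(out);

	return (epoch_demo_ctl(&tsd, &out, &outlen) != 0 || out != 42);
}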
@@ -1085,8 +1095,8 @@ ctl_postfork_child(void)
*/
#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1094,7 +1104,7 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
if (!(c)) \
return (ENOENT); \
if (l) \
- malloc_mutex_lock(&ctl_mtx); \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
@@ -1102,47 +1112,47 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
ret = 0; \
label_return: \
if (l) \
- malloc_mutex_unlock(&ctl_mtx); \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
}
#define CTL_RO_CGEN(c, n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
if (!(c)) \
return (ENOENT); \
- malloc_mutex_lock(&ctl_mtx); \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(&ctl_mtx); \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
}
#define CTL_RO_GEN(n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
\
- malloc_mutex_lock(&ctl_mtx); \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(&ctl_mtx); \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
return (ret); \
}
@@ -1152,8 +1162,8 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1171,8 +1181,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1188,17 +1198,15 @@ label_return: \
#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
- tsd_t *tsd; \
\
if (!(c)) \
return (ENOENT); \
READONLY(); \
- tsd = tsd_fetch(); \
oldval = (m(tsd)); \
READ(oldval, t); \
\
@@ -1209,8 +1217,8 @@ label_return: \
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) \
{ \
int ret; \
t oldval; \
@@ -1229,21 +1237,21 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
-epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
UNUSED uint64_t newval;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(newval, uint64_t);
if (newp != NULL)
- ctl_refresh();
+ ctl_refresh(tsd_tsdn(tsd));
READ(ctl_epoch, uint64_t);
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
@@ -1298,20 +1306,18 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/
static int
-thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
- tsd_t *tsd;
arena_t *oldarena;
unsigned newind, oldind;
- tsd = tsd_fetch();
oldarena = arena_choose(tsd, NULL);
if (oldarena == NULL)
return (EAGAIN);
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
newind = oldind = oldarena->ind;
WRITE(newind, unsigned);
READ(oldind, unsigned);
@@ -1325,7 +1331,7 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
/* Initialize arena if necessary. */
- newarena = arena_get(newind, true);
+ newarena = arena_get(tsd_tsdn(tsd), newind, true);
if (newarena == NULL) {
ret = EAGAIN;
goto label_return;
@@ -1335,15 +1341,15 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
if (config_tcache) {
tcache_t *tcache = tsd_tcache_get(tsd);
if (tcache != NULL) {
- tcache_arena_reassociate(tcache, oldarena,
- newarena);
+ tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
+ oldarena, newarena);
}
}
}
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
@@ -1357,8 +1363,8 @@ CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
tsd_thread_deallocatedp_get, uint64_t *)
static int
-thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -1382,8 +1388,8 @@ label_return:
}
static int
-thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1401,7 +1407,7 @@ label_return:
}
static int
-thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
+thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1412,20 +1418,16 @@ thread_prof_name_ctl(const size_t *mib, size_t miblen, void *oldp,
READ_XOR_WRITE();
if (newp != NULL) {
- tsd_t *tsd;
-
if (newlen != sizeof(const char *)) {
ret = EINVAL;
goto label_return;
}
- tsd = tsd_fetch();
-
if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
0)
goto label_return;
} else {
- const char *oldname = prof_thread_name_get();
+ const char *oldname = prof_thread_name_get(tsd);
READ(oldname, const char *);
}
@@ -1435,7 +1437,7 @@ label_return:
}
static int
-thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
+thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1444,13 +1446,13 @@ thread_prof_active_ctl(const size_t *mib, size_t miblen, void *oldp,
if (!config_prof)
return (ENOENT);
- oldval = prof_thread_active_get();
+ oldval = prof_thread_active_get(tsd);
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
- if (prof_thread_active_set(*(bool *)newp)) {
+ if (prof_thread_active_set(tsd, *(bool *)newp)) {
ret = EAGAIN;
goto label_return;
}
@@ -1465,21 +1467,18 @@ label_return:
/******************************************************************************/
static int
-tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
- tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
- tsd = tsd_fetch();
-
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
- if (tcaches_create(tsd, &tcache_ind)) {
+ if (tcaches_create(tsd_tsdn(tsd), &tcache_ind)) {
ret = EFAULT;
goto label_return;
}
@@ -1487,23 +1486,20 @@ tcache_create_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static int
-tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
- tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
- tsd = tsd_fetch();
-
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
@@ -1519,18 +1515,15 @@ label_return:
}
static int
-tcache_destroy_ctl(const size_t *mib, size_t miblen, void *oldp,
+tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
- tsd_t *tsd;
unsigned tcache_ind;
if (!config_tcache)
return (ENOENT);
- tsd = tsd_fetch();
-
WRITEONLY();
tcache_ind = UINT_MAX;
WRITE(tcache_ind, unsigned);
@@ -1548,10 +1541,10 @@ label_return:
/******************************************************************************/
static void
-arena_i_purge(unsigned arena_ind, bool all)
+arena_i_purge(tsdn_t *tsdn, unsigned arena_ind, bool all)
{
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsdn, &ctl_mtx);
{
unsigned narenas = ctl_stats.narenas;
@@ -1560,43 +1553,43 @@ arena_i_purge(unsigned arena_ind, bool all)
VARIABLE_ARRAY(arena_t *, tarenas, narenas);
for (i = 0; i < narenas; i++)
- tarenas[i] = arena_get(i, false);
+ tarenas[i] = arena_get(tsdn, i, false);
/*
* No further need to hold ctl_mtx, since narenas and
* tarenas contain everything needed below.
*/
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
for (i = 0; i < narenas; i++) {
if (tarenas[i] != NULL)
- arena_purge(tarenas[i], all);
+ arena_purge(tsdn, tarenas[i], all);
}
} else {
arena_t *tarena;
assert(arena_ind < narenas);
- tarena = arena_get(arena_ind, false);
+ tarena = arena_get(tsdn, arena_ind, false);
/* No further need to hold ctl_mtx. */
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
if (tarena != NULL)
- arena_purge(tarena, all);
+ arena_purge(tsdn, tarena, all);
}
}
}
static int
-arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
- arena_i_purge((unsigned)mib[1], true);
+ arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], true);
ret = 0;
label_return:
@@ -1604,14 +1597,14 @@ label_return:
}
static int
-arena_i_decay_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
READONLY();
WRITEONLY();
- arena_i_purge((unsigned)mib[1], false);
+ arena_i_purge(tsd_tsdn(tsd), (unsigned)mib[1], false);
ret = 0;
label_return:
@@ -1619,8 +1612,42 @@ label_return:
}
static int
-arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ READONLY();
+ WRITEONLY();
+
+ if ((config_valgrind && unlikely(in_valgrind)) || (config_fill &&
+ unlikely(opt_quarantine))) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ arena_ind = (unsigned)mib[1];
+ if (config_debug) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ assert(arena_ind < ctl_stats.narenas);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ }
+ assert(arena_ind >= opt_narenas);
+
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+
+ arena_reset(tsd, arena);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
const char *dss = NULL;
@@ -1628,7 +1655,7 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(dss, const char *);
if (dss != NULL) {
int i;
@@ -1649,20 +1676,20 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
if (arena_ind < ctl_stats.narenas) {
- arena_t *arena = arena_get(arena_ind, false);
+ arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL || (dss_prec != dss_prec_limit &&
- arena_dss_prec_set(arena, dss_prec))) {
+ arena_dss_prec_set(tsd_tsdn(tsd), arena, dss_prec))) {
ret = EFAULT;
goto label_return;
}
- dss_prec_old = arena_dss_prec_get(arena);
+ dss_prec_old = arena_dss_prec_get(tsd_tsdn(tsd), arena);
} else {
if (dss_prec != dss_prec_limit &&
- chunk_dss_prec_set(dss_prec)) {
+ chunk_dss_prec_set(tsd_tsdn(tsd), dss_prec)) {
ret = EFAULT;
goto label_return;
}
- dss_prec_old = chunk_dss_prec_get();
+ dss_prec_old = chunk_dss_prec_get(tsd_tsdn(tsd));
}
dss = dss_prec_names[dss_prec_old];
@@ -1670,26 +1697,26 @@ arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static int
-arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+arena_i_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
- arena = arena_get(arena_ind, false);
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_lg_dirty_mult_get(arena);
+ size_t oldval = arena_lg_dirty_mult_get(tsd_tsdn(tsd), arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -1697,7 +1724,8 @@ arena_i_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- if (arena_lg_dirty_mult_set(arena, *(ssize_t *)newp)) {
+ if (arena_lg_dirty_mult_set(tsd_tsdn(tsd), arena,
+ *(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -1709,21 +1737,21 @@ label_return:
}
static int
-arena_i_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
+arena_i_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
- arena = arena_get(arena_ind, false);
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = arena_decay_time_get(arena);
+ size_t oldval = arena_decay_time_get(tsd_tsdn(tsd), arena);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -1731,7 +1759,8 @@ arena_i_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- if (arena_decay_time_set(arena, *(ssize_t *)newp)) {
+ if (arena_decay_time_set(tsd_tsdn(tsd), arena,
+ *(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -1743,24 +1772,25 @@ label_return:
}
static int
-arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+arena_i_chunk_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned arena_ind = (unsigned)mib[1];
arena_t *arena;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
if (arena_ind < narenas_total_get() && (arena =
- arena_get(arena_ind, false)) != NULL) {
+ arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
if (newp != NULL) {
chunk_hooks_t old_chunk_hooks, new_chunk_hooks;
WRITE(new_chunk_hooks, chunk_hooks_t);
- old_chunk_hooks = chunk_hooks_set(arena,
+ old_chunk_hooks = chunk_hooks_set(tsd_tsdn(tsd), arena,
&new_chunk_hooks);
READ(old_chunk_hooks, chunk_hooks_t);
} else {
- chunk_hooks_t old_chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks_t old_chunk_hooks =
+ chunk_hooks_get(tsd_tsdn(tsd), arena);
READ(old_chunk_hooks, chunk_hooks_t);
}
} else {
@@ -1769,16 +1799,16 @@ arena_i_chunk_hooks_ctl(const size_t *mib, size_t miblen, void *oldp,
}
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static const ctl_named_node_t *
-arena_i_index(const size_t *mib, size_t miblen, size_t i)
+arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
- const ctl_named_node_t * ret;
+ const ctl_named_node_t *ret;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsdn, &ctl_mtx);
if (i > ctl_stats.narenas) {
ret = NULL;
goto label_return;
@@ -1786,20 +1816,20 @@ arena_i_index(const size_t *mib, size_t miblen, size_t i)
ret = super_arena_i_node;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
-arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
+arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned narenas;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
@@ -1810,18 +1840,18 @@ arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static int
-arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
+arenas_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned nread, i;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
@@ -1836,13 +1866,13 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
static int
-arenas_lg_dirty_mult_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+arenas_lg_dirty_mult_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1867,7 +1897,7 @@ label_return:
}
static int
-arenas_decay_time_ctl(const size_t *mib, size_t miblen, void *oldp,
+arenas_decay_time_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
@@ -1901,7 +1931,7 @@ CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
-arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
+arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > NBINS)
@@ -1912,7 +1942,7 @@ arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, unsigned)
CTL_RO_NL_GEN(arenas_lrun_i_size, index2size(NBINS+(szind_t)mib[2]), size_t)
static const ctl_named_node_t *
-arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
+arenas_lrun_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > nlclasses)
@@ -1924,7 +1954,7 @@ CTL_RO_NL_GEN(arenas_nhchunks, nhclasses, unsigned)
CTL_RO_NL_GEN(arenas_hchunk_i_size, index2size(NBINS+nlclasses+(szind_t)mib[2]),
size_t)
static const ctl_named_node_t *
-arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
+arenas_hchunk_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
if (i > nhclasses)
@@ -1933,15 +1963,15 @@ arenas_hchunk_i_index(const size_t *mib, size_t miblen, size_t i)
}
static int
-arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+arenas_extend_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
unsigned narenas;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
- if (ctl_grow()) {
+ if (ctl_grow(tsd_tsdn(tsd))) {
ret = EAGAIN;
goto label_return;
}
@@ -1950,15 +1980,15 @@ arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return (ret);
}
/******************************************************************************/
static int
-prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -1971,9 +2001,10 @@ prof_thread_active_init_ctl(const size_t *mib, size_t miblen, void *oldp,
ret = EINVAL;
goto label_return;
}
- oldval = prof_thread_active_init_set(*(bool *)newp);
+ oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
+ *(bool *)newp);
} else
- oldval = prof_thread_active_init_get();
+ oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
READ(oldval, bool);
ret = 0;
@@ -1982,8 +2013,8 @@ label_return:
}
static int
-prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -1996,9 +2027,9 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = EINVAL;
goto label_return;
}
- oldval = prof_active_set(*(bool *)newp);
+ oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
} else
- oldval = prof_active_get();
+ oldval = prof_active_get(tsd_tsdn(tsd));
READ(oldval, bool);
ret = 0;
@@ -2007,8 +2038,8 @@ label_return:
}
static int
-prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
const char *filename = NULL;
@@ -2019,7 +2050,7 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
WRITEONLY();
WRITE(filename, const char *);
- if (prof_mdump(filename)) {
+ if (prof_mdump(tsd, filename)) {
ret = EFAULT;
goto label_return;
}
@@ -2030,8 +2061,8 @@ label_return:
}
static int
-prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
bool oldval;
@@ -2044,9 +2075,9 @@ prof_gdump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
ret = EINVAL;
goto label_return;
}
- oldval = prof_gdump_set(*(bool *)newp);
+ oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else
- oldval = prof_gdump_get();
+ oldval = prof_gdump_get(tsd_tsdn(tsd));
READ(oldval, bool);
ret = 0;
@@ -2055,12 +2086,11 @@ label_return:
}
static int
-prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
size_t lg_sample = lg_prof_sample;
- tsd_t *tsd;
if (!config_prof)
return (ENOENT);
@@ -2070,9 +2100,7 @@ prof_reset_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
if (lg_sample >= (sizeof(uint64_t) << 3))
lg_sample = (sizeof(uint64_t) << 3) - 1;
- tsd = tsd_fetch();
-
- prof_reset(tsd, lg_sample);
+ prof_reset(tsd_tsdn(tsd), lg_sample);
ret = 0;
label_return:
@@ -2090,6 +2118,7 @@ CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats.metadata, size_t)
CTL_RO_CGEN(config_stats, stats_resident, ctl_stats.resident, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_retained, ctl_stats.retained, size_t)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_lg_dirty_mult, ctl_stats.arenas[mib[2]].lg_dirty_mult,
@@ -2101,6 +2130,8 @@ CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
+ ctl_stats.arenas[mib[2]].astats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
@@ -2157,7 +2188,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
-stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
+stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j)
{
if (j > NBINS)
@@ -2175,7 +2207,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
static const ctl_named_node_t *
-stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
+stats_arenas_i_lruns_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j)
{
if (j > nlclasses)
@@ -2194,7 +2227,8 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_hchunks_j_curhchunks,
ctl_stats.arenas[mib[2]].hstats[mib[4]].curhchunks, size_t)
static const ctl_named_node_t *
-stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
+stats_arenas_i_hchunks_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j)
{
if (j > nhclasses)
@@ -2203,11 +2237,11 @@ stats_arenas_i_hchunks_j_index(const size_t *mib, size_t miblen, size_t j)
}
static const ctl_named_node_t *
-stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
+stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i)
{
const ctl_named_node_t * ret;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsdn, &ctl_mtx);
if (i > ctl_stats.narenas || !ctl_stats.arenas[i].initialized) {
ret = NULL;
goto label_return;
@@ -2215,6 +2249,6 @@ stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
ret = super_stats_arenas_i_node;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
return (ret);
}
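
The ctl.c hunks above all follow one pattern: every ctl handler and every *_index callback gains the caller's thread state (tsd_t * or tsdn_t *) as its first parameter, handlers that used to call tsd_fetch() internally now receive tsd from ctl_byname()/ctl_bymib(), and every malloc_mutex_lock()/malloc_mutex_unlock() on ctl_mtx takes the tsdn so the new witness (lock-rank) machinery can attribute the lock to its owner. The following is a minimal, self-contained sketch of that calling convention only; all names (fake_tsd_t, fake_mutex_t, example_narenas_ctl, ...) are hypothetical stand-ins, not jemalloc's API, and the witness bookkeeping is reduced to a depth counter.

	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	typedef struct { int witness_depth; } fake_tsdn_t;	/* stand-in for tsdn_t */
	typedef struct { fake_tsdn_t tsdn; } fake_tsd_t;	/* stand-in for tsd_t  */
	typedef struct { int locked; } fake_mutex_t;

	static fake_tsdn_t *
	fake_tsd_tsdn(fake_tsd_t *tsd)
	{
		return (&tsd->tsdn);
	}

	static void
	fake_mutex_lock(fake_tsdn_t *tsdn, fake_mutex_t *m)
	{
		tsdn->witness_depth++;	/* lock ownership is now attributable */
		m->locked = 1;
	}

	static void
	fake_mutex_unlock(fake_tsdn_t *tsdn, fake_mutex_t *m)
	{
		m->locked = 0;
		tsdn->witness_depth--;
	}

	static fake_mutex_t fake_ctl_mtx;
	static unsigned fake_narenas = 4;

	/* New-style handler: the caller's tsd arrives as the first parameter. */
	static int
	example_narenas_ctl(fake_tsd_t *tsd, const size_t *mib, size_t miblen,
	    void *oldp, size_t *oldlenp, void *newp, size_t newlen)
	{
		int ret;

		(void)mib; (void)miblen; (void)newp; (void)newlen;
		fake_mutex_lock(fake_tsd_tsdn(tsd), &fake_ctl_mtx);
		if (oldp == NULL || oldlenp == NULL ||
		    *oldlenp != sizeof(unsigned)) {
			ret = EINVAL;
			goto label_return;
		}
		*(unsigned *)oldp = fake_narenas;
		ret = 0;
	label_return:
		fake_mutex_unlock(fake_tsd_tsdn(tsd), &fake_ctl_mtx);
		return (ret);
	}

	int
	main(void)
	{
		fake_tsd_t tsd = {{0}};
		size_t mib[2] = {0, 0};
		unsigned narenas;
		size_t len = sizeof(narenas);

		if (example_narenas_ctl(&tsd, mib, 2, &narenas, &len, NULL, 0) == 0)
			printf("narenas = %u\n", narenas);
		return (0);
	}
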
diff --git a/contrib/jemalloc/src/huge.c b/contrib/jemalloc/src/huge.c
index 5f7ceaf..1aa02a0 100644
--- a/contrib/jemalloc/src/huge.c
+++ b/contrib/jemalloc/src/huge.c
@@ -15,12 +15,21 @@ huge_node_get(const void *ptr)
}
static bool
-huge_node_set(const void *ptr, extent_node_t *node)
+huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
assert(extent_node_addr_get(node) == ptr);
assert(!extent_node_achunk_get(node));
- return (chunk_register(ptr, node));
+ return (chunk_register(tsdn, ptr, node));
+}
+
+static void
+huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
+{
+ bool err;
+
+ err = huge_node_set(tsdn, ptr, node);
+ assert(!err);
}
static void
@@ -31,18 +40,17 @@ huge_node_unset(const void *ptr, const extent_node_t *node)
}
void *
-huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
- tcache_t *tcache)
+huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
{
assert(usize == s2u(usize));
- return (huge_palloc(tsd, arena, usize, chunksize, zero, tcache));
+ return (huge_palloc(tsdn, arena, usize, chunksize, zero));
}
void *
-huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
- bool zero, tcache_t *tcache)
+huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero)
{
void *ret;
size_t ausize;
@@ -51,14 +59,16 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
/* Allocate one or more contiguous chunks for this request. */
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
ausize = sa2u(usize, alignment);
if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
return (NULL);
assert(ausize >= chunksize);
/* Allocate an extent node with which to track the chunk. */
- node = ipallocztm(tsd, CACHELINE_CEILING(sizeof(extent_node_t)),
- CACHELINE, false, tcache, true, arena);
+ node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
+ CACHELINE, false, NULL, true, arena_ichoose(tsdn, arena));
if (node == NULL)
return (NULL);
@@ -67,34 +77,35 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
* it is possible to make correct junk/zero fill decisions below.
*/
is_zeroed = zero;
- arena = arena_choose(tsd, arena);
- if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(arena,
- usize, alignment, &is_zeroed)) == NULL) {
- idalloctm(tsd, node, tcache, true, true);
+ if (likely(!tsdn_null(tsdn)))
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
+ if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
+ arena, usize, alignment, &is_zeroed)) == NULL) {
+ idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
extent_node_init(node, arena, ret, usize, is_zeroed, true);
- if (huge_node_set(ret, node)) {
- arena_chunk_dalloc_huge(arena, ret, usize);
- idalloctm(tsd, node, tcache, true, true);
+ if (huge_node_set(tsdn, ret, node)) {
+ arena_chunk_dalloc_huge(tsdn, arena, ret, usize);
+ idalloctm(tsdn, node, NULL, true, true);
return (NULL);
}
/* Insert node into huge. */
- malloc_mutex_lock(&arena->huge_mtx);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_elm_new(node, ql_link);
ql_tail_insert(&arena->huge, node, ql_link);
- malloc_mutex_unlock(&arena->huge_mtx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed)
memset(ret, 0, usize);
} else if (config_fill && unlikely(opt_junk_alloc))
- memset(ret, 0xa5, usize);
+ memset(ret, JEMALLOC_ALLOC_JUNK, usize);
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
return (ret);
}
@@ -103,7 +114,7 @@ huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
-huge_dalloc_junk(void *ptr, size_t usize)
+huge_dalloc_junk(tsdn_t *tsdn, void *ptr, size_t usize)
{
if (config_fill && have_dss && unlikely(opt_junk_free)) {
@@ -111,8 +122,8 @@ huge_dalloc_junk(void *ptr, size_t usize)
* Only bother junk filling if the chunk isn't about to be
* unmapped.
*/
- if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
- memset(ptr, 0x5a, usize);
+ if (!config_munmap || (have_dss && chunk_in_dss(tsdn, ptr)))
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
}
#ifdef JEMALLOC_JET
@@ -122,8 +133,8 @@ huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif
static void
-huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
- size_t usize_max, bool zero)
+huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t usize_min, size_t usize_max, bool zero)
{
size_t usize, usize_next;
extent_node_t *node;
@@ -147,24 +158,28 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
- memset((void *)((uintptr_t)ptr + usize), 0x5a, sdiff);
+ memset((void *)((uintptr_t)ptr + usize),
+ JEMALLOC_FREE_JUNK, sdiff);
post_zeroed = false;
} else {
- post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
- ptr, CHUNK_CEILING(oldsize), usize, sdiff);
+ post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+ &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
+ sdiff);
}
} else
post_zeroed = pre_zeroed;
- malloc_mutex_lock(&arena->huge_mtx);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
+ huge_node_unset(ptr, node);
assert(extent_node_size_get(node) != usize);
extent_node_size_set(node, usize);
+ huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
- malloc_mutex_unlock(&arena->huge_mtx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
- arena_chunk_ralloc_huge_similar(arena, ptr, oldsize, usize);
+ arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);
/* Fill if necessary (growing). */
if (oldsize < usize) {
@@ -174,14 +189,15 @@ huge_ralloc_no_move_similar(void *ptr, size_t oldsize, size_t usize_min,
usize - oldsize);
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
- oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize),
+ JEMALLOC_ALLOC_JUNK, usize - oldsize);
}
}
}
static bool
-huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
+huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t usize)
{
extent_node_t *node;
arena_t *arena;
@@ -192,7 +208,7 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
pre_zeroed = extent_node_zeroed_get(node);
- chunk_hooks = chunk_hooks_get(arena);
+ chunk_hooks = chunk_hooks_get(tsdn, arena);
assert(oldsize > usize);
@@ -205,42 +221,45 @@ huge_ralloc_no_move_shrink(void *ptr, size_t oldsize, size_t usize)
if (oldsize > usize) {
size_t sdiff = oldsize - usize;
if (config_fill && unlikely(opt_junk_free)) {
- huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
+ huge_dalloc_junk(tsdn, (void *)((uintptr_t)ptr + usize),
sdiff);
post_zeroed = false;
} else {
- post_zeroed = !chunk_purge_wrapper(arena, &chunk_hooks,
- CHUNK_ADDR2BASE((uintptr_t)ptr + usize),
- CHUNK_CEILING(oldsize),
+ post_zeroed = !chunk_purge_wrapper(tsdn, arena,
+ &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
+ usize), CHUNK_CEILING(oldsize),
CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
}
} else
post_zeroed = pre_zeroed;
- malloc_mutex_lock(&arena->huge_mtx);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
+ huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
+ huge_node_reset(tsdn, ptr, node);
/* Update zeroed. */
extent_node_zeroed_set(node, post_zeroed);
- malloc_mutex_unlock(&arena->huge_mtx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/* Zap the excess chunks. */
- arena_chunk_ralloc_huge_shrink(arena, ptr, oldsize, usize);
+ arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize);
return (false);
}
static bool
-huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
+huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t usize, bool zero) {
extent_node_t *node;
arena_t *arena;
bool is_zeroed_subchunk, is_zeroed_chunk;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(&arena->huge_mtx);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
is_zeroed_subchunk = extent_node_zeroed_get(node);
- malloc_mutex_unlock(&arena->huge_mtx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
/*
* Copy zero into is_zeroed_chunk and pass the copy to chunk_alloc(), so
@@ -248,14 +267,16 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
*/
is_zeroed_chunk = zero;
- if (arena_chunk_ralloc_huge_expand(arena, ptr, oldsize, usize,
+ if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
&is_zeroed_chunk))
return (true);
- malloc_mutex_lock(&arena->huge_mtx);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
/* Update the size of the huge allocation. */
+ huge_node_unset(ptr, node);
extent_node_size_set(node, usize);
- malloc_mutex_unlock(&arena->huge_mtx);
+ huge_node_reset(tsdn, ptr, node);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
if (zero || (config_fill && unlikely(opt_zero))) {
if (!is_zeroed_subchunk) {
@@ -268,15 +289,15 @@ huge_ralloc_no_move_expand(void *ptr, size_t oldsize, size_t usize, bool zero) {
CHUNK_CEILING(oldsize));
}
} else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr + oldsize), 0xa5, usize -
- oldsize);
+ memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
+ usize - oldsize);
}
return (false);
}
bool
-huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
+huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
size_t usize_max, bool zero)
{
@@ -290,16 +311,16 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
/* Attempt to expand the allocation in-place. */
- if (!huge_ralloc_no_move_expand(ptr, oldsize, usize_max,
+ if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
zero)) {
- arena_decay_tick(tsd, huge_aalloc(ptr));
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
- CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(ptr,
- oldsize, usize_min, zero)) {
- arena_decay_tick(tsd, huge_aalloc(ptr));
+ CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
+ ptr, oldsize, usize_min, zero)) {
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
}
@@ -310,16 +331,17 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
*/
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
- huge_ralloc_no_move_similar(ptr, oldsize, usize_min, usize_max,
- zero);
- arena_decay_tick(tsd, huge_aalloc(ptr));
+ huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
+ usize_max, zero);
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
/* Attempt to shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
- if (!huge_ralloc_no_move_shrink(ptr, oldsize, usize_max)) {
- arena_decay_tick(tsd, huge_aalloc(ptr));
+ if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
+ usize_max)) {
+ arena_decay_tick(tsdn, huge_aalloc(ptr));
return (false);
}
}
@@ -327,18 +349,18 @@ huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t usize_min,
}
static void *
-huge_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero)
{
if (alignment <= chunksize)
- return (huge_malloc(tsd, arena, usize, zero, tcache));
- return (huge_palloc(tsd, arena, usize, alignment, zero, tcache));
+ return (huge_malloc(tsdn, arena, usize, zero));
+ return (huge_palloc(tsdn, arena, usize, alignment, zero));
}
void *
-huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
- size_t alignment, bool zero, tcache_t *tcache)
+huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
size_t copysize;
@@ -347,7 +369,8 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
assert(usize > 0 && usize <= HUGE_MAXCLASS);
/* Try to avoid moving the allocation. */
- if (!huge_ralloc_no_move(tsd, ptr, oldsize, usize, usize, zero))
+ if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
+ zero))
return (ptr);
/*
@@ -355,19 +378,19 @@ huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t usize,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
- ret = huge_ralloc_move_helper(tsd, arena, usize, alignment, zero,
- tcache);
+ ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
+ zero);
if (ret == NULL)
return (NULL);
copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
return (ret);
}
void
-huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
+huge_dalloc(tsdn_t *tsdn, void *ptr)
{
extent_node_t *node;
arena_t *arena;
@@ -375,17 +398,17 @@ huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
huge_node_unset(ptr, node);
- malloc_mutex_lock(&arena->huge_mtx);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
ql_remove(&arena->huge, node, ql_link);
- malloc_mutex_unlock(&arena->huge_mtx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
- huge_dalloc_junk(extent_node_addr_get(node),
+ huge_dalloc_junk(tsdn, extent_node_addr_get(node),
extent_node_size_get(node));
- arena_chunk_dalloc_huge(extent_node_arena_get(node),
+ arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
extent_node_addr_get(node), extent_node_size_get(node));
- idalloctm(tsd, node, tcache, true, true);
+ idalloctm(tsdn, node, NULL, true, true);
- arena_decay_tick(tsd, arena);
+ arena_decay_tick(tsdn, arena);
}
arena_t *
@@ -396,7 +419,7 @@ huge_aalloc(const void *ptr)
}
size_t
-huge_salloc(const void *ptr)
+huge_salloc(tsdn_t *tsdn, const void *ptr)
{
size_t size;
extent_node_t *node;
@@ -404,15 +427,15 @@ huge_salloc(const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(&arena->huge_mtx);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
size = extent_node_size_get(node);
- malloc_mutex_unlock(&arena->huge_mtx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (size);
}
prof_tctx_t *
-huge_prof_tctx_get(const void *ptr)
+huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
prof_tctx_t *tctx;
extent_node_t *node;
@@ -420,29 +443,29 @@ huge_prof_tctx_get(const void *ptr)
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(&arena->huge_mtx);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
tctx = extent_node_prof_tctx_get(node);
- malloc_mutex_unlock(&arena->huge_mtx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
return (tctx);
}
void
-huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
+huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
extent_node_t *node;
arena_t *arena;
node = huge_node_get(ptr);
arena = extent_node_arena_get(node);
- malloc_mutex_lock(&arena->huge_mtx);
+ malloc_mutex_lock(tsdn, &arena->huge_mtx);
extent_node_prof_tctx_set(node, tctx);
- malloc_mutex_unlock(&arena->huge_mtx);
+ malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}
void
-huge_prof_tctx_reset(const void *ptr)
+huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
{
- huge_prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
+ huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
}
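
In the huge.c hunks above, the huge_* entry points switch from tsd_t * (plus an explicit tcache) to tsdn_t *, junk bytes become the JEMALLOC_ALLOC_JUNK/JEMALLOC_FREE_JUNK constants, and every in-place resize now brackets extent_node_size_set() with huge_node_unset()/huge_node_reset() while arena->huge_mtx is held, presumably so the chunk registry never holds a node whose recorded size is mid-update. The sketch below imitates only that unset/update/reset bracket with a toy pointer-keyed registry; every name (toy_node_t, toy_register, ...) is a hypothetical stand-in, not jemalloc code.

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	#define TOY_SLOTS 8

	typedef struct {
		const void	*addr;
		size_t		size;
	} toy_node_t;

	static toy_node_t *toy_registry[TOY_SLOTS];

	static int
	toy_register(const void *ptr, toy_node_t *node)
	{
		unsigned i;

		assert(node->addr == ptr);
		for (i = 0; i < TOY_SLOTS; i++) {
			if (toy_registry[i] == NULL) {
				toy_registry[i] = node;
				return (0);
			}
		}
		return (1);	/* registry full */
	}

	static void
	toy_unregister(const void *ptr)
	{
		unsigned i;

		for (i = 0; i < TOY_SLOTS; i++) {
			if (toy_registry[i] != NULL && toy_registry[i]->addr == ptr)
				toy_registry[i] = NULL;
		}
	}

	static void
	toy_resize(const void *ptr, toy_node_t *node, size_t newsize)
	{

		/* Same bracket as the patch: unset, mutate the size, reset. */
		toy_unregister(ptr);
		node->size = newsize;
		if (toy_register(ptr, node) != 0)
			assert(0);	/* re-insertion must not fail */
	}

	int
	main(void)
	{
		static char chunk[4096];
		toy_node_t node = { chunk, sizeof(chunk) };

		toy_register(chunk, &node);
		toy_resize(chunk, &node, 2048);
		printf("size = %zu\n", node.size);
		return (0);
	}
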
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index a34b85c..666c49d 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -64,7 +64,7 @@ static malloc_mutex_t arenas_lock;
arena_t **arenas;
static unsigned narenas_total; /* Use narenas_total_*(). */
static arena_t *a0; /* arenas[0]; read-only after initialization. */
-static unsigned narenas_auto; /* Read-only after initialization. */
+unsigned narenas_auto; /* Read-only after initialization. */
typedef enum {
malloc_init_uninitialized = 3,
@@ -74,10 +74,10 @@ typedef enum {
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;
-/* 0 should be the common case. Set to true to trigger initialization. */
+/* False should be the common case. Set to true to trigger initialization. */
static bool malloc_slow = true;
-/* When malloc_slow != 0, set the corresponding bits for sanity check. */
+/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
flag_opt_junk_alloc = (1U),
flag_opt_junk_free = (1U << 1),
@@ -216,7 +216,7 @@ _init_init_lock(void)
* really only matters early in the process creation, before any
* separate thread normally starts doing anything. */
if (!init_lock_initialized)
- malloc_mutex_init(&init_lock);
+ malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
init_lock_initialized = true;
}
@@ -311,7 +311,7 @@ malloc_init(void)
}
/*
- * The a0*() functions are used instead of i[mcd]alloc() in situations that
+ * The a0*() functions are used instead of i{d,}alloc() in situations that
* cannot tolerate TLS variable access.
*/
@@ -322,15 +322,15 @@ a0ialloc(size_t size, bool zero, bool is_metadata)
if (unlikely(malloc_init_a0()))
return (NULL);
- return (iallocztm(NULL, size, size2index(size), zero, false,
- is_metadata, arena_get(0, false), true));
+ return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
+ is_metadata, arena_get(TSDN_NULL, 0, true), true));
}
static void
a0idalloc(void *ptr, bool is_metadata)
{
- idalloctm(NULL, ptr, false, is_metadata, true);
+ idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
}
void *
@@ -417,7 +417,7 @@ narenas_total_get(void)
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
-arena_init_locked(unsigned ind)
+arena_init_locked(tsdn_t *tsdn, unsigned ind)
{
arena_t *arena;
@@ -431,39 +431,43 @@ arena_init_locked(unsigned ind)
* Another thread may have already initialized arenas[ind] if it's an
* auto arena.
*/
- arena = arena_get(ind, false);
+ arena = arena_get(tsdn, ind, false);
if (arena != NULL) {
assert(ind < narenas_auto);
return (arena);
}
/* Actually initialize the arena. */
- arena = arena_new(ind);
+ arena = arena_new(tsdn, ind);
arena_set(ind, arena);
return (arena);
}
arena_t *
-arena_init(unsigned ind)
+arena_init(tsdn_t *tsdn, unsigned ind)
{
arena_t *arena;
- malloc_mutex_lock(&arenas_lock);
- arena = arena_init_locked(ind);
- malloc_mutex_unlock(&arenas_lock);
+ malloc_mutex_lock(tsdn, &arenas_lock);
+ arena = arena_init_locked(tsdn, ind);
+ malloc_mutex_unlock(tsdn, &arenas_lock);
return (arena);
}
static void
-arena_bind(tsd_t *tsd, unsigned ind)
+arena_bind(tsd_t *tsd, unsigned ind, bool internal)
{
arena_t *arena;
- arena = arena_get(ind, false);
- arena_nthreads_inc(arena);
+ arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_inc(arena, internal);
- if (tsd_nominal(tsd))
- tsd_arena_set(tsd, arena);
+ if (tsd_nominal(tsd)) {
+ if (internal)
+ tsd_iarena_set(tsd, arena);
+ else
+ tsd_arena_set(tsd, arena);
+ }
}
void
@@ -471,21 +475,24 @@ arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
arena_t *oldarena, *newarena;
- oldarena = arena_get(oldind, false);
- newarena = arena_get(newind, false);
- arena_nthreads_dec(oldarena);
- arena_nthreads_inc(newarena);
+ oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
+ newarena = arena_get(tsd_tsdn(tsd), newind, false);
+ arena_nthreads_dec(oldarena, false);
+ arena_nthreads_inc(newarena, false);
tsd_arena_set(tsd, newarena);
}
static void
-arena_unbind(tsd_t *tsd, unsigned ind)
+arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
{
arena_t *arena;
- arena = arena_get(ind, false);
- arena_nthreads_dec(arena);
- tsd_arena_set(tsd, NULL);
+ arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_dec(arena, internal);
+ if (internal)
+ tsd_iarena_set(tsd, NULL);
+ else
+ tsd_arena_set(tsd, NULL);
}
arena_tdata_t *
@@ -566,27 +573,41 @@ label_return:
/* Slow path, called only by arena_choose(). */
arena_t *
-arena_choose_hard(tsd_t *tsd)
+arena_choose_hard(tsd_t *tsd, bool internal)
{
- arena_t *ret;
+ arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (narenas_auto > 1) {
- unsigned i, choose, first_null;
+ unsigned i, j, choose[2], first_null;
+
+ /*
+ * Determine binding for both non-internal and internal
+ * allocation.
+ *
+ * choose[0]: For application allocation.
+ * choose[1]: For internal metadata allocation.
+ */
+
+ for (j = 0; j < 2; j++)
+ choose[j] = 0;
- choose = 0;
first_null = narenas_auto;
- malloc_mutex_lock(&arenas_lock);
- assert(arena_get(0, false) != NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
+ assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
for (i = 1; i < narenas_auto; i++) {
- if (arena_get(i, false) != NULL) {
+ if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
/*
* Choose the first arena that has the lowest
* number of threads assigned to it.
*/
- if (arena_nthreads_get(arena_get(i, false)) <
- arena_nthreads_get(arena_get(choose,
- false)))
- choose = i;
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), i, false), !!j) <
+ arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), choose[j], false),
+ !!j))
+ choose[j] = i;
+ }
} else if (first_null == narenas_auto) {
/*
* Record the index of the first uninitialized
@@ -601,27 +622,40 @@ arena_choose_hard(tsd_t *tsd)
}
}
- if (arena_nthreads_get(arena_get(choose, false)) == 0
- || first_null == narenas_auto) {
- /*
- * Use an unloaded arena, or the least loaded arena if
- * all arenas are already initialized.
- */
- ret = arena_get(choose, false);
- } else {
- /* Initialize a new arena. */
- choose = first_null;
- ret = arena_init_locked(choose);
- if (ret == NULL) {
- malloc_mutex_unlock(&arenas_lock);
- return (NULL);
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
+ choose[j], false), !!j) == 0 || first_null ==
+ narenas_auto) {
+ /*
+ * Use an unloaded arena, or the least loaded
+ * arena if all arenas are already initialized.
+ */
+ if (!!j == internal) {
+ ret = arena_get(tsd_tsdn(tsd),
+ choose[j], false);
+ }
+ } else {
+ arena_t *arena;
+
+ /* Initialize a new arena. */
+ choose[j] = first_null;
+ arena = arena_init_locked(tsd_tsdn(tsd),
+ choose[j]);
+ if (arena == NULL) {
+ malloc_mutex_unlock(tsd_tsdn(tsd),
+ &arenas_lock);
+ return (NULL);
+ }
+ if (!!j == internal)
+ ret = arena;
}
+ arena_bind(tsd, choose[j], !!j);
}
- arena_bind(tsd, choose);
- malloc_mutex_unlock(&arenas_lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
} else {
- ret = arena_get(0, false);
- arena_bind(tsd, 0);
+ ret = arena_get(tsd_tsdn(tsd), 0, false);
+ arena_bind(tsd, 0, false);
+ arena_bind(tsd, 0, true);
}
return (ret);
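
The arena_choose_hard() hunk above now computes two bindings in one pass, as its new comment states: choose[0] picks the least-loaded arena for application allocation and choose[1] does the same for internal metadata allocation, and the thread is bound to both via arena_bind(tsd, choose[j], !!j). The fragment below only illustrates that per-class least-loaded selection; toy_choose() and its thread counters are hypothetical stand-ins, not the jemalloc implementation.

	#include <stdio.h>

	#define TOY_NARENAS 4

	/* toy_nthreads[arena][internal?]: threads bound per arena and class. */
	static unsigned toy_nthreads[TOY_NARENAS][2];

	static unsigned
	toy_choose(int internal)
	{
		unsigned i, choose = 0;

		for (i = 1; i < TOY_NARENAS; i++) {
			if (toy_nthreads[i][internal] <
			    toy_nthreads[choose][internal])
				choose = i;
		}
		toy_nthreads[choose][internal]++;	/* bind this thread */
		return (choose);
	}

	int
	main(void)
	{
		unsigned j;

		/* Simulate a few threads arriving and being bound twice each. */
		for (j = 0; j < 6; j++) {
			unsigned app = toy_choose(0);	/* application arena */
			unsigned meta = toy_choose(1);	/* metadata arena */
			printf("thread %u -> app arena %u, metadata arena %u\n",
			    j, app, meta);
		}
		return (0);
	}
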
@@ -642,13 +676,23 @@ thread_deallocated_cleanup(tsd_t *tsd)
}
void
+iarena_cleanup(tsd_t *tsd)
+{
+ arena_t *iarena;
+
+ iarena = tsd_iarena_get(tsd);
+ if (iarena != NULL)
+ arena_unbind(tsd, iarena->ind, true);
+}
+
+void
arena_cleanup(tsd_t *tsd)
{
arena_t *arena;
arena = tsd_arena_get(tsd);
if (arena != NULL)
- arena_unbind(tsd, arena->ind);
+ arena_unbind(tsd, arena->ind, false);
}
void
@@ -685,8 +729,11 @@ stats_print_atexit(void)
{
if (config_tcache && config_stats) {
+ tsdn_t *tsdn;
unsigned narenas, i;
+ tsdn = tsdn_fetch();
+
/*
* Merge stats from extant threads. This is racy, since
* individual threads do not lock when recording tcache stats
@@ -695,7 +742,7 @@ stats_print_atexit(void)
* continue to allocate.
*/
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena = arena_get(i, false);
+ arena_t *arena = arena_get(tsdn, i, false);
if (arena != NULL) {
tcache_t *tcache;
@@ -705,11 +752,11 @@ stats_print_atexit(void)
* and bin locks in the opposite order,
* deadlocks may result.
*/
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tcache, arena);
+ tcache_stats_merge(tsdn, tcache, arena);
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
}
}
@@ -1060,7 +1107,8 @@ malloc_conf_init(void)
for (i = 0; i < dss_prec_limit; i++) {
if (strncmp(dss_prec_names[i], v, vlen)
== 0) {
- if (chunk_dss_prec_set(i)) {
+ if (chunk_dss_prec_set(NULL,
+ i)) {
malloc_conf_error(
"Error setting dss",
k, klen, v, vlen);
@@ -1190,7 +1238,6 @@ malloc_conf_init(void)
}
}
-/* init_lock must be held. */
static bool
malloc_init_hard_needed(void)
{
@@ -1208,9 +1255,9 @@ malloc_init_hard_needed(void)
if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
/* Busy-wait until the initializing thread completes. */
do {
- malloc_mutex_unlock(&init_lock);
+ malloc_mutex_unlock(NULL, &init_lock);
CPU_SPINWAIT;
- malloc_mutex_lock(&init_lock);
+ malloc_mutex_lock(NULL, &init_lock);
} while (!malloc_initialized());
return (false);
}
@@ -1218,9 +1265,8 @@ malloc_init_hard_needed(void)
return (true);
}
-/* init_lock must be held. */
static bool
-malloc_init_hard_a0_locked(void)
+malloc_init_hard_a0_locked()
{
malloc_initializer = INITIALIZER;
@@ -1236,6 +1282,7 @@ malloc_init_hard_a0_locked(void)
abort();
}
}
+ pages_boot();
if (base_boot())
return (true);
if (chunk_boot())
@@ -1246,9 +1293,9 @@ malloc_init_hard_a0_locked(void)
prof_boot1();
if (arena_boot())
return (true);
- if (config_tcache && tcache_boot())
+ if (config_tcache && tcache_boot(TSDN_NULL))
return (true);
- if (malloc_mutex_init(&arenas_lock))
+ if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
return (true);
/*
* Create enough scaffolding to allow recursive allocation in
@@ -1262,9 +1309,11 @@ malloc_init_hard_a0_locked(void)
* Initialize one arena here. The rest are lazily created in
* arena_choose_hard().
*/
- if (arena_init(0) == NULL)
+ if (arena_init(TSDN_NULL, 0) == NULL)
return (true);
+
malloc_init_state = malloc_init_a0_initialized;
+
return (false);
}
@@ -1273,30 +1322,18 @@ malloc_init_hard_a0(void)
{
bool ret;
- malloc_mutex_lock(&init_lock);
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
ret = malloc_init_hard_a0_locked();
- malloc_mutex_unlock(&init_lock);
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
return (ret);
}
-/*
- * Initialize data structures which may trigger recursive allocation.
- *
- * init_lock must be held.
- */
+/* Initialize data structures which may trigger recursive allocation. */
static bool
malloc_init_hard_recursible(void)
{
- bool ret = false;
malloc_init_state = malloc_init_recursible;
- malloc_mutex_unlock(&init_lock);
-
- /* LinuxThreads' pthread_setspecific() allocates. */
- if (malloc_tsd_boot0()) {
- ret = true;
- goto label_return;
- }
ncpus = malloc_ncpus();
@@ -1305,24 +1342,21 @@ malloc_init_hard_recursible(void)
/* LinuxThreads' pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
- ret = true;
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort)
abort();
+ return (true);
}
#endif
-label_return:
- malloc_mutex_lock(&init_lock);
- return (ret);
+ return (false);
}
-/* init_lock must be held. */
static bool
-malloc_init_hard_finish(void)
+malloc_init_hard_finish(tsdn_t *tsdn)
{
- if (mutex_boot())
+ if (malloc_mutex_boot())
return (true);
if (opt_narenas == 0) {
@@ -1347,7 +1381,7 @@ malloc_init_hard_finish(void)
narenas_total_set(narenas_auto);
/* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
+ arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
(MALLOCX_ARENA_MAX+1));
if (arenas == NULL)
return (true);
@@ -1363,38 +1397,43 @@ malloc_init_hard_finish(void)
static bool
malloc_init_hard(void)
{
+ tsd_t *tsd;
#if defined(_WIN32) && _WIN32_WINNT < 0x0600
_init_init_lock();
#endif
- malloc_mutex_lock(&init_lock);
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
if (!malloc_init_hard_needed()) {
- malloc_mutex_unlock(&init_lock);
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
return (false);
}
if (malloc_init_state != malloc_init_a0_initialized &&
malloc_init_hard_a0_locked()) {
- malloc_mutex_unlock(&init_lock);
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
return (true);
}
- if (malloc_init_hard_recursible()) {
- malloc_mutex_unlock(&init_lock);
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ /* Recursive allocation relies on functional tsd. */
+ tsd = malloc_tsd_boot0();
+ if (tsd == NULL)
return (true);
- }
+ if (malloc_init_hard_recursible())
+ return (true);
+ malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
- if (config_prof && prof_boot2()) {
- malloc_mutex_unlock(&init_lock);
+ if (config_prof && prof_boot2(tsd_tsdn(tsd))) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
return (true);
}
- if (malloc_init_hard_finish()) {
- malloc_mutex_unlock(&init_lock);
+ if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
return (true);
}
- malloc_mutex_unlock(&init_lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
malloc_tsd_boot1();
return (false);
}
@@ -1408,7 +1447,7 @@ malloc_init_hard(void)
*/
static void *
-imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind,
+ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
prof_tctx_t *tctx, bool slow_path)
{
void *p;
@@ -1417,44 +1456,58 @@ imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind,
return (NULL);
if (usize <= SMALL_MAXCLASS) {
szind_t ind_large = size2index(LARGE_MINCLASS);
- p = imalloc(tsd, LARGE_MINCLASS, ind_large, slow_path);
+ p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
if (p == NULL)
return (NULL);
- arena_prof_promoted(p, usize);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
} else
- p = imalloc(tsd, usize, ind, slow_path);
+ p = ialloc(tsd, usize, ind, zero, slow_path);
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool slow_path)
+ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
{
void *p;
prof_tctx_t *tctx;
tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
- p = imalloc_prof_sample(tsd, usize, ind, tctx, slow_path);
+ p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
else
- p = imalloc(tsd, usize, ind, slow_path);
+ p = ialloc(tsd, usize, ind, zero, slow_path);
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
- prof_malloc(p, usize, tctx);
+ prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
return (p);
}
+/*
+ * ialloc_body() is inlined so that fast and slow paths are generated separately
+ * with statically known slow_path.
+ *
+ * This function guarantees that *tsdn is non-NULL on success.
+ */
JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
+ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
+ bool slow_path)
{
+ tsd_t *tsd;
szind_t ind;
- if (slow_path && unlikely(malloc_init()))
+ if (slow_path && unlikely(malloc_init())) {
+ *tsdn = NULL;
return (NULL);
- *tsd = tsd_fetch();
+ }
+
+ tsd = tsd_fetch();
+ *tsdn = tsd_tsdn(tsd);
+ witness_assert_lockless(tsd_tsdn(tsd));
+
ind = size2index(size);
if (unlikely(ind >= NSIZES))
return (NULL);
@@ -1466,26 +1519,32 @@ imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
}
if (config_prof && opt_prof)
- return (imalloc_prof(*tsd, *usize, ind, slow_path));
+ return (ialloc_prof(tsd, *usize, ind, zero, slow_path));
- return (imalloc(*tsd, size, ind, slow_path));
+ return (ialloc(tsd, size, ind, zero, slow_path));
}
JEMALLOC_ALWAYS_INLINE_C void
-imalloc_post_check(void *ret, tsd_t *tsd, size_t usize, bool slow_path)
+ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
+ bool update_errno, bool slow_path)
{
+
+ assert(!tsdn_null(tsdn) || ret == NULL);
+
if (unlikely(ret == NULL)) {
if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write("<jemalloc>: Error in malloc(): "
- "out of memory\n");
+ malloc_printf("<jemalloc>: Error in %s(): out of "
+ "memory\n", func);
abort();
}
- set_errno(ENOMEM);
+ if (update_errno)
+ set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
- assert(usize == isalloc(ret, config_prof));
- *tsd_thread_allocatedp_get(tsd) += usize;
+ assert(usize == isalloc(tsdn, ret, config_prof));
+ *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
}
+ witness_assert_lockless(tsdn);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1494,24 +1553,20 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
void *ret;
- tsd_t *tsd;
+ tsdn_t *tsdn;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0)
size = 1;
if (likely(!malloc_slow)) {
- /*
- * imalloc_body() is inlined so that fast and slow paths are
- * generated separately with statically known slow_path.
- */
- ret = imalloc_body(size, &tsd, &usize, false);
- imalloc_post_check(ret, tsd, usize, false);
+ ret = ialloc_body(size, false, &tsdn, &usize, false);
+ ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
} else {
- ret = imalloc_body(size, &tsd, &usize, true);
- imalloc_post_check(ret, tsd, usize, true);
+ ret = ialloc_body(size, false, &tsdn, &usize, true);
+ ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
UTRACE(0, size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
}
return (ret);
@@ -1530,7 +1585,7 @@ imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
if (p == NULL)
return (NULL);
- arena_prof_promoted(p, usize);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
} else
p = ipalloc(tsd, usize, alignment, false);
@@ -1552,7 +1607,7 @@ imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
- prof_malloc(p, usize, tctx);
+ prof_malloc(tsd_tsdn(tsd), p, usize, tctx);
return (p);
}
@@ -1569,10 +1624,12 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
assert(min_alignment != 0);
if (unlikely(malloc_init())) {
+ tsd = NULL;
result = NULL;
goto label_oom;
}
tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
if (size == 0)
size = 1;
@@ -1607,10 +1664,13 @@ imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
ret = 0;
label_return:
if (config_stats && likely(result != NULL)) {
- assert(usize == isalloc(result, config_prof));
+ assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
*tsd_thread_allocatedp_get(tsd) += usize;
}
UTRACE(0, size, result);
+ JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
+ false);
+ witness_assert_lockless(tsd_tsdn(tsd));
return (ret);
label_oom:
assert(result == NULL);
@@ -1620,6 +1680,7 @@ label_oom:
abort();
}
ret = ENOMEM;
+ witness_assert_lockless(tsd_tsdn(tsd));
goto label_return;
}
@@ -1627,9 +1688,10 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
- int ret = imemalign(memptr, alignment, size, sizeof(void *));
- JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
- config_prof), false);
+ int ret;
+
+ ret = imemalign(memptr, alignment, size, sizeof(void *));
+
return (ret);
}
@@ -1645,48 +1707,8 @@ je_aligned_alloc(size_t alignment, size_t size)
ret = NULL;
set_errno(err);
}
- JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
- false);
- return (ret);
-}
-
-static void *
-icalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, prof_tctx_t *tctx)
-{
- void *p;
-
- if (tctx == NULL)
- return (NULL);
- if (usize <= SMALL_MAXCLASS) {
- szind_t ind_large = size2index(LARGE_MINCLASS);
- p = icalloc(tsd, LARGE_MINCLASS, ind_large);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(p, usize);
- } else
- p = icalloc(tsd, usize, ind);
-
- return (p);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void *
-icalloc_prof(tsd_t *tsd, size_t usize, szind_t ind)
-{
- void *p;
- prof_tctx_t *tctx;
-
- tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
- if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
- p = icalloc_prof_sample(tsd, usize, ind, tctx);
- else
- p = icalloc(tsd, usize, ind);
- if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
- return (NULL);
- }
- prof_malloc(p, usize, tctx);
- return (p);
+ return (ret);
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -1695,67 +1717,35 @@ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
void *ret;
- tsd_t *tsd;
+ tsdn_t *tsdn;
size_t num_size;
- szind_t ind;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- if (unlikely(malloc_init())) {
- num_size = 0;
- ret = NULL;
- goto label_return;
- }
- tsd = tsd_fetch();
-
num_size = num * size;
if (unlikely(num_size == 0)) {
if (num == 0 || size == 0)
num_size = 1;
- else {
- ret = NULL;
- goto label_return;
- }
+ else
+ num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
/*
* Try to avoid division here. We know that it isn't possible to
* overflow during multiplication if neither operand uses any of the
* most significant half of the bits in a size_t.
*/
} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
- 2))) && (num_size / size != num))) {
- /* size_t overflow. */
- ret = NULL;
- goto label_return;
- }
+ 2))) && (num_size / size != num)))
+ num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
- ind = size2index(num_size);
- if (unlikely(ind >= NSIZES)) {
- ret = NULL;
- goto label_return;
- }
- if (config_prof && opt_prof) {
- usize = index2size(ind);
- ret = icalloc_prof(tsd, usize, ind);
+ if (likely(!malloc_slow)) {
+ ret = ialloc_body(num_size, true, &tsdn, &usize, false);
+ ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
} else {
- if (config_stats || (config_valgrind && unlikely(in_valgrind)))
- usize = index2size(ind);
- ret = icalloc(tsd, num_size, ind);
+ ret = ialloc_body(num_size, true, &tsdn, &usize, true);
+ ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
+ UTRACE(0, num_size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
}
-label_return:
- if (unlikely(ret == NULL)) {
- if (config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write("<jemalloc>: Error in calloc(): out of "
- "memory\n");
- abort();
- }
- set_errno(ENOMEM);
- }
- if (config_stats && likely(ret != NULL)) {
- assert(usize == isalloc(ret, config_prof));
- *tsd_thread_allocatedp_get(tsd) += usize;
- }
- UTRACE(0, num_size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret);
}
@@ -1771,7 +1761,7 @@ irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
if (p == NULL)
return (NULL);
- arena_prof_promoted(p, usize);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
} else
p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
@@ -1786,7 +1776,7 @@ irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(old_ptr);
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
tctx = prof_alloc_prep(tsd, usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
@@ -1808,14 +1798,16 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
size_t usize;
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ witness_assert_lockless(tsd_tsdn(tsd));
+
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
if (config_prof && opt_prof) {
- usize = isalloc(ptr, config_prof);
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
prof_free(tsd, ptr, usize);
} else if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
+ usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
@@ -1823,17 +1815,19 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
iqalloc(tsd, ptr, tcache, false);
else {
if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(ptr);
+ rzsize = p2rz(tsd_tsdn(tsd), ptr);
iqalloc(tsd, ptr, tcache, true);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
}
JEMALLOC_INLINE_C void
-isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
{
UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ witness_assert_lockless(tsd_tsdn(tsd));
+
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
@@ -1842,8 +1836,8 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
if (config_stats)
*tsd_thread_deallocatedp_get(tsd) += usize;
if (config_valgrind && unlikely(in_valgrind))
- rzsize = p2rz(ptr);
- isqalloc(tsd, ptr, usize, tcache);
+ rzsize = p2rz(tsd_tsdn(tsd), ptr);
+ isqalloc(tsd, ptr, usize, tcache, slow_path);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
@@ -1853,13 +1847,15 @@ JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
void *ret;
- tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
+ tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (unlikely(size == 0)) {
if (ptr != NULL) {
+ tsd_t *tsd;
+
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
tsd = tsd_fetch();
@@ -1870,13 +1866,19 @@ je_realloc(void *ptr, size_t size)
}
if (likely(ptr != NULL)) {
+ tsd_t *tsd;
+
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
- old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && unlikely(in_valgrind))
- old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
+ if (config_valgrind && unlikely(in_valgrind)) {
+ old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
+ u2rz(old_usize);
+ }
if (config_prof && opt_prof) {
usize = s2u(size);
@@ -1888,12 +1890,14 @@ je_realloc(void *ptr, size_t size)
usize = s2u(size);
ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
+ tsdn = tsd_tsdn(tsd);
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
if (likely(!malloc_slow))
- ret = imalloc_body(size, &tsd, &usize, false);
+ ret = ialloc_body(size, false, &tsdn, &usize, false);
else
- ret = imalloc_body(size, &tsd, &usize, true);
+ ret = ialloc_body(size, false, &tsdn, &usize, true);
+ assert(!tsdn_null(tsdn) || ret == NULL);
}
if (unlikely(ret == NULL)) {
@@ -1905,13 +1909,17 @@ je_realloc(void *ptr, size_t size)
set_errno(ENOMEM);
}
if (config_stats && likely(ret != NULL)) {
- assert(usize == isalloc(ret, config_prof));
+ tsd_t *tsd;
+
+ assert(usize == isalloc(tsdn, ret, config_prof));
+ tsd = tsdn_tsd(tsdn);
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
+ JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
old_rzsize, true, false);
+ witness_assert_lockless(tsdn);
return (ret);
}
@@ -1922,10 +1930,12 @@ je_free(void *ptr)
UTRACE(ptr, 0, 0);
if (likely(ptr != NULL)) {
tsd_t *tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
if (likely(!malloc_slow))
ifree(tsd, ptr, tcache_get(tsd, false), false);
else
ifree(tsd, ptr, tcache_get(tsd, false), true);
+ witness_assert_lockless(tsd_tsdn(tsd));
}
}
@@ -1946,7 +1956,6 @@ je_memalign(size_t alignment, size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
ret = NULL;
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -1960,7 +1969,6 @@ je_valloc(size_t size)
void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
ret = NULL;
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
@@ -2001,7 +2009,7 @@ JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
*/
JEMALLOC_ALWAYS_INLINE_C bool
-imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
+imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{
@@ -2024,7 +2032,7 @@ imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
*tcache = tcache_get(tsd, true);
if ((flags & MALLOCX_ARENA_MASK) != 0) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- *arena = arena_get(arena_ind, true);
+ *arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
if (unlikely(*arena == NULL))
return (true);
} else
@@ -2032,63 +2040,44 @@ imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
return (false);
}
-JEMALLOC_ALWAYS_INLINE_C bool
-imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
- size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
-{
-
- if (likely(flags == 0)) {
- *usize = s2u(size);
- if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
- return (true);
- *alignment = 0;
- *zero = false;
- *tcache = tcache_get(tsd, true);
- *arena = NULL;
- return (false);
- } else {
- return (imallocx_flags_decode_hard(tsd, size, flags, usize,
- alignment, zero, tcache, arena));
- }
-}
-
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena)
+imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena, bool slow_path)
{
szind_t ind;
if (unlikely(alignment != 0))
- return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
+ return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
ind = size2index(usize);
assert(ind < NSIZES);
- if (unlikely(zero))
- return (icalloct(tsd, usize, ind, tcache, arena));
- return (imalloct(tsd, usize, ind, tcache, arena));
+ return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
+ slow_path));
}
static void *
-imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
- tcache_t *tcache, arena_t *arena)
+imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
+ tcache_t *tcache, arena_t *arena, bool slow_path)
{
void *p;
if (usize <= SMALL_MAXCLASS) {
assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
- p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
- arena);
+ p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
+ tcache, arena, slow_path);
if (p == NULL)
return (NULL);
- arena_prof_promoted(p, usize);
- } else
- p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);
+ arena_prof_promoted(tsdn, p, usize);
+ } else {
+ p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
+ slow_path);
+ }
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
+imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
{
void *p;
size_t alignment;
@@ -2101,25 +2090,27 @@ imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
&zero, &tcache, &arena)))
return (NULL);
tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
- if (likely((uintptr_t)tctx == (uintptr_t)1U))
- p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
- else if ((uintptr_t)tctx > (uintptr_t)1U) {
- p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
- arena);
+ if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
+ p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
+ tcache, arena, slow_path);
+ } else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
+ tcache, arena, slow_path);
} else
p = NULL;
if (unlikely(p == NULL)) {
prof_alloc_rollback(tsd, tctx, true);
return (NULL);
}
- prof_malloc(p, *usize, tctx);
+ prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
return (p);
}
JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
+imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
+ bool slow_path)
{
void *p;
size_t alignment;
@@ -2127,24 +2118,53 @@ imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
tcache_t *tcache;
arena_t *arena;
+ if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
+ &zero, &tcache, &arena)))
+ return (NULL);
+ p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
+ arena, slow_path);
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ return (p);
+}
+
+/* This function guarantees that *tsdn is non-NULL on success. */
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
+ bool slow_path)
+{
+ tsd_t *tsd;
+
+ if (slow_path && unlikely(malloc_init())) {
+ *tsdn = NULL;
+ return (NULL);
+ }
+
+ tsd = tsd_fetch();
+ *tsdn = tsd_tsdn(tsd);
+ witness_assert_lockless(tsd_tsdn(tsd));
+
if (likely(flags == 0)) {
szind_t ind = size2index(size);
if (unlikely(ind >= NSIZES))
return (NULL);
- if (config_stats || (config_valgrind &&
- unlikely(in_valgrind))) {
+ if (config_stats || (config_prof && opt_prof) || (slow_path &&
+ config_valgrind && unlikely(in_valgrind))) {
*usize = index2size(ind);
assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
}
- return (imalloc(tsd, size, ind, true));
+
+ if (config_prof && opt_prof) {
+ return (ialloc_prof(tsd, *usize, ind, false,
+ slow_path));
+ }
+
+ return (ialloc(tsd, size, ind, false, slow_path));
}
- if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
- &alignment, &zero, &tcache, &arena)))
- return (NULL);
- p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
- assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
- return (p);
+ if (config_prof && opt_prof)
+ return (imallocx_prof(tsd, size, flags, usize, slow_path));
+
+ return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
}
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -2152,37 +2172,24 @@ void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
- tsd_t *tsd;
+ tsdn_t *tsdn;
void *p;
size_t usize;
assert(size != 0);
- if (unlikely(malloc_init()))
- goto label_oom;
- tsd = tsd_fetch();
-
- if (config_prof && opt_prof)
- p = imallocx_prof(tsd, size, flags, &usize);
- else
- p = imallocx_no_prof(tsd, size, flags, &usize);
- if (unlikely(p == NULL))
- goto label_oom;
-
- if (config_stats) {
- assert(usize == isalloc(p, config_prof));
- *tsd_thread_allocatedp_get(tsd) += usize;
+ if (likely(!malloc_slow)) {
+ p = imallocx_body(size, flags, &tsdn, &usize, false);
+ ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
+ } else {
+ p = imallocx_body(size, flags, &tsdn, &usize, true);
+ ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
+ UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
+ MALLOCX_ZERO_GET(flags));
}
- UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
+
return (p);
-label_oom:
- if (config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
- abort();
- }
- UTRACE(0, size, 0);
- return (NULL);
}
static void *
@@ -2199,7 +2206,7 @@ irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
zero, tcache, arena);
if (p == NULL)
return (NULL);
- arena_prof_promoted(p, usize);
+ arena_prof_promoted(tsd_tsdn(tsd), p, usize);
} else {
p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
tcache, arena);
@@ -2218,7 +2225,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(old_ptr);
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
@@ -2241,7 +2248,7 @@ irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
- *usize = isalloc(p, config_prof);
+ *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
}
prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
old_usize, old_tctx);
@@ -2269,10 +2276,11 @@ je_rallocx(void *ptr, size_t size, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- arena = arena_get(arena_ind, true);
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
if (unlikely(arena == NULL))
goto label_oom;
} else
@@ -2286,7 +2294,7 @@ je_rallocx(void *ptr, size_t size, int flags)
} else
tcache = tcache_get(tsd, true);
- old_usize = isalloc(ptr, config_prof);
+ old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
if (config_valgrind && unlikely(in_valgrind))
old_rzsize = u2rz(old_usize);
@@ -2304,7 +2312,7 @@ je_rallocx(void *ptr, size_t size, int flags)
if (unlikely(p == NULL))
goto label_oom;
if (config_stats || (config_valgrind && unlikely(in_valgrind)))
- usize = isalloc(p, config_prof);
+ usize = isalloc(tsd_tsdn(tsd), p, config_prof);
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
@@ -2313,8 +2321,9 @@ je_rallocx(void *ptr, size_t size, int flags)
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
- old_rzsize, false, zero);
+ JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
+ old_usize, old_rzsize, false, zero);
+ witness_assert_lockless(tsd_tsdn(tsd));
return (p);
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -2322,31 +2331,32 @@ label_oom:
abort();
}
UTRACE(ptr, size, 0);
+ witness_assert_lockless(tsd_tsdn(tsd));
return (NULL);
}
JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero)
{
size_t usize;
- if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
+ if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
return (old_usize);
- usize = isalloc(ptr, config_prof);
+ usize = isalloc(tsdn, ptr, config_prof);
return (usize);
}
static size_t
-ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
size_t usize;
if (tctx == NULL)
return (old_usize);
- usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
+ usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
zero);
return (usize);
@@ -2361,7 +2371,7 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
prof_tctx_t *old_tctx, *tctx;
prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(ptr);
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
/*
* usize isn't knowable before ixalloc() returns when extra is non-zero.
* Therefore, compute its maximum possible value and use that in
@@ -2386,11 +2396,11 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
- usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
- alignment, zero, tctx);
+ usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
+ size, extra, alignment, zero, tctx);
} else {
- usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
- alignment, zero);
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
}
if (usize == old_usize) {
prof_alloc_rollback(tsd, tctx, false);
@@ -2417,8 +2427,9 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
- old_usize = isalloc(ptr, config_prof);
+ old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
/*
* The API explicitly absolves itself of protecting against (size +
@@ -2443,8 +2454,8 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
alignment, zero);
} else {
- usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
- alignment, zero);
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
}
if (unlikely(usize == old_usize))
goto label_not_resized;
@@ -2453,10 +2464,11 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags)
*tsd_thread_allocatedp_get(tsd) += usize;
*tsd_thread_deallocatedp_get(tsd) += old_usize;
}
- JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
- old_rzsize, false, zero);
+ JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
+ old_usize, old_rzsize, false, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
+ witness_assert_lockless(tsd_tsdn(tsd));
return (usize);
}
@@ -2465,15 +2477,20 @@ JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
size_t usize;
+ tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+
if (config_ivsalloc)
- usize = ivsalloc(ptr, config_prof);
+ usize = ivsalloc(tsdn, ptr, config_prof);
else
- usize = isalloc(ptr, config_prof);
+ usize = isalloc(tsdn, ptr, config_prof);
+ witness_assert_lockless(tsdn);
return (usize);
}
@@ -2487,6 +2504,7 @@ je_dallocx(void *ptr, int flags)
assert(malloc_initialized() || IS_INITIALIZER);
tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
@@ -2496,18 +2514,25 @@ je_dallocx(void *ptr, int flags)
tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
- ifree(tsd_fetch(), ptr, tcache, true);
+ if (likely(!malloc_slow))
+ ifree(tsd, ptr, tcache, false);
+ else
+ ifree(tsd, ptr, tcache, true);
+ witness_assert_lockless(tsd_tsdn(tsd));
}
JEMALLOC_ALWAYS_INLINE_C size_t
-inallocx(size_t size, int flags)
+inallocx(tsdn_t *tsdn, size_t size, int flags)
{
size_t usize;
+ witness_assert_lockless(tsdn);
+
if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
usize = s2u(size);
else
usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ witness_assert_lockless(tsdn);
return (usize);
}
@@ -2520,10 +2545,11 @@ je_sdallocx(void *ptr, size_t size, int flags)
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- usize = inallocx(size, flags);
- assert(usize == isalloc(ptr, config_prof));
-
tsd = tsd_fetch();
+ usize = inallocx(tsd_tsdn(tsd), size, flags);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));
+
+ witness_assert_lockless(tsd_tsdn(tsd));
if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
tcache = NULL;
@@ -2533,7 +2559,11 @@ je_sdallocx(void *ptr, size_t size, int flags)
tcache = tcache_get(tsd, false);
UTRACE(ptr, 0, 0);
- isfree(tsd, ptr, usize, tcache);
+ if (likely(!malloc_slow))
+ isfree(tsd, ptr, usize, tcache, false);
+ else
+ isfree(tsd, ptr, usize, tcache, true);
+ witness_assert_lockless(tsd_tsdn(tsd));
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
@@ -2541,16 +2571,21 @@ JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
size_t usize;
+ tsdn_t *tsdn;
assert(size != 0);
if (unlikely(malloc_init()))
return (0);
- usize = inallocx(size, flags);
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+
+ usize = inallocx(tsdn, size, flags);
if (unlikely(usize > HUGE_MAXCLASS))
return (0);
+ witness_assert_lockless(tsdn);
return (usize);
}
@@ -2558,55 +2593,82 @@ JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
+ int ret;
+ tsd_t *tsd;
if (unlikely(malloc_init()))
return (EAGAIN);
- return (ctl_byname(name, oldp, oldlenp, newp, newlen));
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (ret);
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
+ int ret;
+ tsdn_t *tsdn;
if (unlikely(malloc_init()))
return (EAGAIN);
- return (ctl_nametomib(name, mibp, miblenp));
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+ ret = ctl_nametomib(tsdn, name, mibp, miblenp);
+ witness_assert_lockless(tsdn);
+ return (ret);
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
+ int ret;
+ tsd_t *tsd;
if (unlikely(malloc_init()))
return (EAGAIN);
- return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
+ tsd = tsd_fetch();
+ witness_assert_lockless(tsd_tsdn(tsd));
+ ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ witness_assert_lockless(tsd_tsdn(tsd));
+ return (ret);
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts)
{
+ tsdn_t *tsdn;
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
stats_print(write_cb, cbopaque, opts);
+ witness_assert_lockless(tsdn);
}
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
+ tsdn_t *tsdn;
assert(malloc_initialized() || IS_INITIALIZER);
malloc_thread_init();
+ tsdn = tsdn_fetch();
+ witness_assert_lockless(tsdn);
+
if (config_ivsalloc)
- ret = ivsalloc(ptr, config_prof);
+ ret = ivsalloc(tsdn, ptr, config_prof);
else
- ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
+ ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);
+ witness_assert_lockless(tsdn);
return (ret);
}
@@ -2638,7 +2700,7 @@ je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
if (p == NULL)
return (ALLOCM_ERR_OOM);
if (rsize != NULL)
- *rsize = isalloc(p, config_prof);
+ *rsize = isalloc(tsdn_fetch(), p, config_prof);
*ptr = p;
return (ALLOCM_SUCCESS);
}
@@ -2667,7 +2729,7 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
} else
ret = ALLOCM_ERR_OOM;
if (rsize != NULL)
- *rsize = isalloc(*ptr, config_prof);
+ *rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
}
return (ret);
}
@@ -2733,6 +2795,7 @@ je_nallocm(size_t *rsize, size_t size, int flags)
* to trigger the deadlock described above, but doing so would involve forking
* via a library constructor that runs before jemalloc's runs.
*/
+#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
@@ -2740,6 +2803,7 @@ jemalloc_constructor(void)
malloc_init();
}
+#endif
#ifndef JEMALLOC_MUTEX_INIT_CB
void
@@ -2749,7 +2813,9 @@ JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
- unsigned i, narenas;
+ tsd_t *tsd;
+ unsigned i, j, narenas;
+ arena_t *arena;
#ifdef JEMALLOC_MUTEX_INIT_CB
if (!malloc_initialized())
@@ -2757,18 +2823,41 @@ _malloc_prefork(void)
#endif
assert(malloc_initialized());
- /* Acquire all mutexes in a safe order. */
- ctl_prefork();
- prof_prefork();
- malloc_mutex_prefork(&arenas_lock);
- for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena;
+ tsd = tsd_fetch();
- if ((arena = arena_get(i, false)) != NULL)
- arena_prefork(arena);
+ narenas = narenas_total_get();
+
+ witness_prefork(tsd);
+ /* Acquire all mutexes in a safe order. */
+ ctl_prefork(tsd_tsdn(tsd));
+ malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
+ prof_prefork0(tsd_tsdn(tsd));
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < narenas; j++) {
+ if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
+ NULL) {
+ switch (i) {
+ case 0:
+ arena_prefork0(tsd_tsdn(tsd), arena);
+ break;
+ case 1:
+ arena_prefork1(tsd_tsdn(tsd), arena);
+ break;
+ case 2:
+ arena_prefork2(tsd_tsdn(tsd), arena);
+ break;
+ default: not_reached();
+ }
+ }
+ }
}
- chunk_prefork();
- base_prefork();
+ base_prefork(tsd_tsdn(tsd));
+ chunk_prefork(tsd_tsdn(tsd));
+ for (i = 0; i < narenas; i++) {
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ arena_prefork3(tsd_tsdn(tsd), arena);
+ }
+ prof_prefork1(tsd_tsdn(tsd));
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -2779,6 +2868,7 @@ JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
+ tsd_t *tsd;
unsigned i, narenas;
#ifdef JEMALLOC_MUTEX_INIT_CB
@@ -2787,39 +2877,46 @@ _malloc_postfork(void)
#endif
assert(malloc_initialized());
+ tsd = tsd_fetch();
+
+ witness_postfork_parent(tsd);
/* Release all mutexes, now that fork() has completed. */
- base_postfork_parent();
- chunk_postfork_parent();
+ chunk_postfork_parent(tsd_tsdn(tsd));
+ base_postfork_parent(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
- if ((arena = arena_get(i, false)) != NULL)
- arena_postfork_parent(arena);
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ arena_postfork_parent(tsd_tsdn(tsd), arena);
}
- malloc_mutex_postfork_parent(&arenas_lock);
- prof_postfork_parent();
- ctl_postfork_parent();
+ prof_postfork_parent(tsd_tsdn(tsd));
+ malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
+ ctl_postfork_parent(tsd_tsdn(tsd));
}
void
jemalloc_postfork_child(void)
{
+ tsd_t *tsd;
unsigned i, narenas;
assert(malloc_initialized());
+ tsd = tsd_fetch();
+
+ witness_postfork_child(tsd);
/* Release all mutexes, now that fork() has completed. */
- base_postfork_child();
- chunk_postfork_child();
+ chunk_postfork_child(tsd_tsdn(tsd));
+ base_postfork_child(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
- if ((arena = arena_get(i, false)) != NULL)
- arena_postfork_child(arena);
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
+ arena_postfork_child(tsd_tsdn(tsd), arena);
}
- malloc_mutex_postfork_child(&arenas_lock);
- prof_postfork_child();
- ctl_postfork_child();
+ prof_postfork_child(tsd_tsdn(tsd));
+ malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
+ ctl_postfork_child(tsd_tsdn(tsd));
}
void
diff --git a/contrib/jemalloc/src/mutex.c b/contrib/jemalloc/src/mutex.c
index 934d5aa..a24e420 100644
--- a/contrib/jemalloc/src/mutex.c
+++ b/contrib/jemalloc/src/mutex.c
@@ -80,7 +80,7 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
#endif
bool
-malloc_mutex_init(malloc_mutex_t *mutex)
+malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank)
{
#ifdef _WIN32
@@ -114,31 +114,34 @@ malloc_mutex_init(malloc_mutex_t *mutex)
}
pthread_mutexattr_destroy(&attr);
#endif
+ if (config_debug)
+ witness_init(&mutex->witness, name, rank, NULL);
return (false);
}
void
-malloc_mutex_prefork(malloc_mutex_t *mutex)
+malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
- malloc_mutex_lock(mutex);
+ malloc_mutex_lock(tsdn, mutex);
}
void
-malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
+malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
- malloc_mutex_unlock(mutex);
+ malloc_mutex_unlock(tsdn, mutex);
}
void
-malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
#ifdef JEMALLOC_MUTEX_INIT_CB
- malloc_mutex_unlock(mutex);
+ malloc_mutex_unlock(tsdn, mutex);
#else
- if (malloc_mutex_init(mutex)) {
+ if (malloc_mutex_init(mutex, mutex->witness.name,
+ mutex->witness.rank)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
if (opt_abort)
@@ -164,7 +167,7 @@ malloc_mutex_first_thread(void)
}
bool
-mutex_boot(void)
+malloc_mutex_boot(void)
{
#ifndef JEMALLOC_MUTEX_INIT_CB
diff --git a/contrib/jemalloc/src/nstime.c b/contrib/jemalloc/src/nstime.c
index 4cf90b5..26e49dc 100644
--- a/contrib/jemalloc/src/nstime.c
+++ b/contrib/jemalloc/src/nstime.c
@@ -99,7 +99,7 @@ nstime_divide(const nstime_t *time, const nstime_t *divisor)
#ifdef JEMALLOC_JET
#undef nstime_update
-#define nstime_update JEMALLOC_N(nstime_update_impl)
+#define nstime_update JEMALLOC_N(n_nstime_update)
#endif
bool
nstime_update(nstime_t *time)
@@ -144,5 +144,5 @@ nstime_update(nstime_t *time)
#ifdef JEMALLOC_JET
#undef nstime_update
#define nstime_update JEMALLOC_N(nstime_update)
-nstime_update_t *nstime_update = JEMALLOC_N(nstime_update_impl);
+nstime_update_t *nstime_update = JEMALLOC_N(n_nstime_update);
#endif
diff --git a/contrib/jemalloc/src/pages.c b/contrib/jemalloc/src/pages.c
index 83a167f..2a9b7e3 100644
--- a/contrib/jemalloc/src/pages.c
+++ b/contrib/jemalloc/src/pages.c
@@ -1,29 +1,49 @@
#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_internal.h"
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+#include <sys/sysctl.h>
+#endif
+
+/******************************************************************************/
+/* Data. */
+
+#ifndef _WIN32
+# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE)
+# define PAGES_PROT_DECOMMIT (PROT_NONE)
+static int mmap_flags;
+#endif
+static bool os_overcommits;
+
/******************************************************************************/
void *
-pages_map(void *addr, size_t size)
+pages_map(void *addr, size_t size, bool *commit)
{
void *ret;
assert(size != 0);
+ if (os_overcommits)
+ *commit = true;
+
#ifdef _WIN32
/*
* If VirtualAlloc can't allocate at the given address when one is
* given, it fails and returns NULL.
*/
- ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+ ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? MEM_COMMIT : 0),
PAGE_READWRITE);
#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
- ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
- -1, 0);
+ {
+ int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+
+ ret = mmap(addr, size, prot, mmap_flags, -1, 0);
+ }
assert(ret != NULL);
if (ret == MAP_FAILED)
@@ -67,7 +87,8 @@ pages_unmap(void *addr, size_t size)
}
void *
-pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size,
+ bool *commit)
{
void *ret = (void *)((uintptr_t)addr + leadsize);
@@ -77,7 +98,7 @@ pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
void *new_addr;
pages_unmap(addr, alloc_size);
- new_addr = pages_map(ret, size);
+ new_addr = pages_map(ret, size, commit);
if (new_addr == ret)
return (ret);
if (new_addr)
@@ -101,17 +122,17 @@ static bool
pages_commit_impl(void *addr, size_t size, bool commit)
{
-#ifndef _WIN32
- /*
- * The following decommit/commit implementation is functional, but
- * always disabled because it doesn't add value beyong improved
- * debugging (at the cost of extra system calls) on systems that
- * overcommit.
- */
- if (false) {
- int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
- void *result = mmap(addr, size, prot, MAP_PRIVATE | MAP_ANON |
- MAP_FIXED, -1, 0);
+ if (os_overcommits)
+ return (true);
+
+#ifdef _WIN32
+ return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
+ PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
+#else
+ {
+ int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
+ void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
+ -1, 0);
if (result == MAP_FAILED)
return (true);
if (result != addr) {
@@ -125,7 +146,6 @@ pages_commit_impl(void *addr, size_t size, bool commit)
return (false);
}
#endif
- return (true);
}
bool
@@ -171,3 +191,63 @@ pages_purge(void *addr, size_t size)
return (unzeroed);
}
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+static bool
+os_overcommits_sysctl(void)
+{
+ int vm_overcommit;
+ size_t sz;
+
+ sz = sizeof(vm_overcommit);
+ if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0)
+ return (false); /* Error. */
+
+ return ((vm_overcommit & 0x3) == 0);
+}
+#endif
+
+#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+static bool
+os_overcommits_proc(void)
+{
+ int fd;
+ char buf[1];
+ ssize_t nread;
+
+ fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+ if (fd == -1)
+ return (false); /* Error. */
+
+ nread = read(fd, &buf, sizeof(buf));
+ if (nread < 1)
+ return (false); /* Error. */
+ /*
+ * /proc/sys/vm/overcommit_memory meanings:
+ * 0: Heuristic overcommit.
+ * 1: Always overcommit.
+ * 2: Never overcommit.
+ */
+ return (buf[0] == '0' || buf[0] == '1');
+}
+#endif
+
+void
+pages_boot(void)
+{
+
+#ifndef _WIN32
+ mmap_flags = MAP_PRIVATE | MAP_ANON;
+#endif
+
+#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+ os_overcommits = os_overcommits_sysctl();
+#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY)
+ os_overcommits = os_overcommits_proc();
+# ifdef MAP_NORESERVE
+ if (os_overcommits)
+ mmap_flags |= MAP_NORESERVE;
+# endif
+#else
+ os_overcommits = false;
+#endif
+}
diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
index b387227..c1f58d4 100644
--- a/contrib/jemalloc/src/prof.c
+++ b/contrib/jemalloc/src/prof.c
@@ -121,13 +121,13 @@ static bool prof_booted = false;
* definition.
*/
-static bool prof_tctx_should_destroy(prof_tctx_t *tctx);
+static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
-static bool prof_tdata_should_destroy(prof_tdata_t *tdata,
+static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
bool even_if_attached);
-static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
+static void prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
bool even_if_attached);
-static char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
+static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
/******************************************************************************/
/* Red-black trees. */
@@ -213,22 +213,23 @@ prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
- malloc_mutex_lock(tctx->tdata->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
tctx->prepared = false;
- if (prof_tctx_should_destroy(tctx))
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
prof_tctx_destroy(tsd, tctx);
else
- malloc_mutex_unlock(tctx->tdata->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
}
}
void
-prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx)
{
- prof_tctx_set(ptr, usize, tctx);
+ prof_tctx_set(tsdn, ptr, usize, tctx);
- malloc_mutex_lock(tctx->tdata->lock);
+ malloc_mutex_lock(tsdn, tctx->tdata->lock);
tctx->cnts.curobjs++;
tctx->cnts.curbytes += usize;
if (opt_prof_accum) {
@@ -236,23 +237,23 @@ prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
tctx->cnts.accumbytes += usize;
}
tctx->prepared = false;
- malloc_mutex_unlock(tctx->tdata->lock);
+ malloc_mutex_unlock(tsdn, tctx->tdata->lock);
}
void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{
- malloc_mutex_lock(tctx->tdata->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
tctx->cnts.curobjs--;
tctx->cnts.curbytes -= usize;
- if (prof_tctx_should_destroy(tctx))
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx))
prof_tctx_destroy(tsd, tctx);
else
- malloc_mutex_unlock(tctx->tdata->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
}
void
@@ -277,7 +278,7 @@ prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq = true;
}
- malloc_mutex_lock(&bt2gctx_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}
JEMALLOC_INLINE_C void
@@ -287,7 +288,7 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
- malloc_mutex_unlock(&bt2gctx_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
if (tdata != NULL) {
bool idump, gdump;
@@ -300,9 +301,9 @@ prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
tdata->enq_gdump = false;
if (idump)
- prof_idump();
+ prof_idump(tsd_tsdn(tsd));
if (gdump)
- prof_gdump();
+ prof_gdump(tsd_tsdn(tsd));
}
}
@@ -546,14 +547,15 @@ prof_tdata_mutex_choose(uint64_t thr_uid)
}
static prof_gctx_t *
-prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
+prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt)
{
/*
* Create a single allocation that has space for vec of length bt->len.
*/
size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
- prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, size,
- size2index(size), false, tcache_get(tsd, true), true, NULL, true);
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
+ size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
+ true);
if (gctx == NULL)
return (NULL);
gctx->lock = prof_gctx_mutex_choose();
@@ -585,32 +587,33 @@ prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
* into this function.
*/
prof_enter(tsd, tdata_self);
- malloc_mutex_lock(gctx->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
assert(gctx->nlimbo != 0);
if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
/* Remove gctx from bt2gctx. */
- if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
+ if (ckh_remove(tsd_tsdn(tsd), &bt2gctx, &gctx->bt, NULL, NULL))
not_reached();
prof_leave(tsd, tdata_self);
/* Destroy gctx. */
- malloc_mutex_unlock(gctx->lock);
- idalloctm(tsd, gctx, tcache_get(tsd, false), true, true);
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ idalloctm(tsd_tsdn(tsd), gctx, NULL, true, true);
} else {
/*
* Compensate for increment in prof_tctx_destroy() or
* prof_lookup().
*/
gctx->nlimbo--;
- malloc_mutex_unlock(gctx->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
prof_leave(tsd, tdata_self);
}
}
-/* tctx->tdata->lock must be held. */
static bool
-prof_tctx_should_destroy(prof_tctx_t *tctx)
+prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx)
{
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
if (opt_prof_accum)
return (false);
if (tctx->cnts.curobjs != 0)
@@ -633,7 +636,6 @@ prof_gctx_should_destroy(prof_gctx_t *gctx)
return (true);
}
-/* tctx->tdata->lock is held upon entry, and released before return. */
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
@@ -641,17 +643,19 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
prof_gctx_t *gctx = tctx->gctx;
bool destroy_tdata, destroy_tctx, destroy_gctx;
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
assert(tctx->cnts.curobjs == 0);
assert(tctx->cnts.curbytes == 0);
assert(!opt_prof_accum);
assert(tctx->cnts.accumobjs == 0);
assert(tctx->cnts.accumbytes == 0);
- ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
- destroy_tdata = prof_tdata_should_destroy(tdata, false);
- malloc_mutex_unlock(tdata->lock);
+ ckh_remove(tsd_tsdn(tsd), &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- malloc_mutex_lock(gctx->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
switch (tctx->state) {
case prof_tctx_state_nominal:
tctx_tree_remove(&gctx->tctxs, tctx);
@@ -691,17 +695,19 @@ prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
destroy_tctx = false;
destroy_gctx = false;
}
- malloc_mutex_unlock(gctx->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
if (destroy_gctx) {
prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
tdata);
}
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
if (destroy_tdata)
- prof_tdata_destroy(tsd, tdata, false);
+ prof_tdata_destroy(tsd_tsdn(tsd), tdata, false);
if (destroy_tctx)
- idalloctm(tsd, tctx, tcache_get(tsd, false), true, true);
+ idalloctm(tsd_tsdn(tsd), tctx, NULL, true, true);
}
static bool
@@ -721,17 +727,16 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
prof_enter(tsd, tdata);
if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
- gctx.p = prof_gctx_create(tsd, bt);
+ gctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
if (gctx.v == NULL) {
prof_leave(tsd, tdata);
return (true);
}
btkey.p = &gctx.p->bt;
- if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
+ if (ckh_insert(tsd_tsdn(tsd), &bt2gctx, btkey.v, gctx.v)) {
/* OOM. */
prof_leave(tsd, tdata);
- idalloctm(tsd, gctx.v, tcache_get(tsd, false), true,
- true);
+ idalloctm(tsd_tsdn(tsd), gctx.v, NULL, true, true);
return (true);
}
new_gctx = true;
@@ -740,9 +745,9 @@ prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
* Increment nlimbo, in order to avoid a race condition with
* prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- malloc_mutex_lock(gctx.p->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
gctx.p->nlimbo++;
- malloc_mutex_unlock(gctx.p->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
new_gctx = false;
}
prof_leave(tsd, tdata);
@@ -769,13 +774,12 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
if (tdata == NULL)
return (NULL);
- malloc_mutex_lock(tdata->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
if (!not_found) /* Note double negative! */
ret.p->prepared = true;
- malloc_mutex_unlock(tdata->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
if (not_found) {
- tcache_t *tcache;
void *btkey;
prof_gctx_t *gctx;
bool new_gctx, error;
@@ -789,10 +793,9 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
return (NULL);
/* Link a prof_tctx_t into gctx for this thread. */
- tcache = tcache_get(tsd, true);
- ret.v = iallocztm(tsd, sizeof(prof_tctx_t),
- size2index(sizeof(prof_tctx_t)), false, tcache, true, NULL,
- true);
+ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
+ size2index(sizeof(prof_tctx_t)), false, NULL, true,
+ arena_ichoose(tsd_tsdn(tsd), NULL), true);
if (ret.p == NULL) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
@@ -806,41 +809,42 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt)
ret.p->tctx_uid = tdata->tctx_uid_next++;
ret.p->prepared = true;
ret.p->state = prof_tctx_state_initializing;
- malloc_mutex_lock(tdata->lock);
- error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
- malloc_mutex_unlock(tdata->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ error = ckh_insert(tsd_tsdn(tsd), &tdata->bt2tctx, btkey,
+ ret.v);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
if (error) {
if (new_gctx)
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- idalloctm(tsd, ret.v, tcache, true, true);
+ idalloctm(tsd_tsdn(tsd), ret.v, NULL, true, true);
return (NULL);
}
- malloc_mutex_lock(gctx->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
ret.p->state = prof_tctx_state_nominal;
tctx_tree_insert(&gctx->tctxs, ret.p);
gctx->nlimbo--;
- malloc_mutex_unlock(gctx->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
}
return (ret.p);
}
+/*
+ * The bodies of this function and prof_leakcheck() are compiled out unless heap
+ * profiling is enabled, so that it is possible to compile jemalloc with
+ * floating point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a workaround for
+ * versions of glibc that don't properly save/restore floating point registers
+ * during dynamic lazy symbol loading (which internally calls into whatever
+ * malloc implementation happens to be integrated into the application). Note
+ * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
+ * memory moves, so jemalloc must be compiled with such optimizations disabled
+ * (e.g.
+ * -mno-sse) in order for the workaround to be complete.
+ */
void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
- /*
- * The body of this function is compiled out unless heap profiling is
- * enabled, so that it is possible to compile jemalloc with floating
- * point support completely disabled. Avoiding floating point code is
- * important on memory-constrained systems, but it also enables a
- * workaround for versions of glibc that don't properly save/restore
- * floating point registers during dynamic lazy symbol loading (which
- * internally calls into whatever malloc implementation happens to be
- * integrated into the application). Note that some compilers (e.g.
- * gcc 4.8) may use floating point registers for fast memory moves, so
- * jemalloc must be compiled with such optimizations disabled (e.g.
- * -mno-sse) in order for the workaround to be complete.
- */
#ifdef JEMALLOC_PROF
uint64_t r;
double u;
@@ -894,11 +898,13 @@ size_t
prof_tdata_count(void)
{
size_t tdata_count = 0;
+ tsdn_t *tsdn;
- malloc_mutex_lock(&tdatas_mtx);
+ tsdn = tsdn_fetch();
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
(void *)&tdata_count);
- malloc_mutex_unlock(&tdatas_mtx);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
return (tdata_count);
}
@@ -917,9 +923,9 @@ prof_bt_count(void)
if (tdata == NULL)
return (0);
- malloc_mutex_lock(&bt2gctx_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
bt_count = ckh_count(&bt2gctx);
- malloc_mutex_unlock(&bt2gctx_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
return (bt_count);
}
@@ -1032,20 +1038,21 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
return (ret);
}
-/* tctx->tdata->lock is held. */
static void
-prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata)
{
- malloc_mutex_lock(tctx->gctx->lock);
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ malloc_mutex_lock(tsdn, tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_initializing:
- malloc_mutex_unlock(tctx->gctx->lock);
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
return;
case prof_tctx_state_nominal:
tctx->state = prof_tctx_state_dumping;
- malloc_mutex_unlock(tctx->gctx->lock);
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
@@ -1064,11 +1071,12 @@ prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
}
}
-/* gctx->lock is held. */
static void
-prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx)
{
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
if (opt_prof_accum) {
@@ -1077,10 +1085,12 @@ prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
}
}
-/* tctx->gctx is held. */
static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_nominal:
@@ -1088,7 +1098,7 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
break;
case prof_tctx_state_dumping:
case prof_tctx_state_purgatory:
- prof_tctx_merge_gctx(tctx, tctx->gctx);
+ prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
break;
default:
not_reached();
@@ -1097,11 +1107,18 @@ prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
return (NULL);
}
-/* gctx->lock is held. */
+struct prof_tctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
static prof_tctx_t *
-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque)
{
- bool propagate_err = *(bool *)arg;
+ struct prof_tctx_dump_iter_arg_s *arg =
+ (struct prof_tctx_dump_iter_arg_s *)opaque;
+
+ malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
switch (tctx->state) {
case prof_tctx_state_initializing:
@@ -1110,7 +1127,7 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
break;
case prof_tctx_state_dumping:
case prof_tctx_state_purgatory:
- if (prof_dump_printf(propagate_err,
+ if (prof_dump_printf(arg->propagate_err,
" t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
"%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
@@ -1123,12 +1140,14 @@ prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
return (NULL);
}
-/* tctx->gctx is held. */
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
+ tsdn_t *tsdn = (tsdn_t *)arg;
prof_tctx_t *ret;
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
switch (tctx->state) {
case prof_tctx_state_nominal:
/* New since dumping started; ignore. */
@@ -1149,12 +1168,12 @@ label_return:
}
static void
-prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{
cassert(config_prof);
- malloc_mutex_lock(gctx->lock);
+ malloc_mutex_lock(tsdn, gctx->lock);
/*
* Increment nlimbo so that gctx won't go away before dump.
@@ -1166,19 +1185,26 @@ prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
- malloc_mutex_unlock(gctx->lock);
+ malloc_mutex_unlock(tsdn, gctx->lock);
}
+struct prof_gctx_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ size_t leak_ngctx;
+};
+
static prof_gctx_t *
-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
{
- size_t *leak_ngctx = (size_t *)arg;
+ struct prof_gctx_merge_iter_arg_s *arg =
+ (struct prof_gctx_merge_iter_arg_s *)opaque;
- malloc_mutex_lock(gctx->lock);
- tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
+ (void *)arg->tsdn);
if (gctx->cnt_summed.curobjs != 0)
- (*leak_ngctx)++;
- malloc_mutex_unlock(gctx->lock);
+ arg->leak_ngctx++;
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
return (NULL);
}
@@ -1197,7 +1223,7 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
*/
while ((gctx = gctx_tree_first(gctxs)) != NULL) {
gctx_tree_remove(gctxs, gctx);
- malloc_mutex_lock(gctx->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
{
prof_tctx_t *next;
@@ -1205,14 +1231,15 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
do {
prof_tctx_t *to_destroy =
tctx_tree_iter(&gctx->tctxs, next,
- prof_tctx_finish_iter, NULL);
+ prof_tctx_finish_iter,
+ (void *)tsd_tsdn(tsd));
if (to_destroy != NULL) {
next = tctx_tree_next(&gctx->tctxs,
to_destroy);
tctx_tree_remove(&gctx->tctxs,
to_destroy);
- idalloctm(tsd, to_destroy,
- tcache_get(tsd, false), true, true);
+ idalloctm(tsd_tsdn(tsd), to_destroy,
+ NULL, true, true);
} else
next = NULL;
} while (next != NULL);
@@ -1220,19 +1247,26 @@ prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
gctx->nlimbo--;
if (prof_gctx_should_destroy(gctx)) {
gctx->nlimbo++;
- malloc_mutex_unlock(gctx->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
} else
- malloc_mutex_unlock(gctx->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
}
}
+struct prof_tdata_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ prof_cnt_t cnt_all;
+};
+
static prof_tdata_t *
-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *opaque)
{
- prof_cnt_t *cnt_all = (prof_cnt_t *)arg;
+ struct prof_tdata_merge_iter_arg_s *arg =
+ (struct prof_tdata_merge_iter_arg_s *)opaque;
- malloc_mutex_lock(tdata->lock);
+ malloc_mutex_lock(arg->tsdn, tdata->lock);
if (!tdata->expired) {
size_t tabind;
union {
@@ -1244,17 +1278,17 @@ prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
&tctx.v);)
- prof_tctx_merge_tdata(tctx.p, tdata);
+ prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
- cnt_all->curobjs += tdata->cnt_summed.curobjs;
- cnt_all->curbytes += tdata->cnt_summed.curbytes;
+ arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
+ arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
if (opt_prof_accum) {
- cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
- cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
+ arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
+ arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
}
} else
tdata->dumping = false;
- malloc_mutex_unlock(tdata->lock);
+ malloc_mutex_unlock(arg->tsdn, tdata->lock);
return (NULL);
}
@@ -1283,7 +1317,7 @@ prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
#define prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
-prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
+prof_dump_header(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all)
{
bool ret;
@@ -1294,10 +1328,10 @@ prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
return (true);
- malloc_mutex_lock(&tdatas_mtx);
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
(void *)&propagate_err) != NULL);
- malloc_mutex_unlock(&tdatas_mtx);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
return (ret);
}
#ifdef JEMALLOC_JET
@@ -1306,15 +1340,16 @@ prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif
-/* gctx->lock is held. */
static bool
-prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
- prof_gctx_tree_t *gctxs)
+prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
+ const prof_bt_t *bt, prof_gctx_tree_t *gctxs)
{
bool ret;
unsigned i;
+ struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
cassert(config_prof);
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
/* Avoid dumping such gctx's that have no useful data. */
if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
@@ -1348,8 +1383,10 @@ prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
goto label_return;
}
+ prof_tctx_dump_iter_arg.tsdn = tsdn;
+ prof_tctx_dump_iter_arg.propagate_err = propagate_err;
if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
- (void *)&propagate_err) != NULL) {
+ (void *)&prof_tctx_dump_iter_arg) != NULL) {
ret = true;
goto label_return;
}
@@ -1442,39 +1479,66 @@ label_return:
return (ret);
}
+/*
+ * See prof_sample_threshold_update() comment for why the body of this function
+ * is conditionally compiled.
+ */
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
const char *filename)
{
+#ifdef JEMALLOC_PROF
+ /*
+	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
+ * differ slightly from what jeprof reports, because here we scale the
+ * summary values, whereas jeprof scales each context individually and
+ * reports the sums of the scaled values.
+ */
if (cnt_all->curbytes != 0) {
- malloc_printf("<jemalloc>: Leak summary: %"FMTu64" byte%s, %"
- FMTu64" object%s, %zu context%s\n",
- cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
- cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
- leak_ngctx, (leak_ngctx != 1) ? "s" : "");
+ double sample_period = (double)((uint64_t)1 << lg_prof_sample);
+ double ratio = (((double)cnt_all->curbytes) /
+ (double)cnt_all->curobjs) / sample_period;
+ double scale_factor = 1.0 / (1.0 - exp(-ratio));
+ uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
+ * scale_factor);
+ uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
+ scale_factor);
+
+ malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
+ " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
+ curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
+ 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
"<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
+#endif
}
+struct prof_gctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
static prof_gctx_t *
-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque)
{
prof_gctx_t *ret;
- bool propagate_err = *(bool *)arg;
+ struct prof_gctx_dump_iter_arg_s *arg =
+ (struct prof_gctx_dump_iter_arg_s *)opaque;
- malloc_mutex_lock(gctx->lock);
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
- if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
+ if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
+ gctxs)) {
ret = gctx;
goto label_return;
}
ret = NULL;
label_return:
- malloc_mutex_unlock(gctx->lock);
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
return (ret);
}
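
The scaling in prof_leakcheck() above works out as: ratio = (curbytes / curobjs) / 2^lg_prof_sample, and both summary values are multiplied by 1 / (1 - exp(-ratio)). A minimal standalone sketch of that arithmetic follows (illustrative inputs only, not jemalloc code; the default lg_prof_sample of 19 is assumed). Compile with -lm; with these inputs the scale factor is roughly 64x the sampled counts.

/*
 * Standalone sketch of the leak-summary scaling used by prof_leakcheck()
 * above.  Input values are hypothetical.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t curbytes = 131072;		/* hypothetical sampled bytes */
	uint64_t curobjs = 16;			/* hypothetical sampled objects */
	unsigned lg_prof_sample = 19;		/* assumed default sample rate */
	double sample_period = (double)((uint64_t)1 << lg_prof_sample);
	double ratio = (((double)curbytes) / (double)curobjs) / sample_period;
	double scale_factor = 1.0 / (1.0 - exp(-ratio));
	uint64_t est_bytes = (uint64_t)round((double)curbytes * scale_factor);
	uint64_t est_objs = (uint64_t)round((double)curobjs * scale_factor);

	printf("~%ju bytes, ~%ju objects\n", (uintmax_t)est_bytes,
	    (uintmax_t)est_objs);
	return (0);
}
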
@@ -1482,13 +1546,14 @@ static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
prof_tdata_t *tdata;
- prof_cnt_t cnt_all;
+ struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
size_t tabind;
union {
prof_gctx_t *p;
void *v;
} gctx;
- size_t leak_ngctx;
+ struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
+ struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
prof_gctx_tree_t gctxs;
cassert(config_prof);
@@ -1497,7 +1562,7 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
if (tdata == NULL)
return (true);
- malloc_mutex_lock(&prof_dump_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
prof_enter(tsd, tdata);
/*
@@ -1506,20 +1571,24 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
*/
gctx_tree_new(&gctxs);
for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
- prof_dump_gctx_prep(gctx.p, &gctxs);
+ prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, &gctxs);
/*
* Iterate over tdatas, and for the non-expired ones snapshot their tctx
* stats and merge them into the associated gctx's.
*/
- memset(&cnt_all, 0, sizeof(prof_cnt_t));
- malloc_mutex_lock(&tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
- malloc_mutex_unlock(&tdatas_mtx);
+ prof_tdata_merge_iter_arg.tsdn = tsd_tsdn(tsd);
+ memset(&prof_tdata_merge_iter_arg.cnt_all, 0, sizeof(prof_cnt_t));
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
+ (void *)&prof_tdata_merge_iter_arg);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
/* Merge tctx stats into gctx's. */
- leak_ngctx = 0;
- gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);
+ prof_gctx_merge_iter_arg.tsdn = tsd_tsdn(tsd);
+ prof_gctx_merge_iter_arg.leak_ngctx = 0;
+ gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter,
+ (void *)&prof_gctx_merge_iter_arg);
prof_leave(tsd, tdata);
@@ -1528,12 +1597,15 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error;
/* Dump profile header. */
- if (prof_dump_header(propagate_err, &cnt_all))
+ if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
+ &prof_tdata_merge_iter_arg.cnt_all))
goto label_write_error;
/* Dump per gctx profile stats. */
+ prof_gctx_dump_iter_arg.tsdn = tsd_tsdn(tsd);
+ prof_gctx_dump_iter_arg.propagate_err = propagate_err;
if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
- (void *)&propagate_err) != NULL)
+ (void *)&prof_gctx_dump_iter_arg) != NULL)
goto label_write_error;
/* Dump /proc/<pid>/maps if possible. */
@@ -1544,17 +1616,18 @@ prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
goto label_open_close_error;
prof_gctx_finish(tsd, &gctxs);
- malloc_mutex_unlock(&prof_dump_mtx);
-
- if (leakcheck)
- prof_leakcheck(&cnt_all, leak_ngctx, filename);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ if (leakcheck) {
+ prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
+ prof_gctx_merge_iter_arg.leak_ngctx, filename);
+ }
return (false);
label_write_error:
prof_dump_close(propagate_err);
label_open_close_error:
prof_gctx_finish(tsd, &gctxs);
- malloc_mutex_unlock(&prof_dump_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
return (true);
}
@@ -1594,23 +1667,23 @@ prof_fdump(void)
return;
tsd = tsd_fetch();
- malloc_mutex_lock(&prof_dump_seq_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(&prof_dump_seq_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump(tsd, false, filename, opt_prof_leak);
}
void
-prof_idump(void)
+prof_idump(tsdn_t *tsdn)
{
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted)
+ if (!prof_booted || tsdn_null(tsdn))
return;
- tsd = tsd_fetch();
+ tsd = tsdn_tsd(tsdn);
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
@@ -1621,50 +1694,48 @@ prof_idump(void)
if (opt_prof_prefix[0] != '\0') {
char filename[PATH_MAX + 1];
- malloc_mutex_lock(&prof_dump_seq_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false);
}
}
bool
-prof_mdump(const char *filename)
+prof_mdump(tsd_t *tsd, const char *filename)
{
- tsd_t *tsd;
char filename_buf[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
if (!opt_prof || !prof_booted)
return (true);
- tsd = tsd_fetch();
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
if (opt_prof_prefix[0] == '\0')
return (true);
- malloc_mutex_lock(&prof_dump_seq_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
filename = filename_buf;
}
return (prof_dump(tsd, true, filename, false));
}
void
-prof_gdump(void)
+prof_gdump(tsdn_t *tsdn)
{
tsd_t *tsd;
prof_tdata_t *tdata;
cassert(config_prof);
- if (!prof_booted)
+ if (!prof_booted || tsdn_null(tsdn))
return;
- tsd = tsd_fetch();
+ tsd = tsdn_tsd(tsdn);
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL)
return;
@@ -1675,10 +1746,10 @@ prof_gdump(void)
if (opt_prof_prefix[0] != '\0') {
char filename[DUMP_FILENAME_BUFSIZE];
- malloc_mutex_lock(&prof_dump_seq_mtx);
+ malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
+ malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
prof_dump(tsd, false, filename, false);
}
}
@@ -1707,31 +1778,30 @@ prof_bt_keycomp(const void *k1, const void *k2)
}
JEMALLOC_INLINE_C uint64_t
-prof_thr_uid_alloc(void)
+prof_thr_uid_alloc(tsdn_t *tsdn)
{
uint64_t thr_uid;
- malloc_mutex_lock(&next_thr_uid_mtx);
+ malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
thr_uid = next_thr_uid;
next_thr_uid++;
- malloc_mutex_unlock(&next_thr_uid_mtx);
+ malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
return (thr_uid);
}
static prof_tdata_t *
-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+prof_tdata_init_impl(tsdn_t *tsdn, uint64_t thr_uid, uint64_t thr_discrim,
char *thread_name, bool active)
{
prof_tdata_t *tdata;
- tcache_t *tcache;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
- tcache = tcache_get(tsd, true);
- tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t),
- size2index(sizeof(prof_tdata_t)), false, tcache, true, NULL, true);
+ tdata = (prof_tdata_t *)iallocztm(tsdn, sizeof(prof_tdata_t),
+ size2index(sizeof(prof_tdata_t)), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
if (tdata == NULL)
return (NULL);
@@ -1743,9 +1813,9 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->expired = false;
tdata->tctx_uid_next = 0;
- if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
+ if (ckh_new(tsdn, &tdata->bt2tctx, PROF_CKH_MINITEMS,
prof_bt_hash, prof_bt_keycomp)) {
- idalloctm(tsd, tdata, tcache, true, true);
+ idalloctm(tsdn, tdata, NULL, true, true);
return (NULL);
}
@@ -1759,24 +1829,23 @@ prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
tdata->dumping = false;
tdata->active = active;
- malloc_mutex_lock(&tdatas_mtx);
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
tdata_tree_insert(&tdatas, tdata);
- malloc_mutex_unlock(&tdatas_mtx);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
return (tdata);
}
prof_tdata_t *
-prof_tdata_init(tsd_t *tsd)
+prof_tdata_init(tsdn_t *tsdn)
{
- return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
- prof_thread_active_init_get()));
+ return (prof_tdata_init_impl(tsdn, prof_thr_uid_alloc(tsdn), 0, NULL,
+ prof_thread_active_init_get(tsdn)));
}
-/* tdata->lock must be held. */
static bool
-prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached)
{
if (tdata->attached && !even_if_attached)
@@ -1786,32 +1855,42 @@ prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
return (true);
}
-/* tdatas_mtx must be held. */
+static bool
+prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached)
+{
+
+ malloc_mutex_assert_owner(tsdn, tdata->lock);
+
+ return (prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+}
+
static void
-prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+prof_tdata_destroy_locked(tsdn_t *tsdn, prof_tdata_t *tdata,
bool even_if_attached)
{
- tcache_t *tcache;
- assert(prof_tdata_should_destroy(tdata, even_if_attached));
- assert(tsd_prof_tdata_get(tsd) != tdata);
+ malloc_mutex_assert_owner(tsdn, &tdatas_mtx);
+
+ assert(tsdn_null(tsdn) || tsd_prof_tdata_get(tsdn_tsd(tsdn)) != tdata);
tdata_tree_remove(&tdatas, tdata);
- tcache = tcache_get(tsd, false);
+ assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+
if (tdata->thread_name != NULL)
- idalloctm(tsd, tdata->thread_name, tcache, true, true);
- ckh_delete(tsd, &tdata->bt2tctx);
- idalloctm(tsd, tdata, tcache, true, true);
+ idalloctm(tsdn, tdata->thread_name, NULL, true, true);
+ ckh_delete(tsdn, &tdata->bt2tctx);
+ idalloctm(tsdn, tdata, NULL, true, true);
}
static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
+prof_tdata_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, bool even_if_attached)
{
- malloc_mutex_lock(&tdatas_mtx);
- prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
- malloc_mutex_unlock(&tdatas_mtx);
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ prof_tdata_destroy_locked(tsdn, tdata, even_if_attached);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
}
static void
@@ -1819,9 +1898,10 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
{
bool destroy_tdata;
- malloc_mutex_lock(tdata->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
if (tdata->attached) {
- destroy_tdata = prof_tdata_should_destroy(tdata, true);
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
+ true);
/*
* Only detach if !destroy_tdata, because detaching would allow
* another thread to win the race to destroy tdata.
@@ -1831,9 +1911,9 @@ prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
tsd_prof_tdata_set(tsd, NULL);
} else
destroy_tdata = false;
- malloc_mutex_unlock(tdata->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
if (destroy_tdata)
- prof_tdata_destroy(tsd, tdata, true);
+ prof_tdata_destroy(tsd_tsdn(tsd), tdata, true);
}
prof_tdata_t *
@@ -1842,27 +1922,27 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
- prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
+ prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
- return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
- active));
+ return (prof_tdata_init_impl(tsd_tsdn(tsd), thr_uid, thr_discrim,
+ thread_name, active));
}
static bool
-prof_tdata_expire(prof_tdata_t *tdata)
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata)
{
bool destroy_tdata;
- malloc_mutex_lock(tdata->lock);
+ malloc_mutex_lock(tsdn, tdata->lock);
if (!tdata->expired) {
tdata->expired = true;
destroy_tdata = tdata->attached ? false :
- prof_tdata_should_destroy(tdata, false);
+ prof_tdata_should_destroy(tsdn, tdata, false);
} else
destroy_tdata = false;
- malloc_mutex_unlock(tdata->lock);
+ malloc_mutex_unlock(tsdn, tdata->lock);
return (destroy_tdata);
}
@@ -1870,35 +1950,36 @@ prof_tdata_expire(prof_tdata_t *tdata)
static prof_tdata_t *
prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
+ tsdn_t *tsdn = (tsdn_t *)arg;
- return (prof_tdata_expire(tdata) ? tdata : NULL);
+ return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
}
void
-prof_reset(tsd_t *tsd, size_t lg_sample)
+prof_reset(tsdn_t *tsdn, size_t lg_sample)
{
prof_tdata_t *next;
assert(lg_sample < (sizeof(uint64_t) << 3));
- malloc_mutex_lock(&prof_dump_mtx);
- malloc_mutex_lock(&tdatas_mtx);
+ malloc_mutex_lock(tsdn, &prof_dump_mtx);
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
lg_prof_sample = lg_sample;
next = NULL;
do {
prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
- prof_tdata_reset_iter, NULL);
+ prof_tdata_reset_iter, (void *)tsdn);
if (to_destroy != NULL) {
next = tdata_tree_next(&tdatas, to_destroy);
- prof_tdata_destroy_locked(tsd, to_destroy, false);
+ prof_tdata_destroy_locked(tsdn, to_destroy, false);
} else
next = NULL;
} while (next != NULL);
- malloc_mutex_unlock(&tdatas_mtx);
- malloc_mutex_unlock(&prof_dump_mtx);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+ malloc_mutex_unlock(tsdn, &prof_dump_mtx);
}
void
@@ -1915,35 +1996,33 @@ prof_tdata_cleanup(tsd_t *tsd)
}
bool
-prof_active_get(void)
+prof_active_get(tsdn_t *tsdn)
{
bool prof_active_current;
- malloc_mutex_lock(&prof_active_mtx);
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
prof_active_current = prof_active;
- malloc_mutex_unlock(&prof_active_mtx);
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
return (prof_active_current);
}
bool
-prof_active_set(bool active)
+prof_active_set(tsdn_t *tsdn, bool active)
{
bool prof_active_old;
- malloc_mutex_lock(&prof_active_mtx);
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
prof_active_old = prof_active;
prof_active = active;
- malloc_mutex_unlock(&prof_active_mtx);
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
return (prof_active_old);
}
const char *
-prof_thread_name_get(void)
+prof_thread_name_get(tsd_t *tsd)
{
- tsd_t *tsd;
prof_tdata_t *tdata;
- tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return ("");
@@ -1951,7 +2030,7 @@ prof_thread_name_get(void)
}
static char *
-prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
+prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name)
{
char *ret;
size_t size;
@@ -1963,8 +2042,8 @@ prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
if (size == 1)
return ("");
- ret = iallocztm(tsd, size, size2index(size), false, tcache_get(tsd,
- true), true, NULL, true);
+ ret = iallocztm(tsdn, size, size2index(size), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
if (ret == NULL)
return (NULL);
memcpy(ret, thread_name, size);
@@ -1991,13 +2070,12 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
return (EFAULT);
}
- s = prof_thread_name_alloc(tsd, thread_name);
+ s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
if (s == NULL)
return (EAGAIN);
if (tdata->thread_name != NULL) {
- idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
- true, true);
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, true, true);
tdata->thread_name = NULL;
}
if (strlen(s) > 0)
@@ -2006,12 +2084,10 @@ prof_thread_name_set(tsd_t *tsd, const char *thread_name)
}
bool
-prof_thread_active_get(void)
+prof_thread_active_get(tsd_t *tsd)
{
- tsd_t *tsd;
prof_tdata_t *tdata;
- tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (false);
@@ -2019,12 +2095,10 @@ prof_thread_active_get(void)
}
bool
-prof_thread_active_set(bool active)
+prof_thread_active_set(tsd_t *tsd, bool active)
{
- tsd_t *tsd;
prof_tdata_t *tdata;
- tsd = tsd_fetch();
tdata = prof_tdata_get(tsd, true);
if (tdata == NULL)
return (true);
@@ -2033,48 +2107,48 @@ prof_thread_active_set(bool active)
}
bool
-prof_thread_active_init_get(void)
+prof_thread_active_init_get(tsdn_t *tsdn)
{
bool active_init;
- malloc_mutex_lock(&prof_thread_active_init_mtx);
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
active_init = prof_thread_active_init;
- malloc_mutex_unlock(&prof_thread_active_init_mtx);
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
return (active_init);
}
bool
-prof_thread_active_init_set(bool active_init)
+prof_thread_active_init_set(tsdn_t *tsdn, bool active_init)
{
bool active_init_old;
- malloc_mutex_lock(&prof_thread_active_init_mtx);
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
active_init_old = prof_thread_active_init;
prof_thread_active_init = active_init;
- malloc_mutex_unlock(&prof_thread_active_init_mtx);
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
return (active_init_old);
}
bool
-prof_gdump_get(void)
+prof_gdump_get(tsdn_t *tsdn)
{
bool prof_gdump_current;
- malloc_mutex_lock(&prof_gdump_mtx);
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
prof_gdump_current = prof_gdump_val;
- malloc_mutex_unlock(&prof_gdump_mtx);
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
return (prof_gdump_current);
}
bool
-prof_gdump_set(bool gdump)
+prof_gdump_set(tsdn_t *tsdn, bool gdump)
{
bool prof_gdump_old;
- malloc_mutex_lock(&prof_gdump_mtx);
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
prof_gdump_old = prof_gdump_val;
prof_gdump_val = gdump;
- malloc_mutex_unlock(&prof_gdump_mtx);
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
return (prof_gdump_old);
}
@@ -2115,47 +2189,54 @@ prof_boot1(void)
}
bool
-prof_boot2(void)
+prof_boot2(tsdn_t *tsdn)
{
cassert(config_prof);
if (opt_prof) {
- tsd_t *tsd;
unsigned i;
lg_prof_sample = opt_lg_prof_sample;
prof_active = opt_prof_active;
- if (malloc_mutex_init(&prof_active_mtx))
+ if (malloc_mutex_init(&prof_active_mtx, "prof_active",
+ WITNESS_RANK_PROF_ACTIVE))
return (true);
prof_gdump_val = opt_prof_gdump;
- if (malloc_mutex_init(&prof_gdump_mtx))
+ if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
+ WITNESS_RANK_PROF_GDUMP))
return (true);
prof_thread_active_init = opt_prof_thread_active_init;
- if (malloc_mutex_init(&prof_thread_active_init_mtx))
+ if (malloc_mutex_init(&prof_thread_active_init_mtx,
+ "prof_thread_active_init",
+ WITNESS_RANK_PROF_THREAD_ACTIVE_INIT))
return (true);
- tsd = tsd_fetch();
- if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ if (ckh_new(tsdn, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);
- if (malloc_mutex_init(&bt2gctx_mtx))
+ if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
+ WITNESS_RANK_PROF_BT2GCTX))
return (true);
tdata_tree_new(&tdatas);
- if (malloc_mutex_init(&tdatas_mtx))
+ if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
+ WITNESS_RANK_PROF_TDATAS))
return (true);
next_thr_uid = 0;
- if (malloc_mutex_init(&next_thr_uid_mtx))
+ if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
+ WITNESS_RANK_PROF_NEXT_THR_UID))
return (true);
- if (malloc_mutex_init(&prof_dump_seq_mtx))
+ if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
+ WITNESS_RANK_PROF_DUMP_SEQ))
return (true);
- if (malloc_mutex_init(&prof_dump_mtx))
+ if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
+ WITNESS_RANK_PROF_DUMP))
return (true);
if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
@@ -2165,21 +2246,23 @@ prof_boot2(void)
abort();
}
- gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
- sizeof(malloc_mutex_t));
+ gctx_locks = (malloc_mutex_t *)base_alloc(tsdn, PROF_NCTX_LOCKS
+ * sizeof(malloc_mutex_t));
if (gctx_locks == NULL)
return (true);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- if (malloc_mutex_init(&gctx_locks[i]))
+ if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
+ WITNESS_RANK_PROF_GCTX))
return (true);
}
- tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
- sizeof(malloc_mutex_t));
+ tdata_locks = (malloc_mutex_t *)base_alloc(tsdn,
+ PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t));
if (tdata_locks == NULL)
return (true);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
- if (malloc_mutex_init(&tdata_locks[i]))
+ if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
+ WITNESS_RANK_PROF_TDATA))
return (true);
}
}
@@ -2198,56 +2281,77 @@ prof_boot2(void)
}
void
-prof_prefork(void)
+prof_prefork0(tsdn_t *tsdn)
{
if (opt_prof) {
unsigned i;
- malloc_mutex_prefork(&tdatas_mtx);
- malloc_mutex_prefork(&bt2gctx_mtx);
- malloc_mutex_prefork(&next_thr_uid_mtx);
- malloc_mutex_prefork(&prof_dump_seq_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_prefork(&gctx_locks[i]);
+ malloc_mutex_prefork(tsdn, &prof_dump_mtx);
+ malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
+ malloc_mutex_prefork(tsdn, &tdatas_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++)
- malloc_mutex_prefork(&tdata_locks[i]);
+ malloc_mutex_prefork(tsdn, &tdata_locks[i]);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_prefork(tsdn, &gctx_locks[i]);
}
}
void
-prof_postfork_parent(void)
+prof_prefork1(tsdn_t *tsdn)
+{
+
+ if (opt_prof) {
+ malloc_mutex_prefork(tsdn, &prof_active_mtx);
+ malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
+ malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
+ }
+}
+
+void
+prof_postfork_parent(tsdn_t *tsdn)
{
if (opt_prof) {
unsigned i;
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
- malloc_mutex_postfork_parent(&tdata_locks[i]);
+ malloc_mutex_postfork_parent(tsdn,
+ &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_parent(&gctx_locks[i]);
- malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
- malloc_mutex_postfork_parent(&next_thr_uid_mtx);
- malloc_mutex_postfork_parent(&bt2gctx_mtx);
- malloc_mutex_postfork_parent(&tdatas_mtx);
+ malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
+ malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
}
}
void
-prof_postfork_child(void)
+prof_postfork_child(tsdn_t *tsdn)
{
if (opt_prof) {
unsigned i;
- for (i = 0; i < PROF_NTDATA_LOCKS; i++)
- malloc_mutex_postfork_child(&tdata_locks[i]);
+ malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_child(&gctx_locks[i]);
- malloc_mutex_postfork_child(&prof_dump_seq_mtx);
- malloc_mutex_postfork_child(&next_thr_uid_mtx);
- malloc_mutex_postfork_child(&bt2gctx_mtx);
- malloc_mutex_postfork_child(&tdatas_mtx);
+ malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++)
+ malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
+ malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
}
}
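
The prof.c changes above all follow one mechanical pattern: functions that used to lock their mutexes directly now accept a tsdn_t * (a thread-state handle that may be NULL during early bootstrap) and pass it down to malloc_mutex_lock()/malloc_mutex_unlock(), so the new witness machinery (src/witness.c below) can record held locks and check rank ordering. A generic sketch of that calling convention follows, using hypothetical names rather than jemalloc's internal API; the rank bookkeeping itself is sketched after src/witness.c below.

/*
 * Generic sketch of the calling convention adopted above (hypothetical
 * names, not jemalloc's actual API).  A per-thread context is threaded
 * through every function that locks, so the lock wrapper can record
 * acquisitions before blocking.  Compile with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

typedef struct thread_ctx_s {
	int locks_held;			/* stand-in for a witness list */
} thread_ctx_t;

typedef struct ranked_mutex_s {
	pthread_mutex_t mtx;
	int rank;
} ranked_mutex_t;

static void
ctx_mutex_lock(thread_ctx_t *ctx, ranked_mutex_t *rm)
{

	if (ctx != NULL)
		ctx->locks_held++;	/* rank checks would go here */
	pthread_mutex_lock(&rm->mtx);
}

static void
ctx_mutex_unlock(thread_ctx_t *ctx, ranked_mutex_t *rm)
{

	pthread_mutex_unlock(&rm->mtx);
	if (ctx != NULL)
		ctx->locks_held--;
}

static ranked_mutex_t counter_mtx = { .mtx = PTHREAD_MUTEX_INITIALIZER,
    .rank = 1 };
static int counter;

/* Analogue of e.g. prof_active_get(tsdn): thread the ctx down to the lock. */
static int
counter_get(thread_ctx_t *ctx)
{
	int v;

	ctx_mutex_lock(ctx, &counter_mtx);
	v = counter;
	ctx_mutex_unlock(ctx, &counter_mtx);
	return (v);
}

int
main(void)
{
	thread_ctx_t ctx = { 0 };

	printf("%d\n", counter_get(&ctx));
	return (0);
}
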
diff --git a/contrib/jemalloc/src/quarantine.c b/contrib/jemalloc/src/quarantine.c
index ff8801c..18903fb 100644
--- a/contrib/jemalloc/src/quarantine.c
+++ b/contrib/jemalloc/src/quarantine.c
@@ -13,24 +13,22 @@
/* Function prototypes for non-inline static functions. */
static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
-static void quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine);
-static void quarantine_drain(tsd_t *tsd, quarantine_t *quarantine,
+static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
+static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
size_t upper_bound);
/******************************************************************************/
static quarantine_t *
-quarantine_init(tsd_t *tsd, size_t lg_maxobjs)
+quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
{
quarantine_t *quarantine;
size_t size;
- assert(tsd_nominal(tsd));
-
size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
sizeof(quarantine_obj_t));
- quarantine = (quarantine_t *)iallocztm(tsd, size, size2index(size),
- false, tcache_get(tsd, true), true, NULL, true);
+ quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
+ false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
if (quarantine == NULL)
return (NULL);
quarantine->curbytes = 0;
@@ -49,7 +47,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (!tsd_nominal(tsd))
return;
- quarantine = quarantine_init(tsd, LG_MAXOBJS_INIT);
+ quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
/*
* Check again whether quarantine has been initialized, because
* quarantine_init() may have triggered recursive initialization.
@@ -57,7 +55,7 @@ quarantine_alloc_hook_work(tsd_t *tsd)
if (tsd_quarantine_get(tsd) == NULL)
tsd_quarantine_set(tsd, quarantine);
else
- idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true);
+ idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
}
static quarantine_t *
@@ -65,9 +63,9 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
quarantine_t *ret;
- ret = quarantine_init(tsd, quarantine->lg_maxobjs + 1);
+ ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
if (ret == NULL) {
- quarantine_drain_one(tsd, quarantine);
+ quarantine_drain_one(tsd_tsdn(tsd), quarantine);
return (quarantine);
}
@@ -89,18 +87,18 @@ quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
sizeof(quarantine_obj_t));
}
- idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true);
+ idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
tsd_quarantine_set(tsd, ret);
return (ret);
}
static void
-quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
+quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
{
quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
- assert(obj->usize == isalloc(obj->ptr, config_prof));
- idalloctm(tsd, obj->ptr, NULL, false, true);
+ assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
+ idalloctm(tsdn, obj->ptr, NULL, false, true);
quarantine->curbytes -= obj->usize;
quarantine->curobjs--;
quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
@@ -108,24 +106,24 @@ quarantine_drain_one(tsd_t *tsd, quarantine_t *quarantine)
}
static void
-quarantine_drain(tsd_t *tsd, quarantine_t *quarantine, size_t upper_bound)
+quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
{
while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
- quarantine_drain_one(tsd, quarantine);
+ quarantine_drain_one(tsdn, quarantine);
}
void
quarantine(tsd_t *tsd, void *ptr)
{
quarantine_t *quarantine;
- size_t usize = isalloc(ptr, config_prof);
+ size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
cassert(config_fill);
assert(opt_quarantine);
if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
- idalloctm(tsd, ptr, NULL, false, true);
+ idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
return;
}
/*
@@ -135,7 +133,7 @@ quarantine(tsd_t *tsd, void *ptr)
if (quarantine->curbytes + usize > opt_quarantine) {
size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
- usize : 0;
- quarantine_drain(tsd, quarantine, upper_bound);
+ quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
}
/* Grow the quarantine ring buffer if it's full. */
if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
@@ -160,11 +158,11 @@ quarantine(tsd_t *tsd, void *ptr)
&& usize <= SMALL_MAXCLASS)
arena_quarantine_junk_small(ptr, usize);
else
- memset(ptr, 0x5a, usize);
+ memset(ptr, JEMALLOC_FREE_JUNK, usize);
}
} else {
assert(quarantine->curbytes == 0);
- idalloctm(tsd, ptr, NULL, false, true);
+ idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
}
}
@@ -178,8 +176,8 @@ quarantine_cleanup(tsd_t *tsd)
quarantine = tsd_quarantine_get(tsd);
if (quarantine != NULL) {
- quarantine_drain(tsd, quarantine, 0);
- idalloctm(tsd, quarantine, tcache_get(tsd, false), true, true);
+ quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
+ idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
tsd_quarantine_set(tsd, NULL);
}
}
diff --git a/contrib/jemalloc/src/rtree.c b/contrib/jemalloc/src/rtree.c
index af0d97e..3166b45 100644
--- a/contrib/jemalloc/src/rtree.c
+++ b/contrib/jemalloc/src/rtree.c
@@ -15,6 +15,8 @@ rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
{
unsigned bits_in_leaf, height, i;
+ assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
+ RTREE_BITS_PER_LEVEL));
assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
diff --git a/contrib/jemalloc/src/stats.c b/contrib/jemalloc/src/stats.c
index a724947..073be4f 100644
--- a/contrib/jemalloc/src/stats.c
+++ b/contrib/jemalloc/src/stats.c
@@ -259,7 +259,7 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned nthreads;
const char *dss;
ssize_t lg_dirty_mult, decay_time;
- size_t page, pactive, pdirty, mapped;
+ size_t page, pactive, pdirty, mapped, retained;
size_t metadata_mapped, metadata_allocated;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
@@ -349,6 +349,9 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
"mapped: %12zu\n", mapped);
+ CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "retained: %12zu\n", retained);
CTL_M2_GET("stats.arenas.0.metadata.mapped", i, &metadata_mapped,
size_t);
CTL_M2_GET("stats.arenas.0.metadata.allocated", i, &metadata_allocated,
@@ -468,7 +471,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
#define OPT_WRITE_UNSIGNED(n) \
if (je_mallctl("opt."#n, &uv, &usz, NULL, 0) == 0) { \
malloc_cprintf(write_cb, cbopaque, \
- " opt."#n": %zu\n", sv); \
+ " opt."#n": %u\n", uv); \
}
#define OPT_WRITE_SIZE_T(n) \
if (je_mallctl("opt."#n, &sv, &ssz, NULL, 0) == 0) { \
@@ -597,7 +600,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (config_stats) {
size_t *cactive;
- size_t allocated, active, metadata, resident, mapped;
+ size_t allocated, active, metadata, resident, mapped, retained;
CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
@@ -605,10 +608,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("stats.metadata", &metadata, size_t);
CTL_GET("stats.resident", &resident, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
+ CTL_GET("stats.retained", &retained, size_t);
malloc_cprintf(write_cb, cbopaque,
"Allocated: %zu, active: %zu, metadata: %zu,"
- " resident: %zu, mapped: %zu\n",
- allocated, active, metadata, resident, mapped);
+ " resident: %zu, mapped: %zu, retained: %zu\n",
+ allocated, active, metadata, resident, mapped, retained);
malloc_cprintf(write_cb, cbopaque,
"Current active ceiling: %zu\n",
atomic_read_z(cactive));
diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c
index 6e32f40..175759c 100644
--- a/contrib/jemalloc/src/tcache.c
+++ b/contrib/jemalloc/src/tcache.c
@@ -23,10 +23,11 @@ static tcaches_t *tcaches_avail;
/******************************************************************************/
-size_t tcache_salloc(const void *ptr)
+size_t
+tcache_salloc(tsdn_t *tsdn, const void *ptr)
{
- return (arena_salloc(ptr, false));
+ return (arena_salloc(tsdn, ptr, false));
}
void
@@ -70,12 +71,12 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
}
void *
-tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
{
void *ret;
- arena_tcache_fill_small(tsd, arena, tbin, binind, config_prof ?
+ arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
tcache->prof_accumbytes : 0);
if (config_prof)
tcache->prof_accumbytes = 0;
@@ -106,12 +107,13 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
arena_bin_t *bin = &bin_arena->bins[binind];
if (config_prof && bin_arena == arena) {
- if (arena_prof_accum(arena, tcache->prof_accumbytes))
- prof_idump();
+ if (arena_prof_accum(tsd_tsdn(tsd), arena,
+ tcache->prof_accumbytes))
+ prof_idump(tsd_tsdn(tsd));
tcache->prof_accumbytes = 0;
}
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (config_stats && bin_arena == arena) {
assert(!merged_stats);
merged_stats = true;
@@ -128,9 +130,9 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_bits_t *bitselm =
- arena_bitselm_get(chunk, pageind);
- arena_dalloc_bin_junked_locked(bin_arena, chunk,
- ptr, bitselm);
+ arena_bitselm_get_mutable(chunk, pageind);
+ arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
+ bin_arena, chunk, ptr, bitselm);
} else {
/*
* This object was allocated via a different
@@ -142,8 +144,8 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
ndeferred++;
}
}
- malloc_mutex_unlock(&bin->lock);
- arena_decay_ticks(tsd, bin_arena, nflush - ndeferred);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
}
if (config_stats && !merged_stats) {
/*
@@ -151,11 +153,11 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
* arena, so the stats didn't get merged. Manually do so now.
*/
arena_bin_t *bin = &arena->bins[binind];
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@@ -188,7 +190,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
if (config_prof)
idump = false;
- malloc_mutex_lock(&locked_arena->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
idump = arena_prof_accum_locked(arena,
@@ -211,8 +213,8 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (extent_node_arena_get(&chunk->node) ==
locked_arena) {
- arena_dalloc_large_junked_locked(locked_arena,
- chunk, ptr);
+ arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
+ locked_arena, chunk, ptr);
} else {
/*
* This object was allocated via a different
@@ -224,22 +226,23 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
ndeferred++;
}
}
- malloc_mutex_unlock(&locked_arena->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
if (config_prof && idump)
- prof_idump();
- arena_decay_ticks(tsd, locked_arena, nflush - ndeferred);
+ prof_idump(tsd_tsdn(tsd));
+ arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
+ ndeferred);
}
if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - NBINS].nrequests +=
tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
@@ -249,34 +252,26 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
tbin->low_water = tbin->ncached;
}
-void
-tcache_arena_associate(tcache_t *tcache, arena_t *arena)
+static void
+tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Link into list of extant tcaches. */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
}
-void
-tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
-{
-
- tcache_arena_dissociate(tcache, oldarena);
- tcache_arena_associate(tcache, newarena);
-}
-
-void
-tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
+static void
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
if (config_stats) {
/* Unlink from list of extant tcaches. */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
if (config_debug) {
bool in_ql = false;
tcache_t *iter;
@@ -289,11 +284,20 @@ tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
assert(in_ql);
}
ql_remove(&arena->tcache_ql, tcache, link);
- tcache_stats_merge(tcache, arena);
- malloc_mutex_unlock(&arena->lock);
+ tcache_stats_merge(tsdn, tcache, arena);
+ malloc_mutex_unlock(tsdn, &arena->lock);
}
}
+void
+tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
+ arena_t *newarena)
+{
+
+ tcache_arena_dissociate(tsdn, tcache, oldarena);
+ tcache_arena_associate(tsdn, tcache, newarena);
+}
+
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
@@ -307,11 +311,11 @@ tcache_get_hard(tsd_t *tsd)
arena = arena_choose(tsd, NULL);
if (unlikely(arena == NULL))
return (NULL);
- return (tcache_create(tsd, arena));
+ return (tcache_create(tsd_tsdn(tsd), arena));
}
tcache_t *
-tcache_create(tsd_t *tsd, arena_t *arena)
+tcache_create(tsdn_t *tsdn, arena_t *arena)
{
tcache_t *tcache;
size_t size, stack_offset;
@@ -325,12 +329,12 @@ tcache_create(tsd_t *tsd, arena_t *arena)
/* Avoid false cacheline sharing. */
size = sa2u(size, CACHELINE);
- tcache = ipallocztm(tsd, size, CACHELINE, true, false, true,
- arena_get(0, false));
+ tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
+ arena_get(TSDN_NULL, 0, true));
if (tcache == NULL)
return (NULL);
- tcache_arena_associate(tcache, arena);
+ tcache_arena_associate(tsdn, tcache, arena);
ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
@@ -357,7 +361,7 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
unsigned i;
arena = arena_choose(tsd, NULL);
- tcache_arena_dissociate(tcache, arena);
+ tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);
for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
@@ -365,9 +369,9 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
if (config_stats && tbin->tstats.nrequests != 0) {
arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
}
@@ -376,19 +380,19 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache)
tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
if (config_stats && tbin->tstats.nrequests != 0) {
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(arena, tcache->prof_accumbytes))
- prof_idump();
+ arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
+ prof_idump(tsd_tsdn(tsd));
- idalloctm(tsd, tcache, false, true, true);
+ idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
}
void
@@ -412,21 +416,22 @@ tcache_enabled_cleanup(tsd_t *tsd)
/* Do nothing. */
}
-/* Caller must own arena->lock. */
void
-tcache_stats_merge(tcache_t *tcache, arena_t *arena)
+tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
unsigned i;
cassert(config_stats);
+ malloc_mutex_assert_owner(tsdn, &arena->lock);
+
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
tbin->tstats.nrequests = 0;
}
@@ -440,13 +445,14 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
}
bool
-tcaches_create(tsd_t *tsd, unsigned *r_ind)
+tcaches_create(tsdn_t *tsdn, unsigned *r_ind)
{
+ arena_t *arena;
tcache_t *tcache;
tcaches_t *elm;
if (tcaches == NULL) {
- tcaches = base_alloc(sizeof(tcache_t *) *
+ tcaches = base_alloc(tsdn, sizeof(tcache_t *) *
(MALLOCX_TCACHE_MAX+1));
if (tcaches == NULL)
return (true);
@@ -454,7 +460,10 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind)
if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
return (true);
- tcache = tcache_create(tsd, arena_get(0, false));
+ arena = arena_ichoose(tsdn, NULL);
+ if (unlikely(arena == NULL))
+ return (true);
+ tcache = tcache_create(tsdn, arena);
if (tcache == NULL)
return (true);
@@ -500,7 +509,7 @@ tcaches_destroy(tsd_t *tsd, unsigned ind)
}
bool
-tcache_boot(void)
+tcache_boot(tsdn_t *tsdn)
{
unsigned i;
@@ -518,7 +527,7 @@ tcache_boot(void)
nhbins = size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
- tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
sizeof(tcache_bin_info_t));
if (tcache_bin_info == NULL)
return (true);
diff --git a/contrib/jemalloc/src/tsd.c b/contrib/jemalloc/src/tsd.c
index 34c1573..aeaa5e1 100644
--- a/contrib/jemalloc/src/tsd.c
+++ b/contrib/jemalloc/src/tsd.c
@@ -77,7 +77,7 @@ tsd_cleanup(void *arg)
/* Do nothing. */
break;
case tsd_state_nominal:
-#define O(n, t) \
+#define O(n, t) \
n##_cleanup(tsd);
MALLOC_TSD
#undef O
@@ -106,15 +106,17 @@ MALLOC_TSD
}
}
-bool
+tsd_t *
malloc_tsd_boot0(void)
{
+ tsd_t *tsd;
ncleanups = 0;
if (tsd_boot0())
- return (true);
- *tsd_arenas_tdata_bypassp_get(tsd_fetch()) = true;
- return (false);
+ return (NULL);
+ tsd = tsd_fetch();
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+ return (tsd);
}
void
@@ -169,10 +171,10 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
- malloc_mutex_lock(&head->lock);
+ malloc_mutex_lock(NULL, &head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
- malloc_mutex_unlock(&head->lock);
+ malloc_mutex_unlock(NULL, &head->lock);
return (iter->data);
}
}
@@ -180,7 +182,7 @@ tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
- malloc_mutex_unlock(&head->lock);
+ malloc_mutex_unlock(NULL, &head->lock);
return (NULL);
}
@@ -188,8 +190,8 @@ void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{
- malloc_mutex_lock(&head->lock);
+ malloc_mutex_lock(NULL, &head->lock);
ql_remove(&head->blocks, block, link);
- malloc_mutex_unlock(&head->lock);
+ malloc_mutex_unlock(NULL, &head->lock);
}
#endif
diff --git a/contrib/jemalloc/src/util.c b/contrib/jemalloc/src/util.c
index 116e981..04f9153 100644
--- a/contrib/jemalloc/src/util.c
+++ b/contrib/jemalloc/src/util.c
@@ -14,6 +14,7 @@
malloc_write("<jemalloc>: Unreachable code reached\n"); \
abort(); \
} \
+ unreachable(); \
} while (0)
#define not_implemented() do { \
@@ -330,10 +331,9 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
return (s);
}
-int
+size_t
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
{
- int ret;
size_t i;
const char *f;
@@ -424,6 +424,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
int prec = -1;
int width = -1;
unsigned char len = '?';
+ char *s;
+ size_t slen;
f++;
/* Flags. */
@@ -514,8 +516,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
}
/* Conversion specifier. */
switch (*f) {
- char *s;
- size_t slen;
case '%':
/* %% */
APPEND_C(*f);
@@ -601,21 +601,19 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
str[i] = '\0';
else
str[size - 1] = '\0';
- assert(i < INT_MAX);
- ret = (int)i;
#undef APPEND_C
#undef APPEND_S
#undef APPEND_PADDED_S
#undef GET_ARG_NUMERIC
- return (ret);
+ return (i);
}
JEMALLOC_FORMAT_PRINTF(3, 4)
-int
+size_t
malloc_snprintf(char *str, size_t size, const char *format, ...)
{
- int ret;
+ size_t ret;
va_list ap;
va_start(ap, format);
diff --git a/contrib/jemalloc/src/witness.c b/contrib/jemalloc/src/witness.c
new file mode 100644
index 0000000..23753f2
--- /dev/null
+++ b/contrib/jemalloc/src/witness.c
@@ -0,0 +1,136 @@
+#define JEMALLOC_WITNESS_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+void
+witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp)
+{
+
+ witness->name = name;
+ witness->rank = rank;
+ witness->comp = comp;
+}
+
+#ifdef JEMALLOC_JET
+#undef witness_lock_error
+#define witness_lock_error JEMALLOC_N(n_witness_lock_error)
+#endif
+void
+witness_lock_error(const witness_list_t *witnesses, const witness_t *witness)
+{
+ witness_t *w;
+
+ malloc_printf("<jemalloc>: Lock rank order reversal:");
+ ql_foreach(w, witnesses, link) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ }
+ malloc_printf(" %s(%u)\n", witness->name, witness->rank);
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_lock_error
+#define witness_lock_error JEMALLOC_N(witness_lock_error)
+witness_lock_error_t *witness_lock_error = JEMALLOC_N(n_witness_lock_error);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef witness_owner_error
+#define witness_owner_error JEMALLOC_N(n_witness_owner_error)
+#endif
+void
+witness_owner_error(const witness_t *witness)
+{
+
+ malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name,
+ witness->rank);
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_owner_error
+#define witness_owner_error JEMALLOC_N(witness_owner_error)
+witness_owner_error_t *witness_owner_error = JEMALLOC_N(n_witness_owner_error);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef witness_not_owner_error
+#define witness_not_owner_error JEMALLOC_N(n_witness_not_owner_error)
+#endif
+void
+witness_not_owner_error(const witness_t *witness)
+{
+
+ malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name,
+ witness->rank);
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_not_owner_error
+#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error)
+witness_not_owner_error_t *witness_not_owner_error =
+ JEMALLOC_N(n_witness_not_owner_error);
+#endif
+
+#ifdef JEMALLOC_JET
+#undef witness_lockless_error
+#define witness_lockless_error JEMALLOC_N(n_witness_lockless_error)
+#endif
+void
+witness_lockless_error(const witness_list_t *witnesses)
+{
+ witness_t *w;
+
+ malloc_printf("<jemalloc>: Should not own any locks:");
+ ql_foreach(w, witnesses, link) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ }
+ malloc_printf("\n");
+ abort();
+}
+#ifdef JEMALLOC_JET
+#undef witness_lockless_error
+#define witness_lockless_error JEMALLOC_N(witness_lockless_error)
+witness_lockless_error_t *witness_lockless_error =
+ JEMALLOC_N(n_witness_lockless_error);
+#endif
+
+void
+witnesses_cleanup(tsd_t *tsd)
+{
+
+ witness_assert_lockless(tsd_tsdn(tsd));
+
+ /* Do nothing. */
+}
+
+void
+witness_fork_cleanup(tsd_t *tsd)
+{
+
+ /* Do nothing. */
+}
+
+void
+witness_prefork(tsd_t *tsd)
+{
+
+ tsd_witness_fork_set(tsd, true);
+}
+
+void
+witness_postfork_parent(tsd_t *tsd)
+{
+
+ tsd_witness_fork_set(tsd, false);
+}
+
+void
+witness_postfork_child(tsd_t *tsd)
+{
+#ifndef JEMALLOC_MUTEX_INIT_CB
+ witness_list_t *witnesses;
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_new(witnesses);
+#endif
+ tsd_witness_fork_set(tsd, false);
+}
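
[Editor's note] witness.c above holds only the error reporters and fork hooks; the rank checks that call them live in the witness.h header added by this merge. A rough userland model of the rule those reporters back up, namely that a lock may only be acquired with a rank strictly greater than the last one held (a simplification: the real checker also honors per-rank comparison callbacks):

#include <stdio.h>
#include <stdlib.h>

struct witness { const char *name; unsigned rank; };

static struct witness held[8];
static int nheld;

static void
model_lock(const struct witness *w)
{
	if (nheld > 0 && held[nheld - 1].rank >= w->rank) {
		fprintf(stderr, "lock rank order reversal:");
		for (int i = 0; i < nheld; i++)
			fprintf(stderr, " %s(%u)", held[i].name, held[i].rank);
		fprintf(stderr, " %s(%u)\n", w->name, w->rank);
		abort();
	}
	held[nheld++] = *w;
}

int
main(void)
{
	struct witness arenas = { "arenas", 10 }, tcache = { "tcache", 20 };

	model_lock(&arenas);	/* ok: first lock */
	model_lock(&tcache);	/* ok: 20 > 10 */
	model_lock(&arenas);	/* reversal: aborts with the list above */
	return (0);
}
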
diff --git a/contrib/libarchive/cpio/bsdcpio.1 b/contrib/libarchive/cpio/bsdcpio.1
index e52546e..f966aa0 100644
--- a/contrib/libarchive/cpio/bsdcpio.1
+++ b/contrib/libarchive/cpio/bsdcpio.1
@@ -156,8 +156,7 @@ See above for description.
.It Fl Fl insecure
(i and p mode only)
Disable security checks during extraction or copying.
-This allows extraction via symbolic links, absolute paths,
-and path names containing
+This allows extraction via symbolic links and path names containing
.Sq ..
in the name.
.It Fl J , Fl Fl xz
diff --git a/contrib/libarchive/cpio/cpio.c b/contrib/libarchive/cpio/cpio.c
index f6441aa..413fc87 100644
--- a/contrib/libarchive/cpio/cpio.c
+++ b/contrib/libarchive/cpio/cpio.c
@@ -171,7 +171,6 @@ main(int argc, char *argv[])
cpio->extract_flags |= ARCHIVE_EXTRACT_NO_OVERWRITE_NEWER;
cpio->extract_flags |= ARCHIVE_EXTRACT_SECURE_SYMLINKS;
cpio->extract_flags |= ARCHIVE_EXTRACT_SECURE_NODOTDOT;
- cpio->extract_flags |= ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS;
cpio->extract_flags |= ARCHIVE_EXTRACT_PERM;
cpio->extract_flags |= ARCHIVE_EXTRACT_FFLAGS;
cpio->extract_flags |= ARCHIVE_EXTRACT_ACL;
@@ -257,7 +256,6 @@ main(int argc, char *argv[])
case OPTION_INSECURE:
cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_SYMLINKS;
cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_NODOTDOT;
- cpio->extract_flags &= ~ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS;
break;
case 'L': /* GNU cpio */
cpio->option_follow_links = 1;
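
[Editor's note] With ARCHIVE_EXTRACT_SECURE_NOABSOLUTEPATHS dropped from both the default set and the --insecure handler, absolute path names are accepted by default and --insecure only re-enables symlink and ".." traversal, matching the bsdcpio.1 change above. A tiny sketch of the flag bookkeeping (plain local macros stand in for the libarchive extract-flags word):

#include <stdio.h>

#define SECURE_SYMLINKS		0x0001
#define SECURE_NODOTDOT		0x0002
/* the NOABSOLUTEPATHS bit is intentionally absent from the defaults now */

int
main(void)
{
	int extract_flags = 0;

	/* defaults, mirroring main() above */
	extract_flags |= SECURE_SYMLINKS;
	extract_flags |= SECURE_NODOTDOT;

	/* --insecure, mirroring OPTION_INSECURE above */
	extract_flags &= ~SECURE_SYMLINKS;
	extract_flags &= ~SECURE_NODOTDOT;

	printf("flags after --insecure: %#x\n", extract_flags);
	return (0);
}
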
diff --git a/include/stdio.h b/include/stdio.h
index 7241386..f16f644 100644
--- a/include/stdio.h
+++ b/include/stdio.h
@@ -60,7 +60,7 @@ typedef __ssize_t ssize_t;
#ifndef _OFF64_T_DECLARED
#define _OFF64_T_DECLARED
-typedef __off_t off64_t;
+typedef __off64_t off64_t;
#endif
#if __POSIX_VISIBLE >= 200112 || __XSI_VISIBLE
@@ -432,10 +432,10 @@ FILE *funopen(const void *,
#define fropen(cookie, fn) funopen(cookie, fn, 0, 0, 0)
#define fwopen(cookie, fn) funopen(cookie, 0, fn, 0, 0)
-typedef ssize_t (cookie_read_function_t)(void *, char *, size_t);
-typedef ssize_t (cookie_write_function_t)(void *, const char *, size_t);
-typedef int (cookie_seek_function_t)(void *, off64_t *, int);
-typedef int (cookie_close_function_t)(void *);
+typedef __ssize_t cookie_read_function_t(void *, char *, size_t);
+typedef __ssize_t cookie_write_function_t(void *, const char *, size_t);
+typedef int cookie_seek_function_t(void *, off64_t *, int);
+typedef int cookie_close_function_t(void *);
typedef struct {
cookie_read_function_t *read;
cookie_write_function_t *write;
diff --git a/lib/libc/stdio/Symbol.map b/lib/libc/stdio/Symbol.map
index a332ab2..92aefa0 100644
--- a/lib/libc/stdio/Symbol.map
+++ b/lib/libc/stdio/Symbol.map
@@ -164,6 +164,7 @@ FBSD_1.3 {
FBSD_1.4 {
fdclose;
+ fopencookie;
};
FBSDprivate_1.0 {
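
[Editor's note] The cookie typedef cleanup above and the FBSD_1.4 symbol addition back the new fopencookie(3) interface. A minimal sketch of a cookie-backed stream that just counts bytes written; it assumes the FreeBSD 11 fopencookie/cookie_io_functions_t declarations (on glibc the same code needs _GNU_SOURCE):

#include <stdio.h>

static ssize_t
count_write(void *cookie, const char *buf, size_t len)
{
	(void)buf;
	*(size_t *)cookie += len;
	return ((ssize_t)len);		/* report everything as written */
}

int
main(void)
{
	size_t total = 0;
	cookie_io_functions_t io = {
		.read = NULL, .write = count_write, .seek = NULL, .close = NULL
	};
	FILE *fp = fopencookie(&total, "w", io);

	if (fp == NULL)
		return (1);
	fprintf(fp, "hello, cookie\n");
	fclose(fp);			/* flush triggers count_write() */
	printf("%zu bytes\n", total);
	return (0);
}
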
diff --git a/lib/libc/stdlib/jemalloc/Makefile.inc b/lib/libc/stdlib/jemalloc/Makefile.inc
index 8c4c12a..8b22fda 100644
--- a/lib/libc/stdlib/jemalloc/Makefile.inc
+++ b/lib/libc/stdlib/jemalloc/Makefile.inc
@@ -5,7 +5,7 @@
JEMALLOCSRCS:= jemalloc.c arena.c atomic.c base.c bitmap.c chunk.c \
chunk_dss.c chunk_mmap.c ckh.c ctl.c extent.c hash.c huge.c mb.c \
mutex.c nstime.c pages.c prng.c prof.c quarantine.c rtree.c stats.c \
- tcache.c ticker.c tsd.c util.c
+ tcache.c ticker.c tsd.c util.c witness.c
SYM_MAPS+=${LIBC_SRCTOP}/stdlib/jemalloc/Symbol.map
diff --git a/lib/libc/tests/nss/testutil.h b/lib/libc/tests/nss/testutil.h
index 39c4f41..ff9e8f0 100644
--- a/lib/libc/tests/nss/testutil.h
+++ b/lib/libc/tests/nss/testutil.h
@@ -252,9 +252,7 @@ int \
__##ent##_snapshot_read(char const *fname, struct ent##_test_data *td, \
int (*read_func)(struct ent *, char *)) \
{ \
- char buffer[1024]; \
struct ent data; \
- char *s; \
FILE *fi; \
size_t len; \
int rv; \
@@ -267,23 +265,22 @@ __##ent##_snapshot_read(char const *fname, struct ent##_test_data *td, \
return (-1); \
\
rv = 0; \
- memset(buffer, 0, sizeof(buffer)); \
while (!feof(fi)) { \
- s = fgets(buffer, sizeof(buffer), fi); \
- if (s != NULL && s[0] != '#') { \
- len = strlen(s); \
- if (len == 0) \
- continue; \
- if (buffer[len - 1] == '\n') \
- buffer[len -1] = '\0'; \
- \
- rv = read_func(&data, s); \
- if (rv == 0) { \
- __##ent##_test_data_append(td, &data); \
- td->free_func(&data); \
- } else \
- goto fin; \
- } \
+ char *buf = fgetln(fi, &len); \
+ if (buf == NULL || len <= 1) \
+ continue; \
+ if (buf[len - 1] == '\n') \
+ buf[len - 1] = '\0'; \
+ else \
+ buf[len] = '\0'; \
+ if (buf[0] == '#') \
+ continue; \
+ rv = read_func(&data, buf); \
+ if (rv == 0) { \
+ __##ent##_test_data_append(td, &data); \
+ td->free_func(&data); \
+ } else \
+ goto fin; \
} \
\
fin: \
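
[Editor's note] The rewritten snapshot reader above switches from fgets() into a fixed buffer to fgetln(3). A standalone sketch of the same loop shape; fgetln lines are not NUL-terminated, so the length has to be carried along (the file name is only an example):

#include <stdio.h>

int
main(void)
{
	FILE *fi = fopen("/etc/hosts", "r");
	char *buf;
	size_t len;

	if (fi == NULL)
		return (1);
	while ((buf = fgetln(fi, &len)) != NULL) {
		if (len <= 1 || buf[0] == '#')
			continue;		/* skip blanks and comments */
		if (buf[len - 1] == '\n')
			len--;
		printf("entry: %.*s\n", (int)len, buf);
	}
	fclose(fi);
	return (0);
}
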
diff --git a/lib/libutil/quotafile.c b/lib/libutil/quotafile.c
index 6b5a44d..27444c2 100644
--- a/lib/libutil/quotafile.c
+++ b/lib/libutil/quotafile.c
@@ -124,7 +124,7 @@ quota_open(struct fstab *fs, int quotatype, int openflags)
return (NULL);
qf->fd = -1;
qf->quotatype = quotatype;
- strncpy(qf->fsname, fs->fs_file, sizeof(qf->fsname));
+ strlcpy(qf->fsname, fs->fs_file, sizeof(qf->fsname));
if (stat(qf->fsname, &st) != 0)
goto error;
qf->dev = st.st_dev;
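
[Editor's note] strlcpy(3) both guarantees NUL termination and returns the full source length, which strncpy() does not. A short sketch of the truncation check the quota_open() change makes possible (FreeBSD libc declares strlcpy in <string.h>):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char fsname[8];
	const char *fs_file = "/usr/home/storage";

	if (strlcpy(fsname, fs_file, sizeof(fsname)) >= sizeof(fsname))
		fprintf(stderr, "truncated: %s -> %s\n", fs_file, fsname);
	return (0);
}
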
diff --git a/libexec/ftpd/ftpd.c b/libexec/ftpd/ftpd.c
index 8928493..521e152 100644
--- a/libexec/ftpd/ftpd.c
+++ b/libexec/ftpd/ftpd.c
@@ -1671,14 +1671,14 @@ retrieve(char *cmd, char *name)
struct stat st;
int (*closefunc)(FILE *);
time_t start;
+ char line[BUFSIZ];
if (cmd == 0) {
fin = fopen(name, "r"), closefunc = fclose;
st.st_size = 0;
} else {
- char line[BUFSIZ];
-
- (void) snprintf(line, sizeof(line), cmd, name), name = line;
+ (void) snprintf(line, sizeof(line), cmd, name);
+ name = line;
fin = ftpd_popen(line, "r"), closefunc = ftpd_pclose;
st.st_size = -1;
st.st_blksize = BUFSIZ;
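
[Editor's note] Hoisting line[] out of the else block matters because name = line escapes that block; after the change the buffer's lifetime covers every later use of name. A small sketch of the corrected pattern (illustrative only, not the ftpd code):

#include <stdio.h>

static void
retrieve_like(const char *cmd, const char *name)
{
	char line[64];			/* function scope, as in the fix above */

	if (cmd != NULL) {
		/* cmd is a printf-style template, as in ftpd's retrieve() */
		snprintf(line, sizeof(line), cmd, name);
		name = line;		/* safe: line outlives this block */
	}
	printf("opening: %s\n", name);
}

int
main(void)
{
	retrieve_like(NULL, "file.txt");
	retrieve_like("tar cf - %s", "dir");
	return (0);
}
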
diff --git a/release/doc/en_US.ISO8859-1/relnotes/article.xml b/release/doc/en_US.ISO8859-1/relnotes/article.xml
index d9382dc..1021677 100644
--- a/release/doc/en_US.ISO8859-1/relnotes/article.xml
+++ b/release/doc/en_US.ISO8859-1/relnotes/article.xml
@@ -460,8 +460,8 @@
<para revision="260445">&man.byacc.1; has been updated to
version 20140101.</para>
- <para revision="261320"><application>OpenSSH</application> has
- been updated to 6.5p1.</para>
+ <para revision="296633"><application>OpenSSH</application> has
+ been updated to 7.2p2.</para>
<para revision="261344"><application>mdocml</application> has
been updated to version 1.12.3.</para>
diff --git a/share/misc/committers-src.dot b/share/misc/committers-src.dot
index 72ebf77..29997f5 100644
--- a/share/misc/committers-src.dot
+++ b/share/misc/committers-src.dot
@@ -247,6 +247,7 @@ ngie [label="Garrett Cooper\nngie@FreeBSD.org\n2014/07/27"]
nork [label="Norikatsu Shigemura\nnork@FreeBSD.org\n2009/06/09"]
np [label="Navdeep Parhar\nnp@FreeBSD.org\n2009/06/05"]
nwhitehorn [label="Nathan Whitehorn\nnwhitehorn@FreeBSD.org\n2008/07/03"]
+n_hibma [label="Nick Hibma\nn_hibma@FreeBSD.org\n1998/11/26"]
obrien [label="David E. O'Brien\nobrien@FreeBSD.org\n1996/10/29"]
olli [label="Oliver Fromme\nolli@FreeBSD.org\n2008/02/14"]
oshogbo [label="Mariusz Zaborski\noshogbo@FreeBSD.org\n2015/04/15"]
diff --git a/sys/arm64/arm64/busdma_bounce.c b/sys/arm64/arm64/busdma_bounce.c
index fc23a43..71a18dc 100644
--- a/sys/arm64/arm64/busdma_bounce.c
+++ b/sys/arm64/arm64/busdma_bounce.c
@@ -1,8 +1,11 @@
/*-
* Copyright (c) 1997, 1998 Justin T. Gibbs.
- * Copyright (c) 2015 The FreeBSD Foundation
+ * Copyright (c) 2015-2016 The FreeBSD Foundation
* All rights reserved.
*
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship of the FreeBSD Foundation.
+ *
* Portions of this software were developed by Semihalf
* under sponsorship of the FreeBSD Foundation.
*
@@ -62,6 +65,7 @@ enum {
BF_COULD_BOUNCE = 0x01,
BF_MIN_ALLOC_COMP = 0x02,
BF_KMEM_ALLOC = 0x04,
+ BF_COHERENT = 0x10,
};
struct bounce_zone;
@@ -113,6 +117,13 @@ static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters");
SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0,
"Total bounce pages");
+struct sync_list {
+ vm_offset_t vaddr; /* kva of client data */
+ bus_addr_t paddr; /* physical address */
+ vm_page_t pages; /* starting page of client data */
+ bus_size_t datacount; /* client data count */
+};
+
struct bus_dmamap {
struct bp_list bpages;
int pagesneeded;
@@ -125,6 +136,8 @@ struct bus_dmamap {
u_int flags;
#define DMAMAP_COULD_BOUNCE (1 << 0)
#define DMAMAP_FROM_DMAMEM (1 << 1)
+ int sync_count;
+ struct sync_list slist[];
};
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
@@ -171,9 +184,19 @@ bounce_bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->map_count = 0;
newtag->segments = NULL;
- if (parent != NULL && ((newtag->common.filter != NULL) ||
- ((parent->bounce_flags & BF_COULD_BOUNCE) != 0)))
- newtag->bounce_flags |= BF_COULD_BOUNCE;
+#ifdef notyet
+ if ((flags & BUS_DMA_COHERENT) != 0)
+ newtag->bounce_flags |= BF_COHERENT;
+#endif
+
+ if (parent != NULL) {
+ if ((newtag->common.filter != NULL ||
+ (parent->bounce_flags & BF_COULD_BOUNCE) != 0))
+ newtag->bounce_flags |= BF_COULD_BOUNCE;
+
+ /* Copy some flags from the parent */
+ newtag->bounce_flags |= parent->bounce_flags & BF_COHERENT;
+ }
if (newtag->common.lowaddr < ptoa((vm_paddr_t)Maxmem) ||
newtag->common.alignment > 1)
@@ -251,11 +274,14 @@ out:
}
static bus_dmamap_t
-alloc_dmamap(int flags)
+alloc_dmamap(bus_dma_tag_t dmat, int flags)
{
+ u_long mapsize;
bus_dmamap_t map;
- map = malloc(sizeof(*map), M_DEVBUF, flags | M_ZERO);
+ mapsize = sizeof(*map);
+ mapsize += sizeof(struct sync_list) * dmat->common.nsegments;
+ map = malloc(mapsize, M_DEVBUF, flags | M_ZERO);
if (map == NULL)
return (NULL);
@@ -288,7 +314,7 @@ bounce_bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
}
}
- *mapp = alloc_dmamap(M_NOWAIT);
+ *mapp = alloc_dmamap(dmat, M_NOWAIT);
if (*mapp == NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, ENOMEM);
@@ -360,7 +386,7 @@ bounce_bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
if ((map->flags & DMAMAP_FROM_DMAMEM) != 0)
panic("bounce_bus_dmamap_destroy: Invalid map freed\n");
- if (STAILQ_FIRST(&map->bpages) != NULL) {
+ if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY);
return (EBUSY);
}
@@ -421,7 +447,7 @@ bounce_bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
* Create the map, but don't set the could bounce flag as
* this allocation should never bounce;
*/
- *mapp = alloc_dmamap(mflags);
+ *mapp = alloc_dmamap(dmat, mflags);
if (*mapp == NULL) {
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->common.flags, ENOMEM);
@@ -644,8 +670,9 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs,
int *segp)
{
+ struct sync_list *sl;
bus_size_t sgsize;
- bus_addr_t curaddr;
+ bus_addr_t curaddr, sl_end;
int error;
if (segs == NULL)
@@ -660,6 +687,9 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
}
}
+ sl = map->slist + map->sync_count - 1;
+ sl_end = 0;
+
while (buflen > 0) {
curaddr = buf;
sgsize = MIN(buflen, dmat->common.maxsegsz);
@@ -669,6 +699,23 @@ bounce_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map,
sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK));
curaddr = add_bounce_page(dmat, map, 0, curaddr,
sgsize);
+ } else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
+ if (map->sync_count > 0)
+ sl_end = sl->paddr + sl->datacount;
+
+ if (map->sync_count == 0 || curaddr != sl_end) {
+ if (++map->sync_count > dmat->common.nsegments)
+ break;
+ sl++;
+ sl->vaddr = 0;
+ sl->paddr = curaddr;
+ sl->datacount = sgsize;
+ sl->pages = PHYS_TO_VM_PAGE(curaddr);
+ KASSERT(sl->pages != NULL,
+ ("%s: page at PA:0x%08lx is not in "
+ "vm_page_array", __func__, curaddr));
+ } else
+ sl->datacount += sgsize;
}
sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs,
segp);
@@ -693,9 +740,10 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
int *segp)
{
+ struct sync_list *sl;
bus_size_t sgsize, max_sgsize;
- bus_addr_t curaddr;
- vm_offset_t kvaddr, vaddr;
+ bus_addr_t curaddr, sl_pend;
+ vm_offset_t kvaddr, vaddr, sl_vend;
int error;
if (segs == NULL)
@@ -710,7 +758,11 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
}
}
+ sl = map->slist + map->sync_count - 1;
vaddr = (vm_offset_t)buf;
+ sl_pend = 0;
+ sl_vend = 0;
+
while (buflen > 0) {
/*
* Get the physical address for this segment.
@@ -735,6 +787,34 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
sgsize = MIN(sgsize, max_sgsize);
curaddr = add_bounce_page(dmat, map, kvaddr, curaddr,
sgsize);
+ } else if ((dmat->bounce_flags & BF_COHERENT) == 0) {
+ sgsize = MIN(sgsize, max_sgsize);
+ if (map->sync_count > 0) {
+ sl_pend = sl->paddr + sl->datacount;
+ sl_vend = sl->vaddr + sl->datacount;
+ }
+
+ if (map->sync_count == 0 ||
+ (kvaddr != 0 && kvaddr != sl_vend) ||
+ (curaddr != sl_pend)) {
+
+ if (++map->sync_count > dmat->common.nsegments)
+ goto cleanup;
+ sl++;
+ sl->vaddr = kvaddr;
+ sl->paddr = curaddr;
+ if (kvaddr != 0) {
+ sl->pages = NULL;
+ } else {
+ sl->pages = PHYS_TO_VM_PAGE(curaddr);
+ KASSERT(sl->pages != NULL,
+ ("%s: page at PA:0x%08lx is not "
+ "in vm_page_array", __func__,
+ curaddr));
+ }
+ sl->datacount = sgsize;
+ } else
+ sl->datacount += sgsize;
} else {
sgsize = MIN(sgsize, max_sgsize);
}
@@ -746,6 +826,7 @@ bounce_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
buflen -= sgsize;
}
+cleanup:
/*
* Did we fit?
*/
@@ -783,13 +864,87 @@ bounce_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
struct bounce_page *bpage;
- if ((map->flags & DMAMAP_COULD_BOUNCE) == 0)
- return;
-
while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
STAILQ_REMOVE_HEAD(&map->bpages, links);
free_bounce_page(dmat, bpage);
}
+
+ map->sync_count = 0;
+}
+
+static void
+dma_preread_safe(vm_offset_t va, vm_size_t size)
+{
+ /*
+ * Write back any partial cachelines immediately before and
+ * after the DMA region.
+ */
+ if (va & (dcache_line_size - 1))
+ cpu_dcache_wb_range(va, 1);
+ if ((va + size) & (dcache_line_size - 1))
+ cpu_dcache_wb_range(va + size, 1);
+
+ cpu_dcache_inv_range(va, size);
+}
+
+static void
+dma_dcache_sync(struct sync_list *sl, bus_dmasync_op_t op)
+{
+ uint32_t len, offset;
+ vm_page_t m;
+ vm_paddr_t pa;
+ vm_offset_t va, tempva;
+ bus_size_t size;
+
+ offset = sl->paddr & PAGE_MASK;
+ m = sl->pages;
+ size = sl->datacount;
+ pa = sl->paddr;
+
+ for ( ; size != 0; size -= len, pa += len, offset = 0, ++m) {
+ tempva = 0;
+ if (sl->vaddr == 0) {
+ len = min(PAGE_SIZE - offset, size);
+ tempva = pmap_quick_enter_page(m);
+ va = tempva | offset;
+ KASSERT(pa == (VM_PAGE_TO_PHYS(m) | offset),
+ ("unexpected vm_page_t phys: 0x%16lx != 0x%16lx",
+ VM_PAGE_TO_PHYS(m) | offset, pa));
+ } else {
+ len = sl->datacount;
+ va = sl->vaddr;
+ }
+
+ switch (op) {
+ case BUS_DMASYNC_PREWRITE:
+ case BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD:
+ cpu_dcache_wb_range(va, len);
+ break;
+ case BUS_DMASYNC_PREREAD:
+ /*
+ * An mbuf may start in the middle of a cacheline. There
+ * will be no cpu writes to the beginning of that line
+ * (which contains the mbuf header) while dma is in
+ * progress. Handle that case by doing a writeback of
+ * just the first cacheline before invalidating the
+ * overall buffer. Any mbuf in a chain may have this
+ * misalignment. Buffers which are not mbufs bounce if
+ * they are not aligned to a cacheline.
+ */
+ dma_preread_safe(va, len);
+ break;
+ case BUS_DMASYNC_POSTREAD:
+ case BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE:
+ cpu_dcache_inv_range(va, len);
+ break;
+ default:
+ panic("unsupported combination of sync operations: "
+ "0x%08x\n", op);
+ }
+
+ if (tempva != 0)
+ pmap_quick_remove_page(tempva);
+ }
}
static void
@@ -797,15 +952,9 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dmasync_op_t op)
{
struct bounce_page *bpage;
+ struct sync_list *sl, *end;
vm_offset_t datavaddr, tempvaddr;
- /*
- * XXX ARM64TODO:
- * This bus_dma implementation requires IO-Coherent architecutre.
- * If IO-Coherency is not guaranteed, cache operations have to be
- * added to this function.
- */
-
if ((op & BUS_DMASYNC_POSTREAD) != 0) {
/*
* Wait for any DMA operations to complete before the bcopy.
@@ -832,13 +981,26 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
(void *)bpage->vaddr, bpage->datacount);
if (tempvaddr != 0)
pmap_quick_remove_page(tempvaddr);
+ if ((dmat->bounce_flags & BF_COHERENT) == 0)
+ cpu_dcache_wb_range(bpage->vaddr,
+ bpage->datacount);
bpage = STAILQ_NEXT(bpage, links);
}
dmat->bounce_zone->total_bounced++;
+ } else if ((op & BUS_DMASYNC_PREREAD) != 0) {
+ while (bpage != NULL) {
+ if ((dmat->bounce_flags & BF_COHERENT) == 0)
+ cpu_dcache_wbinv_range(bpage->vaddr,
+ bpage->datacount);
+ bpage = STAILQ_NEXT(bpage, links);
+ }
}
if ((op & BUS_DMASYNC_POSTREAD) != 0) {
while (bpage != NULL) {
+ if ((dmat->bounce_flags & BF_COHERENT) == 0)
+ cpu_dcache_inv_range(bpage->vaddr,
+ bpage->datacount);
tempvaddr = 0;
datavaddr = bpage->datavaddr;
if (datavaddr == 0) {
@@ -858,7 +1020,20 @@ bounce_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map,
}
}
- if ((op & BUS_DMASYNC_PREWRITE) != 0) {
+ /*
+ * Cache maintenance for normal (non-COHERENT non-bounce) buffers.
+ */
+ if (map->sync_count != 0) {
+ sl = &map->slist[0];
+ end = &map->slist[map->sync_count];
+ CTR3(KTR_BUSDMA, "%s: tag %p op 0x%x "
+ "performing sync", __func__, dmat, op);
+
+ for ( ; sl != end; ++sl)
+ dma_dcache_sync(sl, op);
+ }
+
+ if ((op & (BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)) != 0) {
/*
* Wait for the bcopy to complete before any DMA operations.
*/
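
[Editor's note] The new sync_list handling above merges physically contiguous segments into one entry so the dcache maintenance loop walks fewer ranges. A userland model of just that coalescing step (assumed simplification of the sl_end/sync_count logic, with plain integers in place of bus addresses):

#include <stdio.h>
#include <stddef.h>

struct range { unsigned long paddr; size_t len; };

static size_t
coalesce(struct range *out, const unsigned long *paddr,
    const size_t *len, size_t n)
{
	size_t cnt = 0;

	for (size_t i = 0; i < n; i++) {
		if (cnt > 0 &&
		    out[cnt - 1].paddr + out[cnt - 1].len == paddr[i]) {
			out[cnt - 1].len += len[i];	/* extend previous */
		} else {
			out[cnt].paddr = paddr[i];
			out[cnt].len = len[i];
			cnt++;
		}
	}
	return (cnt);
}

int
main(void)
{
	unsigned long pa[] = { 0x1000, 0x2000, 0x5000 };
	size_t ln[] = { 0x1000, 0x1000, 0x800 };
	struct range out[3];
	size_t m = coalesce(out, pa, ln, 3);

	for (size_t i = 0; i < m; i++)
		printf("0x%lx + 0x%zx\n", out[i].paddr, out[i].len);
	return (0);	/* prints two ranges: the first pair merged */
}
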
diff --git a/sys/arm64/include/cpufunc.h b/sys/arm64/include/cpufunc.h
index 56f1acf..905d222 100644
--- a/sys/arm64/include/cpufunc.h
+++ b/sys/arm64/include/cpufunc.h
@@ -119,6 +119,11 @@ clrex(void)
__asm __volatile("clrex" : : : "memory");
}
+extern int64_t dcache_line_size;
+extern int64_t icache_line_size;
+extern int64_t idcache_line_size;
+extern int64_t dczva_line_size;
+
#define cpu_nullop() arm64_nullop()
#define cpufunc_nullop() arm64_nullop()
#define cpu_setttb(a) arm64_setttb(a)
diff --git a/sys/boot/geli/geliboot.c b/sys/boot/geli/geliboot.c
index df40c3b..10df707 100644
--- a/sys/boot/geli/geliboot.c
+++ b/sys/boot/geli/geliboot.c
@@ -155,7 +155,7 @@ geli_attach(struct dsk *dskp, const char *passphrase)
g_eli_crypto_hmac_update(&ctx, passphrase,
strlen(passphrase));
} else if (geli_e->md.md_iterations > 0) {
- printf("Calculating GELI Decryption Key disk%dp%d @ %lu"
+ printf("Calculating GELI Decryption Key disk%dp%d @ %d"
" iterations...\n", dskp->unit,
(dskp->slice > 0 ? dskp->slice : dskp->part),
geli_e->md.md_iterations);
diff --git a/sys/boot/geli/geliboot.h b/sys/boot/geli/geliboot.h
index 61e64f1..83df152 100644
--- a/sys/boot/geli/geliboot.h
+++ b/sys/boot/geli/geliboot.h
@@ -86,4 +86,7 @@ int geli_decrypt(u_int algo, u_char *data, size_t datasize,
const u_char *key, size_t keysize, const uint8_t* iv);
int geli_passphrase(char *pw, int disk, int parttype, int part, struct dsk *dskp);
+int geliboot_crypt(u_int algo, int enc, u_char *data, size_t datasize,
+ const u_char *key, size_t keysize, u_char *iv);
+
#endif /* _GELIBOOT_H_ */
diff --git a/sys/boot/i386/common/cons.h b/sys/boot/i386/common/cons.h
index fe00a13..73474fb 100644
--- a/sys/boot/i386/common/cons.h
+++ b/sys/boot/i386/common/cons.h
@@ -28,6 +28,7 @@ void xputc(int c);
void putchar(int c);
int getc(int fn);
int xgetc(int fn);
+int getchar(void);
int keyhit(unsigned int secs);
void getstr(char *cmdstr, size_t cmdstrsize);
diff --git a/sys/boot/i386/zfsboot/zfsboot.c b/sys/boot/i386/zfsboot/zfsboot.c
index 2562aab..e190b49 100644
--- a/sys/boot/i386/zfsboot/zfsboot.c
+++ b/sys/boot/i386/zfsboot/zfsboot.c
@@ -85,7 +85,6 @@ static const unsigned char flags[NOPT] = {
};
uint32_t opts;
-static const char *const dev_nm[NDEV] = {"ad", "da", "fd"};
static const unsigned char dev_maj[NDEV] = {30, 4, 2};
static char cmd[512];
@@ -399,7 +398,9 @@ probe_drive(struct dsk *dsk)
struct gpt_ent *ent;
unsigned part, entries_per_sec;
#endif
+#ifdef LOADER_GELI_SUPPORT
daddr_t slba, elba;
+#endif
struct dos_partition *dp;
char *sec;
unsigned i;
diff --git a/sys/cddl/boot/zfs/lz4.c b/sys/cddl/boot/zfs/lz4.c
index c29f861..b12122c 100644
--- a/sys/cddl/boot/zfs/lz4.c
+++ b/sys/cddl/boot/zfs/lz4.c
@@ -34,6 +34,8 @@
* $FreeBSD$
*/
+#include <arpa/inet.h>
+
static int LZ4_uncompress_unknownOutputSize(const char *source, char *dest,
int isize, int maxOutputSize);
diff --git a/sys/compat/linuxkpi/common/include/asm/pgtable.h b/sys/compat/linuxkpi/common/include/asm/pgtable.h
index 7bdab1c..c54eb04 100644
--- a/sys/compat/linuxkpi/common/include/asm/pgtable.h
+++ b/sys/compat/linuxkpi/common/include/asm/pgtable.h
@@ -31,6 +31,6 @@
#ifndef _ASM_PGTABLE_H_
#define _ASM_PGTABLE_H_
-typedef int pgprot_t;
+#include <linux/page.h>
#endif /* _ASM_PGTABLE_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/compiler.h b/sys/compat/linuxkpi/common/include/linux/compiler.h
index b6ea98f..c780abc 100644
--- a/sys/compat/linuxkpi/common/include/linux/compiler.h
+++ b/sys/compat/linuxkpi/common/include/linux/compiler.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* Copyright (c) 2015 François Tigeot
* All rights reserved.
*
@@ -50,14 +50,17 @@
#define __cond_lock(x,c) (c)
#define __bitwise
#define __devinitdata
+#define __deprecated
#define __init
#define __devinit
#define __devexit
#define __exit
+#define __rcu
#define __stringify(x) #x
#define __attribute_const__ __attribute__((__const__))
#undef __always_inline
#define __always_inline inline
+#define ____cacheline_aligned __aligned(CACHE_LINE_SIZE)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
@@ -72,6 +75,9 @@
#define barrier() __asm__ __volatile__("": : :"memory")
+#define ___PASTE(a,b) a##b
+#define __PASTE(a,b) ___PASTE(a,b)
+
#define ACCESS_ONCE(x) (*(volatile __typeof(x) *)&(x))
#define WRITE_ONCE(x,v) do { \
@@ -87,5 +93,9 @@
barrier(); \
__var; \
})
-
+
+#define lockless_dereference(p) READ_ONCE(p)
+
+#define _AT(T,X) ((T)(X))
+
#endif /* _LINUX_COMPILER_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/device.h b/sys/compat/linuxkpi/common/include/linux/device.h
index f4b8ec6..4b4c232 100644
--- a/sys/compat/linuxkpi/common/include/linux/device.h
+++ b/sys/compat/linuxkpi/common/include/linux/device.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -203,11 +203,15 @@ device_register(struct device *dev)
int unit;
bsddev = NULL;
+ unit = -1;
+
if (dev->devt) {
unit = MINOR(dev->devt);
bsddev = devclass_get_device(dev->class->bsdclass, unit);
- } else
- unit = -1;
+ } else if (dev->parent == NULL) {
+ bsddev = devclass_get_device(dev->class->bsdclass, 0);
+ }
+
if (bsddev == NULL)
bsddev = device_add_child(dev->parent->bsddev,
dev->class->kobj.name, unit);
diff --git a/sys/compat/linuxkpi/common/include/linux/err.h b/sys/compat/linuxkpi/common/include/linux/err.h
index 2366130..a976932 100644
--- a/sys/compat/linuxkpi/common/include/linux/err.h
+++ b/sys/compat/linuxkpi/common/include/linux/err.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013-2015 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,11 @@
#ifndef _LINUX_ERR_H_
#define _LINUX_ERR_H_
+#include <linux/compiler.h>
+
#define MAX_ERRNO 4095
-#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)
+#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
static inline void *
ERR_PTR(long error)
diff --git a/sys/compat/linuxkpi/common/include/linux/errno.h b/sys/compat/linuxkpi/common/include/linux/errno.h
index a043a3d..a972f4a 100644
--- a/sys/compat/linuxkpi/common/include/linux/errno.h
+++ b/sys/compat/linuxkpi/common/include/linux/errno.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,8 +38,23 @@
#define ECOMM ESTALE
#define ENODATA ECONNREFUSED
#define ENOIOCTLCMD ENOIOCTL
-#define ERESTARTSYS ERESTART
+/* Use same value as Linux, because BSD's ERESTART is negative */
+#define ERESTARTSYS 512
#define ENOTSUPP EOPNOTSUPP
#define ENONET EHOSTDOWN
+#define ERESTARTNOINTR 513
+#define ERESTARTNOHAND 514
+#define ERESTART_RESTARTBLOCK 516
+#define EPROBE_DEFER 517
+#define EOPENSTALE 518
+#define EBADHANDLE 521
+#define ENOTSYNC 522
+#define EBADCOOKIE 523
+#define ETOOSMALL 525
+#define ESERVERFAULT 526
+#define EBADTYPE 527
+#define EJUKEBOX 528
+#define EIOCBQUEUED 529
+
#endif /* _LINUX_ERRNO_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/ioctl.h b/sys/compat/linuxkpi/common/include/linux/ioctl.h
index 6f57906..fdedf9c 100644
--- a/sys/compat/linuxkpi/common/include/linux/ioctl.h
+++ b/sys/compat/linuxkpi/common/include/linux/ioctl.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,4 +33,6 @@
#include <sys/ioccom.h>
+#define _IOC_SIZE(cmd) IOCPARM_LEN(cmd)
+
#endif /* _LINUX_IOCTL_H_ */
diff --git a/sys/compat/linuxkpi/common/include/linux/jiffies.h b/sys/compat/linuxkpi/common/include/linux/jiffies.h
index 33629ec..9a85f61 100644
--- a/sys/compat/linuxkpi/common/include/linux/jiffies.h
+++ b/sys/compat/linuxkpi/common/include/linux/jiffies.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,16 +39,6 @@
#include <sys/kernel.h>
#include <sys/limits.h>
-static inline int
-msecs_to_jiffies(int msec)
-{
- struct timeval tv;
-
- tv.tv_sec = msec / 1000;
- tv.tv_usec = (msec % 1000) * 1000;
- return (tvtohz(&tv) - 1);
-}
-
#define jiffies ticks
#define jiffies_64 ticks
#define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz)
@@ -78,17 +68,35 @@ timespec_to_jiffies(const struct timespec *ts)
}
static inline int
-usecs_to_jiffies(const unsigned int u)
+msecs_to_jiffies(const u64 msec)
{
u64 result;
- result = ((u64)u * hz + 1000000 - 1) / 1000000;
+ result = howmany(msec * (u64)hz, 1000ULL);
if (result > MAX_JIFFY_OFFSET)
result = MAX_JIFFY_OFFSET;
return ((int)result);
}
+static inline int
+usecs_to_jiffies(const u64 u)
+{
+ u64 result;
+
+ result = howmany(u * (u64)hz, 1000000ULL);
+ if (result > MAX_JIFFY_OFFSET)
+ result = MAX_JIFFY_OFFSET;
+
+ return ((int)result);
+}
+
+static inline u64
+nsecs_to_jiffies(const u64 n)
+{
+ return (usecs_to_jiffies(howmany(n, 1000ULL)));
+}
+
static inline u64
get_jiffies_64(void)
{
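
[Editor's note] The rewritten msecs_to_jiffies()/usecs_to_jiffies() use howmany(), so sub-tick delays now round up to at least one tick instead of truncating toward zero. A quick sketch of the difference (hz = 100 is only an assumption for the demo):

#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))	/* as in sys/param.h */

int
main(void)
{
	unsigned long long hz = 100, msec = 1;
	unsigned long long jif = howmany(msec * hz, 1000ULL);

	printf("%llu ms -> %llu jiffies at hz=%llu\n", msec, jif, hz);
	return (0);	/* prints 1; truncating division would give 0 */
}
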
diff --git a/sys/compat/linuxkpi/common/include/linux/kdev_t.h b/sys/compat/linuxkpi/common/include/linux/kdev_t.h
index 8dea1ab..c0bb97e 100644
--- a/sys/compat/linuxkpi/common/include/linux/kdev_t.h
+++ b/sys/compat/linuxkpi/common/include/linux/kdev_t.h
@@ -35,4 +35,10 @@
#define MINOR(dev) minor((dev))
#define MKDEV(ma, mi) makedev((ma), (mi))
+static inline uint16_t
+old_encode_dev(dev_t dev)
+{
+ return ((MAJOR(dev) << 8) | MINOR(dev));
+}
+
#endif /* _LINUX_KDEV_T_H_ */
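
[Editor's note] old_encode_dev() packs the major number into the high byte and the minor into the low byte, matching Linux's legacy 16-bit dev_t layout. A trivial sketch of the encoding:

#include <stdio.h>
#include <stdint.h>

static uint16_t
old_encode(unsigned major, unsigned minor)
{
	return ((uint16_t)((major << 8) | (minor & 0xff)));
}

int
main(void)
{
	printf("0x%04x\n", old_encode(8, 1));	/* sda1-style: prints 0x0801 */
	return (0);
}
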
diff --git a/sys/compat/linuxkpi/common/include/linux/kernel.h b/sys/compat/linuxkpi/common/include/linux/kernel.h
index ef8d0ab..040c7de 100644
--- a/sys/compat/linuxkpi/common/include/linux/kernel.h
+++ b/sys/compat/linuxkpi/common/include/linux/kernel.h
@@ -62,11 +62,59 @@
#define KERN_INFO "<6>"
#define KERN_DEBUG "<7>"
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX >> 1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX >> 1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define S32_MAX ((s32)(U32_MAX >> 1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX >> 1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
+
+#define S8_C(x) x
+#define U8_C(x) x ## U
+#define S16_C(x) x
+#define U16_C(x) x ## U
+#define S32_C(x) x
+#define U32_C(x) x ## U
+#define S64_C(x) x ## LL
+#define U64_C(x) x ## ULL
+
#define BUILD_BUG_ON(x) CTASSERT(!(x))
-#define BUG() panic("BUG")
-#define BUG_ON(condition) do { if (condition) BUG(); } while(0)
-#define WARN_ON BUG_ON
+#define BUG() panic("BUG at %s:%d", __FILE__, __LINE__)
+#define BUG_ON(cond) do { \
+ if (cond) { \
+ panic("BUG ON %s failed at %s:%d", \
+ __stringify(cond), __FILE__, __LINE__); \
+ } \
+} while (0)
+
+#define WARN_ON(cond) ({ \
+ bool __ret = (cond); \
+ if (__ret) { \
+ printf("WARNING %s failed at %s:%d\n", \
+ __stringify(cond), __FILE__, __LINE__); \
+ } \
+ unlikely(__ret); \
+})
+
+#define WARN_ON_SMP(cond) WARN_ON(cond)
+
+#define WARN_ON_ONCE(cond) ({ \
+ static bool __warn_on_once; \
+ bool __ret = (cond); \
+ if (__ret && !__warn_on_once) { \
+ __warn_on_once = 1; \
+ printf("WARNING %s failed at %s:%d\n", \
+ __stringify(cond), __FILE__, __LINE__); \
+ } \
+ unlikely(__ret); \
+})
#undef ALIGN
#define ALIGN(x, y) roundup2((x), (y))
@@ -116,7 +164,7 @@
#define log_once(level,...) do { \
static bool __log_once; \
\
- if (!__log_once) { \
+ if (unlikely(!__log_once)) { \
__log_once = true; \
log(level, __VA_ARGS__); \
} \
@@ -132,7 +180,10 @@
log(LOG_ERR, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warning(fmt, ...) \
log(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn pr_warning
+#define pr_warn(...) \
+ pr_warning(__VA_ARGS__)
+#define pr_warn_once(fmt, ...) \
+ log_once(LOG_WARNING, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_notice(fmt, ...) \
log(LOG_NOTICE, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
@@ -143,11 +194,20 @@
printk(KERN_CONT fmt, ##__VA_ARGS__)
#ifndef WARN
-#define WARN(condition, format...) ({ \
- int __ret_warn_on = !!(condition); \
- if (unlikely(__ret_warn_on)) \
- pr_warning(format); \
- unlikely(__ret_warn_on); \
+#define WARN(condition, ...) ({ \
+ bool __ret_warn_on = (condition); \
+ if (unlikely(__ret_warn_on)) \
+ pr_warning(__VA_ARGS__); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN_ONCE
+#define WARN_ONCE(condition, ...) ({ \
+ bool __ret_warn_on = (condition); \
+ if (unlikely(__ret_warn_on)) \
+ pr_warn_once(__VA_ARGS__); \
+ unlikely(__ret_warn_on); \
})
#endif
@@ -171,11 +231,19 @@
#define min3(a, b, c) min(a, min(b,c))
#define max3(a, b, c) max(a, max(b,c))
-#define min_t(type, _x, _y) ((type)(_x) < (type)(_y) ? (type)(_x) : (type)(_y))
-#define max_t(type, _x, _y) ((type)(_x) > (type)(_y) ? (type)(_x) : (type)(_y))
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max)
#define clamp(x, lo, hi) min( max(x,lo), hi)
+#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
/*
* This looks more complex than it should be. But we need to
@@ -190,6 +258,7 @@
#define smp_processor_id() PCPU_GET(cpuid)
#define num_possible_cpus() mp_ncpus
#define num_online_cpus() mp_ncpus
+#define cpu_has_clflush (1)
typedef struct pm_message {
int event;
@@ -204,6 +273,13 @@ typedef struct pm_message {
#define DIV_ROUND_CLOSEST(x, divisor) (((x) + ((divisor) / 2)) / (divisor))
+#define DIV_ROUND_CLOSEST_ULL(x, divisor) ({ \
+ __typeof(divisor) __d = (divisor); \
+ unsigned long long __ret = (x) + (__d) / 2; \
+ __ret /= __d; \
+ __ret; \
+})
+
static inline uintmax_t
mult_frac(uintmax_t x, uintmax_t multiplier, uintmax_t divisor)
{
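
[Editor's note] The min_t()/max_t() rewrite above switches to statement expressions so each argument is evaluated exactly once. A userland sketch of why the old double-evaluation form was a hazard with side effects:

#include <stdio.h>

#define min_t(type, x, y) ({		\
	type __min1 = (x);		\
	type __min2 = (y);		\
	__min1 < __min2 ? __min1 : __min2; })

int
main(void)
{
	int i = 0;
	int v = min_t(int, i++, 5);	/* i++ evaluated exactly once */

	printf("v=%d i=%d\n", v, i);	/* prints v=0 i=1 */
	return (0);
}
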
diff --git a/sys/compat/linuxkpi/common/include/linux/page.h b/sys/compat/linuxkpi/common/include/linux/page.h
index acc9f03..2ad9eb6 100644
--- a/sys/compat/linuxkpi/common/include/linux/page.h
+++ b/sys/compat/linuxkpi/common/include/linux/page.h
@@ -2,7 +2,7 @@
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iX Systems, Inc.
* Copyright (c) 2010 Panasas, Inc.
- * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
+ * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,16 +38,33 @@
#include <machine/atomic.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
+#include <vm/pmap.h>
+
+typedef unsigned long pgprot_t;
#define page vm_page
-#define virt_to_page(x) PHYS_TO_VM_PAGE(vtophys((x)))
+#define virt_to_page(x) PHYS_TO_VM_PAGE(vtophys((x)))
+#define page_to_pfn(pp) (VM_PAGE_TO_PHYS((pp)) >> PAGE_SHIFT)
+#define pfn_to_page(pfn) (PHYS_TO_VM_PAGE((pfn) << PAGE_SHIFT))
+#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#define clear_page(page) memset((page), 0, PAGE_SIZE)
-#define pgprot_noncached(prot) VM_MEMATTR_UNCACHEABLE
-#define pgprot_writecombine(prot) VM_MEMATTR_WRITE_COMBINING
+#define pgprot_noncached(prot) ((pgprot_t)VM_MEMATTR_UNCACHEABLE)
+#define pgprot_writecombine(prot) ((pgprot_t)VM_MEMATTR_WRITE_COMBINING)
#undef PAGE_MASK
#define PAGE_MASK (~(PAGE_SIZE-1))
+/*
+ * Modifying PAGE_MASK in the above way breaks trunc_page, round_page,
+ * and btoc macros. Therefore, redefine them in a way that makes sense
+ * so the LinuxKPI consumers don't get totally broken behavior.
+ */
+#undef btoc
+#define btoc(x) (((vm_offset_t)(x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+#undef round_page
+#define round_page(x) ((((uintptr_t)(x)) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
+#undef trunc_page
+#define trunc_page(x) ((uintptr_t)(x) & ~(PAGE_SIZE - 1))
#endif /* _LINUX_PAGE_H_ */
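
[Editor's note] As the new comment above explains, Linux's PAGE_MASK is the inverse of the BSD convention, so trunc_page/round_page are redefined to keep behaving. A standalone sketch of the redefined macros (PAGE_SIZE = 4096 assumed for the demo):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))			/* Linux-style */
#define trunc_page(x)	((uintptr_t)(x) & ~(PAGE_SIZE - 1))
#define round_page(x)	(((uintptr_t)(x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int
main(void)
{
	uintptr_t addr = 0x12345;

	printf("trunc=0x%lx round=0x%lx\n",
	    (unsigned long)trunc_page(addr), (unsigned long)round_page(addr));
	return (0);	/* prints trunc=0x12000 round=0x13000 */
}
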
diff --git a/sys/conf/files b/sys/conf/files
index b58709a..7569044 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -10,13 +10,13 @@ acpi_quirks.h optional acpi \
no-obj no-implicit-rule before-depend \
clean "acpi_quirks.h"
bhnd_nvram_map.h optional bhndbus | bhnd \
- dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
- compile-with "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -h" \
+ dependency "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
+ compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -h" \
no-obj no-implicit-rule before-depend \
clean "bhnd_nvram_map.h"
bhnd_nvram_map_data.h optional bhndbus | bhnd \
- dependency "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
- compile-with "$S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -d" \
+ dependency "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/tools/nvram_map_gen.awk $S/dev/bhnd/nvram/nvram_map" \
+ compile-with "sh $S/dev/bhnd/tools/nvram_map_gen.sh $S/dev/bhnd/nvram/nvram_map -d" \
no-obj no-implicit-rule before-depend \
clean "bhnd_nvram_map_data.h"
#
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index ca31558..41912e3 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -332,7 +332,6 @@ dev/sfxge/common/efx_sram.c optional sfxge pci
dev/sfxge/common/efx_tx.c optional sfxge pci
dev/sfxge/common/efx_vpd.c optional sfxge pci
dev/sfxge/common/efx_wol.c optional sfxge pci
-dev/sfxge/common/hunt_ev.c optional sfxge pci
dev/sfxge/common/hunt_filter.c optional sfxge pci
dev/sfxge/common/hunt_intr.c optional sfxge pci
dev/sfxge/common/hunt_mac.c optional sfxge pci
diff --git a/sys/conf/kmod.mk b/sys/conf/kmod.mk
index 67f1646..3c1cd2f 100644
--- a/sys/conf/kmod.mk
+++ b/sys/conf/kmod.mk
@@ -417,7 +417,7 @@ bhnd_nvram_map.h: ${SYSDIR}/dev/bhnd/tools/nvram_map_gen.awk \
${SYSDIR}/dev/bhnd/tools/nvram_map_gen.sh \
${SYSDIR}/dev/bhnd/nvram/nvram_map
bhnd_nvram_map.h:
- ${SYSDIR}/dev/bhnd/tools/nvram_map_gen.sh \
+ sh ${SYSDIR}/dev/bhnd/tools/nvram_map_gen.sh \
${SYSDIR}/dev/bhnd/nvram/nvram_map -h
.endif
@@ -427,7 +427,7 @@ bhnd_nvram_map_data.h: ${SYSDIR}/dev/bhnd/tools/nvram_map_gen.awk \
${SYSDIR}/dev/bhnd/tools/nvram_map_gen.sh \
${SYSDIR}/dev/bhnd/nvram/nvram_map
bhnd_nvram_map_data.h:
- ${SYSDIR}/dev/bhnd/tools/nvram_map_gen.sh \
+ sh ${SYSDIR}/dev/bhnd/tools/nvram_map_gen.sh \
${SYSDIR}/dev/bhnd/nvram/nvram_map -d
.endif
diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c
index 3b5ab17..e832389 100644
--- a/sys/dev/bxe/bxe.c
+++ b/sys/dev/bxe/bxe.c
@@ -2454,14 +2454,11 @@ bxe_sp_post(struct bxe_softc *sc,
static int
bxe_probe(device_t dev)
{
- struct bxe_softc *sc;
struct bxe_device_type *t;
char *descbuf;
uint16_t did, sdid, svid, vid;
/* Find our device structure */
- sc = device_get_softc(dev);
- sc->dev = dev;
t = bxe_devs;
/* Get the data for the device to be probed. */
@@ -2470,10 +2467,6 @@ bxe_probe(device_t dev)
svid = pci_get_subvendor(dev);
sdid = pci_get_subdevice(dev);
- BLOGD(sc, DBG_LOAD,
- "%s(); VID = 0x%04X, DID = 0x%04X, SVID = 0x%04X, "
- "SDID = 0x%04X\n", __FUNCTION__, vid, did, svid, sdid);
-
/* Look through the list of known devices for a match. */
while (t->bxe_name != NULL) {
if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index b26a741..21e33b8 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -281,6 +281,30 @@ gpiobus_map_pin(device_t bus, uint32_t pin)
return (0);
}
+/* Release mapped pin */
+int
+gpiobus_release_pin(device_t bus, uint32_t pin)
+{
+ struct gpiobus_softc *sc;
+
+ sc = device_get_softc(bus);
+ /* Consistency check. */
+ if (pin >= sc->sc_npins) {
+ device_printf(bus,
+ "gpiobus_map_pin: invalid pin %d, max=%d\n",
+ pin, sc->sc_npins - 1);
+ return (-1);
+ }
+
+ if (!sc->sc_pins[pin].mapped) {
+ device_printf(bus, "gpiobus_map_pin: pin %d is not mapped\n", pin);
+ return (-1);
+ }
+ sc->sc_pins[pin].mapped = 0;
+
+ return (0);
+}
+
static int
gpiobus_parse_pins(struct gpiobus_softc *sc, device_t child, int mask)
{
diff --git a/sys/dev/gpio/gpiobusvar.h b/sys/dev/gpio/gpiobusvar.h
index 61901a3..bc4e633 100644
--- a/sys/dev/gpio/gpiobusvar.h
+++ b/sys/dev/gpio/gpiobusvar.h
@@ -138,6 +138,7 @@ int gpiobus_init_softc(device_t);
int gpiobus_alloc_ivars(struct gpiobus_ivar *);
void gpiobus_free_ivars(struct gpiobus_ivar *);
int gpiobus_map_pin(device_t, uint32_t);
+int gpiobus_release_pin(device_t, uint32_t);
extern driver_t gpiobus_driver;
diff --git a/sys/dev/gpio/gpiokeys.c b/sys/dev/gpio/gpiokeys.c
index a1b551c..d2662e1 100644
--- a/sys/dev/gpio/gpiokeys.c
+++ b/sys/dev/gpio/gpiokeys.c
@@ -57,6 +57,8 @@ __FBSDID("$FreeBSD$");
#include <dev/gpio/gpiobusvar.h>
#include <dev/gpio/gpiokeys.h>
+#define KBD_DRIVER_NAME "gpiokeys"
+
#define GPIOKEYS_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
#define GPIOKEYS_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define GPIOKEYS_LOCK_INIT(_sc) \
@@ -357,12 +359,12 @@ gpiokeys_detach_key(struct gpiokeys_softc *sc, struct gpiokey *key)
if (key->irq_res)
bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
key->irq_rid, key->irq_res);
- if (key->pin)
- gpio_pin_release(key->pin);
if (callout_pending(&key->repeat_callout))
callout_drain(&key->repeat_callout);
if (callout_pending(&key->debounce_callout))
callout_drain(&key->debounce_callout);
+ if (key->pin)
+ gpio_pin_release(key->pin);
GPIOKEY_UNLOCK(key);
GPIOKEY_LOCK_DESTROY(key);
@@ -397,7 +399,7 @@ gpiokeys_attach(device_t dev)
GPIOKEYS_LOCK_INIT(sc);
unit = device_get_unit(dev);
- kbd_init_struct(kbd, "gpiokeys", KB_OTHER, unit, 0, 0, 0);
+ kbd_init_struct(kbd, KBD_DRIVER_NAME, KB_OTHER, unit, 0, 0, 0);
kbd->kb_data = (void *)sc;
sc->sc_mode = K_XLATE;
@@ -468,6 +470,7 @@ static int
gpiokeys_detach(device_t dev)
{
struct gpiokeys_softc *sc;
+ keyboard_t *kbd;
int i;
sc = device_get_softc(dev);
@@ -475,6 +478,14 @@ gpiokeys_detach(device_t dev)
for (i = 0; i < sc->sc_total_keys; i++)
gpiokeys_detach_key(sc, &sc->sc_keys[i]);
+ kbd = kbd_get_keyboard(kbd_find_keyboard(KBD_DRIVER_NAME,
+ device_get_unit(dev)));
+
+#ifdef KBD_INSTALL_CDEV
+ kbd_detach(kbd);
+#endif
+ kbd_unregister(kbd);
+
GPIOKEYS_LOCK_DESTROY(sc);
if (sc->sc_keys)
free(sc->sc_keys, M_DEVBUF);
diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c
index 4dfde93..6a38cc1 100644
--- a/sys/dev/gpio/ofw_gpiobus.c
+++ b/sys/dev/gpio/ofw_gpiobus.c
@@ -139,10 +139,17 @@ gpio_pin_get_by_ofw_name(device_t consumer, phandle_t node,
void
gpio_pin_release(gpio_pin_t gpio)
{
+ device_t busdev;
if (gpio == NULL)
return;
+ KASSERT(gpio->dev != NULL, ("invalid pin state"));
+
+ busdev = GPIO_GET_BUS(gpio->dev);
+ if (busdev != NULL)
+ gpiobus_release_pin(busdev, gpio->pin);
+
/* XXXX Unreserve pin. */
free(gpio, M_DEVBUF);
}
diff --git a/sys/dev/ixl/i40e_adminq.c b/sys/dev/ixl/i40e_adminq.c
index cc1be99..792f832 100644
--- a/sys/dev/ixl/i40e_adminq.c
+++ b/sys/dev/ixl/i40e_adminq.c
@@ -44,8 +44,8 @@
**/
static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
{
- return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
- desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
+ return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase)) ||
+ (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
}
/**
@@ -555,6 +555,24 @@ shutdown_arq_out:
}
/**
+ * i40e_resume_aq - resume AQ processing from 0
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_resume_aq(struct i40e_hw *hw)
+{
+ /* Registers are reset after PF reset */
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ i40e_config_asq_regs(hw);
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ i40e_config_arq_regs(hw);
+}
+
+/**
* i40e_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
@@ -567,10 +585,11 @@ shutdown_arq_out:
**/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
- enum i40e_status_code ret_code;
- u16 eetrack_lo, eetrack_hi;
u16 cfg_ptr, oem_hi, oem_lo;
+ u16 eetrack_lo, eetrack_hi;
+ enum i40e_status_code ret_code;
int retry = 0;
+
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
(hw->aq.num_asq_entries == 0) ||
@@ -579,8 +598,6 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
-
- /* initialize spin locks */
i40e_init_spinlock(&hw->aq.asq_spinlock);
i40e_init_spinlock(&hw->aq.arq_spinlock);
@@ -681,8 +698,6 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
-
- /* destroy the spinlocks */
i40e_destroy_spinlock(&hw->aq.asq_spinlock);
i40e_destroy_spinlock(&hw->aq.arq_spinlock);
@@ -708,7 +723,6 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
-
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
@@ -899,7 +913,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- /* ugh! delay while spin_lock */
i40e_msec_delay(1);
total_delay++;
} while (total_delay < hw->aq.asq_cmd_timeout);
@@ -993,6 +1006,9 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
u16 flags;
u16 ntu;
+ /* pre-clean the event info */
+ i40e_memset(&e->desc, 0, sizeof(e->desc), I40E_NONDMA_MEM);
+
/* take the lock before we start messing with the ring */
i40e_acquire_spinlock(&hw->aq.arq_spinlock);
@@ -1065,13 +1081,6 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
-clean_arq_element_out:
- /* Set pending if needed, unlock and return */
- if (pending != NULL)
- *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
-clean_arq_element_err:
- i40e_release_spinlock(&hw->aq.arq_spinlock);
-
if (i40e_is_nvm_update_op(&e->desc)) {
if (hw->aq.nvm_release_on_done) {
i40e_release_nvm(hw);
@@ -1092,19 +1101,13 @@ clean_arq_element_err:
}
}
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
+ if (pending != NULL)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+clean_arq_element_err:
+ i40e_release_spinlock(&hw->aq.arq_spinlock);
+
return ret_code;
}
-void i40e_resume_aq(struct i40e_hw *hw)
-{
- /* Registers are reset after PF reset */
- hw->aq.asq.next_to_use = 0;
- hw->aq.asq.next_to_clean = 0;
-
- i40e_config_asq_regs(hw);
-
- hw->aq.arq.next_to_use = 0;
- hw->aq.arq.next_to_clean = 0;
-
- i40e_config_arq_regs(hw);
-}
diff --git a/sys/dev/ixl/i40e_adminq.h b/sys/dev/ixl/i40e_adminq.h
index ca0e1e9..6448b8b 100644
--- a/sys/dev/ixl/i40e_adminq.h
+++ b/sys/dev/ixl/i40e_adminq.h
@@ -159,8 +159,8 @@ static INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
}
/* general information */
-#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
u16 opcode);
diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h
index 8402c93..7c0ace2 100644
--- a/sys/dev/ixl/i40e_adminq_cmd.h
+++ b/sys/dev/ixl/i40e_adminq_cmd.h
@@ -154,6 +154,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_port_parameters = 0x0203,
i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
i40e_aqc_opc_set_switch_config = 0x0205,
+ i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
+ i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
i40e_aqc_opc_add_vsi = 0x0210,
i40e_aqc_opc_update_vsi_parameters = 0x0211,
@@ -700,6 +702,20 @@ struct i40e_aqc_set_switch_config {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
+/* Read Receive control registers (direct 0x0206)
+ * Write Receive control registers (direct 0x0207)
+ * used for accessing Rx control registers that can be
+ * slow and need special handling when under high Rx load
+ */
+struct i40e_aqc_rx_ctl_reg_read_write {
+ __le32 reserved1;
+ __le32 address;
+ __le32 reserved2;
+ __le32 value;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
+
/* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes)
@@ -853,7 +869,7 @@ struct i40e_aqc_vsi_properties_data {
u8 up_enable_bits;
u8 sched_reserved;
/* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress table */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
u8 cmd_reserved[8];
/* last 32 bytes are written by FW */
__le16 qs_handle[8];
@@ -1087,6 +1103,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
#define I40E_AQC_SET_VSI_DEFAULT 0x08
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
+#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
__le16 vlan_tag;
@@ -1492,7 +1509,8 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data {
u8 reserved1[28];
};
-I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data);
+I40E_CHECK_STRUCT_LEN(0x40,
+ i40e_aqc_configure_switching_comp_ets_bw_limit_data);
/* Configure Switching Component Bandwidth Allocation per Tc
* (indirect 0x0417)
@@ -1887,7 +1905,7 @@ struct i40e_aqc_nvm_config_read {
#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0
#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1
__le16 element_count;
- __le16 element_id; /* Feature/field ID */
+ __le16 element_id; /* Feature/field ID */
__le16 element_id_msw; /* MSWord of field ID */
__le32 address_high;
__le32 address_low;
@@ -1908,9 +1926,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
+ (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_FEATURE 0
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -1934,7 +1953,7 @@ I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
/* OEM Post Update (indirect 0x0720)
* no command data struct used
*/
- struct i40e_aqc_nvm_oem_post_update {
+struct i40e_aqc_nvm_oem_post_update {
#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01
u8 sel_data;
u8 reserved[7];
@@ -2224,7 +2243,8 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp);
*/
struct i40e_aqc_lldp_stop_start_specific_agent {
#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
-#define I40E_AQC_START_SPECIFIC_AGENT_MASK (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
+ (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
@@ -2246,7 +2266,7 @@ struct i40e_aqc_add_udp_tunnel {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
struct i40e_aqc_add_udp_tunnel_completion {
- __le16 udp_port;
+ __le16 udp_port;
u8 filter_entry_index;
u8 multiple_pfs;
#define I40E_AQC_SINGLE_PF 0x0
diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c
index 4f6fbab..5a8a8ae 100644
--- a/sys/dev/ixl/i40e_common.c
+++ b/sys/dev/ixl/i40e_common.c
@@ -1103,7 +1103,11 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- for (cnt = 0; cnt < grst_del + 10; cnt++) {
+
+ /* It can take up to 15 secs for GRST steady state */
+ grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */
+
+ for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break;
@@ -2012,12 +2016,19 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
- if (set)
+ if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+ if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1))
+ flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+ }
cmd->promiscuous_flags = CPU_TO_LE16(flags);
cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1))
+ cmd->valid_flags |= CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_TX);
cmd->seid = CPU_TO_LE16(seid);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2246,6 +2257,9 @@ enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -2257,6 +2271,9 @@ enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
+ vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
+
return status;
}
@@ -3367,41 +3384,73 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
switch (id) {
case I40E_AQ_CAP_ID_SWITCH_MODE:
p->switch_mode = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Switch mode = %d\n",
+ p->switch_mode);
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Management Mode = %d\n",
+ p->management_mode);
break;
case I40E_AQ_CAP_ID_NPAR_ACTIVE:
p->npar_enable = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: NPAR enable = %d\n",
+ p->npar_enable);
break;
case I40E_AQ_CAP_ID_OS2BMC_CAP:
p->os2bmc = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: OS2BMC = %d\n", p->os2bmc);
break;
case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
p->valid_functions = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Valid Functions = %d\n",
+ p->valid_functions);
break;
case I40E_AQ_CAP_ID_SRIOV:
if (number == 1)
p->sr_iov_1_1 = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: SR-IOV = %d\n",
+ p->sr_iov_1_1);
break;
case I40E_AQ_CAP_ID_VF:
p->num_vfs = number;
p->vf_base_id = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VF count = %d\n",
+ p->num_vfs);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VF base_id = %d\n",
+ p->vf_base_id);
break;
case I40E_AQ_CAP_ID_VMDQ:
if (number == 1)
p->vmdq = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VMDQ = %d\n", p->vmdq);
break;
case I40E_AQ_CAP_ID_8021QBG:
if (number == 1)
p->evb_802_1_qbg = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: 802.1Qbg = %d\n", number);
break;
case I40E_AQ_CAP_ID_8021QBR:
if (number == 1)
p->evb_802_1_qbh = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: 802.1Qbh = %d\n", number);
break;
case I40E_AQ_CAP_ID_VSI:
p->num_vsis = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: VSI count = %d\n",
+ p->num_vsis);
break;
case I40E_AQ_CAP_ID_DCB:
if (number == 1) {
@@ -3409,33 +3458,68 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->enabled_tcmap = logical_id;
p->maxtc = phys_id;
}
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: DCB = %d\n", p->dcb);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: TC Mapping = %d\n",
+ logical_id);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: TC Max = %d\n", p->maxtc);
break;
case I40E_AQ_CAP_ID_FCOE:
if (number == 1)
p->fcoe = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: FCOE = %d\n", p->fcoe);
break;
case I40E_AQ_CAP_ID_ISCSI:
if (number == 1)
p->iscsi = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: iSCSI = %d\n", p->iscsi);
break;
case I40E_AQ_CAP_ID_RSS:
p->rss = TRUE;
p->rss_table_size = number;
p->rss_table_entry_width = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: RSS = %d\n", p->rss);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: RSS table size = %d\n",
+ p->rss_table_size);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: RSS table width = %d\n",
+ p->rss_table_entry_width);
break;
case I40E_AQ_CAP_ID_RXQ:
p->num_rx_qp = number;
p->base_queue = phys_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Rx QP = %d\n", number);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: base_queue = %d\n",
+ p->base_queue);
break;
case I40E_AQ_CAP_ID_TXQ:
p->num_tx_qp = number;
p->base_queue = phys_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Tx QP = %d\n", number);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: base_queue = %d\n",
+ p->base_queue);
break;
case I40E_AQ_CAP_ID_MSIX:
p->num_msix_vectors = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX vector count = %d\n",
+ p->num_msix_vectors);
break;
case I40E_AQ_CAP_ID_VF_MSIX:
p->num_msix_vectors_vf = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX VF vector count = %d\n",
+ p->num_msix_vectors_vf);
break;
case I40E_AQ_CAP_ID_FLEX10:
if (major_rev == 1) {
@@ -3452,41 +3536,72 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
}
p->flex10_mode = logical_id;
p->flex10_status = phys_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Flex10 mode = %d\n",
+ p->flex10_mode);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Flex10 status = %d\n",
+ p->flex10_status);
break;
case I40E_AQ_CAP_ID_CEM:
if (number == 1)
p->mgmt_cem = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: CEM = %d\n", p->mgmt_cem);
break;
case I40E_AQ_CAP_ID_IWARP:
if (number == 1)
p->iwarp = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: iWARP = %d\n", p->iwarp);
break;
case I40E_AQ_CAP_ID_LED:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->led[phys_id] = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: LED - PIN %d\n", phys_id);
break;
case I40E_AQ_CAP_ID_SDP:
if (phys_id < I40E_HW_CAP_MAX_GPIO)
p->sdp[phys_id] = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: SDP - PIN %d\n", phys_id);
break;
case I40E_AQ_CAP_ID_MDIO:
if (number == 1) {
p->mdio_port_num = phys_id;
p->mdio_port_mode = logical_id;
}
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MDIO port number = %d\n",
+ p->mdio_port_num);
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MDIO port mode = %d\n",
+ p->mdio_port_mode);
break;
case I40E_AQ_CAP_ID_1588:
if (number == 1)
p->ieee_1588 = TRUE;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: IEEE 1588 = %d\n",
+ p->ieee_1588);
break;
case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
p->fd = TRUE;
p->fd_filters_guaranteed = number;
p->fd_filters_best_effort = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Flow Director = 1\n");
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Guaranteed FD filters = %d\n",
+ p->fd_filters_guaranteed);
break;
case I40E_AQ_CAP_ID_WSR_PROT:
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: wr_csr_prot = 0x%llX\n\n",
+ (p->wr_csr_prot & 0xffff));
break;
default:
break;
@@ -4132,7 +4247,7 @@ enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
}
/**
- * i40_aq_add_pvirt - Instantiate a Port Virtualizer on a port
+ * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port
* @hw: pointer to the hw struct
* @flags: component flags
* @mac_seid: uplink seid (MAC SEID)
@@ -4968,7 +5083,7 @@ enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
return ret;
/* Read the PF Queue Filter control register */
- val = rd32(hw, I40E_PFQF_CTL_0);
+ val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
/* Program required PE hash buckets for the PF */
val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
@@ -5005,7 +5120,7 @@ enum i40e_status_code i40e_set_filter_control(struct i40e_hw *hw,
if (settings->enable_macvlan)
val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
- wr32(hw, I40E_PFQF_CTL_0, val);
+ i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
return I40E_SUCCESS;
}
@@ -5601,6 +5716,457 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
}
/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr,
+ u16 *value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_ADDRESS) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_read_end;
+ }
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_READ) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (!status) {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ } else {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't read register value from external PHY.\n");
+ }
+
+phy_read_end:
+ return status;
+}
+
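
A minimal usage sketch (illustrative only, not part of this change): read one LED provisioning register through the helper above, using the shared-code constants added elsewhere in this diff; port_num and the other locals are assumed to be set up as in i40e_blink_phy_link_led() below.

    /* Sketch: read a PHY LED register over MDIO (names as in this diff). */
    u16 led_reg = 0;
    u8 phy_addr = i40e_get_phy_address(hw, port_num);

    if (i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
        I40E_PHY_LED_PROV_REG_1, phy_addr, &led_reg) == I40E_SUCCESS)
        i40e_debug(hw, I40E_DEBUG_PHY, "LED reg = 0x%04x\n", led_reg);
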
+/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr,
+ u16 value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u32 command = 0;
+ u16 retry = 1000;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+
+ command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
+ (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_ADDRESS) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ goto phy_write_end;
+ }
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_OPCODE_WRITE) |
+ (I40E_MDIO_STCODE) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK) |
+ (I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
+ status = I40E_ERR_TIMEOUT;
+ retry = 1000;
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+phy_write_end:
+ return status;
+}
+
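
The write helper pairs with the read above; a condensed read-modify-write sketch (illustrative, with led_addr and phy_addr assumed to be already known) mirrors what i40e_led_set_phy() does further down:

    /* Sketch: set the manual-on bit in a PHY LED register. */
    u16 val = 0;

    if (i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
        phy_addr, &val) == I40E_SUCCESS) {
        val |= I40E_PHY_LED_MANUAL_ON;
        (void)i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
            led_addr, phy_addr, val);
    }
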
+/**
+ * i40e_get_phy_address
+ * @hw: pointer to the HW structure
+ * @dev_num: PHY port number whose address we want
+ *
+ * Returns the PHY address for the current port
+ **/
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
+{
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
+
+ return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
+}
+
+/**
+ * i40e_blink_phy_led
+ * @hw: pointer to the HW structure
+ * @time: how long the LED will blink, in seconds
+ * @interval: gap between LED on and off, in milliseconds
+ *
+ * Blinks PHY link LED
+ **/
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u32 i;
+ u16 led_ctl = 0;
+ u16 gpio_led_port;
+ u16 led_reg;
+ u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+ u8 phy_addr = 0;
+ u8 port_num;
+
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ led_addr++) {
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
+ if (status)
+ goto phy_blinking_end;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_write_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
+ if (status)
+ goto phy_blinking_end;
+ break;
+ }
+ }
+
+ if (time > 0 && interval > 0) {
+ for (i = 0; i < time * 1000; i += interval) {
+ status = i40e_read_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
+ if (status)
+ goto restore_config;
+ if (led_reg & I40E_PHY_LED_MANUAL_ON)
+ led_reg = 0;
+ else
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ status = i40e_write_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
+ if (status)
+ goto restore_config;
+ i40e_msec_delay(interval);
+ }
+ }
+
+restore_config:
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+ phy_addr, led_ctl);
+
+phy_blinking_end:
+ return status;
+}
+
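
For reference, a call such as the following (the 5 and 500 are example values only) blinks the link LED for roughly five seconds at a 500 ms interval and then restores the saved LED control value:

    /* Sketch: blink the PHY link LED, example parameters only. */
    if (i40e_blink_phy_link_led(hw, 5, 500) != I40E_SUCCESS)
        i40e_debug(hw, I40E_DEBUG_PHY, "LED blink failed\n");
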
+/**
+ * i40e_led_get_phy - return current on/off mode
+ * @hw: pointer to the hw struct
+ * @led_addr: pointer used to return the address of the LED register found
+ * @val: pointer used to return the original value of that register
+ *
+ **/
+enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u16 gpio_led_port;
+ u8 phy_addr = 0;
+ u16 reg_val;
+ u16 temp_addr;
+ u8 port_num;
+ u32 i;
+
+ temp_addr = I40E_PHY_LED_PROV_REG_1;
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
+ temp_addr++) {
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr, &reg_val);
+ if (status)
+ return status;
+ *val = reg_val;
+ if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
+ *led_addr = temp_addr;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_led_set_phy
+ * @hw: pointer to the HW structure
+ * @on: TRUE to turn the LED on, FALSE to turn it off
+ * @led_addr: address of the LED register to use
+ * @mode: original value plus a bit indicating set or ignore
+ * Sets LEDs on or off when controlled by the PHY
+ *
+ **/
+enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ u16 led_ctl = 0;
+ u16 led_reg = 0;
+ u8 phy_addr = 0;
+ u8 port_num;
+ u32 i;
+
+ i = rd32(hw, I40E_PFGEN_PORTNUM);
+ port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
+ phy_addr = i40e_get_phy_address(hw, port_num);
+
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+ phy_addr, &led_reg);
+ if (status)
+ return status;
+ led_ctl = led_reg;
+ if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
+ led_reg = 0;
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
+ if (status)
+ return status;
+ }
+ status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
+ if (status)
+ goto restore_config;
+ if (on)
+ led_reg = I40E_PHY_LED_MANUAL_ON;
+ else
+ led_reg = 0;
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
+ if (status)
+ goto restore_config;
+ if (mode & I40E_PHY_LED_MODE_ORIG) {
+ led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
+ status = i40e_write_phy_register(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
+ }
+ return status;
+restore_config:
+ status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
+ phy_addr, led_ctl);
+ return status;
+}
+
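
Taken together, i40e_led_get_phy() and i40e_led_set_phy() let a caller force the LED on and later put the original mode back; a sketch of that sequence (illustrative only):

    /* Sketch: force the PHY-controlled LED on, then restore it. */
    u16 led_addr = 0, orig_mode = 0;

    if (i40e_led_get_phy(hw, &led_addr, &orig_mode) == I40E_SUCCESS) {
        (void)i40e_led_set_phy(hw, TRUE, led_addr, 0);
        /* ... later ... */
        (void)i40e_led_set_phy(hw, FALSE, led_addr,
            orig_mode | I40E_PHY_LED_MODE_ORIG);
    }
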
+/**
+ * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: ptr to register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to read the Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ if (reg_val == NULL)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
+
+ cmd_resp->address = CPU_TO_LE32(reg_addr);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS)
+ *reg_val = LE32_TO_CPU(cmd_resp->value);
+
+ return status;
+}
+
+/**
+ * i40e_read_rx_ctl - read from an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ **/
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ bool use_register;
+ int retry = 5;
+ u32 val = 0;
+
+ use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ i40e_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ val = rd32(hw, reg_addr);
+
+ return val;
+}
+
+/**
+ * i40e_aq_rx_ctl_write_register - use FW to write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Use the firmware to write to an Rx control register,
+ * especially useful if the Rx unit is under heavy pressure
+ **/
+enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_rx_ctl_reg_read_write *cmd =
+ (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
+
+ cmd->address = CPU_TO_LE32(reg_addr);
+ cmd->value = CPU_TO_LE32(reg_val);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_write_rx_ctl - write to an Rx control register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ **/
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ bool use_register;
+ int retry = 5;
+
+ use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ if (!use_register) {
+do_retry:
+ status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
+ reg_val, NULL);
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
+ i40e_msec_delay(1);
+ retry--;
+ goto do_retry;
+ }
+ }
+
+ /* if the AQ access failed, try the old-fashioned way */
+ if (status || use_register)
+ wr32(hw, reg_addr, reg_val);
+}
+
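
These wrappers are meant to replace raw rd32()/wr32() accesses to Rx control registers (see the i40e_set_filter_control hunk above); the usual pattern is a simple read-modify-write:

    /* Sketch: read-modify-write an Rx control register via the wrappers. */
    u32 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);

    val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;	/* clear a field, as an example */
    i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
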
+/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
* @v_opcode: opcodes for VF-PF communication
diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c
index 133f690..9b77627 100644
--- a/sys/dev/ixl/i40e_osdep.c
+++ b/sys/dev/ixl/i40e_osdep.c
@@ -155,26 +155,24 @@ i40e_release_spinlock(struct i40e_spinlock *lock)
void
i40e_destroy_spinlock(struct i40e_spinlock *lock)
{
- mtx_destroy(&lock->mutex);
+ if (mtx_initialized(&lock->mutex))
+ mtx_destroy(&lock->mutex);
}
/*
-** i40e_debug_d - OS dependent version of shared code debug printing
-*/
-void i40e_debug_d(void *hw, u32 mask, char *fmt, ...)
+ * Helper function for debug statement printing
+ */
+void
+i40e_debug_d(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
{
- char buf[512];
- va_list args;
+ va_list args;
- if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
- return;
+ if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
+ return;
va_start(args, fmt);
- vsnprintf(buf, sizeof(buf), fmt, args);
+ device_printf(((struct i40e_osdep *)hw->back)->dev, fmt, args);
va_end(args);
-
- /* the debug string is already formatted with a newline */
- printf("%s", buf);
}
u16
diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h
index 67a4b18..f5ca7f4 100644
--- a/sys/dev/ixl/i40e_osdep.h
+++ b/sys/dev/ixl/i40e_osdep.h
@@ -54,8 +54,6 @@
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
-#define ASSERT(x) if(!(x)) panic("IXL: x")
-
#define i40e_usec_delay(x) DELAY(x)
#define i40e_msec_delay(x) DELAY(1000*(x))
@@ -169,21 +167,26 @@ struct i40e_dma_mem {
int flags;
};
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
+};
+
struct i40e_hw; /* forward decl */
u16 i40e_read_pci_cfg(struct i40e_hw *, u32);
void i40e_write_pci_cfg(struct i40e_hw *, u32, u16);
+/*
+** i40e_debug - OS dependent version of shared code debug printing
+*/
+enum i40e_debug_mask;
#define i40e_debug(h, m, s, ...) i40e_debug_d(h, m, s, ##__VA_ARGS__)
-extern void i40e_debug_d(void *hw, u32 mask, char *fmt_str, ...);
-
-struct i40e_virt_mem {
- void *va;
- u32 size;
-};
+extern void i40e_debug_d(struct i40e_hw *hw, enum i40e_debug_mask mask,
+ char *fmt_str, ...);
/*
-** This hardware supports either 16 or 32 byte rx descriptors
-** we default here to the larger size.
+** This hardware supports either 16 or 32 byte rx descriptors;
+** the driver only uses the 32 byte kind.
*/
#define i40e_rx_desc i40e_32byte_rx_desc
diff --git a/sys/dev/ixl/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h
index 0f28573..ab981e2 100644
--- a/sys/dev/ixl/i40e_prototype.h
+++ b/sys/dev/ixl/i40e_prototype.h
@@ -75,7 +75,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void *desc, void *buffer, u16 buf_len);
void i40e_idle_aq(struct i40e_hw *hw);
-void i40e_resume_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
@@ -84,6 +83,12 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
+enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode);
+enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val);
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
/* admin send queue commands */
@@ -483,4 +488,19 @@ enum i40e_status_code i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid);
+enum i40e_status_code i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
+enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
+ u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
+ u16 reg, u8 phy_addr, u16 value);
+u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/sys/dev/ixl/i40e_register.h b/sys/dev/ixl/i40e_register.h
index db23251..9fbfc7b 100644
--- a/sys/dev/ixl/i40e_register.h
+++ b/sys/dev/ixl/i40e_register.h
@@ -2054,6 +2054,14 @@
#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
+#define I40E_GL_PRS_FVBM_MAX_INDEX 3
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0
+#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8
+#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT)
+#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31
+#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT)
#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
@@ -2225,6 +2233,14 @@
#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
+#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
+#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
+#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h
index c0151e1..57351cb 100644
--- a/sys/dev/ixl/i40e_type.h
+++ b/sys/dev/ixl/i40e_type.h
@@ -147,6 +147,22 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
+#define I40E_MDIO_STCODE 0
+#define I40E_MDIO_OPCODE_ADDRESS 0
+#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+
+#define I40E_PHY_COM_REG_PAGE 0x1E
+#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
+#define I40E_PHY_LED_MANUAL_ON 0x100
+#define I40E_PHY_LED_PROV_REG_1 0xC430
+#define I40E_PHY_LED_MODE_MASK 0xFFFF
+#define I40E_PHY_LED_MODE_ORIG 0x80000000
+
/* Memory types */
enum i40e_memset_type {
I40E_NONDMA_MEM = 0,
@@ -161,7 +177,6 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 5382ea9..104e7c3 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -48,7 +48,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixl_driver_version[] = "1.4.17-k";
+char ixl_driver_version[] = "1.4.27-k";
/*********************************************************************
* PCI Device ID Table
@@ -105,15 +105,22 @@ static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
static int ixl_setup_stations(struct ixl_pf *);
static int ixl_switch_config(struct ixl_pf *);
static int ixl_initialize_vsi(struct ixl_vsi *);
-static int ixl_assign_vsi_msix(struct ixl_pf *);
+
+static int ixl_setup_adminq_msix(struct ixl_pf *);
+static int ixl_setup_adminq_tq(struct ixl_pf *);
+static int ixl_setup_queue_msix(struct ixl_vsi *);
+static int ixl_setup_queue_tqs(struct ixl_vsi *);
+static int ixl_teardown_adminq_msix(struct ixl_pf *);
+static int ixl_teardown_queue_msix(struct ixl_vsi *);
+static void ixl_configure_intr0_msix(struct ixl_pf *);
+static void ixl_configure_queue_intr_msix(struct ixl_pf *);
+static void ixl_free_queue_tqs(struct ixl_vsi *);
+static void ixl_free_adminq_tq(struct ixl_pf *);
+
static int ixl_assign_vsi_legacy(struct ixl_pf *);
static int ixl_init_msix(struct ixl_pf *);
-static void ixl_configure_msix(struct ixl_pf *);
static void ixl_configure_itr(struct ixl_pf *);
static void ixl_configure_legacy(struct ixl_pf *);
-static void ixl_init_taskqueues(struct ixl_pf *);
-static void ixl_free_taskqueues(struct ixl_pf *);
-static void ixl_free_interrupt_resources(struct ixl_pf *);
static void ixl_free_pci_resources(struct ixl_pf *);
static void ixl_local_timer(void *);
static int ixl_setup_interface(device_t, struct ixl_vsi *);
@@ -122,6 +129,7 @@ static void ixl_config_rss(struct ixl_vsi *);
static void ixl_set_queue_rx_itr(struct ixl_queue *);
static void ixl_set_queue_tx_itr(struct ixl_queue *);
static int ixl_set_advertised_speeds(struct ixl_pf *, int);
+static void ixl_get_initial_advertised_speeds(struct ixl_pf *);
static int ixl_enable_rings(struct ixl_vsi *);
static int ixl_disable_rings(struct ixl_vsi *);
@@ -200,7 +208,13 @@ static void ixl_stat_update32(struct i40e_hw *, u32, bool,
u64 *, u64 *);
/* NVM update */
static int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
+static void ixl_handle_empr_reset(struct ixl_pf *);
+static int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
+/* Debug helper functions */
+#ifdef IXL_DEBUG
+static void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
+#endif
#ifdef PCI_IOV
static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
@@ -293,12 +307,12 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
** - true/false for dynamic adjustment
** - default values for static ITR
*/
-int ixl_dynamic_rx_itr = 0;
+int ixl_dynamic_rx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
-int ixl_dynamic_tx_itr = 0;
+int ixl_dynamic_tx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
@@ -543,8 +557,10 @@ ixl_attach(device_t dev)
goto err_mac_hmc;
}
- /* Disable LLDP from the firmware */
- i40e_aq_stop_lldp(hw, TRUE, NULL);
+ /* Disable LLDP from the firmware for certain NVM versions */
+ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
+ (pf->hw.aq.fw_maj_ver < 4))
+ i40e_aq_stop_lldp(hw, TRUE, NULL);
i40e_get_mac_addr(hw, hw->mac.addr);
error = i40e_validate_mac_addr(hw->mac.addr);
@@ -586,7 +602,8 @@ ixl_attach(device_t dev)
error = ixl_switch_config(pf);
if (error) {
- device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error);
+ device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
+ error);
goto err_late;
}
@@ -599,12 +616,31 @@ ixl_attach(device_t dev)
goto err_late;
}
- /* Get the bus configuration and set the shared code */
+ /* Get the bus configuration and set the shared code's config */
bus = ixl_get_bus_info(hw, dev);
i40e_set_pci_config_data(hw, bus);
- /* Initialize taskqueues */
- ixl_init_taskqueues(pf);
+ /*
+ * In MSI-X mode, initialize the Admin Queue interrupt,
+ * so userland tools can communicate with the adapter regardless of
+ * the ifnet interface's status.
+ */
+ if (pf->msix > 1) {
+ error = ixl_setup_adminq_msix(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
+ error);
+ goto err_late;
+ }
+ error = ixl_setup_adminq_tq(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_tq error: %d\n",
+ error);
+ goto err_late;
+ }
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_adminq(hw);
+ }
/* Initialize statistics & add sysctls */
ixl_add_device_sysctls(pf);
@@ -678,7 +714,7 @@ ixl_detach(device_t dev)
struct ixl_pf *pf = device_get_softc(dev);
struct i40e_hw *hw = &pf->hw;
struct ixl_vsi *vsi = &pf->vsi;
- i40e_status status;
+ enum i40e_status_code status;
#ifdef PCI_IOV
int error;
#endif
@@ -687,7 +723,7 @@ ixl_detach(device_t dev)
/* Make sure VLANS are not using driver */
if (vsi->ifp->if_vlantrunk != NULL) {
- device_printf(dev,"Vlan in use, detach first\n");
+ device_printf(dev, "Vlan in use, detach first\n");
return (EBUSY);
}
@@ -703,7 +739,7 @@ ixl_detach(device_t dev)
if (vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
ixl_stop(pf);
- ixl_free_taskqueues(pf);
+ ixl_free_queue_tqs(vsi);
/* Shutdown LAN HMC */
status = i40e_shutdown_lan_hmc(hw);
@@ -712,6 +748,9 @@ ixl_detach(device_t dev)
"Shutdown LAN HMC failed with code %d\n", status);
/* Shutdown admin queue */
+ ixl_disable_adminq(hw);
+ ixl_free_adminq_tq(pf);
+ ixl_teardown_adminq_msix(pf);
status = i40e_shutdown_adminq(hw);
if (status)
device_printf(dev,
@@ -794,7 +833,7 @@ retry:
pf->qbase = hw->func_caps.base_queue;
#ifdef IXL_DEBUG
- device_printf(dev,"pf_id=%d, num_vfs=%d, msix_pf=%d, "
+ device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
"msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
hw->pf_id, hw->func_caps.num_vfs,
hw->func_caps.num_msix_vectors,
@@ -1070,7 +1109,7 @@ ixl_init_locked(struct ixl_pf *pf)
int ret;
mtx_assert(&pf->pf_mtx, MA_OWNED);
- INIT_DEBUGOUT("ixl_init: begin");
+ INIT_DEBUGOUT("ixl_init_locked: begin");
ixl_stop_locked(pf);
@@ -1131,7 +1170,7 @@ ixl_init_locked(struct ixl_pf *pf)
/* Set up MSI/X routing and the ITR settings */
if (ixl_enable_msix) {
- ixl_configure_msix(pf);
+ ixl_configure_queue_intr_msix(pf);
ixl_configure_itr(pf);
} else
ixl_configure_legacy(pf);
@@ -1150,6 +1189,9 @@ ixl_init_locked(struct ixl_pf *pf)
i40e_get_link_status(hw, &pf->link_up);
ixl_update_link_status(pf);
+ /* Set initial advertised speed sysctl value */
+ ixl_get_initial_advertised_speeds(pf);
+
/* Start the local timer */
callout_reset(&pf->timer, hz, ixl_local_timer, pf);
@@ -1159,6 +1201,37 @@ ixl_init_locked(struct ixl_pf *pf)
return;
}
+/* For the set_advertise sysctl */
+static void
+ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+
+ /* Set initial sysctl values */
+ status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
+ NULL);
+ if (status) {
+ /* Non-fatal error */
+ device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
+ __func__, status);
+ return;
+ }
+
+ if (abilities.link_speed & I40E_LINK_SPEED_40GB)
+ pf->advertised_speed |= 0x10;
+ if (abilities.link_speed & I40E_LINK_SPEED_20GB)
+ pf->advertised_speed |= 0x8;
+ if (abilities.link_speed & I40E_LINK_SPEED_10GB)
+ pf->advertised_speed |= 0x4;
+ if (abilities.link_speed & I40E_LINK_SPEED_1GB)
+ pf->advertised_speed |= 0x2;
+ if (abilities.link_speed & I40E_LINK_SPEED_100MB)
+ pf->advertised_speed |= 0x1;
+}
+
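
The resulting pf->advertised_speed value is a small bitmask later consumed by the set_advertise sysctl; the decode below is a hypothetical helper, included only to make the bit meanings explicit:

    /* Hypothetical helper: show which speeds a mask advertises. */
    static void
    ixl_print_advertised_speeds(device_t dev, u8 speeds)
    {
        device_printf(dev, "advertised:%s%s%s%s%s\n",
            (speeds & 0x10) ? " 40G" : "",
            (speeds & 0x8) ? " 20G" : "",
            (speeds & 0x4) ? " 10G" : "",
            (speeds & 0x2) ? " 1G" : "",
            (speeds & 0x1) ? " 100M" : "");
    }
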
static int
ixl_teardown_hw_structs(struct ixl_pf *pf)
{
@@ -1194,6 +1267,7 @@ ixl_reset(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
+ u8 set_fc_err_mask;
int error = 0;
// XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
@@ -1207,7 +1281,8 @@ ixl_reset(struct ixl_pf *pf)
error = i40e_init_adminq(hw);
if (error) {
- device_printf(dev, "init: Admin queue init failure; status code %d", error);
+ device_printf(dev, "init: Admin queue init failure;"
+ " status code %d", error);
error = EIO;
goto err_out;
}
@@ -1216,26 +1291,35 @@ ixl_reset(struct ixl_pf *pf)
error = ixl_get_hw_capabilities(pf);
if (error) {
- device_printf(dev, "init: Error retrieving HW capabilities; status code %d\n", error);
+ device_printf(dev, "init: Error retrieving HW capabilities;"
+ " status code %d\n", error);
goto err_out;
}
error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (error) {
- device_printf(dev, "init: LAN HMC init failed; status code %d\n", error);
+ device_printf(dev, "init: LAN HMC init failed; status code %d\n",
+ error);
error = EIO;
goto err_out;
}
error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
if (error) {
- device_printf(dev, "init: LAN HMC config failed; status code %d\n", error);
+ device_printf(dev, "init: LAN HMC config failed; status code %d\n",
+ error);
error = EIO;
goto err_out;
}
- // XXX: need to do switch config here?
+ // XXX: possible fix for panic, but our failure recovery is still broken
+ error = ixl_switch_config(pf);
+ if (error) {
+ device_printf(dev, "init: ixl_switch_config() failed: %d\n",
+ error);
+ goto err_out;
+ }
error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
NULL);
@@ -1246,7 +1330,6 @@ ixl_reset(struct ixl_pf *pf)
goto err_out;
}
- u8 set_fc_err_mask;
error = i40e_set_fc(hw, &set_fc_err_mask, true);
if (error) {
device_printf(dev, "init: setting link flow control failed; retcode %d,"
@@ -1277,7 +1360,9 @@ static void
ixl_init(void *arg)
{
struct ixl_pf *pf = arg;
- int ret = 0;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ int error = 0;
/*
* If the aq is dead here, it probably means something outside of the driver
@@ -1285,33 +1370,46 @@ ixl_init(void *arg)
* So rebuild the driver's state here if that occurs.
*/
if (!i40e_check_asq_alive(&pf->hw)) {
- device_printf(pf->dev, "asq is not alive; rebuilding...\n");
+ device_printf(dev, "Admin Queue is down; resetting...\n");
IXL_PF_LOCK(pf);
ixl_teardown_hw_structs(pf);
ixl_reset(pf);
IXL_PF_UNLOCK(pf);
}
- /* Set up interrupt routing here */
- if (pf->msix > 1)
- ret = ixl_assign_vsi_msix(pf);
- else
- ret = ixl_assign_vsi_legacy(pf);
- if (ret) {
- device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", ret);
+ /*
+ * Set up LAN queue interrupts here.
+ * Kernel interrupt setup functions cannot be called while holding a lock,
+ * so this is done outside of init_locked().
+ */
+ if (pf->msix > 1) {
+ /* Teardown existing interrupts, if they exist */
+ ixl_teardown_queue_msix(vsi);
+ ixl_free_queue_tqs(vsi);
+ /* Then set them up again */
+ error = ixl_setup_queue_msix(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
+ error);
+ error = ixl_setup_queue_tqs(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
+ error);
+ } else
+ // possibly broken
+ error = ixl_assign_vsi_legacy(pf);
+ if (error) {
+ device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
return;
}
IXL_PF_LOCK(pf);
ixl_init_locked(pf);
IXL_PF_UNLOCK(pf);
- return;
}
/*
-**
** MSIX Interrupt Handlers and Tasklets
-**
*/
static void
ixl_handle_que(void *context, int pending)
@@ -1954,7 +2052,7 @@ ixl_local_timer(void *arg)
mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
- for (int i = 0; i < vsi->num_queues; i++,que++) {
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
/* Any queues with outstanding work get a sw irq */
if (que->busy)
wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
@@ -2050,7 +2148,8 @@ ixl_stop(struct ixl_pf *pf)
ixl_stop_locked(pf);
IXL_PF_UNLOCK(pf);
- ixl_free_interrupt_resources(pf);
+ ixl_teardown_queue_msix(&pf->vsi);
+ ixl_free_queue_tqs(&pf->vsi);
}
/*********************************************************************
@@ -2073,14 +2172,11 @@ ixl_stop_locked(struct ixl_pf *pf)
/* Stop the local timer */
callout_stop(&pf->timer);
- if (pf->num_vfs == 0)
- ixl_disable_intr(vsi);
- else
- ixl_disable_rings_intr(vsi);
+ ixl_disable_rings_intr(vsi);
ixl_disable_rings(vsi);
/* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}
@@ -2125,10 +2221,6 @@ ixl_assign_vsi_legacy(struct ixl_pf *pf)
device_get_nameunit(dev));
TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
-#ifdef PCI_IOV
- TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
-#endif
-
pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
taskqueue_thread_enqueue, &pf->tq);
taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
@@ -2137,25 +2229,41 @@ ixl_assign_vsi_legacy(struct ixl_pf *pf)
return (0);
}
-static void
-ixl_init_taskqueues(struct ixl_pf *pf)
+static int
+ixl_setup_adminq_tq(struct ixl_pf *pf)
{
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
device_t dev = pf->dev;
+ int error = 0;
- /* Tasklet for Admin Queue */
+ /* Tasklet for Admin Queue interrupts */
TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
#ifdef PCI_IOV
/* VFLR Tasklet */
TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
#endif
-
- /* Create and start PF taskqueue */
- pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
+ /* Create and start Admin Queue taskqueue */
+ pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
taskqueue_thread_enqueue, &pf->tq);
- taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
+ if (!pf->tq) {
+ device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
+ return (ENOMEM);
+ }
+ error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
device_get_nameunit(dev));
+ if (error) {
+ device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
+ error);
+ taskqueue_free(pf->tq);
+ return (error);
+ }
+ return (0);
+}
+
+static int
+ixl_setup_queue_tqs(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
/* Create queue tasks and start queue taskqueues */
for (int i = 0; i < vsi->num_queues; i++, que++) {
@@ -2174,71 +2282,94 @@ ixl_init_taskqueues(struct ixl_pf *pf)
#endif
}
+ return (0);
}
static void
-ixl_free_taskqueues(struct ixl_pf *pf)
+ixl_free_adminq_tq(struct ixl_pf *pf)
{
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
-
- if (pf->tq)
+ if (pf->tq) {
taskqueue_free(pf->tq);
+ pf->tq = NULL;
+ }
+}
+
+static void
+ixl_free_queue_tqs(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que = vsi->queues;
+
for (int i = 0; i < vsi->num_queues; i++, que++) {
- if (que->tq)
+ if (que->tq) {
taskqueue_free(que->tq);
+ que->tq = NULL;
+ }
}
}
-/*********************************************************************
- *
- * Setup MSIX Interrupt resources and handlers for the VSI
- *
- **********************************************************************/
static int
-ixl_assign_vsi_msix(struct ixl_pf *pf)
+ixl_setup_adminq_msix(struct ixl_pf *pf)
{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- struct tx_ring *txr;
- int error, rid, vector = 0;
-#ifdef RSS
- cpuset_t cpu_mask;
-#endif
+ device_t dev = pf->dev;
+ int rid, error = 0;
- /* Admin Queue interrupt vector is 0 */
- rid = vector + 1;
+ /* Admin IRQ rid is 1, vector is 0 */
+ rid = 1;
+ /* Get interrupt resource from bus */
pf->res = bus_alloc_resource_any(dev,
SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
if (!pf->res) {
- device_printf(dev, "Unable to allocate"
- " bus resource: Adminq interrupt [rid=%d]\n", rid);
+ device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
+ " interrupt failed [rid=%d]\n", rid);
return (ENXIO);
}
- /* Set the adminq vector and handler */
+ /* Then associate interrupt with handler */
error = bus_setup_intr(dev, pf->res,
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixl_msix_adminq, pf, &pf->tag);
if (error) {
pf->res = NULL;
- device_printf(dev, "Failed to register Admin que handler");
- return (error);
+ device_printf(dev, "bus_setup_intr() for Admin Queue"
+ " interrupt handler failed, error %d\n", error);
+ return (ENXIO);
+ }
+ error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
+ if (error) {
+ /* Probably non-fatal? */
+ device_printf(dev, "bus_describe_intr() for Admin Queue"
+ " interrupt name failed, error %d\n", error);
}
- bus_describe_intr(dev, pf->res, pf->tag, "aq");
- pf->admvec = vector;
- ++vector;
+ pf->admvec = 0;
+
+ return (0);
+}
+
+/*
+ * Allocate interrupt resources from bus and associate an interrupt handler
+ * to those for the VSI's queues.
+ */
+static int
+ixl_setup_queue_msix(struct ixl_vsi *vsi)
+{
+ device_t dev = vsi->dev;
+ struct ixl_queue *que = vsi->queues;
+ struct tx_ring *txr;
+ int error, rid, vector = 1;
+#ifdef RSS
+ cpuset_t cpu_mask;
+#endif
- /* Now set up the stations */
+ /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
int cpu_id = i;
rid = vector + 1;
txr = &que->txr;
que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_SHAREABLE | RF_ACTIVE);
- if (que->res == NULL) {
- device_printf(dev, "Unable to allocate"
- " bus resource: que interrupt [rid=%d]\n", rid);
+ if (!que->res) {
+ device_printf(dev, "bus_alloc_resource_any() for"
+ " Queue %d interrupt failed [rid=%d]\n",
+ que->me, rid);
return (ENXIO);
}
/* Set the handler function */
@@ -2246,16 +2377,29 @@ ixl_assign_vsi_msix(struct ixl_pf *pf)
INTR_TYPE_NET | INTR_MPSAFE, NULL,
ixl_msix_que, que, &que->tag);
if (error) {
- que->res = NULL;
- device_printf(dev, "Failed to register que handler");
+ device_printf(dev, "bus_setup_intr() for Queue %d"
+ " interrupt handler failed, error %d\n",
+ que->me, error);
+ // TODO: Check for error from this?
+ bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
return (error);
}
- bus_describe_intr(dev, que->res, que->tag, "que%d", i);
+ error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
+ if (error) {
+ device_printf(dev, "bus_describe_intr() for Queue %d"
+ " interrupt name failed, error %d\n",
+ que->me, error);
+ }
/* Bind the vector to a CPU */
#ifdef RSS
cpu_id = rss_getcpu(i % rss_getnumbuckets());
#endif
- bus_bind_intr(dev, que->res, cpu_id);
+ error = bus_bind_intr(dev, que->res, cpu_id);
+ if (error) {
+ device_printf(dev, "bus_bind_intr() for Queue %d"
+ " to CPU %d failed, error %d\n",
+ que->me, cpu_id, error);
+ }
que->msix = vector;
}
@@ -2391,15 +2535,13 @@ no_msix:
}
/*
- * Plumb MSIX vectors
+ * Configure admin queue/misc interrupt cause registers in hardware.
*/
static void
-ixl_configure_msix(struct ixl_pf *pf)
+ixl_configure_intr0_msix(struct ixl_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- u32 reg;
- u16 vector = 1;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
/* First set up the adminq - vector 0 */
wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
@@ -2428,10 +2570,22 @@ ixl_configure_msix(struct ixl_pf *pf)
I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+}
+
+/*
+ * Configure queue interrupt cause registers in hardware.
+ */
+static void
+ixl_configure_queue_intr_msix(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ u32 reg;
+ u16 vector = 1;
- /* Next configure the queues */
for (int i = 0; i < vsi->num_queues; i++, vector++) {
- wr32(hw, I40E_PFINT_DYN_CTLN(i), i);
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
+ /* First queue type is RX / 0 */
wr32(hw, I40E_PFINT_LNKLSTN(i), i);
reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
@@ -2444,11 +2598,8 @@ ixl_configure_msix(struct ixl_pf *pf)
reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
- ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
(I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- if (i == (vsi->num_queues - 1))
- reg |= (IXL_QUEUE_EOL
- << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
wr32(hw, I40E_QINT_TQCTL(i), reg);
}
}
@@ -2499,12 +2650,11 @@ ixl_configure_legacy(struct ixl_pf *pf)
| (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
| (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
wr32(hw, I40E_QINT_TQCTL(0), reg);
-
}
/*
- * Set the Initial ITR state
+ * Get initial ITR values from the tunables.
*/
static void
ixl_configure_itr(struct ixl_pf *pf)
@@ -2514,11 +2664,7 @@ ixl_configure_itr(struct ixl_pf *pf)
struct ixl_queue *que = vsi->queues;
vsi->rx_itr_setting = ixl_rx_itr;
- if (ixl_dynamic_rx_itr)
- vsi->rx_itr_setting |= IXL_ITR_DYNAMIC;
vsi->tx_itr_setting = ixl_tx_itr;
- if (ixl_dynamic_tx_itr)
- vsi->tx_itr_setting |= IXL_ITR_DYNAMIC;
for (int i = 0; i < vsi->num_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
@@ -2528,6 +2674,7 @@ ixl_configure_itr(struct ixl_pf *pf)
vsi->rx_itr_setting);
rxr->itr = vsi->rx_itr_setting;
rxr->latency = IXL_AVE_LATENCY;
+
wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
vsi->tx_itr_setting);
txr->itr = vsi->tx_itr_setting;
@@ -2570,40 +2717,23 @@ ixl_allocate_pci_resources(struct ixl_pf *pf)
return (0);
}
-static void
-ixl_free_interrupt_resources(struct ixl_pf *pf)
+/*
+ * Teardown and release the admin queue/misc vector
+ * interrupt.
+ */
+static int
+ixl_teardown_adminq_msix(struct ixl_pf *pf)
{
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
device_t dev = pf->dev;
- int rid;
+ int rid;
- /* We may get here before stations are setup */
- if ((!ixl_enable_msix) || (que == NULL))
- goto early;
-
- /*
- ** Release all msix VSI resources:
- */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- bus_teardown_intr(dev, que->res, que->tag);
- que->tag = NULL;
- }
- if (que->res != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- que->res = NULL;
- }
- }
-
-early:
- /* Clean the AdminQ interrupt last */
if (pf->admvec) /* we are doing MSIX */
rid = pf->admvec + 1;
else
(pf->msix != 0) ? (rid = 1):(rid = 0);
+ // TODO: Check for errors from bus_teardown_intr
+ // TODO: Check for errors from bus_release_resource
if (pf->tag != NULL) {
bus_teardown_intr(dev, pf->res, pf->tag);
pf->tag = NULL;
@@ -2612,6 +2742,47 @@ early:
bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
pf->res = NULL;
}
+
+ return (0);
+}
+
+static int
+ixl_teardown_queue_msix(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+ int rid, error = 0;
+
+ /* We may get here before stations are setup */
+ if ((!ixl_enable_msix) || (que == NULL))
+ return (0);
+
+ /* Release all MSIX queue resources */
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ rid = que->msix + 1;
+ if (que->tag != NULL) {
+ error = bus_teardown_intr(dev, que->res, que->tag);
+ if (error) {
+ device_printf(dev, "bus_teardown_intr() for"
+ " Queue %d interrupt failed\n",
+ que->me);
+ // return (ENXIO);
+ }
+ que->tag = NULL;
+ }
+ if (que->res != NULL) {
+ error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ if (error) {
+ device_printf(dev, "bus_release_resource() for"
+ " Queue %d interrupt failed [rid=%d]\n",
+ que->me, rid);
+ // return (ENXIO);
+ }
+ que->res = NULL;
+ }
+ }
+
+ return (0);
}
static void
@@ -2620,7 +2791,8 @@ ixl_free_pci_resources(struct ixl_pf *pf)
device_t dev = pf->dev;
int memrid;
- ixl_free_interrupt_resources(pf);
+ ixl_teardown_queue_msix(&pf->vsi);
+ ixl_teardown_adminq_msix(pf);
if (pf->msix)
pci_release_msi(dev);
@@ -2837,7 +3009,6 @@ ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
struct i40e_aqc_get_link_status *status =
(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
-
/* Request link status from adapter */
hw->phy.get_link_info = TRUE;
i40e_get_link_status(hw, &pf->link_up);
@@ -2876,8 +3047,8 @@ ixl_switch_config(struct ixl_pf *pf)
ret = i40e_aq_get_switch_config(hw, sw_config,
sizeof(aq_buf), &next, NULL);
if (ret) {
- device_printf(dev,"aq_get_switch_config failed (ret=%d)!!\n",
- ret);
+ device_printf(dev, "aq_get_switch_config() failed, error %d,"
+ " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
return (ret);
}
#ifdef IXL_DEBUG
@@ -2924,7 +3095,8 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
ctxt.pf_num = hw->pf_id;
err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
if (err) {
- device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d\n", err);
+ device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
+ " aq_error %d\n", err, hw->aq.asq_last_status);
return (err);
}
#ifdef IXL_DEBUG
@@ -2943,9 +3115,17 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
*/
ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
- ctxt.info.queue_mapping[0] = 0;
- /* This VSI is assigned 64 queues (we may not use all of them) */
- ctxt.info.tc_mapping[0] = 0x0c00;
+ /* In contig mode, que_mapping[0] is first queue index used by this VSI */
+ ctxt.info.queue_mapping[0] = 0;
+ /*
+ * This VSI will only use traffic class 0; start traffic class 0's
+ * queue allocation at queue 0, and assign it 64 (2^6) queues (though
+ * the driver may not use all of them).
+ */
+ ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+ & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
+ ((6 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
/* Set VLAN receive stripping mode */
ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
@@ -3060,7 +3240,6 @@ ixl_initialize_vsi(struct ixl_vsi *vsi)
device_printf(dev, "Fail in init_rx_ring %d\n", i);
break;
}
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), 0);
#ifdef DEV_NETMAP
/* preserve queue */
if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
@@ -3277,7 +3456,6 @@ ixl_set_queue_rx_itr(struct ixl_queue *que)
u16 rx_latency = 0;
int rx_bytes;
-
/* Idle, do nothing */
if (rxr->bytes == 0)
return;
@@ -3429,6 +3607,52 @@ ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
}
+#ifdef IXL_DEBUG
+/**
+ * ixl_sysctl_qtx_tail_handler
+ * Retrieves I40E_QTX_TAIL value from hardware
+ * for a sysctl.
+ */
+static int
+ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+ if (!que) return 0;
+
+ val = rd32(que->vsi->hw, que->txr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return error;
+ return (0);
+}
+
+/**
+ * ixl_sysctl_qrx_tail_handler
+ * Retrieves I40E_QRX_TAIL value from hardware
+ * for a sysctl.
+ */
+static int
+ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+ if (!que) return 0;
+
+ val = rd32(que->vsi->hw, que->rxr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return error;
+ return (0);
+}
+#endif
+
static void
ixl_add_hw_stats(struct ixl_pf *pf)
{
@@ -3473,9 +3697,6 @@ ixl_add_hw_stats(struct ixl_pf *pf)
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
"m_defrag() failed");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "dropped",
- CTLFLAG_RD, &(queues[q].dropped_pkts),
- "Driver dropped packets");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
CTLFLAG_RD, &(queues[q].irqs),
"irqs on this queue");
@@ -3500,6 +3721,45 @@ ixl_add_hw_stats(struct ixl_pf *pf)
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
+ CTLFLAG_RD, &(rxr->desc_errs),
+ "Queue Rx Descriptor Errors");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
+ CTLFLAG_RD, &(rxr->itr), 0,
+ "Queue Rx ITR Interval");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
+ CTLFLAG_RD, &(txr->itr), 0,
+ "Queue Tx ITR Interval");
+ // Not actual latency; just a calculated value to put in a register
+ // TODO: Put in better descriptions here
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_latency",
+ CTLFLAG_RD, &(rxr->latency), 0,
+ "Queue Rx ITRL Average Interval");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_latency",
+ CTLFLAG_RD, &(txr->latency), 0,
+ "Queue Tx ITRL Average Interval");
+
+#ifdef IXL_DEBUG
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
+ CTLFLAG_RD, &(rxr->not_done),
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
+ CTLFLAG_RD, &(rxr->next_refresh), 0,
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
+ CTLFLAG_RD, &(rxr->next_check), 0,
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qtx_tail_handler, "IU",
+ "Queue Transmit Descriptor Tail");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qrx_tail_handler, "IU",
+ "Queue Receive Descriptor Tail");
+#endif
}
/* MAC stats */
@@ -3605,7 +3865,8 @@ ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
** ixl_config_rss - setup RSS
** - note this is done for the single vsi
*/
-static void ixl_config_rss(struct ixl_vsi *vsi)
+static void
+ixl_config_rss(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = vsi->hw;
@@ -3629,7 +3890,7 @@ static void ixl_config_rss(struct ixl_vsi *vsi)
/* Fill out hash function seed */
for (i = 0; i < IXL_KEYSZ; i++)
- wr32(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
/* Enable PCTYPES for RSS: */
#ifdef RSS
@@ -3662,11 +3923,11 @@ static void ixl_config_rss(struct ixl_vsi *vsi)
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
#endif
- hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
+ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
hena |= set_hena;
- wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/* Populate the LUT with max no. of queues in round robin fashion */
for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
@@ -4160,7 +4421,8 @@ ixl_disable_rings(struct ixl_vsi *vsi)
* Called from interrupt handler to identify possibly malicious vfs
* (But also detects events from the PF, as well)
**/
-static void ixl_handle_mdd_event(struct ixl_pf *pf)
+static void
+ixl_handle_mdd_event(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
@@ -4233,7 +4495,6 @@ ixl_enable_intr(struct ixl_vsi *vsi)
struct ixl_queue *que = vsi->queues;
if (ixl_enable_msix) {
- ixl_enable_adminq(hw);
for (int i = 0; i < vsi->num_queues; i++, que++)
ixl_enable_queue(hw, que->me);
} else
@@ -4498,6 +4759,88 @@ ixl_update_stats_counters(struct ixl_pf *pf)
}
}
+static int
+ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ bool is_up = false;
+ int error = 0;
+
+ is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
+
+ /* Teardown */
+ if (is_up)
+ ixl_stop(pf);
+ error = i40e_shutdown_lan_hmc(hw);
+ if (error)
+ device_printf(dev,
+ "Shutdown LAN HMC failed with code %d\n", error);
+ ixl_disable_adminq(hw);
+ ixl_teardown_adminq_msix(pf);
+ error = i40e_shutdown_adminq(hw);
+ if (error)
+ device_printf(dev,
+ "Shutdown Admin queue failed with code %d\n", error);
+
+ /* Setup */
+ error = i40e_init_adminq(hw);
+ if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
+ device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
+ error);
+ }
+ error = ixl_setup_adminq_msix(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
+ error);
+ }
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_adminq(hw);
+ error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (error) {
+ device_printf(dev, "init_lan_hmc failed: %d\n", error);
+ }
+ error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (error) {
+ device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+ }
+ if (is_up)
+ ixl_init(pf);
+
+ return (0);
+}
+
+static void
+ixl_handle_empr_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int count = 0;
+ u32 reg;
+
+ /* Typically finishes within 3-4 seconds */
+ while (count++ < 100) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT)
+ & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
+ if (reg)
+ i40e_msec_delay(100);
+ else
+ break;
+ }
+#ifdef IXL_DEBUG
+ // Reset-related
+ device_printf(dev, "EMPR reset wait count: %d\n", count);
+#endif
+
+ device_printf(dev, "Rebuilding driver state...\n");
+ ixl_rebuild_hw_structs_after_reset(pf);
+ device_printf(dev, "Rebuilding driver state done.\n");
+
+ atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
+}
+
/*
** Tasklet handler for MSIX Adminq interrupts
** - do outside interrupt since it might sleep
@@ -4510,34 +4853,16 @@ ixl_do_adminq(void *context, int pending)
struct i40e_arq_event_info event;
i40e_status ret;
device_t dev = pf->dev;
- u32 reg, loop = 0;
+ u32 loop = 0;
u16 opcode, result;
- // XXX: Possibly inappropriate overload
if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
- int count = 0;
- // ERJ: Typically finishes within 3-4 seconds
- while (count++ < 100) {
- reg = rd32(hw, I40E_GLGEN_RSTAT);
- reg = reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
- if (reg) {
- i40e_msec_delay(100);
- } else {
- break;
- }
- }
- device_printf(dev, "EMPR reset wait count: %d\n", count);
-
- device_printf(dev, "Rebuilding HW structs...\n");
- // XXX: I feel like this could cause a kernel panic some time in the future
- ixl_stop(pf);
- ixl_init(pf);
-
- atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
+		/* The flag is cleared at the end of ixl_handle_empr_reset() */
+ ixl_handle_empr_reset(pf);
return;
}
- // Actually do Admin Queue handling
+ /* Admin Queue handling */
event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = malloc(event.buf_len,
M_DEVBUF, M_NOWAIT | M_ZERO);
@@ -4555,7 +4880,8 @@ ixl_do_adminq(void *context, int pending)
break;
opcode = LE16_TO_CPU(event.desc.opcode);
#ifdef IXL_DEBUG
- device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__, opcode);
+ device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__,
+ opcode);
#endif
switch (opcode) {
case i40e_aqc_opc_get_link_status:
@@ -4590,7 +4916,8 @@ ixl_do_adminq(void *context, int pending)
/**
* Update VSI-specific ethernet statistics counters.
**/
-void ixl_update_eth_stats(struct ixl_vsi *vsi)
+void
+ixl_update_eth_stats(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
struct i40e_hw *hw = &pf->hw;
@@ -4788,10 +5115,11 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
+#if 0
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "rx_itr", CTLFLAG_RW,
- &ixl_rx_itr, IXL_ITR_8K, "RX ITR");
+ &ixl_rx_itr, 0, "RX ITR");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
@@ -4801,12 +5129,13 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "tx_itr", CTLFLAG_RW,
- &ixl_tx_itr, IXL_ITR_4K, "TX ITR");
+ &ixl_tx_itr, 0, "TX ITR");
SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
&ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
+#endif
#ifdef IXL_DEBUG_SYSCTL
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
@@ -4872,33 +5201,23 @@ ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
device_t dev = pf->dev;
- int error = 0;
+ int requested_fc, error = 0;
enum i40e_status_code aq_error = 0;
u8 fc_aq_err = 0;
/* Get request */
- error = sysctl_handle_int(oidp, &pf->fc, 0, req);
+ requested_fc = pf->fc;
+ error = sysctl_handle_int(oidp, &requested_fc, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- if (pf->fc < 0 || pf->fc > 3) {
+ if (requested_fc < 0 || requested_fc > 3) {
device_printf(dev,
"Invalid fc mode; valid modes are 0 through 3\n");
return (EINVAL);
}
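+	/*
+	 * Note: the accepted values are assumed to follow the shared-code
+	 * i40e_fc_mode enumeration: 0 = none, 1 = Rx pause, 2 = Tx pause,
+	 * 3 = full flow control.
+	 */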
- /*
- ** Changing flow control mode currently does not work on
- ** 40GBASE-CR4 PHYs
- */
- if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4
- || hw->phy.link_info.phy_type == I40E_PHY_TYPE_40GBASE_CR4_CU) {
- device_printf(dev, "Changing flow control mode unsupported"
- " on 40GBase-CR4 media.\n");
- return (ENODEV);
- }
-
/* Set fc ability for port */
- hw->fc.requested_mode = pf->fc;
+ hw->fc.requested_mode = requested_fc;
aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
if (aq_error) {
device_printf(dev,
@@ -4906,6 +5225,7 @@ ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
__func__, aq_error, fc_aq_err);
return (EIO);
}
+ pf->fc = requested_fc;
/* Get new link state */
i40e_msec_delay(250);
@@ -5155,7 +5475,6 @@ ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
break;
}
-
device_printf(dev,"PCI Express Bus: Speed %s %s\n",
((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
(hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
@@ -5192,6 +5511,41 @@ ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
return 0;
}
+#ifdef IXL_DEBUG
+static void
+ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
+{
+ if ((nvma->command == I40E_NVM_READ) &&
+ ((nvma->config & 0xFF) == 0xF) &&
+ (((nvma->config & 0xF00) >> 8) == 0xF) &&
+ (nvma->offset == 0) &&
+ (nvma->data_size == 1)) {
+ // device_printf(dev, "- Get Driver Status Command\n");
+ }
+ else if (nvma->command == I40E_NVM_READ) {
+
+ }
+ else {
+ switch (nvma->command) {
+ case 0xB:
+ device_printf(dev, "- command: I40E_NVM_READ\n");
+ break;
+ case 0xC:
+ device_printf(dev, "- command: I40E_NVM_WRITE\n");
+ break;
+ default:
+ device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
+ break;
+ }
+
+ device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
+ device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
+ device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
+ device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
+ }
+}
+#endif
+
static int
ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
{
@@ -5203,17 +5557,24 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
DEBUGFUNC("ixl_handle_nvmupd_cmd");
+ /* Sanity checks */
if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
ifd->ifd_data == NULL) {
- device_printf(dev, "%s: incorrect ifdrv length or data pointer\n", __func__);
- device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n", __func__,
- ifd->ifd_len, sizeof(struct i40e_nvm_access));
- device_printf(dev, "%s: data pointer: %p\n", __func__, ifd->ifd_data);
+ device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
+ __func__);
+ device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
+ __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
+ device_printf(dev, "%s: data pointer: %p\n", __func__,
+ ifd->ifd_data);
return (EINVAL);
}
nvma = (struct i40e_nvm_access *)ifd->ifd_data;
+#ifdef IXL_DEBUG
+ ixl_print_nvm_cmd(dev, nvma);
+#endif
+
if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
int count = 0;
while (count++ < 100) {
@@ -5221,7 +5582,6 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
break;
}
- // device_printf(dev, "ioctl EMPR reset wait count %d\n", count);
}
if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
@@ -5442,18 +5802,10 @@ ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
sbuf_cat(buf, "\n");
sbuf_printf(buf, "# of entries: %d\n", num_entries);
sbuf_printf(buf,
-#if 0
- "Type | Guaranteed | Total | Used | Un-allocated\n"
- " | (this) | (all) | (this) | (all) \n");
-#endif
" Type | Guaranteed | Total | Used | Un-allocated\n"
" | (this) | (all) | (this) | (all) \n");
for (int i = 0; i < num_entries; i++) {
sbuf_printf(buf,
-#if 0
- "%#4x | %10d %5d %6d %12d",
- resp[i].resource_type,
-#endif
"%25s | %10d %5d %6d %12d",
ixl_switch_res_type_string(resp[i].resource_type),
resp[i].guaranteed,
@@ -5772,10 +6124,10 @@ ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
index = qnum / 2;
shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
- qtable = rd32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
+ qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
qtable |= val << shift;
- wr32(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
}
static void
@@ -5791,7 +6143,7 @@ ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
* Contiguous mappings aren't actually supported by the hardware,
* so we have to use non-contiguous mappings.
*/
- wr32(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
@@ -7008,7 +7360,7 @@ ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
goto fail;
}
- ixl_configure_msix(pf);
+ // TODO: [Configure MSI-X here]
ixl_enable_adminq(hw);
pf->num_vfs = num_vfs;
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
index 5519095..9e5242c 100644
--- a/sys/dev/ixl/if_ixlv.c
+++ b/sys/dev/ixl/if_ixlv.c
@@ -48,7 +48,7 @@
/*********************************************************************
* Driver version
*********************************************************************/
-char ixlv_driver_version[] = "1.2.7-k";
+char ixlv_driver_version[] = "1.2.11-k";
/*********************************************************************
* PCI Device ID Table
@@ -670,7 +670,7 @@ ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
error = EINVAL;
IOCTL_DBG_IF(ifp, "mtu too large");
} else {
- IOCTL_DBG_IF2(ifp, "mtu: %u -> %d", ifp->if_mtu, ifr->ifr_mtu);
+ IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu);
// ERJ: Interestingly enough, these types don't match
ifp->if_mtu = (u_long)ifr->ifr_mtu;
vsi->max_frame_size =
@@ -941,12 +941,12 @@ ixlv_init(void *arg)
/* Wait for init_locked to finish */
while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
- && ++retries < 100) {
- i40e_msec_delay(10);
+ && ++retries < IXLV_AQ_MAX_ERR) {
+ i40e_msec_delay(25);
}
if (retries >= IXLV_AQ_MAX_ERR)
if_printf(vsi->ifp,
- "Init failed to complete in alloted time!\n");
+ "Init failed to complete in allotted time!\n");
}
/*
@@ -2598,6 +2598,7 @@ ixlv_config_rss(struct ixlv_sc *sc)
wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ // TODO: Fix -- only 3,7,11,15 are filled out, instead of all 16 registers
/* Populate the LUT with max no. of queues in round robin fashion */
for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
if (j == vsi->num_queues)
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index 34ff880..68fd3f1 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -162,8 +162,10 @@
/*
* Ring Descriptors Valid Range: 32-4096 Default Value: 1024 This value is the
* number of tx/rx descriptors allocated by the driver. Increasing this
- * value allows the driver to queue more operations. Each descriptor is 16
- * or 32 bytes (configurable in FVL)
+ * value allows the driver to queue more operations.
+ *
+ * Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes.
+ * The driver currently always uses 32 byte Rx descriptors.
*/
#define DEFAULT_RING 1024
#define PERFORM_RING 2048
@@ -215,7 +217,7 @@
#define IXL_TX_ITR 1
#define IXL_ITR_NONE 3
#define IXL_QUEUE_EOL 0x7FF
-#define IXL_MAX_FRAME 0x2600
+#define IXL_MAX_FRAME 9728
#define IXL_MAX_TX_SEGS 8
#define IXL_MAX_TSO_SEGS 66
#define IXL_SPARSE_CHAIN 6
@@ -397,8 +399,8 @@ struct tx_ring {
u16 next_to_clean;
u16 atr_rate;
u16 atr_count;
- u16 itr;
- u16 latency;
+ u32 itr;
+ u32 latency;
struct ixl_tx_buf *buffers;
volatile u16 avail;
u32 cmd;
@@ -430,10 +432,10 @@ struct rx_ring {
bool lro_enabled;
bool hdr_split;
bool discard;
- u16 next_refresh;
- u16 next_check;
- u16 itr;
- u16 latency;
+ u32 next_refresh;
+ u32 next_check;
+ u32 itr;
+ u32 latency;
char mtx_name[16];
struct ixl_rx_buf *buffers;
u32 mbuf_sz;
@@ -449,7 +451,7 @@ struct rx_ring {
u64 split;
u64 rx_packets;
u64 rx_bytes;
- u64 discarded;
+ u64 desc_errs;
u64 not_done;
};
@@ -502,8 +504,8 @@ struct ixl_vsi {
u16 msix_base; /* station base MSIX vector */
u16 first_queue;
u16 num_queues;
- u16 rx_itr_setting;
- u16 tx_itr_setting;
+ u32 rx_itr_setting;
+ u32 tx_itr_setting;
struct ixl_queue *queues; /* head of queues */
bool link_active;
u16 seid;
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index 68e867e..fc57f4e 100644
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -247,7 +247,6 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
bus_dma_tag_t tag;
bus_dma_segment_t segs[IXL_MAX_TSO_SEGS];
-
cmd = off = 0;
m_head = *m_headp;
@@ -286,7 +285,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
if (error == EFBIG) {
struct mbuf *m;
- m = m_collapse(*m_headp, M_NOWAIT, maxsegs);
+ m = m_defrag(*m_headp, M_NOWAIT);
if (m == NULL) {
que->mbuf_defrag_failed++;
m_freem(*m_headp);
@@ -390,7 +389,6 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
++txr->total_packets;
wr32(hw, txr->tail, i);
- ixl_flush(hw);
/* Mark outstanding work */
if (que->busy == 0)
que->busy = 1;
@@ -631,7 +629,6 @@ ixl_tx_setup_offload(struct ixl_queue *que,
u8 ipproto = 0;
bool tso = FALSE;
-
/* Set up the TSO context descriptor if required */
if (mp->m_pkthdr.csum_flags & CSUM_TSO) {
tso = ixl_tso_setup(que, mp);
@@ -769,6 +766,12 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
tcp_hlen = th->th_off << 2;
+ /*
+ * The corresponding flag is set by the stack in the IPv4
+ * TSO case, but not in IPv6 (at least in FreeBSD 10.2).
+ * So, set it here because the rest of the flow requires it.
+ */
+ mp->m_pkthdr.csum_flags |= CSUM_TCP_IPV6;
break;
#endif
#ifdef INET
@@ -1579,7 +1582,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
** error results.
*/
if (eop && (error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- rxr->discarded++;
+ rxr->desc_errs++;
ixl_rx_discard(rxr, i);
goto next_desc;
}
diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h
index ee4f436..a131ca0 100644
--- a/sys/dev/ixl/ixlv.h
+++ b/sys/dev/ixl/ixlv.h
@@ -38,7 +38,7 @@
#include "ixlv_vc_mgr.h"
-#define IXLV_AQ_MAX_ERR 100
+#define IXLV_AQ_MAX_ERR 200
#define IXLV_MAX_FILTERS 128
#define IXLV_MAX_QUEUES 16
#define IXLV_AQ_TIMEOUT (1 * hz)
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c
index 443c9e2..6a367ea 100644
--- a/sys/dev/ixl/ixlvc.c
+++ b/sys/dev/ixl/ixlvc.c
@@ -70,7 +70,8 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
break;
case I40E_VIRTCHNL_OP_RESET_VF:
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- valid_len = 0;
+ // TODO: valid length in api v1.0 is 0, v1.1 is 4
+ valid_len = 4;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
@@ -224,29 +225,34 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
goto out;
}
- do {
+ for (;;) {
if (++retries > IXLV_AQ_MAX_ERR)
goto out_alloc;
- /* NOTE: initial delay is necessary */
+ /* Initial delay here is necessary */
i40e_msec_delay(100);
err = i40e_clean_arq_element(hw, &event, NULL);
- } while (err == I40E_ERR_ADMIN_QUEUE_NO_WORK);
- if (err)
- goto out_alloc;
+ if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
+ continue;
+ else if (err) {
+ err = EIO;
+ goto out_alloc;
+ }
- err = (i40e_status)le32toh(event.desc.cookie_low);
- if (err) {
- err = EIO;
- goto out_alloc;
- }
+ if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
+ I40E_VIRTCHNL_OP_VERSION) {
+ DDPRINTF(dev, "Received unexpected op response: %d\n",
+ le32toh(event.desc.cookie_high));
+ /* Don't stop looking for expected response */
+ continue;
+ }
- if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
- I40E_VIRTCHNL_OP_VERSION) {
- DDPRINTF(dev, "Received unexpected op response: %d\n",
- le32toh(event.desc.cookie_high));
- err = EIO;
- goto out_alloc;
+ err = (i40e_status)le32toh(event.desc.cookie_low);
+ if (err) {
+ err = EIO;
+ goto out_alloc;
+ } else
+ break;
}
pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
@@ -266,7 +272,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
out_alloc:
free(event.msg_buf, M_DEVBUF);
out:
- return err;
+ return (err);
}
/*
@@ -871,7 +877,9 @@ ixlv_vc_completion(struct ixlv_sc *sc,
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
device_printf(dev, "PF initiated reset!\n");
sc->init_state = IXLV_RESET_PENDING;
- ixlv_init(sc);
+ mtx_unlock(&sc->mtx);
+ ixlv_init(vsi);
+ mtx_lock(&sc->mtx);
break;
default:
device_printf(dev, "%s: Unknown event %d from AQ\n",
@@ -954,9 +962,11 @@ ixlv_vc_completion(struct ixlv_sc *sc,
v_retval);
break;
default:
+#ifdef IXL_DEBUG
device_printf(dev,
"%s: Received unexpected message %d from PF.\n",
__func__, v_opcode);
+#endif
break;
}
return;
diff --git a/sys/dev/mmc/host/dwmmc.c b/sys/dev/mmc/host/dwmmc.c
index f96eff6..fe08fe9 100644
--- a/sys/dev/mmc/host/dwmmc.c
+++ b/sys/dev/mmc/host/dwmmc.c
@@ -722,6 +722,9 @@ dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
bus_dmamap_sync(sc->buf_tag, sc->buf_map,
BUS_DMASYNC_POSTREAD);
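+	/*
+	 * The descriptor ring was synced PREWRITE in dma_prepare(); pair it
+	 * with a POSTWRITE sync now that the controller is done reading the
+	 * descriptors.
+	 */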
+ bus_dmamap_sync(sc->desc_tag, sc->desc_map,
+ BUS_DMASYNC_POSTWRITE);
+
bus_dmamap_unload(sc->buf_tag, sc->buf_map);
return (0);
@@ -766,6 +769,10 @@ dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
if (err != 0)
panic("dmamap_load failed\n");
+ /* Ensure the device can see the desc */
+ bus_dmamap_sync(sc->desc_tag, sc->desc_map,
+ BUS_DMASYNC_PREWRITE);
+
if (data->flags & MMC_DATA_WRITE)
bus_dmamap_sync(sc->buf_tag, sc->buf_map,
BUS_DMASYNC_PREWRITE);
diff --git a/sys/dev/mrsas/mrsas.c b/sys/dev/mrsas/mrsas.c
index 23682a9..71219be 100644
--- a/sys/dev/mrsas/mrsas.c
+++ b/sys/dev/mrsas/mrsas.c
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/types.h>
+#include <sys/sysent.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>
@@ -63,6 +64,7 @@ static d_write_t mrsas_write;
static d_ioctl_t mrsas_ioctl;
static d_poll_t mrsas_poll;
+static void mrsas_ich_startup(void *arg);
static struct mrsas_mgmt_info mrsas_mgmt_info;
static struct mrsas_ident *mrsas_find_ident(device_t);
static int mrsas_setup_msix(struct mrsas_softc *sc);
@@ -80,7 +82,8 @@ static int mrsas_setup_irq(struct mrsas_softc *sc);
static int mrsas_alloc_mem(struct mrsas_softc *sc);
static int mrsas_init_fw(struct mrsas_softc *sc);
static int mrsas_setup_raidmap(struct mrsas_softc *sc);
-static int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
+static void megasas_setup_jbod_map(struct mrsas_softc *sc);
+static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend);
static int mrsas_clear_intr(struct mrsas_softc *sc);
static int mrsas_get_ctrl_info(struct mrsas_softc *sc);
static void mrsas_update_ext_vd_details(struct mrsas_softc *sc);
@@ -104,8 +107,9 @@ int mrsas_ioc_init(struct mrsas_softc *sc);
int mrsas_bus_scan(struct mrsas_softc *sc);
int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
-int mrsas_reset_ctrl(struct mrsas_softc *sc);
-int mrsas_wait_for_outstanding(struct mrsas_softc *sc);
+int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason);
+int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason);
+int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
struct mrsas_mfi_cmd *cmd);
@@ -182,6 +186,8 @@ MRSAS_CTLR_ID device_table[] = {
{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
+ {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
+ {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
{0, 0, 0, 0, NULL}
};
@@ -553,6 +559,7 @@ mrsas_get_seq_num(struct mrsas_softc *sc,
{
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
+ u_int8_t do_ocr = 1, retcode = 0;
cmd = mrsas_get_mfi_cmd(sc);
@@ -580,16 +587,24 @@ mrsas_get_seq_num(struct mrsas_softc *sc,
dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);
- mrsas_issue_blocked_cmd(sc, cmd);
+ retcode = mrsas_issue_blocked_cmd(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
+ do_ocr = 0;
/*
* Copy the data back into callers buffer
*/
memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
mrsas_free_evt_log_info_cmd(sc);
- mrsas_release_mfi_cmd(cmd);
- return 0;
+dcmd_timeout:
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
+ return retcode;
}
@@ -811,7 +826,8 @@ mrsas_attach(device_t dev)
{
struct mrsas_softc *sc = device_get_softc(dev);
uint32_t cmd, bar, error;
- struct cdev *linux_dev;
+
+ memset(sc, 0, sizeof(struct mrsas_softc));
/* Look up our softc and initialize its fields. */
sc->mrsas_dev = dev;
@@ -852,12 +868,6 @@ mrsas_attach(device_t dev)
mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);
- /*
- * Intialize a counting Semaphore to take care no. of concurrent
- * IOCTLs
- */
- sema_init(&sc->ioctl_count_sema, MRSAS_MAX_MFI_CMDS - 5, IOCTL_SEMA_DESCRIPTION);
-
/* Intialize linked list */
TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);
@@ -866,16 +876,6 @@ mrsas_attach(device_t dev)
sc->io_cmds_highwater = 0;
- /* Create a /dev entry for this device. */
- sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(dev), UID_ROOT,
- GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
- device_get_unit(dev));
- if (device_get_unit(dev) == 0)
- make_dev_alias_p(MAKEDEV_CHECKNAME, &linux_dev, sc->mrsas_cdev,
- "megaraid_sas_ioctl_node");
- if (sc->mrsas_cdev)
- sc->mrsas_cdev->si_drv1 = sc;
-
sc->adprecovery = MRSAS_HBA_OPERATIONAL;
sc->UnevenSpanSupport = 0;
@@ -885,7 +885,7 @@ mrsas_attach(device_t dev)
if (mrsas_init_fw(sc) != SUCCESS) {
goto attach_fail_fw;
}
- /* Register SCSI mid-layer */
+	/* Register mrsas with the CAM layer */
if ((mrsas_cam_attach(sc) != SUCCESS)) {
goto attach_fail_cam;
}
@@ -893,38 +893,28 @@ mrsas_attach(device_t dev)
if (mrsas_setup_irq(sc) != SUCCESS) {
goto attach_fail_irq;
}
- /* Enable Interrupts */
- mrsas_enable_intr(sc);
-
error = mrsas_kproc_create(mrsas_ocr_thread, sc,
&sc->ocr_thread, 0, 0, "mrsas_ocr%d",
device_get_unit(sc->mrsas_dev));
if (error) {
- printf("Error %d starting rescan thread\n", error);
- goto attach_fail_irq;
- }
- mrsas_setup_sysctl(sc);
-
- /* Initiate AEN (Asynchronous Event Notification) */
-
- if (mrsas_start_aen(sc)) {
- printf("Error: start aen failed\n");
- goto fail_start_aen;
+ device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
+ goto attach_fail_ocr_thread;
}
/*
- * Add this controller to mrsas_mgmt_info structure so that it can be
- * exported to management applications
+	 * After FW initialization and OCR thread creation, cdev creation
+	 * and AEN setup are deferred to the ICH (interrupt config hook)
+	 * callback.
*/
- if (device_get_unit(dev) == 0)
- memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
-
- mrsas_mgmt_info.count++;
- mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
- mrsas_mgmt_info.max_index++;
-
- return (0);
+ sc->mrsas_ich.ich_func = mrsas_ich_startup;
+ sc->mrsas_ich.ich_arg = sc;
+ if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
+ device_printf(sc->mrsas_dev, "Config hook is already established\n");
+ }
+ mrsas_setup_sysctl(sc);
+ return SUCCESS;
-fail_start_aen:
+attach_fail_ocr_thread:
+ if (sc->ocr_thread_active)
+ wakeup(&sc->ocr_chan);
attach_fail_irq:
mrsas_teardown_intr(sc);
attach_fail_cam:
@@ -942,10 +932,7 @@ attach_fail_fw:
mtx_destroy(&sc->mpt_cmd_pool_lock);
mtx_destroy(&sc->mfi_cmd_pool_lock);
mtx_destroy(&sc->raidmap_lock);
- /* Destroy the counting semaphore created for Ioctl */
- sema_destroy(&sc->ioctl_count_sema);
attach_fail:
- destroy_dev(sc->mrsas_cdev);
if (sc->reg_res) {
bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
sc->reg_res_id, sc->reg_res);
@@ -954,6 +941,63 @@ attach_fail:
}
/*
+ * Interrupt config hook
+ */
+static void
+mrsas_ich_startup(void *arg)
+{
+ struct mrsas_softc *sc = (struct mrsas_softc *)arg;
+
+ /*
+	 * Initialize a counting semaphore to limit the number of concurrent
+	 * IOCTLs
+ */
+ sema_init(&sc->ioctl_count_sema,
+ MRSAS_MAX_MFI_CMDS - 5,
+ IOCTL_SEMA_DESCRIPTION);
+
+ /* Create a /dev entry for mrsas controller. */
+ sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
+ GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
+ device_get_unit(sc->mrsas_dev));
+
+ if (device_get_unit(sc->mrsas_dev) == 0) {
+ make_dev_alias_p(MAKEDEV_CHECKNAME,
+ &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
+ "megaraid_sas_ioctl_node");
+ }
+ if (sc->mrsas_cdev)
+ sc->mrsas_cdev->si_drv1 = sc;
+
+ /*
+ * Add this controller to mrsas_mgmt_info structure so that it can be
+ * exported to management applications
+ */
+ if (device_get_unit(sc->mrsas_dev) == 0)
+ memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));
+
+ mrsas_mgmt_info.count++;
+ mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
+ mrsas_mgmt_info.max_index++;
+
+ /* Enable Interrupts */
+ mrsas_enable_intr(sc);
+
+ /* Initiate AEN (Asynchronous Event Notification) */
+ if (mrsas_start_aen(sc)) {
+		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
+		    "Further events from the controller will not be communicated.\n"
+		    "Either there is some problem in the controller "
+		    "or the controller does not support AEN.\n"
+		    "Please contact the SUPPORT TEAM if the problem persists.\n");
+ }
+ if (sc->mrsas_ich.ich_arg != NULL) {
+ device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
+ config_intrhook_disestablish(&sc->mrsas_ich);
+ sc->mrsas_ich.ich_arg = NULL;
+ }
+}
+
+/*
* mrsas_detach: De-allocates and teardown resources
* input: pointer to device struct
*
@@ -971,6 +1015,8 @@ mrsas_detach(device_t dev)
sc->remove_in_progress = 1;
/* Destroy the character device so no other IOCTL will be handled */
+ if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev)
+ destroy_dev(sc->mrsas_linux_emulator_cdev);
destroy_dev(sc->mrsas_cdev);
/*
@@ -991,7 +1037,7 @@ mrsas_detach(device_t dev)
i++;
if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
mrsas_dprint(sc, MRSAS_INFO,
- "[%2d]waiting for ocr to be finished\n", i);
+			    "[%2d]waiting for OCR to finish, called from %s\n", i, __func__);
}
pause("mr_shutdown", hz);
}
@@ -1067,7 +1113,14 @@ mrsas_free_mem(struct mrsas_softc *sc)
if (sc->ld_drv_map[i] != NULL)
free(sc->ld_drv_map[i], M_MRSAS);
}
-
+ for (i = 0; i < 2; i++) {
+ if (sc->jbodmap_phys_addr[i])
+ bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]);
+ if (sc->jbodmap_mem[i] != NULL)
+ bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]);
+ if (sc->jbodmap_tag[i] != NULL)
+ bus_dma_tag_destroy(sc->jbodmap_tag[i]);
+ }
/*
* Free version buffer memory
*/
@@ -1229,9 +1282,7 @@ mrsas_teardown_intr(struct mrsas_softc *sc)
static int
mrsas_suspend(device_t dev)
{
- struct mrsas_softc *sc;
-
- sc = device_get_softc(dev);
+	/* This will be filled in when the driver gains hibernation support */
return (0);
}
@@ -1244,9 +1295,7 @@ mrsas_suspend(device_t dev)
static int
mrsas_resume(device_t dev)
{
- struct mrsas_softc *sc;
-
- sc = device_get_softc(dev);
+	/* This will be filled in when the driver gains hibernation support */
return (0);
}
@@ -1318,9 +1367,7 @@ mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
i++;
if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
mrsas_dprint(sc, MRSAS_INFO,
- "[%2d]waiting for "
- "OCR to be finished %d\n", i,
- sc->ocr_thread_active);
+				    "[%2d]waiting for OCR to finish, called from %s\n", i, __func__);
}
pause("mr_ioctl", hz);
}
@@ -1484,7 +1531,7 @@ mrsas_isr(void *arg)
* perform the appropriate action. Before we return, we clear the response
* interrupt.
*/
-static int
+int
mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
{
Mpi2ReplyDescriptorsUnion_t *desc;
@@ -1581,7 +1628,9 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
if (sc->msix_enable) {
if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY))
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24))
mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
((MSIxIndex & 0x7) << 24) |
sc->last_reply_idx[MSIxIndex]);
@@ -1603,7 +1652,9 @@ mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex)
/* Clear response interrupt */
if (sc->msix_enable) {
if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY)) {
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24)) {
mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8],
((MSIxIndex & 0x7) << 24) |
sc->last_reply_idx[MSIxIndex]);
@@ -1687,9 +1738,9 @@ mrsas_alloc_mem(struct mrsas_softc *sc)
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- MRSAS_MAX_IO_SIZE, /* maxsize */
- MRSAS_MAX_SGL, /* nsegments */
- MRSAS_MAX_IO_SIZE, /* maxsegsize */
+ MAXPHYS, /* maxsize */
+ sc->max_num_sge, /* nsegments */
+ MAXPHYS, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockarg */
&sc->mrsas_parent_tag /* tag */
@@ -1886,9 +1937,9 @@ mrsas_alloc_mem(struct mrsas_softc *sc)
BUS_SPACE_MAXADDR,
BUS_SPACE_MAXADDR,
NULL, NULL,
- MRSAS_MAX_IO_SIZE,
- MRSAS_MAX_SGL,
- MRSAS_MAX_IO_SIZE,
+ MAXPHYS,
+ sc->max_num_sge, /* nsegments */
+ MAXPHYS,
BUS_DMA_ALLOCNOW,
busdma_lock_mutex,
&sc->io_lock,
@@ -1990,6 +2041,78 @@ ABORT:
return (1);
}
+/**
+ * megasas_setup_jbod_map - setup jbod map for FP seq_number.
+ * @sc: Adapter soft state
+ *
+ * Enables use_seqnum_jbod_fp when the firmware supports the JBOD map.
+ */
+void
+megasas_setup_jbod_map(struct mrsas_softc *sc)
+{
+ int i;
+ uint32_t pd_seq_map_sz;
+
+ pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+ (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
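+	/*
+	 * With the packed layouts declared in mrsas.h (8-byte MR_PD_CFG_SEQ,
+	 * 16-byte MR_PD_CFG_SEQ_NUM_SYNC header that already contains one
+	 * entry) and MAX_PHYSICAL_DEVICES of 256, this works out to
+	 * 16 + 8 * 255 = 2056 bytes per map.
+	 */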
+
+ if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
+ sc->use_seqnum_jbod_fp = 0;
+ return;
+ }
+ if (sc->jbodmap_mem[0])
+ goto skip_alloc;
+
+ for (i = 0; i < 2; i++) {
+ if (bus_dma_tag_create(sc->mrsas_parent_tag,
+ 4, 0,
+ BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ pd_seq_map_sz,
+ 1,
+ pd_seq_map_sz,
+ BUS_DMA_ALLOCNOW,
+ NULL, NULL,
+ &sc->jbodmap_tag[i])) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate jbod map tag.\n");
+ return;
+ }
+ if (bus_dmamem_alloc(sc->jbodmap_tag[i],
+ (void **)&sc->jbodmap_mem[i],
+ BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate jbod map memory.\n");
+ return;
+ }
+ bzero(sc->jbodmap_mem[i], pd_seq_map_sz);
+
+ if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
+ sc->jbodmap_mem[i], pd_seq_map_sz,
+ mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
+ BUS_DMA_NOWAIT)) {
+ device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
+ return;
+ }
+ if (!sc->jbodmap_mem[i]) {
+ device_printf(sc->mrsas_dev,
+ "Cannot allocate memory for jbod map.\n");
+ sc->use_seqnum_jbod_fp = 0;
+ return;
+ }
+ }
+
+skip_alloc:
+ if (!megasas_sync_pd_seq_num(sc, false) &&
+ !megasas_sync_pd_seq_num(sc, true))
+ sc->use_seqnum_jbod_fp = 1;
+ else
+ sc->use_seqnum_jbod_fp = 0;
+
+ device_printf(sc->mrsas_dev, "Jbod map is supported\n");
+}
+
/*
* mrsas_init_fw: Initialize Firmware
* input: Adapter soft state
@@ -2089,18 +2212,28 @@ mrsas_init_fw(struct mrsas_softc *sc)
if (sc->secure_jbod_support)
device_printf(sc->mrsas_dev, "FW supports SED \n");
+ if (sc->use_seqnum_jbod_fp)
+ device_printf(sc->mrsas_dev, "FW supports JBOD Map \n");
+
if (mrsas_setup_raidmap(sc) != SUCCESS) {
- device_printf(sc->mrsas_dev, "Set up RAID map failed.\n");
- return (1);
+ device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! "
+		    "There seems to be some problem in the controller.\n"
+		    "Please contact the SUPPORT TEAM if the problem persists.\n");
}
+ megasas_setup_jbod_map(sc);
+
/* For pass-thru, get PD/LD list and controller info */
memset(sc->pd_list, 0,
MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
- mrsas_get_pd_list(sc);
-
+ if (mrsas_get_pd_list(sc) != SUCCESS) {
+ device_printf(sc->mrsas_dev, "Get PD list failed.\n");
+ return (1);
+ }
memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
- mrsas_get_ld_list(sc);
-
+ if (mrsas_get_ld_list(sc) != SUCCESS) {
+		device_printf(sc->mrsas_dev, "Get LD list failed.\n");
+ return (1);
+ }
/*
* Compute the max allowed sectors per IO: The controller info has
* two limits on max sectors. Driver should use the minimum of these
@@ -2150,7 +2283,7 @@ int
mrsas_init_adapter(struct mrsas_softc *sc)
{
uint32_t status;
- u_int32_t max_cmd;
+ u_int32_t max_cmd, scratch_pad_2;
int ret;
int i = 0;
@@ -2169,13 +2302,33 @@ mrsas_init_adapter(struct mrsas_softc *sc)
sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
- sc->chain_frames_alloc_sz = 1024 * max_cmd;
+ scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+ outbound_scratch_pad_2));
+ /*
+ * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
+	 * the Firmware supports an extended IO chain frame that is 4 times
+	 * larger than on legacy Firmware.  Legacy Firmware frame size is
+	 * (8 * 128) = 1K; 1M-IO Firmware frame size is (8 * 128 * 4) = 4K.
+ */
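+	/*
+	 * Worked example (hypothetical register value): if scratch_pad_2
+	 * reads 0x00400100, the units bit (0x400000) is set and the size
+	 * field ((0x100 & 0x3E0) >> 5) is 8, so max_chain_frame_sz becomes
+	 * 8 * MEGASAS_1MB_IO (512) = 4096 bytes; without the units bit the
+	 * same field would give 8 * MEGASAS_256K_IO (128) = 1024 bytes.
+	 */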
+ if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
+ sc->max_chain_frame_sz =
+ ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
+ * MEGASAS_1MB_IO;
+ else
+ sc->max_chain_frame_sz =
+ ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
+ * MEGASAS_256K_IO;
+
+ sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;
- sc->max_sge_in_chain = MRSAS_MAX_SZ_CHAIN_FRAME / sizeof(MPI2_SGE_IO_UNION);
+ sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;
+ mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
+ sc->max_num_sge, sc->max_chain_frame_sz);
+
/* Used for pass thru MFI frame (DCMD) */
sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;
@@ -2300,7 +2453,9 @@ mrsas_ioc_init(struct mrsas_softc *sc)
/* driver support Extended MSIX */
if ((sc->device_id == MRSAS_INVADER) ||
- (sc->device_id == MRSAS_FURY)) {
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24)) {
init_frame->driver_operations.
mfi_capabilities.support_additional_msix = 1;
}
@@ -2313,6 +2468,8 @@ mrsas_ioc_init(struct mrsas_softc *sc)
init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
+ if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
+ init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
init_frame->queue_info_new_phys_addr_lo = phys_addr;
init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);
@@ -2415,7 +2572,7 @@ mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
for (i = 0; i < max_cmd; i++) {
cmd = sc->mpt_cmd_list[i];
offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
- chain_offset = 1024 * i;
+ chain_offset = sc->max_chain_frame_sz * i;
sense_offset = MRSAS_SENSE_LEN * i;
memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
cmd->index = i + 1;
@@ -2626,16 +2783,20 @@ mrsas_ocr_thread(void *arg)
/* Sleep for 1 second and check the queue status */
msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO,
"mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz);
- if (sc->remove_in_progress) {
+ if (sc->remove_in_progress ||
+ sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
mrsas_dprint(sc, MRSAS_OCR,
- "Exit due to shutdown from %s\n", __func__);
+ "Exit due to %s from %s\n",
+ sc->remove_in_progress ? "Shutdown" :
+ "Hardware critical error", __func__);
break;
}
fw_status = mrsas_read_reg(sc,
offsetof(mrsas_reg_set, outbound_scratch_pad));
fw_state = fw_status & MFI_STATE_MASK;
if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset) {
- device_printf(sc->mrsas_dev, "OCR started due to %s!\n",
+ device_printf(sc->mrsas_dev, "%s started due to %s!\n",
+ sc->disableOnlineCtrlReset ? "Kill Adapter" : "OCR",
sc->do_timedout_reset ? "IO Timeout" :
"FW fault detected");
mtx_lock_spin(&sc->ioctl_lock);
@@ -2643,7 +2804,7 @@ mrsas_ocr_thread(void *arg)
sc->reset_count++;
mtx_unlock_spin(&sc->ioctl_lock);
mrsas_xpt_freeze(sc);
- mrsas_reset_ctrl(sc);
+ mrsas_reset_ctrl(sc, sc->do_timedout_reset);
mrsas_xpt_release(sc);
sc->reset_in_progress = 0;
sc->do_timedout_reset = 0;
@@ -2690,14 +2851,14 @@ mrsas_reset_reply_desc(struct mrsas_softc *sc)
* OCR, Re-fire Management command and move Controller to Operation state.
*/
int
-mrsas_reset_ctrl(struct mrsas_softc *sc)
+mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
{
int retval = SUCCESS, i, j, retry = 0;
u_int32_t host_diag, abs_state, status_reg, reset_adapter;
union ccb *ccb;
struct mrsas_mfi_cmd *mfi_cmd;
struct mrsas_mpt_cmd *mpt_cmd;
- MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ union mrsas_evt_class_locale class_locale;
if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
device_printf(sc->mrsas_dev,
@@ -2707,10 +2868,11 @@ mrsas_reset_ctrl(struct mrsas_softc *sc)
mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
mrsas_disable_intr(sc);
- DELAY(1000 * 1000);
+ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
+ sc->mrsas_fw_fault_check_delay * hz);
/* First try waiting for commands to complete */
- if (mrsas_wait_for_outstanding(sc)) {
+ if (mrsas_wait_for_outstanding(sc, reset_reason)) {
mrsas_dprint(sc, MRSAS_OCR,
"resetting adapter from %s.\n",
__func__);
@@ -2820,31 +2982,17 @@ mrsas_reset_ctrl(struct mrsas_softc *sc)
mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
continue;
}
- /* Re-fire management commands */
for (j = 0; j < sc->max_fw_cmds; j++) {
mpt_cmd = sc->mpt_cmd_list[j];
if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
- if (mfi_cmd->frame->dcmd.opcode ==
- MR_DCMD_LD_MAP_GET_INFO) {
- mrsas_release_mfi_cmd(mfi_cmd);
- mrsas_release_mpt_cmd(mpt_cmd);
- } else {
- req_desc = mrsas_get_request_desc(sc,
- mfi_cmd->cmd_id.context.smid - 1);
- mrsas_dprint(sc, MRSAS_OCR,
- "Re-fire command DCMD opcode 0x%x index %d\n ",
- mfi_cmd->frame->dcmd.opcode, j);
- if (!req_desc)
- device_printf(sc->mrsas_dev,
- "Cannot build MPT cmd.\n");
- else
- mrsas_fire_cmd(sc, req_desc->addr.u.low,
- req_desc->addr.u.high);
- }
+ mrsas_release_mfi_cmd(mfi_cmd);
+ mrsas_release_mpt_cmd(mpt_cmd);
}
}
+ sc->aen_cmd = NULL;
+
/* Reset load balance info */
memset(sc->load_balance_info, 0,
sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
@@ -2857,10 +3005,37 @@ mrsas_reset_ctrl(struct mrsas_softc *sc)
if (!mrsas_get_map_info(sc))
mrsas_sync_map_info(sc);
+ megasas_setup_jbod_map(sc);
+
+ memset(sc->pd_list, 0,
+ MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
+ if (mrsas_get_pd_list(sc) != SUCCESS) {
+		device_printf(sc->mrsas_dev, "Get PD list failed during OCR.\n"
+		    "The latest PD list will be fetched after OCR, on event.\n");
+ }
+ memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS);
+ if (mrsas_get_ld_list(sc) != SUCCESS) {
+		device_printf(sc->mrsas_dev, "Get LD list failed during OCR.\n"
+		    "The latest LD list will be fetched after OCR, on event.\n");
+ }
mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
mrsas_enable_intr(sc);
sc->adprecovery = MRSAS_HBA_OPERATIONAL;
+ /* Register AEN with FW for last sequence number */
+ class_locale.members.reserved = 0;
+ class_locale.members.locale = MR_EVT_LOCALE_ALL;
+ class_locale.members.class = MR_EVT_CLASS_DEBUG;
+
+ if (mrsas_register_aen(sc, sc->last_seq_num,
+ class_locale.word)) {
+ device_printf(sc->mrsas_dev,
+ "ERROR: AEN registration FAILED from OCR !!! "
+		    "Further events from the controller will not be communicated.\n"
+		    "Either there is some problem in the controller "
+		    "or the controller does not support AEN.\n"
+		    "Please contact the SUPPORT TEAM if the problem persists.\n");
+ }
/* Adapter reset completed successfully */
device_printf(sc->mrsas_dev, "Reset successful\n");
retval = SUCCESS;
@@ -2892,7 +3067,7 @@ void
mrsas_kill_hba(struct mrsas_softc *sc)
{
sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
- pause("mrsas_kill_hba", 1000);
+ DELAY(1000 * 1000);
mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
MFI_STOP_ADP);
@@ -2938,7 +3113,7 @@ mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
* completed.
*/
int
-mrsas_wait_for_outstanding(struct mrsas_softc *sc)
+mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
int i, outstanding, retval = 0;
u_int32_t fw_state, count, MSIxIndex;
@@ -2960,6 +3135,12 @@ mrsas_wait_for_outstanding(struct mrsas_softc *sc)
retval = 1;
goto out;
}
+ if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "DCMD IO TIMEOUT detected, will reset adapter.\n");
+ retval = 1;
+ goto out;
+ }
outstanding = mrsas_atomic_read(&sc->fw_outstanding);
if (!outstanding)
goto out;
@@ -3017,6 +3198,7 @@ static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
int retcode = 0;
+ u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
@@ -3046,15 +3228,26 @@ mrsas_get_ctrl_info(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);
- if (!mrsas_issue_polled(sc, cmd))
- memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
else
- retcode = 1;
+ memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));
+ do_ocr = 0;
mrsas_update_ext_vd_details(sc);
+ sc->use_seqnum_jbod_fp =
+ sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
+
+dcmd_timeout:
mrsas_free_ctlr_info_cmd(sc);
- mrsas_release_mfi_cmd(cmd);
+
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
return (retcode);
}
@@ -3173,7 +3366,7 @@ mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
struct mrsas_header *frame_hdr = &cmd->frame->hdr;
u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
- int i, retcode = 0;
+ int i, retcode = SUCCESS;
frame_hdr->cmd_status = 0xFF;
frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
@@ -3196,12 +3389,12 @@ mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
break;
}
}
- if (frame_hdr->cmd_status != 0) {
- if (frame_hdr->cmd_status == 0xFF)
- device_printf(sc->mrsas_dev, "DCMD timed out after %d seconds.\n", max_wait);
- else
- device_printf(sc->mrsas_dev, "DCMD failed, status = 0x%x\n", frame_hdr->cmd_status);
- retcode = 1;
+ if (frame_hdr->cmd_status == 0xFF) {
+ device_printf(sc->mrsas_dev, "DCMD timed out after %d "
+ "seconds from %s\n", max_wait, __func__);
+ device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
+ cmd->frame->dcmd.opcode);
+ retcode = ETIMEDOUT;
}
return (retcode);
}
@@ -3295,7 +3488,10 @@ mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cm
io_req = mpt_cmd->io_request;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24)) {
pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;
sgl_ptr_end += sc->max_sge_in_main_msg - 1;
@@ -3312,7 +3508,7 @@ mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cm
mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
- mpi25_ieee_chain->Length = MRSAS_MAX_SZ_CHAIN_FRAME;
+ mpi25_ieee_chain->Length = sc->max_chain_frame_sz;
return (0);
}
@@ -3330,10 +3526,10 @@ mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
unsigned long total_time = 0;
- int retcode = 0;
+ int retcode = SUCCESS;
/* Initialize cmd_status */
- cmd->cmd_status = ECONNREFUSED;
+ cmd->cmd_status = 0xFF;
/* Build MPT-MFI command for issue to FW */
if (mrsas_issue_dcmd(sc, cmd)) {
@@ -3343,18 +3539,30 @@ mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
sc->chan = (void *)&cmd;
while (1) {
- if (cmd->cmd_status == ECONNREFUSED) {
+ if (cmd->cmd_status == 0xFF) {
tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
} else
break;
- total_time++;
- if (total_time >= max_wait) {
- device_printf(sc->mrsas_dev,
- "Internal command timed out after %d seconds.\n", max_wait);
- retcode = 1;
- break;
+
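+		/*
+		 * Only internal (non-IOCTL) commands are bounded by max_wait;
+		 * IOCTL commands keep sleeping until the firmware completes
+		 * them or the OCR path cleans them up.
+		 */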
+		if (!cmd->sync_cmd) {	/* cmd->sync_cmd is set for an
+					 * IOCTL command */
+ total_time++;
+ if (total_time >= max_wait) {
+ device_printf(sc->mrsas_dev,
+ "Internal command timed out after %d seconds.\n", max_wait);
+ retcode = 1;
+ break;
+ }
}
}
+
+ if (cmd->cmd_status == 0xFF) {
+ device_printf(sc->mrsas_dev, "DCMD timed out after %d "
+ "seconds from %s\n", max_wait, __func__);
+ device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
+ cmd->frame->dcmd.opcode);
+ retcode = ETIMEDOUT;
+ }
return (retcode);
}
@@ -3405,6 +3613,7 @@ mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd
(cmd->frame->dcmd.mbox.b[1] == 1)) {
sc->fast_path_io = 0;
mtx_lock(&sc->raidmap_lock);
+ sc->map_update_cmd = NULL;
if (cmd_status != 0) {
if (cmd_status != MFI_STAT_NOT_FOUND)
device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
@@ -3428,6 +3637,28 @@ mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd
cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
sc->mrsas_aen_triggered = 0;
}
+ /* FW has an updated PD sequence */
+ if ((cmd->frame->dcmd.opcode ==
+ MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
+ (cmd->frame->dcmd.mbox.b[0] == 1)) {
+
+ mtx_lock(&sc->raidmap_lock);
+ sc->jbod_seq_cmd = NULL;
+ mrsas_release_mfi_cmd(cmd);
+
+ if (cmd_status == MFI_STAT_OK) {
+ sc->pd_seq_map_id++;
+ /* Re-register a pd sync seq num cmd */
+ if (megasas_sync_pd_seq_num(sc, true))
+ sc->use_seqnum_jbod_fp = 0;
+ } else {
+ sc->use_seqnum_jbod_fp = 0;
+ device_printf(sc->mrsas_dev,
+ "Jbod map sync failed, status=%x\n", cmd_status);
+ }
+ mtx_unlock(&sc->raidmap_lock);
+ break;
+ }
/* See if got an event notification */
if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
mrsas_complete_aen(sc, cmd);
@@ -3459,7 +3690,7 @@ mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
cmd->cmd_status = cmd->frame->io.cmd_status;
- if (cmd->cmd_status == ECONNREFUSED)
+ if (cmd->cmd_status == 0xFF)
cmd->cmd_status = 0;
sc->chan = (void *)&cmd;
@@ -3490,9 +3721,10 @@ mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
}
if (sc->aen_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
-
if (sc->map_update_cmd)
mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
+ if (sc->jbod_seq_cmd)
+ mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);
dcmd = &cmd->frame->dcmd;
memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -3554,6 +3786,85 @@ mrsas_flush_cache(struct mrsas_softc *sc)
return;
}
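+/*
+ * megasas_sync_pd_seq_num:  fetch or register the system PD sequence number
+ * map used for JBOD fast-path I/O.  With pend == false the map is read with
+ * a polled DCMD; with pend == true a pended DCMD is registered so the
+ * firmware can report sequence number changes asynchronously.
+ */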
+int
+megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
+{
+ int retcode = 0;
+ u_int8_t do_ocr = 1;
+ struct mrsas_mfi_cmd *cmd;
+ struct mrsas_dcmd_frame *dcmd;
+ uint32_t pd_seq_map_sz;
+ struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+ bus_addr_t pd_seq_h;
+
+ pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+ (sizeof(struct MR_PD_CFG_SEQ) *
+ (MAX_PHYSICAL_DEVICES - 1));
+
+ cmd = mrsas_get_mfi_cmd(sc);
+ if (!cmd) {
+ device_printf(sc->mrsas_dev,
+		    "Cannot alloc cmd for jbod map sync.\n");
+ return 1;
+ }
+ dcmd = &cmd->frame->dcmd;
+
+ pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
+ pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
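+	/*
+	 * pd_seq_map_id alternates between the two jbod map buffers (it is
+	 * incremented on each successful sync), so a new sync does not
+	 * overwrite the previously fetched copy.
+	 */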
+ if (!pd_sync) {
+ device_printf(sc->mrsas_dev,
+ "Failed to alloc mem for jbod map info.\n");
+ mrsas_release_mfi_cmd(cmd);
+ return (ENOMEM);
+ }
+ memset(pd_sync, 0, pd_seq_map_sz);
+ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+ dcmd->cmd = MFI_CMD_DCMD;
+ dcmd->cmd_status = 0xFF;
+ dcmd->sge_count = 1;
+ dcmd->timeout = 0;
+ dcmd->pad_0 = 0;
+ dcmd->data_xfer_len = (pd_seq_map_sz);
+ dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
+ dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
+ dcmd->sgl.sge32[0].length = (pd_seq_map_sz);
+
+ if (pend) {
+ dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
+ dcmd->flags = (MFI_FRAME_DIR_WRITE);
+ sc->jbod_seq_cmd = cmd;
+ if (mrsas_issue_dcmd(sc, cmd)) {
+ device_printf(sc->mrsas_dev,
+			    "Failed to send sync map info command.\n");
+ return 1;
+ } else
+ return 0;
+ } else
+ dcmd->flags = MFI_FRAME_DIR_READ;
+
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
+
+ if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
+ device_printf(sc->mrsas_dev,
+		    "driver supports at most %d JBOD devices, but FW reports %d\n",
+ MAX_PHYSICAL_DEVICES, pd_sync->count);
+ retcode = -EINVAL;
+ }
+ if (!retcode)
+ sc->pd_seq_map_id++;
+ do_ocr = 0;
+
+dcmd_timeout:
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
+ return (retcode);
+}
+
/*
* mrsas_get_map_info: Load and validate RAID map input:
* Adapter instance soft state
@@ -3623,14 +3934,11 @@ mrsas_get_ld_map_info(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
dcmd->sgl.sge32[0].length = sc->current_map_sz;
- if (!mrsas_issue_polled(sc, cmd))
- retcode = 0;
- else {
- device_printf(sc->mrsas_dev,
- "Fail to send get LD map info cmd.\n");
- retcode = 1;
- }
- mrsas_release_mfi_cmd(cmd);
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
return (retcode);
}
@@ -3715,6 +4023,7 @@ static int
mrsas_get_pd_list(struct mrsas_softc *sc)
{
int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size;
+ u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
struct MR_PD_LIST *pd_list_mem;
@@ -3736,6 +4045,8 @@ mrsas_get_pd_list(struct mrsas_softc *sc)
device_printf(sc->mrsas_dev,
"Cannot alloc dmamap for get PD list cmd\n");
mrsas_release_mfi_cmd(cmd);
+ mrsas_free_tmp_dcmd(tcmd);
+ free(tcmd, M_MRSAS);
return (ENOMEM);
} else {
pd_list_mem = tcmd->tmp_dcmd_mem;
@@ -3756,15 +4067,14 @@ mrsas_get_pd_list(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr;
dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST);
- if (!mrsas_issue_polled(sc, cmd))
- retcode = 0;
- else
- retcode = 1;
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
/* Get the instance PD list */
pd_count = MRSAS_MAX_PD;
pd_addr = pd_list_mem->addr;
- if (retcode == 0 && pd_list_mem->count < pd_count) {
+ if (pd_list_mem->count < pd_count) {
memset(sc->local_pd_list, 0,
MRSAS_MAX_PD * sizeof(struct mrsas_pd_list));
for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) {
@@ -3775,15 +4085,22 @@ mrsas_get_pd_list(struct mrsas_softc *sc)
MR_PD_STATE_SYSTEM;
pd_addr++;
}
+ /*
+ * Use mutext/spinlock if pd_list component size increase more than
+			 * Use a mutex/spinlock if the pd_list component size
+			 * increases beyond 32 bits.
+ memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
+ do_ocr = 0;
}
- /*
- * Use mutext/spinlock if pd_list component size increase more than
- * 32 bit.
- */
- memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list));
+dcmd_timeout:
mrsas_free_tmp_dcmd(tcmd);
- mrsas_release_mfi_cmd(cmd);
free(tcmd, M_MRSAS);
+
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
return (retcode);
}
@@ -3799,6 +4116,7 @@ static int
mrsas_get_ld_list(struct mrsas_softc *sc)
{
int ld_list_size, retcode = 0, ld_index = 0, ids = 0;
+ u_int8_t do_ocr = 1;
struct mrsas_mfi_cmd *cmd;
struct mrsas_dcmd_frame *dcmd;
struct MR_LD_LIST *ld_list_mem;
@@ -3819,6 +4137,8 @@ mrsas_get_ld_list(struct mrsas_softc *sc)
device_printf(sc->mrsas_dev,
"Cannot alloc dmamap for get LD list cmd\n");
mrsas_release_mfi_cmd(cmd);
+ mrsas_free_tmp_dcmd(tcmd);
+ free(tcmd, M_MRSAS);
return (ENOMEM);
} else {
ld_list_mem = tcmd->tmp_dcmd_mem;
@@ -3840,18 +4160,16 @@ mrsas_get_ld_list(struct mrsas_softc *sc)
dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST);
dcmd->pad_0 = 0;
- if (!mrsas_issue_polled(sc, cmd))
- retcode = 0;
- else
- retcode = 1;
+ retcode = mrsas_issue_polled(sc, cmd);
+ if (retcode == ETIMEDOUT)
+ goto dcmd_timeout;
#if VD_EXT_DEBUG
printf("Number of LDs %d\n", ld_list_mem->ldCount);
#endif
/* Get the instance LD list */
- if ((retcode == 0) &&
- (ld_list_mem->ldCount <= sc->fw_supported_vd_count)) {
+ if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) {
sc->CurLdCount = ld_list_mem->ldCount;
memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) {
@@ -3860,10 +4178,17 @@ mrsas_get_ld_list(struct mrsas_softc *sc)
sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId;
}
}
+ do_ocr = 0;
}
+dcmd_timeout:
mrsas_free_tmp_dcmd(tcmd);
- mrsas_release_mfi_cmd(cmd);
free(tcmd, M_MRSAS);
+
+ if (do_ocr)
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ else
+ mrsas_release_mfi_cmd(cmd);
+
return (retcode);
}
@@ -4019,7 +4344,7 @@ mrsas_aen_handler(struct mrsas_softc *sc)
union mrsas_evt_class_locale class_locale;
int doscan = 0;
u_int32_t seq_num;
- int error;
+ int error, fail_aen = 0;
if (sc == NULL) {
printf("invalid instance!\n");
@@ -4028,13 +4353,19 @@ mrsas_aen_handler(struct mrsas_softc *sc)
if (sc->evt_detail_mem) {
switch (sc->evt_detail_mem->code) {
case MR_EVT_PD_INSERTED:
- mrsas_get_pd_list(sc);
- mrsas_bus_scan_sim(sc, sc->sim_1);
+ fail_aen = mrsas_get_pd_list(sc);
+ if (!fail_aen)
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ else
+ goto skip_register_aen;
doscan = 0;
break;
case MR_EVT_PD_REMOVED:
- mrsas_get_pd_list(sc);
- mrsas_bus_scan_sim(sc, sc->sim_1);
+ fail_aen = mrsas_get_pd_list(sc);
+ if (!fail_aen)
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ else
+ goto skip_register_aen;
doscan = 0;
break;
case MR_EVT_LD_OFFLINE:
@@ -4044,8 +4375,11 @@ mrsas_aen_handler(struct mrsas_softc *sc)
doscan = 0;
break;
case MR_EVT_LD_CREATED:
- mrsas_get_ld_list(sc);
- mrsas_bus_scan_sim(sc, sc->sim_0);
+ fail_aen = mrsas_get_ld_list(sc);
+ if (!fail_aen)
+ mrsas_bus_scan_sim(sc, sc->sim_0);
+ else
+ goto skip_register_aen;
doscan = 0;
break;
case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
@@ -4062,12 +4396,19 @@ mrsas_aen_handler(struct mrsas_softc *sc)
return;
}
if (doscan) {
- mrsas_get_pd_list(sc);
- mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
- mrsas_bus_scan_sim(sc, sc->sim_1);
- mrsas_get_ld_list(sc);
- mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
- mrsas_bus_scan_sim(sc, sc->sim_0);
+ fail_aen = mrsas_get_pd_list(sc);
+ if (!fail_aen) {
+ mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
+ mrsas_bus_scan_sim(sc, sc->sim_1);
+ } else
+ goto skip_register_aen;
+
+ fail_aen = mrsas_get_ld_list(sc);
+ if (!fail_aen) {
+ mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
+ mrsas_bus_scan_sim(sc, sc->sim_0);
+ } else
+ goto skip_register_aen;
}
seq_num = sc->evt_detail_mem->seq_num + 1;
@@ -4087,6 +4428,9 @@ mrsas_aen_handler(struct mrsas_softc *sc)
if (error)
device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);
+skip_register_aen:
+ return;
+
}
diff --git a/sys/dev/mrsas/mrsas.h b/sys/dev/mrsas/mrsas.h
index 1fa2639..4d19b4f 100644
--- a/sys/dev/mrsas/mrsas.h
+++ b/sys/dev/mrsas/mrsas.h
@@ -80,6 +80,8 @@ __FBSDID("$FreeBSD$");
#define MRSAS_TBOLT 0x005b
#define MRSAS_INVADER 0x005d
#define MRSAS_FURY 0x005f
+#define MRSAS_INTRUDER 0x00ce
+#define MRSAS_INTRUDER_24 0x00cf
#define MRSAS_PCI_BAR0 0x10
#define MRSAS_PCI_BAR1 0x14
#define MRSAS_PCI_BAR2 0x1C
@@ -102,7 +104,7 @@ __FBSDID("$FreeBSD$");
*/
#define BYTE_ALIGNMENT 1
#define MRSAS_MAX_NAME_LENGTH 32
-#define MRSAS_VERSION "06.707.05.00-fbsd"
+#define MRSAS_VERSION "06.709.07.00-fbsd"
#define MRSAS_ULONG_MAX 0xFFFFFFFFFFFFFFFF
#define MRSAS_DEFAULT_TIMEOUT 0x14 /* Temporarily set */
#define DONE 0
@@ -166,7 +168,9 @@ typedef struct _RAID_CONTEXT {
u_int8_t numSGE;
u_int16_t configSeqNum;
u_int8_t spanArm;
- u_int8_t resvd2[3];
+ u_int8_t priority; /* 0x1D MR_PRIORITY_RANGE */
+ u_int8_t numSGEExt; /* 0x1E 1M IO support */
+ u_int8_t resvd2; /* 0x1F */
} RAID_CONTEXT;
@@ -577,6 +581,7 @@ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest_t;
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
+#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
#define MRSAS_MAX_PD_CHANNELS 1
@@ -863,6 +868,22 @@ struct IO_REQUEST_INFO {
u_int8_t pd_after_lb;
};
+/*
+ * define MR_PD_CFG_SEQ structure for system PDs
+ */
+struct MR_PD_CFG_SEQ {
+ u_int16_t seqNum;
+ u_int16_t devHandle;
+ u_int8_t reserved[4];
+} __packed;
+
+struct MR_PD_CFG_SEQ_NUM_SYNC {
+ u_int32_t size;
+ u_int32_t count;
+ struct MR_PD_CFG_SEQ seq[1];
+} __packed;
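+/*
+ * Each MR_PD_CFG_SEQ entry is 8 bytes; a full map covers
+ * MAX_PHYSICAL_DEVICES entries, so the buffers allocated in
+ * megasas_setup_jbod_map() extend the single-element seq[] array
+ * accordingly.
+ */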
+
+
typedef struct _MR_LD_TARGET_SYNC {
u_int8_t targetId;
u_int8_t reserved;
@@ -1223,7 +1244,7 @@ enum MR_EVT_ARGS {
/*
* Thunderbolt (and later) Defines
*/
-#define MRSAS_MAX_SZ_CHAIN_FRAME 1024
+#define MEGASAS_CHAIN_FRAME_SZ_MIN 1024
#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
#define MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
#define MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
@@ -1301,10 +1322,13 @@ typedef enum _REGION_TYPE {
#define MRSAS_SCSI_MAX_CMDS 8
#define MRSAS_SCSI_MAX_CDB_LEN 16
#define MRSAS_SCSI_SENSE_BUFFERSIZE 96
-#define MRSAS_MAX_SGL 70
-#define MRSAS_MAX_IO_SIZE (256 * 1024)
#define MRSAS_INTERNAL_CMDS 32
+#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000
+#define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0
+#define MEGASAS_256K_IO 128
+#define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4)
+
/* Request types */
#define MRSAS_REQ_TYPE_INTERNAL_CMD 0x0
#define MRSAS_REQ_TYPE_AEN_FETCH 0x1
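The MEGASAS_MAX_CHAIN_SIZE_* and MEGASAS_*_IO macros added above are the pieces needed to decode the chain frame size the firmware advertises; the actual derivation of sc->max_chain_frame_sz happens in mrsas.c and is not part of this hunk. A hedged sketch of the usual decode, mirroring the Linux megaraid_sas pattern (the shift value and the function below are assumptions, not code from this commit):

	#define MEGASAS_MAX_CHAIN_SHIFT	5	/* assumed shift for the 0x3E0 mask */

	static u_int32_t
	decode_max_chain_frame_sz(u_int32_t scratch_pad_2)
	{
		u_int32_t units = (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
		    MEGASAS_MAX_CHAIN_SHIFT;

		if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
			return (units * MEGASAS_1MB_IO);	/* counted in 1MB-IO units */
		return (units * MEGASAS_256K_IO);		/* counted in 256KB-IO units */
	}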
@@ -1927,7 +1951,12 @@ struct mrsas_ctrl_info {
u_int32_t supportCacheBypassModes:1;
u_int32_t supportSecurityonJBOD:1;
u_int32_t discardCacheDuringLDDelete:1;
- u_int32_t reserved:12;
+ u_int32_t supportTTYLogCompression:1;
+ u_int32_t supportCPLDUpdate:1;
+ u_int32_t supportDiskCacheSettingForSysPDs:1;
+ u_int32_t supportExtendedSSCSize:1;
+ u_int32_t useSeqNumJbodFP:1;
+ u_int32_t reserved:7;
} adapterOperations3;
u_int8_t pad[0x800 - 0x7EC]; /* 0x7EC */
@@ -2001,7 +2030,9 @@ typedef union _MFI_CAPABILITIES {
u_int32_t support_ndrive_r1_lb:1;
u_int32_t support_core_affinity:1;
u_int32_t security_protocol_cmds_fw:1;
- u_int32_t reserved:25;
+ u_int32_t support_ext_queue_depth:1;
+ u_int32_t support_ext_io_size:1;
+ u_int32_t reserved:23;
} mfi_capabilities;
u_int32_t reg;
} MFI_CAPABILITIES;
@@ -2435,6 +2466,12 @@ struct mrsas_irq_context {
uint32_t MSIxIndex;
};
+enum MEGASAS_OCR_REASON {
+ FW_FAULT_OCR = 0,
+ SCSIIO_TIMEOUT_OCR = 1,
+ MFI_DCMD_TIMEOUT_OCR = 2,
+};
+
/* Controller management info added to support Linux Emulator */
#define MAX_MGMT_ADAPTERS 1024
@@ -2611,6 +2648,8 @@ typedef struct _MRSAS_DRV_PCI_INFORMATION {
struct mrsas_softc {
device_t mrsas_dev;
struct cdev *mrsas_cdev;
+ struct intr_config_hook mrsas_ich;
+ struct cdev *mrsas_linux_emulator_cdev;
uint16_t device_id;
struct resource *reg_res;
int reg_res_id;
@@ -2669,6 +2708,7 @@ struct mrsas_softc {
int msix_enable;
uint32_t msix_reg_offset[16];
uint8_t mask_interrupts;
+ uint16_t max_chain_frame_sz;
struct mrsas_mpt_cmd **mpt_cmd_list;
struct mrsas_mfi_cmd **mfi_cmd_list;
TAILQ_HEAD(, mrsas_mpt_cmd) mrsas_mpt_cmd_list_head;
@@ -2691,7 +2731,9 @@ struct mrsas_softc {
u_int8_t chain_offset_mfi_pthru;
u_int32_t map_sz;
u_int64_t map_id;
+ u_int64_t pd_seq_map_id;
struct mrsas_mfi_cmd *map_update_cmd;
+ struct mrsas_mfi_cmd *jbod_seq_cmd;
struct mrsas_mfi_cmd *aen_cmd;
u_int8_t fast_path_io;
void *chan;
@@ -2702,6 +2744,12 @@ struct mrsas_softc {
u_int8_t do_timedout_reset;
u_int32_t reset_in_progress;
u_int32_t reset_count;
+
+ bus_dma_tag_t jbodmap_tag[2];
+ bus_dmamap_t jbodmap_dmamap[2];
+ void *jbodmap_mem[2];
+ bus_addr_t jbodmap_phys_addr[2];
+
bus_dma_tag_t raidmap_tag[2];
bus_dmamap_t raidmap_dmamap[2];
void *raidmap_mem[2];
@@ -2745,6 +2793,7 @@ struct mrsas_softc {
LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
u_int8_t secure_jbod_support;
+ u_int8_t use_seqnum_jbod_fp;
u_int8_t max256vdSupport;
u_int16_t fw_supported_vd_count;
u_int16_t fw_supported_pd_count;
diff --git a/sys/dev/mrsas/mrsas_cam.c b/sys/dev/mrsas/mrsas_cam.c
index d6d4ad5..0cf00376 100644
--- a/sys/dev/mrsas/mrsas_cam.c
+++ b/sys/dev/mrsas/mrsas_cam.c
@@ -65,11 +65,14 @@ int
mrsas_map_request(struct mrsas_softc *sc,
struct mrsas_mpt_cmd *cmd, union ccb *ccb);
int
-mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb);
int
-mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb, struct cam_sim *sim);
+mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb);
+int
+mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible);
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb, u_int32_t device_id,
@@ -121,6 +124,7 @@ mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
extern u_int8_t
megasas_get_best_arm(PLD_LOAD_BALANCE_INFO lbInfo, u_int8_t arm,
u_int64_t block, u_int32_t count);
+extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex);
/*
@@ -341,7 +345,7 @@ mrsas_action(struct cam_sim *sim, union ccb *ccb)
else
ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
#if (__FreeBSD_version > 704000)
- ccb->cpi.maxio = MRSAS_MAX_IO_SIZE;
+ ccb->cpi.maxio = sc->max_num_sge * MRSAS_PAGE_SIZE;
#endif
ccb->ccb_h.status = CAM_REQ_CMP;
xpt_done(ccb);
@@ -392,7 +396,7 @@ mrsas_scsiio_timeout(void *data)
callout_reset(&cmd->cm_callout, (600000 * hz) / 1000,
mrsas_scsiio_timeout, cmd);
#endif
- sc->do_timedout_reset = 1;
+ sc->do_timedout_reset = SCSIIO_TIMEOUT_OCR;
if (sc->ocr_thread_active)
wakeup(&sc->ocr_chan);
}
@@ -415,6 +419,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
struct ccb_scsiio *csio = &(ccb->csio);
MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
+ u_int8_t cmd_type;
if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) {
ccb->ccb_h.status = CAM_REQ_CMP;
@@ -458,7 +463,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
ccb_h->status = CAM_REQ_INVALID;
goto done;
case CAM_DATA_VADDR:
- if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
mrsas_release_mpt_cmd(cmd);
ccb_h->status = CAM_REQ_TOO_BIG;
goto done;
@@ -468,6 +473,11 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
cmd->data = csio->data_ptr;
break;
case CAM_DATA_BIO:
+ if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
+ mrsas_release_mpt_cmd(cmd);
+ ccb_h->status = CAM_REQ_TOO_BIG;
+ goto done;
+ }
cmd->length = csio->dxfer_len;
if (cmd->length)
cmd->data = csio->data_ptr;
@@ -479,7 +489,7 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
#else
if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Virtual data address */
if (!(ccb_h->flags & CAM_SCATTER_VALID)) {
- if (csio->dxfer_len > MRSAS_MAX_IO_SIZE) {
+ if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) {
mrsas_release_mpt_cmd(cmd);
ccb_h->status = CAM_REQ_TOO_BIG;
goto done;
@@ -517,19 +527,44 @@ mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
mtx_lock(&sc->raidmap_lock);
/* Check for IO type READ-WRITE targeted for Logical Volume */
- if (mrsas_find_io_type(sim, ccb) == READ_WRITE_LDIO) {
+ cmd_type = mrsas_find_io_type(sim, ccb);
+ switch (cmd_type) {
+ case READ_WRITE_LDIO:
/* Build READ-WRITE IO for Logical Volume */
- if (mrsas_build_ldio(sc, cmd, ccb)) {
- device_printf(sc->mrsas_dev, "Build LDIO failed.\n");
+ if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
+ device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
mtx_unlock(&sc->raidmap_lock);
return (1);
}
- } else {
- if (mrsas_build_dcdb(sc, cmd, ccb, sim)) {
- device_printf(sc->mrsas_dev, "Build DCDB failed.\n");
+ break;
+ case NON_READ_WRITE_LDIO:
+ /* Build NON READ-WRITE IO for Logical Volume */
+ if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
+ device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
mtx_unlock(&sc->raidmap_lock);
return (1);
}
+ break;
+ case READ_WRITE_SYSPDIO:
+ case NON_READ_WRITE_SYSPDIO:
+ if (sc->secure_jbod_support &&
+ (cmd_type == NON_READ_WRITE_SYSPDIO)) {
+ /* Build NON-RW IO for JBOD */
+ if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
+ device_printf(sc->mrsas_dev,
+ "Build SYSPDIO failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return (1);
+ }
+ } else {
+ /* Build RW IO for JBOD */
+ if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
+ device_printf(sc->mrsas_dev,
+ "Build SYSPDIO failed.\n");
+ mtx_unlock(&sc->raidmap_lock);
+ return (1);
+ }
+ }
}
mtx_unlock(&sc->raidmap_lock);
@@ -614,7 +649,10 @@ mrsas_get_mpt_cmd(struct mrsas_softc *sc)
if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) {
cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head);
TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next);
+ } else {
+ goto out;
}
+
memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
cmd->data = NULL;
cmd->length = 0;
@@ -622,8 +660,9 @@ mrsas_get_mpt_cmd(struct mrsas_softc *sc)
cmd->error_code = 0;
cmd->load_balance = 0;
cmd->ccb_ptr = NULL;
- mtx_unlock(&sc->mpt_cmd_pool_lock);
+out:
+ mtx_unlock(&sc->mpt_cmd_pool_lock);
return cmd;
}
@@ -668,7 +707,7 @@ mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
}
/*
- * mrsas_build_ldio: Builds an LDIO command
+ * mrsas_build_ldio_rw: Builds an LDIO command
* input: Adapter instance soft state
* Pointer to command packet
* Pointer to CCB
@@ -677,7 +716,7 @@ mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index)
* built successfully, otherwise it returns a 1.
*/
int
-mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
union ccb *ccb)
{
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
@@ -701,12 +740,18 @@ mrsas_build_ldio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
io_request->DataLength = cmd->length;
if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
- if (cmd->sge_count > MRSAS_MAX_SGL) {
+ if (cmd->sge_count > sc->max_num_sge) {
device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds"
"max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
return (FAIL);
}
+		/*
+		 * numSGE stores the lower 8 bits of sge_count; numSGEExt
+		 * stores the upper 8 bits of sge_count.
+		 */
io_request->RaidContext.numSGE = cmd->sge_count;
+ io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
+
} else {
device_printf(sc->mrsas_dev, "Data map/load failed.\n");
return (FAIL);
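The numSGE/numSGEExt pair introduced in the RAID_CONTEXT change widens the usable SGE count to 16 bits for 1M I/O support. A minimal illustration of the split the hunks above perform (standalone sketch, not driver code):

	#include <stdint.h>

	/* Pack a 16-bit SGE count into the two 8-bit RAID context fields. */
	static void
	pack_sge_count(uint16_t sge_count, uint8_t *num_sge, uint8_t *num_sge_ext)
	{
		*num_sge = (uint8_t)(sge_count & 0xFF);		/* lower 8 bits */
		*num_sge_ext = (uint8_t)(sge_count >> 8);	/* upper 8 bits */
	}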
@@ -832,7 +877,10 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24)) {
if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
@@ -861,7 +909,10 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
cmd->request_desc->SCSIIO.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_LD_IO <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24)) {
if (io_request->RaidContext.regLockFlags == REGION_TYPE_UNUSED)
cmd->request_desc->SCSIIO.RequestFlags =
(MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
@@ -879,78 +930,141 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
}
/*
- * mrsas_build_dcdb: Builds an DCDB command
+ * mrsas_build_ldio_nonrw: Builds an LDIO command
* input: Adapter instance soft state
* Pointer to command packet
* Pointer to CCB
*
- * This function builds the DCDB inquiry command. It returns 0 if the command
- * is built successfully, otherwise it returns a 1.
+ * This function builds the LDIO command packet. It returns 0 if the command is
+ * built successfully, otherwise it returns 1.
*/
int
-mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
- union ccb *ccb, struct cam_sim *sim)
+mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb)
{
struct ccb_hdr *ccb_h = &(ccb->ccb_h);
u_int32_t device_id;
- MR_DRV_RAID_MAP_ALL *map_ptr;
MRSAS_RAID_SCSI_IO_REQUEST *io_request;
io_request = cmd->io_request;
device_id = ccb_h->target_id;
- map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
- /*
- * Check if this is RW for system PD or
- * it's a NON RW for sys PD and there is NO secure jbod FW support
- */
- if (cam_sim_bus(sim) == 1 &&
- sc->pd_list[device_id].driveState == MR_PD_STATE_SYSTEM) {
+ /* FW path for LD Non-RW (SCSI management commands) */
+ io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
+ io_request->DevHandle = device_id;
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- io_request->DevHandle =
- map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
- io_request->RaidContext.RAIDFlags =
- MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
- MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
- cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
- cmd->request_desc->SCSIIO.MSIxIndex =
- sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
-
- if (sc->secure_jbod_support && (mrsas_find_io_type(sim, ccb) == NON_READ_WRITE_SYSPDIO)) {
- /* system pd firmware path */
- io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
- } else {
- /* system pd fast path */
- io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
- io_request->RaidContext.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
- io_request->RaidContext.regLockFlags = 0;
- io_request->RaidContext.regLockRowLBA = 0;
- io_request->RaidContext.regLockLength = 0;
-
- cmd->request_desc->SCSIIO.RequestFlags =
- (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
- MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->LUN[1] = ccb_h->target_lun & 0xF;
+ io_request->DataLength = cmd->length;
- /*
- * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
- * Because the NON RW cmds will now go via FW Queue
- * and not the Exception queue
- */
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
- io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+ if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
+ if (cmd->sge_count > sc->max_num_sge) {
+			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
+			    "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
+ return (1);
}
+		/*
+		 * numSGE stores the lower 8 bits of sge_count; numSGEExt
+		 * stores the upper 8 bits of sge_count.
+		 */
+ io_request->RaidContext.numSGE = cmd->sge_count;
+ io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
} else {
- /* FW path for SysPD or LD Non-RW (SCSI management commands) */
+ device_printf(sc->mrsas_dev, "Data map/load failed.\n");
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * mrsas_build_syspdio: Builds a DCDB command
+ * input: Adapter instance soft state
+ * Pointer to command packet
+ * Pointer to CCB
+ *
+ * This function builds the DCDB inquiry command. It returns 0 if the command
+ * is built successfully, otherwise it returns 1.
+ */
+int
+mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
+ union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible)
+{
+ struct ccb_hdr *ccb_h = &(ccb->ccb_h);
+ u_int32_t device_id;
+ MR_DRV_RAID_MAP_ALL *local_map_ptr;
+ MRSAS_RAID_SCSI_IO_REQUEST *io_request;
+ struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
+
+ pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1];
+
+ io_request = cmd->io_request;
+ device_id = ccb_h->target_id;
+ local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
+ io_request->RaidContext.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
+ << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
+ io_request->RaidContext.regLockFlags = 0;
+ io_request->RaidContext.regLockRowLBA = 0;
+ io_request->RaidContext.regLockLength = 0;
+
+ /* If FW supports PD sequence number */
+ if (sc->use_seqnum_jbod_fp &&
+ sc->pd_list[device_id].driveType == 0x00) {
+ //printf("Using Drv seq num\n");
+ io_request->RaidContext.VirtualDiskTgtId = device_id + 255;
+ io_request->RaidContext.configSeqNum = pd_sync->seq[device_id].seqNum;
+ io_request->DevHandle = pd_sync->seq[device_id].devHandle;
+ io_request->RaidContext.regLockFlags |=
+ (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
+ io_request->RaidContext.Type = MPI2_TYPE_CUDA;
+ io_request->RaidContext.nseg = 0x1;
+ } else if (sc->fast_path_io) {
+ //printf("Using LD RAID map\n");
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.configSeqNum = 0;
+ local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)];
+ io_request->DevHandle =
+ local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
+ } else {
+ //printf("Using FW PATH\n");
+ /* Want to send all IO via FW path */
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ io_request->RaidContext.configSeqNum = 0;
+ io_request->DevHandle = 0xFFFF;
+ }
+
+ cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
+ cmd->request_desc->SCSIIO.MSIxIndex =
+ sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0;
+
+ if (!fp_possible) {
+ /* system pd firmware path */
io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST;
- io_request->DevHandle = device_id;
cmd->request_desc->SCSIIO.RequestFlags =
(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
+ io_request->RaidContext.timeoutValue =
+ local_map_ptr->raidMap.fpPdIoTimeoutSec;
+ io_request->RaidContext.VirtualDiskTgtId = device_id;
+ } else {
+ /* system pd fast path */
+ io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ io_request->RaidContext.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec;
+
+ /*
+ * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH
+ * Because the NON RW cmds will now go via FW Queue
+ * and not the Exception queue
+ */
+ io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
+
+ cmd->request_desc->SCSIIO.RequestFlags =
+ (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
+ MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
}
- io_request->RaidContext.VirtualDiskTgtId = device_id;
io_request->LUN[1] = ccb_h->target_lun & 0xF;
io_request->DataLength = cmd->length;
@@ -960,7 +1074,12 @@ mrsas_build_dcdb(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
"max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
return (1);
}
+		/*
+		 * numSGE stores the lower 8 bits of sge_count; numSGEExt
+		 * stores the upper 8 bits of sge_count.
+		 */
io_request->RaidContext.numSGE = cmd->sge_count;
+ io_request->RaidContext.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
} else {
device_printf(sc->mrsas_dev, "Data map/load failed.\n");
return (1);
@@ -1069,7 +1188,10 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
io_request = cmd->io_request;
sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24)) {
pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr;
sgl_ptr_end += sc->max_sge_in_main_msg - 1;
@@ -1080,7 +1202,10 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
sgl_ptr->Address = segs[i].ds_addr;
sgl_ptr->Length = segs[i].ds_len;
sgl_ptr->Flags = 0;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24)) {
if (i == nseg - 1)
sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
}
@@ -1090,7 +1215,10 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
(nseg > sc->max_sge_in_main_msg)) {
pMpi25IeeeSgeChain64_t sg_chain;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY)) {
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24)) {
if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
!= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
cmd->io_request->ChainOffset = sc->chain_offset_io_request;
@@ -1099,7 +1227,10 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
} else
cmd->io_request->ChainOffset = sc->chain_offset_io_request;
sg_chain = sgl_ptr;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24))
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
else
sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
@@ -1170,9 +1301,16 @@ mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
static void
mrsas_cam_poll(struct cam_sim *sim)
{
+ int i;
struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);
- mrsas_isr((void *)sc);
+	if (sc->msix_vectors != 0) {
+		for (i = 0; i < sc->msix_vectors; i++) {
+			mrsas_complete_cmd(sc, i);
+		}
+	} else {
+		mrsas_complete_cmd(sc, 0);
+	}
}
/*
diff --git a/sys/dev/mrsas/mrsas_fp.c b/sys/dev/mrsas/mrsas_fp.c
index 7ae5662..470b0f7 100644
--- a/sys/dev/mrsas/mrsas_fp.c
+++ b/sys/dev/mrsas/mrsas_fp.c
@@ -749,7 +749,10 @@ mr_spanset_get_phy_params(struct mrsas_softc *sc, u_int32_t ld, u_int64_t stripR
u_int32_t logArm, rowMod, armQ, arm;
u_int8_t do_invader = 0;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24))
do_invader = 1;
/* Get row and span from io_info for Uneven Span IO. */
@@ -960,7 +963,10 @@ MR_BuildRaidContext(struct mrsas_softc *sc, struct IO_REQUEST_INFO *io_info,
regSize += stripSize;
}
pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24))
pRAID_Context->regLockFlags = (isRead) ? raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
else
pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
@@ -1309,12 +1315,6 @@ mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, u_int8_t cdb_len,
cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
break;
- case 12:
- cdb[5] = (u_int8_t)(start_blk & 0xff);
- cdb[4] = (u_int8_t)((start_blk >> 8) & 0xff);
- cdb[3] = (u_int8_t)((start_blk >> 16) & 0xff);
- cdb[2] = (u_int8_t)((start_blk >> 24) & 0xff);
- break;
case 16:
cdb[9] = (u_int8_t)(start_blk & 0xff);
cdb[8] = (u_int8_t)((start_blk >> 8) & 0xff);
@@ -1451,7 +1451,10 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
u_int32_t rowMod, armQ, arm, logArm;
u_int8_t do_invader = 0;
- if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY))
+ if ((sc->device_id == MRSAS_INVADER) ||
+ (sc->device_id == MRSAS_FURY) ||
+ (sc->device_id == MRSAS_INTRUDER) ||
+ (sc->device_id == MRSAS_INTRUDER_24))
do_invader = 1;
row = mega_div64_32(stripRow, raid->rowDataSize);
diff --git a/sys/dev/mrsas/mrsas_ioctl.c b/sys/dev/mrsas/mrsas_ioctl.c
index 6258e63..c9b190e9 100644
--- a/sys/dev/mrsas/mrsas_ioctl.c
+++ b/sys/dev/mrsas/mrsas_ioctl.c
@@ -138,6 +138,11 @@ mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
kern_sge32 = (struct mrsas_sge32 *)
((unsigned long)cmd->frame + user_ioc->sgl_off);
+ memset(ioctl_data_tag, 0, (sizeof(bus_dma_tag_t) * MAX_IOCTL_SGE));
+ memset(ioctl_data_dmamap, 0, (sizeof(bus_dmamap_t) * MAX_IOCTL_SGE));
+ memset(ioctl_data_mem, 0, (sizeof(void *) * MAX_IOCTL_SGE));
+ memset(ioctl_data_phys_addr, 0, (sizeof(bus_addr_t) * MAX_IOCTL_SGE));
+
/*
* For each user buffer, create a mirror buffer and copy in
*/
@@ -246,7 +251,14 @@ mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
* cmd to the SCSI mid-layer
*/
cmd->sync_cmd = 1;
- mrsas_issue_blocked_cmd(sc, cmd);
+ ret = mrsas_issue_blocked_cmd(sc, cmd);
+ if (ret == ETIMEDOUT) {
+ mrsas_dprint(sc, MRSAS_OCR,
+ "IOCTL command is timed out, initiating OCR\n");
+ sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
+ ret = EAGAIN;
+ goto out;
+ }
cmd->sync_cmd = 0;
/*
@@ -435,6 +447,17 @@ mrsas_create_frame_pool(struct mrsas_softc *sc)
device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
return (ENOMEM);
}
+		/*
+		 * For MFI controllers:
+		 * max_num_sge = 60
+		 * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
+		 * Total 960 bytes (15 MFI frames of 64 bytes)
+		 *
+		 * Fusion adapters require only 3 extra frames:
+		 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
+		 * max_sge_sz = 12 bytes (sizeof megasas_sge64)
+		 * Total 192 bytes (3 MFI frames of 64 bytes)
+		 */
memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
cmd->frame->io.context = cmd->index;
cmd->frame->io.pad_0 = 0;
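The frame-pool comment above works out how many extra 64-byte MFI frames the SGL needs, and the numbers follow directly from it. A small sketch of the same computation (helper name is illustrative only):

	/* Extra 64-byte MFI frames needed to hold an ioctl SGL. */
	#define MFI_FRAME_SZ	64

	static unsigned int
	extra_mfi_frames(unsigned int max_num_sge, unsigned int max_sge_sz)
	{
		unsigned int sgl_bytes = max_num_sge * max_sge_sz;

		return ((sgl_bytes + MFI_FRAME_SZ - 1) / MFI_FRAME_SZ);
	}

	/* extra_mfi_frames(60, 16) == 15 (MFI); extra_mfi_frames(16, 12) == 3 (Fusion). */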
diff --git a/sys/dev/pci/pci_host_generic.c b/sys/dev/pci/pci_host_generic.c
index 617f2f6..8ecf9c6 100644
--- a/sys/dev/pci/pci_host_generic.c
+++ b/sys/dev/pci/pci_host_generic.c
@@ -613,6 +613,7 @@ generic_pcie_activate_resource(device_t dev, device_t child, int type, int rid,
}
if (found) {
rman_set_start(r, rman_get_start(r) + phys_base);
+ rman_set_end(r, rman_get_end(r) + phys_base);
BUS_ACTIVATE_RESOURCE(device_get_parent(dev), child,
type, rid, r);
} else {
diff --git a/sys/dev/sfxge/common/hunt_ev.c b/sys/dev/sfxge/common/ef10_ev.c
index 1a41b49..0110c86 100644
--- a/sys/dev/sfxge/common/hunt_ev.c
+++ b/sys/dev/sfxge/common/ef10_ev.c
@@ -37,7 +37,7 @@ __FBSDID("$FreeBSD$");
#include "mcdi_mon.h"
#endif
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_QSTATS
#define EFX_EV_QSTAT_INCR(_eep, _stat) \
@@ -985,4 +985,4 @@ ef10_ev_rxlabel_fini(
eersp->eers_rx_mask = 0;
}
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_filter.c b/sys/dev/sfxge/common/ef10_filter.c
index f125936..3795142 100644
--- a/sys/dev/sfxge/common/hunt_filter.c
+++ b/sys/dev/sfxge/common/ef10_filter.c
@@ -34,7 +34,7 @@ __FBSDID("$FreeBSD$");
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_FILTER
@@ -1121,6 +1121,7 @@ ef10_filter_insert_multicast_list(
}
eftp->eft_mulcst_filter_count = filter_count;
+ eftp->eft_using_all_mulcst = B_FALSE;
return (0);
@@ -1160,6 +1161,7 @@ ef10_filter_insert_all_multicast(
goto fail1;
eftp->eft_mulcst_filter_count = 1;
+ eftp->eft_using_all_mulcst = B_TRUE;
/*
* FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic.
@@ -1173,9 +1175,23 @@ fail1:
return (rc);
}
+static void
+ef10_filter_remove_old(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ uint32_t i;
+
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ (void) ef10_filter_delete_internal(enp, i);
+ }
+ }
+}
+
static __checkReturn efx_rc_t
-hunt_filter_get_workarounds(
+ef10_filter_get_workarounds(
__in efx_nic_t *enp)
{
efx_nic_cfg_t *encp = &enp->en_nic_cfg;
@@ -1227,6 +1243,7 @@ ef10_filter_reconfigure(
__in_ecount(6*count) uint8_t const *addrs,
__in uint32_t count)
{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
efx_filter_flag_t filter_flags;
unsigned i;
@@ -1302,20 +1319,42 @@ ef10_filter_reconfigure(
* filters, and can only be enabled or disabled when the hardware filter
* table is empty.
*
+ * Chained multicast filters require support from the datapath firmware,
+ * and may not be available (e.g. low-latency variants or old Huntington
+ * firmware).
+ *
* Firmware will reset (FLR) functions which have inserted filters in
* the hardware filter table when the workaround is enabled/disabled.
* Functions without any hardware filters are not reset.
*
* Re-check if the workaround is enabled after adding unicast hardware
- * filters. This ensures that encp->enc_workaround_bug26807 matches the
+ * filters. This ensures that encp->enc_bug26807_workaround matches the
* firmware state, and that later changes to enable/disable the
* workaround will result in this function seeing a reset (FLR).
*
- * FIXME: On Medford multicast chaining should always be on.
+ * In common-code drivers, we only support multiple PCI function
+ * scenarios with firmware that supports multicast chaining, so we can
+ * assume it is enabled for such cases and hence simplify the filter
+ * insertion logic. Firmware that does not support multicast chaining
+ * does not support multiple PCI function configurations either, so
+ * filter insertion is much simpler and the same strategies can still be
+ * used.
*/
- if ((rc = hunt_filter_get_workarounds(enp)) != 0)
+ if ((rc = ef10_filter_get_workarounds(enp)) != 0)
goto fail2;
+ if ((table->eft_using_all_mulcst != all_mulcst) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is enabled, so traffic that matches
+ * more than one multicast filter will be replicated and
+ * delivered to multiple recipients. To avoid this duplicate
+ * delivery, remove old multicast filters before inserting new
+ * multicast filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
/* Insert or renew multicast filters */
if (all_mulcst == B_TRUE) {
/*
@@ -1342,6 +1381,17 @@ ef10_filter_reconfigure(
rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst,
addrs, count, filter_flags, B_TRUE);
if (rc != 0) {
+ if ((table->eft_using_all_mulcst == B_FALSE) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is on, so remove
+ * old filters before inserting the multicast
+ * all filter to avoid duplicate delivery caused
+ * by packets matching multiple filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
rc = ef10_filter_insert_all_multicast(enp,
filter_flags);
if (rc != 0) {
@@ -1355,11 +1405,7 @@ ef10_filter_reconfigure(
}
/* Remove old filters which were not renewed */
- for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
- if (ef10_filter_entry_is_auto_old(table, i)) {
- (void) ef10_filter_delete_internal(enp, i);
- }
- }
+ ef10_filter_remove_old(enp);
/* report if any optional flags were rejected */
if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) ||
@@ -1433,4 +1479,4 @@ ef10_filter_default_rxq_clear(
#endif /* EFSYS_OPT_FILTER */
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_intr.c b/sys/dev/sfxge/common/ef10_intr.c
index 7a4293c..5a91fe1 100644
--- a/sys/dev/sfxge/common/hunt_intr.c
+++ b/sys/dev/sfxge/common/ef10_intr.c
@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
__checkReturn efx_rc_t
ef10_intr_init(
@@ -197,4 +197,4 @@ ef10_intr_fini(
_NOTE(ARGUNUSED(enp))
}
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_mac.c b/sys/dev/sfxge/common/ef10_mac.c
index 267be0c..6546ef1 100644
--- a/sys/dev/sfxge/common/hunt_mac.c
+++ b/sys/dev/sfxge/common/ef10_mac.c
@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
__checkReturn efx_rc_t
ef10_mac_poll(
@@ -749,4 +749,4 @@ ef10_mac_stats_update(
#endif /* EFSYS_OPT_MAC_STATS */
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_mcdi.c b/sys/dev/sfxge/common/ef10_mcdi.c
index f39e977..f39e977 100644
--- a/sys/dev/sfxge/common/hunt_mcdi.c
+++ b/sys/dev/sfxge/common/ef10_mcdi.c
diff --git a/sys/dev/sfxge/common/ef10_nic.c b/sys/dev/sfxge/common/ef10_nic.c
new file mode 100644
index 0000000..493f4a0
--- /dev/null
+++ b/sys/dev/sfxge/common/ef10_nic.c
@@ -0,0 +1,1616 @@
+/*-
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+ MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
+ MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /*
+ * Require only Modes and DefaultMode fields.
+ * (CurrentMode field was added for Medford)
+ */
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_alloc(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+ MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
+ VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
+ enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_free(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
+ MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
+ MC_CMD_GET_CLOCK_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CLOCK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
+ if (*sys_freqp == 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
+ MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (vec_basep != NULL)
+ *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
+ if (pf_nvecp != NULL)
+ *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
+ if (vf_nvecp != NULL)
+ *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out uint32_t *flagsp,
+ __out uint32_t *flags2p)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ *flags2p = 0;
+ else
+ *flags2p = MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_vis(
+ __in efx_nic_t *enp,
+ __in uint32_t min_vi_count,
+ __in uint32_t max_vi_count,
+ __out uint32_t *vi_basep,
+ __out uint32_t *vi_countp,
+ __out uint32_t *vi_shiftp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
+ MC_CMD_ALLOC_VIS_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (vi_countp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_VIS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_VIS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
+ *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
+
+ /* Report VI_SHIFT if available (always zero for Huntington) */
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
+ *vi_shiftp = 0;
+ else
+ *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_vis(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_FREE_VIS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ /* Ignore ELREADY (no allocated VIs, so nothing to free) */
+ if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_piobuf(
+ __in efx_nic_t *enp,
+ __out efx_piobuf_handle_t *handlep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
+ MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (handlep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_piobuf(
+ __in efx_nic_t *enp,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
+ MC_CMD_FREE_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FREE_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_link_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_LINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_unlink_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_nic_alloc_piobufs(
+ __in efx_nic_t *enp,
+ __in uint32_t max_piobuf_count)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(max_piobuf_count, <=,
+ EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
+
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+
+ for (i = 0; i < max_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ if ((rc = efx_mcdi_alloc_piobuf(enp, handlep)) != 0)
+ goto fail1;
+
+ enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
+ enp->en_arch.ef10.ena_piobuf_count++;
+ }
+
+ return;
+
+fail1:
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+
+static void
+ef10_nic_free_piobufs(
+ __in efx_nic_t *enp)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+/* Sub-allocate a block from a piobuf */
+ __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
+ uint32_t blk_per_buf;
+ uint32_t buf, blk;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(bufnump);
+ EFSYS_ASSERT(handlep);
+ EFSYS_ASSERT(blknump);
+ EFSYS_ASSERT(offsetp);
+ EFSYS_ASSERT(sizep);
+
+ if ((edcp->edc_pio_alloc_size == 0) ||
+ (enp->en_arch.ef10.ena_piobuf_count == 0)) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
+
+ for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
+ uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
+
+ if (~(*map) == 0)
+ continue;
+
+ EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
+ for (blk = 0; blk < blk_per_buf; blk++) {
+ if ((*map & (1u << blk)) == 0) {
+ *map |= (1u << blk);
+ goto done;
+ }
+ }
+ }
+ rc = ENOMEM;
+ goto fail2;
+
+done:
+ *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
+ *bufnump = buf;
+ *blknump = blk;
+ *sizep = edcp->edc_pio_alloc_size;
+ *offsetp = blk * (*sizep);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Free a piobuf sub-allocated block */
+ __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum)
+{
+ uint32_t *map;
+ efx_rc_t rc;
+
+ if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
+ (blknum >= (8 * sizeof (*map)))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
+ if ((*map & (1u << blknum)) == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+ *map &= ~(1u << blknum);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ return (efx_mcdi_link_piobuf(enp, vi_index, handle));
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ return (efx_mcdi_unlink_piobuf(enp, vi_index));
+}
+
+ __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t flags;
+ uint32_t flags2;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_capabilities(enp, &flags, &flags2)) != 0)
+ goto fail1;
+
+#define CAP_FLAG(flags1, field) \
+ ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+#define CAP_FLAG2(flags2, field) \
+ ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+ /*
+ * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
+ * We only support the 14 byte prefix here.
+ */
+ if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ encp->enc_rx_prefix_size = 14;
+
+ /* Check if the firmware supports TSO */
+ encp->enc_fw_assisted_tso_enabled =
+ CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports FATSOv2 */
+ encp->enc_fw_assisted_tso_v2_enabled =
+ CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware has vadapter/vport/vswitch support */
+ encp->enc_datapath_cap_evb =
+ CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports VLAN insertion */
+ encp->enc_hw_tx_insert_vlan_enabled =
+ CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports RX event batching */
+ encp->enc_rx_batching_enabled =
+ CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
+
+ if (encp->enc_rx_batching_enabled)
+ encp->enc_rx_batch_max = 16;
+
+ /* Check if the firmware supports disabling scatter on RXQs */
+ encp->enc_rx_disable_scatter_supported =
+ CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports set mac with running filters */
+ encp->enc_allow_set_mac_with_installed_filters =
+ CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
+ B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
+ * specifying which parameters to configure.
+ */
+ encp->enc_enhanced_set_mac_supported =
+ CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
+
+#undef CAP_FLAG
+#undef CAP_FLAG2
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t mask;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
+ &mask)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /* Fallback for old firmware without privilege mask support */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ /* Assume PF has admin privilege */
+ mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
+ } else {
+ /* VF is always unprivileged by default */
+ mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
+ }
+ }
+
+ *maskp = mask;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * The external port mapping is a one-based numbering of the external
+ * connectors on the board. It does not distinguish separate off-board
+ * outputs such as multi-headed cables.
+ * The number of ports that map to each external port connector
+ * on the board is determined by the chip family and the port modes to
+ * which the NIC can be configured. The mapping table lists modes with
+ * port numbering requirements in increasing order.
+ */
+static struct {
+ efx_family_t family;
+ uint32_t modes_mask;
+ uint32_t stride;
+} __ef10_external_port_mappings[] = {
+ /* Supported modes requiring 1 output per port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G),
+ 1
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G),
+ 1
+ },
+ /* Supported modes requiring 2 outputs per port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G),
+ 2
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G),
+ 2
+ },
+ /* Supported modes requiring 4 outputs per port */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
+ 4
+ },
+};
+
+ __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp)
+{
+ efx_rc_t rc;
+ int i;
+ uint32_t port_modes;
+ uint32_t matches;
+ uint32_t stride = 1; /* default 1-1 mapping */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes)) != 0) {
+ /* No port mode information available - use default mapping */
+ goto out;
+ }
+
+ /*
+ * Infer the internal port -> external port mapping from
+ * the possible port modes for this NIC.
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
+ if (__ef10_external_port_mappings[i].family !=
+ enp->en_family)
+ continue;
+ matches = (__ef10_external_port_mappings[i].modes_mask &
+ port_modes);
+ if (matches != 0) {
+ stride = __ef10_external_port_mappings[i].stride;
+ port_modes &= ~matches;
+ }
+ }
+
+ if (port_modes != 0) {
+ /* Some advertised modes are not supported */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+out:
+ /*
+ * Scale as required by last matched mode and then convert to
+ * one-based numbering
+ */
+ *external_portp = (uint8_t)(port / stride) + 1;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
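To illustrate the stride arithmetic above (a sketch, not part of the patch):
the stride selected from the mapping table groups consecutive internal ports
onto one external connector, and the final +1 restores one-based numbering.

	/* Hypothetical helper mirroring the calculation in the function above. */
	static uint8_t
	example_external_port(uint32_t port, uint32_t stride)
	{
		/* stride 2: ports 0,1 -> connector 1; ports 2,3 -> connector 2 */
		return ((uint8_t)(port / stride) + 1);
	}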
+
+
+ __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read and clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ if (rc != EACCES)
+ goto fail2;
+
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail3;
+
+ if ((rc = enop->eno_board_cfg(enp)) != 0)
+ if (rc != EACCES)
+ goto fail4;
+
+ /*
+ * Set default driver config limits (based on board config).
+ *
+ * FIXME: For now allocate a fixed number of VIs which is likely to be
+ * sufficient and small enough to allow multiple functions on the same
+ * port.
+ */
+ edcp->edc_min_vi_count = edcp->edc_max_vi_count =
+ MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
+
+ /* The client driver must configure and enable PIO buffer support */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail5;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail6;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
+ /* Unprivileged functions do not have access to sensors */
+ if (rc != EACCES)
+ goto fail7;
+ }
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail7:
+ EFSYS_PROBE(fail7);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail6:
+ EFSYS_PROBE(fail6);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail5:
+ EFSYS_PROBE(fail5);
+#endif
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_evq_count, max_evq_count;
+ uint32_t min_rxq_count, max_rxq_count;
+ uint32_t min_txq_count, max_txq_count;
+ efx_rc_t rc;
+
+ if (edlp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get minimum required and maximum usable VI limits */
+ min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
+ min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
+ min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_min_vi_count =
+ MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
+
+ max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
+ max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
+ max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_max_vi_count =
+ MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
+
+ /*
+ * Check limits for sub-allocated piobuf blocks.
+ * PIO is optional, so don't fail if the limits are incorrect.
+ */
+ if ((encp->enc_piobuf_size == 0) ||
+ (encp->enc_piobuf_limit == 0) ||
+ (edlp->edl_min_pio_alloc_size == 0) ||
+ (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
+ /* Disable PIO */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+ } else {
+ uint32_t blk_size, blk_count, blks_per_piobuf;
+
+ blk_size =
+ MAX(edlp->edl_min_pio_alloc_size,
+ encp->enc_piobuf_min_alloc_size);
+
+ blks_per_piobuf = encp->enc_piobuf_size / blk_size;
+ EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
+
+ blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
+
+ /* A zero max pio alloc count means unlimited */
+ if ((edlp->edl_max_pio_alloc_count > 0) &&
+ (edlp->edl_max_pio_alloc_count < blk_count)) {
+ blk_count = edlp->edl_max_pio_alloc_count;
+ }
+
+ edcp->edc_pio_alloc_size = blk_size;
+ edcp->edc_max_piobuf_count =
+ (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
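A worked example of the PIO sub-allocation arithmetic above, using hypothetical
limits rather than values from any real board configuration:

	/*
	 * enc_piobuf_size = 2048, blk_size = 256
	 *	=> blks_per_piobuf = 2048 / 256 = 8
	 * enc_piobuf_limit = 16	=> blk_count = 16 * 8 = 128
	 * edl_max_pio_alloc_count = 20	=> blk_count capped to 20
	 * edc_max_piobuf_count = (20 + 7) / 8 = 3 piobufs reserved
	 * edc_pio_alloc_size = 256 bytes per sub-allocated block
	 */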
+
+
+ __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN)];
+ efx_rc_t rc;
+
+ /* ef10_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ /* Clear RX/TX DMA queue errors */
+ enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_vi_count, max_vi_count;
+ uint32_t vi_count, vi_base, vi_shift;
+ uint32_t i;
+ uint32_t retry;
+ uint32_t delay_us;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ /* Allocate (optional) on-chip PIO buffers */
+ ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
+
+ /*
+ * For best performance, PIO writes should use a write-combined
+ * (WC) memory mapping. Using a separate WC mapping for the PIO
+ * aperture of each VI would be a burden to drivers (and not
+ * possible if the host page size is >4Kbyte).
+ *
+ * To avoid this we use a single uncached (UC) mapping for VI
+ * register access, and a single WC mapping for extra VIs used
+ * for PIO writes.
+ *
+ * Each piobuf must be linked to a VI in the WC mapping, and to
+ * each VI that is using a sub-allocated block from the piobuf.
+ */
+ min_vi_count = edcp->edc_min_vi_count;
+ max_vi_count =
+ edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Ensure that the previously attached driver's VIs are freed */
+ if ((rc = efx_mcdi_free_vis(enp)) != 0)
+ goto fail2;
+
+ /*
+ * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
+ * fails then retrying the request for fewer VI resources may succeed.
+ */
+ vi_count = 0;
+ if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
+ &vi_base, &vi_count, &vi_shift)) != 0)
+ goto fail3;
+
+ EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
+
+ if (vi_count < min_vi_count) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ enp->en_arch.ef10.ena_vi_base = vi_base;
+ enp->en_arch.ef10.ena_vi_count = vi_count;
+ enp->en_arch.ef10.ena_vi_shift = vi_shift;
+
+ if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
+ /* Not enough extra VIs to map piobufs */
+ ef10_nic_free_piobufs(enp);
+ }
+
+ enp->en_arch.ef10.ena_pio_write_vi_base =
+ vi_count - enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Save UC memory mapping details */
+ enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_pio_write_vi_base);
+ } else {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_vi_count);
+ }
+
+ /* Save WC memory mapping details */
+ enp->en_arch.ef10.ena_wc_mem_map_offset =
+ enp->en_arch.ef10.ena_uc_mem_map_offset +
+ enp->en_arch.ef10.ena_uc_mem_map_size;
+
+ enp->en_arch.ef10.ena_wc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_piobuf_count);
+
+ /* Link piobufs to extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_link_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i,
+ enp->en_arch.ef10.ena_piobuf_handle[i]);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ /*
+ * Allocate a vAdaptor attached to our upstream vPort/pPort.
+ *
+ * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
+ * driver has yet to bring up the EVB port. See bug 56147. In this case,
+ * retry the request several times after waiting a while. The wait time
+ * between retries starts small (10ms) and exponentially increases.
+ * Total wait time is a little over two seconds. Retry logic in the
+ * client driver may mean this whole loop is repeated if it continues to
+ * fail.
+ */
+ retry = 0;
+ delay_us = 10000;
+ while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
+ (rc != ENOENT)) {
+ /*
+ * Do not retry alloc for PF, or for other errors on
+ * a VF.
+ */
+ goto fail5;
+ }
+
+		/* VF started before the PF is ready; retry the allocation. */
+ if (retry > 5) {
+ /* Too many attempts */
+ rc = EINVAL;
+ goto fail6;
+ }
+ EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
+ EFSYS_SLEEP(delay_us);
+ retry++;
+ if (delay_us < 500000)
+ delay_us <<= 2;
+ }
+
+ enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+
+ ef10_nic_free_piobufs(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
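For reference, the vadaptor retry loop above backs off as follows (this is only
a breakdown of the existing constants, not new behaviour): the delay quadruples
(delay_us <<= 2) after each failed attempt while it is below 500ms.

	/*
	 * Sleeps before giving up (retry > 5):
	 *	10ms, 40ms, 160ms, 640ms, 640ms, 640ms  ~ 2.13s total,
	 * i.e. "a little over two seconds" as the comment above says.
	 */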
+
+ __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Report VIs that the client driver can use.
+ * Do not include VIs used for PIO buffer writes.
+ */
+ *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * TODO: Specify host memory mapping alignment and granularity
+ * in efx_drv_limits_t so that they can be taken into account
+ * when allocating extra VIs for PIO writes.
+ */
+ switch (region) {
+ case EFX_REGION_VI:
+ /* UC mapped memory BAR region for VI registers */
+ *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
+ break;
+
+ case EFX_REGION_PIO_WRITE_VI:
+ /* WC mapped memory BAR region for piobuf writes */
+ *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_nic_fini(
+ __in efx_nic_t *enp)
+{
+ uint32_t i;
+ efx_rc_t rc;
+
+ (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
+ enp->en_vport_id = 0;
+
+ /* Unlink piobufs from extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_unlink_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ ef10_nic_free_piobufs(enp);
+
+ (void) efx_mcdi_free_vis(enp);
+ enp->en_arch.ef10.ena_vi_count = 0;
+}
+
+ void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(enp))
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_nvram.c b/sys/dev/sfxge/common/ef10_nvram.c
index e124c3e..e63ba87 100644
--- a/sys/dev/sfxge/common/hunt_nvram.c
+++ b/sys/dev/sfxge/common/ef10_nvram.c
@@ -34,7 +34,7 @@ __FBSDID("$FreeBSD$");
#include "efx.h"
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
@@ -2330,4 +2330,4 @@ ef10_nvram_partn_rw_finish(
#endif /* EFSYS_OPT_NVRAM */
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/ef10_phy.c b/sys/dev/sfxge/common/ef10_phy.c
new file mode 100644
index 0000000..9241297
--- /dev/null
+++ b/sys/dev/sfxge/common/ef10_phy.c
@@ -0,0 +1,518 @@
+/*-
+ * Copyright (c) 2012-2015 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static void
+mcdi_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_40000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+mcdi_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 40000 && fd)
+ *link_modep = EFX_LINK_40000FDX;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_GENERATE)
+ *fcntlp = EFX_FCNTL_GENERATE;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+
+ void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_40G:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ mcdi_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = ef10_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &elsp->els_adv_cap_mask);
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &elsp->els_lp_cap_mask);
+
+ mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &elsp->els_link_mode, &elsp->els_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+	/* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN)];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ efx_rc_t rc;
+
+ if (~encp->enc_func_flags & EFX_NIC_FUNC_LINKCTRL)
+ goto out;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+	/* Too many fields for POPULATE macros, so insert this afterwards */
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ case EFX_LINK_40000FDX:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+ __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ /* TBD: no stats support in firmware yet */
+ _NOTE(ARGUNUSED(enp, esmp))
+ memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat));
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_PHY_PROPS
+
+#if EFSYS_OPT_NAMES
+
+ const char *
+ef10_phy_prop_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp, id))
+
+ return (NULL);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+ef10_phy_prop_get(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t flags,
+ __out uint32_t *valp)
+{
+ _NOTE(ARGUNUSED(enp, id, flags, valp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_prop_set(
+ __in efx_nic_t *enp,
+ __in unsigned int id,
+ __in uint32_t val)
+{
+ _NOTE(ARGUNUSED(enp, id, val))
+
+ return (ENOTSUP);
+}
+
+#endif /* EFSYS_OPT_PHY_PROPS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_rx.c b/sys/dev/sfxge/common/ef10_rx.c
index 984d48d..278a0a1 100644
--- a/sys/dev/sfxge/common/hunt_rx.c
+++ b/sys/dev/sfxge/common/ef10_rx.c
@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
static __checkReturn efx_rc_t
@@ -823,4 +823,4 @@ ef10_rx_fini(
#endif /* EFSYS_OPT_RX_SCALE */
}
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_tx.c b/sys/dev/sfxge/common/ef10_tx.c
index baa7444..2922371 100755
--- a/sys/dev/sfxge/common/hunt_tx.c
+++ b/sys/dev/sfxge/common/ef10_tx.c
@@ -35,7 +35,7 @@ __FBSDID("$FreeBSD$");
#include "efx_impl.h"
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#if EFSYS_OPT_QSTATS
#define EFX_TX_QSTAT_INCR(_etp, _stat) \
@@ -569,7 +569,7 @@ ef10_tx_qdesc_dma_create(
}
void
-hunt_tx_qdesc_tso_create(
+ef10_tx_qdesc_tso_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
__in uint32_t tcp_seq,
@@ -707,4 +707,4 @@ ef10_tx_qstats_update(
#endif /* EFSYS_OPT_QSTATS */
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/sys/dev/sfxge/common/hunt_vpd.c b/sys/dev/sfxge/common/ef10_vpd.c
index b4c6d26..8eef3ad 100644
--- a/sys/dev/sfxge/common/hunt_vpd.c
+++ b/sys/dev/sfxge/common/ef10_vpd.c
@@ -37,7 +37,7 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_VPD
-#if EFSYS_OPT_HUNTINGTON
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
#include "ef10_tlv_layout.h"
@@ -458,6 +458,6 @@ ef10_vpd_fini(
}
}
-#endif /* EFSYS_OPT_HUNTINGTON */
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
#endif /* EFSYS_OPT_VPD */
diff --git a/sys/dev/sfxge/common/efx_ev.c b/sys/dev/sfxge/common/efx_ev.c
index 7849af4..6a2aab4 100644
--- a/sys/dev/sfxge/common/efx_ev.c
+++ b/sys/dev/sfxge/common/efx_ev.c
@@ -56,15 +56,15 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_ev_init(
+siena_ev_init(
__in efx_nic_t *enp);
static void
-falconsiena_ev_fini(
+siena_ev_fini(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_ev_qcreate(
+siena_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
@@ -73,34 +73,34 @@ falconsiena_ev_qcreate(
__in efx_evq_t *eep);
static void
-falconsiena_ev_qdestroy(
+siena_ev_qdestroy(
__in efx_evq_t *eep);
static __checkReturn efx_rc_t
-falconsiena_ev_qprime(
+siena_ev_qprime(
__in efx_evq_t *eep,
__in unsigned int count);
static void
-falconsiena_ev_qpoll(
+siena_ev_qpoll(
__in efx_evq_t *eep,
__inout unsigned int *countp,
__in const efx_ev_callbacks_t *eecp,
__in_opt void *arg);
static void
-falconsiena_ev_qpost(
+siena_ev_qpost(
__in efx_evq_t *eep,
__in uint16_t data);
static __checkReturn efx_rc_t
-falconsiena_ev_qmoderate(
+siena_ev_qmoderate(
__in efx_evq_t *eep,
__in unsigned int us);
#if EFSYS_OPT_QSTATS
static void
-falconsiena_ev_qstats_update(
+siena_ev_qstats_update(
__in efx_evq_t *eep,
__inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
@@ -110,15 +110,15 @@ falconsiena_ev_qstats_update(
#if EFSYS_OPT_SIENA
static const efx_ev_ops_t __efx_ev_siena_ops = {
- falconsiena_ev_init, /* eevo_init */
- falconsiena_ev_fini, /* eevo_fini */
- falconsiena_ev_qcreate, /* eevo_qcreate */
- falconsiena_ev_qdestroy, /* eevo_qdestroy */
- falconsiena_ev_qprime, /* eevo_qprime */
- falconsiena_ev_qpost, /* eevo_qpost */
- falconsiena_ev_qmoderate, /* eevo_qmoderate */
+ siena_ev_init, /* eevo_init */
+ siena_ev_fini, /* eevo_fini */
+ siena_ev_qcreate, /* eevo_qcreate */
+ siena_ev_qdestroy, /* eevo_qdestroy */
+ siena_ev_qprime, /* eevo_qprime */
+ siena_ev_qpost, /* eevo_qpost */
+ siena_ev_qmoderate, /* eevo_qmoderate */
#if EFSYS_OPT_QSTATS
- falconsiena_ev_qstats_update, /* eevo_qstats_update */
+ siena_ev_qstats_update, /* eevo_qstats_update */
#endif
};
#endif /* EFSYS_OPT_SIENA */
@@ -374,7 +374,7 @@ efx_ev_qpoll(
EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
#endif
- falconsiena_ev_qpoll(eep, countp, eecp, arg);
+ siena_ev_qpoll(eep, countp, eecp, arg);
}
void
@@ -433,7 +433,7 @@ efx_ev_qstats_update(
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_ev_init(
+siena_ev_init(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -451,7 +451,7 @@ falconsiena_ev_init(
}
static __checkReturn boolean_t
-falconsiena_ev_rx_not_ok(
+siena_ev_rx_not_ok(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in uint32_t label,
@@ -541,7 +541,7 @@ falconsiena_ev_rx_not_ok(
}
static __checkReturn boolean_t
-falconsiena_ev_rx(
+siena_ev_rx(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -636,7 +636,7 @@ falconsiena_ev_rx(
/* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
if (!ok) {
- ignore = falconsiena_ev_rx_not_ok(eep, eqp, label, id, &flags);
+ ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
if (ignore) {
EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
uint32_t, size, uint16_t, flags);
@@ -695,7 +695,7 @@ falconsiena_ev_rx(
}
static __checkReturn boolean_t
-falconsiena_ev_tx(
+siena_ev_tx(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -742,7 +742,7 @@ falconsiena_ev_tx(
}
static __checkReturn boolean_t
-falconsiena_ev_global(
+siena_ev_global(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -756,7 +756,7 @@ falconsiena_ev_global(
}
static __checkReturn boolean_t
-falconsiena_ev_driver(
+siena_ev_driver(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -885,7 +885,7 @@ falconsiena_ev_driver(
}
static __checkReturn boolean_t
-falconsiena_ev_drv_gen(
+siena_ev_drv_gen(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -913,7 +913,7 @@ falconsiena_ev_drv_gen(
#if EFSYS_OPT_MCDI
static __checkReturn boolean_t
-falconsiena_ev_mcdi(
+siena_ev_mcdi(
__in efx_evq_t *eep,
__in efx_qword_t *eqp,
__in const efx_ev_callbacks_t *eecp,
@@ -1018,7 +1018,7 @@ out:
#endif /* EFSYS_OPT_MCDI */
static __checkReturn efx_rc_t
-falconsiena_ev_qprime(
+siena_ev_qprime(
__in efx_evq_t *eep,
__in unsigned int count)
{
@@ -1039,7 +1039,7 @@ falconsiena_ev_qprime(
#define EFX_EV_BATCH 8
static void
-falconsiena_ev_qpoll(
+siena_ev_qpoll(
__in efx_evq_t *eep,
__inout unsigned int *countp,
__in const efx_ev_callbacks_t *eecp,
@@ -1172,7 +1172,7 @@ falconsiena_ev_qpoll(
}
static void
-falconsiena_ev_qpost(
+siena_ev_qpost(
__in efx_evq_t *eep,
__in uint16_t data)
{
@@ -1191,7 +1191,7 @@ falconsiena_ev_qpost(
}
static __checkReturn efx_rc_t
-falconsiena_ev_qmoderate(
+siena_ev_qmoderate(
__in efx_evq_t *eep,
__in unsigned int us)
{
@@ -1240,7 +1240,7 @@ fail1:
}
static __checkReturn efx_rc_t
-falconsiena_ev_qcreate(
+siena_ev_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in efsys_mem_t *esmp,
@@ -1281,13 +1281,13 @@ falconsiena_ev_qcreate(
}
/* Set up the handler table */
- eep->ee_rx = falconsiena_ev_rx;
- eep->ee_tx = falconsiena_ev_tx;
- eep->ee_driver = falconsiena_ev_driver;
- eep->ee_global = falconsiena_ev_global;
- eep->ee_drv_gen = falconsiena_ev_drv_gen;
+ eep->ee_rx = siena_ev_rx;
+ eep->ee_tx = siena_ev_tx;
+ eep->ee_driver = siena_ev_driver;
+ eep->ee_global = siena_ev_global;
+ eep->ee_drv_gen = siena_ev_drv_gen;
#if EFSYS_OPT_MCDI
- eep->ee_mcdi = falconsiena_ev_mcdi;
+ eep->ee_mcdi = siena_ev_mcdi;
#endif /* EFSYS_OPT_MCDI */
/* Set up the new event queue */
@@ -1378,7 +1378,7 @@ efx_ev_qstat_name(
#if EFSYS_OPT_QSTATS
static void
-falconsiena_ev_qstats_update(
+siena_ev_qstats_update(
__in efx_evq_t *eep,
__inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
{
@@ -1394,7 +1394,7 @@ falconsiena_ev_qstats_update(
#endif /* EFSYS_OPT_QSTATS */
static void
-falconsiena_ev_qdestroy(
+siena_ev_qdestroy(
__in efx_evq_t *eep)
{
efx_nic_t *enp = eep->ee_enp;
@@ -1411,7 +1411,7 @@ falconsiena_ev_qdestroy(
}
static void
-falconsiena_ev_fini(
+siena_ev_fini(
__in efx_nic_t *enp)
{
_NOTE(ARGUNUSED(enp))
diff --git a/sys/dev/sfxge/common/efx_filter.c b/sys/dev/sfxge/common/efx_filter.c
index 06ca29a..55beafd 100644
--- a/sys/dev/sfxge/common/efx_filter.c
+++ b/sys/dev/sfxge/common/efx_filter.c
@@ -40,30 +40,30 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_filter_init(
+siena_filter_init(
__in efx_nic_t *enp);
static void
-falconsiena_filter_fini(
+siena_filter_fini(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_filter_restore(
+siena_filter_restore(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_filter_add(
+siena_filter_add(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec,
__in boolean_t may_replace);
static __checkReturn efx_rc_t
-falconsiena_filter_delete(
+siena_filter_delete(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec);
static __checkReturn efx_rc_t
-falconsiena_filter_supported_filters(
+siena_filter_supported_filters(
__in efx_nic_t *enp,
__out uint32_t *list,
__out size_t *length);
@@ -72,13 +72,13 @@ falconsiena_filter_supported_filters(
#if EFSYS_OPT_SIENA
static const efx_filter_ops_t __efx_filter_siena_ops = {
- falconsiena_filter_init, /* efo_init */
- falconsiena_filter_fini, /* efo_fini */
- falconsiena_filter_restore, /* efo_restore */
- falconsiena_filter_add, /* efo_add */
- falconsiena_filter_delete, /* efo_delete */
- falconsiena_filter_supported_filters, /* efo_supported_filters */
- NULL, /* efo_reconfigure */
+ siena_filter_init, /* efo_init */
+ siena_filter_fini, /* efo_fini */
+ siena_filter_restore, /* efo_restore */
+ siena_filter_add, /* efo_add */
+ siena_filter_delete, /* efo_delete */
+ siena_filter_supported_filters, /* efo_supported_filters */
+ NULL, /* efo_reconfigure */
};
#endif /* EFSYS_OPT_SIENA */
@@ -428,7 +428,7 @@ efx_filter_spec_set_mc_def(
#define FILTER_CTL_SRCH_MAX 200
static __checkReturn efx_rc_t
-falconsiena_filter_spec_from_gen_spec(
+siena_filter_spec_from_gen_spec(
__out falconsiena_filter_spec_t *fs_spec,
__in efx_filter_spec_t *gen_spec)
{
@@ -586,7 +586,7 @@ fail1:
* key derived from the n-tuple.
*/
static uint16_t
-falconsiena_filter_tbl_hash(
+siena_filter_tbl_hash(
__in uint32_t key)
{
uint16_t tmp;
@@ -609,14 +609,14 @@ falconsiena_filter_tbl_hash(
* increments from the first possible entry selected by the hash.
*/
static uint16_t
-falconsiena_filter_tbl_increment(
+siena_filter_tbl_increment(
__in uint32_t key)
{
return ((uint16_t)(key * 2 - 1));
}
static __checkReturn boolean_t
-falconsiena_filter_test_used(
+siena_filter_test_used(
__in falconsiena_filter_tbl_t *fsftp,
__in unsigned int index)
{
@@ -625,7 +625,7 @@ falconsiena_filter_test_used(
}
static void
-falconsiena_filter_set_used(
+siena_filter_set_used(
__in falconsiena_filter_tbl_t *fsftp,
__in unsigned int index)
{
@@ -635,7 +635,7 @@ falconsiena_filter_set_used(
}
static void
-falconsiena_filter_clear_used(
+siena_filter_clear_used(
__in falconsiena_filter_tbl_t *fsftp,
__in unsigned int index)
{
@@ -648,7 +648,7 @@ falconsiena_filter_clear_used(
static falconsiena_filter_tbl_id_t
-falconsiena_filter_tbl_id(
+siena_filter_tbl_id(
__in falconsiena_filter_type_t type)
{
falconsiena_filter_tbl_id_t tbl_id;
@@ -661,7 +661,6 @@ falconsiena_filter_tbl_id(
tbl_id = EFX_FS_FILTER_TBL_RX_IP;
break;
-#if EFSYS_OPT_SIENA
case EFX_FS_FILTER_RX_MAC_FULL:
case EFX_FS_FILTER_RX_MAC_WILD:
tbl_id = EFX_FS_FILTER_TBL_RX_MAC;
@@ -678,7 +677,6 @@ falconsiena_filter_tbl_id(
case EFX_FS_FILTER_TX_MAC_WILD:
tbl_id = EFX_FS_FILTER_TBL_TX_MAC;
break;
-#endif /* EFSYS_OPT_SIENA */
default:
EFSYS_ASSERT(B_FALSE);
@@ -689,7 +687,7 @@ falconsiena_filter_tbl_id(
}
static void
-falconsiena_filter_reset_search_depth(
+siena_filter_reset_search_depth(
__inout falconsiena_filter_t *fsfp,
__in falconsiena_filter_tbl_id_t tbl_id)
{
@@ -701,7 +699,6 @@ falconsiena_filter_reset_search_depth(
fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_WILD] = 0;
break;
-#if EFSYS_OPT_SIENA
case EFX_FS_FILTER_TBL_RX_MAC:
fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_FULL] = 0;
fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_WILD] = 0;
@@ -718,7 +715,6 @@ falconsiena_filter_reset_search_depth(
fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_FULL] = 0;
fsfp->fsf_depth[EFX_FS_FILTER_TX_MAC_WILD] = 0;
break;
-#endif /* EFSYS_OPT_SIENA */
default:
EFSYS_ASSERT(B_FALSE);
@@ -727,7 +723,7 @@ falconsiena_filter_reset_search_depth(
}
static void
-falconsiena_filter_push_rx_limits(
+siena_filter_push_rx_limits(
__in efx_nic_t *enp)
{
falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
@@ -748,7 +744,6 @@ falconsiena_filter_push_rx_limits(
fsfp->fsf_depth[EFX_FS_FILTER_RX_UDP_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
-#if EFSYS_OPT_SIENA
if (fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_MAC].fsft_size) {
EFX_SET_OWORD_FIELD(oword,
FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
@@ -759,13 +754,12 @@ falconsiena_filter_push_rx_limits(
fsfp->fsf_depth[EFX_FS_FILTER_RX_MAC_WILD] +
FILTER_CTL_SRCH_FUDGE_WILD);
}
-#endif /* EFSYS_OPT_SIENA */
EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
}
static void
-falconsiena_filter_push_tx_limits(
+siena_filter_push_tx_limits(
__in efx_nic_t *enp)
{
falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
@@ -808,7 +802,7 @@ falconsiena_filter_push_tx_limits(
/* Build a filter entry and return its n-tuple key. */
static __checkReturn uint32_t
-falconsiena_filter_build(
+siena_filter_build(
__out efx_oword_t *filter,
__in falconsiena_filter_spec_t *spec)
{
@@ -817,7 +811,7 @@ falconsiena_filter_build(
uint8_t type = spec->fsfs_type;
uint32_t flags = spec->fsfs_flags;
- switch (falconsiena_filter_tbl_id(type)) {
+ switch (siena_filter_tbl_id(type)) {
case EFX_FS_FILTER_TBL_RX_IP: {
boolean_t is_udp = (type == EFX_FS_FILTER_RX_UDP_FULL ||
type == EFX_FS_FILTER_RX_UDP_WILD);
@@ -835,7 +829,6 @@ falconsiena_filter_build(
break;
}
-#if EFSYS_OPT_SIENA
case EFX_FS_FILTER_TBL_RX_MAC: {
boolean_t is_wild = (type == EFX_FS_FILTER_RX_MAC_WILD);
EFX_POPULATE_OWORD_7(*filter,
@@ -851,7 +844,6 @@ falconsiena_filter_build(
dword3 = is_wild;
break;
}
-#endif /* EFSYS_OPT_SIENA */
case EFX_FS_FILTER_TBL_TX_IP: {
boolean_t is_udp = (type == EFX_FS_FILTER_TX_UDP_FULL ||
@@ -866,7 +858,6 @@ falconsiena_filter_build(
break;
}
-#if EFSYS_OPT_SIENA
case EFX_FS_FILTER_TBL_TX_MAC: {
boolean_t is_wild = (type == EFX_FS_FILTER_TX_MAC_WILD);
EFX_POPULATE_OWORD_5(*filter,
@@ -878,7 +869,6 @@ falconsiena_filter_build(
dword3 = is_wild | spec->fsfs_dmaq_id << 1;
break;
}
-#endif /* EFSYS_OPT_SIENA */
default:
EFSYS_ASSERT(B_FALSE);
@@ -895,7 +885,7 @@ falconsiena_filter_build(
}
static __checkReturn efx_rc_t
-falconsiena_filter_push_entry(
+siena_filter_push_entry(
__inout efx_nic_t *enp,
__in falconsiena_filter_type_t type,
__in int index,
@@ -912,7 +902,6 @@ falconsiena_filter_push_entry(
eop, B_TRUE);
break;
-#if EFSYS_OPT_SIENA
case EFX_FS_FILTER_RX_MAC_FULL:
case EFX_FS_FILTER_RX_MAC_WILD:
EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index,
@@ -932,7 +921,6 @@ falconsiena_filter_push_entry(
EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index,
eop, B_TRUE);
break;
-#endif /* EFSYS_OPT_SIENA */
default:
EFSYS_ASSERT(B_FALSE);
@@ -947,13 +935,13 @@ fail1:
static __checkReturn boolean_t
-falconsiena_filter_equal(
+siena_filter_equal(
__in const falconsiena_filter_spec_t *left,
__in const falconsiena_filter_spec_t *right)
{
falconsiena_filter_tbl_id_t tbl_id;
- tbl_id = falconsiena_filter_tbl_id(left->fsfs_type);
+ tbl_id = siena_filter_tbl_id(left->fsfs_type);
if (left->fsfs_type != right->fsfs_type)
@@ -972,7 +960,7 @@ falconsiena_filter_equal(
}
static __checkReturn efx_rc_t
-falconsiena_filter_search(
+siena_filter_search(
__in falconsiena_filter_tbl_t *fsftp,
__in falconsiena_filter_spec_t *spec,
__in uint32_t key,
@@ -982,8 +970,8 @@ falconsiena_filter_search(
{
unsigned hash, incr, filter_idx, depth;
- hash = falconsiena_filter_tbl_hash(key);
- incr = falconsiena_filter_tbl_increment(key);
+ hash = siena_filter_tbl_hash(key);
+ incr = siena_filter_tbl_increment(key);
filter_idx = hash & (fsftp->fsft_size - 1);
depth = 1;
@@ -993,8 +981,8 @@ falconsiena_filter_search(
* Return success if entry is used and matches this spec
* or entry is unused and we are trying to insert.
*/
- if (falconsiena_filter_test_used(fsftp, filter_idx) ?
- falconsiena_filter_equal(spec,
+ if (siena_filter_test_used(fsftp, filter_idx) ?
+ siena_filter_equal(spec,
&fsftp->fsft_spec[filter_idx]) :
for_insert) {
*filter_index = filter_idx;
@@ -1012,18 +1000,18 @@ falconsiena_filter_search(
}
static void
-falconsiena_filter_clear_entry(
+siena_filter_clear_entry(
__in efx_nic_t *enp,
__in falconsiena_filter_tbl_t *fsftp,
__in int index)
{
efx_oword_t filter;
- if (falconsiena_filter_test_used(fsftp, index)) {
- falconsiena_filter_clear_used(fsftp, index);
+ if (siena_filter_test_used(fsftp, index)) {
+ siena_filter_clear_used(fsftp, index);
EFX_ZERO_OWORD(filter);
- falconsiena_filter_push_entry(enp,
+ siena_filter_push_entry(enp,
fsftp->fsft_spec[index].fsfs_type,
index, &filter);
@@ -1033,7 +1021,7 @@ falconsiena_filter_clear_entry(
}
void
-falconsiena_filter_tbl_clear(
+siena_filter_tbl_clear(
__in efx_nic_t *enp,
__in falconsiena_filter_tbl_id_t tbl_id)
{
@@ -1045,17 +1033,17 @@ falconsiena_filter_tbl_clear(
EFSYS_LOCK(enp->en_eslp, state);
for (index = 0; index < fsftp->fsft_size; ++index) {
- falconsiena_filter_clear_entry(enp, fsftp, index);
+ siena_filter_clear_entry(enp, fsftp, index);
}
if (fsftp->fsft_used == 0)
- falconsiena_filter_reset_search_depth(fsfp, tbl_id);
+ siena_filter_reset_search_depth(fsfp, tbl_id);
EFSYS_UNLOCK(enp->en_eslp, state);
}
static __checkReturn efx_rc_t
-falconsiena_filter_init(
+siena_filter_init(
__in efx_nic_t *enp)
{
falconsiena_filter_t *fsfp;
@@ -1073,7 +1061,6 @@ falconsiena_filter_init(
enp->en_filter.ef_falconsiena_filter = fsfp;
switch (enp->en_family) {
-#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_RX_IP];
fsftp->fsft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
@@ -1087,7 +1074,6 @@ falconsiena_filter_init(
fsftp = &fsfp->fsf_tbl[EFX_FS_FILTER_TBL_TX_MAC];
fsftp->fsft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
break;
-#endif /* EFSYS_OPT_SIENA */
default:
rc = ENOTSUP;
@@ -1133,7 +1119,7 @@ fail3:
fail2:
EFSYS_PROBE(fail2);
- falconsiena_filter_fini(enp);
+ siena_filter_fini(enp);
fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
@@ -1141,7 +1127,7 @@ fail1:
}
static void
-falconsiena_filter_fini(
+siena_filter_fini(
__in efx_nic_t *enp)
{
falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
@@ -1181,7 +1167,7 @@ falconsiena_filter_fini(
/* Restore filter state after a reset */
static __checkReturn efx_rc_t
-falconsiena_filter_restore(
+siena_filter_restore(
__in efx_nic_t *enp)
{
falconsiena_filter_t *fsfp = enp->en_filter.ef_falconsiena_filter;
@@ -1200,20 +1186,20 @@ falconsiena_filter_restore(
for (filter_idx = 0;
filter_idx < fsftp->fsft_size;
filter_idx++) {
- if (!falconsiena_filter_test_used(fsftp, filter_idx))
+ if (!siena_filter_test_used(fsftp, filter_idx))
continue;
spec = &fsftp->fsft_spec[filter_idx];
- if ((rc = falconsiena_filter_build(&filter, spec)) != 0)
+ if ((rc = siena_filter_build(&filter, spec)) != 0)
goto fail1;
- if ((rc = falconsiena_filter_push_entry(enp,
+ if ((rc = siena_filter_push_entry(enp,
spec->fsfs_type, filter_idx, &filter)) != 0)
goto fail2;
}
}
- falconsiena_filter_push_rx_limits(enp);
- falconsiena_filter_push_tx_limits(enp);
+ siena_filter_push_rx_limits(enp);
+ siena_filter_push_tx_limits(enp);
EFSYS_UNLOCK(enp->en_eslp, state);
@@ -1231,7 +1217,7 @@ fail1:
}
static __checkReturn efx_rc_t
-falconsiena_filter_add(
+siena_filter_add(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec,
__in boolean_t may_replace)
@@ -1251,10 +1237,10 @@ falconsiena_filter_add(
EFSYS_ASSERT3P(spec, !=, NULL);
- if ((rc = falconsiena_filter_spec_from_gen_spec(&fs_spec, spec)) != 0)
+ if ((rc = siena_filter_spec_from_gen_spec(&fs_spec, spec)) != 0)
goto fail1;
- tbl_id = falconsiena_filter_tbl_id(fs_spec.fsfs_type);
+ tbl_id = siena_filter_tbl_id(fs_spec.fsfs_type);
fsftp = &fsfp->fsf_tbl[tbl_id];
if (fsftp->fsft_size == 0) {
@@ -1262,11 +1248,11 @@ falconsiena_filter_add(
goto fail2;
}
- key = falconsiena_filter_build(&filter, &fs_spec);
+ key = siena_filter_build(&filter, &fs_spec);
EFSYS_LOCK(enp->en_eslp, state);
- rc = falconsiena_filter_search(fsftp, &fs_spec, key, B_TRUE,
+ rc = siena_filter_search(fsftp, &fs_spec, key, B_TRUE,
&filter_idx, &depth);
if (rc != 0)
goto fail3;
@@ -1274,25 +1260,25 @@ falconsiena_filter_add(
EFSYS_ASSERT3U(filter_idx, <, fsftp->fsft_size);
saved_fs_spec = &fsftp->fsft_spec[filter_idx];
- if (falconsiena_filter_test_used(fsftp, filter_idx)) {
+ if (siena_filter_test_used(fsftp, filter_idx)) {
if (may_replace == B_FALSE) {
rc = EEXIST;
goto fail4;
}
}
- falconsiena_filter_set_used(fsftp, filter_idx);
+ siena_filter_set_used(fsftp, filter_idx);
*saved_fs_spec = fs_spec;
if (fsfp->fsf_depth[fs_spec.fsfs_type] < depth) {
fsfp->fsf_depth[fs_spec.fsfs_type] = depth;
if (tbl_id == EFX_FS_FILTER_TBL_TX_IP ||
tbl_id == EFX_FS_FILTER_TBL_TX_MAC)
- falconsiena_filter_push_tx_limits(enp);
+ siena_filter_push_tx_limits(enp);
else
- falconsiena_filter_push_rx_limits(enp);
+ siena_filter_push_rx_limits(enp);
}
- falconsiena_filter_push_entry(enp, fs_spec.fsfs_type,
+ siena_filter_push_entry(enp, fs_spec.fsfs_type,
filter_idx, &filter);
EFSYS_UNLOCK(enp->en_eslp, state);
@@ -1314,7 +1300,7 @@ fail1:
}
static __checkReturn efx_rc_t
-falconsiena_filter_delete(
+siena_filter_delete(
__in efx_nic_t *enp,
__inout efx_filter_spec_t *spec)
{
@@ -1331,24 +1317,24 @@ falconsiena_filter_delete(
EFSYS_ASSERT3P(spec, !=, NULL);
- if ((rc = falconsiena_filter_spec_from_gen_spec(&fs_spec, spec)) != 0)
+ if ((rc = siena_filter_spec_from_gen_spec(&fs_spec, spec)) != 0)
goto fail1;
- tbl_id = falconsiena_filter_tbl_id(fs_spec.fsfs_type);
+ tbl_id = siena_filter_tbl_id(fs_spec.fsfs_type);
fsftp = &fsfp->fsf_tbl[tbl_id];
- key = falconsiena_filter_build(&filter, &fs_spec);
+ key = siena_filter_build(&filter, &fs_spec);
EFSYS_LOCK(enp->en_eslp, state);
- rc = falconsiena_filter_search(fsftp, &fs_spec, key, B_FALSE,
+ rc = siena_filter_search(fsftp, &fs_spec, key, B_FALSE,
&filter_idx, &depth);
if (rc != 0)
goto fail2;
- falconsiena_filter_clear_entry(enp, fsftp, filter_idx);
+ siena_filter_clear_entry(enp, fsftp, filter_idx);
if (fsftp->fsft_used == 0)
- falconsiena_filter_reset_search_depth(fsfp, tbl_id);
+ siena_filter_reset_search_depth(fsfp, tbl_id);
EFSYS_UNLOCK(enp->en_eslp, state);
return (0);
@@ -1365,7 +1351,7 @@ fail1:
#define MAX_SUPPORTED 4
static __checkReturn efx_rc_t
-falconsiena_filter_supported_filters(
+siena_filter_supported_filters(
__in efx_nic_t *enp,
__out uint32_t *list,
__out size_t *length)
diff --git a/sys/dev/sfxge/common/efx_impl.h b/sys/dev/sfxge/common/efx_impl.h
index d7a0415..041038f 100644
--- a/sys/dev/sfxge/common/efx_impl.h
+++ b/sys/dev/sfxge/common/efx_impl.h
@@ -429,7 +429,7 @@ typedef struct efx_filter_s {
} efx_filter_t;
extern void
-falconsiena_filter_tbl_clear(
+siena_filter_tbl_clear(
__in efx_nic_t *enp,
__in falconsiena_filter_tbl_id_t tbl);
diff --git a/sys/dev/sfxge/common/efx_intr.c b/sys/dev/sfxge/common/efx_intr.c
index 3e2b08d..eeec8a9 100644
--- a/sys/dev/sfxge/common/efx_intr.c
+++ b/sys/dev/sfxge/common/efx_intr.c
@@ -38,50 +38,50 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_intr_init(
+siena_intr_init(
__in efx_nic_t *enp,
__in efx_intr_type_t type,
__in efsys_mem_t *esmp);
static void
-falconsiena_intr_enable(
+siena_intr_enable(
__in efx_nic_t *enp);
static void
-falconsiena_intr_disable(
+siena_intr_disable(
__in efx_nic_t *enp);
static void
-falconsiena_intr_disable_unlocked(
+siena_intr_disable_unlocked(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_intr_trigger(
+siena_intr_trigger(
__in efx_nic_t *enp,
__in unsigned int level);
static void
-falconsiena_intr_fini(
+siena_intr_fini(
__in efx_nic_t *enp);
static void
-falconsiena_intr_status_line(
+siena_intr_status_line(
__in efx_nic_t *enp,
__out boolean_t *fatalp,
__out uint32_t *qmaskp);
static void
-falconsiena_intr_status_message(
+siena_intr_status_message(
__in efx_nic_t *enp,
__in unsigned int message,
__out boolean_t *fatalp);
static void
-falconsiena_intr_fatal(
+siena_intr_fatal(
__in efx_nic_t *enp);
static __checkReturn boolean_t
-falconsiena_intr_check_fatal(
+siena_intr_check_fatal(
__in efx_nic_t *enp);
@@ -90,15 +90,15 @@ falconsiena_intr_check_fatal(
#if EFSYS_OPT_SIENA
static const efx_intr_ops_t __efx_intr_siena_ops = {
- falconsiena_intr_init, /* eio_init */
- falconsiena_intr_enable, /* eio_enable */
- falconsiena_intr_disable, /* eio_disable */
- falconsiena_intr_disable_unlocked, /* eio_disable_unlocked */
- falconsiena_intr_trigger, /* eio_trigger */
- falconsiena_intr_status_line, /* eio_status_line */
- falconsiena_intr_status_message, /* eio_status_message */
- falconsiena_intr_fatal, /* eio_fatal */
- falconsiena_intr_fini, /* eio_fini */
+ siena_intr_init, /* eio_init */
+ siena_intr_enable, /* eio_enable */
+ siena_intr_disable, /* eio_disable */
+ siena_intr_disable_unlocked, /* eio_disable_unlocked */
+ siena_intr_trigger, /* eio_trigger */
+ siena_intr_status_line, /* eio_status_line */
+ siena_intr_status_message, /* eio_status_message */
+ siena_intr_fatal, /* eio_fatal */
+ siena_intr_fini, /* eio_fini */
};
#endif /* EFSYS_OPT_SIENA */
@@ -303,7 +303,7 @@ efx_intr_fatal(
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_intr_init(
+siena_intr_init(
__in efx_nic_t *enp,
__in efx_intr_type_t type,
__in efsys_mem_t *esmp)
@@ -344,7 +344,7 @@ falconsiena_intr_init(
}
static void
-falconsiena_intr_enable(
+siena_intr_enable(
__in efx_nic_t *enp)
{
efx_intr_t *eip = &(enp->en_intr);
@@ -358,7 +358,7 @@ falconsiena_intr_enable(
}
static void
-falconsiena_intr_disable(
+siena_intr_disable(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -371,7 +371,7 @@ falconsiena_intr_disable(
}
static void
-falconsiena_intr_disable_unlocked(
+siena_intr_disable_unlocked(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -384,7 +384,7 @@ falconsiena_intr_disable_unlocked(
}
static __checkReturn efx_rc_t
-falconsiena_intr_trigger(
+siena_intr_trigger(
__in efx_nic_t *enp,
__in unsigned int level)
{
@@ -437,7 +437,7 @@ fail1:
}
static __checkReturn boolean_t
-falconsiena_intr_check_fatal(
+siena_intr_check_fatal(
__in efx_nic_t *enp)
{
efx_intr_t *eip = &(enp->en_intr);
@@ -461,7 +461,7 @@ falconsiena_intr_check_fatal(
}
static void
-falconsiena_intr_status_line(
+siena_intr_status_line(
__in efx_nic_t *enp,
__out boolean_t *fatalp,
__out uint32_t *qmaskp)
@@ -482,13 +482,13 @@ falconsiena_intr_status_line(
EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
if (*qmaskp & (1U << eip->ei_level))
- *fatalp = falconsiena_intr_check_fatal(enp);
+ *fatalp = siena_intr_check_fatal(enp);
else
*fatalp = B_FALSE;
}
static void
-falconsiena_intr_status_message(
+siena_intr_status_message(
__in efx_nic_t *enp,
__in unsigned int message,
__out boolean_t *fatalp)
@@ -499,14 +499,14 @@ falconsiena_intr_status_message(
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
if (message == eip->ei_level)
- *fatalp = falconsiena_intr_check_fatal(enp);
+ *fatalp = siena_intr_check_fatal(enp);
else
*fatalp = B_FALSE;
}
static void
-falconsiena_intr_fatal(
+siena_intr_fatal(
__in efx_nic_t *enp)
{
#if EFSYS_OPT_DECODE_INTR_FATAL
@@ -562,7 +562,7 @@ falconsiena_intr_fatal(
}
static void
-falconsiena_intr_fini(
+siena_intr_fini(
__in efx_nic_t *enp)
{
efx_oword_t oword;
diff --git a/sys/dev/sfxge/common/efx_mac.c b/sys/dev/sfxge/common/efx_mac.c
index 0e22947..093cb50 100644
--- a/sys/dev/sfxge/common/efx_mac.c
+++ b/sys/dev/sfxge/common/efx_mac.c
@@ -37,7 +37,7 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_mac_multicast_list_set(
+siena_mac_multicast_list_set(
__in efx_nic_t *enp);
#endif /* EFSYS_OPT_SIENA */
@@ -50,7 +50,7 @@ static const efx_mac_ops_t __efx_siena_mac_ops = {
siena_mac_reconfigure, /* emo_addr_set */
siena_mac_reconfigure, /* emo_pdu_set */
siena_mac_reconfigure, /* emo_reconfigure */
- falconsiena_mac_multicast_list_set, /* emo_multicast_list_set */
+ siena_mac_multicast_list_set, /* emo_multicast_list_set */
NULL, /* emo_filter_set_default_rxq */
NULL, /* emo_filter_default_rxq_clear */
#if EFSYS_OPT_LOOPBACK
@@ -764,7 +764,7 @@ fail1:
/* Compute the multicast hash as used on Falcon and Siena. */
static void
-falconsiena_mac_multicast_hash_compute(
+siena_mac_multicast_hash_compute(
__in_ecount(6*count) uint8_t const *addrs,
__in int count,
__out efx_oword_t *hash_low,
@@ -794,7 +794,7 @@ falconsiena_mac_multicast_hash_compute(
}
static __checkReturn efx_rc_t
-falconsiena_mac_multicast_list_set(
+siena_mac_multicast_list_set(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
@@ -807,10 +807,11 @@ falconsiena_mac_multicast_list_set(
memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash));
- falconsiena_mac_multicast_hash_compute(epp->ep_mulcst_addr_list,
- epp->ep_mulcst_addr_count,
- &epp->ep_multicst_hash[0],
- &epp->ep_multicst_hash[1]);
+ siena_mac_multicast_hash_compute(
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count,
+ &epp->ep_multicst_hash[0],
+ &epp->ep_multicst_hash[1]);
if ((rc = emop->emo_reconfigure(enp)) != 0)
goto fail1;
diff --git a/sys/dev/sfxge/common/efx_rx.c b/sys/dev/sfxge/common/efx_rx.c
index af138b9..00db5ca 100644
--- a/sys/dev/sfxge/common/efx_rx.c
+++ b/sys/dev/sfxge/common/efx_rx.c
@@ -38,42 +38,42 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_rx_init(
+siena_rx_init(
__in efx_nic_t *enp);
static void
-falconsiena_rx_fini(
+siena_rx_fini(
__in efx_nic_t *enp);
#if EFSYS_OPT_RX_SCATTER
static __checkReturn efx_rc_t
-falconsiena_rx_scatter_enable(
+siena_rx_scatter_enable(
__in efx_nic_t *enp,
__in unsigned int buf_size);
#endif /* EFSYS_OPT_RX_SCATTER */
#if EFSYS_OPT_RX_SCALE
static __checkReturn efx_rc_t
-falconsiena_rx_scale_mode_set(
+siena_rx_scale_mode_set(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
__in boolean_t insert);
static __checkReturn efx_rc_t
-falconsiena_rx_scale_key_set(
+siena_rx_scale_key_set(
__in efx_nic_t *enp,
__in_ecount(n) uint8_t *key,
__in size_t n);
static __checkReturn efx_rc_t
-falconsiena_rx_scale_tbl_set(
+siena_rx_scale_tbl_set(
__in efx_nic_t *enp,
__in_ecount(n) unsigned int *table,
__in size_t n);
static __checkReturn uint32_t
-falconsiena_rx_prefix_hash(
+siena_rx_prefix_hash(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t func,
__in uint8_t *buffer);
@@ -81,13 +81,13 @@ falconsiena_rx_prefix_hash(
#endif /* EFSYS_OPT_RX_SCALE */
static __checkReturn efx_rc_t
-falconsiena_rx_prefix_pktlen(
+siena_rx_prefix_pktlen(
__in efx_nic_t *enp,
__in uint8_t *buffer,
__out uint16_t *lengthp);
static void
-falconsiena_rx_qpost(
+siena_rx_qpost(
__in efx_rxq_t *erp,
__in_ecount(n) efsys_dma_addr_t *addrp,
__in size_t size,
@@ -96,21 +96,21 @@ falconsiena_rx_qpost(
__in unsigned int added);
static void
-falconsiena_rx_qpush(
+siena_rx_qpush(
__in efx_rxq_t *erp,
__in unsigned int added,
__inout unsigned int *pushedp);
static __checkReturn efx_rc_t
-falconsiena_rx_qflush(
+siena_rx_qflush(
__in efx_rxq_t *erp);
static void
-falconsiena_rx_qenable(
+siena_rx_qenable(
__in efx_rxq_t *erp);
static __checkReturn efx_rc_t
-falconsiena_rx_qcreate(
+siena_rx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
@@ -122,7 +122,7 @@ falconsiena_rx_qcreate(
__in efx_rxq_t *erp);
static void
-falconsiena_rx_qdestroy(
+siena_rx_qdestroy(
__in efx_rxq_t *erp);
#endif /* EFSYS_OPT_SIENA */
@@ -130,24 +130,24 @@ falconsiena_rx_qdestroy(
#if EFSYS_OPT_SIENA
static const efx_rx_ops_t __efx_rx_siena_ops = {
- falconsiena_rx_init, /* erxo_init */
- falconsiena_rx_fini, /* erxo_fini */
+ siena_rx_init, /* erxo_init */
+ siena_rx_fini, /* erxo_fini */
#if EFSYS_OPT_RX_SCATTER
- falconsiena_rx_scatter_enable, /* erxo_scatter_enable */
+ siena_rx_scatter_enable, /* erxo_scatter_enable */
#endif
#if EFSYS_OPT_RX_SCALE
- falconsiena_rx_scale_mode_set, /* erxo_scale_mode_set */
- falconsiena_rx_scale_key_set, /* erxo_scale_key_set */
- falconsiena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
- falconsiena_rx_prefix_hash, /* erxo_prefix_hash */
+ siena_rx_scale_mode_set, /* erxo_scale_mode_set */
+ siena_rx_scale_key_set, /* erxo_scale_key_set */
+ siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ siena_rx_prefix_hash, /* erxo_prefix_hash */
#endif
- falconsiena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
- falconsiena_rx_qpost, /* erxo_qpost */
- falconsiena_rx_qpush, /* erxo_qpush */
- falconsiena_rx_qflush, /* erxo_qflush */
- falconsiena_rx_qenable, /* erxo_qenable */
- falconsiena_rx_qcreate, /* erxo_qcreate */
- falconsiena_rx_qdestroy, /* erxo_qdestroy */
+ siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ siena_rx_qpost, /* erxo_qpost */
+ siena_rx_qpush, /* erxo_qpush */
+ siena_rx_qflush, /* erxo_qflush */
+ siena_rx_qenable, /* erxo_qenable */
+ siena_rx_qcreate, /* erxo_qcreate */
+ siena_rx_qdestroy, /* erxo_qdestroy */
};
#endif /* EFSYS_OPT_SIENA */
@@ -567,7 +567,7 @@ efx_psuedo_hdr_hash_get(
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_rx_init(
+siena_rx_init(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -604,7 +604,7 @@ falconsiena_rx_init(
#if EFSYS_OPT_RX_SCATTER
static __checkReturn efx_rc_t
-falconsiena_rx_scatter_enable(
+siena_rx_scatter_enable(
__in efx_nic_t *enp,
__in unsigned int buf_size)
{
@@ -710,7 +710,7 @@ fail1:
#if EFSYS_OPT_RX_SCALE
static __checkReturn efx_rc_t
-falconsiena_rx_scale_mode_set(
+siena_rx_scale_mode_set(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t alg,
__in efx_rx_hash_type_t type,
@@ -757,7 +757,7 @@ fail1:
#if EFSYS_OPT_RX_SCALE
static __checkReturn efx_rc_t
-falconsiena_rx_scale_key_set(
+siena_rx_scale_key_set(
__in efx_nic_t *enp,
__in_ecount(n) uint8_t *key,
__in size_t n)
@@ -882,7 +882,7 @@ fail1:
#if EFSYS_OPT_RX_SCALE
static __checkReturn efx_rc_t
-falconsiena_rx_scale_tbl_set(
+siena_rx_scale_tbl_set(
__in efx_nic_t *enp,
__in_ecount(n) unsigned int *table,
__in size_t n)
@@ -960,7 +960,7 @@ fail1:
#if EFSYS_OPT_RX_SCALE
static __checkReturn uint32_t
-falconsiena_rx_prefix_hash(
+siena_rx_prefix_hash(
__in efx_nic_t *enp,
__in efx_rx_hash_alg_t func,
__in uint8_t *buffer)
@@ -983,7 +983,7 @@ falconsiena_rx_prefix_hash(
#endif /* EFSYS_OPT_RX_SCALE */
static __checkReturn efx_rc_t
-falconsiena_rx_prefix_pktlen(
+siena_rx_prefix_pktlen(
__in efx_nic_t *enp,
__in uint8_t *buffer,
__out uint16_t *lengthp)
@@ -995,7 +995,7 @@ falconsiena_rx_prefix_pktlen(
static void
-falconsiena_rx_qpost(
+siena_rx_qpost(
__in efx_rxq_t *erp,
__in_ecount(n) efsys_dma_addr_t *addrp,
__in size_t size,
@@ -1033,7 +1033,7 @@ falconsiena_rx_qpost(
}
static void
-falconsiena_rx_qpush(
+siena_rx_qpush(
__in efx_rxq_t *erp,
__in unsigned int added,
__inout unsigned int *pushedp)
@@ -1065,7 +1065,7 @@ falconsiena_rx_qpush(
}
static __checkReturn efx_rc_t
-falconsiena_rx_qflush(
+siena_rx_qflush(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
@@ -1083,7 +1083,7 @@ falconsiena_rx_qflush(
}
static void
-falconsiena_rx_qenable(
+siena_rx_qenable(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
@@ -1103,7 +1103,7 @@ falconsiena_rx_qenable(
}
static __checkReturn efx_rc_t
-falconsiena_rx_qcreate(
+siena_rx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
@@ -1193,7 +1193,7 @@ fail1:
}
static void
-falconsiena_rx_qdestroy(
+siena_rx_qdestroy(
__in efx_rxq_t *erp)
{
efx_nic_t *enp = erp->er_enp;
@@ -1213,7 +1213,7 @@ falconsiena_rx_qdestroy(
}
static void
-falconsiena_rx_fini(
+siena_rx_fini(
__in efx_nic_t *enp)
{
_NOTE(ARGUNUSED(enp))
diff --git a/sys/dev/sfxge/common/efx_tx.c b/sys/dev/sfxge/common/efx_tx.c
index 811530d..69822ec 100644
--- a/sys/dev/sfxge/common/efx_tx.c
+++ b/sys/dev/sfxge/common/efx_tx.c
@@ -47,15 +47,15 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_tx_init(
+siena_tx_init(
__in efx_nic_t *enp);
static void
-falconsiena_tx_fini(
+siena_tx_fini(
__in efx_nic_t *enp);
static __checkReturn efx_rc_t
-falconsiena_tx_qcreate(
+siena_tx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
@@ -68,11 +68,11 @@ falconsiena_tx_qcreate(
__out unsigned int *addedp);
static void
-falconsiena_tx_qdestroy(
+siena_tx_qdestroy(
__in efx_txq_t *etp);
static __checkReturn efx_rc_t
-falconsiena_tx_qpost(
+siena_tx_qpost(
__in efx_txq_t *etp,
__in_ecount(n) efx_buffer_t *eb,
__in unsigned int n,
@@ -80,26 +80,26 @@ falconsiena_tx_qpost(
__inout unsigned int *addedp);
static void
-falconsiena_tx_qpush(
+siena_tx_qpush(
__in efx_txq_t *etp,
__in unsigned int added,
__in unsigned int pushed);
static __checkReturn efx_rc_t
-falconsiena_tx_qpace(
+siena_tx_qpace(
__in efx_txq_t *etp,
__in unsigned int ns);
static __checkReturn efx_rc_t
-falconsiena_tx_qflush(
+siena_tx_qflush(
__in efx_txq_t *etp);
static void
-falconsiena_tx_qenable(
+siena_tx_qenable(
__in efx_txq_t *etp);
__checkReturn efx_rc_t
-falconsiena_tx_qdesc_post(
+siena_tx_qdesc_post(
__in efx_txq_t *etp,
__in_ecount(n) efx_desc_t *ed,
__in unsigned int n,
@@ -107,7 +107,7 @@ falconsiena_tx_qdesc_post(
__inout unsigned int *addedp);
void
-falconsiena_tx_qdesc_dma_create(
+siena_tx_qdesc_dma_create(
__in efx_txq_t *etp,
__in efsys_dma_addr_t addr,
__in size_t size,
@@ -116,7 +116,7 @@ falconsiena_tx_qdesc_dma_create(
#if EFSYS_OPT_QSTATS
static void
-falconsiena_tx_qstats_update(
+siena_tx_qstats_update(
__in efx_txq_t *etp,
__inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
#endif
@@ -126,26 +126,26 @@ falconsiena_tx_qstats_update(
#if EFSYS_OPT_SIENA
static const efx_tx_ops_t __efx_tx_siena_ops = {
- falconsiena_tx_init, /* etxo_init */
- falconsiena_tx_fini, /* etxo_fini */
- falconsiena_tx_qcreate, /* etxo_qcreate */
- falconsiena_tx_qdestroy, /* etxo_qdestroy */
- falconsiena_tx_qpost, /* etxo_qpost */
- falconsiena_tx_qpush, /* etxo_qpush */
- falconsiena_tx_qpace, /* etxo_qpace */
- falconsiena_tx_qflush, /* etxo_qflush */
- falconsiena_tx_qenable, /* etxo_qenable */
+ siena_tx_init, /* etxo_init */
+ siena_tx_fini, /* etxo_fini */
+ siena_tx_qcreate, /* etxo_qcreate */
+ siena_tx_qdestroy, /* etxo_qdestroy */
+ siena_tx_qpost, /* etxo_qpost */
+ siena_tx_qpush, /* etxo_qpush */
+ siena_tx_qpace, /* etxo_qpace */
+ siena_tx_qflush, /* etxo_qflush */
+ siena_tx_qenable, /* etxo_qenable */
NULL, /* etxo_qpio_enable */
NULL, /* etxo_qpio_disable */
NULL, /* etxo_qpio_write */
NULL, /* etxo_qpio_post */
- falconsiena_tx_qdesc_post, /* etxo_qdesc_post */
- falconsiena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ siena_tx_qdesc_post, /* etxo_qdesc_post */
+ siena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
NULL, /* etxo_qdesc_tso_create */
NULL, /* etxo_qdesc_tso2_create */
NULL, /* etxo_qdesc_vlantci_create */
#if EFSYS_OPT_QSTATS
- falconsiena_tx_qstats_update, /* etxo_qstats_update */
+ siena_tx_qstats_update, /* etxo_qstats_update */
#endif
};
#endif /* EFSYS_OPT_SIENA */
@@ -167,7 +167,7 @@ static const efx_tx_ops_t __efx_tx_hunt_ops = {
ef10_tx_qpio_post, /* etxo_qpio_post */
ef10_tx_qdesc_post, /* etxo_qdesc_post */
ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
- hunt_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
#if EFSYS_OPT_QSTATS
@@ -665,7 +665,7 @@ efx_tx_qstats_update(
#if EFSYS_OPT_SIENA
static __checkReturn efx_rc_t
-falconsiena_tx_init(
+siena_tx_init(
__in efx_nic_t *enp)
{
efx_oword_t oword;
@@ -728,7 +728,7 @@ falconsiena_tx_init(
} while (B_FALSE)
static __checkReturn efx_rc_t
-falconsiena_tx_qpost(
+siena_tx_qpost(
__in efx_txq_t *etp,
__in_ecount(n) efx_buffer_t *eb,
__in unsigned int n,
@@ -766,7 +766,7 @@ fail1:
}
static void
-falconsiena_tx_qpush(
+siena_tx_qpush(
__in efx_txq_t *etp,
__in unsigned int added,
__in unsigned int pushed)
@@ -797,7 +797,7 @@ falconsiena_tx_qpush(
#define EFX_TX_PACE_CLOCK_BASE 104
static __checkReturn efx_rc_t
-falconsiena_tx_qpace(
+siena_tx_qpace(
__in efx_txq_t *etp,
__in unsigned int ns)
{
@@ -840,7 +840,7 @@ fail1:
}
static __checkReturn efx_rc_t
-falconsiena_tx_qflush(
+siena_tx_qflush(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
@@ -860,7 +860,7 @@ falconsiena_tx_qflush(
}
static void
-falconsiena_tx_qenable(
+siena_tx_qenable(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
@@ -884,7 +884,7 @@ falconsiena_tx_qenable(
}
static __checkReturn efx_rc_t
-falconsiena_tx_qcreate(
+siena_tx_qcreate(
__in efx_nic_t *enp,
__in unsigned int index,
__in unsigned int label,
@@ -959,7 +959,7 @@ fail1:
}
__checkReturn efx_rc_t
-falconsiena_tx_qdesc_post(
+siena_tx_qdesc_post(
__in efx_txq_t *etp,
__in_ecount(n) efx_desc_t *ed,
__in unsigned int n,
@@ -1000,7 +1000,7 @@ fail1:
}
void
-falconsiena_tx_qdesc_dma_create(
+siena_tx_qdesc_dma_create(
__in efx_txq_t *etp,
__in efsys_dma_addr_t addr,
__in size_t size,
@@ -1052,7 +1052,7 @@ efx_tx_qstat_name(
#if EFSYS_OPT_QSTATS
static void
-falconsiena_tx_qstats_update(
+siena_tx_qstats_update(
__in efx_txq_t *etp,
__inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
{
@@ -1068,7 +1068,7 @@ falconsiena_tx_qstats_update(
#endif /* EFSYS_OPT_QSTATS */
static void
-falconsiena_tx_qdestroy(
+siena_tx_qdestroy(
__in efx_txq_t *etp)
{
efx_nic_t *enp = etp->et_enp;
@@ -1082,7 +1082,7 @@ falconsiena_tx_qdestroy(
}
static void
-falconsiena_tx_fini(
+siena_tx_fini(
__in efx_nic_t *enp)
{
_NOTE(ARGUNUSED(enp))
diff --git a/sys/dev/sfxge/common/hunt_impl.h b/sys/dev/sfxge/common/hunt_impl.h
index fd048d2..2653fe2 100644
--- a/sys/dev/sfxge/common/hunt_impl.h
+++ b/sys/dev/sfxge/common/hunt_impl.h
@@ -746,7 +746,7 @@ ef10_tx_qdesc_dma_create(
__out efx_desc_t *edp);
extern void
-hunt_tx_qdesc_tso_create(
+ef10_tx_qdesc_tso_create(
__in efx_txq_t *etp,
__in uint16_t ipv4_id,
__in uint32_t tcp_seq,
@@ -1046,6 +1046,7 @@ typedef struct ef10_filter_table_s {
uint32_t eft_mulcst_filter_indexes[
EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
uint32_t eft_mulcst_filter_count;
+ boolean_t eft_using_all_mulcst;
} ef10_filter_table_t;
__checkReturn efx_rc_t
diff --git a/sys/dev/sfxge/common/hunt_nic.c b/sys/dev/sfxge/common/hunt_nic.c
index d268e7b..44ddab7 100644
--- a/sys/dev/sfxge/common/hunt_nic.c
+++ b/sys/dev/sfxge/common/hunt_nic.c
@@ -39,1081 +39,6 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_HUNTINGTON
-#include "ef10_tlv_layout.h"
-
- __checkReturn efx_rc_t
-efx_mcdi_get_port_assignment(
- __in efx_nic_t *enp,
- __out uint32_t *portp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
- MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_port_modes(
- __in efx_nic_t *enp,
- __out uint32_t *modesp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
- MC_CMD_GET_PORT_MODES_OUT_LEN)];
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_PORT_MODES;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- /*
- * Require only Modes and DefaultMode fields.
- * (CurrentMode field was added for Medford)
- */
- if (req.emr_out_length_used <
- MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-static __checkReturn efx_rc_t
-efx_mcdi_vadaptor_alloc(
- __in efx_nic_t *enp,
- __in uint32_t port_id)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
- MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
- efx_rc_t rc;
-
- EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
- MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
- VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
- enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_vadaptor_free(
- __in efx_nic_t *enp,
- __in uint32_t port_id)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
- MC_CMD_VADAPTOR_FREE_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_VADAPTOR_FREE;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_mac_address_pf(
- __in efx_nic_t *enp,
- __out_ecount_opt(6) uint8_t mac_addrp[6])
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
- MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
- rc = ENOENT;
- goto fail3;
- }
-
- if (mac_addrp != NULL) {
- uint8_t *addrp;
-
- addrp = MCDI_OUT2(req, uint8_t,
- GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
-
- EFX_MAC_ADDR_COPY(mac_addrp, addrp);
- }
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_mac_address_vf(
- __in efx_nic_t *enp,
- __out_ecount_opt(6) uint8_t mac_addrp[6])
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
- MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
-
- MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
- EVB_PORT_ID_ASSIGNED);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used <
- MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- if (MCDI_OUT_DWORD(req,
- VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
- rc = ENOENT;
- goto fail3;
- }
-
- if (mac_addrp != NULL) {
- uint8_t *addrp;
-
- addrp = MCDI_OUT2(req, uint8_t,
- VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
-
- EFX_MAC_ADDR_COPY(mac_addrp, addrp);
- }
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_clock(
- __in efx_nic_t *enp,
- __out uint32_t *sys_freqp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
- MC_CMD_GET_CLOCK_OUT_LEN)];
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_CLOCK;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
- if (*sys_freqp == 0) {
- rc = EINVAL;
- goto fail3;
- }
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-efx_mcdi_get_vector_cfg(
- __in efx_nic_t *enp,
- __out_opt uint32_t *vec_basep,
- __out_opt uint32_t *pf_nvecp,
- __out_opt uint32_t *vf_nvecp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
- MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- if (vec_basep != NULL)
- *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
- if (pf_nvecp != NULL)
- *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
- if (vf_nvecp != NULL)
- *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_get_capabilities(
- __in efx_nic_t *enp,
- __out uint32_t *flagsp,
- __out uint32_t *flags2p)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
- MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_CAPABILITIES;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
-
- if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
- *flags2p = 0;
- else
- *flags2p = MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-static __checkReturn efx_rc_t
-efx_mcdi_alloc_vis(
- __in efx_nic_t *enp,
- __in uint32_t min_vi_count,
- __in uint32_t max_vi_count,
- __out uint32_t *vi_basep,
- __out uint32_t *vi_countp,
- __out uint32_t *vi_shiftp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
- MC_CMD_ALLOC_VIS_OUT_LEN)];
- efx_rc_t rc;
-
- if (vi_countp == NULL) {
- rc = EINVAL;
- goto fail1;
- }
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_ALLOC_VIS;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_ALLOC_VIS_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
- MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail2;
- }
-
- if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail3;
- }
-
- *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
- *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
-
- /* Report VI_SHIFT if available (always zero for Huntington) */
- if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
- *vi_shiftp = 0;
- else
- *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-static __checkReturn efx_rc_t
-efx_mcdi_free_vis(
- __in efx_nic_t *enp)
-{
- efx_mcdi_req_t req;
- efx_rc_t rc;
-
- EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
- EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
-
- req.emr_cmd = MC_CMD_FREE_VIS;
- req.emr_in_buf = NULL;
- req.emr_in_length = 0;
- req.emr_out_buf = NULL;
- req.emr_out_length = 0;
-
- efx_mcdi_execute_quiet(enp, &req);
-
- /* Ignore EALREADY (no allocated VIs, so nothing to free) */
- if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-static __checkReturn efx_rc_t
-efx_mcdi_alloc_piobuf(
- __in efx_nic_t *enp,
- __out efx_piobuf_handle_t *handlep)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
- MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
- efx_rc_t rc;
-
- if (handlep == NULL) {
- rc = EINVAL;
- goto fail1;
- }
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
-
- efx_mcdi_execute_quiet(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail2;
- }
-
- if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail3;
- }
-
- *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_free_piobuf(
- __in efx_nic_t *enp,
- __in efx_piobuf_handle_t handle)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
- MC_CMD_FREE_PIOBUF_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_FREE_PIOBUF;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
-
- efx_mcdi_execute_quiet(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_link_piobuf(
- __in efx_nic_t *enp,
- __in uint32_t vi_index,
- __in efx_piobuf_handle_t handle)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
- MC_CMD_LINK_PIOBUF_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_LINK_PIOBUF;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
- MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static __checkReturn efx_rc_t
-efx_mcdi_unlink_piobuf(
- __in efx_nic_t *enp,
- __in uint32_t vi_index)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
- MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
-
- MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-static void
-ef10_nic_alloc_piobufs(
- __in efx_nic_t *enp,
- __in uint32_t max_piobuf_count)
-{
- efx_piobuf_handle_t *handlep;
- unsigned int i;
- efx_rc_t rc;
-
- EFSYS_ASSERT3U(max_piobuf_count, <=,
- EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
-
- enp->en_arch.ef10.ena_piobuf_count = 0;
-
- for (i = 0; i < max_piobuf_count; i++) {
- handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
-
- if ((rc = efx_mcdi_alloc_piobuf(enp, handlep)) != 0)
- goto fail1;
-
- enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
- enp->en_arch.ef10.ena_piobuf_count++;
- }
-
- return;
-
-fail1:
- for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
- handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
-
- efx_mcdi_free_piobuf(enp, *handlep);
- *handlep = EFX_PIOBUF_HANDLE_INVALID;
- }
- enp->en_arch.ef10.ena_piobuf_count = 0;
-}
-
-
-static void
-ef10_nic_free_piobufs(
- __in efx_nic_t *enp)
-{
- efx_piobuf_handle_t *handlep;
- unsigned int i;
-
- for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
- handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
-
- efx_mcdi_free_piobuf(enp, *handlep);
- *handlep = EFX_PIOBUF_HANDLE_INVALID;
- }
- enp->en_arch.ef10.ena_piobuf_count = 0;
-}
-
-/* Sub-allocate a block from a piobuf */
- __checkReturn efx_rc_t
-ef10_nic_pio_alloc(
- __inout efx_nic_t *enp,
- __out uint32_t *bufnump,
- __out efx_piobuf_handle_t *handlep,
- __out uint32_t *blknump,
- __out uint32_t *offsetp,
- __out size_t *sizep)
-{
- efx_nic_cfg_t *encp = &enp->en_nic_cfg;
- efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
- uint32_t blk_per_buf;
- uint32_t buf, blk;
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
- EFSYS_ASSERT(bufnump);
- EFSYS_ASSERT(handlep);
- EFSYS_ASSERT(blknump);
- EFSYS_ASSERT(offsetp);
- EFSYS_ASSERT(sizep);
-
- if ((edcp->edc_pio_alloc_size == 0) ||
- (enp->en_arch.ef10.ena_piobuf_count == 0)) {
- rc = ENOMEM;
- goto fail1;
- }
- blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
-
- for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
- uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
-
- if (~(*map) == 0)
- continue;
-
- EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
- for (blk = 0; blk < blk_per_buf; blk++) {
- if ((*map & (1u << blk)) == 0) {
- *map |= (1u << blk);
- goto done;
- }
- }
- }
- rc = ENOMEM;
- goto fail2;
-
-done:
- *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
- *bufnump = buf;
- *blknump = blk;
- *sizep = edcp->edc_pio_alloc_size;
- *offsetp = blk * (*sizep);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-/* Free a piobuf sub-allocated block */
- __checkReturn efx_rc_t
-ef10_nic_pio_free(
- __inout efx_nic_t *enp,
- __in uint32_t bufnum,
- __in uint32_t blknum)
-{
- uint32_t *map;
- efx_rc_t rc;
-
- if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
- (blknum >= (8 * sizeof (*map)))) {
- rc = EINVAL;
- goto fail1;
- }
-
- map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
- if ((*map & (1u << blknum)) == 0) {
- rc = ENOENT;
- goto fail2;
- }
- *map &= ~(1u << blknum);
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_pio_link(
- __inout efx_nic_t *enp,
- __in uint32_t vi_index,
- __in efx_piobuf_handle_t handle)
-{
- return (efx_mcdi_link_piobuf(enp, vi_index, handle));
-}
-
- __checkReturn efx_rc_t
-ef10_nic_pio_unlink(
- __inout efx_nic_t *enp,
- __in uint32_t vi_index)
-{
- return (efx_mcdi_unlink_piobuf(enp, vi_index));
-}
-
- __checkReturn efx_rc_t
-ef10_get_datapath_caps(
- __in efx_nic_t *enp)
-{
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint32_t flags;
- uint32_t flags2;
- efx_rc_t rc;
-
- if ((rc = efx_mcdi_get_capabilities(enp, &flags, &flags2)) != 0)
- goto fail1;
-
-#define CAP_FLAG(flags1, field) \
- ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
-
-#define CAP_FLAG2(flags2, field) \
- ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
-
- /*
- * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
- * We only support the 14 byte prefix here.
- */
- if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
- rc = ENOTSUP;
- goto fail2;
- }
- encp->enc_rx_prefix_size = 14;
-
- /* Check if the firmware supports TSO */
- encp->enc_fw_assisted_tso_enabled =
- CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware supports FATSOv2 */
- encp->enc_fw_assisted_tso_v2_enabled =
- CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware has vadapter/vport/vswitch support */
- encp->enc_datapath_cap_evb =
- CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware supports VLAN insertion */
- encp->enc_hw_tx_insert_vlan_enabled =
- CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware supports RX event batching */
- encp->enc_rx_batching_enabled =
- CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
-
- if (encp->enc_rx_batching_enabled)
- encp->enc_rx_batch_max = 16;
-
- /* Check if the firmware supports disabling scatter on RXQs */
- encp->enc_rx_disable_scatter_supported =
- CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
-
- /* Check if the firmware supports set mac with running filters */
- encp->enc_allow_set_mac_with_installed_filters =
- CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
- B_TRUE : B_FALSE;
-
- /*
- * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
- * specifying which parameters to configure.
- */
- encp->enc_enhanced_set_mac_supported =
- CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
-
-#undef CAP_FLAG
-#undef CAP_FLAG2
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
- __checkReturn efx_rc_t
-ef10_get_privilege_mask(
- __in efx_nic_t *enp,
- __out uint32_t *maskp)
-{
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- uint32_t mask;
- efx_rc_t rc;
-
- if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
- &mask)) != 0) {
- if (rc != ENOTSUP)
- goto fail1;
-
- /* Fallback for old firmware without privilege mask support */
- if (EFX_PCI_FUNCTION_IS_PF(encp)) {
- /* Assume PF has admin privilege */
- mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
- } else {
- /* VF is always unprivileged by default */
- mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
- }
- }
-
- *maskp = mask;
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
-/*
- * The external port mapping is a one-based numbering of the external
- * connectors on the board. It does not distinguish off-board separated
- * outputs such as multi-headed cables.
- * The number of ports that map to each external port connector
- * on the board is determined by the chip family and the port modes to
- * which the NIC can be configured. The mapping table lists modes with
- * port numbering requirements in increasing order.
- */
-static struct {
- efx_family_t family;
- uint32_t modes_mask;
- uint32_t stride;
-} __ef10_external_port_mappings[] = {
- /* Supported modes requiring 1 output per port */
- {
- EFX_FAMILY_HUNTINGTON,
- (1 << TLV_PORT_MODE_10G) |
- (1 << TLV_PORT_MODE_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G),
- 1
- },
- {
- EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G) |
- (1 << TLV_PORT_MODE_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G),
- 1
- },
- /* Supported modes requiring 2 outputs per port */
- {
- EFX_FAMILY_HUNTINGTON,
- (1 << TLV_PORT_MODE_40G) |
- (1 << TLV_PORT_MODE_40G_40G) |
- (1 << TLV_PORT_MODE_40G_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_40G),
- 2
- },
- {
- EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_40G) |
- (1 << TLV_PORT_MODE_40G_40G) |
- (1 << TLV_PORT_MODE_40G_10G_10G) |
- (1 << TLV_PORT_MODE_10G_10G_40G),
- 2
- },
- /* Supported modes requiring 4 outputs per port */
- {
- EFX_FAMILY_MEDFORD,
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
- (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
- 4
- },
-};
-
- __checkReturn efx_rc_t
-ef10_external_port_mapping(
- __in efx_nic_t *enp,
- __in uint32_t port,
- __out uint8_t *external_portp)
-{
- efx_rc_t rc;
- int i;
- uint32_t port_modes;
- uint32_t matches;
- uint32_t stride = 1; /* default 1-1 mapping */
-
- if ((rc = efx_mcdi_get_port_modes(enp, &port_modes)) != 0) {
- /* No port mode information available - use default mapping */
- goto out;
- }
-
- /*
- * Infer the internal port -> external port mapping from
- * the possible port modes for this NIC.
- */
- for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
- if (__ef10_external_port_mappings[i].family !=
- enp->en_family)
- continue;
- matches = (__ef10_external_port_mappings[i].modes_mask &
- port_modes);
- if (matches != 0) {
- stride = __ef10_external_port_mappings[i].stride;
- port_modes &= ~matches;
- }
- }
-
- if (port_modes != 0) {
- /* Some advertised modes are not supported */
- rc = ENOTSUP;
- goto fail1;
- }
-
-out:
- /*
- * Scale as required by last matched mode and then convert to
- * one-based numbering
- */
- *external_portp = (uint8_t)(port / stride) + 1;
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
__checkReturn efx_rc_t
hunt_board_cfg(
@@ -1396,502 +321,4 @@ fail1:
}
- __checkReturn efx_rc_t
-ef10_nic_probe(
- __in efx_nic_t *enp)
-{
- const efx_nic_ops_t *enop = enp->en_enop;
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- /* Read and clear any assertion state */
- if ((rc = efx_mcdi_read_assertion(enp)) != 0)
- goto fail1;
-
- /* Exit the assertion handler */
- if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
- if (rc != EACCES)
- goto fail2;
-
- if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
- goto fail3;
-
- if ((rc = enop->eno_board_cfg(enp)) != 0)
- if (rc != EACCES)
- goto fail4;
-
- /*
- * Set default driver config limits (based on board config).
- *
- * FIXME: For now allocate a fixed number of VIs which is likely to be
- * sufficient and small enough to allow multiple functions on the same
- * port.
- */
- edcp->edc_min_vi_count = edcp->edc_max_vi_count =
- MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
-
- /* The client driver must configure and enable PIO buffer support */
- edcp->edc_max_piobuf_count = 0;
- edcp->edc_pio_alloc_size = 0;
-
-#if EFSYS_OPT_MAC_STATS
- /* Wipe the MAC statistics */
- if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
- goto fail5;
-#endif
-
-#if EFSYS_OPT_LOOPBACK
- if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
- goto fail6;
-#endif
-
-#if EFSYS_OPT_MON_STATS
- if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
- /* Unprivileged functions do not have access to sensors */
- if (rc != EACCES)
- goto fail7;
- }
-#endif
-
- encp->enc_features = enp->en_features;
-
- return (0);
-
-#if EFSYS_OPT_MON_STATS
-fail7:
- EFSYS_PROBE(fail7);
-#endif
-#if EFSYS_OPT_LOOPBACK
-fail6:
- EFSYS_PROBE(fail6);
-#endif
-#if EFSYS_OPT_MAC_STATS
-fail5:
- EFSYS_PROBE(fail5);
-#endif
-fail4:
- EFSYS_PROBE(fail4);
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_set_drv_limits(
- __inout efx_nic_t *enp,
- __in efx_drv_limits_t *edlp)
-{
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
- uint32_t min_evq_count, max_evq_count;
- uint32_t min_rxq_count, max_rxq_count;
- uint32_t min_txq_count, max_txq_count;
- efx_rc_t rc;
-
- if (edlp == NULL) {
- rc = EINVAL;
- goto fail1;
- }
-
- /* Get minimum required and maximum usable VI limits */
- min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
- min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
- min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
-
- edcp->edc_min_vi_count =
- MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
-
- max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
- max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
- max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
-
- edcp->edc_max_vi_count =
- MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
-
- /*
- * Check limits for sub-allocated piobuf blocks.
- * PIO is optional, so don't fail if the limits are incorrect.
- */
- if ((encp->enc_piobuf_size == 0) ||
- (encp->enc_piobuf_limit == 0) ||
- (edlp->edl_min_pio_alloc_size == 0) ||
- (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
- /* Disable PIO */
- edcp->edc_max_piobuf_count = 0;
- edcp->edc_pio_alloc_size = 0;
- } else {
- uint32_t blk_size, blk_count, blks_per_piobuf;
-
- blk_size =
- MAX(edlp->edl_min_pio_alloc_size,
- encp->enc_piobuf_min_alloc_size);
-
- blks_per_piobuf = encp->enc_piobuf_size / blk_size;
- EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
-
- blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
-
- /* A zero max pio alloc count means unlimited */
- if ((edlp->edl_max_pio_alloc_count > 0) &&
- (edlp->edl_max_pio_alloc_count < blk_count)) {
- blk_count = edlp->edl_max_pio_alloc_count;
- }
-
- edcp->edc_pio_alloc_size = blk_size;
- edcp->edc_max_piobuf_count =
- (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-
- __checkReturn efx_rc_t
-ef10_nic_reset(
- __in efx_nic_t *enp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
- MC_CMD_ENTITY_RESET_OUT_LEN)];
- efx_rc_t rc;
-
- /* ef10_nic_reset() is called to recover from BADASSERT failures. */
- if ((rc = efx_mcdi_read_assertion(enp)) != 0)
- goto fail1;
- if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
- goto fail2;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_ENTITY_RESET;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
-
- MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
- ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail3;
- }
-
- /* Clear RX/TX DMA queue errors */
- enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_init(
- __in efx_nic_t *enp)
-{
- efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
- uint32_t min_vi_count, max_vi_count;
- uint32_t vi_count, vi_base, vi_shift;
- uint32_t i;
- uint32_t retry;
- uint32_t delay_us;
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- /* Enable reporting of some events (e.g. link change) */
- if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
- goto fail1;
-
- /* Allocate (optional) on-chip PIO buffers */
- ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
-
- /*
- * For best performance, PIO writes should use a write-combined
- * (WC) memory mapping. Using a separate WC mapping for the PIO
- * aperture of each VI would be a burden to drivers (and not
- * possible if the host page size is >4Kbyte).
- *
- * To avoid this we use a single uncached (UC) mapping for VI
- * register access, and a single WC mapping for extra VIs used
- * for PIO writes.
- *
- * Each piobuf must be linked to a VI in the WC mapping, and to
- * each VI that is using a sub-allocated block from the piobuf.
- */
- min_vi_count = edcp->edc_min_vi_count;
- max_vi_count =
- edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
-
- /* Ensure that the previously attached driver's VIs are freed */
- if ((rc = efx_mcdi_free_vis(enp)) != 0)
- goto fail2;
-
- /*
- * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
- * fails then retrying the request for fewer VI resources may succeed.
- */
- vi_count = 0;
- if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
- &vi_base, &vi_count, &vi_shift)) != 0)
- goto fail3;
-
- EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
-
- if (vi_count < min_vi_count) {
- rc = ENOMEM;
- goto fail4;
- }
-
- enp->en_arch.ef10.ena_vi_base = vi_base;
- enp->en_arch.ef10.ena_vi_count = vi_count;
- enp->en_arch.ef10.ena_vi_shift = vi_shift;
-
- if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
- /* Not enough extra VIs to map piobufs */
- ef10_nic_free_piobufs(enp);
- }
-
- enp->en_arch.ef10.ena_pio_write_vi_base =
- vi_count - enp->en_arch.ef10.ena_piobuf_count;
-
- /* Save UC memory mapping details */
- enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
- if (enp->en_arch.ef10.ena_piobuf_count > 0) {
- enp->en_arch.ef10.ena_uc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
- enp->en_arch.ef10.ena_pio_write_vi_base);
- } else {
- enp->en_arch.ef10.ena_uc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
- enp->en_arch.ef10.ena_vi_count);
- }
-
- /* Save WC memory mapping details */
- enp->en_arch.ef10.ena_wc_mem_map_offset =
- enp->en_arch.ef10.ena_uc_mem_map_offset +
- enp->en_arch.ef10.ena_uc_mem_map_size;
-
- enp->en_arch.ef10.ena_wc_mem_map_size =
- (ER_DZ_TX_PIOBUF_STEP *
- enp->en_arch.ef10.ena_piobuf_count);
-
- /* Link piobufs to extra VIs in WC mapping */
- if (enp->en_arch.ef10.ena_piobuf_count > 0) {
- for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
- rc = efx_mcdi_link_piobuf(enp,
- enp->en_arch.ef10.ena_pio_write_vi_base + i,
- enp->en_arch.ef10.ena_piobuf_handle[i]);
- if (rc != 0)
- break;
- }
- }
-
- /*
- * Allocate a vAdaptor attached to our upstream vPort/pPort.
- *
- * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
- * driver has yet to bring up the EVB port. See bug 56147. In this case,
- * retry the request several times after waiting a while. The wait time
- * between retries starts small (10ms) and exponentially increases.
- * Total wait time is a little over two seconds. Retry logic in the
- * client driver may mean this whole loop is repeated if it continues to
- * fail.
- */
- retry = 0;
- delay_us = 10000;
- while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
- if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
- (rc != ENOENT)) {
- /*
- * Do not retry alloc for PF, or for other errors on
- * a VF.
- */
- goto fail5;
- }
-
- /* VF startup before PF is ready. Retry allocation. */
- if (retry > 5) {
- /* Too many attempts */
- rc = EINVAL;
- goto fail6;
- }
- EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
- EFSYS_SLEEP(delay_us);
- retry++;
- if (delay_us < 500000)
- delay_us <<= 2;
- }
-
- enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
- enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
-
- return (0);
-
-fail6:
- EFSYS_PROBE(fail6);
-fail5:
- EFSYS_PROBE(fail5);
-fail4:
- EFSYS_PROBE(fail4);
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-
- ef10_nic_free_piobufs(enp);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_get_vi_pool(
- __in efx_nic_t *enp,
- __out uint32_t *vi_countp)
-{
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- /*
- * Report VIs that the client driver can use.
- * Do not include VIs used for PIO buffer writes.
- */
- *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
-
- return (0);
-}
-
- __checkReturn efx_rc_t
-ef10_nic_get_bar_region(
- __in efx_nic_t *enp,
- __in efx_nic_region_t region,
- __out uint32_t *offsetp,
- __out size_t *sizep)
-{
- efx_rc_t rc;
-
- EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
- enp->en_family == EFX_FAMILY_MEDFORD);
-
- /*
- * TODO: Specify host memory mapping alignment and granularity
- * in efx_drv_limits_t so that they can be taken into account
- * when allocating extra VIs for PIO writes.
- */
- switch (region) {
- case EFX_REGION_VI:
- /* UC mapped memory BAR region for VI registers */
- *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
- *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
- break;
-
- case EFX_REGION_PIO_WRITE_VI:
- /* WC mapped memory BAR region for piobuf writes */
- *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
- *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
- break;
-
- default:
- rc = EINVAL;
- goto fail1;
- }
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- void
-ef10_nic_fini(
- __in efx_nic_t *enp)
-{
- uint32_t i;
- efx_rc_t rc;
-
- (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
- enp->en_vport_id = 0;
-
- /* Unlink piobufs from extra VIs in WC mapping */
- if (enp->en_arch.ef10.ena_piobuf_count > 0) {
- for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
- rc = efx_mcdi_unlink_piobuf(enp,
- enp->en_arch.ef10.ena_pio_write_vi_base + i);
- if (rc != 0)
- break;
- }
- }
-
- ef10_nic_free_piobufs(enp);
-
- (void) efx_mcdi_free_vis(enp);
- enp->en_arch.ef10.ena_vi_count = 0;
-}
-
- void
-ef10_nic_unprobe(
- __in efx_nic_t *enp)
-{
-#if EFSYS_OPT_MON_STATS
- mcdi_mon_cfg_free(enp);
-#endif /* EFSYS_OPT_MON_STATS */
- (void) efx_mcdi_drv_attach(enp, B_FALSE);
-}
-
-#if EFSYS_OPT_DIAG
-
- __checkReturn efx_rc_t
-ef10_nic_register_test(
- __in efx_nic_t *enp)
-{
- efx_rc_t rc;
-
- /* FIXME */
- _NOTE(ARGUNUSED(enp))
- if (B_FALSE) {
- rc = ENOTSUP;
- goto fail1;
- }
- /* FIXME */
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
-#endif /* EFSYS_OPT_DIAG */
-
-
-
#endif /* EFSYS_OPT_HUNTINGTON */
diff --git a/sys/dev/sfxge/common/hunt_phy.c b/sys/dev/sfxge/common/hunt_phy.c
index a6b7faa..0f54b57 100644
--- a/sys/dev/sfxge/common/hunt_phy.c
+++ b/sys/dev/sfxge/common/hunt_phy.c
@@ -36,485 +36,6 @@ __FBSDID("$FreeBSD$");
#if EFSYS_OPT_HUNTINGTON
-static void
-mcdi_phy_decode_cap(
- __in uint32_t mcdi_cap,
- __out uint32_t *maskp)
-{
- uint32_t mask;
-
- mask = 0;
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
- mask |= (1 << EFX_PHY_CAP_10HDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_10FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
- mask |= (1 << EFX_PHY_CAP_100HDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_100FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
- mask |= (1 << EFX_PHY_CAP_1000HDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_1000FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_10000FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
- mask |= (1 << EFX_PHY_CAP_40000FDX);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
- mask |= (1 << EFX_PHY_CAP_PAUSE);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
- mask |= (1 << EFX_PHY_CAP_ASYM);
- if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- mask |= (1 << EFX_PHY_CAP_AN);
-
- *maskp = mask;
-}
-
-static void
-mcdi_phy_decode_link_mode(
- __in efx_nic_t *enp,
- __in uint32_t link_flags,
- __in unsigned int speed,
- __in unsigned int fcntl,
- __out efx_link_mode_t *link_modep,
- __out unsigned int *fcntlp)
-{
- boolean_t fd = !!(link_flags &
- (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
- boolean_t up = !!(link_flags &
- (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
-
- _NOTE(ARGUNUSED(enp))
-
- if (!up)
- *link_modep = EFX_LINK_DOWN;
- else if (speed == 40000 && fd)
- *link_modep = EFX_LINK_40000FDX;
- else if (speed == 10000 && fd)
- *link_modep = EFX_LINK_10000FDX;
- else if (speed == 1000)
- *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
- else if (speed == 100)
- *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
- else if (speed == 10)
- *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
- else
- *link_modep = EFX_LINK_UNKNOWN;
-
- if (fcntl == MC_CMD_FCNTL_OFF)
- *fcntlp = 0;
- else if (fcntl == MC_CMD_FCNTL_RESPOND)
- *fcntlp = EFX_FCNTL_RESPOND;
- else if (fcntl == MC_CMD_FCNTL_GENERATE)
- *fcntlp = EFX_FCNTL_GENERATE;
- else if (fcntl == MC_CMD_FCNTL_BIDIR)
- *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
- else {
- EFSYS_PROBE1(mc_pcol_error, int, fcntl);
- *fcntlp = 0;
- }
-}
-
-
- void
-ef10_phy_link_ev(
- __in efx_nic_t *enp,
- __in efx_qword_t *eqp,
- __out efx_link_mode_t *link_modep)
-{
- efx_port_t *epp = &(enp->en_port);
- unsigned int link_flags;
- unsigned int speed;
- unsigned int fcntl;
- efx_link_mode_t link_mode;
- uint32_t lp_cap_mask;
-
- /*
- * Convert the LINKCHANGE speed enumeration into mbit/s, in the
- * same way as GET_LINK encodes the speed
- */
- switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
- case MCDI_EVENT_LINKCHANGE_SPEED_100M:
- speed = 100;
- break;
- case MCDI_EVENT_LINKCHANGE_SPEED_1G:
- speed = 1000;
- break;
- case MCDI_EVENT_LINKCHANGE_SPEED_10G:
- speed = 10000;
- break;
- case MCDI_EVENT_LINKCHANGE_SPEED_40G:
- speed = 40000;
- break;
- default:
- speed = 0;
- break;
- }
-
- link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
- mcdi_phy_decode_link_mode(enp, link_flags, speed,
- MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
- &link_mode, &fcntl);
- mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
- &lp_cap_mask);
-
- /*
- * It's safe to update ep_lp_cap_mask without the driver's port lock
- * because presumably any concurrently running efx_port_poll() is
- * only going to arrive at the same value.
- *
- * ep_fcntl has two meanings. It's either the link common fcntl
- * (if the PHY supports AN), or it's the forced link state. If
- * the former, it's safe to update the value for the same reason as
- * for ep_lp_cap_mask. If the latter, then just ignore the value,
- * because we can race with efx_mac_fcntl_set().
- */
- epp->ep_lp_cap_mask = lp_cap_mask;
- epp->ep_fcntl = fcntl;
-
- *link_modep = link_mode;
-}
-
- __checkReturn efx_rc_t
-ef10_phy_power(
- __in efx_nic_t *enp,
- __in boolean_t power)
-{
- efx_rc_t rc;
-
- if (!power)
- return (0);
-
- /* Check if the PHY is a zombie */
- if ((rc = ef10_phy_verify(enp)) != 0)
- goto fail1;
-
- enp->en_reset_flags |= EFX_RESET_PHY;
-
- return (0);
-
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_get_link(
- __in efx_nic_t *enp,
- __out ef10_link_state_t *elsp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
- MC_CMD_GET_LINK_OUT_LEN)];
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_LINK;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
- &elsp->els_adv_cap_mask);
- mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
- &elsp->els_lp_cap_mask);
-
- mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
- MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
- MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
- &elsp->els_link_mode, &elsp->els_fcntl);
-
-#if EFSYS_OPT_LOOPBACK
- /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
- EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
-
- elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
-#endif /* EFSYS_OPT_LOOPBACK */
-
- elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
-
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_reconfigure(
- __in efx_nic_t *enp)
-{
- efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
- efx_port_t *epp = &(enp->en_port);
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
- MC_CMD_SET_LINK_OUT_LEN)];
- uint32_t cap_mask;
- unsigned int led_mode;
- unsigned int speed;
- efx_rc_t rc;
-
- if (~encp->enc_func_flags & EFX_NIC_FUNC_LINKCTRL)
- goto out;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_SET_LINK;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
-
- cap_mask = epp->ep_adv_cap_mask;
- MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
- PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
- PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
- PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
- PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
- PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
- PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
- PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
- PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
- PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
- PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
- /* Too many fields for POPULATE macros, so insert this afterwards */
- MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
- PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
-
-#if EFSYS_OPT_LOOPBACK
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
- epp->ep_loopback_type);
- switch (epp->ep_loopback_link_mode) {
- case EFX_LINK_100FDX:
- speed = 100;
- break;
- case EFX_LINK_1000FDX:
- speed = 1000;
- break;
- case EFX_LINK_10000FDX:
- speed = 10000;
- break;
- case EFX_LINK_40000FDX:
- speed = 40000;
- break;
- default:
- speed = 0;
- }
-#else
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
- speed = 0;
-#endif /* EFSYS_OPT_LOOPBACK */
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
-
-#if EFSYS_OPT_PHY_FLAGS
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
-#else
- MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
-#endif /* EFSYS_OPT_PHY_FLAGS */
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- /* And set the blink mode */
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_SET_ID_LED;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
-
-#if EFSYS_OPT_PHY_LED_CONTROL
- switch (epp->ep_phy_led_mode) {
- case EFX_PHY_LED_DEFAULT:
- led_mode = MC_CMD_LED_DEFAULT;
- break;
- case EFX_PHY_LED_OFF:
- led_mode = MC_CMD_LED_OFF;
- break;
- case EFX_PHY_LED_ON:
- led_mode = MC_CMD_LED_ON;
- break;
- default:
- EFSYS_ASSERT(0);
- led_mode = MC_CMD_LED_DEFAULT;
- }
-
- MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
-#else
- MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
-#endif /* EFSYS_OPT_PHY_LED_CONTROL */
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail2;
- }
-out:
- return (0);
-
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_verify(
- __in efx_nic_t *enp)
-{
- efx_mcdi_req_t req;
- uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
- MC_CMD_GET_PHY_STATE_OUT_LEN)];
- uint32_t state;
- efx_rc_t rc;
-
- (void) memset(payload, 0, sizeof (payload));
- req.emr_cmd = MC_CMD_GET_PHY_STATE;
- req.emr_in_buf = payload;
- req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
- req.emr_out_buf = payload;
- req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
-
- efx_mcdi_execute(enp, &req);
-
- if (req.emr_rc != 0) {
- rc = req.emr_rc;
- goto fail1;
- }
-
- if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
- rc = EMSGSIZE;
- goto fail2;
- }
-
- state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
- if (state != MC_CMD_PHY_STATE_OK) {
- if (state != MC_CMD_PHY_STATE_ZOMBIE)
- EFSYS_PROBE1(mc_pcol_error, int, state);
- rc = ENOTACTIVE;
- goto fail3;
- }
-
- return (0);
-
-fail3:
- EFSYS_PROBE(fail3);
-fail2:
- EFSYS_PROBE(fail2);
-fail1:
- EFSYS_PROBE1(fail1, efx_rc_t, rc);
-
- return (rc);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_oui_get(
- __in efx_nic_t *enp,
- __out uint32_t *ouip)
-{
- _NOTE(ARGUNUSED(enp, ouip))
-
- return (ENOTSUP);
-}
-
-#if EFSYS_OPT_PHY_STATS
-
- __checkReturn efx_rc_t
-ef10_phy_stats_update(
- __in efx_nic_t *enp,
- __in efsys_mem_t *esmp,
- __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
-{
- /* TBD: no stats support in firmware yet */
- _NOTE(ARGUNUSED(enp, esmp))
- memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat));
-
- return (0);
-}
-
-#endif /* EFSYS_OPT_PHY_STATS */
-
-#if EFSYS_OPT_PHY_PROPS
-
-#if EFSYS_OPT_NAMES
-
- const char *
-ef10_phy_prop_name(
- __in efx_nic_t *enp,
- __in unsigned int id)
-{
- _NOTE(ARGUNUSED(enp, id))
-
- return (NULL);
-}
-
-#endif /* EFSYS_OPT_NAMES */
-
- __checkReturn efx_rc_t
-ef10_phy_prop_get(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t flags,
- __out uint32_t *valp)
-{
- _NOTE(ARGUNUSED(enp, id, flags, valp))
-
- return (ENOTSUP);
-}
-
- __checkReturn efx_rc_t
-ef10_phy_prop_set(
- __in efx_nic_t *enp,
- __in unsigned int id,
- __in uint32_t val)
-{
- _NOTE(ARGUNUSED(enp, id, val))
-
- return (ENOTSUP);
-}
-
-#endif /* EFSYS_OPT_PHY_PROPS */
-
#if EFSYS_OPT_BIST
__checkReturn efx_rc_t
diff --git a/sys/modules/gpio/Makefile b/sys/modules/gpio/Makefile
index 648c33a..47eba81 100644
--- a/sys/modules/gpio/Makefile
+++ b/sys/modules/gpio/Makefile
@@ -25,6 +25,6 @@
# SUCH DAMAGE.
#
-SUBDIR = gpiobus gpioiic gpioled
+SUBDIR = gpiobus gpioiic gpioled gpiokeys
.include <bsd.subdir.mk>
diff --git a/sys/modules/gpio/gpiokeys/Makefile b/sys/modules/gpio/gpiokeys/Makefile
new file mode 100644
index 0000000..95ffce1
--- /dev/null
+++ b/sys/modules/gpio/gpiokeys/Makefile
@@ -0,0 +1,14 @@
+#
+# $FreeBSD$
+#
+
+.PATH: ${.CURDIR}/../../../dev/gpio/
+
+KMOD= gpiokeys
+SRCS= gpiokeys.c gpiokeys_codes.c
+SRCS+= bus_if.h device_if.h gpio_if.h ofw_bus_if.h
+SRCS+= opt_platform.h opt_kbd.h
+
+CFLAGS+= -I. -I${.CURDIR}/../../../dev/gpio/
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/sfxge/Makefile b/sys/modules/sfxge/Makefile
index 9fa9855..9c1225c 100644
--- a/sys/modules/sfxge/Makefile
+++ b/sys/modules/sfxge/Makefile
@@ -29,11 +29,11 @@ SRCS+= siena_mac.c siena_mcdi.c siena_nic.c siena_nvram.c siena_phy.c
SRCS+= siena_sram.c siena_vpd.c
SRCS+= siena_flash.h siena_impl.h
+SRCS+= ef10_ev.c ef10_filter.c ef10_intr.c ef10_mac.c ef10_mcdi.c ef10_nic.c
+SRCS+= ef10_nvram.c ef10_phy.c ef10_rx.c ef10_tx.c ef10_vpd.c
SRCS+= ef10_impl.h
-SRCS+= hunt_ev.c hunt_intr.c hunt_mac.c hunt_mcdi.c hunt_nic.c
-SRCS+= hunt_nvram.c hunt_rx.c hunt_phy.c hunt_tx.c hunt_vpd.c
-SRCS+= hunt_filter.c
+SRCS+= hunt_nic.c hunt_phy.c
SRCS+= hunt_impl.h
SRCS+= medford_nic.c
diff --git a/sys/net/if.c b/sys/net/if.c
index 7a0bcec..e816442 100644
--- a/sys/net/if.c
+++ b/sys/net/if.c
@@ -112,6 +112,13 @@ SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW,
&log_link_state_change, 0,
"log interface link state change events");
+/* Log promiscuous mode change events */
+static int log_promisc_mode_change = 1;
+
+SYSCTL_INT(_net_link, OID_AUTO, log_promisc_mode_change, CTLFLAG_RW,
+ &log_promisc_mode_change, 1,
+ "log promiscuous mode change events");
+
/* Interface description */
static unsigned int ifdescr_maxlen = 1024;
SYSCTL_UINT(_net, OID_AUTO, ifdescr_maxlen, CTLFLAG_RW,
@@ -2326,9 +2333,11 @@ ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
ifp->if_flags |= IFF_PROMISC;
else if (ifp->if_pcount == 0)
ifp->if_flags &= ~IFF_PROMISC;
- log(LOG_INFO, "%s: permanently promiscuous mode %s\n",
- ifp->if_xname,
- (new_flags & IFF_PPROMISC) ? "enabled" : "disabled");
+ if (log_promisc_mode_change)
+ log(LOG_INFO, "%s: permanently promiscuous mode %s\n",
+ ifp->if_xname,
+ ((new_flags & IFF_PPROMISC) ?
+ "enabled" : "disabled"));
}
ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
(new_flags &~ IFF_CANTCHANGE);
@@ -2813,7 +2822,8 @@ ifpromisc(struct ifnet *ifp, int pswitch)
error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC,
&ifp->if_pcount, pswitch);
/* If promiscuous mode status has changed, log a message */
- if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC))
+ if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC) &&
+ log_promisc_mode_change)
log(LOG_INFO, "%s: promiscuous mode %s\n",
ifp->if_xname,
(ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled");
diff --git a/sys/net80211/ieee80211_adhoc.c b/sys/net80211/ieee80211_adhoc.c
index b7f4bbd..0a03ed2 100644
--- a/sys/net80211/ieee80211_adhoc.c
+++ b/sys/net80211/ieee80211_adhoc.c
@@ -674,7 +674,7 @@ adhoc_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_channel *rxchan = ic->ic_curchan;
struct ieee80211_frame *wh;
- uint8_t *frm, *efrm, *sfrm;
+ uint8_t *frm, *efrm;
uint8_t *ssid, *rates, *xrates;
#if 0
int ht_state_change = 0;
@@ -809,7 +809,6 @@ adhoc_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
* [tlv] extended supported rates
*/
ssid = rates = xrates = NULL;
- sfrm = frm;
while (efrm - frm > 1) {
IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
switch (*frm) {
diff --git a/sys/net80211/ieee80211_hostap.c b/sys/net80211/ieee80211_hostap.c
index 1f32c7e..60535b2 100644
--- a/sys/net80211/ieee80211_hostap.c
+++ b/sys/net80211/ieee80211_hostap.c
@@ -412,16 +412,8 @@ hostap_deliver_data(struct ieee80211vap *vap,
ieee80211_free_node(sta);
}
}
- if (mcopy != NULL) {
- int len, err;
- len = mcopy->m_pkthdr.len;
- err = ieee80211_vap_xmitpkt(vap, mcopy);
- if (err) {
- /* NB: IFQ_HANDOFF reclaims mcopy */
- } else {
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
- }
- }
+ if (mcopy != NULL && ieee80211_vap_xmitpkt(vap, mcopy) == 0)
+ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
}
if (m != NULL) {
/*
@@ -1798,7 +1790,6 @@ hostap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
* [tlv] extended supported rates
*/
ssid = rates = xrates = NULL;
- sfrm = frm;
while (efrm - frm > 1) {
IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
switch (*frm) {
diff --git a/sys/net80211/ieee80211_hwmp.c b/sys/net80211/ieee80211_hwmp.c
index cfc7759..192024e 100644
--- a/sys/net80211/ieee80211_hwmp.c
+++ b/sys/net80211/ieee80211_hwmp.c
@@ -128,7 +128,6 @@ typedef uint32_t ieee80211_hwmp_seq;
#define HWMP_SEQ_LEQ(a, b) ((int32_t)((a)-(b)) <= 0)
#define HWMP_SEQ_EQ(a, b) ((int32_t)((a)-(b)) == 0)
#define HWMP_SEQ_GT(a, b) ((int32_t)((a)-(b)) > 0)
-#define HWMP_SEQ_GEQ(a, b) ((int32_t)((a)-(b)) >= 0)
#define HWMP_SEQ_MAX(a, b) (a > b ? a : b)
@@ -1526,7 +1525,6 @@ hwmp_peerdown(struct ieee80211_node *ni)
#define PERR_DADDR(n) perr->perr_dests[n].dest_addr
#define PERR_DSEQ(n) perr->perr_dests[n].dest_seq
#define PERR_DEXTADDR(n) perr->perr_dests[n].dest_ext_addr
-#define PERR_DRCODE(n) perr->perr_dests[n].dest_rcode
static void
hwmp_recv_perr(struct ieee80211vap *vap, struct ieee80211_node *ni,
const struct ieee80211_frame *wh, const struct ieee80211_meshperr_ie *perr)
@@ -1628,7 +1626,6 @@ done:
#undef PERR_DADDR
#undef PERR_DSEQ
#undef PERR_DEXTADDR
-#undef PERR_DRCODE
static int
hwmp_send_perr(struct ieee80211vap *vap,
@@ -1737,7 +1734,6 @@ hwmp_recv_rann(struct ieee80211vap *vap, struct ieee80211_node *ni,
struct ieee80211_hwmp_route *hr;
struct ieee80211_meshpreq_ie preq;
struct ieee80211_meshrann_ie prann;
- uint32_t metric = 0;
if (IEEE80211_ADDR_EQ(rann->rann_addr, vap->iv_myaddr))
return;
@@ -1766,7 +1762,6 @@ hwmp_recv_rann(struct ieee80211vap *vap, struct ieee80211_node *ni,
/* RANN ACCEPTED */
ieee80211_hwmp_rannint = rann->rann_interval; /* XXX: mtx lock? */
- metric = rann->rann_metric + ms->ms_pmetric->mpm_metric(ni);
if (rt == NULL) {
rt = ieee80211_mesh_rt_add(vap, rann->rann_addr);
diff --git a/sys/net80211/ieee80211_mesh.c b/sys/net80211/ieee80211_mesh.c
index 17adda0..7ba784b 100644
--- a/sys/net80211/ieee80211_mesh.c
+++ b/sys/net80211/ieee80211_mesh.c
@@ -1526,7 +1526,6 @@ mesh_input(struct ieee80211_node *ni, struct mbuf *m,
{
#define HAS_SEQ(type) ((type & 0x4) == 0)
#define MC01(mc) ((const struct ieee80211_meshcntl_ae01 *)mc)
-#define MC10(mc) ((const struct ieee80211_meshcntl_ae10 *)mc)
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ifnet *ifp = vap->iv_ifp;
@@ -1826,7 +1825,6 @@ out:
return type;
#undef HAS_SEQ
#undef MC01
-#undef MC10
}
static void
@@ -1981,7 +1979,6 @@ mesh_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype,
case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
{
uint8_t *ssid, *meshid, *rates, *xrates;
- uint8_t *sfrm;
if (vap->iv_state != IEEE80211_S_RUN) {
IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT,
@@ -2005,7 +2002,6 @@ mesh_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype,
* [tlv] mesh id
*/
ssid = meshid = rates = xrates = NULL;
- sfrm = frm;
while (efrm - frm > 1) {
IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return);
switch (*frm) {
@@ -2116,10 +2112,10 @@ mesh_parse_meshpeering_action(struct ieee80211_node *ni,
struct ieee80211vap *vap = ni->ni_vap;
const struct ieee80211_meshpeer_ie *mpie;
uint16_t args[3];
- const uint8_t *meshid, *meshconf, *meshpeer;
+ const uint8_t *meshid, *meshconf;
uint8_t sendclose = 0; /* 1 = MPM frame rejected, close will be sent */
- meshid = meshconf = meshpeer = NULL;
+ meshid = meshconf = NULL;
while (efrm - frm > 1) {
IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return NULL);
switch (*frm) {
@@ -2130,7 +2126,6 @@ mesh_parse_meshpeering_action(struct ieee80211_node *ni,
meshconf = frm;
break;
case IEEE80211_ELEMID_MESHPEER:
- meshpeer = frm;
mpie = (const struct ieee80211_meshpeer_ie *) frm;
memset(mp, 0, sizeof(*mp));
mp->peer_len = mpie->peer_len;
@@ -2660,7 +2655,6 @@ mesh_send_action(struct ieee80211_node *ni,
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211_bpf_params params;
- struct ieee80211_frame *wh;
int ret;
KASSERT(ni != NULL, ("null node"));
@@ -2681,7 +2675,6 @@ mesh_send_action(struct ieee80211_node *ni,
}
IEEE80211_TX_LOCK(ic);
- wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_ACTION,
IEEE80211_NONQOS_TID, sa, da, sa);
diff --git a/sys/net80211/ieee80211_node.c b/sys/net80211/ieee80211_node.c
index c1135af..fd134e1 100644
--- a/sys/net80211/ieee80211_node.c
+++ b/sys/net80211/ieee80211_node.c
@@ -73,12 +73,6 @@ CTASSERT((IEEE80211_NODE_HASHSIZE & (IEEE80211_NODE_HASHSIZE-1)) == 0);
#define IEEE80211_AID_ISSET(_vap, b) \
((_vap)->iv_aid_bitmap[IEEE80211_AID(b) / 32] & (1 << (IEEE80211_AID(b) % 32)))
-#ifdef IEEE80211_DEBUG_REFCNT
-#define REFCNT_LOC "%s (%s:%u) %p<%s> refcnt %d\n", __func__, func, line
-#else
-#define REFCNT_LOC "%s %p<%s> refcnt %d\n", __func__
-#endif
-
static int ieee80211_sta_join1(struct ieee80211_node *);
static struct ieee80211_node *node_alloc(struct ieee80211vap *,
diff --git a/sys/net80211/ieee80211_output.c b/sys/net80211/ieee80211_output.c
index 68e46eb..65fe126 100644
--- a/sys/net80211/ieee80211_output.c
+++ b/sys/net80211/ieee80211_output.c
@@ -121,7 +121,6 @@ ieee80211_vap_pkt_send_dest(struct ieee80211vap *vap, struct mbuf *m,
{
struct ieee80211com *ic = vap->iv_ic;
struct ifnet *ifp = vap->iv_ifp;
- int len, mcast;
if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
(m->m_flags & M_PWR_SAV) == 0) {
@@ -161,8 +160,6 @@ ieee80211_vap_pkt_send_dest(struct ieee80211vap *vap, struct mbuf *m,
* interface it (might have been) received on.
*/
m->m_pkthdr.rcvif = (void *)ni;
- mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1: 0;
- len = m->m_pkthdr.len;
BPF_MTAP(ifp, m); /* 802.3 tx */
@@ -2082,7 +2079,6 @@ ieee80211_send_probereq(struct ieee80211_node *ni,
struct ieee80211com *ic = ni->ni_ic;
const struct ieee80211_txparam *tp;
struct ieee80211_bpf_params params;
- struct ieee80211_frame *wh;
const struct ieee80211_rateset *rs;
struct mbuf *m;
uint8_t *frm;
@@ -2152,7 +2148,6 @@ ieee80211_send_probereq(struct ieee80211_node *ni,
}
IEEE80211_TX_LOCK(ic);
- wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(ni, m,
IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ,
IEEE80211_NONQOS_TID, sa, da, bssid);
@@ -2749,7 +2744,6 @@ ieee80211_send_proberesp(struct ieee80211vap *vap,
{
struct ieee80211_node *bss = vap->iv_bss;
struct ieee80211com *ic = vap->iv_ic;
- struct ieee80211_frame *wh;
struct mbuf *m;
int ret;
@@ -2781,7 +2775,6 @@ ieee80211_send_proberesp(struct ieee80211vap *vap,
KASSERT(m != NULL, ("no room for header"));
IEEE80211_TX_LOCK(ic);
- wh = mtod(m, struct ieee80211_frame *);
ieee80211_send_setup(bss, m,
IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP,
IEEE80211_NONQOS_TID, vap->iv_myaddr, da, bss->ni_bssid);
diff --git a/sys/net80211/ieee80211_phy.c b/sys/net80211/ieee80211_phy.c
index 16bbb2a..13cecb6 100644
--- a/sys/net80211/ieee80211_phy.c
+++ b/sys/net80211/ieee80211_phy.c
@@ -590,10 +590,6 @@ static const uint16_t ht40_bps[32] = {
#define HT_STF 4
#define HT_LTF(n) ((n) * 4)
-#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
-#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
-#define IS_HT_RATE(_rc) ( (_rc) & IEEE80211_RATE_MCS)
-
/*
* Calculate the transmit duration of an 11n frame.
*/
@@ -620,9 +616,6 @@ ieee80211_compute_duration_ht(uint32_t frameLen, uint16_t rate,
HT_L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
}
-#undef IS_HT_RATE
-#undef HT_RC_2_STREAMS
-#undef HT_RC_2_MCS
#undef HT_LTF
#undef HT_STF
#undef HT_SIG
diff --git a/sys/net80211/ieee80211_scan_sw.c b/sys/net80211/ieee80211_scan_sw.c
index 9899200..f1415cb 100644
--- a/sys/net80211/ieee80211_scan_sw.c
+++ b/sys/net80211/ieee80211_scan_sw.c
@@ -80,23 +80,6 @@ struct scan_state {
*/
#define IEEE80211_SCAN_OFFCHANNEL msecs_to_ticks(150)
-/*
- * Roaming-related defaults. RSSI thresholds are as returned by the
- * driver (.5dBm). Transmit rate thresholds are IEEE rate codes (i.e
- * .5M units) or MCS.
- */
-/* rssi thresholds */
-#define ROAM_RSSI_11A_DEFAULT 14 /* 11a bss */
-#define ROAM_RSSI_11B_DEFAULT 14 /* 11b bss */
-#define ROAM_RSSI_11BONLY_DEFAULT 14 /* 11b-only bss */
-/* transmit rate thresholds */
-#define ROAM_RATE_11A_DEFAULT 2*12 /* 11a bss */
-#define ROAM_RATE_11B_DEFAULT 2*5 /* 11b bss */
-#define ROAM_RATE_11BONLY_DEFAULT 2*1 /* 11b-only bss */
-#define ROAM_RATE_HALF_DEFAULT 2*6 /* half-width 11a/g bss */
-#define ROAM_RATE_QUARTER_DEFAULT 2*3 /* quarter-width 11a/g bss */
-#define ROAM_MCS_11N_DEFAULT (1 | IEEE80211_RATE_MCS) /* 11n bss */
-
static void scan_curchan(struct ieee80211_scan_state *, unsigned long);
static void scan_mindwell(struct ieee80211_scan_state *);
static void scan_signal(struct ieee80211_scan_state *, int);
diff --git a/sys/net80211/ieee80211_sta.c b/sys/net80211/ieee80211_sta.c
index 8580adf..90561b2 100644
--- a/sys/net80211/ieee80211_sta.c
+++ b/sys/net80211/ieee80211_sta.c
@@ -976,7 +976,6 @@ sta_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
{
struct ieee80211vap *vap = ni->ni_vap;
uint8_t *challenge;
- int estatus;
/*
* NB: this can happen as we allow pre-shared key
@@ -990,7 +989,6 @@ sta_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
ni->ni_macaddr, "shared key auth",
"%s", " PRIVACY is disabled");
- estatus = IEEE80211_STATUS_ALG;
goto bad;
}
/*
@@ -1004,7 +1002,6 @@ sta_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
ni->ni_macaddr, "shared key auth",
"bad sta auth mode %u", ni->ni_authmode);
vap->iv_stats.is_rx_bad_auth++; /* XXX maybe a unique error? */
- estatus = IEEE80211_STATUS_ALG;
goto bad;
}
@@ -1016,7 +1013,6 @@ sta_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
"ie %d/%d too long",
frm[0], (frm[1] + 2) - (efrm - frm));
vap->iv_stats.is_rx_bad_auth++;
- estatus = IEEE80211_STATUS_CHALLENGE;
goto bad;
}
if (*frm == IEEE80211_ELEMID_CHALLENGE)
@@ -1031,7 +1027,6 @@ sta_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
ni->ni_macaddr, "shared key auth",
"%s", "no challenge");
vap->iv_stats.is_rx_bad_auth++;
- estatus = IEEE80211_STATUS_CHALLENGE;
goto bad;
}
if (challenge[1] != IEEE80211_CHALLENGE_LEN) {
@@ -1039,7 +1034,6 @@ sta_auth_shared(struct ieee80211_node *ni, struct ieee80211_frame *wh,
ni->ni_macaddr, "shared key auth",
"bad challenge len %d", challenge[1]);
vap->iv_stats.is_rx_bad_auth++;
- estatus = IEEE80211_STATUS_CHALLENGE;
goto bad;
}
default:
@@ -1281,7 +1275,6 @@ sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype,
const struct ieee80211_rx_stats *rxs,
int rssi, int nf)
{
-#define ISPROBE(_st) ((_st) == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
#define ISREASSOC(_st) ((_st) == IEEE80211_FC0_SUBTYPE_REASSOC_RESP)
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
@@ -1861,7 +1854,6 @@ sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype,
break;
}
#undef ISREASSOC
-#undef ISPROBE
}
static void
diff --git a/sys/net80211/ieee80211_wds.c b/sys/net80211/ieee80211_wds.c
index b46fd05..05268e8 100644
--- a/sys/net80211/ieee80211_wds.c
+++ b/sys/net80211/ieee80211_wds.c
@@ -344,7 +344,6 @@ static int
wds_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
struct ieee80211com *ic = vap->iv_ic;
- struct ieee80211_node *ni;
enum ieee80211_state ostate;
int error;
@@ -357,7 +356,6 @@ wds_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
callout_stop(&vap->iv_mgtsend); /* XXX callout_drain */
if (ostate != IEEE80211_S_SCAN)
ieee80211_cancel_scan(vap); /* background scan */
- ni = vap->iv_bss; /* NB: no reference held */
error = 0;
switch (nstate) {
case IEEE80211_S_INIT:
diff --git a/sys/netinet/sctp_structs.h b/sys/netinet/sctp_structs.h
index 39fc949..84ee43a 100644
--- a/sys/netinet/sctp_structs.h
+++ b/sys/netinet/sctp_structs.h
@@ -452,11 +452,6 @@ struct sctp_tmit_chunk {
uint8_t window_probe;
};
-/*
- * The first part of this structure MUST be the entire sinfo structure. Maybe
- * I should have made it a sub structure... we can circle back later and do
- * that if we want.
- */
struct sctp_queued_to_read { /* sinfo structure plus more */
uint16_t sinfo_stream; /* off the wire */
uint32_t sinfo_ssn; /* off the wire */
diff --git a/sys/netinet/sctputil.c b/sys/netinet/sctputil.c
index b7b2c27..cc18d9f 100644
--- a/sys/netinet/sctputil.c
+++ b/sys/netinet/sctputil.c
@@ -5545,8 +5545,16 @@ found_one:
stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1;
}
/* First lets get off the sinfo and sockaddr info */
- if ((sinfo) && filling_sinfo) {
- memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo));
+ if ((sinfo != NULL) && (filling_sinfo != 0)) {
+ sinfo->sinfo_stream = control->sinfo_stream;
+ sinfo->sinfo_ssn = (uint16_t) control->sinfo_ssn;
+ sinfo->sinfo_flags = control->sinfo_flags;
+ sinfo->sinfo_ppid = control->sinfo_ppid;
+ sinfo->sinfo_context = control->sinfo_context;
+ sinfo->sinfo_timetolive = control->sinfo_timetolive;
+ sinfo->sinfo_tsn = control->sinfo_tsn;
+ sinfo->sinfo_cumtsn = control->sinfo_cumtsn;
+ sinfo->sinfo_assoc_id = control->sinfo_assoc_id;
nxt = TAILQ_NEXT(control, next);
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) ||
sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
diff --git a/sys/sys/_types.h b/sys/sys/_types.h
index 16039a6..ecc1c7e 100644
--- a/sys/sys/_types.h
+++ b/sys/sys/_types.h
@@ -51,6 +51,7 @@ typedef int __accmode_t; /* access permissions */
typedef int __nl_item;
typedef __uint16_t __nlink_t; /* link count */
typedef __int64_t __off_t; /* file offset */
+typedef __int64_t __off64_t; /* file offset (alias) */
typedef __int32_t __pid_t; /* process [group] */
typedef __int64_t __rlim_t; /* resource limit - intentionally */
/* signed, because of legacy code */
diff --git a/sys/sys/types.h b/sys/sys/types.h
index 579b104..6e2c012 100644
--- a/sys/sys/types.h
+++ b/sys/sys/types.h
@@ -174,6 +174,11 @@ typedef __off_t off_t; /* file offset */
#define _OFF_T_DECLARED
#endif
+#ifndef _OFF64_T_DECLARED
+typedef __off64_t off64_t; /* file offset (alias) */
+#define _OFF64_T_DECLARED
+#endif
+
#ifndef _PID_T_DECLARED
typedef __pid_t pid_t; /* process id */
#define _PID_T_DECLARED
diff --git a/sys/x86/x86/mca.c b/sys/x86/x86/mca.c
index c4ceef2..b3d6066 100644
--- a/sys/x86/x86/mca.c
+++ b/sys/x86/x86/mca.c
@@ -104,7 +104,8 @@ SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
"Administrative toggle for logging of spurious corrected errors");
int workaround_erratum383;
-SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RD, &workaround_erratum383, 0,
+SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RDTUN,
+ &workaround_erratum383, 0,
"Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");
static STAILQ_HEAD(, mca_internal) mca_freelist;
diff --git a/usr.bin/catman/catman.c b/usr.bin/catman/catman.c
index b221111..47906f5 100644
--- a/usr.bin/catman/catman.c
+++ b/usr.bin/catman/catman.c
@@ -34,9 +34,11 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/utsname.h>
+#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <err.h>
+#include <errno.h>
#include <fcntl.h>
#include <locale.h>
#include <langinfo.h>
@@ -267,7 +269,8 @@ get_cat_section(char *section)
char *cat_section;
cat_section = strdup(section);
- strncpy(cat_section, "cat", 3);
+ assert(strlen(section) > 3 && strncmp(section, "man", 3) == 0);
+ memcpy(cat_section, "cat", 3);
return cat_section;
}
@@ -419,8 +422,11 @@ process_page(char *mandir, char *src, char *cat, enum Ziptype zipped)
fprintf(stderr, "%slink %s -> %s\n",
verbose ? "\t" : "", cat, link_name);
}
- if (!pretend)
- link(link_name, cat);
+ if (!pretend) {
+ (void) unlink(cat);
+ if (link(link_name, cat) < 0)
+ warn("%s %s: link", link_name, cat);
+ }
return;
}
insert_hashtable(links, src_ino, src_dev, strdup(cat));
@@ -608,7 +614,8 @@ select_sections(const struct dirent *entry)
static void
process_mandir(char *dir_name, char *section)
{
- fchdir(starting_dir);
+ if (fchdir(starting_dir) < 0)
+ err(1, "fchdir");
if (already_visited(NULL, dir_name, section == NULL))
return;
check_writable(dir_name);
diff --git a/usr.bin/mail/fio.c b/usr.bin/mail/fio.c
index 1b0efea..d9d4ee1 100644
--- a/usr.bin/mail/fio.c
+++ b/usr.bin/mail/fio.c
@@ -367,10 +367,10 @@ expand(char *name)
name = savestr(xname);
}
if (!strpbrk(name, "~{[*?$`'\"\\"))
- return (name);
+ return (savestr(name));
if (pipe(pivec) < 0) {
warn("pipe");
- return (name);
+ return (NULL);
}
(void)snprintf(cmdbuf, sizeof(cmdbuf), "echo %s", name);
if ((sh = value("SHELL")) == NULL)
diff --git a/usr.bin/rpcinfo/rpcinfo.c b/usr.bin/rpcinfo/rpcinfo.c
index a12b932..3fdcfd1 100644
--- a/usr.bin/rpcinfo/rpcinfo.c
+++ b/usr.bin/rpcinfo/rpcinfo.c
@@ -609,12 +609,13 @@ reply_proc(void *res, struct netbuf *who, struct netconfig *nconf)
} else {
hostname = hostbuf;
}
- if (!(uaddr = taddr2uaddr(nconf, who))) {
- uaddr = UNKNOWN;
- }
- printf("%s\t%s\n", uaddr, hostname);
- if (strcmp(uaddr, UNKNOWN))
+ uaddr = taddr2uaddr(nconf, who);
+ if (uaddr == NULL) {
+ printf("%s\t%s\n", UNKNOWN, hostname);
+ } else {
+ printf("%s\t%s\n", uaddr, hostname);
free((char *)uaddr);
+ }
return (FALSE);
}
diff --git a/usr.sbin/bhyve/pci_emul.c b/usr.sbin/bhyve/pci_emul.c
index 1e45bad..523d7b0 100644
--- a/usr.sbin/bhyve/pci_emul.c
+++ b/usr.sbin/bhyve/pci_emul.c
@@ -2034,7 +2034,7 @@ pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
*/
}
- if (baridx > 2) {
+ if (baridx > 2 || baridx < 0) {
printf("diow: unknown bar idx %d\n", baridx);
}
}
@@ -2089,7 +2089,7 @@ pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
}
- if (baridx > 2) {
+ if (baridx > 2 || baridx < 0) {
printf("dior: unknown bar idx %d\n", baridx);
return (0);
}
diff --git a/usr.sbin/bhyve/pci_virtio_block.c b/usr.sbin/bhyve/pci_virtio_block.c
index 8500be6..a437c1c 100644
--- a/usr.sbin/bhyve/pci_virtio_block.c
+++ b/usr.sbin/bhyve/pci_virtio_block.c
@@ -200,7 +200,6 @@ pci_vtblk_proc(struct pci_vtblk_softc *sc, struct vqueue_info *vq)
int err;
ssize_t iolen;
int writeop, type;
- off_t offset;
struct iovec iov[BLOCKIF_IOV_MAX + 2];
uint16_t idx, flags[BLOCKIF_IOV_MAX + 2];
@@ -249,7 +248,8 @@ pci_vtblk_proc(struct pci_vtblk_softc *sc, struct vqueue_info *vq)
io->io_req.br_resid = iolen;
DPRINTF(("virtio-block: %s op, %zd bytes, %d segs, offset %ld\n\r",
- writeop ? "write" : "read/ident", iolen, i - 1, offset));
+ writeop ? "write" : "read/ident", iolen, i - 1,
+ io->io_req.br_offset));
switch (type) {
case VBH_OP_READ:
diff --git a/usr.sbin/bsnmpd/tools/libbsnmptools/bsnmptools.c b/usr.sbin/bsnmpd/tools/libbsnmptools/bsnmptools.c
index 38665bf..c9353d8 100644
--- a/usr.sbin/bsnmpd/tools/libbsnmptools/bsnmptools.c
+++ b/usr.sbin/bsnmpd/tools/libbsnmptools/bsnmptools.c
@@ -798,7 +798,7 @@ parse_server(char *opt_arg)
return (-1);
if (snmp_client.trans > SNMP_TRANS_UDP && snmp_client.chost == NULL) {
- if ((snmp_client.chost = malloc(strlen(SNMP_DEFAULT_LOCAL + 1)))
+ if ((snmp_client.chost = malloc(strlen(SNMP_DEFAULT_LOCAL) + 1))
== NULL) {
syslog(LOG_ERR, "malloc() failed: %s", strerror(errno));
return (-1);
diff --git a/usr.sbin/edquota/edquota.c b/usr.sbin/edquota/edquota.c
index 0cd117f..6a6c194 100644
--- a/usr.sbin/edquota/edquota.c
+++ b/usr.sbin/edquota/edquota.c
@@ -390,7 +390,7 @@ getprivs(long id, int quotatype, char *fspath)
if ((qup = (struct quotause *)calloc(1, sizeof(*qup))) == NULL)
errx(2, "out of memory");
qup->qf = qf;
- strncpy(qup->fsname, fs->fs_file, sizeof(qup->fsname));
+ strlcpy(qup->fsname, fs->fs_file, sizeof(qup->fsname));
if (quota_read(qf, &qup->dqblk, id) == -1) {
warn("cannot read quotas on %s", fs->fs_file);
freeprivs(qup);
diff --git a/usr.sbin/i2c/i2c.c b/usr.sbin/i2c/i2c.c
index 3021b86..7714cf0 100644
--- a/usr.sbin/i2c/i2c.c
+++ b/usr.sbin/i2c/i2c.c
@@ -294,6 +294,9 @@ i2c_write(char *dev, struct options i2c_opt, char *i2c_buf)
err_msg = "error: offset malloc";
goto err1;
}
+ } else {
+ bufsize = 0;
+ buf = NULL;
}
switch(i2c_opt.mode) {
diff --git a/usr.sbin/mptutil/mpt_cam.c b/usr.sbin/mptutil/mpt_cam.c
index 6a8ff07..a00e228 100644
--- a/usr.sbin/mptutil/mpt_cam.c
+++ b/usr.sbin/mptutil/mpt_cam.c
@@ -260,7 +260,6 @@ fetch_scsi_capacity(struct cam_device *dev, struct mpt_standalone_disk *disk)
cam_freeccb(ccb);
return (EIO);
}
- cam_freeccb(ccb);
/*
* A last block of 2^32-1 means that the true capacity is over 2TB,
@@ -269,6 +268,7 @@ fetch_scsi_capacity(struct cam_device *dev, struct mpt_standalone_disk *disk)
*/
if (scsi_4btoul(rcap.addr) != 0xffffffff) {
disk->maxlba = scsi_4btoul(rcap.addr);
+ cam_freeccb(ccb);
return (0);
}
diff --git a/usr.sbin/timed/timed/master.c b/usr.sbin/timed/timed/master.c
index 9a130bf..2bc3f85 100644
--- a/usr.sbin/timed/timed/master.c
+++ b/usr.sbin/timed/timed/master.c
@@ -623,7 +623,7 @@ addmach(char *name, struct sockaddr_in *addr, struct netinfo *ntp)
}
ret->addr = *addr;
ret->ntp = ntp;
- (void)strncpy(ret->name, name, sizeof(ret->name));
+ (void)strlcpy(ret->name, name, sizeof(ret->name));
ret->good = good_host_name(name);
ret->l_fwd = &self;
ret->l_bak = self.l_bak;
diff --git a/usr.sbin/ypbind/ypbind.c b/usr.sbin/ypbind/ypbind.c
index 39bdd74..15f8394 100644
--- a/usr.sbin/ypbind/ypbind.c
+++ b/usr.sbin/ypbind/ypbind.c
@@ -199,6 +199,11 @@ rejecting.", *argp);
res.ypbind_resp_u.ypbind_error = YPBIND_ERR_RESC;
return (&res);
}
+ if (strlen(*argp) > YPMAXDOMAIN) {
+ syslog(LOG_WARNING, "domain %s too long", *argp);
+ res.ypbind_resp_u.ypbind_error = YPBIND_ERR_RESC;
+ return (&res);
+ }
ypdb = malloc(sizeof *ypdb);
if (ypdb == NULL) {
syslog(LOG_WARNING, "malloc: %m");
@@ -206,7 +211,7 @@ rejecting.", *argp);
return (&res);
}
bzero(ypdb, sizeof *ypdb);
- strncpy(ypdb->dom_domain, *argp, sizeof ypdb->dom_domain);
+ strlcpy(ypdb->dom_domain, *argp, sizeof ypdb->dom_domain);
ypdb->dom_vers = YPVERS;
ypdb->dom_alive = 0;
ypdb->dom_default = 0;
@@ -416,6 +421,9 @@ main(int argc, char *argv[])
errx(1, "unknown option: %s", argv[i]);
}
+ if (strlen(domain_name) > YPMAXDOMAIN)
+ warnx("truncating domain name %s", domain_name);
+
/* blow away everything in BINDINGDIR (if it exists) */
if ((dird = opendir(BINDINGDIR)) != NULL) {
@@ -456,7 +464,7 @@ main(int argc, char *argv[])
if (ypbindlist == NULL)
errx(1, "malloc");
bzero(ypbindlist, sizeof *ypbindlist);
- strncpy(ypbindlist->dom_domain, domain_name, sizeof ypbindlist->dom_domain);
+ strlcpy(ypbindlist->dom_domain, domain_name, sizeof ypbindlist->dom_domain);
ypbindlist->dom_vers = YPVERS;
ypbindlist->dom_alive = 0;
ypbindlist->dom_lockfd = -1;
@@ -886,13 +894,17 @@ rpc_received(char *dom, struct sockaddr_in *raddrp, int force)
if (ypdb == NULL) {
if (force == 0)
return;
+ if (strlen(dom) > YPMAXDOMAIN) {
+ syslog(LOG_WARNING, "domain %s too long", dom);
+ return;
+ }
ypdb = malloc(sizeof *ypdb);
if (ypdb == NULL) {
syslog(LOG_WARNING, "malloc: %m");
return;
}
bzero(ypdb, sizeof *ypdb);
- strncpy(ypdb->dom_domain, dom, sizeof ypdb->dom_domain);
+ strlcpy(ypdb->dom_domain, dom, sizeof ypdb->dom_domain);
ypdb->dom_lockfd = -1;
ypdb->dom_default = 0;
ypdb->dom_pnext = ypbindlist;
diff --git a/usr.sbin/ypldap/ber.c b/usr.sbin/ypldap/ber.c
index f73ecef..d388233 100644
--- a/usr.sbin/ypldap/ber.c
+++ b/usr.sbin/ypldap/ber.c
@@ -726,7 +726,7 @@ ber_scanf_elements(struct ber_element *ber, char *fmt, ...)
continue;
case '}':
case ')':
- if (parent[level] == NULL)
+ if (level < 0 || parent[level] == NULL)
goto fail;
ber = parent[level--];
ret++;