path: root/dep/jemalloc/include
Diffstat (limited to 'dep/jemalloc/include')
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/arena.h              | 127
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/chunk_dss.h          |   2
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/ckh.h                |   2
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/hash.h               |  20
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/huge.h               |   6
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h  | 197
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/private_namespace.h  |  34
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/prng.h               |   4
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/prof.h               |  92
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/ql.h                 |  36
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/qr.h                 |  22
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/rb.h                 |   4
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/rtree.h              |  50
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/tcache.h             |   5
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/tsd.h                |  39
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/util.h               |  22
-rw-r--r--  dep/jemalloc/include/jemalloc/jemalloc.h                    | 187
17 files changed, 528 insertions(+), 321 deletions(-)
diff --git a/dep/jemalloc/include/jemalloc/internal/arena.h b/dep/jemalloc/include/jemalloc/internal/arena.h
index f2c18f43543..9d000c03dec 100644
--- a/dep/jemalloc/include/jemalloc/internal/arena.h
+++ b/dep/jemalloc/include/jemalloc/internal/arena.h
@@ -158,6 +158,7 @@ struct arena_chunk_map_s {
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
+typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;
/* Arena chunk header. */
struct arena_chunk_s {
@@ -174,11 +175,12 @@ struct arena_chunk_s {
size_t nruns_avail;
/*
- * Number of available run adjacencies. Clean and dirty available runs
- * are not coalesced, which causes virtual memory fragmentation. The
- * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
- * this fragmentation.
- * */
+ * Number of available run adjacencies that purging could coalesce.
+ * Clean and dirty available runs are not coalesced, which causes
+ * virtual memory fragmentation. The ratio of
+ * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this
+ * fragmentation.
+ */
size_t nruns_adjac;
/*
@@ -404,7 +406,16 @@ void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
size_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
+#ifdef JEMALLOC_JET
+typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
+ uint8_t);
+extern arena_redzone_corruption_t *arena_redzone_corruption;
+typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
+extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
+#else
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
+#endif
+void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
@@ -415,10 +426,18 @@ void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
+#ifdef JEMALLOC_JET
+typedef void (arena_dalloc_junk_large_t)(void *, size_t);
+extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
+#endif
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+#ifdef JEMALLOC_JET
+typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
+extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
+#endif
+bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
@@ -441,6 +460,7 @@ void arena_postfork_child(arena_t *arena);
#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
@@ -451,6 +471,7 @@ size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
@@ -471,7 +492,7 @@ size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
-void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+void arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
@@ -498,10 +519,17 @@ arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbitsp_read(size_t *mapbitsp)
+{
+
+ return (*mapbitsp);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
- return (*arena_mapbitsp_get(chunk, pageind));
+ return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -585,82 +613,89 @@ arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE void
+arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
+{
+
+ *mapbitsp = mapbits;
+}
+
+JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
- *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
+ arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
- assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- *mapbitsp = size | (*mapbitsp & PAGE_MASK);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+ arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
- CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
+ | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
- *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
- CHUNK_MAP_BININD_SHIFT);
+ arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
+ (binind << CHUNK_MAP_BININD_SHIFT));
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
size_t binind, size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
assert(binind < BININD_INVALID);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(pageind - runind >= map_bias);
assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
- flags | unzeroed | CHUNK_MAP_ALLOCATED;
+ unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
+ CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
- *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
+ arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
+ unzeroed);
}
JEMALLOC_INLINE bool
@@ -869,10 +904,10 @@ arena_prof_ctx_get(const void *ptr)
}
JEMALLOC_INLINE void
-arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
- size_t pageind, mapbits;
+ size_t pageind;
cassert(config_prof);
assert(ptr != NULL);
@@ -880,10 +915,17 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+
+ if (usize > SMALL_MAXCLASS || (prof_promote &&
+ ((uintptr_t)ctx != (uintptr_t)1U || arena_mapbits_large_get(chunk,
+ pageind) != 0))) {
+ assert(arena_mapbits_large_get(chunk, pageind) != 0);
+ arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
+ } else {
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
if (prof_promote == false) {
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
LG_PAGE));
@@ -895,12 +937,11 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
bin_info = &arena_bin_info[binind];
regind = arena_run_regind(run, bin_info, ptr);
- *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
- + (regind * sizeof(prof_ctx_t *)))) = ctx;
- } else
- assert((uintptr_t)ctx == (uintptr_t)1U);
- } else
- arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
+ *((prof_ctx_t **)((uintptr_t)run +
+ bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t
+ *)))) = ctx;
+ }
+ }
}
JEMALLOC_ALWAYS_INLINE void *
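The hunks above replace direct *mapbitsp loads and stores with the new arena_mapbitsp_read()/arena_mapbitsp_write() accessors, so every map-bits update funnels through a single load and a single store. A minimal sketch of the resulting read-modify-write idiom; the example_set_unzeroed wrapper is hypothetical, while the accessors and CHUNK_MAP_UNZEROED come from the patch:

    /* Sketch: set a flag in a page's map bits via the new accessors. */
    static void
    example_set_unzeroed(arena_chunk_t *chunk, size_t pageind)
    {
        size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
        size_t mapbits = arena_mapbitsp_read(mapbitsp);

        /* One store per update; easier to annotate or make atomic later. */
        arena_mapbitsp_write(mapbitsp, mapbits | CHUNK_MAP_UNZEROED);
    }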
diff --git a/dep/jemalloc/include/jemalloc/internal/chunk_dss.h b/dep/jemalloc/include/jemalloc/internal/chunk_dss.h
index 6585f071bbe..4535ce09c09 100644
--- a/dep/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/dep/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -7,7 +7,7 @@ typedef enum {
dss_prec_secondary = 2,
dss_prec_limit = 3
-} dss_prec_t ;
+} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
diff --git a/dep/jemalloc/include/jemalloc/internal/ckh.h b/dep/jemalloc/include/jemalloc/internal/ckh.h
index 50c39ed9581..58712a6a763 100644
--- a/dep/jemalloc/include/jemalloc/internal/ckh.h
+++ b/dep/jemalloc/include/jemalloc/internal/ckh.h
@@ -17,7 +17,7 @@ typedef bool ckh_keycomp_t (const void *, const void *);
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
+#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
diff --git a/dep/jemalloc/include/jemalloc/internal/hash.h b/dep/jemalloc/include/jemalloc/internal/hash.h
index 56ecc793b36..09b69df515b 100644
--- a/dep/jemalloc/include/jemalloc/internal/hash.h
+++ b/dep/jemalloc/include/jemalloc/internal/hash.h
@@ -19,6 +19,11 @@
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
+uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
+void hash_x86_128(const void *key, const int len, uint32_t seed,
+ uint64_t r_out[2]);
+void hash_x64_128(const void *key, const int len, const uint32_t seed,
+ uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
size_t r_hash[2]);
#endif
@@ -43,14 +48,14 @@ JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
- return p[i];
+ return (p[i]);
}
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
- return p[i];
+ return (p[i]);
}
JEMALLOC_INLINE uint32_t
@@ -63,7 +68,7 @@ hash_fmix_32(uint32_t h)
h *= 0xc2b2ae35;
h ^= h >> 16;
- return h;
+ return (h);
}
JEMALLOC_INLINE uint64_t
@@ -76,7 +81,7 @@ hash_fmix_64(uint64_t k)
k *= QU(0xc4ceb9fe1a85ec53LLU);
k ^= k >> 33;
- return k;
+ return (k);
}
JEMALLOC_INLINE uint32_t
@@ -127,12 +132,12 @@ hash_x86_32(const void *key, int len, uint32_t seed)
h1 = hash_fmix_32(h1);
- return h1;
+ return (h1);
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
- uint64_t r_out[2])
+ uint64_t r_out[2])
{
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -234,7 +239,7 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
- uint64_t r_out[2])
+ uint64_t r_out[2])
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -310,7 +315,6 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
r_out[1] = h2;
}
-
/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
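The prototypes added above expose the MurmurHash3-style helpers; hash() remains the generic entry point that fills two size_t words from a 128-bit hash. A usage sketch (the function name and seed are arbitrary, not from the patch):

    #include <string.h>

    static void
    example_hash(void)
    {
        const char key[] = "example";
        size_t r_hash[2];

        /* r_hash[0..1] receive two words derived from the hash of key. */
        hash(key, strlen(key), 0x12345678U, r_hash);
    }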
diff --git a/dep/jemalloc/include/jemalloc/internal/huge.h b/dep/jemalloc/include/jemalloc/internal/huge.h
index d987d370767..ddf13138ad7 100644
--- a/dep/jemalloc/include/jemalloc/internal/huge.h
+++ b/dep/jemalloc/include/jemalloc/internal/huge.h
@@ -19,10 +19,14 @@ extern malloc_mutex_t huge_mtx;
void *huge_malloc(size_t size, bool zero);
void *huge_palloc(size_t size, size_t alignment, bool zero);
-void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc);
+#ifdef JEMALLOC_JET
+typedef void (huge_dalloc_junk_t)(void *, size_t);
+extern huge_dalloc_junk_t *huge_dalloc_junk;
+#endif
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
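huge_dalloc_junk follows the same JEMALLOC_JET convention as the arena hooks above: under JEMALLOC_JET the symbol is a writable function pointer, so tests can interpose on it. A sketch of how a test might wrap it; the counting wrapper is hypothetical, only the typedef and pointer come from the patch:

    #ifdef JEMALLOC_JET
    static huge_dalloc_junk_t *orig_huge_dalloc_junk;
    static size_t last_junked_usize;

    static void
    counting_huge_dalloc_junk(void *ptr, size_t usize)
    {
        last_junked_usize = usize;          /* record the call for the test */
        orig_huge_dalloc_junk(ptr, usize);  /* delegate to the real junk fill */
    }

    /* In test setup:
     *   orig_huge_dalloc_junk = huge_dalloc_junk;
     *   huge_dalloc_junk = counting_huge_dalloc_junk;
     */
    #endif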
diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h
index 80045bda4bd..b64cc4bed87 100644
--- a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h
+++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h
@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_H
-#define JEMALLOC_INTERNAL_H
+#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
@@ -54,8 +54,7 @@ typedef intptr_t ssize_t;
#endif
#include <fcntl.h>
-#define JEMALLOC_NO_DEMANGLE
-#include "../jemalloc.h"
+#include "jemalloc_defs.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
@@ -66,6 +65,8 @@ typedef intptr_t ssize_t;
#include <valgrind/memcheck.h>
#endif
+#define JEMALLOC_NO_DEMANGLE
+#include "../jemalloc.h"
#include "jemalloc/internal/private_namespace.h"
#ifdef JEMALLOC_CC_SILENCE
@@ -221,8 +222,13 @@ static const bool config_ivsalloc =
* JEMALLOC_H_INLINES : Inline functions.
*/
/******************************************************************************/
-#define JEMALLOC_H_TYPES
+#define JEMALLOC_H_TYPES
+
+#ifndef JEMALLOC_HAS_RESTRICT
+# define restrict
+#endif
+#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
#define ZU(z) ((size_t)z)
@@ -232,20 +238,26 @@ static const bool config_ivsalloc =
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
-#ifdef JEMALLOC_DEBUG
+#if defined(JEMALLOC_DEBUG)
/* Disable inlining to make debugging easier. */
# define JEMALLOC_ALWAYS_INLINE
+# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
+# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
+# define JEMALLOC_ALWAYS_INLINE_C \
+ static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
+# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
+# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
@@ -278,6 +290,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
+# ifdef __aarch64__
+# define LG_QUANTUM 4
+# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
@@ -478,7 +493,7 @@ static const bool config_ivsalloc =
#undef JEMALLOC_H_TYPES
/******************************************************************************/
-#define JEMALLOC_H_STRUCTS
+#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
@@ -507,14 +522,14 @@ typedef struct {
uint64_t deallocated;
} thread_allocated_t;
/*
- * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
+ * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* argument.
*/
-#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})
+#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
-#define JEMALLOC_H_EXTERNS
+#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
extern bool opt_junk;
@@ -574,7 +589,7 @@ void jemalloc_postfork_child(void);
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
-#define JEMALLOC_H_INLINES
+#define JEMALLOC_H_INLINES
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
@@ -749,32 +764,36 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
-void *imallocx(size_t size, bool try_tcache, arena_t *arena);
+void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
-void *icallocx(size_t size, bool try_tcache, arena_t *arena);
+void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
-void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
-void idallocx(void *ptr, bool try_tcache);
+void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
-void iqallocx(void *ptr, bool try_tcache);
+void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
-void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
+void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
+void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero, bool no_move);
+ bool zero);
+bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
-imallocx(size_t size, bool try_tcache, arena_t *arena)
+imalloct(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
@@ -789,11 +808,11 @@ JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{
- return (imallocx(size, true, NULL));
+ return (imalloct(size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
-icallocx(size_t size, bool try_tcache, arena_t *arena)
+icalloct(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
@@ -806,11 +825,11 @@ JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{
- return (icallocx(size, true, NULL));
+ return (icalloct(size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
-ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
void *ret;
@@ -838,7 +857,7 @@ JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
- return (ipallocx(usize, alignment, zero, true, NULL));
+ return (ipalloct(usize, alignment, zero, true, NULL));
}
/*
@@ -870,7 +889,7 @@ ivsalloc(const void *ptr, bool demote)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
- if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
+ if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
return (0);
return (isalloc(ptr, demote));
@@ -899,7 +918,7 @@ p2rz(const void *ptr)
}
JEMALLOC_ALWAYS_INLINE void
-idallocx(void *ptr, bool try_tcache)
+idalloct(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
@@ -916,31 +935,63 @@ JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{
- idallocx(ptr, true);
+ idalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void
-iqallocx(void *ptr, bool try_tcache)
+iqalloct(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
- idallocx(ptr, try_tcache);
+ idalloct(ptr, try_tcache);
}
JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{
- iqallocx(ptr, true);
+ iqalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void *
-irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
+iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena)
+{
+ void *p;
+ size_t usize, copysize;
+
+ usize = sa2u(size + extra, alignment);
+ if (usize == 0)
+ return (NULL);
+ p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ if (p == NULL) {
+ if (extra == 0)
+ return (NULL);
+ /* Try again, without extra this time. */
+ usize = sa2u(size, alignment);
+ if (usize == 0)
+ return (NULL);
+ p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ if (p == NULL)
+ return (NULL);
+ }
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ memcpy(p, ptr, copysize);
+ iqalloct(ptr, try_tcache_dalloc);
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+ bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
- void *ret;
size_t oldsize;
assert(ptr != NULL);
@@ -950,68 +1001,50 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
- size_t usize, copysize;
-
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
- if (no_move)
- return (NULL);
- usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, without extra this time. */
- usize = sa2u(size, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
- arena);
- if (ret == NULL)
- return (NULL);
- }
- /*
- * Copy at most size bytes (not size+extra), since the caller
- * has no expectation that the extra bytes will be reliably
- * preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
- return (ret);
+ return (iralloct_realign(ptr, oldsize, size, extra, alignment,
+ zero, try_tcache_alloc, try_tcache_dalloc, arena));
}
- if (no_move) {
- if (size <= arena_maxclass) {
- return (arena_ralloc_no_move(ptr, oldsize, size,
- extra, zero));
- } else {
- return (huge_ralloc_no_move(ptr, oldsize, size,
- extra));
- }
+ if (size + extra <= arena_maxclass) {
+ return (arena_ralloc(arena, ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc));
} else {
- if (size + extra <= arena_maxclass) {
- return (arena_ralloc(arena, ptr, oldsize, size, extra,
- alignment, zero, try_tcache_alloc,
- try_tcache_dalloc));
- } else {
- return (huge_ralloc(ptr, oldsize, size, extra,
- alignment, zero, try_tcache_dalloc));
- }
+ return (huge_ralloc(ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_dalloc));
}
}
JEMALLOC_ALWAYS_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move)
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
- return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
- NULL));
+ return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+{
+ size_t oldsize;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ oldsize = isalloc(ptr, config_prof);
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /* Existing object alignment is inadequate. */
+ return (true);
+ }
+
+ if (size <= arena_maxclass)
+ return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
+ else
+ return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}
malloc_tsd_externs(thread_allocated, thread_allocated_t)
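The irallocx() no_move flag is gone: ixalloc() now covers the resize-in-place case (returning true when it cannot resize without moving), while iralloct() always returns a usable pointer, possibly relocated. A caller-side sketch under those signatures; example_grow is hypothetical:

    /* Grow an allocation, preferring in-place expansion. */
    static void *
    example_grow(void *ptr, size_t newsize)
    {
        if (ixalloc(ptr, newsize, 0, 0, false) == false)
            return (ptr);   /* resized in place */
        /* Fall back to a possibly-moving reallocation. */
        return (iralloct(ptr, newsize, 0, 0, false, true, true, NULL));
    }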
diff --git a/dep/jemalloc/include/jemalloc/internal/private_namespace.h b/dep/jemalloc/include/jemalloc/internal/private_namespace.h
index 65de3163fd3..a99bf7293ac 100644
--- a/dep/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/dep/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -8,6 +8,7 @@
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
@@ -33,6 +34,8 @@
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
+#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
+#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
@@ -48,8 +51,11 @@
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
+#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
@@ -66,6 +72,7 @@
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
+#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
@@ -189,6 +196,7 @@
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
@@ -204,20 +212,22 @@
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
-#define icallocx JEMALLOC_N(icallocx)
+#define icalloct JEMALLOC_N(icalloct)
#define idalloc JEMALLOC_N(idalloc)
-#define idallocx JEMALLOC_N(idallocx)
+#define idalloct JEMALLOC_N(idalloct)
#define imalloc JEMALLOC_N(imalloc)
-#define imallocx JEMALLOC_N(imallocx)
+#define imalloct JEMALLOC_N(imalloct)
#define ipalloc JEMALLOC_N(ipalloc)
-#define ipallocx JEMALLOC_N(ipallocx)
+#define ipalloct JEMALLOC_N(ipalloct)
#define iqalloc JEMALLOC_N(iqalloc)
-#define iqallocx JEMALLOC_N(iqallocx)
+#define iqalloct JEMALLOC_N(iqalloct)
#define iralloc JEMALLOC_N(iralloc)
-#define irallocx JEMALLOC_N(irallocx)
+#define iralloct JEMALLOC_N(iralloct)
+#define iralloct_realign JEMALLOC_N(iralloct_realign)
#define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
+#define ixalloc JEMALLOC_N(ixalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
@@ -248,6 +258,7 @@
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
+#define opt_dss JEMALLOC_N(opt_dss)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
@@ -277,8 +288,10 @@
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
+#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
+#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
@@ -304,6 +317,7 @@
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
+#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
@@ -317,8 +331,10 @@
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
+#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
+#define rtree_delete JEMALLOC_N(rtree_delete)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
@@ -329,6 +345,7 @@
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
+#define small_size2bin JEMALLOC_N(small_size2bin)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
@@ -361,6 +378,7 @@
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
+#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
@@ -377,6 +395,7 @@
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
+#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
@@ -386,5 +405,8 @@
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
+#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
+#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
+#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define u2rz JEMALLOC_N(u2rz)
diff --git a/dep/jemalloc/include/jemalloc/internal/prng.h b/dep/jemalloc/include/jemalloc/internal/prng.h
index 83a5462b4dd..7b2b06512ff 100644
--- a/dep/jemalloc/include/jemalloc/internal/prng.h
+++ b/dep/jemalloc/include/jemalloc/internal/prng.h
@@ -25,7 +25,7 @@
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
-#define prng32(r, lg_range, state, a, c) do { \
+#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
@@ -35,7 +35,7 @@
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
-#define prng64(r, lg_range, state, a, c) do { \
+#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
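prng64(r, lg_range, state, a, c) steps a 64-bit linear congruential generator (state becomes state*a + c) and leaves lg_range pseudo-random bits, taken from the high end of the new state, in r. A usage sketch with the multiplier/increment pair that prof.h passes below:

    uint64_t state = 42;    /* any seed */
    uint64_t r;

    /* r receives a pseudo-random value in [0, 2^53). */
    prng64(r, 53, state, UINT64_C(6364136223846793005),
        UINT64_C(1442695040888963407));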
diff --git a/dep/jemalloc/include/jemalloc/internal/prof.h b/dep/jemalloc/include/jemalloc/internal/prof.h
index 119a5b1bcb7..6f162d21e84 100644
--- a/dep/jemalloc/include/jemalloc/internal/prof.h
+++ b/dep/jemalloc/include/jemalloc/internal/prof.h
@@ -8,7 +8,11 @@ typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
-#define PROF_PREFIX_DEFAULT "jeprof"
+#ifdef JEMALLOC_PROF
+# define PROF_PREFIX_DEFAULT "jeprof"
+#else
+# define PROF_PREFIX_DEFAULT ""
+#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
@@ -129,6 +133,7 @@ struct prof_ctx_s {
* limbo due to one of:
* - Initializing per thread counters associated with this ctx.
* - Preparing to destroy this ctx.
+ * - Dumping a heap profile that includes this ctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx.
*/
@@ -145,7 +150,11 @@ struct prof_ctx_s {
* this context.
*/
ql_head(prof_thr_cnt_t) cnts_ql;
+
+ /* Linkage for list of contexts to be dumped. */
+ ql_elm(prof_ctx_t) dump_link;
};
+typedef ql_head(prof_ctx_t) prof_ctx_list_t;
struct prof_tdata_s {
/*
@@ -195,7 +204,12 @@ extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
-extern char opt_prof_prefix[PATH_MAX + 1];
+extern char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
@@ -215,6 +229,11 @@ extern bool prof_promote;
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
+#ifdef JEMALLOC_JET
+size_t prof_bt_count(void);
+typedef int (prof_dump_open_t)(bool, const char *);
+extern prof_dump_open_t *prof_dump_open;
+#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
@@ -289,11 +308,11 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
-void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
-void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt);
-void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
- size_t old_size, prof_ctx_t *old_ctx);
+void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
+void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
+ size_t old_usize, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
#endif
@@ -320,6 +339,20 @@ prof_tdata_get(bool create)
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
+ /*
+ * The body of this function is compiled out unless heap profiling is
+ * enabled, so that it is possible to compile jemalloc with floating
+ * point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a
+ * workaround for versions of glibc that don't properly save/restore
+ * floating point registers during dynamic lazy symbol loading (which
+ * internally calls into whatever malloc implementation happens to be
+ * integrated into the application). Note that some compilers (e.g.
+ * gcc 4.8) may use floating point registers for fast memory moves, so
+ * jemalloc must be compiled with such optimizations disabled (e.g.
+ * -mno-sse) in order for the workaround to be complete.
+ */
+#ifdef JEMALLOC_PROF
uint64_t r;
double u;
@@ -341,7 +374,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
- * (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
+ * (http://luc.devroye.org/rnbookindex.html)
*/
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
@@ -349,6 +382,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
prof_tdata->threshold = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ (uint64_t)1U;
+#endif
}
JEMALLOC_INLINE prof_ctx_t *
@@ -371,7 +405,7 @@ prof_ctx_get(const void *ptr)
}
JEMALLOC_INLINE void
-prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
@@ -381,7 +415,7 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
- arena_prof_ctx_set(ptr, ctx);
+ arena_prof_ctx_set(ptr, usize, ctx);
} else
huge_prof_ctx_set(ptr, ctx);
}
@@ -416,20 +450,20 @@ prof_sample_accum_update(size_t size)
}
JEMALLOC_INLINE void
-prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
+prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
{
cassert(config_prof);
assert(ptr != NULL);
- assert(size == isalloc(ptr, true));
+ assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(size)) {
+ if (prof_sample_accum_update(usize)) {
/*
* Don't sample. For malloc()-like allocation, it is
* always possible to tell in advance how large an
* object's usable size will be, so there should never
- * be a difference between the size passed to
+ * be a difference between the usize passed to
* PROF_ALLOC_PREP() and prof_malloc().
*/
assert((uintptr_t)cnt == (uintptr_t)1U);
@@ -437,17 +471,17 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, cnt->ctx);
+ prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
/*********/
mb_write();
/*********/
cnt->cnts.curobjs++;
- cnt->cnts.curbytes += size;
+ cnt->cnts.curbytes += usize;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += size;
+ cnt->cnts.accumbytes += usize;
}
/*********/
mb_write();
@@ -457,12 +491,12 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
mb_write();
/*********/
} else
- prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+ prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
}
JEMALLOC_INLINE void
-prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
- size_t old_size, prof_ctx_t *old_ctx)
+prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
+ size_t old_usize, prof_ctx_t *old_ctx)
{
prof_thr_cnt_t *told_cnt;
@@ -470,15 +504,15 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
if (ptr != NULL) {
- assert(size == isalloc(ptr, true));
+ assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(size)) {
+ if (prof_sample_accum_update(usize)) {
/*
- * Don't sample. The size passed to
+ * Don't sample. The usize passed to
* PROF_ALLOC_PREP() was larger than what
* actually got allocated, so a backtrace was
* captured for this allocation, even though
- * its actual size was insufficient to cross
+ * its actual usize was insufficient to cross
* the sample threshold.
*/
cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
@@ -495,7 +529,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
*/
malloc_mutex_lock(old_ctx->lock);
old_ctx->cnt_merged.curobjs--;
- old_ctx->cnt_merged.curbytes -= old_size;
+ old_ctx->cnt_merged.curbytes -= old_usize;
malloc_mutex_unlock(old_ctx->lock);
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
}
@@ -505,23 +539,23 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
if ((uintptr_t)told_cnt > (uintptr_t)1U)
told_cnt->epoch++;
if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, cnt->ctx);
+ prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
} else if (ptr != NULL)
- prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+ prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
/*********/
if ((uintptr_t)told_cnt > (uintptr_t)1U) {
told_cnt->cnts.curobjs--;
- told_cnt->cnts.curbytes -= old_size;
+ told_cnt->cnts.curbytes -= old_usize;
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
cnt->cnts.curobjs++;
- cnt->cnts.curbytes += size;
+ cnt->cnts.curbytes += usize;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += size;
+ cnt->cnts.accumbytes += usize;
}
}
/*********/
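For reference, the threshold computed in prof_sample_threshold_update() above is a geometric draw: with per-byte sampling probability p = 1/2^opt_lg_prof_sample and u uniform in (0,1), threshold = floor(log(u)/log(1-p)) + 1, so the mean interval between samples is roughly 1/p bytes. With the default LG_PROF_SAMPLE_DEFAULT of 19:

    p            = 1 / 2^19 = 1/524288 per byte
    E[threshold] ~ 1/p      = 524288 bytes = 512 KiB allocated between samples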
diff --git a/dep/jemalloc/include/jemalloc/internal/ql.h b/dep/jemalloc/include/jemalloc/internal/ql.h
index a9ed2393f0c..f70c5f6f391 100644
--- a/dep/jemalloc/include/jemalloc/internal/ql.h
+++ b/dep/jemalloc/include/jemalloc/internal/ql.h
@@ -1,61 +1,61 @@
/*
* List definitions.
*/
-#define ql_head(a_type) \
+#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
-#define ql_head_initializer(a_head) {NULL}
+#define ql_head_initializer(a_head) {NULL}
-#define ql_elm(a_type) qr(a_type)
+#define ql_elm(a_type) qr(a_type)
/* List functions. */
-#define ql_new(a_head) do { \
+#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
-#define ql_first(a_head) ((a_head)->qlh_first)
+#define ql_first(a_head) ((a_head)->qlh_first)
-#define ql_last(a_head, a_field) \
+#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
-#define ql_next(a_head, a_elm, a_field) \
+#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
-#define ql_prev(a_head, a_elm, a_field) \
+#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
+#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
-#define ql_after_insert(a_qlelm, a_elm, a_field) \
+#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
-#define ql_head_insert(a_head, a_elm, a_field) do { \
+#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
-#define ql_tail_insert(a_head, a_elm, a_field) do { \
+#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
-#define ql_remove(a_head, a_elm, a_field) do { \
+#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
@@ -66,18 +66,18 @@ struct { \
} \
} while (0)
-#define ql_head_remove(a_head, a_type, a_field) do { \
+#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_tail_remove(a_head, a_type, a_field) do { \
+#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_foreach(a_var, a_head, a_field) \
+#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
-#define ql_reverse_foreach(a_var, a_head, a_field) \
+#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
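The ql_*() macros above implement an intrusive list whose link field is embedded in the element type; this is what the new arena_chunk_mapelms_t and prof_ctx_list_t heads use. A minimal sketch with a hypothetical element type:

    typedef struct widget_s widget_t;
    struct widget_s {
        int              id;
        ql_elm(widget_t) link;      /* embedded linkage */
    };

    static void
    example_list(widget_t *a, widget_t *b)
    {
        ql_head(widget_t) head;
        widget_t *w;

        ql_new(&head);
        ql_elm_new(a, link);
        ql_elm_new(b, link);
        ql_tail_insert(&head, a, link);
        ql_tail_insert(&head, b, link);
        ql_foreach(w, &head, link) {
            /* visit w->id in insertion order */
        }
        ql_remove(&head, a, link);
    }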
diff --git a/dep/jemalloc/include/jemalloc/internal/qr.h b/dep/jemalloc/include/jemalloc/internal/qr.h
index fe22352fedd..602944b9b4f 100644
--- a/dep/jemalloc/include/jemalloc/internal/qr.h
+++ b/dep/jemalloc/include/jemalloc/internal/qr.h
@@ -1,28 +1,28 @@
/* Ring definitions. */
-#define qr(a_type) \
+#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
-#define qr_new(a_qr, a_field) do { \
+#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
+#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
+#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
+#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_after_insert(a_qrelm, a_qr, a_field) \
+#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
@@ -31,7 +31,7 @@ struct { \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
-#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
+#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
@@ -42,10 +42,10 @@ struct { \
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
-#define qr_split(a_qr_a, a_qr_b, a_field) \
+#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
-#define qr_remove(a_qr, a_field) do { \
+#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
@@ -54,13 +54,13 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_foreach(var, a_qr, a_field) \
+#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
-#define qr_reverse_foreach(var, a_qr, a_field) \
+#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
diff --git a/dep/jemalloc/include/jemalloc/internal/rb.h b/dep/jemalloc/include/jemalloc/internal/rb.h
index 7b675f09051..423802eb2dc 100644
--- a/dep/jemalloc/include/jemalloc/internal/rb.h
+++ b/dep/jemalloc/include/jemalloc/internal/rb.h
@@ -22,10 +22,6 @@
#ifndef RB_H_
#define RB_H_
-#if 0
-__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 204493 2010-02-28 22:57:13Z jasone $");
-#endif
-
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
diff --git a/dep/jemalloc/include/jemalloc/internal/rtree.h b/dep/jemalloc/include/jemalloc/internal/rtree.h
index 9bd98548cfe..bc74769f50e 100644
--- a/dep/jemalloc/include/jemalloc/internal/rtree.h
+++ b/dep/jemalloc/include/jemalloc/internal/rtree.h
@@ -14,17 +14,18 @@ typedef struct rtree_s rtree_t;
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
*/
-#if (LG_SIZEOF_PTR == 2)
-# define RTREE_NODESIZE (1U << 14)
-#else
-# define RTREE_NODESIZE CACHELINE
-#endif
+#define RTREE_NODESIZE (1U << 16)
+
+typedef void *(rtree_alloc_t)(size_t);
+typedef void (rtree_dalloc_t)(void *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
+ rtree_alloc_t *alloc;
+ rtree_dalloc_t *dalloc;
malloc_mutex_t mutex;
void **root;
unsigned height;
@@ -35,7 +36,8 @@ struct rtree_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-rtree_t *rtree_new(unsigned bits);
+rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
+void rtree_delete(rtree_t *rtree);
void rtree_prefork(rtree_t *rtree);
void rtree_postfork_parent(rtree_t *rtree);
void rtree_postfork_child(rtree_t *rtree);
@@ -45,20 +47,20 @@ void rtree_postfork_child(rtree_t *rtree);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-#ifndef JEMALLOC_DEBUG
-void *rtree_get_locked(rtree_t *rtree, uintptr_t key);
+#ifdef JEMALLOC_DEBUG
+uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
-void *rtree_get(rtree_t *rtree, uintptr_t key);
-bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
+uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
+bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
-JEMALLOC_INLINE void * \
+JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
- void *ret; \
+ uint8_t ret; \
uintptr_t subkey; \
unsigned i, lshift, height, bits; \
void **node, **child; \
@@ -68,12 +70,12 @@ f(rtree_t *rtree, uintptr_t key) \
i < height - 1; \
i++, lshift += bits, node = child) { \
bits = rtree->level2bits[i]; \
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
+ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
3)) - bits); \
child = (void**)node[subkey]; \
if (child == NULL) { \
RTREE_UNLOCK(&rtree->mutex); \
- return (NULL); \
+ return (0); \
} \
} \
\
@@ -84,7 +86,10 @@ f(rtree_t *rtree, uintptr_t key) \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
bits); \
- ret = node[subkey]; \
+ { \
+ uint8_t *leaf = (uint8_t *)node; \
+ ret = leaf[subkey]; \
+ } \
RTREE_UNLOCK(&rtree->mutex); \
\
RTREE_GET_VALIDATE \
@@ -123,7 +128,7 @@ RTREE_GET_GENERATE(rtree_get)
#undef RTREE_GET_VALIDATE
JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, void *val)
+rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
uintptr_t subkey;
unsigned i, lshift, height, bits;
@@ -138,14 +143,14 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
bits);
child = (void**)node[subkey];
if (child == NULL) {
- child = (void**)base_alloc(sizeof(void *) <<
- rtree->level2bits[i+1]);
+ size_t size = ((i + 1 < height - 1) ? sizeof(void *)
+ : (sizeof(uint8_t))) << rtree->level2bits[i+1];
+ child = (void**)rtree->alloc(size);
if (child == NULL) {
malloc_mutex_unlock(&rtree->mutex);
return (true);
}
- memset(child, 0, sizeof(void *) <<
- rtree->level2bits[i+1]);
+ memset(child, 0, size);
node[subkey] = child;
}
}
@@ -153,7 +158,10 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
/* node is a leaf, so it contains values rather than node pointers. */
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
- node[subkey] = val;
+ {
+ uint8_t *leaf = (uint8_t *)node;
+ leaf[subkey] = val;
+ }
malloc_mutex_unlock(&rtree->mutex);
return (false);
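With this change the radix tree maps keys to one-byte values instead of pointers, and the caller supplies the node allocator/deallocator. A sketch of the new interface; the hooks and key are placeholders, while the rtree_new/rtree_set/rtree_get/rtree_delete signatures come from the patch:

    #include <stdlib.h>

    static void *node_alloc(size_t size) { return (malloc(size)); }  /* placeholder hooks */
    static void node_dalloc(void *node) { free(node); }

    static void
    example_rtree(void)
    {
        rtree_t *rt = rtree_new(32, node_alloc, node_dalloc);  /* 32 significant key bits */

        if (rt == NULL)
            return;
        rtree_set(rt, (uintptr_t)0x7f0012340000UL, 1);          /* tag an address */
        if (rtree_get(rt, (uintptr_t)0x7f0012340000UL) != 0) {
            /* address is present */
        }
        rtree_delete(rt);
    }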
diff --git a/dep/jemalloc/include/jemalloc/internal/tcache.h b/dep/jemalloc/include/jemalloc/internal/tcache.h
index ba36204ff21..c3d4b58d4dc 100644
--- a/dep/jemalloc/include/jemalloc/internal/tcache.h
+++ b/dep/jemalloc/include/jemalloc/internal/tcache.h
@@ -297,6 +297,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
binind = SMALL_SIZE2BIN(size);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
+ size = arena_bin_info[binind].reg_size;
ret = tcache_alloc_easy(tbin);
if (ret == NULL) {
ret = tcache_alloc_small_hard(tcache, tbin, binind);
@@ -313,6 +314,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
@@ -321,7 +323,6 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
if (config_stats)
tbin->tstats.nrequests++;
@@ -368,11 +369,11 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
else if (opt_zero)
memset(ret, 0, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
if (config_stats)
tbin->tstats.nrequests++;
diff --git a/dep/jemalloc/include/jemalloc/internal/tsd.h b/dep/jemalloc/include/jemalloc/internal/tsd.h
index 0037cf35e70..9fb4a23ec6b 100644
--- a/dep/jemalloc/include/jemalloc/internal/tsd.h
+++ b/dep/jemalloc/include/jemalloc/internal/tsd.h
@@ -6,6 +6,12 @@
typedef bool (*malloc_tsd_cleanup_t)(void);
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+typedef struct tsd_init_block_s tsd_init_block_t;
+typedef struct tsd_init_head_s tsd_init_head_t;
+#endif
+
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are four macros that support (at least) three use cases: file-private,
@@ -75,12 +81,13 @@ extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#elif (defined(_WIN32))
-#define malloc_tsd_externs(a_name, a_type) \
+#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
+extern tsd_init_head_t a_name##_tsd_init_head; \
extern bool a_name##_booted;
#endif
@@ -105,6 +112,10 @@ a_attr bool a_name##_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
+a_attr tsd_init_head_t a_name##_tsd_init_head = { \
+ ql_head_initializer(blocks), \
+ MALLOC_MUTEX_INITIALIZER \
+}; \
a_attr bool a_name##_booted = false;
#endif
@@ -333,8 +344,14 @@ a_name##_tsd_get_wrapper(void) \
pthread_getspecific(a_name##_tsd); \
\
if (wrapper == NULL) { \
+ tsd_init_block_t block; \
+ wrapper = tsd_init_check_recursion( \
+ &a_name##_tsd_init_head, &block); \
+ if (wrapper) \
+ return (wrapper); \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
+ block.data = wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
@@ -350,6 +367,7 @@ a_name##_tsd_get_wrapper(void) \
" TSD for "#a_name"\n"); \
abort(); \
} \
+ tsd_init_finish(&a_name##_tsd_init_head, &block); \
} \
return (wrapper); \
} \
@@ -379,6 +397,19 @@ a_name##_tsd_set(a_type *val) \
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+struct tsd_init_block_s {
+ ql_elm(tsd_init_block_t) link;
+ pthread_t thread;
+ void *data;
+};
+struct tsd_init_head_s {
+ ql_head(tsd_init_block_t) blocks;
+ malloc_mutex_t lock;
+};
+#endif
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
@@ -388,6 +419,12 @@ void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+void *tsd_init_check_recursion(tsd_init_head_t *head,
+ tsd_init_block_t *block);
+void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
+#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
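
The new tsd_init_* machinery only exists on the fallback pthread_setspecific path (no compiler TLS, no malloc_thread_cleanup, not Windows). Its purpose is to break the recursion that occurs when allocating the TSD wrapper itself re-enters the wrapper getter on the same thread: the in-progress wrapper stashed in block.data is handed back instead of allocating again. Below is a loose sketch of that idea using a plain linked list and pthread primitives; it is not jemalloc's tsd.c, which uses the ql_ macros and malloc_mutex_t declared above.

#include <pthread.h>
#include <stddef.h>

typedef struct init_block_s {
	struct init_block_s	*next;
	pthread_t		thread;
	void			*data;	/* wrapper under construction */
} init_block_t;

typedef struct {
	init_block_t		*blocks;
	pthread_mutex_t		lock;
} init_head_t;

/*
 * Returns the in-progress data if this thread is already initializing;
 * otherwise registers `block` and returns NULL so the caller proceeds
 * with allocation and records the new wrapper in block->data.
 */
static void *
init_check_recursion(init_head_t *head, init_block_t *block)
{
	pthread_t self = pthread_self();
	void *found = NULL;
	init_block_t *b;

	pthread_mutex_lock(&head->lock);
	for (b = head->blocks; b != NULL; b = b->next) {
		if (pthread_equal(b->thread, self)) {
			found = b->data;
			break;
		}
	}
	if (found == NULL) {
		block->thread = self;
		block->data = NULL;
		block->next = head->blocks;
		head->blocks = block;
	}
	pthread_mutex_unlock(&head->lock);
	return (found);
}

The counterpart, tsd_init_finish(), then only has to unlink the block under the same lock once pthread_setspecific() has succeeded.
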
diff --git a/dep/jemalloc/include/jemalloc/internal/util.h b/dep/jemalloc/include/jemalloc/internal/util.h
index 8479693631a..6b938f74688 100644
--- a/dep/jemalloc/include/jemalloc/internal/util.h
+++ b/dep/jemalloc/include/jemalloc/internal/util.h
@@ -14,7 +14,7 @@
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
-#define JEMALLOC_CONCAT(...) __VA_ARGS__
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
@@ -42,12 +42,6 @@
} while (0)
#endif
-/* Use to assert a particular configuration, e.g., cassert(config_debug). */
-#define cassert(c) do { \
- if ((c) == false) \
- assert(false); \
-} while (0)
-
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
@@ -69,10 +63,18 @@
} while (0)
#endif
+#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (config_debug && !(e)) \
not_implemented(); \
} while (0)
+#endif
+
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#define cassert(c) do { \
+ if ((c) == false) \
+ not_reached(); \
+} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -82,8 +84,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-int buferror(char *buf, size_t buflen);
-uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base);
+int buferror(int err, char *buf, size_t buflen);
+uintmax_t malloc_strtoumax(const char *restrict nptr,
+ char **restrict endptr, int base);
void malloc_write(const char *s);
/*
@@ -107,7 +110,6 @@ void malloc_printf(const char *format, ...)
#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
-void malloc_write(const char *s);
void set_errno(int errnum);
int get_errno(void);
#endif
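
Two behavioural points in the util.h hunks above: cassert() is re-rooted on not_reached() and moved below its definition, and buferror() now takes the error code explicitly instead of reading errno itself. A hypothetical caller showing the revised buferror() contract (the surrounding function and names are illustrative, not from jemalloc):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

int buferror(int err, char *buf, size_t buflen);	/* from util.h above */

/*
 * Capture errno at the failure site and pass it in, so a later libc
 * call cannot clobber it before the message is formatted.
 */
static void
report_open_failure(const char *path)
{
	char buf[128];
	FILE *f = fopen(path, "rb");

	if (f == NULL) {
		int err = errno;	/* capture immediately */

		buferror(err, buf, sizeof(buf));
		fprintf(stderr, "<jemalloc>: cannot open %s: %s\n", path, buf);
		return;
	}
	fclose(f);
}
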
diff --git a/dep/jemalloc/include/jemalloc/jemalloc.h b/dep/jemalloc/include/jemalloc/jemalloc.h
index 946c73b75e5..84e2e7294d1 100644
--- a/dep/jemalloc/include/jemalloc/jemalloc.h
+++ b/dep/jemalloc/include/jemalloc/jemalloc.h
@@ -7,36 +7,45 @@ extern "C" {
#include <limits.h>
#include <strings.h>
-#define JEMALLOC_VERSION "3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784"
+#define JEMALLOC_VERSION "3.5.1-0-g7709a64c59daf0b1f938be49472fcc499e1bd136"
#define JEMALLOC_VERSION_MAJOR 3
-#define JEMALLOC_VERSION_MINOR 3
+#define JEMALLOC_VERSION_MINOR 5
#define JEMALLOC_VERSION_BUGFIX 1
#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "9ef9d9e8c271cdf14f664b871a8f98c827714784"
+#define JEMALLOC_VERSION_GID "7709a64c59daf0b1f938be49472fcc499e1bd136"
-#include "jemalloc_defs.h"
+# define MALLOCX_LG_ALIGN(la) (la)
+# if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) (ffs(a)-1)
+# else
+# define MALLOCX_ALIGN(a) \
+ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+# endif
+# define MALLOCX_ZERO ((int)0x40)
+/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
+# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#ifdef JEMALLOC_EXPERIMENTAL
-#define ALLOCM_LG_ALIGN(la) (la)
-#if LG_SIZEOF_PTR == 2
-#define ALLOCM_ALIGN(a) (ffs(a)-1)
-#else
-#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
-#endif
-#define ALLOCM_ZERO ((int)0x40)
-#define ALLOCM_NO_MOVE ((int)0x80)
+# define ALLOCM_LG_ALIGN(la) (la)
+# if LG_SIZEOF_PTR == 2
+# define ALLOCM_ALIGN(a) (ffs(a)-1)
+# else
+# define ALLOCM_ALIGN(a) \
+ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+# endif
+# define ALLOCM_ZERO ((int)0x40)
+# define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
-#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
-
-#define ALLOCM_SUCCESS 0
-#define ALLOCM_ERR_OOM 1
-#define ALLOCM_ERR_NOT_MOVED 2
+# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
+# define ALLOCM_SUCCESS 0
+# define ALLOCM_ERR_OOM 1
+# define ALLOCM_ERR_NOT_MOVED 2
#endif
/*
- * The je_ prefix on the following public symbol declarations is an artifact of
- * namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see below).
+ * The je_ prefix on the following public symbol declarations is an artifact
+ * of namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
@@ -52,6 +61,25 @@ JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
+JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
+JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
+JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
+ int flags);
+JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
+JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
+
+JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
+ size_t *miblenp);
+JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
+ const char *), void *je_cbopaque, const char *opts);
+JEMALLOC_EXPORT size_t je_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr);
+
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
@@ -61,17 +89,6 @@ JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
-JEMALLOC_EXPORT size_t je_malloc_usable_size(
- JEMALLOC_USABLE_SIZE_CONST void *ptr);
-JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
- const char *), void *je_cbopaque, const char *opts);
-JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
- size_t *miblenp);
-JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
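
Together with the declarations added earlier in this header, the experimental allocm family is superseded by mallocx/rallocx/xallocx/sallocx/dallocx/nallocx. All take the same flags word: bits 0-5 carry lg(alignment), 0x40 requests zeroed memory, and the arena index sits at bit 8 with a +1 bias so that a zero flags word means "no arena specified" (MALLOCX_ARENA(0) therefore encodes as 0x100). A short usage sketch, using the unprefixed names that this header maps to the je_* symbols when JEMALLOC_MANGLE is defined, with a hard-coded arena index purely for illustration:

#include <stddef.h>
#include <jemalloc/jemalloc.h>

static int
mallocx_demo(void)
{
	/* 64-byte aligned, zero-filled, explicitly from arena 0. */
	int flags = MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO | MALLOCX_ARENA(0);
	void *p = mallocx(4096, flags);
	size_t usable;

	if (p == NULL)
		return (-1);
	usable = sallocx(p, flags);	/* actual usable size, >= 4096 */
	dallocx(p, flags);		/* pass the same flags when freeing */
	return (usable >= 4096 ? 0 : -1);
}
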
@@ -92,63 +109,71 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
-#ifndef JEMALLOC_NO_DEMANGLE
-#define JEMALLOC_NO_DEMANGLE
-#endif
-#define malloc_conf je_malloc_conf
-#define malloc_message je_malloc_message
-#define malloc je_malloc
-#define calloc je_calloc
-#define posix_memalign je_posix_memalign
-#define aligned_alloc je_aligned_alloc
-#define realloc je_realloc
-#define free je_free
-#define malloc_usable_size je_malloc_usable_size
-#define malloc_stats_print je_malloc_stats_print
-#define mallctl je_mallctl
-#define mallctlnametomib je_mallctlnametomib
-#define mallctlbymib je_mallctlbymib
-#define memalign je_memalign
-#define valloc je_valloc
-#ifdef JEMALLOC_EXPERIMENTAL
-#define allocm je_allocm
-#define rallocm je_rallocm
-#define sallocm je_sallocm
-#define dallocm je_dallocm
-#define nallocm je_nallocm
-#endif
+# ifndef JEMALLOC_NO_DEMANGLE
+# define JEMALLOC_NO_DEMANGLE
+# endif
+# define malloc_conf je_malloc_conf
+# define malloc_message je_malloc_message
+# define malloc je_malloc
+# define calloc je_calloc
+# define posix_memalign je_posix_memalign
+# define aligned_alloc je_aligned_alloc
+# define realloc je_realloc
+# define free je_free
+# define mallocx je_mallocx
+# define rallocx je_rallocx
+# define xallocx je_xallocx
+# define sallocx je_sallocx
+# define dallocx je_dallocx
+# define nallocx je_nallocx
+# define mallctl je_mallctl
+# define mallctlnametomib je_mallctlnametomib
+# define mallctlbymib je_mallctlbymib
+# define malloc_stats_print je_malloc_stats_print
+# define malloc_usable_size je_malloc_usable_size
+# define memalign je_memalign
+# define valloc je_valloc
+# define allocm je_allocm
+# define dallocm je_dallocm
+# define nallocm je_nallocm
+# define rallocm je_rallocm
+# define sallocm je_sallocm
#endif
/*
- * The je_* macros can be used as stable alternative names for the public
- * jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant
- * for use in jemalloc itself, but it can be used by application code to
+ * The je_* macros can be used as stable alternative names for the
+ * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
+ * meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
-#undef je_malloc_conf
-#undef je_malloc_message
-#undef je_malloc
-#undef je_calloc
-#undef je_posix_memalign
-#undef je_aligned_alloc
-#undef je_realloc
-#undef je_free
-#undef je_malloc_usable_size
-#undef je_malloc_stats_print
-#undef je_mallctl
-#undef je_mallctlnametomib
-#undef je_mallctlbymib
-#undef je_memalign
-#undef je_valloc
-#ifdef JEMALLOC_EXPERIMENTAL
-#undef je_allocm
-#undef je_rallocm
-#undef je_sallocm
-#undef je_dallocm
-#undef je_nallocm
-#endif
+# undef je_malloc_conf
+# undef je_malloc_message
+# undef je_malloc
+# undef je_calloc
+# undef je_posix_memalign
+# undef je_aligned_alloc
+# undef je_realloc
+# undef je_free
+# undef je_mallocx
+# undef je_rallocx
+# undef je_xallocx
+# undef je_sallocx
+# undef je_dallocx
+# undef je_nallocx
+# undef je_mallctl
+# undef je_mallctlnametomib
+# undef je_mallctlbymib
+# undef je_malloc_stats_print
+# undef je_malloc_usable_size
+# undef je_memalign
+# undef je_valloc
+# undef je_allocm
+# undef je_dallocm
+# undef je_nallocm
+# undef je_rallocm
+# undef je_sallocm
#endif
#ifdef __cplusplus