author     jackpoz <giacomopoz@gmail.com>  2014-03-12 20:53:34 +0100
committer  jackpoz <giacomopoz@gmail.com>  2014-03-28 22:34:39 +0100
commit     a3e56b06897b1768f52cc3ecbf7a9b836228fe19 (patch)
tree       dcc681418db3efccba5513c1ff0b82d1b2b6cebc /dep/jemalloc
parent     f7659e5c1ac8e3fbc1ec17c69775877cb3482ac4 (diff)
Core/Dependencies: Upgrade to jemalloc-3.5.1
Diffstat (limited to 'dep/jemalloc')
-rw-r--r--  dep/jemalloc/COPYING | 4
-rw-r--r--  dep/jemalloc/ChangeLog | 107
-rw-r--r--  dep/jemalloc/README | 18
-rw-r--r--  dep/jemalloc/VERSION | 2
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/arena.h | 127
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/chunk_dss.h | 2
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/ckh.h | 2
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/hash.h | 20
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/huge.h | 6
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h | 197
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/private_namespace.h | 34
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/prng.h | 4
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/prof.h | 92
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/ql.h | 36
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/qr.h | 22
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/rb.h | 4
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/rtree.h | 50
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/tcache.h | 5
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/tsd.h | 39
-rw-r--r--  dep/jemalloc/include/jemalloc/internal/util.h | 22
-rw-r--r--  dep/jemalloc/include/jemalloc/jemalloc.h | 187
-rw-r--r--  dep/jemalloc/jemalloc_defs.h.in.cmake | 6
-rw-r--r--  dep/jemalloc/src/arena.c | 971
-rw-r--r--  dep/jemalloc/src/bitmap.c | 2
-rw-r--r--  dep/jemalloc/src/chunk.c | 30
-rw-r--r--  dep/jemalloc/src/chunk_dss.c | 15
-rw-r--r--  dep/jemalloc/src/chunk_mmap.c | 4
-rw-r--r--  dep/jemalloc/src/ckh.c | 12
-rw-r--r--  dep/jemalloc/src/ctl.c | 329
-rw-r--r--  dep/jemalloc/src/huge.c | 56
-rw-r--r--  dep/jemalloc/src/jemalloc.c | 1191
-rw-r--r--  dep/jemalloc/src/mutex.c | 2
-rw-r--r--  dep/jemalloc/src/prof.c | 640
-rw-r--r--  dep/jemalloc/src/quarantine.c | 13
-rw-r--r--  dep/jemalloc/src/rtree.c | 78
-rw-r--r--  dep/jemalloc/src/stats.c | 8
-rw-r--r--  dep/jemalloc/src/tcache.c | 9
-rw-r--r--  dep/jemalloc/src/tsd.c | 36
-rw-r--r--  dep/jemalloc/src/util.c | 85
-rw-r--r--  dep/jemalloc/src/zone.c | 2
40 files changed, 2756 insertions, 1713 deletions
diff --git a/dep/jemalloc/COPYING b/dep/jemalloc/COPYING
index 019e8132275..bdda0feb9e5 100644
--- a/dep/jemalloc/COPYING
+++ b/dep/jemalloc/COPYING
@@ -1,10 +1,10 @@
Unless otherwise specified, files in the jemalloc source distribution are
subject to the following license:
--------------------------------------------------------------------------------
-Copyright (C) 2002-2013 Jason Evans <jasone@canonware.com>.
+Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>.
All rights reserved.
Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved.
-Copyright (C) 2009-2013 Facebook, Inc. All rights reserved.
+Copyright (C) 2009-2014 Facebook, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
diff --git a/dep/jemalloc/ChangeLog b/dep/jemalloc/ChangeLog
index fc096d8f42f..c0ca338b076 100644
--- a/dep/jemalloc/ChangeLog
+++ b/dep/jemalloc/ChangeLog
@@ -3,8 +3,107 @@ bug fixes are all mentioned, but internal enhancements are omitted here for
brevity (even though they are more fun to write about). Much more detail can be
found in the git revision history:
- http://www.canonware.com/cgi-bin/gitweb.cgi?p=jemalloc.git
- git://canonware.com/jemalloc.git
+ https://github.com/jemalloc/jemalloc
+
+* 3.5.1 (February 25, 2014)
+
+ This version primarily addresses minor bugs in test code.
+
+ Bug fixes:
+ - Configure Solaris/Illumos to use MADV_FREE.
+ - Fix junk filling for mremap(2)-based huge reallocation. This is only
+ relevant if configuring with the --enable-mremap option specified.
+ - Avoid compilation failure if 'restrict' C99 keyword is not supported by the
+ compiler.
+ - Add a configure test for SSE2 rather than assuming it is usable on i686
+ systems. This fixes test compilation errors, especially on 32-bit Linux
+ systems.
+ - Fix mallctl argument size mismatches (size_t vs. uint64_t) in the stats unit
+ test.
+ - Fix/remove flawed alignment-related overflow tests.
+ - Prevent compiler optimizations that could change backtraces in the
+ prof_accum unit test.
+
+* 3.5.0 (January 22, 2014)
+
+ This version focuses on refactoring and automated testing, though it also
+ includes some non-trivial heap profiling optimizations not mentioned below.
+
+ New features:
+ - Add the *allocx() API, which is a successor to the experimental *allocm()
+ API. The *allocx() functions are slightly simpler to use because they have
+ fewer parameters, they directly return the results of primary interest, and
+ mallocx()/rallocx() avoid the strict aliasing pitfall that
+ allocm()/rallocm() share with posix_memalign(). Note that *allocm() is
+ slated for removal in the next non-bugfix release.
+ - Add support for LinuxThreads.
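[Editor's note: as a usage illustration for the *allocx() bullet above — not part of the upstream ChangeLog. Function and flag names (mallocx, rallocx, xallocx, sallocx, dallocx, MALLOCX_ALIGN, MALLOCX_ZERO) follow the jemalloc 3.5 manual; header path and error handling are kept deliberately simple, so treat this as a sketch rather than canonical usage.]

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* 1 KiB, 64-byte aligned, zero-filled. */
	void *p = mallocx(1024, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
	size_t usize;

	if (p == NULL)
		return (1);
	printf("usable size: %zu\n", sallocx(p, 0));

	/* Grow the allocation; rallocx() may move it. */
	p = rallocx(p, 4096, 0);
	if (p == NULL)
		return (1);

	/* Try to grow in place only; returns the resulting usable size. */
	usize = xallocx(p, 8192, 0, 0);
	printf("after xallocx: %zu\n", usize);

	dallocx(p, 0);
	return (0);
}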
+
+ Bug fixes:
+ - Unless heap profiling is enabled, disable floating point code and don't link
+ with libm. This, in combination with e.g. EXTRA_CFLAGS=-mno-sse on x64
+ systems, makes it possible to completely disable floating point register
+ use. Some versions of glibc neglect to save/restore caller-saved floating
+ point registers during dynamic lazy symbol loading, and the symbol loading
+ code uses whatever malloc the application happens to have linked/loaded
+ with, the result being potential floating point register corruption.
+ - Report ENOMEM rather than EINVAL if an OOM occurs during heap profiling
+ backtrace creation in imemalign(). This bug impacted posix_memalign() and
+ aligned_alloc().
+ - Fix a file descriptor leak in a prof_dump_maps() error path.
+ - Fix prof_dump() to close the dump file descriptor for all relevant error
+ paths.
+ - Fix rallocm() to use the arena specified by the ALLOCM_ARENA(s) flag for
+ allocation, not just deallocation.
+ - Fix a data race for large allocation stats counters.
+ - Fix a potential infinite loop during thread exit. This bug occurred on
+ Solaris, and could affect other platforms with similar pthreads TSD
+ implementations.
+ - Don't junk-fill reallocations unless usable size changes. This fixes a
+ violation of the *allocx()/*allocm() semantics.
+ - Fix growing large reallocation to junk fill new space.
+ - Fix huge deallocation to junk fill when munmap is disabled.
+ - Change the default private namespace prefix from empty to je_, and change
+ --with-private-namespace-prefix so that it prepends an additional prefix
+ rather than replacing je_. This reduces the likelihood of applications
+ which statically link jemalloc experiencing symbol name collisions.
+ - Add missing private namespace mangling (relevant when
+ --with-private-namespace is specified).
+ - Add and use JEMALLOC_INLINE_C so that static inline functions are marked as
+ static even for debug builds.
+ - Add a missing mutex unlock in a malloc_init_hard() error path. In practice
+ this error path is never executed.
+ - Fix numerous bugs in malloc_strtoumax() error handling/reporting. These
+ bugs had no impact except for malformed inputs.
+ - Fix numerous bugs in malloc_snprintf(). These bugs were not exercised by
+ existing calls, so they had no impact.
+
+* 3.4.1 (October 20, 2013)
+
+ Bug fixes:
+ - Fix a race in the "arenas.extend" mallctl that could cause memory corruption
+ of internal data structures and subsequent crashes.
+ - Fix Valgrind integration flaws that caused Valgrind warnings about reads of
+ uninitialized memory in:
+ + arena chunk headers
+ + internal zero-initialized data structures (relevant to tcache and prof
+ code)
+ - Preserve errno during the first allocation. A readlink(2) call during
+ initialization fails unless /etc/malloc.conf exists, so errno was typically
+ set during the first allocation prior to this fix.
+ - Fix compilation warnings reported by gcc 4.8.1.
+
+* 3.4.0 (June 2, 2013)
+
+ This version is essentially a small bugfix release, but the addition of
+ aarch64 support requires that the minor version be incremented.
+
+ Bug fixes:
+ - Fix race-triggered deadlocks in chunk_record(). These deadlocks were
+ typically triggered by multiple threads concurrently deallocating huge
+ objects.
+
+ New features:
+ - Add support for the aarch64 architecture.
* 3.3.1 (March 6, 2013)
@@ -15,7 +114,7 @@ found in the git revision history:
- Fix a locking order bug that could cause deadlock during fork if heap
profiling were enabled.
- Fix a chunk recycling bug that could cause the allocator to lose track of
- whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
+ whether a chunk was zeroed. On FreeBSD, NetBSD, and OS X, it could cause
corruption if allocating via sbrk(2) (unlikely unless running with the
"dss:primary" option specified). This was completely harmless on Linux
unless using mlockall(2) (and unlikely even then, unless the
@@ -47,7 +146,7 @@ found in the git revision history:
Bug fixes:
- Fix "arenas.extend" mallctl to output the number of arenas.
- - Fix chunk_recycyle() to unconditionally inform Valgrind that returned memory
+ - Fix chunk_recycle() to unconditionally inform Valgrind that returned memory
is undefined.
- Fix build break on FreeBSD related to alloca.h.
diff --git a/dep/jemalloc/README b/dep/jemalloc/README
index 7661683bae7..9b268f42288 100644
--- a/dep/jemalloc/README
+++ b/dep/jemalloc/README
@@ -1,10 +1,14 @@
-jemalloc is a general-purpose scalable concurrent malloc(3) implementation.
-This distribution is a "portable" implementation that currently targets
-FreeBSD, Linux, Apple OS X, and MinGW. jemalloc is included as the default
-allocator in the FreeBSD and NetBSD operating systems, and it is used by the
-Mozilla Firefox web browser on Microsoft Windows-related platforms. Depending
-on your needs, one of the other divergent versions may suit your needs better
-than this distribution.
+jemalloc is a general purpose malloc(3) implementation that emphasizes
+fragmentation avoidance and scalable concurrency support. jemalloc first came
+into use as the FreeBSD libc allocator in 2005, and since then it has found its
+way into numerous applications that rely on its predictable behavior. In 2010
+jemalloc development efforts broadened to include developer support features
+such as heap profiling, Valgrind integration, and extensive monitoring/tuning
+hooks. Modern jemalloc releases continue to be integrated back into FreeBSD,
+and therefore versatility remains critical. Ongoing development efforts trend
+toward making jemalloc among the best allocators for a broad range of demanding
+applications, and eliminating/mitigating weaknesses that have practical
+repercussions for real world applications.
The COPYING file contains copyright and licensing information.
diff --git a/dep/jemalloc/VERSION b/dep/jemalloc/VERSION
index 900c82d1043..24870eef790 100644
--- a/dep/jemalloc/VERSION
+++ b/dep/jemalloc/VERSION
@@ -1 +1 @@
-3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784
+3.5.1-0-g7709a64c59daf0b1f938be49472fcc499e1bd136
diff --git a/dep/jemalloc/include/jemalloc/internal/arena.h b/dep/jemalloc/include/jemalloc/internal/arena.h
index f2c18f43543..9d000c03dec 100644
--- a/dep/jemalloc/include/jemalloc/internal/arena.h
+++ b/dep/jemalloc/include/jemalloc/internal/arena.h
@@ -158,6 +158,7 @@ struct arena_chunk_map_s {
};
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
+typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t;
/* Arena chunk header. */
struct arena_chunk_s {
@@ -174,11 +175,12 @@ struct arena_chunk_s {
size_t nruns_avail;
/*
- * Number of available run adjacencies. Clean and dirty available runs
- * are not coalesced, which causes virtual memory fragmentation. The
- * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
- * this fragmentation.
- * */
+ * Number of available run adjacencies that purging could coalesce.
+ * Clean and dirty available runs are not coalesced, which causes
+ * virtual memory fragmentation. The ratio of
+ * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this
+ * fragmentation.
+ */
size_t nruns_adjac;
/*
@@ -404,7 +406,16 @@ void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
size_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
+#ifdef JEMALLOC_JET
+typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
+ uint8_t);
+extern arena_redzone_corruption_t *arena_redzone_corruption;
+typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
+extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
+#else
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
+#endif
+void arena_quarantine_junk_small(void *ptr, size_t usize);
void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
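[Editor's note: the JEMALLOC_JET blocks above (and the similar ones added later for arena_dalloc_junk_large, arena_ralloc_junk_large, huge_dalloc_junk, and prof_dump_open) expose selected internals as replaceable function pointers so the test suite can interpose on them. A hedged sketch of the pattern follows; the hook body, counter, and parameter names are hypothetical, only the typedef and pointer names come from this diff, and the code assumes a JEMALLOC_JET build with the internal headers available.]

#ifdef JEMALLOC_JET
static unsigned corruption_count;

/* Matches arena_redzone_corruption_t: (void *, size_t, bool, size_t, uint8_t). */
static void
count_redzone_corruption(void *ptr, size_t usize, bool after, size_t offset,
    uint8_t byte)
{

	/* Record the corrupted redzone byte instead of reporting it. */
	corruption_count++;
}

static void
with_redzone_hook(void (*body)(void))
{
	arena_redzone_corruption_t *orig = arena_redzone_corruption;

	arena_redzone_corruption = count_redzone_corruption;
	body();	/* Exercise the allocator, then inspect corruption_count. */
	arena_redzone_corruption = orig;
}
#endif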
@@ -415,10 +426,18 @@ void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind, arena_chunk_map_t *mapelm);
void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t pageind);
+#ifdef JEMALLOC_JET
+typedef void (arena_dalloc_junk_large_t)(void *, size_t);
+extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
+#endif
void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
void *ptr);
void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-void *arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+#ifdef JEMALLOC_JET
+typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
+extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
+#endif
+bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
@@ -441,6 +460,7 @@ void arena_postfork_child(arena_t *arena);
#ifndef JEMALLOC_ENABLE_INLINE
arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
@@ -451,6 +471,7 @@ size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
@@ -471,7 +492,7 @@ size_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_ctx_t *arena_prof_ctx_get(const void *ptr);
-void arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+void arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache);
size_t arena_salloc(const void *ptr, bool demote);
void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr,
@@ -498,10 +519,17 @@ arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
+arena_mapbitsp_read(size_t *mapbitsp)
+{
+
+ return (*mapbitsp);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
{
- return (*arena_mapbitsp_get(chunk, pageind));
+ return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -585,82 +613,89 @@ arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE void
+arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
+{
+
+ *mapbitsp = mapbits;
+}
+
+JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0);
assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags);
- *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags;
+ arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
- assert((*mapbitsp & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
- *mapbitsp = size | (*mapbitsp & PAGE_MASK);
+ assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
+ arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK));
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- *mapbitsp = size | CHUNK_MAP_BININD_INVALID | flags | unzeroed |
- CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags
+ | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
size_t binind)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE);
- *mapbitsp = (*mapbitsp & ~CHUNK_MAP_BININD_MASK) | (binind <<
- CHUNK_MAP_BININD_SHIFT);
+ arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) |
+ (binind << CHUNK_MAP_BININD_SHIFT));
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
size_t binind, size_t flags)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
size_t unzeroed;
assert(binind < BININD_INVALID);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert(pageind - runind >= map_bias);
assert((flags & CHUNK_MAP_DIRTY) == flags);
- unzeroed = *mapbitsp & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
- *mapbitsp = (runind << LG_PAGE) | (binind << CHUNK_MAP_BININD_SHIFT) |
- flags | unzeroed | CHUNK_MAP_ALLOCATED;
+ unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */
+ arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind <<
+ CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind,
size_t unzeroed)
{
- size_t *mapbitsp;
+ size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t mapbits = arena_mapbitsp_read(mapbitsp);
- mapbitsp = arena_mapbitsp_get(chunk, pageind);
- *mapbitsp = (*mapbitsp & ~CHUNK_MAP_UNZEROED) | unzeroed;
+ arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) |
+ unzeroed);
}
JEMALLOC_INLINE bool
@@ -869,10 +904,10 @@ arena_prof_ctx_get(const void *ptr)
}
JEMALLOC_INLINE void
-arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
- size_t pageind, mapbits;
+ size_t pageind;
cassert(config_prof);
assert(ptr != NULL);
@@ -880,10 +915,17 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- mapbits = arena_mapbits_get(chunk, pageind);
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
+ assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
+
+ if (usize > SMALL_MAXCLASS || (prof_promote &&
+ ((uintptr_t)ctx != (uintptr_t)1U || arena_mapbits_large_get(chunk,
+ pageind) != 0))) {
+ assert(arena_mapbits_large_get(chunk, pageind) != 0);
+ arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
+ } else {
+ assert(arena_mapbits_large_get(chunk, pageind) == 0);
if (prof_promote == false) {
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
(uintptr_t)((pageind - (mapbits >> LG_PAGE)) <<
LG_PAGE));
@@ -895,12 +937,11 @@ arena_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
bin_info = &arena_bin_info[binind];
regind = arena_run_regind(run, bin_info, ptr);
- *((prof_ctx_t **)((uintptr_t)run + bin_info->ctx0_offset
- + (regind * sizeof(prof_ctx_t *)))) = ctx;
- } else
- assert((uintptr_t)ctx == (uintptr_t)1U);
- } else
- arena_mapp_get(chunk, pageind)->prof_ctx = ctx;
+ *((prof_ctx_t **)((uintptr_t)run +
+ bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t
+ *)))) = ctx;
+ }
+ }
}
JEMALLOC_ALWAYS_INLINE void *
diff --git a/dep/jemalloc/include/jemalloc/internal/chunk_dss.h b/dep/jemalloc/include/jemalloc/internal/chunk_dss.h
index 6585f071bbe..4535ce09c09 100644
--- a/dep/jemalloc/include/jemalloc/internal/chunk_dss.h
+++ b/dep/jemalloc/include/jemalloc/internal/chunk_dss.h
@@ -7,7 +7,7 @@ typedef enum {
dss_prec_secondary = 2,
dss_prec_limit = 3
-} dss_prec_t ;
+} dss_prec_t;
#define DSS_PREC_DEFAULT dss_prec_secondary
#define DSS_DEFAULT "secondary"
diff --git a/dep/jemalloc/include/jemalloc/internal/ckh.h b/dep/jemalloc/include/jemalloc/internal/ckh.h
index 50c39ed9581..58712a6a763 100644
--- a/dep/jemalloc/include/jemalloc/internal/ckh.h
+++ b/dep/jemalloc/include/jemalloc/internal/ckh.h
@@ -17,7 +17,7 @@ typedef bool ckh_keycomp_t (const void *, const void *);
* There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit
* one bucket per L1 cache line.
*/
-#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
+#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
diff --git a/dep/jemalloc/include/jemalloc/internal/hash.h b/dep/jemalloc/include/jemalloc/internal/hash.h
index 56ecc793b36..09b69df515b 100644
--- a/dep/jemalloc/include/jemalloc/internal/hash.h
+++ b/dep/jemalloc/include/jemalloc/internal/hash.h
@@ -19,6 +19,11 @@
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
+uint32_t hash_x86_32(const void *key, int len, uint32_t seed);
+void hash_x86_128(const void *key, const int len, uint32_t seed,
+ uint64_t r_out[2]);
+void hash_x64_128(const void *key, const int len, const uint32_t seed,
+ uint64_t r_out[2]);
void hash(const void *key, size_t len, const uint32_t seed,
size_t r_hash[2]);
#endif
@@ -43,14 +48,14 @@ JEMALLOC_INLINE uint32_t
hash_get_block_32(const uint32_t *p, int i)
{
- return p[i];
+ return (p[i]);
}
JEMALLOC_INLINE uint64_t
hash_get_block_64(const uint64_t *p, int i)
{
- return p[i];
+ return (p[i]);
}
JEMALLOC_INLINE uint32_t
@@ -63,7 +68,7 @@ hash_fmix_32(uint32_t h)
h *= 0xc2b2ae35;
h ^= h >> 16;
- return h;
+ return (h);
}
JEMALLOC_INLINE uint64_t
@@ -76,7 +81,7 @@ hash_fmix_64(uint64_t k)
k *= QU(0xc4ceb9fe1a85ec53LLU);
k ^= k >> 33;
- return k;
+ return (k);
}
JEMALLOC_INLINE uint32_t
@@ -127,12 +132,12 @@ hash_x86_32(const void *key, int len, uint32_t seed)
h1 = hash_fmix_32(h1);
- return h1;
+ return (h1);
}
UNUSED JEMALLOC_INLINE void
hash_x86_128(const void *key, const int len, uint32_t seed,
- uint64_t r_out[2])
+ uint64_t r_out[2])
{
const uint8_t * data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -234,7 +239,7 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
UNUSED JEMALLOC_INLINE void
hash_x64_128(const void *key, const int len, const uint32_t seed,
- uint64_t r_out[2])
+ uint64_t r_out[2])
{
const uint8_t *data = (const uint8_t *) key;
const int nblocks = len / 16;
@@ -310,7 +315,6 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
r_out[1] = h2;
}
-
/******************************************************************************/
/* API. */
JEMALLOC_INLINE void
diff --git a/dep/jemalloc/include/jemalloc/internal/huge.h b/dep/jemalloc/include/jemalloc/internal/huge.h
index d987d370767..ddf13138ad7 100644
--- a/dep/jemalloc/include/jemalloc/internal/huge.h
+++ b/dep/jemalloc/include/jemalloc/internal/huge.h
@@ -19,10 +19,14 @@ extern malloc_mutex_t huge_mtx;
void *huge_malloc(size_t size, bool zero);
void *huge_palloc(size_t size, size_t alignment, bool zero);
-void *huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
+bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
size_t extra);
void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero, bool try_tcache_dalloc);
+#ifdef JEMALLOC_JET
+typedef void (huge_dalloc_junk_t)(void *, size_t);
+extern huge_dalloc_junk_t *huge_dalloc_junk;
+#endif
void huge_dalloc(void *ptr, bool unmap);
size_t huge_salloc(const void *ptr);
prof_ctx_t *huge_prof_ctx_get(const void *ptr);
diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h
index 80045bda4bd..b64cc4bed87 100644
--- a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h
+++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h
@@ -1,5 +1,5 @@
#ifndef JEMALLOC_INTERNAL_H
-#define JEMALLOC_INTERNAL_H
+#define JEMALLOC_INTERNAL_H
#include <math.h>
#ifdef _WIN32
# include <windows.h>
@@ -54,8 +54,7 @@ typedef intptr_t ssize_t;
#endif
#include <fcntl.h>
-#define JEMALLOC_NO_DEMANGLE
-#include "../jemalloc.h"
+#include "jemalloc_defs.h"
#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
@@ -66,6 +65,8 @@ typedef intptr_t ssize_t;
#include <valgrind/memcheck.h>
#endif
+#define JEMALLOC_NO_DEMANGLE
+#include "../jemalloc.h"
#include "jemalloc/internal/private_namespace.h"
#ifdef JEMALLOC_CC_SILENCE
@@ -221,8 +222,13 @@ static const bool config_ivsalloc =
* JEMALLOC_H_INLINES : Inline functions.
*/
/******************************************************************************/
-#define JEMALLOC_H_TYPES
+#define JEMALLOC_H_TYPES
+
+#ifndef JEMALLOC_HAS_RESTRICT
+# define restrict
+#endif
+#define MALLOCX_LG_ALIGN_MASK ((int)0x3f)
#define ALLOCM_LG_ALIGN_MASK ((int)0x3f)
#define ZU(z) ((size_t)z)
@@ -232,20 +238,26 @@ static const bool config_ivsalloc =
# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
-#ifdef JEMALLOC_DEBUG
+#if defined(JEMALLOC_DEBUG)
/* Disable inlining to make debugging easier. */
# define JEMALLOC_ALWAYS_INLINE
+# define JEMALLOC_ALWAYS_INLINE_C static
# define JEMALLOC_INLINE
+# define JEMALLOC_INLINE_C static
# define inline
#else
# define JEMALLOC_ENABLE_INLINE
# ifdef JEMALLOC_HAVE_ATTR
# define JEMALLOC_ALWAYS_INLINE \
static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
+# define JEMALLOC_ALWAYS_INLINE_C \
+ static inline JEMALLOC_ATTR(always_inline)
# else
# define JEMALLOC_ALWAYS_INLINE static inline
+# define JEMALLOC_ALWAYS_INLINE_C static inline
# endif
# define JEMALLOC_INLINE static inline
+# define JEMALLOC_INLINE_C static inline
# ifdef _MSC_VER
# define inline _inline
# endif
@@ -278,6 +290,9 @@ static const bool config_ivsalloc =
# ifdef __arm__
# define LG_QUANTUM 3
# endif
+# ifdef __aarch64__
+# define LG_QUANTUM 4
+# endif
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
@@ -478,7 +493,7 @@ static const bool config_ivsalloc =
#undef JEMALLOC_H_TYPES
/******************************************************************************/
-#define JEMALLOC_H_STRUCTS
+#define JEMALLOC_H_STRUCTS
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
@@ -507,14 +522,14 @@ typedef struct {
uint64_t deallocated;
} thread_allocated_t;
/*
- * The JEMALLOC_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
+ * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro
* argument.
*/
-#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_CONCAT({0, 0})
+#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0})
#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
-#define JEMALLOC_H_EXTERNS
+#define JEMALLOC_H_EXTERNS
extern bool opt_abort;
extern bool opt_junk;
@@ -574,7 +589,7 @@ void jemalloc_postfork_child(void);
#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
-#define JEMALLOC_H_INLINES
+#define JEMALLOC_H_INLINES
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
@@ -749,32 +764,36 @@ choose_arena(arena_t *arena)
#include "jemalloc/internal/quarantine.h"
#ifndef JEMALLOC_ENABLE_INLINE
-void *imallocx(size_t size, bool try_tcache, arena_t *arena);
+void *imalloct(size_t size, bool try_tcache, arena_t *arena);
void *imalloc(size_t size);
-void *icallocx(size_t size, bool try_tcache, arena_t *arena);
+void *icalloct(size_t size, bool try_tcache, arena_t *arena);
void *icalloc(size_t size);
-void *ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena);
void *ipalloc(size_t usize, size_t alignment, bool zero);
size_t isalloc(const void *ptr, bool demote);
size_t ivsalloc(const void *ptr, bool demote);
size_t u2rz(size_t usize);
size_t p2rz(const void *ptr);
-void idallocx(void *ptr, bool try_tcache);
+void idalloct(void *ptr, bool try_tcache);
void idalloc(void *ptr);
-void iqallocx(void *ptr, bool try_tcache);
+void iqalloct(void *ptr, bool try_tcache);
void iqalloc(void *ptr);
-void *irallocx(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero, bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc,
+void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
arena_t *arena);
+void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena);
void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment,
- bool zero, bool no_move);
+ bool zero);
+bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment,
+ bool zero);
malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t)
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE void *
-imallocx(size_t size, bool try_tcache, arena_t *arena)
+imalloct(size_t size, bool try_tcache, arena_t *arena)
{
assert(size != 0);
@@ -789,11 +808,11 @@ JEMALLOC_ALWAYS_INLINE void *
imalloc(size_t size)
{
- return (imallocx(size, true, NULL));
+ return (imalloct(size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
-icallocx(size_t size, bool try_tcache, arena_t *arena)
+icalloct(size_t size, bool try_tcache, arena_t *arena)
{
if (size <= arena_maxclass)
@@ -806,11 +825,11 @@ JEMALLOC_ALWAYS_INLINE void *
icalloc(size_t size)
{
- return (icallocx(size, true, NULL));
+ return (icalloct(size, true, NULL));
}
JEMALLOC_ALWAYS_INLINE void *
-ipallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache,
arena_t *arena)
{
void *ret;
@@ -838,7 +857,7 @@ JEMALLOC_ALWAYS_INLINE void *
ipalloc(size_t usize, size_t alignment, bool zero)
{
- return (ipallocx(usize, alignment, zero, true, NULL));
+ return (ipalloct(usize, alignment, zero, true, NULL));
}
/*
@@ -870,7 +889,7 @@ ivsalloc(const void *ptr, bool demote)
{
/* Return 0 if ptr is not within a chunk managed by jemalloc. */
- if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == NULL)
+ if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0)
return (0);
return (isalloc(ptr, demote));
@@ -899,7 +918,7 @@ p2rz(const void *ptr)
}
JEMALLOC_ALWAYS_INLINE void
-idallocx(void *ptr, bool try_tcache)
+idalloct(void *ptr, bool try_tcache)
{
arena_chunk_t *chunk;
@@ -916,31 +935,63 @@ JEMALLOC_ALWAYS_INLINE void
idalloc(void *ptr)
{
- idallocx(ptr, true);
+ idalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void
-iqallocx(void *ptr, bool try_tcache)
+iqalloct(void *ptr, bool try_tcache)
{
if (config_fill && opt_quarantine)
quarantine(ptr);
else
- idallocx(ptr, try_tcache);
+ idalloct(ptr, try_tcache);
}
JEMALLOC_ALWAYS_INLINE void
iqalloc(void *ptr)
{
- iqallocx(ptr, true);
+ iqalloct(ptr, true);
}
JEMALLOC_ALWAYS_INLINE void *
-irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
+iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra,
+ size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena)
+{
+ void *p;
+ size_t usize, copysize;
+
+ usize = sa2u(size + extra, alignment);
+ if (usize == 0)
+ return (NULL);
+ p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ if (p == NULL) {
+ if (extra == 0)
+ return (NULL);
+ /* Try again, without extra this time. */
+ usize = sa2u(size, alignment);
+ if (usize == 0)
+ return (NULL);
+ p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
+ if (p == NULL)
+ return (NULL);
+ }
+ /*
+ * Copy at most size bytes (not size+extra), since the caller has no
+ * expectation that the extra bytes will be reliably preserved.
+ */
+ copysize = (size < oldsize) ? size : oldsize;
+ memcpy(p, ptr, copysize);
+ iqalloct(ptr, try_tcache_dalloc);
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
+ bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena)
{
- void *ret;
size_t oldsize;
assert(ptr != NULL);
@@ -950,68 +1001,50 @@ irallocx(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
!= 0) {
- size_t usize, copysize;
-
/*
* Existing object alignment is inadequate; allocate new space
* and copy.
*/
- if (no_move)
- return (NULL);
- usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, without extra this time. */
- usize = sa2u(size, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
- arena);
- if (ret == NULL)
- return (NULL);
- }
- /*
- * Copy at most size bytes (not size+extra), since the caller
- * has no expectation that the extra bytes will be reliably
- * preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
- memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
- return (ret);
+ return (iralloct_realign(ptr, oldsize, size, extra, alignment,
+ zero, try_tcache_alloc, try_tcache_dalloc, arena));
}
- if (no_move) {
- if (size <= arena_maxclass) {
- return (arena_ralloc_no_move(ptr, oldsize, size,
- extra, zero));
- } else {
- return (huge_ralloc_no_move(ptr, oldsize, size,
- extra));
- }
+ if (size + extra <= arena_maxclass) {
+ return (arena_ralloc(arena, ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc));
} else {
- if (size + extra <= arena_maxclass) {
- return (arena_ralloc(arena, ptr, oldsize, size, extra,
- alignment, zero, try_tcache_alloc,
- try_tcache_dalloc));
- } else {
- return (huge_ralloc(ptr, oldsize, size, extra,
- alignment, zero, try_tcache_dalloc));
- }
+ return (huge_ralloc(ptr, oldsize, size, extra,
+ alignment, zero, try_tcache_dalloc));
}
}
JEMALLOC_ALWAYS_INLINE void *
-iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero,
- bool no_move)
+iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
{
- return (irallocx(ptr, size, extra, alignment, zero, no_move, true, true,
- NULL));
+ return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero)
+{
+ size_t oldsize;
+
+ assert(ptr != NULL);
+ assert(size != 0);
+
+ oldsize = isalloc(ptr, config_prof);
+ if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
+ != 0) {
+ /* Existing object alignment is inadequate. */
+ return (true);
+ }
+
+ if (size <= arena_maxclass)
+ return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
+ else
+ return (huge_ralloc_no_move(ptr, oldsize, size, extra));
}
malloc_tsd_externs(thread_allocated, thread_allocated_t)
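[Editor's note: the jemalloc_internal.h hunks above replace the old irallocx(..., no_move, ...) entry point with two functions: iralloct(), which may move the object, and ixalloc(), which only resizes in place and returns true on failure (mirroring the new bool returns of arena_ralloc_no_move() and huge_ralloc_no_move()). A hedged sketch of the intended try-in-place-then-move pattern follows; resize_pref_in_place() is a hypothetical helper, not part of the diff.]

static void *
resize_pref_in_place(void *ptr, size_t size)
{

	/* ixalloc() returns false when the resize succeeded in place. */
	if (ixalloc(ptr, size, 0, 0, false) == false)
		return (ptr);
	/* Otherwise fall back to a (possibly moving) reallocation. */
	return (iralloc(ptr, size, 0, 0, false));
}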
diff --git a/dep/jemalloc/include/jemalloc/internal/private_namespace.h b/dep/jemalloc/include/jemalloc/internal/private_namespace.h
index 65de3163fd3..a99bf7293ac 100644
--- a/dep/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/dep/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -8,6 +8,7 @@
#define arena_dalloc JEMALLOC_N(arena_dalloc)
#define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin)
#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked)
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
#define arena_dalloc_large JEMALLOC_N(arena_dalloc_large)
#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked)
@@ -33,6 +34,8 @@
#define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get)
#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set)
#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get)
+#define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read)
+#define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write)
#define arena_mapp_get JEMALLOC_N(arena_mapp_get)
#define arena_maxclass JEMALLOC_N(arena_maxclass)
#define arena_new JEMALLOC_N(arena_new)
@@ -48,8 +51,11 @@
#define arena_prof_promoted JEMALLOC_N(arena_prof_promoted)
#define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get)
#define arena_purge_all JEMALLOC_N(arena_purge_all)
+#define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
#define arena_run_regind JEMALLOC_N(arena_run_regind)
#define arena_salloc JEMALLOC_N(arena_salloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
@@ -66,6 +72,7 @@
#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper)
#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get)
#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper)
+#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head)
#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set)
#define atomic_add_u JEMALLOC_N(atomic_add_u)
#define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32)
@@ -189,6 +196,7 @@
#define huge_allocated JEMALLOC_N(huge_allocated)
#define huge_boot JEMALLOC_N(huge_boot)
#define huge_dalloc JEMALLOC_N(huge_dalloc)
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
#define huge_malloc JEMALLOC_N(huge_malloc)
#define huge_mtx JEMALLOC_N(huge_mtx)
#define huge_ndalloc JEMALLOC_N(huge_ndalloc)
@@ -204,20 +212,22 @@
#define huge_salloc JEMALLOC_N(huge_salloc)
#define iallocm JEMALLOC_N(iallocm)
#define icalloc JEMALLOC_N(icalloc)
-#define icallocx JEMALLOC_N(icallocx)
+#define icalloct JEMALLOC_N(icalloct)
#define idalloc JEMALLOC_N(idalloc)
-#define idallocx JEMALLOC_N(idallocx)
+#define idalloct JEMALLOC_N(idalloct)
#define imalloc JEMALLOC_N(imalloc)
-#define imallocx JEMALLOC_N(imallocx)
+#define imalloct JEMALLOC_N(imalloct)
#define ipalloc JEMALLOC_N(ipalloc)
-#define ipallocx JEMALLOC_N(ipallocx)
+#define ipalloct JEMALLOC_N(ipalloct)
#define iqalloc JEMALLOC_N(iqalloc)
-#define iqallocx JEMALLOC_N(iqallocx)
+#define iqalloct JEMALLOC_N(iqalloct)
#define iralloc JEMALLOC_N(iralloc)
-#define irallocx JEMALLOC_N(irallocx)
+#define iralloct JEMALLOC_N(iralloct)
+#define iralloct_realign JEMALLOC_N(iralloct_realign)
#define isalloc JEMALLOC_N(isalloc)
#define isthreaded JEMALLOC_N(isthreaded)
#define ivsalloc JEMALLOC_N(ivsalloc)
+#define ixalloc JEMALLOC_N(ixalloc)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
#define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent)
#define jemalloc_prefork JEMALLOC_N(jemalloc_prefork)
@@ -248,6 +258,7 @@
#define ncpus JEMALLOC_N(ncpus)
#define nhbins JEMALLOC_N(nhbins)
#define opt_abort JEMALLOC_N(opt_abort)
+#define opt_dss JEMALLOC_N(opt_dss)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_lg_chunk JEMALLOC_N(opt_lg_chunk)
#define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult)
@@ -277,8 +288,10 @@
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
+#define prof_bt_count JEMALLOC_N(prof_bt_count)
#define prof_ctx_get JEMALLOC_N(prof_ctx_get)
#define prof_ctx_set JEMALLOC_N(prof_ctx_set)
+#define prof_dump_open JEMALLOC_N(prof_dump_open)
#define prof_free JEMALLOC_N(prof_free)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_idump JEMALLOC_N(prof_idump)
@@ -304,6 +317,7 @@
#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper)
#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get)
#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper)
+#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head)
#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set)
#define quarantine JEMALLOC_N(quarantine)
#define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook)
@@ -317,8 +331,10 @@
#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper)
#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get)
#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper)
+#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head)
#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set)
#define register_zone JEMALLOC_N(register_zone)
+#define rtree_delete JEMALLOC_N(rtree_delete)
#define rtree_get JEMALLOC_N(rtree_get)
#define rtree_get_locked JEMALLOC_N(rtree_get_locked)
#define rtree_new JEMALLOC_N(rtree_new)
@@ -329,6 +345,7 @@
#define s2u JEMALLOC_N(s2u)
#define sa2u JEMALLOC_N(sa2u)
#define set_errno JEMALLOC_N(set_errno)
+#define small_size2bin JEMALLOC_N(small_size2bin)
#define stats_cactive JEMALLOC_N(stats_cactive)
#define stats_cactive_add JEMALLOC_N(stats_cactive_add)
#define stats_cactive_get JEMALLOC_N(stats_cactive_get)
@@ -361,6 +378,7 @@
#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper)
#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get)
#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper)
+#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head)
#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set)
#define tcache_event JEMALLOC_N(tcache_event)
#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
@@ -377,6 +395,7 @@
#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper)
#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get)
#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper)
+#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head)
#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set)
#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted)
#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized)
@@ -386,5 +405,8 @@
#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper)
#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get)
#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper)
+#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head)
#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set)
+#define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion)
+#define tsd_init_finish JEMALLOC_N(tsd_init_finish)
#define u2rz JEMALLOC_N(u2rz)
diff --git a/dep/jemalloc/include/jemalloc/internal/prng.h b/dep/jemalloc/include/jemalloc/internal/prng.h
index 83a5462b4dd..7b2b06512ff 100644
--- a/dep/jemalloc/include/jemalloc/internal/prng.h
+++ b/dep/jemalloc/include/jemalloc/internal/prng.h
@@ -25,7 +25,7 @@
* uint32_t state : Seed value.
* const uint32_t a, c : See above discussion.
*/
-#define prng32(r, lg_range, state, a, c) do { \
+#define prng32(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 32); \
\
@@ -35,7 +35,7 @@
} while (false)
/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */
-#define prng64(r, lg_range, state, a, c) do { \
+#define prng64(r, lg_range, state, a, c) do { \
assert(lg_range > 0); \
assert(lg_range <= 64); \
\
diff --git a/dep/jemalloc/include/jemalloc/internal/prof.h b/dep/jemalloc/include/jemalloc/internal/prof.h
index 119a5b1bcb7..6f162d21e84 100644
--- a/dep/jemalloc/include/jemalloc/internal/prof.h
+++ b/dep/jemalloc/include/jemalloc/internal/prof.h
@@ -8,7 +8,11 @@ typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tdata_s prof_tdata_t;
/* Option defaults. */
-#define PROF_PREFIX_DEFAULT "jeprof"
+#ifdef JEMALLOC_PROF
+# define PROF_PREFIX_DEFAULT "jeprof"
+#else
+# define PROF_PREFIX_DEFAULT ""
+#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1
@@ -129,6 +133,7 @@ struct prof_ctx_s {
* limbo due to one of:
* - Initializing per thread counters associated with this ctx.
* - Preparing to destroy this ctx.
+ * - Dumping a heap profile that includes this ctx.
* nlimbo must be 1 (single destroyer) in order to safely destroy the
* ctx.
*/
@@ -145,7 +150,11 @@ struct prof_ctx_s {
* this context.
*/
ql_head(prof_thr_cnt_t) cnts_ql;
+
+ /* Linkage for list of contexts to be dumped. */
+ ql_elm(prof_ctx_t) dump_link;
};
+typedef ql_head(prof_ctx_t) prof_ctx_list_t;
struct prof_tdata_s {
/*
@@ -195,7 +204,12 @@ extern bool opt_prof_gdump; /* High-water memory dumping. */
extern bool opt_prof_final; /* Final profile dumping. */
extern bool opt_prof_leak; /* Dump leak summary at exit. */
extern bool opt_prof_accum; /* Report cumulative bytes. */
-extern char opt_prof_prefix[PATH_MAX + 1];
+extern char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
/*
* Profile dump interval, measured in bytes allocated. Each arena triggers a
@@ -215,6 +229,11 @@ extern bool prof_promote;
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
+#ifdef JEMALLOC_JET
+size_t prof_bt_count(void);
+typedef int (prof_dump_open_t)(bool, const char *);
+extern prof_dump_open_t *prof_dump_open;
+#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
@@ -289,11 +308,11 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
-void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
+void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
-void prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt);
-void prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
- size_t old_size, prof_ctx_t *old_ctx);
+void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
+void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
+ size_t old_usize, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
#endif
@@ -320,6 +339,20 @@ prof_tdata_get(bool create)
JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
+ /*
+ * The body of this function is compiled out unless heap profiling is
+ * enabled, so that it is possible to compile jemalloc with floating
+ * point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a
+ * workaround for versions of glibc that don't properly save/restore
+ * floating point registers during dynamic lazy symbol loading (which
+ * internally calls into whatever malloc implementation happens to be
+ * integrated into the application). Note that some compilers (e.g.
+ * gcc 4.8) may use floating point registers for fast memory moves, so
+ * jemalloc must be compiled with such optimizations disabled (e.g.
+ * -mno-sse) in order for the workaround to be complete.
+ */
+#ifdef JEMALLOC_PROF
uint64_t r;
double u;
@@ -341,7 +374,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
* Luc Devroye
* Springer-Verlag, New York, 1986
* pp 500
- * (http://cg.scs.carleton.ca/~luc/rnbookindex.html)
+ * (http://luc.devroye.org/rnbookindex.html)
*/
prng64(r, 53, prof_tdata->prng_state,
UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
@@ -349,6 +382,7 @@ prof_sample_threshold_update(prof_tdata_t *prof_tdata)
prof_tdata->threshold = (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
+ (uint64_t)1U;
+#endif
}
JEMALLOC_INLINE prof_ctx_t *
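[Editor's note, not part of the upstream comment: the threshold computed above is inverse-transform sampling of a geometric distribution. Treating each allocated byte as an independent Bernoulli trial with sampling probability p = 2^{-lg_prof_sample}, the byte count until the next sampled allocation is drawn as

\[
\text{threshold} \;=\; \left\lfloor \frac{\ln u}{\ln\bigl(1 - 2^{-\mathrm{lg\_prof\_sample}}\bigr)} \right\rfloor + 1,
\qquad u \sim \mathrm{Uniform}(0,1),
\]

which matches the prng64()/log() expression in the code, with u derived from the 53-bit deviate r (Devroye, op. cit., p. 500).]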
@@ -371,7 +405,7 @@ prof_ctx_get(const void *ptr)
}
JEMALLOC_INLINE void
-prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
+prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
@@ -381,7 +415,7 @@ prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
- arena_prof_ctx_set(ptr, ctx);
+ arena_prof_ctx_set(ptr, usize, ctx);
} else
huge_prof_ctx_set(ptr, ctx);
}
@@ -416,20 +450,20 @@ prof_sample_accum_update(size_t size)
}
JEMALLOC_INLINE void
-prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
+prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
{
cassert(config_prof);
assert(ptr != NULL);
- assert(size == isalloc(ptr, true));
+ assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(size)) {
+ if (prof_sample_accum_update(usize)) {
/*
* Don't sample. For malloc()-like allocation, it is
* always possible to tell in advance how large an
* object's usable size will be, so there should never
- * be a difference between the size passed to
+ * be a difference between the usize passed to
* PROF_ALLOC_PREP() and prof_malloc().
*/
assert((uintptr_t)cnt == (uintptr_t)1U);
@@ -437,17 +471,17 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, cnt->ctx);
+ prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
/*********/
mb_write();
/*********/
cnt->cnts.curobjs++;
- cnt->cnts.curbytes += size;
+ cnt->cnts.curbytes += usize;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += size;
+ cnt->cnts.accumbytes += usize;
}
/*********/
mb_write();
@@ -457,12 +491,12 @@ prof_malloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt)
mb_write();
/*********/
} else
- prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+ prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
}
JEMALLOC_INLINE void
-prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
- size_t old_size, prof_ctx_t *old_ctx)
+prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
+ size_t old_usize, prof_ctx_t *old_ctx)
{
prof_thr_cnt_t *told_cnt;
@@ -470,15 +504,15 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);
if (ptr != NULL) {
- assert(size == isalloc(ptr, true));
+ assert(usize == isalloc(ptr, true));
if (opt_lg_prof_sample != 0) {
- if (prof_sample_accum_update(size)) {
+ if (prof_sample_accum_update(usize)) {
/*
- * Don't sample. The size passed to
+ * Don't sample. The usize passed to
* PROF_ALLOC_PREP() was larger than what
* actually got allocated, so a backtrace was
* captured for this allocation, even though
- * its actual size was insufficient to cross
+ * its actual usize was insufficient to cross
* the sample threshold.
*/
cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
@@ -495,7 +529,7 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
*/
malloc_mutex_lock(old_ctx->lock);
old_ctx->cnt_merged.curobjs--;
- old_ctx->cnt_merged.curbytes -= old_size;
+ old_ctx->cnt_merged.curbytes -= old_usize;
malloc_mutex_unlock(old_ctx->lock);
told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
}
@@ -505,23 +539,23 @@ prof_realloc(const void *ptr, size_t size, prof_thr_cnt_t *cnt,
if ((uintptr_t)told_cnt > (uintptr_t)1U)
told_cnt->epoch++;
if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, cnt->ctx);
+ prof_ctx_set(ptr, usize, cnt->ctx);
cnt->epoch++;
} else if (ptr != NULL)
- prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
+ prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
/*********/
if ((uintptr_t)told_cnt > (uintptr_t)1U) {
told_cnt->cnts.curobjs--;
- told_cnt->cnts.curbytes -= old_size;
+ told_cnt->cnts.curbytes -= old_usize;
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
cnt->cnts.curobjs++;
- cnt->cnts.curbytes += size;
+ cnt->cnts.curbytes += usize;
if (opt_prof_accum) {
cnt->cnts.accumobjs++;
- cnt->cnts.accumbytes += size;
+ cnt->cnts.accumbytes += usize;
}
}
/*********/
diff --git a/dep/jemalloc/include/jemalloc/internal/ql.h b/dep/jemalloc/include/jemalloc/internal/ql.h
index a9ed2393f0c..f70c5f6f391 100644
--- a/dep/jemalloc/include/jemalloc/internal/ql.h
+++ b/dep/jemalloc/include/jemalloc/internal/ql.h
@@ -1,61 +1,61 @@
/*
* List definitions.
*/
-#define ql_head(a_type) \
+#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
-#define ql_head_initializer(a_head) {NULL}
+#define ql_head_initializer(a_head) {NULL}
-#define ql_elm(a_type) qr(a_type)
+#define ql_elm(a_type) qr(a_type)
/* List functions. */
-#define ql_new(a_head) do { \
+#define ql_new(a_head) do { \
(a_head)->qlh_first = NULL; \
} while (0)
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
-#define ql_first(a_head) ((a_head)->qlh_first)
+#define ql_first(a_head) ((a_head)->qlh_first)
-#define ql_last(a_head, a_field) \
+#define ql_last(a_head, a_field) \
((ql_first(a_head) != NULL) \
? qr_prev(ql_first(a_head), a_field) : NULL)
-#define ql_next(a_head, a_elm, a_field) \
+#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
-#define ql_prev(a_head, a_elm, a_field) \
+#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
-#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
+#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
ql_first(a_head) = (a_elm); \
} \
} while (0)
-#define ql_after_insert(a_qlelm, a_elm, a_field) \
+#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
-#define ql_head_insert(a_head, a_elm, a_field) do { \
+#define ql_head_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
-#define ql_tail_insert(a_head, a_elm, a_field) do { \
+#define ql_tail_insert(a_head, a_elm, a_field) do { \
if (ql_first(a_head) != NULL) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
-#define ql_remove(a_head, a_elm, a_field) do { \
+#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
} \
@@ -66,18 +66,18 @@ struct { \
} \
} while (0)
-#define ql_head_remove(a_head, a_type, a_field) do { \
+#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_tail_remove(a_head, a_type, a_field) do { \
+#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
-#define ql_foreach(a_var, a_head, a_field) \
+#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
-#define ql_reverse_foreach(a_var, a_head, a_field) \
+#define ql_reverse_foreach(a_var, a_head, a_field) \
qr_reverse_foreach((a_var), ql_first(a_head), a_field)
diff --git a/dep/jemalloc/include/jemalloc/internal/qr.h b/dep/jemalloc/include/jemalloc/internal/qr.h
index fe22352fedd..602944b9b4f 100644
--- a/dep/jemalloc/include/jemalloc/internal/qr.h
+++ b/dep/jemalloc/include/jemalloc/internal/qr.h
@@ -1,28 +1,28 @@
/* Ring definitions. */
-#define qr(a_type) \
+#define qr(a_type) \
struct { \
a_type *qre_next; \
a_type *qre_prev; \
}
/* Ring functions. */
-#define qr_new(a_qr, a_field) do { \
+#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
+#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
+#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
+#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
(a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
(a_qr)->a_field.qre_next = (a_qrelm); \
(a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
(a_qrelm)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_after_insert(a_qrelm, a_qr, a_field) \
+#define qr_after_insert(a_qrelm, a_qr, a_field) \
do \
{ \
(a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
@@ -31,7 +31,7 @@ struct { \
(a_qrelm)->a_field.qre_next = (a_qr); \
} while (0)
-#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
+#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
void *t; \
(a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
(a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
@@ -42,10 +42,10 @@ struct { \
/* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code. */
-#define qr_split(a_qr_a, a_qr_b, a_field) \
+#define qr_split(a_qr_a, a_qr_b, a_field) \
qr_meld((a_qr_a), (a_qr_b), a_field)
-#define qr_remove(a_qr, a_field) do { \
+#define qr_remove(a_qr, a_field) do { \
(a_qr)->a_field.qre_prev->a_field.qre_next \
= (a_qr)->a_field.qre_next; \
(a_qr)->a_field.qre_next->a_field.qre_prev \
@@ -54,13 +54,13 @@ struct { \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
-#define qr_foreach(var, a_qr, a_field) \
+#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
-#define qr_reverse_foreach(var, a_qr, a_field) \
+#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
(var) = (((var) != (a_qr)) \
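The ql_*() macros in ql.h are thin wrappers over the qr_*() ring macros reformatted above: every element embeds its own linkage, so list membership needs no separate node allocations (this is how arena_chunk_purge() collects its mapelms list later in this commit). A minimal usage sketch in C; the widget_t element type is hypothetical, and the include paths are an assumption (inside jemalloc these headers are normally pulled in via jemalloc_internal.h):

    #include "jemalloc/internal/qr.h"
    #include "jemalloc/internal/ql.h"

    typedef struct widget_s widget_t;
    struct widget_s {
            int             val;
            ql_elm(widget_t) link;  /* embedded list linkage */
    };

    static void
    ql_example(void)
    {
            ql_head(widget_t) widgets;
            widget_t a = {1}, b = {2};
            widget_t *w;

            ql_new(&widgets);
            ql_elm_new(&a, link);
            ql_elm_new(&b, link);
            ql_tail_insert(&widgets, &a, link);
            ql_tail_insert(&widgets, &b, link);

            ql_foreach(w, &widgets, link) {
                    /* Visits a, then b. */
            }
            ql_remove(&widgets, &a, link);
    }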
diff --git a/dep/jemalloc/include/jemalloc/internal/rb.h b/dep/jemalloc/include/jemalloc/internal/rb.h
index 7b675f09051..423802eb2dc 100644
--- a/dep/jemalloc/include/jemalloc/internal/rb.h
+++ b/dep/jemalloc/include/jemalloc/internal/rb.h
@@ -22,10 +22,6 @@
#ifndef RB_H_
#define RB_H_
-#if 0
-__FBSDID("$FreeBSD: head/lib/libc/stdlib/rb.h 204493 2010-02-28 22:57:13Z jasone $");
-#endif
-
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
diff --git a/dep/jemalloc/include/jemalloc/internal/rtree.h b/dep/jemalloc/include/jemalloc/internal/rtree.h
index 9bd98548cfe..bc74769f50e 100644
--- a/dep/jemalloc/include/jemalloc/internal/rtree.h
+++ b/dep/jemalloc/include/jemalloc/internal/rtree.h
@@ -14,17 +14,18 @@ typedef struct rtree_s rtree_t;
* Size of each radix tree node (must be a power of 2). This impacts tree
* depth.
*/
-#if (LG_SIZEOF_PTR == 2)
-# define RTREE_NODESIZE (1U << 14)
-#else
-# define RTREE_NODESIZE CACHELINE
-#endif
+#define RTREE_NODESIZE (1U << 16)
+
+typedef void *(rtree_alloc_t)(size_t);
+typedef void (rtree_dalloc_t)(void *);
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
struct rtree_s {
+ rtree_alloc_t *alloc;
+ rtree_dalloc_t *dalloc;
malloc_mutex_t mutex;
void **root;
unsigned height;
@@ -35,7 +36,8 @@ struct rtree_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-rtree_t *rtree_new(unsigned bits);
+rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc);
+void rtree_delete(rtree_t *rtree);
void rtree_prefork(rtree_t *rtree);
void rtree_postfork_parent(rtree_t *rtree);
void rtree_postfork_child(rtree_t *rtree);
@@ -45,20 +47,20 @@ void rtree_postfork_child(rtree_t *rtree);
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-#ifndef JEMALLOC_DEBUG
-void *rtree_get_locked(rtree_t *rtree, uintptr_t key);
+#ifdef JEMALLOC_DEBUG
+uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
-void *rtree_get(rtree_t *rtree, uintptr_t key);
-bool rtree_set(rtree_t *rtree, uintptr_t key, void *val);
+uint8_t rtree_get(rtree_t *rtree, uintptr_t key);
+bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
-JEMALLOC_INLINE void * \
+JEMALLOC_INLINE uint8_t \
f(rtree_t *rtree, uintptr_t key) \
{ \
- void *ret; \
+ uint8_t ret; \
uintptr_t subkey; \
unsigned i, lshift, height, bits; \
void **node, **child; \
@@ -68,12 +70,12 @@ f(rtree_t *rtree, uintptr_t key) \
i < height - 1; \
i++, lshift += bits, node = child) { \
bits = rtree->level2bits[i]; \
- subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
+ subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \
3)) - bits); \
child = (void**)node[subkey]; \
if (child == NULL) { \
RTREE_UNLOCK(&rtree->mutex); \
- return (NULL); \
+ return (0); \
} \
} \
\
@@ -84,7 +86,10 @@ f(rtree_t *rtree, uintptr_t key) \
bits = rtree->level2bits[i]; \
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
bits); \
- ret = node[subkey]; \
+ { \
+ uint8_t *leaf = (uint8_t *)node; \
+ ret = leaf[subkey]; \
+ } \
RTREE_UNLOCK(&rtree->mutex); \
\
RTREE_GET_VALIDATE \
@@ -123,7 +128,7 @@ RTREE_GET_GENERATE(rtree_get)
#undef RTREE_GET_VALIDATE
JEMALLOC_INLINE bool
-rtree_set(rtree_t *rtree, uintptr_t key, void *val)
+rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val)
{
uintptr_t subkey;
unsigned i, lshift, height, bits;
@@ -138,14 +143,14 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
bits);
child = (void**)node[subkey];
if (child == NULL) {
- child = (void**)base_alloc(sizeof(void *) <<
- rtree->level2bits[i+1]);
+ size_t size = ((i + 1 < height - 1) ? sizeof(void *)
+ : (sizeof(uint8_t))) << rtree->level2bits[i+1];
+ child = (void**)rtree->alloc(size);
if (child == NULL) {
malloc_mutex_unlock(&rtree->mutex);
return (true);
}
- memset(child, 0, sizeof(void *) <<
- rtree->level2bits[i+1]);
+ memset(child, 0, size);
node[subkey] = child;
}
}
@@ -153,7 +158,10 @@ rtree_set(rtree_t *rtree, uintptr_t key, void *val)
/* node is a leaf, so it contains values rather than node pointers. */
bits = rtree->level2bits[i];
subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
- node[subkey] = val;
+ {
+ uint8_t *leaf = (uint8_t *)node;
+ leaf[subkey] = val;
+ }
malloc_mutex_unlock(&rtree->mutex);
return (false);
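With this change the radix tree maps pointer-sized keys to one-byte values stored directly in the leaf nodes, and the caller supplies the node allocator/deallocator instead of the tree hard-wiring base_alloc(). A rough usage sketch; the ZU() macro, opt_lg_chunk, and the variable names are taken from jemalloc's internals, while using stdlib malloc/free as the alloc/dalloc hooks is purely an assumption for illustration:

    #include <stdint.h>
    #include <stdlib.h>

    /* One-byte "this chunk is ours" flag per chunk address; the low
     * opt_lg_chunk bits of each key are ignored by the tree. */
    static rtree_t *chunks_rtree;

    static bool
    rtree_example_boot(void)
    {
            unsigned bits = (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
                opt_lg_chunk);

            /* malloc/free stand in for the real node alloc/dalloc hooks. */
            chunks_rtree = rtree_new(bits, malloc, free);
            return (chunks_rtree == NULL);
    }

    static void
    rtree_example_register(void *chunk)
    {
            if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
                    abort();        /* interior node allocation failed */
            /* rtree_get() now returns 1 for any address inside this chunk. */
    }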
diff --git a/dep/jemalloc/include/jemalloc/internal/tcache.h b/dep/jemalloc/include/jemalloc/internal/tcache.h
index ba36204ff21..c3d4b58d4dc 100644
--- a/dep/jemalloc/include/jemalloc/internal/tcache.h
+++ b/dep/jemalloc/include/jemalloc/internal/tcache.h
@@ -297,6 +297,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
binind = SMALL_SIZE2BIN(size);
assert(binind < NBINS);
tbin = &tcache->tbins[binind];
+ size = arena_bin_info[binind].reg_size;
ret = tcache_alloc_easy(tbin);
if (ret == NULL) {
ret = tcache_alloc_small_hard(tcache, tbin, binind);
@@ -313,6 +314,7 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
@@ -321,7 +323,6 @@ tcache_alloc_small(tcache_t *tcache, size_t size, bool zero)
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
if (config_stats)
tbin->tstats.nrequests++;
@@ -368,11 +369,11 @@ tcache_alloc_large(tcache_t *tcache, size_t size, bool zero)
else if (opt_zero)
memset(ret, 0, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
if (config_stats)
tbin->tstats.nrequests++;
diff --git a/dep/jemalloc/include/jemalloc/internal/tsd.h b/dep/jemalloc/include/jemalloc/internal/tsd.h
index 0037cf35e70..9fb4a23ec6b 100644
--- a/dep/jemalloc/include/jemalloc/internal/tsd.h
+++ b/dep/jemalloc/include/jemalloc/internal/tsd.h
@@ -6,6 +6,12 @@
typedef bool (*malloc_tsd_cleanup_t)(void);
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+typedef struct tsd_init_block_s tsd_init_block_t;
+typedef struct tsd_init_head_s tsd_init_head_t;
+#endif
+
/*
* TLS/TSD-agnostic macro-based implementation of thread-specific data. There
* are four macros that support (at least) three use cases: file-private,
@@ -75,12 +81,13 @@ extern __thread a_type a_name##_tls; \
extern pthread_key_t a_name##_tsd; \
extern bool a_name##_booted;
#elif (defined(_WIN32))
-#define malloc_tsd_externs(a_name, a_type) \
+#define malloc_tsd_externs(a_name, a_type) \
extern DWORD a_name##_tsd; \
extern bool a_name##_booted;
#else
#define malloc_tsd_externs(a_name, a_type) \
extern pthread_key_t a_name##_tsd; \
+extern tsd_init_head_t a_name##_tsd_init_head; \
extern bool a_name##_booted;
#endif
@@ -105,6 +112,10 @@ a_attr bool a_name##_booted = false;
#else
#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \
a_attr pthread_key_t a_name##_tsd; \
+a_attr tsd_init_head_t a_name##_tsd_init_head = { \
+ ql_head_initializer(blocks), \
+ MALLOC_MUTEX_INITIALIZER \
+}; \
a_attr bool a_name##_booted = false;
#endif
@@ -333,8 +344,14 @@ a_name##_tsd_get_wrapper(void) \
pthread_getspecific(a_name##_tsd); \
\
if (wrapper == NULL) { \
+ tsd_init_block_t block; \
+ wrapper = tsd_init_check_recursion( \
+ &a_name##_tsd_init_head, &block); \
+ if (wrapper) \
+ return (wrapper); \
wrapper = (a_name##_tsd_wrapper_t *) \
malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \
+ block.data = wrapper; \
if (wrapper == NULL) { \
malloc_write("<jemalloc>: Error allocating" \
" TSD for "#a_name"\n"); \
@@ -350,6 +367,7 @@ a_name##_tsd_get_wrapper(void) \
" TSD for "#a_name"\n"); \
abort(); \
} \
+ tsd_init_finish(&a_name##_tsd_init_head, &block); \
} \
return (wrapper); \
} \
@@ -379,6 +397,19 @@ a_name##_tsd_set(a_type *val) \
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+struct tsd_init_block_s {
+ ql_elm(tsd_init_block_t) link;
+ pthread_t thread;
+ void *data;
+};
+struct tsd_init_head_s {
+ ql_head(tsd_init_block_t) blocks;
+ malloc_mutex_t lock;
+};
+#endif
+
#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
@@ -388,6 +419,12 @@ void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *);
void malloc_tsd_cleanup_register(bool (*f)(void));
void malloc_tsd_boot(void);
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+void *tsd_init_check_recursion(tsd_init_head_t *head,
+ tsd_init_block_t *block);
+void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block);
+#endif
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
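The tsd_init_head/tsd_init_block machinery added above guards the slow pthread_getspecific() path against re-entrancy: allocating the wrapper can itself call back into malloc and therefore into the same TSD getter on the same thread. A conceptual sketch of what tsd_init_check_recursion()/tsd_init_finish() do; the real definitions live in src/tsd.c elsewhere in this commit, so this is only an approximation (the direct pthread_t comparison in particular is a simplification):

    void *
    tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
    {
            pthread_t self = pthread_self();
            tsd_init_block_t *iter;

            /* Already initializing on this thread?  Hand back the in-flight
             * data instead of recursing forever. */
            malloc_mutex_lock(&head->lock);
            ql_foreach(iter, &head->blocks, link) {
                    if (iter->thread == self) {
                            malloc_mutex_unlock(&head->lock);
                            return (iter->data);
                    }
            }
            /* First entry: record this thread's block and let the caller
             * proceed with the real allocation. */
            ql_elm_new(block, link);
            block->thread = self;
            ql_tail_insert(&head->blocks, block, link);
            malloc_mutex_unlock(&head->lock);
            return (NULL);
    }

    void
    tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
    {

            malloc_mutex_lock(&head->lock);
            ql_remove(&head->blocks, block, link);
            malloc_mutex_unlock(&head->lock);
    }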
diff --git a/dep/jemalloc/include/jemalloc/internal/util.h b/dep/jemalloc/include/jemalloc/internal/util.h
index 8479693631a..6b938f74688 100644
--- a/dep/jemalloc/include/jemalloc/internal/util.h
+++ b/dep/jemalloc/include/jemalloc/internal/util.h
@@ -14,7 +14,7 @@
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
*/
-#define JEMALLOC_CONCAT(...) __VA_ARGS__
+#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__
/*
* Silence compiler warnings due to uninitialized values. This is used
@@ -42,12 +42,6 @@
} while (0)
#endif
-/* Use to assert a particular configuration, e.g., cassert(config_debug). */
-#define cassert(c) do { \
- if ((c) == false) \
- assert(false); \
-} while (0)
-
#ifndef not_reached
#define not_reached() do { \
if (config_debug) { \
@@ -69,10 +63,18 @@
} while (0)
#endif
+#ifndef assert_not_implemented
#define assert_not_implemented(e) do { \
if (config_debug && !(e)) \
not_implemented(); \
} while (0)
+#endif
+
+/* Use to assert a particular configuration, e.g., cassert(config_debug). */
+#define cassert(c) do { \
+ if ((c) == false) \
+ not_reached(); \
+} while (0)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
@@ -82,8 +84,9 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-int buferror(char *buf, size_t buflen);
-uintmax_t malloc_strtoumax(const char *nptr, char **endptr, int base);
+int buferror(int err, char *buf, size_t buflen);
+uintmax_t malloc_strtoumax(const char *restrict nptr,
+ char **restrict endptr, int base);
void malloc_write(const char *s);
/*
@@ -107,7 +110,6 @@ void malloc_printf(const char *format, ...)
#ifndef JEMALLOC_ENABLE_INLINE
size_t pow2_ceil(size_t x);
-void malloc_write(const char *s);
void set_errno(int errnum);
int get_errno(void);
#endif
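The relocated cassert() now funnels through not_reached(), so a false configuration predicate aborts in debug builds instead of tripping a plain assert. It is used exactly as before, at the top of helpers that only make sense under a given build option (arena_quarantine_junk_small() later in this diff does this with config_fill). A tiny sketch with a hypothetical helper name:

    /* Hypothetical stats-only helper: reaching it in a build without
     * --enable-stats is a programming error, which cassert() turns into an
     * abort (via not_reached()) in debug builds. */
    static void
    stats_dump_extra(void)
    {

            cassert(config_stats);
            /* ... code that assumes stats counters exist ... */
    }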
diff --git a/dep/jemalloc/include/jemalloc/jemalloc.h b/dep/jemalloc/include/jemalloc/jemalloc.h
index 946c73b75e5..84e2e7294d1 100644
--- a/dep/jemalloc/include/jemalloc/jemalloc.h
+++ b/dep/jemalloc/include/jemalloc/jemalloc.h
@@ -7,36 +7,45 @@ extern "C" {
#include <limits.h>
#include <strings.h>
-#define JEMALLOC_VERSION "3.3.1-0-g9ef9d9e8c271cdf14f664b871a8f98c827714784"
+#define JEMALLOC_VERSION "3.5.1-0-g7709a64c59daf0b1f938be49472fcc499e1bd136"
#define JEMALLOC_VERSION_MAJOR 3
-#define JEMALLOC_VERSION_MINOR 3
+#define JEMALLOC_VERSION_MINOR 5
#define JEMALLOC_VERSION_BUGFIX 1
#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "9ef9d9e8c271cdf14f664b871a8f98c827714784"
+#define JEMALLOC_VERSION_GID "7709a64c59daf0b1f938be49472fcc499e1bd136"
-#include "jemalloc_defs.h"
+# define MALLOCX_LG_ALIGN(la) (la)
+# if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) (ffs(a)-1)
+# else
+# define MALLOCX_ALIGN(a) \
+ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+# endif
+# define MALLOCX_ZERO ((int)0x40)
+/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */
+# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8))
#ifdef JEMALLOC_EXPERIMENTAL
-#define ALLOCM_LG_ALIGN(la) (la)
-#if LG_SIZEOF_PTR == 2
-#define ALLOCM_ALIGN(a) (ffs(a)-1)
-#else
-#define ALLOCM_ALIGN(a) ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
-#endif
-#define ALLOCM_ZERO ((int)0x40)
-#define ALLOCM_NO_MOVE ((int)0x80)
+# define ALLOCM_LG_ALIGN(la) (la)
+# if LG_SIZEOF_PTR == 2
+# define ALLOCM_ALIGN(a) (ffs(a)-1)
+# else
+# define ALLOCM_ALIGN(a) \
+ ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31)
+# endif
+# define ALLOCM_ZERO ((int)0x40)
+# define ALLOCM_NO_MOVE ((int)0x80)
/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". */
-#define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
-
-#define ALLOCM_SUCCESS 0
-#define ALLOCM_ERR_OOM 1
-#define ALLOCM_ERR_NOT_MOVED 2
+# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8))
+# define ALLOCM_SUCCESS 0
+# define ALLOCM_ERR_OOM 1
+# define ALLOCM_ERR_NOT_MOVED 2
#endif
/*
- * The je_ prefix on the following public symbol declarations is an artifact of
- * namespace management, and should be omitted in application code unless
- * JEMALLOC_NO_DEMANGLE is defined (see below).
+ * The je_ prefix on the following public symbol declarations is an artifact
+ * of namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle.h).
*/
extern JEMALLOC_EXPORT const char *je_malloc_conf;
extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
@@ -52,6 +61,25 @@ JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size)
JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size);
JEMALLOC_EXPORT void je_free(void *ptr);
+JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags);
+JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags);
+JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra,
+ int flags);
+JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags);
+JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags);
+
+JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
+ size_t *miblenp);
+JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
+ const char *), void *je_cbopaque, const char *opts);
+JEMALLOC_EXPORT size_t je_malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr);
+
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
@@ -61,17 +89,6 @@ JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size)
JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc);
#endif
-JEMALLOC_EXPORT size_t je_malloc_usable_size(
- JEMALLOC_USABLE_SIZE_CONST void *ptr);
-JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *,
- const char *), void *je_cbopaque, const char *opts);
-JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen);
-JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, size_t *mibp,
- size_t *miblenp);
-JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen,
- void *oldp, size_t *oldlenp, void *newp, size_t newlen);
-
#ifdef JEMALLOC_EXPERIMENTAL
JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size,
int flags) JEMALLOC_ATTR(nonnull(1));
@@ -92,63 +109,71 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags);
* --with-mangling and/or --with-jemalloc-prefix configuration settings.
*/
#ifdef JEMALLOC_MANGLE
-#ifndef JEMALLOC_NO_DEMANGLE
-#define JEMALLOC_NO_DEMANGLE
-#endif
-#define malloc_conf je_malloc_conf
-#define malloc_message je_malloc_message
-#define malloc je_malloc
-#define calloc je_calloc
-#define posix_memalign je_posix_memalign
-#define aligned_alloc je_aligned_alloc
-#define realloc je_realloc
-#define free je_free
-#define malloc_usable_size je_malloc_usable_size
-#define malloc_stats_print je_malloc_stats_print
-#define mallctl je_mallctl
-#define mallctlnametomib je_mallctlnametomib
-#define mallctlbymib je_mallctlbymib
-#define memalign je_memalign
-#define valloc je_valloc
-#ifdef JEMALLOC_EXPERIMENTAL
-#define allocm je_allocm
-#define rallocm je_rallocm
-#define sallocm je_sallocm
-#define dallocm je_dallocm
-#define nallocm je_nallocm
-#endif
+# ifndef JEMALLOC_NO_DEMANGLE
+# define JEMALLOC_NO_DEMANGLE
+# endif
+# define malloc_conf je_malloc_conf
+# define malloc_message je_malloc_message
+# define malloc je_malloc
+# define calloc je_calloc
+# define posix_memalign je_posix_memalign
+# define aligned_alloc je_aligned_alloc
+# define realloc je_realloc
+# define free je_free
+# define mallocx je_mallocx
+# define rallocx je_rallocx
+# define xallocx je_xallocx
+# define sallocx je_sallocx
+# define dallocx je_dallocx
+# define nallocx je_nallocx
+# define mallctl je_mallctl
+# define mallctlnametomib je_mallctlnametomib
+# define mallctlbymib je_mallctlbymib
+# define malloc_stats_print je_malloc_stats_print
+# define malloc_usable_size je_malloc_usable_size
+# define memalign je_memalign
+# define valloc je_valloc
+# define allocm je_allocm
+# define dallocm je_dallocm
+# define nallocm je_nallocm
+# define rallocm je_rallocm
+# define sallocm je_sallocm
#endif
/*
- * The je_* macros can be used as stable alternative names for the public
- * jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily meant
- * for use in jemalloc itself, but it can be used by application code to
+ * The je_* macros can be used as stable alternative names for the
+ * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
+ * meant for use in jemalloc itself, but it can be used by application code to
* provide isolation from the name mangling specified via --with-mangling
* and/or --with-jemalloc-prefix.
*/
#ifndef JEMALLOC_NO_DEMANGLE
-#undef je_malloc_conf
-#undef je_malloc_message
-#undef je_malloc
-#undef je_calloc
-#undef je_posix_memalign
-#undef je_aligned_alloc
-#undef je_realloc
-#undef je_free
-#undef je_malloc_usable_size
-#undef je_malloc_stats_print
-#undef je_mallctl
-#undef je_mallctlnametomib
-#undef je_mallctlbymib
-#undef je_memalign
-#undef je_valloc
-#ifdef JEMALLOC_EXPERIMENTAL
-#undef je_allocm
-#undef je_rallocm
-#undef je_sallocm
-#undef je_dallocm
-#undef je_nallocm
-#endif
+# undef je_malloc_conf
+# undef je_malloc_message
+# undef je_malloc
+# undef je_calloc
+# undef je_posix_memalign
+# undef je_aligned_alloc
+# undef je_realloc
+# undef je_free
+# undef je_mallocx
+# undef je_rallocx
+# undef je_xallocx
+# undef je_sallocx
+# undef je_dallocx
+# undef je_nallocx
+# undef je_mallctl
+# undef je_mallctlnametomib
+# undef je_mallctlbymib
+# undef je_malloc_stats_print
+# undef je_malloc_usable_size
+# undef je_memalign
+# undef je_valloc
+# undef je_allocm
+# undef je_dallocm
+# undef je_nallocm
+# undef je_rallocm
+# undef je_sallocm
#endif
#ifdef __cplusplus
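The header now exports the non-experimental *allocx() family alongside the MALLOCX_* flag macros shown above, which supersede the ALLOCM_* experimental API. A minimal usage sketch against the public API, assuming the default demangled (unprefixed) symbol names:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
            /* 4 KiB, 64-byte aligned, zero-filled. */
            void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
            void *q;

            if (p == NULL)
                    return (1);

            /* Usable size may exceed the request (size-class rounding). */
            printf("usable: %zu\n", sallocx(p, 0));

            /* Grow in place if possible, otherwise move. */
            q = rallocx(p, 2 * 4096, MALLOCX_ALIGN(64));
            if (q != NULL)
                    p = q;

            dallocx(p, 0);
            return (0);
    }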
diff --git a/dep/jemalloc/jemalloc_defs.h.in.cmake b/dep/jemalloc/jemalloc_defs.h.in.cmake
index 9fdf53546e3..89e496f4acb 100644
--- a/dep/jemalloc/jemalloc_defs.h.in.cmake
+++ b/dep/jemalloc/jemalloc_defs.h.in.cmake
@@ -266,3 +266,9 @@
/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
#define LG_SIZEOF_INTMAX_T 3
+
+/* C99 restrict keyword supported. */
+#define JEMALLOC_HAS_RESTRICT
+
+/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */
+#undef JEMALLOC_CODE_COVERAGE
diff --git a/dep/jemalloc/src/arena.c b/dep/jemalloc/src/arena.c
index 05a787f89d9..390ab0f8230 100644
--- a/dep/jemalloc/src/arena.c
+++ b/dep/jemalloc/src/arena.c
@@ -38,52 +38,18 @@ const uint8_t small_size2bin[] = {
};
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
- size_t pageind, size_t npages, bool maybe_adjac_pred,
- bool maybe_adjac_succ);
-static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
- size_t pageind, size_t npages, bool maybe_adjac_pred,
- bool maybe_adjac_succ);
-static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
- bool large, size_t binind, bool zero);
-static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
-static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
-static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
- bool large, size_t binind, bool zero);
-static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
- size_t binind, bool zero);
-static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
- arena_chunk_t *chunk, void *arg);
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
bool cleaned);
-static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize);
-static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
-static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
-static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
-static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
-static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
-static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
-static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
-static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, arena_bin_t *bin);
-static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t size);
-static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
-static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero);
-static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
- size_t min_run_size);
-static void bin_info_init(void);
/******************************************************************************/
@@ -369,62 +335,63 @@ arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
}
static inline void
+arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
+{
+
+ VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), PAGE);
+}
+
+static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
- VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), PAGE);
+ arena_run_page_mark_zeroed(chunk, run_ind);
for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
}
static void
-arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
- size_t binind, bool zero)
+arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{
- arena_chunk_t *chunk;
- size_t run_ind, total_pages, need_pages, rem_pages, i;
- size_t flag_dirty;
- assert((large && binind == BININD_INVALID) || (large == false && binind
- != BININD_INVALID));
+ if (config_stats) {
+ ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
+ add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
+ sub_pages) << LG_PAGE);
+ if (cactive_diff != 0)
+ stats_cactive_add(cactive_diff);
+ }
+}
+
+static void
+arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
+ size_t flag_dirty, size_t need_pages)
+{
+ size_t total_pages, rem_pages;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
LG_PAGE;
assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
flag_dirty);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;
arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
- if (config_stats) {
- /*
- * Update stats_cactive if nactive is crossing a chunk
- * multiple.
- */
- size_t cactive_diff = CHUNK_CEILING((arena->nactive +
- need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
- LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_add(cactive_diff);
- }
+ arena_cactive_update(arena, need_pages, 0);
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
if (flag_dirty != 0) {
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
+ arena_mapbits_unallocated_set(chunk,
+ run_ind+need_pages, (rem_pages << LG_PAGE),
+ flag_dirty);
arena_mapbits_unallocated_set(chunk,
run_ind+total_pages-1, (rem_pages << LG_PAGE),
- CHUNK_MAP_DIRTY);
+ flag_dirty);
} else {
arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
(rem_pages << LG_PAGE),
@@ -438,153 +405,217 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
false, true);
}
+}
- /*
- * Update the page map separately for large vs. small runs, since it is
- * possible to avoid iteration for large mallocs.
- */
- if (large) {
- if (zero) {
- if (flag_dirty == 0) {
- /*
- * The run is clean, so some pages may be
- * zeroed (i.e. never before touched).
- */
- for (i = 0; i < need_pages; i++) {
- if (arena_mapbits_unzeroed_get(chunk,
- run_ind+i) != 0) {
- arena_run_zero(chunk, run_ind+i,
- 1);
- } else if (config_debug) {
- arena_run_page_validate_zeroed(
- chunk, run_ind+i);
- }
+static void
+arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
+ bool remove, bool zero)
+{
+ arena_chunk_t *chunk;
+ size_t flag_dirty, run_ind, need_pages, i;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ need_pages = (size >> LG_PAGE);
+ assert(need_pages > 0);
+
+ if (remove) {
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
+ need_pages);
+ }
+
+ if (zero) {
+ if (flag_dirty == 0) {
+ /*
+ * The run is clean, so some pages may be zeroed (i.e.
+ * never before touched).
+ */
+ for (i = 0; i < need_pages; i++) {
+ if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
+ != 0)
+ arena_run_zero(chunk, run_ind+i, 1);
+ else if (config_debug) {
+ arena_run_page_validate_zeroed(chunk,
+ run_ind+i);
+ } else {
+ arena_run_page_mark_zeroed(chunk,
+ run_ind+i);
}
- } else {
- /*
- * The run is dirty, so all pages must be
- * zeroed.
- */
- arena_run_zero(chunk, run_ind, need_pages);
}
+ } else {
+ /* The run is dirty, so all pages must be zeroed. */
+ arena_run_zero(chunk, run_ind, need_pages);
}
-
- /*
- * Set the last element first, in case the run only contains one
- * page (i.e. both statements set the same element).
- */
- arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
- flag_dirty);
- arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
} else {
- assert(zero == false);
- /*
- * Propagate the dirty and unzeroed flags to the allocated
- * small run, so that arena_dalloc_bin_run() has the ability to
- * conditionally trim clean pages.
- */
- arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
- /*
- * The first page will always be dirtied during small run
- * initialization, so a validation failure here would not
- * actually cause an observable failure.
- */
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind);
- for (i = 1; i < need_pages - 1; i++) {
- arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) {
- arena_run_page_validate_zeroed(chunk,
- run_ind+i);
- }
- }
- arena_mapbits_small_set(chunk, run_ind+need_pages-1,
- need_pages-1, binind, flag_dirty);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
- 0) {
- arena_run_page_validate_zeroed(chunk,
- run_ind+need_pages-1);
- }
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (need_pages << LG_PAGE));
+
+ /*
+ * Set the last element first, in case the run only contains one page
+ * (i.e. both statements set the same element).
+ */
+ arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
+ arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
+}
+
+static void
+arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+{
+
+ arena_run_split_large_helper(arena, run, size, true, zero);
+}
+
+static void
+arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
+{
+
+ arena_run_split_large_helper(arena, run, size, false, zero);
+}
+
+static void
+arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
+ size_t binind)
+{
+ arena_chunk_t *chunk;
+ size_t flag_dirty, run_ind, need_pages, i;
+
+ assert(binind != BININD_INVALID);
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ need_pages = (size >> LG_PAGE);
+ assert(need_pages > 0);
+
+ arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);
+
+ /*
+ * Propagate the dirty and unzeroed flags to the allocated small run,
+ * so that arena_dalloc_bin_run() has the ability to conditionally trim
+ * clean pages.
+ */
+ arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
+ /*
+ * The first page will always be dirtied during small run
+ * initialization, so a validation failure here would not actually
+ * cause an observable failure.
+ */
+ if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
+ run_ind) == 0)
+ arena_run_page_validate_zeroed(chunk, run_ind);
+ for (i = 1; i < need_pages - 1; i++) {
+ arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
+ if (config_debug && flag_dirty == 0 &&
+ arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
+ arena_run_page_validate_zeroed(chunk, run_ind+i);
+ }
+ arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
+ binind, flag_dirty);
+ if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
+ run_ind+need_pages-1) == 0)
+ arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
+ (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}
static arena_chunk_t *
-arena_chunk_alloc(arena_t *arena)
+arena_chunk_init_spare(arena_t *arena)
{
arena_chunk_t *chunk;
- size_t i;
- if (arena->spare != NULL) {
- chunk = arena->spare;
- arena->spare = NULL;
+ assert(arena->spare != NULL);
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
- assert(arena_mapbits_unallocated_size_get(chunk,
- chunk_npages-1) == arena_maxclass);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
- } else {
- bool zero;
- size_t unzeroed;
+ chunk = arena->spare;
+ arena->spare = NULL;
- zero = false;
- malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
- false, &zero, arena->dss_prec);
- malloc_mutex_lock(&arena->lock);
- if (chunk == NULL)
- return (NULL);
- if (config_stats)
- arena->stats.mapped += chunksize;
+ assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+ assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+ assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+ arena_maxclass);
+ assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
+ arena_maxclass);
+ assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+ arena_mapbits_dirty_get(chunk, chunk_npages-1));
- chunk->arena = arena;
+ return (chunk);
+}
- /*
- * Claim that no pages are in use, since the header is merely
- * overhead.
- */
- chunk->ndirty = 0;
+static arena_chunk_t *
+arena_chunk_init_hard(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+ bool zero;
+ size_t unzeroed, i;
- chunk->nruns_avail = 0;
- chunk->nruns_adjac = 0;
+ assert(arena->spare == NULL);
- /*
- * Initialize the map to contain one maximal free untouched run.
- * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
- * chunk.
- */
- unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
- arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
- unzeroed);
- /*
- * There is no need to initialize the internal page map entries
- * unless the chunk is not zeroed.
- */
- if (zero == false) {
- for (i = map_bias+1; i < chunk_npages-1; i++)
- arena_mapbits_unzeroed_set(chunk, i, unzeroed);
- } else if (config_debug) {
- VALGRIND_MAKE_MEM_DEFINED(
- (void *)arena_mapp_get(chunk, map_bias+1),
- (void *)((uintptr_t)
- arena_mapp_get(chunk, chunk_npages-1)
- - (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
+ zero = false;
+ malloc_mutex_unlock(&arena->lock);
+ chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
+ &zero, arena->dss_prec);
+ malloc_mutex_lock(&arena->lock);
+ if (chunk == NULL)
+ return (NULL);
+ if (config_stats)
+ arena->stats.mapped += chunksize;
+
+ chunk->arena = arena;
+
+ /*
+ * Claim that no pages are in use, since the header is merely overhead.
+ */
+ chunk->ndirty = 0;
+
+ chunk->nruns_avail = 0;
+ chunk->nruns_adjac = 0;
+
+ /*
+ * Initialize the map to contain one maximal free untouched run. Mark
+ * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
+ */
+ unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
+ arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
+ unzeroed);
+ /*
+ * There is no need to initialize the internal page map entries unless
+ * the chunk is not zeroed.
+ */
+ if (zero == false) {
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
+ map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
+ chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
+ map_bias+1)));
+ for (i = map_bias+1; i < chunk_npages-1; i++)
+ arena_mapbits_unzeroed_set(chunk, i, unzeroed);
+ } else {
+ VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
+ map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
+ chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
+ map_bias+1)));
+ if (config_debug) {
for (i = map_bias+1; i < chunk_npages-1; i++) {
assert(arena_mapbits_unzeroed_get(chunk, i) ==
unzeroed);
}
}
- arena_mapbits_unallocated_set(chunk, chunk_npages-1,
- arena_maxclass, unzeroed);
}
+ arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
+ unzeroed);
+
+ return (chunk);
+}
+
+static arena_chunk_t *
+arena_chunk_alloc(arena_t *arena)
+{
+ arena_chunk_t *chunk;
+
+ if (arena->spare != NULL)
+ chunk = arena_chunk_init_spare(arena);
+ else
+ chunk = arena_chunk_init_hard(arena);
/* Insert the run into the runs_avail tree. */
arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
@@ -626,8 +657,7 @@ arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
}
static arena_run_t *
-arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
- bool zero)
+arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
arena_run_t *run;
arena_chunk_map_t *mapelm, key;
@@ -642,7 +672,7 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
LG_PAGE));
- arena_run_split(arena, run, size, large, binind, zero);
+ arena_run_split_large(arena, run, size, zero);
return (run);
}
@@ -650,19 +680,72 @@ arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
}
static arena_run_t *
-arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
- bool zero)
+arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_run_t *run;
+
+ assert(size <= arena_maxclass);
+ assert((size & PAGE_MASK) == 0);
+
+ /* Search the arena's chunks for the lowest best fit. */
+ run = arena_run_alloc_large_helper(arena, size, zero);
+ if (run != NULL)
+ return (run);
+
+ /*
+ * No usable runs. Create a new chunk from which to allocate the run.
+ */
+ chunk = arena_chunk_alloc(arena);
+ if (chunk != NULL) {
+ run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
+ arena_run_split_large(arena, run, size, zero);
+ return (run);
+ }
+
+ /*
+ * arena_chunk_alloc() failed, but another thread may have made
+ * sufficient memory available while this one dropped arena->lock in
+ * arena_chunk_alloc(), so search one more time.
+ */
+ return (arena_run_alloc_large_helper(arena, size, zero));
+}
+
+static arena_run_t *
+arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
+{
+ arena_run_t *run;
+ arena_chunk_map_t *mapelm, key;
+
+ key.bits = size | CHUNK_MAP_KEY;
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
+ if (mapelm != NULL) {
+ arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
+ size_t pageind = (((uintptr_t)mapelm -
+ (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
+ + map_bias;
+
+ run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
+ LG_PAGE));
+ arena_run_split_small(arena, run, size, binind);
+ return (run);
+ }
+
+ return (NULL);
+}
+
+static arena_run_t *
+arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
assert(size <= arena_maxclass);
assert((size & PAGE_MASK) == 0);
- assert((large && binind == BININD_INVALID) || (large == false && binind
- != BININD_INVALID));
+ assert(binind != BININD_INVALID);
/* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_helper(arena, size, large, binind, zero);
+ run = arena_run_alloc_small_helper(arena, size, binind);
if (run != NULL)
return (run);
@@ -672,7 +755,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split(arena, run, size, large, binind, zero);
+ arena_run_split_small(arena, run, size, binind);
return (run);
}
@@ -681,7 +764,7 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
- return (arena_run_alloc_helper(arena, size, large, binind, zero));
+ return (arena_run_alloc_small_helper(arena, size, binind));
}
static inline void
@@ -707,48 +790,42 @@ arena_maybe_purge(arena_t *arena)
arena_purge(arena, false);
}
-static inline size_t
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
+static arena_chunk_t *
+chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{
- size_t npurged;
- ql_head(arena_chunk_map_t) mapelms;
- arena_chunk_map_t *mapelm;
- size_t pageind, npages;
- size_t nmadvise;
+ size_t *ndirty = (size_t *)arg;
- ql_new(&mapelms);
+ assert(chunk->ndirty != 0);
+ *ndirty += chunk->ndirty;
+ return (NULL);
+}
+
+static size_t
+arena_compute_npurgatory(arena_t *arena, bool all)
+{
+ size_t npurgatory, npurgeable;
/*
- * If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail, and 2) so that it cannot be
- * completely discarded by another thread while arena->lock is dropped
- * by this thread. Note that the arena_run_dalloc() call will
- * implicitly deallocate the chunk, so no explicit action is required
- * in this function to deallocate the chunk.
- *
- * Note that once a chunk contains dirty pages, it cannot again contain
- * a single run unless 1) it is a dirty run, or 2) this function purges
- * dirty pages and causes the transition to a single clean run. Thus
- * (chunk == arena->spare) is possible, but it is not possible for
- * this function to be called on the spare unless it contains a dirty
- * run.
+ * Compute the minimum number of pages that this thread should try to
+ * purge.
*/
- if (chunk == arena->spare) {
- assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
- assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
+ npurgeable = arena->ndirty - arena->npurgatory;
- arena_chunk_alloc(arena);
- }
+ if (all == false) {
+ size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
- if (config_stats)
- arena->stats.purged += chunk->ndirty;
+ npurgatory = npurgeable - threshold;
+ } else
+ npurgatory = npurgeable;
- /*
- * Operate on all dirty runs if there is no clean/dirty run
- * fragmentation.
- */
- if (chunk->nruns_adjac == 0)
- all = true;
+ return (npurgatory);
+}
+
+static void
+arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
+ arena_chunk_mapelms_t *mapelms)
+{
+ size_t pageind, npages;
/*
* Temporarily allocate free dirty runs within chunk. If all is false,
@@ -756,7 +833,7 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
* all dirty runs.
*/
for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- mapelm = arena_mapp_get(chunk, pageind);
+ arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
size_t run_size =
arena_mapbits_unallocated_size_get(chunk, pageind);
@@ -772,11 +849,11 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
arena_run_t *run = (arena_run_t *)((uintptr_t)
chunk + (uintptr_t)(pageind << LG_PAGE));
- arena_run_split(arena, run, run_size, true,
- BININD_INVALID, false);
+ arena_run_split_large(arena, run, run_size,
+ false);
/* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link);
- ql_tail_insert(&mapelms, mapelm, u.ql_link);
+ ql_tail_insert(mapelms, mapelm, u.ql_link);
}
} else {
/* Skip run. */
@@ -800,12 +877,20 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
assert(pageind == chunk_npages);
assert(chunk->ndirty == 0 || all == false);
assert(chunk->nruns_adjac == 0);
+}
+
+static size_t
+arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
+ arena_chunk_mapelms_t *mapelms)
+{
+ size_t npurged, pageind, npages, nmadvise;
+ arena_chunk_map_t *mapelm;
malloc_mutex_unlock(&arena->lock);
if (config_stats)
nmadvise = 0;
npurged = 0;
- ql_foreach(mapelm, &mapelms, u.ql_link) {
+ ql_foreach(mapelm, mapelms, u.ql_link) {
bool unzeroed;
size_t flag_unzeroed, i;
@@ -839,30 +924,75 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
if (config_stats)
arena->stats.nmadvise += nmadvise;
+ return (npurged);
+}
+
+static void
+arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
+ arena_chunk_mapelms_t *mapelms)
+{
+ arena_chunk_map_t *mapelm;
+ size_t pageind;
+
/* Deallocate runs. */
- for (mapelm = ql_first(&mapelms); mapelm != NULL;
- mapelm = ql_first(&mapelms)) {
+ for (mapelm = ql_first(mapelms); mapelm != NULL;
+ mapelm = ql_first(mapelms)) {
arena_run_t *run;
pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t)) + map_bias;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
LG_PAGE));
- ql_remove(&mapelms, mapelm, u.ql_link);
+ ql_remove(mapelms, mapelm, u.ql_link);
arena_run_dalloc(arena, run, false, true);
}
-
- return (npurged);
}
-static arena_chunk_t *
-chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
+static inline size_t
+arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
- size_t *ndirty = (size_t *)arg;
+ size_t npurged;
+ arena_chunk_mapelms_t mapelms;
- assert(chunk->ndirty != 0);
- *ndirty += chunk->ndirty;
- return (NULL);
+ ql_new(&mapelms);
+
+ /*
+ * If chunk is the spare, temporarily re-allocate it, 1) so that its
+ * run is reinserted into runs_avail, and 2) so that it cannot be
+ * completely discarded by another thread while arena->lock is dropped
+ * by this thread. Note that the arena_run_dalloc() call will
+ * implicitly deallocate the chunk, so no explicit action is required
+ * in this function to deallocate the chunk.
+ *
+ * Note that once a chunk contains dirty pages, it cannot again contain
+ * a single run unless 1) it is a dirty run, or 2) this function purges
+ * dirty pages and causes the transition to a single clean run. Thus
+ * (chunk == arena->spare) is possible, but it is not possible for
+ * this function to be called on the spare unless it contains a dirty
+ * run.
+ */
+ if (chunk == arena->spare) {
+ assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
+ assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
+
+ arena_chunk_alloc(arena);
+ }
+
+ if (config_stats)
+ arena->stats.purged += chunk->ndirty;
+
+ /*
+ * Operate on all dirty runs if there is no clean/dirty run
+ * fragmentation.
+ */
+ if (chunk->nruns_adjac == 0)
+ all = true;
+
+ arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
+ npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
+ arena_chunk_unstash_purged(arena, chunk, &mapelms);
+
+ return (npurged);
}
static void
@@ -885,21 +1015,11 @@ arena_purge(arena_t *arena, bool all)
arena->stats.npurge++;
/*
- * Compute the minimum number of pages that this thread should try to
- * purge, and add the result to arena->npurgatory. This will keep
- * multiple threads from racing to reduce ndirty below the threshold.
+ * Add the minimum number of pages this thread should try to purge to
+ * arena->npurgatory. This will keep multiple threads from racing to
+ * reduce ndirty below the threshold.
*/
- {
- size_t npurgeable = arena->ndirty - arena->npurgatory;
-
- if (all == false) {
- size_t threshold = (arena->nactive >>
- opt_lg_dirty_mult);
-
- npurgatory = npurgeable - threshold;
- } else
- npurgatory = npurgeable;
- }
+ npurgatory = arena_compute_npurgatory(arena, all);
arena->npurgatory += npurgatory;
while (npurgatory > 0) {
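arena_compute_npurgatory(), factored out above and called here, keeps the ratio of dirty to active pages at or below 1/2^opt_lg_dirty_mult. A worked example, assuming the default lg_dirty_mult of 3 (an 8:1 active:dirty ceiling), no purge already in flight, and made-up page counts:

    size_t nactive = 10000;                 /* active pages in the arena */
    size_t ndirty = 2000;                   /* dirty (madvisable) pages */
    size_t npurgatory_inflight = 0;         /* arena->npurgatory */

    size_t npurgeable = ndirty - npurgatory_inflight;      /* 2000 */
    size_t threshold = nactive >> 3;                        /* 1250 */
    size_t npurgatory = npurgeable - threshold;             /* purge 750 */
    /* After purging, ndirty == 1250 == nactive/8: back at the ceiling. */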
@@ -966,61 +1086,12 @@ arena_purge_all(arena_t *arena)
}
static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
+arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
+ size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{
- arena_chunk_t *chunk;
- size_t size, run_ind, run_pages, flag_dirty;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- assert(run_ind >= map_bias);
- assert(run_ind < chunk_npages);
- if (arena_mapbits_large_get(chunk, run_ind) != 0) {
- size = arena_mapbits_large_size_get(chunk, run_ind);
- assert(size == PAGE ||
- arena_mapbits_large_size_get(chunk,
- run_ind+(size>>LG_PAGE)-1) == 0);
- } else {
- size_t binind = arena_bin_index(arena, run->bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- size = bin_info->run_size;
- }
- run_pages = (size >> LG_PAGE);
- if (config_stats) {
- /*
- * Update stats_cactive if nactive is crossing a chunk
- * multiple.
- */
- size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
- CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_sub(cactive_diff);
- }
- arena->nactive -= run_pages;
-
- /*
- * The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated and the caller
- * doesn't claim to have cleaned it.
- */
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
- dirty = true;
- flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
-
- /* Mark pages as unallocated in the chunk map. */
- if (dirty) {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- CHUNK_MAP_DIRTY);
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- CHUNK_MAP_DIRTY);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- arena_mapbits_unzeroed_get(chunk, run_ind));
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
- }
+ size_t size = *p_size;
+ size_t run_ind = *p_run_ind;
+ size_t run_pages = *p_run_pages;
/* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages &&
@@ -1050,8 +1121,9 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
}
/* Try to coalesce backward. */
- if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
- == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
+ if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
+ run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
+ flag_dirty) {
size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
run_ind-1);
size_t prun_pages = prun_size >> LG_PAGE;
@@ -1076,6 +1148,62 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
size);
}
+ *p_size = size;
+ *p_run_ind = run_ind;
+ *p_run_pages = run_pages;
+}
+
+static void
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
+{
+ arena_chunk_t *chunk;
+ size_t size, run_ind, run_pages, flag_dirty;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ assert(run_ind >= map_bias);
+ assert(run_ind < chunk_npages);
+ if (arena_mapbits_large_get(chunk, run_ind) != 0) {
+ size = arena_mapbits_large_size_get(chunk, run_ind);
+ assert(size == PAGE ||
+ arena_mapbits_large_size_get(chunk,
+ run_ind+(size>>LG_PAGE)-1) == 0);
+ } else {
+ size_t binind = arena_bin_index(arena, run->bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ size = bin_info->run_size;
+ }
+ run_pages = (size >> LG_PAGE);
+ arena_cactive_update(arena, 0, run_pages);
+ arena->nactive -= run_pages;
+
+ /*
+ * The run is dirty if the caller claims to have dirtied it, as well as
+ * if it was already dirty before being allocated and the caller
+ * doesn't claim to have cleaned it.
+ */
+ assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
+ if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
+ dirty = true;
+ flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
+
+ /* Mark pages as unallocated in the chunk map. */
+ if (dirty) {
+ arena_mapbits_unallocated_set(chunk, run_ind, size,
+ CHUNK_MAP_DIRTY);
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ CHUNK_MAP_DIRTY);
+ } else {
+ arena_mapbits_unallocated_set(chunk, run_ind, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind));
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
+ }
+
+ arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
+ flag_dirty);
+
/* Insert into runs_avail, now that coalescing is complete. */
assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
@@ -1243,7 +1371,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
malloc_mutex_unlock(&bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
+ run = arena_run_alloc_small(arena, bin_info->run_size, binind);
if (run != NULL) {
bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
(uintptr_t)bin_info->bitmap_offset);
@@ -1266,7 +1394,7 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
}
/*
- * arena_run_alloc() failed, but another thread may have made
+ * arena_run_alloc_small() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
@@ -1301,12 +1429,12 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
arena_chunk_t *chunk;
/*
- * arena_run_alloc() may have allocated run, or it may
- * have pulled run from the bin's run tree. Therefore
- * it is unsafe to make any assumptions about how run
- * has previously been used, and arena_bin_lower_run()
- * must be called, as if a region were just deallocated
- * from the run.
+ * arena_run_alloc_small() may have allocated run, or
+ * it may have pulled run from the bin's run tree.
+ * Therefore it is unsafe to make any assumptions about
+ * how run has previously been used, and
+ * arena_bin_lower_run() must be called, as if a region
+ * were just deallocated from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
if (run->nfree == bin_info->nregs)
@@ -1384,8 +1512,28 @@ arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
}
}
-void
-arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+#ifdef JEMALLOC_JET
+#undef arena_redzone_corruption
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
+#endif
+static void
+arena_redzone_corruption(void *ptr, size_t usize, bool after,
+ size_t offset, uint8_t byte)
+{
+
+ malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
+ "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
+ after ? "after" : "before", ptr, usize, byte);
+}
+#ifdef JEMALLOC_JET
+#undef arena_redzone_corruption
+#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
+arena_redzone_corruption_t *arena_redzone_corruption =
+ JEMALLOC_N(arena_redzone_corruption_impl);
+#endif
+
+static void
+arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
{
size_t size = bin_info->reg_size;
size_t redzone_size = bin_info->redzone_size;
@@ -1393,29 +1541,61 @@ arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
bool error = false;
for (i = 1; i <= redzone_size; i++) {
- unsigned byte;
- if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
+ if (*byte != 0xa5) {
error = true;
- malloc_printf("<jemalloc>: Corrupt redzone "
- "%zu byte%s before %p (size %zu), byte=%#x\n", i,
- (i == 1) ? "" : "s", ptr, size, byte);
+ arena_redzone_corruption(ptr, size, false, i, *byte);
+ if (reset)
+ *byte = 0xa5;
}
}
for (i = 0; i < redzone_size; i++) {
- unsigned byte;
- if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
+ uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
+ if (*byte != 0xa5) {
error = true;
- malloc_printf("<jemalloc>: Corrupt redzone "
- "%zu byte%s after end of %p (size %zu), byte=%#x\n",
- i, (i == 1) ? "" : "s", ptr, size, byte);
+ arena_redzone_corruption(ptr, size, true, i, *byte);
+ if (reset)
+ *byte = 0xa5;
}
}
if (opt_abort && error)
abort();
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
+#endif
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+ size_t redzone_size = bin_info->redzone_size;
+
+ arena_redzones_validate(ptr, bin_info, false);
memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
bin_info->reg_interval);
}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_small
+#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
+arena_dalloc_junk_small_t *arena_dalloc_junk_small =
+ JEMALLOC_N(arena_dalloc_junk_small_impl);
+#endif
+
+void
+arena_quarantine_junk_small(void *ptr, size_t usize)
+{
+ size_t binind;
+ arena_bin_info_t *bin_info;
+ cassert(config_fill);
+ assert(opt_junk);
+ assert(opt_quarantine);
+ assert(usize <= SMALL_MAXCLASS);
+
+ binind = SMALL_SIZE2BIN(usize);
+ bin_info = &arena_bin_info[binind];
+ arena_redzones_validate(ptr, bin_info, true);
+}
void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
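
The redzone checks added above surround every small region with 0xa5 guard bytes, re-validate them in arena_redzones_validate() when the region is junk-filled or quarantined, and report any mismatch through arena_redzone_corruption(). The fragment below is a minimal standalone sketch of that guard-byte idea only; REDZONE_SIZE and the guarded_alloc()/guarded_free() helpers are illustrative names, not jemalloc's.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REDZONE_SIZE	16
#define REDZONE_BYTE	0xa5

/* Allocate size usable bytes surrounded by guard bytes. */
static void *
guarded_alloc(size_t size)
{
	uint8_t *raw = malloc(size + 2 * REDZONE_SIZE);

	if (raw == NULL)
		return (NULL);
	memset(raw, REDZONE_BYTE, REDZONE_SIZE);
	memset(raw + REDZONE_SIZE + size, REDZONE_BYTE, REDZONE_SIZE);
	return (raw + REDZONE_SIZE);
}

/* Verify both redzones before releasing the region. */
static void
guarded_free(void *ptr, size_t size)
{
	uint8_t *raw = (uint8_t *)ptr - REDZONE_SIZE;
	size_t i;

	for (i = 0; i < REDZONE_SIZE; i++) {
		if (raw[i] != REDZONE_BYTE ||
		    raw[REDZONE_SIZE + size + i] != REDZONE_BYTE) {
			fprintf(stderr, "redzone corruption near %p\n", ptr);
			abort();
		}
	}
	free(raw);
}

int
main(void)
{
	char *p = guarded_alloc(8);

	if (p == NULL)
		return (1);
	memcpy(p, "abcdefg", 8);
	guarded_free(p, 8);	/* writing p[8] beforehand would trip the check */
	return (0);
}
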
@@ -1458,6 +1638,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
} else if (opt_zero)
memset(ret, 0, size);
}
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
if (config_fill && opt_junk) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
@@ -1466,7 +1647,6 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
@@ -1480,7 +1660,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
/* Large allocation. */
size = PAGE_CEILING(size);
malloc_mutex_lock(&arena->lock);
- ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
+ ret = (void *)arena_run_alloc_large(arena, size, zero);
if (ret == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
@@ -1526,7 +1706,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
alloc_size = size + alignment - PAGE;
malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
+ run = arena_run_alloc_large(arena, alloc_size, false);
if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
@@ -1546,6 +1726,7 @@ arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
false);
}
+ arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
if (config_stats) {
arena->stats.nmalloc_large++;
@@ -1749,21 +1930,38 @@ arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
+#endif
+static void
+arena_dalloc_junk_large(void *ptr, size_t usize)
+{
+
+ if (config_fill && opt_junk)
+ memset(ptr, 0x5a, usize);
+}
+#ifdef JEMALLOC_JET
+#undef arena_dalloc_junk_large
+#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
+arena_dalloc_junk_large_t *arena_dalloc_junk_large =
+ JEMALLOC_N(arena_dalloc_junk_large_impl);
+#endif
+
void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
if (config_fill || config_stats) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t size = arena_mapbits_large_size_get(chunk, pageind);
+ size_t usize = arena_mapbits_large_size_get(chunk, pageind);
- if (config_fill && config_stats && opt_junk)
- memset(ptr, 0x5a, size);
+ arena_dalloc_junk_large(ptr, usize);
if (config_stats) {
arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
+ arena->stats.allocated_large -= usize;
+ arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
+ arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
}
}
@@ -1834,9 +2032,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t flag_dirty;
size_t splitsize = (oldsize + followsize <= size + extra)
? followsize : size + extra - oldsize;
- arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
- ((pageind+npages) << LG_PAGE)), splitsize, true,
- BININD_INVALID, zero);
+ arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
+ ((pageind+npages) << LG_PAGE)), splitsize, zero);
size = oldsize + splitsize;
npages = size >> LG_PAGE;
@@ -1875,6 +2072,26 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
return (true);
}
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
+#endif
+static void
+arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
+{
+
+ if (config_fill && opt_junk) {
+ memset((void *)((uintptr_t)ptr + usize), 0x5a,
+ old_usize - usize);
+ }
+}
+#ifdef JEMALLOC_JET
+#undef arena_ralloc_junk_large
+#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
+arena_ralloc_junk_large_t *arena_ralloc_junk_large =
+ JEMALLOC_N(arena_ralloc_junk_large_impl);
+#endif
+
/*
 * Try to resize a large allocation, in order to avoid copying. This will
 * always fail when growing an object if the following run is already in use.
@@ -1888,10 +2105,6 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
psize = PAGE_CEILING(size + extra);
if (psize == oldsize) {
/* Same size class. */
- if (config_fill && opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
- size);
- }
return (false);
} else {
arena_chunk_t *chunk;
@@ -1902,10 +2115,7 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
if (psize < oldsize) {
/* Fill before shrinking in order to avoid a race. */
- if (config_fill && opt_junk) {
- memset((void *)((uintptr_t)ptr + size), 0x5a,
- oldsize - size);
- }
+ arena_ralloc_junk_large(ptr, oldsize, psize);
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
psize);
return (false);
@@ -1913,17 +2123,23 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
oldsize, PAGE_CEILING(size),
psize - PAGE_CEILING(size), zero);
- if (config_fill && ret == false && zero == false &&
- opt_zero) {
- memset((void *)((uintptr_t)ptr + oldsize), 0,
- size - oldsize);
+ if (config_fill && ret == false && zero == false) {
+ if (opt_junk) {
+ memset((void *)((uintptr_t)ptr +
+ oldsize), 0xa5, isalloc(ptr,
+ config_prof) - oldsize);
+ } else if (opt_zero) {
+ memset((void *)((uintptr_t)ptr +
+ oldsize), 0, isalloc(ptr,
+ config_prof) - oldsize);
+ }
}
return (ret);
}
}
}
-void *
+bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
@@ -1938,25 +2154,20 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
if ((size + extra <= SMALL_MAXCLASS &&
SMALL_SIZE2BIN(size + extra) ==
SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
- size + extra >= oldsize)) {
- if (config_fill && opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size),
- 0x5a, oldsize - size);
- }
- return (ptr);
- }
+ size + extra >= oldsize))
+ return (false);
} else {
assert(size <= arena_maxclass);
if (size + extra > SMALL_MAXCLASS) {
if (arena_ralloc_large(ptr, oldsize, size,
extra, zero) == false)
- return (ptr);
+ return (false);
}
}
}
/* Reallocation would require a move. */
- return (NULL);
+ return (true);
}
void *
@@ -1968,9 +2179,8 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t copysize;
/* Try to avoid moving the allocation. */
- ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
- if (ret != NULL)
- return (ret);
+ if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
+ return (ptr);
/*
* size and oldsize are different enough that we need to move the
@@ -1981,7 +2191,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize = sa2u(size + extra, alignment);
if (usize == 0)
return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
+ ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
} else
ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
@@ -1993,7 +2203,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
size_t usize = sa2u(size, alignment);
if (usize == 0)
return (NULL);
- ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
arena);
} else
ret = arena_malloc(arena, size, zero, try_tcache_alloc);
@@ -2011,7 +2221,7 @@ arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
copysize = (size < oldsize) ? size : oldsize;
VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
+ iqalloct(ptr, try_tcache_dalloc);
return (ret);
}
@@ -2266,7 +2476,6 @@ bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
bin_info->reg_interval) - pad_size;
} while (try_hdr_size > try_redzone0_offset);
} while (try_run_size <= arena_maxclass
- && try_run_size <= arena_maxclass
&& RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
RUN_MAX_OVRHD_RELAX
&& (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
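
Several arena.c hunks above wrap helpers such as arena_dalloc_junk_small() and arena_ralloc_junk_large() in JEMALLOC_JET blocks: in the test build the definition is renamed to an *_impl symbol and reached through a writable function pointer so tests can intercept it, while release builds keep the direct call. Below is a simplified sketch of that pattern under an assumed TEST_HOOKS macro; the junk_fill name is illustrative.

#include <stddef.h>
#include <stdio.h>

typedef void (junk_fill_t)(void *, size_t);

#ifdef TEST_HOOKS
/* In the test build, the real definition gets an *_impl name... */
#define junk_fill junk_fill_impl
#endif
static void
junk_fill(void *ptr, size_t usize)
{

	printf("junk-filling %zu bytes at %p\n", usize, ptr);
}
#ifdef TEST_HOOKS
#undef junk_fill
/* ...and callers reach it through a pointer that tests may overwrite. */
junk_fill_t *junk_fill = junk_fill_impl;
#endif

int
main(void)
{
	char buf[32];

	junk_fill(buf, sizeof(buf));	/* direct call or via pointer, per build */
	return (0);
}
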
diff --git a/dep/jemalloc/src/bitmap.c b/dep/jemalloc/src/bitmap.c
index b47e2629093..e2bd907d558 100644
--- a/dep/jemalloc/src/bitmap.c
+++ b/dep/jemalloc/src/bitmap.c
@@ -1,4 +1,4 @@
-#define JEMALLOC_BITMAP_C_
+#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
diff --git a/dep/jemalloc/src/chunk.c b/dep/jemalloc/src/chunk.c
index 044f76be96c..90ab116ae5f 100644
--- a/dep/jemalloc/src/chunk.c
+++ b/dep/jemalloc/src/chunk.c
@@ -180,7 +180,7 @@ chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
label_return:
if (ret != NULL) {
if (config_ivsalloc && base == false) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+ if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
chunk_dealloc(ret, size, true);
return (NULL);
}
@@ -214,7 +214,7 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
size_t size)
{
bool unzeroed;
- extent_node_t *xnode, *node, *prev, key;
+ extent_node_t *xnode, *node, *prev, *xprev, key;
unzeroed = pages_purge(chunk, size);
VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
@@ -226,6 +226,8 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* held.
*/
xnode = base_node_alloc();
+ /* Use xprev to implement conditional deferred deallocation of prev. */
+ xprev = NULL;
malloc_mutex_lock(&chunks_mtx);
key.addr = (void *)((uintptr_t)chunk + size);
@@ -242,8 +244,6 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->size += size;
node->zeroed = (node->zeroed && (unzeroed == false));
extent_tree_szad_insert(chunks_szad, node);
- if (xnode != NULL)
- base_node_dealloc(xnode);
} else {
/* Coalescing forward failed, so insert a new node. */
if (xnode == NULL) {
@@ -253,10 +253,10 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
* already been purged, so this is only a virtual
* memory leak.
*/
- malloc_mutex_unlock(&chunks_mtx);
- return;
+ goto label_return;
}
node = xnode;
+ xnode = NULL; /* Prevent deallocation below. */
node->addr = chunk;
node->size = size;
node->zeroed = (unzeroed == false);
@@ -282,9 +282,19 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
node->zeroed = (node->zeroed && prev->zeroed);
extent_tree_szad_insert(chunks_szad, node);
- base_node_dealloc(prev);
+ xprev = prev;
}
+
+label_return:
malloc_mutex_unlock(&chunks_mtx);
+ /*
+ * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
+ * avoid potential deadlock.
+ */
+ if (xnode != NULL)
+ base_node_dealloc(xnode);
+ if (xprev != NULL)
+ base_node_dealloc(xprev);
}
void
@@ -311,7 +321,7 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
assert((size & chunksize_mask) == 0);
if (config_ivsalloc)
- rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+ rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
if (config_stats || config_prof) {
malloc_mutex_lock(&chunks_mtx);
assert(stats_chunks.curchunks >= (size / chunksize));
@@ -346,7 +356,7 @@ chunk_boot(void)
extent_tree_ad_new(&chunks_ad_dss);
if (config_ivsalloc) {
chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
- opt_lg_chunk);
+ opt_lg_chunk, base_alloc, NULL);
if (chunks_rtree == NULL)
return (true);
}
@@ -358,7 +368,7 @@ void
chunk_prefork(void)
{
- malloc_mutex_lock(&chunks_mtx);
+ malloc_mutex_prefork(&chunks_mtx);
if (config_ivsalloc)
rtree_prefork(chunks_rtree);
chunk_dss_prefork();
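
The chunk_record() rework above stops calling base_node_dealloc() while chunks_mtx is held; it only records the surplus nodes in xnode/xprev and releases them after the unlock, so the node deallocator can never deadlock against the chunk mutex. A generic sketch of that shape, using a hypothetical linked list and pthread mutex rather than jemalloc's own types:

#include <pthread.h>
#include <stdlib.h>

typedef struct node_s node_t;
struct node_s {
	node_t	*next;
	int	key;
};

static pthread_mutex_t	list_mtx = PTHREAD_MUTEX_INITIALIZER;
static node_t		*list_head;

static void
list_insert(node_t *node)
{

	pthread_mutex_lock(&list_mtx);
	node->next = list_head;
	list_head = node;
	pthread_mutex_unlock(&list_mtx);
}

static void
list_remove(int key)
{
	node_t **cur, *garbage = NULL;

	pthread_mutex_lock(&list_mtx);
	for (cur = &list_head; *cur != NULL; cur = &(*cur)->next) {
		if ((*cur)->key == key) {
			/* Unlink now, but defer the deallocation. */
			garbage = *cur;
			*cur = garbage->next;
			break;
		}
	}
	pthread_mutex_unlock(&list_mtx);

	/* Safe: freeing here cannot deadlock against list_mtx. */
	if (garbage != NULL)
		free(garbage);
}

int
main(void)
{
	node_t *node = malloc(sizeof(*node));

	if (node == NULL)
		return (1);
	node->key = 42;
	list_insert(node);
	list_remove(42);
	return (0);
}
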
diff --git a/dep/jemalloc/src/chunk_dss.c b/dep/jemalloc/src/chunk_dss.c
index 24781cc52dc..510bb8bee85 100644
--- a/dep/jemalloc/src/chunk_dss.c
+++ b/dep/jemalloc/src/chunk_dss.c
@@ -28,16 +28,17 @@ static void *dss_max;
/******************************************************************************/
-#ifndef JEMALLOC_HAVE_SBRK
static void *
-sbrk(intptr_t increment)
+chunk_dss_sbrk(intptr_t increment)
{
+#ifdef JEMALLOC_HAVE_SBRK
+ return (sbrk(increment));
+#else
not_implemented();
-
return (NULL);
-}
#endif
+}
dss_prec_t
chunk_dss_prec_get(void)
@@ -93,7 +94,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
*/
do {
/* Get the current end of the DSS. */
- dss_max = sbrk(0);
+ dss_max = chunk_dss_sbrk(0);
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
@@ -117,7 +118,7 @@ chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
return (NULL);
}
incr = gap_size + cpad_size + size;
- dss_prev = sbrk(incr);
+ dss_prev = chunk_dss_sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
dss_max = dss_next;
@@ -163,7 +164,7 @@ chunk_dss_boot(void)
if (malloc_mutex_init(&dss_mtx))
return (true);
- dss_base = sbrk(0);
+ dss_base = chunk_dss_sbrk(0);
dss_prev = dss_base;
dss_max = dss_base;
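
chunk_dss.c used to define a stub sbrk() when the platform lacked one; the hunk above inverts that by always providing a chunk_dss_sbrk() wrapper that forwards to the real sbrk(2) where available and reports the feature as unimplemented otherwise. A minimal sketch of that wrapper shape, assuming a hypothetical HAVE_SBRK feature macro:

#define _DEFAULT_SOURCE	/* for sbrk() on glibc */
#include <stdint.h>
#include <stdio.h>
#ifdef HAVE_SBRK
#include <unistd.h>
#endif

static void *
dss_sbrk(intptr_t increment)
{

#ifdef HAVE_SBRK
	return (sbrk(increment));
#else
	/* Keep the build working; callers treat (void *)-1 as failure. */
	(void)increment;
	fprintf(stderr, "sbrk(2) is not supported on this platform\n");
	return ((void *)-1);
#endif
}

int
main(void)
{

	printf("current break: %p\n", dss_sbrk(0));
	return (0);
}
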
diff --git a/dep/jemalloc/src/chunk_mmap.c b/dep/jemalloc/src/chunk_mmap.c
index 8a42e75915f..2056d793f05 100644
--- a/dep/jemalloc/src/chunk_mmap.c
+++ b/dep/jemalloc/src/chunk_mmap.c
@@ -43,7 +43,7 @@ pages_map(void *addr, size_t size)
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
- buferror(buf, sizeof(buf));
+ buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc: Error in munmap(): %s\n",
buf);
if (opt_abort)
@@ -69,7 +69,7 @@ pages_unmap(void *addr, size_t size)
{
char buf[BUFERROR_BUF];
- buferror(buf, sizeof(buf));
+ buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
"VirtualFree"
diff --git a/dep/jemalloc/src/ckh.c b/dep/jemalloc/src/ckh.c
index 2f38348bb85..04c52966193 100644
--- a/dep/jemalloc/src/ckh.c
+++ b/dep/jemalloc/src/ckh.c
@@ -49,7 +49,7 @@ static void ckh_shrink(ckh_t *ckh);
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE_C size_t
ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
{
ckhc_t *cell;
@@ -67,7 +67,7 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
-JEMALLOC_INLINE size_t
+JEMALLOC_INLINE_C size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
size_t hashes[2], bucket, cell;
@@ -88,7 +88,7 @@ ckh_isearch(ckh_t *ckh, const void *key)
return (cell);
}
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
const void *data)
{
@@ -120,7 +120,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
void const **argdata)
{
@@ -190,7 +190,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
}
}
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
size_t hashes[2], bucket;
@@ -219,7 +219,7 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
-JEMALLOC_INLINE bool
+JEMALLOC_INLINE_C bool
ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
{
size_t count, i, nins;
diff --git a/dep/jemalloc/src/ctl.c b/dep/jemalloc/src/ctl.c
index f2ef4e60611..cc2c5aef570 100644
--- a/dep/jemalloc/src/ctl.c
+++ b/dep/jemalloc/src/ctl.c
@@ -546,43 +546,30 @@ ctl_arena_refresh(arena_t *arena, unsigned i)
static bool
ctl_grow(void)
{
- size_t astats_size;
ctl_arena_stats_t *astats;
arena_t **tarenas;
- /* Extend arena stats and arenas arrays. */
- astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
- if (ctl_stats.narenas == narenas_auto) {
- /* ctl_stats.arenas and arenas came from base_alloc(). */
- astats = (ctl_arena_stats_t *)imalloc(astats_size);
- if (astats == NULL)
- return (true);
- memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
- sizeof(ctl_arena_stats_t));
-
- tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
- sizeof(arena_t *));
- if (tarenas == NULL) {
- idalloc(astats);
- return (true);
- }
- memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
- } else {
- astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
- astats_size, 0, 0, false, false);
- if (astats == NULL)
- return (true);
-
- tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
- sizeof(arena_t *), 0, 0, false, false);
- if (tarenas == NULL)
- return (true);
+ /* Allocate extended arena stats and arenas arrays. */
+ astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
+ sizeof(ctl_arena_stats_t));
+ if (astats == NULL)
+ return (true);
+ tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
+ sizeof(arena_t *));
+ if (tarenas == NULL) {
+ idalloc(astats);
+ return (true);
}
- /* Initialize the new astats and arenas elements. */
+
+ /* Initialize the new astats element. */
+ memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+ sizeof(ctl_arena_stats_t));
memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
- if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
+ if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
+ idalloc(tarenas);
+ idalloc(astats);
return (true);
- tarenas[ctl_stats.narenas] = NULL;
+ }
/* Swap merged stats to their new location. */
{
ctl_arena_stats_t tstats;
@@ -593,13 +580,34 @@ ctl_grow(void)
memcpy(&astats[ctl_stats.narenas + 1], &tstats,
sizeof(ctl_arena_stats_t));
}
+ /* Initialize the new arenas element. */
+ tarenas[ctl_stats.narenas] = NULL;
+ {
+ arena_t **arenas_old = arenas;
+ /*
+ * Swap extended arenas array into place. Although ctl_mtx
+ * protects this function from other threads extending the
+ * array, it does not protect from other threads mutating it
+ * (i.e. initializing arenas and setting array elements to
+ * point to them). Therefore, array copying must happen under
+ * the protection of arenas_lock.
+ */
+ malloc_mutex_lock(&arenas_lock);
+ arenas = tarenas;
+ memcpy(arenas, arenas_old, ctl_stats.narenas *
+ sizeof(arena_t *));
+ narenas_total++;
+ arenas_extend(narenas_total - 1);
+ malloc_mutex_unlock(&arenas_lock);
+ /*
+ * Deallocate arenas_old only if it came from imalloc() (not
+ * base_alloc()).
+ */
+ if (ctl_stats.narenas != narenas_auto)
+ idalloc(arenas_old);
+ }
ctl_stats.arenas = astats;
ctl_stats.narenas++;
- malloc_mutex_lock(&arenas_lock);
- arenas = tarenas;
- narenas_total++;
- arenas_extend(narenas_total - 1);
- malloc_mutex_unlock(&arenas_lock);
return (false);
}
@@ -921,7 +929,7 @@ void
ctl_prefork(void)
{
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_prefork(&ctl_mtx);
}
void
@@ -1102,6 +1110,8 @@ label_return: \
return (ret); \
}
+/******************************************************************************/
+
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
@@ -1109,7 +1119,7 @@ epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
- uint64_t newval;
+ UNUSED uint64_t newval;
malloc_mutex_lock(&ctl_mtx);
WRITE(newval, uint64_t);
@@ -1123,49 +1133,52 @@ label_return:
return (ret);
}
-static int
-thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
- bool oldval;
-
- if (config_tcache == false)
- return (ENOENT);
-
- oldval = tcache_enabled_get();
- if (newp != NULL) {
- if (newlen != sizeof(bool)) {
- ret = EINVAL;
- goto label_return;
- }
- tcache_enabled_set(*(bool *)newp);
- }
- READ(oldval, bool);
-
- ret = 0;
-label_return:
- return (ret);
-}
-
-static int
-thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
+/******************************************************************************/
- if (config_tcache == false)
- return (ENOENT);
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_dss)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_mremap)
+CTL_RO_BOOL_CONFIG_GEN(config_munmap)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
- READONLY();
- WRITEONLY();
+/******************************************************************************/
- tcache_flush();
+CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
+CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
+CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
- ret = 0;
-label_return:
- return (ret);
-}
+/******************************************************************************/
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1227,50 +1240,49 @@ CTL_RO_NL_CGEN(config_stats, thread_deallocated,
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
&thread_allocated_tsd_get()->deallocated, uint64_t *)
-/******************************************************************************/
+static int
+thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ bool oldval;
-CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_dss)
-CTL_RO_BOOL_CONFIG_GEN(config_fill)
-CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_mremap)
-CTL_RO_BOOL_CONFIG_GEN(config_munmap)
-CTL_RO_BOOL_CONFIG_GEN(config_prof)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
-CTL_RO_BOOL_CONFIG_GEN(config_stats)
-CTL_RO_BOOL_CONFIG_GEN(config_tcache)
-CTL_RO_BOOL_CONFIG_GEN(config_tls)
-CTL_RO_BOOL_CONFIG_GEN(config_utrace)
-CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
-CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
+ if (config_tcache == false)
+ return (ENOENT);
-/******************************************************************************/
+ oldval = tcache_enabled_get();
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ tcache_enabled_set(*(bool *)newp);
+ }
+ READ(oldval, bool);
-CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
-CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
-CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
-CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
-CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
-CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
-CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
-CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
-CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
-CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
-CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
+ ret = 0;
+label_return:
+ return (ret);
+}
+
+static int
+thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (config_tcache == false)
+ return (ENOENT);
+
+ READONLY();
+ WRITEONLY();
+
+ tcache_flush();
+
+ ret = 0;
+label_return:
+ return (ret);
+}
/******************************************************************************/
@@ -1382,31 +1394,8 @@ label_return:
return (ret);
}
-
/******************************************************************************/
-CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
-CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
-static const ctl_named_node_t *
-arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > NBINS)
- return (NULL);
- return (super_arenas_bin_i_node);
-}
-
-CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
-static const ctl_named_node_t *
-arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > nlclasses)
- return (NULL);
- return (super_arenas_lrun_i_node);
-}
-
static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen)
@@ -1460,7 +1449,28 @@ CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+static const ctl_named_node_t *
+arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > NBINS)
+ return (NULL);
+ return (super_arenas_bin_i_node);
+}
+
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
+CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
+static const ctl_named_node_t *
+arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+
+ if (i > nlclasses)
+ return (NULL);
+ return (super_arenas_lrun_i_node);
+}
static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1567,6 +1577,11 @@ CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
/******************************************************************************/
+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
+
CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
@@ -1574,6 +1589,20 @@ CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
+
+CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
+CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+ ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+ ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+ ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+ ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
@@ -1637,19 +1666,6 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
return (super_stats_arenas_i_lruns_j_node);
}
-CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
-CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
-CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
-CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
- ctl_stats.arenas[mib[2]].astats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
- ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
- ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
- ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
-
static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1666,8 +1682,3 @@ label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
-
-CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
-CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
-CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
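
The ctl_grow() rewrite above allocates the larger stats and arenas arrays before taking any lock, copies and swaps the shared arenas pointer only while arenas_lock is held, and frees the old array afterwards, and only when it was heap-allocated rather than carved from base_alloc(). The sketch below shows that grow-copy-swap shape for a generic pointer array; table_grow() and the was_heap_allocated flag are illustrative, not jemalloc APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t	table_lock = PTHREAD_MUTEX_INITIALIZER;
static void		**table;
static size_t		table_len;

/* Returns true on failure, mirroring the convention used above. */
static bool
table_grow(bool was_heap_allocated)
{
	void **new_table, **old_table;

	/* Allocate outside the lock; on failure the old table stays intact. */
	new_table = malloc((table_len + 1) * sizeof(void *));
	if (new_table == NULL)
		return (true);

	pthread_mutex_lock(&table_lock);
	old_table = table;
	/* Copy under the lock: other threads may still mutate live slots. */
	memcpy(new_table, old_table, table_len * sizeof(void *));
	new_table[table_len] = NULL;
	table = new_table;
	table_len++;
	pthread_mutex_unlock(&table_lock);

	/* Release the old array only after the swap, and only if we own it. */
	if (was_heap_allocated)
		free(old_table);
	return (false);
}

int
main(void)
{

	table = calloc(1, sizeof(void *));
	if (table == NULL)
		return (1);
	table_len = 1;
	(void)table_grow(true);
	free(table);
	return (0);
}
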
diff --git a/dep/jemalloc/src/huge.c b/dep/jemalloc/src/huge.c
index aa08d43d362..6d86aed881b 100644
--- a/dep/jemalloc/src/huge.c
+++ b/dep/jemalloc/src/huge.c
@@ -78,7 +78,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
return (ret);
}
-void *
+bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{
@@ -89,15 +89,11 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
- if (config_fill && opt_junk && size < oldsize) {
- memset((void *)((uintptr_t)ptr + size), 0x5a,
- oldsize - size);
- }
- return (ptr);
+ return (false);
}
/* Reallocation would require a move. */
- return (NULL);
+ return (true);
}
void *
@@ -108,9 +104,8 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
size_t copysize;
/* Try to avoid moving the allocation. */
- ret = huge_ralloc_no_move(ptr, oldsize, size, extra);
- if (ret != NULL)
- return (ret);
+ if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
+ return (ptr);
/*
* size and oldsize are different enough that we need to use a
@@ -169,23 +164,56 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
char buf[BUFERROR_BUF];
- buferror(buf, sizeof(buf));
+ buferror(get_errno(), buf, sizeof(buf));
malloc_printf("<jemalloc>: Error in mremap(): %s\n",
buf);
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
chunk_dealloc_mmap(ptr, oldsize);
+ } else if (config_fill && zero == false && opt_junk && oldsize
+ < newsize) {
+ /*
+ * mremap(2) clobbers the original mapping, so
+ * junk/zero filling is not preserved. There is no
+ * need to zero fill here, since any trailing
+ * uninitialized memory is demand-zeroed by the
+ * kernel, but junk filling must be redone.
+ */
+ memset(ret + oldsize, 0xa5, newsize - oldsize);
}
} else
#endif
{
memcpy(ret, ptr, copysize);
- iqallocx(ptr, try_tcache_dalloc);
+ iqalloct(ptr, try_tcache_dalloc);
}
return (ret);
}
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
+#endif
+static void
+huge_dalloc_junk(void *ptr, size_t usize)
+{
+
+ if (config_fill && config_dss && opt_junk) {
+ /*
+ * Only bother junk filling if the chunk isn't about to be
+ * unmapped.
+ */
+ if (config_munmap == false || (config_dss && chunk_in_dss(ptr)))
+ memset(ptr, 0x5a, usize);
+ }
+}
+#ifdef JEMALLOC_JET
+#undef huge_dalloc_junk
+#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
+huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
+#endif
+
void
huge_dalloc(void *ptr, bool unmap)
{
@@ -208,8 +236,8 @@ huge_dalloc(void *ptr, bool unmap)
malloc_mutex_unlock(&huge_mtx);
- if (unmap && config_fill && config_dss && opt_junk)
- memset(node->addr, 0x5a, node->size);
+ if (unmap)
+ huge_dalloc_junk(node->addr, node->size);
chunk_dealloc(node->addr, node->size, unmap);
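
arena_ralloc_no_move() and huge_ralloc_no_move() now return bool instead of a pointer: false means the allocation was resized in place, true means a move is required, and only then does the caller fall back to allocate-copy-free. A hedged sketch of that calling convention with a hypothetical resize_no_move() helper that always requests a move:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical stand-in: returns false if the allocation at ptr could be
 * resized in place, true if a move is required (here it always requires one).
 */
static bool
resize_no_move(void *ptr, size_t oldsize, size_t size)
{

	(void)ptr; (void)oldsize; (void)size;
	return (true);
}

static void *
resize(void *ptr, size_t oldsize, size_t size)
{
	void *ret;

	/* Fast path: try to avoid moving the allocation. */
	if (resize_no_move(ptr, oldsize, size) == false)
		return (ptr);

	/* Slow path: allocate, copy the smaller of the two sizes, free. */
	ret = malloc(size);
	if (ret == NULL)
		return (NULL);
	memcpy(ret, ptr, (size < oldsize) ? size : oldsize);
	free(ptr);
	return (ret);
}

int
main(void)
{
	char *p = malloc(8);

	if (p == NULL)
		return (1);
	memset(p, 0, 8);
	p = resize(p, 8, 32);
	free(p);
	return (0);
}
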
diff --git a/dep/jemalloc/src/jemalloc.c b/dep/jemalloc/src/jemalloc.c
index bc350ed953b..563d99f8816 100644
--- a/dep/jemalloc/src/jemalloc.c
+++ b/dep/jemalloc/src/jemalloc.c
@@ -100,18 +100,12 @@ typedef struct {
#endif
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void stats_print_atexit(void);
-static unsigned malloc_ncpus(void);
-static bool malloc_conf_next(char const **opts_p, char const **k_p,
- size_t *klen_p, char const **v_p, size_t *vlen_p);
-static void malloc_conf_error(const char *msg, const char *k, size_t klen,
- const char *v, size_t vlen);
-static void malloc_conf_init(void);
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
static bool malloc_init_hard(void);
-static int imemalign(void **memptr, size_t alignment, size_t size,
- size_t min_alignment);
/******************************************************************************/
/*
@@ -252,7 +246,6 @@ stats_print_atexit(void)
static unsigned
malloc_ncpus(void)
{
- unsigned ret;
long result;
#ifdef _WIN32
@@ -262,14 +255,7 @@ malloc_ncpus(void)
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
- if (result == -1) {
- /* Error. */
- ret = 1;
- } else {
- ret = (unsigned)result;
- }
-
- return (ret);
+ return ((result == -1) ? 1 : (unsigned)result);
}
void
@@ -282,7 +268,7 @@ arenas_cleanup(void *arg)
malloc_mutex_unlock(&arenas_lock);
}
-static JEMALLOC_ATTR(always_inline) void
+JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{
@@ -299,7 +285,7 @@ malloc_thread_init(void)
quarantine_alloc_hook();
}
-static JEMALLOC_ATTR(always_inline) bool
+JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{
@@ -436,8 +422,9 @@ malloc_conf_init(void)
}
break;
case 1: {
+ int linklen = 0;
#ifndef _WIN32
- int linklen;
+ int saved_errno = errno;
const char *linkname =
# ifdef JEMALLOC_PREFIX
"/etc/"JEMALLOC_PREFIX"malloc.conf"
@@ -446,21 +433,20 @@ malloc_conf_init(void)
# endif
;
- if ((linklen = readlink(linkname, buf,
- sizeof(buf) - 1)) != -1) {
- /*
- * Use the contents of the "/etc/malloc.conf"
- * symbolic link's name.
- */
- buf[linklen] = '\0';
- opts = buf;
- } else
-#endif
- {
+ /*
+ * Try to use the contents of the "/etc/malloc.conf"
+ * symbolic link's name.
+ */
+ linklen = readlink(linkname, buf, sizeof(buf) - 1);
+ if (linklen == -1) {
/* No configuration specified. */
- buf[0] = '\0';
- opts = buf;
+ linklen = 0;
+ /* restore errno */
+ set_errno(saved_errno);
}
+#endif
+ buf[linklen] = '\0';
+ opts = buf;
break;
} case 2: {
const char *envname =
@@ -484,8 +470,7 @@ malloc_conf_init(void)
}
break;
} default:
- /* NOTREACHED */
- assert(false);
+ not_reached();
buf[0] = '\0';
opts = buf;
}
@@ -522,14 +507,15 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
- if (um < min) \
+ if (min != 0 && um < min) \
o = min; \
else if (um > max) \
o = max; \
else \
o = um; \
} else { \
- if (um < min || um > max) { \
+ if ((min != 0 && um < min) || \
+ um > max) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
@@ -695,17 +681,6 @@ malloc_init_hard(void)
malloc_conf_init();
-#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
- && !defined(_WIN32))
- /* Register fork handlers. */
- if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
- jemalloc_postfork_child) != 0) {
- malloc_write("<jemalloc>: Error in pthread_atfork()\n");
- if (opt_abort)
- abort();
- }
-#endif
-
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
@@ -745,8 +720,10 @@ malloc_init_hard(void)
return (true);
}
- if (malloc_mutex_init(&arenas_lock))
+ if (malloc_mutex_init(&arenas_lock)) {
+ malloc_mutex_unlock(&init_lock);
return (true);
+ }
/*
* Create enough scaffolding to allow recursive allocation in
@@ -792,9 +769,25 @@ malloc_init_hard(void)
return (true);
}
- /* Get number of CPUs. */
malloc_mutex_unlock(&init_lock);
+ /**********************************************************************/
+ /* Recursive allocation may follow. */
+
ncpus = malloc_ncpus();
+
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
+ && !defined(_WIN32))
+ /* LinuxThreads's pthread_atfork() allocates. */
+ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+ jemalloc_postfork_child) != 0) {
+ malloc_write("<jemalloc>: Error in pthread_atfork()\n");
+ if (opt_abort)
+ abort();
+ }
+#endif
+
+ /* Done recursively allocating. */
+ /**********************************************************************/
malloc_mutex_lock(&init_lock);
if (mutex_boot()) {
@@ -841,6 +834,7 @@ malloc_init_hard(void)
malloc_initialized = true;
malloc_mutex_unlock(&init_lock);
+
return (false);
}
@@ -852,42 +846,88 @@ malloc_init_hard(void)
* Begin malloc(3)-compatible functions.
*/
+static void *
+imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = imalloc(SMALL_MAXCLASS+1);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = imalloc(usize);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = imalloc_prof_sample(usize, cnt);
+ else
+ p = imalloc(usize);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
+/*
+ * MALLOC_BODY() is a macro rather than a function because its contents are in
+ * the fast path, but inlining would cause reliability issues when determining
+ * how many frames to discard from heap profiling backtraces.
+ */
+#define MALLOC_BODY(ret, size, usize) do { \
+ if (malloc_init()) \
+ ret = NULL; \
+ else { \
+ if (config_prof && opt_prof) { \
+ prof_thr_cnt_t *cnt; \
+ \
+ usize = s2u(size); \
+ /* \
+ * Call PROF_ALLOC_PREP() here rather than in \
+ * imalloc_prof() so that imalloc_prof() can be \
+ * inlined without introducing uncertainty \
+ * about the number of backtrace frames to \
+ * ignore. imalloc_prof() is in the fast path \
+ * when heap profiling is enabled, so inlining \
+ * is critical to performance. (For \
+ * consistency all callers of PROF_ALLOC_PREP() \
+ * are structured similarly, even though e.g. \
+ * realloc() isn't called enough for inlining \
+ * to be critical.) \
+ */ \
+ PROF_ALLOC_PREP(1, usize, cnt); \
+ ret = imalloc_prof(usize, cnt); \
+ } else { \
+ if (config_stats || (config_valgrind && \
+ opt_valgrind)) \
+ usize = s2u(size); \
+ ret = imalloc(size); \
+ } \
+ } \
+} while (0)
+
void *
je_malloc(size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
-
- if (malloc_init()) {
- ret = NULL;
- goto label_oom;
- }
if (size == 0)
size = 1;
- if (config_prof && opt_prof) {
- usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- ret = NULL;
- goto label_oom;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- ret = imalloc(SMALL_MAXCLASS+1);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- } else
- ret = imalloc(size);
- } else {
- if (config_stats || (config_valgrind && opt_valgrind))
- usize = s2u(size);
- ret = imalloc(size);
- }
+ MALLOC_BODY(ret, size, usize);
-label_oom:
if (ret == NULL) {
if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): "
@@ -896,8 +936,6 @@ label_oom:
}
set_errno(ENOMEM);
}
- if (config_prof && opt_prof && ret != NULL)
- prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
@@ -907,6 +945,42 @@ label_oom:
return (ret);
}
+static void *
+imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
+ p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
+ false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = ipalloc(usize, alignment, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = imemalign_prof_sample(alignment, usize, cnt);
+ else
+ p = ipalloc(usize, alignment, false);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
@@ -916,19 +990,18 @@ JEMALLOC_ATTR(nonnull(1))
JEMALLOC_NOINLINE
#endif
static int
-imemalign(void **memptr, size_t alignment, size_t size,
- size_t min_alignment)
+imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
int ret;
size_t usize;
void *result;
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
assert(min_alignment != 0);
- if (malloc_init())
+ if (malloc_init()) {
result = NULL;
- else {
+ goto label_oom;
+ } else {
if (size == 0)
size = 1;
@@ -948,57 +1021,38 @@ imemalign(void **memptr, size_t alignment, size_t size,
usize = sa2u(size, alignment);
if (usize == 0) {
result = NULL;
- ret = ENOMEM;
- goto label_return;
+ goto label_oom;
}
if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
PROF_ALLOC_PREP(2, usize, cnt);
- if (cnt == NULL) {
- result = NULL;
- ret = EINVAL;
- } else {
- if (prof_promote && (uintptr_t)cnt !=
- (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
- assert(sa2u(SMALL_MAXCLASS+1,
- alignment) != 0);
- result = ipalloc(sa2u(SMALL_MAXCLASS+1,
- alignment), alignment, false);
- if (result != NULL) {
- arena_prof_promoted(result,
- usize);
- }
- } else {
- result = ipalloc(usize, alignment,
- false);
- }
- }
+ result = imemalign_prof(alignment, usize, cnt);
} else
result = ipalloc(usize, alignment, false);
- }
-
- if (result == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error allocating aligned "
- "memory: out of memory\n");
- abort();
- }
- ret = ENOMEM;
- goto label_return;
+ if (result == NULL)
+ goto label_oom;
}
*memptr = result;
ret = 0;
-
label_return:
if (config_stats && result != NULL) {
assert(usize == isalloc(result, config_prof));
thread_allocated_tsd_get()->allocated += usize;
}
- if (config_prof && opt_prof && result != NULL)
- prof_malloc(result, usize, cnt);
UTRACE(0, size, result);
return (ret);
+label_oom:
+ assert(result == NULL);
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error allocating aligned memory: "
+ "out of memory\n");
+ abort();
+ }
+ ret = ENOMEM;
+ goto label_return;
}
int
@@ -1025,13 +1079,46 @@ je_aligned_alloc(size_t alignment, size_t size)
return (ret);
}
+static void *
+icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = icalloc(SMALL_MAXCLASS+1);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = icalloc(usize);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = icalloc_prof_sample(usize, cnt);
+ else
+ p = icalloc(usize);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
+
+ return (p);
+}
+
void *
je_calloc(size_t num, size_t size)
{
void *ret;
size_t num_size;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
if (malloc_init()) {
num_size = 0;
@@ -1060,19 +1147,11 @@ je_calloc(size_t num, size_t size)
}
if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
usize = s2u(num_size);
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- ret = NULL;
- goto label_return;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
- <= SMALL_MAXCLASS) {
- ret = icalloc(SMALL_MAXCLASS+1);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- } else
- ret = icalloc(num_size);
+ ret = icalloc_prof(usize, cnt);
} else {
if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(num_size);
@@ -1088,9 +1167,6 @@ label_return:
}
set_errno(ENOMEM);
}
-
- if (config_prof && opt_prof && ret != NULL)
- prof_malloc(ret, usize, cnt);
if (config_stats && ret != NULL) {
assert(usize == isalloc(ret, config_prof));
thread_allocated_tsd_get()->allocated += usize;
@@ -1100,152 +1176,126 @@ label_return:
return (ret);
}
+static void *
+irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = iralloc(oldptr, usize, 0, 0, false);
+
+ return (p);
+}
+
+JEMALLOC_ALWAYS_INLINE_C void *
+irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
+{
+ void *p;
+ prof_ctx_t *old_ctx;
+
+ old_ctx = prof_ctx_get(oldptr);
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = irealloc_prof_sample(oldptr, usize, cnt);
+ else
+ p = iralloc(oldptr, usize, 0, 0, false);
+ if (p == NULL)
+ return (NULL);
+ prof_realloc(p, usize, cnt, old_usize, old_ctx);
+
+ return (p);
+}
+
+JEMALLOC_INLINE_C void
+ifree(void *ptr)
+{
+ size_t usize;
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+
+ assert(ptr != NULL);
+ assert(malloc_initialized || IS_INITIALIZER);
+
+ if (config_prof && opt_prof) {
+ usize = isalloc(ptr, config_prof);
+ prof_free(ptr, usize);
+ } else if (config_stats || config_valgrind)
+ usize = isalloc(ptr, config_prof);
+ if (config_stats)
+ thread_allocated_tsd_get()->deallocated += usize;
+ if (config_valgrind && opt_valgrind)
+ rzsize = p2rz(ptr);
+ iqalloc(ptr);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+}
+
void *
je_realloc(void *ptr, size_t size)
{
void *ret;
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- size_t old_size = 0;
- size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
- prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
+ size_t old_usize = 0;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
if (size == 0) {
if (ptr != NULL) {
- /* realloc(ptr, 0) is equivalent to free(p). */
- assert(malloc_initialized || IS_INITIALIZER);
- if (config_prof) {
- old_size = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
- } else if (config_stats) {
- old_size = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_size);
- } else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(ptr, false);
- old_rzsize = u2rz(old_size);
- }
- if (config_prof && opt_prof) {
- old_ctx = prof_ctx_get(ptr);
- cnt = NULL;
- }
- iqalloc(ptr);
- ret = NULL;
- goto label_return;
- } else
- size = 1;
+ /* realloc(ptr, 0) is equivalent to free(ptr). */
+ UTRACE(ptr, 0, 0);
+ ifree(ptr);
+ return (NULL);
+ }
+ size = 1;
}
if (ptr != NULL) {
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
- if (config_prof) {
- old_size = isalloc(ptr, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(ptr);
- } else if (config_stats) {
- old_size = isalloc(ptr, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_size);
- } else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(ptr, false);
- old_rzsize = u2rz(old_size);
- }
+ if ((config_prof && opt_prof) || config_stats ||
+ (config_valgrind && opt_valgrind))
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
+
if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
usize = s2u(size);
- old_ctx = prof_ctx_get(ptr);
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL) {
- old_ctx = NULL;
- ret = NULL;
- goto label_oom;
- }
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
- usize <= SMALL_MAXCLASS) {
- ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
- false, false);
- if (ret != NULL)
- arena_prof_promoted(ret, usize);
- else
- old_ctx = NULL;
- } else {
- ret = iralloc(ptr, size, 0, 0, false, false);
- if (ret == NULL)
- old_ctx = NULL;
- }
+ ret = irealloc_prof(ptr, old_usize, usize, cnt);
} else {
if (config_stats || (config_valgrind && opt_valgrind))
usize = s2u(size);
- ret = iralloc(ptr, size, 0, 0, false, false);
- }
-
-label_oom:
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
+ ret = iralloc(ptr, size, 0, 0, false);
}
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- if (config_prof && opt_prof)
- old_ctx = NULL;
- if (malloc_init()) {
- if (config_prof && opt_prof)
- cnt = NULL;
- ret = NULL;
- } else {
- if (config_prof && opt_prof) {
- usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL)
- ret = NULL;
- else {
- if (prof_promote && (uintptr_t)cnt !=
- (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- ret = imalloc(SMALL_MAXCLASS+1);
- if (ret != NULL) {
- arena_prof_promoted(ret,
- usize);
- }
- } else
- ret = imalloc(size);
- }
- } else {
- if (config_stats || (config_valgrind &&
- opt_valgrind))
- usize = s2u(size);
- ret = imalloc(size);
- }
- }
+ MALLOC_BODY(ret, size, usize);
+ }
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
- }
- set_errno(ENOMEM);
+ if (ret == NULL) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in realloc(): "
+ "out of memory\n");
+ abort();
}
+ set_errno(ENOMEM);
}
-
-label_return:
- if (config_prof && opt_prof)
- prof_realloc(ret, usize, cnt, old_size, old_ctx);
if (config_stats && ret != NULL) {
thread_allocated_t *ta;
assert(usize == isalloc(ret, config_prof));
ta = thread_allocated_tsd_get();
ta->allocated += usize;
- ta->deallocated += old_size;
+ ta->deallocated += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
+ JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
+ false);
return (ret);
}
@@ -1254,24 +1304,8 @@ je_free(void *ptr)
{
UTRACE(ptr, 0, 0);
- if (ptr != NULL) {
- size_t usize;
- size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
-
- assert(malloc_initialized || IS_INITIALIZER);
-
- if (config_prof && opt_prof) {
- usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
- } else if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
- if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
- rzsize = p2rz(ptr);
- iqalloc(ptr);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
- }
+ if (ptr != NULL)
+ ifree(ptr);
}
/*
@@ -1337,208 +1371,344 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
* Begin non-standard functions.
*/
-size_t
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena)
{
- size_t ret;
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
+ assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
+ alignment)));
- if (config_ivsalloc)
- ret = ivsalloc(ptr, config_prof);
+ if (alignment != 0)
+ return (ipalloct(usize, alignment, zero, try_tcache, arena));
+ else if (zero)
+ return (icalloct(usize, try_tcache, arena));
else
- ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
-
- return (ret);
+ return (imalloct(usize, try_tcache, arena));
}
-void
-je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
+static void *
+imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena, prof_thr_cnt_t *cnt)
{
+ void *p;
- stats_print(write_cb, cbopaque, opts);
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ size_t usize_promoted = (alignment == 0) ?
+ s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
+ assert(usize_promoted != 0);
+ p = imallocx(usize_promoted, alignment, zero, try_tcache,
+ arena);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
+
+ return (p);
}
-int
-je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
+JEMALLOC_ALWAYS_INLINE_C void *
+imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena, prof_thr_cnt_t *cnt)
{
+ void *p;
- if (malloc_init())
- return (EAGAIN);
+ if ((uintptr_t)cnt != (uintptr_t)1U) {
+ p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
+ arena, cnt);
+ } else
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
+ if (p == NULL)
+ return (NULL);
+ prof_malloc(p, usize, cnt);
- return (ctl_byname(name, oldp, oldlenp, newp, newlen));
+ return (p);
}
-int
-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
+void *
+je_mallocx(size_t size, int flags)
{
+ void *p;
+ size_t usize;
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
+ & (SIZE_T_MAX-1));
+ bool zero = flags & MALLOCX_ZERO;
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ arena_t *arena;
+ bool try_tcache;
+
+ assert(size != 0);
if (malloc_init())
- return (EAGAIN);
+ goto label_oom;
- return (ctl_nametomib(name, mibp, miblenp));
+ if (arena_ind != UINT_MAX) {
+ arena = arenas[arena_ind];
+ try_tcache = false;
+ } else {
+ arena = NULL;
+ try_tcache = true;
+ }
+
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ assert(usize != 0);
+
+ if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
+ PROF_ALLOC_PREP(1, usize, cnt);
+ p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
+ cnt);
+ } else
+ p = imallocx(usize, alignment, zero, try_tcache, arena);
+ if (p == NULL)
+ goto label_oom;
+
+ if (config_stats) {
+ assert(usize == isalloc(p, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
+ }
+ UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
+ return (p);
+label_oom:
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
+ abort();
+ }
+ UTRACE(0, size, 0);
+ return (NULL);
}
-int
-je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+static void *
+irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
+ bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
+ prof_thr_cnt_t *cnt)
{
+ void *p;
- if (malloc_init())
- return (EAGAIN);
+ if (cnt == NULL)
+ return (NULL);
+ if (prof_promote && usize <= SMALL_MAXCLASS) {
+ p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ if (p == NULL)
+ return (NULL);
+ arena_prof_promoted(p, usize);
+ } else {
+ p = iralloct(oldptr, size, 0, alignment, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ }
- return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
+ return (p);
}
-/*
- * End non-standard functions.
- */
-/******************************************************************************/
-/*
- * Begin experimental functions.
- */
-#ifdef JEMALLOC_EXPERIMENTAL
-
-static JEMALLOC_ATTR(always_inline) void *
-iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena)
+JEMALLOC_ALWAYS_INLINE_C void *
+irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
+ size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
+ arena_t *arena, prof_thr_cnt_t *cnt)
{
+ void *p;
+ prof_ctx_t *old_ctx;
- assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
- alignment)));
+ old_ctx = prof_ctx_get(oldptr);
+ if ((uintptr_t)cnt != (uintptr_t)1U)
+ p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena, cnt);
+ else {
+ p = iralloct(oldptr, size, 0, alignment, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ }
+ if (p == NULL)
+ return (NULL);
- if (alignment != 0)
- return (ipallocx(usize, alignment, zero, try_tcache, arena));
- else if (zero)
- return (icallocx(usize, try_tcache, arena));
- else
- return (imallocx(usize, try_tcache, arena));
+ if (p == oldptr && alignment != 0) {
+ /*
+ * The allocation did not move, so it is possible that the size
+ * class is smaller than would guarantee the requested
+ * alignment, and that the alignment constraint was
+ * serendipitously satisfied. Additionally, old_usize may not
+ * be the same as the current usize because of in-place large
+ * reallocation. Therefore, query the actual value of usize.
+ */
+ *usize = isalloc(p, config_prof);
+ }
+ prof_realloc(p, *usize, cnt, old_usize, old_ctx);
+
+ return (p);
}
-int
-je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+void *
+je_rallocx(void *ptr, size_t size, int flags)
{
void *p;
- size_t usize;
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ size_t usize, old_usize;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
- bool zero = flags & ALLOCM_ZERO;
+ bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
- bool try_tcache;
assert(ptr != NULL);
assert(size != 0);
-
- if (malloc_init())
- goto label_oom;
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
if (arena_ind != UINT_MAX) {
+ arena_chunk_t *chunk;
+ try_tcache_alloc = false;
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ try_tcache_dalloc = (chunk == ptr || chunk->arena !=
+ arenas[arena_ind]);
arena = arenas[arena_ind];
- try_tcache = false;
} else {
+ try_tcache_alloc = true;
+ try_tcache_dalloc = true;
arena = NULL;
- try_tcache = true;
}
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (usize == 0)
- goto label_oom;
+ if ((config_prof && opt_prof) || config_stats ||
+ (config_valgrind && opt_valgrind))
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_usize);
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ assert(usize != 0);
PROF_ALLOC_PREP(1, usize, cnt);
- if (cnt == NULL)
+ p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
+ try_tcache_alloc, try_tcache_dalloc, arena, cnt);
+ if (p == NULL)
goto label_oom;
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- SMALL_MAXCLASS) {
- size_t usize_promoted = (alignment == 0) ?
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
- alignment);
- assert(usize_promoted != 0);
- p = iallocm(usize_promoted, alignment, zero,
- try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- arena_prof_promoted(p, usize);
- } else {
- p = iallocm(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
- goto label_oom;
- }
- prof_malloc(p, usize, cnt);
} else {
- p = iallocm(usize, alignment, zero, try_tcache, arena);
+ p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
+ try_tcache_dalloc, arena);
if (p == NULL)
goto label_oom;
+ if (config_stats || (config_valgrind && opt_valgrind))
+ usize = isalloc(p, config_prof);
}
- if (rsize != NULL)
- *rsize = usize;
- *ptr = p;
if (config_stats) {
- assert(usize == isalloc(p, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ thread_allocated_t *ta;
+ ta = thread_allocated_tsd_get();
+ ta->allocated += usize;
+ ta->deallocated += old_usize;
}
- UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
- return (ALLOCM_SUCCESS);
+ UTRACE(ptr, size, p);
+ JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
+ return (p);
label_oom:
if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in allocm(): "
- "out of memory\n");
+ malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
abort();
}
- *ptr = NULL;
- UTRACE(0, size, 0);
- return (ALLOCM_ERR_OOM);
+ UTRACE(ptr, size, 0);
+ return (NULL);
}
-int
-je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, bool zero, arena_t *arena)
+{
+ size_t usize;
+
+ if (ixalloc(ptr, size, extra, alignment, zero))
+ return (old_usize);
+ usize = isalloc(ptr, config_prof);
+
+ return (usize);
+}
+
+static size_t
+ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, size_t max_usize, bool zero, arena_t *arena,
+ prof_thr_cnt_t *cnt)
{
- void *p, *q;
size_t usize;
- size_t old_size;
- size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+
+ if (cnt == NULL)
+ return (old_usize);
+ /* Use minimum usize to determine whether promotion may happen. */
+ if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
+ alignment)) <= SMALL_MAXCLASS) {
+ if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
+ alignment, zero))
+ return (old_usize);
+ usize = isalloc(ptr, config_prof);
+ if (max_usize < PAGE)
+ arena_prof_promoted(ptr, usize);
+ } else {
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero, arena);
+ }
+
+ return (usize);
+}
+
+JEMALLOC_ALWAYS_INLINE_C size_t
+ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
+ size_t alignment, size_t max_usize, bool zero, arena_t *arena,
+ prof_thr_cnt_t *cnt)
+{
+ size_t usize;
+ prof_ctx_t *old_ctx;
+
+ old_ctx = prof_ctx_get(ptr);
+ if ((uintptr_t)cnt != (uintptr_t)1U) {
+ usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
+ alignment, zero, max_usize, arena, cnt);
+ } else {
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero, arena);
+ }
+ if (usize == old_usize)
+ return (usize);
+ prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
+
+ return (usize);
+}
+
+size_t
+je_xallocx(void *ptr, size_t size, size_t extra, int flags)
+{
+ size_t usize, old_usize;
+ UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
- bool zero = flags & ALLOCM_ZERO;
- bool no_move = flags & ALLOCM_NO_MOVE;
+ bool zero = flags & MALLOCX_ZERO;
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
assert(ptr != NULL);
- assert(*ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk;
- try_tcache_alloc = true;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
- try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
- arenas[arena_ind]);
+ if (arena_ind != UINT_MAX)
arena = arenas[arena_ind];
- } else {
- try_tcache_alloc = true;
- try_tcache_dalloc = true;
+ else
arena = NULL;
- }
- p = *ptr;
+ old_usize = isalloc(ptr, config_prof);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_usize);
+
if (config_prof && opt_prof) {
prof_thr_cnt_t *cnt;
-
/*
- * usize isn't knowable before iralloc() returns when extra is
+ * usize isn't knowable before ixalloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and
* use that in PROF_ALLOC_PREP() to decide whether to capture a
* backtrace. prof_realloc() will use the actual usize to
@@ -1546,112 +1716,51 @@ je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
*/
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
sa2u(size+extra, alignment);
- prof_ctx_t *old_ctx = prof_ctx_get(p);
- old_size = isalloc(p, true);
- if (config_valgrind && opt_valgrind)
- old_rzsize = p2rz(p);
PROF_ALLOC_PREP(1, max_usize, cnt);
- if (cnt == NULL)
- goto label_oom;
- /*
- * Use minimum usize to determine whether promotion may happen.
- */
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
- && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
- <= SMALL_MAXCLASS) {
- q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero, no_move, try_tcache_alloc,
- try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- if (max_usize < PAGE) {
- usize = max_usize;
- arena_prof_promoted(q, usize);
- } else
- usize = isalloc(q, config_prof);
- } else {
- q = irallocx(p, size, extra, alignment, zero, no_move,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- usize = isalloc(q, config_prof);
- }
- prof_realloc(q, usize, cnt, old_size, old_ctx);
- if (rsize != NULL)
- *rsize = usize;
+ usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
+ max_usize, zero, arena, cnt);
} else {
- if (config_stats) {
- old_size = isalloc(p, false);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_size);
- } else if (config_valgrind && opt_valgrind) {
- old_size = isalloc(p, false);
- old_rzsize = u2rz(old_size);
- }
- q = irallocx(p, size, extra, alignment, zero, no_move,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (q == NULL)
- goto label_err;
- if (config_stats)
- usize = isalloc(q, config_prof);
- if (rsize != NULL) {
- if (config_stats == false)
- usize = isalloc(q, config_prof);
- *rsize = usize;
- }
+ usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
+ zero, arena);
}
+ if (usize == old_usize)
+ goto label_not_resized;
- *ptr = q;
if (config_stats) {
thread_allocated_t *ta;
ta = thread_allocated_tsd_get();
ta->allocated += usize;
- ta->deallocated += old_size;
+ ta->deallocated += old_usize;
}
- UTRACE(p, size, q);
- JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
- return (ALLOCM_SUCCESS);
-label_err:
- if (no_move) {
- UTRACE(p, size, q);
- return (ALLOCM_ERR_NOT_MOVED);
- }
-label_oom:
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in rallocm(): "
- "out of memory\n");
- abort();
- }
- UTRACE(p, size, 0);
- return (ALLOCM_ERR_OOM);
+ JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
+label_not_resized:
+ UTRACE(ptr, size, ptr);
+ return (usize);
}
-int
-je_sallocm(const void *ptr, size_t *rsize, int flags)
+size_t
+je_sallocx(const void *ptr, int flags)
{
- size_t sz;
+ size_t usize;
assert(malloc_initialized || IS_INITIALIZER);
malloc_thread_init();
if (config_ivsalloc)
- sz = ivsalloc(ptr, config_prof);
+ usize = ivsalloc(ptr, config_prof);
else {
assert(ptr != NULL);
- sz = isalloc(ptr, config_prof);
+ usize = isalloc(ptr, config_prof);
}
- assert(rsize != NULL);
- *rsize = sz;
- return (ALLOCM_SUCCESS);
+ return (usize);
}
-int
-je_dallocm(void *ptr, int flags)
+void
+je_dallocx(void *ptr, int flags)
{
size_t usize;
- size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
bool try_tcache;
@@ -1677,28 +1786,162 @@ je_dallocm(void *ptr, int flags)
thread_allocated_tsd_get()->deallocated += usize;
if (config_valgrind && opt_valgrind)
rzsize = p2rz(ptr);
- iqallocx(ptr, try_tcache);
+ iqalloct(ptr, try_tcache);
JEMALLOC_VALGRIND_FREE(ptr, rzsize);
-
- return (ALLOCM_SUCCESS);
}
-int
-je_nallocm(size_t *rsize, size_t size, int flags)
+size_t
+je_nallocx(size_t size, int flags)
{
size_t usize;
- size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
assert(size != 0);
if (malloc_init())
- return (ALLOCM_ERR_OOM);
+ return (0);
usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- if (usize == 0)
+ assert(usize != 0);
+ return (usize);
+}
+
+int
+je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_byname(name, oldp, oldlenp, newp, newlen));
+}
+
+int
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_nametomib(name, mibp, miblenp));
+}
+
+int
+je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+
+ if (malloc_init())
+ return (EAGAIN);
+
+ return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
+}
+
+void
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
+{
+
+ stats_print(write_cb, cbopaque, opts);
+}
+
+size_t
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
+{
+ size_t ret;
+
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
+
+ if (config_ivsalloc)
+ ret = ivsalloc(ptr, config_prof);
+ else
+ ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
+
+ return (ret);
+}
+
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * Begin experimental functions.
+ */
+#ifdef JEMALLOC_EXPERIMENTAL
+
+int
+je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
+{
+ void *p;
+
+ assert(ptr != NULL);
+
+ p = je_mallocx(size, flags);
+ if (p == NULL)
return (ALLOCM_ERR_OOM);
+ if (rsize != NULL)
+ *rsize = isalloc(p, config_prof);
+ *ptr = p;
+ return (ALLOCM_SUCCESS);
+}
+int
+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
+{
+ int ret;
+ bool no_move = flags & ALLOCM_NO_MOVE;
+
+ assert(ptr != NULL);
+ assert(*ptr != NULL);
+ assert(size != 0);
+ assert(SIZE_T_MAX - size >= extra);
+
+ if (no_move) {
+ size_t usize = je_xallocx(*ptr, size, extra, flags);
+ ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
+ if (rsize != NULL)
+ *rsize = usize;
+ } else {
+ void *p = je_rallocx(*ptr, size+extra, flags);
+ if (p != NULL) {
+ *ptr = p;
+ ret = ALLOCM_SUCCESS;
+ } else
+ ret = ALLOCM_ERR_OOM;
+ if (rsize != NULL)
+ *rsize = isalloc(*ptr, config_prof);
+ }
+ return (ret);
+}
+
+int
+je_sallocm(const void *ptr, size_t *rsize, int flags)
+{
+
+ assert(rsize != NULL);
+ *rsize = je_sallocx(ptr, flags);
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_dallocm(void *ptr, int flags)
+{
+
+ je_dallocx(ptr, flags);
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_nallocm(size_t *rsize, size_t size, int flags)
+{
+ size_t usize;
+
+ usize = je_nallocx(size, flags);
+ if (usize == 0)
+ return (ALLOCM_ERR_OOM);
if (rsize != NULL)
*rsize = usize;
return (ALLOCM_SUCCESS);
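
The jemalloc.c hunks above retire the experimental allocm()/rallocm()/sallocm()/dallocm()/nallocm() entry points into thin wrappers over the new mallocx()/rallocx()/xallocx()/sallocx()/dallocx()/nallocx() family, which reports failure as NULL (or 0) instead of ALLOCM_* codes. A minimal caller-side migration sketch follows; the flag layout (lg(alignment) in the low bits, MALLOCX_ZERO, arena index above bit 8) is taken from the decoding in je_mallocx(), while the MALLOCX_LG_ALIGN() helper and the je_ public prefix are assumptions about this build's public header.

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    /* Replaces: allocm(&p, &usize, size, ALLOCM_ZERO | ALLOCM_LG_ALIGN(lg_align)). */
    static void *
    alloc_zeroed_aligned(size_t size, unsigned lg_align, size_t *usize)
    {
        void *p = je_mallocx(size, MALLOCX_LG_ALIGN(lg_align) | MALLOCX_ZERO);

        if (p == NULL)
            return (NULL);              /* OOM is NULL, not ALLOCM_ERR_OOM. */
        if (usize != NULL)
            *usize = je_sallocx(p, 0);  /* Replaces the *rsize out-parameter. */
        return (p);
    }
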
diff --git a/dep/jemalloc/src/mutex.c b/dep/jemalloc/src/mutex.c
index 55e18c23713..788eca38703 100644
--- a/dep/jemalloc/src/mutex.c
+++ b/dep/jemalloc/src/mutex.c
@@ -6,7 +6,7 @@
#endif
#ifndef _CRT_SPINCOUNT
-#define _CRT_SPINCOUNT 4000
+#define _CRT_SPINCOUNT 4000
#endif
/******************************************************************************/
diff --git a/dep/jemalloc/src/prof.c b/dep/jemalloc/src/prof.c
index c133b95c2c6..1d8ccbd60ae 100644
--- a/dep/jemalloc/src/prof.c
+++ b/dep/jemalloc/src/prof.c
@@ -24,7 +24,12 @@ bool opt_prof_gdump = false;
bool opt_prof_final = true;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
-char opt_prof_prefix[PATH_MAX + 1];
+char opt_prof_prefix[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
uint64_t prof_interval = 0;
bool prof_promote;
@@ -54,10 +59,17 @@ static uint64_t prof_dump_useq;
/*
* This buffer is rather large for stack allocation, so use a single buffer for
- * all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since
- * it must be locked anyway during dumping.
+ * all profile dumps.
*/
-static char prof_dump_buf[PROF_DUMP_BUFSIZE];
+static malloc_mutex_t prof_dump_mtx;
+static char prof_dump_buf[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PROF_DUMP_BUFSIZE
+#else
+ 1
+#endif
+];
static unsigned prof_dump_buf_end;
static int prof_dump_fd;
@@ -65,36 +77,6 @@ static int prof_dump_fd;
static bool prof_booted = false;
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static prof_bt_t *bt_dup(prof_bt_t *bt);
-static void bt_destroy(prof_bt_t *bt);
-#ifdef JEMALLOC_PROF_LIBGCC
-static _Unwind_Reason_Code prof_unwind_init_callback(
- struct _Unwind_Context *context, void *arg);
-static _Unwind_Reason_Code prof_unwind_callback(
- struct _Unwind_Context *context, void *arg);
-#endif
-static bool prof_flush(bool propagate_err);
-static bool prof_write(bool propagate_err, const char *s);
-static bool prof_printf(bool propagate_err, const char *format, ...)
- JEMALLOC_ATTR(format(printf, 2, 3));
-static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
- size_t *leak_nctx);
-static void prof_ctx_destroy(prof_ctx_t *ctx);
-static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
-static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx,
- prof_bt_t *bt);
-static bool prof_dump_maps(bool propagate_err);
-static bool prof_dump(bool propagate_err, const char *filename,
- bool leakcheck);
-static void prof_dump_filename(char *filename, char v, int64_t vseq);
-static void prof_fdump(void);
-static void prof_bt_hash(const void *key, size_t r_hash[2]);
-static bool prof_bt_keycomp(const void *k1, const void *k2);
-static malloc_mutex_t *prof_ctx_mutex_choose(void);
-
-/******************************************************************************/
void
bt_init(prof_bt_t *bt, void **vec)
@@ -423,10 +405,169 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
cassert(config_prof);
- assert(false);
+ not_reached();
}
#endif
+static malloc_mutex_t *
+prof_ctx_mutex_choose(void)
+{
+ unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
+
+ return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
+}
+
+static void
+prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt)
+{
+
+ ctx->bt = bt;
+ ctx->lock = prof_ctx_mutex_choose();
+ /*
+ * Set nlimbo to 1, in order to avoid a race condition with
+ * prof_ctx_merge()/prof_ctx_destroy().
+ */
+ ctx->nlimbo = 1;
+ ql_elm_new(ctx, dump_link);
+ memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t));
+ ql_new(&ctx->cnts_ql);
+}
+
+static void
+prof_ctx_destroy(prof_ctx_t *ctx)
+{
+ prof_tdata_t *prof_tdata;
+
+ cassert(config_prof);
+
+ /*
+ * Check that ctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_ctx_merge() in order to
+ * avoid a race between the main body of prof_ctx_merge() and entry
+ * into this function.
+ */
+ prof_tdata = prof_tdata_get(false);
+ assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
+ prof_enter(prof_tdata);
+ malloc_mutex_lock(ctx->lock);
+ if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
+ ctx->nlimbo == 1) {
+ assert(ctx->cnt_merged.curbytes == 0);
+ assert(ctx->cnt_merged.accumobjs == 0);
+ assert(ctx->cnt_merged.accumbytes == 0);
+ /* Remove ctx from bt2ctx. */
+ if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+ not_reached();
+ prof_leave(prof_tdata);
+ /* Destroy ctx. */
+ malloc_mutex_unlock(ctx->lock);
+ bt_destroy(ctx->bt);
+ idalloc(ctx);
+ } else {
+ /*
+ * Compensate for increment in prof_ctx_merge() or
+ * prof_lookup().
+ */
+ ctx->nlimbo--;
+ malloc_mutex_unlock(ctx->lock);
+ prof_leave(prof_tdata);
+ }
+}
+
+static void
+prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+{
+ bool destroy;
+
+ cassert(config_prof);
+
+ /* Merge cnt stats and detach from ctx. */
+ malloc_mutex_lock(ctx->lock);
+ ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
+ ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
+ ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
+ ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
+ ql_remove(&ctx->cnts_ql, cnt, cnts_link);
+ if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
+ ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
+ /*
+ * Increment ctx->nlimbo in order to keep another thread from
+ * winning the race to destroy ctx while this one has ctx->lock
+ * dropped. Without this, it would be possible for another
+ * thread to:
+ *
+ * 1) Sample an allocation associated with ctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_ctx_destroy(ctx).
+ *
+ * The result would be that ctx no longer exists by the time
+ * this thread accesses it in prof_ctx_destroy().
+ */
+ ctx->nlimbo++;
+ destroy = true;
+ } else
+ destroy = false;
+ malloc_mutex_unlock(ctx->lock);
+ if (destroy)
+ prof_ctx_destroy(ctx);
+}
+
+static bool
+prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey,
+ prof_ctx_t **p_ctx, bool *p_new_ctx)
+{
+ union {
+ prof_ctx_t *p;
+ void *v;
+ } ctx;
+ union {
+ prof_bt_t *p;
+ void *v;
+ } btkey;
+ bool new_ctx;
+
+ prof_enter(prof_tdata);
+ if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
+ /* bt has never been seen before. Insert it. */
+ ctx.v = imalloc(sizeof(prof_ctx_t));
+ if (ctx.v == NULL) {
+ prof_leave(prof_tdata);
+ return (true);
+ }
+ btkey.p = bt_dup(bt);
+ if (btkey.v == NULL) {
+ prof_leave(prof_tdata);
+ idalloc(ctx.v);
+ return (true);
+ }
+ prof_ctx_init(ctx.p, btkey.p);
+ if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
+ /* OOM. */
+ prof_leave(prof_tdata);
+ idalloc(btkey.v);
+ idalloc(ctx.v);
+ return (true);
+ }
+ new_ctx = true;
+ } else {
+ /*
+ * Increment nlimbo, in order to avoid a race condition with
+ * prof_ctx_merge()/prof_ctx_destroy().
+ */
+ malloc_mutex_lock(ctx.p->lock);
+ ctx.p->nlimbo++;
+ malloc_mutex_unlock(ctx.p->lock);
+ new_ctx = false;
+ }
+ prof_leave(prof_tdata);
+
+ *p_btkey = btkey.v;
+ *p_ctx = ctx.p;
+ *p_new_ctx = new_ctx;
+ return (false);
+}
+
prof_thr_cnt_t *
prof_lookup(prof_bt_t *bt)
{
@@ -443,62 +584,16 @@ prof_lookup(prof_bt_t *bt)
return (NULL);
if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
- union {
- prof_bt_t *p;
- void *v;
- } btkey;
- union {
- prof_ctx_t *p;
- void *v;
- } ctx;
+ void *btkey;
+ prof_ctx_t *ctx;
bool new_ctx;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
- prof_enter(prof_tdata);
- if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
- /* bt has never been seen before. Insert it. */
- ctx.v = imalloc(sizeof(prof_ctx_t));
- if (ctx.v == NULL) {
- prof_leave(prof_tdata);
- return (NULL);
- }
- btkey.p = bt_dup(bt);
- if (btkey.v == NULL) {
- prof_leave(prof_tdata);
- idalloc(ctx.v);
- return (NULL);
- }
- ctx.p->bt = btkey.p;
- ctx.p->lock = prof_ctx_mutex_choose();
- /*
- * Set nlimbo to 1, in order to avoid a race condition
- * with prof_ctx_merge()/prof_ctx_destroy().
- */
- ctx.p->nlimbo = 1;
- memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
- ql_new(&ctx.p->cnts_ql);
- if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
- /* OOM. */
- prof_leave(prof_tdata);
- idalloc(btkey.v);
- idalloc(ctx.v);
- return (NULL);
- }
- new_ctx = true;
- } else {
- /*
- * Increment nlimbo, in order to avoid a race condition
- * with prof_ctx_merge()/prof_ctx_destroy().
- */
- malloc_mutex_lock(ctx.p->lock);
- ctx.p->nlimbo++;
- malloc_mutex_unlock(ctx.p->lock);
- new_ctx = false;
- }
- prof_leave(prof_tdata);
+ if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx))
+ return (NULL);
/* Link a prof_thd_cnt_t into ctx for this thread. */
if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
@@ -511,7 +606,7 @@ prof_lookup(prof_bt_t *bt)
assert(ret.v != NULL);
if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
NULL, NULL))
- assert(false);
+ not_reached();
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
prof_ctx_merge(ret.p->ctx, ret.p);
/* ret can now be re-used. */
@@ -521,27 +616,27 @@ prof_lookup(prof_bt_t *bt)
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
if (new_ctx)
- prof_ctx_destroy(ctx.p);
+ prof_ctx_destroy(ctx);
return (NULL);
}
ql_elm_new(ret.p, cnts_link);
ql_elm_new(ret.p, lru_link);
}
/* Finish initializing ret. */
- ret.p->ctx = ctx.p;
+ ret.p->ctx = ctx;
ret.p->epoch = 0;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
- if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
+ if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) {
if (new_ctx)
- prof_ctx_destroy(ctx.p);
+ prof_ctx_destroy(ctx);
idalloc(ret.v);
return (NULL);
}
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
- malloc_mutex_lock(ctx.p->lock);
- ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
- ctx.p->nlimbo--;
- malloc_mutex_unlock(ctx.p->lock);
+ malloc_mutex_lock(ctx->lock);
+ ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
+ ctx->nlimbo--;
+ malloc_mutex_unlock(ctx->lock);
} else {
/* Move ret to the front of the LRU. */
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
@@ -551,8 +646,52 @@ prof_lookup(prof_bt_t *bt)
return (ret.p);
}
+#ifdef JEMALLOC_JET
+size_t
+prof_bt_count(void)
+{
+ size_t bt_count;
+ prof_tdata_t *prof_tdata;
+
+ prof_tdata = prof_tdata_get(false);
+ if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ return (0);
+
+ prof_enter(prof_tdata);
+ bt_count = ckh_count(&bt2ctx);
+ prof_leave(prof_tdata);
+
+ return (bt_count);
+}
+#endif
+
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
+#endif
+static int
+prof_dump_open(bool propagate_err, const char *filename)
+{
+ int fd;
+
+ fd = creat(filename, 0644);
+ if (fd == -1 && propagate_err == false) {
+ malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
+ filename);
+ if (opt_abort)
+ abort();
+ }
+
+ return (fd);
+}
+#ifdef JEMALLOC_JET
+#undef prof_dump_open
+#define prof_dump_open JEMALLOC_N(prof_dump_open)
+prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
+#endif
+
static bool
-prof_flush(bool propagate_err)
+prof_dump_flush(bool propagate_err)
{
bool ret = false;
ssize_t err;
@@ -575,7 +714,20 @@ prof_flush(bool propagate_err)
}
static bool
-prof_write(bool propagate_err, const char *s)
+prof_dump_close(bool propagate_err)
+{
+ bool ret;
+
+ assert(prof_dump_fd != -1);
+ ret = prof_dump_flush(propagate_err);
+ close(prof_dump_fd);
+ prof_dump_fd = -1;
+
+ return (ret);
+}
+
+static bool
+prof_dump_write(bool propagate_err, const char *s)
{
unsigned i, slen, n;
@@ -586,7 +738,7 @@ prof_write(bool propagate_err, const char *s)
while (i < slen) {
/* Flush the buffer if it is full. */
if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
- if (prof_flush(propagate_err) && propagate_err)
+ if (prof_dump_flush(propagate_err) && propagate_err)
return (true);
if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
@@ -606,7 +758,7 @@ prof_write(bool propagate_err, const char *s)
JEMALLOC_ATTR(format(printf, 2, 3))
static bool
-prof_printf(bool propagate_err, const char *format, ...)
+prof_dump_printf(bool propagate_err, const char *format, ...)
{
bool ret;
va_list ap;
@@ -615,13 +767,14 @@ prof_printf(bool propagate_err, const char *format, ...)
va_start(ap, format);
malloc_vsnprintf(buf, sizeof(buf), format, ap);
va_end(ap);
- ret = prof_write(propagate_err, buf);
+ ret = prof_dump_write(propagate_err, buf);
return (ret);
}
static void
-prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
+prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx,
+ prof_ctx_list_t *ctx_ql)
{
prof_thr_cnt_t *thr_cnt;
prof_cnt_t tcnt;
@@ -630,6 +783,14 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
malloc_mutex_lock(ctx->lock);
+ /*
+ * Increment nlimbo so that ctx won't go away before dump.
+ * Additionally, link ctx into the dump list so that it is included in
+ * prof_dump()'s second pass.
+ */
+ ctx->nlimbo++;
+ ql_tail_insert(ctx_ql, ctx, dump_link);
+
memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
volatile unsigned *epoch = &thr_cnt->epoch;
@@ -670,89 +831,52 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
malloc_mutex_unlock(ctx->lock);
}
-static void
-prof_ctx_destroy(prof_ctx_t *ctx)
+static bool
+prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
- prof_tdata_t *prof_tdata;
- cassert(config_prof);
-
- /*
- * Check that ctx is still unused by any thread cache before destroying
- * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
- * condition with this function, as does prof_ctx_merge() in order to
- * avoid a race between the main body of prof_ctx_merge() and entry
- * into this function.
- */
- prof_tdata = prof_tdata_get(false);
- assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
- prof_enter(prof_tdata);
- malloc_mutex_lock(ctx->lock);
- if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
- ctx->nlimbo == 1) {
- assert(ctx->cnt_merged.curbytes == 0);
- assert(ctx->cnt_merged.accumobjs == 0);
- assert(ctx->cnt_merged.accumbytes == 0);
- /* Remove ctx from bt2ctx. */
- if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
- assert(false);
- prof_leave(prof_tdata);
- /* Destroy ctx. */
- malloc_mutex_unlock(ctx->lock);
- bt_destroy(ctx->bt);
- idalloc(ctx);
+ if (opt_lg_prof_sample == 0) {
+ if (prof_dump_printf(propagate_err,
+ "heap profile: %"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
+ cnt_all->curobjs, cnt_all->curbytes,
+ cnt_all->accumobjs, cnt_all->accumbytes))
+ return (true);
} else {
- /*
- * Compensate for increment in prof_ctx_merge() or
- * prof_lookup().
- */
- ctx->nlimbo--;
- malloc_mutex_unlock(ctx->lock);
- prof_leave(prof_tdata);
+ if (prof_dump_printf(propagate_err,
+ "heap profile: %"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
+ cnt_all->curobjs, cnt_all->curbytes,
+ cnt_all->accumobjs, cnt_all->accumbytes,
+ ((uint64_t)1U << opt_lg_prof_sample)))
+ return (true);
}
+
+ return (false);
}
static void
-prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
+prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
{
- bool destroy;
- cassert(config_prof);
+ ctx->nlimbo--;
+ ql_remove(ctx_ql, ctx, dump_link);
+}
+
+static void
+prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
+{
- /* Merge cnt stats and detach from ctx. */
malloc_mutex_lock(ctx->lock);
- ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
- ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
- ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
- ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
- ql_remove(&ctx->cnts_ql, cnt, cnts_link);
- if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
- ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
- /*
- * Increment ctx->nlimbo in order to keep another thread from
- * winning the race to destroy ctx while this one has ctx->lock
- * dropped. Without this, it would be possible for another
- * thread to:
- *
- * 1) Sample an allocation associated with ctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_ctx_destroy(ctx).
- *
- * The result would be that ctx no longer exists by the time
- * this thread accesses it in prof_ctx_destroy().
- */
- ctx->nlimbo++;
- destroy = true;
- } else
- destroy = false;
+ prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
malloc_mutex_unlock(ctx->lock);
- if (destroy)
- prof_ctx_destroy(ctx);
}
static bool
-prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
+prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt,
+ prof_ctx_list_t *ctx_ql)
{
+ bool ret;
unsigned i;
cassert(config_prof);
@@ -764,36 +888,49 @@ prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
* filled in. Avoid dumping any ctx that is an artifact of either
* implementation detail.
*/
+ malloc_mutex_lock(ctx->lock);
if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
(opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
assert(ctx->cnt_summed.curobjs == 0);
assert(ctx->cnt_summed.curbytes == 0);
assert(ctx->cnt_summed.accumobjs == 0);
assert(ctx->cnt_summed.accumbytes == 0);
- return (false);
+ ret = false;
+ goto label_return;
}
- if (prof_printf(propagate_err, "%"PRId64": %"PRId64
+ if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64
" [%"PRIu64": %"PRIu64"] @",
ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
- ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
- return (true);
+ ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) {
+ ret = true;
+ goto label_return;
+ }
for (i = 0; i < bt->len; i++) {
- if (prof_printf(propagate_err, " %#"PRIxPTR,
- (uintptr_t)bt->vec[i]))
- return (true);
+ if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
+ (uintptr_t)bt->vec[i])) {
+ ret = true;
+ goto label_return;
+ }
}
- if (prof_write(propagate_err, "\n"))
- return (true);
+ if (prof_dump_write(propagate_err, "\n")) {
+ ret = true;
+ goto label_return;
+ }
- return (false);
+ ret = false;
+label_return:
+ prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
+ malloc_mutex_unlock(ctx->lock);
+ return (ret);
}
static bool
prof_dump_maps(bool propagate_err)
{
+ bool ret;
int mfd;
char filename[PATH_MAX + 1];
@@ -805,25 +942,52 @@ prof_dump_maps(bool propagate_err)
if (mfd != -1) {
ssize_t nread;
- if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
- propagate_err)
- return (true);
+ if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
+ propagate_err) {
+ ret = true;
+ goto label_return;
+ }
nread = 0;
do {
prof_dump_buf_end += nread;
if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
/* Make space in prof_dump_buf before read(). */
- if (prof_flush(propagate_err) && propagate_err)
- return (true);
+ if (prof_dump_flush(propagate_err) &&
+ propagate_err) {
+ ret = true;
+ goto label_return;
+ }
}
nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
PROF_DUMP_BUFSIZE - prof_dump_buf_end);
} while (nread > 0);
+ } else {
+ ret = true;
+ goto label_return;
+ }
+
+ ret = false;
+label_return:
+ if (mfd != -1)
close(mfd);
- } else
- return (true);
+ return (ret);
+}
- return (false);
+static void
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx,
+ const char *filename)
+{
+
+ if (cnt_all->curbytes != 0) {
+ malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
+ PRId64" object%s, %zu context%s\n",
+ cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
+ cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
+ leak_nctx, (leak_nctx != 1) ? "s" : "");
+ malloc_printf(
+ "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+ filename);
+ }
}
static bool
@@ -833,98 +997,74 @@ prof_dump(bool propagate_err, const char *filename, bool leakcheck)
prof_cnt_t cnt_all;
size_t tabind;
union {
- prof_bt_t *p;
- void *v;
- } bt;
- union {
prof_ctx_t *p;
void *v;
} ctx;
size_t leak_nctx;
+ prof_ctx_list_t ctx_ql;
cassert(config_prof);
prof_tdata = prof_tdata_get(false);
if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
return (true);
- prof_enter(prof_tdata);
- prof_dump_fd = creat(filename, 0644);
- if (prof_dump_fd == -1) {
- if (propagate_err == false) {
- malloc_printf(
- "<jemalloc>: creat(\"%s\"), 0644) failed\n",
- filename);
- if (opt_abort)
- abort();
- }
- goto label_error;
- }
+
+ malloc_mutex_lock(&prof_dump_mtx);
/* Merge per thread profile stats, and sum them in cnt_all. */
memset(&cnt_all, 0, sizeof(prof_cnt_t));
leak_nctx = 0;
+ ql_new(&ctx_ql);
+ prof_enter(prof_tdata);
for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
- prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);
+ prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql);
+ prof_leave(prof_tdata);
+
+ /* Create dump file. */
+ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
+ goto label_open_close_error;
/* Dump profile header. */
- if (opt_lg_prof_sample == 0) {
- if (prof_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
- cnt_all.curobjs, cnt_all.curbytes,
- cnt_all.accumobjs, cnt_all.accumbytes))
- goto label_error;
- } else {
- if (prof_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
- cnt_all.curobjs, cnt_all.curbytes,
- cnt_all.accumobjs, cnt_all.accumbytes,
- ((uint64_t)1U << opt_lg_prof_sample)))
- goto label_error;
- }
+ if (prof_dump_header(propagate_err, &cnt_all))
+ goto label_write_error;
- /* Dump per ctx profile stats. */
- for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
- == false;) {
- if (prof_dump_ctx(propagate_err, ctx.p, bt.p))
- goto label_error;
+ /* Dump per ctx profile stats. */
+ while ((ctx.p = ql_first(&ctx_ql)) != NULL) {
+ if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql))
+ goto label_write_error;
}
/* Dump /proc/<pid>/maps if possible. */
if (prof_dump_maps(propagate_err))
- goto label_error;
+ goto label_write_error;
- if (prof_flush(propagate_err))
- goto label_error;
- close(prof_dump_fd);
- prof_leave(prof_tdata);
+ if (prof_dump_close(propagate_err))
+ goto label_open_close_error;
- if (leakcheck && cnt_all.curbytes != 0) {
- malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
- PRId64" object%s, %zu context%s\n",
- cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "",
- cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "",
- leak_nctx, (leak_nctx != 1) ? "s" : "");
- malloc_printf(
- "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
- filename);
- }
+ malloc_mutex_unlock(&prof_dump_mtx);
+
+ if (leakcheck)
+ prof_leakcheck(&cnt_all, leak_nctx, filename);
return (false);
-label_error:
- prof_leave(prof_tdata);
+label_write_error:
+ prof_dump_close(propagate_err);
+label_open_close_error:
+ while ((ctx.p = ql_first(&ctx_ql)) != NULL)
+ prof_dump_ctx_cleanup(ctx.p, &ctx_ql);
+ malloc_mutex_unlock(&prof_dump_mtx);
return (true);
}
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, int64_t vseq)
{
cassert(config_prof);
- if (vseq != UINT64_C(0xffffffffffffffff)) {
+ if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
"%s.%d.%"PRIu64".%c%"PRId64".heap",
@@ -950,7 +1090,7 @@ prof_fdump(void)
if (opt_prof_final && opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
+ prof_dump_filename(filename, 'f', VSEQ_INVALID);
malloc_mutex_unlock(&prof_dump_seq_mtx);
prof_dump(false, filename, opt_prof_leak);
}
@@ -1056,14 +1196,6 @@ prof_bt_keycomp(const void *k1, const void *k2)
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
-static malloc_mutex_t *
-prof_ctx_mutex_choose(void)
-{
- unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
-
- return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
-}
-
prof_tdata_t *
prof_tdata_init(void)
{
@@ -1208,6 +1340,8 @@ prof_boot2(void)
if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
+ if (malloc_mutex_init(&prof_dump_mtx))
+ return (true);
if (atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
@@ -1245,10 +1379,10 @@ prof_prefork(void)
if (opt_prof) {
unsigned i;
- malloc_mutex_lock(&bt2ctx_mtx);
- malloc_mutex_lock(&prof_dump_seq_mtx);
+ malloc_mutex_prefork(&bt2ctx_mtx);
+ malloc_mutex_prefork(&prof_dump_seq_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_lock(&ctx_locks[i]);
+ malloc_mutex_prefork(&ctx_locks[i]);
}
}
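
The prof.c hunks above serialize profile dumping behind the new prof_dump_mtx and split the work into prof_dump_open()/prof_dump_flush()/prof_dump_close() plus a two-pass walk over a dump_link list, but the external trigger is unchanged. A sketch of a manual dump through the long-standing "prof.dump" mallctl follows, assuming the je_ prefix matches this build.

    #include <jemalloc/jemalloc.h>

    /* Returns 0 on success; a NULL filename falls back to opt.prof_prefix naming. */
    static int
    dump_heap_profile(const char *filename)
    {
        if (filename == NULL)
            return (je_mallctl("prof.dump", NULL, NULL, NULL, 0));
        return (je_mallctl("prof.dump", NULL, NULL, &filename,
            sizeof(const char *)));
    }
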
diff --git a/dep/jemalloc/src/quarantine.c b/dep/jemalloc/src/quarantine.c
index f96a948d5c7..5431511640a 100644
--- a/dep/jemalloc/src/quarantine.c
+++ b/dep/jemalloc/src/quarantine.c
@@ -141,8 +141,17 @@ quarantine(void *ptr)
obj->usize = usize;
quarantine->curbytes += usize;
quarantine->curobjs++;
- if (opt_junk)
- memset(ptr, 0x5a, usize);
+ if (config_fill && opt_junk) {
+ /*
+ * Only do redzone validation if Valgrind isn't in
+ * operation.
+ */
+ if ((config_valgrind == false || opt_valgrind == false)
+ && usize <= SMALL_MAXCLASS)
+ arena_quarantine_junk_small(ptr, usize);
+ else
+ memset(ptr, 0x5a, usize);
+ }
} else {
assert(quarantine->curbytes == 0);
idalloc(ptr);
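
The quarantine.c hunk above routes junk filling of quarantined small objects through arena_quarantine_junk_small(), which can validate redzones, and skips that validation when Valgrind is driving, falling back to the 0x5a memset otherwise. The path is only reachable with both junk filling and a non-zero quarantine configured; one way to enable that is the application-supplied config string sketched below, where the je_ prefix and the 3.x option names are assumptions about this build.

    /* Compile-time opt-in: junk-fill freed memory, quarantine up to 4 KiB per thread. */
    const char *je_malloc_conf = "junk:true,quarantine:4096";
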
diff --git a/dep/jemalloc/src/rtree.c b/dep/jemalloc/src/rtree.c
index 90c6935a0ed..205957ac4e1 100644
--- a/dep/jemalloc/src/rtree.c
+++ b/dep/jemalloc/src/rtree.c
@@ -2,42 +2,55 @@
#include "jemalloc/internal/jemalloc_internal.h"
rtree_t *
-rtree_new(unsigned bits)
+rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
{
rtree_t *ret;
- unsigned bits_per_level, height, i;
+ unsigned bits_per_level, bits_in_leaf, height, i;
+
+ assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
- height = bits / bits_per_level;
- if (height * bits_per_level != bits)
- height++;
- assert(height * bits_per_level >= bits);
+ bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
+ if (bits > bits_in_leaf) {
+ height = 1 + (bits - bits_in_leaf) / bits_per_level;
+ if ((height-1) * bits_per_level + bits_in_leaf != bits)
+ height++;
+ } else {
+ height = 1;
+ }
+ assert((height-1) * bits_per_level + bits_in_leaf >= bits);
- ret = (rtree_t*)base_alloc(offsetof(rtree_t, level2bits) +
+ ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
(sizeof(unsigned) * height));
if (ret == NULL)
return (NULL);
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
height));
+ ret->alloc = alloc;
+ ret->dalloc = dalloc;
if (malloc_mutex_init(&ret->mutex)) {
- /* Leak the rtree. */
+ if (dalloc != NULL)
+ dalloc(ret);
return (NULL);
}
ret->height = height;
- if (bits_per_level * height > bits)
- ret->level2bits[0] = bits % bits_per_level;
- else
- ret->level2bits[0] = bits_per_level;
- for (i = 1; i < height; i++)
- ret->level2bits[i] = bits_per_level;
-
- ret->root = (void**)base_alloc(sizeof(void *) << ret->level2bits[0]);
+ if (height > 1) {
+ if ((height-1) * bits_per_level + bits_in_leaf > bits) {
+ ret->level2bits[0] = (bits - bits_in_leaf) %
+ bits_per_level;
+ } else
+ ret->level2bits[0] = bits_per_level;
+ for (i = 1; i < height-1; i++)
+ ret->level2bits[i] = bits_per_level;
+ ret->level2bits[height-1] = bits_in_leaf;
+ } else
+ ret->level2bits[0] = bits;
+
+ ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
if (ret->root == NULL) {
- /*
- * We leak the rtree here, since there's no generic base
- * deallocation.
- */
+ if (dalloc != NULL)
+ dalloc(ret);
return (NULL);
}
memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
@@ -45,6 +58,31 @@ rtree_new(unsigned bits)
return (ret);
}
+static void
+rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
+{
+
+ if (level < rtree->height - 1) {
+ size_t nchildren, i;
+
+ nchildren = ZU(1) << rtree->level2bits[level];
+ for (i = 0; i < nchildren; i++) {
+ void **child = (void **)node[i];
+ if (child != NULL)
+ rtree_delete_subtree(rtree, child, level + 1);
+ }
+ }
+ rtree->dalloc(node);
+}
+
+void
+rtree_delete(rtree_t *rtree)
+{
+
+ rtree_delete_subtree(rtree, rtree->root, 0);
+ rtree->dalloc(rtree);
+}
+
void
rtree_prefork(rtree_t *rtree)
{
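
The rtree.c hunks above make rtree_new() record the alloc/dalloc callbacks it is given, free the partially built tree on error instead of leaking it, and add rtree_delete() for recursive teardown. A sketch of the new internal constructor/destructor pair follows; the callback typedefs are assumed to be the plain alloc(size)/dalloc(ptr) shapes implied by the calls above, and malloc/free stand in for the base allocator used by the real callers.

    #include <stdlib.h>

    #include "jemalloc/internal/jemalloc_internal.h"

    static void *
    rtree_node_alloc(size_t size)
    {
        return (malloc(size));
    }

    static void
    rtree_node_dalloc(void *node)
    {
        free(node);
    }

    static rtree_t *
    chunk_map_new(void)
    {
        /* Map the full pointer width, as the new assert in rtree_new() permits. */
        return (rtree_new((unsigned)(sizeof(uintptr_t) << 3), rtree_node_alloc,
            rtree_node_dalloc));
    }
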
diff --git a/dep/jemalloc/src/stats.c b/dep/jemalloc/src/stats.c
index 43f87af6700..bef2ab33cd4 100644
--- a/dep/jemalloc/src/stats.c
+++ b/dep/jemalloc/src/stats.c
@@ -345,25 +345,25 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
-#define OPT_WRITE_BOOL(n) \
+#define OPT_WRITE_BOOL(n) \
if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
}
-#define OPT_WRITE_SIZE_T(n) \
+#define OPT_WRITE_SIZE_T(n) \
if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zu\n", sv); \
}
-#define OPT_WRITE_SSIZE_T(n) \
+#define OPT_WRITE_SSIZE_T(n) \
if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
}
-#define OPT_WRITE_CHAR_P(n) \
+#define OPT_WRITE_CHAR_P(n) \
if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
== 0) { \
malloc_cprintf(write_cb, cbopaque, \
diff --git a/dep/jemalloc/src/tcache.c b/dep/jemalloc/src/tcache.c
index 98ed19edd52..6de92960b2d 100644
--- a/dep/jemalloc/src/tcache.c
+++ b/dep/jemalloc/src/tcache.c
@@ -260,8 +260,8 @@ tcache_arena_dissociate(tcache_t *tcache)
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(&tcache->arena->lock);
ql_remove(&tcache->arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&tcache->arena->lock);
tcache_stats_merge(tcache, tcache->arena);
+ malloc_mutex_unlock(&tcache->arena->lock);
}
}
@@ -292,7 +292,7 @@ tcache_create(arena_t *arena)
else if (size <= tcache_maxclass)
tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
- tcache = (tcache_t *)icallocx(size, false, arena);
+ tcache = (tcache_t *)icalloct(size, false, arena);
if (tcache == NULL)
return (NULL);
@@ -366,7 +366,7 @@ tcache_destroy(tcache_t *tcache)
arena_dalloc_large(arena, chunk, tcache);
} else
- idallocx(tcache, false);
+ idalloct(tcache, false);
}
void
@@ -399,11 +399,14 @@ tcache_thread_cleanup(void *arg)
}
}
+/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
+ cassert(config_stats);
+
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
diff --git a/dep/jemalloc/src/tsd.c b/dep/jemalloc/src/tsd.c
index 961a546329c..700caabfe47 100644
--- a/dep/jemalloc/src/tsd.c
+++ b/dep/jemalloc/src/tsd.c
@@ -21,7 +21,7 @@ void
malloc_tsd_dalloc(void *wrapper)
{
- idalloc(wrapper);
+ idalloct(wrapper, false);
}
void
@@ -105,3 +105,37 @@ JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
+
+#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
+ !defined(_WIN32))
+void *
+tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
+{
+ pthread_t self = pthread_self();
+ tsd_init_block_t *iter;
+
+ /* Check whether this thread has already inserted into the list. */
+ malloc_mutex_lock(&head->lock);
+ ql_foreach(iter, &head->blocks, link) {
+ if (iter->thread == self) {
+ malloc_mutex_unlock(&head->lock);
+ return (iter->data);
+ }
+ }
+ /* Insert block into list. */
+ ql_elm_new(block, link);
+ block->thread = self;
+ ql_tail_insert(&head->blocks, block, link);
+ malloc_mutex_unlock(&head->lock);
+ return (NULL);
+}
+
+void
+tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
+{
+
+ malloc_mutex_lock(&head->lock);
+ ql_remove(&head->blocks, block, link);
+ malloc_mutex_unlock(&head->lock);
+}
+#endif
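
The tsd.c hunks above add tsd_init_check_recursion()/tsd_init_finish() so that, on platforms with neither native TLS nor the Windows/cleanup hooks, a thread can detect that it re-entered its own TSD initialization and reuse the in-flight block instead of recursing. A usage sketch reconstructed from the two bodies follows; the field names come from the code above, while the surrounding function and the malloc() stand-in for the real wrapper allocation are illustrative only.

    #include <stdlib.h>

    #include "jemalloc/internal/jemalloc_internal.h"

    /* head is a static tsd_init_head_t; block lives on the caller's stack. */
    static void *
    wrapper_get_once(tsd_init_head_t *head, tsd_init_block_t *block)
    {
        void *data = tsd_init_check_recursion(head, block);

        if (data != NULL)
            return (data);          /* Recursive entry on this thread. */
        block->data = malloc(128);  /* Stand-in for the real wrapper allocation. */
        /* ... initialize the wrapper, register the pthread destructor ... */
        tsd_init_finish(head, block);
        return (block->data);
    }
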
diff --git a/dep/jemalloc/src/util.c b/dep/jemalloc/src/util.c
index b3a01143698..93a19fd16f7 100644
--- a/dep/jemalloc/src/util.c
+++ b/dep/jemalloc/src/util.c
@@ -77,7 +77,7 @@ malloc_write(const char *s)
* provide a wrapper.
*/
int
-buferror(char *buf, size_t buflen)
+buferror(int err, char *buf, size_t buflen)
{
#ifdef _WIN32
@@ -85,34 +85,36 @@ buferror(char *buf, size_t buflen)
(LPSTR)buf, buflen, NULL);
return (0);
#elif defined(_GNU_SOURCE)
- char *b = strerror_r(errno, buf, buflen);
+ char *b = strerror_r(err, buf, buflen);
if (b != buf) {
strncpy(buf, b, buflen);
buf[buflen-1] = '\0';
}
return (0);
#else
- return (strerror_r(errno, buf, buflen));
+ return (strerror_r(err, buf, buflen));
#endif
}
uintmax_t
-malloc_strtoumax(const char *nptr, char **endptr, int base)
+malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base)
{
uintmax_t ret, digit;
int b;
bool neg;
const char *p, *ns;
+ p = nptr;
if (base < 0 || base == 1 || base > 36) {
+ ns = p;
set_errno(EINVAL);
- return (UINTMAX_MAX);
+ ret = UINTMAX_MAX;
+ goto label_return;
}
b = base;
/* Swallow leading whitespace and get sign, if any. */
neg = false;
- p = nptr;
while (true) {
switch (*p) {
case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
@@ -146,7 +148,7 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
if (b == 8)
p++;
break;
- case 'x':
+ case 'X': case 'x':
switch (p[2]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
@@ -164,7 +166,9 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
}
break;
default:
- break;
+ p++;
+ ret = 0;
+ goto label_return;
}
}
if (b == 0)
@@ -181,13 +185,22 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
if (ret < pret) {
/* Overflow. */
set_errno(ERANGE);
- return (UINTMAX_MAX);
+ ret = UINTMAX_MAX;
+ goto label_return;
}
p++;
}
if (neg)
ret = -ret;
+ if (p == ns) {
+ /* No conversion performed. */
+ set_errno(EINVAL);
+ ret = UINTMAX_MAX;
+ goto label_return;
+ }
+
+label_return:
if (endptr != NULL) {
if (p == ns) {
/* No characters were converted. */
@@ -195,7 +208,6 @@ malloc_strtoumax(const char *nptr, char **endptr, int base)
} else
*endptr = (char *)p;
}
-
return (ret);
}
@@ -331,7 +343,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
APPEND_C(' '); \
} \
} while (0)
-#define GET_ARG_NUMERIC(val, len) do { \
+#define GET_ARG_NUMERIC(val, len) do { \
switch (len) { \
case '?': \
val = va_arg(ap, int); \
@@ -354,6 +366,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case 'j': \
val = va_arg(ap, intmax_t); \
break; \
+ case 'j' | 0x80: \
+ val = va_arg(ap, uintmax_t); \
+ break; \
case 't': \
val = va_arg(ap, ptrdiff_t); \
break; \
@@ -385,11 +400,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
unsigned char len = '?';
f++;
- if (*f == '%') {
- /* %% */
- APPEND_C(*f);
- break;
- }
/* Flags. */
while (true) {
switch (*f) {
@@ -419,6 +429,10 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
case '*':
width = va_arg(ap, int);
f++;
+ if (width < 0) {
+ left_justify = true;
+ width = -width;
+ }
break;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
@@ -428,19 +442,16 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
- if (*f == '.') {
- f++;
- goto label_precision;
- } else
- goto label_length;
break;
- } case '.':
- f++;
- goto label_precision;
- default: goto label_length;
+ } default:
+ break;
}
+ /* Width/precision separator. */
+ if (*f == '.')
+ f++;
+ else
+ goto label_length;
/* Precision. */
- label_precision:
switch (*f) {
case '*':
prec = va_arg(ap, int);
@@ -469,16 +480,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
} else
len = 'l';
break;
- case 'j':
- len = 'j';
- f++;
- break;
- case 't':
- len = 't';
- f++;
- break;
- case 'z':
- len = 'z';
+ case 'q': case 'j': case 't': case 'z':
+ len = *f;
f++;
break;
default: break;
@@ -487,6 +490,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
switch (*f) {
char *s;
size_t slen;
+ case '%':
+ /* %% */
+ APPEND_C(*f);
+ f++;
+ break;
case 'd': case 'i': {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
@@ -540,7 +548,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
assert(len == '?' || len == 'l');
assert_not_implemented(len != 'l');
s = va_arg(ap, char *);
- slen = (prec == -1) ? strlen(s) : prec;
+ slen = (prec < 0) ? strlen(s) : prec;
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
@@ -553,8 +561,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
APPEND_PADDED_S(s, slen, width, left_justify);
f++;
break;
- }
- default: not_implemented();
+ } default: not_reached();
}
break;
} default: {
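
The util.c hunks above change buferror() to take the error code as an argument instead of reading errno itself, teach malloc_strtoumax() to accept "0X" prefixes and to report EINVAL when nothing converts, and let malloc_vsnprintf() handle negative '*' widths and "%%" inside the conversion switch. A sketch of the updated buferror() calling convention follows; the open() target is illustrative, while get_errno() and malloc_printf() are the helpers already used in the hunks above.

    #include <fcntl.h>

    #include "jemalloc/internal/jemalloc_internal.h"

    static int
    open_or_report(const char *path)
    {
        int fd = open(path, O_RDONLY);

        if (fd == -1) {
            char ebuf[64];

            /* Capture the error code at the failure site and pass it in. */
            buferror(get_errno(), ebuf, sizeof(ebuf));
            malloc_printf("<jemalloc>: open(\"%s\") failed: %s\n", path, ebuf);
        }
        return (fd);
    }
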
diff --git a/dep/jemalloc/src/zone.c b/dep/jemalloc/src/zone.c
index c62c183f65e..e0302ef4edc 100644
--- a/dep/jemalloc/src/zone.c
+++ b/dep/jemalloc/src/zone.c
@@ -137,7 +137,7 @@ zone_destroy(malloc_zone_t *zone)
{
/* This function should never be called. */
- assert(false);
+ not_reached();
return (NULL);
}