Diffstat (limited to 'deps/jemalloc/src')
-rw-r--r--  deps/jemalloc/src/arena.c     3838
-rw-r--r--  deps/jemalloc/src/base.c       448
-rw-r--r--  deps/jemalloc/src/bitmap.c     113
-rw-r--r--  deps/jemalloc/src/ckh.c        254
-rw-r--r--  deps/jemalloc/src/ctl.c       2706
-rw-r--r--  deps/jemalloc/src/extent.c    2000
-rw-r--r--  deps/jemalloc/src/hash.c         5
-rw-r--r--  deps/jemalloc/src/jemalloc.c  3537
-rw-r--r--  deps/jemalloc/src/mutex.c      206
-rw-r--r--  deps/jemalloc/src/prof.c      2404
-rw-r--r--  deps/jemalloc/src/rtree.c      383
-rw-r--r--  deps/jemalloc/src/stats.c     1574
-rw-r--r--  deps/jemalloc/src/tcache.c     771
-rw-r--r--  deps/jemalloc/src/tsd.c        282
-rw-r--r--  deps/jemalloc/src/zone.c       453
15 files changed, 12785 insertions, 6189 deletions
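
The largest change below replaces arena.c's old run/chunk purging machinery with extent-based, smoothstep-driven decay. As a standalone reference (not part of the commit), here is a minimal C sketch of the fixed-point backlog computation that the new arena_decay_backlog_npages_limit() performs; SMOOTHSTEP_NSTEPS, SMOOTHSTEP_BFP, and the h_steps values below are illustrative stand-ins for jemalloc's generated smoothstep tables.

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for jemalloc's generated smoothstep constants. */
    #define SMOOTHSTEP_NSTEPS 4
    #define SMOOTHSTEP_BFP    24   /* 2^24 represents 1.0 in fixed point. */

    /*
     * Smoothstep weights, one per backlog epoch; the most recent epoch
     * (highest index) gets a weight close to 1.0 so its dirty pages are
     * mostly retained.  Real values come from jemalloc's smoothstep.h;
     * these are placeholders.
     */
    static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
            1 << 20, 5 << 20, 11 << 20, 15 << 20
    };

    /*
     * Weight each epoch's count of unpurged dirty pages by its smoothstep
     * factor, then shift away the fixed point to obtain the number of pages
     * allowed to remain unpurged; anything above this limit is eligible for
     * purging.
     */
    static size_t
    decay_backlog_npages_limit(const size_t backlog[SMOOTHSTEP_NSTEPS]) {
            uint64_t sum = 0;
            for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
                    sum += (uint64_t)backlog[i] * h_steps[i];
            }
            return (size_t)(sum >> SMOOTHSTEP_BFP);
    }

In the committed code this limit feeds arena_decay_try_purge(), which purges extents down to it whenever the current dirty-page count exceeds the limit.
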
diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c
index dad707b63d..632fce5233 100644
--- a/deps/jemalloc/src/arena.c
+++ b/deps/jemalloc/src/arena.c
@@ -1,40 +1,57 @@
-#define JEMALLOC_ARENA_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Data. */
-ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
-arena_bin_info_t arena_bin_info[NBINS];
-
-JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t small_size2bin[] = {
-#define S2B_8(i) i,
-#define S2B_16(i) S2B_8(i) S2B_8(i)
-#define S2B_32(i) S2B_16(i) S2B_16(i)
-#define S2B_64(i) S2B_32(i) S2B_32(i)
-#define S2B_128(i) S2B_64(i) S2B_64(i)
-#define S2B_256(i) S2B_128(i) S2B_128(i)
-#define S2B_512(i) S2B_256(i) S2B_256(i)
-#define S2B_1024(i) S2B_512(i) S2B_512(i)
-#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
-#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
-#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
-#define SIZE_CLASS(bin, delta, size) \
- S2B_##delta(bin)
+/*
+ * Define names for both uninitialized and initialized phases, so that
+ * options and mallctl processing are straightforward.
+ */
+const char *percpu_arena_mode_names[] = {
+ "percpu",
+ "phycpu",
+ "disabled",
+ "percpu",
+ "phycpu"
+};
+percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;
+
+ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
+ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
+
+static atomic_zd_t dirty_decay_ms_default;
+static atomic_zd_t muzzy_decay_ms_default;
+
+const arena_bin_info_t arena_bin_info[NBINS] = {
+#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \
+ {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
+#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
+ lg_delta_lookup) \
+ BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \
+ (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \
+ (ndelta<<lg_delta)))
SIZE_CLASSES
-#undef S2B_8
-#undef S2B_16
-#undef S2B_32
-#undef S2B_64
-#undef S2B_128
-#undef S2B_256
-#undef S2B_512
-#undef S2B_1024
-#undef S2B_2048
-#undef S2B_4096
-#undef S2B_8192
-#undef SIZE_CLASS
+#undef BIN_INFO_bin_yes
+#undef BIN_INFO_bin_no
+#undef SC
+};
+
+const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
};
/******************************************************************************/
@@ -43,2535 +60,2120 @@ const uint8_t small_size2bin[] = {
* definition.
*/
-static void arena_purge(arena_t *arena, bool all);
-static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
- bool cleaned);
-static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
-static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
- arena_run_t *run, arena_bin_t *bin);
+static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
+ arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
+ bool is_background_thread);
+static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
+ bool is_background_thread, bool all);
+static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ arena_bin_t *bin);
+static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ arena_bin_t *bin);
/******************************************************************************/
-static inline int
-arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
- uintptr_t a_mapelm = (uintptr_t)a;
- uintptr_t b_mapelm = (uintptr_t)b;
-
- assert(a != NULL);
- assert(b != NULL);
-
- return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
-}
-
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
- u.rb_link, arena_run_comp)
-
-static inline int
-arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
-{
- int ret;
- size_t a_size = a->bits & ~PAGE_MASK;
- size_t b_size = b->bits & ~PAGE_MASK;
-
- ret = (a_size > b_size) - (a_size < b_size);
- if (ret == 0) {
- uintptr_t a_mapelm, b_mapelm;
-
- if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
- a_mapelm = (uintptr_t)a;
- else {
- /*
- * Treat keys as though they are lower than anything
- * else.
- */
- a_mapelm = 0;
+static bool
+arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+ if (config_debug) {
+ for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
+ assert(((char *)arena_stats)[i] == 0);
}
- b_mapelm = (uintptr_t)b;
-
- ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
}
-
- return (ret);
+#ifndef JEMALLOC_ATOMIC_U64
+ if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
+ WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+#endif
+ /* Memory is zeroed, so there is no need to clear stats. */
+ return false;
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
- u.rb_link, arena_avail_comp)
-
-static inline int
-arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
-{
-
- assert(a != NULL);
- assert(b != NULL);
-
- /*
- * Short-circuit for self comparison. The following comparison code
- * would come to the same result, but at the cost of executing the slow
- * path.
- */
- if (a == b)
- return (0);
-
- /*
- * Order such that chunks with higher fragmentation are "less than"
- * those with lower fragmentation -- purging order is from "least" to
- * "greatest". Fragmentation is measured as:
- *
- * mean current avail run size
- * --------------------------------
- * mean defragmented avail run size
- *
- * navail
- * -----------
- * nruns_avail nruns_avail-nruns_adjac
- * = ========================= = -----------------------
- * navail nruns_avail
- * -----------------------
- * nruns_avail-nruns_adjac
- *
- * The following code multiplies away the denominator prior to
- * comparison, in order to avoid division.
- *
- */
- {
- size_t a_val = (a->nruns_avail - a->nruns_adjac) *
- b->nruns_avail;
- size_t b_val = (b->nruns_avail - b->nruns_adjac) *
- a->nruns_avail;
+static void
+arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_lock(tsdn, &arena_stats->mtx);
+#endif
+}
- if (a_val < b_val)
- return (1);
- if (a_val > b_val)
- return (-1);
- }
- /*
- * Break ties by chunk address. For fragmented chunks, report lower
- * addresses as "lower", so that fragmentation reduction happens first
- * at lower addresses. However, use the opposite ordering for
- * unfragmented chunks, in order to increase the chances of
- * re-allocating dirty runs.
- */
- {
- uintptr_t a_chunk = (uintptr_t)a;
- uintptr_t b_chunk = (uintptr_t)b;
- int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
- if (a->nruns_adjac == 0) {
- assert(b->nruns_adjac == 0);
- ret = -ret;
- }
- return (ret);
- }
+static void
+arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
+#ifndef JEMALLOC_ATOMIC_U64
+ malloc_mutex_unlock(tsdn, &arena_stats->mtx);
+#endif
}
-/* Generate red-black tree functions. */
-rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
- dirty_link, arena_chunk_dirty_comp)
+static uint64_t
+arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ return *p;
+#endif
+}
-static inline bool
-arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
-{
- bool ret;
+static void
+arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ *p += x;
+#endif
+}
- if (pageind-1 < map_bias)
- ret = false;
- else {
- ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk,
- pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
- }
- return (ret);
+UNUSED static void
+arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ *p -= x;
+ assert(*p + x >= *p);
+#endif
}
-static inline bool
-arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
-{
- bool ret;
+/*
+ * Non-atomically sets *dst += src. *dst needs external synchronization.
+ * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
+ * the types here are atomic).
+ */
+static void
+arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+ atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
+#else
+ *dst += src;
+#endif
+}
- if (pageind+npages == chunk_npages)
- ret = false;
- else {
- assert(pageind+npages < chunk_npages);
- ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
- != arena_mapbits_dirty_get(chunk, pageind+npages));
- }
- return (ret);
+static size_t
+arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_zu(p, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ return atomic_load_zu(p, ATOMIC_RELAXED);
+#endif
}
-static inline bool
-arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
-{
+static void
+arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+ size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+ atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
+#endif
+}
- return (arena_avail_adjac_pred(chunk, pageind) ||
- arena_avail_adjac_succ(chunk, pageind, npages));
+static void
+arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+ size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+ atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
+#endif
}
+/* Like the _u64 variant, needs an externally synchronized *dst. */
static void
-arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
-{
+arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
+ size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
+ atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
+}
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
+void
+arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ szind_t szind, uint64_t nrequests) {
+ arena_stats_lock(tsdn, arena_stats);
+ arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
+ NBINS].nrequests, nrequests);
+ arena_stats_unlock(tsdn, arena_stats);
+}
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be inserted is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+void
+arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
+ arena_stats_lock(tsdn, arena_stats);
+ arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
+ arena_stats_unlock(tsdn, arena_stats);
+}
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac++;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac++;
- chunk->nruns_avail++;
- assert(chunk->nruns_avail > chunk->nruns_adjac);
+void
+arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
+ *nthreads += arena_nthreads_get(arena, false);
+ *dss = dss_prec_names[arena_dss_prec_get(arena)];
+ *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
+ *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
+ *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
+ *ndirty += extents_npages_get(&arena->extents_dirty);
+ *nmuzzy += extents_npages_get(&arena->extents_muzzy);
+}
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty += npages;
- chunk->ndirty += npages;
+void
+arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
+ const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
+ size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) {
+ cassert(config_stats);
+
+ arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
+ muzzy_decay_ms, nactive, ndirty, nmuzzy);
+
+ size_t base_allocated, base_resident, base_mapped;
+ base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
+ &base_mapped);
+
+ arena_stats_lock(tsdn, &arena->stats);
+
+ arena_stats_accum_zu(&astats->mapped, base_mapped
+ + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
+ arena_stats_accum_zu(&astats->retained,
+ extents_npages_get(&arena->extents_retained) << LG_PAGE);
+
+ arena_stats_accum_u64(&astats->decay_dirty.npurge,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.npurge));
+ arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.nmadvise));
+ arena_stats_accum_u64(&astats->decay_dirty.purged,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_dirty.purged));
+
+ arena_stats_accum_u64(&astats->decay_muzzy.npurge,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.npurge));
+ arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.nmadvise));
+ arena_stats_accum_u64(&astats->decay_muzzy.purged,
+ arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.decay_muzzy.purged));
+
+ arena_stats_accum_zu(&astats->base, base_allocated);
+ arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
+ arena_stats_accum_zu(&astats->resident, base_resident +
+ (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
+ extents_npages_get(&arena->extents_dirty) +
+ extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
+
+ for (szind_t i = 0; i < NSIZES - NBINS; i++) {
+ uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].nmalloc);
+ arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
+ arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
+
+ uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].ndalloc);
+ arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
+ arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
+
+ uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[i].nrequests);
+ arena_stats_accum_u64(&lstats[i].nrequests,
+ nmalloc + nrequests);
+ arena_stats_accum_u64(&astats->nrequests_large,
+ nmalloc + nrequests);
+
+ assert(nmalloc >= ndalloc);
+ assert(nmalloc - ndalloc <= SIZE_T_MAX);
+ size_t curlextents = (size_t)(nmalloc - ndalloc);
+ lstats[i].curlextents += curlextents;
+ arena_stats_accum_zu(&astats->allocated_large,
+ curlextents * sz_index2size(NBINS + i));
+ }
+
+ arena_stats_unlock(tsdn, &arena->stats);
+
+ /* tcache_bytes counts currently cached bytes. */
+ atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+ tcache_t *tcache;
+ ql_foreach(tcache, &arena->tcache_ql, link) {
+ szind_t i = 0;
+ for (; i < NBINS; i++) {
+ tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
+ arena_stats_accum_zu(&astats->tcache_bytes,
+ tbin->ncached * sz_index2size(i));
+ }
+ for (; i < nhbins; i++) {
+ tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+ arena_stats_accum_zu(&astats->tcache_bytes,
+ tbin->ncached * sz_index2size(i));
+ }
}
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+ malloc_mutex_prof_read(tsdn,
+ &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
+ &arena->tcache_ql_mtx);
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
+
+#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \
+ malloc_mutex_lock(tsdn, &arena->mtx); \
+ malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
+ &arena->mtx); \
+ malloc_mutex_unlock(tsdn, &arena->mtx);
+
+ /* Gather per arena mutex profiling data. */
+ READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
+ READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
+ arena_prof_mutex_extent_avail)
+ READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
+ arena_prof_mutex_extents_dirty)
+ READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
+ arena_prof_mutex_extents_muzzy)
+ READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
+ arena_prof_mutex_extents_retained)
+ READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
+ arena_prof_mutex_decay_dirty)
+ READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
+ arena_prof_mutex_decay_muzzy)
+ READ_ARENA_MUTEX_PROF_DATA(base->mtx,
+ arena_prof_mutex_base)
+#undef READ_ARENA_MUTEX_PROF_DATA
+
+ nstime_copy(&astats->uptime, &arena->create_time);
+ nstime_update(&astats->uptime);
+ nstime_subtract(&astats->uptime, &arena->create_time);
+
+ for (szind_t i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
- arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
+ malloc_mutex_lock(tsdn, &bin->lock);
+ malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock);
+ bstats[i].nmalloc += bin->stats.nmalloc;
+ bstats[i].ndalloc += bin->stats.ndalloc;
+ bstats[i].nrequests += bin->stats.nrequests;
+ bstats[i].curregs += bin->stats.curregs;
+ bstats[i].nfills += bin->stats.nfills;
+ bstats[i].nflushes += bin->stats.nflushes;
+ bstats[i].nslabs += bin->stats.nslabs;
+ bstats[i].reslabs += bin->stats.reslabs;
+ bstats[i].curslabs += bin->stats.curslabs;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ }
}
-static void
-arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
- size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
-{
-
- assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
- LG_PAGE));
-
- /*
- * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
- * removed and reinserted even if the run to be removed is clean.
- */
- if (chunk->ndirty != 0)
- arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
-
- if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
- chunk->nruns_adjac--;
- if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
- chunk->nruns_adjac--;
- chunk->nruns_avail--;
- assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
- == 0 && chunk->nruns_adjac == 0));
-
- if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
- arena->ndirty -= npages;
- chunk->ndirty -= npages;
+void
+arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
+ extent);
+ if (arena_dirty_decay_ms_get(arena) == 0) {
+ arena_decay_dirty(tsdn, arena, false, true);
+ } else {
+ arena_background_thread_inactivity_check(tsdn, arena, false);
}
- if (chunk->ndirty != 0)
- arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
-
- arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
- pageind));
}
-static inline void *
-arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
-{
+static void *
+arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
+ const arena_bin_info_t *bin_info) {
void *ret;
- unsigned regind;
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
- assert(run->nfree > 0);
- assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
-
- regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
- ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
- (uintptr_t)(bin_info->reg_interval * regind));
- run->nfree--;
- if (regind == run->nextind)
- run->nextind++;
- assert(regind < run->nextind);
- return (ret);
-}
-
-static inline void
-arena_run_reg_dalloc(arena_run_t *run, void *ptr)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t mapbits = arena_mapbits_get(chunk, pageind);
- size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- unsigned regind = arena_run_regind(run, bin_info, ptr);
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
- assert(run->nfree < bin_info->nregs);
- /* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset)) %
- (uintptr_t)bin_info->reg_interval == 0);
- assert((uintptr_t)ptr >= (uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset);
- /* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ size_t regind;
+
+ assert(extent_nfree_get(slab) > 0);
+ assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
- bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
- run->nfree++;
+ regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
+ ret = (void *)((uintptr_t)extent_addr_get(slab) +
+ (uintptr_t)(bin_info->reg_size * regind));
+ extent_nfree_dec(slab);
+ return ret;
}
-static inline void
-arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
-{
+#ifndef JEMALLOC_JET
+static
+#endif
+size_t
+arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
+ size_t diff, regind;
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), (npages << LG_PAGE));
- memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
- (npages << LG_PAGE));
-}
+ /* Freeing a pointer outside the slab can cause assertion failure. */
+ assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
+ assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
+ (uintptr_t)arena_bin_info[binind].reg_size == 0);
+
+ /* Avoid doing division with a variable divisor. */
+ diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
+ switch (binind) {
+#define REGIND_bin_yes(index, reg_size) \
+ case index: \
+ regind = diff / (reg_size); \
+ assert(diff == regind * (reg_size)); \
+ break;
+#define REGIND_bin_no(index, reg_size)
+#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \
+ lg_delta_lookup) \
+ REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta<<lg_delta))
+ SIZE_CLASSES
+#undef REGIND_bin_yes
+#undef REGIND_bin_no
+#undef SC
+ default: not_reached();
+ }
-static inline void
-arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
+ assert(regind < arena_bin_info[binind].nregs);
- VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
- LG_PAGE)), PAGE);
+ return regind;
}
-static inline void
-arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
-{
- size_t i;
- UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
+static void
+arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
+ arena_slab_data_t *slab_data, void *ptr) {
+ szind_t binind = extent_szind_get(slab);
+ const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ size_t regind = arena_slab_regind(slab, binind, ptr);
+
+ assert(extent_nfree_get(slab) < bin_info->nregs);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
- arena_run_page_mark_zeroed(chunk, run_ind);
- for (i = 0; i < PAGE / sizeof(size_t); i++)
- assert(p[i] == 0);
+ bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
+ extent_nfree_inc(slab);
}
static void
-arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
-{
+arena_nactive_add(arena_t *arena, size_t add_pages) {
+ atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
+}
- if (config_stats) {
- ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
- add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
- sub_pages) << LG_PAGE);
- if (cactive_diff != 0)
- stats_cactive_add(cactive_diff);
- }
+static void
+arena_nactive_sub(arena_t *arena, size_t sub_pages) {
+ assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
+ atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}
static void
-arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
- size_t flag_dirty, size_t need_pages)
-{
- size_t total_pages, rem_pages;
-
- total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
- LG_PAGE;
- assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
- flag_dirty);
- assert(need_pages <= total_pages);
- rem_pages = total_pages - need_pages;
-
- arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
- arena_cactive_update(arena, need_pages, 0);
- arena->nactive += need_pages;
-
- /* Keep track of trailing unused pages for later use. */
- if (rem_pages > 0) {
- if (flag_dirty != 0) {
- arena_mapbits_unallocated_set(chunk,
- run_ind+need_pages, (rem_pages << LG_PAGE),
- flag_dirty);
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- flag_dirty);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
- (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages));
- arena_mapbits_unallocated_set(chunk,
- run_ind+total_pages-1, (rem_pages << LG_PAGE),
- arena_mapbits_unzeroed_get(chunk,
- run_ind+total_pages-1));
- }
- arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
- false, true);
+arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
+ szind_t index, hindex;
+
+ cassert(config_stats);
+
+ if (usize < LARGE_MINCLASS) {
+ usize = LARGE_MINCLASS;
}
+ index = sz_size2index(usize);
+ hindex = (index >= NBINS) ? index - NBINS : 0;
+
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[hindex].nmalloc, 1);
}
static void
-arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
- bool remove, bool zero)
-{
- arena_chunk_t *chunk;
- size_t flag_dirty, run_ind, need_pages, i;
+arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
+ szind_t index, hindex;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
+ cassert(config_stats);
- if (remove) {
- arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
- need_pages);
+ if (usize < LARGE_MINCLASS) {
+ usize = LARGE_MINCLASS;
}
+ index = sz_size2index(usize);
+ hindex = (index >= NBINS) ? index - NBINS : 0;
- if (zero) {
- if (flag_dirty == 0) {
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &arena->stats.lstats[hindex].ndalloc, 1);
+}
+
+static void
+arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
+ size_t usize) {
+ arena_large_dalloc_stats_update(tsdn, arena, oldusize);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
+}
+
+extent_t *
+arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero) {
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ szind_t szind = sz_size2index(usize);
+ size_t mapped_add;
+ bool commit = true;
+ extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
+ szind, zero, &commit);
+ if (extent == NULL) {
+ extent = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
+ false, szind, zero, &commit);
+ }
+ size_t size = usize + sz_large_pad;
+ if (extent == NULL) {
+ extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
+ usize, sz_large_pad, alignment, false, szind, zero,
+ &commit);
+ if (config_stats) {
/*
- * The run is clean, so some pages may be zeroed (i.e.
- * never before touched).
+ * extent may be NULL on OOM, but in that case
+ * mapped_add isn't used below, so there's no need to
+			 * conditionally set it to 0 here.
*/
- for (i = 0; i < need_pages; i++) {
- if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
- != 0)
- arena_run_zero(chunk, run_ind+i, 1);
- else if (config_debug) {
- arena_run_page_validate_zeroed(chunk,
- run_ind+i);
- } else {
- arena_run_page_mark_zeroed(chunk,
- run_ind+i);
- }
+ mapped_add = size;
+ }
+ } else if (config_stats) {
+ mapped_add = 0;
+ }
+
+ if (extent != NULL) {
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
+ if (mapped_add != 0) {
+ arena_stats_add_zu(tsdn, &arena->stats,
+ &arena->stats.mapped, mapped_add);
}
- } else {
- /* The run is dirty, so all pages must be zeroed. */
- arena_run_zero(chunk, run_ind, need_pages);
+ arena_stats_unlock(tsdn, &arena->stats);
}
- } else {
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
+ arena_nactive_add(arena, size >> LG_PAGE);
}
- /*
- * Set the last element first, in case the run only contains one page
- * (i.e. both statements set the same element).
- */
- arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
+ return extent;
}
-static void
-arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
-{
-
- arena_run_split_large_helper(arena, run, size, true, zero);
+void
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_dalloc_stats_update(tsdn, arena,
+ extent_usize_get(extent));
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}
-static void
-arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
-{
+void
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldusize) {
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = oldusize - usize;
- arena_run_split_large_helper(arena, run, size, false, zero);
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_sub(arena, udiff >> LG_PAGE);
}
-static void
-arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
- size_t binind)
-{
- arena_chunk_t *chunk;
- size_t flag_dirty, run_ind, need_pages, i;
+void
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ size_t oldusize) {
+ size_t usize = extent_usize_get(extent);
+ size_t udiff = usize - oldusize;
- assert(binind != BININD_INVALID);
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+ arena_nactive_add(arena, udiff >> LG_PAGE);
+}
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
- need_pages = (size >> LG_PAGE);
- assert(need_pages > 0);
+static ssize_t
+arena_decay_ms_read(arena_decay_t *decay) {
+ return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
+}
- arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);
+static void
+arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
+ atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
+}
+static void
+arena_decay_deadline_init(arena_decay_t *decay) {
/*
- * Propagate the dirty and unzeroed flags to the allocated small run,
- * so that arena_dalloc_bin_run() has the ability to conditionally trim
- * clean pages.
- */
- arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
- /*
- * The first page will always be dirtied during small run
- * initialization, so a validation failure here would not actually
- * cause an observable failure.
+ * Generate a new deadline that is uniformly random within the next
+ * epoch after the current one.
*/
- if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
- run_ind) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind);
- for (i = 1; i < need_pages - 1; i++) {
- arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
- if (config_debug && flag_dirty == 0 &&
- arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind+i);
- }
- arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
- binind, flag_dirty);
- if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
- run_ind+need_pages-1) == 0)
- arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
- VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
- (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
-}
-
-static arena_chunk_t *
-arena_chunk_init_spare(arena_t *arena)
-{
- arena_chunk_t *chunk;
-
- assert(arena->spare != NULL);
-
- chunk = arena->spare;
- arena->spare = NULL;
-
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
- assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxclass);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
-
- return (chunk);
-}
-
-static arena_chunk_t *
-arena_chunk_init_hard(arena_t *arena)
-{
- arena_chunk_t *chunk;
- bool zero;
- size_t unzeroed, i;
-
- assert(arena->spare == NULL);
-
- zero = false;
- malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
- &zero, arena->dss_prec);
- malloc_mutex_lock(&arena->lock);
- if (chunk == NULL)
- return (NULL);
- if (config_stats)
- arena->stats.mapped += chunksize;
+ nstime_copy(&decay->deadline, &decay->epoch);
+ nstime_add(&decay->deadline, &decay->interval);
+ if (arena_decay_ms_read(decay) > 0) {
+ nstime_t jitter;
- chunk->arena = arena;
+ nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
+ nstime_ns(&decay->interval)));
+ nstime_add(&decay->deadline, &jitter);
+ }
+}
- /*
- * Claim that no pages are in use, since the header is merely overhead.
- */
- chunk->ndirty = 0;
+static bool
+arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
+ return (nstime_compare(&decay->deadline, time) <= 0);
+}
- chunk->nruns_avail = 0;
- chunk->nruns_adjac = 0;
+static size_t
+arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
+ uint64_t sum;
+ size_t npages_limit_backlog;
+ unsigned i;
/*
- * Initialize the map to contain one maximal free untouched run. Mark
- * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
- */
- unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
- arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
- unzeroed);
- /*
- * There is no need to initialize the internal page map entries unless
- * the chunk is not zeroed.
+ * For each element of decay_backlog, multiply by the corresponding
+ * fixed-point smoothstep decay factor. Sum the products, then divide
+ * to round down to the nearest whole number of pages.
*/
- if (zero == false) {
- VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
- map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
- map_bias+1)));
- for (i = map_bias+1; i < chunk_npages-1; i++)
- arena_mapbits_unzeroed_set(chunk, i, unzeroed);
- } else {
- VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
- map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
- chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
- map_bias+1)));
- if (config_debug) {
- for (i = map_bias+1; i < chunk_npages-1; i++) {
- assert(arena_mapbits_unzeroed_get(chunk, i) ==
- unzeroed);
- }
- }
+ sum = 0;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] * h_steps[i];
}
- arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
- unzeroed);
+ npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
- return (chunk);
+ return npages_limit_backlog;
}
-static arena_chunk_t *
-arena_chunk_alloc(arena_t *arena)
-{
- arena_chunk_t *chunk;
+static void
+arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
+ size_t npages_delta = (current_npages > decay->nunpurged) ?
+ current_npages - decay->nunpurged : 0;
+ decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
- if (arena->spare != NULL)
- chunk = arena_chunk_init_spare(arena);
- else {
- chunk = arena_chunk_init_hard(arena);
- if (chunk == NULL)
- return (NULL);
+ if (config_debug) {
+ if (current_npages > decay->ceil_npages) {
+ decay->ceil_npages = current_npages;
+ }
+ size_t npages_limit = arena_decay_backlog_npages_limit(decay);
+ assert(decay->ceil_npages >= npages_limit);
+ if (decay->ceil_npages > npages_limit) {
+ decay->ceil_npages = npages_limit;
+ }
}
-
- /* Insert the run into the runs_avail tree. */
- arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
-
- return (chunk);
}
static void
-arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
-{
- assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
- assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
- assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
- arena_maxclass);
- assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
- arena_maxclass);
- assert(arena_mapbits_dirty_get(chunk, map_bias) ==
- arena_mapbits_dirty_get(chunk, chunk_npages-1));
+arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
+ size_t current_npages) {
+ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
+ memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+ sizeof(size_t));
+ } else {
+ size_t nadvance_z = (size_t)nadvance_u64;
- /*
- * Remove run from the runs_avail tree, so that the arena does not use
- * it.
- */
- arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
- false, false);
+ assert((uint64_t)nadvance_z == nadvance_u64);
- if (arena->spare != NULL) {
- arena_chunk_t *spare = arena->spare;
+ memmove(decay->backlog, &decay->backlog[nadvance_z],
+ (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+ if (nadvance_z > 1) {
+ memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
+ nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
+ }
+ }
- arena->spare = chunk;
- malloc_mutex_unlock(&arena->lock);
- chunk_dealloc((void *)spare, chunksize, true);
- malloc_mutex_lock(&arena->lock);
- if (config_stats)
- arena->stats.mapped -= chunksize;
- } else
- arena->spare = chunk;
+ arena_decay_backlog_update_last(decay, current_npages);
}
-static arena_run_t *
-arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
-{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
-
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
-
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split_large(arena, run, size, zero);
- return (run);
+static void
+arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, size_t current_npages, size_t npages_limit,
+ bool is_background_thread) {
+ if (current_npages > npages_limit) {
+ arena_decay_to_limit(tsdn, arena, decay, extents, false,
+ npages_limit, is_background_thread);
}
-
- return (NULL);
}
-static arena_run_t *
-arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
-{
- arena_chunk_t *chunk;
- arena_run_t *run;
+static void
+arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
+ size_t current_npages) {
+ assert(arena_decay_deadline_reached(decay, time));
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
+ nstime_t delta;
+ nstime_copy(&delta, time);
+ nstime_subtract(&delta, &decay->epoch);
- /* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_large_helper(arena, size, zero);
- if (run != NULL)
- return (run);
+ uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
+ assert(nadvance_u64 > 0);
- /*
- * No usable runs. Create a new chunk from which to allocate the run.
- */
- chunk = arena_chunk_alloc(arena);
- if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split_large(arena, run, size, zero);
- return (run);
- }
+ /* Add nadvance_u64 decay intervals to epoch. */
+ nstime_copy(&delta, &decay->interval);
+ nstime_imultiply(&delta, nadvance_u64);
+ nstime_add(&decay->epoch, &delta);
- /*
- * arena_chunk_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped arena->lock in
- * arena_chunk_alloc(), so search one more time.
- */
- return (arena_run_alloc_large_helper(arena, size, zero));
+ /* Set a new deadline. */
+ arena_decay_deadline_init(decay);
+
+ /* Update the backlog. */
+ arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}
-static arena_run_t *
-arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
-{
- arena_run_t *run;
- arena_chunk_map_t *mapelm, key;
+static void
+arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, const nstime_t *time, bool is_background_thread) {
+ size_t current_npages = extents_npages_get(extents);
+ arena_decay_epoch_advance_helper(decay, time, current_npages);
- key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
+ size_t npages_limit = arena_decay_backlog_npages_limit(decay);
+ /* We may unlock decay->mtx when try_purge(). Finish logging first. */
+ decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
+ current_npages;
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- LG_PAGE));
- arena_run_split_small(arena, run, size, binind);
- return (run);
+ if (!background_thread_enabled() || is_background_thread) {
+ arena_decay_try_purge(tsdn, arena, decay, extents,
+ current_npages, npages_limit, is_background_thread);
}
-
- return (NULL);
}
-static arena_run_t *
-arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
-{
- arena_chunk_t *chunk;
- arena_run_t *run;
-
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
- assert(binind != BININD_INVALID);
+static void
+arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) {
+ arena_decay_ms_write(decay, decay_ms);
+ if (decay_ms > 0) {
+ nstime_init(&decay->interval, (uint64_t)decay_ms *
+ KQU(1000000));
+ nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
+ }
- /* Search the arena's chunks for the lowest best fit. */
- run = arena_run_alloc_small_helper(arena, size, binind);
- if (run != NULL)
- return (run);
+ nstime_init(&decay->epoch, 0);
+ nstime_update(&decay->epoch);
+ decay->jitter_state = (uint64_t)(uintptr_t)decay;
+ arena_decay_deadline_init(decay);
+ decay->nunpurged = 0;
+ memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
- /*
- * No usable runs. Create a new chunk from which to allocate the run.
- */
- chunk = arena_chunk_alloc(arena);
- if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
- arena_run_split_small(arena, run, size, binind);
- return (run);
+static bool
+arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms,
+ decay_stats_t *stats) {
+ if (config_debug) {
+ for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
+ assert(((char *)decay)[i] == 0);
+ }
+ decay->ceil_npages = 0;
+ }
+ if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ decay->purging = false;
+ arena_decay_reinit(decay, extents, decay_ms);
+ /* Memory is zeroed, so there is no need to clear stats. */
+ if (config_stats) {
+ decay->stats = stats;
}
+ return false;
+}
- /*
- * arena_chunk_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped arena->lock in
- * arena_chunk_alloc(), so search one more time.
- */
- return (arena_run_alloc_small_helper(arena, size, binind));
+static bool
+arena_decay_ms_valid(ssize_t decay_ms) {
+ if (decay_ms < -1) {
+ return false;
+ }
+ if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
+ KQU(1000)) {
+ return true;
+ }
+ return false;
}
-static inline void
-arena_maybe_purge(arena_t *arena)
-{
- size_t npurgeable, threshold;
+static bool
+arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool is_background_thread) {
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+
+ /* Purge all or nothing if the option is disabled. */
+ ssize_t decay_ms = arena_decay_ms_read(decay);
+ if (decay_ms <= 0) {
+ if (decay_ms == 0) {
+ arena_decay_to_limit(tsdn, arena, decay, extents, false,
+ 0, is_background_thread);
+ }
+ return false;
+ }
+
+ nstime_t time;
+ nstime_init(&time, 0);
+ nstime_update(&time);
+ if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
+ > 0)) {
+ /*
+ * Time went backwards. Move the epoch back in time and
+ * generate a new deadline, with the expectation that time
+ * typically flows forward for long enough periods of time that
+ * epochs complete. Unfortunately, this strategy is susceptible
+ * to clock jitter triggering premature epoch advances, but
+ * clock jitter estimation and compensation isn't feasible here
+ * because calls into this code are event-driven.
+ */
+ nstime_copy(&decay->epoch, &time);
+ arena_decay_deadline_init(decay);
+ } else {
+ /* Verify that time does not go backwards. */
+ assert(nstime_compare(&decay->epoch, &time) <= 0);
+ }
- /* Don't purge if the option is disabled. */
- if (opt_lg_dirty_mult < 0)
- return;
- /* Don't purge if all dirty pages are already being purged. */
- if (arena->ndirty <= arena->npurgatory)
- return;
- npurgeable = arena->ndirty - arena->npurgatory;
- threshold = (arena->nactive >> opt_lg_dirty_mult);
/*
- * Don't purge unless the number of purgeable pages exceeds the
- * threshold.
+ * If the deadline has been reached, advance to the current epoch and
+ * purge to the new limit if necessary. Note that dirty pages created
+ * during the current epoch are not subject to purge until a future
+ * epoch, so as a result purging only happens during epoch advances, or
+ * being triggered by background threads (scheduled event).
*/
- if (npurgeable <= threshold)
- return;
+ bool advance_epoch = arena_decay_deadline_reached(decay, &time);
+ if (advance_epoch) {
+ arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
+ is_background_thread);
+ } else if (is_background_thread) {
+ arena_decay_try_purge(tsdn, arena, decay, extents,
+ extents_npages_get(extents),
+ arena_decay_backlog_npages_limit(decay),
+ is_background_thread);
+ }
- arena_purge(arena, false);
+ return advance_epoch;
}
-static arena_chunk_t *
-chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
-{
- size_t *ndirty = (size_t *)arg;
+static ssize_t
+arena_decay_ms_get(arena_decay_t *decay) {
+ return arena_decay_ms_read(decay);
+}
- assert(chunk->ndirty != 0);
- *ndirty += chunk->ndirty;
- return (NULL);
+ssize_t
+arena_dirty_decay_ms_get(arena_t *arena) {
+ return arena_decay_ms_get(&arena->decay_dirty);
}
-static size_t
-arena_compute_npurgatory(arena_t *arena, bool all)
-{
- size_t npurgatory, npurgeable;
+ssize_t
+arena_muzzy_decay_ms_get(arena_t *arena) {
+ return arena_decay_ms_get(&arena->decay_muzzy);
+}
+static bool
+arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
+ }
+
+ malloc_mutex_lock(tsdn, &decay->mtx);
/*
- * Compute the minimum number of pages that this thread should try to
- * purge.
+ * Restart decay backlog from scratch, which may cause many dirty pages
+ * to be immediately purged. It would conceptually be possible to map
+ * the old backlog onto the new backlog, but there is no justification
+ * for such complexity since decay_ms changes are intended to be
+ * infrequent, either between the {-1, 0, >0} states, or a one-time
+ * arbitrary change during initial arena configuration.
*/
- npurgeable = arena->ndirty - arena->npurgatory;
+ arena_decay_reinit(decay, extents, decay_ms);
+ arena_maybe_decay(tsdn, arena, decay, extents, false);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
- if (all == false) {
- size_t threshold = (arena->nactive >> opt_lg_dirty_mult);
+ return false;
+}
- npurgatory = npurgeable - threshold;
- } else
- npurgatory = npurgeable;
+bool
+arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t decay_ms) {
+ return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
+ &arena->extents_dirty, decay_ms);
+}
- return (npurgatory);
+bool
+arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t decay_ms) {
+ return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
+ &arena->extents_muzzy, decay_ms);
}
-static void
-arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
- arena_chunk_mapelms_t *mapelms)
-{
- size_t pageind, npages;
+static size_t
+arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
+ extent_list_t *decay_extents) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- /*
- * Temporarily allocate free dirty runs within chunk. If all is false,
- * only operate on dirty runs that are fragments; otherwise operate on
- * all dirty runs.
- */
- for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
- if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
- size_t run_size =
- arena_mapbits_unallocated_size_get(chunk, pageind);
-
- npages = run_size >> LG_PAGE;
- assert(pageind + npages <= chunk_npages);
- assert(arena_mapbits_dirty_get(chunk, pageind) ==
- arena_mapbits_dirty_get(chunk, pageind+npages-1));
-
- if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
- (all || arena_avail_adjac(chunk, pageind,
- npages))) {
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- arena_run_split_large(arena, run, run_size,
- false);
- /* Append to list for later processing. */
- ql_elm_new(mapelm, u.ql_link);
- ql_tail_insert(mapelms, mapelm, u.ql_link);
- }
- } else {
- /* Skip run. */
- if (arena_mapbits_large_get(chunk, pageind) != 0) {
- npages = arena_mapbits_large_size_get(chunk,
- pageind) >> LG_PAGE;
- } else {
- size_t binind;
- arena_bin_info_t *bin_info;
- arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << LG_PAGE));
-
- assert(arena_mapbits_small_runind_get(chunk,
- pageind) == 0);
- binind = arena_bin_index(arena, run->bin);
- bin_info = &arena_bin_info[binind];
- npages = bin_info->run_size >> LG_PAGE;
- }
- }
+ /* Stash extents according to npages_limit. */
+ size_t nstashed = 0;
+ extent_t *extent;
+ while ((extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
+ npages_limit)) != NULL) {
+ extent_list_append(decay_extents, extent);
+ nstashed += extent_size_get(extent) >> LG_PAGE;
}
- assert(pageind == chunk_npages);
- assert(chunk->ndirty == 0 || all == false);
- assert(chunk->nruns_adjac == 0);
+ return nstashed;
}
static size_t
-arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
- arena_chunk_mapelms_t *mapelms)
-{
- size_t npurged, pageind, npages, nmadvise;
- arena_chunk_map_t *mapelm;
-
- malloc_mutex_unlock(&arena->lock);
- if (config_stats)
+arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
+ bool all, extent_list_t *decay_extents, bool is_background_thread) {
+ UNUSED size_t nmadvise, nunmapped;
+ size_t npurged;
+
+ if (config_stats) {
nmadvise = 0;
+ nunmapped = 0;
+ }
npurged = 0;
- ql_foreach(mapelm, mapelms, u.ql_link) {
- bool unzeroed;
- size_t flag_unzeroed, i;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- npages = arena_mapbits_large_size_get(chunk, pageind) >>
- LG_PAGE;
- assert(pageind + npages <= chunk_npages);
- unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
- LG_PAGE)), (npages << LG_PAGE));
- flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
- /*
- * Set the unzeroed flag for all pages, now that pages_purge()
- * has returned whether the pages were zeroed as a side effect
- * of purging. This chunk map modification is safe even though
- * the arena mutex isn't currently owned by this thread,
- * because the run is marked as allocated, thus protecting it
- * from being modified by any other thread. As long as these
- * writes don't perturb the first and last elements'
- * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
- */
- for (i = 0; i < npages; i++) {
- arena_mapbits_unzeroed_set(chunk, pageind+i,
- flag_unzeroed);
+
+ ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
+ for (extent_t *extent = extent_list_first(decay_extents); extent !=
+ NULL; extent = extent_list_first(decay_extents)) {
+ if (config_stats) {
+ nmadvise++;
}
+ size_t npages = extent_size_get(extent) >> LG_PAGE;
npurged += npages;
- if (config_stats)
- nmadvise++;
+ extent_list_remove(decay_extents, extent);
+ switch (extents_state_get(extents)) {
+ case extent_state_active:
+ not_reached();
+ case extent_state_dirty:
+ if (!all && muzzy_decay_ms != 0 &&
+ !extent_purge_lazy_wrapper(tsdn, arena,
+ r_extent_hooks, extent, 0,
+ extent_size_get(extent))) {
+ extents_dalloc(tsdn, arena, r_extent_hooks,
+ &arena->extents_muzzy, extent);
+ arena_background_thread_inactivity_check(tsdn,
+ arena, is_background_thread);
+ break;
+ }
+ /* Fall through. */
+ case extent_state_muzzy:
+ extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
+ extent);
+ if (config_stats) {
+ nunmapped += npages;
+ }
+ break;
+ case extent_state_retained:
+ default:
+ not_reached();
+ }
}
- malloc_mutex_lock(&arena->lock);
- if (config_stats)
- arena->stats.nmadvise += nmadvise;
- return (npurged);
+ if (config_stats) {
+ arena_stats_lock(tsdn, &arena->stats);
+ arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
+ 1);
+ arena_stats_add_u64(tsdn, &arena->stats,
+ &decay->stats->nmadvise, nmadvise);
+ arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
+ npurged);
+ arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
+ nunmapped << LG_PAGE);
+ arena_stats_unlock(tsdn, &arena->stats);
+ }
+
+ return npurged;
}
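
The loop above drives the dirty to muzzy to unmapped transitions: a dirty extent is lazily purged into the muzzy state when muzzy decay is enabled and this is not a forced full purge (falling through to unmapping if the lazy purge fails), while muzzy extents are unmapped outright. A minimal standalone sketch of that decision, using a hypothetical ext_state_t rather than jemalloc's extent machinery:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical states mirroring the dirty -> muzzy -> unmapped transitions. */
typedef enum { EXT_DIRTY, EXT_MUZZY, EXT_UNMAPPED } ext_state_t;

/*
 * Next state for a stashed extent: dirty extents move to muzzy via a lazy
 * purge unless this is a full purge or muzzy decay is disabled
 * (muzzy_decay_ms == 0); everything else is unmapped.
 */
static ext_state_t
decay_transition(ext_state_t cur, bool full_purge, long muzzy_decay_ms) {
        if (cur == EXT_DIRTY && !full_purge && muzzy_decay_ms != 0) {
                return EXT_MUZZY;
        }
        return EXT_UNMAPPED;
}

int
main(void) {
        printf("%d\n", decay_transition(EXT_DIRTY, false, 10000)); /* muzzy */
        printf("%d\n", decay_transition(EXT_DIRTY, true, 10000));  /* unmapped */
        printf("%d\n", decay_transition(EXT_MUZZY, false, 10000)); /* unmapped */
        return 0;
}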
+/*
+ * npages_limit: Decay as many dirty extents as possible without violating the
+ * invariant: (extents_npages_get(extents) >= npages_limit)
+ */
static void
-arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
- arena_chunk_mapelms_t *mapelms)
-{
- arena_chunk_map_t *mapelm;
- size_t pageind;
-
- /* Deallocate runs. */
- for (mapelm = ql_first(mapelms); mapelm != NULL;
- mapelm = ql_first(mapelms)) {
- arena_run_t *run;
-
- pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
- LG_PAGE));
- ql_remove(mapelms, mapelm, u.ql_link);
- arena_run_dalloc(arena, run, false, true);
+arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool all, size_t npages_limit,
+ bool is_background_thread) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 1);
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+
+ if (decay->purging) {
+ return;
}
-}
+ decay->purging = true;
+ malloc_mutex_unlock(tsdn, &decay->mtx);
-static inline size_t
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
-{
- size_t npurged;
- arena_chunk_mapelms_t mapelms;
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- ql_new(&mapelms);
+ extent_list_t decay_extents;
+ extent_list_init(&decay_extents);
- /*
- * If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail, and 2) so that it cannot be
- * completely discarded by another thread while arena->lock is dropped
- * by this thread. Note that the arena_run_dalloc() call will
- * implicitly deallocate the chunk, so no explicit action is required
- * in this function to deallocate the chunk.
- *
- * Note that once a chunk contains dirty pages, it cannot again contain
- * a single run unless 1) it is a dirty run, or 2) this function purges
- * dirty pages and causes the transition to a single clean run. Thus
- * (chunk == arena->spare) is possible, but it is not possible for
- * this function to be called on the spare unless it contains a dirty
- * run.
- */
- if (chunk == arena->spare) {
- assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
- assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
-
- arena_chunk_alloc(arena);
+ size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
+ npages_limit, &decay_extents);
+ if (npurge != 0) {
+ UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
+ &extent_hooks, decay, extents, all, &decay_extents,
+ is_background_thread);
+ assert(npurged == npurge);
}
- if (config_stats)
- arena->stats.purged += chunk->ndirty;
-
- /*
- * Operate on all dirty runs if there is no clean/dirty run
- * fragmentation.
- */
- if (chunk->nruns_adjac == 0)
- all = true;
-
- arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
- npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
- arena_chunk_unstash_purged(arena, chunk, &mapelms);
-
- return (npurged);
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ decay->purging = false;
}
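
arena_decay_to_limit evicts and purges whole extents only while the containing extents_t still holds more than npages_limit pages. A toy sketch of that loop over plain page counts, with a hypothetical purge_to_limit helper standing in for extents_evict over an LRU list:

#include <stddef.h>
#include <stdio.h>

/*
 * Evict whole "extents" (page counts here) while the total is still above
 * npages_limit; stop once it reaches or drops below the limit.
 */
static size_t
purge_to_limit(const size_t *extent_pages, size_t nextents,
    size_t total_npages, size_t npages_limit) {
        size_t purged = 0;
        for (size_t i = 0; i < nextents && total_npages > npages_limit; i++) {
                total_npages -= extent_pages[i];
                purged += extent_pages[i];
        }
        return purged;
}

int
main(void) {
        size_t pages[] = {16, 8, 4, 2};        /* LRU order, 30 pages total. */
        /* Evicts 16 then 8, then stops with 6 pages remaining (limit 8). */
        printf("purged %zu pages\n", purge_to_limit(pages, 4, 30, 8));
        return 0;
}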
-static void
-arena_purge(arena_t *arena, bool all)
-{
- arena_chunk_t *chunk;
- size_t npurgatory;
- if (config_debug) {
- size_t ndirty = 0;
+static bool
+arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
+ extents_t *extents, bool is_background_thread, bool all) {
+ if (all) {
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
+ is_background_thread);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
- arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
- chunks_dirty_iter_cb, (void *)&ndirty);
- assert(ndirty == arena->ndirty);
+ return false;
}
- assert(arena->ndirty > arena->npurgatory || all);
- assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
- arena->npurgatory) || all);
- if (config_stats)
- arena->stats.npurge++;
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ /* No need to wait if another thread is in progress. */
+ return true;
+ }
- /*
- * Add the minimum number of pages this thread should try to purge to
- * arena->npurgatory. This will keep multiple threads from racing to
- * reduce ndirty below the threshold.
- */
- npurgatory = arena_compute_npurgatory(arena, all);
- arena->npurgatory += npurgatory;
+ bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
+ is_background_thread);
+ size_t npages_new;
+ if (epoch_advanced) {
+ /* Backlog is updated on epoch advance. */
+ npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
+ }
+ malloc_mutex_unlock(tsdn, &decay->mtx);
- while (npurgatory > 0) {
- size_t npurgeable, npurged, nunpurged;
+ if (have_background_thread && background_thread_enabled() &&
+ epoch_advanced && !is_background_thread) {
+ background_thread_interval_check(tsdn, arena, decay, npages_new);
+ }
- /* Get next chunk with dirty pages. */
- chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
- if (chunk == NULL) {
- /*
- * This thread was unable to purge as many pages as
- * originally intended, due to races with other threads
- * that either did some of the purging work, or re-used
- * dirty pages.
- */
- arena->npurgatory -= npurgatory;
- return;
- }
- npurgeable = chunk->ndirty;
- assert(npurgeable != 0);
+ return false;
+}
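
Note that arena_decay_impl takes decay->mtx with a trylock: a thread that finds decay already in progress returns immediately rather than blocking, since the owning thread will do the purging. A small sketch of the same pattern with plain pthreads; decay_mtx and do_decay_work are placeholder names, not jemalloc symbols:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t decay_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the actual purging work. */
static void
do_decay_work(void) {
        /* ... purge stashed extents ... */
}

/*
 * If another thread already holds the decay mutex, skip the work instead
 * of blocking; that thread will purge. Returns true when skipped.
 */
static bool
try_decay(void) {
        if (pthread_mutex_trylock(&decay_mtx) != 0) {
                return true;
        }
        do_decay_work();
        pthread_mutex_unlock(&decay_mtx);
        return false;
}

int
main(void) {
        printf("skipped=%d\n", try_decay());
        return 0;
}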
- if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
- /*
- * This thread will purge all the dirty pages in chunk,
- * so set npurgatory to reflect this thread's intent to
- * purge the pages. This tends to reduce the chances
- * of the following scenario:
- *
- * 1) This thread sets arena->npurgatory such that
- * (arena->ndirty - arena->npurgatory) is at the
- * threshold.
- * 2) This thread drops arena->lock.
- * 3) Another thread causes one or more pages to be
- * dirtied, and immediately determines that it must
- * purge dirty pages.
- *
- * If this scenario *does* play out, that's okay,
- * because all of the purging work being done really
- * needs to happen.
- */
- arena->npurgatory += npurgeable - npurgatory;
- npurgatory = npurgeable;
- }
+static bool
+arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all) {
+ return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
+ &arena->extents_dirty, is_background_thread, all);
+}
- /*
- * Keep track of how many pages are purgeable, versus how many
- * actually get purged, and adjust counters accordingly.
- */
- arena->npurgatory -= npurgeable;
- npurgatory -= npurgeable;
- npurged = arena_chunk_purge(arena, chunk, all);
- nunpurged = npurgeable - npurged;
- arena->npurgatory += nunpurged;
- npurgatory += nunpurged;
- }
+static bool
+arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
+ bool all) {
+ return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
+ &arena->extents_muzzy, is_background_thread, all);
}
void
-arena_purge_all(arena_t *arena)
-{
-
- malloc_mutex_lock(&arena->lock);
- arena_purge(arena, true);
- malloc_mutex_unlock(&arena->lock);
+arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
+ if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
+ return;
+ }
+ arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}
static void
-arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
- size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
-{
- size_t size = *p_size;
- size_t run_ind = *p_run_ind;
- size_t run_pages = *p_run_pages;
-
- /* Try to coalesce forward. */
- if (run_ind + run_pages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
- arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
- size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages);
- size_t nrun_pages = nrun_size >> LG_PAGE;
-
- /*
- * Remove successor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk,
- run_ind+run_pages+nrun_pages-1) == nrun_size);
- assert(arena_mapbits_dirty_get(chunk,
- run_ind+run_pages+nrun_pages-1) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
- false, true);
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
+ arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
- size += nrun_size;
- run_pages += nrun_pages;
-
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
- }
-
- /* Try to coalesce backward. */
- if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
- run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
- flag_dirty) {
- size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
- run_ind-1);
- size_t prun_pages = prun_size >> LG_PAGE;
-
- run_ind -= prun_pages;
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
+}
- /*
- * Remove predecessor from runs_avail; the coalesced run is
- * inserted later.
- */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- prun_size);
- assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
- arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
- false);
+static void
+arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) {
+ assert(extent_nfree_get(slab) > 0);
+ extent_heap_insert(&bin->slabs_nonfull, slab);
+}
- size += prun_size;
- run_pages += prun_pages;
+static void
+arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) {
+ extent_heap_remove(&bin->slabs_nonfull, slab);
+}
- arena_mapbits_unallocated_size_set(chunk, run_ind, size);
- arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
- size);
+static extent_t *
+arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
+ extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+ if (slab == NULL) {
+ return NULL;
}
-
- *p_size = size;
- *p_run_ind = run_ind;
- *p_run_pages = run_pages;
+ if (config_stats) {
+ bin->stats.reslabs++;
+ }
+ return slab;
}
static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
-{
- arena_chunk_t *chunk;
- size_t size, run_ind, run_pages, flag_dirty;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- assert(run_ind >= map_bias);
- assert(run_ind < chunk_npages);
- if (arena_mapbits_large_get(chunk, run_ind) != 0) {
- size = arena_mapbits_large_size_get(chunk, run_ind);
- assert(size == PAGE ||
- arena_mapbits_large_size_get(chunk,
- run_ind+(size>>LG_PAGE)-1) == 0);
- } else {
- size_t binind = arena_bin_index(arena, run->bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
- size = bin_info->run_size;
- }
- run_pages = (size >> LG_PAGE);
- arena_cactive_update(arena, 0, run_pages);
- arena->nactive -= run_pages;
-
+arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+ assert(extent_nfree_get(slab) == 0);
/*
- * The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated and the caller
- * doesn't claim to have cleaned it.
+ * Tracking extents is required by arena_reset, which is not allowed
+ * for auto arenas. Bypass this step to avoid touching the extent
+ * linkage (often results in cache misses) for auto arenas.
*/
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
- dirty = true;
- flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
-
- /* Mark pages as unallocated in the chunk map. */
- if (dirty) {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- CHUNK_MAP_DIRTY);
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- CHUNK_MAP_DIRTY);
- } else {
- arena_mapbits_unallocated_set(chunk, run_ind, size,
- arena_mapbits_unzeroed_get(chunk, run_ind));
- arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
- arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
+ if (arena_is_auto(arena)) {
+ return;
}
+ extent_list_append(&bin->slabs_full, slab);
+}
- arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
- flag_dirty);
-
- /* Insert into runs_avail, now that coalescing is complete. */
- assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
- arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
- arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
-
- /* Deallocate chunk if it is now completely unused. */
- if (size == arena_maxclass) {
- assert(run_ind == map_bias);
- assert(run_pages == (arena_maxclass >> LG_PAGE));
- arena_chunk_dealloc(arena, chunk);
+static void
+arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
+ if (arena_is_auto(arena)) {
+ return;
}
+ extent_list_remove(&bin->slabs_full, slab);
+}
+void
+arena_reset(tsd_t *tsd, arena_t *arena) {
/*
- * It is okay to do dirty page processing here even if the chunk was
- * deallocated above, since in that case it is the spare. Waiting
- * until after possible chunk deallocation to do dirty processing
- * allows for an old spare to be fully deallocated, thus decreasing the
- * chances of spuriously crossing the dirty page purging threshold.
+ * Locking in this function is unintuitive. The caller guarantees that
+ * no concurrent operations are happening in this arena, but there are
+ * still reasons that some locking is necessary:
+ *
+ * - Some of the functions in the transitive closure of calls assume
+ * appropriate locks are held, and in some cases these locks are
+ * temporarily dropped to avoid lock order reversal or deadlock due to
+ * reentry.
+ * - mallctl("epoch", ...) may concurrently refresh stats. While
+ * strictly speaking this is a "concurrent operation", disallowing
+ * stats refreshes would impose an inconvenient burden.
*/
- if (dirty)
- arena_maybe_purge(arena);
-}
-static void
-arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize)
-{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- size_t head_npages = (oldsize - newsize) >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
+ /* Large allocations. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
- assert(oldsize > newsize);
+ for (extent_t *extent = extent_list_first(&arena->large); extent !=
+ NULL; extent = extent_list_first(&arena->large)) {
+ void *ptr = extent_base_get(extent);
+ size_t usize;
- /*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * leading run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
- */
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
- if (config_debug) {
- UNUSED size_t tail_npages = newsize >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
+ if (config_stats || (config_prof && opt_prof)) {
+ usize = sz_index2size(alloc_ctx.szind);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ }
+ /* Remove large allocation from prof sample set. */
+ if (config_prof && opt_prof) {
+ prof_free(tsd, ptr, usize, &alloc_ctx);
+ }
+ large_dalloc(tsd_tsdn(tsd), extent);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
+
+ /* Bins. */
+ for (unsigned i = 0; i < NBINS; i++) {
+ extent_t *slab;
+ arena_bin_t *bin = &arena->bins[i];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ if (bin->slabcur != NULL) {
+ slab = bin->slabcur;
+ bin->slabcur = NULL;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
+ NULL) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
+ slab = extent_list_first(&bin->slabs_full)) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ }
+ if (config_stats) {
+ bin->stats.curregs = 0;
+ bin->stats.curslabs = 0;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
- arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
- flag_dirty);
- arena_run_dalloc(arena, run, false, false);
+ atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}
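
arena_reset drains each container by repeatedly unlinking the first element under the lock, then dropping the lock for the actual deallocation, which may re-enter the allocator. A compact sketch of that drain pattern over a hypothetical singly linked list:

#include <pthread.h>
#include <stddef.h>

typedef struct node_s { struct node_s *next; } node_t;

/*
 * Unlink the head while the lock is held, drop the lock for the (possibly
 * re-entrant) teardown of that element, then re-acquire and look at the
 * new head.
 */
static void
drain_list(node_t **head, pthread_mutex_t *mtx, void (*destroy)(node_t *)) {
        pthread_mutex_lock(mtx);
        while (*head != NULL) {
                node_t *n = *head;
                *head = n->next;                /* Unlink under the lock. */
                pthread_mutex_unlock(mtx);
                destroy(n);                     /* Teardown without the lock. */
                pthread_mutex_lock(mtx);
        }
        pthread_mutex_unlock(mtx);
}

static void
destroy_node(node_t *n) {
        (void)n;        /* Nodes are stack-allocated in this demo. */
}

int
main(void) {
        pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
        node_t b = {NULL}, a = {&b};
        node_t *head = &a;
        drain_list(&head, &mtx, destroy_node);
        return head == NULL ? 0 : 1;
}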
static void
-arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- size_t oldsize, size_t newsize, bool dirty)
-{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- size_t head_npages = newsize >> LG_PAGE;
- size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
-
- assert(oldsize > newsize);
-
+arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
/*
- * Update the chunk map so that arena_run_dalloc() can treat the
- * trailing run as separately allocated. Set the last element of each
- * run first, in case of single-page runs.
+ * Iterate over the retained extents and destroy them. This gives the
+ * extent allocator underlying the extent hooks an opportunity to unmap
+ * all retained memory without having to keep its own metadata
+ * structures. In practice, virtual memory for dss-allocated extents is
+ * leaked here, so best practice is to avoid dss for arenas to be
+ * destroyed, or provide custom extent hooks that track retained
+ * dss-based extents for later reuse.
*/
- assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
- arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
- arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
-
- if (config_debug) {
- UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
- assert(arena_mapbits_large_size_get(chunk,
- pageind+head_npages+tail_npages-1) == 0);
- assert(arena_mapbits_dirty_get(chunk,
- pageind+head_npages+tail_npages-1) == flag_dirty);
+ extent_hooks_t *extent_hooks = extent_hooks_get(arena);
+ extent_t *extent;
+ while ((extent = extents_evict(tsdn, arena, &extent_hooks,
+ &arena->extents_retained, 0)) != NULL) {
+ extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
}
- arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
- flag_dirty);
-
- arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
- dirty, false);
}
-static arena_run_t *
-arena_bin_runs_first(arena_bin_t *bin)
-{
- arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
- if (mapelm != NULL) {
- arena_chunk_t *chunk;
- size_t pageind;
- arena_run_t *run;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
- pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t))) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) <<
- LG_PAGE));
- return (run);
- }
+void
+arena_destroy(tsd_t *tsd, arena_t *arena) {
+ assert(base_ind_get(arena->base) >= narenas_auto);
+ assert(arena_nthreads_get(arena, false) == 0);
+ assert(arena_nthreads_get(arena, true) == 0);
- return (NULL);
-}
+ /*
+ * No allocations have occurred since arena_reset() was called.
+ * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
+ * extents, so only retained extents may remain.
+ */
+ assert(extents_npages_get(&arena->extents_dirty) == 0);
+ assert(extents_npages_get(&arena->extents_muzzy) == 0);
-static void
-arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
-{
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+ /* Deallocate retained memory. */
+ arena_destroy_retained(tsd_tsdn(tsd), arena);
- assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
+ /*
+ * Remove the arena pointer from the arenas array. We rely on the fact
+ * that there is no way for the application to get a dirty read from the
+ * arenas array unless there is an inherent race in the application
+ * involving access of an arena being concurrently destroyed. The
+ * application must synchronize knowledge of the arena's validity, so as
+ * long as we use an atomic write to update the arenas array, the
+ * application will get a clean read any time after it synchronizes
+ * knowledge that the arena is no longer valid.
+ */
+ arena_set(base_ind_get(arena->base), NULL);
- arena_run_tree_insert(&bin->runs, mapelm);
+ /*
+ * Destroy the base allocator, which manages all metadata ever mapped by
+ * this arena.
+ */
+ base_delete(tsd_tsdn(tsd), arena->base);
}
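
The clean-read argument above hinges on the arena slot being cleared with a single atomic store. A sketch with C11 atomics over a hypothetical arena_table; jemalloc's actual arena_set/arena_get plumbing differs:

#include <stdatomic.h>
#include <stddef.h>

#define NARENAS 32

/* Hypothetical published-arena table. */
static _Atomic(void *) arena_table[NARENAS];

/* Unpublish slot ind so later, synchronized readers observe NULL. */
static void
arena_unpublish(unsigned ind) {
        atomic_store_explicit(&arena_table[ind], NULL, memory_order_release);
}

static void *
arena_lookup(unsigned ind) {
        return atomic_load_explicit(&arena_table[ind], memory_order_acquire);
}

int
main(void) {
        arena_unpublish(3);
        return arena_lookup(3) == NULL ? 0 : 1;
}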
-static void
-arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
-{
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+static extent_t *
+arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info,
+ szind_t szind) {
+ extent_t *slab;
+ bool zero, commit;
- assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
-
- arena_run_tree_remove(&bin->runs, mapelm);
-}
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
-static arena_run_t *
-arena_bin_nonfull_run_tryget(arena_bin_t *bin)
-{
- arena_run_t *run = arena_bin_runs_first(bin);
- if (run != NULL) {
- arena_bin_runs_remove(bin, run);
- if (config_stats)
- bin->stats.reruns++;
+ zero = false;
+ commit = true;
+ slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
+ bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
+
+ if (config_stats && slab != NULL) {
+ arena_stats_mapped_add(tsdn, &arena->stats,
+ bin_info->slab_size);
+ }
+
+ return slab;
+}
+
+static extent_t *
+arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ const arena_bin_info_t *bin_info) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ szind_t szind = sz_size2index(bin_info->reg_size);
+ bool zero = false;
+ bool commit = true;
+ extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
+ binind, &zero, &commit);
+ if (slab == NULL) {
+ slab = extents_alloc(tsdn, arena, &extent_hooks,
+ &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
+ true, binind, &zero, &commit);
+ }
+ if (slab == NULL) {
+ slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
+ bin_info, szind);
+ if (slab == NULL) {
+ return NULL;
+ }
}
- return (run);
+ assert(extent_slab_get(slab));
+
+ /* Initialize slab internals. */
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ extent_nfree_set(slab, bin_info->nregs);
+ bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
+
+ arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
+
+ return slab;
}
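
Slab allocation above prefers recycling: first a dirty extent, then a muzzy one, and only then a fresh mapping via arena_slab_alloc_hard. A self-contained sketch of that fallback chain, with toy pools and malloc standing in for the extent allocator:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical reuse pools; jemalloc's are extents_dirty / extents_muzzy. */
static void *dirty_pool[4];
static void *muzzy_pool[4];

static void *
pool_take(void **pool, size_t n) {
        for (size_t i = 0; i < n; i++) {
                if (pool[i] != NULL) {
                        void *p = pool[i];
                        pool[i] = NULL;
                        return p;
                }
        }
        return NULL;
}

/*
 * Reuse a dirty extent, else a muzzy one, else map fresh memory (modeled
 * here with malloc).
 */
static void *
slab_alloc_sketch(size_t slab_size) {
        void *slab = pool_take(dirty_pool, 4);
        if (slab == NULL) {
                slab = pool_take(muzzy_pool, 4);
        }
        if (slab == NULL) {
                slab = malloc(slab_size);
        }
        return slab;
}

int
main(void) {
        void *s = slab_alloc_sketch(4096);      /* Pools empty: falls back to malloc. */
        printf("%p\n", s);
        free(s);
        return 0;
}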
-static arena_run_t *
-arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
-{
- arena_run_t *run;
- size_t binind;
- arena_bin_info_t *bin_info;
+static extent_t *
+arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+ szind_t binind) {
+ extent_t *slab;
+ const arena_bin_info_t *bin_info;
- /* Look for a usable run. */
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
- /* No existing runs have any space available. */
+ /* Look for a usable slab. */
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL) {
+ return slab;
+ }
+ /* No existing slabs have any space available. */
- binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
- /* Allocate a new run. */
- malloc_mutex_unlock(&bin->lock);
+ /* Allocate a new slab. */
+ malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc_small(arena, bin_info->run_size, binind);
- if (run != NULL) {
- bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
- (uintptr_t)bin_info->bitmap_offset);
-
- /* Initialize run internals. */
- run->bin = bin;
- run->nextind = 0;
- run->nfree = bin_info->nregs;
- bitmap_init(bitmap, &bin_info->bitmap_info);
- }
- malloc_mutex_unlock(&arena->lock);
+ slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
/********************************/
- malloc_mutex_lock(&bin->lock);
- if (run != NULL) {
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (slab != NULL) {
if (config_stats) {
- bin->stats.nruns++;
- bin->stats.curruns++;
+ bin->stats.nslabs++;
+ bin->stats.curslabs++;
}
- return (run);
+ return slab;
}
/*
- * arena_run_alloc_small() failed, but another thread may have made
+ * arena_slab_alloc() failed, but another thread may have made
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
- run = arena_bin_nonfull_run_tryget(bin);
- if (run != NULL)
- return (run);
+ slab = arena_bin_slabs_nonfull_tryget(bin);
+ if (slab != NULL) {
+ return slab;
+ }
- return (NULL);
+ return NULL;
}
-/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
+/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
-arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
-{
- void *ret;
- size_t binind;
- arena_bin_info_t *bin_info;
- arena_run_t *run;
+arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
+ szind_t binind) {
+ const arena_bin_info_t *bin_info;
+ extent_t *slab;
- binind = arena_bin_index(arena, bin);
bin_info = &arena_bin_info[binind];
- bin->runcur = NULL;
- run = arena_bin_nonfull_run_get(arena, bin);
- if (bin->runcur != NULL && bin->runcur->nfree > 0) {
+ if (!arena_is_auto(arena) && bin->slabcur != NULL) {
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ bin->slabcur = NULL;
+ }
+ slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
+ if (bin->slabcur != NULL) {
/*
- * Another thread updated runcur while this one ran without the
- * bin lock in arena_bin_nonfull_run_get().
+ * Another thread updated slabcur while this one ran without the
+ * bin lock in arena_bin_nonfull_slab_get().
*/
- assert(bin->runcur->nfree > 0);
- ret = arena_run_reg_alloc(bin->runcur, bin_info);
- if (run != NULL) {
- arena_chunk_t *chunk;
-
- /*
- * arena_run_alloc_small() may have allocated run, or
- * it may have pulled run from the bin's run tree.
- * Therefore it is unsafe to make any assumptions about
- * how run has previously been used, and
- * arena_bin_lower_run() must be called, as if a region
- * were just deallocated from the run.
- */
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin_info->nregs)
- arena_dalloc_bin_run(arena, chunk, run, bin);
- else
- arena_bin_lower_run(arena, chunk, run, bin);
+ if (extent_nfree_get(bin->slabcur) > 0) {
+ void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur,
+ bin_info);
+ if (slab != NULL) {
+ /*
+ * arena_slab_alloc() may have allocated slab,
+ * or it may have been pulled from
+ * slabs_nonfull. Therefore it is unsafe to
+ * make any assumptions about how slab has
+ * previously been used, and
+ * arena_bin_lower_slab() must be called, as if
+ * a region were just deallocated from the slab.
+ */
+ if (extent_nfree_get(slab) == bin_info->nregs) {
+ arena_dalloc_bin_slab(tsdn, arena, slab,
+ bin);
+ } else {
+ arena_bin_lower_slab(tsdn, arena, slab,
+ bin);
+ }
+ }
+ return ret;
}
- return (ret);
- }
- if (run == NULL)
- return (NULL);
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ bin->slabcur = NULL;
+ }
- bin->runcur = run;
+ if (slab == NULL) {
+ return NULL;
+ }
+ bin->slabcur = slab;
- assert(bin->runcur->nfree > 0);
+ assert(extent_nfree_get(bin->slabcur) > 0);
- return (arena_run_reg_alloc(bin->runcur, bin_info));
+ return arena_slab_reg_alloc(tsdn, slab, bin_info);
}
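
Because bin->lock is dropped while the replacement slab is allocated, another thread may have installed slabcur in the meantime, and the freshly allocated slab then has to be handed back rather than installed. A simplified sketch of that re-check-after-relock pattern; toy_bin_t, stub_alloc, and the callback are hypothetical:

#include <pthread.h>
#include <stddef.h>

typedef struct {
        pthread_mutex_t lock;
        void *slabcur;          /* Shared "current slab" slot. */
} toy_bin_t;

/*
 * Called and returns with bin->lock held. The lock is dropped around the
 * slow allocation, so the shared slot is re-checked afterwards: if another
 * thread installed a slab in the meantime, the fresh one is handed back
 * via *leftover instead of being installed.
 */
static void *
refill_slabcur(toy_bin_t *bin, void *(*alloc_slab)(void), void **leftover) {
        pthread_mutex_unlock(&bin->lock);
        void *fresh = alloc_slab();             /* Slow path, lock not held. */
        pthread_mutex_lock(&bin->lock);
        *leftover = NULL;
        if (bin->slabcur != NULL) {
                *leftover = fresh;              /* Lost the race. */
                return bin->slabcur;
        }
        bin->slabcur = fresh;
        return fresh;
}

static void *
stub_alloc(void) {
        static int slab;
        return &slab;
}

int
main(void) {
        toy_bin_t bin = {PTHREAD_MUTEX_INITIALIZER, NULL};
        void *leftover;
        pthread_mutex_lock(&bin.lock);
        void *cur = refill_slabcur(&bin, stub_alloc, &leftover);
        pthread_mutex_unlock(&bin.lock);
        return (cur != NULL && leftover == NULL) ? 0 : 1;
}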
void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
- uint64_t prof_accumbytes)
-{
+arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
unsigned i, nfill;
arena_bin_t *bin;
- arena_run_t *run;
- void *ptr;
assert(tbin->ncached == 0);
- if (config_prof && arena_prof_accum(arena, prof_accumbytes))
- prof_idump();
+ if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
+ prof_idump(tsdn);
+ }
bin = &arena->bins[binind];
- malloc_mutex_lock(&bin->lock);
+ malloc_mutex_lock(tsdn, &bin->lock);
for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
- tbin->lg_fill_div); i < nfill; i++) {
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
- else
- ptr = arena_bin_malloc_hard(arena, bin);
- if (ptr == NULL)
+ tcache->lg_fill_div[binind]); i < nfill; i++) {
+ extent_t *slab;
+ void *ptr;
+ if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
+ 0) {
+ ptr = arena_slab_reg_alloc(tsdn, slab,
+ &arena_bin_info[binind]);
+ } else {
+ ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+ }
+ if (ptr == NULL) {
+ /*
+ * OOM. tbin->avail isn't yet filled down to its first
+ * element, so the successful allocations (if any) must
+ * be moved just before tbin->avail before bailing out.
+ */
+ if (i > 0) {
+ memmove(tbin->avail - i, tbin->avail - nfill,
+ i * sizeof(void *));
+ }
break;
- if (config_fill && opt_junk) {
+ }
+ if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ptr, &arena_bin_info[binind],
true);
}
/* Insert such that low regions get used first. */
- tbin->avail[nfill - 1 - i] = ptr;
+ *(tbin->avail - nfill + i) = ptr;
}
if (config_stats) {
- bin->stats.allocated += i * arena_bin_info[binind].reg_size;
bin->stats.nmalloc += i;
bin->stats.nrequests += tbin->tstats.nrequests;
+ bin->stats.curregs += i;
bin->stats.nfills++;
tbin->tstats.nrequests = 0;
}
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
tbin->ncached = i;
+ arena_decay_tick(tsdn, arena);
}
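
tbin->avail points one past the cached objects, and a fill of nfill items is written at avail[-nfill] .. avail[-1]; if only i of them succeed, they sit at avail[-nfill] .. avail[-nfill + i - 1] and must be shifted up against avail, which is what the memmove above does. A small worked example of that layout with a plain array:

#include <stdio.h>
#include <string.h>

int
main(void) {
        /*
         * Model tbin->avail as a pointer one past the end of the stack of
         * cached objects. The consumer pops from avail[-1] downward, so the
         * cached pointers must always sit immediately below avail.
         */
        void *slots[8] = {0};
        void **avail = &slots[8];
        int nfill = 6, filled = 2;
        int a = 1, b = 2;
        avail[-nfill + 0] = &a;
        avail[-nfill + 1] = &b;
        /* OOM after 2 of 6: move the survivors just below avail. */
        memmove(avail - filled, avail - nfill, filled * sizeof(void *));
        printf("%p %p\n", avail[-2], avail[-1]);        /* &a then &b */
        return 0;
}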
void
-arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
-{
-
- if (zero) {
- size_t redzone_size = bin_info->redzone_size;
- memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
- redzone_size);
- memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
- redzone_size);
- } else {
- memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
- bin_info->reg_interval);
+arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) {
+ if (!zero) {
+ memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
}
}
-#ifdef JEMALLOC_JET
-#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
-#endif
-static void
-arena_redzone_corruption(void *ptr, size_t usize, bool after,
- size_t offset, uint8_t byte)
-{
-
- malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
- "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
- after ? "after" : "before", ptr, usize, byte);
-}
-#ifdef JEMALLOC_JET
-#undef arena_redzone_corruption
-#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
-arena_redzone_corruption_t *arena_redzone_corruption =
- JEMALLOC_N(arena_redzone_corruption_impl);
-#endif
-
static void
-arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
-{
- size_t size = bin_info->reg_size;
- size_t redzone_size = bin_info->redzone_size;
- size_t i;
- bool error = false;
-
- for (i = 1; i <= redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
- if (*byte != 0xa5) {
- error = true;
- arena_redzone_corruption(ptr, size, false, i, *byte);
- if (reset)
- *byte = 0xa5;
- }
- }
- for (i = 0; i < redzone_size; i++) {
- uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
- if (*byte != 0xa5) {
- error = true;
- arena_redzone_corruption(ptr, size, true, i, *byte);
- if (reset)
- *byte = 0xa5;
- }
- }
- if (opt_abort && error)
- abort();
+arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) {
+ memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
+arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
+ arena_dalloc_junk_small_impl;
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
-#endif
-void
-arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
-{
- size_t redzone_size = bin_info->redzone_size;
-
- arena_redzones_validate(ptr, bin_info, false);
- memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
- bin_info->reg_interval);
-}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_small
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
-arena_dalloc_junk_small_t *arena_dalloc_junk_small =
- JEMALLOC_N(arena_dalloc_junk_small_impl);
-#endif
-
-void
-arena_quarantine_junk_small(void *ptr, size_t usize)
-{
- size_t binind;
- arena_bin_info_t *bin_info;
- cassert(config_fill);
- assert(opt_junk);
- assert(opt_quarantine);
- assert(usize <= SMALL_MAXCLASS);
-
- binind = SMALL_SIZE2BIN(usize);
- bin_info = &arena_bin_info[binind];
- arena_redzones_validate(ptr, bin_info, true);
-}
-
-void *
-arena_malloc_small(arena_t *arena, size_t size, bool zero)
-{
+static void *
+arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
void *ret;
arena_bin_t *bin;
- arena_run_t *run;
- size_t binind;
+ size_t usize;
+ extent_t *slab;
- binind = SMALL_SIZE2BIN(size);
assert(binind < NBINS);
bin = &arena->bins[binind];
- size = arena_bin_info[binind].reg_size;
+ usize = sz_index2size(binind);
- malloc_mutex_lock(&bin->lock);
- if ((run = bin->runcur) != NULL && run->nfree > 0)
- ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
- else
- ret = arena_bin_malloc_hard(arena, bin);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
+ ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
+ } else {
+ ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
+ }
if (ret == NULL) {
- malloc_mutex_unlock(&bin->lock);
- return (NULL);
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ return NULL;
}
if (config_stats) {
- bin->stats.allocated += size;
bin->stats.nmalloc++;
bin->stats.nrequests++;
+ bin->stats.curregs++;
+ }
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
+ prof_idump(tsdn);
}
- malloc_mutex_unlock(&bin->lock);
- if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
- prof_idump();
- if (zero == false) {
+ if (!zero) {
if (config_fill) {
- if (opt_junk) {
+ if (unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret,
&arena_bin_info[binind], false);
- } else if (opt_zero)
- memset(ret, 0, size);
+ } else if (unlikely(opt_zero)) {
+ memset(ret, 0, usize);
+ }
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
} else {
- if (config_fill && opt_junk) {
+ if (config_fill && unlikely(opt_junk_alloc)) {
arena_alloc_junk_small(ret, &arena_bin_info[binind],
true);
}
- VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
- memset(ret, 0, size);
+ memset(ret, 0, usize);
}
- return (ret);
+ arena_decay_tick(tsdn, arena);
+ return ret;
}
void *
-arena_malloc_large(arena_t *arena, size_t size, bool zero)
-{
- void *ret;
- UNUSED bool idump;
+arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
+ bool zero) {
+ assert(!tsdn_null(tsdn) || arena != NULL);
- /* Large allocation. */
- size = PAGE_CEILING(size);
- malloc_mutex_lock(&arena->lock);
- ret = (void *)arena_run_alloc_large(arena, size, zero);
- if (ret == NULL) {
- malloc_mutex_unlock(&arena->lock);
- return (NULL);
+ if (likely(!tsdn_null(tsdn))) {
+ arena = arena_choose(tsdn_tsd(tsdn), arena);
}
- if (config_stats) {
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
- }
- if (config_prof)
- idump = arena_prof_accum_locked(arena, size);
- malloc_mutex_unlock(&arena->lock);
- if (config_prof && idump)
- prof_idump();
-
- if (zero == false) {
- if (config_fill) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
- }
+ if (unlikely(arena == NULL)) {
+ return NULL;
}
- return (ret);
+ if (likely(size <= SMALL_MAXCLASS)) {
+ return arena_malloc_small(tsdn, arena, ind, zero);
+ }
+ return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}
-/* Only handles large allocations that require more than page alignment. */
void *
-arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
-{
+arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
+ bool zero, tcache_t *tcache) {
void *ret;
- size_t alloc_size, leadsize, trailsize;
- arena_run_t *run;
- arena_chunk_t *chunk;
- assert((size & PAGE_MASK) == 0);
+ if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
+ && (usize & PAGE_MASK) == 0))) {
+ /* Small; alignment doesn't require special slab placement. */
+ ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+ zero, tcache, true);
+ } else {
+ if (likely(alignment <= CACHELINE)) {
+ ret = large_malloc(tsdn, arena, usize, zero);
+ } else {
+ ret = large_palloc(tsdn, arena, usize, alignment, zero);
+ }
+ }
+ return ret;
+}
- alignment = PAGE_CEILING(alignment);
- alloc_size = size + alignment - PAGE;
+void
+arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
+ assert(usize <= SMALL_MAXCLASS);
- malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc_large(arena, alloc_size, false);
- if (run == NULL) {
- malloc_mutex_unlock(&arena->lock);
- return (NULL);
- }
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
- (uintptr_t)run;
- assert(alloc_size >= leadsize + size);
- trailsize = alloc_size - leadsize - size;
- ret = (void *)((uintptr_t)run + leadsize);
- if (leadsize != 0) {
- arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
- leadsize);
- }
- if (trailsize != 0) {
- arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
- false);
- }
- arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
+ extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true);
+ arena_t *arena = extent_arena_get(extent);
- if (config_stats) {
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
- }
- malloc_mutex_unlock(&arena->lock);
+ szind_t szind = sz_size2index(usize);
+ extent_szind_set(extent, szind);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ szind, false);
- if (config_fill && zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
- }
- return (ret);
-}
+ prof_accum_cancel(tsdn, &arena->prof_accum, usize);
-void
-arena_prof_promoted(const void *ptr, size_t size)
-{
- arena_chunk_t *chunk;
- size_t pageind, binind;
+ assert(isalloc(tsdn, ptr) == usize);
+}
+static size_t
+arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(ptr, false) == PAGE);
- assert(isalloc(ptr, true) == PAGE);
- assert(size <= SMALL_MAXCLASS);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- binind = SMALL_SIZE2BIN(size);
- assert(binind < NBINS);
- arena_mapbits_large_binind_set(chunk, pageind, binind);
- assert(isalloc(ptr, false) == PAGE);
- assert(isalloc(ptr, true) == size);
+ extent_szind_set(extent, NBINS);
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
+ NBINS, false);
+
+ assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
+
+ return LARGE_MINCLASS;
+}
+
+void
+arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+ bool slow_path) {
+ cassert(config_prof);
+ assert(opt_prof);
+
+ extent_t *extent = iealloc(tsdn, ptr);
+ size_t usize = arena_prof_demote(tsdn, extent, ptr);
+ if (usize <= tcache_maxclass) {
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ sz_size2index(usize), slow_path);
+ } else {
+ large_dalloc(tsdn, extent);
+ }
}
static void
-arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
-
- /* Dissociate run from bin. */
- if (run == bin->runcur)
- bin->runcur = NULL;
- else {
- size_t binind = arena_bin_index(chunk->arena, bin);
- arena_bin_info_t *bin_info = &arena_bin_info[binind];
-
- if (bin_info->nregs != 1) {
- /*
- * This block's conditional is necessary because if the
- * run only contains one region, then it never gets
- * inserted into the non-full runs tree.
- */
- arena_bin_runs_remove(bin, run);
+arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {
+ /* Dissociate slab from bin. */
+ if (slab == bin->slabcur) {
+ bin->slabcur = NULL;
+ } else {
+ szind_t binind = extent_szind_get(slab);
+ const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+
+ /*
+ * The following block's conditional is necessary because if the
+ * slab only contains one region, then it never gets inserted
+ * into the non-full slabs heap.
+ */
+ if (bin_info->nregs == 1) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ } else {
+ arena_bin_slabs_nonfull_remove(bin, slab);
}
}
}
static void
-arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
- size_t binind;
- arena_bin_info_t *bin_info;
- size_t npages, run_ind, past;
-
- assert(run != bin->runcur);
- assert(arena_run_tree_search(&bin->runs,
- arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
- == NULL);
-
- binind = arena_bin_index(chunk->arena, run->bin);
- bin_info = &arena_bin_info[binind];
+arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ arena_bin_t *bin) {
+ assert(slab != bin->slabcur);
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
/******************************/
- npages = bin_info->run_size >> LG_PAGE;
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
- past = (size_t)(PAGE_CEILING((uintptr_t)run +
- (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
- bin_info->reg_interval - bin_info->redzone_size) -
- (uintptr_t)chunk) >> LG_PAGE);
- malloc_mutex_lock(&arena->lock);
-
- /*
- * If the run was originally clean, and some pages were never touched,
- * trim the clean pages before deallocating the dirty portion of the
- * run.
- */
- assert(arena_mapbits_dirty_get(chunk, run_ind) ==
- arena_mapbits_dirty_get(chunk, run_ind+npages-1));
- if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
- npages) {
- /* Trim clean pages. Convert to large run beforehand. */
- assert(npages > 0);
- arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
- arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
- arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
- ((past - run_ind) << LG_PAGE), false);
- /* npages = past - run_ind; */
- }
- arena_run_dalloc(arena, run, true, false);
- malloc_mutex_unlock(&arena->lock);
+ arena_slab_dalloc(tsdn, arena, slab);
/****************************/
- malloc_mutex_lock(&bin->lock);
- if (config_stats)
- bin->stats.curruns--;
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (config_stats) {
+ bin->stats.curslabs--;
+ }
}
static void
-arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
- arena_bin_t *bin)
-{
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ arena_bin_t *bin) {
+ assert(extent_nfree_get(slab) > 0);
/*
- * Make sure that if bin->runcur is non-NULL, it refers to the lowest
- * non-full run. It is okay to NULL runcur out rather than proactively
- * keeping it pointing at the lowest non-full run.
+ * Make sure that if bin->slabcur is non-NULL, it refers to the
+ * oldest/lowest non-full slab. It is okay to NULL slabcur out rather
+ * than proactively keeping it pointing at the oldest/lowest non-full
+ * slab.
*/
- if ((uintptr_t)run < (uintptr_t)bin->runcur) {
- /* Switch runcur. */
- if (bin->runcur->nfree > 0)
- arena_bin_runs_insert(bin, bin->runcur);
- bin->runcur = run;
- if (config_stats)
- bin->stats.reruns++;
- } else
- arena_bin_runs_insert(bin, run);
+ if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
+ /* Switch slabcur. */
+ if (extent_nfree_get(bin->slabcur) > 0) {
+ arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
+ } else {
+ arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
+ }
+ bin->slabcur = slab;
+ if (config_stats) {
+ bin->stats.reslabs++;
+ }
+ } else {
+ arena_bin_slabs_nonfull_insert(bin, slab);
+ }
}
-void
-arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- arena_chunk_map_t *mapelm)
-{
- size_t pageind;
- arena_run_t *run;
- arena_bin_t *bin;
- arena_bin_info_t *bin_info;
- size_t size, binind;
-
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
- binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
- bin_info = &arena_bin_info[binind];
- if (config_fill || config_stats)
- size = bin_info->reg_size;
-
- if (config_fill && opt_junk)
+static void
+arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+ void *ptr, bool junked) {
+ arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ szind_t binind = extent_szind_get(slab);
+ arena_bin_t *bin = &arena->bins[binind];
+ const arena_bin_info_t *bin_info = &arena_bin_info[binind];
+
+ if (!junked && config_fill && unlikely(opt_junk_free)) {
arena_dalloc_junk_small(ptr, bin_info);
+ }
- arena_run_reg_dalloc(run, ptr);
- if (run->nfree == bin_info->nregs) {
- arena_dissociate_bin_run(chunk, run, bin);
- arena_dalloc_bin_run(arena, chunk, run, bin);
- } else if (run->nfree == 1 && run != bin->runcur)
- arena_bin_lower_run(arena, chunk, run, bin);
+ arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr);
+ unsigned nfree = extent_nfree_get(slab);
+ if (nfree == bin_info->nregs) {
+ arena_dissociate_bin_slab(arena, slab, bin);
+ arena_dalloc_bin_slab(tsdn, arena, slab, bin);
+ } else if (nfree == 1 && slab != bin->slabcur) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ arena_bin_lower_slab(tsdn, arena, slab, bin);
+ }
if (config_stats) {
- bin->stats.allocated -= size;
bin->stats.ndalloc++;
+ bin->stats.curregs--;
}
}
void
-arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind, arena_chunk_map_t *mapelm)
-{
- arena_run_t *run;
- arena_bin_t *bin;
-
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
- bin = run->bin;
- malloc_mutex_lock(&bin->lock);
- arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
- malloc_mutex_unlock(&bin->lock);
-}
-
-void
-arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind)
-{
- arena_chunk_map_t *mapelm;
-
- if (config_debug) {
- /* arena_ptr_small_binind_get() does extra sanity checking. */
- assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
- pageind)) != BININD_INVALID);
- }
- mapelm = arena_mapp_get(chunk, pageind);
- arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
+arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+ void *ptr) {
+ arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
-#endif
static void
-arena_dalloc_junk_large(void *ptr, size_t usize)
-{
-
- if (config_fill && opt_junk)
- memset(ptr, 0x5a, usize);
-}
-#ifdef JEMALLOC_JET
-#undef arena_dalloc_junk_large
-#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
-arena_dalloc_junk_large_t *arena_dalloc_junk_large =
- JEMALLOC_N(arena_dalloc_junk_large_impl);
-#endif
-
-void
-arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
+ szind_t binind = extent_szind_get(extent);
+ arena_bin_t *bin = &arena->bins[binind];
- if (config_fill || config_stats) {
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t usize = arena_mapbits_large_size_get(chunk, pageind);
-
- arena_dalloc_junk_large(ptr, usize);
- if (config_stats) {
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= usize;
- arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
- }
- }
-
- arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
+ malloc_mutex_unlock(tsdn, &bin->lock);
}
void
-arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
-{
+arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
+ extent_t *extent = iealloc(tsdn, ptr);
+ arena_t *arena = extent_arena_get(extent);
- malloc_mutex_lock(&arena->lock);
- arena_dalloc_large_locked(arena, chunk, ptr);
- malloc_mutex_unlock(&arena->lock);
+ arena_dalloc_bin(tsdn, arena, extent, ptr);
+ arena_decay_tick(tsdn, arena);
}
-static void
-arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size)
-{
-
- assert(size < oldsize);
-
- /*
- * Shrink the run, and make trailing pages available for other
- * allocations.
- */
- malloc_mutex_lock(&arena->lock);
- arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
- true);
- if (config_stats) {
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+bool
+arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
+ size_t extra, bool zero) {
+ /* Calls with non-zero extra had to clamp extra. */
+ assert(extra == 0 || size + extra <= LARGE_MAXCLASS);
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ if (unlikely(size > LARGE_MAXCLASS)) {
+ return true;
}
- malloc_mutex_unlock(&arena->lock);
-}
-static bool
-arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size, size_t extra, bool zero)
-{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
- size_t npages = oldsize >> LG_PAGE;
- size_t followsize;
-
- assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
-
- /* Try to extend the run. */
- assert(size + extra > oldsize);
- malloc_mutex_lock(&arena->lock);
- if (pageind + npages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
- (followsize = arena_mapbits_unallocated_size_get(chunk,
- pageind+npages)) >= size - oldsize) {
+ extent_t *extent = iealloc(tsdn, ptr);
+ size_t usize_min = sz_s2u(size);
+ size_t usize_max = sz_s2u(size + extra);
+ if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
/*
- * The next run is available and sufficiently large. Split the
- * following run, then merge the first part with the existing
- * allocation.
+ * Avoid moving the allocation if the size class can be left the
+ * same.
*/
- size_t flag_dirty;
- size_t splitsize = (oldsize + followsize <= size + extra)
- ? followsize : size + extra - oldsize;
- arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
- ((pageind+npages) << LG_PAGE)), splitsize, zero);
-
- size = oldsize + splitsize;
- npages = size >> LG_PAGE;
-
- /*
- * Mark the extended run as dirty if either portion of the run
- * was dirty before allocation. This is rather pedantic,
- * because there's not actually any sequence of events that
- * could cause the resulting run to be passed to
- * arena_run_dalloc() with the dirty argument set to false
- * (which is when dirty flag consistency would really matter).
- */
- flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
- arena_mapbits_dirty_get(chunk, pageind+npages-1);
- arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
- arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
-
- if (config_stats) {
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
- arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ assert(arena_bin_info[sz_size2index(oldsize)].reg_size ==
+ oldsize);
+ if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
+ sz_size2index(oldsize)) && (size > oldsize || usize_max <
+ oldsize)) {
+ return true;
}
- malloc_mutex_unlock(&arena->lock);
- return (false);
+
+ arena_decay_tick(tsdn, extent_arena_get(extent));
+ return false;
+ } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
+ return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+ zero);
}
- malloc_mutex_unlock(&arena->lock);
- return (true);
+ return true;
}
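
For small allocations, the request can stay in place whenever [size, size + extra] still rounds to the old size class, or the old usable size already covers it. A toy model of that check, using power-of-two classes rather than jemalloc's real size-class table:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical power-of-two size classes; jemalloc's are finer-grained. */
static size_t
toy_size_class(size_t size) {
        size_t cls = 8;
        while (cls < size) {
                cls <<= 1;
        }
        return cls;
}

/*
 * In the spirit of arena_ralloc_no_move: no move is needed if the new
 * request rounds to the same class as the old allocation, or if the old
 * allocation already covers the request.
 */
static bool
can_ralloc_in_place(size_t oldsize, size_t size, size_t extra) {
        if (toy_size_class(oldsize) == toy_size_class(size + extra)) {
                return true;
        }
        return size <= oldsize && size + extra >= oldsize;
}

int
main(void) {
        printf("%d\n", can_ralloc_in_place(64, 50, 0));  /* 1: same class */
        printf("%d\n", can_ralloc_in_place(64, 100, 0)); /* 0: must move */
        return 0;
}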
-#ifdef JEMALLOC_JET
-#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
-#endif
-static void
-arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
-{
-
- if (config_fill && opt_junk) {
- memset((void *)((uintptr_t)ptr + usize), 0x5a,
- old_usize - usize);
+static void *
+arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache) {
+ if (alignment == 0) {
+ return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
+ zero, tcache, true);
}
-}
-#ifdef JEMALLOC_JET
-#undef arena_ralloc_junk_large
-#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
-arena_ralloc_junk_large_t *arena_ralloc_junk_large =
- JEMALLOC_N(arena_ralloc_junk_large_impl);
-#endif
-
-/*
- * Try to resize a large allocation, in order to avoid copying. This will
- * always fail if growing an object, and the following run is already in use.
- */
-static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
- bool zero)
-{
- size_t psize;
-
- psize = PAGE_CEILING(size + extra);
- if (psize == oldsize) {
- /* Same size class. */
- return (false);
- } else {
- arena_chunk_t *chunk;
- arena_t *arena;
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = chunk->arena;
-
- if (psize < oldsize) {
- /* Fill before shrinking in order avoid a race. */
- arena_ralloc_junk_large(ptr, oldsize, psize);
- arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
- psize);
- return (false);
- } else {
- bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
- oldsize, PAGE_CEILING(size),
- psize - PAGE_CEILING(size), zero);
- if (config_fill && ret == false && zero == false) {
- if (opt_junk) {
- memset((void *)((uintptr_t)ptr +
- oldsize), 0xa5, isalloc(ptr,
- config_prof) - oldsize);
- } else if (opt_zero) {
- memset((void *)((uintptr_t)ptr +
- oldsize), 0, isalloc(ptr,
- config_prof) - oldsize);
- }
- }
- return (ret);
- }
+ usize = sz_sa2u(usize, alignment);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+ return NULL;
}
+ return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}
-bool
-arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
- bool zero)
-{
+void *
+arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
+ size_t size, size_t alignment, bool zero, tcache_t *tcache) {
+ size_t usize = sz_s2u(size);
+ if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
+ return NULL;
+ }
- /*
- * Avoid moving the allocation if the size class can be left the same.
- */
- if (oldsize <= arena_maxclass) {
- if (oldsize <= SMALL_MAXCLASS) {
- assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
- == oldsize);
- if ((size + extra <= SMALL_MAXCLASS &&
- SMALL_SIZE2BIN(size + extra) ==
- SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
- size + extra >= oldsize))
- return (false);
- } else {
- assert(size <= arena_maxclass);
- if (size + extra > SMALL_MAXCLASS) {
- if (arena_ralloc_large(ptr, oldsize, size,
- extra, zero) == false)
- return (false);
- }
+ if (likely(usize <= SMALL_MAXCLASS)) {
+ /* Try to avoid moving the allocation. */
+ if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
+ return ptr;
}
}
- /* Reallocation would require a move. */
- return (true);
-}
-
-void *
-arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
- bool try_tcache_dalloc)
-{
- void *ret;
- size_t copysize;
-
- /* Try to avoid moving the allocation. */
- if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
- return (ptr);
+ if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
+ return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
+ alignment, zero, tcache);
+ }
/*
* size and oldsize are different enough that we need to move the
- * object. In that case, fall back to allocating new space and
- * copying.
+ * object. In that case, fall back to allocating new space and copying.
*/
- if (alignment != 0) {
- size_t usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
- } else
- ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
-
+ void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
+ zero, tcache);
if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
- if (alignment != 0) {
- size_t usize = sa2u(size, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
- arena);
- } else
- ret = arena_malloc(arena, size, zero, try_tcache_alloc);
-
- if (ret == NULL)
- return (NULL);
+ return NULL;
}
- /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
-
/*
- * Copy at most size bytes (not size+extra), since the caller has no
- * expectation that the extra bytes will be reliably preserved.
+ * Junk/zero-filling were already done by
+ * ipalloc()/arena_malloc().
*/
- copysize = (size < oldsize) ? size : oldsize;
- VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
+
+ size_t copysize = (usize < oldsize) ? usize : oldsize;
memcpy(ret, ptr, copysize);
- iqalloct(ptr, try_tcache_dalloc);
- return (ret);
+ isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
+ return ret;
}
dss_prec_t
-arena_dss_prec_get(arena_t *arena)
-{
- dss_prec_t ret;
+arena_dss_prec_get(arena_t *arena) {
+ return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
+}
- malloc_mutex_lock(&arena->lock);
- ret = arena->dss_prec;
- malloc_mutex_unlock(&arena->lock);
- return (ret);
+bool
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
+ if (!have_dss) {
+ return (dss_prec != dss_prec_disabled);
+ }
+ atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
+ return false;
}
-void
-arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
-{
+ssize_t
+arena_dirty_decay_ms_default_get(void) {
+ return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
+}
- malloc_mutex_lock(&arena->lock);
- arena->dss_prec = dss_prec;
- malloc_mutex_unlock(&arena->lock);
+bool
+arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
+ }
+ atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
+ return false;
}
-void
-arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats)
-{
- unsigned i;
+ssize_t
+arena_muzzy_decay_ms_default_get(void) {
+ return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
+}
- malloc_mutex_lock(&arena->lock);
- *dss = dss_prec_names[arena->dss_prec];
- *nactive += arena->nactive;
- *ndirty += arena->ndirty;
+bool
+arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
+ if (!arena_decay_ms_valid(decay_ms)) {
+ return true;
+ }
+ atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
+ return false;
+}
- astats->mapped += arena->stats.mapped;
- astats->npurge += arena->stats.npurge;
- astats->nmadvise += arena->stats.nmadvise;
- astats->purged += arena->stats.purged;
- astats->allocated_large += arena->stats.allocated_large;
- astats->nmalloc_large += arena->stats.nmalloc_large;
- astats->ndalloc_large += arena->stats.ndalloc_large;
- astats->nrequests_large += arena->stats.nrequests_large;
+unsigned
+arena_nthreads_get(arena_t *arena, bool internal) {
+ return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
+}
- for (i = 0; i < nlclasses; i++) {
- lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
- lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
- lstats[i].nrequests += arena->stats.lstats[i].nrequests;
- lstats[i].curruns += arena->stats.lstats[i].curruns;
- }
- malloc_mutex_unlock(&arena->lock);
+void
+arena_nthreads_inc(arena_t *arena, bool internal) {
+ atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
+}
- for (i = 0; i < NBINS; i++) {
- arena_bin_t *bin = &arena->bins[i];
+void
+arena_nthreads_dec(arena_t *arena, bool internal) {
+ atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
+}
- malloc_mutex_lock(&bin->lock);
- bstats[i].allocated += bin->stats.allocated;
- bstats[i].nmalloc += bin->stats.nmalloc;
- bstats[i].ndalloc += bin->stats.ndalloc;
- bstats[i].nrequests += bin->stats.nrequests;
- if (config_tcache) {
- bstats[i].nfills += bin->stats.nfills;
- bstats[i].nflushes += bin->stats.nflushes;
- }
- bstats[i].nruns += bin->stats.nruns;
- bstats[i].reruns += bin->stats.reruns;
- bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(&bin->lock);
- }
+size_t
+arena_extent_sn_next(arena_t *arena) {
+ return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}
-bool
-arena_new(arena_t *arena, unsigned ind)
-{
+arena_t *
+arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ arena_t *arena;
+ base_t *base;
unsigned i;
- arena_bin_t *bin;
- arena->ind = ind;
- arena->nthreads = 0;
+ if (ind == 0) {
+ base = b0get();
+ } else {
+ base = base_new(tsdn, ind, extent_hooks);
+ if (base == NULL) {
+ return NULL;
+ }
+ }
+
+ arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
+ if (arena == NULL) {
+ goto label_error;
+ }
- if (malloc_mutex_init(&arena->lock))
- return (true);
+ atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
+ atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
+ arena->last_thd = NULL;
if (config_stats) {
- memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats =
- (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (arena->stats.lstats == NULL)
- return (true);
- memset(arena->stats.lstats, 0, nlclasses *
- sizeof(malloc_large_stats_t));
- if (config_tcache)
- ql_new(&arena->tcache_ql);
+ if (arena_stats_init(tsdn, &arena->stats)) {
+ goto label_error;
+ }
+
+ ql_new(&arena->tcache_ql);
+ if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
+ WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
}
- if (config_prof)
- arena->prof_accumbytes = 0;
+ if (config_prof) {
+ if (prof_accum_init(tsdn, &arena->prof_accum)) {
+ goto label_error;
+ }
+ }
- arena->dss_prec = chunk_dss_prec_get();
+ if (config_cache_oblivious) {
+ /*
+ * A nondeterministic seed based on the address of arena reduces
+ * the likelihood of lockstep non-uniform cache index
+ * utilization among identical concurrent processes, but at the
+ * cost of test repeatability. For debug builds, instead use a
+ * deterministic seed.
+ */
+ atomic_store_zu(&arena->offset_state, config_debug ? ind :
+ (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
+ }
- /* Initialize chunks. */
- arena_chunk_dirty_new(&arena->chunks_dirty);
- arena->spare = NULL;
+ atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
- arena->nactive = 0;
- arena->ndirty = 0;
- arena->npurgatory = 0;
+ atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
+ ATOMIC_RELAXED);
- arena_avail_tree_new(&arena->runs_avail);
+ atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
- /* Initialize bins. */
- for (i = 0; i < NBINS; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
- return (true);
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
- if (config_stats)
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+ extent_list_init(&arena->large);
+ if (malloc_mutex_init(&arena->large_mtx, "arena_large",
+ WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
+ goto label_error;
}
- return (false);
-}
-
-/*
- * Calculate bin_info->run_size such that it meets the following constraints:
- *
- * *) bin_info->run_size >= min_run_size
- * *) bin_info->run_size <= arena_maxclass
- * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
- * *) bin_info->nregs <= RUN_MAXREGS
- *
- * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
- * calculated here, since these settings are all interdependent.
- */
-static size_t
-bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
-{
- size_t pad_size;
- size_t try_run_size, good_run_size;
- uint32_t try_nregs, good_nregs;
- uint32_t try_hdr_size, good_hdr_size;
- uint32_t try_bitmap_offset, good_bitmap_offset;
- uint32_t try_ctx0_offset, good_ctx0_offset;
- uint32_t try_redzone0_offset, good_redzone0_offset;
-
- assert(min_run_size >= PAGE);
- assert(min_run_size <= arena_maxclass);
-
/*
- * Determine redzone size based on minimum alignment and minimum
- * redzone size. Add padding to the end of the run if it is needed to
- * align the regions. The padding allows each redzone to be half the
- * minimum alignment; without the padding, each redzone would have to
- * be twice as large in order to maintain alignment.
+ * Delay coalescing for dirty extents despite the disruptive effect on
+ * memory layout for best-fit extent allocation, since cached extents
+ * are likely to be reused soon after deallocation, and the cost of
+ * merging/splitting extents is non-trivial.
*/
- if (config_fill && opt_redzone) {
- size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
- if (align_min <= REDZONE_MINSIZE) {
- bin_info->redzone_size = REDZONE_MINSIZE;
- pad_size = 0;
- } else {
- bin_info->redzone_size = align_min >> 1;
- pad_size = bin_info->redzone_size;
- }
- } else {
- bin_info->redzone_size = 0;
- pad_size = 0;
+ if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
+ true)) {
+ goto label_error;
}
- bin_info->reg_interval = bin_info->reg_size +
- (bin_info->redzone_size << 1);
-
/*
- * Calculate known-valid settings before entering the run_size
- * expansion loop, so that the first part of the loop always copies
- * valid settings.
- *
- * The do..while loop iteratively reduces the number of regions until
- * the run header and the regions no longer overlap. A closed formula
- * would be quite messy, since there is an interdependency between the
- * header's mask length and the number of regions.
+ * Coalesce muzzy extents immediately, because operations on them are in
+ * the critical path much less often than for dirty extents.
*/
- try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
- }
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /* Add space for one (prof_ctx_t *) per region. */
- try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
- } else
- try_ctx0_offset = 0;
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
-
- /* run_size expansion loop. */
- do {
- /*
- * Copy valid settings before trying more aggressive settings.
- */
- good_run_size = try_run_size;
- good_nregs = try_nregs;
- good_hdr_size = try_hdr_size;
- good_bitmap_offset = try_bitmap_offset;
- good_ctx0_offset = try_ctx0_offset;
- good_redzone0_offset = try_redzone0_offset;
-
- /* Try more aggressive settings. */
- try_run_size += PAGE;
- try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
- bin_info->reg_interval)
- + 1; /* Counter-act try_nregs-- in loop. */
- if (try_nregs > RUN_MAXREGS) {
- try_nregs = RUN_MAXREGS
- + 1; /* Counter-act try_nregs-- in loop. */
+ if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
+ false)) {
+ goto label_error;
+ }
+ /*
+ * Coalesce retained extents immediately, in part because they will
+ * never be evicted (and therefore there's no opportunity for delayed
+ * coalescing), but also because operations on retained extents are not
+ * in the critical path.
+ */
+ if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
+ false)) {
+ goto label_error;
+ }
+
+ if (arena_decay_init(&arena->decay_dirty, &arena->extents_dirty,
+ arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
+ goto label_error;
+ }
+ if (arena_decay_init(&arena->decay_muzzy, &arena->extents_muzzy,
+ arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
+ goto label_error;
+ }
+
+ arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
+ if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
+ WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
+
+ extent_avail_new(&arena->extent_avail);
+ if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
+ WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
+ goto label_error;
+ }
+
+ /* Initialize bins. */
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ if (malloc_mutex_init(&bin->lock, "arena_bin",
+ WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) {
+ goto label_error;
}
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
- /* Pad to a long boundary. */
- try_hdr_size = LONG_CEILING(try_hdr_size);
- try_bitmap_offset = try_hdr_size;
- /* Add space for bitmap. */
- try_hdr_size += bitmap_size(try_nregs);
- if (config_prof && opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /*
- * Add space for one (prof_ctx_t *) per region.
- */
- try_hdr_size += try_nregs *
- sizeof(prof_ctx_t *);
- }
- try_redzone0_offset = try_run_size - (try_nregs *
- bin_info->reg_interval) - pad_size;
- } while (try_hdr_size > try_redzone0_offset);
- } while (try_run_size <= arena_maxclass
- && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
- RUN_MAX_OVRHD_RELAX
- && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
- && try_nregs < RUN_MAXREGS);
+ bin->slabcur = NULL;
+ extent_heap_new(&bin->slabs_nonfull);
+ extent_list_init(&bin->slabs_full);
+ if (config_stats) {
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
+ }
+ }
- assert(good_hdr_size <= good_redzone0_offset);
+ arena->base = base;
+ /* Set arena before creating background threads. */
+ arena_set(ind, arena);
- /* Copy final settings. */
- bin_info->run_size = good_run_size;
- bin_info->nregs = good_nregs;
- bin_info->bitmap_offset = good_bitmap_offset;
- bin_info->ctx0_offset = good_ctx0_offset;
- bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
+ nstime_init(&arena->create_time, 0);
+ nstime_update(&arena->create_time);
- assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
- * bin_info->reg_interval) + pad_size == bin_info->run_size);
+ /* We don't support reentrancy for arena 0 bootstrapping. */
+ if (ind != 0) {
+ /*
+ * If we're here, then arena 0 already exists, so bootstrapping
+ * is done enough that we should have tsd.
+ */
+ assert(!tsdn_null(tsdn));
+ pre_reentrancy(tsdn_tsd(tsdn), arena);
+ if (hooks_arena_new_hook) {
+ hooks_arena_new_hook();
+ }
+ post_reentrancy(tsdn_tsd(tsdn));
+ }
- return (good_run_size);
+ return arena;
+label_error:
+ if (ind != 0) {
+ base_delete(tsdn, base);
+ }
+ return NULL;
}
-static void
-bin_info_init(void)
-{
- arena_bin_info_t *bin_info;
- size_t prev_run_size = PAGE;
-
-#define SIZE_CLASS(bin, delta, size) \
- bin_info = &arena_bin_info[bin]; \
- bin_info->reg_size = size; \
- prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
- bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
- SIZE_CLASSES
-#undef SIZE_CLASS
+void
+arena_boot(void) {
+ arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
+ arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
}
void
-arena_boot(void)
-{
- size_t header_size;
- unsigned i;
+arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
+}
- /*
- * Compute the header size such that it is large enough to contain the
- * page map. The page map is biased to omit entries for the header
- * itself, so some iteration is necessary to compute the map bias.
- *
- * 1) Compute safe header_size and map_bias values that include enough
- * space for an unbiased page map.
- * 2) Refine map_bias based on (1) to omit the header pages in the page
- * map. The resulting map_bias may be one too small.
- * 3) Refine map_bias based on (2). The result will be >= the result
- * from (2), and will always be correct.
- */
- map_bias = 0;
- for (i = 0; i < 3; i++) {
- header_size = offsetof(arena_chunk_t, map) +
- (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
- map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
- != 0);
+void
+arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
+ if (config_stats) {
+ malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
}
- assert(map_bias > 0);
+}
- arena_maxclass = chunksize - (map_bias << LG_PAGE);
+void
+arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
+}
- bin_info_init();
+void
+arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
+ extents_prefork(tsdn, &arena->extents_dirty);
+ extents_prefork(tsdn, &arena->extents_muzzy);
+ extents_prefork(tsdn, &arena->extents_retained);
}
void
-arena_prefork(arena_t *arena)
-{
- unsigned i;
+arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
+}
- malloc_mutex_prefork(&arena->lock);
- for (i = 0; i < NBINS; i++)
- malloc_mutex_prefork(&arena->bins[i].lock);
+void
+arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
+ base_prefork(tsdn, arena->base);
+}
+
+void
+arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_prefork(tsdn, &arena->large_mtx);
+}
+
+void
+arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
+ for (unsigned i = 0; i < NBINS; i++) {
+ malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
+ }
}
void
-arena_postfork_parent(arena_t *arena)
-{
+arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
- for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_parent(&arena->bins[i].lock);
- malloc_mutex_postfork_parent(&arena->lock);
+ for (i = 0; i < NBINS; i++) {
+ malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
+ }
+ malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
+ base_postfork_parent(tsdn, arena->base);
+ malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
+ extents_postfork_parent(tsdn, &arena->extents_dirty);
+ extents_postfork_parent(tsdn, &arena->extents_muzzy);
+ extents_postfork_parent(tsdn, &arena->extents_retained);
+ malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
+ if (config_stats) {
+ malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
+ }
}
void
-arena_postfork_child(arena_t *arena)
-{
+arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
unsigned i;
- for (i = 0; i < NBINS; i++)
- malloc_mutex_postfork_child(&arena->bins[i].lock);
- malloc_mutex_postfork_child(&arena->lock);
+ atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
+ atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
+ if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
+ arena_nthreads_inc(arena, false);
+ }
+ if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
+ arena_nthreads_inc(arena, true);
+ }
+ if (config_stats) {
+ ql_new(&arena->tcache_ql);
+ tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
+ if (tcache != NULL && tcache->arena == arena) {
+ ql_elm_new(tcache, link);
+ ql_tail_insert(&arena->tcache_ql, tcache, link);
+ }
+ }
+
+ for (i = 0; i < NBINS; i++) {
+ malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
+ }
+ malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
+ base_postfork_child(tsdn, arena->base);
+ malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
+ extents_postfork_child(tsdn, &arena->extents_dirty);
+ extents_postfork_child(tsdn, &arena->extents_muzzy);
+ extents_postfork_child(tsdn, &arena->extents_retained);
+ malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
+ malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
+ if (config_stats) {
+ malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
+ }
}
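
The arena_prefork*/arena_postfork_* split above follows the usual fork-safety pattern: every arena mutex is acquired in a fixed order before fork() so the child never inherits a lock held by a thread that no longer exists, then the locks are released in the parent and restored to a usable state in the child (the hunk above also re-zeroes the per-arena thread counts and rebuilds the tcache list there, since only the forking thread survives). A minimal standalone sketch of that pattern with POSIX pthread_atfork() — the demo_* names and the single lock are illustrative, not jemalloc's handlers — is:

#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Prepare: take every lock the post-fork state depends on. */
static void
demo_prefork(void) {
	pthread_mutex_lock(&demo_lock);
}

/* Parent: simply drop the locks taken in the prepare handler. */
static void
demo_postfork_parent(void) {
	pthread_mutex_unlock(&demo_lock);
}

/* Child: the forking thread owns the lock, so it may release it here. */
static void
demo_postfork_child(void) {
	pthread_mutex_unlock(&demo_lock);
}

static void
demo_install_fork_handlers(void) {
	pthread_atfork(demo_prefork, demo_postfork_parent,
	    demo_postfork_child);
}
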
diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c
index 4e62e8fa91..97078b134d 100644
--- a/deps/jemalloc/src/base.c
+++ b/deps/jemalloc/src/base.c
@@ -1,142 +1,402 @@
-#define JEMALLOC_BASE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_BASE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/sz.h"
/******************************************************************************/
/* Data. */
-static malloc_mutex_t base_mtx;
-
-/*
- * Current pages that are being used for internal memory allocations. These
- * pages are carved up in cacheline-size quanta, so that there is no chance of
- * false cache line sharing.
- */
-static void *base_pages;
-static void *base_next_addr;
-static void *base_past_addr; /* Addr immediately past base_pages. */
-static extent_node_t *base_nodes;
+static base_t *b0;
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-static bool base_pages_alloc(size_t minsize);
+static void *
+base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
+ void *addr;
+ bool zero = true;
+ bool commit = true;
-/******************************************************************************/
+ assert(size == HUGEPAGE_CEILING(size));
+
+ if (extent_hooks == &extent_hooks_default) {
+ addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit);
+ } else {
+ /* No arena context as we are creating new arenas. */
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ pre_reentrancy(tsd, NULL);
+ addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE,
+ &zero, &commit, ind);
+ post_reentrancy(tsd);
+ }
+
+ return addr;
+}
+
+static void
+base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
+ size_t size) {
+ /*
+ * Cascade through dalloc, decommit, purge_forced, and purge_lazy,
+ * stopping at first success. This cascade is performed for consistency
+ * with the cascade in extent_dalloc_wrapper() because an application's
+ * custom hooks may not support e.g. dalloc. This function is only ever
+ * called as a side effect of arena destruction, so although it might
+ * seem pointless to do anything besides dalloc here, the application
+ * may in fact want the end state of all associated virtual memory to be
+ * in some consistent-but-allocated state.
+ */
+ if (extent_hooks == &extent_hooks_default) {
+ if (!extent_dalloc_mmap(addr, size)) {
+ return;
+ }
+ if (!pages_decommit(addr, size)) {
+ return;
+ }
+ if (!pages_purge_forced(addr, size)) {
+ return;
+ }
+ if (!pages_purge_lazy(addr, size)) {
+ return;
+ }
+ /* Nothing worked. This should never happen. */
+ not_reached();
+ } else {
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ pre_reentrancy(tsd, NULL);
+ if (extent_hooks->dalloc != NULL &&
+ !extent_hooks->dalloc(extent_hooks, addr, size, true,
+ ind)) {
+ goto label_done;
+ }
+ if (extent_hooks->decommit != NULL &&
+ !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
+ ind)) {
+ goto label_done;
+ }
+ if (extent_hooks->purge_forced != NULL &&
+ !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
+ size, ind)) {
+ goto label_done;
+ }
+ if (extent_hooks->purge_lazy != NULL &&
+ !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
+ ind)) {
+ goto label_done;
+ }
+ /* Nothing worked. That's the application's problem. */
+ label_done:
+ post_reentrancy(tsd);
+ return;
+ }
+}
-static bool
-base_pages_alloc(size_t minsize)
-{
- size_t csize;
- bool zero;
+static void
+base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
+ size_t size) {
+ size_t sn;
- assert(minsize != 0);
- csize = CHUNK_CEILING(minsize);
- zero = false;
- base_pages = chunk_alloc(csize, chunksize, true, &zero,
- chunk_dss_prec_get());
- if (base_pages == NULL)
- return (true);
- base_next_addr = base_pages;
- base_past_addr = (void *)((uintptr_t)base_pages + csize);
+ sn = *extent_sn_next;
+ (*extent_sn_next)++;
- return (false);
+ extent_binit(extent, addr, size, sn);
}
-void *
-base_alloc(size_t size)
-{
+static void *
+base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+ size_t alignment) {
void *ret;
- size_t csize;
- /* Round size up to nearest multiple of the cacheline size. */
- csize = CACHELINE_CEILING(size);
+ assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
+ assert(size == ALIGNMENT_CEILING(size, alignment));
- malloc_mutex_lock(&base_mtx);
- /* Make sure there's enough space for the allocation. */
- if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
- if (base_pages_alloc(csize)) {
- malloc_mutex_unlock(&base_mtx);
- return (NULL);
- }
+ *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
+ alignment) - (uintptr_t)extent_addr_get(extent);
+ ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
+ assert(extent_bsize_get(extent) >= *gap_size + size);
+ extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
+ *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
+ extent_sn_get(extent));
+ return ret;
+}
+
+static void
+base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent,
+ size_t gap_size, void *addr, size_t size) {
+ if (extent_bsize_get(extent) > 0) {
+ /*
+ * Compute the index for the largest size class that does not
+ * exceed extent's size.
+ */
+ szind_t index_floor =
+ sz_size2index(extent_bsize_get(extent) + 1) - 1;
+ extent_heap_insert(&base->avail[index_floor], extent);
}
- /* Allocate. */
- ret = base_next_addr;
- base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
- malloc_mutex_unlock(&base_mtx);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
- return (ret);
+ if (config_stats) {
+ base->allocated += size;
+ /*
+ * Add one PAGE to base_resident for every page boundary that is
+ * crossed by the new allocation.
+ */
+ base->resident += PAGE_CEILING((uintptr_t)addr + size) -
+ PAGE_CEILING((uintptr_t)addr - gap_size);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ }
}
-void *
-base_calloc(size_t number, size_t size)
-{
- void *ret = base_alloc(number * size);
+static void *
+base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent,
+ size_t size, size_t alignment) {
+ void *ret;
+ size_t gap_size;
+
+ ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
+ base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size);
+ return ret;
+}
+
+/*
+ * Allocate a block of virtual memory that is large enough to start with a
+ * base_block_t header, followed by an object of specified size and alignment.
+ * On success a pointer to the initialized base_block_t header is returned.
+ */
+static base_block_t *
+base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind,
+ pszind_t *pind_last, size_t *extent_sn_next, size_t size,
+ size_t alignment) {
+ alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
+ size_t usize = ALIGNMENT_CEILING(size, alignment);
+ size_t header_size = sizeof(base_block_t);
+ size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) -
+ header_size;
+ /*
+ * Create increasingly larger blocks in order to limit the total number
+ * of disjoint virtual memory ranges. Choose the next size in the page
+ * size class series (skipping size classes that are not a multiple of
+ * HUGEPAGE), or a size large enough to satisfy the requested size and
+ * alignment, whichever is larger.
+ */
+ size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size
+ + usize));
+ pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 :
+ *pind_last;
+ size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
+ size_t block_size = (min_block_size > next_block_size) ? min_block_size
+ : next_block_size;
+ base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
+ block_size);
+ if (block == NULL) {
+ return NULL;
+ }
+ *pind_last = sz_psz2ind(block_size);
+ block->size = block_size;
+ block->next = NULL;
+ assert(block_size >= header_size);
+ base_extent_init(extent_sn_next, &block->extent,
+ (void *)((uintptr_t)block + header_size), block_size - header_size);
+ return block;
+}
- if (ret != NULL)
- memset(ret, 0, number * size);
+/*
+ * Allocate an extent that is at least as large as specified size, with
+ * specified alignment.
+ */
+static extent_t *
+base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
+ malloc_mutex_assert_owner(tsdn, &base->mtx);
- return (ret);
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ /*
+ * Drop mutex during base_block_alloc(), because an extent hook will be
+ * called.
+ */
+ malloc_mutex_unlock(tsdn, &base->mtx);
+ base_block_t *block = base_block_alloc(tsdn, extent_hooks,
+ base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
+ alignment);
+ malloc_mutex_lock(tsdn, &base->mtx);
+ if (block == NULL) {
+ return NULL;
+ }
+ block->next = base->blocks;
+ base->blocks = block;
+ if (config_stats) {
+ base->allocated += sizeof(base_block_t);
+ base->resident += PAGE_CEILING(sizeof(base_block_t));
+ base->mapped += block->size;
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ }
+ return &block->extent;
}
-extent_node_t *
-base_node_alloc(void)
-{
- extent_node_t *ret;
+base_t *
+b0get(void) {
+ return b0;
+}
- malloc_mutex_lock(&base_mtx);
- if (base_nodes != NULL) {
- ret = base_nodes;
- base_nodes = *(extent_node_t **)ret;
- malloc_mutex_unlock(&base_mtx);
- VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
- } else {
- malloc_mutex_unlock(&base_mtx);
- ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
+base_t *
+base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ pszind_t pind_last = 0;
+ size_t extent_sn_next = 0;
+ base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind,
+ &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
+ if (block == NULL) {
+ return NULL;
}
- return (ret);
+ size_t gap_size;
+ size_t base_alignment = CACHELINE;
+ size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
+ base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
+ &gap_size, base_size, base_alignment);
+ base->ind = ind;
+ atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
+ if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
+ malloc_mutex_rank_exclusive)) {
+ base_unmap(tsdn, extent_hooks, ind, block, block->size);
+ return NULL;
+ }
+ base->pind_last = pind_last;
+ base->extent_sn_next = extent_sn_next;
+ base->blocks = block;
+ for (szind_t i = 0; i < NSIZES; i++) {
+ extent_heap_new(&base->avail[i]);
+ }
+ if (config_stats) {
+ base->allocated = sizeof(base_block_t);
+ base->resident = PAGE_CEILING(sizeof(base_block_t));
+ base->mapped = block->size;
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ }
+ base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base,
+ base_size);
+
+ return base;
}
void
-base_node_dealloc(extent_node_t *node)
-{
+base_delete(tsdn_t *tsdn, base_t *base) {
+ extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ base_block_t *next = base->blocks;
+ do {
+ base_block_t *block = next;
+ next = block->next;
+ base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
+ block->size);
+ } while (next != NULL);
+}
- VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
- malloc_mutex_lock(&base_mtx);
- *(extent_node_t **)node = base_nodes;
- base_nodes = node;
- malloc_mutex_unlock(&base_mtx);
+extent_hooks_t *
+base_extent_hooks_get(base_t *base) {
+ return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
+ ATOMIC_ACQUIRE);
}
-bool
-base_boot(void)
-{
+extent_hooks_t *
+base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
+ extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
+ atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
+ return old_extent_hooks;
+}
+
+static void *
+base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
+ size_t *esn) {
+ alignment = QUANTUM_CEILING(alignment);
+ size_t usize = ALIGNMENT_CEILING(size, alignment);
+ size_t asize = usize + alignment - QUANTUM;
+
+ extent_t *extent = NULL;
+ malloc_mutex_lock(tsdn, &base->mtx);
+ for (szind_t i = sz_size2index(asize); i < NSIZES; i++) {
+ extent = extent_heap_remove_first(&base->avail[i]);
+ if (extent != NULL) {
+ /* Use existing space. */
+ break;
+ }
+ }
+ if (extent == NULL) {
+ /* Try to allocate more space. */
+ extent = base_extent_alloc(tsdn, base, usize, alignment);
+ }
+ void *ret;
+ if (extent == NULL) {
+ ret = NULL;
+ goto label_return;
+ }
- base_nodes = NULL;
- if (malloc_mutex_init(&base_mtx))
- return (true);
+ ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment);
+ if (esn != NULL) {
+ *esn = extent_sn_get(extent);
+ }
+label_return:
+ malloc_mutex_unlock(tsdn, &base->mtx);
+ return ret;
+}
- return (false);
+/*
+ * base_alloc() returns zeroed memory, which is always demand-zeroed for the
+ * auto arenas, in order to make multi-page sparse data structures such as radix
+ * tree nodes efficient with respect to physical memory usage. Upon success a
+ * pointer to at least size bytes with specified alignment is returned. Note
+ * that size is rounded up to the nearest multiple of alignment to avoid false
+ * sharing.
+ */
+void *
+base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
+ return base_alloc_impl(tsdn, base, size, alignment, NULL);
+}
+
+extent_t *
+base_alloc_extent(tsdn_t *tsdn, base_t *base) {
+ size_t esn;
+ extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
+ CACHELINE, &esn);
+ if (extent == NULL) {
+ return NULL;
+ }
+ extent_esn_set(extent, esn);
+ return extent;
}
void
-base_prefork(void)
-{
+base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident,
+ size_t *mapped) {
+ cassert(config_stats);
- malloc_mutex_prefork(&base_mtx);
+ malloc_mutex_lock(tsdn, &base->mtx);
+ assert(base->allocated <= base->resident);
+ assert(base->resident <= base->mapped);
+ *allocated = base->allocated;
+ *resident = base->resident;
+ *mapped = base->mapped;
+ malloc_mutex_unlock(tsdn, &base->mtx);
}
void
-base_postfork_parent(void)
-{
+base_prefork(tsdn_t *tsdn, base_t *base) {
+ malloc_mutex_prefork(tsdn, &base->mtx);
+}
- malloc_mutex_postfork_parent(&base_mtx);
+void
+base_postfork_parent(tsdn_t *tsdn, base_t *base) {
+ malloc_mutex_postfork_parent(tsdn, &base->mtx);
}
void
-base_postfork_child(void)
-{
+base_postfork_child(tsdn_t *tsdn, base_t *base) {
+ malloc_mutex_postfork_child(tsdn, &base->mtx);
+}
- malloc_mutex_postfork_child(&base_mtx);
+bool
+base_boot(tsdn_t *tsdn) {
+ b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
+ return (b0 == NULL);
}
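
The new base allocator is a bump allocator over huge-page-sized blocks: base_extent_bump_alloc_helper() skips just enough bytes (gap_size) to align the returned pointer, then shrinks the remaining extent to start immediately past the allocation. A self-contained sketch of that arithmetic, using a local align_up() in place of jemalloc's ALIGNMENT_CEILING (all names here are illustrative only):

#include <stddef.h>
#include <stdint.h>

/* Round addr up to the next multiple of align; align must be a power of 2. */
static uintptr_t
align_up(uintptr_t addr, size_t align) {
	return (addr + (align - 1)) & ~((uintptr_t)align - 1);
}

/*
 * Carve size bytes with the requested alignment out of the front of a
 * [*cur, *cur + *remaining) region, in the spirit of
 * base_extent_bump_alloc_helper().
 */
static void *
bump_alloc(uintptr_t *cur, size_t *remaining, size_t size, size_t align) {
	uintptr_t ret = align_up(*cur, align);
	size_t gap = (size_t)(ret - *cur);
	if (gap + size > *remaining) {
		return NULL;	/* Does not fit; the caller maps a new block. */
	}
	*cur = ret + size;
	*remaining -= gap + size;
	return (void *)ret;
}

For example, with *cur == 0x1010 and a 64-byte alignment request, gap is 48 and the returned pointer is 0x1040 — the same gap_size that base_extent_bump_alloc_post() folds into the resident-page accounting.
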
diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c
index e2bd907d55..468b3178eb 100644
--- a/deps/jemalloc/src/bitmap.c
+++ b/deps/jemalloc/src/bitmap.c
@@ -1,24 +1,15 @@
-#define JEMALLOC_BITMAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_BITMAP_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static size_t bits2groups(size_t nbits);
+#include "jemalloc/internal/assert.h"
/******************************************************************************/
-static size_t
-bits2groups(size_t nbits)
-{
-
- return ((nbits >> LG_BITMAP_GROUP_NBITS) +
- !!(nbits & BITMAP_GROUP_NBITS_MASK));
-}
+#ifdef BITMAP_USE_TREE
void
-bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
-{
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
unsigned i;
size_t group_count;
@@ -31,60 +22,100 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
* that requires only one group.
*/
binfo->levels[0].group_offset = 0;
- group_count = bits2groups(nbits);
+ group_count = BITMAP_BITS2GROUPS(nbits);
for (i = 1; group_count > 1; i++) {
assert(i < BITMAP_MAX_LEVELS);
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
- group_count = bits2groups(group_count);
+ group_count = BITMAP_BITS2GROUPS(group_count);
}
binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ group_count;
+ assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX);
binfo->nlevels = i;
binfo->nbits = nbits;
}
-size_t
-bitmap_info_ngroups(const bitmap_info_t *binfo)
-{
-
- return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
-}
-
-size_t
-bitmap_size(size_t nbits)
-{
- bitmap_info_t binfo;
-
- bitmap_info_init(&binfo, nbits);
- return (bitmap_info_ngroups(&binfo));
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo) {
+ return binfo->levels[binfo->nlevels].group_offset;
}
void
-bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
-{
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
size_t extra;
unsigned i;
/*
* Bits are actually inverted with regard to the external bitmap
- * interface, so the bitmap starts out with all 1 bits, except for
- * trailing unused bits (if any). Note that each group uses bit 0 to
- * correspond to the first logical bit in the group, so extra bits
- * are the most significant bits of the last group.
+ * interface.
+ */
+
+ if (fill) {
+ /* The "filled" bitmap starts out with all 0 bits. */
+ memset(bitmap, 0, bitmap_size(binfo));
+ return;
+ }
+
+ /*
+ * The "empty" bitmap starts out with all 1 bits, except for trailing
+ * unused bits (if any). Note that each group uses bit 0 to correspond
+ * to the first logical bit in the group, so extra bits are the most
+ * significant bits of the last group.
*/
- memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
- LG_SIZEOF_BITMAP);
+ memset(bitmap, 0xffU, bitmap_size(binfo));
extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
& BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
+ if (extra != 0) {
bitmap[binfo->levels[1].group_offset - 1] >>= extra;
+ }
for (i = 1; i < binfo->nlevels; i++) {
size_t group_count = binfo->levels[i].group_offset -
binfo->levels[i-1].group_offset;
extra = (BITMAP_GROUP_NBITS - (group_count &
BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
- if (extra != 0)
+ if (extra != 0) {
bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
+ }
}
}
+
+#else /* BITMAP_USE_TREE */
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits) {
+ assert(nbits > 0);
+ assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+ binfo->ngroups = BITMAP_BITS2GROUPS(nbits);
+ binfo->nbits = nbits;
+}
+
+static size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo) {
+ return binfo->ngroups;
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) {
+ size_t extra;
+
+ if (fill) {
+ memset(bitmap, 0, bitmap_size(binfo));
+ return;
+ }
+
+ memset(bitmap, 0xffU, bitmap_size(binfo));
+ extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+ & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0) {
+ bitmap[binfo->ngroups - 1] >>= extra;
+ }
+}
+
+#endif /* BITMAP_USE_TREE */
+
+size_t
+bitmap_size(const bitmap_info_t *binfo) {
+ return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP);
+}
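
Both bitmap layouts reduce to the same group count: the number of bitmap_t words needed to hold nbits bits, rounded up, which is what the removed bits2groups() computed and what BITMAP_BITS2GROUPS now provides. A standalone equivalent, assuming 64-bit groups (the DEMO_* constants below are illustrative, not jemalloc's macros):

#include <stddef.h>

#define DEMO_LG_GROUP_NBITS	6	/* 64 bits per group. */
#define DEMO_GROUP_NBITS	(1U << DEMO_LG_GROUP_NBITS)
#define DEMO_GROUP_NBITS_MASK	(DEMO_GROUP_NBITS - 1)

/* Number of groups (words) needed to represent nbits bits. */
static size_t
demo_bits2groups(size_t nbits) {
	return (nbits >> DEMO_LG_GROUP_NBITS) +
	    !!(nbits & DEMO_GROUP_NBITS_MASK);
}

For nbits == 250 this yields 4 groups (three full words plus a partial one); the BITMAP_USE_TREE variant applies the same computation recursively (4 -> 1) to size its summary levels, while the flat variant stores just the 4 groups.
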
diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c
index 04c5296619..e95e0a3ed5 100644
--- a/deps/jemalloc/src/ckh.c
+++ b/deps/jemalloc/src/ckh.c
@@ -34,14 +34,24 @@
* respectively.
*
******************************************************************************/
-#define JEMALLOC_CKH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_CKH_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+
+#include "jemalloc/internal/ckh.h"
+
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/prng.h"
+#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static bool ckh_grow(ckh_t *ckh);
-static void ckh_shrink(ckh_t *ckh);
+static bool ckh_grow(tsd_t *tsd, ckh_t *ckh);
+static void ckh_shrink(tsd_t *tsd, ckh_t *ckh);
/******************************************************************************/
@@ -49,27 +59,26 @@ static void ckh_shrink(ckh_t *ckh);
* Search bucket for key and return the cell number if found; SIZE_T_MAX
* otherwise.
*/
-JEMALLOC_INLINE_C size_t
-ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
-{
+static size_t
+ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) {
ckhc_t *cell;
unsigned i;
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
- if (cell->key != NULL && ckh->keycomp(key, cell->key))
- return ((bucket << LG_CKH_BUCKET_CELLS) + i);
+ if (cell->key != NULL && ckh->keycomp(key, cell->key)) {
+ return (bucket << LG_CKH_BUCKET_CELLS) + i;
+ }
}
- return (SIZE_T_MAX);
+ return SIZE_T_MAX;
}
/*
* Search table for key and return cell number if found; SIZE_T_MAX otherwise.
*/
-JEMALLOC_INLINE_C size_t
-ckh_isearch(ckh_t *ckh, const void *key)
-{
+static size_t
+ckh_isearch(ckh_t *ckh, const void *key) {
size_t hashes[2], bucket, cell;
assert(ckh != NULL);
@@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key)
/* Search primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
- if (cell != SIZE_T_MAX)
- return (cell);
+ if (cell != SIZE_T_MAX) {
+ return cell;
+ }
/* Search secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
- return (cell);
+ return cell;
}
-JEMALLOC_INLINE_C bool
+static bool
ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
- const void *data)
-{
+ const void *data) {
ckhc_t *cell;
unsigned offset, i;
@@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
- prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+ offset = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+ LG_CKH_BUCKET_CELLS);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
cell->key = key;
cell->data = data;
ckh->count++;
- return (false);
+ return false;
}
}
- return (true);
+ return true;
}
/*
@@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* eviction/relocation procedure until either success or detection of an
* eviction/relocation bucket cycle.
*/
-JEMALLOC_INLINE_C bool
+static bool
ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
- void const **argdata)
-{
+ void const **argdata) {
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
size_t hashes[2], bucket, tbucket;
@@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
- prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
+ i = (unsigned)prng_lg_range_u64(&ckh->prng_state,
+ LG_CKH_BUCKET_CELLS);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
@@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
if (tbucket == argbucket) {
*argkey = key;
*argdata = data;
- return (true);
+ return true;
}
bucket = tbucket;
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
- return (false);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+ return false;
+ }
}
}
-JEMALLOC_INLINE_C bool
-ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
-{
+static bool
+ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) {
size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
@@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
/* Try to insert in primary bucket. */
bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
- return (false);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+ return false;
+ }
/* Try to insert in secondary bucket. */
bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
- if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
- return (false);
+ if (!ckh_try_bucket_insert(ckh, bucket, key, data)) {
+ return false;
+ }
/*
* Try to find a place for this item via iterative eviction/relocation.
*/
- return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata));
+ return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata);
}
/*
* Try to rebuild the hash table from scratch by inserting all items from the
* old table into the new.
*/
-JEMALLOC_INLINE_C bool
-ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
-{
+static bool
+ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) {
size_t count, i, nins;
const void *key, *data;
@@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab)
data = aTab[i].data;
if (ckh_try_insert(ckh, &key, &data)) {
ckh->count = count;
- return (true);
+ return true;
}
nins++;
}
}
- return (false);
+ return false;
}
static bool
-ckh_grow(ckh_t *ckh)
-{
+ckh_grow(tsd_t *tsd, ckh_t *ckh) {
bool ret;
ckhc_t *tab, *ttab;
- size_t lg_curcells;
- unsigned lg_prevbuckets;
+ unsigned lg_prevbuckets, lg_curcells;
#ifdef CKH_COUNT
ckh->ngrows++;
@@ -265,12 +274,13 @@ ckh_grow(ckh_t *ckh)
size_t usize;
lg_curcells++;
- usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (usize == 0) {
+ usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
- tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE,
+ true, NULL, true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
ret = true;
goto label_return;
@@ -281,28 +291,27 @@ ckh_grow(ckh_t *ckh)
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
- if (ckh_rebuild(ckh, tab) == false) {
- idalloc(tab);
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
break;
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloc(ckh->tab);
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
}
ret = false;
label_return:
- return (ret);
+ return ret;
}
static void
-ckh_shrink(ckh_t *ckh)
-{
+ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
ckhc_t *tab, *ttab;
- size_t lg_curcells, usize;
- unsigned lg_prevbuckets;
+ size_t usize;
+ unsigned lg_prevbuckets, lg_curcells;
/*
* It is possible (though unlikely, given well behaved hashes) that the
@@ -310,10 +319,12 @@ ckh_shrink(ckh_t *ckh)
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
- usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
- if (usize == 0)
+ usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
return;
- tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ }
+ tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL,
+ true, arena_ichoose(tsd, NULL));
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -327,8 +338,8 @@ ckh_shrink(ckh_t *ckh)
tab = ttab;
ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS;
- if (ckh_rebuild(ckh, tab) == false) {
- idalloc(tab);
+ if (!ckh_rebuild(ckh, tab)) {
+ idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true);
#ifdef CKH_COUNT
ckh->nshrinks++;
#endif
@@ -336,7 +347,7 @@ ckh_shrink(ckh_t *ckh)
}
/* Rebuilding failed, so back out partially rebuilt table. */
- idalloc(ckh->tab);
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
ckh->tab = tab;
ckh->lg_curbuckets = lg_prevbuckets;
#ifdef CKH_COUNT
@@ -345,8 +356,8 @@ ckh_shrink(ckh_t *ckh)
}
bool
-ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
-{
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
@@ -366,29 +377,31 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh->count = 0;
/*
- * Find the minimum power of 2 that is large enough to fit aBaseCount
+ * Find the minimum power of 2 that is large enough to fit minitems
* entries. We are using (2+,2) cuckoo hashing, which has an expected
* maximum load factor of at least ~0.86, so 0.75 is a conservative load
- * factor that will typically allow 2^aLgMinItems to fit without ever
+ * factor that will typically allow mincells items to fit without ever
* growing the table.
*/
assert(LG_CKH_BUCKET_CELLS > 0);
mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2;
for (lg_mincells = LG_CKH_BUCKET_CELLS;
(ZU(1) << lg_mincells) < mincells;
- lg_mincells++)
- ; /* Do nothing. */
+ lg_mincells++) {
+ /* Do nothing. */
+ }
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->hash = hash;
ckh->keycomp = keycomp;
- usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
- if (usize == 0) {
+ usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
ret = true;
goto label_return;
}
- ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
+ ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true,
+ NULL, true, arena_ichoose(tsd, NULL));
if (ckh->tab == NULL) {
ret = true;
goto label_return;
@@ -396,20 +409,18 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ret = false;
label_return:
- return (ret);
+ return ret;
}
void
-ckh_delete(ckh_t *ckh)
-{
-
+ckh_delete(tsd_t *tsd, ckh_t *ckh) {
assert(ckh != NULL);
#ifdef CKH_VERBOSE
malloc_printf(
- "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64","
- " nshrinkfails: %"PRIu64", ninserts: %"PRIu64","
- " nrelocs: %"PRIu64"\n", __func__, ckh,
+ "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64","
+ " nshrinkfails: %"FMTu64", ninserts: %"FMTu64","
+ " nrelocs: %"FMTu64"\n", __func__, ckh,
(unsigned long long)ckh->ngrows,
(unsigned long long)ckh->nshrinks,
(unsigned long long)ckh->nshrinkfails,
@@ -417,43 +428,42 @@ ckh_delete(ckh_t *ckh)
(unsigned long long)ckh->nrelocs);
#endif
- idalloc(ckh->tab);
- if (config_debug)
- memset(ckh, 0x5a, sizeof(ckh_t));
+ idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true);
+ if (config_debug) {
+ memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t));
+ }
}
size_t
-ckh_count(ckh_t *ckh)
-{
-
+ckh_count(ckh_t *ckh) {
assert(ckh != NULL);
- return (ckh->count);
+ return ckh->count;
}
bool
-ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data)
-{
+ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) {
size_t i, ncells;
for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets +
LG_CKH_BUCKET_CELLS)); i < ncells; i++) {
if (ckh->tab[i].key != NULL) {
- if (key != NULL)
+ if (key != NULL) {
*key = (void *)ckh->tab[i].key;
- if (data != NULL)
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[i].data;
+ }
*tabind = i + 1;
- return (false);
+ return false;
}
}
- return (true);
+ return true;
}
bool
-ckh_insert(ckh_t *ckh, const void *key, const void *data)
-{
+ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) {
bool ret;
assert(ckh != NULL);
@@ -464,7 +474,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
#endif
while (ckh_try_insert(ckh, &key, &data)) {
- if (ckh_grow(ckh)) {
+ if (ckh_grow(tsd, ckh)) {
ret = true;
goto label_return;
}
@@ -472,22 +482,24 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
ret = false;
label_return:
- return (ret);
+ return ret;
}
bool
-ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
+ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+ void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
- if (key != NULL)
+ if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
- if (data != NULL)
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
+ }
ckh->tab[cell].key = NULL;
ckh->tab[cell].data = NULL; /* Not necessary. */
@@ -497,54 +509,50 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
+ LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets
> ckh->lg_minbuckets) {
/* Ignore error due to OOM. */
- ckh_shrink(ckh);
+ ckh_shrink(tsd, ckh);
}
- return (false);
+ return false;
}
- return (true);
+ return true;
}
bool
-ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
-{
+ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) {
size_t cell;
assert(ckh != NULL);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
- if (key != NULL)
+ if (key != NULL) {
*key = (void *)ckh->tab[cell].key;
- if (data != NULL)
+ }
+ if (data != NULL) {
*data = (void *)ckh->tab[cell].data;
- return (false);
+ }
+ return false;
}
- return (true);
+ return true;
}
void
-ckh_string_hash(const void *key, size_t r_hash[2])
-{
-
+ckh_string_hash(const void *key, size_t r_hash[2]) {
hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
-ckh_string_keycomp(const void *k1, const void *k2)
-{
-
- assert(k1 != NULL);
- assert(k2 != NULL);
+ckh_string_keycomp(const void *k1, const void *k2) {
+ assert(k1 != NULL);
+ assert(k2 != NULL);
- return (strcmp((char *)k1, (char *)k2) ? false : true);
+ return !strcmp((char *)k1, (char *)k2);
}
void
-ckh_pointer_hash(const void *key, size_t r_hash[2])
-{
+ckh_pointer_hash(const void *key, size_t r_hash[2]) {
union {
const void *v;
size_t i;
@@ -556,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2])
}
bool
-ckh_pointer_keycomp(const void *k1, const void *k2)
-{
-
- return ((k1 == k2) ? true : false);
+ckh_pointer_keycomp(const void *k1, const void *k2) {
+ return (k1 == k2);
}
diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c
index cc2c5aef57..36bc8fb5b7 100644
--- a/deps/jemalloc/src/ctl.c
+++ b/deps/jemalloc/src/ctl.c
@@ -1,146 +1,146 @@
-#define JEMALLOC_CTL_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_CTL_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Data. */
/*
* ctl_mtx protects the following:
- * - ctl_stats.*
- * - opt_prof_active
+ * - ctl_stats->*
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
-static uint64_t ctl_epoch;
-static ctl_stats_t ctl_stats;
+static ctl_stats_t *ctl_stats;
+static ctl_arenas_t *ctl_arenas;
/******************************************************************************/
/* Helpers for named and indexed nodes. */
-static inline const ctl_named_node_t *
-ctl_named_node(const ctl_node_t *node)
-{
-
+static const ctl_named_node_t *
+ctl_named_node(const ctl_node_t *node) {
return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}
-static inline const ctl_named_node_t *
-ctl_named_children(const ctl_named_node_t *node, int index)
-{
+static const ctl_named_node_t *
+ctl_named_children(const ctl_named_node_t *node, size_t index) {
const ctl_named_node_t *children = ctl_named_node(node->children);
return (children ? &children[index] : NULL);
}
-static inline const ctl_indexed_node_t *
-ctl_indexed_node(const ctl_node_t *node)
-{
-
- return ((node->named == false) ? (const ctl_indexed_node_t *)node :
- NULL);
+static const ctl_indexed_node_t *
+ctl_indexed_node(const ctl_node_t *node) {
+ return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-#define CTL_PROTO(n) \
-static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen);
-
-#define INDEX_PROTO(n) \
-static const ctl_named_node_t *n##_index(const size_t *mib, \
- size_t miblen, size_t i);
-
-static bool ctl_arena_init(ctl_arena_stats_t *astats);
-static void ctl_arena_clear(ctl_arena_stats_t *astats);
-static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
- arena_t *arena);
-static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
- ctl_arena_stats_t *astats);
-static void ctl_arena_refresh(arena_t *arena, unsigned i);
-static bool ctl_grow(void);
-static void ctl_refresh(void);
-static bool ctl_init(void);
-static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp);
+#define CTL_PROTO(n) \
+static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+
+#define INDEX_PROTO(n) \
+static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \
+ const size_t *mib, size_t miblen, size_t i);
CTL_PROTO(version)
CTL_PROTO(epoch)
+CTL_PROTO(background_thread)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_prof_name)
+CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
-CTL_PROTO(config_dss)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
-CTL_PROTO(config_mremap)
-CTL_PROTO(config_munmap)
+CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
-CTL_PROTO(config_tcache)
-CTL_PROTO(config_tls)
+CTL_PROTO(config_thp)
CTL_PROTO(config_utrace)
-CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
+CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
-CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
-CTL_PROTO(opt_lg_dirty_mult)
+CTL_PROTO(opt_percpu_arena)
+CTL_PROTO(opt_background_thread)
+CTL_PROTO(opt_dirty_decay_ms)
+CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
+CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
-CTL_PROTO(opt_quarantine)
-CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
-CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
+CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
+CTL_PROTO(tcache_create)
+CTL_PROTO(tcache_flush)
+CTL_PROTO(tcache_destroy)
+CTL_PROTO(arena_i_initialized)
+CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge)
-static void arena_purge(unsigned arena_ind);
+CTL_PROTO(arena_i_reset)
+CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_dirty_decay_ms)
+CTL_PROTO(arena_i_muzzy_decay_ms)
+CTL_PROTO(arena_i_extent_hooks)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
-CTL_PROTO(arenas_bin_i_run_size)
+CTL_PROTO(arenas_bin_i_slab_size)
INDEX_PROTO(arenas_bin_i)
-CTL_PROTO(arenas_lrun_i_size)
-INDEX_PROTO(arenas_lrun_i)
+CTL_PROTO(arenas_lextent_i_size)
+INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
-CTL_PROTO(arenas_initialized)
+CTL_PROTO(arenas_dirty_decay_ms)
+CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
-CTL_PROTO(arenas_nlruns)
-CTL_PROTO(arenas_purge)
-CTL_PROTO(arenas_extend)
+CTL_PROTO(arenas_nlextents)
+CTL_PROTO(arenas_create)
+CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
+CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
-CTL_PROTO(stats_chunks_current)
-CTL_PROTO(stats_chunks_total)
-CTL_PROTO(stats_chunks_high)
-CTL_PROTO(stats_huge_allocated)
-CTL_PROTO(stats_huge_nmalloc)
-CTL_PROTO(stats_huge_ndalloc)
+CTL_PROTO(lg_prof_sample)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -149,119 +149,177 @@ CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
-CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
+CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-CTL_PROTO(stats_arenas_i_bins_j_nruns)
-CTL_PROTO(stats_arenas_i_bins_j_nreruns)
-CTL_PROTO(stats_arenas_i_bins_j_curruns)
+CTL_PROTO(stats_arenas_i_bins_j_nslabs)
+CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
+CTL_PROTO(stats_arenas_i_bins_j_curslabs)
INDEX_PROTO(stats_arenas_i_bins_j)
-CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
-CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
-CTL_PROTO(stats_arenas_i_lruns_j_curruns)
-INDEX_PROTO(stats_arenas_i_lruns_j)
+CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
+CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
+CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
+INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
+CTL_PROTO(stats_arenas_i_dirty_decay_ms)
+CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
+CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
-CTL_PROTO(stats_arenas_i_npurge)
-CTL_PROTO(stats_arenas_i_nmadvise)
-CTL_PROTO(stats_arenas_i_purged)
+CTL_PROTO(stats_arenas_i_retained)
+CTL_PROTO(stats_arenas_i_dirty_npurge)
+CTL_PROTO(stats_arenas_i_dirty_nmadvise)
+CTL_PROTO(stats_arenas_i_dirty_purged)
+CTL_PROTO(stats_arenas_i_muzzy_npurge)
+CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
+CTL_PROTO(stats_arenas_i_muzzy_purged)
+CTL_PROTO(stats_arenas_i_base)
+CTL_PROTO(stats_arenas_i_internal)
+CTL_PROTO(stats_arenas_i_tcache_bytes)
+CTL_PROTO(stats_arenas_i_resident)
INDEX_PROTO(stats_arenas_i)
-CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
+CTL_PROTO(stats_background_thread_num_threads)
+CTL_PROTO(stats_background_thread_num_runs)
+CTL_PROTO(stats_background_thread_run_interval)
+CTL_PROTO(stats_metadata)
+CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
+CTL_PROTO(stats_retained)
+
+#define MUTEX_STATS_CTL_PROTO_GEN(n) \
+CTL_PROTO(stats_##n##_num_ops) \
+CTL_PROTO(stats_##n##_num_wait) \
+CTL_PROTO(stats_##n##_num_spin_acq) \
+CTL_PROTO(stats_##n##_num_owner_switch) \
+CTL_PROTO(stats_##n##_total_wait_time) \
+CTL_PROTO(stats_##n##_max_wait_time) \
+CTL_PROTO(stats_##n##_max_num_thds)
+
+/* Global mutexes. */
+#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+/* Per arena mutexes. */
+#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+
+/* Arena bin mutexes. */
+MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
+#undef MUTEX_STATS_CTL_PROTO_GEN
+
+CTL_PROTO(stats_mutexes_reset)
/******************************************************************************/
/* mallctl tree. */
-/* Maximum tree depth. */
-#define CTL_MAX_DEPTH 6
-
-#define NAME(n) {true}, n
-#define CHILD(t, c) \
+#define NAME(n) {true}, n
+#define CHILD(t, c) \
sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
(ctl_node_t *)c##_node, \
NULL
-#define CTL(c) 0, NULL, c##_ctl
+#define CTL(c) 0, NULL, c##_ctl
/*
* Only handles internal indexed nodes, since there are currently no external
* ones.
*/
-#define INDEX(i) {false}, i##_index
+#define INDEX(i) {false}, i##_index
-static const ctl_named_node_t tcache_node[] = {
+static const ctl_named_node_t thread_tcache_node[] = {
{NAME("enabled"), CTL(thread_tcache_enabled)},
{NAME("flush"), CTL(thread_tcache_flush)}
};
+static const ctl_named_node_t thread_prof_node[] = {
+ {NAME("name"), CTL(thread_prof_name)},
+ {NAME("active"), CTL(thread_prof_active)}
+};
+
static const ctl_named_node_t thread_node[] = {
{NAME("arena"), CTL(thread_arena)},
{NAME("allocated"), CTL(thread_allocated)},
{NAME("allocatedp"), CTL(thread_allocatedp)},
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
- {NAME("tcache"), CHILD(named, tcache)}
+ {NAME("tcache"), CHILD(named, thread_tcache)},
+ {NAME("prof"), CHILD(named, thread_prof)}
};
static const ctl_named_node_t config_node[] = {
- {NAME("debug"), CTL(config_debug)},
- {NAME("dss"), CTL(config_dss)},
- {NAME("fill"), CTL(config_fill)},
- {NAME("lazy_lock"), CTL(config_lazy_lock)},
- {NAME("mremap"), CTL(config_mremap)},
- {NAME("munmap"), CTL(config_munmap)},
- {NAME("prof"), CTL(config_prof)},
- {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
- {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
- {NAME("stats"), CTL(config_stats)},
- {NAME("tcache"), CTL(config_tcache)},
- {NAME("tls"), CTL(config_tls)},
- {NAME("utrace"), CTL(config_utrace)},
- {NAME("valgrind"), CTL(config_valgrind)},
- {NAME("xmalloc"), CTL(config_xmalloc)}
+ {NAME("cache_oblivious"), CTL(config_cache_oblivious)},
+ {NAME("debug"), CTL(config_debug)},
+ {NAME("fill"), CTL(config_fill)},
+ {NAME("lazy_lock"), CTL(config_lazy_lock)},
+ {NAME("malloc_conf"), CTL(config_malloc_conf)},
+ {NAME("prof"), CTL(config_prof)},
+ {NAME("prof_libgcc"), CTL(config_prof_libgcc)},
+ {NAME("prof_libunwind"), CTL(config_prof_libunwind)},
+ {NAME("stats"), CTL(config_stats)},
+ {NAME("thp"), CTL(config_thp)},
+ {NAME("utrace"), CTL(config_utrace)},
+ {NAME("xmalloc"), CTL(config_xmalloc)}
};
static const ctl_named_node_t opt_node[] = {
- {NAME("abort"), CTL(opt_abort)},
- {NAME("dss"), CTL(opt_dss)},
- {NAME("lg_chunk"), CTL(opt_lg_chunk)},
- {NAME("narenas"), CTL(opt_narenas)},
- {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("stats_print"), CTL(opt_stats_print)},
- {NAME("junk"), CTL(opt_junk)},
- {NAME("zero"), CTL(opt_zero)},
- {NAME("quarantine"), CTL(opt_quarantine)},
- {NAME("redzone"), CTL(opt_redzone)},
- {NAME("utrace"), CTL(opt_utrace)},
- {NAME("valgrind"), CTL(opt_valgrind)},
- {NAME("xmalloc"), CTL(opt_xmalloc)},
- {NAME("tcache"), CTL(opt_tcache)},
- {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
- {NAME("prof"), CTL(opt_prof)},
- {NAME("prof_prefix"), CTL(opt_prof_prefix)},
- {NAME("prof_active"), CTL(opt_prof_active)},
- {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
- {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
- {NAME("prof_gdump"), CTL(opt_prof_gdump)},
- {NAME("prof_final"), CTL(opt_prof_final)},
- {NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)}
+ {NAME("abort"), CTL(opt_abort)},
+ {NAME("abort_conf"), CTL(opt_abort_conf)},
+ {NAME("retain"), CTL(opt_retain)},
+ {NAME("dss"), CTL(opt_dss)},
+ {NAME("narenas"), CTL(opt_narenas)},
+ {NAME("percpu_arena"), CTL(opt_percpu_arena)},
+ {NAME("background_thread"), CTL(opt_background_thread)},
+ {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
+ {NAME("stats_print"), CTL(opt_stats_print)},
+ {NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
+ {NAME("junk"), CTL(opt_junk)},
+ {NAME("zero"), CTL(opt_zero)},
+ {NAME("utrace"), CTL(opt_utrace)},
+ {NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("tcache"), CTL(opt_tcache)},
+ {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
+ {NAME("prof"), CTL(opt_prof)},
+ {NAME("prof_prefix"), CTL(opt_prof_prefix)},
+ {NAME("prof_active"), CTL(opt_prof_active)},
+ {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
+ {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
+ {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
+ {NAME("prof_gdump"), CTL(opt_prof_gdump)},
+ {NAME("prof_final"), CTL(opt_prof_final)},
+ {NAME("prof_leak"), CTL(opt_prof_leak)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)}
+};
+
+static const ctl_named_node_t tcache_node[] = {
+ {NAME("create"), CTL(tcache_create)},
+ {NAME("flush"), CTL(tcache_flush)},
+ {NAME("destroy"), CTL(tcache_destroy)}
};
static const ctl_named_node_t arena_i_node[] = {
- {NAME("purge"), CTL(arena_i_purge)},
- {NAME("dss"), CTL(arena_i_dss)}
+ {NAME("initialized"), CTL(arena_i_initialized)},
+ {NAME("decay"), CTL(arena_i_decay)},
+ {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("reset"), CTL(arena_i_reset)},
+ {NAME("destroy"), CTL(arena_i_destroy)},
+ {NAME("dss"), CTL(arena_i_dss)},
+ {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
+ {NAME("extent_hooks"), CTL(arena_i_extent_hooks)}
};
static const ctl_named_node_t super_arena_i_node[] = {
- {NAME(""), CHILD(named, arena_i)}
+ {NAME(""), CHILD(named, arena_i)}
};
static const ctl_indexed_node_t arena_node[] = {
@@ -269,147 +327,208 @@ static const ctl_indexed_node_t arena_node[] = {
};
static const ctl_named_node_t arenas_bin_i_node[] = {
- {NAME("size"), CTL(arenas_bin_i_size)},
- {NAME("nregs"), CTL(arenas_bin_i_nregs)},
- {NAME("run_size"), CTL(arenas_bin_i_run_size)}
+ {NAME("size"), CTL(arenas_bin_i_size)},
+ {NAME("nregs"), CTL(arenas_bin_i_nregs)},
+ {NAME("slab_size"), CTL(arenas_bin_i_slab_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
- {NAME(""), CHILD(named, arenas_bin_i)}
+ {NAME(""), CHILD(named, arenas_bin_i)}
};
static const ctl_indexed_node_t arenas_bin_node[] = {
{INDEX(arenas_bin_i)}
};
-static const ctl_named_node_t arenas_lrun_i_node[] = {
- {NAME("size"), CTL(arenas_lrun_i_size)}
+static const ctl_named_node_t arenas_lextent_i_node[] = {
+ {NAME("size"), CTL(arenas_lextent_i_size)}
};
-static const ctl_named_node_t super_arenas_lrun_i_node[] = {
- {NAME(""), CHILD(named, arenas_lrun_i)}
+static const ctl_named_node_t super_arenas_lextent_i_node[] = {
+ {NAME(""), CHILD(named, arenas_lextent_i)}
};
-static const ctl_indexed_node_t arenas_lrun_node[] = {
- {INDEX(arenas_lrun_i)}
+static const ctl_indexed_node_t arenas_lextent_node[] = {
+ {INDEX(arenas_lextent_i)}
};
static const ctl_named_node_t arenas_node[] = {
- {NAME("narenas"), CTL(arenas_narenas)},
- {NAME("initialized"), CTL(arenas_initialized)},
- {NAME("quantum"), CTL(arenas_quantum)},
- {NAME("page"), CTL(arenas_page)},
- {NAME("tcache_max"), CTL(arenas_tcache_max)},
- {NAME("nbins"), CTL(arenas_nbins)},
- {NAME("nhbins"), CTL(arenas_nhbins)},
- {NAME("bin"), CHILD(indexed, arenas_bin)},
- {NAME("nlruns"), CTL(arenas_nlruns)},
- {NAME("lrun"), CHILD(indexed, arenas_lrun)},
- {NAME("purge"), CTL(arenas_purge)},
- {NAME("extend"), CTL(arenas_extend)}
+ {NAME("narenas"), CTL(arenas_narenas)},
+ {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
+ {NAME("quantum"), CTL(arenas_quantum)},
+ {NAME("page"), CTL(arenas_page)},
+ {NAME("tcache_max"), CTL(arenas_tcache_max)},
+ {NAME("nbins"), CTL(arenas_nbins)},
+ {NAME("nhbins"), CTL(arenas_nhbins)},
+ {NAME("bin"), CHILD(indexed, arenas_bin)},
+ {NAME("nlextents"), CTL(arenas_nlextents)},
+ {NAME("lextent"), CHILD(indexed, arenas_lextent)},
+ {NAME("create"), CTL(arenas_create)}
};
static const ctl_named_node_t prof_node[] = {
+ {NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
- {NAME("interval"), CTL(prof_interval)}
+ {NAME("gdump"), CTL(prof_gdump)},
+ {NAME("reset"), CTL(prof_reset)},
+ {NAME("interval"), CTL(prof_interval)},
+ {NAME("lg_sample"), CTL(lg_prof_sample)}
};
-static const ctl_named_node_t stats_chunks_node[] = {
- {NAME("current"), CTL(stats_chunks_current)},
- {NAME("total"), CTL(stats_chunks_total)},
- {NAME("high"), CTL(stats_chunks_high)}
+static const ctl_named_node_t stats_arenas_i_small_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};
-static const ctl_named_node_t stats_huge_node[] = {
- {NAME("allocated"), CTL(stats_huge_allocated)},
- {NAME("nmalloc"), CTL(stats_huge_nmalloc)},
- {NAME("ndalloc"), CTL(stats_huge_ndalloc)}
+static const ctl_named_node_t stats_arenas_i_large_node[] = {
+ {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
+ {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};
-static const ctl_named_node_t stats_arenas_i_small_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
+#define MUTEX_PROF_DATA_NODE(prefix) \
+static const ctl_named_node_t stats_##prefix##_node[] = { \
+ {NAME("num_ops"), \
+ CTL(stats_##prefix##_num_ops)}, \
+ {NAME("num_wait"), \
+ CTL(stats_##prefix##_num_wait)}, \
+ {NAME("num_spin_acq"), \
+ CTL(stats_##prefix##_num_spin_acq)}, \
+ {NAME("num_owner_switch"), \
+ CTL(stats_##prefix##_num_owner_switch)}, \
+ {NAME("total_wait_time"), \
+ CTL(stats_##prefix##_total_wait_time)}, \
+ {NAME("max_wait_time"), \
+ CTL(stats_##prefix##_max_wait_time)}, \
+ {NAME("max_num_thds"), \
+ CTL(stats_##prefix##_max_num_thds)} \
+	/* Note that the # of currently waiting threads is not provided. */ \
};
-static const ctl_named_node_t stats_arenas_i_large_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
-};
+MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)
static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
- {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
- {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
- {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
- {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
- {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
- {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
- {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
+ {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
+ {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)},
+ {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
+ {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
+ {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)},
+ {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)},
+ {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)},
+ {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)}
};
+
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
+ {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};
static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
{INDEX(stats_arenas_i_bins_j)}
};
-static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
- {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
- {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
- {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
- {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
+static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
+ {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)},
+ {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)},
+ {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)},
+ {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)}
};
-static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
+static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_lextents_j)}
};
-static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
- {INDEX(stats_arenas_i_lruns_j)}
+static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
+ {INDEX(stats_arenas_i_lextents_j)}
+};
+
+#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+
+static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
+#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
};
static const ctl_named_node_t stats_arenas_i_node[] = {
- {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
- {NAME("dss"), CTL(stats_arenas_i_dss)},
- {NAME("pactive"), CTL(stats_arenas_i_pactive)},
- {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
- {NAME("mapped"), CTL(stats_arenas_i_mapped)},
- {NAME("npurge"), CTL(stats_arenas_i_npurge)},
- {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
- {NAME("purged"), CTL(stats_arenas_i_purged)},
- {NAME("small"), CHILD(named, stats_arenas_i_small)},
- {NAME("large"), CHILD(named, stats_arenas_i_large)},
- {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
- {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
+ {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("uptime"), CTL(stats_arenas_i_uptime)},
+ {NAME("dss"), CTL(stats_arenas_i_dss)},
+ {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
+ {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
+ {NAME("pactive"), CTL(stats_arenas_i_pactive)},
+ {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
+ {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)},
+ {NAME("mapped"), CTL(stats_arenas_i_mapped)},
+ {NAME("retained"), CTL(stats_arenas_i_retained)},
+ {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)},
+ {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
+ {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)},
+ {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)},
+ {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
+ {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)},
+ {NAME("base"), CTL(stats_arenas_i_base)},
+ {NAME("internal"), CTL(stats_arenas_i_internal)},
+ {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
+ {NAME("resident"), CTL(stats_arenas_i_resident)},
+ {NAME("small"), CHILD(named, stats_arenas_i_small)},
+ {NAME("large"), CHILD(named, stats_arenas_i_large)},
+ {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
+ {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
+ {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
- {NAME(""), CHILD(named, stats_arenas_i)}
+ {NAME(""), CHILD(named, stats_arenas_i)}
};
static const ctl_indexed_node_t stats_arenas_node[] = {
{INDEX(stats_arenas_i)}
};
+static const ctl_named_node_t stats_background_thread_node[] = {
+ {NAME("num_threads"), CTL(stats_background_thread_num_threads)},
+ {NAME("num_runs"), CTL(stats_background_thread_num_runs)},
+ {NAME("run_interval"), CTL(stats_background_thread_run_interval)}
+};
+
+#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+static const ctl_named_node_t stats_mutexes_node[] = {
+#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+ {NAME("reset"), CTL(stats_mutexes_reset)}
+};
+#undef MUTEX_PROF_DATA_NODE
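
The generated node tables above translate into a uniform set of mallctl names: every profiled mutex exposes the same seven counters. For example (the "ctl" token comes from the global_prof_mutex_ctl handling below; the full set of global mutexes is whatever MUTEX_PROF_GLOBAL_MUTEXES defines):

	stats.mutexes.ctl.num_ops
	stats.mutexes.ctl.total_wait_time
	stats.arenas.<i>.mutexes.<mtx>.max_wait_time
	stats.arenas.<i>.bins.<j>.mutex.num_owner_switch
	stats.mutexes.reset	/* resets all collected mutex profile data */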
+
static const ctl_named_node_t stats_node[] = {
- {NAME("cactive"), CTL(stats_cactive)},
- {NAME("allocated"), CTL(stats_allocated)},
- {NAME("active"), CTL(stats_active)},
- {NAME("mapped"), CTL(stats_mapped)},
- {NAME("chunks"), CHILD(named, stats_chunks)},
- {NAME("huge"), CHILD(named, stats_huge)},
- {NAME("arenas"), CHILD(indexed, stats_arenas)}
+ {NAME("allocated"), CTL(stats_allocated)},
+ {NAME("active"), CTL(stats_active)},
+ {NAME("metadata"), CTL(stats_metadata)},
+ {NAME("resident"), CTL(stats_resident)},
+ {NAME("mapped"), CTL(stats_mapped)},
+ {NAME("retained"), CTL(stats_retained)},
+ {NAME("background_thread"),
+ CHILD(named, stats_background_thread)},
+ {NAME("mutexes"), CHILD(named, stats_mutexes)},
+ {NAME("arenas"), CHILD(indexed, stats_arenas)}
};
static const ctl_named_node_t root_node[] = {
{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
+ {NAME("background_thread"), CTL(background_thread)},
{NAME("thread"), CHILD(named, thread)},
{NAME("config"), CHILD(named, config)},
{NAME("opt"), CHILD(named, opt)},
+ {NAME("tcache"), CHILD(named, tcache)},
{NAME("arena"), CHILD(indexed, arena)},
{NAME("arenas"), CHILD(named, arenas)},
{NAME("prof"), CHILD(named, prof)},
@@ -426,303 +545,514 @@ static const ctl_named_node_t super_root_node[] = {
/******************************************************************************/
-static bool
-ctl_arena_init(ctl_arena_stats_t *astats)
-{
+/*
+ * Adds *src into *dst non-atomically. This is safe, since everything is
+ * synchronized by the ctl mutex.
+ */
+static void
+accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+ uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
+ atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
+#else
+ *dst += *src;
+#endif
+}
- if (astats->lstats == NULL) {
- astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (astats->lstats == NULL)
- return (true);
+/* Likewise: with ctl mutex synchronization, reading is simple. */
+static uint64_t
+arena_stats_read_u64(arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+ return *p;
+#endif
+}
+
+static void accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
+ size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
+ size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
+ atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
+}
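
All three accumulators rely on the reasoning in the comment above: ctl_mtx serializes every reader and writer of these counters, so a relaxed load/add/store sequence is sufficient even though it is not an atomic read-modify-write. A standalone C11 sketch of the pattern (hypothetical names, not jemalloc API):

	#include <stdatomic.h>
	#include <stdint.h>

	/* Safe only if all callers hold a common mutex around every access. */
	static void
	accum_relaxed(_Atomic uint64_t *dst, const _Atomic uint64_t *src) {
		uint64_t d = atomic_load_explicit(dst, memory_order_relaxed);
		uint64_t s = atomic_load_explicit(src, memory_order_relaxed);
		atomic_store_explicit(dst, d + s, memory_order_relaxed);
	}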
+
+/******************************************************************************/
+
+static unsigned
+arenas_i2a_impl(size_t i, bool compat, bool validate) {
+ unsigned a;
+
+ switch (i) {
+ case MALLCTL_ARENAS_ALL:
+ a = 0;
+ break;
+ case MALLCTL_ARENAS_DESTROYED:
+ a = 1;
+ break;
+ default:
+ if (compat && i == ctl_arenas->narenas) {
+ /*
+ * Provide deprecated backward compatibility for
+ * accessing the merged stats at index narenas rather
+ * than via MALLCTL_ARENAS_ALL. This is scheduled for
+ * removal in 6.0.0.
+ */
+ a = 0;
+ } else if (validate && i >= ctl_arenas->narenas) {
+ a = UINT_MAX;
+ } else {
+ /*
+ * This function should never be called for an index
+ * more than one past the range of indices that have
+ * initialized ctl data.
+ */
+ assert(i < ctl_arenas->narenas || (!validate && i ==
+ ctl_arenas->narenas));
+ a = (unsigned)i + 2;
+ }
+ break;
}
- return (false);
+ return a;
}
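
Concretely, the switch above maps mallctl-visible arena indices onto slots in ctl_arenas->arenas as follows (MALLCTL_ARENAS_ALL and MALLCTL_ARENAS_DESTROYED are large sentinel indices defined in the public headers):

	/*
	 * i (mallctl index)             a (internal slot)
	 * MALLCTL_ARENAS_ALL        ->  0         merged stats over all arenas
	 * MALLCTL_ARENAS_DESTROYED  ->  1         merged stats of destroyed arenas
	 * 0 .. narenas-1            ->  i + 2     per-arena stats
	 * narenas (compat)          ->  0         deprecated alias for "all"
	 * >= narenas (validate)     ->  UINT_MAX  rejected
	 */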
-static void
-ctl_arena_clear(ctl_arena_stats_t *astats)
-{
+static unsigned
+arenas_i2a(size_t i) {
+ return arenas_i2a_impl(i, true, false);
+}
+
+static ctl_arena_t *
+arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
+ ctl_arena_t *ret;
- astats->dss = dss_prec_names[dss_prec_limit];
- astats->pactive = 0;
- astats->pdirty = 0;
+ assert(!compat || !init);
+
+ ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
+ if (init && ret == NULL) {
+ if (config_stats) {
+ struct container_s {
+ ctl_arena_t ctl_arena;
+ ctl_arena_stats_t astats;
+ };
+ struct container_s *cont =
+ (struct container_s *)base_alloc(tsd_tsdn(tsd),
+ b0get(), sizeof(struct container_s), QUANTUM);
+ if (cont == NULL) {
+ return NULL;
+ }
+ ret = &cont->ctl_arena;
+ ret->astats = &cont->astats;
+ } else {
+ ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
+ sizeof(ctl_arena_t), QUANTUM);
+ if (ret == NULL) {
+ return NULL;
+ }
+ }
+ ret->arena_ind = (unsigned)i;
+ ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
+ }
+
+ assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
+ return ret;
+}
+
+static ctl_arena_t *
+arenas_i(size_t i) {
+ ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
+ assert(ret != NULL);
+ return ret;
+}
+
+static void
+ctl_arena_clear(ctl_arena_t *ctl_arena) {
+ ctl_arena->nthreads = 0;
+ ctl_arena->dss = dss_prec_names[dss_prec_limit];
+ ctl_arena->dirty_decay_ms = -1;
+ ctl_arena->muzzy_decay_ms = -1;
+ ctl_arena->pactive = 0;
+ ctl_arena->pdirty = 0;
+ ctl_arena->pmuzzy = 0;
if (config_stats) {
- memset(&astats->astats, 0, sizeof(arena_stats_t));
- astats->allocated_small = 0;
- astats->nmalloc_small = 0;
- astats->ndalloc_small = 0;
- astats->nrequests_small = 0;
- memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
- memset(astats->lstats, 0, nlclasses *
+ memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
+ ctl_arena->astats->allocated_small = 0;
+ ctl_arena->astats->nmalloc_small = 0;
+ ctl_arena->astats->ndalloc_small = 0;
+ ctl_arena->astats->nrequests_small = 0;
+ memset(ctl_arena->astats->bstats, 0, NBINS *
+ sizeof(malloc_bin_stats_t));
+ memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) *
sizeof(malloc_large_stats_t));
}
}
static void
-ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
-{
+ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
unsigned i;
- arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
- &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
-
- for (i = 0; i < NBINS; i++) {
- cstats->allocated_small += cstats->bstats[i].allocated;
- cstats->nmalloc_small += cstats->bstats[i].nmalloc;
- cstats->ndalloc_small += cstats->bstats[i].ndalloc;
- cstats->nrequests_small += cstats->bstats[i].nrequests;
+ if (config_stats) {
+ arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
+ &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
+ &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
+ &ctl_arena->pdirty, &ctl_arena->pmuzzy,
+ &ctl_arena->astats->astats, ctl_arena->astats->bstats,
+ ctl_arena->astats->lstats);
+
+ for (i = 0; i < NBINS; i++) {
+ ctl_arena->astats->allocated_small +=
+ ctl_arena->astats->bstats[i].curregs *
+ sz_index2size(i);
+ ctl_arena->astats->nmalloc_small +=
+ ctl_arena->astats->bstats[i].nmalloc;
+ ctl_arena->astats->ndalloc_small +=
+ ctl_arena->astats->bstats[i].ndalloc;
+ ctl_arena->astats->nrequests_small +=
+ ctl_arena->astats->bstats[i].nrequests;
+ }
+ } else {
+ arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
+ &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
+ &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
+ &ctl_arena->pdirty, &ctl_arena->pmuzzy);
}
}
static void
-ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
-{
+ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
+ bool destroyed) {
unsigned i;
- sstats->pactive += astats->pactive;
- sstats->pdirty += astats->pdirty;
-
- sstats->astats.mapped += astats->astats.mapped;
- sstats->astats.npurge += astats->astats.npurge;
- sstats->astats.nmadvise += astats->astats.nmadvise;
- sstats->astats.purged += astats->astats.purged;
-
- sstats->allocated_small += astats->allocated_small;
- sstats->nmalloc_small += astats->nmalloc_small;
- sstats->ndalloc_small += astats->ndalloc_small;
- sstats->nrequests_small += astats->nrequests_small;
-
- sstats->astats.allocated_large += astats->astats.allocated_large;
- sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
- sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
- sstats->astats.nrequests_large += astats->astats.nrequests_large;
-
- for (i = 0; i < nlclasses; i++) {
- sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
- sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
- sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
- sstats->lstats[i].curruns += astats->lstats[i].curruns;
- }
-
- for (i = 0; i < NBINS; i++) {
- sstats->bstats[i].allocated += astats->bstats[i].allocated;
- sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
- sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
- sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
- if (config_tcache) {
- sstats->bstats[i].nfills += astats->bstats[i].nfills;
- sstats->bstats[i].nflushes +=
+ if (!destroyed) {
+ ctl_sdarena->nthreads += ctl_arena->nthreads;
+ ctl_sdarena->pactive += ctl_arena->pactive;
+ ctl_sdarena->pdirty += ctl_arena->pdirty;
+ ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
+ } else {
+ assert(ctl_arena->nthreads == 0);
+ assert(ctl_arena->pactive == 0);
+ assert(ctl_arena->pdirty == 0);
+ assert(ctl_arena->pmuzzy == 0);
+ }
+
+ if (config_stats) {
+ ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
+ ctl_arena_stats_t *astats = ctl_arena->astats;
+
+ if (!destroyed) {
+ accum_atomic_zu(&sdstats->astats.mapped,
+ &astats->astats.mapped);
+ accum_atomic_zu(&sdstats->astats.retained,
+ &astats->astats.retained);
+ }
+
+ accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
+ &astats->astats.decay_dirty.npurge);
+ accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
+ &astats->astats.decay_dirty.nmadvise);
+ accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
+ &astats->astats.decay_dirty.purged);
+
+ accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
+ &astats->astats.decay_muzzy.npurge);
+ accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
+ &astats->astats.decay_muzzy.nmadvise);
+ accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
+ &astats->astats.decay_muzzy.purged);
+
+#define OP(mtx) malloc_mutex_prof_merge( \
+ &(sdstats->astats.mutex_prof_data[ \
+ arena_prof_mutex_##mtx]), \
+ &(astats->astats.mutex_prof_data[ \
+ arena_prof_mutex_##mtx]));
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+ if (!destroyed) {
+ accum_atomic_zu(&sdstats->astats.base,
+ &astats->astats.base);
+ accum_atomic_zu(&sdstats->astats.internal,
+ &astats->astats.internal);
+ accum_atomic_zu(&sdstats->astats.resident,
+ &astats->astats.resident);
+ } else {
+ assert(atomic_load_zu(
+ &astats->astats.internal, ATOMIC_RELAXED) == 0);
+ }
+
+ if (!destroyed) {
+ sdstats->allocated_small += astats->allocated_small;
+ } else {
+ assert(astats->allocated_small == 0);
+ }
+ sdstats->nmalloc_small += astats->nmalloc_small;
+ sdstats->ndalloc_small += astats->ndalloc_small;
+ sdstats->nrequests_small += astats->nrequests_small;
+
+ if (!destroyed) {
+ accum_atomic_zu(&sdstats->astats.allocated_large,
+ &astats->astats.allocated_large);
+ } else {
+ assert(atomic_load_zu(&astats->astats.allocated_large,
+ ATOMIC_RELAXED) == 0);
+ }
+ accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
+ &astats->astats.nmalloc_large);
+ accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
+ &astats->astats.ndalloc_large);
+ accum_arena_stats_u64(&sdstats->astats.nrequests_large,
+ &astats->astats.nrequests_large);
+
+ accum_atomic_zu(&sdstats->astats.tcache_bytes,
+ &astats->astats.tcache_bytes);
+
+ if (ctl_arena->arena_ind == 0) {
+ sdstats->astats.uptime = astats->astats.uptime;
+ }
+
+ for (i = 0; i < NBINS; i++) {
+ sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
+ sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
+ sdstats->bstats[i].nrequests +=
+ astats->bstats[i].nrequests;
+ if (!destroyed) {
+ sdstats->bstats[i].curregs +=
+ astats->bstats[i].curregs;
+ } else {
+ assert(astats->bstats[i].curregs == 0);
+ }
+ sdstats->bstats[i].nfills += astats->bstats[i].nfills;
+ sdstats->bstats[i].nflushes +=
astats->bstats[i].nflushes;
+ sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
+ sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
+ if (!destroyed) {
+ sdstats->bstats[i].curslabs +=
+ astats->bstats[i].curslabs;
+ } else {
+ assert(astats->bstats[i].curslabs == 0);
+ }
+ malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
+ &astats->bstats[i].mutex_data);
+ }
+
+ for (i = 0; i < NSIZES - NBINS; i++) {
+ accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
+ &astats->lstats[i].nmalloc);
+ accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
+ &astats->lstats[i].ndalloc);
+ accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
+ &astats->lstats[i].nrequests);
+ if (!destroyed) {
+ sdstats->lstats[i].curlextents +=
+ astats->lstats[i].curlextents;
+ } else {
+ assert(astats->lstats[i].curlextents == 0);
+ }
}
- sstats->bstats[i].nruns += astats->bstats[i].nruns;
- sstats->bstats[i].reruns += astats->bstats[i].reruns;
- sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
}
static void
-ctl_arena_refresh(arena_t *arena, unsigned i)
-{
- ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
- ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
+ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
+ unsigned i, bool destroyed) {
+ ctl_arena_t *ctl_arena = arenas_i(i);
+
+ ctl_arena_clear(ctl_arena);
+ ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
+ /* Merge into sum stats as well. */
+ ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
+}
- ctl_arena_clear(astats);
+static unsigned
+ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
+ unsigned arena_ind;
+ ctl_arena_t *ctl_arena;
- sstats->nthreads += astats->nthreads;
- if (config_stats) {
- ctl_arena_stats_amerge(astats, arena);
- /* Merge into sum stats as well. */
- ctl_arena_stats_smerge(sstats, astats);
+ if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
+ NULL) {
+ ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
+ arena_ind = ctl_arena->arena_ind;
} else {
- astats->pactive += arena->nactive;
- astats->pdirty += arena->ndirty;
- /* Merge into sum stats as well. */
- sstats->pactive += arena->nactive;
- sstats->pdirty += arena->ndirty;
+ arena_ind = ctl_arenas->narenas;
}
-}
-static bool
-ctl_grow(void)
-{
- ctl_arena_stats_t *astats;
- arena_t **tarenas;
-
- /* Allocate extended arena stats and arenas arrays. */
- astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
- sizeof(ctl_arena_stats_t));
- if (astats == NULL)
- return (true);
- tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
- sizeof(arena_t *));
- if (tarenas == NULL) {
- idalloc(astats);
- return (true);
- }
-
- /* Initialize the new astats element. */
- memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
- sizeof(ctl_arena_stats_t));
- memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
- if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
- idalloc(tarenas);
- idalloc(astats);
- return (true);
- }
- /* Swap merged stats to their new location. */
- {
- ctl_arena_stats_t tstats;
- memcpy(&tstats, &astats[ctl_stats.narenas],
- sizeof(ctl_arena_stats_t));
- memcpy(&astats[ctl_stats.narenas],
- &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
- memcpy(&astats[ctl_stats.narenas + 1], &tstats,
- sizeof(ctl_arena_stats_t));
- }
- /* Initialize the new arenas element. */
- tarenas[ctl_stats.narenas] = NULL;
- {
- arena_t **arenas_old = arenas;
- /*
- * Swap extended arenas array into place. Although ctl_mtx
- * protects this function from other threads extending the
- * array, it does not protect from other threads mutating it
- * (i.e. initializing arenas and setting array elements to
- * point to them). Therefore, array copying must happen under
- * the protection of arenas_lock.
- */
- malloc_mutex_lock(&arenas_lock);
- arenas = tarenas;
- memcpy(arenas, arenas_old, ctl_stats.narenas *
- sizeof(arena_t *));
- narenas_total++;
- arenas_extend(narenas_total - 1);
- malloc_mutex_unlock(&arenas_lock);
- /*
- * Deallocate arenas_old only if it came from imalloc() (not
- * base_alloc()).
- */
- if (ctl_stats.narenas != narenas_auto)
- idalloc(arenas_old);
+ /* Trigger stats allocation. */
+ if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
+ return UINT_MAX;
}
- ctl_stats.arenas = astats;
- ctl_stats.narenas++;
- return (false);
-}
+ /* Initialize new arena. */
+ if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
+ return UINT_MAX;
+ }
-static void
-ctl_refresh(void)
-{
- unsigned i;
- VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+ if (arena_ind == ctl_arenas->narenas) {
+ ctl_arenas->narenas++;
+ }
- if (config_stats) {
- malloc_mutex_lock(&chunks_mtx);
- ctl_stats.chunks.current = stats_chunks.curchunks;
- ctl_stats.chunks.total = stats_chunks.nchunks;
- ctl_stats.chunks.high = stats_chunks.highchunks;
- malloc_mutex_unlock(&chunks_mtx);
+ return arena_ind;
+}
- malloc_mutex_lock(&huge_mtx);
- ctl_stats.huge.allocated = huge_allocated;
- ctl_stats.huge.nmalloc = huge_nmalloc;
- ctl_stats.huge.ndalloc = huge_ndalloc;
- malloc_mutex_unlock(&huge_mtx);
+static void
+ctl_background_thread_stats_read(tsdn_t *tsdn) {
+ background_thread_stats_t *stats = &ctl_stats->background_thread;
+ if (!have_background_thread ||
+ background_thread_stats_read(tsdn, stats)) {
+ memset(stats, 0, sizeof(background_thread_stats_t));
+ nstime_init(&stats->run_interval, 0);
}
+}
+
+static void
+ctl_refresh(tsdn_t *tsdn) {
+ unsigned i;
+ ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
*/
- ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
- ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
-
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
- for (i = 0; i < ctl_stats.narenas; i++) {
- if (arenas[i] != NULL)
- ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
- else
- ctl_stats.arenas[i].nthreads = 0;
- }
- malloc_mutex_unlock(&arenas_lock);
- for (i = 0; i < ctl_stats.narenas; i++) {
+ ctl_arena_clear(ctl_sarena);
+
+ for (i = 0; i < ctl_arenas->narenas; i++) {
+ tarenas[i] = arena_get(tsdn, i, false);
+ }
+
+ for (i = 0; i < ctl_arenas->narenas; i++) {
+ ctl_arena_t *ctl_arena = arenas_i(i);
bool initialized = (tarenas[i] != NULL);
- ctl_stats.arenas[i].initialized = initialized;
- if (initialized)
- ctl_arena_refresh(tarenas[i], i);
+ ctl_arena->initialized = initialized;
+ if (initialized) {
+ ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
+ false);
+ }
}
if (config_stats) {
- ctl_stats.allocated =
- ctl_stats.arenas[ctl_stats.narenas].allocated_small
- + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
- + ctl_stats.huge.allocated;
- ctl_stats.active =
- (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
- + ctl_stats.huge.allocated;
- ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+ ctl_stats->allocated = ctl_sarena->astats->allocated_small +
+ atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
+ ATOMIC_RELAXED);
+ ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
+ ctl_stats->metadata = atomic_load_zu(
+ &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
+ atomic_load_zu(&ctl_sarena->astats->astats.internal,
+ ATOMIC_RELAXED);
+ ctl_stats->resident = atomic_load_zu(
+ &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
+ ctl_stats->mapped = atomic_load_zu(
+ &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
+ ctl_stats->retained = atomic_load_zu(
+ &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
+
+ ctl_background_thread_stats_read(tsdn);
+
+#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \
+ malloc_mutex_lock(tsdn, &mtx); \
+ malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \
+ malloc_mutex_unlock(tsdn, &mtx);
+
+ if (config_prof && opt_prof) {
+ READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
+ bt2gctx_mtx);
+ }
+ if (have_background_thread) {
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_background_thread,
+ background_thread_lock);
+ } else {
+ memset(&ctl_stats->mutex_prof_data[
+ global_prof_mutex_background_thread], 0,
+ sizeof(mutex_prof_data_t));
+ }
+ /* We own ctl mutex already. */
+ malloc_mutex_prof_read(tsdn,
+ &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
+ &ctl_mtx);
+#undef READ_GLOBAL_MUTEX_PROF_DATA
}
-
- ctl_epoch++;
+ ctl_arenas->epoch++;
}
static bool
-ctl_init(void)
-{
+ctl_init(tsd_t *tsd) {
bool ret;
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ if (!ctl_initialized) {
+ ctl_arena_t *ctl_sarena, *ctl_darena;
+ unsigned i;
- malloc_mutex_lock(&ctl_mtx);
- if (ctl_initialized == false) {
/*
- * Allocate space for one extra arena stats element, which
- * contains summed stats across all arenas.
+ * Allocate demand-zeroed space for pointers to the full
+ * range of supported arena indices.
*/
- assert(narenas_auto == narenas_total_get());
- ctl_stats.narenas = narenas_auto;
- ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
- (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
- if (ctl_stats.arenas == NULL) {
+ if (ctl_arenas == NULL) {
+ ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
+ b0get(), sizeof(ctl_arenas_t), QUANTUM);
+ if (ctl_arenas == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ if (config_stats && ctl_stats == NULL) {
+ ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
+ sizeof(ctl_stats_t), QUANTUM);
+ if (ctl_stats == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ }
+
+ /*
+ * Allocate space for the current full range of arenas
+ * here rather than doing it lazily elsewhere, in order
+ * to limit when OOM-caused errors can occur.
+ */
+ if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
+ true)) == NULL) {
ret = true;
goto label_return;
}
- memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
- sizeof(ctl_arena_stats_t));
+ ctl_sarena->initialized = true;
+ if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
+ false, true)) == NULL) {
+ ret = true;
+ goto label_return;
+ }
+ ctl_arena_clear(ctl_darena);
/*
- * Initialize all stats structures, regardless of whether they
- * ever get used. Lazy initialization would allow errors to
- * cause inconsistent state to be viewable by the application.
+ * Don't toggle ctl_darena to initialized until an arena is
+ * actually destroyed, so that arena.<i>.initialized can be used
+ * to query whether the stats are relevant.
*/
- if (config_stats) {
- unsigned i;
- for (i = 0; i <= ctl_stats.narenas; i++) {
- if (ctl_arena_init(&ctl_stats.arenas[i])) {
- ret = true;
- goto label_return;
- }
+
+ ctl_arenas->narenas = narenas_total_get();
+ for (i = 0; i < ctl_arenas->narenas; i++) {
+ if (arenas_i_impl(tsd, i, false, true) == NULL) {
+ ret = true;
+ goto label_return;
}
}
- ctl_stats.arenas[ctl_stats.narenas].initialized = true;
- ctl_epoch = 0;
- ctl_refresh();
+ ql_new(&ctl_arenas->destroyed);
+ ctl_refresh(tsdn);
+
ctl_initialized = true;
}
ret = false;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return ret;
}
static int
-ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
- size_t *depthp)
-{
+ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
+ size_t *mibp, size_t *depthp) {
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
@@ -750,9 +1080,10 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) {
node = child;
- if (nodesp != NULL)
+ if (nodesp != NULL) {
nodesp[i] =
(const ctl_node_t *)node;
+ }
mibp[i] = j;
break;
}
@@ -773,14 +1104,15 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
}
inode = ctl_indexed_node(node->children);
- node = inode->index(mibp, *depthp, (size_t)index);
+ node = inode->index(tsdn, mibp, *depthp, (size_t)index);
if (node == NULL) {
ret = ENOENT;
goto label_return;
}
- if (nodesp != NULL)
+ if (nodesp != NULL) {
nodesp[i] = (const ctl_node_t *)node;
+ }
mibp[i] = (size_t)index;
}
@@ -813,33 +1145,33 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
ret = 0;
label_return:
- return (ret);
+ return ret;
}
int
-ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
-{
+ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen) {
int ret;
size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
- if (ctl_initialized == false && ctl_init()) {
+ if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
depth = CTL_MAX_DEPTH;
- ret = ctl_lookup(name, nodes, mib, &depth);
- if (ret != 0)
+ ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
+ if (ret != 0) {
goto label_return;
+ }
node = ctl_named_node(nodes[depth-1]);
- if (node != NULL && node->ctl)
- ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
- else {
+ if (node != NULL && node->ctl) {
+ ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
+ } else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
}
@@ -849,29 +1181,27 @@ label_return:
}
int
-ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
-{
+ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
int ret;
- if (ctl_initialized == false && ctl_init()) {
+ if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
- ret = ctl_lookup(name, NULL, mibp, miblenp);
+ ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
label_return:
return(ret);
}
int
-ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const ctl_named_node_t *node;
size_t i;
- if (ctl_initialized == false && ctl_init()) {
+ if (!ctl_initialized && ctl_init(tsd)) {
ret = EAGAIN;
goto label_return;
}
@@ -893,7 +1223,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
- node = inode->index(mib, miblen, mib[i]);
+ node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
@@ -902,9 +1232,9 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
}
/* Call the ctl function. */
- if (node && node->ctl)
- ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
- else {
+ if (node && node->ctl) {
+ ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ } else {
/* Partial MIB. */
ret = ENOENT;
}
@@ -914,56 +1244,58 @@ label_return:
}
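
ctl_byname(), ctl_nametomib(), and ctl_bymib() back the public mallctl(), mallctlnametomib(), and mallctlbymib() entry points. A minimal caller-side sketch of the MIB path, which avoids repeated name lookups ("arenas.nbins" and its unsigned type follow the public documentation; verify against the installed version):

	#include <jemalloc/jemalloc.h>

	size_t mib[2];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	unsigned nbins;
	size_t sz = sizeof(nbins);

	/* Translate the name once, then reuse the MIB for later queries. */
	if (mallctlnametomib("arenas.nbins", mib, &miblen) == 0) {
		mallctlbymib(mib, miblen, &nbins, &sz, NULL, 0);
	}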
bool
-ctl_boot(void)
-{
-
- if (malloc_mutex_init(&ctl_mtx))
- return (true);
+ctl_boot(void) {
+ if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
ctl_initialized = false;
- return (false);
+ return false;
}
void
-ctl_prefork(void)
-{
-
- malloc_mutex_prefork(&ctl_mtx);
+ctl_prefork(tsdn_t *tsdn) {
+ malloc_mutex_prefork(tsdn, &ctl_mtx);
}
void
-ctl_postfork_parent(void)
-{
-
- malloc_mutex_postfork_parent(&ctl_mtx);
+ctl_postfork_parent(tsdn_t *tsdn) {
+ malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}
void
-ctl_postfork_child(void)
-{
-
- malloc_mutex_postfork_child(&ctl_mtx);
+ctl_postfork_child(tsdn_t *tsdn) {
+ malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
/******************************************************************************/
/* *_ctl() functions. */
-#define READONLY() do { \
+#define READONLY() do { \
if (newp != NULL || newlen != 0) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
-#define WRITEONLY() do { \
+#define WRITEONLY() do { \
if (oldp != NULL || oldlenp != NULL) { \
ret = EPERM; \
goto label_return; \
} \
} while (0)
-#define READ(v, t) do { \
+#define READ_XOR_WRITE() do { \
+ if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
+ newlen != 0)) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
@@ -971,12 +1303,12 @@ ctl_postfork_child(void)
memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
goto label_return; \
- } else \
- *(t *)oldp = (v); \
+ } \
+ *(t *)oldp = (v); \
} \
} while (0)
-#define WRITE(v, t) do { \
+#define WRITE(v, t) do { \
if (newp != NULL) { \
if (newlen != sizeof(t)) { \
ret = EINVAL; \
@@ -986,101 +1318,109 @@ ctl_postfork_child(void)
} \
} while (0)
+#define MIB_UNSIGNED(v, i) do { \
+ if (mib[i] > UINT_MAX) { \
+ ret = EFAULT; \
+ goto label_return; \
+ } \
+ v = (unsigned)mib[i]; \
+} while (0)
+
/*
* There's a lot of code duplication in the following macros due to limitations
* in how nested cpp macros are expanded.
*/
-#define CTL_RO_CLGEN(c, l, n, v, t) \
+#define CTL_RO_CLGEN(c, l, n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if ((c) == false) \
- return (ENOENT); \
- if (l) \
- malloc_mutex_lock(&ctl_mtx); \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
+ if (l) { \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
+ } \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- if (l) \
- malloc_mutex_unlock(&ctl_mtx); \
- return (ret); \
+ if (l) { \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ } \
+ return ret; \
}
-#define CTL_RO_CGEN(c, n, v, t) \
+#define CTL_RO_CGEN(c, n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if ((c) == false) \
- return (ENOENT); \
- malloc_mutex_lock(&ctl_mtx); \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(&ctl_mtx); \
- return (ret); \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return ret; \
}
-#define CTL_RO_GEN(n, v, t) \
+#define CTL_RO_GEN(n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- malloc_mutex_lock(&ctl_mtx); \
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- malloc_mutex_unlock(&ctl_mtx); \
- return (ret); \
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \
+ return ret; \
}
/*
* ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call.
*/
-#define CTL_RO_NL_CGEN(c, n, v, t) \
+#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
- if ((c) == false) \
- return (ENOENT); \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
READONLY(); \
oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
label_return: \
- return (ret); \
+ return ret; \
}
-#define CTL_RO_NL_GEN(n, v, t) \
+#define CTL_RO_NL_GEN(n, v, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1090,24 +1430,42 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
\
ret = 0; \
label_return: \
- return (ret); \
+ return ret; \
}
-#define CTL_RO_BOOL_CONFIG_GEN(n) \
+#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
static int \
-n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
- void *newp, size_t newlen) \
-{ \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
- bool oldval; \
+ t oldval; \
+ \
+ if (!(c)) { \
+ return ENOENT; \
+ } \
+ READONLY(); \
+ oldval = (m(tsd)); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ return ret; \
+}
+
+#define CTL_RO_CONFIG_GEN(n, t) \
+static int \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
+ size_t *oldlenp, void *newp, size_t newlen) { \
+ int ret; \
+ t oldval; \
\
READONLY(); \
oldval = n; \
- READ(oldval, bool); \
+ READ(oldval, t); \
\
ret = 0; \
label_return: \
- return (ret); \
+ return ret; \
}
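For reference, each generator above stamps out one handler per mallctl leaf. CTL_RO_NL_GEN(opt_abort, opt_abort, bool) expands to roughly the following (backslashes removed):

	static int
	opt_abort_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
	    size_t *oldlenp, void *newp, size_t newlen) {
		int ret;
		bool oldval;

		READONLY();             /* writes are rejected with EPERM */
		oldval = (opt_abort);   /* snapshot; "NL" flavor takes no lock */
		READ(oldval, bool);     /* copy out to the caller */

		ret = 0;
	label_return:
		return ret;
	}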
/******************************************************************************/
@@ -1115,62 +1473,122 @@ label_return: \
CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
static int
-epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
UNUSED uint64_t newval;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(newval, uint64_t);
- if (newp != NULL)
- ctl_refresh();
- READ(ctl_epoch, uint64_t);
+ if (newp != NULL) {
+ ctl_refresh(tsd_tsdn(tsd));
+ }
+ READ(ctl_arenas->epoch, uint64_t);
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
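ctl_refresh() is only driven by writes to the "epoch" node, so stats readers are expected to bump the epoch before sampling. A sketch of the usual pattern (unprefixed names, as above):

	#include <stddef.h>
	#include <stdint.h>
	#include <jemalloc/jemalloc.h>

	static size_t
	read_allocated(void) {
		uint64_t epoch = 1;
		size_t sz = sizeof(epoch);
		/* Writing any value refreshes the cached stats snapshot. */
		mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

		size_t allocated;
		sz = sizeof(allocated);
		mallctl("stats.allocated", &allocated, &sz, NULL, 0);
		return allocated;
	}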
+static int
+background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ bool oldval;
+
+ if (!have_background_thread) {
+ return ENOENT;
+ }
+ background_thread_ctl_init(tsd_tsdn(tsd));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ if (newp == NULL) {
+ oldval = background_thread_enabled();
+ READ(oldval, bool);
+ } else {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = background_thread_enabled();
+ READ(oldval, bool);
+
+ bool newval = *(bool *)newp;
+ if (newval == oldval) {
+ ret = 0;
+ goto label_return;
+ }
+
+ background_thread_enabled_set(tsd_tsdn(tsd), newval);
+ if (newval) {
+ if (!can_enable_background_thread) {
+ malloc_printf("<jemalloc>: Error in dlsym("
+ "RTLD_NEXT, \"pthread_create\"). Cannot "
+ "enable background_thread\n");
+ ret = EFAULT;
+ goto label_return;
+ }
+ if (background_threads_enable(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ } else {
+ if (background_threads_disable(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+ }
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+
+ return ret;
}
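The new handler makes background purging togglable at run time; per the code above it reports ENOENT on builds without background-thread support and EFAULT when pthread_create could not be resolved at load time. A hedged usage sketch:

	#include <stdbool.h>
	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	static int
	enable_background_purging(void) {
		bool enable = true, old;
		size_t sz = sizeof(old);
		/* Read the previous setting and switch it on in one call. */
		return mallctl("background_thread", &old, &sz, &enable,
		    sizeof(enable));
	}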
/******************************************************************************/
-CTL_RO_BOOL_CONFIG_GEN(config_debug)
-CTL_RO_BOOL_CONFIG_GEN(config_dss)
-CTL_RO_BOOL_CONFIG_GEN(config_fill)
-CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
-CTL_RO_BOOL_CONFIG_GEN(config_mremap)
-CTL_RO_BOOL_CONFIG_GEN(config_munmap)
-CTL_RO_BOOL_CONFIG_GEN(config_prof)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
-CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
-CTL_RO_BOOL_CONFIG_GEN(config_stats)
-CTL_RO_BOOL_CONFIG_GEN(config_tcache)
-CTL_RO_BOOL_CONFIG_GEN(config_tls)
-CTL_RO_BOOL_CONFIG_GEN(config_utrace)
-CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
-CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
+CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
+CTL_RO_CONFIG_GEN(config_debug, bool)
+CTL_RO_CONFIG_GEN(config_fill, bool)
+CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
+CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
+CTL_RO_CONFIG_GEN(config_prof, bool)
+CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
+CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
+CTL_RO_CONFIG_GEN(config_stats, bool)
+CTL_RO_CONFIG_GEN(config_thp, bool)
+CTL_RO_CONFIG_GEN(config_utrace, bool)
+CTL_RO_CONFIG_GEN(config_xmalloc, bool)
/******************************************************************************/
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
+CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
+CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
-CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
-CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
-CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
+CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
+CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
+ const char *)
+CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
+CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
+CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
-CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
-CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
+CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
-CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
-CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
+CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
+ opt_prof_thread_active_init, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
@@ -1181,504 +1599,1100 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
/******************************************************************************/
static int
-thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
+ arena_t *oldarena;
unsigned newind, oldind;
- malloc_mutex_lock(&ctl_mtx);
- newind = oldind = choose_arena(NULL)->ind;
+ oldarena = arena_choose(tsd, NULL);
+ if (oldarena == NULL) {
+ return EAGAIN;
+ }
+ newind = oldind = arena_ind_get(oldarena);
WRITE(newind, unsigned);
READ(oldind, unsigned);
+
if (newind != oldind) {
- arena_t *arena;
+ arena_t *newarena;
- if (newind >= ctl_stats.narenas) {
+ if (newind >= narenas_total_get()) {
/* New arena index is out of range. */
ret = EFAULT;
goto label_return;
}
+ if (have_percpu_arena &&
+ PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
+ if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
+ /*
+ * If perCPU arena is enabled, thread_arena
+ * control is not allowed for the auto arena
+ * range.
+ */
+ ret = EPERM;
+ goto label_return;
+ }
+ }
+
/* Initialize arena if necessary. */
- malloc_mutex_lock(&arenas_lock);
- if ((arena = arenas[newind]) == NULL && (arena =
- arenas_extend(newind)) == NULL) {
- malloc_mutex_unlock(&arenas_lock);
+ newarena = arena_get(tsd_tsdn(tsd), newind, true);
+ if (newarena == NULL) {
ret = EAGAIN;
goto label_return;
}
- assert(arena == arenas[newind]);
- arenas[oldind]->nthreads--;
- arenas[newind]->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
-
- /* Set new arena association. */
- if (config_tcache) {
- tcache_t *tcache;
- if ((uintptr_t)(tcache = *tcache_tsd_get()) >
- (uintptr_t)TCACHE_STATE_MAX) {
- tcache_arena_dissociate(tcache);
- tcache_arena_associate(tcache, arena);
- }
+ /* Set new arena/tcache associations. */
+ arena_migrate(tsd, oldind, newind);
+ if (tcache_available(tsd)) {
+ tcache_arena_reassociate(tsd_tsdn(tsd),
+ tsd_tcachep_get(tsd), newarena);
}
- arenas_tsd_set(&arena);
}
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ return ret;
}
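A sketch of rebinding the calling thread via "thread.arena"; new_ind is assumed to come from a prior "arenas.create". Per the checks above, EFAULT means the index is out of range and EPERM means it falls inside the automatic percpu arena range:

	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	static int
	bind_thread_to_arena(unsigned new_ind) {
		unsigned old_ind;
		size_t sz = sizeof(old_ind);
		return mallctl("thread.arena", &old_ind, &sz, &new_ind,
		    sizeof(new_ind));
	}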
-CTL_RO_NL_CGEN(config_stats, thread_allocated,
- thread_allocated_tsd_get()->allocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
- &thread_allocated_tsd_get()->allocated, uint64_t *)
-CTL_RO_NL_CGEN(config_stats, thread_deallocated,
- thread_allocated_tsd_get()->deallocated, uint64_t)
-CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
- &thread_allocated_tsd_get()->deallocated, uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
+ uint64_t *)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
+ uint64_t)
+CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
+ tsd_thread_deallocatedp_get, uint64_t *)
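The *p variants exist so a thread can fetch the address of its counter once and then poll it with a plain load instead of a mallctl call per sample; the pointer is only meaningful in the thread that read it. Sketch:

	#include <stddef.h>
	#include <stdint.h>
	#include <jemalloc/jemalloc.h>

	static __thread uint64_t *thread_allocated_ptr;

	static void
	cache_thread_counter(void) {
		size_t sz = sizeof(thread_allocated_ptr);
		mallctl("thread.allocatedp", &thread_allocated_ptr, &sz, NULL, 0);
	}

	/* Later, in the same thread: uint64_t now = *thread_allocated_ptr; */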
static int
-thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (config_tcache == false)
- return (ENOENT);
-
- oldval = tcache_enabled_get();
+ oldval = tcache_enabled_get(tsd);
if (newp != NULL) {
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
- tcache_enabled_set(*(bool *)newp);
+ tcache_enabled_set(tsd, *(bool *)newp);
}
READ(oldval, bool);
ret = 0;
label_return:
- return (ret);
+ return ret;
+}
+
+static int
+thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!tcache_available(tsd)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ READONLY();
+ WRITEONLY();
+
+ tcache_flush(tsd);
+
+ ret = 0;
+label_return:
+ return ret;
+}
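Now that tcache support is unconditional, "thread.tcache.flush" returns EFAULT only when the calling thread has no usable tcache. A sketch of releasing a thread's cached memory before it goes idle:

	#include <stdbool.h>
	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	static void
	drop_thread_cache(void) {
		/* Return everything currently cached to the arena... */
		mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
		/* ...and optionally stop caching from here on. */
		bool off = false;
		mallctl("thread.tcache.enabled", NULL, NULL, &off, sizeof(off));
	}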
+
+static int
+thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ READ_XOR_WRITE();
+
+ if (newp != NULL) {
+ if (newlen != sizeof(const char *)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
+ 0) {
+ goto label_return;
+ }
+ } else {
+ const char *oldname = prof_thread_name_get(tsd);
+ READ(oldname, const char *);
+ }
+
+ ret = 0;
+label_return:
+ return ret;
}
static int
-thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
+ bool oldval;
- if (config_tcache == false)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ oldval = prof_thread_active_get(tsd);
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (prof_thread_active_set(tsd, *(bool *)newp)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+/******************************************************************************/
+
+static int
+tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned tcache_ind;
READONLY();
+ if (tcaches_create(tsd, &tcache_ind)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ READ(tcache_ind, unsigned);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned tcache_ind;
+
WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_flush(tsd, tcache_ind);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned tcache_ind;
- tcache_flush();
+ WRITEONLY();
+ tcache_ind = UINT_MAX;
+ WRITE(tcache_ind, unsigned);
+ if (tcache_ind == UINT_MAX) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ tcaches_destroy(tsd, tcache_ind);
ret = 0;
label_return:
- return (ret);
+ return ret;
}
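These three nodes implement the explicitly managed tcaches used together with MALLOCX_TCACHE(). A sketch of the full lifecycle; error handling is elided:

	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	static void
	scratch_tcache_demo(void) {
		unsigned tc;
		size_t sz = sizeof(tc);
		mallctl("tcache.create", &tc, &sz, NULL, 0);

		void *p = mallocx(4096, MALLOCX_TCACHE(tc));
		dallocx(p, MALLOCX_TCACHE(tc));

		mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));    /* keep */
		mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));  /* retire */
	}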
/******************************************************************************/
-/* ctl_mutex must be held during execution of this function. */
+static int
+arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ unsigned arena_ind;
+ bool initialized;
+
+ READONLY();
+ MIB_UNSIGNED(arena_ind, 1);
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ initialized = arenas_i(arena_ind)->initialized;
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ READ(initialized, bool);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
static void
-arena_purge(unsigned arena_ind)
-{
- VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ {
+ unsigned narenas = ctl_arenas->narenas;
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
- malloc_mutex_unlock(&arenas_lock);
+ /*
+ * Access via index narenas is deprecated, and scheduled for
+ * removal in 6.0.0.
+ */
+ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
+ unsigned i;
+ VARIABLE_ARRAY(arena_t *, tarenas, narenas);
- if (arena_ind == ctl_stats.narenas) {
- unsigned i;
- for (i = 0; i < ctl_stats.narenas; i++) {
- if (tarenas[i] != NULL)
- arena_purge_all(tarenas[i]);
+ for (i = 0; i < narenas; i++) {
+ tarenas[i] = arena_get(tsdn, i, false);
+ }
+
+ /*
+ * No further need to hold ctl_mtx, since narenas and
+ * tarenas contain everything needed below.
+ */
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ for (i = 0; i < narenas; i++) {
+ if (tarenas[i] != NULL) {
+ arena_decay(tsdn, tarenas[i], false,
+ all);
+ }
+ }
+ } else {
+ arena_t *tarena;
+
+ assert(arena_ind < narenas);
+
+ tarena = arena_get(tsdn, arena_ind, false);
+
+ /* No further need to hold ctl_mtx. */
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+
+ if (tarena != NULL) {
+ arena_decay(tsdn, tarena, false, all);
+ }
}
- } else {
- assert(arena_ind < ctl_stats.narenas);
- if (tarenas[arena_ind] != NULL)
- arena_purge_all(tarenas[arena_ind]);
}
}
static int
-arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
+ unsigned arena_ind;
READONLY();
WRITEONLY();
- malloc_mutex_lock(&ctl_mtx);
- arena_purge(mib[1]);
- malloc_mutex_unlock(&ctl_mtx);
+ MIB_UNSIGNED(arena_ind, 1);
+ arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
ret = 0;
label_return:
- return (ret);
+ return ret;
}
static int
-arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret, i;
- bool match, err;
- const char *dss;
- unsigned arena_ind = mib[1];
+arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+
+ READONLY();
+ WRITEONLY();
+ MIB_UNSIGNED(arena_ind, 1);
+ arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
+
+ ret = 0;
+label_return:
+ return ret;
+}
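arena.<i>.purge forces a full purge while arena.<i>.decay merely applies the decay curve; both now accept MALLCTL_ARENAS_ALL. A sketch using the MIB interface so the arena index can be patched in:

	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	static void
	purge_all_arenas(void) {
		size_t mib[3];
		size_t miblen = sizeof(mib) / sizeof(size_t);
		mallctlnametomib("arena.0.purge", mib, &miblen);
		mib[1] = MALLCTL_ARENAS_ALL;     /* every initialized arena */
		mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);
	}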
+
+static int
+arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
+ arena_t **arena) {
+ int ret;
+
+ READONLY();
+ WRITEONLY();
+ MIB_UNSIGNED(*arena_ind, 1);
+
+ *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
+ if (*arena == NULL || arena_is_auto(*arena)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static void
+arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
+ /* Temporarily disable the background thread during arena reset. */
+ if (have_background_thread) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ if (background_thread_enabled()) {
+ unsigned ind = arena_ind % ncpus;
+ background_thread_info_t *info =
+ &background_thread_info[ind];
+ assert(info->state == background_thread_started);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ info->state = background_thread_paused;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+ }
+}
+
+static void
+arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
+ if (have_background_thread) {
+ if (background_thread_enabled()) {
+ unsigned ind = arena_ind % ncpus;
+ background_thread_info_t *info =
+ &background_thread_info[ind];
+ assert(info->state == background_thread_paused);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ info->state = background_thread_started;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+ }
+}
+
+static int
+arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
+ newp, newlen, &arena_ind, &arena);
+ if (ret != 0) {
+ return ret;
+ }
+
+ arena_reset_prepare_background_thread(tsd, arena_ind);
+ arena_reset(tsd, arena);
+ arena_reset_finish_background_thread(tsd, arena_ind);
+
+ return ret;
+}
+
+static int
+arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+ ctl_arena_t *ctl_darena, *ctl_arena;
+
+ ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
+ newp, newlen, &arena_ind, &arena);
+ if (ret != 0) {
+ goto label_return;
+ }
+
+ if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
+ true) != 0) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ arena_reset_prepare_background_thread(tsd, arena_ind);
+ /* Merge stats after resetting and purging arena. */
+ arena_reset(tsd, arena);
+ arena_decay(tsd_tsdn(tsd), arena, false, true);
+ ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
+ ctl_darena->initialized = true;
+ ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
+ /* Destroy arena. */
+ arena_destroy(tsd, arena);
+ ctl_arena = arenas_i(arena_ind);
+ ctl_arena->initialized = false;
+ /* Record arena index for later recycling via arenas.create. */
+ ql_elm_new(ctl_arena, destroyed_link);
+ ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
+ arena_reset_finish_background_thread(tsd, arena_ind);
+
+ assert(ret == 0);
+label_return:
+ return ret;
+}
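Both nodes refuse automatic arenas (arena_is_auto), and destroy additionally requires that no threads remain attached. Reset frees everything but keeps the arena usable; destroy also retires it and merges its stats into the MALLCTL_ARENAS_DESTROYED slot. Sketch, assuming ind came from an earlier "arenas.create":

	#include <stdio.h>
	#include <jemalloc/jemalloc.h>

	static void
	retire_manual_arena(unsigned ind) {
		char node[64];
		snprintf(node, sizeof(node), "arena.%u.reset", ind);
		mallctl(node, NULL, NULL, NULL, 0);   /* free all of its extents */

		snprintf(node, sizeof(node), "arena.%u.destroy", ind);
		mallctl(node, NULL, NULL, NULL, 0);   /* retire; the index may be
						         recycled by arenas.create */
	}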
+
+static int
+arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const char *dss = NULL;
+ unsigned arena_ind;
dss_prec_t dss_prec_old = dss_prec_limit;
dss_prec_t dss_prec = dss_prec_limit;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(dss, const char *);
- match = false;
- for (i = 0; i < dss_prec_limit; i++) {
- if (strcmp(dss_prec_names[i], dss) == 0) {
- dss_prec = i;
- match = true;
- break;
+ MIB_UNSIGNED(arena_ind, 1);
+ if (dss != NULL) {
+ int i;
+ bool match = false;
+
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strcmp(dss_prec_names[i], dss) == 0) {
+ dss_prec = i;
+ match = true;
+ break;
+ }
+ }
+
+ if (!match) {
+ ret = EINVAL;
+ goto label_return;
}
- }
- if (match == false) {
- ret = EINVAL;
- goto label_return;
}
- if (arena_ind < ctl_stats.narenas) {
- arena_t *arena = arenas[arena_ind];
- if (arena != NULL) {
- dss_prec_old = arena_dss_prec_get(arena);
- arena_dss_prec_set(arena, dss_prec);
- err = false;
- } else
- err = true;
+ /*
+ * Access via index narenas is deprecated, and scheduled for removal in
+ * 6.0.0.
+ */
+ if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
+ ctl_arenas->narenas) {
+ if (dss_prec != dss_prec_limit &&
+ extent_dss_prec_set(dss_prec)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = extent_dss_prec_get();
} else {
- dss_prec_old = chunk_dss_prec_get();
- err = chunk_dss_prec_set(dss_prec);
+ arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL || (dss_prec != dss_prec_limit &&
+ arena_dss_prec_set(arena, dss_prec))) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ dss_prec_old = arena_dss_prec_get(arena);
}
+
dss = dss_prec_names[dss_prec_old];
READ(dss, const char *);
- if (err) {
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
+static int
+arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ MIB_UNSIGNED(arena_ind, 1);
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
ret = EFAULT;
goto label_return;
}
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
+ arena_muzzy_decay_ms_get(arena);
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
+ *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
+ arena, *(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
+ }
+
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ return ret;
}
-static const ctl_named_node_t *
-arena_i_index(const size_t *mib, size_t miblen, size_t i)
-{
- const ctl_named_node_t * ret;
+static int
+arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, true);
+}
- malloc_mutex_lock(&ctl_mtx);
- if (i > ctl_stats.narenas) {
- ret = NULL;
+static int
+arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, false);
+}
+
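A sketch of tuning the per-arena dirty decay time; -1 disables time-based purging and 0 purges immediately. The muzzy variant is identical with "muzzy_decay_ms":

	#include <stdio.h>
	#include <sys/types.h>
	#include <jemalloc/jemalloc.h>

	static int
	set_dirty_decay(unsigned ind, ssize_t decay_ms) {
		char node[64];
		snprintf(node, sizeof(node), "arena.%u.dirty_decay_ms", ind);
		return mallctl(node, NULL, NULL, &decay_ms, sizeof(decay_ms));
	}

	/* e.g. set_dirty_decay(ind, 5000); to purge dirty pages after ~5s */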
+static int
+arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+ arena_t *arena;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ MIB_UNSIGNED(arena_ind, 1);
+ if (arena_ind < narenas_total_get() && (arena =
+ arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
+ if (newp != NULL) {
+ extent_hooks_t *old_extent_hooks;
+ extent_hooks_t *new_extent_hooks
+ JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_extent_hooks, extent_hooks_t *);
+ old_extent_hooks = extent_hooks_set(tsd, arena,
+ new_extent_hooks);
+ READ(old_extent_hooks, extent_hooks_t *);
+ } else {
+ extent_hooks_t *old_extent_hooks =
+ extent_hooks_get(arena);
+ READ(old_extent_hooks, extent_hooks_t *);
+ }
+ } else {
+ ret = EFAULT;
goto label_return;
}
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
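The handler lets one call install a new hook table through newp and return the previous table through oldp. Sketch, where my_hooks is a hypothetical user-defined extent_hooks_t (the required hook signatures match the defaults declared in extent.c below):

	#include <stdio.h>
	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	extern extent_hooks_t my_hooks;          /* hypothetical user hook table */

	static extent_hooks_t *
	swap_extent_hooks(unsigned ind) {
		char node[64];
		snprintf(node, sizeof(node), "arena.%u.extent_hooks", ind);
		extent_hooks_t *old_hooks, *new_hooks = &my_hooks;
		size_t sz = sizeof(old_hooks);
		if (mallctl(node, &old_hooks, &sz, &new_hooks,
		    sizeof(new_hooks)) != 0) {
			return NULL;
		}
		return old_hooks;               /* previous table, e.g. the default */
	}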
+static const ctl_named_node_t *
+arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
+ const ctl_named_node_t *ret;
+
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ switch (i) {
+ case MALLCTL_ARENAS_ALL:
+ case MALLCTL_ARENAS_DESTROYED:
+ break;
+ default:
+ if (i > ctl_arenas->narenas) {
+ ret = NULL;
+ goto label_return;
+ }
+ break;
+ }
ret = super_arena_i_node;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return ret;
}
/******************************************************************************/
static int
-arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
unsigned narenas;
- malloc_mutex_lock(&ctl_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
if (*oldlenp != sizeof(unsigned)) {
ret = EINVAL;
goto label_return;
}
- narenas = ctl_stats.narenas;
+ narenas = ctl_arenas->narenas;
READ(narenas, unsigned);
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
}
static int
-arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
+arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
- unsigned nread, i;
- malloc_mutex_lock(&ctl_mtx);
- READONLY();
- if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
- ret = EINVAL;
- nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
- ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
- } else {
- ret = 0;
- nread = ctl_stats.narenas;
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
+ arena_muzzy_decay_ms_default_get());
+ READ(oldval, ssize_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
+ : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
+ ret = EFAULT;
+ goto label_return;
+ }
}
- for (i = 0; i < nread; i++)
- ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
-
+ ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ return ret;
+}
+
+static int
+arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, true);
+}
+
+static int
+arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
+ newlen, false);
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
-CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
-CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
+CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t)
static const ctl_named_node_t *
-arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > NBINS)
- return (NULL);
- return (super_arenas_bin_i_node);
+arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
+ if (i > NBINS) {
+ return NULL;
+ }
+ return super_arenas_bin_i_node;
}
-CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
-CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
+CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned)
+CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]),
+ size_t)
static const ctl_named_node_t *
-arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
-{
-
- if (i > nlclasses)
- return (NULL);
- return (super_arenas_lrun_i_node);
+arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ if (i > NSIZES - NBINS) {
+ return NULL;
+ }
+ return super_arenas_lextent_i_node;
}
static int
-arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
+ extent_hooks_t *extent_hooks;
unsigned arena_ind;
- malloc_mutex_lock(&ctl_mtx);
- WRITEONLY();
- arena_ind = UINT_MAX;
- WRITE(arena_ind, unsigned);
- if (newp != NULL && arena_ind >= ctl_stats.narenas)
- ret = EFAULT;
- else {
- if (arena_ind == UINT_MAX)
- arena_ind = ctl_stats.narenas;
- arena_purge(arena_ind);
- ret = 0;
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+
+ extent_hooks = (extent_hooks_t *)&extent_hooks_default;
+ WRITE(extent_hooks, extent_hooks_t *);
+ if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
+ ret = EAGAIN;
+ goto label_return;
}
+ READ(arena_ind, unsigned);
+ ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
}
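"arenas.create" now optionally takes an extent_hooks_t ** and returns the new (or recycled) arena index. Sketch of creating a manual arena and steering an allocation to it:

	#include <stddef.h>
	#include <jemalloc/jemalloc.h>

	static void *
	alloc_from_new_arena(size_t size) {
		unsigned ind;
		size_t sz = sizeof(ind);
		/* Passing no newp keeps the default extent hooks. */
		if (mallctl("arenas.create", &ind, &sz, NULL, 0) != 0) {
			return NULL;
		}
		return mallocx(size, MALLOCX_ARENA(ind) | MALLOCX_TCACHE_NONE);
	}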
+/******************************************************************************/
+
static int
-arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- unsigned narenas;
+ bool oldval;
- malloc_mutex_lock(&ctl_mtx);
- READONLY();
- if (ctl_grow()) {
- ret = EAGAIN;
- goto label_return;
+ if (!config_prof) {
+ return ENOENT;
}
- narenas = ctl_stats.narenas - 1;
- READ(narenas, unsigned);
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
+ *(bool *)newp);
+ } else {
+ oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ }
+ READ(oldval, bool);
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ return ret;
}
-/******************************************************************************/
-
static int
-prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
bool oldval;
- if (config_prof == false)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
- malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
- oldval = opt_prof_active;
if (newp != NULL) {
- /*
- * The memory barriers will tend to make opt_prof_active
- * propagate faster on systems with weak memory ordering.
- */
- mb_write();
- WRITE(opt_prof_active, bool);
- mb_write();
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
+ } else {
+ oldval = prof_active_get(tsd_tsdn(tsd));
}
READ(oldval, bool);
ret = 0;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ return ret;
}
static int
-prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
int ret;
const char *filename = NULL;
- if (config_prof == false)
- return (ENOENT);
+ if (!config_prof) {
+ return ENOENT;
+ }
WRITEONLY();
WRITE(filename, const char *);
- if (prof_mdump(filename)) {
+ if (prof_mdump(tsd, filename)) {
ret = EFAULT;
goto label_return;
}
ret = 0;
label_return:
- return (ret);
+ return ret;
+}
+
+static int
+prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ bool oldval;
+
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
+ } else {
+ oldval = prof_gdump_get(tsd_tsdn(tsd));
+ }
+ READ(oldval, bool);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ size_t lg_sample = lg_prof_sample;
+
+ if (!config_prof) {
+ return ENOENT;
+ }
+
+ WRITEONLY();
+ WRITE(lg_sample, size_t);
+ if (lg_sample >= (sizeof(uint64_t) << 3)) {
+ lg_sample = (sizeof(uint64_t) << 3) - 1;
+ }
+
+ prof_reset(tsd, lg_sample);
+
+ ret = 0;
+label_return:
+ return ret;
}
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
+CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
/******************************************************************************/
-CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
-CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
-CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
+CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
+CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
+
+CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
+ ctl_stats->background_thread.num_threads, size_t)
+CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
+ ctl_stats->background_thread.num_runs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
+ nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
+
+CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
+CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
+ ssize_t)
+CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
+ ssize_t)
+CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_uptime,
+ nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
+CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
+CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
+CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
+ size_t)
-CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
+CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.npurge),
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
+ arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.purged),
+ uint64_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge),
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
+ arena_stats_read_u64(
+ &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.purged),
+ uint64_t)
+
+CTL_RO_CGEN(config_stats, stats_arenas_i_base,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
+ size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
+ ATOMIC_RELAXED), size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
size_t)
-CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
-CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
-
-CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
-CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
-CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
-CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
- ctl_stats.arenas[mib[2]].astats.mapped, size_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
- ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
- ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
- ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
- ctl_stats.arenas[mib[2]].allocated_small, size_t)
+ arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
- ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
+ arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
- ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
+ arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
- ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
+ arenas_i(mib[2])->astats->nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
- ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
+ atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
+ ATOMIC_RELAXED), size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
- ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
- ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.ndalloc_large),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
- ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large),
+ uint64_t) /* Intentional. */
+
+/* Lock profiling related APIs below. */
+#define RO_MUTEX_CTL_GEN(n, l) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \
+ l.n_lock_ops, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \
+ l.n_wait_times, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \
+ l.n_spin_acquired, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \
+ l.n_owner_switches, uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \
+ nstime_ns(&l.tot_wait_time), uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \
+ nstime_ns(&l.max_wait_time), uint64_t) \
+CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \
+ l.max_n_thds, uint32_t)
+
+/* Global mutexes. */
+#define OP(mtx) \
+ RO_MUTEX_CTL_GEN(mutexes_##mtx, \
+ ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
+MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+
+/* Per arena mutexes */
+#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \
+ arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
+MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+
+/* tcache bin mutex */
+RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
+ arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
+#undef RO_MUTEX_CTL_GEN
+
+/* Resets all mutex stats, including global, arena and bin mutexes. */
+static int
+stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ if (!config_stats) {
+ return ENOENT;
+ }
+
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+
+#define MUTEX_PROF_RESET(mtx) \
+ malloc_mutex_lock(tsdn, &mtx); \
+ malloc_mutex_prof_data_reset(tsdn, &mtx); \
+ malloc_mutex_unlock(tsdn, &mtx);
+
+ /* Global mutexes: ctl and prof. */
+ MUTEX_PROF_RESET(ctl_mtx);
+ if (have_background_thread) {
+ MUTEX_PROF_RESET(background_thread_lock);
+ }
+ if (config_prof && opt_prof) {
+ MUTEX_PROF_RESET(bt2gctx_mtx);
+ }
+
+ /* Per arena mutexes. */
+ unsigned n = narenas_total_get();
+
+ for (unsigned i = 0; i < n; i++) {
+ arena_t *arena = arena_get(tsdn, i, false);
+ if (!arena) {
+ continue;
+ }
+ MUTEX_PROF_RESET(arena->large_mtx);
+ MUTEX_PROF_RESET(arena->extent_avail_mtx);
+ MUTEX_PROF_RESET(arena->extents_dirty.mtx);
+ MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->extents_retained.mtx);
+ MUTEX_PROF_RESET(arena->decay_dirty.mtx);
+ MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->tcache_ql_mtx);
+ MUTEX_PROF_RESET(arena->base->mtx);
+
+ for (szind_t i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+ MUTEX_PROF_RESET(bin->lock);
+ }
+ }
+#undef MUTEX_PROF_RESET
+ return 0;
+}
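The generator above exposes seven counters per mutex, under stats.mutexes.<mutex> for the global mutexes, stats.arenas.<i>.mutexes.<mutex> per arena, and stats.arenas.<i>.bins.<j>.mutex per bin; the reset node zeroes all of them. A sketch, assuming a stats-enabled build and an up-to-date epoch, with the node name following the stats_mutexes_ctl generators above:

	#include <stddef.h>
	#include <stdint.h>
	#include <jemalloc/jemalloc.h>

	static void
	sample_and_reset_mutex_stats(void) {
		uint64_t nops;
		size_t sz = sizeof(nops);
		mallctl("stats.mutexes.ctl.num_ops", &nops, &sz, NULL, 0);
		/* Zero every mutex profiling counter before the next interval. */
		mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
	}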
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
-CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
+ arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
static const ctl_named_node_t *
-stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
-{
-
- if (j > NBINS)
- return (NULL);
- return (super_stats_arenas_i_bins_j_node);
+stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j) {
+ if (j > NBINS) {
+ return NULL;
+ }
+ return super_stats_arenas_i_bins_j_node;
}
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
-CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc),
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc),
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
+ arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests),
+ uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
+ arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
static const ctl_named_node_t *
-stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
-{
-
- if (j > nlclasses)
- return (NULL);
- return (super_stats_arenas_i_lruns_j_node);
+stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t j) {
+ if (j > NSIZES - NBINS) {
+ return NULL;
+ }
+ return super_stats_arenas_i_lextents_j_node;
}
static const ctl_named_node_t *
-stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
-{
- const ctl_named_node_t * ret;
+stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) {
+ const ctl_named_node_t *ret;
+ size_t a;
- malloc_mutex_lock(&ctl_mtx);
- if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
+ malloc_mutex_lock(tsdn, &ctl_mtx);
+ a = arenas_i2a_impl(i, true, true);
+ if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
ret = NULL;
goto label_return;
}
ret = super_stats_arenas_i_node;
label_return:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
+ malloc_mutex_unlock(tsdn, &ctl_mtx);
+ return ret;
}
diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c
index 8c09b486ed..fa45c84d34 100644
--- a/deps/jemalloc/src/extent.c
+++ b/deps/jemalloc/src/extent.c
@@ -1,39 +1,1987 @@
-#define JEMALLOC_EXTENT_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_EXTENT_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_pool.h"
+
+/******************************************************************************/
+/* Data. */
+
+rtree_t extents_rtree;
+/* Keyed by the address of the extent_t being protected. */
+mutex_pool_t extent_mutex_pool;
+
+static const bitmap_info_t extents_bitmap_info =
+ BITMAP_INFO_INITIALIZER(NPSIZES+1);
+
+static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit,
+ unsigned arena_ind);
+static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, bool committed, unsigned arena_ind);
+static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained);
+static bool extent_decommit_default(extent_hooks_t *extent_hooks,
+ void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
+#ifdef PAGES_CAN_PURGE_LAZY
+static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind);
+#endif
+static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained);
+#ifdef PAGES_CAN_PURGE_FORCED
+static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
+ void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
+#endif
+static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained);
+#ifdef JEMALLOC_MAPS_COALESCE
+static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t size_a, size_t size_b, bool committed,
+ unsigned arena_ind);
+#endif
+static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
+ bool growing_retained);
+#ifdef JEMALLOC_MAPS_COALESCE
+static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
+ size_t size_a, void *addr_b, size_t size_b, bool committed,
+ unsigned arena_ind);
+#endif
+static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
+ bool growing_retained);
+
+const extent_hooks_t extent_hooks_default = {
+ extent_alloc_default,
+ extent_dalloc_default,
+ extent_destroy_default,
+ extent_commit_default,
+ extent_decommit_default
+#ifdef PAGES_CAN_PURGE_LAZY
+ ,
+ extent_purge_lazy_default
+#else
+ ,
+ NULL
+#endif
+#ifdef PAGES_CAN_PURGE_FORCED
+ ,
+ extent_purge_forced_default
+#else
+ ,
+ NULL
+#endif
+#ifdef JEMALLOC_MAPS_COALESCE
+ ,
+ extent_split_default,
+ extent_merge_default
+#endif
+};
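A user hook table installed via arena.<i>.extent_hooks (see ctl.c above) must follow the same signatures as the defaults declared here. A heavily simplified, hypothetical alloc hook as a sketch; it ignores placement requests and alignments beyond a (assumed 4 KiB) page, which a real hook must handle by over-allocating and trimming:

	#include <stdbool.h>
	#include <stddef.h>
	#include <sys/mman.h>
	#include <jemalloc/jemalloc.h>

	static void *
	my_extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
	    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
		if (new_addr != NULL || alignment > 4096) {
			return NULL;            /* not handled by this sketch */
		}
		void *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (addr == MAP_FAILED) {
			return NULL;
		}
		*zero = true;                   /* anonymous pages start zeroed */
		*commit = true;
		return addr;
	}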
+
+/* Used exclusively for gdump triggering. */
+static atomic_zu_t curpages;
+static atomic_zu_t highpages;
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
+static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
+ size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
+ bool *zero, bool *commit, bool growing_retained);
+static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent, bool *coalesced, bool growing_retained);
+static void extent_record(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
+ bool growing_retained);
/******************************************************************************/
-static inline int
-extent_szad_comp(extent_node_t *a, extent_node_t *b)
-{
- int ret;
- size_t a_size = a->size;
- size_t b_size = b->size;
+rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link,
+ extent_esnead_comp)
+
+typedef enum {
+ lock_result_success,
+ lock_result_failure,
+ lock_result_no_extent
+} lock_result_t;
+
+static lock_result_t
+extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
+ extent_t **result) {
+ extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
+ elm, true);
+
+ if (extent1 == NULL) {
+ return lock_result_no_extent;
+ }
+ /*
+ * It's possible that the extent changed out from under us, and with it
+ * the leaf->extent mapping. We have to recheck while holding the lock.
+ */
+ extent_lock(tsdn, extent1);
+ extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
+ &extents_rtree, elm, true);
+
+ if (extent1 == extent2) {
+ *result = extent1;
+ return lock_result_success;
+ } else {
+ extent_unlock(tsdn, extent1);
+ return lock_result_failure;
+ }
+}
+
+/*
+ * Returns a pool-locked extent_t * if there's one associated with the given
+ * address, and NULL otherwise.
+ */
+static extent_t *
+extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
+ extent_t *ret = NULL;
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
+ rtree_ctx, (uintptr_t)addr, false, false);
+ if (elm == NULL) {
+ return NULL;
+ }
+ lock_result_t lock_result;
+ do {
+ lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
+ } while (lock_result == lock_result_failure);
+ return ret;
+}
+
+extent_t *
+extent_alloc(tsdn_t *tsdn, arena_t *arena) {
+ malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
+ extent_t *extent = extent_avail_first(&arena->extent_avail);
+ if (extent == NULL) {
+ malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+ return base_alloc_extent(tsdn, arena->base);
+ }
+ extent_avail_remove(&arena->extent_avail, extent);
+ malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+ return extent;
+}
+
+void
+extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
+ extent_avail_insert(&arena->extent_avail, extent);
+ malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
+}
+
+extent_hooks_t *
+extent_hooks_get(arena_t *arena) {
+ return base_extent_hooks_get(arena->base);
+}
+
+extent_hooks_t *
+extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
+ background_thread_info_t *info;
+ if (have_background_thread) {
+ info = arena_background_thread_info_get(arena);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ }
+ extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
+ if (have_background_thread) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+
+ return ret;
+}
+
+static void
+extent_hooks_assure_initialized(arena_t *arena,
+ extent_hooks_t **r_extent_hooks) {
+ if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
+ *r_extent_hooks = extent_hooks_get(arena);
+ }
+}
+
+#ifndef JEMALLOC_JET
+static
+#endif
+size_t
+extent_size_quantize_floor(size_t size) {
+ size_t ret;
+ pszind_t pind;
+
+ assert(size > 0);
+ assert((size & PAGE_MASK) == 0);
+
+ pind = sz_psz2ind(size - sz_large_pad + 1);
+ if (pind == 0) {
+ /*
+ * Avoid underflow. This short-circuit would also do the right
+ * thing for all sizes in the range for which there are
+ * PAGE-spaced size classes, but it's simplest to just handle
+ * the one case that would cause erroneous results.
+ */
+ return size;
+ }
+ ret = sz_pind2sz(pind - 1) + sz_large_pad;
+ assert(ret <= size);
+ return ret;
+}
+
+#ifndef JEMALLOC_JET
+static
+#endif
+size_t
+extent_size_quantize_ceil(size_t size) {
+ size_t ret;
- ret = (a_size > b_size) - (a_size < b_size);
- if (ret == 0) {
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
+ assert(size > 0);
+ assert(size - sz_large_pad <= LARGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
- ret = (a_addr > b_addr) - (a_addr < b_addr);
+ ret = extent_size_quantize_floor(size);
+ if (ret < size) {
+ /*
+ * Skip a quantization that may have an adequately large extent,
+ * because under-sized extents may be mixed in. This only
+ * happens when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
+ sz_large_pad;
}
+ return ret;
+}
+
+/* Generate pairing heap functions. */
+ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
+
+bool
+extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
+ bool delay_coalesce) {
+ if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ for (unsigned i = 0; i < NPSIZES+1; i++) {
+ extent_heap_new(&extents->heaps[i]);
+ }
+ bitmap_init(extents->bitmap, &extents_bitmap_info, true);
+ extent_list_init(&extents->lru);
+ atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
+ extents->state = state;
+ extents->delay_coalesce = delay_coalesce;
+ return false;
+}
+
+extent_state_t
+extents_state_get(const extents_t *extents) {
+ return extents->state;
+}
- return (ret);
+size_t
+extents_npages_get(extents_t *extents) {
+ return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
- extent_szad_comp)
+static void
+extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
+ bool preserve_lru) {
+ malloc_mutex_assert_owner(tsdn, &extents->mtx);
+ assert(extent_state_get(extent) == extents->state);
-static inline int
-extent_ad_comp(extent_node_t *a, extent_node_t *b)
-{
- uintptr_t a_addr = (uintptr_t)a->addr;
- uintptr_t b_addr = (uintptr_t)b->addr;
+ size_t size = extent_size_get(extent);
+ size_t psz = extent_size_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+ if (extent_heap_empty(&extents->heaps[pind])) {
+ bitmap_unset(extents->bitmap, &extents_bitmap_info,
+ (size_t)pind);
+ }
+ extent_heap_insert(&extents->heaps[pind], extent);
+ if (!preserve_lru) {
+ extent_list_append(&extents->lru, extent);
+ }
+ size_t npages = size >> LG_PAGE;
+ /*
+ * All modifications to npages hold the mutex (as asserted above), so we
+ * don't need an atomic fetch-add; we can get by with a load followed by
+ * a store.
+ */
+ size_t cur_extents_npages =
+ atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+ atomic_store_zu(&extents->npages, cur_extents_npages + npages,
+ ATOMIC_RELAXED);
+}
- return ((a_addr > b_addr) - (a_addr < b_addr));
+static void
+extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
+ bool preserve_lru) {
+ malloc_mutex_assert_owner(tsdn, &extents->mtx);
+ assert(extent_state_get(extent) == extents->state);
+
+ size_t size = extent_size_get(extent);
+ size_t psz = extent_size_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+ extent_heap_remove(&extents->heaps[pind], extent);
+ if (extent_heap_empty(&extents->heaps[pind])) {
+ bitmap_set(extents->bitmap, &extents_bitmap_info,
+ (size_t)pind);
+ }
+ if (!preserve_lru) {
+ extent_list_remove(&extents->lru, extent);
+ }
+ size_t npages = size >> LG_PAGE;
+ /*
+ * As in extents_insert_locked, we hold extents->mtx and so don't need
+ * atomic operations for updating extents->npages.
+ */
+ size_t cur_extents_npages =
+ atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
+ assert(cur_extents_npages >= npages);
+	atomic_store_zu(&extents->npages, cur_extents_npages - npages,
+	    ATOMIC_RELAXED);
}
-/* Generate red-black tree functions. */
-rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad,
- extent_ad_comp)
+/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
+static extent_t *
+extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ size_t size) {
+ pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
+ pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
+ (size_t)pind);
+ if (i < NPSIZES+1) {
+ assert(!extent_heap_empty(&extents->heaps[i]));
+ extent_t *extent = extent_heap_any(&extents->heaps[i]);
+ assert(extent_size_get(extent) >= size);
+ return extent;
+ }
+
+ return NULL;
+}
+
+/*
+ * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
+ * large enough.
+ */
+static extent_t *
+extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ size_t size) {
+ extent_t *ret = NULL;
+
+ pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
+ for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
+ &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
+ (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
+ (size_t)i+1)) {
+ assert(!extent_heap_empty(&extents->heaps[i]));
+ extent_t *extent = extent_heap_first(&extents->heaps[i]);
+ assert(extent_size_get(extent) >= size);
+ if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
+ ret = extent;
+ }
+ if (i == NPSIZES) {
+ break;
+ }
+ assert(i < NPSIZES);
+ }
+
+ return ret;
+}
+
+/*
+ * Do {best,first}-fit extent selection, where the selection policy choice is
+ * based on extents->delay_coalesce. Best-fit selection requires less
+ * searching, but its layout policy is less stable and may cause higher virtual
+ * memory fragmentation as a side effect.
+ */
+static extent_t *
+extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ size_t size) {
+ malloc_mutex_assert_owner(tsdn, &extents->mtx);
+
+ return extents->delay_coalesce ? extents_best_fit_locked(tsdn, arena,
+ extents, size) : extents_first_fit_locked(tsdn, arena, extents,
+ size);
+}
+
+static bool
+extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent) {
+ extent_state_set(extent, extent_state_active);
+ bool coalesced;
+ extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
+ extents, extent, &coalesced, false);
+ extent_state_set(extent, extents_state_get(extents));
+
+ if (!coalesced) {
+ return true;
+ }
+ extents_insert_locked(tsdn, extents, extent, true);
+ return false;
+}
+
+extent_t *
+extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ assert(size + pad != 0);
+ assert(alignment != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
+ size, pad, alignment, slab, szind, zero, commit, false);
+}
+
+void
+extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *extent) {
+ assert(extent_base_get(extent) != NULL);
+ assert(extent_size_get(extent) != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_addr_set(extent, extent_base_get(extent));
+ extent_zeroed_set(extent, false);
+
+ extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
+}
+
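+/*
+ * Evict and return the LRU extent from the set, unless doing so would drop
+ * the set below npages_min pages (in which case NULL is returned).
+ */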
+extent_t *
+extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, size_t npages_min) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ malloc_mutex_lock(tsdn, &extents->mtx);
+
+ /*
+ * Get the LRU coalesced extent, if any. If coalescing was delayed,
+ * the loop will iterate until the LRU extent is fully coalesced.
+ */
+ extent_t *extent;
+ while (true) {
+ /* Get the LRU extent, if any. */
+ extent = extent_list_first(&extents->lru);
+ if (extent == NULL) {
+ goto label_return;
+ }
+ /* Check the eviction limit. */
+ size_t npages = extent_size_get(extent) >> LG_PAGE;
+ size_t extents_npages = atomic_load_zu(&extents->npages,
+ ATOMIC_RELAXED);
+ if (extents_npages - npages < npages_min) {
+ extent = NULL;
+ goto label_return;
+ }
+ extents_remove_locked(tsdn, extents, extent, false);
+ if (!extents->delay_coalesce) {
+ break;
+ }
+ /* Try to coalesce. */
+ if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
+ rtree_ctx, extents, extent)) {
+ break;
+ }
+ /*
+ * The LRU extent was just coalesced and the result placed in
+ * the LRU at its neighbor's position. Start over.
+ */
+ }
+
+ /*
+ * Either mark the extent active or deregister it to protect against
+ * concurrent operations.
+ */
+ switch (extents_state_get(extents)) {
+ case extent_state_active:
+ not_reached();
+ case extent_state_dirty:
+ case extent_state_muzzy:
+ extent_state_set(extent, extent_state_active);
+ break;
+ case extent_state_retained:
+ extent_deregister(tsdn, extent);
+ break;
+ default:
+ not_reached();
+ }
+
+label_return:
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ return extent;
+}
+
+static void
+extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *extent, bool growing_retained) {
+ /*
+ * Leak extent after making sure its pages have already been purged, so
+ * that this is only a virtual memory leak.
+ */
+ if (extents_state_get(extents) == extent_state_dirty) {
+ if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
+ extent, 0, extent_size_get(extent), growing_retained)) {
+ extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
+ extent, 0, extent_size_get(extent),
+ growing_retained);
+ }
+ }
+ extent_dalloc(tsdn, arena, extent);
+}
+
+void
+extents_prefork(tsdn_t *tsdn, extents_t *extents) {
+ malloc_mutex_prefork(tsdn, &extents->mtx);
+}
+
+void
+extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
+ malloc_mutex_postfork_parent(tsdn, &extents->mtx);
+}
+
+void
+extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
+ malloc_mutex_postfork_child(tsdn, &extents->mtx);
+}
+
+static void
+extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ extent_t *extent, bool preserve_lru) {
+ assert(extent_arena_get(extent) == arena);
+ assert(extent_state_get(extent) == extent_state_active);
+
+ extent_state_set(extent, extents_state_get(extents));
+ extents_insert_locked(tsdn, extents, extent, preserve_lru);
+}
+
+static void
+extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ extent_t *extent, bool preserve_lru) {
+ malloc_mutex_lock(tsdn, &extents->mtx);
+ extent_deactivate_locked(tsdn, arena, extents, extent, preserve_lru);
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+}
+
+static void
+extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
+ extent_t *extent, bool preserve_lru) {
+ assert(extent_arena_get(extent) == arena);
+ assert(extent_state_get(extent) == extents_state_get(extents));
+
+ extents_remove_locked(tsdn, extents, extent, preserve_lru);
+ extent_state_set(extent, extent_state_active);
+}
+
+static bool
+extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
+ const extent_t *extent, bool dependent, bool init_missing,
+ rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
+ *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent), dependent, init_missing);
+ if (!dependent && *r_elm_a == NULL) {
+ return true;
+ }
+ assert(*r_elm_a != NULL);
+
+ *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_last_get(extent), dependent, init_missing);
+ if (!dependent && *r_elm_b == NULL) {
+ return true;
+ }
+ assert(*r_elm_b != NULL);
+
+ return false;
+}
+
+static void
+extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
+ rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
+ if (elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
+ slab);
+ }
+}
+
+static void
+extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
+ szind_t szind) {
+ assert(extent_slab_get(extent));
+
+ /* Register interior. */
+ for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ rtree_write(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+ LG_PAGE), extent, szind, true);
+ }
+}
+
+static void
+extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
+ cassert(config_prof);
+ /* prof_gdump() requirement. */
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (opt_prof && extent_state_get(extent) == extent_state_active) {
+ size_t nadd = extent_size_get(extent) >> LG_PAGE;
+ size_t cur = atomic_fetch_add_zu(&curpages, nadd,
+ ATOMIC_RELAXED) + nadd;
+ size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
+ while (cur > high && !atomic_compare_exchange_weak_zu(
+ &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
+ /*
+ * Don't refresh cur, because it may have decreased
+ * since this thread lost the highpages update race.
+ * Note that high is updated in case of CAS failure.
+ */
+ }
+ if (cur > high && prof_gdump_get_unlocked()) {
+ prof_gdump(tsdn);
+ }
+ }
+}
+
+static void
+extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
+ cassert(config_prof);
+
+ if (opt_prof && extent_state_get(extent) == extent_state_active) {
+ size_t nsub = extent_size_get(extent) >> LG_PAGE;
+ assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
+ atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
+ }
+}
+
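+/*
+ * Record the extent's first and last pages in the global extents_rtree so
+ * that address lookups (and hence deallocation and coalescing) can find it.
+ */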
+static bool
+extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *elm_a, *elm_b;
+
+ /*
+ * We need to hold the lock to protect against a concurrent coalesce
+ * operation that sees us in a partial state.
+ */
+ extent_lock(tsdn, extent);
+
+ if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
+ &elm_a, &elm_b)) {
+		/* Don't leave the extent locked on lookup failure. */
+		extent_unlock(tsdn, extent);
+		return true;
+ }
+
+ szind_t szind = extent_szind_get_maybe_invalid(extent);
+ bool slab = extent_slab_get(extent);
+ extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
+ if (slab) {
+ extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ }
+
+ extent_unlock(tsdn, extent);
+
+ if (config_prof && gdump_add) {
+ extent_gdump_add(tsdn, extent);
+ }
+
+ return false;
+}
+
+static bool
+extent_register(tsdn_t *tsdn, extent_t *extent) {
+ return extent_register_impl(tsdn, extent, true);
+}
+
+static bool
+extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
+ return extent_register_impl(tsdn, extent, false);
+}
+
+static void
+extent_reregister(tsdn_t *tsdn, extent_t *extent) {
+ bool err = extent_register(tsdn, extent);
+ assert(!err);
+}
+
+static void
+extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
+ extent_t *extent) {
+ size_t i;
+
+ assert(extent_slab_get(extent));
+
+ for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
+ rtree_clear(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
+ LG_PAGE));
+ }
+}
+
+static void
+extent_deregister(tsdn_t *tsdn, extent_t *extent) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *elm_a, *elm_b;
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
+ &elm_a, &elm_b);
+
+ extent_lock(tsdn, extent);
+
+ extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
+ if (extent_slab_get(extent)) {
+ extent_interior_deregister(tsdn, rtree_ctx, extent);
+ extent_slab_set(extent, false);
+ }
+
+ extent_unlock(tsdn, extent);
+
+ if (config_prof) {
+ extent_gdump_sub(tsdn, extent);
+ }
+}
+
+static extent_t *
+extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
+ bool *zero, bool *commit, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+ assert(alignment > 0);
+ if (config_debug && new_addr != NULL) {
+ /*
+ * Non-NULL new_addr has two use cases:
+ *
+ * 1) Recycle a known-extant extent, e.g. during purging.
+ * 2) Perform in-place expanding reallocation.
+ *
+ * Regardless of use case, new_addr must either refer to a
+ * non-existing extent, or to the base of an extant extent,
+ * since only active slabs support interior lookups (which of
+ * course cannot be recycled).
+ */
+ assert(PAGE_ADDR2BASE(new_addr) == new_addr);
+ assert(pad == 0);
+ assert(alignment <= PAGE);
+ }
+
+ size_t esize = size + pad;
+ size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < esize) {
+ return NULL;
+ }
+ malloc_mutex_lock(tsdn, &extents->mtx);
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ extent_t *extent;
+ if (new_addr != NULL) {
+ extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
+ if (extent != NULL) {
+ /*
+			 * We may null out extent to report an error; either
+			 * way we must unlock the associated mutex afterwards.
+ */
+ extent_t *unlock_extent = extent;
+ assert(extent_base_get(extent) == new_addr);
+ if (extent_arena_get(extent) != arena ||
+ extent_size_get(extent) < esize ||
+ extent_state_get(extent) !=
+ extents_state_get(extents)) {
+ extent = NULL;
+ }
+ extent_unlock(tsdn, unlock_extent);
+ }
+ } else {
+ extent = extents_fit_locked(tsdn, arena, extents, alloc_size);
+ }
+ if (extent == NULL) {
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ return NULL;
+ }
+
+ extent_activate_locked(tsdn, arena, extents, extent, false);
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+
+ if (extent_zeroed_get(extent)) {
+ *zero = true;
+ }
+ if (extent_committed_get(extent)) {
+ *commit = true;
+ }
+
+ return extent;
+}
+
+static extent_t *
+extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
+ szind_t szind, extent_t *extent, bool growing_retained) {
+ size_t esize = size + pad;
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
+ PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
+ assert(new_addr == NULL || leadsize == 0);
+ assert(extent_size_get(extent) >= leadsize + esize);
+ size_t trailsize = extent_size_get(extent) - leadsize - esize;
+
+ /* Split the lead. */
+ if (leadsize != 0) {
+ extent_t *lead = extent;
+ extent = extent_split_impl(tsdn, arena, r_extent_hooks,
+ lead, leadsize, NSIZES, false, esize + trailsize, szind,
+ slab, growing_retained);
+ if (extent == NULL) {
+ extent_deregister(tsdn, lead);
+ extents_leak(tsdn, arena, r_extent_hooks, extents,
+ lead, growing_retained);
+ return NULL;
+ }
+ extent_deactivate(tsdn, arena, extents, lead, false);
+ }
+
+ /* Split the trail. */
+ if (trailsize != 0) {
+ extent_t *trail = extent_split_impl(tsdn, arena,
+ r_extent_hooks, extent, esize, szind, slab, trailsize,
+ NSIZES, false, growing_retained);
+ if (trail == NULL) {
+ extent_deregister(tsdn, extent);
+ extents_leak(tsdn, arena, r_extent_hooks, extents,
+ extent, growing_retained);
+ return NULL;
+ }
+ extent_deactivate(tsdn, arena, extents, trail, false);
+ } else if (leadsize == 0) {
+ /*
+ * Splitting causes szind to be set as a side effect, but no
+ * splitting occurred.
+ */
+ extent_szind_set(extent, szind);
+ if (szind != NSIZES) {
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_addr_get(extent), szind, slab);
+ if (slab && extent_size_get(extent) > PAGE) {
+ rtree_szind_slab_update(tsdn, &extents_rtree,
+ rtree_ctx,
+ (uintptr_t)extent_past_get(extent) -
+ (uintptr_t)PAGE, szind, slab);
+ }
+ }
+ }
+
+ return extent;
+}
+
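+/*
+ * Reuse an extent from the given extents set: extract a large enough extent,
+ * split off any leading/trailing excess, then commit and zero the result as
+ * requested by the caller.
+ */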
+static extent_t *
+extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
+ bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+ assert(new_addr == NULL || !slab);
+ assert(pad == 0 || !slab);
+ assert(!*zero || !slab);
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ bool committed = false;
+ extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
+ rtree_ctx, extents, new_addr, size, pad, alignment, slab, zero,
+ &committed, growing_retained);
+ if (extent == NULL) {
+ return NULL;
+ }
+ if (committed) {
+ *commit = true;
+ }
+
+ extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
+ extents, new_addr, size, pad, alignment, slab, szind, extent,
+ growing_retained);
+ if (extent == NULL) {
+ return NULL;
+ }
+
+ if (*commit && !extent_committed_get(extent)) {
+ if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
+ 0, extent_size_get(extent), growing_retained)) {
+ extent_record(tsdn, arena, r_extent_hooks, extents,
+ extent, growing_retained);
+ return NULL;
+ }
+ extent_zeroed_set(extent, true);
+ }
+
+ if (pad != 0) {
+ extent_addr_randomize(tsdn, extent, alignment);
+ }
+ assert(extent_state_get(extent) == extent_state_active);
+ if (slab) {
+ extent_slab_set(extent, slab);
+ extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ }
+
+ if (*zero) {
+ void *addr = extent_base_get(extent);
+ size_t size = extent_size_get(extent);
+ if (!extent_zeroed_get(extent)) {
+ if (pages_purge_forced(addr, size)) {
+ memset(addr, 0, size);
+ }
+ } else if (config_debug) {
+ size_t *p = (size_t *)(uintptr_t)addr;
+ for (size_t i = 0; i < size / sizeof(size_t); i++) {
+ assert(p[i] == 0);
+ }
+ }
+ }
+ return extent;
+}
+
+/*
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
+ * advantage of this to avoid demanding zeroed extents, while still taking
+ * advantage of them if they happen to be returned.
+ */
+static void *
+extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
+ void *ret;
+
+ assert(size != 0);
+ assert(alignment != 0);
+
+ /* "primary" dss. */
+ if (have_dss && dss_prec == dss_prec_primary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+ /* mmap. */
+ if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
+ != NULL) {
+ return ret;
+ }
+ /* "secondary" dss. */
+ if (have_dss && dss_prec == dss_prec_secondary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+
+ /* All strategies for allocation failed. */
+ return NULL;
+}
+
+static void *
+extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit) {
+ void *ret;
+
+ ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
+ commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
+ ATOMIC_RELAXED));
+ return ret;
+}
+
+static void *
+extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ tsdn_t *tsdn;
+ arena_t *arena;
+
+ tsdn = tsdn_fetch();
+ arena = arena_get(tsdn, arena_ind, false);
+ /*
+ * The arena we're allocating on behalf of must have been initialized
+ * already.
+ */
+ assert(arena != NULL);
+
+ return extent_alloc_default_impl(tsdn, arena, new_addr, size,
+ alignment, zero, commit);
+}
+
+static void
+extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ pre_reentrancy(tsd, arena);
+}
+
+static void
+extent_hook_post_reentrancy(tsdn_t *tsdn) {
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ post_reentrancy(tsd);
+}
+
+/*
+ * If virtual memory is retained, create increasingly larger extents from which
+ * to split requested extents in order to limit the total number of disjoint
+ * virtual memory ranges retained by each arena.
+ */
+static extent_t *
+extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
+ bool slab, szind_t szind, bool *zero, bool *commit) {
+ malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
+ assert(pad == 0 || !slab);
+ assert(!*zero || !slab);
+
+ size_t esize = size + pad;
+ size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
+ /* Beware size_t wrap-around. */
+ if (alloc_size_min < esize) {
+ goto label_err;
+ }
+ /*
+ * Find the next extent size in the series that would be large enough to
+ * satisfy this request.
+ */
+ pszind_t egn_skip = 0;
+ size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+ while (alloc_size < alloc_size_min) {
+ egn_skip++;
+ if (arena->extent_grow_next + egn_skip == NPSIZES) {
+ /* Outside legal range. */
+ goto label_err;
+ }
+ assert(arena->extent_grow_next + egn_skip < NPSIZES);
+ alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+ }
+
+ extent_t *extent = extent_alloc(tsdn, arena);
+ if (extent == NULL) {
+ goto label_err;
+ }
+ bool zeroed = false;
+ bool committed = false;
+
+ void *ptr;
+ if (*r_extent_hooks == &extent_hooks_default) {
+ ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE,
+ &zeroed, &committed, (dss_prec_t)atomic_load_u(
+ &arena->dss_prec, ATOMIC_RELAXED));
+ } else {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
+ alloc_size, PAGE, &zeroed, &committed,
+ arena_ind_get(arena));
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
+ arena_extent_sn_next(arena), extent_state_active, zeroed,
+ committed);
+ if (ptr == NULL) {
+ extent_dalloc(tsdn, arena, extent);
+ goto label_err;
+ }
+ if (extent_register_no_gdump_add(tsdn, extent)) {
+ extents_leak(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, extent, true);
+ goto label_err;
+ }
+
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr,
+ PAGE_CEILING(alignment)) - (uintptr_t)ptr;
+ assert(alloc_size >= leadsize + esize);
+ size_t trailsize = alloc_size - leadsize - esize;
+ if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
+ *zero = true;
+ }
+ if (extent_committed_get(extent)) {
+ *commit = true;
+ }
+
+ /* Split the lead. */
+ if (leadsize != 0) {
+ extent_t *lead = extent;
+ extent = extent_split_impl(tsdn, arena, r_extent_hooks, lead,
+ leadsize, NSIZES, false, esize + trailsize, szind, slab,
+ true);
+ if (extent == NULL) {
+ extent_deregister(tsdn, lead);
+ extents_leak(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, lead, true);
+ goto label_err;
+ }
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, lead, true);
+ }
+
+ /* Split the trail. */
+ if (trailsize != 0) {
+ extent_t *trail = extent_split_impl(tsdn, arena, r_extent_hooks,
+ extent, esize, szind, slab, trailsize, NSIZES, false, true);
+ if (trail == NULL) {
+ extent_deregister(tsdn, extent);
+ extents_leak(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, extent, true);
+ goto label_err;
+ }
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, trail, true);
+ } else if (leadsize == 0) {
+ /*
+ * Splitting causes szind to be set as a side effect, but no
+ * splitting occurred.
+ */
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+ &rtree_ctx_fallback);
+
+ extent_szind_set(extent, szind);
+ if (szind != NSIZES) {
+ rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_addr_get(extent), szind, slab);
+ if (slab && extent_size_get(extent) > PAGE) {
+ rtree_szind_slab_update(tsdn, &extents_rtree,
+ rtree_ctx,
+ (uintptr_t)extent_past_get(extent) -
+ (uintptr_t)PAGE, szind, slab);
+ }
+ }
+ }
+
+ if (*commit && !extent_committed_get(extent)) {
+ if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
+ extent_size_get(extent), true)) {
+ extent_record(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, extent, true);
+ goto label_err;
+ }
+ extent_zeroed_set(extent, true);
+ }
+
+ /*
+ * Increment extent_grow_next if doing so wouldn't exceed the legal
+ * range.
+ */
+ if (arena->extent_grow_next + egn_skip + 1 < NPSIZES) {
+ arena->extent_grow_next += egn_skip + 1;
+ } else {
+ arena->extent_grow_next = NPSIZES - 1;
+ }
+ /* All opportunities for failure are past. */
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+
+ if (config_prof) {
+ /* Adjust gdump stats now that extent is final size. */
+ extent_gdump_add(tsdn, extent);
+ }
+ if (pad != 0) {
+ extent_addr_randomize(tsdn, extent, alignment);
+ }
+ if (slab) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
+ &rtree_ctx_fallback);
+
+ extent_slab_set(extent, true);
+ extent_interior_register(tsdn, rtree_ctx, extent, szind);
+ }
+ if (*zero && !extent_zeroed_get(extent)) {
+ void *addr = extent_base_get(extent);
+ size_t size = extent_size_get(extent);
+ if (pages_purge_forced(addr, size)) {
+ memset(addr, 0, size);
+ }
+ }
+
+ return extent;
+label_err:
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ return NULL;
+}
+
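+/*
+ * Try to satisfy an allocation from the arena's retained virtual memory,
+ * growing the retained range (if opt_retain) when nothing suitable is cached.
+ */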
+static extent_t *
+extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ assert(size != 0);
+ assert(alignment != 0);
+
+ malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
+
+ extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, new_addr, size, pad, alignment, slab,
+ szind, zero, commit, true);
+ if (extent != NULL) {
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ if (config_prof) {
+ extent_gdump_add(tsdn, extent);
+ }
+ } else if (opt_retain && new_addr == NULL) {
+ extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
+ pad, alignment, slab, szind, zero, commit);
+ /* extent_grow_retained() always releases extent_grow_mtx. */
+ } else {
+ malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ }
+ malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
+
+ return extent;
+}
+
+static extent_t *
+extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ size_t esize = size + pad;
+ extent_t *extent = extent_alloc(tsdn, arena);
+ if (extent == NULL) {
+ return NULL;
+ }
+ void *addr;
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
+ alignment, zero, commit);
+ } else {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
+ esize, alignment, zero, commit, arena_ind_get(arena));
+ extent_hook_post_reentrancy(tsdn);
+ }
+ if (addr == NULL) {
+ extent_dalloc(tsdn, arena, extent);
+ return NULL;
+ }
+ extent_init(extent, arena, addr, esize, slab, szind,
+ arena_extent_sn_next(arena), extent_state_active, zero, commit);
+ if (pad != 0) {
+ extent_addr_randomize(tsdn, extent, alignment);
+ }
+ if (extent_register(tsdn, extent)) {
+ extents_leak(tsdn, arena, r_extent_hooks,
+ &arena->extents_retained, extent, false);
+ return NULL;
+ }
+
+ return extent;
+}
+
+extent_t *
+extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
+ size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
+ new_addr, size, pad, alignment, slab, szind, zero, commit);
+ if (extent == NULL) {
+ extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
+ new_addr, size, pad, alignment, slab, szind, zero, commit);
+ }
+
+ return extent;
+}
+
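+/*
+ * Two adjacent extents can only be merged if they belong to the same arena,
+ * are in the same state, and have matching commit status.
+ */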
+static bool
+extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
+ const extent_t *outer) {
+ assert(extent_arena_get(inner) == arena);
+ if (extent_arena_get(outer) != arena) {
+ return false;
+ }
+
+ assert(extent_state_get(inner) == extent_state_active);
+ if (extent_state_get(outer) != extents->state) {
+ return false;
+ }
+
+ if (extent_committed_get(inner) != extent_committed_get(outer)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
+ bool growing_retained) {
+ assert(extent_can_coalesce(arena, extents, inner, outer));
+
+ if (forward && extents->delay_coalesce) {
+ /*
+ * The extent that remains after coalescing must occupy the
+ * outer extent's position in the LRU. For forward coalescing,
+ * swap the inner extent into the LRU.
+ */
+ extent_list_replace(&extents->lru, outer, inner);
+ }
+ extent_activate_locked(tsdn, arena, extents, outer,
+ extents->delay_coalesce);
+
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+ bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
+ forward ? inner : outer, forward ? outer : inner, growing_retained);
+ malloc_mutex_lock(tsdn, &extents->mtx);
+
+ if (err) {
+ if (forward && extents->delay_coalesce) {
+ extent_list_replace(&extents->lru, inner, outer);
+ }
+ extent_deactivate_locked(tsdn, arena, extents, outer,
+ extents->delay_coalesce);
+ }
+
+ return err;
+}
+
+static extent_t *
+extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+ extent_t *extent, bool *coalesced, bool growing_retained) {
+ /*
+ * Continue attempting to coalesce until failure, to protect against
+ * races with other threads that are thwarted by this one.
+ */
+ bool again;
+ do {
+ again = false;
+
+ /* Try to coalesce forward. */
+ extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
+ extent_past_get(extent));
+ if (next != NULL) {
+ /*
+ * extents->mtx only protects against races for
+ * like-state extents, so call extent_can_coalesce()
+ * before releasing next's pool lock.
+ */
+ bool can_coalesce = extent_can_coalesce(arena, extents,
+ extent, next);
+
+ extent_unlock(tsdn, next);
+
+ if (can_coalesce && !extent_coalesce(tsdn, arena,
+ r_extent_hooks, extents, extent, next, true,
+ growing_retained)) {
+ if (extents->delay_coalesce) {
+ /* Do minimal coalescing. */
+ *coalesced = true;
+ return extent;
+ }
+ again = true;
+ }
+ }
+
+ /* Try to coalesce backward. */
+ extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
+ extent_before_get(extent));
+ if (prev != NULL) {
+ bool can_coalesce = extent_can_coalesce(arena, extents,
+ extent, prev);
+ extent_unlock(tsdn, prev);
+
+ if (can_coalesce && !extent_coalesce(tsdn, arena,
+ r_extent_hooks, extents, extent, prev, false,
+ growing_retained)) {
+ extent = prev;
+ if (extents->delay_coalesce) {
+ /* Do minimal coalescing. */
+ *coalesced = true;
+ return extent;
+ }
+ again = true;
+ }
+ }
+ } while (again);
+
+ if (extents->delay_coalesce) {
+ *coalesced = false;
+ }
+ return extent;
+}
+
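+/*
+ * Return an extent to the given extents set, coalescing with neighbors first
+ * unless the set is configured to delay coalescing.
+ */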
+static void
+extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
+ extents_t *extents, extent_t *extent, bool growing_retained) {
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+ assert((extents_state_get(extents) != extent_state_dirty &&
+ extents_state_get(extents) != extent_state_muzzy) ||
+ !extent_zeroed_get(extent));
+
+ malloc_mutex_lock(tsdn, &extents->mtx);
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ extent_szind_set(extent, NSIZES);
+ if (extent_slab_get(extent)) {
+ extent_interior_deregister(tsdn, rtree_ctx, extent);
+ extent_slab_set(extent, false);
+ }
+
+ assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
+ (uintptr_t)extent_base_get(extent), true) == extent);
+
+ if (!extents->delay_coalesce) {
+ extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
+ rtree_ctx, extents, extent, NULL, growing_retained);
+ }
+
+ extent_deactivate_locked(tsdn, arena, extents, extent, false);
+
+ malloc_mutex_unlock(tsdn, &extents->mtx);
+}
+
+void
+extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+ extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ if (extent_register(tsdn, extent)) {
+ extents_leak(tsdn, arena, &extent_hooks,
+ &arena->extents_retained, extent, false);
+ return;
+ }
+ extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
+}
+
+static bool
+extent_dalloc_default_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ return extent_dalloc_mmap(addr, size);
+ }
+ return true;
+}
+
+static bool
+extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ return extent_dalloc_default_impl(addr, size);
+}
+
+static bool
+extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ bool err;
+
+ assert(extent_base_get(extent) != NULL);
+ assert(extent_size_get(extent) != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_addr_set(extent, extent_base_get(extent));
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ /* Try to deallocate. */
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ err = extent_dalloc_default_impl(extent_base_get(extent),
+ extent_size_get(extent));
+ } else {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ err = ((*r_extent_hooks)->dalloc == NULL ||
+ (*r_extent_hooks)->dalloc(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent),
+ extent_committed_get(extent), arena_ind_get(arena)));
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ if (!err) {
+ extent_dalloc(tsdn, arena, extent);
+ }
+
+ return err;
+}
+
+void
+extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /*
+ * Deregister first to avoid a race with other allocating threads, and
+ * reregister if deallocation fails.
+ */
+ extent_deregister(tsdn, extent);
+ if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
+ return;
+ }
+
+ extent_reregister(tsdn, extent);
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ /* Try to decommit; purge if that fails. */
+ bool zeroed;
+ if (!extent_committed_get(extent)) {
+ zeroed = true;
+ } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
+ 0, extent_size_get(extent))) {
+ zeroed = true;
+ } else if ((*r_extent_hooks)->purge_forced != NULL &&
+ !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), 0,
+ extent_size_get(extent), arena_ind_get(arena))) {
+ zeroed = true;
+ } else if (extent_state_get(extent) == extent_state_muzzy ||
+ ((*r_extent_hooks)->purge_lazy != NULL &&
+ !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), 0,
+ extent_size_get(extent), arena_ind_get(arena)))) {
+ zeroed = false;
+ } else {
+ zeroed = false;
+ }
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ extent_zeroed_set(extent, zeroed);
+
+ if (config_prof) {
+ extent_gdump_sub(tsdn, extent);
+ }
+
+ extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
+ extent, false);
+}
+
+static void
+extent_destroy_default_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ pages_unmap(addr, size);
+ }
+}
+
+static void
+extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ extent_destroy_default_impl(addr, size);
+}
+
+void
+extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent) {
+ assert(extent_base_get(extent) != NULL);
+ assert(extent_size_get(extent) != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /* Deregister first to avoid a race with other allocating threads. */
+ extent_deregister(tsdn, extent);
+
+ extent_addr_set(extent, extent_base_get(extent));
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ /* Try to destroy; silently fail otherwise. */
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ extent_destroy_default_impl(extent_base_get(extent),
+ extent_size_get(extent));
+ } else if ((*r_extent_hooks)->destroy != NULL) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ (*r_extent_hooks)->destroy(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent),
+ extent_committed_get(extent), arena_ind_get(arena));
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ extent_dalloc(tsdn, arena, extent);
+}
+
+static bool
+extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+static bool
+extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = ((*r_extent_hooks)->commit == NULL ||
+ (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
+ extent_size_get(extent), offset, length, arena_ind_get(arena)));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ extent_committed_set(extent, extent_committed_get(extent) || !err);
+ return err;
+}
+
+bool
+extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
+ length, false);
+}
+
+static bool
+extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+bool
+extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = ((*r_extent_hooks)->decommit == NULL ||
+ (*r_extent_hooks)->decommit(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), offset, length,
+ arena_ind_get(arena)));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ extent_committed_set(extent, extent_committed_get(extent) && err);
+ return err;
+}
+
+#ifdef PAGES_CAN_PURGE_LAZY
+static bool
+extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+#endif
+
+static bool
+extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->purge_lazy == NULL) {
+ return true;
+ }
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), offset, length,
+ arena_ind_get(arena));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ return err;
+}
+
+bool
+extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
+ offset, length, false);
+}
+
+#ifdef PAGES_CAN_PURGE_FORCED
+static bool
+extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+
+ return pages_purge_forced((void *)((uintptr_t)addr +
+ (uintptr_t)offset), length);
+}
+#endif
+
+static bool
+extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->purge_forced == NULL) {
+ return true;
+ }
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
+ extent_base_get(extent), extent_size_get(extent), offset, length,
+ arena_ind_get(arena));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ return err;
+}
+
+bool
+extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
+ size_t length) {
+ return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
+ offset, length, false);
+}
+
+#ifdef JEMALLOC_MAPS_COALESCE
+static bool
+extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
+ return !maps_coalesce;
+}
+#endif
+
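+/*
+ * Split an extent into a lead of size_a and a trail of size_b. On success
+ * the input extent becomes the lead and the new trail extent is returned;
+ * on failure NULL is returned and the input extent is left unmodified.
+ */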
+static extent_t *
+extent_split_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
+ bool growing_retained) {
+ assert(extent_size_get(extent) == size_a + size_b);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->split == NULL) {
+ return NULL;
+ }
+
+ extent_t *trail = extent_alloc(tsdn, arena);
+ if (trail == NULL) {
+ goto label_error_a;
+ }
+
+ extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
+ size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
+ extent_state_get(extent), extent_zeroed_get(extent),
+ extent_committed_get(extent));
+
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
+ {
+ extent_t lead;
+
+ extent_init(&lead, arena, extent_addr_get(extent), size_a,
+ slab_a, szind_a, extent_sn_get(extent),
+ extent_state_get(extent), extent_zeroed_get(extent),
+ extent_committed_get(extent));
+
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
+ true, &lead_elm_a, &lead_elm_b);
+ }
+ rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
+ &trail_elm_a, &trail_elm_b);
+
+ if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
+ || trail_elm_b == NULL) {
+ goto label_error_b;
+ }
+
+ extent_lock2(tsdn, extent, trail);
+
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ }
+ bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
+ size_a + size_b, size_a, size_b, extent_committed_get(extent),
+ arena_ind_get(arena));
+ if (*r_extent_hooks != &extent_hooks_default) {
+ extent_hook_post_reentrancy(tsdn);
+ }
+ if (err) {
+ goto label_error_c;
+ }
+
+ extent_size_set(extent, size_a);
+ extent_szind_set(extent, szind_a);
+
+ extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
+ szind_a, slab_a);
+ extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
+ szind_b, slab_b);
+
+ extent_unlock2(tsdn, extent, trail);
+
+ return trail;
+label_error_c:
+ extent_unlock2(tsdn, extent, trail);
+label_error_b:
+ extent_dalloc(tsdn, arena, trail);
+label_error_a:
+ return NULL;
+}
+
+extent_t *
+extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
+ szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
+ return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
+ szind_a, slab_a, size_b, szind_b, slab_b, false);
+}
+
+static bool
+extent_merge_default_impl(void *addr_a, void *addr_b) {
+ if (!maps_coalesce) {
+ return true;
+ }
+ if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
+ return true;
+ }
+
+ return false;
+}
+
+#ifdef JEMALLOC_MAPS_COALESCE
+static bool
+extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+ return extent_merge_default_impl(addr_a, addr_b);
+}
+#endif
+
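+/*
+ * Merge extent b into extent a. On success a describes the combined range
+ * and b's extent_t struct is released back to the arena; returns true on
+ * failure.
+ */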
+static bool
+extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
+ bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ extent_hooks_assure_initialized(arena, r_extent_hooks);
+
+ if ((*r_extent_hooks)->merge == NULL) {
+ return true;
+ }
+
+ bool err;
+ if (*r_extent_hooks == &extent_hooks_default) {
+ /* Call directly to propagate tsdn. */
+ err = extent_merge_default_impl(extent_base_get(a),
+ extent_base_get(b));
+ } else {
+ extent_hook_pre_reentrancy(tsdn, arena);
+ err = (*r_extent_hooks)->merge(*r_extent_hooks,
+ extent_base_get(a), extent_size_get(a), extent_base_get(b),
+ extent_size_get(b), extent_committed_get(a),
+ arena_ind_get(arena));
+ extent_hook_post_reentrancy(tsdn);
+ }
+
+ if (err) {
+ return true;
+ }
+
+ /*
+ * The rtree writes must happen while all the relevant elements are
+ * owned, so the following code uses decomposed helper functions rather
+ * than extent_{,de}register() to do things in the right order.
+ */
+ rtree_ctx_t rtree_ctx_fallback;
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
+ &a_elm_b);
+ extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
+ &b_elm_b);
+
+ extent_lock2(tsdn, a, b);
+
+ if (a_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
+ NSIZES, false);
+ }
+ if (b_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
+ NSIZES, false);
+ } else {
+ b_elm_b = b_elm_a;
+ }
+
+ extent_size_set(a, extent_size_get(a) + extent_size_get(b));
+ extent_szind_set(a, NSIZES);
+ extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
+ extent_sn_get(a) : extent_sn_get(b));
+ extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+
+ extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
+
+ extent_unlock2(tsdn, a, b);
+
+ extent_dalloc(tsdn, extent_arena_get(b), b);
+
+ return false;
+}
+
+bool
+extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
+ return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
+}
+
+bool
+extent_boot(void) {
+ if (rtree_new(&extents_rtree, true)) {
+ return true;
+ }
+
+ if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
+ WITNESS_RANK_EXTENT_POOL)) {
+ return true;
+ }
+
+ if (have_dss) {
+ extent_dss_boot();
+ }
+
+ return false;
+}
diff --git a/deps/jemalloc/src/hash.c b/deps/jemalloc/src/hash.c
index cfa4da0275..7b2bdc2bd6 100644
--- a/deps/jemalloc/src/hash.c
+++ b/deps/jemalloc/src/hash.c
@@ -1,2 +1,3 @@
-#define JEMALLOC_HASH_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_HASH_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c
index 204778bc89..0ee8ad48b9 100644
--- a/deps/jemalloc/src/jemalloc.c
+++ b/deps/jemalloc/src/jemalloc.c
@@ -1,15 +1,31 @@
-#define JEMALLOC_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/spin.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/util.h"
/******************************************************************************/
/* Data. */
-malloc_tsd_data(, arenas, arena_t *, NULL)
-malloc_tsd_data(, thread_allocated, thread_allocated_t,
- THREAD_ALLOCATED_INITIALIZER)
-
/* Runtime configuration options. */
-const char *je_malloc_conf;
+const char *je_malloc_conf
+#ifndef _WIN32
+ JEMALLOC_ATTR(weak)
+#endif
+ ;
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
@@ -17,30 +33,80 @@ bool opt_abort =
false
#endif
;
-bool opt_junk =
+bool opt_abort_conf =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+const char *opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ "true"
+#else
+ "false"
+#endif
+ ;
+bool opt_junk_alloc =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
+#endif
+ ;
+bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
true
#else
false
#endif
;
-size_t opt_quarantine = ZU(0);
-bool opt_redzone = false;
+
bool opt_utrace = false;
-bool opt_valgrind = false;
bool opt_xmalloc = false;
bool opt_zero = false;
-size_t opt_narenas = 0;
+unsigned opt_narenas = 0;
unsigned ncpus;
-malloc_mutex_t arenas_lock;
-arena_t **arenas;
-unsigned narenas_total;
-unsigned narenas_auto;
-
-/* Set to true once the allocator has been initialized. */
-static bool malloc_initialized = false;
+/* Protects arenas initialization. */
+malloc_mutex_t arenas_lock;
+/*
+ * Arenas that are used to service external requests. Not all elements of the
+ * arenas array are necessarily used; arenas are created lazily as needed.
+ *
+ * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
+ * arenas. arenas[narenas_auto..narenas_total) are only used if the application
+ * takes some action to create them and allocate from them.
+ *
+ * Points to an arena_t.
+ */
+JEMALLOC_ALIGNED(CACHELINE)
+atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
+static atomic_u_t narenas_total; /* Use narenas_total_*(). */
+static arena_t *a0; /* arenas[0]; read-only after initialization. */
+unsigned narenas_auto; /* Read-only after initialization. */
+
+typedef enum {
+ malloc_init_uninitialized = 3,
+ malloc_init_a0_initialized = 2,
+ malloc_init_recursible = 1,
+ malloc_init_initialized = 0 /* Common case --> jnz. */
+} malloc_init_t;
+static malloc_init_t malloc_init_state = malloc_init_uninitialized;
+
+/* False should be the common case. Set to true to trigger initialization. */
+bool malloc_slow = true;
+
+/* When malloc_slow is true, set the corresponding bits for sanity check. */
+enum {
+ flag_opt_junk_alloc = (1U),
+ flag_opt_junk_free = (1U << 1),
+ flag_opt_zero = (1U << 2),
+ flag_opt_utrace = (1U << 3),
+ flag_opt_xmalloc = (1U << 4)
+};
+static uint8_t malloc_slow_flags;
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
@@ -57,14 +123,30 @@ static bool malloc_initializer = NO_INITIALIZER;
/* Used to avoid initialization races. */
#ifdef _WIN32
+#if _WIN32_WINNT >= 0x0600
+static malloc_mutex_t init_lock = SRWLOCK_INIT;
+#else
static malloc_mutex_t init_lock;
+static bool init_lock_initialized = false;
JEMALLOC_ATTR(constructor)
static void WINAPI
-_init_init_lock(void)
-{
-
- malloc_mutex_init(&init_lock);
+_init_init_lock(void) {
+ /*
+ * If another constructor in the same binary is using mallctl to e.g.
+ * set up extent hooks, it may end up running before this one, and
+ * malloc_init_hard will crash trying to lock the uninitialized lock. So
+ * we force an initialization of the lock in malloc_init_hard as well.
+	 * We don't worry about atomicity of the accesses to the
+ * init_lock_initialized boolean, since it really only matters early in
+ * the process creation, before any separate thread normally starts
+ * doing anything.
+ */
+ if (!init_lock_initialized) {
+ malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
+ malloc_mutex_rank_exclusive);
+ }
+ init_lock_initialized = true;
}
#ifdef _MSC_VER
@@ -72,7 +154,7 @@ _init_init_lock(void)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
-
+#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
@@ -85,7 +167,7 @@ typedef struct {
#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
- if (opt_utrace) { \
+ if (unlikely(opt_utrace)) { \
int utrace_serrno = errno; \
malloc_utrace_t ut; \
ut.p = (a); \
@@ -99,12 +181,16 @@ typedef struct {
# define UTRACE(a, b, c)
#endif
+/* Whether we encountered any invalid config options. */
+static bool had_conf_error = false;
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
+static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);
/******************************************************************************/
@@ -112,54 +198,337 @@ static bool malloc_init_hard(void);
* Begin miscellaneous support functions.
*/
+bool
+malloc_initialized(void) {
+ return (malloc_init_state == malloc_init_initialized);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+malloc_init_a0(void) {
+ if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
+ return malloc_init_hard_a0();
+ }
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+malloc_init(void) {
+ if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
+ return true;
+ }
+ return false;
+}
+
+/*
+ * The a0*() functions are used instead of i{d,}alloc() in situations that
+ * cannot tolerate TLS variable access.
+ */
+
+static void *
+a0ialloc(size_t size, bool zero, bool is_internal) {
+ if (unlikely(malloc_init_a0())) {
+ return NULL;
+ }
+
+ return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
+ is_internal, arena_get(TSDN_NULL, 0, true), true);
+}
+
+static void
+a0idalloc(void *ptr, bool is_internal) {
+ idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
+}
+
+void *
+a0malloc(size_t size) {
+ return a0ialloc(size, false, true);
+}
+
+void
+a0dalloc(void *ptr) {
+ a0idalloc(ptr, true);
+}
+
+/*
+ * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
+ * situations that cannot tolerate TLS variable access (TLS allocation and very
+ * early internal data structure initialization).
+ */
+
+void *
+bootstrap_malloc(size_t size) {
+ if (unlikely(size == 0)) {
+ size = 1;
+ }
+
+ return a0ialloc(size, false, false);
+}
+
+void *
+bootstrap_calloc(size_t num, size_t size) {
+ size_t num_size;
+
+ num_size = num * size;
+ if (unlikely(num_size == 0)) {
+ assert(num == 0 || size == 0);
+ num_size = 1;
+ }
+
+ return a0ialloc(num_size, true, false);
+}
+
+void
+bootstrap_free(void *ptr) {
+ if (unlikely(ptr == NULL)) {
+ return;
+ }
+
+ a0idalloc(ptr, false);
+}
+
+void
+arena_set(unsigned ind, arena_t *arena) {
+ atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
+}
+
+static void
+narenas_total_set(unsigned narenas) {
+ atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
+}
+
+static void
+narenas_total_inc(void) {
+ atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
+}
+
+unsigned
+narenas_total_get(void) {
+ return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
+}
+
/* Create a new arena and insert it into the arenas array at index ind. */
+static arena_t *
+arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ arena_t *arena;
+
+ assert(ind <= narenas_total_get());
+ if (ind >= MALLOCX_ARENA_LIMIT) {
+ return NULL;
+ }
+ if (ind == narenas_total_get()) {
+ narenas_total_inc();
+ }
+
+ /*
+ * Another thread may have already initialized arenas[ind] if it's an
+ * auto arena.
+ */
+ arena = arena_get(tsdn, ind, false);
+ if (arena != NULL) {
+ assert(ind < narenas_auto);
+ return arena;
+ }
+
+ /* Actually initialize the arena. */
+ arena = arena_new(tsdn, ind, extent_hooks);
+
+ return arena;
+}
+
+static void
+arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
+ if (ind == 0) {
+ return;
+ }
+ if (have_background_thread) {
+ bool err;
+ malloc_mutex_lock(tsdn, &background_thread_lock);
+ err = background_thread_create(tsdn_tsd(tsdn), ind);
+ malloc_mutex_unlock(tsdn, &background_thread_lock);
+ if (err) {
+ malloc_printf("<jemalloc>: error in background thread "
+ "creation for arena %u. Abort.\n", ind);
+ abort();
+ }
+ }
+}
+
arena_t *
-arenas_extend(unsigned ind)
-{
- arena_t *ret;
+arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+ arena_t *arena;
+
+ malloc_mutex_lock(tsdn, &arenas_lock);
+ arena = arena_init_locked(tsdn, ind, extent_hooks);
+ malloc_mutex_unlock(tsdn, &arenas_lock);
+
+ arena_new_create_background_thread(tsdn, ind);
+
+ return arena;
+}
+
+static void
+arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_inc(arena, internal);
+
+ if (internal) {
+ tsd_iarena_set(tsd, arena);
+ } else {
+ tsd_arena_set(tsd, arena);
+ }
+}
- ret = (arena_t *)base_alloc(sizeof(arena_t));
- if (ret != NULL && arena_new(ret, ind) == false) {
- arenas[ind] = ret;
- return (ret);
+void
+arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
+ arena_t *oldarena, *newarena;
+
+ oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
+ newarena = arena_get(tsd_tsdn(tsd), newind, false);
+ arena_nthreads_dec(oldarena, false);
+ arena_nthreads_inc(newarena, false);
+ tsd_arena_set(tsd, newarena);
+}
+
+static void
+arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
+ arena_t *arena;
+
+ arena = arena_get(tsd_tsdn(tsd), ind, false);
+ arena_nthreads_dec(arena, internal);
+
+ if (internal) {
+ tsd_iarena_set(tsd, NULL);
+ } else {
+ tsd_arena_set(tsd, NULL);
}
- /* Only reached if there is an OOM error. */
+}
+
+arena_tdata_t *
+arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
+ arena_tdata_t *tdata, *arenas_tdata_old;
+ arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
+ unsigned narenas_tdata_old, i;
+ unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
+ unsigned narenas_actual = narenas_total_get();
/*
- * OOM here is quite inconvenient to propagate, since dealing with it
- * would require a check for failure in the fast path. Instead, punt
- * by using arenas[0]. In practice, this is an extremely unlikely
- * failure.
+ * Dissociate old tdata array (and set up for deallocation upon return)
+ * if it's too small.
*/
- malloc_write("<jemalloc>: Error initializing arena\n");
- if (opt_abort)
- abort();
+ if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
+ arenas_tdata_old = arenas_tdata;
+ narenas_tdata_old = narenas_tdata;
+ arenas_tdata = NULL;
+ narenas_tdata = 0;
+ tsd_arenas_tdata_set(tsd, arenas_tdata);
+ tsd_narenas_tdata_set(tsd, narenas_tdata);
+ } else {
+ arenas_tdata_old = NULL;
+ narenas_tdata_old = 0;
+ }
+
+ /* Allocate tdata array if it's missing. */
+ if (arenas_tdata == NULL) {
+ bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
+ narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
- return (arenas[0]);
+ if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
+ *arenas_tdata_bypassp = true;
+ arenas_tdata = (arena_tdata_t *)a0malloc(
+ sizeof(arena_tdata_t) * narenas_tdata);
+ *arenas_tdata_bypassp = false;
+ }
+ if (arenas_tdata == NULL) {
+ tdata = NULL;
+ goto label_return;
+ }
+ assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
+ tsd_arenas_tdata_set(tsd, arenas_tdata);
+ tsd_narenas_tdata_set(tsd, narenas_tdata);
+ }
+
+ /*
+ * Copy to tdata array. It's possible that the actual number of arenas
+ * has increased since narenas_total_get() was called above, but that
+ * causes no correctness issues unless two threads concurrently execute
+ * the arenas.create mallctl, which we trust mallctl synchronization to
+ * prevent.
+ */
+
+ /* Copy/initialize tickers. */
+ for (i = 0; i < narenas_actual; i++) {
+ if (i < narenas_tdata_old) {
+ ticker_copy(&arenas_tdata[i].decay_ticker,
+ &arenas_tdata_old[i].decay_ticker);
+ } else {
+ ticker_init(&arenas_tdata[i].decay_ticker,
+ DECAY_NTICKS_PER_UPDATE);
+ }
+ }
+ if (narenas_tdata > narenas_actual) {
+ memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
+ * (narenas_tdata - narenas_actual));
+ }
+
+ /* Read the refreshed tdata array. */
+ tdata = &arenas_tdata[ind];
+label_return:
+ if (arenas_tdata_old != NULL) {
+ a0dalloc(arenas_tdata_old);
+ }
+ return tdata;
}
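/*
 * A minimal sketch of the grow-and-copy pattern used by arena_tdata_get_hard()
 * above: when the per-thread table is too small for the requested index,
 * allocate a larger table, copy the surviving entries, leave the tail zeroed,
 * and free the old table.  The ticker_t type and grow_tdata() helper below are
 * illustrative stand-ins, not the real jemalloc API.
 */
#include <stdlib.h>
#include <string.h>

typedef struct { unsigned nticks; } ticker_t;

static ticker_t *
grow_tdata(ticker_t *old, unsigned old_n, unsigned need) {
	ticker_t *new_tab = calloc(need, sizeof(ticker_t));
	if (new_tab == NULL) {
		return NULL;
	}
	if (old != NULL) {
		/* Preserve existing per-arena state; the tail stays zeroed. */
		memcpy(new_tab, old, old_n * sizeof(ticker_t));
		free(old);
	}
	return new_tab;
}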
-/* Slow path, called only by choose_arena(). */
+/* Slow path, called only by arena_choose(). */
arena_t *
-choose_arena_hard(void)
-{
- arena_t *ret;
+arena_choose_hard(tsd_t *tsd, bool internal) {
+ arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+
+ if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
+ unsigned choose = percpu_arena_choose();
+ ret = arena_get(tsd_tsdn(tsd), choose, true);
+ assert(ret != NULL);
+ arena_bind(tsd, arena_ind_get(ret), false);
+ arena_bind(tsd, arena_ind_get(ret), true);
+
+ return ret;
+ }
if (narenas_auto > 1) {
- unsigned i, choose, first_null;
+ unsigned i, j, choose[2], first_null;
+ bool is_new_arena[2];
+
+ /*
+ * Determine binding for both non-internal and internal
+ * allocation.
+ *
+ * choose[0]: For application allocation.
+ * choose[1]: For internal metadata allocation.
+ */
+
+ for (j = 0; j < 2; j++) {
+ choose[j] = 0;
+ is_new_arena[j] = false;
+ }
- choose = 0;
first_null = narenas_auto;
- malloc_mutex_lock(&arenas_lock);
- assert(arenas[0] != NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
+ assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
for (i = 1; i < narenas_auto; i++) {
- if (arenas[i] != NULL) {
+ if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
/*
* Choose the first arena that has the lowest
* number of threads assigned to it.
*/
- if (arenas[i]->nthreads <
- arenas[choose]->nthreads)
- choose = i;
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), i, false), !!j) <
+ arena_nthreads_get(arena_get(
+ tsd_tsdn(tsd), choose[j], false),
+ !!j)) {
+ choose[j] = i;
+ }
+ }
} else if (first_null == narenas_auto) {
/*
* Record the index of the first uninitialized
@@ -174,38 +543,99 @@ choose_arena_hard(void)
}
}
- if (arenas[choose]->nthreads == 0
- || first_null == narenas_auto) {
- /*
- * Use an unloaded arena, or the least loaded arena if
- * all arenas are already initialized.
- */
- ret = arenas[choose];
- } else {
- /* Initialize a new arena. */
- ret = arenas_extend(first_null);
+ for (j = 0; j < 2; j++) {
+ if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
+ choose[j], false), !!j) == 0 || first_null ==
+ narenas_auto) {
+ /*
+ * Use an unloaded arena, or the least loaded
+ * arena if all arenas are already initialized.
+ */
+ if (!!j == internal) {
+ ret = arena_get(tsd_tsdn(tsd),
+ choose[j], false);
+ }
+ } else {
+ arena_t *arena;
+
+ /* Initialize a new arena. */
+ choose[j] = first_null;
+ arena = arena_init_locked(tsd_tsdn(tsd),
+ choose[j],
+ (extent_hooks_t *)&extent_hooks_default);
+ if (arena == NULL) {
+ malloc_mutex_unlock(tsd_tsdn(tsd),
+ &arenas_lock);
+ return NULL;
+ }
+ is_new_arena[j] = true;
+ if (!!j == internal) {
+ ret = arena;
+ }
+ }
+ arena_bind(tsd, choose[j], !!j);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
+
+ for (j = 0; j < 2; j++) {
+ if (is_new_arena[j]) {
+ assert(choose[j] > 0);
+ arena_new_create_background_thread(
+ tsd_tsdn(tsd), choose[j]);
+ }
}
- ret->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
+
} else {
- ret = arenas[0];
- malloc_mutex_lock(&arenas_lock);
- ret->nthreads++;
- malloc_mutex_unlock(&arenas_lock);
+ ret = arena_get(tsd_tsdn(tsd), 0, false);
+ arena_bind(tsd, 0, false);
+ arena_bind(tsd, 0, true);
+ }
+
+ return ret;
+}
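/*
 * A simplified sketch of the selection policy in arena_choose_hard() above:
 * track the least-loaded initialized arena and the first uninitialized slot;
 * reuse an idle (or least-loaded) arena when possible, otherwise lazily
 * initialize the first empty slot.  nthreads[], UNINIT and choose_arena_slot()
 * are illustrative only; the real code additionally keeps separate choices for
 * application and internal allocation.
 */
#define UNINIT ((unsigned)-1)

static unsigned
choose_arena_slot(const unsigned *nthreads, unsigned narenas) {
	unsigned i, choose = 0, first_null = narenas;

	/* Slot 0 is assumed initialized, as in the real code. */
	for (i = 1; i < narenas; i++) {
		if (nthreads[i] == UNINIT) {
			if (first_null == narenas) {
				first_null = i;
			}
		} else if (nthreads[i] < nthreads[choose]) {
			choose = i;
		}
	}
	/* Reuse an idle arena, or the least loaded one if every slot is in use. */
	if (nthreads[choose] == 0 || first_null == narenas) {
		return choose;
	}
	/* Otherwise a new arena would be created at the first empty slot. */
	return first_null;
}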
+
+void
+iarena_cleanup(tsd_t *tsd) {
+ arena_t *iarena;
+
+ iarena = tsd_iarena_get(tsd);
+ if (iarena != NULL) {
+ arena_unbind(tsd, arena_ind_get(iarena), true);
}
+}
- arenas_tsd_set(&ret);
+void
+arena_cleanup(tsd_t *tsd) {
+ arena_t *arena;
- return (ret);
+ arena = tsd_arena_get(tsd);
+ if (arena != NULL) {
+ arena_unbind(tsd, arena_ind_get(arena), false);
+ }
}
-static void
-stats_print_atexit(void)
-{
+void
+arenas_tdata_cleanup(tsd_t *tsd) {
+ arena_tdata_t *arenas_tdata;
- if (config_tcache && config_stats) {
+ /* Prevent tsd->arenas_tdata from being (re)created. */
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+
+ arenas_tdata = tsd_arenas_tdata_get(tsd);
+ if (arenas_tdata != NULL) {
+ tsd_arenas_tdata_set(tsd, NULL);
+ a0dalloc(arenas_tdata);
+ }
+}
+
+static void
+stats_print_atexit(void) {
+ if (config_stats) {
+ tsdn_t *tsdn;
unsigned narenas, i;
+ tsdn = tsdn_fetch();
+
/*
* Merge stats from extant threads. This is racy, since
* individual threads do not lock when recording tcache stats
@@ -214,25 +644,45 @@ stats_print_atexit(void)
* continue to allocate.
*/
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
- arena_t *arena = arenas[i];
+ arena_t *arena = arena_get(tsdn, i, false);
if (arena != NULL) {
tcache_t *tcache;
- /*
- * tcache_stats_merge() locks bins, so if any
- * code is introduced that acquires both arena
- * and bin locks in the opposite order,
- * deadlocks may result.
- */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tcache, arena);
+ tcache_stats_merge(tsdn, tcache, arena);
}
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn,
+ &arena->tcache_ql_mtx);
}
}
}
- je_malloc_stats_print(NULL, NULL, NULL);
+ je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
+}
+
+/*
+ * Ensure that we don't hold any locks upon entry to or exit from allocator
+ * code (in a "broad" sense that doesn't count a reentrant allocation as an
+ * entrance or exit).
+ */
+JEMALLOC_ALWAYS_INLINE void
+check_entry_exit_locking(tsdn_t *tsdn) {
+ if (!config_debug) {
+ return;
+ }
+ if (tsdn_null(tsdn)) {
+ return;
+ }
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ /*
+ * It's possible we hold locks at entry/exit if we're in a nested
+ * allocation.
+ */
+ int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
+ if (reentrancy_level != 0) {
+ return;
+ }
+ witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
}
/*
@@ -243,69 +693,82 @@ stats_print_atexit(void)
* Begin initialization functions.
*/
+static char *
+jemalloc_secure_getenv(const char *name) {
+#ifdef JEMALLOC_HAVE_SECURE_GETENV
+ return secure_getenv(name);
+#else
+# ifdef JEMALLOC_HAVE_ISSETUGID
+ if (issetugid() != 0) {
+ return NULL;
+ }
+# endif
+ return getenv(name);
+#endif
+}
+
static unsigned
-malloc_ncpus(void)
-{
+malloc_ncpus(void) {
long result;
#ifdef _WIN32
SYSTEM_INFO si;
GetSystemInfo(&si);
result = si.dwNumberOfProcessors;
+#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
+ /*
+ * glibc >= 2.6 has the CPU_COUNT macro.
+ *
+ * glibc's sysconf() uses isspace(). glibc allocates for the first time
+ * *before* setting up the isspace tables. Therefore we need a
+ * different method to get the number of CPUs.
+ */
+ {
+ cpu_set_t set;
+
+ pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
+ result = CPU_COUNT(&set);
+ }
#else
result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
return ((result == -1) ? 1 : (unsigned)result);
}
-void
-arenas_cleanup(void *arg)
-{
- arena_t *arena = *(arena_t **)arg;
-
- malloc_mutex_lock(&arenas_lock);
- arena->nthreads--;
- malloc_mutex_unlock(&arenas_lock);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void
-malloc_thread_init(void)
-{
-
- /*
- * TSD initialization can't be safely done as a side effect of
- * deallocation, because it is possible for a thread to do nothing but
- * deallocate its TLS data via free(), in which case writing to TLS
- * would cause write-after-free memory corruption. The quarantine
- * facility *only* gets used as a side effect of deallocation, so make
- * a best effort attempt at initializing its TSD by hooking all
- * allocation events.
- */
- if (config_fill && opt_quarantine)
- quarantine_alloc_hook();
-}
-
-JEMALLOC_ALWAYS_INLINE_C bool
-malloc_init(void)
-{
+static void
+init_opt_stats_print_opts(const char *v, size_t vlen) {
+ size_t opts_len = strlen(opt_stats_print_opts);
+ assert(opts_len <= stats_print_tot_num_options);
+
+ for (size_t i = 0; i < vlen; i++) {
+ switch (v[i]) {
+#define OPTION(o, v, d, s) case o: break;
+ STATS_PRINT_OPTIONS
+#undef OPTION
+ default: continue;
+ }
- if (malloc_initialized == false && malloc_init_hard())
- return (true);
- malloc_thread_init();
+ if (strchr(opt_stats_print_opts, v[i]) != NULL) {
+ /* Ignore repeated. */
+ continue;
+ }
- return (false);
+ opt_stats_print_opts[opts_len++] = v[i];
+ opt_stats_print_opts[opts_len] = '\0';
+ assert(opts_len <= stats_print_tot_num_options);
+ }
+ assert(opts_len == strlen(opt_stats_print_opts));
}
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
- char const **v_p, size_t *vlen_p)
-{
+ char const **v_p, size_t *vlen_p) {
bool accept;
const char *opts = *opts_p;
*k_p = opts;
- for (accept = false; accept == false;) {
+ for (accept = false; !accept;) {
switch (*opts) {
case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
@@ -333,14 +796,14 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
malloc_write("<jemalloc>: Conf string ends "
"with key\n");
}
- return (true);
+ return true;
default:
malloc_write("<jemalloc>: Malformed conf string\n");
- return (true);
+ return true;
}
}
- for (accept = false; accept == false;) {
+ for (accept = false; !accept;) {
switch (*opts) {
case ',':
opts++;
@@ -369,46 +832,57 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
}
*opts_p = opts;
- return (false);
+ return false;
}
static void
-malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
- size_t vlen)
-{
+malloc_abort_invalid_conf(void) {
+ assert(opt_abort_conf);
+ malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
+ "value (see above).\n");
+ abort();
+}
+static void
+malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
+ size_t vlen) {
malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
(int)vlen, v);
+ had_conf_error = true;
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ }
}
static void
-malloc_conf_init(void)
-{
+malloc_slow_flag_init(void) {
+ /*
+ * Combine the runtime options into malloc_slow for fast path. Called
+ * after processing all the options.
+ */
+ malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
+ | (opt_junk_free ? flag_opt_junk_free : 0)
+ | (opt_zero ? flag_opt_zero : 0)
+ | (opt_utrace ? flag_opt_utrace : 0)
+ | (opt_xmalloc ? flag_opt_xmalloc : 0);
+
+ malloc_slow = (malloc_slow_flags != 0);
+}
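/*
 * A minimal sketch of the flag-folding idea above: each rarely-enabled option
 * contributes one bit, so the hot path tests a single word instead of every
 * option.  slow_flags_init() and need_slow_path() below are illustrative
 * stand-ins for malloc_slow_flag_init() and the malloc_slow check.
 */
#include <stdbool.h>
#include <stdint.h>

static uint8_t slow_flags;

static void
slow_flags_init(bool junk_alloc, bool junk_free, bool zero, bool utrace,
    bool xmalloc) {
	slow_flags = (junk_alloc ? 1U << 0 : 0)
	    | (junk_free ? 1U << 1 : 0)
	    | (zero ? 1U << 2 : 0)
	    | (utrace ? 1U << 3 : 0)
	    | (xmalloc ? 1U << 4 : 0);
}

static bool
need_slow_path(void) {
	/* One load and one compare, regardless of how many options exist. */
	return slow_flags != 0;
}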
+
+static void
+malloc_conf_init(void) {
unsigned i;
char buf[PATH_MAX + 1];
const char *opts, *k, *v;
size_t klen, vlen;
- /*
- * Automatically configure valgrind before processing options. The
- * valgrind option remains in jemalloc 3.x for compatibility reasons.
- */
- if (config_valgrind) {
- opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
- if (config_fill && opt_valgrind) {
- opt_junk = false;
- assert(opt_zero == false);
- opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
- opt_redzone = true;
- }
- if (config_tcache && opt_valgrind)
- opt_tcache = false;
- }
-
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < 4; i++) {
/* Get runtime configuration. */
switch (i) {
case 0:
+ opts = config_malloc_conf;
+ break;
+ case 1:
if (je_malloc_conf != NULL) {
/*
* Use options that were compiled into the
@@ -421,8 +895,8 @@ malloc_conf_init(void)
opts = buf;
}
break;
- case 1: {
- int linklen = 0;
+ case 2: {
+ ssize_t linklen = 0;
#ifndef _WIN32
int saved_errno = errno;
const char *linkname =
@@ -441,14 +915,14 @@ malloc_conf_init(void)
if (linklen == -1) {
/* No configuration specified. */
linklen = 0;
- /* restore errno */
+ /* Restore errno. */
set_errno(saved_errno);
}
#endif
buf[linklen] = '\0';
opts = buf;
break;
- } case 2: {
+ } case 3: {
const char *envname =
#ifdef JEMALLOC_PREFIX
JEMALLOC_CPREFIX"MALLOC_CONF"
@@ -457,7 +931,7 @@ malloc_conf_init(void)
#endif
;
- if ((opts = getenv(envname)) != NULL) {
+ if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
/*
* Do nothing; opts is already initialized to
* the value of the MALLOC_CONF environment
@@ -475,27 +949,31 @@ malloc_conf_init(void)
opts = buf;
}
- while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
- &vlen) == false) {
-#define CONF_HANDLE_BOOL(o, n) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
- if (strncmp("true", v, vlen) == 0 && \
- vlen == sizeof("true")-1) \
+ while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
+ &vlen)) {
+#define CONF_MATCH(n) \
+ (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
+#define CONF_MATCH_VALUE(n) \
+ (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
+#define CONF_HANDLE_BOOL(o, n) \
+ if (CONF_MATCH(n)) { \
+ if (CONF_MATCH_VALUE("true")) { \
o = true; \
- else if (strncmp("false", v, vlen) == \
- 0 && vlen == sizeof("false")-1) \
+ } else if (CONF_MATCH_VALUE("false")) { \
o = false; \
- else { \
+ } else { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
} \
continue; \
}
-#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
+#define CONF_MIN_no(um, min) false
+#define CONF_MIN_yes(um, min) ((um) < (min))
+#define CONF_MAX_no(um, max) false
+#define CONF_MAX_yes(um, max) ((um) > (max))
+#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+ if (CONF_MATCH(n)) { \
uintmax_t um; \
char *end; \
\
@@ -507,27 +985,40 @@ malloc_conf_init(void)
"Invalid conf value", \
k, klen, v, vlen); \
} else if (clip) { \
- if (min != 0 && um < min) \
- o = min; \
- else if (um > max) \
- o = max; \
- else \
- o = um; \
+ if (CONF_MIN_##check_min(um, \
+ (t)(min))) { \
+ o = (t)(min); \
+ } else if ( \
+ CONF_MAX_##check_max(um, \
+ (t)(max))) { \
+ o = (t)(max); \
+ } else { \
+ o = (t)um; \
+ } \
} else { \
- if ((min != 0 && um < min) || \
- um > max) { \
+ if (CONF_MIN_##check_min(um, \
+ (t)(min)) || \
+ CONF_MAX_##check_max(um, \
+ (t)(max))) { \
malloc_conf_error( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
- } else \
- o = um; \
+ } else { \
+ o = (t)um; \
+ } \
} \
continue; \
}
-#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
+#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
+ clip) \
+ CONF_HANDLE_T_U(unsigned, o, n, min, max, \
+ check_min, check_max, clip)
+#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
+ CONF_HANDLE_T_U(size_t, o, n, min, max, \
+ check_min, check_max, clip)
+#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
+ if (CONF_MATCH(n)) { \
long l; \
char *end; \
\
@@ -538,18 +1029,18 @@ malloc_conf_init(void)
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
- } else if (l < (ssize_t)min || l > \
- (ssize_t)max) { \
+ } else if (l < (ssize_t)(min) || l > \
+ (ssize_t)(max)) { \
malloc_conf_error( \
"Out-of-range conf value", \
k, klen, v, vlen); \
- } else \
+ } else { \
o = l; \
+ } \
continue; \
}
-#define CONF_HANDLE_CHAR_P(o, n, d) \
- if (sizeof(n)-1 == klen && strncmp(n, k, \
- klen) == 0) { \
+#define CONF_HANDLE_CHAR_P(o, n, d) \
+ if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
sizeof(o)-1) ? vlen : \
sizeof(o)-1; \
@@ -559,23 +1050,18 @@ malloc_conf_init(void)
}
CONF_HANDLE_BOOL(opt_abort, "abort")
- /*
- * Chunks always require at least one header page, plus
- * one data page in the absence of redzones, or three
- * pages in the presence of redzones. In order to
- * simplify options processing, fix the limit based on
- * config_fill.
- */
- CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
- (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
- true)
+ CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
+ if (opt_abort_conf && had_conf_error) {
+ malloc_abort_invalid_conf();
+ }
+ CONF_HANDLE_BOOL(opt_retain, "retain")
if (strncmp("dss", k, klen) == 0) {
int i;
bool match = false;
for (i = 0; i < dss_prec_limit; i++) {
if (strncmp(dss_prec_names[i], v, vlen)
== 0) {
- if (chunk_dss_prec_set(i)) {
+ if (extent_dss_prec_set(i)) {
malloc_conf_error(
"Error setting dss",
k, klen, v, vlen);
@@ -587,47 +1073,98 @@ malloc_conf_init(void)
}
}
}
- if (match == false) {
+ if (!match) {
malloc_conf_error("Invalid conf value",
k, klen, v, vlen);
}
continue;
}
- CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
- SIZE_T_MAX, false)
- CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
- -1, (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
+ UINT_MAX, yes, no, false)
+ CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
+ "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
+ QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
+ SSIZE_MAX);
+ CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
+ "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
+ QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
+ SSIZE_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
+ if (CONF_MATCH("stats_print_opts")) {
+ init_opt_stats_print_opts(v, vlen);
+ continue;
+ }
if (config_fill) {
- CONF_HANDLE_BOOL(opt_junk, "junk")
- CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
- 0, SIZE_T_MAX, false)
- CONF_HANDLE_BOOL(opt_redzone, "redzone")
+ if (CONF_MATCH("junk")) {
+ if (CONF_MATCH_VALUE("true")) {
+ opt_junk = "true";
+ opt_junk_alloc = opt_junk_free =
+ true;
+ } else if (CONF_MATCH_VALUE("false")) {
+ opt_junk = "false";
+ opt_junk_alloc = opt_junk_free =
+ false;
+ } else if (CONF_MATCH_VALUE("alloc")) {
+ opt_junk = "alloc";
+ opt_junk_alloc = true;
+ opt_junk_free = false;
+ } else if (CONF_MATCH_VALUE("free")) {
+ opt_junk = "free";
+ opt_junk_alloc = false;
+ opt_junk_free = true;
+ } else {
+ malloc_conf_error(
+ "Invalid conf value", k,
+ klen, v, vlen);
+ }
+ continue;
+ }
CONF_HANDLE_BOOL(opt_zero, "zero")
}
if (config_utrace) {
CONF_HANDLE_BOOL(opt_utrace, "utrace")
}
- if (config_valgrind) {
- CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
- }
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
}
- if (config_tcache) {
- CONF_HANDLE_BOOL(opt_tcache, "tcache")
- CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
- "lg_tcache_max", -1,
- (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_tcache, "tcache")
+ CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
+ -1, (sizeof(size_t) << 3) - 1)
+ if (strncmp("percpu_arena", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = percpu_arena_mode_names_base; i <
+ percpu_arena_mode_names_limit; i++) {
+ if (strncmp(percpu_arena_mode_names[i],
+ v, vlen) == 0) {
+ if (!have_percpu_arena) {
+ malloc_conf_error(
+ "No getcpu support",
+ k, klen, v, vlen);
+ }
+ opt_percpu_arena = i;
+ match = true;
+ break;
+ }
+ }
+ if (!match) {
+ malloc_conf_error("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ continue;
}
+ CONF_HANDLE_BOOL(opt_background_thread,
+ "background_thread");
if (config_prof) {
CONF_HANDLE_BOOL(opt_prof, "prof")
CONF_HANDLE_CHAR_P(opt_prof_prefix,
"prof_prefix", "jeprof")
CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
- CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
- "lg_prof_sample", 0,
- (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_prof_thread_active_init,
+ "prof_thread_active_init")
+ CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
+ "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
+ - 1, no, yes, true)
CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
"lg_prof_interval", -1,
@@ -638,7 +1175,15 @@ malloc_conf_init(void)
}
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
+#undef CONF_MATCH
+#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
+#undef CONF_MIN_no
+#undef CONF_MIN_yes
+#undef CONF_MAX_no
+#undef CONF_MAX_yes
+#undef CONF_HANDLE_T_U
+#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
@@ -647,195 +1192,325 @@ malloc_conf_init(void)
}
static bool
-malloc_init_hard(void)
-{
- arena_t *init_arenas[1];
-
- malloc_mutex_lock(&init_lock);
- if (malloc_initialized || IS_INITIALIZER) {
+malloc_init_hard_needed(void) {
+ if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
+ malloc_init_recursible)) {
/*
* Another thread initialized the allocator before this one
* acquired init_lock, or this thread is the initializing
* thread, and it is recursively allocating.
*/
- malloc_mutex_unlock(&init_lock);
- return (false);
+ return false;
}
#ifdef JEMALLOC_THREADED_INIT
- if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
+ if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
/* Busy-wait until the initializing thread completes. */
+ spin_t spinner = SPIN_INITIALIZER;
do {
- malloc_mutex_unlock(&init_lock);
- CPU_SPINWAIT;
- malloc_mutex_lock(&init_lock);
- } while (malloc_initialized == false);
- malloc_mutex_unlock(&init_lock);
- return (false);
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ spin_adaptive(&spinner);
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ } while (!malloc_initialized());
+ return false;
}
#endif
+ return true;
+}
+
+static bool
+malloc_init_hard_a0_locked() {
malloc_initializer = INITIALIZER;
- malloc_tsd_boot();
- if (config_prof)
+ if (config_prof) {
prof_boot0();
-
+ }
malloc_conf_init();
-
if (opt_stats_print) {
/* Print statistics at exit. */
if (atexit(stats_print_atexit) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
}
-
- if (base_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
+ if (pages_boot()) {
+ return true;
}
-
- if (chunk_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
+ if (base_boot(TSDN_NULL)) {
+ return true;
+ }
+ if (extent_boot()) {
+ return true;
}
-
if (ctl_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
+ return true;
}
-
- if (config_prof)
+ if (config_prof) {
prof_boot1();
-
- arena_boot();
-
- if (config_tcache && tcache_boot0()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
}
-
- if (huge_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
+ arena_boot();
+ if (tcache_boot(TSDN_NULL)) {
+ return true;
}
-
- if (malloc_mutex_init(&arenas_lock)) {
- malloc_mutex_unlock(&init_lock);
- return (true);
+ if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
+ malloc_mutex_rank_exclusive)) {
+ return true;
}
-
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
- narenas_total = narenas_auto = 1;
- arenas = init_arenas;
+ narenas_auto = 1;
memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
-
/*
* Initialize one arena here. The rest are lazily created in
- * choose_arena_hard().
+ * arena_choose_hard().
*/
- arenas_extend(0);
- if (arenas[0] == NULL) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- /* Initialize allocation counters before any allocations can occur. */
- if (config_stats && thread_allocated_tsd_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
+ if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
+ == NULL) {
+ return true;
}
+ a0 = arena_get(TSDN_NULL, 0, false);
+ malloc_init_state = malloc_init_a0_initialized;
- if (arenas_tsd_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
-
- if (config_tcache && tcache_boot1()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+ return false;
+}
- if (config_fill && quarantine_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+static bool
+malloc_init_hard_a0(void) {
+ bool ret;
- if (config_prof && prof_boot2()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+ ret = malloc_init_hard_a0_locked();
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ return ret;
+}
- malloc_mutex_unlock(&init_lock);
- /**********************************************************************/
- /* Recursive allocation may follow. */
+/* Initialize data structures which may trigger recursive allocation. */
+static bool
+malloc_init_hard_recursible(void) {
+ malloc_init_state = malloc_init_recursible;
ncpus = malloc_ncpus();
-#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
- && !defined(_WIN32))
- /* LinuxThreads's pthread_atfork() allocates. */
+#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
+ && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
+ !defined(__native_client__))
+ /* LinuxThreads' pthread_atfork() allocates. */
if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
+ return true;
}
#endif
- /* Done recursively allocating. */
- /**********************************************************************/
- malloc_mutex_lock(&init_lock);
+ if (background_thread_boot0()) {
+ return true;
+ }
+
+ return false;
+}
+
+static unsigned
+malloc_narenas_default(void) {
+ assert(ncpus > 0);
+ /*
+ * For SMP systems, create more than one arena per CPU by
+ * default.
+ */
+ if (ncpus > 1) {
+ return ncpus << 2;
+ } else {
+ return 1;
+ }
+}
+
+static percpu_arena_mode_t
+percpu_arena_as_initialized(percpu_arena_mode_t mode) {
+ assert(!malloc_initialized());
+ assert(mode <= percpu_arena_disabled);
- if (mutex_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
+ if (mode != percpu_arena_disabled) {
+ mode += percpu_arena_mode_enabled_base;
}
+ return mode;
+}
+
+static bool
+malloc_init_narenas(void) {
+ assert(ncpus > 0);
+
+ if (opt_percpu_arena != percpu_arena_disabled) {
+ if (!have_percpu_arena || malloc_getcpu() < 0) {
+ opt_percpu_arena = percpu_arena_disabled;
+ malloc_printf("<jemalloc>: perCPU arena getcpu() not "
+ "available. Setting narenas to %u.\n", opt_narenas ?
+ opt_narenas : malloc_narenas_default());
+ if (opt_abort) {
+ abort();
+ }
+ } else {
+ if (ncpus >= MALLOCX_ARENA_LIMIT) {
+				malloc_printf("<jemalloc>: narenas w/ percpu "
+ "arena beyond limit (%d)\n", ncpus);
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
+ /* NB: opt_percpu_arena isn't fully initialized yet. */
+ if (percpu_arena_as_initialized(opt_percpu_arena) ==
+ per_phycpu_arena && ncpus % 2 != 0) {
+ malloc_printf("<jemalloc>: invalid "
+ "configuration -- per physical CPU arena "
+ "with odd number (%u) of CPUs (no hyper "
+ "threading?).\n", ncpus);
+				if (opt_abort) {
+					abort();
+				}
+ }
+ unsigned n = percpu_arena_ind_limit(
+ percpu_arena_as_initialized(opt_percpu_arena));
+ if (opt_narenas < n) {
+ /*
+ * If narenas is specified with percpu_arena
+ * enabled, actual narenas is set as the greater
+ * of the two. percpu_arena_choose will be free
+ * to use any of the arenas based on CPU
+ * id. This is conservative (at a small cost)
+ * but ensures correctness.
+ *
+ * If for some reason the ncpus determined at
+ * boot is not the actual number (e.g. because
+ * of affinity setting from numactl), reserving
+ * narenas this way provides a workaround for
+ * percpu_arena.
+ */
+ opt_narenas = n;
+ }
+ }
+ }
if (opt_narenas == 0) {
- /*
- * For SMP systems, create more than one arena per CPU by
- * default.
- */
- if (ncpus > 1)
- opt_narenas = ncpus << 2;
- else
- opt_narenas = 1;
+ opt_narenas = malloc_narenas_default();
}
+ assert(opt_narenas > 0);
+
narenas_auto = opt_narenas;
/*
- * Make sure that the arenas array can be allocated. In practice, this
- * limit is enough to allow the allocator to function, but the ctl
- * machinery will fail to allocate memory at far lower limits.
+ * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
*/
- if (narenas_auto > chunksize / sizeof(arena_t *)) {
- narenas_auto = chunksize / sizeof(arena_t *);
+ if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
+ narenas_auto = MALLOCX_ARENA_LIMIT - 1;
malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
narenas_auto);
}
- narenas_total = narenas_auto;
+ narenas_total_set(narenas_auto);
- /* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
- if (arenas == NULL) {
- malloc_mutex_unlock(&init_lock);
- return (true);
+ return false;
+}
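/*
 * A condensed sketch of the narenas selection above, assuming getcpu support
 * has already been verified: honor the percpu indexing requirement first, then
 * fall back to the default of four arenas per CPU on SMP.  pick_narenas() and
 * percpu_limit stand in for the logic around percpu_arena_ind_limit().
 */
static unsigned
pick_narenas(unsigned opt, unsigned ncpus_, unsigned percpu_limit) {
	if (percpu_limit != 0 && opt < percpu_limit) {
		/* percpu mode must be able to index an arena per CPU (or pair). */
		opt = percpu_limit;
	}
	if (opt == 0) {
		/* Default: four arenas per CPU on SMP, one otherwise. */
		opt = ncpus_ > 1 ? ncpus_ << 2 : 1;
	}
	return opt;
}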
+
+static void
+malloc_init_percpu(void) {
+ opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
+}
+
+static bool
+malloc_init_hard_finish(void) {
+ if (malloc_mutex_boot()) {
+ return true;
+ }
+
+ malloc_init_state = malloc_init_initialized;
+ malloc_slow_flag_init();
+
+ return false;
+}
+
+static void
+malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
+ malloc_mutex_assert_owner(tsdn, &init_lock);
+ malloc_mutex_unlock(tsdn, &init_lock);
+ if (reentrancy_set) {
+ assert(!tsdn_null(tsdn));
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ assert(tsd_reentrancy_level_get(tsd) > 0);
+ post_reentrancy(tsd);
+ }
+}
+
+static bool
+malloc_init_hard(void) {
+ tsd_t *tsd;
+
+#if defined(_WIN32) && _WIN32_WINNT < 0x0600
+ _init_init_lock();
+#endif
+ malloc_mutex_lock(TSDN_NULL, &init_lock);
+
+#define UNLOCK_RETURN(tsdn, ret, reentrancy) \
+ malloc_init_hard_cleanup(tsdn, reentrancy); \
+ return ret;
+
+ if (!malloc_init_hard_needed()) {
+ UNLOCK_RETURN(TSDN_NULL, false, false)
+ }
+
+ if (malloc_init_state != malloc_init_a0_initialized &&
+ malloc_init_hard_a0_locked()) {
+ UNLOCK_RETURN(TSDN_NULL, true, false)
+ }
+
+ malloc_mutex_unlock(TSDN_NULL, &init_lock);
+ /* Recursive allocation relies on functional tsd. */
+ tsd = malloc_tsd_boot0();
+ if (tsd == NULL) {
+ return true;
+ }
+ if (malloc_init_hard_recursible()) {
+ return true;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
+ /* Set reentrancy level to 1 during init. */
+ pre_reentrancy(tsd, NULL);
+ /* Initialize narenas before prof_boot2 (for allocation). */
+ if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
+ UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
+ }
+ if (config_prof && prof_boot2(tsd)) {
+ UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
}
- /*
- * Zero the array. In practice, this should always be pre-zeroed,
- * since it was just mmap()ed, but let's be sure.
- */
- memset(arenas, 0, sizeof(arena_t *) * narenas_total);
- /* Copy the pointer to the one arena that was already initialized. */
- arenas[0] = init_arenas[0];
- malloc_initialized = true;
- malloc_mutex_unlock(&init_lock);
+ malloc_init_percpu();
- return (false);
+ if (malloc_init_hard_finish()) {
+ UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
+ }
+ post_reentrancy(tsd);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
+
+ malloc_tsd_boot1();
+ /* Update TSD after tsd_boot1. */
+ tsd = tsd_fetch();
+ if (opt_background_thread) {
+ assert(have_background_thread);
+ /*
+ * Need to finish init & unlock first before creating background
+ * threads (pthread_create depends on malloc).
+ */
+ malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
+ bool err = background_thread_create(tsd, 0);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+ if (err) {
+ return true;
+ }
+ }
+#undef UNLOCK_RETURN
+ return false;
}
/*
@@ -843,469 +1518,779 @@ malloc_init_hard(void)
*/
/******************************************************************************/
/*
- * Begin malloc(3)-compatible functions.
+ * Begin allocation-path internal functions and data structures.
*/
-static void *
-imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
-{
- void *p;
-
- if (cnt == NULL)
- return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = imalloc(SMALL_MAXCLASS+1);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(p, usize);
- } else
- p = imalloc(usize);
+/*
+ * Settings determined by the documented behavior of the allocation functions.
+ */
+typedef struct static_opts_s static_opts_t;
+struct static_opts_s {
+ /* Whether or not allocation size may overflow. */
+ bool may_overflow;
+ /* Whether or not allocations of size 0 should be treated as size 1. */
+ bool bump_empty_alloc;
+ /*
+ * Whether to assert that allocations are not of size 0 (after any
+ * bumping).
+ */
+ bool assert_nonempty_alloc;
- return (p);
-}
+ /*
+ * Whether or not to modify the 'result' argument to malloc in case of
+ * error.
+ */
+ bool null_out_result_on_error;
+ /* Whether to set errno when we encounter an error condition. */
+ bool set_errno_on_error;
-JEMALLOC_ALWAYS_INLINE_C void *
-imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
-{
- void *p;
+ /*
+ * The minimum valid alignment for functions requesting aligned storage.
+ */
+ size_t min_alignment;
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = imalloc_prof_sample(usize, cnt);
- else
- p = imalloc(usize);
- if (p == NULL)
- return (NULL);
- prof_malloc(p, usize, cnt);
+ /* The error string to use if we oom. */
+ const char *oom_string;
+ /* The error string to use if the passed-in alignment is invalid. */
+ const char *invalid_alignment_string;
- return (p);
+ /*
+ * False if we're configured to skip some time-consuming operations.
+ *
+ * This isn't really a malloc "behavior", but it acts as a useful
+ * summary of several other static (or at least, static after program
+ * initialization) options.
+ */
+ bool slow;
+};
+
+JEMALLOC_ALWAYS_INLINE void
+static_opts_init(static_opts_t *static_opts) {
+ static_opts->may_overflow = false;
+ static_opts->bump_empty_alloc = false;
+ static_opts->assert_nonempty_alloc = false;
+ static_opts->null_out_result_on_error = false;
+ static_opts->set_errno_on_error = false;
+ static_opts->min_alignment = 0;
+ static_opts->oom_string = "";
+ static_opts->invalid_alignment_string = "";
+ static_opts->slow = false;
}
/*
- * MALLOC_BODY() is a macro rather than a function because its contents are in
- * the fast path, but inlining would cause reliability issues when determining
- * how many frames to discard from heap profiling backtraces.
+ * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
+ * should have one constant here per magic value there. Note however that the
+ * representations need not be related.
*/
-#define MALLOC_BODY(ret, size, usize) do { \
- if (malloc_init()) \
- ret = NULL; \
- else { \
- if (config_prof && opt_prof) { \
- prof_thr_cnt_t *cnt; \
- \
- usize = s2u(size); \
- /* \
- * Call PROF_ALLOC_PREP() here rather than in \
- * imalloc_prof() so that imalloc_prof() can be \
- * inlined without introducing uncertainty \
- * about the number of backtrace frames to \
- * ignore. imalloc_prof() is in the fast path \
- * when heap profiling is enabled, so inlining \
- * is critical to performance. (For \
- * consistency all callers of PROF_ALLOC_PREP() \
- * are structured similarly, even though e.g. \
- * realloc() isn't called enough for inlining \
- * to be critical.) \
- */ \
- PROF_ALLOC_PREP(1, usize, cnt); \
- ret = imalloc_prof(usize, cnt); \
- } else { \
- if (config_stats || (config_valgrind && \
- opt_valgrind)) \
- usize = s2u(size); \
- ret = imalloc(size); \
- } \
- } \
-} while (0)
-
-void *
-je_malloc(size_t size)
-{
- void *ret;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
- if (size == 0)
- size = 1;
+#define TCACHE_IND_NONE ((unsigned)-1)
+#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
+#define ARENA_IND_AUTOMATIC ((unsigned)-1)
+
+typedef struct dynamic_opts_s dynamic_opts_t;
+struct dynamic_opts_s {
+ void **result;
+ size_t num_items;
+ size_t item_size;
+ size_t alignment;
+ bool zero;
+ unsigned tcache_ind;
+ unsigned arena_ind;
+};
+
+JEMALLOC_ALWAYS_INLINE void
+dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
+ dynamic_opts->result = NULL;
+ dynamic_opts->num_items = 0;
+ dynamic_opts->item_size = 0;
+ dynamic_opts->alignment = 0;
+ dynamic_opts->zero = false;
+ dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
+ dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
+}
- MALLOC_BODY(ret, size, usize);
+/* ind is ignored if dopts->alignment > 0. */
+JEMALLOC_ALWAYS_INLINE void *
+imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
+ size_t size, size_t usize, szind_t ind) {
+ tcache_t *tcache;
+ arena_t *arena;
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in malloc(): "
- "out of memory\n");
- abort();
+ /* Fill in the tcache. */
+ if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
+ if (likely(!sopts->slow)) {
+ /* Getting tcache ptr unconditionally. */
+ tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ } else {
+ tcache = tcache_get(tsd);
}
- set_errno(ENOMEM);
- }
- if (config_stats && ret != NULL) {
- assert(usize == isalloc(ret, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, dopts->tcache_ind);
}
- UTRACE(0, size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
- return (ret);
-}
-static void *
-imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
-{
- void *p;
+ /* Fill in the arena. */
+ if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
+ /*
+ * In case of automatic arena management, we defer arena
+ * computation until as late as we can, hoping to fill the
+ * allocation out of the tcache.
+ */
+ arena = NULL;
+ } else {
+ arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
+ }
- if (cnt == NULL)
- return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
- p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
- false);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(p, usize);
- } else
- p = ipalloc(usize, alignment, false);
+ if (unlikely(dopts->alignment != 0)) {
+ return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
+ dopts->zero, tcache, arena);
+ }
- return (p);
+ return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
+ arena, sopts->slow);
}
-JEMALLOC_ALWAYS_INLINE_C void *
-imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
-{
- void *p;
+JEMALLOC_ALWAYS_INLINE void *
+imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
+ size_t usize, szind_t ind) {
+ void *ret;
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = imemalign_prof_sample(alignment, usize, cnt);
- else
- p = ipalloc(usize, alignment, false);
- if (p == NULL)
- return (NULL);
- prof_malloc(p, usize, cnt);
+ /*
+ * For small allocations, sampling bumps the usize. If so, we allocate
+ * from the ind_large bucket.
+ */
+ szind_t ind_large;
+ size_t bumped_usize = usize;
+
+ if (usize <= SMALL_MAXCLASS) {
+ assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
+ sz_sa2u(LARGE_MINCLASS, dopts->alignment))
+ == LARGE_MINCLASS);
+ ind_large = sz_size2index(LARGE_MINCLASS);
+ bumped_usize = sz_s2u(LARGE_MINCLASS);
+ ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
+ bumped_usize, ind_large);
+ if (unlikely(ret == NULL)) {
+ return NULL;
+ }
+ arena_prof_promote(tsd_tsdn(tsd), ret, usize);
+ } else {
+ ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
+ }
- return (p);
+ return ret;
}
-JEMALLOC_ATTR(nonnull(1))
-#ifdef JEMALLOC_PROF
/*
- * Avoid any uncertainty as to how many backtrace frames to ignore in
- * PROF_ALLOC_PREP().
+ * Returns true if the allocation will overflow, and false otherwise. Sets
+ * *size to the product either way.
*/
-JEMALLOC_NOINLINE
-#endif
-static int
-imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
-{
- int ret;
- size_t usize;
- void *result;
+JEMALLOC_ALWAYS_INLINE bool
+compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
+ size_t *size) {
+ /*
+ * This function is just num_items * item_size, except that we may have
+ * to check for overflow.
+ */
- assert(min_alignment != 0);
+ if (!may_overflow) {
+ assert(dopts->num_items == 1);
+ *size = dopts->item_size;
+ return false;
+ }
+
+ /* A size_t with its high-half bits all set to 1. */
+ const static size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
+
+ *size = dopts->item_size * dopts->num_items;
+
+ if (unlikely(*size == 0)) {
+ return (dopts->num_items != 0 && dopts->item_size != 0);
+ }
+
+ /*
+ * We got a non-zero size, but we don't know if we overflowed to get
+ * there. To avoid having to do a divide, we'll be clever and note that
+ * if both A and B can be represented in N/2 bits, then their product
+ * can be represented in N bits (without the possibility of overflow).
+ */
+ if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
+ return false;
+ }
+ if (likely(*size / dopts->item_size == dopts->num_items)) {
+ return false;
+ }
+ return true;
+}
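/*
 * A standalone sketch of the overflow check above: if neither operand uses the
 * high half of size_t's bits, the product cannot overflow, so the divide is
 * only needed in the rare wide-operand case.  mul_overflows() is an
 * illustrative helper, not part of jemalloc.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool
mul_overflows(size_t a, size_t b, size_t *prod) {
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 8 / 2);

	*prod = a * b;
	if (*prod == 0) {
		/* Zero is only an overflow result if both operands were nonzero. */
		return a != 0 && b != 0;
	}
	if ((high_bits & (a | b)) == 0) {
		/* Both fit in half a size_t; the product cannot overflow. */
		return false;
	}
	/* Fall back to a single divide (b != 0 here, since *prod != 0). */
	return *prod / b != a;
}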
+
+JEMALLOC_ALWAYS_INLINE int
+imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
+ /* Where the actual allocated memory will live. */
+ void *allocation = NULL;
+ /* Filled in by compute_size_with_overflow below. */
+ size_t size = 0;
+ /*
+ * For unaligned allocations, we need only ind. For aligned
+ * allocations, or in case of stats or profiling we need usize.
+ *
+ * These are actually dead stores, in that their values are reset before
+ * any branch on their value is taken. Sometimes though, it's
+ * convenient to pass them as arguments before this point. To avoid
+ * undefined behavior then, we initialize them with dummy stores.
+ */
+ szind_t ind = 0;
+ size_t usize = 0;
- if (malloc_init()) {
- result = NULL;
+ /* Reentrancy is only checked on slow path. */
+ int8_t reentrancy_level;
+
+ /* Compute the amount of memory the user wants. */
+ if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
+ &size))) {
goto label_oom;
- } else {
- if (size == 0)
- size = 1;
+ }
- /* Make sure that alignment is a large enough power of 2. */
- if (((alignment - 1) & alignment) != 0
- || (alignment < min_alignment)) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error allocating "
- "aligned memory: invalid alignment\n");
- abort();
- }
- result = NULL;
- ret = EINVAL;
- goto label_return;
+ /* Validate the user input. */
+ if (sopts->bump_empty_alloc) {
+ if (unlikely(size == 0)) {
+ size = 1;
}
+ }
+
+ if (sopts->assert_nonempty_alloc) {
+		assert(size != 0);
+ }
+
+ if (unlikely(dopts->alignment < sopts->min_alignment
+ || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
+ goto label_invalid_alignment;
+ }
+
+ /* This is the beginning of the "core" algorithm. */
- usize = sa2u(size, alignment);
- if (usize == 0) {
- result = NULL;
+ if (dopts->alignment == 0) {
+ ind = sz_size2index(size);
+ if (unlikely(ind >= NSIZES)) {
goto label_oom;
}
+ if (config_stats || (config_prof && opt_prof)) {
+ usize = sz_index2size(ind);
+ assert(usize > 0 && usize <= LARGE_MAXCLASS);
+ }
+ } else {
+ usize = sz_sa2u(size, dopts->alignment);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+ goto label_oom;
+ }
+ }
- if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ /*
+ * If we need to handle reentrancy, we can do it out of a
+ * known-initialized arena (i.e. arena 0).
+ */
+ reentrancy_level = tsd_reentrancy_level_get(tsd);
+ if (sopts->slow && unlikely(reentrancy_level > 0)) {
+ /*
+ * We should never specify particular arenas or tcaches from
+ * within our internal allocations.
+ */
+ assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
+ dopts->tcache_ind == TCACHE_IND_NONE);
+ assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
+ dopts->tcache_ind = TCACHE_IND_NONE;
+ /* We know that arena 0 has already been initialized. */
+ dopts->arena_ind = 0;
+ }
+
+ /* If profiling is on, get our profiling context. */
+ if (config_prof && opt_prof) {
+ /*
+ * Note that if we're going down this path, usize must have been
+ * initialized in the previous if statement.
+ */
+ prof_tctx_t *tctx = prof_alloc_prep(
+ tsd, usize, prof_active_get_unlocked(), true);
+
+ alloc_ctx_t alloc_ctx;
+ if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
+ alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
+ allocation = imalloc_no_sample(
+ sopts, dopts, tsd, usize, usize, ind);
+ } else if ((uintptr_t)tctx > (uintptr_t)1U) {
+ /*
+ * Note that ind might still be 0 here. This is fine;
+ * imalloc_sample ignores ind if dopts->alignment > 0.
+ */
+ allocation = imalloc_sample(
+ sopts, dopts, tsd, usize, ind);
+ alloc_ctx.slab = false;
+ } else {
+ allocation = NULL;
+ }
- PROF_ALLOC_PREP(2, usize, cnt);
- result = imemalign_prof(alignment, usize, cnt);
- } else
- result = ipalloc(usize, alignment, false);
- if (result == NULL)
+ if (unlikely(allocation == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ goto label_oom;
+ }
+ prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
+ } else {
+ /*
+ * If dopts->alignment > 0, then ind is still 0, but usize was
+ * computed in the previous if statement. Down the positive
+ * alignment path, imalloc_no_sample ignores ind and size
+ * (relying only on usize).
+ */
+ allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
+ ind);
+ if (unlikely(allocation == NULL)) {
goto label_oom;
+ }
}
- *memptr = result;
- ret = 0;
-label_return:
- if (config_stats && result != NULL) {
- assert(usize == isalloc(result, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
+ /*
+ * Allocation has been done at this point. We still have some
+ * post-allocation work to do though.
+ */
+ assert(dopts->alignment == 0
+ || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
+
+ if (config_stats) {
+ assert(usize == isalloc(tsd_tsdn(tsd), allocation));
+ *tsd_thread_allocatedp_get(tsd) += usize;
}
- UTRACE(0, size, result);
- return (ret);
+
+ if (sopts->slow) {
+ UTRACE(0, size, allocation);
+ }
+
+ /* Success! */
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ *dopts->result = allocation;
+ return 0;
+
label_oom:
- assert(result == NULL);
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error allocating aligned memory: "
- "out of memory\n");
+ if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write(sopts->oom_string);
abort();
}
- ret = ENOMEM;
- goto label_return;
-}
-int
-je_posix_memalign(void **memptr, size_t alignment, size_t size)
-{
- int ret = imemalign(memptr, alignment, size, sizeof(void *));
- JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
- config_prof), false);
- return (ret);
+ if (sopts->slow) {
+ UTRACE(NULL, size, NULL);
+ }
+
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ if (sopts->set_errno_on_error) {
+ set_errno(ENOMEM);
+ }
+
+ if (sopts->null_out_result_on_error) {
+ *dopts->result = NULL;
+ }
+
+ return ENOMEM;
+
+ /*
+	 * anyway to avoid obscuring the non-error paths, and for symmetry with
+ * anyways to avoid obscuring the non-error paths, and for symmetry with
+ * the oom case.
+ */
+label_invalid_alignment:
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write(sopts->invalid_alignment_string);
+ abort();
+ }
+
+ if (sopts->set_errno_on_error) {
+ set_errno(EINVAL);
+ }
+
+ if (sopts->slow) {
+ UTRACE(NULL, size, NULL);
+ }
+
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ if (sopts->null_out_result_on_error) {
+ *dopts->result = NULL;
+ }
+
+ return EINVAL;
}
-void *
-je_aligned_alloc(size_t alignment, size_t size)
-{
- void *ret;
- int err;
+/* Returns the errno-style error code of the allocation. */
+JEMALLOC_ALWAYS_INLINE int
+imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
+ if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
+ malloc_write(sopts->oom_string);
+ abort();
+ }
+ UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
+ set_errno(ENOMEM);
+ *dopts->result = NULL;
+
+ return ENOMEM;
+ }
- if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
- ret = NULL;
- set_errno(err);
+ /* We always need the tsd. Let's grab it right away. */
+ tsd_t *tsd = tsd_fetch();
+ assert(tsd);
+ if (likely(tsd_fast(tsd))) {
+ /* Fast and common path. */
+ tsd_assert_fast(tsd);
+ sopts->slow = false;
+ return imalloc_body(sopts, dopts, tsd);
+ } else {
+ sopts->slow = true;
+ return imalloc_body(sopts, dopts, tsd);
}
- JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
- false);
- return (ret);
}
+/******************************************************************************/
+/*
+ * Begin malloc(3)-compatible functions.
+ */
-static void *
-icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
-{
- void *p;
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_malloc(size_t size) {
+ void *ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
- if (cnt == NULL)
- return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = icalloc(SMALL_MAXCLASS+1);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(p, usize);
- } else
- p = icalloc(usize);
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
- return (p);
+ imalloc(&sopts, &dopts);
+
+ return ret;
}
-JEMALLOC_ALWAYS_INLINE_C void *
-icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
-{
- void *p;
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+JEMALLOC_ATTR(nonnull(1))
+je_posix_memalign(void **memptr, size_t alignment, size_t size) {
+ int ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_alloc = true;
+ sopts.min_alignment = sizeof(void *);
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+ dopts.result = memptr;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = alignment;
+
+ ret = imalloc(&sopts, &dopts);
+ return ret;
+}
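
A brief usage sketch (not part of the diff; aligned_block is an illustrative name) of the contract the wrapper above enforces: the alignment must be a power of two no smaller than sizeof(void *) (the sopts.min_alignment set here), and failure is reported through the return code rather than errno.

#include <stdlib.h>

void *
aligned_block(size_t n) {
	void *p = NULL;

	/* 64 is a power of two and a multiple of sizeof(void *). */
	if (posix_memalign(&p, 64, n) != 0) {
		return NULL;	/* EINVAL or ENOMEM; errno is not set. */
	}
	return p;
}
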
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = icalloc_prof_sample(usize, cnt);
- else
- p = icalloc(usize);
- if (p == NULL)
- return (NULL);
- prof_malloc(p, usize, cnt);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
+je_aligned_alloc(size_t alignment, size_t size) {
+ void *ret;
- return (p);
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.min_alignment = 1;
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = alignment;
+
+ imalloc(&sopts, &dopts);
+ return ret;
}
-void *
-je_calloc(size_t num, size_t size)
-{
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
+je_calloc(size_t num, size_t size) {
void *ret;
- size_t num_size;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
- if (malloc_init()) {
- num_size = 0;
- ret = NULL;
- goto label_return;
- }
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
- num_size = num * size;
- if (num_size == 0) {
- if (num == 0 || size == 0)
- num_size = 1;
- else {
- ret = NULL;
- goto label_return;
- }
- /*
- * Try to avoid division here. We know that it isn't possible to
- * overflow during multiplication if neither operand uses any of the
- * most significant half of the bits in a size_t.
- */
- } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
- && (num_size / size != num)) {
- /* size_t overflow. */
- ret = NULL;
- goto label_return;
- }
+ sopts.may_overflow = true;
+ sopts.bump_empty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
- if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
+ dopts.result = &ret;
+ dopts.num_items = num;
+ dopts.item_size = size;
+ dopts.zero = true;
- usize = s2u(num_size);
- PROF_ALLOC_PREP(1, usize, cnt);
- ret = icalloc_prof(usize, cnt);
- } else {
- if (config_stats || (config_valgrind && opt_valgrind))
- usize = s2u(num_size);
- ret = icalloc(num_size);
- }
+ imalloc(&sopts, &dopts);
-label_return:
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in calloc(): out of "
- "memory\n");
- abort();
- }
- set_errno(ENOMEM);
- }
- if (config_stats && ret != NULL) {
- assert(usize == isalloc(ret, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
- }
- UTRACE(0, num_size, ret);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
- return (ret);
+ return ret;
}
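
The overflow guard that the old je_calloc carried inline (and that sopts.may_overflow now delegates to the common path) rests on a small arithmetic fact: if neither operand uses the most significant half of size_t's bits, their product cannot overflow, so the division check is only needed otherwise. A standalone sketch of that check, with illustrative names rather than code taken from the new implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Returns true if num * size overflows size_t; otherwise stores the product. */
static bool
calloc_mul_overflows(size_t num, size_t size, size_t *out) {
	size_t num_size = num * size;
	/* Mask selecting the most significant half of size_t's bits. */
	const size_t high_bits = (size_t)SIZE_MAX << (sizeof(size_t) * 4);

	if (((num | size) & high_bits) != 0 && size != 0 &&
	    num_size / size != num) {
		return true;	/* size_t overflow. */
	}
	*out = num_size;
	return false;
}
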
static void *
-irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
-{
+irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+ prof_tctx_t *tctx) {
void *p;
- if (cnt == NULL)
- return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(p, usize);
- } else
- p = iralloc(oldptr, usize, 0, 0, false);
+ if (tctx == NULL) {
+ return NULL;
+ }
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
+ if (p == NULL) {
+ return NULL;
+ }
+ arena_prof_promote(tsd_tsdn(tsd), p, usize);
+ } else {
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+ }
- return (p);
+ return p;
}
-JEMALLOC_ALWAYS_INLINE_C void *
-irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
-{
+JEMALLOC_ALWAYS_INLINE void *
+irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
+ alloc_ctx_t *alloc_ctx) {
void *p;
- prof_ctx_t *old_ctx;
-
- old_ctx = prof_ctx_get(oldptr);
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = irealloc_prof_sample(oldptr, usize, cnt);
- else
- p = iralloc(oldptr, usize, 0, 0, false);
- if (p == NULL)
- return (NULL);
- prof_realloc(p, usize, cnt, old_usize, old_ctx);
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
+ tctx = prof_alloc_prep(tsd, usize, prof_active, true);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
+ } else {
+ p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
+ }
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, true);
+ return NULL;
+ }
+ prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
+ old_tctx);
- return (p);
+ return p;
}
-JEMALLOC_INLINE_C void
-ifree(void *ptr)
-{
+JEMALLOC_ALWAYS_INLINE void
+ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
+ if (!slow_path) {
+ tsd_assert_fast(tsd);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ if (tsd_reentrancy_level_get(tsd) != 0) {
+ assert(slow_path);
+ }
+
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
+
size_t usize;
- UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ if (config_prof && opt_prof) {
+ usize = sz_index2size(alloc_ctx.szind);
+ prof_free(tsd, ptr, usize, &alloc_ctx);
+ } else if (config_stats) {
+ usize = sz_index2size(alloc_ctx.szind);
+ }
+ if (config_stats) {
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ }
+
+ if (likely(!slow_path)) {
+ idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
+ false);
+ } else {
+ idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
+ true);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
+ if (!slow_path) {
+ tsd_assert_fast(tsd);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ if (tsd_reentrancy_level_get(tsd) != 0) {
+ assert(slow_path);
+ }
assert(ptr != NULL);
- assert(malloc_initialized || IS_INITIALIZER);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ alloc_ctx_t alloc_ctx, *ctx;
if (config_prof && opt_prof) {
- usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
- } else if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
- if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
- rzsize = p2rz(ptr);
- iqalloc(ptr);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind == sz_size2index(usize));
+ ctx = &alloc_ctx;
+ prof_free(tsd, ptr, usize, ctx);
+ } else {
+ ctx = NULL;
+ }
+
+ if (config_stats) {
+ *tsd_thread_deallocatedp_get(tsd) += usize;
+ }
+
+ if (likely(!slow_path)) {
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
+ } else {
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
+ }
}
-void *
-je_realloc(void *ptr, size_t size)
-{
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_realloc(void *ptr, size_t size) {
void *ret;
+ tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_usize = 0;
- UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- if (size == 0) {
+ if (unlikely(size == 0)) {
if (ptr != NULL) {
/* realloc(ptr, 0) is equivalent to free(ptr). */
UTRACE(ptr, 0, 0);
- ifree(ptr);
- return (NULL);
+ tcache_t *tcache;
+ tsd_t *tsd = tsd_fetch();
+ if (tsd_reentrancy_level_get(tsd) == 0) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ ifree(tsd, ptr, tcache, true);
+ return NULL;
}
size = 1;
}
- if (ptr != NULL) {
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
+ if (likely(ptr != NULL)) {
+ assert(malloc_initialized() || IS_INITIALIZER);
+ tsd_t *tsd = tsd_fetch();
- if ((config_prof && opt_prof) || config_stats ||
- (config_valgrind && opt_valgrind))
- old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && opt_valgrind)
- old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
+ old_usize = sz_index2size(alloc_ctx.szind);
+ assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
- usize = s2u(size);
- PROF_ALLOC_PREP(1, usize, cnt);
- ret = irealloc_prof(ptr, old_usize, usize, cnt);
+ usize = sz_s2u(size);
+ ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
+ NULL : irealloc_prof(tsd, ptr, old_usize, usize,
+ &alloc_ctx);
} else {
- if (config_stats || (config_valgrind && opt_valgrind))
- usize = s2u(size);
- ret = iralloc(ptr, size, 0, 0, false);
+ if (config_stats) {
+ usize = sz_s2u(size);
+ }
+ ret = iralloc(tsd, ptr, old_usize, size, 0, false);
}
+ tsdn = tsd_tsdn(tsd);
} else {
/* realloc(NULL, size) is equivalent to malloc(size). */
- MALLOC_BODY(ret, size, usize);
+ return je_malloc(size);
}
- if (ret == NULL) {
- if (config_xmalloc && opt_xmalloc) {
+ if (unlikely(ret == NULL)) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
set_errno(ENOMEM);
}
- if (config_stats && ret != NULL) {
- thread_allocated_t *ta;
- assert(usize == isalloc(ret, config_prof));
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_usize;
+ if (config_stats && likely(ret != NULL)) {
+ tsd_t *tsd;
+
+ assert(usize == isalloc(tsdn, ret));
+ tsd = tsdn_tsd(tsdn);
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, ret);
- JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
- false);
- return (ret);
+ check_entry_exit_locking(tsdn);
+ return ret;
}
-void
-je_free(void *ptr)
-{
-
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_free(void *ptr) {
UTRACE(ptr, 0, 0);
- if (ptr != NULL)
- ifree(ptr);
+ if (likely(ptr != NULL)) {
+ /*
+ * We avoid setting up tsd fully (e.g. tcache, arena binding)
+ * based on only free() calls -- other activities trigger the
+ * minimal to full transition. This is because free() may
+ * happen during thread shutdown after tls deallocation: if a
+ * thread never had any malloc activities until then, a
+		 * thread never had any malloc activity until then, a
+		 * fully-set-up tsd won't be destroyed properly.
+ tsd_t *tsd = tsd_fetch_min();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache;
+ if (likely(tsd_fast(tsd))) {
+ tsd_assert_fast(tsd);
+ /* Unconditionally get tcache ptr on fast path. */
+ tcache = tsd_tcachep_get(tsd);
+ ifree(tsd, ptr, tcache, false);
+ } else {
+ if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ ifree(tsd, ptr, tcache, true);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ }
}
/*
@@ -1317,36 +2302,68 @@ je_free(void *ptr)
*/
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-void *
-je_memalign(size_t alignment, size_t size)
-{
- void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- imemalign(&ret, alignment, size, 1);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
- return (ret);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
+je_memalign(size_t alignment, size_t size) {
+ void *ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_alloc = true;
+ sopts.min_alignment = 1;
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+ sopts.null_out_result_on_error = true;
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = alignment;
+
+ imalloc(&sopts, &dopts);
+ return ret;
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
-void *
-je_valloc(size_t size)
-{
- void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
- imemalign(&ret, PAGE, size, 1);
- JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
- return (ret);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc)
+je_valloc(size_t size) {
+ void *ret;
+
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.bump_empty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.min_alignment = PAGE;
+ sopts.oom_string =
+ "<jemalloc>: Error allocating aligned memory: out of memory\n";
+ sopts.invalid_alignment_string =
+ "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ dopts.alignment = PAGE;
+
+ imalloc(&sopts, &dopts);
+
+ return ret;
}
#endif
-/*
- * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
- * #define je_malloc malloc
- */
-#define malloc_is_malloc 1
-#define is_malloc_(a) malloc_is_ ## a
-#define is_malloc(a) is_malloc_(a)
-
-#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
+#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
/*
* glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
* to inconsistently reference libc's malloc(3)-compatible functions
@@ -1356,11 +2373,47 @@ je_valloc(size_t size)
* passed an extra argument for the caller return address, which will be
* ignored.
*/
-JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
-JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
-JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
-JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
+JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
+# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
+JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
+# endif
+
+# ifdef CPU_COUNT
+/*
+ * To enable static linking with glibc, the libc-specific malloc interface must
+ * also be implemented, so that none of glibc's malloc.o functions are added to
+ * the link.
+ */
+# define ALIAS(je_fn) __attribute__((alias (#je_fn), used))
+/* To force macro expansion of je_ prefix before stringification. */
+# define PREALIAS(je_fn) ALIAS(je_fn)
+# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
+void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_FREE
+void __libc_free(void* ptr) PREALIAS(je_free);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
+void *__libc_malloc(size_t size) PREALIAS(je_malloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
+void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
+void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
+void *__libc_valloc(size_t size) PREALIAS(je_valloc);
+# endif
+# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
+int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
+# endif
+# undef PREALIAS
+# undef ALIAS
+# endif
#endif
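
The PREALIAS/ALIAS pair above exists because the # operator suppresses macro expansion of its argument; the extra level of indirection expands the je_ name before it is stringified for __attribute__((alias)). A minimal, self-contained illustration of that preprocessor behavior (names are illustrative, not from the diff):

#include <stdio.h>

#define REAL_NAME	my_malloc
#define STR_DIRECT(x)	#x		/* Stringifies the unexpanded token. */
#define STR_EXPANDED(x)	STR_DIRECT(x)	/* Expands x first, then stringifies. */

int
main(void) {
	puts(STR_DIRECT(REAL_NAME));	/* Prints "REAL_NAME". */
	puts(STR_EXPANDED(REAL_NAME));	/* Prints "my_malloc". */
	return 0;
}
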
/*
@@ -1371,162 +2424,98 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
* Begin non-standard functions.
*/
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena)
-{
-
- assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
- alignment)));
-
- if (alignment != 0)
- return (ipalloct(usize, alignment, zero, try_tcache, arena));
- else if (zero)
- return (icalloct(usize, try_tcache, arena));
- else
- return (imalloct(usize, try_tcache, arena));
-}
-
-static void *
-imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena, prof_thr_cnt_t *cnt)
-{
- void *p;
-
- if (cnt == NULL)
- return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- size_t usize_promoted = (alignment == 0) ?
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
- assert(usize_promoted != 0);
- p = imallocx(usize_promoted, alignment, zero, try_tcache,
- arena);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(p, usize);
- } else
- p = imallocx(usize, alignment, zero, try_tcache, arena);
-
- return (p);
-}
-
-JEMALLOC_ALWAYS_INLINE_C void *
-imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
- arena_t *arena, prof_thr_cnt_t *cnt)
-{
- void *p;
-
- if ((uintptr_t)cnt != (uintptr_t)1U) {
- p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
- arena, cnt);
- } else
- p = imallocx(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
- return (NULL);
- prof_malloc(p, usize, cnt);
-
- return (p);
-}
-
-void *
-je_mallocx(size_t size, int flags)
-{
- void *p;
- size_t usize;
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
- bool zero = flags & MALLOCX_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- arena_t *arena;
- bool try_tcache;
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
+je_mallocx(size_t size, int flags) {
+ void *ret;
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.assert_nonempty_alloc = true;
+ sopts.null_out_result_on_error = true;
+ sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+ if (unlikely(flags != 0)) {
+ if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
+ dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
+ }
- assert(size != 0);
+ dopts.zero = MALLOCX_ZERO_GET(flags);
- if (malloc_init())
- goto label_oom;
+ if ((flags & MALLOCX_TCACHE_MASK) != 0) {
+ if ((flags & MALLOCX_TCACHE_MASK)
+ == MALLOCX_TCACHE_NONE) {
+ dopts.tcache_ind = TCACHE_IND_NONE;
+ } else {
+ dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
+ }
+ } else {
+ dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
+ }
- if (arena_ind != UINT_MAX) {
- arena = arenas[arena_ind];
- try_tcache = false;
- } else {
- arena = NULL;
- try_tcache = true;
+ if ((flags & MALLOCX_ARENA_MASK) != 0)
+ dopts.arena_ind = MALLOCX_ARENA_GET(flags);
}
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- assert(usize != 0);
-
- if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
- PROF_ALLOC_PREP(1, usize, cnt);
- p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
- cnt);
- } else
- p = imallocx(usize, alignment, zero, try_tcache, arena);
- if (p == NULL)
- goto label_oom;
-
- if (config_stats) {
- assert(usize == isalloc(p, config_prof));
- thread_allocated_tsd_get()->allocated += usize;
- }
- UTRACE(0, size, p);
- JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
- return (p);
-label_oom:
- if (config_xmalloc && opt_xmalloc) {
- malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
- abort();
- }
- UTRACE(0, size, 0);
- return (NULL);
+ imalloc(&sopts, &dopts);
+ return ret;
}
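
A usage sketch of the flag decoding above, assuming the un-prefixed public entry points (mallocx/dallocx) that the je_* symbols are normally exported as; example_mallocx is an illustrative name:

#include <jemalloc/jemalloc.h>

void *
example_mallocx(size_t n) {
	/* n must be non-zero (sopts.assert_nonempty_alloc above). */
	void *p = mallocx(n, MALLOCX_ALIGN(4096) | MALLOCX_ZERO |
	    MALLOCX_TCACHE_NONE);

	/* NULL on failure; release with dallocx(p, MALLOCX_TCACHE_NONE). */
	return p;
}
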
static void *
-irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
- bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
- prof_thr_cnt_t *cnt)
-{
+irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
+ size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
+ prof_tctx_t *tctx) {
void *p;
- if (cnt == NULL)
- return (NULL);
- if (prof_promote && usize <= SMALL_MAXCLASS) {
- p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
- try_tcache_alloc, try_tcache_dalloc, arena);
- if (p == NULL)
- return (NULL);
- arena_prof_promoted(p, usize);
+ if (tctx == NULL) {
+ return NULL;
+ }
+ if (usize <= SMALL_MAXCLASS) {
+ p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
+ alignment, zero, tcache, arena);
+ if (p == NULL) {
+ return NULL;
+ }
+ arena_prof_promote(tsdn, p, usize);
} else {
- p = iralloct(oldptr, size, 0, alignment, zero,
- try_tcache_alloc, try_tcache_dalloc, arena);
+ p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
+ tcache, arena);
}
- return (p);
+ return p;
}
-JEMALLOC_ALWAYS_INLINE_C void *
-irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
- size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
- arena_t *arena, prof_thr_cnt_t *cnt)
-{
+JEMALLOC_ALWAYS_INLINE void *
+irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
+ size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
+ arena_t *arena, alloc_ctx_t *alloc_ctx) {
void *p;
- prof_ctx_t *old_ctx;
-
- old_ctx = prof_ctx_get(oldptr);
- if ((uintptr_t)cnt != (uintptr_t)1U)
- p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
- try_tcache_alloc, try_tcache_dalloc, arena, cnt);
- else {
- p = iralloct(oldptr, size, 0, alignment, zero,
- try_tcache_alloc, try_tcache_dalloc, arena);
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
+
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
+ tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
+ *usize, alignment, zero, tcache, arena, tctx);
+ } else {
+ p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
+ zero, tcache, arena);
+ }
+ if (unlikely(p == NULL)) {
+ prof_alloc_rollback(tsd, tctx, false);
+ return NULL;
}
- if (p == NULL)
- return (NULL);
- if (p == oldptr && alignment != 0) {
+ if (p == old_ptr && alignment != 0) {
/*
* The allocation did not move, so it is possible that the size
* class is smaller than would guarantee the requested
@@ -1535,421 +2524,467 @@ irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
* be the same as the current usize because of in-place large
* reallocation. Therefore, query the actual value of usize.
*/
- *usize = isalloc(p, config_prof);
+ *usize = isalloc(tsd_tsdn(tsd), p);
}
- prof_realloc(p, *usize, cnt, old_usize, old_ctx);
+ prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
+ old_usize, old_tctx);
- return (p);
+ return p;
}
-void *
-je_rallocx(void *ptr, size_t size, int flags)
-{
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_rallocx(void *ptr, size_t size, int flags) {
void *p;
- size_t usize, old_usize;
- UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
+ tsd_t *tsd;
+ size_t usize;
+ size_t old_usize;
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache_alloc, try_tcache_dalloc;
arena_t *arena;
+ tcache_t *tcache;
assert(ptr != NULL);
assert(size != 0);
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
-
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk;
- try_tcache_alloc = false;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- try_tcache_dalloc = (chunk == ptr || chunk->arena !=
- arenas[arena_ind]);
- arena = arenas[arena_ind];
+ assert(malloc_initialized() || IS_INITIALIZER);
+ tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
+ unsigned arena_ind = MALLOCX_ARENA_GET(flags);
+ arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ if (unlikely(arena == NULL)) {
+ goto label_oom;
+ }
} else {
- try_tcache_alloc = true;
- try_tcache_dalloc = true;
arena = NULL;
}
- if ((config_prof && opt_prof) || config_stats ||
- (config_valgrind && opt_valgrind))
- old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_usize);
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ }
+ } else {
+ tcache = tcache_get(tsd);
+ }
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
+ old_usize = sz_index2size(alloc_ctx.szind);
+ assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
-
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- assert(usize != 0);
- PROF_ALLOC_PREP(1, usize, cnt);
- p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
- try_tcache_alloc, try_tcache_dalloc, arena, cnt);
- if (p == NULL)
+ usize = (alignment == 0) ?
+ sz_s2u(size) : sz_sa2u(size, alignment);
+ if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
+ goto label_oom;
+ }
+ p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
+ zero, tcache, arena, &alloc_ctx);
+ if (unlikely(p == NULL)) {
goto label_oom;
+ }
} else {
- p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
- try_tcache_dalloc, arena);
- if (p == NULL)
+ p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
+ zero, tcache, arena);
+ if (unlikely(p == NULL)) {
goto label_oom;
- if (config_stats || (config_valgrind && opt_valgrind))
- usize = isalloc(p, config_prof);
+ }
+ if (config_stats) {
+ usize = isalloc(tsd_tsdn(tsd), p);
+ }
}
+ assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
if (config_stats) {
- thread_allocated_t *ta;
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
UTRACE(ptr, size, p);
- JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
- return (p);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return p;
label_oom:
- if (config_xmalloc && opt_xmalloc) {
+ if (config_xmalloc && unlikely(opt_xmalloc)) {
malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
abort();
}
UTRACE(ptr, size, 0);
- return (NULL);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return NULL;
}
-JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
- size_t alignment, bool zero, arena_t *arena)
-{
+JEMALLOC_ALWAYS_INLINE size_t
+ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero) {
size_t usize;
- if (ixalloc(ptr, size, extra, alignment, zero))
- return (old_usize);
- usize = isalloc(ptr, config_prof);
+ if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
+ return old_usize;
+ }
+ usize = isalloc(tsdn, ptr);
- return (usize);
+ return usize;
}
static size_t
-ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
- size_t alignment, size_t max_usize, bool zero, arena_t *arena,
- prof_thr_cnt_t *cnt)
-{
+ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
size_t usize;
- if (cnt == NULL)
- return (old_usize);
- /* Use minimum usize to determine whether promotion may happen. */
- if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
- alignment)) <= SMALL_MAXCLASS) {
- if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
- size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
- alignment, zero))
- return (old_usize);
- usize = isalloc(ptr, config_prof);
- if (max_usize < PAGE)
- arena_prof_promoted(ptr, usize);
- } else {
- usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
- zero, arena);
+ if (tctx == NULL) {
+ return old_usize;
}
+ usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
+ zero);
- return (usize);
+ return usize;
}
-JEMALLOC_ALWAYS_INLINE_C size_t
-ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
- size_t alignment, size_t max_usize, bool zero, arena_t *arena,
- prof_thr_cnt_t *cnt)
-{
- size_t usize;
- prof_ctx_t *old_ctx;
+JEMALLOC_ALWAYS_INLINE size_t
+ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
+ size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
+ size_t usize_max, usize;
+ bool prof_active;
+ prof_tctx_t *old_tctx, *tctx;
- old_ctx = prof_ctx_get(ptr);
- if ((uintptr_t)cnt != (uintptr_t)1U) {
- usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
- alignment, zero, max_usize, arena, cnt);
+ prof_active = prof_active_get_unlocked();
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
+ /*
+ * usize isn't knowable before ixalloc() returns when extra is non-zero.
+ * Therefore, compute its maximum possible value and use that in
+ * prof_alloc_prep() to decide whether to capture a backtrace.
+ * prof_realloc() will use the actual usize to decide whether to sample.
+ */
+ if (alignment == 0) {
+ usize_max = sz_s2u(size+extra);
+ assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
+ } else {
+ usize_max = sz_sa2u(size+extra, alignment);
+ if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
+ /*
+ * usize_max is out of range, and chances are that
+ * allocation will fail, but use the maximum possible
+ * value and carry on with prof_alloc_prep(), just in
+ * case allocation succeeds.
+ */
+ usize_max = LARGE_MAXCLASS;
+ }
+ }
+ tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
+
+ if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
+ usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
+ size, extra, alignment, zero, tctx);
} else {
- usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
- zero, arena);
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
}
- if (usize == old_usize)
- return (usize);
- prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
+ if (usize == old_usize) {
+ prof_alloc_rollback(tsd, tctx, false);
+ return usize;
+ }
+ prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
+ old_tctx);
- return (usize);
+ return usize;
}
-size_t
-je_xallocx(void *ptr, size_t size, size_t extra, int flags)
-{
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
+ tsd_t *tsd;
size_t usize, old_usize;
- UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
bool zero = flags & MALLOCX_ZERO;
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- arena_t *arena;
assert(ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
-
- if (arena_ind != UINT_MAX)
- arena = arenas[arena_ind];
- else
- arena = NULL;
-
- old_usize = isalloc(ptr, config_prof);
- if (config_valgrind && opt_valgrind)
- old_rzsize = u2rz(old_usize);
+ assert(malloc_initialized() || IS_INITIALIZER);
+ tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ alloc_ctx_t alloc_ctx;
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
+ rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
+ (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(alloc_ctx.szind != NSIZES);
+ old_usize = sz_index2size(alloc_ctx.szind);
+ assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
+ /*
+ * The API explicitly absolves itself of protecting against (size +
+ * extra) numerical overflow, but we may need to clamp extra to avoid
+ * exceeding LARGE_MAXCLASS.
+ *
+ * Ordinarily, size limit checking is handled deeper down, but here we
+ * have to check as part of (size + extra) clamping, since we need the
+ * clamped value in the above helper functions.
+ */
+ if (unlikely(size > LARGE_MAXCLASS)) {
+ usize = old_usize;
+ goto label_not_resized;
+ }
+ if (unlikely(LARGE_MAXCLASS - size < extra)) {
+ extra = LARGE_MAXCLASS - size;
+ }
if (config_prof && opt_prof) {
- prof_thr_cnt_t *cnt;
- /*
- * usize isn't knowable before ixalloc() returns when extra is
- * non-zero. Therefore, compute its maximum possible value and
- * use that in PROF_ALLOC_PREP() to decide whether to capture a
- * backtrace. prof_realloc() will use the actual usize to
- * decide whether to sample.
- */
- size_t max_usize = (alignment == 0) ? s2u(size+extra) :
- sa2u(size+extra, alignment);
- PROF_ALLOC_PREP(1, max_usize, cnt);
- usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
- max_usize, zero, arena, cnt);
+ usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
+ alignment, zero, &alloc_ctx);
} else {
- usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
- zero, arena);
+ usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
+ extra, alignment, zero);
}
- if (usize == old_usize)
+ if (unlikely(usize == old_usize)) {
goto label_not_resized;
+ }
if (config_stats) {
- thread_allocated_t *ta;
- ta = thread_allocated_tsd_get();
- ta->allocated += usize;
- ta->deallocated += old_usize;
+ *tsd_thread_allocatedp_get(tsd) += usize;
+ *tsd_thread_deallocatedp_get(tsd) += old_usize;
}
- JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
label_not_resized:
UTRACE(ptr, size, ptr);
- return (usize);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return usize;
}
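
A usage sketch of the resize contract implemented above: xallocx() never moves the allocation and returns its real size, so a caller that needs the full requested size falls back to rallocx() only when the in-place attempt comes up short. This is not part of the diff; grow_or_move is an illustrative name:

#include <jemalloc/jemalloc.h>

void *
grow_or_move(void *ptr, size_t want) {
	if (xallocx(ptr, want, 0, 0) >= want) {
		return ptr;	/* Resized in place (or already big enough). */
	}
	/* May move the data; returns NULL and leaves ptr valid on OOM. */
	return rallocx(ptr, want, 0);
}
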
-size_t
-je_sallocx(const void *ptr, int flags)
-{
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_sallocx(const void *ptr, int flags) {
size_t usize;
+ tsdn_t *tsdn;
+
+ assert(malloc_initialized() || IS_INITIALIZER);
+ assert(ptr != NULL);
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
+ tsdn = tsdn_fetch();
+ check_entry_exit_locking(tsdn);
- if (config_ivsalloc)
- usize = ivsalloc(ptr, config_prof);
- else {
- assert(ptr != NULL);
- usize = isalloc(ptr, config_prof);
+ if (config_debug || force_ivsalloc) {
+ usize = ivsalloc(tsdn, ptr);
+ assert(force_ivsalloc || usize != 0);
+ } else {
+ usize = isalloc(tsdn, ptr);
}
- return (usize);
+ check_entry_exit_locking(tsdn);
+ return usize;
}
-void
-je_dallocx(void *ptr, int flags)
-{
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_dallocx(void *ptr, int flags) {
+ assert(ptr != NULL);
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ tsd_t *tsd = tsd_fetch();
+ bool fast = tsd_fast(tsd);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache;
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ /* Not allowed to be reentrant and specify a custom tcache. */
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ }
+ } else {
+ if (likely(fast)) {
+ tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ } else {
+ if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ }
+ }
+
+ UTRACE(ptr, 0, 0);
+ if (likely(fast)) {
+ tsd_assert_fast(tsd);
+ ifree(tsd, ptr, tcache, false);
+ } else {
+ ifree(tsd, ptr, tcache, true);
+ }
+ check_entry_exit_locking(tsd_tsdn(tsd));
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+inallocx(tsdn_t *tsdn, size_t size, int flags) {
+ check_entry_exit_locking(tsdn);
+
size_t usize;
- UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
- unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
- bool try_tcache;
+ if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
+ usize = sz_s2u(size);
+ } else {
+ usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
+ }
+ check_entry_exit_locking(tsdn);
+ return usize;
+}
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
+je_sdallocx(void *ptr, size_t size, int flags) {
assert(ptr != NULL);
- assert(malloc_initialized || IS_INITIALIZER);
-
- if (arena_ind != UINT_MAX) {
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- try_tcache = (chunk == ptr || chunk->arena !=
- arenas[arena_ind]);
- } else
- try_tcache = true;
+ assert(malloc_initialized() || IS_INITIALIZER);
+
+ tsd_t *tsd = tsd_fetch();
+ bool fast = tsd_fast(tsd);
+ size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache;
+ if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
+ /* Not allowed to be reentrant and specify a custom tcache. */
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+ if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
+ }
+ } else {
+ if (likely(fast)) {
+ tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ } else {
+ if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
+ tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
+ }
+ }
+ }
UTRACE(ptr, 0, 0);
- if (config_stats || config_valgrind)
- usize = isalloc(ptr, config_prof);
- if (config_prof && opt_prof) {
- if (config_stats == false && config_valgrind == false)
- usize = isalloc(ptr, config_prof);
- prof_free(ptr, usize);
+ if (likely(fast)) {
+ tsd_assert_fast(tsd);
+ isfree(tsd, ptr, usize, tcache, false);
+ } else {
+ isfree(tsd, ptr, usize, tcache, true);
}
- if (config_stats)
- thread_allocated_tsd_get()->deallocated += usize;
- if (config_valgrind && opt_valgrind)
- rzsize = p2rz(ptr);
- iqalloct(ptr, try_tcache);
- JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+ check_entry_exit_locking(tsd_tsdn(tsd));
}
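
A sketch of the sized-deallocation pairing that je_sdallocx() asserts above (the usize recomputed by inallocx() must match isalloc()); sized_roundtrip is an illustrative name and not from the diff:

#include <jemalloc/jemalloc.h>

void
sized_roundtrip(size_t n) {
	void *p = mallocx(n, 0);	/* n must be non-zero. */

	if (p != NULL) {
		/* The size passed here must match the original request. */
		sdallocx(p, n, 0);
	}
}
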
-size_t
-je_nallocx(size_t size, int flags)
-{
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+JEMALLOC_ATTR(pure)
+je_nallocx(size_t size, int flags) {
size_t usize;
- size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
- & (SIZE_T_MAX-1));
+ tsdn_t *tsdn;
assert(size != 0);
- if (malloc_init())
- return (0);
+ if (unlikely(malloc_init())) {
+ return 0;
+ }
+
+ tsdn = tsdn_fetch();
+ check_entry_exit_locking(tsdn);
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
- assert(usize != 0);
- return (usize);
+ usize = inallocx(tsdn, size, flags);
+ if (unlikely(usize > LARGE_MAXCLASS)) {
+ return 0;
+ }
+
+ check_entry_exit_locking(tsdn);
+ return usize;
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen)
-{
+ size_t newlen) {
+ int ret;
+ tsd_t *tsd;
- if (malloc_init())
- return (EAGAIN);
+ if (unlikely(malloc_init())) {
+ return EAGAIN;
+ }
- return (ctl_byname(name, oldp, oldlenp, newp, newlen));
+ tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return ret;
}
-int
-je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
-{
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
+ int ret;
- if (malloc_init())
- return (EAGAIN);
+ if (unlikely(malloc_init())) {
+ return EAGAIN;
+ }
- return (ctl_nametomib(name, mibp, miblenp));
+ tsd_t *tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ ret = ctl_nametomib(tsd, name, mibp, miblenp);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return ret;
}
-int
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
+ void *newp, size_t newlen) {
+ int ret;
+ tsd_t *tsd;
- if (malloc_init())
- return (EAGAIN);
+ if (unlikely(malloc_init())) {
+ return EAGAIN;
+ }
- return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
+ tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return ret;
}
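
A usage sketch of the mallctl() entry points wrapped above, assuming a build with statistics enabled; read_allocated is an illustrative name:

#include <stdint.h>

#include <jemalloc/jemalloc.h>

size_t
read_allocated(void) {
	uint64_t epoch = 1;
	size_t allocated = 0;
	size_t sz = sizeof(allocated);

	/* Writing "epoch" refreshes the cached statistics. */
	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
		return 0;	/* e.g. ENOENT when stats are compiled out. */
	}
	return allocated;
}
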
-void
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
+ const char *opts) {
+ tsdn_t *tsdn;
+ tsdn = tsdn_fetch();
+ check_entry_exit_locking(tsdn);
stats_print(write_cb, cbopaque, opts);
+ check_entry_exit_locking(tsdn);
}
-size_t
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
-{
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
size_t ret;
+ tsdn_t *tsdn;
- assert(malloc_initialized || IS_INITIALIZER);
- malloc_thread_init();
-
- if (config_ivsalloc)
- ret = ivsalloc(ptr, config_prof);
- else
- ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
-
- return (ret);
-}
-
-/*
- * End non-standard functions.
- */
-/******************************************************************************/
-/*
- * Begin experimental functions.
- */
-#ifdef JEMALLOC_EXPERIMENTAL
-
-int
-je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
-{
- void *p;
-
- assert(ptr != NULL);
-
- p = je_mallocx(size, flags);
- if (p == NULL)
- return (ALLOCM_ERR_OOM);
- if (rsize != NULL)
- *rsize = isalloc(p, config_prof);
- *ptr = p;
- return (ALLOCM_SUCCESS);
-}
-
-int
-je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
-{
- int ret;
- bool no_move = flags & ALLOCM_NO_MOVE;
+ assert(malloc_initialized() || IS_INITIALIZER);
- assert(ptr != NULL);
- assert(*ptr != NULL);
- assert(size != 0);
- assert(SIZE_T_MAX - size >= extra);
+ tsdn = tsdn_fetch();
+ check_entry_exit_locking(tsdn);
- if (no_move) {
- size_t usize = je_xallocx(*ptr, size, extra, flags);
- ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
- if (rsize != NULL)
- *rsize = usize;
+ if (unlikely(ptr == NULL)) {
+ ret = 0;
} else {
- void *p = je_rallocx(*ptr, size+extra, flags);
- if (p != NULL) {
- *ptr = p;
- ret = ALLOCM_SUCCESS;
- } else
- ret = ALLOCM_ERR_OOM;
- if (rsize != NULL)
- *rsize = isalloc(*ptr, config_prof);
+ if (config_debug || force_ivsalloc) {
+ ret = ivsalloc(tsdn, ptr);
+ assert(force_ivsalloc || ret != 0);
+ } else {
+ ret = isalloc(tsdn, ptr);
+ }
}
- return (ret);
-}
-
-int
-je_sallocm(const void *ptr, size_t *rsize, int flags)
-{
-
- assert(rsize != NULL);
- *rsize = je_sallocx(ptr, flags);
- return (ALLOCM_SUCCESS);
-}
-
-int
-je_dallocm(void *ptr, int flags)
-{
- je_dallocx(ptr, flags);
- return (ALLOCM_SUCCESS);
+ check_entry_exit_locking(tsdn);
+ return ret;
}
-int
-je_nallocm(size_t *rsize, size_t size, int flags)
-{
- size_t usize;
-
- usize = je_nallocx(size, flags);
- if (usize == 0)
- return (ALLOCM_ERR_OOM);
- if (rsize != NULL)
- *rsize = usize;
- return (ALLOCM_SUCCESS);
-}
-
-#endif
/*
- * End experimental functions.
+ * End non-standard functions.
*/
/******************************************************************************/
/*
@@ -1966,17 +3001,17 @@ je_nallocm(size_t *rsize, size_t size, int flags)
* fork/malloc races via the following functions it registers during
* initialization using pthread_atfork(), but of course that does no good if
* the allocator isn't fully initialized at fork time. The following library
- * constructor is a partial solution to this problem. It may still possible to
- * trigger the deadlock described above, but doing so would involve forking via
- * a library constructor that runs before jemalloc's runs.
+ * constructor is a partial solution to this problem. It may still be possible
+ * to trigger the deadlock described above, but doing so would involve forking
+ * via a library constructor that runs before jemalloc's runs.
*/
+#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
-jemalloc_constructor(void)
-{
-
+jemalloc_constructor(void) {
malloc_init();
}
+#endif
#ifndef JEMALLOC_MUTEX_INIT_CB
void
@@ -1986,25 +3021,69 @@ JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
- unsigned i;
+ tsd_t *tsd;
+ unsigned i, j, narenas;
+ arena_t *arena;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (malloc_initialized == false)
+ if (!malloc_initialized()) {
return;
+ }
#endif
- assert(malloc_initialized);
+ assert(malloc_initialized());
+ tsd = tsd_fetch();
+
+ narenas = narenas_total_get();
+
+ witness_prefork(tsd_witness_tsdp_get(tsd));
/* Acquire all mutexes in a safe order. */
- ctl_prefork();
- prof_prefork();
- malloc_mutex_prefork(&arenas_lock);
- for (i = 0; i < narenas_total; i++) {
- if (arenas[i] != NULL)
- arena_prefork(arenas[i]);
+ ctl_prefork(tsd_tsdn(tsd));
+ tcache_prefork(tsd_tsdn(tsd));
+ malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
+ if (have_background_thread) {
+ background_thread_prefork0(tsd_tsdn(tsd));
+ }
+ prof_prefork0(tsd_tsdn(tsd));
+ if (have_background_thread) {
+ background_thread_prefork1(tsd_tsdn(tsd));
+ }
+ /* Break arena prefork into stages to preserve lock order. */
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < narenas; j++) {
+ if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
+ NULL) {
+ switch (i) {
+ case 0:
+ arena_prefork0(tsd_tsdn(tsd), arena);
+ break;
+ case 1:
+ arena_prefork1(tsd_tsdn(tsd), arena);
+ break;
+ case 2:
+ arena_prefork2(tsd_tsdn(tsd), arena);
+ break;
+ case 3:
+ arena_prefork3(tsd_tsdn(tsd), arena);
+ break;
+ case 4:
+ arena_prefork4(tsd_tsdn(tsd), arena);
+ break;
+ case 5:
+ arena_prefork5(tsd_tsdn(tsd), arena);
+ break;
+ case 6:
+ arena_prefork6(tsd_tsdn(tsd), arena);
+ break;
+ case 7:
+ arena_prefork7(tsd_tsdn(tsd), arena);
+ break;
+ default: not_reached();
+ }
+ }
+ }
}
- chunk_prefork();
- base_prefork();
- huge_prefork();
+ prof_prefork1(tsd_tsdn(tsd));
}
#ifndef JEMALLOC_MUTEX_INIT_CB
@@ -2015,97 +3094,61 @@ JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
- unsigned i;
+ tsd_t *tsd;
+ unsigned i, narenas;
#ifdef JEMALLOC_MUTEX_INIT_CB
- if (malloc_initialized == false)
+ if (!malloc_initialized()) {
return;
-#endif
- assert(malloc_initialized);
-
- /* Release all mutexes, now that fork() has completed. */
- huge_postfork_parent();
- base_postfork_parent();
- chunk_postfork_parent();
- for (i = 0; i < narenas_total; i++) {
- if (arenas[i] != NULL)
- arena_postfork_parent(arenas[i]);
}
- malloc_mutex_postfork_parent(&arenas_lock);
- prof_postfork_parent();
- ctl_postfork_parent();
-}
-
-void
-jemalloc_postfork_child(void)
-{
- unsigned i;
+#endif
+ assert(malloc_initialized());
- assert(malloc_initialized);
+ tsd = tsd_fetch();
+ witness_postfork_parent(tsd_witness_tsdp_get(tsd));
/* Release all mutexes, now that fork() has completed. */
- huge_postfork_child();
- base_postfork_child();
- chunk_postfork_child();
- for (i = 0; i < narenas_total; i++) {
- if (arenas[i] != NULL)
- arena_postfork_child(arenas[i]);
- }
- malloc_mutex_postfork_child(&arenas_lock);
- prof_postfork_child();
- ctl_postfork_child();
-}
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena;
-/******************************************************************************/
-/*
- * The following functions are used for TLS allocation/deallocation in static
- * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
- * is that these avoid accessing TLS variables.
- */
-
-static void *
-a0alloc(size_t size, bool zero)
-{
-
- if (malloc_init())
- return (NULL);
-
- if (size == 0)
- size = 1;
-
- if (size <= arena_maxclass)
- return (arena_malloc(arenas[0], size, zero, false));
- else
- return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
-}
-
-void *
-a0malloc(size_t size)
-{
-
- return (a0alloc(size, false));
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
+ arena_postfork_parent(tsd_tsdn(tsd), arena);
+ }
+ }
+ prof_postfork_parent(tsd_tsdn(tsd));
+ if (have_background_thread) {
+ background_thread_postfork_parent(tsd_tsdn(tsd));
+ }
+ malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
+ tcache_postfork_parent(tsd_tsdn(tsd));
+ ctl_postfork_parent(tsd_tsdn(tsd));
}
-void *
-a0calloc(size_t num, size_t size)
-{
+void
+jemalloc_postfork_child(void) {
+ tsd_t *tsd;
+ unsigned i, narenas;
- return (a0alloc(num * size, true));
-}
+ assert(malloc_initialized());
-void
-a0free(void *ptr)
-{
- arena_chunk_t *chunk;
+ tsd = tsd_fetch();
- if (ptr == NULL)
- return;
+ witness_postfork_child(tsd_witness_tsdp_get(tsd));
+ /* Release all mutexes, now that fork() has completed. */
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena;
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk != ptr)
- arena_dalloc(chunk->arena, chunk, ptr, false);
- else
- huge_dalloc(ptr, true);
+ if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
+ arena_postfork_child(tsd_tsdn(tsd), arena);
+ }
+ }
+ prof_postfork_child(tsd_tsdn(tsd));
+ if (have_background_thread) {
+ background_thread_postfork_child(tsd_tsdn(tsd));
+ }
+ malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
+ tcache_postfork_child(tsd_tsdn(tsd));
+ ctl_postfork_child(tsd_tsdn(tsd));
}
/******************************************************************************/
diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c
index 788eca3870..a528ef0c24 100644
--- a/deps/jemalloc/src/mutex.c
+++ b/deps/jemalloc/src/mutex.c
@@ -1,12 +1,12 @@
-#define JEMALLOC_MUTEX_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_MUTEX_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-#include <dlfcn.h>
-#endif
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/malloc_io.h"
#ifndef _CRT_SPINCOUNT
-#define _CRT_SPINCOUNT 4000
+#define _CRT_SPINCOUNT 4000
#endif
/******************************************************************************/
@@ -20,10 +20,6 @@ static bool postpone_init = true;
static malloc_mutex_t *postponed_mutexes = NULL;
#endif
-#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static void pthread_create_once(void);
-#endif
-
/******************************************************************************/
/*
* We intercept pthread_create() calls in order to toggle isthreaded if the
@@ -31,33 +27,11 @@ static void pthread_create_once(void);
*/
#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
-static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
- void *(*)(void *), void *__restrict);
-
-static void
-pthread_create_once(void)
-{
-
- pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
- if (pthread_create_fptr == NULL) {
- malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
- "\"pthread_create\")\n");
- abort();
- }
-
- isthreaded = true;
-}
-
JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
- void *__restrict arg)
-{
- static pthread_once_t once_control = PTHREAD_ONCE_INIT;
-
- pthread_once(&once_control, pthread_create_once);
-
- return (pthread_create_fptr(thread, attr, start_routine, arg));
+ void *__restrict arg) {
+ return pthread_create_wrapper(thread, attr, start_routine, arg);
}
#endif
@@ -68,14 +42,108 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
#endif
-bool
-malloc_mutex_init(malloc_mutex_t *mutex)
-{
+void
+malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
+ mutex_prof_data_t *data = &mutex->prof_data;
+ UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER;
+
+ if (ncpus == 1) {
+ goto label_spin_done;
+ }
+
+ int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
+ do {
+ CPU_SPINWAIT;
+ if (!malloc_mutex_trylock_final(mutex)) {
+ data->n_spin_acquired++;
+ return;
+ }
+ } while (cnt++ < max_cnt);
+
+ if (!config_stats) {
+		/* Only spinning is useful when stats are disabled. */
+ malloc_mutex_lock_final(mutex);
+ return;
+ }
+label_spin_done:
+ nstime_update(&before);
+ /* Copy before to after to avoid clock skews. */
+ nstime_t after;
+ nstime_copy(&after, &before);
+ uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1,
+ ATOMIC_RELAXED) + 1;
+	/* One last try, as the two calls above may take quite a few cycles. */
+ if (!malloc_mutex_trylock_final(mutex)) {
+ atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
+ data->n_spin_acquired++;
+ return;
+ }
+
+ /* True slow path. */
+ malloc_mutex_lock_final(mutex);
+ /* Update more slow-path only counters. */
+ atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED);
+ nstime_update(&after);
+
+ nstime_t delta;
+ nstime_copy(&delta, &after);
+ nstime_subtract(&delta, &before);
+ data->n_wait_times++;
+ nstime_add(&data->tot_wait_time, &delta);
+ if (nstime_compare(&data->max_wait_time, &delta) < 0) {
+ nstime_copy(&data->max_wait_time, &delta);
+ }
+ if (n_thds > data->max_n_thds) {
+ data->max_n_thds = n_thds;
+ }
+}
+
+static void
+mutex_prof_data_init(mutex_prof_data_t *data) {
+ memset(data, 0, sizeof(mutex_prof_data_t));
+ nstime_init(&data->max_wait_time, 0);
+ nstime_init(&data->tot_wait_time, 0);
+ data->prev_owner = NULL;
+}
+
+void
+malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ malloc_mutex_assert_owner(tsdn, mutex);
+ mutex_prof_data_init(&mutex->prof_data);
+}
+
+static int
+mutex_addr_comp(const witness_t *witness1, void *mutex1,
+ const witness_t *witness2, void *mutex2) {
+ assert(mutex1 != NULL);
+ assert(mutex2 != NULL);
+ uintptr_t mu1int = (uintptr_t)mutex1;
+ uintptr_t mu2int = (uintptr_t)mutex2;
+ if (mu1int < mu2int) {
+ return -1;
+ } else if (mu1int == mu2int) {
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+bool
+malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank, malloc_mutex_lock_order_t lock_order) {
+ mutex_prof_data_init(&mutex->prof_data);
#ifdef _WIN32
+# if _WIN32_WINNT >= 0x0600
+ InitializeSRWLock(&mutex->lock);
+# else
if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
- _CRT_SPINCOUNT))
- return (true);
+ _CRT_SPINCOUNT)) {
+ return true;
+ }
+# endif
+#elif (defined(JEMALLOC_OS_UNFAIR_LOCK))
+ mutex->lock = OS_UNFAIR_LOCK_INIT;
#elif (defined(JEMALLOC_OSSPIN))
mutex->lock = 0;
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
@@ -83,67 +151,73 @@ malloc_mutex_init(malloc_mutex_t *mutex)
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
- if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
- 0)
- return (true);
+ if (_pthread_mutex_init_calloc_cb(&mutex->lock,
+ bootstrap_calloc) != 0) {
+ return true;
+ }
}
#else
pthread_mutexattr_t attr;
- if (pthread_mutexattr_init(&attr) != 0)
- return (true);
+ if (pthread_mutexattr_init(&attr) != 0) {
+ return true;
+ }
pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
- return (true);
+ return true;
}
pthread_mutexattr_destroy(&attr);
#endif
- return (false);
+ if (config_debug) {
+ mutex->lock_order = lock_order;
+ if (lock_order == malloc_mutex_address_ordered) {
+ witness_init(&mutex->witness, name, rank,
+ mutex_addr_comp, &mutex);
+ } else {
+ witness_init(&mutex->witness, name, rank, NULL, NULL);
+ }
+ }
+ return false;
}
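When lock_order is malloc_mutex_address_ordered, the comparator registered above lets the debug-only witness checker accept multiple same-rank locks as long as they are taken in increasing-address order. Outside the witness machinery, the same discipline is the classic deadlock-avoidance trick sketched here (plain pthreads, illustrative only):

    #include <pthread.h>
    #include <stdint.h>

    /* Lock two same-rank mutexes without risk of deadlock: every caller takes
     * the lower-addressed one first, so no two threads can wait on each other. */
    static void
    lock_pair_addr_ordered(pthread_mutex_t *a, pthread_mutex_t *b) {
        if ((uintptr_t)a > (uintptr_t)b) {
            pthread_mutex_t *tmp = a;
            a = b;
            b = tmp;
        }
        pthread_mutex_lock(a);
        if (b != a) {
            pthread_mutex_lock(b);
        }
    }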
void
-malloc_mutex_prefork(malloc_mutex_t *mutex)
-{
-
- malloc_mutex_lock(mutex);
+malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ malloc_mutex_lock(tsdn, mutex);
}
void
-malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
-{
-
- malloc_mutex_unlock(mutex);
+malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) {
+ malloc_mutex_unlock(tsdn, mutex);
}
void
-malloc_mutex_postfork_child(malloc_mutex_t *mutex)
-{
-
+malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) {
#ifdef JEMALLOC_MUTEX_INIT_CB
- malloc_mutex_unlock(mutex);
+ malloc_mutex_unlock(tsdn, mutex);
#else
- if (malloc_mutex_init(mutex)) {
+ if (malloc_mutex_init(mutex, mutex->witness.name,
+ mutex->witness.rank, mutex->lock_order)) {
malloc_printf("<jemalloc>: Error re-initializing mutex in "
"child\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
#endif
}
bool
-mutex_boot(void)
-{
-
+malloc_mutex_boot(void) {
#ifdef JEMALLOC_MUTEX_INIT_CB
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
- base_calloc) != 0)
- return (true);
+ bootstrap_calloc) != 0) {
+ return true;
+ }
postponed_mutexes = postponed_mutexes->postponed_next;
}
#endif
- return (false);
+ return false;
}
diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c
index 7722b7b437..975722c4c3 100644
--- a/deps/jemalloc/src/prof.c
+++ b/deps/jemalloc/src/prof.c
@@ -1,27 +1,41 @@
-#define JEMALLOC_PROF_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_PROF_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
+
/******************************************************************************/
#ifdef JEMALLOC_PROF_LIBUNWIND
-#define UNW_LOCAL_ONLY
+#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif
#ifdef JEMALLOC_PROF_LIBGCC
+/*
+ * We have a circular dependency -- jemalloc_internal.h tells us if we should
+ * use libgcc's unwinding functionality, but after we've included that, we've
+ * already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
+ */
+#undef _Unwind_Backtrace
#include <unwind.h>
+#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook)
#endif
/******************************************************************************/
/* Data. */
-malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)
-
bool opt_prof = false;
bool opt_prof_active = true;
+bool opt_prof_thread_active_init = true;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
-bool opt_prof_final = true;
+bool opt_prof_final = false;
bool opt_prof_leak = false;
bool opt_prof_accum = false;
char opt_prof_prefix[
@@ -31,25 +45,66 @@ char opt_prof_prefix[
#endif
1];
+/*
+ * Initialized as opt_prof_active, and accessed via
+ * prof_active_[gs]et{_unlocked,}().
+ */
+bool prof_active;
+static malloc_mutex_t prof_active_mtx;
+
+/*
+ * Initialized as opt_prof_thread_active_init, and accessed via
+ * prof_thread_active_init_[gs]et().
+ */
+static bool prof_thread_active_init;
+static malloc_mutex_t prof_thread_active_init_mtx;
+
+/*
+ * Initialized as opt_prof_gdump, and accessed via
+ * prof_gdump_[gs]et{_unlocked,}().
+ */
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
+
uint64_t prof_interval = 0;
-bool prof_promote;
+
+size_t lg_prof_sample;
/*
- * Table of mutexes that are shared among ctx's. These are leaf locks, so
- * there is no problem with using them for more than one ctx at the same time.
- * The primary motivation for this sharing though is that ctx's are ephemeral,
+ * Table of mutexes that are shared among gctx's. These are leaf locks, so
+ * there is no problem with using them for more than one gctx at the same time.
+ * The primary motivation for this sharing though is that gctx's are ephemeral,
* and destroying mutexes causes complications for systems that allocate when
* creating/destroying mutexes.
*/
-static malloc_mutex_t *ctx_locks;
-static unsigned cum_ctxs; /* Atomic counter. */
+static malloc_mutex_t *gctx_locks;
+static atomic_u_t cum_gctxs; /* Atomic counter. */
/*
- * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
+ * Table of mutexes that are shared among tdata's. No operations require
+ * holding multiple tdata locks, so there is no problem with using them for more
+ * than one tdata at the same time, even though a gctx lock may be acquired
+ * while holding a tdata lock.
+ */
+static malloc_mutex_t *tdata_locks;
+
+/*
+ * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
* structure that knows about all backtraces currently captured.
*/
-static ckh_t bt2ctx;
-static malloc_mutex_t bt2ctx_mtx;
+static ckh_t bt2gctx;
+/* Non-static so that this mutex can be included in mutex profiling stats. */
+malloc_mutex_t bt2gctx_mtx;
+
+/*
+ * Tree of all extant prof_tdata_t structures, regardless of state,
+ * {attached,detached,expired}.
+ */
+static prof_tdata_tree_t tdatas;
+static malloc_mutex_t tdatas_mtx;
+
+static uint64_t next_thr_uid;
+static malloc_mutex_t next_thr_uid_mtx;
static malloc_mutex_t prof_dump_seq_mtx;
static uint64_t prof_dump_seq;
@@ -70,161 +125,242 @@ static char prof_dump_buf[
1
#endif
];
-static unsigned prof_dump_buf_end;
+static size_t prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;
/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
-void
-bt_init(prof_bt_t *bt, void **vec)
-{
+static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
+static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
+static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached);
+static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached);
+static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
- cassert(config_prof);
+/******************************************************************************/
+/* Red-black trees. */
- bt->vec = vec;
- bt->len = 0;
+static int
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
+ uint64_t a_thr_uid = a->thr_uid;
+ uint64_t b_thr_uid = b->thr_uid;
+ int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+ if (ret == 0) {
+ uint64_t a_thr_discrim = a->thr_discrim;
+ uint64_t b_thr_discrim = b->thr_discrim;
+ ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+ b_thr_discrim);
+ if (ret == 0) {
+ uint64_t a_tctx_uid = a->tctx_uid;
+ uint64_t b_tctx_uid = b->tctx_uid;
+ ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+ b_tctx_uid);
+ }
+ }
+ return ret;
}
-static void
-bt_destroy(prof_bt_t *bt)
-{
+rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
+ tctx_link, prof_tctx_comp)
- cassert(config_prof);
+static int
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
+ unsigned a_len = a->bt.len;
+ unsigned b_len = b->bt.len;
+ unsigned comp_len = (a_len < b_len) ? a_len : b_len;
+ int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
+ if (ret == 0) {
+ ret = (a_len > b_len) - (a_len < b_len);
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
+ prof_gctx_comp)
+
+static int
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
+ int ret;
+ uint64_t a_uid = a->thr_uid;
+ uint64_t b_uid = b->thr_uid;
+
+ ret = ((a_uid > b_uid) - (a_uid < b_uid));
+ if (ret == 0) {
+ uint64_t a_discrim = a->thr_discrim;
+ uint64_t b_discrim = b->thr_discrim;
- idalloc(bt);
+ ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+ }
+ return ret;
}
-static prof_bt_t *
-bt_dup(prof_bt_t *bt)
-{
- prof_bt_t *ret;
+rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
+ prof_tdata_comp)
+
+/******************************************************************************/
+
+void
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
+ prof_tdata_t *tdata;
cassert(config_prof);
- /*
- * Create a single allocation that has space for vec immediately
- * following the prof_bt_t structure. The backtraces that get
- * stored in the backtrace caches are copied from stack-allocated
- * temporary variables, so size is known at creation time. Making this
- * a contiguous object improves cache locality.
- */
- ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) +
- (bt->len * sizeof(void *)));
- if (ret == NULL)
- return (NULL);
- ret->vec = (void **)((uintptr_t)ret +
- QUANTUM_CEILING(sizeof(prof_bt_t)));
- memcpy(ret->vec, bt->vec, bt->len * sizeof(void *));
- ret->len = bt->len;
+ if (updated) {
+ /*
+ * Compute a new sample threshold. This isn't very important in
+ * practice, because this function is rarely executed, so the
+ * potential for sample bias is minimal except in contrived
+ * programs.
+ */
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata != NULL) {
+ prof_sample_threshold_update(tdata);
+ }
+ }
- return (ret);
+ if ((uintptr_t)tctx > (uintptr_t)1U) {
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ tctx->prepared = false;
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
+ prof_tctx_destroy(tsd, tctx);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
+ }
}
-static inline void
-prof_enter(prof_tdata_t *prof_tdata)
-{
+void
+prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx) {
+ prof_tctx_set(tsdn, ptr, usize, NULL, tctx);
- cassert(config_prof);
+ malloc_mutex_lock(tsdn, tctx->tdata->lock);
+ tctx->cnts.curobjs++;
+ tctx->cnts.curbytes += usize;
+ if (opt_prof_accum) {
+ tctx->cnts.accumobjs++;
+ tctx->cnts.accumbytes += usize;
+ }
+ tctx->prepared = false;
+ malloc_mutex_unlock(tsdn, tctx->tdata->lock);
+}
+
+void
+prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) {
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ assert(tctx->cnts.curobjs > 0);
+ assert(tctx->cnts.curbytes >= usize);
+ tctx->cnts.curobjs--;
+ tctx->cnts.curbytes -= usize;
+
+ if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
+ prof_tctx_destroy(tsd, tctx);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
+}
- assert(prof_tdata->enq == false);
- prof_tdata->enq = true;
+void
+bt_init(prof_bt_t *bt, void **vec) {
+ cassert(config_prof);
- malloc_mutex_lock(&bt2ctx_mtx);
+ bt->vec = vec;
+ bt->len = 0;
}
-static inline void
-prof_leave(prof_tdata_t *prof_tdata)
-{
- bool idump, gdump;
+static void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
+ cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ if (tdata != NULL) {
+ assert(!tdata->enq);
+ tdata->enq = true;
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+}
+
+static void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
- malloc_mutex_unlock(&bt2ctx_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
- assert(prof_tdata->enq);
- prof_tdata->enq = false;
- idump = prof_tdata->enq_idump;
- prof_tdata->enq_idump = false;
- gdump = prof_tdata->enq_gdump;
- prof_tdata->enq_gdump = false;
+ if (tdata != NULL) {
+ bool idump, gdump;
- if (idump)
- prof_idump();
- if (gdump)
- prof_gdump();
+ assert(tdata->enq);
+ tdata->enq = false;
+ idump = tdata->enq_idump;
+ tdata->enq_idump = false;
+ gdump = tdata->enq_gdump;
+ tdata->enq_gdump = false;
+
+ if (idump) {
+ prof_idump(tsd_tsdn(tsd));
+ }
+ if (gdump) {
+ prof_gdump(tsd_tsdn(tsd));
+ }
+ }
}
#ifdef JEMALLOC_PROF_LIBUNWIND
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
-{
- unw_context_t uc;
- unw_cursor_t cursor;
- unsigned i;
- int err;
+prof_backtrace(prof_bt_t *bt) {
+ int nframes;
cassert(config_prof);
assert(bt->len == 0);
assert(bt->vec != NULL);
- unw_getcontext(&uc);
- unw_init_local(&cursor, &uc);
-
- /* Throw away (nignore+1) stack frames, if that many exist. */
- for (i = 0; i < nignore + 1; i++) {
- err = unw_step(&cursor);
- if (err <= 0)
- return;
- }
-
- /*
- * Iterate over stack frames until there are no more, or until no space
- * remains in bt.
- */
- for (i = 0; i < PROF_BT_MAX; i++) {
- unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
- bt->len++;
- err = unw_step(&cursor);
- if (err <= 0)
- break;
+ nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
+ if (nframes <= 0) {
+ return;
}
+ bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
-{
-
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
cassert(config_prof);
- return (_URC_NO_REASON);
+ return _URC_NO_REASON;
}
static _Unwind_Reason_Code
-prof_unwind_callback(struct _Unwind_Context *context, void *arg)
-{
+prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+ void *ip;
cassert(config_prof);
- if (data->nignore > 0)
- data->nignore--;
- else {
- data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
- data->bt->len++;
- if (data->bt->len == data->max)
- return (_URC_END_OF_STACK);
+ ip = (void *)_Unwind_GetIP(context);
+ if (ip == NULL) {
+ return _URC_END_OF_STACK;
+ }
+ data->bt->vec[data->bt->len] = ip;
+ data->bt->len++;
+ if (data->bt->len == data->max) {
+ return _URC_END_OF_STACK;
}
- return (_URC_NO_REASON);
+ return _URC_NO_REASON;
}
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
-{
- prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};
+prof_backtrace(prof_bt_t *bt) {
+ prof_unwind_data_t data = {bt, PROF_BT_MAX};
cassert(config_prof);
@@ -232,25 +368,24 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
}
#elif (defined(JEMALLOC_PROF_GCC))
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
-{
-#define BT_FRAME(i) \
- if ((i) < nignore + PROF_BT_MAX) { \
+prof_backtrace(prof_bt_t *bt) {
+#define BT_FRAME(i) \
+ if ((i) < PROF_BT_MAX) { \
void *p; \
- if (__builtin_frame_address(i) == 0) \
+ if (__builtin_frame_address(i) == 0) { \
return; \
+ } \
p = __builtin_return_address(i); \
- if (p == NULL) \
+ if (p == NULL) { \
return; \
- if (i >= nignore) { \
- bt->vec[(i) - nignore] = p; \
- bt->len = (i) - nignore + 1; \
} \
- } else \
- return;
+ bt->vec[(i)] = p; \
+ bt->len = (i) + 1; \
+ } else { \
+ return; \
+ }
cassert(config_prof);
- assert(nignore <= 3);
BT_FRAME(0)
BT_FRAME(1)
@@ -392,307 +527,452 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore)
BT_FRAME(125)
BT_FRAME(126)
BT_FRAME(127)
-
- /* Extras to compensate for nignore. */
- BT_FRAME(128)
- BT_FRAME(129)
- BT_FRAME(130)
#undef BT_FRAME
}
#else
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore)
-{
-
+prof_backtrace(prof_bt_t *bt) {
cassert(config_prof);
not_reached();
}
#endif
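A note on the GCC fallback above: __builtin_frame_address() and __builtin_return_address() require a compile-time-constant level, which is why BT_FRAME(n) is expanded once per frame instead of being driven by a loop. A tiny non-jemalloc illustration:

    #include <stddef.h>

    /* Fetch the return address two frames up, guarding against running off the
     * stack; the level must be a constant, so a runtime loop is not possible. */
    static void *
    return_address_level2(void) {
        if (__builtin_frame_address(2) == NULL) {
            return NULL;
        }
        return __builtin_return_address(2);
    }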
static malloc_mutex_t *
-prof_ctx_mutex_choose(void)
-{
- unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
+prof_gctx_mutex_choose(void) {
+ unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
- return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
+ return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}
-static void
-prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt)
-{
+static malloc_mutex_t *
+prof_tdata_mutex_choose(uint64_t thr_uid) {
+ return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
+}
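Both choosers above are instances of lock striping: a small fixed pool of mutexes is shared among an unbounded number of gctx's/tdata's, with each object hashed onto one stripe. A minimal sketch of the idea (NSTRIPES and stripe_locks are illustrative names; the power-of-two mask stands in for the modulo used above, and each element must be initialized with pthread_mutex_init() before use):

    #include <pthread.h>
    #include <stdint.h>

    #define NSTRIPES 64     /* power of two so the mask below is a cheap modulo */

    static pthread_mutex_t stripe_locks[NSTRIPES];

    static pthread_mutex_t *
    stripe_for(uint64_t id) {
        /* Objects with the same id always share a stripe; unrelated objects
         * usually land on different stripes, keeping contention low. */
        return &stripe_locks[id & (NSTRIPES - 1)];
    }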
- ctx->bt = bt;
- ctx->lock = prof_ctx_mutex_choose();
+static prof_gctx_t *
+prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
+ /*
+ * Create a single allocation that has space for vec of length bt->len.
+ */
+ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
+ sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
+ true);
+ if (gctx == NULL) {
+ return NULL;
+ }
+ gctx->lock = prof_gctx_mutex_choose();
/*
* Set nlimbo to 1, in order to avoid a race condition with
- * prof_ctx_merge()/prof_ctx_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- ctx->nlimbo = 1;
- ql_elm_new(ctx, dump_link);
- memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t));
- ql_new(&ctx->cnts_ql);
+ gctx->nlimbo = 1;
+ tctx_tree_new(&gctx->tctxs);
+ /* Duplicate bt. */
+ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
+ gctx->bt.vec = gctx->vec;
+ gctx->bt.len = bt->len;
+ return gctx;
}
static void
-prof_ctx_destroy(prof_ctx_t *ctx)
-{
- prof_tdata_t *prof_tdata;
-
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
+ prof_tdata_t *tdata) {
cassert(config_prof);
/*
- * Check that ctx is still unused by any thread cache before destroying
- * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
- * condition with this function, as does prof_ctx_merge() in order to
- * avoid a race between the main body of prof_ctx_merge() and entry
+ * Check that gctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_tctx_destroy() in order to
+ * avoid a race between the main body of prof_tctx_destroy() and entry
* into this function.
*/
- prof_tdata = prof_tdata_get(false);
- assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
- prof_enter(prof_tdata);
- malloc_mutex_lock(ctx->lock);
- if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
- ctx->nlimbo == 1) {
- assert(ctx->cnt_merged.curbytes == 0);
- assert(ctx->cnt_merged.accumobjs == 0);
- assert(ctx->cnt_merged.accumbytes == 0);
- /* Remove ctx from bt2ctx. */
- if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+ prof_enter(tsd, tdata_self);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ assert(gctx->nlimbo != 0);
+ if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
+ /* Remove gctx from bt2gctx. */
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
not_reached();
- prof_leave(prof_tdata);
- /* Destroy ctx. */
- malloc_mutex_unlock(ctx->lock);
- bt_destroy(ctx->bt);
- idalloc(ctx);
+ }
+ prof_leave(tsd, tdata_self);
+ /* Destroy gctx. */
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
} else {
/*
- * Compensate for increment in prof_ctx_merge() or
+ * Compensate for increment in prof_tctx_destroy() or
* prof_lookup().
*/
- ctx->nlimbo--;
- malloc_mutex_unlock(ctx->lock);
- prof_leave(prof_tdata);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_leave(tsd, tdata_self);
}
}
-static void
-prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
-{
- bool destroy;
+static bool
+prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
- cassert(config_prof);
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (tctx->cnts.curobjs != 0) {
+ return false;
+ }
+ if (tctx->prepared) {
+ return false;
+ }
+ return true;
+}
+
+static bool
+prof_gctx_should_destroy(prof_gctx_t *gctx) {
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (!tctx_tree_empty(&gctx->tctxs)) {
+ return false;
+ }
+ if (gctx->nlimbo != 0) {
+ return false;
+ }
+ return true;
+}
- /* Merge cnt stats and detach from ctx. */
- malloc_mutex_lock(ctx->lock);
- ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
- ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
- ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
- ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
- ql_remove(&ctx->cnts_ql, cnt, cnts_link);
- if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
- ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
+static void
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
+ prof_tdata_t *tdata = tctx->tdata;
+ prof_gctx_t *gctx = tctx->gctx;
+ bool destroy_tdata, destroy_tctx, destroy_gctx;
+
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ assert(tctx->cnts.curobjs == 0);
+ assert(tctx->cnts.curbytes == 0);
+ assert(!opt_prof_accum);
+ assert(tctx->cnts.accumobjs == 0);
+ assert(tctx->cnts.accumbytes == 0);
+
+ ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ tctx_tree_remove(&gctx->tctxs, tctx);
+ destroy_tctx = true;
+ if (prof_gctx_should_destroy(gctx)) {
+ /*
+ * Increment gctx->nlimbo in order to keep another
+ * thread from winning the race to destroy gctx while
+ * this one has gctx->lock dropped. Without this, it
+ * would be possible for another thread to:
+ *
+ * 1) Sample an allocation associated with gctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_gctx_try_destroy(gctx).
+ *
+ * The result would be that gctx no longer exists by the
+ * time this thread accesses it in
+ * prof_gctx_try_destroy().
+ */
+ gctx->nlimbo++;
+ destroy_gctx = true;
+ } else {
+ destroy_gctx = false;
+ }
+ break;
+ case prof_tctx_state_dumping:
/*
- * Increment ctx->nlimbo in order to keep another thread from
- * winning the race to destroy ctx while this one has ctx->lock
- * dropped. Without this, it would be possible for another
- * thread to:
- *
- * 1) Sample an allocation associated with ctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_ctx_destroy(ctx).
- *
- * The result would be that ctx no longer exists by the time
- * this thread accesses it in prof_ctx_destroy().
+ * A dumping thread needs tctx to remain valid until dumping
+ * has finished. Change state such that the dumping thread will
+ * complete destruction during a late dump iteration phase.
*/
- ctx->nlimbo++;
- destroy = true;
- } else
- destroy = false;
- malloc_mutex_unlock(ctx->lock);
- if (destroy)
- prof_ctx_destroy(ctx);
+ tctx->state = prof_tctx_state_purgatory;
+ destroy_tctx = false;
+ destroy_gctx = false;
+ break;
+ default:
+ not_reached();
+ destroy_tctx = false;
+ destroy_gctx = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ if (destroy_gctx) {
+ prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
+ tdata);
+ }
+
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ if (destroy_tdata) {
+ prof_tdata_destroy(tsd, tdata, false);
+ }
+
+ if (destroy_tctx) {
+ idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
+ }
}
static bool
-prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey,
- prof_ctx_t **p_ctx, bool *p_new_ctx)
-{
+prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
union {
- prof_ctx_t *p;
+ prof_gctx_t *p;
void *v;
- } ctx;
+ } gctx, tgctx;
union {
prof_bt_t *p;
void *v;
} btkey;
- bool new_ctx;
+ bool new_gctx;
- prof_enter(prof_tdata);
- if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
/* bt has never been seen before. Insert it. */
- ctx.v = imalloc(sizeof(prof_ctx_t));
- if (ctx.v == NULL) {
- prof_leave(prof_tdata);
- return (true);
- }
- btkey.p = bt_dup(bt);
- if (btkey.v == NULL) {
- prof_leave(prof_tdata);
- idalloc(ctx.v);
- return (true);
+ prof_leave(tsd, tdata);
+ tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
+ if (tgctx.v == NULL) {
+ return true;
}
- prof_ctx_init(ctx.p, btkey.p);
- if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
- /* OOM. */
- prof_leave(prof_tdata);
- idalloc(btkey.v);
- idalloc(ctx.v);
- return (true);
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ gctx.p = tgctx.p;
+ btkey.p = &gctx.p->bt;
+ if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
+ /* OOM. */
+ prof_leave(tsd, tdata);
+ idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
+ true, true);
+ return true;
+ }
+ new_gctx = true;
+ } else {
+ new_gctx = false;
}
- new_ctx = true;
} else {
+ tgctx.v = NULL;
+ new_gctx = false;
+ }
+
+ if (!new_gctx) {
/*
* Increment nlimbo, in order to avoid a race condition with
- * prof_ctx_merge()/prof_ctx_destroy().
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
*/
- malloc_mutex_lock(ctx.p->lock);
- ctx.p->nlimbo++;
- malloc_mutex_unlock(ctx.p->lock);
- new_ctx = false;
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
+ gctx.p->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
+ new_gctx = false;
+
+ if (tgctx.v != NULL) {
+ /* Lost race to insert. */
+ idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
+ true);
+ }
}
- prof_leave(prof_tdata);
+ prof_leave(tsd, tdata);
*p_btkey = btkey.v;
- *p_ctx = ctx.p;
- *p_new_ctx = new_ctx;
- return (false);
+ *p_gctx = gctx.p;
+ *p_new_gctx = new_gctx;
+ return false;
}
-prof_thr_cnt_t *
-prof_lookup(prof_bt_t *bt)
-{
+prof_tctx_t *
+prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
union {
- prof_thr_cnt_t *p;
+ prof_tctx_t *p;
void *v;
} ret;
- prof_tdata_t *prof_tdata;
+ prof_tdata_t *tdata;
+ bool not_found;
cassert(config_prof);
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
- return (NULL);
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ return NULL;
+ }
- if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
+ if (!not_found) { /* Note double negative! */
+ ret.p->prepared = true;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (not_found) {
void *btkey;
- prof_ctx_t *ctx;
- bool new_ctx;
+ prof_gctx_t *gctx;
+ bool new_gctx, error;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
- if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx))
- return (NULL);
+ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
+ &new_gctx)) {
+ return NULL;
+ }
- /* Link a prof_thd_cnt_t into ctx for this thread. */
- if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
- assert(ckh_count(&prof_tdata->bt2cnt) > 0);
- /*
- * Flush the least recently used cnt in order to keep
- * bt2cnt from becoming too large.
- */
- ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
- assert(ret.v != NULL);
- if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
- NULL, NULL))
- not_reached();
- ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
- prof_ctx_merge(ret.p->ctx, ret.p);
- /* ret can now be re-used. */
- } else {
- assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
- /* Allocate and partially initialize a new cnt. */
- ret.v = imalloc(sizeof(prof_thr_cnt_t));
- if (ret.p == NULL) {
- if (new_ctx)
- prof_ctx_destroy(ctx);
- return (NULL);
+ /* Link a prof_tctx_t into gctx for this thread. */
+ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
+ sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
+ arena_ichoose(tsd, NULL), true);
+ if (ret.p == NULL) {
+ if (new_gctx) {
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
}
- ql_elm_new(ret.p, cnts_link);
- ql_elm_new(ret.p, lru_link);
+ return NULL;
}
- /* Finish initializing ret. */
- ret.p->ctx = ctx;
- ret.p->epoch = 0;
+ ret.p->tdata = tdata;
+ ret.p->thr_uid = tdata->thr_uid;
+ ret.p->thr_discrim = tdata->thr_discrim;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
- if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) {
- if (new_ctx)
- prof_ctx_destroy(ctx);
- idalloc(ret.v);
- return (NULL);
+ ret.p->gctx = gctx;
+ ret.p->tctx_uid = tdata->tctx_uid_next++;
+ ret.p->prepared = true;
+ ret.p->state = prof_tctx_state_initializing;
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (error) {
+ if (new_gctx) {
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ }
+ idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
+ return NULL;
}
- ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
- malloc_mutex_lock(ctx->lock);
- ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link);
- ctx->nlimbo--;
- malloc_mutex_unlock(ctx->lock);
- } else {
- /* Move ret to the front of the LRU. */
- ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
- ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ ret.p->state = prof_tctx_state_nominal;
+ tctx_tree_insert(&gctx->tctxs, ret.p);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
}
- return (ret.p);
+ return ret.p;
+}
+
+/*
+ * The bodies of this function and prof_leakcheck() are compiled out unless heap
+ * profiling is enabled, so that it is possible to compile jemalloc with
+ * floating point support completely disabled. Avoiding floating point code is
+ * important on memory-constrained systems, but it also enables a workaround for
+ * versions of glibc that don't properly save/restore floating point registers
+ * during dynamic lazy symbol loading (which internally calls into whatever
+ * malloc implementation happens to be integrated into the application). Note
+ * that some compilers (e.g. gcc 4.8) may use floating point registers for fast
+ * memory moves, so jemalloc must be compiled with such optimizations disabled
+ * (e.g. -mno-sse) in order for the workaround to be complete.
+ */
+void
+prof_sample_threshold_update(prof_tdata_t *tdata) {
+#ifdef JEMALLOC_PROF
+ uint64_t r;
+ double u;
+
+ if (!config_prof) {
+ return;
+ }
+
+ if (lg_prof_sample == 0) {
+ tdata->bytes_until_sample = 0;
+ return;
+ }
+
+ /*
+ * Compute sample interval as a geometrically distributed random
+ * variable with mean (2^lg_prof_sample).
+ *
+	 *   tdata->bytes_until_sample = ceil(log(u) / log(1 - p)),
+	 *   where p = 1 / 2^lg_prof_sample and u is uniform on (0, 1).
+ *
+ * For more information on the math, see:
+ *
+ * Non-Uniform Random Variate Generation
+ * Luc Devroye
+ * Springer-Verlag, New York, 1986
+ * pp 500
+ * (http://luc.devroye.org/rnbookindex.html)
+ */
+ r = prng_lg_range_u64(&tdata->prng_state, 53);
+ u = (double)r * (1.0/9007199254740992.0L);
+ tdata->bytes_until_sample = (uint64_t)(log(u) /
+ log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ + (uint64_t)1U;
+#endif
}
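To make the sampling math concrete, here is the same computation in standalone form; r53 stands in for jemalloc's prng_lg_range_u64(&tdata->prng_state, 53) and is assumed uniform on [0, 2^53):

    #include <math.h>
    #include <stdint.h>

    static uint64_t
    sample_interval(unsigned lg_sample, uint64_t r53) {
        if (lg_sample == 0) {
            return 0;       /* sample every allocation */
        }
        double u = (double)r53 * (1.0 / 9007199254740992.0);    /* u in [0, 1) */
        double p = 1.0 / (double)((uint64_t)1 << lg_sample);
        /* Geometric variate: on average one sample per 2^lg_sample bytes. */
        return (uint64_t)(log(u) / log(1.0 - p)) + 1;
    }

(As in the original, the measure-zero u == 0 case is ignored.)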
#ifdef JEMALLOC_JET
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
+ size_t *tdata_count = (size_t *)arg;
+
+ (*tdata_count)++;
+
+ return NULL;
+}
+
size_t
-prof_bt_count(void)
-{
+prof_tdata_count(void) {
+ size_t tdata_count = 0;
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+ (void *)&tdata_count);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+
+ return tdata_count;
+}
+
+size_t
+prof_bt_count(void) {
size_t bt_count;
- prof_tdata_t *prof_tdata;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
- return (0);
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ return 0;
+ }
- prof_enter(prof_tdata);
- bt_count = ckh_count(&bt2ctx);
- prof_leave(prof_tdata);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ bt_count = ckh_count(&bt2gctx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
- return (bt_count);
+ return bt_count;
}
#endif
-#ifdef JEMALLOC_JET
-#undef prof_dump_open
-#define prof_dump_open JEMALLOC_N(prof_dump_open_impl)
-#endif
static int
-prof_dump_open(bool propagate_err, const char *filename)
-{
+prof_dump_open_impl(bool propagate_err, const char *filename) {
int fd;
fd = creat(filename, 0644);
- if (fd == -1 && propagate_err == false) {
+ if (fd == -1 && !propagate_err) {
malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n",
filename);
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
- return (fd);
+ return fd;
}
-#ifdef JEMALLOC_JET
-#undef prof_dump_open
-#define prof_dump_open JEMALLOC_N(prof_dump_open)
-prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
-#endif
+prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;
static bool
-prof_dump_flush(bool propagate_err)
-{
+prof_dump_flush(bool propagate_err) {
bool ret = false;
ssize_t err;
@@ -700,22 +980,22 @@ prof_dump_flush(bool propagate_err)
err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
- if (propagate_err == false) {
+ if (!propagate_err) {
malloc_write("<jemalloc>: write() failed during heap "
"profile flush\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
ret = true;
}
prof_dump_buf_end = 0;
- return (ret);
+ return ret;
}
static bool
-prof_dump_close(bool propagate_err)
-{
+prof_dump_close(bool propagate_err) {
bool ret;
assert(prof_dump_fd != -1);
@@ -723,13 +1003,12 @@ prof_dump_close(bool propagate_err)
close(prof_dump_fd);
prof_dump_fd = -1;
- return (ret);
+ return ret;
}
static bool
-prof_dump_write(bool propagate_err, const char *s)
-{
- unsigned i, slen, n;
+prof_dump_write(bool propagate_err, const char *s) {
+ size_t i, slen, n;
cassert(config_prof);
@@ -737,9 +1016,11 @@ prof_dump_write(bool propagate_err, const char *s)
slen = strlen(s);
while (i < slen) {
/* Flush the buffer if it is full. */
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
- if (prof_dump_flush(propagate_err) && propagate_err)
- return (true);
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
+ if (prof_dump_flush(propagate_err) && propagate_err) {
+ return true;
+ }
+ }
if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
/* Finish writing. */
@@ -753,13 +1034,12 @@ prof_dump_write(bool propagate_err, const char *s)
i += n;
}
- return (false);
+ return false;
}
-JEMALLOC_ATTR(format(printf, 2, 3))
+JEMALLOC_FORMAT_PRINTF(2, 3)
static bool
-prof_dump_printf(bool propagate_err, const char *format, ...)
-{
+prof_dump_printf(bool propagate_err, const char *format, ...) {
bool ret;
va_list ap;
char buf[PROF_PRINTF_BUFSIZE];
@@ -769,179 +1049,401 @@ prof_dump_printf(bool propagate_err, const char *format, ...)
va_end(ap);
ret = prof_dump_write(propagate_err, buf);
- return (ret);
+ return ret;
+}
+
+static void
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ malloc_mutex_lock(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ return;
+ case prof_tctx_state_nominal:
+ tctx->state = prof_tctx_state_dumping;
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+
+ memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
+
+ tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ if (opt_prof_accum) {
+ tdata->cnt_summed.accumobjs +=
+ tctx->dump_cnts.accumobjs;
+ tdata->cnt_summed.accumbytes +=
+ tctx->dump_cnts.accumbytes;
+ }
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ not_reached();
+ }
}
static void
-prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx,
- prof_ctx_list_t *ctx_ql)
-{
- prof_thr_cnt_t *thr_cnt;
- prof_cnt_t tcnt;
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+ gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ if (opt_prof_accum) {
+ gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
+ gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
+ }
+}
+
+static prof_tctx_t *
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
+ break;
+ default:
+ not_reached();
+ }
+
+ return NULL;
+}
+
+struct prof_tctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
+static prof_tctx_t *
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
+ struct prof_tctx_dump_iter_arg_s *arg =
+ (struct prof_tctx_dump_iter_arg_s *)opaque;
+
+ malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ case prof_tctx_state_nominal:
+ /* Not captured by this dump. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ if (prof_dump_printf(arg->propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
+ "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
+ tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
+ tctx->dump_cnts.accumbytes)) {
+ return tctx;
+ }
+ break;
+ default:
+ not_reached();
+ }
+ return NULL;
+}
+static prof_tctx_t *
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+ prof_tctx_t *ret;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ tctx->state = prof_tctx_state_nominal;
+ break;
+ case prof_tctx_state_purgatory:
+ ret = tctx;
+ goto label_return;
+ default:
+ not_reached();
+ }
+
+ ret = NULL;
+label_return:
+ return ret;
+}
+
+static void
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
cassert(config_prof);
- malloc_mutex_lock(ctx->lock);
+ malloc_mutex_lock(tsdn, gctx->lock);
/*
- * Increment nlimbo so that ctx won't go away before dump.
- * Additionally, link ctx into the dump list so that it is included in
+ * Increment nlimbo so that gctx won't go away before dump.
+ * Additionally, link gctx into the dump list so that it is included in
* prof_dump()'s second pass.
*/
- ctx->nlimbo++;
- ql_tail_insert(ctx_ql, ctx, dump_link);
+ gctx->nlimbo++;
+ gctx_tree_insert(gctxs, gctx);
- memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
- ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
- volatile unsigned *epoch = &thr_cnt->epoch;
+ memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
- while (true) {
- unsigned epoch0 = *epoch;
+ malloc_mutex_unlock(tsdn, gctx->lock);
+}
- /* Make sure epoch is even. */
- if (epoch0 & 1U)
- continue;
+struct prof_gctx_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ size_t leak_ngctx;
+};
+
+static prof_gctx_t *
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
+ struct prof_gctx_merge_iter_arg_s *arg =
+ (struct prof_gctx_merge_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
+ (void *)arg->tsdn);
+ if (gctx->cnt_summed.curobjs != 0) {
+ arg->leak_ngctx++;
+ }
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
- memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t));
+ return NULL;
+}
- /* Terminate if epoch didn't change while reading. */
- if (*epoch == epoch0)
- break;
- }
+static void
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ prof_gctx_t *gctx;
- ctx->cnt_summed.curobjs += tcnt.curobjs;
- ctx->cnt_summed.curbytes += tcnt.curbytes;
- if (opt_prof_accum) {
- ctx->cnt_summed.accumobjs += tcnt.accumobjs;
- ctx->cnt_summed.accumbytes += tcnt.accumbytes;
+ /*
+ * Standard tree iteration won't work here, because as soon as we
+ * decrement gctx->nlimbo and unlock gctx, another thread can
+ * concurrently destroy it, which will corrupt the tree. Therefore,
+ * tear down the tree one node at a time during iteration.
+ */
+ while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+ gctx_tree_remove(gctxs, gctx);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ {
+ prof_tctx_t *next;
+
+ next = NULL;
+ do {
+ prof_tctx_t *to_destroy =
+ tctx_tree_iter(&gctx->tctxs, next,
+ prof_tctx_finish_iter,
+ (void *)tsd_tsdn(tsd));
+ if (to_destroy != NULL) {
+ next = tctx_tree_next(&gctx->tctxs,
+ to_destroy);
+ tctx_tree_remove(&gctx->tctxs,
+ to_destroy);
+ idalloctm(tsd_tsdn(tsd), to_destroy,
+ NULL, NULL, true, true);
+ } else {
+ next = NULL;
+ }
+ } while (next != NULL);
+ }
+ gctx->nlimbo--;
+ if (prof_gctx_should_destroy(gctx)) {
+ gctx->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
}
}
+}
- if (ctx->cnt_summed.curobjs != 0)
- (*leak_nctx)++;
+struct prof_tdata_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ prof_cnt_t cnt_all;
+};
+
+static prof_tdata_t *
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *opaque) {
+ struct prof_tdata_merge_iter_arg_s *arg =
+ (struct prof_tdata_merge_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, tdata->lock);
+ if (!tdata->expired) {
+ size_t tabind;
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } tctx;
+
+ tdata->dumping = true;
+ memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
+ for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
+ &tctx.v);) {
+ prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+ }
- /* Add to cnt_all. */
- cnt_all->curobjs += ctx->cnt_summed.curobjs;
- cnt_all->curbytes += ctx->cnt_summed.curbytes;
- if (opt_prof_accum) {
- cnt_all->accumobjs += ctx->cnt_summed.accumobjs;
- cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
+ arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
+ arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
+ if (opt_prof_accum) {
+ arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
+ arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
+ }
+ } else {
+ tdata->dumping = false;
}
+ malloc_mutex_unlock(arg->tsdn, tdata->lock);
- malloc_mutex_unlock(ctx->lock);
+ return NULL;
}
-static bool
-prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
-{
-
- if (opt_lg_prof_sample == 0) {
- if (prof_dump_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
- cnt_all->curobjs, cnt_all->curbytes,
- cnt_all->accumobjs, cnt_all->accumbytes))
- return (true);
- } else {
- if (prof_dump_printf(propagate_err,
- "heap profile: %"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
- cnt_all->curobjs, cnt_all->curbytes,
- cnt_all->accumobjs, cnt_all->accumbytes,
- ((uint64_t)1U << opt_lg_prof_sample)))
- return (true);
+static prof_tdata_t *
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
+ bool propagate_err = *(bool *)arg;
+
+ if (!tdata->dumping) {
+ return NULL;
}
- return (false);
+ if (prof_dump_printf(propagate_err,
+ " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
+ tdata->thr_uid, tdata->cnt_summed.curobjs,
+ tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
+ tdata->cnt_summed.accumbytes,
+ (tdata->thread_name != NULL) ? " " : "",
+ (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
+ return tdata;
+ }
+ return NULL;
}
-static void
-prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
-{
-
- ctx->nlimbo--;
- ql_remove(ctx_ql, ctx, dump_link);
-}
+static bool
+prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err,
+ const prof_cnt_t *cnt_all) {
+ bool ret;
-static void
-prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql)
-{
+ if (prof_dump_printf(propagate_err,
+ "heap_v2/%"FMTu64"\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
+ cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
+ return true;
+ }
- malloc_mutex_lock(ctx->lock);
- prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
- malloc_mutex_unlock(ctx->lock);
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
+ (void *)&propagate_err) != NULL);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+ return ret;
}
+prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;
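For orientation, with the default lg_prof_sample of 19 (a 512 KiB average sample interval) the lines emitted by the format strings above look roughly like this; the counts, thread ids, and thread name are illustrative and the indentation approximate:

    heap_v2/524288
      t*: 12: 98304 [0: 0]
      t0: 7: 57344 [0: 0]
      t1: 5: 40960 [0: 0] worker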
static bool
-prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt,
- prof_ctx_list_t *ctx_ql)
-{
+prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
+ const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
bool ret;
unsigned i;
+ struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
cassert(config_prof);
-
- /*
- * Current statistics can sum to 0 as a result of unmerged per thread
- * statistics. Additionally, interval- and growth-triggered dumps can
- * occur between the time a ctx is created and when its statistics are
- * filled in. Avoid dumping any ctx that is an artifact of either
- * implementation detail.
- */
- malloc_mutex_lock(ctx->lock);
- if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
- (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
- assert(ctx->cnt_summed.curobjs == 0);
- assert(ctx->cnt_summed.curbytes == 0);
- assert(ctx->cnt_summed.accumobjs == 0);
- assert(ctx->cnt_summed.accumbytes == 0);
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+	/* Avoid dumping gctx's that have no useful data. */
+ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
+ (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
+ assert(gctx->cnt_summed.curobjs == 0);
+ assert(gctx->cnt_summed.curbytes == 0);
+ assert(gctx->cnt_summed.accumobjs == 0);
+ assert(gctx->cnt_summed.accumbytes == 0);
ret = false;
goto label_return;
}
- if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64
- " [%"PRIu64": %"PRIu64"] @",
- ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
- ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) {
+ if (prof_dump_printf(propagate_err, "@")) {
ret = true;
goto label_return;
}
-
for (i = 0; i < bt->len; i++) {
- if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
+ if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
(uintptr_t)bt->vec[i])) {
ret = true;
goto label_return;
}
}
- if (prof_dump_write(propagate_err, "\n")) {
+ if (prof_dump_printf(propagate_err,
+ "\n"
+ " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
+ gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
+ gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
+ ret = true;
+ goto label_return;
+ }
+
+ prof_tctx_dump_iter_arg.tsdn = tsdn;
+ prof_tctx_dump_iter_arg.propagate_err = propagate_err;
+ if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
+ (void *)&prof_tctx_dump_iter_arg) != NULL) {
ret = true;
goto label_return;
}
ret = false;
label_return:
- prof_dump_ctx_cleanup_locked(ctx, ctx_ql);
- malloc_mutex_unlock(ctx->lock);
- return (ret);
+ return ret;
+}
+
+#ifndef _WIN32
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps(const char *format, ...) {
+ int mfd;
+ va_list ap;
+ char filename[PATH_MAX + 1];
+
+ va_start(ap, format);
+ malloc_vsnprintf(filename, sizeof(filename), format, ap);
+ va_end(ap);
+ mfd = open(filename, O_RDONLY | O_CLOEXEC);
+
+ return mfd;
+}
+#endif
+
+static int
+prof_getpid(void) {
+#ifdef _WIN32
+ return GetCurrentProcessId();
+#else
+ return getpid();
+#endif
}
static bool
-prof_dump_maps(bool propagate_err)
-{
+prof_dump_maps(bool propagate_err) {
bool ret;
int mfd;
- char filename[PATH_MAX + 1];
cassert(config_prof);
#ifdef __FreeBSD__
- malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map");
+ mfd = prof_open_maps("/proc/curproc/map");
+#elif defined(_WIN32)
+	mfd = -1; /* Not implemented. */
#else
- malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
- (int)getpid());
+ {
+ int pid = prof_getpid();
+
+ mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
+ if (mfd == -1) {
+ mfd = prof_open_maps("/proc/%d/maps", pid);
+ }
+ }
#endif
- mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
@@ -971,214 +1473,391 @@ prof_dump_maps(bool propagate_err)
ret = false;
label_return:
- if (mfd != -1)
+ if (mfd != -1) {
close(mfd);
- return (ret);
+ }
+ return ret;
}
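The function above simply streams the process's memory map, which jeprof later uses to map return addresses back to the libraries they live in, into the profile through the shared dump buffer. Stripped of the buffering and error propagation, the idea reduces to the following illustrative POSIX sketch (not jemalloc's code):

    #include <fcntl.h>
    #include <unistd.h>

    static void
    append_maps(int out_fd) {
        char buf[4096];
        int mfd = open("/proc/self/maps", O_RDONLY | O_CLOEXEC);
        if (mfd == -1) {
            return;         /* no maps pseudo-file on this platform */
        }
        ssize_t n;
        while ((n = read(mfd, buf, sizeof(buf))) > 0) {
            (void)write(out_fd, buf, (size_t)n);
        }
        close(mfd);
    }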
+/*
+ * See prof_sample_threshold_update() comment for why the body of this function
+ * is conditionally compiled.
+ */
static void
-prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx,
- const char *filename)
-{
-
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
+ const char *filename) {
+#ifdef JEMALLOC_PROF
+ /*
+	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
+ * differ slightly from what jeprof reports, because here we scale the
+ * summary values, whereas jeprof scales each context individually and
+ * reports the sums of the scaled values.
+ */
if (cnt_all->curbytes != 0) {
- malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
- PRId64" object%s, %zu context%s\n",
- cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
- cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
- leak_nctx, (leak_nctx != 1) ? "s" : "");
+ double sample_period = (double)((uint64_t)1 << lg_prof_sample);
+ double ratio = (((double)cnt_all->curbytes) /
+ (double)cnt_all->curobjs) / sample_period;
+ double scale_factor = 1.0 / (1.0 - exp(-ratio));
+ uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
+ * scale_factor);
+ uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
+ scale_factor);
+
+ malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
+ " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
+ curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
+ 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
malloc_printf(
- "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+ "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
+#endif
}
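The scaling above compensates for sampling: with a byte-based sampler of period 2^lg_prof_sample, an allocation of size s is sampled with probability roughly 1 - exp(-s / period); ratio is the mean sampled size divided by the period, so the summary counts are scaled by 1 / (1 - exp(-ratio)). A standalone sketch mirroring the code above (illustrative names; the caller must guarantee curobjs > 0):

    #include <math.h>
    #include <stdint.h>

    static void
    unsample(uint64_t curobjs, uint64_t curbytes, unsigned lg_sample,
        uint64_t *est_objs, uint64_t *est_bytes) {
        double period = (double)((uint64_t)1 << lg_sample);
        double ratio = ((double)curbytes / (double)curobjs) / period;
        double scale = 1.0 / (1.0 - exp(-ratio));   /* inverse sampling probability */
        *est_objs = (uint64_t)round((double)curobjs * scale);
        *est_bytes = (uint64_t)round((double)curbytes * scale);
    }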
-static bool
-prof_dump(bool propagate_err, const char *filename, bool leakcheck)
-{
- prof_tdata_t *prof_tdata;
- prof_cnt_t cnt_all;
+struct prof_gctx_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ bool propagate_err;
+};
+
+static prof_gctx_t *
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
+ prof_gctx_t *ret;
+ struct prof_gctx_dump_iter_arg_s *arg =
+ (struct prof_gctx_dump_iter_arg_s *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+
+ if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
+ gctxs)) {
+ ret = gctx;
+ goto label_return;
+ }
+
+ ret = NULL;
+label_return:
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ return ret;
+}
+
+static void
+prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
+ struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
+ struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
+ prof_gctx_tree_t *gctxs) {
size_t tabind;
union {
- prof_ctx_t *p;
+ prof_gctx_t *p;
void *v;
- } ctx;
- size_t leak_nctx;
- prof_ctx_list_t ctx_ql;
-
- cassert(config_prof);
+ } gctx;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
- return (true);
+ prof_enter(tsd, tdata);
- malloc_mutex_lock(&prof_dump_mtx);
+ /*
+ * Put gctx's in limbo and clear their counters in preparation for
+ * summing.
+ */
+ gctx_tree_new(gctxs);
+ for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
+ prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
+ }
- /* Merge per thread profile stats, and sum them in cnt_all. */
- memset(&cnt_all, 0, sizeof(prof_cnt_t));
- leak_nctx = 0;
- ql_new(&ctx_ql);
- prof_enter(prof_tdata);
- for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;)
- prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql);
- prof_leave(prof_tdata);
+ /*
+ * Iterate over tdatas, and for the non-expired ones snapshot their tctx
+ * stats and merge them into the associated gctx's.
+ */
+ prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
+ memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
+ (void *)prof_tdata_merge_iter_arg);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ /* Merge tctx stats into gctx's. */
+ prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
+ prof_gctx_merge_iter_arg->leak_ngctx = 0;
+ gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
+ (void *)prof_gctx_merge_iter_arg);
+
+ prof_leave(tsd, tdata);
+}
+static bool
+prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
+ bool leakcheck, prof_tdata_t *tdata,
+ struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
+ struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
+ struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
+ prof_gctx_tree_t *gctxs) {
/* Create dump file. */
- if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
- goto label_open_close_error;
+ if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
+ return true;
+ }
/* Dump profile header. */
- if (prof_dump_header(propagate_err, &cnt_all))
+ if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
+ &prof_tdata_merge_iter_arg->cnt_all)) {
goto label_write_error;
+ }
- /* Dump per ctx profile stats. */
- while ((ctx.p = ql_first(&ctx_ql)) != NULL) {
- if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql))
- goto label_write_error;
+ /* Dump per gctx profile stats. */
+ prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
+ prof_gctx_dump_iter_arg->propagate_err = propagate_err;
+ if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
+ (void *)prof_gctx_dump_iter_arg) != NULL) {
+ goto label_write_error;
}
/* Dump /proc/<pid>/maps if possible. */
- if (prof_dump_maps(propagate_err))
+ if (prof_dump_maps(propagate_err)) {
goto label_write_error;
+ }
- if (prof_dump_close(propagate_err))
- goto label_open_close_error;
-
- malloc_mutex_unlock(&prof_dump_mtx);
-
- if (leakcheck)
- prof_leakcheck(&cnt_all, leak_nctx, filename);
+ if (prof_dump_close(propagate_err)) {
+ return true;
+ }
- return (false);
+ return false;
label_write_error:
prof_dump_close(propagate_err);
-label_open_close_error:
- while ((ctx.p = ql_first(&ctx_ql)) != NULL)
- prof_dump_ctx_cleanup(ctx.p, &ctx_ql);
- malloc_mutex_unlock(&prof_dump_mtx);
- return (true);
+ return true;
}
-#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
-#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
-static void
-prof_dump_filename(char *filename, char v, int64_t vseq)
-{
+static bool
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
+ bool leakcheck) {
+ cassert(config_prof);
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return true;
+ }
+
+ pre_reentrancy(tsd, NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+
+ prof_gctx_tree_t gctxs;
+ struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
+ struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
+ prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
+ &prof_gctx_merge_iter_arg, &gctxs);
+ bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata,
+ &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
+ &prof_gctx_dump_iter_arg, &gctxs);
+ prof_gctx_finish(tsd, &gctxs);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ post_reentrancy(tsd);
+
+ if (err) {
+ return true;
+ }
+
+ if (leakcheck) {
+ prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
+ prof_gctx_merge_iter_arg.leak_ngctx, filename);
+ }
+ return false;
+}
+
+#ifdef JEMALLOC_JET
+void
+prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
+ uint64_t *accumbytes) {
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+ struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
+ struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
+ prof_gctx_tree_t gctxs;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ if (curobjs != NULL) {
+ *curobjs = 0;
+ }
+ if (curbytes != NULL) {
+ *curbytes = 0;
+ }
+ if (accumobjs != NULL) {
+ *accumobjs = 0;
+ }
+ if (accumbytes != NULL) {
+ *accumbytes = 0;
+ }
+ return;
+ }
+
+ prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
+ &prof_gctx_merge_iter_arg, &gctxs);
+ prof_gctx_finish(tsd, &gctxs);
+
+ if (curobjs != NULL) {
+ *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
+ }
+ if (curbytes != NULL) {
+ *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
+ }
+ if (accumobjs != NULL) {
+ *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
+ }
+ if (accumbytes != NULL) {
+ *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
+ }
+}
+#endif
+#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
+static void
+prof_dump_filename(char *filename, char v, uint64_t vseq) {
cassert(config_prof);
if (vseq != VSEQ_INVALID) {
/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c%"PRId64".heap",
- opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
+ "%s.%d.%"FMTu64".%c%"FMTu64".heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
} else {
/* "<prefix>.<pid>.<seq>.<v>.heap" */
malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"PRIu64".%c.heap",
- opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
+ "%s.%d.%"FMTu64".%c.heap",
+ opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
}
prof_dump_seq++;
}
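A minimal standalone sketch of the "<prefix>.<pid>.<seq>.v<vseq>.heap" naming scheme built by prof_dump_filename() above, using plain snprintf() instead of malloc_snprintf()/FMTu64; the prefix, pid, and sequence values are made-up sample inputs, not jemalloc state.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void) {
		char filename[4096];            /* stands in for DUMP_FILENAME_BUFSIZE */
		const char *prefix = "jeprof";  /* stands in for opt_prof_prefix */
		int pid = 1234;                 /* stands in for prof_getpid() */
		uint64_t seq = 0, vseq = 7;
		/* 'i' is the dump-kind character; prof.c also uses 'f', 'm' and 'u'. */
		snprintf(filename, sizeof(filename),
		    "%s.%d.%" PRIu64 ".i%" PRIu64 ".heap", prefix, pid, seq, vseq);
		puts(filename);                 /* prints: jeprof.1234.0.i7.heap */
		return 0;
	}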
static void
-prof_fdump(void)
-{
+prof_fdump(void) {
+ tsd_t *tsd;
char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
+ assert(opt_prof_final);
+ assert(opt_prof_prefix[0] != '\0');
- if (prof_booted == false)
+ if (!prof_booted) {
return;
+ }
+ tsd = tsd_fetch();
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump_filename(filename, 'f', VSEQ_INVALID);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, opt_prof_leak);
+}
- if (opt_prof_final && opt_prof_prefix[0] != '\0') {
- malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, opt_prof_leak);
+bool
+prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
+ cassert(config_prof);
+
+#ifndef JEMALLOC_ATOMIC_U64
+ if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
+ WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
+ return true;
}
+ prof_accum->accumbytes = 0;
+#else
+ atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
+#endif
+ return false;
}
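The #ifndef JEMALLOC_ATOMIC_U64 branch above falls back to a mutex-protected counter when no 64-bit atomic is available. A hedged sketch of the same shape in portable C11/pthreads (accum_t, accum_init and accum_add are illustrative names, not jemalloc APIs):

	#include <pthread.h>
	#include <stdint.h>
	#ifndef __STDC_NO_ATOMICS__
	#include <stdatomic.h>
	#endif

	typedef struct {
	#ifdef __STDC_NO_ATOMICS__
		pthread_mutex_t mtx;        /* guards accumbytes */
		uint64_t accumbytes;
	#else
		_Atomic uint64_t accumbytes;
	#endif
	} accum_t;

	static int
	accum_init(accum_t *a) {
	#ifdef __STDC_NO_ATOMICS__
		if (pthread_mutex_init(&a->mtx, NULL) != 0) {
			return 1;
		}
		a->accumbytes = 0;
	#else
		atomic_store_explicit(&a->accumbytes, 0, memory_order_relaxed);
	#endif
		return 0;
	}

	static void
	accum_add(accum_t *a, uint64_t nbytes) {
	#ifdef __STDC_NO_ATOMICS__
		pthread_mutex_lock(&a->mtx);
		a->accumbytes += nbytes;
		pthread_mutex_unlock(&a->mtx);
	#else
		atomic_fetch_add_explicit(&a->accumbytes, nbytes,
		    memory_order_relaxed);
	#endif
	}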
void
-prof_idump(void)
-{
- prof_tdata_t *prof_tdata;
- char filename[PATH_MAX + 1];
+prof_idump(tsdn_t *tsdn) {
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
cassert(config_prof);
- if (prof_booted == false)
+ if (!prof_booted || tsdn_null(tsdn)) {
+ return;
+ }
+ tsd = tsdn_tsd(tsdn);
+ if (tsd_reentrancy_level_get(tsd) > 0) {
return;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ }
+
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
return;
- if (prof_tdata->enq) {
- prof_tdata->enq_idump = true;
+ }
+ if (tdata->enq) {
+ tdata->enq_idump = true;
return;
}
if (opt_prof_prefix[0] != '\0') {
- malloc_mutex_lock(&prof_dump_seq_mtx);
+ char filename[PATH_MAX + 1];
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, false);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, false);
}
}
bool
-prof_mdump(const char *filename)
-{
- char filename_buf[DUMP_FILENAME_BUFSIZE];
-
+prof_mdump(tsd_t *tsd, const char *filename) {
cassert(config_prof);
+ assert(tsd_reentrancy_level_get(tsd) == 0);
- if (opt_prof == false || prof_booted == false)
- return (true);
-
+ if (!opt_prof || !prof_booted) {
+ return true;
+ }
+ char filename_buf[DUMP_FILENAME_BUFSIZE];
if (filename == NULL) {
/* No filename specified, so automatically generate one. */
- if (opt_prof_prefix[0] == '\0')
- return (true);
- malloc_mutex_lock(&prof_dump_seq_mtx);
+ if (opt_prof_prefix[0] == '\0') {
+ return true;
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
prof_dump_mseq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
filename = filename_buf;
}
- return (prof_dump(true, filename, false));
+ return prof_dump(tsd, true, filename, false);
}
void
-prof_gdump(void)
-{
- prof_tdata_t *prof_tdata;
- char filename[DUMP_FILENAME_BUFSIZE];
+prof_gdump(tsdn_t *tsdn) {
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
cassert(config_prof);
- if (prof_booted == false)
+ if (!prof_booted || tsdn_null(tsdn)) {
+ return;
+ }
+ tsd = tsdn_tsd(tsdn);
+ if (tsd_reentrancy_level_get(tsd) > 0) {
return;
- prof_tdata = prof_tdata_get(false);
- if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ }
+
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
return;
- if (prof_tdata->enq) {
- prof_tdata->enq_gdump = true;
+ }
+ if (tdata->enq) {
+ tdata->enq_gdump = true;
return;
}
if (opt_prof_prefix[0] != '\0') {
- malloc_mutex_lock(&prof_dump_seq_mtx);
+ char filename[DUMP_FILENAME_BUFSIZE];
+ malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
- malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(false, filename, false);
+ malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
+ prof_dump(tsd, false, filename, false);
}
}
static void
-prof_bt_hash(const void *key, size_t r_hash[2])
-{
+prof_bt_hash(const void *key, size_t r_hash[2]) {
prof_bt_t *bt = (prof_bt_t *)key;
cassert(config_prof);
@@ -1187,106 +1866,389 @@ prof_bt_hash(const void *key, size_t r_hash[2])
}
static bool
-prof_bt_keycomp(const void *k1, const void *k2)
-{
+prof_bt_keycomp(const void *k1, const void *k2) {
const prof_bt_t *bt1 = (prof_bt_t *)k1;
const prof_bt_t *bt2 = (prof_bt_t *)k2;
cassert(config_prof);
- if (bt1->len != bt2->len)
- return (false);
+ if (bt1->len != bt2->len) {
+ return false;
+ }
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
-prof_tdata_t *
-prof_tdata_init(void)
-{
- prof_tdata_t *prof_tdata;
+static uint64_t
+prof_thr_uid_alloc(tsdn_t *tsdn) {
+ uint64_t thr_uid;
+
+ malloc_mutex_lock(tsdn, &next_thr_uid_mtx);
+ thr_uid = next_thr_uid;
+ next_thr_uid++;
+ malloc_mutex_unlock(tsdn, &next_thr_uid_mtx);
+
+ return thr_uid;
+}
+
+static prof_tdata_t *
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+ char *thread_name, bool active) {
+ prof_tdata_t *tdata;
cassert(config_prof);
/* Initialize an empty cache for this thread. */
- prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
- if (prof_tdata == NULL)
- return (NULL);
+ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
+ sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (tdata == NULL) {
+ return NULL;
+ }
- if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS,
- prof_bt_hash, prof_bt_keycomp)) {
- idalloc(prof_tdata);
- return (NULL);
+ tdata->lock = prof_tdata_mutex_choose(thr_uid);
+ tdata->thr_uid = thr_uid;
+ tdata->thr_discrim = thr_discrim;
+ tdata->thread_name = thread_name;
+ tdata->attached = true;
+ tdata->expired = false;
+ tdata->tctx_uid_next = 0;
+
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp)) {
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
+ return NULL;
}
- ql_new(&prof_tdata->lru_ql);
- prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
- if (prof_tdata->vec == NULL) {
- ckh_delete(&prof_tdata->bt2cnt);
- idalloc(prof_tdata);
- return (NULL);
+ tdata->prng_state = (uint64_t)(uintptr_t)tdata;
+ prof_sample_threshold_update(tdata);
+
+ tdata->enq = false;
+ tdata->enq_idump = false;
+ tdata->enq_gdump = false;
+
+ tdata->dumping = false;
+ tdata->active = active;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_insert(&tdatas, tdata);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ return tdata;
+}
+
+prof_tdata_t *
+prof_tdata_init(tsd_t *tsd) {
+ return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
+ NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
+}
+
+static bool
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
+ if (tdata->attached && !even_if_attached) {
+ return false;
}
+ if (ckh_count(&tdata->bt2tctx) != 0) {
+ return false;
+ }
+ return true;
+}
+
+static bool
+prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached) {
+ malloc_mutex_assert_owner(tsdn, tdata->lock);
- prof_tdata->prng_state = 0;
- prof_tdata->threshold = 0;
- prof_tdata->accum = 0;
+ return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
+}
+
+static void
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
- prof_tdata->enq = false;
- prof_tdata->enq_idump = false;
- prof_tdata->enq_gdump = false;
+ tdata_tree_remove(&tdatas, tdata);
- prof_tdata_tsd_set(&prof_tdata);
+ assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
- return (prof_tdata);
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
+ }
+ ckh_delete(tsd, &tdata->bt2tctx);
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
}
-void
-prof_tdata_cleanup(void *arg)
-{
- prof_thr_cnt_t *cnt;
- prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;
+static void
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+}
- cassert(config_prof);
+static void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
+ bool destroy_tdata;
- if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) {
- /*
- * Another destructor deallocated memory after this destructor
- * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY
- * in order to receive another callback.
- */
- prof_tdata = PROF_TDATA_STATE_PURGATORY;
- prof_tdata_tsd_set(&prof_tdata);
- } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to PROF_TDATA_STATE_PURGATORY so that other destructors
- * wouldn't cause re-creation of the prof_tdata. This time, do
- * nothing, so that the destructor will not be called again.
- */
- } else if (prof_tdata != NULL) {
- /*
- * Delete the hash table. All of its contents can still be
- * iterated over via the LRU.
- */
- ckh_delete(&prof_tdata->bt2cnt);
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ if (tdata->attached) {
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
+ true);
/*
- * Iteratively merge cnt's into the global stats and delete
- * them.
+ * Only detach if !destroy_tdata, because detaching would allow
+ * another thread to win the race to destroy tdata.
*/
- while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
- ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
- prof_ctx_merge(cnt->ctx, cnt);
- idalloc(cnt);
+ if (!destroy_tdata) {
+ tdata->attached = false;
}
- idalloc(prof_tdata->vec);
- idalloc(prof_tdata);
- prof_tdata = PROF_TDATA_STATE_PURGATORY;
- prof_tdata_tsd_set(&prof_tdata);
+ tsd_prof_tdata_set(tsd, NULL);
+ } else {
+ destroy_tdata = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (destroy_tdata) {
+ prof_tdata_destroy(tsd, tdata, true);
}
}
+prof_tdata_t *
+prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
+ uint64_t thr_uid = tdata->thr_uid;
+ uint64_t thr_discrim = tdata->thr_discrim + 1;
+ char *thread_name = (tdata->thread_name != NULL) ?
+ prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
+ bool active = tdata->active;
+
+ prof_tdata_detach(tsd, tdata);
+ return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
+ active);
+}
+
+static bool
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsdn, tdata->lock);
+ if (!tdata->expired) {
+ tdata->expired = true;
+ destroy_tdata = tdata->attached ? false :
+ prof_tdata_should_destroy(tsdn, tdata, false);
+ } else {
+ destroy_tdata = false;
+ }
+ malloc_mutex_unlock(tsdn, tdata->lock);
+
+ return destroy_tdata;
+}
+
+static prof_tdata_t *
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
+ void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
+}
+
void
-prof_boot0(void)
-{
+prof_reset(tsd_t *tsd, size_t lg_sample) {
+ prof_tdata_t *next;
+
+ assert(lg_sample < (sizeof(uint64_t) << 3));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ lg_prof_sample = lg_sample;
+
+ next = NULL;
+ do {
+ prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+ prof_tdata_reset_iter, (void *)tsd);
+ if (to_destroy != NULL) {
+ next = tdata_tree_next(&tdatas, to_destroy);
+ prof_tdata_destroy_locked(tsd, to_destroy, false);
+ } else {
+ next = NULL;
+ }
+ } while (next != NULL);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+}
+
+void
+prof_tdata_cleanup(tsd_t *tsd) {
+ prof_tdata_t *tdata;
+
+ if (!config_prof) {
+ return;
+ }
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (tdata != NULL) {
+ prof_tdata_detach(tsd, tdata);
+ }
+}
+
+bool
+prof_active_get(tsdn_t *tsdn) {
+ bool prof_active_current;
+
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
+ prof_active_current = prof_active;
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ return prof_active_current;
+}
+bool
+prof_active_set(tsdn_t *tsdn, bool active) {
+ bool prof_active_old;
+
+ malloc_mutex_lock(tsdn, &prof_active_mtx);
+ prof_active_old = prof_active;
+ prof_active = active;
+ malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ return prof_active_old;
+}
+
+const char *
+prof_thread_name_get(tsd_t *tsd) {
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return "";
+ }
+ return (tdata->thread_name != NULL ? tdata->thread_name : "");
+}
+
+static char *
+prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
+ char *ret;
+ size_t size;
+
+ if (thread_name == NULL) {
+ return NULL;
+ }
+
+ size = strlen(thread_name) + 1;
+ if (size == 1) {
+ return "";
+ }
+
+ ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (ret == NULL) {
+ return NULL;
+ }
+ memcpy(ret, thread_name, size);
+ return ret;
+}
+
+int
+prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
+ prof_tdata_t *tdata;
+ unsigned i;
+ char *s;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return EAGAIN;
+ }
+
+ /* Validate input. */
+ if (thread_name == NULL) {
+ return EFAULT;
+ }
+ for (i = 0; thread_name[i] != '\0'; i++) {
+ char c = thread_name[i];
+ if (!isgraph(c) && !isblank(c)) {
+ return EFAULT;
+ }
+ }
+
+ s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
+ if (s == NULL) {
+ return EAGAIN;
+ }
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
+ tdata->thread_name = NULL;
+ }
+ if (strlen(s) > 0) {
+ tdata->thread_name = s;
+ }
+ return 0;
+}
+
+bool
+prof_thread_active_get(tsd_t *tsd) {
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return false;
+ }
+ return tdata->active;
+}
+
+bool
+prof_thread_active_set(tsd_t *tsd, bool active) {
+ prof_tdata_t *tdata;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return true;
+ }
+ tdata->active = active;
+ return false;
+}
+
+bool
+prof_thread_active_init_get(tsdn_t *tsdn) {
+ bool active_init;
+
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ active_init = prof_thread_active_init;
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ return active_init;
+}
+
+bool
+prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) {
+ bool active_init_old;
+
+ malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx);
+ active_init_old = prof_thread_active_init;
+ prof_thread_active_init = active_init;
+ malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx);
+ return active_init_old;
+}
+
+bool
+prof_gdump_get(tsdn_t *tsdn) {
+ bool prof_gdump_current;
+
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ prof_gdump_current = prof_gdump_val;
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ return prof_gdump_current;
+}
+
+bool
+prof_gdump_set(tsdn_t *tsdn, bool gdump) {
+ bool prof_gdump_old;
+
+ malloc_mutex_lock(tsdn, &prof_gdump_mtx);
+ prof_gdump_old = prof_gdump_val;
+ prof_gdump_val = gdump;
+ malloc_mutex_unlock(tsdn, &prof_gdump_mtx);
+ return prof_gdump_old;
+}
+
+void
+prof_boot0(void) {
cassert(config_prof);
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
@@ -1294,17 +2256,15 @@ prof_boot0(void)
}
void
-prof_boot1(void)
-{
-
+prof_boot1(void) {
cassert(config_prof);
/*
- * opt_prof and prof_promote must be in their final state before any
- * arenas are initialized, so this function must be executed early.
+ * opt_prof must be in its final state before any arenas are
+ * initialized, so this function must be executed early.
*/
- if (opt_prof_leak && opt_prof == false) {
+ if (opt_prof_leak && !opt_prof) {
/*
* Enable opt_prof, but in such a way that profiles are never
* automatically dumped.
@@ -1317,48 +2277,101 @@ prof_boot1(void)
opt_lg_prof_interval);
}
}
-
- prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
}
bool
-prof_boot2(void)
-{
-
+prof_boot2(tsd_t *tsd) {
cassert(config_prof);
if (opt_prof) {
unsigned i;
- if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp))
- return (true);
- if (malloc_mutex_init(&bt2ctx_mtx))
- return (true);
- if (prof_tdata_tsd_boot()) {
- malloc_write(
- "<jemalloc>: Error in pthread_key_create()\n");
- abort();
+ lg_prof_sample = opt_lg_prof_sample;
+
+ prof_active = opt_prof_active;
+ if (malloc_mutex_init(&prof_active_mtx, "prof_active",
+ WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ prof_gdump_val = opt_prof_gdump;
+ if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
+ WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ prof_thread_active_init = opt_prof_thread_active_init;
+ if (malloc_mutex_init(&prof_thread_active_init_mtx,
+ "prof_thread_active_init",
+ WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp)) {
+ return true;
+ }
+ if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
+ WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ tdata_tree_new(&tdatas);
+ if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
+ WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
+ return true;
}
- if (malloc_mutex_init(&prof_dump_seq_mtx))
- return (true);
- if (malloc_mutex_init(&prof_dump_mtx))
- return (true);
+ next_thr_uid = 0;
+ if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
+ WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
+ WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
+ WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
- if (atexit(prof_fdump) != 0) {
+ if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
+ atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
- if (opt_abort)
+ if (opt_abort) {
abort();
+ }
}
- ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
- sizeof(malloc_mutex_t));
- if (ctx_locks == NULL)
- return (true);
+ gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+ b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
+ CACHELINE);
+ if (gctx_locks == NULL) {
+ return true;
+ }
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
- if (malloc_mutex_init(&ctx_locks[i]))
- return (true);
+ if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
+ WITNESS_RANK_PROF_GCTX,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ }
+
+ tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
+ b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
+ CACHELINE);
+ if (tdata_locks == NULL) {
+ return true;
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
+ WITNESS_RANK_PROF_TDATA,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
}
}
@@ -1372,48 +2385,79 @@ prof_boot2(void)
prof_booted = true;
- return (false);
+ return false;
}
void
-prof_prefork(void)
-{
-
- if (opt_prof) {
+prof_prefork0(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
unsigned i;
- malloc_mutex_prefork(&bt2ctx_mtx);
- malloc_mutex_prefork(&prof_dump_seq_mtx);
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_prefork(&ctx_locks[i]);
+ malloc_mutex_prefork(tsdn, &prof_dump_mtx);
+ malloc_mutex_prefork(tsdn, &bt2gctx_mtx);
+ malloc_mutex_prefork(tsdn, &tdatas_mtx);
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ malloc_mutex_prefork(tsdn, &tdata_locks[i]);
+ }
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ malloc_mutex_prefork(tsdn, &gctx_locks[i]);
+ }
}
}
void
-prof_postfork_parent(void)
-{
+prof_prefork1(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
+ malloc_mutex_prefork(tsdn, &prof_active_mtx);
+ malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
+ malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
+ }
+}
- if (opt_prof) {
+void
+prof_postfork_parent(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
unsigned i;
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_parent(&ctx_locks[i]);
- malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
- malloc_mutex_postfork_parent(&bt2ctx_mtx);
+ malloc_mutex_postfork_parent(tsdn,
+ &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
+ }
+ malloc_mutex_postfork_parent(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx);
}
}
void
-prof_postfork_child(void)
-{
-
- if (opt_prof) {
+prof_postfork_child(tsdn_t *tsdn) {
+ if (config_prof && opt_prof) {
unsigned i;
- for (i = 0; i < PROF_NCTX_LOCKS; i++)
- malloc_mutex_postfork_child(&ctx_locks[i]);
- malloc_mutex_postfork_child(&prof_dump_seq_mtx);
- malloc_mutex_postfork_child(&bt2ctx_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
+ malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
+ }
+ for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
+ }
+ malloc_mutex_postfork_child(tsdn, &tdatas_mtx);
+ malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_mtx);
}
}
diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c
index 205957ac4e..53702cf723 100644
--- a/deps/jemalloc/src/rtree.c
+++ b/deps/jemalloc/src/rtree.c
@@ -1,105 +1,320 @@
-#define JEMALLOC_RTREE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-
-rtree_t *
-rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc)
-{
- rtree_t *ret;
- unsigned bits_per_level, bits_in_leaf, height, i;
-
- assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));
-
- bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1;
- bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1;
- if (bits > bits_in_leaf) {
- height = 1 + (bits - bits_in_leaf) / bits_per_level;
- if ((height-1) * bits_per_level + bits_in_leaf != bits)
- height++;
- } else {
- height = 1;
- }
- assert((height-1) * bits_per_level + bits_in_leaf >= bits);
-
- ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) +
- (sizeof(unsigned) * height));
- if (ret == NULL)
- return (NULL);
- memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
- height));
-
- ret->alloc = alloc;
- ret->dalloc = dalloc;
- if (malloc_mutex_init(&ret->mutex)) {
- if (dalloc != NULL)
- dalloc(ret);
- return (NULL);
- }
- ret->height = height;
- if (height > 1) {
- if ((height-1) * bits_per_level + bits_in_leaf > bits) {
- ret->level2bits[0] = (bits - bits_in_leaf) %
- bits_per_level;
- } else
- ret->level2bits[0] = bits_per_level;
- for (i = 1; i < height-1; i++)
- ret->level2bits[i] = bits_per_level;
- ret->level2bits[height-1] = bits_in_leaf;
- } else
- ret->level2bits[0] = bits;
-
- ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]);
- if (ret->root == NULL) {
- if (dalloc != NULL)
- dalloc(ret);
- return (NULL);
- }
- memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]);
-
- return (ret);
+#define JEMALLOC_RTREE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
+
+/*
+ * Only the most significant bits of keys passed to rtree_{read,write}() are
+ * used.
+ */
+bool
+rtree_new(rtree_t *rtree, bool zeroed) {
+#ifdef JEMALLOC_JET
+ if (!zeroed) {
+ memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
+ }
+#else
+ assert(zeroed);
+#endif
+
+ if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ return false;
+}
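The comment above notes that only the most significant bits of a key take part in the radix walk. A generic illustration (not the jemalloc rtree_leafkey()/rtree_subkey() macros; lg_range is an assumed parameter): two keys that agree in everything above their low lg_range bits land in the same leaf.

	#include <stdint.h>

	/* Keep only the high-order bits of a pointer-sized key. */
	static uintptr_t
	leafkey(uintptr_t key, unsigned lg_range) {
		uintptr_t low_mask = (((uintptr_t)1) << lg_range) - 1;
		return key & ~low_mask;
	}
	/* e.g. leafkey(0x7f12345678, 12) == leafkey(0x7f12345abc, 12) */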
+
+static rtree_node_elm_t *
+rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
+ sizeof(rtree_node_elm_t), CACHELINE);
}
+rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
static void
-rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level)
-{
+rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
+ /* Nodes are never deleted during normal operation. */
+ not_reached();
+}
+UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
+ rtree_node_dalloc_impl;
+
+static rtree_leaf_elm_t *
+rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
+ sizeof(rtree_leaf_elm_t), CACHELINE);
+}
+rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
- if (level < rtree->height - 1) {
- size_t nchildren, i;
+static void
+rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
+ /* Leaves are never deleted during normal operation. */
+ not_reached();
+}
+UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
+ rtree_leaf_dalloc_impl;
- nchildren = ZU(1) << rtree->level2bits[level];
- for (i = 0; i < nchildren; i++) {
- void **child = (void **)node[i];
- if (child != NULL)
- rtree_delete_subtree(rtree, child, level + 1);
+#ifdef JEMALLOC_JET
+# if RTREE_HEIGHT > 1
+static void
+rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
+ unsigned level) {
+ size_t nchildren = ZU(1) << rtree_levels[level].bits;
+ if (level + 2 < RTREE_HEIGHT) {
+ for (size_t i = 0; i < nchildren; i++) {
+ rtree_node_elm_t *node =
+ (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
+ ATOMIC_RELAXED);
+ if (node != NULL) {
+ rtree_delete_subtree(tsdn, rtree, node, level +
+ 1);
+ }
+ }
+ } else {
+ for (size_t i = 0; i < nchildren; i++) {
+ rtree_leaf_elm_t *leaf =
+ (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
+ ATOMIC_RELAXED);
+ if (leaf != NULL) {
+ rtree_leaf_dalloc(tsdn, rtree, leaf);
+ }
}
}
- rtree->dalloc(node);
+
+ if (subtree != rtree->root) {
+ rtree_node_dalloc(tsdn, rtree, subtree);
+ }
}
+# endif
void
-rtree_delete(rtree_t *rtree)
-{
+rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
+# if RTREE_HEIGHT > 1
+ rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
+# endif
+}
+#endif
- rtree_delete_subtree(rtree, rtree->root, 0);
- rtree->dalloc(rtree);
+static rtree_node_elm_t *
+rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
+ atomic_p_t *elmp) {
+ malloc_mutex_lock(tsdn, &rtree->init_lock);
+ /*
+ * If *elmp is non-null, then it was initialized with the init lock
+ * held, so we can get by with 'relaxed' here.
+ */
+ rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED);
+ if (node == NULL) {
+ node = rtree_node_alloc(tsdn, rtree, ZU(1) <<
+ rtree_levels[level].bits);
+ if (node == NULL) {
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+ return NULL;
+ }
+ /*
+ * Even though we hold the lock, a later reader might not; we
+ * need release semantics.
+ */
+ atomic_store_p(elmp, node, ATOMIC_RELEASE);
+ }
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+
+ return node;
}
-void
-rtree_prefork(rtree_t *rtree)
-{
+static rtree_leaf_elm_t *
+rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) {
+ malloc_mutex_lock(tsdn, &rtree->init_lock);
+ /*
+ * If *elmp is non-null, then it was initialized with the init lock
+ * held, so we can get by with 'relaxed' here.
+ */
+ rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED);
+ if (leaf == NULL) {
+ leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) <<
+ rtree_levels[RTREE_HEIGHT-1].bits);
+ if (leaf == NULL) {
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
+ return NULL;
+ }
+ /*
+ * Even though we hold the lock, a later reader might not; we
+ * need release semantics.
+ */
+ atomic_store_p(elmp, leaf, ATOMIC_RELEASE);
+ }
+ malloc_mutex_unlock(tsdn, &rtree->init_lock);
- malloc_mutex_prefork(&rtree->mutex);
+ return leaf;
}
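rtree_node_init() and rtree_leaf_init() above both follow the same publication discipline: check under the init lock with a relaxed load, allocate if needed, and publish with a release store so that lock-free readers who later acquire-load the pointer also observe the initialized contents. A minimal self-contained sketch of that pattern in plain C11/pthreads (slot, slot_init and slot_get are illustrative names, not jemalloc APIs):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdlib.h>

	typedef struct { int payload[64]; } node_t;

	static _Atomic(node_t *) slot;  /* static zero-init: starts out NULL */
	static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

	static node_t *
	slot_init(void) {
		pthread_mutex_lock(&init_lock);
		/*
		 * Relaxed is enough here: any non-NULL value was stored while
		 * this same lock was held, so the lock orders us after it.
		 */
		node_t *n = atomic_load_explicit(&slot, memory_order_relaxed);
		if (n == NULL) {
			n = calloc(1, sizeof(*n));
			if (n != NULL) {
				/* Release: readers that see the pointer see its contents. */
				atomic_store_explicit(&slot, n, memory_order_release);
			}
		}
		pthread_mutex_unlock(&init_lock);
		return n;
	}

	static node_t *
	slot_get(void) {
		node_t *n = atomic_load_explicit(&slot, memory_order_acquire);
		return (n != NULL) ? n : slot_init();
	}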
-void
-rtree_postfork_parent(rtree_t *rtree)
-{
+static bool
+rtree_node_valid(rtree_node_elm_t *node) {
+ return ((uintptr_t)node != (uintptr_t)0);
+}
- malloc_mutex_postfork_parent(&rtree->mutex);
+static bool
+rtree_leaf_valid(rtree_leaf_elm_t *leaf) {
+ return ((uintptr_t)leaf != (uintptr_t)0);
}
-void
-rtree_postfork_child(rtree_t *rtree)
-{
+static rtree_node_elm_t *
+rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) {
+ rtree_node_elm_t *node;
+
+ if (dependent) {
+ node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_RELAXED);
+ } else {
+ node = (rtree_node_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_ACQUIRE);
+ }
- malloc_mutex_postfork_child(&rtree->mutex);
+ assert(!dependent || node != NULL);
+ return node;
+}
+
+static rtree_node_elm_t *
+rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
+ unsigned level, bool dependent) {
+ rtree_node_elm_t *node;
+
+ node = rtree_child_node_tryread(elm, dependent);
+ if (!dependent && unlikely(!rtree_node_valid(node))) {
+ node = rtree_node_init(tsdn, rtree, level + 1, &elm->child);
+ }
+ assert(!dependent || node != NULL);
+ return node;
+}
+
+static rtree_leaf_elm_t *
+rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) {
+ rtree_leaf_elm_t *leaf;
+
+ if (dependent) {
+ leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_RELAXED);
+ } else {
+ leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child,
+ ATOMIC_ACQUIRE);
+ }
+
+ assert(!dependent || leaf != NULL);
+ return leaf;
+}
+
+static rtree_leaf_elm_t *
+rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm,
+ unsigned level, bool dependent) {
+ rtree_leaf_elm_t *leaf;
+
+ leaf = rtree_child_leaf_tryread(elm, dependent);
+ if (!dependent && unlikely(!rtree_leaf_valid(leaf))) {
+ leaf = rtree_leaf_init(tsdn, rtree, &elm->child);
+ }
+ assert(!dependent || leaf != NULL);
+ return leaf;
+}
+
+rtree_leaf_elm_t *
+rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, bool dependent, bool init_missing) {
+ rtree_node_elm_t *node;
+ rtree_leaf_elm_t *leaf;
+#if RTREE_HEIGHT > 1
+ node = rtree->root;
+#else
+ leaf = rtree->root;
+#endif
+
+ if (config_debug) {
+ uintptr_t leafkey = rtree_leafkey(key);
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
+ assert(rtree_ctx->cache[i].leafkey != leafkey);
+ }
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
+ assert(rtree_ctx->l2_cache[i].leafkey != leafkey);
+ }
+ }
+
+#define RTREE_GET_CHILD(level) { \
+ assert(level < RTREE_HEIGHT-1); \
+ if (level != 0 && !dependent && \
+ unlikely(!rtree_node_valid(node))) { \
+ return NULL; \
+ } \
+ uintptr_t subkey = rtree_subkey(key, level); \
+ if (level + 2 < RTREE_HEIGHT) { \
+ node = init_missing ? \
+ rtree_child_node_read(tsdn, rtree, \
+ &node[subkey], level, dependent) : \
+ rtree_child_node_tryread(&node[subkey], \
+ dependent); \
+ } else { \
+ leaf = init_missing ? \
+ rtree_child_leaf_read(tsdn, rtree, \
+ &node[subkey], level, dependent) : \
+ rtree_child_leaf_tryread(&node[subkey], \
+ dependent); \
+ } \
+ }
+ /*
+ * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss):
+ * (1) evict last entry in L2 cache; (2) move the collision slot from L1
+ * cache down to L2; and (3) fill L1.
+ */
+#define RTREE_GET_LEAF(level) { \
+ assert(level == RTREE_HEIGHT-1); \
+ if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \
+ return NULL; \
+ } \
+ if (RTREE_CTX_NCACHE_L2 > 1) { \
+ memmove(&rtree_ctx->l2_cache[1], \
+ &rtree_ctx->l2_cache[0], \
+ sizeof(rtree_ctx_cache_elm_t) * \
+ (RTREE_CTX_NCACHE_L2 - 1)); \
+ } \
+ size_t slot = rtree_cache_direct_map(key); \
+ rtree_ctx->l2_cache[0].leafkey = \
+ rtree_ctx->cache[slot].leafkey; \
+ rtree_ctx->l2_cache[0].leaf = \
+ rtree_ctx->cache[slot].leaf; \
+ uintptr_t leafkey = rtree_leafkey(key); \
+ rtree_ctx->cache[slot].leafkey = leafkey; \
+ rtree_ctx->cache[slot].leaf = leaf; \
+ uintptr_t subkey = rtree_subkey(key, level); \
+ return &leaf[subkey]; \
+ }
+ if (RTREE_HEIGHT > 1) {
+ RTREE_GET_CHILD(0)
+ }
+ if (RTREE_HEIGHT > 2) {
+ RTREE_GET_CHILD(1)
+ }
+ if (RTREE_HEIGHT > 3) {
+ for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) {
+ RTREE_GET_CHILD(i)
+ }
+ }
+ RTREE_GET_LEAF(RTREE_HEIGHT-1)
+#undef RTREE_GET_CHILD
+#undef RTREE_GET_LEAF
+ not_reached();
+}
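The cache-replacement comment inside rtree_leaf_elm_lookup_hard() describes a three-step promotion on a miss. A hedged standalone sketch of the same scheme with an assumed direct-mapped L1 and a small linearly-probed L2 (two_level_cache_t and the slot counts are made up for illustration; the real layout lives in rtree_ctx_t):

	#include <stdint.h>
	#include <string.h>

	#define L1_NSLOTS 16	/* direct mapped by key */
	#define L2_NSLOTS 8	/* probed linearly after an L1 miss */

	typedef struct {
		uintptr_t key;
		void *leaf;
	} cache_elm_t;

	typedef struct {
		cache_elm_t l1[L1_NSLOTS];
		cache_elm_t l2[L2_NSLOTS];
	} two_level_cache_t;

	static void
	cache_fill_on_miss(two_level_cache_t *c, uintptr_t key, void *leaf) {
		size_t slot = (size_t)(key % L1_NSLOTS);

		/* (1) Evict the last L2 entry by shifting everything down one. */
		memmove(&c->l2[1], &c->l2[0],
		    sizeof(cache_elm_t) * (L2_NSLOTS - 1));
		/* (2) Demote the colliding L1 slot into the freed L2 head. */
		c->l2[0] = c->l1[slot];
		/* (3) Fill the L1 slot with the freshly looked-up leaf. */
		c->l1[slot].key = key;
		c->l1[slot].leaf = leaf;
	}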
+
+void
+rtree_ctx_data_init(rtree_ctx_t *ctx) {
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) {
+ rtree_ctx_cache_elm_t *cache = &ctx->cache[i];
+ cache->leafkey = RTREE_LEAFKEY_INVALID;
+ cache->leaf = NULL;
+ }
+ for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) {
+ rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i];
+ cache->leafkey = RTREE_LEAFKEY_INVALID;
+ cache->leaf = NULL;
+ }
}
diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c
index bef2ab33cd..087df7676e 100644
--- a/deps/jemalloc/src/stats.c
+++ b/deps/jemalloc/src/stats.c
@@ -1,549 +1,1285 @@
-#define JEMALLOC_STATS_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_STATS_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
-#define CTL_GET(n, v, t) do { \
- size_t sz = sizeof(t); \
- xmallctl(n, v, &sz, NULL, 0); \
-} while (0)
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/mutex_prof.h"
-#define CTL_I_GET(n, v, t) do { \
- size_t mib[6]; \
- size_t miblen = sizeof(mib) / sizeof(size_t); \
+const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
+#define OP(mtx) #mtx,
+ MUTEX_PROF_GLOBAL_MUTEXES
+#undef OP
+};
+
+const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
+#define OP(mtx) #mtx,
+ MUTEX_PROF_ARENA_MUTEXES
+#undef OP
+};
+
+#define CTL_GET(n, v, t) do { \
size_t sz = sizeof(t); \
- xmallctlnametomib(n, mib, &miblen); \
- mib[2] = i; \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+ xmallctl(n, (void *)v, &sz, NULL, 0); \
} while (0)
-#define CTL_J_GET(n, v, t) do { \
- size_t mib[6]; \
+#define CTL_M2_GET(n, i, v, t) do { \
+ size_t mib[CTL_MAX_DEPTH]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
- mib[2] = j; \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+ mib[2] = (i); \
+ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
-#define CTL_IJ_GET(n, v, t) do { \
- size_t mib[6]; \
+#define CTL_M2_M4_GET(n, i, j, v, t) do { \
+ size_t mib[CTL_MAX_DEPTH]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
- mib[2] = i; \
- mib[4] = j; \
- xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \
+ mib[2] = (i); \
+ mib[4] = (j); \
+ xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
/******************************************************************************/
/* Data. */
-bool opt_stats_print = false;
-
-size_t stats_cactive = 0;
+bool opt_stats_print = false;
+char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i);
-static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i);
-static void stats_arena_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i, bool bins, bool large);
+/* Calculate x.yyy and output a string (takes a fixed-size char array). */
+static bool
+get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
+ if (divisor == 0 || dividend > divisor) {
+ /* The rate is not supposed to be greater than 1. */
+ return true;
+ }
+ if (dividend > 0) {
+ assert(UINT64_MAX / dividend >= 1000);
+ }
-/******************************************************************************/
+ unsigned n = (unsigned)((dividend * 1000) / divisor);
+ if (n < 10) {
+ malloc_snprintf(str, 6, "0.00%u", n);
+ } else if (n < 100) {
+ malloc_snprintf(str, 6, "0.0%u", n);
+ } else if (n < 1000) {
+ malloc_snprintf(str, 6, "0.%u", n);
+ } else {
+ malloc_snprintf(str, 6, "1");
+ }
+
+ return false;
+}
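get_rate_str() above prints a ratio as a 0.xxx fixed-point string by working in thousandths: n = dividend * 1000 / divisor. A small self-contained sketch of the happy path (divisor > 0, dividend <= divisor) using plain snprintf(); rate_str is an illustrative name, not the jemalloc helper:

	#include <stdint.h>
	#include <stdio.h>

	static void
	rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
		unsigned n = (unsigned)((dividend * 1000) / divisor);
		if (n < 10) {
			snprintf(str, 6, "0.00%u", n);
		} else if (n < 100) {
			snprintf(str, 6, "0.0%u", n);
		} else if (n < 1000) {
			snprintf(str, 6, "0.%u", n);
		} else {
			snprintf(str, 6, "1");
		}
	}

	int
	main(void) {
		char buf[6];
		rate_str(375, 1000, buf);	/* "0.375" */
		puts(buf);
		rate_str(7, 1000, buf);		/* "0.007" */
		puts(buf);
		return 0;
	}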
+
+#define MUTEX_CTL_STR_MAX_LENGTH 128
+static void
+gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix,
+ const char *mutex, const char *counter) {
+ malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter);
+}
+
+static void
+read_arena_bin_mutex_stats(unsigned arena_ind, unsigned bin_ind,
+ uint64_t results[mutex_prof_num_counters]) {
+ char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+#define OP(c, t) \
+ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
+ "arenas.0.bins.0","mutex", #c); \
+ CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \
+ (t *)&results[mutex_counter_##c], t);
+MUTEX_PROF_COUNTERS
+#undef OP
+}
+
+static void
+mutex_stats_output_json(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *name, uint64_t stats[mutex_prof_num_counters],
+ const char *json_indent, bool last) {
+ malloc_cprintf(write_cb, cbopaque, "%s\"%s\": {\n", json_indent, name);
+
+ mutex_prof_counter_ind_t k = 0;
+ char *fmt_str[2] = {"%s\t\"%s\": %"FMTu32"%s\n",
+ "%s\t\"%s\": %"FMTu64"%s\n"};
+#define OP(c, t) \
+ malloc_cprintf(write_cb, cbopaque, \
+ fmt_str[sizeof(t) / sizeof(uint32_t) - 1], \
+ json_indent, #c, (t)stats[mutex_counter_##c], \
+ (++k == mutex_prof_num_counters) ? "" : ",");
+MUTEX_PROF_COUNTERS
+#undef OP
+ malloc_cprintf(write_cb, cbopaque, "%s}%s\n", json_indent,
+ last ? "" : ",");
+}
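mutex_stats_output_json() above (and mutex_stats_output() later in this file) pick a printf format with fmt_str[sizeof(t) / sizeof(uint32_t) - 1]: a 4-byte counter indexes slot 0 and an 8-byte counter indexes slot 1. A tiny sketch of just that indexing trick (illustrative formats, not the jemalloc table):

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int
	main(void) {
		const char *fmt_str[2] = {"u32: %" PRIu32 "\n", "u64: %" PRIu64 "\n"};
		uint32_t a = 7;		/* sizeof(a)/sizeof(uint32_t) - 1 == 0 */
		uint64_t b = 9;		/* sizeof(b)/sizeof(uint32_t) - 1 == 1 */
		printf(fmt_str[sizeof(a) / sizeof(uint32_t) - 1], a);
		printf(fmt_str[sizeof(b) / sizeof(uint32_t) - 1], b);
		return 0;
	}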
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i)
-{
+ bool json, bool large, bool mutex, unsigned i) {
size_t page;
- bool config_tcache;
- unsigned nbins, j, gap_start;
+ bool in_gap, in_gap_prev;
+ unsigned nbins, j;
CTL_GET("arenas.page", &page, size_t);
- CTL_GET("config.tcache", &config_tcache, bool);
- if (config_tcache) {
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ if (json) {
malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
- " ndalloc nrequests nfills nflushes"
- " newruns reruns curruns\n");
+ "\t\t\t\t\"bins\": [\n");
} else {
+ char *mutex_counters = " n_lock_ops n_waiting"
+ " n_spin_acq total_wait_ns max_wait_ns\n";
malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
- " ndalloc newruns reruns curruns\n");
+ "bins: size ind allocated nmalloc"
+ " ndalloc nrequests curregs curslabs regs"
+ " pgs util nfills nflushes newslabs"
+ " reslabs%s", mutex ? mutex_counters : "\n");
}
- CTL_GET("arenas.nbins", &nbins, unsigned);
- for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
- uint64_t nruns;
+ for (j = 0, in_gap = false; j < nbins; j++) {
+ uint64_t nslabs;
+ size_t reg_size, slab_size, curregs;
+ size_t curslabs;
+ uint32_t nregs;
+ uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
+ uint64_t nreslabs;
- CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t);
- if (nruns == 0) {
- if (gap_start == UINT_MAX)
- gap_start = j;
- } else {
- size_t reg_size, run_size, allocated;
- uint32_t nregs;
- uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
- uint64_t reruns;
- size_t curruns;
-
- if (gap_start != UINT_MAX) {
- if (j > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_cprintf(write_cb, cbopaque,
- "[%u..%u]\n", gap_start,
- j - 1);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
+ uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nslabs == 0);
+
+ if (!json && in_gap_prev && !in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+
+ CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
+ CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
+ CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);
+
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
+ size_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
+ uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
+ size_t);
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t{\n"
+ "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"curregs\": %zu,\n"
+ "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"nreslabs\": %"FMTu64",\n"
+ "\t\t\t\t\t\t\"curslabs\": %zu%s\n",
+ nmalloc, ndalloc, curregs, nrequests, nfills,
+ nflushes, nreslabs, curslabs, mutex ? "," : "");
+ if (mutex) {
+ uint64_t mutex_stats[mutex_prof_num_counters];
+ read_arena_bin_mutex_stats(i, j, mutex_stats);
+ mutex_stats_output_json(write_cb, cbopaque,
+ "mutex", mutex_stats, "\t\t\t\t\t\t", true);
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t}%s\n",
+ (j + 1 < nbins) ? "," : "");
+ } else if (!in_gap) {
+ size_t availregs = nregs * curslabs;
+ char util[6];
+ if (get_rate_str((uint64_t)curregs, (uint64_t)availregs,
+ util)) {
+ if (availregs == 0) {
+ malloc_snprintf(util, sizeof(util),
+ "1");
+ } else if (curregs > availregs) {
+ /*
+ * Race detected: the counters were read
+ * in separate mallctl calls and
+ * concurrent operations happened in
+ * between. In this case no meaningful
+ * utilization can be computed.
+ */
+ malloc_snprintf(util, sizeof(util),
+ " race");
} else {
- /* Gap of one size class. */
- malloc_cprintf(write_cb, cbopaque,
- "[%u]\n", gap_start);
+ not_reached();
}
- gap_start = UINT_MAX;
}
- CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
- CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
- CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.allocated",
- &allocated, size_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc",
- &nmalloc, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc",
- &ndalloc, uint64_t);
- if (config_tcache) {
- CTL_IJ_GET("stats.arenas.0.bins.0.nrequests",
- &nrequests, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nfills",
- &nfills, uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.nflushes",
- &nflushes, uint64_t);
+ uint64_t mutex_stats[mutex_prof_num_counters];
+ if (mutex) {
+ read_arena_bin_mutex_stats(i, j, mutex_stats);
}
- CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
- uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
- size_t);
- if (config_tcache) {
+
+ malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12"
+ FMTu64" %12"FMTu64" %12"FMTu64" %12zu %12zu %4u"
+ " %3zu %-5s %12"FMTu64" %12"FMTu64" %12"FMTu64
+ " %12"FMTu64, reg_size, j, curregs * reg_size,
+ nmalloc, ndalloc, nrequests, curregs, curslabs,
+ nregs, slab_size / page, util, nfills, nflushes,
+ nslabs, nreslabs);
+
+ /* Output less info for bin mutexes to save space. */
+ if (mutex) {
malloc_cprintf(write_cb, cbopaque,
- "%13u %5zu %4u %3zu %12zu %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- j, reg_size, nregs, run_size / page,
- allocated, nmalloc, ndalloc, nrequests,
- nfills, nflushes, nruns, reruns, curruns);
+ " %12"FMTu64" %12"FMTu64" %12"FMTu64
+ " %14"FMTu64" %12"FMTu64"\n",
+ mutex_stats[mutex_counter_num_ops],
+ mutex_stats[mutex_counter_num_wait],
+ mutex_stats[mutex_counter_num_spin_acq],
+ mutex_stats[mutex_counter_total_wait_time],
+ mutex_stats[mutex_counter_max_wait_time]);
} else {
- malloc_cprintf(write_cb, cbopaque,
- "%13u %5zu %4u %3zu %12zu %12"PRIu64
- " %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- j, reg_size, nregs, run_size / page,
- allocated, nmalloc, ndalloc, nruns, reruns,
- curruns);
+ malloc_cprintf(write_cb, cbopaque, "\n");
}
}
}
- if (gap_start != UINT_MAX) {
- if (j > gap_start + 1) {
- /* Gap of more than one size class. */
- malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n",
- gap_start, j - 1);
- } else {
- /* Gap of one size class. */
- malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t]%s\n", large ? "," : "");
+ } else {
+ if (in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
}
}
}
static void
-stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i)
-{
- size_t page, nlruns, j;
- ssize_t gap_start;
-
- CTL_GET("arenas.page", &page, size_t);
+stats_arena_lextents_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, bool json, unsigned i) {
+ unsigned nbins, nlextents, j;
+ bool in_gap, in_gap_prev;
- malloc_cprintf(write_cb, cbopaque,
- "large: size pages nmalloc ndalloc nrequests"
- " curruns\n");
- CTL_GET("arenas.nlruns", &nlruns, size_t);
- for (j = 0, gap_start = -1; j < nlruns; j++) {
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ CTL_GET("arenas.nlextents", &nlextents, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"lextents\": [\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "large: size ind allocated nmalloc"
+ " ndalloc nrequests curlextents\n");
+ }
+ for (j = 0, in_gap = false; j < nlextents; j++) {
uint64_t nmalloc, ndalloc, nrequests;
- size_t run_size, curruns;
+ size_t lextent_size, curlextents;
- CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
- uint64_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc,
- uint64_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests,
- uint64_t);
- if (nrequests == 0) {
- if (gap_start == -1)
- gap_start = j;
- } else {
- CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
- size_t);
- if (gap_start != -1) {
- malloc_cprintf(write_cb, cbopaque, "[%zu]\n",
- j - gap_start);
- gap_start = -1;
- }
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
+ &nmalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
+ &ndalloc, uint64_t);
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
+ &nrequests, uint64_t);
+ in_gap_prev = in_gap;
+ in_gap = (nrequests == 0);
+
+ if (!json && in_gap_prev && !in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ " ---\n");
+ }
+
+ CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
+ CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
+ &curlextents, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t{\n"
+ "\t\t\t\t\t\t\"curlextents\": %zu\n"
+ "\t\t\t\t\t}%s\n",
+ curlextents,
+ (j + 1 < nlextents) ? "," : "");
+ } else if (!in_gap) {
+ malloc_cprintf(write_cb, cbopaque,
+ "%20zu %3u %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64" %12zu\n",
+ lextent_size, nbins + j,
+ curlextents * lextent_size, nmalloc, ndalloc,
+ nrequests, curlextents);
+ }
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t]\n");
+ } else {
+ if (in_gap) {
malloc_cprintf(write_cb, cbopaque,
- "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu\n",
- run_size, run_size / page, nmalloc, ndalloc,
- nrequests, curruns);
+ " ---\n");
+ }
+ }
+}
+
+static void
+read_arena_mutex_stats(unsigned arena_ind,
+ uint64_t results[mutex_prof_num_arena_mutexes][mutex_prof_num_counters]) {
+ char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+
+ mutex_prof_arena_ind_t i;
+ for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
+#define OP(c, t) \
+ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
+ "arenas.0.mutexes", arena_mutex_names[i], #c); \
+ CTL_M2_GET(cmd, arena_ind, \
+ (t *)&results[i][mutex_counter_##c], t);
+MUTEX_PROF_COUNTERS
+#undef OP
+ }
+}
+
+static void
+mutex_stats_output(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *name, uint64_t stats[mutex_prof_num_counters],
+ bool first_mutex) {
+ if (first_mutex) {
+ /* Print title. */
+ malloc_cprintf(write_cb, cbopaque,
+ " n_lock_ops n_waiting"
+ " n_spin_acq n_owner_switch total_wait_ns"
+ " max_wait_ns max_n_thds\n");
+ }
+
+ malloc_cprintf(write_cb, cbopaque, "%s", name);
+ malloc_cprintf(write_cb, cbopaque, ":%*c",
+ (int)(20 - strlen(name)), ' ');
+
+ char *fmt_str[2] = {"%12"FMTu32, "%16"FMTu64};
+#define OP(c, t) \
+ malloc_cprintf(write_cb, cbopaque, \
+ fmt_str[sizeof(t) / sizeof(uint32_t) - 1], \
+ (t)stats[mutex_counter_##c]);
+MUTEX_PROF_COUNTERS
+#undef OP
+ malloc_cprintf(write_cb, cbopaque, "\n");
+}
+
+static void
+stats_arena_mutexes_print(void (*write_cb)(void *, const char *),
+ void *cbopaque, bool json, bool json_end, unsigned arena_ind) {
+ uint64_t mutex_stats[mutex_prof_num_arena_mutexes][mutex_prof_num_counters];
+ read_arena_mutex_stats(arena_ind, mutex_stats);
+
+ /* Output mutex stats. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t\t\"mutexes\": {\n");
+ mutex_prof_arena_ind_t i, last_mutex;
+ last_mutex = mutex_prof_num_arena_mutexes - 1;
+ for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
+ mutex_stats_output_json(write_cb, cbopaque,
+ arena_mutex_names[i], mutex_stats[i],
+ "\t\t\t\t\t", (i == last_mutex));
+ }
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t\t}%s\n",
+ json_end ? "" : ",");
+ } else {
+ mutex_prof_arena_ind_t i;
+ for (i = 0; i < mutex_prof_num_arena_mutexes; i++) {
+ mutex_stats_output(write_cb, cbopaque,
+ arena_mutex_names[i], mutex_stats[i], i == 0);
}
}
- if (gap_start != -1)
- malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start);
}
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i, bool bins, bool large)
-{
+ bool json, unsigned i, bool bins, bool large, bool mutex) {
unsigned nthreads;
const char *dss;
- size_t page, pactive, pdirty, mapped;
- uint64_t npurge, nmadvise, purged;
+ ssize_t dirty_decay_ms, muzzy_decay_ms;
+ size_t page, pactive, pdirty, pmuzzy, mapped, retained;
+ size_t base, internal, resident;
+ uint64_t dirty_npurge, dirty_nmadvise, dirty_purged;
+ uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
+ size_t tcache_bytes;
+ uint64_t uptime;
CTL_GET("arenas.page", &page, size_t);
- CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
- malloc_cprintf(write_cb, cbopaque,
- "assigned threads: %u\n", nthreads);
- CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
- malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
- dss);
- CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
- CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
- CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
- CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t);
- CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t);
- malloc_cprintf(write_cb, cbopaque,
- "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s,"
- " %"PRIu64" madvise%s, %"PRIu64" purged\n",
- pactive, pdirty, npurge, npurge == 1 ? "" : "s",
- nmadvise, nmadvise == 1 ? "" : "s", purged);
-
- malloc_cprintf(write_cb, cbopaque,
- " allocated nmalloc ndalloc nrequests\n");
- CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t);
- CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t);
- malloc_cprintf(write_cb, cbopaque,
- "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
- small_allocated, small_nmalloc, small_ndalloc, small_nrequests);
- CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t);
- CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t);
- CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t);
- malloc_cprintf(write_cb, cbopaque,
- "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
- large_allocated, large_nmalloc, large_ndalloc, large_nrequests);
- malloc_cprintf(write_cb, cbopaque,
- "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n",
- small_allocated + large_allocated,
- small_nmalloc + large_nmalloc,
- small_ndalloc + large_ndalloc,
- small_nrequests + large_nrequests);
- malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
- CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
- malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
-
- if (bins)
- stats_arena_bins_print(write_cb, cbopaque, i);
- if (large)
- stats_arena_lruns_print(write_cb, cbopaque, i);
-}
+ CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"nthreads\": %u,\n", nthreads);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "assigned threads: %u\n", nthreads);
+ }
-void
-stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts)
-{
- int err;
- uint64_t epoch;
- size_t u64sz;
- bool general = true;
- bool merged = true;
- bool unmerged = true;
- bool bins = true;
- bool large = true;
+ CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"uptime_ns\": %"FMTu64",\n", uptime);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "uptime: %"FMTu64"\n", uptime);
+ }
- /*
- * Refresh stats, in case mallctl() was called by the application.
- *
- * Check for OOM here, since refreshing the ctl cache can trigger
- * allocation. In practice, none of the subsequent mallctl()-related
- * calls in this function will cause OOM if this one succeeds.
- * */
- epoch = 1;
- u64sz = sizeof(uint64_t);
- err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
- if (err != 0) {
- if (err == EAGAIN) {
- malloc_write("<jemalloc>: Memory allocation failure in "
- "mallctl(\"epoch\", ...)\n");
- return;
+ CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dss\": \"%s\",\n", dss);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "dss allocation precedence: %s\n", dss);
+ }
+
+ CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms,
+ ssize_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms,
+ ssize_t);
+ CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t);
+ CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t);
+ CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t);
+ CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise,
+ uint64_t);
+ CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise,
+ uint64_t);
+ CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dirty_decay_ms\": %zd,\n", dirty_decay_ms);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"muzzy_decay_ms\": %zd,\n", muzzy_decay_ms);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"pactive\": %zu,\n", pactive);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"pdirty\": %zu,\n", pdirty);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"pmuzzy\": %zu,\n", pmuzzy);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dirty_npurge\": %"FMTu64",\n", dirty_npurge);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dirty_nmadvise\": %"FMTu64",\n", dirty_nmadvise);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"dirty_purged\": %"FMTu64",\n", dirty_purged);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"muzzy_npurge\": %"FMTu64",\n", muzzy_npurge);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"muzzy_nmadvise\": %"FMTu64",\n", muzzy_nmadvise);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"muzzy_purged\": %"FMTu64",\n", muzzy_purged);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "decaying: time npages sweeps madvises"
+ " purged\n");
+ if (dirty_decay_ms >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ " dirty: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
+ FMTu64"\n", dirty_decay_ms, pdirty, dirty_npurge,
+ dirty_nmadvise, dirty_purged);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ " dirty: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
+ FMTu64"\n", pdirty, dirty_npurge, dirty_nmadvise,
+ dirty_purged);
+ }
+ if (muzzy_decay_ms >= 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ " muzzy: %5zd %12zu %12"FMTu64" %12"FMTu64" %12"
+ FMTu64"\n", muzzy_decay_ms, pmuzzy, muzzy_npurge,
+ muzzy_nmadvise, muzzy_purged);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ " muzzy: N/A %12zu %12"FMTu64" %12"FMTu64" %12"
+ FMTu64"\n", pmuzzy, muzzy_npurge, muzzy_nmadvise,
+ muzzy_purged);
}
- malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
- "...)\n");
- abort();
}
- if (opts != NULL) {
- unsigned i;
+ CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests,
+ uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"small\": {\n");
- for (i = 0; opts[i] != '\0'; i++) {
- switch (opts[i]) {
- case 'g':
- general = false;
- break;
- case 'm':
- merged = false;
- break;
- case 'a':
- unmerged = false;
- break;
- case 'b':
- bins = false;
- break;
- case 'l':
- large = false;
- break;
- default:;
- }
- }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ " allocated nmalloc"
+ " ndalloc nrequests\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "small: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated, small_nmalloc, small_ndalloc,
+ small_nrequests);
}
- malloc_cprintf(write_cb, cbopaque,
- "___ Begin jemalloc statistics ___\n");
- if (general) {
- int err;
- const char *cpv;
- bool bv;
- unsigned uv;
- ssize_t ssv;
- size_t sv, bsz, ssz, sssz, cpsz;
-
- bsz = sizeof(bool);
- ssz = sizeof(size_t);
- sssz = sizeof(ssize_t);
- cpsz = sizeof(const char *);
-
- CTL_GET("version", &cpv, const char *);
+ CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated,
+ size_t);
+ CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t);
+ CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests,
+ uint64_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"large\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t},\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "large: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ large_allocated, large_nmalloc, large_ndalloc,
+ large_nrequests);
+ malloc_cprintf(write_cb, cbopaque,
+ "total: %12zu %12"FMTu64" %12"FMTu64
+ " %12"FMTu64"\n",
+ small_allocated + large_allocated, small_nmalloc +
+ large_nmalloc, small_ndalloc + large_ndalloc,
+ small_nrequests + large_nrequests);
+ }
+ if (!json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "active: %12zu\n", pactive * page);
+ }
+
+ CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"mapped\": %zu,\n", mapped);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "mapped: %12zu\n", mapped);
+ }
+
+ CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"retained\": %zu,\n", retained);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "retained: %12zu\n", retained);
+ }
+
+ CTL_M2_GET("stats.arenas.0.base", i, &base, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"base\": %zu,\n", base);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "base: %12zu\n", base);
+ }
+
+ CTL_M2_GET("stats.arenas.0.internal", i, &internal, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"internal\": %zu,\n", internal);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "internal: %12zu\n", internal);
+ }
+
+ CTL_M2_GET("stats.arenas.0.tcache_bytes", i, &tcache_bytes, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"tcache\": %zu,\n", tcache_bytes);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "tcache: %12zu\n", tcache_bytes);
+ }
+
+ CTL_M2_GET("stats.arenas.0.resident", i, &resident, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"resident\": %zu%s\n", resident,
+ (bins || large || mutex) ? "," : "");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "resident: %12zu\n", resident);
+ }
+
+ if (mutex) {
+ stats_arena_mutexes_print(write_cb, cbopaque, json,
+ !(bins || large), i);
+ }
+ if (bins) {
+ stats_arena_bins_print(write_cb, cbopaque, json, large, mutex,
+ i);
+ }
+ if (large) {
+ stats_arena_lextents_print(write_cb, cbopaque, json, i);
+ }
+}
+
+static void
+stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool more) {
+ const char *cpv;
+ bool bv;
+ unsigned uv;
+ uint32_t u32v;
+ uint64_t u64v;
+ ssize_t ssv;
+ size_t sv, bsz, usz, ssz, sssz, cpsz;
+
+ bsz = sizeof(bool);
+ usz = sizeof(unsigned);
+ ssz = sizeof(size_t);
+ sssz = sizeof(ssize_t);
+ cpsz = sizeof(const char *);
+
+ CTL_GET("version", &cpv, const char *);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"version\": \"%s\",\n", cpv);
+ } else {
malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
- CTL_GET("config.debug", &bv, bool);
+ }
+
+ /* config. */
+#define CONFIG_WRITE_BOOL_JSON(n, c) \
+ if (json) { \
+ CTL_GET("config."#n, &bv, bool); \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \
+ (c)); \
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"config\": {\n");
+ }
+
+ CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",")
+
+ CTL_GET("config.debug", &bv, bool);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"debug\": %s,\n", bv ? "true" : "false");
+ } else {
malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
bv ? "enabled" : "disabled");
+ }
+
+ CONFIG_WRITE_BOOL_JSON(fill, ",")
+ CONFIG_WRITE_BOOL_JSON(lazy_lock, ",")
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"malloc_conf\": \"%s\",\n",
+ config_malloc_conf);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "config.malloc_conf: \"%s\"\n", config_malloc_conf);
+ }
+
+ CONFIG_WRITE_BOOL_JSON(prof, ",")
+ CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",")
+ CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",")
+ CONFIG_WRITE_BOOL_JSON(stats, ",")
+ CONFIG_WRITE_BOOL_JSON(thp, ",")
+ CONFIG_WRITE_BOOL_JSON(utrace, ",")
+ CONFIG_WRITE_BOOL_JSON(xmalloc, "")
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t},\n");
+ }
+#undef CONFIG_WRITE_BOOL_JSON
-#define OPT_WRITE_BOOL(n) \
- if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
- == 0) { \
+ /* opt. */
+#define OPT_WRITE_BOOL(n, c) \
+ if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
+ "false", (c)); \
+ } else { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %s\n", bv ? "true" : "false"); \
- }
-#define OPT_WRITE_SIZE_T(n) \
- if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
- == 0) { \
+ } \
+ }
+#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \
+ bool bv2; \
+ if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \
+ je_mallctl(#m, (void *)&bv2, &bsz, NULL, 0) == 0) { \
+ if (json) { \
malloc_cprintf(write_cb, cbopaque, \
- " opt."#n": %zu\n", sv); \
- }
-#define OPT_WRITE_SSIZE_T(n) \
- if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
- == 0) { \
+ "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \
+ "false", (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %s ("#m": %s)\n", bv ? "true" \
+ : "false", bv2 ? "true" : "false"); \
+ } \
+ } \
+}
+#define OPT_WRITE_UNSIGNED(n, c) \
+ if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %u%s\n", uv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %u\n", uv); \
+ } \
+ }
+#define OPT_WRITE_SSIZE_T(n, c) \
+ if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
+ } else { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": %zd\n", ssv); \
- }
-#define OPT_WRITE_CHAR_P(n) \
- if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
- == 0) { \
+ } \
+ }
+#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \
+ ssize_t ssv2; \
+ if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \
+ je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \
+ } else { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zd ("#m": %zd)\n", \
+ ssv, ssv2); \
+ } \
+ } \
+}
+#define OPT_WRITE_CHAR_P(n, c) \
+ if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \
+ if (json) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \
+ } else { \
malloc_cprintf(write_cb, cbopaque, \
" opt."#n": \"%s\"\n", cpv); \
- }
+ } \
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"opt\": {\n");
+ } else {
malloc_cprintf(write_cb, cbopaque,
"Run-time option settings:\n");
- OPT_WRITE_BOOL(abort)
- OPT_WRITE_SIZE_T(lg_chunk)
- OPT_WRITE_CHAR_P(dss)
- OPT_WRITE_SIZE_T(narenas)
- OPT_WRITE_SSIZE_T(lg_dirty_mult)
- OPT_WRITE_BOOL(stats_print)
- OPT_WRITE_BOOL(junk)
- OPT_WRITE_SIZE_T(quarantine)
- OPT_WRITE_BOOL(redzone)
- OPT_WRITE_BOOL(zero)
- OPT_WRITE_BOOL(utrace)
- OPT_WRITE_BOOL(valgrind)
- OPT_WRITE_BOOL(xmalloc)
- OPT_WRITE_BOOL(tcache)
- OPT_WRITE_SSIZE_T(lg_tcache_max)
- OPT_WRITE_BOOL(prof)
- OPT_WRITE_CHAR_P(prof_prefix)
- OPT_WRITE_BOOL(prof_active)
- OPT_WRITE_SSIZE_T(lg_prof_sample)
- OPT_WRITE_BOOL(prof_accum)
- OPT_WRITE_SSIZE_T(lg_prof_interval)
- OPT_WRITE_BOOL(prof_gdump)
- OPT_WRITE_BOOL(prof_final)
- OPT_WRITE_BOOL(prof_leak)
+ }
+ OPT_WRITE_BOOL(abort, ",")
+ OPT_WRITE_BOOL(abort_conf, ",")
+ OPT_WRITE_BOOL(retain, ",")
+ OPT_WRITE_CHAR_P(dss, ",")
+ OPT_WRITE_UNSIGNED(narenas, ",")
+ OPT_WRITE_CHAR_P(percpu_arena, ",")
+ OPT_WRITE_BOOL_MUTABLE(background_thread, background_thread, ",")
+ OPT_WRITE_SSIZE_T_MUTABLE(dirty_decay_ms, arenas.dirty_decay_ms, ",")
+ OPT_WRITE_SSIZE_T_MUTABLE(muzzy_decay_ms, arenas.muzzy_decay_ms, ",")
+ OPT_WRITE_CHAR_P(junk, ",")
+ OPT_WRITE_BOOL(zero, ",")
+ OPT_WRITE_BOOL(utrace, ",")
+ OPT_WRITE_BOOL(xmalloc, ",")
+ OPT_WRITE_BOOL(tcache, ",")
+ OPT_WRITE_SSIZE_T(lg_tcache_max, ",")
+ OPT_WRITE_BOOL(prof, ",")
+ OPT_WRITE_CHAR_P(prof_prefix, ",")
+ OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",")
+ OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init,
+ ",")
+ OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",")
+ OPT_WRITE_BOOL(prof_accum, ",")
+ OPT_WRITE_SSIZE_T(lg_prof_interval, ",")
+ OPT_WRITE_BOOL(prof_gdump, ",")
+ OPT_WRITE_BOOL(prof_final, ",")
+ OPT_WRITE_BOOL(prof_leak, ",")
+ OPT_WRITE_BOOL(stats_print, ",")
+ if (json || opt_stats_print) {
+ /*
+ * stats_print_opts is always emitted for JSON, so as long as it
+ * comes last it's safe to unconditionally omit the comma here
+ * (rather than having to conditionally omit it elsewhere
+ * depending on configuration).
+ */
+ OPT_WRITE_CHAR_P(stats_print_opts, "")
+ }
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t},\n");
+ }
#undef OPT_WRITE_BOOL
-#undef OPT_WRITE_SIZE_T
+#undef OPT_WRITE_BOOL_MUTABLE
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
- malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
+ /* arenas. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"arenas\": {\n");
+ }
- CTL_GET("arenas.narenas", &uv, unsigned);
+ CTL_GET("arenas.narenas", &uv, unsigned);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"narenas\": %u,\n", uv);
+ } else {
malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
+ }
+
+ if (json) {
+ CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"dirty_decay_ms\": %zd,\n", ssv);
- malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
- sizeof(void *));
+ CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"muzzy_decay_ms\": %zd,\n", ssv);
+ }
- CTL_GET("arenas.quantum", &sv, size_t);
+ CTL_GET("arenas.quantum", &sv, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"quantum\": %zu,\n", sv);
+ } else {
malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
+ }
- CTL_GET("arenas.page", &sv, size_t);
+ CTL_GET("arenas.page", &sv, size_t);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"page\": %zu,\n", sv);
+ } else {
malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
+ }
- CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
- if (ssv >= 0) {
+ if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) {
+ if (json) {
malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: %u:1\n",
- (1U << ssv));
+ "\t\t\t\"tcache_max\": %zu,\n", sv);
} else {
malloc_cprintf(write_cb, cbopaque,
- "Min active:dirty page ratio per arena: N/A\n");
+ "Maximum thread-cached size class: %zu\n", sv);
}
- if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
- == 0) {
+ }
+
+ if (json) {
+ unsigned nbins, nlextents, i;
+
+ CTL_GET("arenas.nbins", &nbins, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nbins\": %u,\n", nbins);
+
+ CTL_GET("arenas.nhbins", &uv, unsigned);
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t\"nhbins\": %u,\n",
+ uv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"bin\": [\n");
+ for (i = 0; i < nbins; i++) {
malloc_cprintf(write_cb, cbopaque,
- "Maximum thread-cached size class: %zu\n", sv);
+ "\t\t\t\t{\n");
+
+ CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"size\": %zu,\n", sv);
+
+ CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v);
+
+ CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"slab_size\": %zu\n", sv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : "");
}
- if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
- bv) {
- CTL_GET("opt.lg_prof_sample", &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t],\n");
+
+ CTL_GET("arenas.nlextents", &nlextents, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"nlextents\": %u,\n", nlextents);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"lextent\": [\n");
+ for (i = 0; i < nlextents; i++) {
malloc_cprintf(write_cb, cbopaque,
- "Average profile sample interval: %"PRIu64
- " (2^%zu)\n", (((uint64_t)1U) << sv), sv);
+ "\t\t\t\t{\n");
- CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
- if (ssv >= 0) {
- malloc_cprintf(write_cb, cbopaque,
- "Average profile dump interval: %"PRIu64
- " (2^%zd)\n",
- (((uint64_t)1U) << ssv), ssv);
- } else {
- malloc_cprintf(write_cb, cbopaque,
- "Average profile dump interval: N/A\n");
+ CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\t\"size\": %zu\n", sv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t}%s\n", (i + 1 < nlextents) ? "," : "");
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t]\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}%s\n", (config_prof || more) ? "," : "");
+ }
+
+ /* prof. */
+ if (config_prof && json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"prof\": {\n");
+
+ CTL_GET("prof.thread_active_init", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" :
+ "false");
+
+ CTL_GET("prof.active", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"active\": %s,\n", bv ? "true" : "false");
+
+ CTL_GET("prof.gdump", &bv, bool);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false");
+
+ CTL_GET("prof.interval", &u64v, uint64_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"interval\": %"FMTu64",\n", u64v);
+
+ CTL_GET("prof.lg_sample", &ssv, ssize_t);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"lg_sample\": %zd\n", ssv);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}%s\n", more ? "," : "");
+ }
+}
+
+static void
+read_global_mutex_stats(
+ uint64_t results[mutex_prof_num_global_mutexes][mutex_prof_num_counters]) {
+ char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+
+ mutex_prof_global_ind_t i;
+ for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
+#define OP(c, t) \
+ gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
+ "mutexes", global_mutex_names[i], #c); \
+ CTL_GET(cmd, (t *)&results[i][mutex_counter_##c], t);
+MUTEX_PROF_COUNTERS
+#undef OP
+ }
+}
+
+static void
+stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque,
+ bool json, bool merged, bool destroyed, bool unmerged, bool bins,
+ bool large, bool mutex) {
+ size_t allocated, active, metadata, resident, mapped, retained;
+ size_t num_background_threads;
+ uint64_t background_thread_num_runs, background_thread_run_interval;
+
+ CTL_GET("stats.allocated", &allocated, size_t);
+ CTL_GET("stats.active", &active, size_t);
+ CTL_GET("stats.metadata", &metadata, size_t);
+ CTL_GET("stats.resident", &resident, size_t);
+ CTL_GET("stats.mapped", &mapped, size_t);
+ CTL_GET("stats.retained", &retained, size_t);
+
+ uint64_t mutex_stats[mutex_prof_num_global_mutexes][mutex_prof_num_counters];
+ if (mutex) {
+ read_global_mutex_stats(mutex_stats);
+ }
+
+ if (have_background_thread) {
+ CTL_GET("stats.background_thread.num_threads",
+ &num_background_threads, size_t);
+ CTL_GET("stats.background_thread.num_runs",
+ &background_thread_num_runs, uint64_t);
+ CTL_GET("stats.background_thread.run_interval",
+ &background_thread_run_interval, uint64_t);
+ } else {
+ num_background_threads = 0;
+ background_thread_num_runs = 0;
+ background_thread_run_interval = 0;
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"stats\": {\n");
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"allocated\": %zu,\n", allocated);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"active\": %zu,\n", active);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"metadata\": %zu,\n", metadata);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"resident\": %zu,\n", resident);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"mapped\": %zu,\n", mapped);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"retained\": %zu,\n", retained);
+
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"background_thread\": {\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"num_threads\": %zu,\n", num_background_threads);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"num_runs\": %"FMTu64",\n",
+ background_thread_num_runs);
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\t\"run_interval\": %"FMTu64"\n",
+ background_thread_run_interval);
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t}%s\n",
+ mutex ? "," : "");
+
+ if (mutex) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"mutexes\": {\n");
+ mutex_prof_global_ind_t i;
+ for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
+ mutex_stats_output_json(write_cb, cbopaque,
+ global_mutex_names[i], mutex_stats[i],
+ "\t\t\t\t",
+ i == mutex_prof_num_global_mutexes - 1);
+ }
+ malloc_cprintf(write_cb, cbopaque, "\t\t\t}\n");
+ }
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}%s\n", (merged || unmerged || destroyed) ? "," : "");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Allocated: %zu, active: %zu, metadata: %zu,"
+ " resident: %zu, mapped: %zu, retained: %zu\n",
+ allocated, active, metadata, resident, mapped, retained);
+
+ if (have_background_thread && num_background_threads > 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Background threads: %zu, num_runs: %"FMTu64", "
+ "run_interval: %"FMTu64" ns\n",
+ num_background_threads,
+ background_thread_num_runs,
+ background_thread_run_interval);
+ }
+ if (mutex) {
+ mutex_prof_global_ind_t i;
+ for (i = 0; i < mutex_prof_num_global_mutexes; i++) {
+ mutex_stats_output(write_cb, cbopaque,
+ global_mutex_names[i], mutex_stats[i],
+ i == 0);
}
}
- CTL_GET("opt.lg_chunk", &sv, size_t);
- malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
- (ZU(1) << sv), sv);
}
- if (config_stats) {
- size_t *cactive;
- size_t allocated, active, mapped;
- size_t chunks_current, chunks_high;
- uint64_t chunks_total;
- size_t huge_allocated;
- uint64_t huge_nmalloc, huge_ndalloc;
-
- CTL_GET("stats.cactive", &cactive, size_t *);
- CTL_GET("stats.allocated", &allocated, size_t);
- CTL_GET("stats.active", &active, size_t);
- CTL_GET("stats.mapped", &mapped, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "Allocated: %zu, active: %zu, mapped: %zu\n",
- allocated, active, mapped);
- malloc_cprintf(write_cb, cbopaque,
- "Current active ceiling: %zu\n", atomic_read_z(cactive));
-
- /* Print chunk stats. */
- CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
- CTL_GET("stats.chunks.high", &chunks_high, size_t);
- CTL_GET("stats.chunks.current", &chunks_current, size_t);
- malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
- "highchunks curchunks\n");
- malloc_cprintf(write_cb, cbopaque,
- " %13"PRIu64" %12zu %12zu\n",
- chunks_total, chunks_high, chunks_current);
-
- /* Print huge stats. */
- CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
- CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t);
- CTL_GET("stats.huge.allocated", &huge_allocated, size_t);
- malloc_cprintf(write_cb, cbopaque,
- "huge: nmalloc ndalloc allocated\n");
- malloc_cprintf(write_cb, cbopaque,
- " %12"PRIu64" %12"PRIu64" %12zu\n",
- huge_nmalloc, huge_ndalloc, huge_allocated);
-
- if (merged) {
- unsigned narenas;
-
- CTL_GET("arenas.narenas", &narenas, unsigned);
- {
- VARIABLE_ARRAY(bool, initialized, narenas);
- size_t isz;
- unsigned i, ninitialized;
-
- isz = sizeof(bool) * narenas;
- xmallctl("arenas.initialized", initialized,
- &isz, NULL, 0);
- for (i = ninitialized = 0; i < narenas; i++) {
- if (initialized[i])
- ninitialized++;
+ if (merged || destroyed || unmerged) {
+ unsigned narenas;
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\"stats.arenas\": {\n");
+ }
+
+ CTL_GET("arenas.narenas", &narenas, unsigned);
+ {
+ size_t mib[3];
+ size_t miblen = sizeof(mib) / sizeof(size_t);
+ size_t sz;
+ VARIABLE_ARRAY(bool, initialized, narenas);
+ bool destroyed_initialized;
+ unsigned i, j, ninitialized;
+
+ xmallctlnametomib("arena.0.initialized", mib, &miblen);
+ for (i = ninitialized = 0; i < narenas; i++) {
+ mib[1] = i;
+ sz = sizeof(bool);
+ xmallctlbymib(mib, miblen, &initialized[i], &sz,
+ NULL, 0);
+ if (initialized[i]) {
+ ninitialized++;
}
+ }
+ mib[1] = MALLCTL_ARENAS_DESTROYED;
+ sz = sizeof(bool);
+ xmallctlbymib(mib, miblen, &destroyed_initialized, &sz,
+ NULL, 0);
- if (ninitialized > 1 || unmerged == false) {
- /* Print merged arena stats. */
+ /* Merged stats. */
+ if (merged && (ninitialized > 1 || !unmerged)) {
+ /* Print merged arena stats. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"merged\": {\n");
+ } else {
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
- stats_arena_print(write_cb, cbopaque,
- narenas, bins, large);
+ }
+ stats_arena_print(write_cb, cbopaque, json,
+ MALLCTL_ARENAS_ALL, bins, large, mutex);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t}%s\n",
+ ((destroyed_initialized &&
+ destroyed) || unmerged) ? "," :
+ "");
}
}
- }
-
- if (unmerged) {
- unsigned narenas;
- /* Print stats for each arena. */
-
- CTL_GET("arenas.narenas", &narenas, unsigned);
- {
- VARIABLE_ARRAY(bool, initialized, narenas);
- size_t isz;
- unsigned i;
-
- isz = sizeof(bool) * narenas;
- xmallctl("arenas.initialized", initialized,
- &isz, NULL, 0);
+ /* Destroyed stats. */
+ if (destroyed_initialized && destroyed) {
+ /* Print destroyed arena stats. */
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t\"destroyed\": {\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "\nDestroyed arenas stats:\n");
+ }
+ stats_arena_print(write_cb, cbopaque, json,
+ MALLCTL_ARENAS_DESTROYED, bins, large,
+ mutex);
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t\t}%s\n", unmerged ? "," :
+ "");
+ }
+ }
- for (i = 0; i < narenas; i++) {
+ /* Unmerged stats. */
+ if (unmerged) {
+ for (i = j = 0; i < narenas; i++) {
if (initialized[i]) {
- malloc_cprintf(write_cb,
- cbopaque,
- "\narenas[%u]:\n", i);
+ if (json) {
+ j++;
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\t\t\t\"%u\": {\n",
+ i);
+ } else {
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\narenas[%u]:\n",
+ i);
+ }
stats_arena_print(write_cb,
- cbopaque, i, bins, large);
+ cbopaque, json, i, bins,
+ large, mutex);
+ if (json) {
+ malloc_cprintf(write_cb,
+ cbopaque,
+ "\t\t\t}%s\n", (j <
+ ninitialized) ? ","
+ : "");
+ }
}
}
}
}
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t\t}\n");
+ }
+ }
+}
+
+void
+stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts) {
+ int err;
+ uint64_t epoch;
+ size_t u64sz;
+#define OPTION(o, v, d, s) bool v = d;
+ STATS_PRINT_OPTIONS
+#undef OPTION
+
+ /*
+ * Refresh stats, in case mallctl() was called by the application.
+ *
+ * Check for OOM here, since refreshing the ctl cache can trigger
+ * allocation. In practice, none of the subsequent mallctl()-related
+ * calls in this function will cause OOM if this one succeeds.
+	 */
+ epoch = 1;
+ u64sz = sizeof(uint64_t);
+ err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch,
+ sizeof(uint64_t));
+ if (err != 0) {
+ if (err == EAGAIN) {
+ malloc_write("<jemalloc>: Memory allocation failure in "
+ "mallctl(\"epoch\", ...)\n");
+ return;
+ }
+ malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", "
+ "...)\n");
+ abort();
+ }
+
+ if (opts != NULL) {
+ for (unsigned i = 0; opts[i] != '\0'; i++) {
+ switch (opts[i]) {
+#define OPTION(o, v, d, s) case o: v = s; break;
+ STATS_PRINT_OPTIONS
+#undef OPTION
+ default:;
+ }
+ }
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "{\n"
+ "\t\"jemalloc\": {\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "___ Begin jemalloc statistics ___\n");
+ }
+
+ if (general) {
+ stats_general_print(write_cb, cbopaque, json, config_stats);
+ }
+ if (config_stats) {
+ stats_print_helper(write_cb, cbopaque, json, merged, destroyed,
+ unmerged, bins, large, mutex);
+ }
+
+ if (json) {
+ malloc_cprintf(write_cb, cbopaque,
+ "\t}\n"
+ "}\n");
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "--- End jemalloc statistics ---\n");
}
- malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
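
The rewritten stats_print() above replaces the hand-maintained bool flags and
switch statement with the STATS_PRINT_OPTIONS table expanded through an
OPTION(o, v, d, s) X-macro: one expansion declares each flag with its default,
another emits the switch case that parses the opts string. The following is a
minimal standalone sketch of that pattern; the DEMO_PRINT_OPTIONS table, its
option characters, and the flag names are invented for illustration and are
not jemalloc's actual STATS_PRINT_OPTIONS definition.

/*
 * Illustrative X-macro option table: each row names the option character,
 * the flag variable, its default value, and the value assigned when the
 * character appears in the opts string.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PRINT_OPTIONS						\
	OPTION('J', json,     false, true)	/* 'J' sets json. */	\
	OPTION('g', general,  true,  false)	/* 'g' clears general. */ \
	OPTION('a', unmerged, true,  false)	/* 'a' clears unmerged. */

static void
demo_parse_opts(const char *opts) {
	/* One bool per table row, initialized to its default. */
#define OPTION(o, v, d, s)	bool v = d;
	DEMO_PRINT_OPTIONS
#undef OPTION

	for (unsigned i = 0; opts != NULL && opts[i] != '\0'; i++) {
		switch (opts[i]) {
		/* Each table row becomes one case that flips its flag. */
#define OPTION(o, v, d, s)	case o: v = s; break;
		DEMO_PRINT_OPTIONS
#undef OPTION
		default:;
		}
	}
	printf("json=%d general=%d unmerged=%d\n", json, general, unmerged);
}

int
main(void) {
	demo_parse_opts("Jg");	/* Prints: json=1 general=0 unmerged=1 */
	return 0;
}

Compiled on its own, demo_parse_opts("Jg") leaves json set, general cleared,
and unmerged at its default, mirroring how a single table row in the diff
above both declares "bool v = d;" and emits "case o: v = s; break;".
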
diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c
index 6de92960b2..936ef3140d 100644
--- a/deps/jemalloc/src/tcache.c
+++ b/deps/jemalloc/src/tcache.c
@@ -1,131 +1,153 @@
-#define JEMALLOC_TCACHE_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_TCACHE_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/size_classes.h"
/******************************************************************************/
/* Data. */
-malloc_tsd_data(, tcache, tcache_t *, NULL)
-malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
-
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */
-size_t nhbins;
+unsigned nhbins;
size_t tcache_maxclass;
-/******************************************************************************/
+tcaches_t *tcaches;
+
+/* Index of first element within tcaches that has never been used. */
+static unsigned tcaches_past;
+
+/* Head of singly linked list tracking available tcaches elements. */
+static tcaches_t *tcaches_avail;
-size_t tcache_salloc(const void *ptr)
-{
+/* Protects tcaches{,_past,_avail}. */
+static malloc_mutex_t tcaches_mtx;
- return (arena_salloc(ptr, false));
+/******************************************************************************/
+
+size_t
+tcache_salloc(tsdn_t *tsdn, const void *ptr) {
+ return arena_salloc(tsdn, ptr);
}
void
-tcache_event_hard(tcache_t *tcache)
-{
- size_t binind = tcache->next_gc_bin;
- tcache_bin_t *tbin = &tcache->tbins[binind];
- tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
-
+tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
+ szind_t binind = tcache->next_gc_bin;
+
+ tcache_bin_t *tbin;
+ if (binind < NBINS) {
+ tbin = tcache_small_bin_get(tcache, binind);
+ } else {
+ tbin = tcache_large_bin_get(tcache, binind);
+ }
if (tbin->low_water > 0) {
/*
* Flush (ceiling) 3/4 of the objects below the low water mark.
*/
if (binind < NBINS) {
- tcache_bin_flush_small(tbin, binind, tbin->ncached -
- tbin->low_water + (tbin->low_water >> 2), tcache);
+ tcache_bin_flush_small(tsd, tcache, tbin, binind,
+ tbin->ncached - tbin->low_water + (tbin->low_water
+ >> 2));
+ /*
+ * Reduce fill count by 2X. Limit lg_fill_div such that
+ * the fill count is always at least 1.
+ */
+ tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+ if ((tbin_info->ncached_max >>
+ (tcache->lg_fill_div[binind] + 1)) >= 1) {
+ tcache->lg_fill_div[binind]++;
+ }
} else {
- tcache_bin_flush_large(tbin, binind, tbin->ncached -
- tbin->low_water + (tbin->low_water >> 2), tcache);
+ tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
+ - tbin->low_water + (tbin->low_water >> 2), tcache);
}
- /*
- * Reduce fill count by 2X. Limit lg_fill_div such that the
- * fill count is always at least 1.
- */
- if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
- tbin->lg_fill_div++;
} else if (tbin->low_water < 0) {
/*
- * Increase fill count by 2X. Make sure lg_fill_div stays
- * greater than 0.
+ * Increase fill count by 2X for small bins. Make sure
+ * lg_fill_div stays greater than 0.
*/
- if (tbin->lg_fill_div > 1)
- tbin->lg_fill_div--;
+ if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
+ tcache->lg_fill_div[binind]--;
+ }
}
tbin->low_water = tbin->ncached;
tcache->next_gc_bin++;
- if (tcache->next_gc_bin == nhbins)
+ if (tcache->next_gc_bin == nhbins) {
tcache->next_gc_bin = 0;
- tcache->ev_cnt = 0;
+ }
}
void *
-tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
-{
+tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+ tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
void *ret;
- arena_tcache_fill_small(tcache->arena, tbin, binind,
+ assert(tcache->arena != NULL);
+ arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
config_prof ? tcache->prof_accumbytes : 0);
- if (config_prof)
+ if (config_prof) {
tcache->prof_accumbytes = 0;
- ret = tcache_alloc_easy(tbin);
+ }
+ ret = tcache_alloc_easy(tbin, tcache_success);
- return (ret);
+ return ret;
}
void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache)
-{
- void *ptr;
- unsigned i, nflush, ndeferred;
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+ szind_t binind, unsigned rem) {
bool merged_stats = false;
assert(binind < NBINS);
assert(rem <= tbin->ncached);
- for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
- /* Lock the arena bin associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- tbin->avail[0]);
- arena_t *arena = chunk->arena;
- arena_bin_t *bin = &arena->bins[binind];
+ arena_t *arena = tcache->arena;
+ assert(arena != NULL);
+ unsigned nflush = tbin->ncached - rem;
+ VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+ /* Look up extent once per item. */
+ for (unsigned i = 0 ; i < nflush; i++) {
+ item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
+ }
- if (config_prof && arena == tcache->arena) {
- if (arena_prof_accum(arena, tcache->prof_accumbytes))
- prof_idump();
+ while (nflush > 0) {
+ /* Lock the arena bin associated with the first object. */
+ extent_t *extent = item_extent[0];
+ arena_t *bin_arena = extent_arena_get(extent);
+ arena_bin_t *bin = &bin_arena->bins[binind];
+
+ if (config_prof && bin_arena == arena) {
+ if (arena_prof_accum(tsd_tsdn(tsd), arena,
+ tcache->prof_accumbytes)) {
+ prof_idump(tsd_tsdn(tsd));
+ }
tcache->prof_accumbytes = 0;
}
- malloc_mutex_lock(&bin->lock);
- if (config_stats && arena == tcache->arena) {
- assert(merged_stats == false);
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+ if (config_stats && bin_arena == arena) {
+ assert(!merged_stats);
merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
- ndeferred = 0;
- for (i = 0; i < nflush; i++) {
- ptr = tbin->avail[i];
- assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk->arena == arena) {
- size_t pageind = ((uintptr_t)ptr -
- (uintptr_t)chunk) >> LG_PAGE;
- arena_chunk_map_t *mapelm =
- arena_mapp_get(chunk, pageind);
- if (config_fill && opt_junk) {
- arena_alloc_junk_small(ptr,
- &arena_bin_info[binind], true);
- }
- arena_dalloc_bin_locked(arena, chunk, ptr,
- mapelm);
+ unsigned ndeferred = 0;
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = *(tbin->avail - 1 - i);
+ extent = item_extent[i];
+ assert(ptr != NULL && extent != NULL);
+
+ if (extent_arena_get(extent) == bin_arena) {
+ arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
+ bin_arena, extent, ptr);
} else {
/*
* This object was allocated via a different
@@ -133,276 +155,369 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
- tbin->avail[ndeferred] = ptr;
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ item_extent[ndeferred] = extent;
ndeferred++;
}
}
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
+ arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
+ nflush = ndeferred;
}
- if (config_stats && merged_stats == false) {
+ if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- arena_bin_t *bin = &tcache->arena->bins[binind];
- malloc_mutex_lock(&bin->lock);
+ arena_bin_t *bin = &arena->bins[binind];
+ malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
- memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
- rem * sizeof(void *));
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
tbin->ncached = rem;
- if ((int)tbin->ncached < tbin->low_water)
+ if ((low_water_t)tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
+ }
}
void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
- tcache_t *tcache)
-{
- void *ptr;
- unsigned i, nflush, ndeferred;
+tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+ unsigned rem, tcache_t *tcache) {
bool merged_stats = false;
assert(binind < nhbins);
assert(rem <= tbin->ncached);
- for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
+ arena_t *arena = tcache->arena;
+ assert(arena != NULL);
+ unsigned nflush = tbin->ncached - rem;
+ VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+ /* Look up extent once per item. */
+ for (unsigned i = 0 ; i < nflush; i++) {
+ item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
+ }
+
+ while (nflush > 0) {
/* Lock the arena associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
- tbin->avail[0]);
- arena_t *arena = chunk->arena;
+ extent_t *extent = item_extent[0];
+ arena_t *locked_arena = extent_arena_get(extent);
UNUSED bool idump;
- if (config_prof)
+ if (config_prof) {
idump = false;
- malloc_mutex_lock(&arena->lock);
- if ((config_prof || config_stats) && arena == tcache->arena) {
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = *(tbin->avail - 1 - i);
+ assert(ptr != NULL);
+ extent = item_extent[i];
+ if (extent_arena_get(extent) == locked_arena) {
+ large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
+ extent);
+ }
+ }
+ if ((config_prof || config_stats) && locked_arena == arena) {
if (config_prof) {
- idump = arena_prof_accum_locked(arena,
+ idump = arena_prof_accum(tsd_tsdn(tsd), arena,
tcache->prof_accumbytes);
tcache->prof_accumbytes = 0;
}
if (config_stats) {
merged_stats = true;
- arena->stats.nrequests_large +=
- tbin->tstats.nrequests;
- arena->stats.lstats[binind - NBINS].nrequests +=
- tbin->tstats.nrequests;
+ arena_stats_large_nrequests_add(tsd_tsdn(tsd),
+ &arena->stats, binind,
+ tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
}
}
- ndeferred = 0;
- for (i = 0; i < nflush; i++) {
- ptr = tbin->avail[i];
- assert(ptr != NULL);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- if (chunk->arena == arena)
- arena_dalloc_large_locked(arena, chunk, ptr);
- else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+
+ unsigned ndeferred = 0;
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = *(tbin->avail - 1 - i);
+ extent = item_extent[i];
+ assert(ptr != NULL && extent != NULL);
+
+ if (extent_arena_get(extent) == locked_arena) {
+ large_dalloc_finish(tsd_tsdn(tsd), extent);
+ } else {
/*
* This object was allocated via a different
* arena than the one that is currently locked.
* Stash the object, so that it can be handled
* in a future pass.
*/
- tbin->avail[ndeferred] = ptr;
+ *(tbin->avail - 1 - ndeferred) = ptr;
+ item_extent[ndeferred] = extent;
ndeferred++;
}
}
- malloc_mutex_unlock(&arena->lock);
- if (config_prof && idump)
- prof_idump();
+ if (config_prof && idump) {
+ prof_idump(tsd_tsdn(tsd));
+ }
+ arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
+ ndeferred);
+ nflush = ndeferred;
}
- if (config_stats && merged_stats == false) {
+ if (config_stats && !merged_stats) {
/*
* The flush loop didn't happen to flush to this thread's
* arena, so the stats didn't get merged. Manually do so now.
*/
- arena_t *arena = tcache->arena;
- malloc_mutex_lock(&arena->lock);
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[binind - NBINS].nrequests +=
- tbin->tstats.nrequests;
+ arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats,
+ binind, tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(&arena->lock);
}
- memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
- rem * sizeof(void *));
+ memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
+ sizeof(void *));
tbin->ncached = rem;
- if ((int)tbin->ncached < tbin->low_water)
+ if ((low_water_t)tbin->ncached < tbin->low_water) {
tbin->low_water = tbin->ncached;
+ }
}
void
-tcache_arena_associate(tcache_t *tcache, arena_t *arena)
-{
+tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
+ assert(tcache->arena == NULL);
+ tcache->arena = arena;
if (config_stats) {
/* Link into list of extant tcaches. */
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
ql_elm_new(tcache, link);
ql_tail_insert(&arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
- tcache->arena = arena;
}
-void
-tcache_arena_dissociate(tcache_t *tcache)
-{
-
+static void
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
+ arena_t *arena = tcache->arena;
+ assert(arena != NULL);
if (config_stats) {
/* Unlink from list of extant tcaches. */
- malloc_mutex_lock(&tcache->arena->lock);
- ql_remove(&tcache->arena->tcache_ql, tcache, link);
- tcache_stats_merge(tcache, tcache->arena);
- malloc_mutex_unlock(&tcache->arena->lock);
+ malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+ if (config_debug) {
+ bool in_ql = false;
+ tcache_t *iter;
+ ql_foreach(iter, &arena->tcache_ql, link) {
+ if (iter == tcache) {
+ in_ql = true;
+ break;
+ }
+ }
+ assert(in_ql);
+ }
+ ql_remove(&arena->tcache_ql, tcache, link);
+ tcache_stats_merge(tsdn, tcache, arena);
+ malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
+ tcache->arena = NULL;
}
+void
+tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
+ tcache_arena_dissociate(tsdn, tcache);
+ tcache_arena_associate(tsdn, tcache, arena);
+}
+
+bool
+tsd_tcache_enabled_data_init(tsd_t *tsd) {
+ /* Called upon tsd initialization. */
+ tsd_tcache_enabled_set(tsd, opt_tcache);
+ tsd_slow_update(tsd);
+
+ if (opt_tcache) {
+ /* Trigger tcache init. */
+ tsd_tcache_data_init(tsd);
+ }
+
+ return false;
+}
+
+/* Initialize auto tcache (embedded in TSD). */
+static void
+tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
+ memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
+ tcache->prof_accumbytes = 0;
+ tcache->next_gc_bin = 0;
+ tcache->arena = NULL;
+
+ ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
+
+ size_t stack_offset = 0;
+ assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
+ memset(tcache->tbins_small, 0, sizeof(tcache_bin_t) * NBINS);
+ memset(tcache->tbins_large, 0, sizeof(tcache_bin_t) * (nhbins - NBINS));
+ unsigned i = 0;
+ for (; i < NBINS; i++) {
+ tcache->lg_fill_div[i] = 1;
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ /*
+ * avail points past the available space. Allocations will
+ * access the slots toward higher addresses (for the benefit of
+ * prefetch).
+ */
+ tcache_small_bin_get(tcache, i)->avail =
+ (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
+ }
+ for (; i < nhbins; i++) {
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ tcache_large_bin_get(tcache, i)->avail =
+ (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
+ }
+ assert(stack_offset == stack_nelms * sizeof(void *));
+}
+
+/* Initialize auto tcache (embedded in TSD). */
+bool
+tsd_tcache_data_init(tsd_t *tsd) {
+ tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
+ assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
+ size_t size = stack_nelms * sizeof(void *);
+ /* Avoid false cacheline sharing. */
+ size = sz_sa2u(size, CACHELINE);
+
+ void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
+ NULL, true, arena_get(TSDN_NULL, 0, true));
+ if (avail_array == NULL) {
+ return true;
+ }
+
+ tcache_init(tsd, tcache, avail_array);
+ /*
+ * Initialization is a bit tricky here. After malloc init is done, all
+ * threads can rely on arena_choose and associate tcache accordingly.
+ * However, the thread that does actual malloc bootstrapping relies on
+ * functional tsd, and it can only rely on a0. In that case, we
+ * associate its tcache to a0 temporarily, and later on
+ * arena_choose_hard() will re-associate properly.
+ */
+ tcache->arena = NULL;
+ arena_t *arena;
+ if (!malloc_initialized()) {
+ /* If in initialization, assign to a0. */
+ arena = arena_get(tsd_tsdn(tsd), 0, false);
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ } else {
+ arena = arena_choose(tsd, NULL);
+ /* This may happen if thread.tcache.enabled is used. */
+ if (tcache->arena == NULL) {
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ }
+ }
+ assert(arena == tcache->arena);
+
+ return false;
+}
+
+/* Create a manual tcache for the tcache.create mallctl. */
tcache_t *
-tcache_create(arena_t *arena)
-{
+tcache_create_explicit(tsd_t *tsd) {
tcache_t *tcache;
size_t size, stack_offset;
- unsigned i;
- size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
+ size = sizeof(tcache_t);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
stack_offset = size;
size += stack_nelms * sizeof(void *);
- /*
- * Round up to the nearest multiple of the cacheline size, in order to
- * avoid the possibility of false cacheline sharing.
- *
- * That this works relies on the same logic as in ipalloc(), but we
- * cannot directly call ipalloc() here due to tcache bootstrapping
- * issues.
- */
- size = (size + CACHELINE_MASK) & (-CACHELINE);
-
- if (size <= SMALL_MAXCLASS)
- tcache = (tcache_t *)arena_malloc_small(arena, size, true);
- else if (size <= tcache_maxclass)
- tcache = (tcache_t *)arena_malloc_large(arena, size, true);
- else
- tcache = (tcache_t *)icalloct(size, false, arena);
-
- if (tcache == NULL)
- return (NULL);
-
- tcache_arena_associate(tcache, arena);
+ /* Avoid false cacheline sharing. */
+ size = sz_sa2u(size, CACHELINE);
- assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
- for (i = 0; i < nhbins; i++) {
- tcache->tbins[i].lg_fill_div = 1;
- tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
- (uintptr_t)stack_offset);
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
+ tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
+ arena_get(TSDN_NULL, 0, true));
+ if (tcache == NULL) {
+ return NULL;
}
- tcache_tsd_set(&tcache);
+ tcache_init(tsd, tcache,
+ (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
+ tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
- return (tcache);
+ return tcache;
}
-void
-tcache_destroy(tcache_t *tcache)
-{
- unsigned i;
- size_t tcache_size;
+static void
+tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
+ assert(tcache->arena != NULL);
- tcache_arena_dissociate(tcache);
+ for (unsigned i = 0; i < NBINS; i++) {
+ tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
+ tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
- for (i = 0; i < NBINS; i++) {
- tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_small(tbin, i, 0, tcache);
-
- if (config_stats && tbin->tstats.nrequests != 0) {
- arena_t *arena = tcache->arena;
- arena_bin_t *bin = &arena->bins[i];
- malloc_mutex_lock(&bin->lock);
- bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(&bin->lock);
+ if (config_stats) {
+ assert(tbin->tstats.nrequests == 0);
}
}
+ for (unsigned i = NBINS; i < nhbins; i++) {
+ tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+ tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
- for (; i < nhbins; i++) {
- tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_large(tbin, i, 0, tcache);
-
- if (config_stats && tbin->tstats.nrequests != 0) {
- arena_t *arena = tcache->arena;
- malloc_mutex_lock(&arena->lock);
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[i - NBINS].nrequests +=
- tbin->tstats.nrequests;
- malloc_mutex_unlock(&arena->lock);
+ if (config_stats) {
+ assert(tbin->tstats.nrequests == 0);
}
}
if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
- prof_idump();
-
- tcache_size = arena_salloc(tcache, false);
- if (tcache_size <= SMALL_MAXCLASS) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
- size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
- LG_PAGE;
- arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
-
- arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
- } else if (tcache_size <= tcache_maxclass) {
- arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
- arena_t *arena = chunk->arena;
-
- arena_dalloc_large(arena, chunk, tcache);
- } else
- idalloct(tcache, false);
+ arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
+ tcache->prof_accumbytes)) {
+ prof_idump(tsd_tsdn(tsd));
+ }
}
void
-tcache_thread_cleanup(void *arg)
-{
- tcache_t *tcache = *(tcache_t **)arg;
+tcache_flush(tsd_t *tsd) {
+ assert(tcache_available(tsd));
+ tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
+}
- if (tcache == TCACHE_STATE_DISABLED) {
- /* Do nothing. */
- } else if (tcache == TCACHE_STATE_REINCARNATED) {
- /*
- * Another destructor called an allocator function after this
- * destructor was called. Reset tcache to
- * TCACHE_STATE_PURGATORY in order to receive another callback.
- */
- tcache = TCACHE_STATE_PURGATORY;
- tcache_tsd_set(&tcache);
- } else if (tcache == TCACHE_STATE_PURGATORY) {
- /*
- * The previous time this destructor was called, we set the key
- * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
- * cause re-creation of the tcache. This time, do nothing, so
- * that the destructor will not be called again.
- */
- } else if (tcache != NULL) {
- assert(tcache != TCACHE_STATE_PURGATORY);
- tcache_destroy(tcache);
- tcache = TCACHE_STATE_PURGATORY;
- tcache_tsd_set(&tcache);
+static void
+tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
+ tcache_flush_cache(tsd, tcache);
+ tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
+
+ if (tsd_tcache) {
+ /* Release the avail array for the TSD embedded auto tcache. */
+ void *avail_array =
+ (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
+ (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
+ idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
+ } else {
+ /* Release both the tcache struct and avail array. */
+ idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
}
}
-/* Caller must own arena->lock. */
+/* For auto tcache (embedded in TSD) only. */
void
-tcache_stats_merge(tcache_t *tcache, arena_t *arena)
-{
+tcache_cleanup(tsd_t *tsd) {
+ tcache_t *tcache = tsd_tcachep_get(tsd);
+ if (!tcache_available(tsd)) {
+ assert(tsd_tcache_enabled_get(tsd) == false);
+ if (config_debug) {
+ assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
+ }
+ return;
+ }
+ assert(tsd_tcache_enabled_get(tsd));
+ assert(tcache_small_bin_get(tcache, 0)->avail != NULL);
+
+ tcache_destroy(tsd, tcache, true);
+ if (config_debug) {
+ tcache_small_bin_get(tcache, 0)->avail = NULL;
+ }
+}
+
+void
+tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
unsigned i;
cassert(config_stats);
@@ -410,48 +525,151 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
/* Merge and reset tcache stats. */
for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
- tcache_bin_t *tbin = &tcache->tbins[i];
- malloc_mutex_lock(&bin->lock);
+ tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
+ malloc_mutex_lock(tsdn, &bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(&bin->lock);
+ malloc_mutex_unlock(tsdn, &bin->lock);
tbin->tstats.nrequests = 0;
}
for (; i < nhbins; i++) {
- malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
- tcache_bin_t *tbin = &tcache->tbins[i];
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- lstats->nrequests += tbin->tstats.nrequests;
+ tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+ arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
+ tbin->tstats.nrequests);
tbin->tstats.nrequests = 0;
}
}
+static bool
+tcaches_create_prep(tsd_t *tsd) {
+ bool err;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+
+ if (tcaches == NULL) {
+ tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
+ * (MALLOCX_TCACHE_MAX+1), CACHELINE);
+ if (tcaches == NULL) {
+ err = true;
+ goto label_return;
+ }
+ }
+
+ if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
+ err = true;
+ goto label_return;
+ }
+
+ err = false;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+ return err;
+}
+
bool
-tcache_boot0(void)
-{
- unsigned i;
+tcaches_create(tsd_t *tsd, unsigned *r_ind) {
+ witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
- /*
- * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
- * known.
- */
- if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ bool err;
+
+ if (tcaches_create_prep(tsd)) {
+ err = true;
+ goto label_return;
+ }
+
+ tcache_t *tcache = tcache_create_explicit(tsd);
+ if (tcache == NULL) {
+ err = true;
+ goto label_return;
+ }
+
+ tcaches_t *elm;
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+ if (tcaches_avail != NULL) {
+ elm = tcaches_avail;
+ tcaches_avail = tcaches_avail->next;
+ elm->tcache = tcache;
+ *r_ind = (unsigned)(elm - tcaches);
+ } else {
+ elm = &tcaches[tcaches_past];
+ elm->tcache = tcache;
+ *r_ind = tcaches_past;
+ tcaches_past++;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+
+ err = false;
+label_return:
+ witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
+ return err;
+}
+
+static tcache_t *
+tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
+
+ if (elm->tcache == NULL) {
+ return NULL;
+ }
+ tcache_t *tcache = elm->tcache;
+ elm->tcache = NULL;
+ return tcache;
+}
+
+void
+tcaches_flush(tsd_t *tsd, unsigned ind) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+ tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+ if (tcache != NULL) {
+ tcache_destroy(tsd, tcache, false);
+ }
+}
+
+void
+tcaches_destroy(tsd_t *tsd, unsigned ind) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+ tcaches_t *elm = &tcaches[ind];
+ tcache_t *tcache = tcaches_elm_remove(tsd, elm);
+ elm->next = tcaches_avail;
+ tcaches_avail = elm;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
+ if (tcache != NULL) {
+ tcache_destroy(tsd, tcache, false);
+ }
+}
+
+bool
+tcache_boot(tsdn_t *tsdn) {
+ /* If necessary, clamp opt_lg_tcache_max. */
+ if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
+ SMALL_MAXCLASS) {
tcache_maxclass = SMALL_MAXCLASS;
- else if ((1U << opt_lg_tcache_max) > arena_maxclass)
- tcache_maxclass = arena_maxclass;
- else
- tcache_maxclass = (1U << opt_lg_tcache_max);
+ } else {
+ tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+ }
- nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
+ if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ nhbins = sz_size2index(tcache_maxclass) + 1;
/* Initialize tcache_bin_info. */
- tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
- sizeof(tcache_bin_info_t));
- if (tcache_bin_info == NULL)
- return (true);
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
+ * sizeof(tcache_bin_info_t), CACHELINE);
+ if (tcache_bin_info == NULL) {
+ return true;
+ }
stack_nelms = 0;
+ unsigned i;
for (i = 0; i < NBINS; i++) {
- if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
+ if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MIN;
+ } else if ((arena_bin_info[i].nregs << 1) <=
+ TCACHE_NSLOTS_SMALL_MAX) {
tcache_bin_info[i].ncached_max =
(arena_bin_info[i].nregs << 1);
} else {
@@ -465,15 +683,26 @@ tcache_boot0(void)
stack_nelms += tcache_bin_info[i].ncached_max;
}
- return (false);
+ return false;
}
-bool
-tcache_boot1(void)
-{
+void
+tcache_prefork(tsdn_t *tsdn) {
+ if (!config_prof && opt_tcache) {
+ malloc_mutex_prefork(tsdn, &tcaches_mtx);
+ }
+}
- if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
- return (true);
+void
+tcache_postfork_parent(tsdn_t *tsdn) {
+ if (!config_prof && opt_tcache) {
+ malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
+ }
+}
- return (false);
+void
+tcache_postfork_child(tsdn_t *tsdn) {
+ if (!config_prof && opt_tcache) {
+ malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
+ }
}
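
tcaches_create() and tcaches_destroy() above manage manual tcaches through a
flat table plus a free list: tcaches_past is the first never-used index, and
destroyed entries are pushed onto the tcaches_avail singly linked list so
their indices can be reissued. Below is a self-contained sketch of that
slot-recycling scheme; the slot_t layout, the SLOT_MAX bound, and the function
names are illustrative assumptions (jemalloc keeps a union inside tcaches_t
and guards the table with tcaches_mtx, both omitted here for brevity).

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define SLOT_MAX 8	/* Highest valid index; table holds SLOT_MAX + 1 slots. */

typedef struct slot_s slot_t;
struct slot_s {
	void	*payload;	/* Non-NULL while the slot is in use. */
	slot_t	*next;		/* Free-list link once the slot is destroyed. */
};

static slot_t slots[SLOT_MAX + 1];
static unsigned slots_past = 0;		/* First never-used index. */
static slot_t *slots_avail = NULL;	/* Head of recycled slots. */

/* Returns true on failure, false on success (jemalloc's convention). */
static bool
slot_create(void *payload, unsigned *r_ind) {
	slot_t *elm;

	if (slots_avail != NULL) {
		/* Reuse a previously destroyed slot. */
		elm = slots_avail;
		slots_avail = slots_avail->next;
	} else if (slots_past <= SLOT_MAX) {
		/* Claim the next never-used slot. */
		elm = &slots[slots_past];
		slots_past++;
	} else {
		return true;	/* Table exhausted. */
	}
	elm->payload = payload;
	*r_ind = (unsigned)(elm - slots);
	return false;
}

static void
slot_destroy(unsigned ind) {
	slot_t *elm = &slots[ind];

	elm->payload = NULL;
	/* Push the slot onto the free list for later reuse. */
	elm->next = slots_avail;
	slots_avail = elm;
}

int
main(void) {
	unsigned a, b;
	int x = 42, y = 7;

	slot_create(&x, &a);	/* Claims index 0. */
	slot_create(&y, &b);	/* Claims index 1. */
	assert(a == 0 && b == 1);
	slot_destroy(a);	/* Index 0 goes onto the free list... */
	slot_create(&y, &a);	/* ...and is reissued. */
	assert(a == 0);
	return 0;
}

Recycling keeps the indices handed back through *r_ind small and stable, which
is what lets the table be sized once against a fixed maximum (the
MALLOCX_TCACHE_MAX bound in the diff) instead of ever growing.
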
diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c
index 700caabfe4..f968992f2b 100644
--- a/deps/jemalloc/src/tsd.c
+++ b/deps/jemalloc/src/tsd.c
@@ -1,5 +1,10 @@
-#define JEMALLOC_TSD_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_TSD_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
/******************************************************************************/
/* Data. */
@@ -7,28 +12,148 @@
static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP
+__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER;
+__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false;
+bool tsd_booted = false;
+#elif (defined(JEMALLOC_TLS))
+__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER;
+pthread_key_t tsd_tsd;
+bool tsd_booted = false;
+#elif (defined(_WIN32))
+DWORD tsd_tsd;
+tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER};
+bool tsd_booted = false;
+#else
+
+/*
+ * This contains a mutex, but it's pretty convenient to allow the mutex code to
+ * have a dependency on tsd. So we define the struct here, and only refer to it
+ * by pointer in the header.
+ */
+struct tsd_init_head_s {
+ ql_head(tsd_init_block_t) blocks;
+ malloc_mutex_t lock;
+};
+
+pthread_key_t tsd_tsd;
+tsd_init_head_t tsd_init_head = {
+ ql_head_initializer(blocks),
+ MALLOC_MUTEX_INITIALIZER
+};
+tsd_wrapper_t tsd_boot_wrapper = {
+ false,
+ TSD_INITIALIZER
+};
+bool tsd_booted = false;
+#endif
+
+
/******************************************************************************/
-void *
-malloc_tsd_malloc(size_t size)
-{
+void
+tsd_slow_update(tsd_t *tsd) {
+ if (tsd_nominal(tsd)) {
+ if (malloc_slow || !tsd_tcache_enabled_get(tsd) ||
+ tsd_reentrancy_level_get(tsd) > 0) {
+ tsd->state = tsd_state_nominal_slow;
+ } else {
+ tsd->state = tsd_state_nominal;
+ }
+ }
+}
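
Editor's note: tsd_slow_update() caches the outcome of the three fast-path checks in tsd->state so that hot paths can branch on a single predicate. A hedged sketch of the intended consumption; tsd_fast() is the inline header predicate that tsd_fetch_slow() below asserts against:

void
example_dispatch(tsd_t *tsd) {
	if (tsd_fast(tsd)) {
		/* state == tsd_state_nominal: tcache enabled, no reentrancy,
		 * and malloc_slow unset, so take the fast path. */
	} else {
		/* state == tsd_state_nominal_slow (or worse): fall back to
		 * the fully checked slow path. */
	}
}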
+
+static bool
+tsd_data_init(tsd_t *tsd) {
+ /*
+ * We initialize the rtree context first (before the tcache), since the
+ * tcache initialization depends on it.
+ */
+ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
- /* Avoid choose_arena() in order to dodge bootstrapping issues. */
- return (arena_malloc(arenas[0], size, false, false));
+ return tsd_tcache_enabled_data_init(tsd);
}
-void
-malloc_tsd_dalloc(void *wrapper)
-{
+static void
+assert_tsd_data_cleanup_done(tsd_t *tsd) {
+ assert(!tsd_nominal(tsd));
+ assert(*tsd_arenap_get_unsafe(tsd) == NULL);
+ assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
+ assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
+ assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
+ assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
+ assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
+}
+
+static bool
+tsd_data_init_nocleanup(tsd_t *tsd) {
+ assert(tsd->state == tsd_state_reincarnated ||
+ tsd->state == tsd_state_minimal_initialized);
+ /*
+ * During reincarnation, there is no guarantee that the cleanup function
+ * will be called (deallocation may happen after all tsd destructors).
+	 * We set up tsd so that no cleanup is needed.
+ */
+ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+ *tsd_tcache_enabledp_get_unsafe(tsd) = false;
+ *tsd_reentrancy_levelp_get(tsd) = 1;
+ assert_tsd_data_cleanup_done(tsd);
- idalloct(wrapper, false);
+ return false;
}
-void
-malloc_tsd_no_cleanup(void *arg)
-{
+tsd_t *
+tsd_fetch_slow(tsd_t *tsd, bool minimal) {
+ assert(!tsd_fast(tsd));
+
+ if (tsd->state == tsd_state_nominal_slow) {
+ /* On slow path but no work needed. */
+ assert(malloc_slow || !tsd_tcache_enabled_get(tsd) ||
+ tsd_reentrancy_level_get(tsd) > 0 ||
+ *tsd_arenas_tdata_bypassp_get(tsd));
+ } else if (tsd->state == tsd_state_uninitialized) {
+ if (!minimal) {
+ tsd->state = tsd_state_nominal;
+ tsd_slow_update(tsd);
+ /* Trigger cleanup handler registration. */
+ tsd_set(tsd);
+ tsd_data_init(tsd);
+ } else {
+ tsd->state = tsd_state_minimal_initialized;
+ tsd_set(tsd);
+ tsd_data_init_nocleanup(tsd);
+ }
+ } else if (tsd->state == tsd_state_minimal_initialized) {
+ if (!minimal) {
+ /* Switch to fully initialized. */
+ tsd->state = tsd_state_nominal;
+ assert(*tsd_reentrancy_levelp_get(tsd) >= 1);
+ (*tsd_reentrancy_levelp_get(tsd))--;
+ tsd_slow_update(tsd);
+ tsd_data_init(tsd);
+ } else {
+ assert_tsd_data_cleanup_done(tsd);
+ }
+ } else if (tsd->state == tsd_state_purgatory) {
+ tsd->state = tsd_state_reincarnated;
+ tsd_set(tsd);
+ tsd_data_init_nocleanup(tsd);
+ } else {
+ assert(tsd->state == tsd_state_reincarnated);
+ }
+
+ return tsd;
+}
+
+void *
+malloc_tsd_malloc(size_t size) {
+ return a0malloc(CACHELINE_CEILING(size));
+}
- not_reached();
+void
+malloc_tsd_dalloc(void *wrapper) {
+ a0dalloc(wrapper);
}
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
@@ -36,21 +161,22 @@ malloc_tsd_no_cleanup(void *arg)
JEMALLOC_EXPORT
#endif
void
-_malloc_thread_cleanup(void)
-{
+_malloc_thread_cleanup(void) {
bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
unsigned i;
- for (i = 0; i < ncleanups; i++)
+ for (i = 0; i < ncleanups; i++) {
pending[i] = true;
+ }
do {
again = false;
for (i = 0; i < ncleanups; i++) {
if (pending[i]) {
pending[i] = cleanups[i]();
- if (pending[i])
+ if (pending[i]) {
again = true;
+ }
}
}
} while (again);
@@ -58,26 +184,92 @@ _malloc_thread_cleanup(void)
#endif
void
-malloc_tsd_cleanup_register(bool (*f)(void))
-{
-
+malloc_tsd_cleanup_register(bool (*f)(void)) {
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups] = f;
ncleanups++;
}
+static void
+tsd_do_data_cleanup(tsd_t *tsd) {
+ prof_tdata_cleanup(tsd);
+ iarena_cleanup(tsd);
+ arena_cleanup(tsd);
+ arenas_tdata_cleanup(tsd);
+ tcache_cleanup(tsd);
+ witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
+}
+
void
-malloc_tsd_boot(void)
-{
+tsd_cleanup(void *arg) {
+ tsd_t *tsd = (tsd_t *)arg;
+
+ switch (tsd->state) {
+ case tsd_state_uninitialized:
+ /* Do nothing. */
+ break;
+ case tsd_state_minimal_initialized:
+		/* This implies the thread only did free() in its lifetime. */
+ /* Fall through. */
+ case tsd_state_reincarnated:
+		/*
+		 * Reincarnated means that another TSD destructor deallocated
+		 * memory after this destructor had already run. Cleanup isn't
+		 * required, but it is still performed for testing and
+		 * completeness.
+		 */
+ assert_tsd_data_cleanup_done(tsd);
+ /* Fall through. */
+ case tsd_state_nominal:
+ case tsd_state_nominal_slow:
+ tsd_do_data_cleanup(tsd);
+ tsd->state = tsd_state_purgatory;
+ tsd_set(tsd);
+ break;
+ case tsd_state_purgatory:
+ /*
+ * The previous time this destructor was called, we set the
+ * state to tsd_state_purgatory so that other destructors
+ * wouldn't cause re-creation of the tsd. This time, do
+ * nothing, and do not request another callback.
+ */
+ break;
+ default:
+ not_reached();
+ }
+#ifdef JEMALLOC_JET
+ test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd);
+ int *data = tsd_test_datap_get_unsafe(tsd);
+ if (test_callback != NULL) {
+ test_callback(data);
+ }
+#endif
+}
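
Editor's note: the purgatory and reincarnated cases above exist because pthread TSD destructors from different libraries run in an unspecified order. An illustration of the sequence they handle (hypothetical timeline, not code from this patch):

/*
 * 1. The thread exits; tsd_cleanup() runs, tsd_do_data_cleanup() releases the
 *    per-thread state, the state is set to tsd_state_purgatory, and tsd_set()
 *    re-registers the destructor.
 * 2. A destructor belonging to another library then calls free(); the
 *    allocator needs tsd again, and tsd_fetch_slow() converts purgatory into
 *    tsd_state_reincarnated using the no-cleanup initializer.
 * 3. tsd_cleanup() fires one final time, sees purgatory/reincarnated, and
 *    does nothing further, so the destructor chain terminates.
 */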
+
+tsd_t *
+malloc_tsd_boot0(void) {
+ tsd_t *tsd;
ncleanups = 0;
+ if (tsd_boot0()) {
+ return NULL;
+ }
+ tsd = tsd_fetch();
+ *tsd_arenas_tdata_bypassp_get(tsd) = true;
+ return tsd;
+}
+
+void
+malloc_tsd_boot1(void) {
+ tsd_boot1();
+ tsd_t *tsd = tsd_fetch();
+ /* malloc_slow has been set properly. Update tsd_slow. */
+ tsd_slow_update(tsd);
+ *tsd_arenas_tdata_bypassp_get(tsd) = false;
}
#ifdef _WIN32
static BOOL WINAPI
-_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
-{
-
+_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) {
switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
case DLL_THREAD_ATTACH:
@@ -90,52 +282,60 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
default:
break;
}
- return (true);
+ return true;
}
+/*
+ * The "pragma section" directive below needs the plain identifier "read",
+ * but "read" may have been hooked (redefined) earlier. Since read() is not
+ * used in the rest of this file, it is safe to unhook it here.
+ */
+#ifdef read
+# undef read
+#endif
+
#ifdef _MSC_VER
# ifdef _M_IX86
# pragma comment(linker, "/INCLUDE:__tls_used")
+# pragma comment(linker, "/INCLUDE:_tls_callback")
# else
# pragma comment(linker, "/INCLUDE:_tls_used")
+# pragma comment(linker, "/INCLUDE:tls_callback")
# endif
# pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
-static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
+BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
void *
-tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
-{
+tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) {
pthread_t self = pthread_self();
tsd_init_block_t *iter;
/* Check whether this thread has already inserted into the list. */
- malloc_mutex_lock(&head->lock);
+ malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_foreach(iter, &head->blocks, link) {
if (iter->thread == self) {
- malloc_mutex_unlock(&head->lock);
- return (iter->data);
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ return iter->data;
}
}
/* Insert block into list. */
ql_elm_new(block, link);
block->thread = self;
ql_tail_insert(&head->blocks, block, link);
- malloc_mutex_unlock(&head->lock);
- return (NULL);
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
+ return NULL;
}
void
-tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
-{
-
- malloc_mutex_lock(&head->lock);
+tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) {
+ malloc_mutex_lock(TSDN_NULL, &head->lock);
ql_remove(&head->blocks, block, link);
- malloc_mutex_unlock(&head->lock);
+ malloc_mutex_unlock(TSDN_NULL, &head->lock);
}
#endif
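
Editor's note: tsd_init_check_recursion()/tsd_init_finish() guard the pthreads-based TSD bootstrap, where allocating a thread's wrapper can itself re-enter the allocator and ask for the very tsd being set up. A hedged sketch of the intended call pattern (the real caller is the tsd_wrapper bootstrap in the tsd header; the function and variable names below are illustrative):

#include <pthread.h>

void *
example_wrapper_get(tsd_init_head_t *head, pthread_key_t key) {
	tsd_init_block_t block;

	/* If this thread is already mid-initialization, reuse its block. */
	void *in_progress = tsd_init_check_recursion(head, &block);
	if (in_progress != NULL) {
		return in_progress;
	}

	/* May re-enter the allocator; recursion is now detectable. */
	block.data = malloc_tsd_malloc(sizeof(tsd_wrapper_t));
	if (block.data != NULL) {
		pthread_setspecific(key, block.data);	/* publish the wrapper */
	}
	tsd_init_finish(head, &block);
	return block.data;
}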
diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c
index e0302ef4ed..9d3b7b4952 100644
--- a/deps/jemalloc/src/zone.c
+++ b/deps/jemalloc/src/zone.c
@@ -1,10 +1,83 @@
-#include "jemalloc/internal/jemalloc_internal.h"
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
+/*
+ * The definitions of the following structs in malloc/malloc.h might be too
+ * old for the built binary to run on newer versions of OSX. So use the
+ * newest possible version of those structs.
+ */
+typedef struct _malloc_zone_t {
+ void *reserved1;
+ void *reserved2;
+ size_t (*size)(struct _malloc_zone_t *, const void *);
+ void *(*malloc)(struct _malloc_zone_t *, size_t);
+ void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
+ void *(*valloc)(struct _malloc_zone_t *, size_t);
+ void (*free)(struct _malloc_zone_t *, void *);
+ void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
+ void (*destroy)(struct _malloc_zone_t *);
+ const char *zone_name;
+ unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
+ void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
+ struct malloc_introspection_t *introspect;
+ unsigned version;
+ void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
+ void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
+ size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
+} malloc_zone_t;
+
+typedef struct {
+ vm_address_t address;
+ vm_size_t size;
+} vm_range_t;
+
+typedef struct malloc_statistics_t {
+ unsigned blocks_in_use;
+ size_t size_in_use;
+ size_t max_size_in_use;
+ size_t size_allocated;
+} malloc_statistics_t;
+
+typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
+
+typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
+
+typedef struct malloc_introspection_t {
+ kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
+ size_t (*good_size)(malloc_zone_t *, size_t);
+ boolean_t (*check)(malloc_zone_t *);
+ void (*print)(malloc_zone_t *, boolean_t);
+ void (*log)(malloc_zone_t *, void *);
+ void (*force_lock)(malloc_zone_t *);
+ void (*force_unlock)(malloc_zone_t *);
+ void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
+ boolean_t (*zone_locked)(malloc_zone_t *);
+ boolean_t (*enable_discharge_checking)(malloc_zone_t *);
+ boolean_t (*disable_discharge_checking)(malloc_zone_t *);
+ void (*discharge)(malloc_zone_t *, void *);
+#ifdef __BLOCKS__
+ void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
+#else
+ void *enumerate_unavailable_without_blocks;
+#endif
+ void (*reinit_lock)(malloc_zone_t *);
+} malloc_introspection_t;
+
+extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
+
+extern malloc_zone_t *malloc_default_zone(void);
+
+extern void malloc_zone_register(malloc_zone_t *zone);
+
+extern void malloc_zone_unregister(malloc_zone_t *zone);
+
/*
- * The malloc_default_purgeable_zone function is only available on >= 10.6.
+ * The malloc_default_purgeable_zone() function is only available on >= 10.6.
* We need to check whether it is present at runtime, thus the weak_import.
*/
extern malloc_zone_t *malloc_default_purgeable_zone(void)
@@ -13,30 +86,42 @@ JEMALLOC_ATTR(weak_import);
/******************************************************************************/
/* Data. */
-static malloc_zone_t zone;
-static struct malloc_introspection_t zone_introspect;
+static malloc_zone_t *default_zone, *purgeable_zone;
+static malloc_zone_t jemalloc_zone;
+static struct malloc_introspection_t jemalloc_zone_introspect;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static size_t zone_size(malloc_zone_t *zone, void *ptr);
+static size_t zone_size(malloc_zone_t *zone, const void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
-#if (JEMALLOC_ZONE_VERSION >= 5)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 6)
size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
size_t size);
-#endif
-static void *zone_destroy(malloc_zone_t *zone);
+static void zone_destroy(malloc_zone_t *zone);
+static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
+ void **results, unsigned num_requested);
+static void zone_batch_free(struct _malloc_zone_t *zone,
+ void **to_be_freed, unsigned num_to_be_freed);
+static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
+static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder);
+static boolean_t zone_check(malloc_zone_t *zone);
+static void zone_print(malloc_zone_t *zone, boolean_t verbose);
+static void zone_log(malloc_zone_t *zone, void *address);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
+static void zone_statistics(malloc_zone_t *zone,
+ malloc_statistics_t *stats);
+static boolean_t zone_locked(malloc_zone_t *zone);
+static void zone_reinit_lock(malloc_zone_t *zone);
/******************************************************************************/
/*
@@ -44,9 +129,7 @@ static void zone_force_unlock(malloc_zone_t *zone);
*/
static size_t
-zone_size(malloc_zone_t *zone, void *ptr)
-{
-
+zone_size(malloc_zone_t *zone, const void *ptr) {
/*
* There appear to be places within Darwin (such as setenv(3)) that
* cause calls to this function with pointers that *no* zone owns. If
@@ -54,40 +137,33 @@ zone_size(malloc_zone_t *zone, void *ptr)
* our zone into two parts, and use one as the default allocator and
* the other as the default deallocator/reallocator. Since that will
* not work in practice, we must check all pointers to assure that they
- * reside within a mapped chunk before determining size.
+ * reside within a mapped extent before determining size.
*/
- return (ivsalloc(ptr, config_prof));
+ return ivsalloc(tsdn_fetch(), ptr);
}
static void *
-zone_malloc(malloc_zone_t *zone, size_t size)
-{
-
- return (je_malloc(size));
+zone_malloc(malloc_zone_t *zone, size_t size) {
+ return je_malloc(size);
}
static void *
-zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
-{
-
- return (je_calloc(num, size));
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
+ return je_calloc(num, size);
}
static void *
-zone_valloc(malloc_zone_t *zone, size_t size)
-{
+zone_valloc(malloc_zone_t *zone, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, PAGE, size);
- return (ret);
+ return ret;
}
static void
-zone_free(malloc_zone_t *zone, void *ptr)
-{
-
- if (ivsalloc(ptr, config_prof) != 0) {
+zone_free(malloc_zone_t *zone, void *ptr) {
+ if (ivsalloc(tsdn_fetch(), ptr) != 0) {
je_free(ptr);
return;
}
@@ -96,163 +172,280 @@ zone_free(malloc_zone_t *zone, void *ptr)
}
static void *
-zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
-{
-
- if (ivsalloc(ptr, config_prof) != 0)
- return (je_realloc(ptr, size));
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
+ if (ivsalloc(tsdn_fetch(), ptr) != 0) {
+ return je_realloc(ptr, size);
+ }
- return (realloc(ptr, size));
+ return realloc(ptr, size);
}
-#if (JEMALLOC_ZONE_VERSION >= 5)
static void *
-zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
-{
+zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
void *ret = NULL; /* Assignment avoids useless compiler warning. */
je_posix_memalign(&ret, alignment, size);
- return (ret);
+ return ret;
}
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 6)
static void
-zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
-{
+zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
+ size_t alloc_size;
- if (ivsalloc(ptr, config_prof) != 0) {
- assert(ivsalloc(ptr, config_prof) == size);
+ alloc_size = ivsalloc(tsdn_fetch(), ptr);
+ if (alloc_size != 0) {
+ assert(alloc_size == size);
je_free(ptr);
return;
}
free(ptr);
}
-#endif
-
-static void *
-zone_destroy(malloc_zone_t *zone)
-{
+static void
+zone_destroy(malloc_zone_t *zone) {
/* This function should never be called. */
not_reached();
- return (NULL);
+}
+
+static unsigned
+zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
+ unsigned num_requested) {
+ unsigned i;
+
+ for (i = 0; i < num_requested; i++) {
+ results[i] = je_malloc(size);
+		if (!results[i]) {
+			break;
+		}
+ }
+
+ return i;
+}
+
+static void
+zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
+ unsigned num_to_be_freed) {
+ unsigned i;
+
+ for (i = 0; i < num_to_be_freed; i++) {
+ zone_free(zone, to_be_freed[i]);
+ to_be_freed[i] = NULL;
+ }
}
static size_t
-zone_good_size(malloc_zone_t *zone, size_t size)
-{
+zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
+ return 0;
+}
- if (size == 0)
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size) {
+ if (size == 0) {
size = 1;
- return (s2u(size));
+ }
+ return sz_s2u(size);
+}
+
+static kern_return_t
+zone_enumerator(task_t task, void *data, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder) {
+ return KERN_SUCCESS;
+}
+
+static boolean_t
+zone_check(malloc_zone_t *zone) {
+ return true;
+}
+
+static void
+zone_print(malloc_zone_t *zone, boolean_t verbose) {
}
static void
-zone_force_lock(malloc_zone_t *zone)
-{
+zone_log(malloc_zone_t *zone, void *address) {
+}
- if (isthreaded)
+static void
+zone_force_lock(malloc_zone_t *zone) {
+ if (isthreaded) {
jemalloc_prefork();
+ }
+}
+
+static void
+zone_force_unlock(malloc_zone_t *zone) {
+ /*
+ * Call jemalloc_postfork_child() rather than
+ * jemalloc_postfork_parent(), because this function is executed by both
+ * parent and child. The parent can tolerate having state
+ * reinitialized, but the child cannot unlock mutexes that were locked
+ * by the parent.
+ */
+ if (isthreaded) {
+ jemalloc_postfork_child();
+ }
+}
+
+static void
+zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
+	/* We make no effort to actually fill the values. */
+ stats->blocks_in_use = 0;
+ stats->size_in_use = 0;
+ stats->max_size_in_use = 0;
+ stats->size_allocated = 0;
+}
+
+static boolean_t
+zone_locked(malloc_zone_t *zone) {
+	/* Pretend no lock is being held. */
+ return false;
+}
+
+static void
+zone_reinit_lock(malloc_zone_t *zone) {
+	/*
+	 * As of OSX 10.12, this function is only used when force_unlock would
+	 * be used if the zone version were < 9. So just use force_unlock.
+	 */
+ zone_force_unlock(zone);
+}
+
+static void
+zone_init(void) {
+ jemalloc_zone.size = zone_size;
+ jemalloc_zone.malloc = zone_malloc;
+ jemalloc_zone.calloc = zone_calloc;
+ jemalloc_zone.valloc = zone_valloc;
+ jemalloc_zone.free = zone_free;
+ jemalloc_zone.realloc = zone_realloc;
+ jemalloc_zone.destroy = zone_destroy;
+ jemalloc_zone.zone_name = "jemalloc_zone";
+ jemalloc_zone.batch_malloc = zone_batch_malloc;
+ jemalloc_zone.batch_free = zone_batch_free;
+ jemalloc_zone.introspect = &jemalloc_zone_introspect;
+ jemalloc_zone.version = 9;
+ jemalloc_zone.memalign = zone_memalign;
+ jemalloc_zone.free_definite_size = zone_free_definite_size;
+ jemalloc_zone.pressure_relief = zone_pressure_relief;
+
+ jemalloc_zone_introspect.enumerator = zone_enumerator;
+ jemalloc_zone_introspect.good_size = zone_good_size;
+ jemalloc_zone_introspect.check = zone_check;
+ jemalloc_zone_introspect.print = zone_print;
+ jemalloc_zone_introspect.log = zone_log;
+ jemalloc_zone_introspect.force_lock = zone_force_lock;
+ jemalloc_zone_introspect.force_unlock = zone_force_unlock;
+ jemalloc_zone_introspect.statistics = zone_statistics;
+ jemalloc_zone_introspect.zone_locked = zone_locked;
+ jemalloc_zone_introspect.enable_discharge_checking = NULL;
+ jemalloc_zone_introspect.disable_discharge_checking = NULL;
+ jemalloc_zone_introspect.discharge = NULL;
+#ifdef __BLOCKS__
+ jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
+#else
+ jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
+#endif
+ jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
+}
+
+static malloc_zone_t *
+zone_default_get(void) {
+ malloc_zone_t **zones = NULL;
+ unsigned int num_zones = 0;
+
+ /*
+ * On OSX 10.12, malloc_default_zone returns a special zone that is not
+ * present in the list of registered zones. That zone uses a "lite zone"
+ * if one is present (apparently enabled when malloc stack logging is
+ * enabled), or the first registered zone otherwise. In practice this
+ * means unless malloc stack logging is enabled, the first registered
+	 * means that, unless malloc stack logging is enabled, the first
+	 * registered zone is the default. So query the list of registered
+	 * zones and take the first one, instead of relying on
+	 * malloc_default_zone.
+ if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
+ (vm_address_t**)&zones, &num_zones)) {
+ /*
+ * Reset the value in case the failure happened after it was
+ * set.
+ */
+ num_zones = 0;
+ }
+
+ if (num_zones) {
+ return zones[0];
+ }
+
+ return malloc_default_zone();
}
+/* As written, this function can only promote jemalloc_zone. */
static void
-zone_force_unlock(malloc_zone_t *zone)
-{
+zone_promote(void) {
+ malloc_zone_t *zone;
+
+ do {
+ /*
+ * Unregister and reregister the default zone. On OSX >= 10.6,
+ * unregistering takes the last registered zone and places it
+ * at the location of the specified zone. Unregistering the
+ * default zone thus makes the last registered one the default.
+ * On OSX < 10.6, unregistering shifts all registered zones.
+ * The first registered zone then becomes the default.
+ */
+ malloc_zone_unregister(default_zone);
+ malloc_zone_register(default_zone);
- if (isthreaded)
- jemalloc_postfork_parent();
+ /*
+ * On OSX 10.6, having the default purgeable zone appear before
+	 * the default zone makes some things crash because it thinks it
+	 * owns pointers allocated from the default zone. We thus
+ * unregister/re-register it in order to ensure it's always
+ * after the default zone. On OSX < 10.6, there is no purgeable
+ * zone, so this does nothing. On OSX >= 10.6, unregistering
+ * replaces the purgeable zone with the last registered zone
+ * above, i.e. the default zone. Registering it again then puts
+ * it at the end, obviously after the default zone.
+ */
+ if (purgeable_zone != NULL) {
+ malloc_zone_unregister(purgeable_zone);
+ malloc_zone_register(purgeable_zone);
+ }
+
+ zone = zone_default_get();
+ } while (zone != &jemalloc_zone);
}
JEMALLOC_ATTR(constructor)
void
-register_zone(void)
-{
-
+zone_register(void) {
/*
* If something else replaced the system default zone allocator, don't
* register jemalloc's.
*/
- malloc_zone_t *default_zone = malloc_default_zone();
- if (!default_zone->zone_name ||
- strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
+ default_zone = zone_default_get();
+ if (!default_zone->zone_name || strcmp(default_zone->zone_name,
+ "DefaultMallocZone") != 0) {
return;
}
- zone.size = (void *)zone_size;
- zone.malloc = (void *)zone_malloc;
- zone.calloc = (void *)zone_calloc;
- zone.valloc = (void *)zone_valloc;
- zone.free = (void *)zone_free;
- zone.realloc = (void *)zone_realloc;
- zone.destroy = (void *)zone_destroy;
- zone.zone_name = "jemalloc_zone";
- zone.batch_malloc = NULL;
- zone.batch_free = NULL;
- zone.introspect = &zone_introspect;
- zone.version = JEMALLOC_ZONE_VERSION;
-#if (JEMALLOC_ZONE_VERSION >= 5)
- zone.memalign = zone_memalign;
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 6)
- zone.free_definite_size = zone_free_definite_size;
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 8)
- zone.pressure_relief = NULL;
-#endif
-
- zone_introspect.enumerator = NULL;
- zone_introspect.good_size = (void *)zone_good_size;
- zone_introspect.check = NULL;
- zone_introspect.print = NULL;
- zone_introspect.log = NULL;
- zone_introspect.force_lock = (void *)zone_force_lock;
- zone_introspect.force_unlock = (void *)zone_force_unlock;
- zone_introspect.statistics = NULL;
-#if (JEMALLOC_ZONE_VERSION >= 6)
- zone_introspect.zone_locked = NULL;
-#endif
-#if (JEMALLOC_ZONE_VERSION >= 7)
- zone_introspect.enable_discharge_checking = NULL;
- zone_introspect.disable_discharge_checking = NULL;
- zone_introspect.discharge = NULL;
-#ifdef __BLOCKS__
- zone_introspect.enumerate_discharged_pointers = NULL;
-#else
- zone_introspect.enumerate_unavailable_without_blocks = NULL;
-#endif
-#endif
-
/*
* The default purgeable zone is created lazily by OSX's libc. It uses
* the default zone when it is created for "small" allocations
* (< 15 KiB), but assumes the default zone is a scalable_zone. This
* obviously fails when the default zone is the jemalloc zone, so
- * malloc_default_purgeable_zone is called beforehand so that the
+ * malloc_default_purgeable_zone() is called beforehand so that the
* default purgeable zone is created when the default zone is still
* a scalable_zone. As purgeable zones only exist on >= 10.6, we need
* to check for the existence of malloc_default_purgeable_zone() at
* run time.
*/
- if (malloc_default_purgeable_zone != NULL)
- malloc_default_purgeable_zone();
+ purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
+ malloc_default_purgeable_zone();
/* Register the custom zone. At this point it won't be the default. */
- malloc_zone_register(&zone);
+ zone_init();
+ malloc_zone_register(&jemalloc_zone);
- /*
- * Unregister and reregister the default zone. On OSX >= 10.6,
- * unregistering takes the last registered zone and places it at the
- * location of the specified zone. Unregistering the default zone thus
- * makes the last registered one the default. On OSX < 10.6,
- * unregistering shifts all registered zones. The first registered zone
- * then becomes the default.
- */
- do {
- default_zone = malloc_default_zone();
- malloc_zone_unregister(default_zone);
- malloc_zone_register(default_zone);
- } while (malloc_default_zone() != &zone);
+ /* Promote the custom zone to be default. */
+ zone_promote();
}
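
Editor's note: once the constructor above has run and zone_promote() has made jemalloc_zone the default zone, the stock Darwin allocation entry points dispatch through the table filled in by zone_init(). A hedged, standalone illustration of that effect (assumes the constructor fired at load time):

#include <malloc/malloc.h>
#include <stdlib.h>

int
main(void) {
	void *p = malloc(100);		/* default zone -> zone_malloc() -> je_malloc() */
	size_t sz = malloc_size(p);	/* owning zone's size() -> zone_size() */
	free(p);			/* owning zone's free() -> zone_free() -> je_free() */
	return (sz >= 100) ? 0 : 1;
}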