author     click <click@gonnamakeyou.com>    2013-04-16 21:03:29 +0200
committer  click <click@gonnamakeyou.com>    2013-04-16 21:03:29 +0200
commit     acf6c79d9bd2fb41c00a39b745d29c100dd8b2c1 (patch)
tree       d1d66085950939b2aed3dcad1cfd1f42cd45c9a1 /dep/jemalloc/src
parent     e9d7b53e6dd74793b5ad81e7df7cd365adfe61e5 (diff)
Core/Dependencies: Upgrade to jemalloc-3.3.1 (Also fixes a few weird crashes), closes #7852
Note: This adds support for using jemalloc on FreeBSD, Mac OS X Lion and a few other platforms (not enabled by default). Please read dep/jemalloc/ChangeLog for further information (changes from 2.1.1 and up apply).
Diffstat (limited to 'dep/jemalloc/src')
-rw-r--r--  dep/jemalloc/src/arena.c       2928
-rw-r--r--  dep/jemalloc/src/atomic.c         2
-rw-r--r--  dep/jemalloc/src/base.c          40
-rw-r--r--  dep/jemalloc/src/bitmap.c        90
-rw-r--r--  dep/jemalloc/src/chunk.c        420
-rw-r--r--  dep/jemalloc/src/chunk_dss.c    293
-rw-r--r--  dep/jemalloc/src/chunk_mmap.c   283
-rw-r--r--  dep/jemalloc/src/chunk_swap.c   402
-rw-r--r--  dep/jemalloc/src/ckh.c          144
-rw-r--r--  dep/jemalloc/src/ctl.c         1524
-rw-r--r--  dep/jemalloc/src/extent.c         2
-rw-r--r--  dep/jemalloc/src/hash.c           2
-rw-r--r--  dep/jemalloc/src/huge.c         219
-rw-r--r--  dep/jemalloc/src/jemalloc.c    1977
-rw-r--r--  dep/jemalloc/src/mb.c             2
-rw-r--r--  dep/jemalloc/src/mutex.c         95
-rw-r--r--  dep/jemalloc/src/prof.c         780
-rw-r--r--  dep/jemalloc/src/quarantine.c   190
-rw-r--r--  dep/jemalloc/src/rtree.c         28
-rw-r--r--  dep/jemalloc/src/stats.c        504
-rw-r--r--  dep/jemalloc/src/tcache.c       487
-rw-r--r--  dep/jemalloc/src/tsd.c          107
-rw-r--r--  dep/jemalloc/src/util.c         641
-rw-r--r--  dep/jemalloc/src/zone.c         258
24 files changed, 6116 insertions, 5302 deletions
diff --git a/dep/jemalloc/src/arena.c b/dep/jemalloc/src/arena.c
index 7f939b3cd77..05a787f89d9 100644
--- a/dep/jemalloc/src/arena.c
+++ b/dep/jemalloc/src/arena.c
@@ -4,176 +4,71 @@
/******************************************************************************/
/* Data. */
-size_t opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
-size_t opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
-uint8_t const *small_size2bin;
-
-/* Various bin-related settings. */
-unsigned nqbins;
-unsigned ncbins;
-unsigned nsbins;
-unsigned nbins;
-size_t qspace_max;
-size_t cspace_min;
-size_t cspace_max;
-size_t sspace_min;
-size_t sspace_max;
-
-size_t lg_mspace;
-size_t mspace_mask;
+arena_bin_info_t arena_bin_info[NBINS];
-/*
- * const_small_size2bin is a static constant lookup table that in the common
- * case can be used as-is for small_size2bin. For dynamically linked programs,
- * this avoids a page of memory overhead per process.
- */
-#define S2B_1(i) i,
-#define S2B_2(i) S2B_1(i) S2B_1(i)
-#define S2B_4(i) S2B_2(i) S2B_2(i)
-#define S2B_8(i) S2B_4(i) S2B_4(i)
+JEMALLOC_ALIGNED(CACHELINE)
+const uint8_t small_size2bin[] = {
+#define S2B_8(i) i,
#define S2B_16(i) S2B_8(i) S2B_8(i)
#define S2B_32(i) S2B_16(i) S2B_16(i)
#define S2B_64(i) S2B_32(i) S2B_32(i)
#define S2B_128(i) S2B_64(i) S2B_64(i)
#define S2B_256(i) S2B_128(i) S2B_128(i)
-/*
- * The number of elements in const_small_size2bin is dependent on page size
- * and on the definition for SUBPAGE. If SUBPAGE changes, the '- 255' must also
- * change, along with the addition/removal of static lookup table element
- * definitions.
- */
-static const uint8_t const_small_size2bin[STATIC_PAGE_SIZE - 255] = {
- S2B_1(0xffU) /* 0 */
-#if (LG_QUANTUM == 4)
-/* 16-byte quantum **********************/
-# ifdef JEMALLOC_TINY
-# if (LG_TINY_MIN == 2)
- S2B_4(0) /* 4 */
- S2B_4(1) /* 8 */
- S2B_8(2) /* 16 */
-# define S2B_QMIN 2
-# elif (LG_TINY_MIN == 3)
- S2B_8(0) /* 8 */
- S2B_8(1) /* 16 */
-# define S2B_QMIN 1
-# else
-# error "Unsupported LG_TINY_MIN"
-# endif
-# else
- S2B_16(0) /* 16 */
-# define S2B_QMIN 0
-# endif
- S2B_16(S2B_QMIN + 1) /* 32 */
- S2B_16(S2B_QMIN + 2) /* 48 */
- S2B_16(S2B_QMIN + 3) /* 64 */
- S2B_16(S2B_QMIN + 4) /* 80 */
- S2B_16(S2B_QMIN + 5) /* 96 */
- S2B_16(S2B_QMIN + 6) /* 112 */
- S2B_16(S2B_QMIN + 7) /* 128 */
-# define S2B_CMIN (S2B_QMIN + 8)
-#else
-/* 8-byte quantum ***********************/
-# ifdef JEMALLOC_TINY
-# if (LG_TINY_MIN == 2)
- S2B_4(0) /* 4 */
- S2B_4(1) /* 8 */
-# define S2B_QMIN 1
-# else
-# error "Unsupported LG_TINY_MIN"
-# endif
-# else
- S2B_8(0) /* 8 */
-# define S2B_QMIN 0
-# endif
- S2B_8(S2B_QMIN + 1) /* 16 */
- S2B_8(S2B_QMIN + 2) /* 24 */
- S2B_8(S2B_QMIN + 3) /* 32 */
- S2B_8(S2B_QMIN + 4) /* 40 */
- S2B_8(S2B_QMIN + 5) /* 48 */
- S2B_8(S2B_QMIN + 6) /* 56 */
- S2B_8(S2B_QMIN + 7) /* 64 */
- S2B_8(S2B_QMIN + 8) /* 72 */
- S2B_8(S2B_QMIN + 9) /* 80 */
- S2B_8(S2B_QMIN + 10) /* 88 */
- S2B_8(S2B_QMIN + 11) /* 96 */
- S2B_8(S2B_QMIN + 12) /* 104 */
- S2B_8(S2B_QMIN + 13) /* 112 */
- S2B_8(S2B_QMIN + 14) /* 120 */
- S2B_8(S2B_QMIN + 15) /* 128 */
-# define S2B_CMIN (S2B_QMIN + 16)
-#endif
-/****************************************/
- S2B_64(S2B_CMIN + 0) /* 192 */
- S2B_64(S2B_CMIN + 1) /* 256 */
- S2B_64(S2B_CMIN + 2) /* 320 */
- S2B_64(S2B_CMIN + 3) /* 384 */
- S2B_64(S2B_CMIN + 4) /* 448 */
- S2B_64(S2B_CMIN + 5) /* 512 */
-# define S2B_SMIN (S2B_CMIN + 6)
- S2B_256(S2B_SMIN + 0) /* 768 */
- S2B_256(S2B_SMIN + 1) /* 1024 */
- S2B_256(S2B_SMIN + 2) /* 1280 */
- S2B_256(S2B_SMIN + 3) /* 1536 */
- S2B_256(S2B_SMIN + 4) /* 1792 */
- S2B_256(S2B_SMIN + 5) /* 2048 */
- S2B_256(S2B_SMIN + 6) /* 2304 */
- S2B_256(S2B_SMIN + 7) /* 2560 */
- S2B_256(S2B_SMIN + 8) /* 2816 */
- S2B_256(S2B_SMIN + 9) /* 3072 */
- S2B_256(S2B_SMIN + 10) /* 3328 */
- S2B_256(S2B_SMIN + 11) /* 3584 */
- S2B_256(S2B_SMIN + 12) /* 3840 */
-#if (STATIC_PAGE_SHIFT == 13)
- S2B_256(S2B_SMIN + 13) /* 4096 */
- S2B_256(S2B_SMIN + 14) /* 4352 */
- S2B_256(S2B_SMIN + 15) /* 4608 */
- S2B_256(S2B_SMIN + 16) /* 4864 */
- S2B_256(S2B_SMIN + 17) /* 5120 */
- S2B_256(S2B_SMIN + 18) /* 5376 */
- S2B_256(S2B_SMIN + 19) /* 5632 */
- S2B_256(S2B_SMIN + 20) /* 5888 */
- S2B_256(S2B_SMIN + 21) /* 6144 */
- S2B_256(S2B_SMIN + 22) /* 6400 */
- S2B_256(S2B_SMIN + 23) /* 6656 */
- S2B_256(S2B_SMIN + 24) /* 6912 */
- S2B_256(S2B_SMIN + 25) /* 7168 */
- S2B_256(S2B_SMIN + 26) /* 7424 */
- S2B_256(S2B_SMIN + 27) /* 7680 */
- S2B_256(S2B_SMIN + 28) /* 7936 */
-#endif
-};
-#undef S2B_1
-#undef S2B_2
-#undef S2B_4
+#define S2B_512(i) S2B_256(i) S2B_256(i)
+#define S2B_1024(i) S2B_512(i) S2B_512(i)
+#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
+#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
+#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
+#define SIZE_CLASS(bin, delta, size) \
+ S2B_##delta(bin)
+ SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
-#undef S2B_QMIN
-#undef S2B_CMIN
-#undef S2B_SMIN
+#undef S2B_512
+#undef S2B_1024
+#undef S2B_2048
+#undef S2B_4096
+#undef S2B_8192
+#undef SIZE_CLASS
+};
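
The replacement table above is generated by expanding SIZE_CLASSES through the S2B_<delta> macros: each expansion repeats a bin index once per 8-byte group that the size class covers, so (assuming the usual 8-byte quantization of small sizes) a request size maps to its bin with a single array lookup. The following is a minimal toy version of the same technique, with made-up 8/16/32-byte classes rather than jemalloc's real size classes:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Toy size-to-bin table (NOT jemalloc's real classes): bins of 8, 16 and
 * 32 bytes.  Each bin index is repeated once per 8-byte group the class
 * covers, exactly as the S2B_<delta> expansions do above.
 */
static const uint8_t toy_size2bin[] = {
	0,	/* (size-1)>>3 == 0:   sizes  1..8  ->  8-byte bin */
	1,	/* (size-1)>>3 == 1:   sizes  9..16 -> 16-byte bin */
	2, 2,	/* (size-1)>>3 == 2,3: sizes 17..32 -> 32-byte bin */
};

static unsigned
toy_bin_index(size_t size)
{

	assert(size >= 1 && size <= 32);
	return (toy_size2bin[(size - 1) >> 3]);
}

The real table above is additionally declared JEMALLOC_ALIGNED(CACHELINE), presumably to keep this hot lookup within as few cache lines as possible.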
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
+static void arena_avail_insert(arena_t *arena, arena_chunk_t *chunk,
+ size_t pageind, size_t npages, bool maybe_adjac_pred,
+ bool maybe_adjac_succ);
+static void arena_avail_remove(arena_t *arena, arena_chunk_t *chunk,
+ size_t pageind, size_t npages, bool maybe_adjac_pred,
+ bool maybe_adjac_succ);
static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
- bool large, bool zero);
+ bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
+static arena_run_t *arena_run_alloc_helper(arena_t *arena, size_t size,
+ bool large, size_t binind, bool zero);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
- bool zero);
+ size_t binind, bool zero);
+static arena_chunk_t *chunks_dirty_iter_cb(arena_chunk_tree_t *tree,
+ arena_chunk_t *chunk, void *arg);
static void arena_purge(arena_t *arena, bool all);
-static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
+static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
+ bool cleaned);
static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize);
static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
+static arena_run_t *arena_bin_runs_first(arena_bin_t *bin);
+static void arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
+static void arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
+static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
-static size_t arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
static void arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
@@ -186,11 +81,9 @@ static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero);
-static bool small_size2bin_init(void);
-#ifdef JEMALLOC_DEBUG
-static void small_size2bin_validate(void);
-#endif
-static bool small_size2bin_init_hard(void);
+static size_t bin_info_run_size_calc(arena_bin_info_t *bin_info,
+ size_t min_run_size);
+static void bin_info_init(void);
/******************************************************************************/
@@ -207,8 +100,8 @@ arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
}
/* Generate red-black tree functions. */
-rb_gen(static JEMALLOC_ATTR(unused), arena_run_tree_, arena_run_tree_t,
- arena_chunk_map_t, u.rb_link, arena_run_comp)
+rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
+ u.rb_link, arena_run_comp)
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
@@ -217,9 +110,6 @@ arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
size_t a_size = a->bits & ~PAGE_MASK;
size_t b_size = b->bits & ~PAGE_MASK;
- assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
- CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));
-
ret = (a_size > b_size) - (a_size < b_size);
if (ret == 0) {
uintptr_t a_mapelm, b_mapelm;
@@ -242,129 +132,311 @@ arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
}
/* Generate red-black tree functions. */
-rb_gen(static JEMALLOC_ATTR(unused), arena_avail_tree_, arena_avail_tree_t,
- arena_chunk_map_t, u.rb_link, arena_avail_comp)
+rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
+ u.rb_link, arena_avail_comp)
+
+static inline int
+arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
+{
+
+ assert(a != NULL);
+ assert(b != NULL);
+
+ /*
+ * Short-circuit for self comparison. The following comparison code
+ * would come to the same result, but at the cost of executing the slow
+ * path.
+ */
+ if (a == b)
+ return (0);
+
+ /*
+ * Order such that chunks with higher fragmentation are "less than"
+ * those with lower fragmentation -- purging order is from "least" to
+ * "greatest". Fragmentation is measured as:
+ *
+ *       mean current avail run size
+ *     --------------------------------
+ *     mean defragmented avail run size
+ *
+ *               navail
+ *            -----------
+ *            nruns_avail          nruns_avail-nruns_adjac
+ *  = ========================= = -----------------------
+ *               navail                  nruns_avail
+ *      -----------------------
+ *      nruns_avail-nruns_adjac
+ *
+ * The following code multiplies away the denominator prior to
+ * comparison, in order to avoid division.
+ *
+ */
+ {
+ size_t a_val = (a->nruns_avail - a->nruns_adjac) *
+ b->nruns_avail;
+ size_t b_val = (b->nruns_avail - b->nruns_adjac) *
+ a->nruns_avail;
+
+ if (a_val < b_val)
+ return (1);
+ if (a_val > b_val)
+ return (-1);
+ }
+ /*
+ * Break ties by chunk address. For fragmented chunks, report lower
+ * addresses as "lower", so that fragmentation reduction happens first
+ * at lower addresses. However, use the opposite ordering for
+ * unfragmented chunks, in order to increase the chances of
+ * re-allocating dirty runs.
+ */
+ {
+ uintptr_t a_chunk = (uintptr_t)a;
+ uintptr_t b_chunk = (uintptr_t)b;
+ int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
+ if (a->nruns_adjac == 0) {
+ assert(b->nruns_adjac == 0);
+ ret = -ret;
+ }
+ return (ret);
+ }
+}
+
+/* Generate red-black tree functions. */
+rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
+ dirty_link, arena_chunk_dirty_comp)
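
arena_chunk_dirty_comp above ranks chunks by the ratio (nruns_avail - nruns_adjac) / nruns_avail but, as its comment notes, multiplies the denominators away so the hot comparison path never divides. A standalone sketch of that cross-multiplication trick, with generic names and assuming the products fit in a size_t:

#include <stddef.h>

/*
 * Compare the fractions a_num/a_den and b_num/b_den without dividing:
 * a_num/a_den < b_num/b_den  <=>  a_num*b_den < b_num*a_den, provided the
 * values are non-negative, the denominators are nonzero and the products
 * do not overflow.  Returns -1, 0 or 1.
 */
static int
frac_comp(size_t a_num, size_t a_den, size_t b_num, size_t b_den)
{
	size_t a_val = a_num * b_den;
	size_t b_val = b_num * a_den;

	return ((a_val > b_val) - (a_val < b_val));
}

The comparator above applies the same product comparison, with the sign chosen so that the purge order described in its comment falls out of the tree's first-to-last traversal.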
+
+static inline bool
+arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
+{
+ bool ret;
+
+ if (pageind-1 < map_bias)
+ ret = false;
+ else {
+ ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
+ assert(ret == false || arena_mapbits_dirty_get(chunk,
+ pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
+ }
+ return (ret);
+}
+
+static inline bool
+arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
+{
+ bool ret;
+
+ if (pageind+npages == chunk_npages)
+ ret = false;
+ else {
+ assert(pageind+npages < chunk_npages);
+ ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
+ assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
+ != arena_mapbits_dirty_get(chunk, pageind+npages));
+ }
+ return (ret);
+}
+
+static inline bool
+arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
+{
+
+ return (arena_avail_adjac_pred(chunk, pageind) ||
+ arena_avail_adjac_succ(chunk, pageind, npages));
+}
+
+static void
+arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+{
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+
+ /*
+ * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
+ * removed and reinserted even if the run to be inserted is clean.
+ */
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+
+ if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
+ chunk->nruns_adjac++;
+ if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
+ chunk->nruns_adjac++;
+ chunk->nruns_avail++;
+ assert(chunk->nruns_avail > chunk->nruns_adjac);
+
+ if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
+ arena->ndirty += npages;
+ chunk->ndirty += npages;
+ }
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+
+ arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
+ pageind));
+}
+
+static void
+arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
+ size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
+{
+
+ assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
+ LG_PAGE));
+
+ /*
+ * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
+ * removed and reinserted even if the run to be removed is clean.
+ */
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);
+
+ if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
+ chunk->nruns_adjac--;
+ if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
+ chunk->nruns_adjac--;
+ chunk->nruns_avail--;
+ assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
+ == 0 && chunk->nruns_adjac == 0));
+
+ if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
+ arena->ndirty -= npages;
+ chunk->ndirty -= npages;
+ }
+ if (chunk->ndirty != 0)
+ arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);
+
+ arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
+ pageind));
+}
static inline void *
-arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin)
+arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
void *ret;
+ unsigned regind;
+ bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
+ (uintptr_t)bin_info->bitmap_offset);
- assert(run->magic == ARENA_RUN_MAGIC);
assert(run->nfree > 0);
+ assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);
+ regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
+ ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
+ (uintptr_t)(bin_info->reg_interval * regind));
run->nfree--;
- ret = run->avail;
- if (ret != NULL) {
- /* Double free can cause assertion failure.*/
- assert(ret != NULL);
- /* Write-after free can cause assertion failure. */
- assert((uintptr_t)ret >= (uintptr_t)run +
- (uintptr_t)bin->reg0_offset);
- assert((uintptr_t)ret < (uintptr_t)run->next);
- assert(((uintptr_t)ret - ((uintptr_t)run +
- (uintptr_t)bin->reg0_offset)) % (uintptr_t)bin->reg_size ==
- 0);
- run->avail = *(void **)ret;
- return (ret);
- }
- ret = run->next;
- run->next = (void *)((uintptr_t)ret + (uintptr_t)bin->reg_size);
- assert(ret != NULL);
+ if (regind == run->nextind)
+ run->nextind++;
+ assert(regind < run->nextind);
return (ret);
}
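
The bitmap-based arena_run_reg_alloc above turns the region index returned by bitmap_sfu() into an address with fixed-offset arithmetic, and the matching free path recovers the index from a pointer. A small standalone sketch of that correspondence, using hypothetical parameters that mirror reg0_offset and reg_interval from the patch:

#include <stddef.h>
#include <stdint.h>

/* Address of region `regind` within a small run headed at `run`. */
static void *
region_addr(void *run, size_t reg0_offset, size_t reg_interval,
    unsigned regind)
{

	return ((void *)((uintptr_t)run + (uintptr_t)reg0_offset +
	    (uintptr_t)(reg_interval * regind)));
}

/* Inverse mapping: recover the region index when `ptr` is freed. */
static unsigned
region_index(void *run, size_t reg0_offset, size_t reg_interval, void *ptr)
{
	size_t diff = (size_t)((uintptr_t)ptr - ((uintptr_t)run +
	    (uintptr_t)reg0_offset));

	return ((unsigned)(diff / reg_interval));
}

(The in-tree arena_run_regind() used by arena_run_reg_dalloc() below computes the same index; this sketch just makes the arithmetic explicit.)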
static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
-
- assert(run->nfree < run->bin->nregs);
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t mapbits = arena_mapbits_get(chunk, pageind);
+ size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ unsigned regind = arena_run_regind(run, bin_info, ptr);
+ bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
+ (uintptr_t)bin_info->bitmap_offset);
+
+ assert(run->nfree < bin_info->nregs);
/* Freeing an interior pointer can cause assertion failure. */
assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)run->bin->reg0_offset)) % (uintptr_t)run->bin->reg_size
- == 0);
- /*
- * Freeing a pointer lower than region zero can cause assertion
- * failure.
- */
+ (uintptr_t)bin_info->reg0_offset)) %
+ (uintptr_t)bin_info->reg_interval == 0);
assert((uintptr_t)ptr >= (uintptr_t)run +
- (uintptr_t)run->bin->reg0_offset);
- /*
- * Freeing a pointer past in the run's frontier can cause assertion
- * failure.
- */
- assert((uintptr_t)ptr < (uintptr_t)run->next);
+ (uintptr_t)bin_info->reg0_offset);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));
- *(void **)ptr = run->avail;
- run->avail = ptr;
+ bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
run->nfree++;
}
-#ifdef JEMALLOC_DEBUG
static inline void
-arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
+arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
+{
+
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), (npages << LG_PAGE));
+ memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
+ (npages << LG_PAGE));
+}
+
+static inline void
+arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
size_t i;
- size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << PAGE_SHIFT));
+ UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
- for (i = 0; i < PAGE_SIZE / sizeof(size_t); i++)
+ VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), PAGE);
+ for (i = 0; i < PAGE / sizeof(size_t); i++)
assert(p[i] == 0);
}
-#endif
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
- bool zero)
+ size_t binind, bool zero)
{
arena_chunk_t *chunk;
- size_t old_ndirty, run_ind, total_pages, need_pages, rem_pages, i;
+ size_t run_ind, total_pages, need_pages, rem_pages, i;
size_t flag_dirty;
- arena_avail_tree_t *runs_avail;
+
+ assert((large && binind == BININD_INVALID) || (large == false && binind
+ != BININD_INVALID));
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- old_ndirty = chunk->ndirty;
- run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk)
- >> PAGE_SHIFT);
- flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
- runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
- &arena->runs_avail_clean;
- total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
- PAGE_SHIFT;
- assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
- CHUNK_MAP_DIRTY) == flag_dirty);
- need_pages = (size >> PAGE_SHIFT);
+ run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
+ total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
+ LG_PAGE;
+ assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
+ flag_dirty);
+ need_pages = (size >> LG_PAGE);
assert(need_pages > 0);
assert(need_pages <= total_pages);
rem_pages = total_pages - need_pages;
- arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
+ arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
+ if (config_stats) {
+ /*
+ * Update stats_cactive if nactive is crossing a chunk
+ * multiple.
+ */
+ size_t cactive_diff = CHUNK_CEILING((arena->nactive +
+ need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
+ LG_PAGE);
+ if (cactive_diff != 0)
+ stats_cactive_add(cactive_diff);
+ }
arena->nactive += need_pages;
/* Keep track of trailing unused pages for later use. */
if (rem_pages > 0) {
if (flag_dirty != 0) {
- chunk->map[run_ind+need_pages-map_bias].bits =
- (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
- chunk->map[run_ind+total_pages-1-map_bias].bits =
- (rem_pages << PAGE_SHIFT) | CHUNK_MAP_DIRTY;
+ arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
+ (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
+ arena_mapbits_unallocated_set(chunk,
+ run_ind+total_pages-1, (rem_pages << LG_PAGE),
+ CHUNK_MAP_DIRTY);
} else {
- chunk->map[run_ind+need_pages-map_bias].bits =
- (rem_pages << PAGE_SHIFT) |
- (chunk->map[run_ind+need_pages-map_bias].bits &
- CHUNK_MAP_UNZEROED);
- chunk->map[run_ind+total_pages-1-map_bias].bits =
- (rem_pages << PAGE_SHIFT) |
- (chunk->map[run_ind+total_pages-1-map_bias].bits &
- CHUNK_MAP_UNZEROED);
+ arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
+ (rem_pages << LG_PAGE),
+ arena_mapbits_unzeroed_get(chunk,
+ run_ind+need_pages));
+ arena_mapbits_unallocated_set(chunk,
+ run_ind+total_pages-1, (rem_pages << LG_PAGE),
+ arena_mapbits_unzeroed_get(chunk,
+ run_ind+total_pages-1));
}
- arena_avail_tree_insert(runs_avail,
- &chunk->map[run_ind+need_pages-map_bias]);
- }
-
- /* Update dirty page accounting. */
- if (flag_dirty != 0) {
- chunk->ndirty -= need_pages;
- arena->ndirty -= need_pages;
+ arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
+ false, true);
}
/*
@@ -379,28 +451,21 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* zeroed (i.e. never before touched).
*/
for (i = 0; i < need_pages; i++) {
- if ((chunk->map[run_ind+i-map_bias].bits
- & CHUNK_MAP_UNZEROED) != 0) {
- memset((void *)((uintptr_t)
- chunk + ((run_ind+i) <<
- PAGE_SHIFT)), 0,
- PAGE_SIZE);
- }
-#ifdef JEMALLOC_DEBUG
- else {
- arena_chunk_validate_zeroed(
+ if (arena_mapbits_unzeroed_get(chunk,
+ run_ind+i) != 0) {
+ arena_run_zero(chunk, run_ind+i,
+ 1);
+ } else if (config_debug) {
+ arena_run_page_validate_zeroed(
chunk, run_ind+i);
}
-#endif
}
} else {
/*
* The run is dirty, so all pages must be
* zeroed.
*/
- memset((void *)((uintptr_t)chunk + (run_ind <<
- PAGE_SHIFT)), 0, (need_pages <<
- PAGE_SHIFT));
+ arena_run_zero(chunk, run_ind, need_pages);
}
}
@@ -408,10 +473,9 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* Set the last element first, in case the run only contains one
* page (i.e. both statements set the same element).
*/
- chunk->map[run_ind+need_pages-1-map_bias].bits =
- CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
- chunk->map[run_ind-map_bias].bits = size | flag_dirty |
- CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
+ flag_dirty);
+ arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
} else {
assert(zero == false);
/*
@@ -419,44 +483,34 @@ arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
* small run, so that arena_dalloc_bin_run() has the ability to
* conditionally trim clean pages.
*/
- chunk->map[run_ind-map_bias].bits =
- (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
- CHUNK_MAP_ALLOCATED | flag_dirty;
-#ifdef JEMALLOC_DEBUG
+ arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
/*
* The first page will always be dirtied during small run
* initialization, so a validation failure here would not
* actually cause an observable failure.
*/
- if (flag_dirty == 0 &&
- (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
- == 0)
- arena_chunk_validate_zeroed(chunk, run_ind);
-#endif
+ if (config_debug && flag_dirty == 0 &&
+ arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
+ arena_run_page_validate_zeroed(chunk, run_ind);
for (i = 1; i < need_pages - 1; i++) {
- chunk->map[run_ind+i-map_bias].bits = (i << PAGE_SHIFT)
- | (chunk->map[run_ind+i-map_bias].bits &
- CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
-#ifdef JEMALLOC_DEBUG
- if (flag_dirty == 0 &&
- (chunk->map[run_ind+i-map_bias].bits &
- CHUNK_MAP_UNZEROED) == 0)
- arena_chunk_validate_zeroed(chunk, run_ind+i);
-#endif
+ arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
+ if (config_debug && flag_dirty == 0 &&
+ arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) {
+ arena_run_page_validate_zeroed(chunk,
+ run_ind+i);
+ }
}
- chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
- - 1) << PAGE_SHIFT) |
- (chunk->map[run_ind+need_pages-1-map_bias].bits &
- CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
-#ifdef JEMALLOC_DEBUG
- if (flag_dirty == 0 &&
- (chunk->map[run_ind+need_pages-1-map_bias].bits &
- CHUNK_MAP_UNZEROED) == 0) {
- arena_chunk_validate_zeroed(chunk,
+ arena_mapbits_small_set(chunk, run_ind+need_pages-1,
+ need_pages-1, binind, flag_dirty);
+ if (config_debug && flag_dirty == 0 &&
+ arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
+ 0) {
+ arena_run_page_validate_zeroed(chunk,
run_ind+need_pages-1);
}
-#endif
}
+ VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
+ LG_PAGE)), (need_pages << LG_PAGE));
}
static arena_chunk_t *
@@ -466,40 +520,32 @@ arena_chunk_alloc(arena_t *arena)
size_t i;
if (arena->spare != NULL) {
- arena_avail_tree_t *runs_avail;
-
chunk = arena->spare;
arena->spare = NULL;
- /* Insert the run into the appropriate runs_avail_* tree. */
- if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
- runs_avail = &arena->runs_avail_clean;
- else
- runs_avail = &arena->runs_avail_dirty;
- assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
- assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
- == arena_maxclass);
- assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
- (chunk->map[chunk_npages-1-map_bias].bits &
- CHUNK_MAP_DIRTY));
- arena_avail_tree_insert(runs_avail, &chunk->map[0]);
+ assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+ assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+ assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+ arena_maxclass);
+ assert(arena_mapbits_unallocated_size_get(chunk,
+ chunk_npages-1) == arena_maxclass);
+ assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+ arena_mapbits_dirty_get(chunk, chunk_npages-1));
} else {
bool zero;
size_t unzeroed;
zero = false;
malloc_mutex_unlock(&arena->lock);
- chunk = (arena_chunk_t *)chunk_alloc(chunksize, false, &zero);
+ chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
+ false, &zero, arena->dss_prec);
malloc_mutex_lock(&arena->lock);
if (chunk == NULL)
return (NULL);
-#ifdef JEMALLOC_STATS
- arena->stats.mapped += chunksize;
-#endif
+ if (config_stats)
+ arena->stats.mapped += chunksize;
chunk->arena = arena;
- ql_elm_new(chunk, link_dirty);
- chunk->dirtied = false;
/*
* Claim that no pages are in use, since the header is merely
@@ -507,85 +553,87 @@ arena_chunk_alloc(arena_t *arena)
*/
chunk->ndirty = 0;
+ chunk->nruns_avail = 0;
+ chunk->nruns_adjac = 0;
+
/*
* Initialize the map to contain one maximal free untouched run.
* Mark the pages as zeroed iff chunk_alloc() returned a zeroed
* chunk.
*/
unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
- chunk->map[0].bits = arena_maxclass | unzeroed;
+ arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
+ unzeroed);
/*
* There is no need to initialize the internal page map entries
* unless the chunk is not zeroed.
*/
if (zero == false) {
for (i = map_bias+1; i < chunk_npages-1; i++)
- chunk->map[i-map_bias].bits = unzeroed;
- }
-#ifdef JEMALLOC_DEBUG
- else {
- for (i = map_bias+1; i < chunk_npages-1; i++)
- assert(chunk->map[i-map_bias].bits == unzeroed);
+ arena_mapbits_unzeroed_set(chunk, i, unzeroed);
+ } else if (config_debug) {
+ VALGRIND_MAKE_MEM_DEFINED(
+ (void *)arena_mapp_get(chunk, map_bias+1),
+ (void *)((uintptr_t)
+ arena_mapp_get(chunk, chunk_npages-1)
+ - (uintptr_t)arena_mapp_get(chunk, map_bias+1)));
+ for (i = map_bias+1; i < chunk_npages-1; i++) {
+ assert(arena_mapbits_unzeroed_get(chunk, i) ==
+ unzeroed);
+ }
}
-#endif
- chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
- unzeroed;
-
- /* Insert the run into the runs_avail_clean tree. */
- arena_avail_tree_insert(&arena->runs_avail_clean,
- &chunk->map[0]);
+ arena_mapbits_unallocated_set(chunk, chunk_npages-1,
+ arena_maxclass, unzeroed);
}
+ /* Insert the run into the runs_avail tree. */
+ arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
+ false, false);
+
return (chunk);
}
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
- arena_avail_tree_t *runs_avail;
+ assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
+ assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
+ assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
+ arena_maxclass);
+ assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
+ arena_maxclass);
+ assert(arena_mapbits_dirty_get(chunk, map_bias) ==
+ arena_mapbits_dirty_get(chunk, chunk_npages-1));
/*
- * Remove run from the appropriate runs_avail_* tree, so that the arena
- * does not use it.
+ * Remove run from the runs_avail tree, so that the arena does not use
+ * it.
*/
- if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
- runs_avail = &arena->runs_avail_clean;
- else
- runs_avail = &arena->runs_avail_dirty;
- arena_avail_tree_remove(runs_avail, &chunk->map[0]);
+ arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
+ false, false);
if (arena->spare != NULL) {
arena_chunk_t *spare = arena->spare;
arena->spare = chunk;
- if (spare->dirtied) {
- ql_remove(&chunk->arena->chunks_dirty, spare,
- link_dirty);
- arena->ndirty -= spare->ndirty;
- }
malloc_mutex_unlock(&arena->lock);
- chunk_dealloc((void *)spare, chunksize);
+ chunk_dealloc((void *)spare, chunksize, true);
malloc_mutex_lock(&arena->lock);
-#ifdef JEMALLOC_STATS
- arena->stats.mapped -= chunksize;
-#endif
+ if (config_stats)
+ arena->stats.mapped -= chunksize;
} else
arena->spare = chunk;
}
static arena_run_t *
-arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
+arena_run_alloc_helper(arena_t *arena, size_t size, bool large, size_t binind,
+ bool zero)
{
- arena_chunk_t *chunk;
arena_run_t *run;
arena_chunk_map_t *mapelm, key;
- assert(size <= arena_maxclass);
- assert((size & PAGE_MASK) == 0);
-
- /* Search the arena's chunks for the lowest best fit. */
key.bits = size | CHUNK_MAP_KEY;
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
+ mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
if (mapelm != NULL) {
arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
size_t pageind = (((uintptr_t)mapelm -
@@ -593,31 +641,38 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
+ map_bias;
run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- PAGE_SHIFT));
- arena_run_split(arena, run, size, large, zero);
+ LG_PAGE));
+ arena_run_split(arena, run, size, large, binind, zero);
return (run);
}
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- PAGE_SHIFT));
- arena_run_split(arena, run, size, large, zero);
+ return (NULL);
+}
+
+static arena_run_t *
+arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
+ bool zero)
+{
+ arena_chunk_t *chunk;
+ arena_run_t *run;
+
+ assert(size <= arena_maxclass);
+ assert((size & PAGE_MASK) == 0);
+ assert((large && binind == BININD_INVALID) || (large == false && binind
+ != BININD_INVALID));
+
+ /* Search the arena's chunks for the lowest best fit. */
+ run = arena_run_alloc_helper(arena, size, large, binind, zero);
+ if (run != NULL)
return (run);
- }
/*
* No usable runs. Create a new chunk from which to allocate the run.
*/
chunk = arena_chunk_alloc(arena);
if (chunk != NULL) {
- run = (arena_run_t *)((uintptr_t)chunk + (map_bias <<
- PAGE_SHIFT));
- arena_run_split(arena, run, size, large, zero);
+ run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
+ arena_run_split(arena, run, size, large, binind, zero);
return (run);
}
@@ -626,78 +681,46 @@ arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
* sufficient memory available while this one dropped arena->lock in
* arena_chunk_alloc(), so search one more time.
*/
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
-
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- PAGE_SHIFT));
- arena_run_split(arena, run, size, large, zero);
- return (run);
- }
- mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
- if (mapelm != NULL) {
- arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
- size_t pageind = (((uintptr_t)mapelm -
- (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
- + map_bias;
-
- run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
- PAGE_SHIFT));
- arena_run_split(arena, run, size, large, zero);
- return (run);
- }
-
- return (NULL);
+ return (arena_run_alloc_helper(arena, size, large, binind, zero));
}
static inline void
arena_maybe_purge(arena_t *arena)
{
+ size_t npurgeable, threshold;
+
+ /* Don't purge if the option is disabled. */
+ if (opt_lg_dirty_mult < 0)
+ return;
+ /* Don't purge if all dirty pages are already being purged. */
+ if (arena->ndirty <= arena->npurgatory)
+ return;
+ npurgeable = arena->ndirty - arena->npurgatory;
+ threshold = (arena->nactive >> opt_lg_dirty_mult);
+ /*
+ * Don't purge unless the number of purgeable pages exceeds the
+ * threshold.
+ */
+ if (npurgeable <= threshold)
+ return;
- /* Enforce opt_lg_dirty_mult. */
- if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
- (arena->ndirty - arena->npurgatory) > chunk_npages &&
- (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
- arena->npurgatory))
- arena_purge(arena, false);
+ arena_purge(arena, false);
}
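
The rewritten arena_maybe_purge above spells out its early returns one condition at a time; the whole decision reduces to a small predicate over the dirty-page counters. A minimal restatement with hypothetical parameter names (for example, with lg_dirty_mult == 3 purging starts once the purgeable dirty pages exceed one eighth of the active pages):

#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>	/* ssize_t */

/* Mirror of the purge trigger above; names are illustrative only. */
static bool
should_purge(size_t ndirty, size_t npurgatory, size_t nactive,
    ssize_t lg_dirty_mult)
{
	size_t npurgeable, threshold;

	if (lg_dirty_mult < 0)		/* Purging disabled by option. */
		return (false);
	if (ndirty <= npurgatory)	/* All dirty pages already queued. */
		return (false);
	npurgeable = ndirty - npurgatory;
	threshold = nactive >> lg_dirty_mult;
	return (npurgeable > threshold);
}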
-static inline void
-arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
+static inline size_t
+arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
+ size_t npurged;
ql_head(arena_chunk_map_t) mapelms;
arena_chunk_map_t *mapelm;
- size_t pageind, flag_unzeroed;
-#ifdef JEMALLOC_DEBUG
- size_t ndirty;
-#endif
-#ifdef JEMALLOC_STATS
+ size_t pageind, npages;
size_t nmadvise;
-#endif
ql_new(&mapelms);
- flag_unzeroed =
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
- /*
- * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
- * mappings, but not for file-backed mappings.
- */
-# ifdef JEMALLOC_SWAP
- swap_enabled ? CHUNK_MAP_UNZEROED :
-# endif
- 0;
-#else
- CHUNK_MAP_UNZEROED;
-#endif
-
/*
* If chunk is the spare, temporarily re-allocate it, 1) so that its
- * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
+ * run is reinserted into runs_avail, and 2) so that it cannot be
* completely discarded by another thread while arena->lock is dropped
* by this thread. Note that the arena_run_dalloc() call will
* implicitly deallocate the chunk, so no explicit action is required
@@ -711,124 +734,135 @@ arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
* run.
*/
if (chunk == arena->spare) {
- assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
+ assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
+ assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);
+
arena_chunk_alloc(arena);
}
- /* Temporarily allocate all free dirty runs within chunk. */
- for (pageind = map_bias; pageind < chunk_npages;) {
- mapelm = &chunk->map[pageind-map_bias];
- if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
- size_t npages;
+ if (config_stats)
+ arena->stats.purged += chunk->ndirty;
- npages = mapelm->bits >> PAGE_SHIFT;
- assert(pageind + npages <= chunk_npages);
- if (mapelm->bits & CHUNK_MAP_DIRTY) {
- size_t i;
+ /*
+ * Operate on all dirty runs if there is no clean/dirty run
+ * fragmentation.
+ */
+ if (chunk->nruns_adjac == 0)
+ all = true;
- arena_avail_tree_remove(
- &arena->runs_avail_dirty, mapelm);
+ /*
+ * Temporarily allocate free dirty runs within chunk. If all is false,
+ * only operate on dirty runs that are fragments; otherwise operate on
+ * all dirty runs.
+ */
+ for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
+ mapelm = arena_mapp_get(chunk, pageind);
+ if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
+ size_t run_size =
+ arena_mapbits_unallocated_size_get(chunk, pageind);
- mapelm->bits = (npages << PAGE_SHIFT) |
- flag_unzeroed | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
- /*
- * Update internal elements in the page map, so
- * that CHUNK_MAP_UNZEROED is properly set.
- */
- for (i = 1; i < npages - 1; i++) {
- chunk->map[pageind+i-map_bias].bits =
- flag_unzeroed;
- }
- if (npages > 1) {
- chunk->map[
- pageind+npages-1-map_bias].bits =
- flag_unzeroed | CHUNK_MAP_LARGE |
- CHUNK_MAP_ALLOCATED;
- }
+ npages = run_size >> LG_PAGE;
+ assert(pageind + npages <= chunk_npages);
+ assert(arena_mapbits_dirty_get(chunk, pageind) ==
+ arena_mapbits_dirty_get(chunk, pageind+npages-1));
- arena->nactive += npages;
+ if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
+ (all || arena_avail_adjac(chunk, pageind,
+ npages))) {
+ arena_run_t *run = (arena_run_t *)((uintptr_t)
+ chunk + (uintptr_t)(pageind << LG_PAGE));
+
+ arena_run_split(arena, run, run_size, true,
+ BININD_INVALID, false);
/* Append to list for later processing. */
ql_elm_new(mapelm, u.ql_link);
ql_tail_insert(&mapelms, mapelm, u.ql_link);
}
-
- pageind += npages;
} else {
- /* Skip allocated run. */
- if (mapelm->bits & CHUNK_MAP_LARGE)
- pageind += mapelm->bits >> PAGE_SHIFT;
- else {
+ /* Skip run. */
+ if (arena_mapbits_large_get(chunk, pageind) != 0) {
+ npages = arena_mapbits_large_size_get(chunk,
+ pageind) >> LG_PAGE;
+ } else {
+ size_t binind;
+ arena_bin_info_t *bin_info;
arena_run_t *run = (arena_run_t *)((uintptr_t)
- chunk + (uintptr_t)(pageind << PAGE_SHIFT));
+ chunk + (uintptr_t)(pageind << LG_PAGE));
- assert((mapelm->bits >> PAGE_SHIFT) == 0);
- assert(run->magic == ARENA_RUN_MAGIC);
- pageind += run->bin->run_size >> PAGE_SHIFT;
+ assert(arena_mapbits_small_runind_get(chunk,
+ pageind) == 0);
+ binind = arena_bin_index(arena, run->bin);
+ bin_info = &arena_bin_info[binind];
+ npages = bin_info->run_size >> LG_PAGE;
}
}
}
assert(pageind == chunk_npages);
-
-#ifdef JEMALLOC_DEBUG
- ndirty = chunk->ndirty;
-#endif
-#ifdef JEMALLOC_STATS
- arena->stats.purged += chunk->ndirty;
-#endif
- arena->ndirty -= chunk->ndirty;
- chunk->ndirty = 0;
- ql_remove(&arena->chunks_dirty, chunk, link_dirty);
- chunk->dirtied = false;
+ assert(chunk->ndirty == 0 || all == false);
+ assert(chunk->nruns_adjac == 0);
malloc_mutex_unlock(&arena->lock);
-#ifdef JEMALLOC_STATS
- nmadvise = 0;
-#endif
+ if (config_stats)
+ nmadvise = 0;
+ npurged = 0;
ql_foreach(mapelm, &mapelms, u.ql_link) {
- size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- size_t npages = mapelm->bits >> PAGE_SHIFT;
+ bool unzeroed;
+ size_t flag_unzeroed, i;
+ pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+ sizeof(arena_chunk_map_t)) + map_bias;
+ npages = arena_mapbits_large_size_get(chunk, pageind) >>
+ LG_PAGE;
assert(pageind + npages <= chunk_npages);
-#ifdef JEMALLOC_DEBUG
- assert(ndirty >= npages);
- ndirty -= npages;
-#endif
-
-#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
- madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
- (npages << PAGE_SHIFT), MADV_DONTNEED);
-#elif defined(JEMALLOC_PURGE_MADVISE_FREE)
- madvise((void *)((uintptr_t)chunk + (pageind << PAGE_SHIFT)),
- (npages << PAGE_SHIFT), MADV_FREE);
-#else
-# error "No method defined for purging unused dirty pages."
-#endif
-
-#ifdef JEMALLOC_STATS
- nmadvise++;
-#endif
- }
-#ifdef JEMALLOC_DEBUG
- assert(ndirty == 0);
-#endif
+ unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
+ LG_PAGE)), (npages << LG_PAGE));
+ flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
+ /*
+ * Set the unzeroed flag for all pages, now that pages_purge()
+ * has returned whether the pages were zeroed as a side effect
+ * of purging. This chunk map modification is safe even though
+ * the arena mutex isn't currently owned by this thread,
+ * because the run is marked as allocated, thus protecting it
+ * from being modified by any other thread. As long as these
+ * writes don't perturb the first and last elements'
+ * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
+ */
+ for (i = 0; i < npages; i++) {
+ arena_mapbits_unzeroed_set(chunk, pageind+i,
+ flag_unzeroed);
+ }
+ npurged += npages;
+ if (config_stats)
+ nmadvise++;
+ }
malloc_mutex_lock(&arena->lock);
-#ifdef JEMALLOC_STATS
- arena->stats.nmadvise += nmadvise;
-#endif
+ if (config_stats)
+ arena->stats.nmadvise += nmadvise;
/* Deallocate runs. */
for (mapelm = ql_first(&mapelms); mapelm != NULL;
mapelm = ql_first(&mapelms)) {
- size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t)) + map_bias;
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)(pageind << PAGE_SHIFT));
+ arena_run_t *run;
+ pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
+ sizeof(arena_chunk_map_t)) + map_bias;
+ run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
+ LG_PAGE));
ql_remove(&mapelms, mapelm, u.ql_link);
- arena_run_dalloc(arena, run, false);
+ arena_run_dalloc(arena, run, false, true);
}
+
+ return (npurged);
+}
+
+static arena_chunk_t *
+chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
+{
+ size_t *ndirty = (size_t *)arg;
+
+ assert(chunk->ndirty != 0);
+ *ndirty += chunk->ndirty;
+ return (NULL);
}
static void
@@ -836,36 +870,43 @@ arena_purge(arena_t *arena, bool all)
{
arena_chunk_t *chunk;
size_t npurgatory;
-#ifdef JEMALLOC_DEBUG
- size_t ndirty = 0;
+ if (config_debug) {
+ size_t ndirty = 0;
- ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
- assert(chunk->dirtied);
- ndirty += chunk->ndirty;
+ arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
+ chunks_dirty_iter_cb, (void *)&ndirty);
+ assert(ndirty == arena->ndirty);
}
- assert(ndirty == arena->ndirty);
-#endif
- assert(arena->ndirty > arena->npurgatory);
- assert(arena->ndirty > chunk_npages || all);
- assert((arena->nactive >> opt_lg_dirty_mult) < arena->ndirty || all);
+ assert(arena->ndirty > arena->npurgatory || all);
+ assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
+ arena->npurgatory) || all);
-#ifdef JEMALLOC_STATS
- arena->stats.npurge++;
-#endif
+ if (config_stats)
+ arena->stats.npurge++;
/*
* Compute the minimum number of pages that this thread should try to
* purge, and add the result to arena->npurgatory. This will keep
* multiple threads from racing to reduce ndirty below the threshold.
*/
- npurgatory = arena->ndirty - arena->npurgatory;
- if (all == false)
- npurgatory -= arena->nactive >> opt_lg_dirty_mult;
+ {
+ size_t npurgeable = arena->ndirty - arena->npurgatory;
+
+ if (all == false) {
+ size_t threshold = (arena->nactive >>
+ opt_lg_dirty_mult);
+
+ npurgatory = npurgeable - threshold;
+ } else
+ npurgatory = npurgeable;
+ }
arena->npurgatory += npurgatory;
while (npurgatory > 0) {
+ size_t npurgeable, npurged, nunpurged;
+
/* Get next chunk with dirty pages. */
- chunk = ql_first(&arena->chunks_dirty);
+ chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
if (chunk == NULL) {
/*
* This thread was unable to purge as many pages as
@@ -876,23 +917,15 @@ arena_purge(arena_t *arena, bool all)
arena->npurgatory -= npurgatory;
return;
}
- while (chunk->ndirty == 0) {
- ql_remove(&arena->chunks_dirty, chunk, link_dirty);
- chunk->dirtied = false;
- chunk = ql_first(&arena->chunks_dirty);
- if (chunk == NULL) {
- /* Same logic as for above. */
- arena->npurgatory -= npurgatory;
- return;
- }
- }
+ npurgeable = chunk->ndirty;
+ assert(npurgeable != 0);
- if (chunk->ndirty > npurgatory) {
+ if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
/*
- * This thread will, at a minimum, purge all the dirty
- * pages in chunk, so set npurgatory to reflect this
- * thread's commitment to purge the pages. This tends
- * to reduce the chances of the following scenario:
+ * This thread will purge all the dirty pages in chunk,
+ * so set npurgatory to reflect this thread's intent to
+ * purge the pages. This tends to reduce the chances
+ * of the following scenario:
*
* 1) This thread sets arena->npurgatory such that
* (arena->ndirty - arena->npurgatory) is at the
@@ -906,13 +939,20 @@ arena_purge(arena_t *arena, bool all)
* because all of the purging work being done really
* needs to happen.
*/
- arena->npurgatory += chunk->ndirty - npurgatory;
- npurgatory = chunk->ndirty;
+ arena->npurgatory += npurgeable - npurgatory;
+ npurgatory = npurgeable;
}
- arena->npurgatory -= chunk->ndirty;
- npurgatory -= chunk->ndirty;
- arena_chunk_purge(arena, chunk);
+ /*
+ * Keep track of how many pages are purgeable, versus how many
+ * actually get purged, and adjust counters accordingly.
+ */
+ arena->npurgatory -= npurgeable;
+ npurgatory -= npurgeable;
+ npurged = arena_chunk_purge(arena, chunk, all);
+ nunpurged = npurgeable - npurged;
+ arena->npurgatory += nunpurged;
+ npurgatory += nunpurged;
}
}
@@ -926,96 +966,95 @@ arena_purge_all(arena_t *arena)
}
static void
-arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
+arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
arena_chunk_t *chunk;
size_t size, run_ind, run_pages, flag_dirty;
- arena_avail_tree_t *runs_avail;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk)
- >> PAGE_SHIFT);
+ run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
assert(run_ind >= map_bias);
assert(run_ind < chunk_npages);
- if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
- size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
- assert(size == PAGE_SIZE ||
- (chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
- ~PAGE_MASK) == 0);
- assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
- CHUNK_MAP_LARGE) != 0);
- assert((chunk->map[run_ind+(size>>PAGE_SHIFT)-1-map_bias].bits &
- CHUNK_MAP_ALLOCATED) != 0);
- } else
- size = run->bin->run_size;
- run_pages = (size >> PAGE_SHIFT);
+ if (arena_mapbits_large_get(chunk, run_ind) != 0) {
+ size = arena_mapbits_large_size_get(chunk, run_ind);
+ assert(size == PAGE ||
+ arena_mapbits_large_size_get(chunk,
+ run_ind+(size>>LG_PAGE)-1) == 0);
+ } else {
+ size_t binind = arena_bin_index(arena, run->bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+ size = bin_info->run_size;
+ }
+ run_pages = (size >> LG_PAGE);
+ if (config_stats) {
+ /*
+ * Update stats_cactive if nactive is crossing a chunk
+ * multiple.
+ */
+ size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
+ CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
+ if (cactive_diff != 0)
+ stats_cactive_sub(cactive_diff);
+ }
arena->nactive -= run_pages;
/*
* The run is dirty if the caller claims to have dirtied it, as well as
- * if it was already dirty before being allocated.
+ * if it was already dirty before being allocated and the caller
+ * doesn't claim to have cleaned it.
*/
- if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
+ assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
+ if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
dirty = true;
flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
- runs_avail = dirty ? &arena->runs_avail_dirty :
- &arena->runs_avail_clean;
/* Mark pages as unallocated in the chunk map. */
if (dirty) {
- chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
- chunk->map[run_ind+run_pages-1-map_bias].bits = size |
- CHUNK_MAP_DIRTY;
-
- chunk->ndirty += run_pages;
- arena->ndirty += run_pages;
+ arena_mapbits_unallocated_set(chunk, run_ind, size,
+ CHUNK_MAP_DIRTY);
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ CHUNK_MAP_DIRTY);
} else {
- chunk->map[run_ind-map_bias].bits = size |
- (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
- chunk->map[run_ind+run_pages-1-map_bias].bits = size |
- (chunk->map[run_ind+run_pages-1-map_bias].bits &
- CHUNK_MAP_UNZEROED);
+ arena_mapbits_unallocated_set(chunk, run_ind, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind));
+ arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
+ arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
}
/* Try to coalesce forward. */
if (run_ind + run_pages < chunk_npages &&
- (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
- == 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
- CHUNK_MAP_DIRTY) == flag_dirty) {
- size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
- ~PAGE_MASK;
- size_t nrun_pages = nrun_size >> PAGE_SHIFT;
+ arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
+ size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
+ run_ind+run_pages);
+ size_t nrun_pages = nrun_size >> LG_PAGE;
/*
* Remove successor from runs_avail; the coalesced run is
* inserted later.
*/
- assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
- & ~PAGE_MASK) == nrun_size);
- assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
- & CHUNK_MAP_ALLOCATED) == 0);
- assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
- & CHUNK_MAP_DIRTY) == flag_dirty);
- arena_avail_tree_remove(runs_avail,
- &chunk->map[run_ind+run_pages-map_bias]);
+ assert(arena_mapbits_unallocated_size_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == nrun_size);
+ assert(arena_mapbits_dirty_get(chunk,
+ run_ind+run_pages+nrun_pages-1) == flag_dirty);
+ arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
+ false, true);
size += nrun_size;
run_pages += nrun_pages;
- chunk->map[run_ind-map_bias].bits = size |
- (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
- chunk->map[run_ind+run_pages-1-map_bias].bits = size |
- (chunk->map[run_ind+run_pages-1-map_bias].bits &
- CHUNK_MAP_FLAGS_MASK);
+ arena_mapbits_unallocated_size_set(chunk, run_ind, size);
+ arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
+ size);
}
/* Try to coalesce backward. */
- if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
- CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
- CHUNK_MAP_DIRTY) == flag_dirty) {
- size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
- ~PAGE_MASK;
- size_t prun_pages = prun_size >> PAGE_SHIFT;
+ if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
+ == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
+ size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
+ run_ind-1);
+ size_t prun_pages = prun_size >> LG_PAGE;
run_ind -= prun_pages;
@@ -1023,52 +1062,33 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
* Remove predecessor from runs_avail; the coalesced run is
* inserted later.
*/
- assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
- == prun_size);
- assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
- == 0);
- assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
- == flag_dirty);
- arena_avail_tree_remove(runs_avail,
- &chunk->map[run_ind-map_bias]);
+ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
+ prun_size);
+ assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
+ arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
+ false);
size += prun_size;
run_pages += prun_pages;
- chunk->map[run_ind-map_bias].bits = size |
- (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
- chunk->map[run_ind+run_pages-1-map_bias].bits = size |
- (chunk->map[run_ind+run_pages-1-map_bias].bits &
- CHUNK_MAP_FLAGS_MASK);
+ arena_mapbits_unallocated_size_set(chunk, run_ind, size);
+ arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
+ size);
}
/* Insert into runs_avail, now that coalescing is complete. */
- assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
- (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
- assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
- (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
- arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);
-
- if (dirty) {
- /*
- * Insert into chunks_dirty before potentially calling
- * arena_chunk_dealloc(), so that chunks_dirty and
- * arena->ndirty are consistent.
- */
- if (chunk->dirtied == false) {
- ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
- chunk->dirtied = true;
- }
- }
-
- /*
- * Deallocate chunk if it is now completely unused. The bit
- * manipulation checks whether the first run is unallocated and extends
- * to the end of the chunk.
- */
- if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
- arena_maxclass)
+ assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
+ arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
+ assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+ arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
+ arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);
+
+ /* Deallocate chunk if it is now completely unused. */
+ if (size == arena_maxclass) {
+ assert(run_ind == map_bias);
+ assert(run_pages == (arena_maxclass >> LG_PAGE));
arena_chunk_dealloc(arena, chunk);
+ }
/*
* It is okay to do dirty page processing here even if the chunk was
@@ -1085,9 +1105,9 @@ static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize)
{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
- size_t head_npages = (oldsize - newsize) >> PAGE_SHIFT;
- size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ size_t head_npages = (oldsize - newsize) >> LG_PAGE;
+ size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
assert(oldsize > newsize);
@@ -1096,44 +1116,30 @@ arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* leading run as separately allocated. Set the last element of each
* run first, in case of single-page runs.
*/
- assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
- assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
- chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
- (chunk->map[pageind+head_npages-1-map_bias].bits &
- CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
- chunk->map[pageind-map_bias].bits = (oldsize - newsize)
- | flag_dirty | (chunk->map[pageind-map_bias].bits &
- CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-
-#ifdef JEMALLOC_DEBUG
- {
- size_t tail_npages = newsize >> PAGE_SHIFT;
- assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
- .bits & ~PAGE_MASK) == 0);
- assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
- .bits & CHUNK_MAP_DIRTY) == flag_dirty);
- assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
- .bits & CHUNK_MAP_LARGE) != 0);
- assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
- .bits & CHUNK_MAP_ALLOCATED) != 0);
- }
-#endif
- chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
- (chunk->map[pageind+head_npages-map_bias].bits &
- CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-
- arena_run_dalloc(arena, run, false);
+ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
+ arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);
+
+ if (config_debug) {
+ UNUSED size_t tail_npages = newsize >> LG_PAGE;
+ assert(arena_mapbits_large_size_get(chunk,
+ pageind+head_npages+tail_npages-1) == 0);
+ assert(arena_mapbits_dirty_get(chunk,
+ pageind+head_npages+tail_npages-1) == flag_dirty);
+ }
+ arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
+ flag_dirty);
+
+ arena_run_dalloc(arena, run, false, false);
}
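
The head-trim above rewrites the per-page map so the leading pages become their own run while the trailing pages keep the new size, recorded at both ends of each run. A standalone sketch of that bookkeeping with a toy page map; page_map, run_set and trim_head are invented names, not jemalloc code:

/* Toy model of trimming the head of a multi-page run: the leading
 * (old - new) pages become a separate run and the remaining pages form
 * the kept run, with each run's size recorded at its first and last
 * page, mirroring the map updates in arena_run_trim_head(). */
#include <assert.h>
#include <stdio.h>

#define NPAGES 8
#define PAGE   4096

static size_t page_map[NPAGES];   /* toy map: run size at first/last page */

static void run_set(size_t first, size_t size)
{
    size_t npages = size / PAGE;
    page_map[first + npages - 1] = size;  /* set last page first ... */
    page_map[first] = size;               /* ... then the first page */
}

static void trim_head(size_t first, size_t oldsize, size_t newsize)
{
    size_t head_npages = (oldsize - newsize) / PAGE;
    run_set(first, oldsize - newsize);       /* leading run to release */
    run_set(first + head_npages, newsize);   /* trailing run kept      */
}

int main(void)
{
    run_set(0, 4 * PAGE);
    trim_head(0, 4 * PAGE, 3 * PAGE);
    assert(page_map[0] == PAGE);          /* 1-page head run */
    assert(page_map[1] == 3 * PAGE);      /* 3-page tail run */
    printf("head=%zu tail=%zu\n", page_map[0], page_map[1]);
    return 0;
}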
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
size_t oldsize, size_t newsize, bool dirty)
{
- size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT;
- size_t head_npages = newsize >> PAGE_SHIFT;
- size_t tail_npages = (oldsize - newsize) >> PAGE_SHIFT;
- size_t flag_dirty = chunk->map[pageind-map_bias].bits &
- CHUNK_MAP_DIRTY;
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ size_t head_npages = newsize >> LG_PAGE;
+ size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
assert(oldsize > newsize);
@@ -1142,87 +1148,120 @@ arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* trailing run as separately allocated. Set the last element of each
* run first, in case of single-page runs.
*/
- assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
- assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
- chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
- (chunk->map[pageind+head_npages-1-map_bias].bits &
- CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
- chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
- (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
- CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-
- assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
- ~PAGE_MASK) == 0);
- assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
- CHUNK_MAP_LARGE) != 0);
- assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
- CHUNK_MAP_ALLOCATED) != 0);
- chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
- flag_dirty |
- (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
- CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
- chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
- flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
- CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
+ assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
+ arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
+ arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);
+
+ if (config_debug) {
+ UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
+ assert(arena_mapbits_large_size_get(chunk,
+ pageind+head_npages+tail_npages-1) == 0);
+ assert(arena_mapbits_dirty_get(chunk,
+ pageind+head_npages+tail_npages-1) == flag_dirty);
+ }
+ arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
+ flag_dirty);
arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
- dirty);
+ dirty, false);
}
static arena_run_t *
-arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+arena_bin_runs_first(arena_bin_t *bin)
{
- arena_chunk_map_t *mapelm;
- arena_run_t *run;
-
- /* Look for a usable run. */
- mapelm = arena_run_tree_first(&bin->runs);
+ arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
if (mapelm != NULL) {
arena_chunk_t *chunk;
size_t pageind;
-
- /* run is guaranteed to have available space. */
- arena_run_tree_remove(&bin->runs, mapelm);
+ arena_run_t *run;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
sizeof(arena_chunk_map_t))) + map_bias;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- (mapelm->bits >> PAGE_SHIFT))
- << PAGE_SHIFT));
-#ifdef JEMALLOC_STATS
- bin->stats.reruns++;
-#endif
+ arena_mapbits_small_runind_get(chunk, pageind)) <<
+ LG_PAGE));
return (run);
}
+
+ return (NULL);
+}
+
+static void
+arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
+{
+ arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+
+ assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
+
+ arena_run_tree_insert(&bin->runs, mapelm);
+}
+
+static void
+arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
+{
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
+ size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
+ arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
+
+ assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
+
+ arena_run_tree_remove(&bin->runs, mapelm);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_tryget(arena_bin_t *bin)
+{
+ arena_run_t *run = arena_bin_runs_first(bin);
+ if (run != NULL) {
+ arena_bin_runs_remove(bin, run);
+ if (config_stats)
+ bin->stats.reruns++;
+ }
+ return (run);
+}
+
+static arena_run_t *
+arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
+{
+ arena_run_t *run;
+ size_t binind;
+ arena_bin_info_t *bin_info;
+
+ /* Look for a usable run. */
+ run = arena_bin_nonfull_run_tryget(bin);
+ if (run != NULL)
+ return (run);
/* No existing runs have any space available. */
+ binind = arena_bin_index(arena, bin);
+ bin_info = &arena_bin_info[binind];
+
/* Allocate a new run. */
malloc_mutex_unlock(&bin->lock);
/******************************/
malloc_mutex_lock(&arena->lock);
- run = arena_run_alloc(arena, bin->run_size, false, false);
+ run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
if (run != NULL) {
+ bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
+ (uintptr_t)bin_info->bitmap_offset);
+
/* Initialize run internals. */
run->bin = bin;
- run->avail = NULL;
- run->next = (void *)((uintptr_t)run +
- (uintptr_t)bin->reg0_offset);
- run->nfree = bin->nregs;
-#ifdef JEMALLOC_DEBUG
- run->magic = ARENA_RUN_MAGIC;
-#endif
+ run->nextind = 0;
+ run->nfree = bin_info->nregs;
+ bitmap_init(bitmap, &bin_info->bitmap_info);
}
malloc_mutex_unlock(&arena->lock);
/********************************/
malloc_mutex_lock(&bin->lock);
if (run != NULL) {
-#ifdef JEMALLOC_STATS
- bin->stats.nruns++;
- bin->stats.curruns++;
- if (bin->stats.curruns > bin->stats.highruns)
- bin->stats.highruns = bin->stats.curruns;
-#endif
+ if (config_stats) {
+ bin->stats.nruns++;
+ bin->stats.curruns++;
+ }
return (run);
}
@@ -1231,25 +1270,9 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
* sufficient memory available while this one dropped bin->lock above,
* so search one more time.
*/
- mapelm = arena_run_tree_first(&bin->runs);
- if (mapelm != NULL) {
- arena_chunk_t *chunk;
- size_t pageind;
-
- /* run is guaranteed to have available space. */
- arena_run_tree_remove(&bin->runs, mapelm);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
- pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
- sizeof(arena_chunk_map_t))) + map_bias;
- run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- (mapelm->bits >> PAGE_SHIFT))
- << PAGE_SHIFT));
-#ifdef JEMALLOC_STATS
- bin->stats.reruns++;
-#endif
+ run = arena_bin_nonfull_run_tryget(bin);
+ if (run != NULL)
return (run);
- }
return (NULL);
}
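
The locking above (drop bin->lock, take arena->lock for the allocation, retake bin->lock, then re-scan the run tree) is the usual unlock/allocate/relock/re-check pattern. A self-contained sketch of that pattern with hypothetical toy_* names and plain pthread mutexes rather than jemalloc's internals:

/* Drop the fine-grained lock, take the coarse lock to allocate a
 * resource, then re-check shared state after reacquiring the fine lock,
 * because another thread may have produced a resource meanwhile. */
#include <pthread.h>
#include <stdlib.h>

typedef struct {
    pthread_mutex_t bin_lock;
    pthread_mutex_t arena_lock;
    void *cached_run;            /* hypothetical stand-in for bin->runs */
} toy_bin_t;

static void *toy_run_tryget(toy_bin_t *bin)    /* caller holds bin_lock */
{
    void *run = bin->cached_run;
    bin->cached_run = NULL;
    return run;
}

static void *toy_nonfull_run_get(toy_bin_t *bin)
{
    void *run = toy_run_tryget(bin);
    if (run != NULL)
        return run;

    pthread_mutex_unlock(&bin->bin_lock);
    pthread_mutex_lock(&bin->arena_lock);
    run = malloc(4096);                        /* "expensive" allocation */
    pthread_mutex_unlock(&bin->arena_lock);
    pthread_mutex_lock(&bin->bin_lock);

    if (run != NULL)
        return run;
    /* Allocation failed, but another thread may have refilled the bin
     * while bin_lock was dropped: check once more. */
    return toy_run_tryget(bin);
}

int main(void)
{
    toy_bin_t bin = { PTHREAD_MUTEX_INITIALIZER,
                      PTHREAD_MUTEX_INITIALIZER, NULL };
    pthread_mutex_lock(&bin.bin_lock);
    void *run = toy_nonfull_run_get(&bin);
    pthread_mutex_unlock(&bin.bin_lock);
    free(run);
    return 0;
}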
@@ -1259,8 +1282,12 @@ static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
void *ret;
+ size_t binind;
+ arena_bin_info_t *bin_info;
arena_run_t *run;
+ binind = arena_bin_index(arena, bin);
+ bin_info = &arena_bin_info[binind];
bin->runcur = NULL;
run = arena_bin_nonfull_run_get(arena, bin);
if (bin->runcur != NULL && bin->runcur->nfree > 0) {
@@ -1268,22 +1295,21 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
* Another thread updated runcur while this one ran without the
* bin lock in arena_bin_nonfull_run_get().
*/
- assert(bin->runcur->magic == ARENA_RUN_MAGIC);
assert(bin->runcur->nfree > 0);
- ret = arena_run_reg_alloc(bin->runcur, bin);
+ ret = arena_run_reg_alloc(bin->runcur, bin_info);
if (run != NULL) {
arena_chunk_t *chunk;
/*
* arena_run_alloc() may have allocated run, or it may
- * have pulled it from the bin's run tree. Therefore
+ * have pulled run from the bin's run tree. Therefore
* it is unsafe to make any assumptions about how run
* has previously been used, and arena_bin_lower_run()
* must be called, as if a region were just deallocated
* from the run.
*/
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- if (run->nfree == bin->nregs)
+ if (run->nfree == bin_info->nregs)
arena_dalloc_bin_run(arena, chunk, run, bin);
else
arena_bin_lower_run(arena, chunk, run, bin);
@@ -1296,34 +1322,14 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
bin->runcur = run;
- assert(bin->runcur->magic == ARENA_RUN_MAGIC);
assert(bin->runcur->nfree > 0);
- return (arena_run_reg_alloc(bin->runcur, bin));
-}
-
-#ifdef JEMALLOC_PROF
-void
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
-{
-
- if (prof_interval != 0) {
- arena->prof_accumbytes += accumbytes;
- if (arena->prof_accumbytes >= prof_interval) {
- prof_idump();
- arena->prof_accumbytes -= prof_interval;
- }
- }
+ return (arena_run_reg_alloc(bin->runcur, bin_info));
}
-#endif
-#ifdef JEMALLOC_TCACHE
void
-arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
-# ifdef JEMALLOC_PROF
- , uint64_t prof_accumbytes
-# endif
- )
+arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
+ uint64_t prof_accumbytes)
{
unsigned i, nfill;
arena_bin_t *bin;
@@ -1332,143 +1338,83 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind
assert(tbin->ncached == 0);
-#ifdef JEMALLOC_PROF
- malloc_mutex_lock(&arena->lock);
- arena_prof_accum(arena, prof_accumbytes);
- malloc_mutex_unlock(&arena->lock);
-#endif
+ if (config_prof && arena_prof_accum(arena, prof_accumbytes))
+ prof_idump();
bin = &arena->bins[binind];
malloc_mutex_lock(&bin->lock);
- for (i = 0, nfill = (tbin->ncached_max >> 1); i < nfill; i++) {
+ for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
+ tbin->lg_fill_div); i < nfill; i++) {
if ((run = bin->runcur) != NULL && run->nfree > 0)
- ptr = arena_run_reg_alloc(run, bin);
+ ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ptr = arena_bin_malloc_hard(arena, bin);
if (ptr == NULL)
break;
- *(void **)ptr = tbin->avail;
- tbin->avail = ptr;
- }
-#ifdef JEMALLOC_STATS
- bin->stats.allocated += (i - tbin->ncached) * bin->reg_size;
- bin->stats.nmalloc += i;
- bin->stats.nrequests += tbin->tstats.nrequests;
- bin->stats.nfills++;
- tbin->tstats.nrequests = 0;
-#endif
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ptr, &arena_bin_info[binind],
+ true);
+ }
+ /* Insert such that low regions get used first. */
+ tbin->avail[nfill - 1 - i] = ptr;
+ }
+ if (config_stats) {
+ bin->stats.allocated += i * arena_bin_info[binind].reg_size;
+ bin->stats.nmalloc += i;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ bin->stats.nfills++;
+ tbin->tstats.nrequests = 0;
+ }
malloc_mutex_unlock(&bin->lock);
tbin->ncached = i;
- if (tbin->ncached > tbin->high_water)
- tbin->high_water = tbin->ncached;
}
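
The tbin->avail[nfill - 1 - i] indexing above fills the cache back-to-front, so a cache that pops from its top slot hands out the lowest (first-allocated) regions first. A standalone sketch of that fill/pop order, with a hypothetical toy_tbin_t in place of the real tcache structures:

#include <assert.h>
#include <stddef.h>

#define NFILL 4

typedef struct {
    void   *avail[NFILL];
    size_t  ncached;
} toy_tbin_t;

static void toy_fill(toy_tbin_t *tbin, void **regions, size_t nfill)
{
    for (size_t i = 0; i < nfill; i++) {
        /* First-produced (lowest) region lands in the top slot. */
        tbin->avail[nfill - 1 - i] = regions[i];
    }
    tbin->ncached = nfill;
}

static void *toy_pop(toy_tbin_t *tbin)
{
    return tbin->avail[--tbin->ncached];   /* pop from the top slot */
}

int main(void)
{
    int r[NFILL];
    void *regions[NFILL] = { &r[0], &r[1], &r[2], &r[3] };
    toy_tbin_t tbin;

    toy_fill(&tbin, regions, NFILL);
    /* The first region produced (lowest address) is returned first. */
    assert(toy_pop(&tbin) == &r[0]);
    assert(toy_pop(&tbin) == &r[1]);
    return 0;
}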
-#endif
-/*
- * Calculate bin->run_size such that it meets the following constraints:
- *
- * *) bin->run_size >= min_run_size
- * *) bin->run_size <= arena_maxclass
- * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
- * *) run header size < PAGE_SIZE
- *
- * bin->nregs and bin->reg0_offset are also calculated here, since these
- * settings are all interdependent.
- */
-static size_t
-arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size)
+void
+arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{
- size_t try_run_size, good_run_size;
- uint32_t try_nregs, good_nregs;
- uint32_t try_hdr_size, good_hdr_size;
-#ifdef JEMALLOC_PROF
- uint32_t try_ctx0_offset, good_ctx0_offset;
-#endif
- uint32_t try_reg0_offset, good_reg0_offset;
-
- assert(min_run_size >= PAGE_SIZE);
- assert(min_run_size <= arena_maxclass);
-
- /*
- * Calculate known-valid settings before entering the run_size
- * expansion loop, so that the first part of the loop always copies
- * valid settings.
- *
- * The do..while loop iteratively reduces the number of regions until
- * the run header and the regions no longer overlap. A closed formula
- * would be quite messy, since there is an interdependency between the
- * header's mask length and the number of regions.
- */
- try_run_size = min_run_size;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) / bin->reg_size)
- + 1; /* Counter-act try_nregs-- in loop. */
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
-#ifdef JEMALLOC_PROF
- if (opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /* Add space for one (prof_ctx_t *) per region. */
- try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
- } else
- try_ctx0_offset = 0;
-#endif
- try_reg0_offset = try_run_size - (try_nregs * bin->reg_size);
- } while (try_hdr_size > try_reg0_offset);
- /* run_size expansion loop. */
- do {
- /*
- * Copy valid settings before trying more aggressive settings.
- */
- good_run_size = try_run_size;
- good_nregs = try_nregs;
- good_hdr_size = try_hdr_size;
-#ifdef JEMALLOC_PROF
- good_ctx0_offset = try_ctx0_offset;
-#endif
- good_reg0_offset = try_reg0_offset;
-
- /* Try more aggressive settings. */
- try_run_size += PAGE_SIZE;
- try_nregs = ((try_run_size - sizeof(arena_run_t)) /
- bin->reg_size) + 1; /* Counter-act try_nregs-- in loop. */
- do {
- try_nregs--;
- try_hdr_size = sizeof(arena_run_t);
-#ifdef JEMALLOC_PROF
- if (opt_prof && prof_promote == false) {
- /* Pad to a quantum boundary. */
- try_hdr_size = QUANTUM_CEILING(try_hdr_size);
- try_ctx0_offset = try_hdr_size;
- /*
- * Add space for one (prof_ctx_t *) per region.
- */
- try_hdr_size += try_nregs *
- sizeof(prof_ctx_t *);
- }
-#endif
- try_reg0_offset = try_run_size - (try_nregs *
- bin->reg_size);
- } while (try_hdr_size > try_reg0_offset);
- } while (try_run_size <= arena_maxclass
- && try_run_size <= arena_maxclass
- && RUN_MAX_OVRHD * (bin->reg_size << 3) > RUN_MAX_OVRHD_RELAX
- && (try_reg0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
- && try_hdr_size < PAGE_SIZE);
-
- assert(good_hdr_size <= good_reg0_offset);
+ if (zero) {
+ size_t redzone_size = bin_info->redzone_size;
+ memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
+ redzone_size);
+ memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
+ redzone_size);
+ } else {
+ memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
+ bin_info->reg_interval);
+ }
+}
- /* Copy final settings. */
- bin->run_size = good_run_size;
- bin->nregs = good_nregs;
-#ifdef JEMALLOC_PROF
- bin->ctx0_offset = good_ctx0_offset;
-#endif
- bin->reg0_offset = good_reg0_offset;
+void
+arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
+{
+ size_t size = bin_info->reg_size;
+ size_t redzone_size = bin_info->redzone_size;
+ size_t i;
+ bool error = false;
+
+ for (i = 1; i <= redzone_size; i++) {
+ unsigned byte;
+ if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
+ error = true;
+ malloc_printf("<jemalloc>: Corrupt redzone "
+ "%zu byte%s before %p (size %zu), byte=%#x\n", i,
+ (i == 1) ? "" : "s", ptr, size, byte);
+ }
+ }
+ for (i = 0; i < redzone_size; i++) {
+ unsigned byte;
+ if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
+ error = true;
+ malloc_printf("<jemalloc>: Corrupt redzone "
+ "%zu byte%s after end of %p (size %zu), byte=%#x\n",
+ i, (i == 1) ? "" : "s", ptr, size, byte);
+ }
+ }
+ if (opt_abort && error)
+ abort();
- return (good_run_size);
+ memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
+ bin_info->reg_interval);
}
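
arena_dalloc_junk_small() above scans redzone_size bytes of 0xa5 on each side of the region. A self-contained sketch of that layout and check, with made-up sizes and a plain stack buffer standing in for a run:

/* A region bracketed by redzones filled with 0xa5; the check scans the
 * bytes immediately before and after the usable size for corruption. */
#include <stdio.h>
#include <string.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

#define REDZONE 16
#define REGSIZE 32

static bool redzone_check(const uint8_t *ptr, size_t size, size_t redzone)
{
    bool ok = true;
    for (size_t i = 1; i <= redzone; i++) {
        if (ptr[-(ptrdiff_t)i] != 0xa5) {
            printf("corrupt redzone %zu byte(s) before region\n", i);
            ok = false;
        }
    }
    for (size_t i = 0; i < redzone; i++) {
        if (ptr[size + i] != 0xa5) {
            printf("corrupt redzone %zu byte(s) after region\n", i + 1);
            ok = false;
        }
    }
    return ok;
}

int main(void)
{
    uint8_t buf[REDZONE + REGSIZE + REDZONE];
    uint8_t *region = buf + REDZONE;

    memset(buf, 0xa5, sizeof(buf));          /* paint both redzones   */
    memset(region, 0, REGSIZE);              /* caller-visible region */
    printf("intact: %d\n", redzone_check(region, REGSIZE, REDZONE));

    region[REGSIZE] = 0;                     /* simulate an overflow  */
    printf("after overflow: %d\n", redzone_check(region, REGSIZE, REDZONE));
    return 0;
}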
void *
@@ -1479,14 +1425,14 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
arena_run_t *run;
size_t binind;
- binind = small_size2bin[size];
- assert(binind < nbins);
+ binind = SMALL_SIZE2BIN(size);
+ assert(binind < NBINS);
bin = &arena->bins[binind];
- size = bin->reg_size;
+ size = arena_bin_info[binind].reg_size;
malloc_mutex_lock(&bin->lock);
if ((run = bin->runcur) != NULL && run->nfree > 0)
- ret = arena_run_reg_alloc(run, bin);
+ ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
else
ret = arena_bin_malloc_hard(arena, bin);
@@ -1495,29 +1441,32 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
return (NULL);
}
-#ifdef JEMALLOC_STATS
- bin->stats.allocated += size;
- bin->stats.nmalloc++;
- bin->stats.nrequests++;
-#endif
- malloc_mutex_unlock(&bin->lock);
-#ifdef JEMALLOC_PROF
- if (isthreaded == false) {
- malloc_mutex_lock(&arena->lock);
- arena_prof_accum(arena, size);
- malloc_mutex_unlock(&arena->lock);
+ if (config_stats) {
+ bin->stats.allocated += size;
+ bin->stats.nmalloc++;
+ bin->stats.nrequests++;
}
-#endif
+ malloc_mutex_unlock(&bin->lock);
+ if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
+ prof_idump();
if (zero == false) {
-#ifdef JEMALLOC_FILL
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
-#endif
- } else
+ if (config_fill) {
+ if (opt_junk) {
+ arena_alloc_junk_small(ret,
+ &arena_bin_info[binind], false);
+ } else if (opt_zero)
+ memset(ret, 0, size);
+ }
+ } else {
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ret, &arena_bin_info[binind],
+ true);
+ }
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
memset(ret, 0, size);
+ }
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
return (ret);
}
@@ -1526,242 +1475,119 @@ void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
void *ret;
+ UNUSED bool idump;
/* Large allocation. */
size = PAGE_CEILING(size);
malloc_mutex_lock(&arena->lock);
- ret = (void *)arena_run_alloc(arena, size, true, zero);
+ ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
if (ret == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
-#ifdef JEMALLOC_STATS
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
- if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
- }
-#endif
-#ifdef JEMALLOC_PROF
- arena_prof_accum(arena, size);
-#endif
+ if (config_stats) {
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ }
+ if (config_prof)
+ idump = arena_prof_accum_locked(arena, size);
malloc_mutex_unlock(&arena->lock);
+ if (config_prof && idump)
+ prof_idump();
if (zero == false) {
-#ifdef JEMALLOC_FILL
- if (opt_junk)
- memset(ret, 0xa5, size);
- else if (opt_zero)
- memset(ret, 0, size);
-#endif
+ if (config_fill) {
+ if (opt_junk)
+ memset(ret, 0xa5, size);
+ else if (opt_zero)
+ memset(ret, 0, size);
+ }
}
return (ret);
}
-void *
-arena_malloc(size_t size, bool zero)
-{
-
- assert(size != 0);
- assert(QUANTUM_CEILING(size) <= arena_maxclass);
-
- if (size <= small_maxclass) {
-#ifdef JEMALLOC_TCACHE
- tcache_t *tcache;
-
- if ((tcache = tcache_get()) != NULL)
- return (tcache_alloc_small(tcache, size, zero));
- else
-
-#endif
- return (arena_malloc_small(choose_arena(), size, zero));
- } else {
-#ifdef JEMALLOC_TCACHE
- if (size <= tcache_maxclass) {
- tcache_t *tcache;
-
- if ((tcache = tcache_get()) != NULL)
- return (tcache_alloc_large(tcache, size, zero));
- else {
- return (arena_malloc_large(choose_arena(),
- size, zero));
- }
- } else
-#endif
- return (arena_malloc_large(choose_arena(), size, zero));
- }
-}
-
/* Only handles large allocations that require more than page alignment. */
void *
-arena_palloc(arena_t *arena, size_t size, size_t alloc_size, size_t alignment,
- bool zero)
+arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
void *ret;
- size_t offset;
+ size_t alloc_size, leadsize, trailsize;
+ arena_run_t *run;
arena_chunk_t *chunk;
assert((size & PAGE_MASK) == 0);
alignment = PAGE_CEILING(alignment);
+ alloc_size = size + alignment - PAGE;
malloc_mutex_lock(&arena->lock);
- ret = (void *)arena_run_alloc(arena, alloc_size, true, zero);
- if (ret == NULL) {
+ run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
+ if (run == NULL) {
malloc_mutex_unlock(&arena->lock);
return (NULL);
}
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret);
-
- offset = (uintptr_t)ret & (alignment - 1);
- assert((offset & PAGE_MASK) == 0);
- assert(offset < alloc_size);
- if (offset == 0)
- arena_run_trim_tail(arena, chunk, ret, alloc_size, size, false);
- else {
- size_t leadsize, trailsize;
-
- leadsize = alignment - offset;
- if (leadsize > 0) {
- arena_run_trim_head(arena, chunk, ret, alloc_size,
- alloc_size - leadsize);
- ret = (void *)((uintptr_t)ret + leadsize);
- }
-
- trailsize = alloc_size - leadsize - size;
- if (trailsize != 0) {
- /* Trim trailing space. */
- assert(trailsize < alloc_size);
- arena_run_trim_tail(arena, chunk, ret, size + trailsize,
- size, false);
- }
+ leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
+ (uintptr_t)run;
+ assert(alloc_size >= leadsize + size);
+ trailsize = alloc_size - leadsize - size;
+ ret = (void *)((uintptr_t)run + leadsize);
+ if (leadsize != 0) {
+ arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
+ leadsize);
+ }
+ if (trailsize != 0) {
+ arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
+ false);
}
-#ifdef JEMALLOC_STATS
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
- if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
+ if (config_stats) {
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
-#endif
malloc_mutex_unlock(&arena->lock);
-#ifdef JEMALLOC_FILL
- if (zero == false) {
+ if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, size);
else if (opt_zero)
memset(ret, 0, size);
}
-#endif
return (ret);
}
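
The alignment handling above over-allocates by alignment - PAGE, rounds the run address up to the requested alignment, and trims the leading and trailing slack. A standalone arithmetic sketch of that split; the addresses are invented and the rounding macro is a local equivalent, not necessarily jemalloc's exact ALIGNMENT_CEILING definition:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((uintptr_t)4096)
#define ALIGNMENT_CEILING(s, alignment) \
    ((((s) + ((alignment) - 1)) & ~((alignment) - 1)))

int main(void)
{
    uintptr_t alignment = 4 * PAGE;
    uintptr_t size = 2 * PAGE;
    uintptr_t alloc_size = size + alignment - PAGE;    /* 5 pages */

    uintptr_t run = 0x200000 + 2 * PAGE;               /* not aligned   */
    uintptr_t ret = ALIGNMENT_CEILING(run, alignment); /* aligned start */
    uintptr_t leadsize = ret - run;
    uintptr_t trailsize = alloc_size - leadsize - size;

    assert(ret % alignment == 0);                      /* alignment met */
    assert(leadsize + size + trailsize == alloc_size); /* nothing lost  */
    printf("lead=%lu pages, trail=%lu pages\n",
        (unsigned long)(leadsize / PAGE), (unsigned long)(trailsize / PAGE));
    return 0;
}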
-/* Return the size of the allocation pointed to by ptr. */
-size_t
-arena_salloc(const void *ptr)
-{
- size_t ret;
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
-
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
- mapbits = chunk->map[pageind-map_bias].bits;
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
- PAGE_SHIFT));
- assert(run->magic == ARENA_RUN_MAGIC);
- assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)run->bin->reg0_offset)) % run->bin->reg_size ==
- 0);
- ret = run->bin->reg_size;
- } else {
- assert(((uintptr_t)ptr & PAGE_MASK) == 0);
- ret = mapbits & ~PAGE_MASK;
- assert(ret != 0);
- }
-
- return (ret);
-}
-
-#ifdef JEMALLOC_PROF
void
arena_prof_promoted(const void *ptr, size_t size)
{
arena_chunk_t *chunk;
size_t pageind, binind;
+ cassert(config_prof);
assert(ptr != NULL);
assert(CHUNK_ADDR2BASE(ptr) != ptr);
- assert(isalloc(ptr) == PAGE_SIZE);
+ assert(isalloc(ptr, false) == PAGE);
+ assert(isalloc(ptr, true) == PAGE);
+ assert(size <= SMALL_MAXCLASS);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
- binind = small_size2bin[size];
- assert(binind < nbins);
- chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
- ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);
-}
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ binind = SMALL_SIZE2BIN(size);
+ assert(binind < NBINS);
+ arena_mapbits_large_binind_set(chunk, pageind, binind);
-size_t
-arena_salloc_demote(const void *ptr)
-{
- size_t ret;
- arena_chunk_t *chunk;
- size_t pageind, mapbits;
-
- assert(ptr != NULL);
- assert(CHUNK_ADDR2BASE(ptr) != ptr);
-
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
- mapbits = chunk->map[pageind-map_bias].bits;
- assert((mapbits & CHUNK_MAP_ALLOCATED) != 0);
- if ((mapbits & CHUNK_MAP_LARGE) == 0) {
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapbits >> PAGE_SHIFT)) <<
- PAGE_SHIFT));
- assert(run->magic == ARENA_RUN_MAGIC);
- assert(((uintptr_t)ptr - ((uintptr_t)run +
- (uintptr_t)run->bin->reg0_offset)) % run->bin->reg_size ==
- 0);
- ret = run->bin->reg_size;
- } else {
- assert(((uintptr_t)ptr & PAGE_MASK) == 0);
- ret = mapbits & ~PAGE_MASK;
- if (prof_promote && ret == PAGE_SIZE && (mapbits &
- CHUNK_MAP_CLASS_MASK) != 0) {
- size_t binind = ((mapbits & CHUNK_MAP_CLASS_MASK) >>
- CHUNK_MAP_CLASS_SHIFT) - 1;
- assert(binind < nbins);
- ret = chunk->arena->bins[binind].reg_size;
- }
- assert(ret != 0);
- }
-
- return (ret);
+ assert(isalloc(ptr, false) == PAGE);
+ assert(isalloc(ptr, true) == size);
}
-#endif
static void
arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
@@ -1771,17 +1597,18 @@ arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
/* Dissociate run from bin. */
if (run == bin->runcur)
bin->runcur = NULL;
- else if (bin->nregs != 1) {
- size_t run_pageind = (((uintptr_t)run - (uintptr_t)chunk)) >>
- PAGE_SHIFT;
- arena_chunk_map_t *run_mapelm =
- &chunk->map[run_pageind-map_bias];
- /*
- * This block's conditional is necessary because if the run
- * only contains one region, then it never gets inserted into
- * the non-full runs tree.
- */
- arena_run_tree_remove(&bin->runs, run_mapelm);
+ else {
+ size_t binind = arena_bin_index(chunk->arena, bin);
+ arena_bin_info_t *bin_info = &arena_bin_info[binind];
+
+ if (bin_info->nregs != 1) {
+ /*
+ * This block's conditional is necessary because if the
+ * run only contains one region, then it never gets
+ * inserted into the non-full runs tree.
+ */
+ arena_bin_runs_remove(bin, run);
+ }
}
}
@@ -1789,18 +1616,26 @@ static void
arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
arena_bin_t *bin)
{
+ size_t binind;
+ arena_bin_info_t *bin_info;
size_t npages, run_ind, past;
assert(run != bin->runcur);
- assert(arena_run_tree_search(&bin->runs, &chunk->map[
- (((uintptr_t)run-(uintptr_t)chunk)>>PAGE_SHIFT)-map_bias]) == NULL);
+ assert(arena_run_tree_search(&bin->runs,
+ arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
+ == NULL);
+
+ binind = arena_bin_index(chunk->arena, run->bin);
+ bin_info = &arena_bin_info[binind];
malloc_mutex_unlock(&bin->lock);
/******************************/
- npages = bin->run_size >> PAGE_SHIFT;
- run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> PAGE_SHIFT);
- past = (size_t)((PAGE_CEILING((uintptr_t)run->next) - (uintptr_t)chunk)
- >> PAGE_SHIFT);
+ npages = bin_info->run_size >> LG_PAGE;
+ run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
+ past = (size_t)(PAGE_CEILING((uintptr_t)run +
+ (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
+ bin_info->reg_interval - bin_info->redzone_size) -
+ (uintptr_t)chunk) >> LG_PAGE);
malloc_mutex_lock(&arena->lock);
/*
@@ -1808,32 +1643,24 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
* trim the clean pages before deallocating the dirty portion of the
* run.
*/
- if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
- - run_ind < npages) {
- /*
- * Trim clean pages. Convert to large run beforehand. Set the
- * last map element first, in case this is a one-page run.
- */
- chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
- (chunk->map[run_ind+npages-1-map_bias].bits &
- CHUNK_MAP_FLAGS_MASK);
- chunk->map[run_ind-map_bias].bits = bin->run_size |
- CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
- CHUNK_MAP_FLAGS_MASK);
- arena_run_trim_tail(arena, chunk, run, (npages << PAGE_SHIFT),
- ((past - run_ind) << PAGE_SHIFT), false);
+ assert(arena_mapbits_dirty_get(chunk, run_ind) ==
+ arena_mapbits_dirty_get(chunk, run_ind+npages-1));
+ if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
+ npages) {
+ /* Trim clean pages. Convert to large run beforehand. */
+ assert(npages > 0);
+ arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
+ arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
+ arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
+ ((past - run_ind) << LG_PAGE), false);
/* npages = past - run_ind; */
}
-#ifdef JEMALLOC_DEBUG
- run->magic = 0;
-#endif
- arena_run_dalloc(arena, run, true);
+ arena_run_dalloc(arena, run, true, false);
malloc_mutex_unlock(&arena->lock);
/****************************/
malloc_mutex_lock(&bin->lock);
-#ifdef JEMALLOC_STATS
- bin->stats.curruns--;
-#endif
+ if (config_stats)
+ bin->stats.curruns--;
}
static void
@@ -1842,157 +1669,114 @@ arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
{
/*
- * Make sure that bin->runcur always refers to the lowest non-full run,
- * if one exists.
+ * Make sure that if bin->runcur is non-NULL, it refers to the lowest
+ * non-full run. It is okay to NULL runcur out rather than proactively
+ * keeping it pointing at the lowest non-full run.
*/
- if (bin->runcur == NULL)
- bin->runcur = run;
- else if ((uintptr_t)run < (uintptr_t)bin->runcur) {
+ if ((uintptr_t)run < (uintptr_t)bin->runcur) {
/* Switch runcur. */
- if (bin->runcur->nfree > 0) {
- arena_chunk_t *runcur_chunk =
- CHUNK_ADDR2BASE(bin->runcur);
- size_t runcur_pageind = (((uintptr_t)bin->runcur -
- (uintptr_t)runcur_chunk)) >> PAGE_SHIFT;
- arena_chunk_map_t *runcur_mapelm =
- &runcur_chunk->map[runcur_pageind-map_bias];
-
- /* Insert runcur. */
- arena_run_tree_insert(&bin->runs, runcur_mapelm);
- }
+ if (bin->runcur->nfree > 0)
+ arena_bin_runs_insert(bin, bin->runcur);
bin->runcur = run;
- } else {
- size_t run_pageind = (((uintptr_t)run -
- (uintptr_t)chunk)) >> PAGE_SHIFT;
- arena_chunk_map_t *run_mapelm =
- &chunk->map[run_pageind-map_bias];
-
- assert(arena_run_tree_search(&bin->runs, run_mapelm) == NULL);
- arena_run_tree_insert(&bin->runs, run_mapelm);
- }
+ if (config_stats)
+ bin->stats.reruns++;
+ } else
+ arena_bin_runs_insert(bin, run);
}
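
arena_bin_lower_run() above keeps bin->runcur at the lowest-address non-full run, re-inserting the displaced runcur into the run set when it still has free regions. A standalone sketch of that policy with a hypothetical array-backed run set:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    void  *runcur;          /* preferred (lowest-address) non-full run */
    void  *avail[8];        /* stand-in for the non-full run tree      */
    size_t navail;
} toy_bin_t;

static void toy_runs_insert(toy_bin_t *bin, void *run)
{
    bin->avail[bin->navail++] = run;
}

static void toy_lower_run(toy_bin_t *bin, void *run)
{
    if ((uintptr_t)run < (uintptr_t)bin->runcur) {
        /* Switch runcur; the old one is still usable, so keep it. */
        toy_runs_insert(bin, bin->runcur);
        bin->runcur = run;
    } else
        toy_runs_insert(bin, run);
}

int main(void)
{
    char runs[2][64];
    toy_bin_t bin = { .runcur = &runs[1], .navail = 0 };

    toy_lower_run(&bin, &runs[0]);        /* lower address: takes over */
    assert(bin.runcur == &runs[0]);
    assert(bin.avail[0] == &runs[1]);     /* old runcur kept available */
    return 0;
}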
void
-arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
arena_chunk_map_t *mapelm)
{
size_t pageind;
arena_run_t *run;
arena_bin_t *bin;
-#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
- size_t size;
-#endif
+ arena_bin_info_t *bin_info;
+ size_t size, binind;
- pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
+ pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
- (mapelm->bits >> PAGE_SHIFT)) << PAGE_SHIFT));
- assert(run->magic == ARENA_RUN_MAGIC);
+ arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
bin = run->bin;
-#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
- size = bin->reg_size;
-#endif
+ binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
+ bin_info = &arena_bin_info[binind];
+ if (config_fill || config_stats)
+ size = bin_info->reg_size;
-#ifdef JEMALLOC_FILL
- if (opt_junk)
- memset(ptr, 0x5a, size);
-#endif
+ if (config_fill && opt_junk)
+ arena_dalloc_junk_small(ptr, bin_info);
arena_run_reg_dalloc(run, ptr);
- if (run->nfree == bin->nregs) {
+ if (run->nfree == bin_info->nregs) {
arena_dissociate_bin_run(chunk, run, bin);
arena_dalloc_bin_run(arena, chunk, run, bin);
} else if (run->nfree == 1 && run != bin->runcur)
arena_bin_lower_run(arena, chunk, run, bin);
-#ifdef JEMALLOC_STATS
- bin->stats.allocated -= size;
- bin->stats.ndalloc++;
-#endif
+ if (config_stats) {
+ bin->stats.allocated -= size;
+ bin->stats.ndalloc++;
+ }
}
-#ifdef JEMALLOC_STATS
void
-arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
- arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats)
+arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind, arena_chunk_map_t *mapelm)
{
- unsigned i;
+ arena_run_t *run;
+ arena_bin_t *bin;
- malloc_mutex_lock(&arena->lock);
- *nactive += arena->nactive;
- *ndirty += arena->ndirty;
+ run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
+ arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
+ bin = run->bin;
+ malloc_mutex_lock(&bin->lock);
+ arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
+ malloc_mutex_unlock(&bin->lock);
+}
- astats->mapped += arena->stats.mapped;
- astats->npurge += arena->stats.npurge;
- astats->nmadvise += arena->stats.nmadvise;
- astats->purged += arena->stats.purged;
- astats->allocated_large += arena->stats.allocated_large;
- astats->nmalloc_large += arena->stats.nmalloc_large;
- astats->ndalloc_large += arena->stats.ndalloc_large;
- astats->nrequests_large += arena->stats.nrequests_large;
+void
+arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
+ size_t pageind)
+{
+ arena_chunk_map_t *mapelm;
- for (i = 0; i < nlclasses; i++) {
- lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
- lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
- lstats[i].nrequests += arena->stats.lstats[i].nrequests;
- lstats[i].highruns += arena->stats.lstats[i].highruns;
- lstats[i].curruns += arena->stats.lstats[i].curruns;
+ if (config_debug) {
+ /* arena_ptr_small_binind_get() does extra sanity checking. */
+ assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
+ pageind)) != BININD_INVALID);
}
- malloc_mutex_unlock(&arena->lock);
+ mapelm = arena_mapp_get(chunk, pageind);
+ arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
+}
- for (i = 0; i < nbins; i++) {
- arena_bin_t *bin = &arena->bins[i];
+void
+arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
+{
- malloc_mutex_lock(&bin->lock);
- bstats[i].allocated += bin->stats.allocated;
- bstats[i].nmalloc += bin->stats.nmalloc;
- bstats[i].ndalloc += bin->stats.ndalloc;
- bstats[i].nrequests += bin->stats.nrequests;
-#ifdef JEMALLOC_TCACHE
- bstats[i].nfills += bin->stats.nfills;
- bstats[i].nflushes += bin->stats.nflushes;
-#endif
- bstats[i].nruns += bin->stats.nruns;
- bstats[i].reruns += bin->stats.reruns;
- bstats[i].highruns += bin->stats.highruns;
- bstats[i].curruns += bin->stats.curruns;
- malloc_mutex_unlock(&bin->lock);
+ if (config_fill || config_stats) {
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t size = arena_mapbits_large_size_get(chunk, pageind);
+
+ if (config_fill && config_stats && opt_junk)
+ memset(ptr, 0x5a, size);
+ if (config_stats) {
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
+ }
}
+
+ arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
}
-#endif
void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{
- /* Large allocation. */
-#ifdef JEMALLOC_FILL
-# ifndef JEMALLOC_STATS
- if (opt_junk)
-# endif
-#endif
- {
-#if (defined(JEMALLOC_FILL) || defined(JEMALLOC_STATS))
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >>
- PAGE_SHIFT;
- size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
-#endif
-
-#ifdef JEMALLOC_FILL
-# ifdef JEMALLOC_STATS
- if (opt_junk)
-# endif
- memset(ptr, 0x5a, size);
-#endif
-#ifdef JEMALLOC_STATS
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].ndalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns--;
-#endif
- }
-
- arena_run_dalloc(arena, (arena_run_t *)ptr, true);
+ malloc_mutex_lock(&arena->lock);
+ arena_dalloc_large_locked(arena, chunk, ptr);
+ malloc_mutex_unlock(&arena->lock);
}
static void
@@ -2009,24 +1793,19 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
malloc_mutex_lock(&arena->lock);
arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
true);
-#ifdef JEMALLOC_STATS
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
- if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns;
- }
-#endif
+ if (config_stats) {
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
+ arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
+ }
malloc_mutex_unlock(&arena->lock);
}
@@ -2034,20 +1813,19 @@ static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t oldsize, size_t size, size_t extra, bool zero)
{
- size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> PAGE_SHIFT;
- size_t npages = oldsize >> PAGE_SHIFT;
+ size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
+ size_t npages = oldsize >> LG_PAGE;
size_t followsize;
- assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
+ assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
/* Try to extend the run. */
assert(size + extra > oldsize);
malloc_mutex_lock(&arena->lock);
if (pageind + npages < chunk_npages &&
- (chunk->map[pageind+npages-map_bias].bits
- & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
- chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
- oldsize) {
+ arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
+ (followsize = arena_mapbits_unallocated_size_get(chunk,
+ pageind+npages)) >= size - oldsize) {
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
@@ -2057,10 +1835,11 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
size_t splitsize = (oldsize + followsize <= size + extra)
? followsize : size + extra - oldsize;
arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
- ((pageind+npages) << PAGE_SHIFT)), splitsize, true, zero);
+ ((pageind+npages) << LG_PAGE)), splitsize, true,
+ BININD_INVALID, zero);
size = oldsize + splitsize;
- npages = size >> PAGE_SHIFT;
+ npages = size >> LG_PAGE;
/*
* Mark the extended run as dirty if either portion of the run
@@ -2070,34 +1849,24 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
* arena_run_dalloc() with the dirty argument set to false
* (which is when dirty flag consistency would really matter).
*/
- flag_dirty = (chunk->map[pageind-map_bias].bits &
- CHUNK_MAP_DIRTY) |
- (chunk->map[pageind+npages-1-map_bias].bits &
- CHUNK_MAP_DIRTY);
- chunk->map[pageind-map_bias].bits = size | flag_dirty
- | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
- chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
- CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
-
-#ifdef JEMALLOC_STATS
- arena->stats.ndalloc_large++;
- arena->stats.allocated_large -= oldsize;
- arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].ndalloc++;
- arena->stats.lstats[(oldsize >> PAGE_SHIFT) - 1].curruns--;
-
- arena->stats.nmalloc_large++;
- arena->stats.nrequests_large++;
- arena->stats.allocated_large += size;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nmalloc++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].nrequests++;
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns++;
- if (arena->stats.lstats[(size >> PAGE_SHIFT) - 1].curruns >
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns) {
- arena->stats.lstats[(size >> PAGE_SHIFT) - 1].highruns =
- arena->stats.lstats[(size >> PAGE_SHIFT) -
- 1].curruns;
+ flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
+ arena_mapbits_dirty_get(chunk, pageind+npages-1);
+ arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
+ arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
+
+ if (config_stats) {
+ arena->stats.ndalloc_large++;
+ arena->stats.allocated_large -= oldsize;
+ arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
+ arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
+
+ arena->stats.nmalloc_large++;
+ arena->stats.nrequests_large++;
+ arena->stats.allocated_large += size;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
+ arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
}
-#endif
malloc_mutex_unlock(&arena->lock);
return (false);
}
@@ -2119,12 +1888,10 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
psize = PAGE_CEILING(size + extra);
if (psize == oldsize) {
/* Same size class. */
-#ifdef JEMALLOC_FILL
- if (opt_junk && size < oldsize) {
+ if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
size);
}
-#endif
return (false);
} else {
arena_chunk_t *chunk;
@@ -2132,16 +1899,13 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
arena = chunk->arena;
- assert(arena->magic == ARENA_MAGIC);
if (psize < oldsize) {
-#ifdef JEMALLOC_FILL
 			/* Fill before shrinking in order to avoid a race. */

- if (opt_junk) {
+ if (config_fill && opt_junk) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
-#endif
arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
psize);
return (false);
@@ -2149,12 +1913,11 @@ arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
oldsize, PAGE_CEILING(size),
psize - PAGE_CEILING(size), zero);
-#ifdef JEMALLOC_FILL
- if (ret == false && zero == false && opt_zero) {
+ if (config_fill && ret == false && zero == false &&
+ opt_zero) {
memset((void *)((uintptr_t)ptr + oldsize), 0,
size - oldsize);
}
-#endif
return (ret);
}
}
@@ -2169,24 +1932,22 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
* Avoid moving the allocation if the size class can be left the same.
*/
if (oldsize <= arena_maxclass) {
- if (oldsize <= small_maxclass) {
- assert(choose_arena()->bins[small_size2bin[
- oldsize]].reg_size == oldsize);
- if ((size + extra <= small_maxclass &&
- small_size2bin[size + extra] ==
- small_size2bin[oldsize]) || (size <= oldsize &&
+ if (oldsize <= SMALL_MAXCLASS) {
+ assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
+ == oldsize);
+ if ((size + extra <= SMALL_MAXCLASS &&
+ SMALL_SIZE2BIN(size + extra) ==
+ SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
size + extra >= oldsize)) {
-#ifdef JEMALLOC_FILL
- if (opt_junk && size < oldsize) {
+ if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size),
0x5a, oldsize - size);
}
-#endif
return (ptr);
}
} else {
assert(size <= arena_maxclass);
- if (size + extra > small_maxclass) {
+ if (size + extra > SMALL_MAXCLASS) {
if (arena_ralloc_large(ptr, oldsize, size,
extra, zero) == false)
return (ptr);
@@ -2199,8 +1960,9 @@ arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
}
void *
-arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero)
+arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
+ size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
+ bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
@@ -2210,25 +1972,31 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
if (ret != NULL)
return (ret);
-
/*
* size and oldsize are different enough that we need to move the
* object. In that case, fall back to allocating new space and
* copying.
*/
- if (alignment != 0)
- ret = ipalloc(size + extra, alignment, zero);
- else
- ret = arena_malloc(size + extra, zero);
+ if (alignment != 0) {
+ size_t usize = sa2u(size + extra, alignment);
+ if (usize == 0)
+ return (NULL);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc, arena);
+ } else
+ ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
if (ret == NULL) {
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
- if (alignment != 0)
- ret = ipalloc(size, alignment, zero);
- else
- ret = arena_malloc(size, zero);
+ if (alignment != 0) {
+ size_t usize = sa2u(size, alignment);
+ if (usize == 0)
+ return (NULL);
+ ret = ipallocx(usize, alignment, zero, try_tcache_alloc,
+ arena);
+ } else
+ ret = arena_malloc(arena, size, zero, try_tcache_alloc);
if (ret == NULL)
return (NULL);
@@ -2241,313 +2009,305 @@ arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* expectation that the extra bytes will be reliably preserved.
*/
copysize = (size < oldsize) ? size : oldsize;
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
- idalloc(ptr);
+ iqallocx(ptr, try_tcache_dalloc);
+ return (ret);
+}
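+
The move path above is the usual reallocate-and-copy fallback: try to resize in place, otherwise allocate new space (first with the extra bytes, then without), copy the smaller of the two sizes, and release the old block. A self-contained sketch of that control flow using plain malloc/free and a hypothetical toy_ralloc_no_move():

#include <stdlib.h>
#include <string.h>

static void *toy_ralloc_no_move(void *ptr, size_t oldsize, size_t size)
{
    /* Pretend in-place resizing only succeeds when the size is unchanged. */
    return (size == oldsize) ? ptr : NULL;
}

static void *toy_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra)
{
    void *ret = toy_ralloc_no_move(ptr, oldsize, size);
    if (ret != NULL)
        return ret;

    ret = malloc(size + extra);
    if (ret == NULL) {
        if (extra == 0)
            return NULL;
        ret = malloc(size);            /* retry without the extra bytes */
        if (ret == NULL)
            return NULL;
    }

    size_t copysize = (size < oldsize) ? size : oldsize;
    memcpy(ret, ptr, copysize);        /* bytes past oldsize are not preserved */
    free(ptr);
    return ret;
}

int main(void)
{
    char *p = malloc(16);
    if (p == NULL)
        return 1;
    memset(p, 'x', 16);

    char *q = toy_ralloc(p, 16, 64, 32);
    if (q == NULL)
        return 1;
    free(q);
    return 0;
}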
+
+dss_prec_t
+arena_dss_prec_get(arena_t *arena)
+{
+ dss_prec_t ret;
+
+ malloc_mutex_lock(&arena->lock);
+ ret = arena->dss_prec;
+ malloc_mutex_unlock(&arena->lock);
return (ret);
}
+void
+arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
+{
+
+ malloc_mutex_lock(&arena->lock);
+ arena->dss_prec = dss_prec;
+ malloc_mutex_unlock(&arena->lock);
+}
+
+void
+arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
+ size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
+ malloc_large_stats_t *lstats)
+{
+ unsigned i;
+
+ malloc_mutex_lock(&arena->lock);
+ *dss = dss_prec_names[arena->dss_prec];
+ *nactive += arena->nactive;
+ *ndirty += arena->ndirty;
+
+ astats->mapped += arena->stats.mapped;
+ astats->npurge += arena->stats.npurge;
+ astats->nmadvise += arena->stats.nmadvise;
+ astats->purged += arena->stats.purged;
+ astats->allocated_large += arena->stats.allocated_large;
+ astats->nmalloc_large += arena->stats.nmalloc_large;
+ astats->ndalloc_large += arena->stats.ndalloc_large;
+ astats->nrequests_large += arena->stats.nrequests_large;
+
+ for (i = 0; i < nlclasses; i++) {
+ lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
+ lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
+ lstats[i].nrequests += arena->stats.lstats[i].nrequests;
+ lstats[i].curruns += arena->stats.lstats[i].curruns;
+ }
+ malloc_mutex_unlock(&arena->lock);
+
+ for (i = 0; i < NBINS; i++) {
+ arena_bin_t *bin = &arena->bins[i];
+
+ malloc_mutex_lock(&bin->lock);
+ bstats[i].allocated += bin->stats.allocated;
+ bstats[i].nmalloc += bin->stats.nmalloc;
+ bstats[i].ndalloc += bin->stats.ndalloc;
+ bstats[i].nrequests += bin->stats.nrequests;
+ if (config_tcache) {
+ bstats[i].nfills += bin->stats.nfills;
+ bstats[i].nflushes += bin->stats.nflushes;
+ }
+ bstats[i].nruns += bin->stats.nruns;
+ bstats[i].reruns += bin->stats.reruns;
+ bstats[i].curruns += bin->stats.curruns;
+ malloc_mutex_unlock(&bin->lock);
+ }
+}
+
bool
arena_new(arena_t *arena, unsigned ind)
{
unsigned i;
arena_bin_t *bin;
- size_t prev_run_size;
arena->ind = ind;
+ arena->nthreads = 0;
if (malloc_mutex_init(&arena->lock))
return (true);
-#ifdef JEMALLOC_STATS
- memset(&arena->stats, 0, sizeof(arena_stats_t));
- arena->stats.lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
- sizeof(malloc_large_stats_t));
- if (arena->stats.lstats == NULL)
- return (true);
- memset(arena->stats.lstats, 0, nlclasses *
- sizeof(malloc_large_stats_t));
-# ifdef JEMALLOC_TCACHE
- ql_new(&arena->tcache_ql);
-# endif
-#endif
+ if (config_stats) {
+ memset(&arena->stats, 0, sizeof(arena_stats_t));
+ arena->stats.lstats =
+ (malloc_large_stats_t *)base_alloc(nlclasses *
+ sizeof(malloc_large_stats_t));
+ if (arena->stats.lstats == NULL)
+ return (true);
+ memset(arena->stats.lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ if (config_tcache)
+ ql_new(&arena->tcache_ql);
+ }
-#ifdef JEMALLOC_PROF
- arena->prof_accumbytes = 0;
-#endif
+ if (config_prof)
+ arena->prof_accumbytes = 0;
+
+ arena->dss_prec = chunk_dss_prec_get();
/* Initialize chunks. */
- ql_new(&arena->chunks_dirty);
+ arena_chunk_dirty_new(&arena->chunks_dirty);
arena->spare = NULL;
arena->nactive = 0;
arena->ndirty = 0;
arena->npurgatory = 0;
- arena_avail_tree_new(&arena->runs_avail_clean);
- arena_avail_tree_new(&arena->runs_avail_dirty);
+ arena_avail_tree_new(&arena->runs_avail);
/* Initialize bins. */
- prev_run_size = PAGE_SIZE;
-
- i = 0;
-#ifdef JEMALLOC_TINY
- /* (2^n)-spaced tiny bins. */
- for (; i < ntbins; i++) {
+ for (i = 0; i < NBINS; i++) {
bin = &arena->bins[i];
if (malloc_mutex_init(&bin->lock))
return (true);
bin->runcur = NULL;
arena_run_tree_new(&bin->runs);
-
- bin->reg_size = (1U << (LG_TINY_MIN + i));
-
- prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-#ifdef JEMALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
+ if (config_stats)
+ memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
}
-#endif
- /* Quantum-spaced bins. */
- for (; i < ntbins + nqbins; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
- return (true);
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
+ return (false);
+}
- bin->reg_size = (i - ntbins + 1) << LG_QUANTUM;
+/*
+ * Calculate bin_info->run_size such that it meets the following constraints:
+ *
+ * *) bin_info->run_size >= min_run_size
+ * *) bin_info->run_size <= arena_maxclass
+ * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
+ * *) bin_info->nregs <= RUN_MAXREGS
+ *
+ * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
+ * calculated here, since these settings are all interdependent.
+ */
+static size_t
+bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
+{
+ size_t pad_size;
+ size_t try_run_size, good_run_size;
+ uint32_t try_nregs, good_nregs;
+ uint32_t try_hdr_size, good_hdr_size;
+ uint32_t try_bitmap_offset, good_bitmap_offset;
+ uint32_t try_ctx0_offset, good_ctx0_offset;
+ uint32_t try_redzone0_offset, good_redzone0_offset;
- prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+ assert(min_run_size >= PAGE);
+ assert(min_run_size <= arena_maxclass);
-#ifdef JEMALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
+ /*
+ * Determine redzone size based on minimum alignment and minimum
+ * redzone size. Add padding to the end of the run if it is needed to
+ * align the regions. The padding allows each redzone to be half the
+ * minimum alignment; without the padding, each redzone would have to
+ * be twice as large in order to maintain alignment.
+ */
+ if (config_fill && opt_redzone) {
+ size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
+ if (align_min <= REDZONE_MINSIZE) {
+ bin_info->redzone_size = REDZONE_MINSIZE;
+ pad_size = 0;
+ } else {
+ bin_info->redzone_size = align_min >> 1;
+ pad_size = bin_info->redzone_size;
+ }
+ } else {
+ bin_info->redzone_size = 0;
+ pad_size = 0;
}
+ bin_info->reg_interval = bin_info->reg_size +
+ (bin_info->redzone_size << 1);
- /* Cacheline-spaced bins. */
- for (; i < ntbins + nqbins + ncbins; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
- return (true);
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
-
- bin->reg_size = cspace_min + ((i - (ntbins + nqbins)) <<
- LG_CACHELINE);
-
- prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
-
-#ifdef JEMALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
+ /*
+ * Calculate known-valid settings before entering the run_size
+ * expansion loop, so that the first part of the loop always copies
+ * valid settings.
+ *
+ * The do..while loop iteratively reduces the number of regions until
+ * the run header and the regions no longer overlap. A closed formula
+ * would be quite messy, since there is an interdependency between the
+ * header's mask length and the number of regions.
+ */
+ try_run_size = min_run_size;
+ try_nregs = ((try_run_size - sizeof(arena_run_t)) /
+ bin_info->reg_interval)
+ + 1; /* Counter-act try_nregs-- in loop. */
+ if (try_nregs > RUN_MAXREGS) {
+ try_nregs = RUN_MAXREGS
+ + 1; /* Counter-act try_nregs-- in loop. */
}
+ do {
+ try_nregs--;
+ try_hdr_size = sizeof(arena_run_t);
+ /* Pad to a long boundary. */
+ try_hdr_size = LONG_CEILING(try_hdr_size);
+ try_bitmap_offset = try_hdr_size;
+ /* Add space for bitmap. */
+ try_hdr_size += bitmap_size(try_nregs);
+ if (config_prof && opt_prof && prof_promote == false) {
+ /* Pad to a quantum boundary. */
+ try_hdr_size = QUANTUM_CEILING(try_hdr_size);
+ try_ctx0_offset = try_hdr_size;
+ /* Add space for one (prof_ctx_t *) per region. */
+ try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
+ } else
+ try_ctx0_offset = 0;
+ try_redzone0_offset = try_run_size - (try_nregs *
+ bin_info->reg_interval) - pad_size;
+ } while (try_hdr_size > try_redzone0_offset);
- /* Subpage-spaced bins. */
- for (; i < nbins; i++) {
- bin = &arena->bins[i];
- if (malloc_mutex_init(&bin->lock))
- return (true);
- bin->runcur = NULL;
- arena_run_tree_new(&bin->runs);
+ /* run_size expansion loop. */
+ do {
+ /*
+ * Copy valid settings before trying more aggressive settings.
+ */
+ good_run_size = try_run_size;
+ good_nregs = try_nregs;
+ good_hdr_size = try_hdr_size;
+ good_bitmap_offset = try_bitmap_offset;
+ good_ctx0_offset = try_ctx0_offset;
+ good_redzone0_offset = try_redzone0_offset;
- bin->reg_size = sspace_min + ((i - (ntbins + nqbins + ncbins))
- << LG_SUBPAGE);
+ /* Try more aggressive settings. */
+ try_run_size += PAGE;
+ try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
+ bin_info->reg_interval)
+ + 1; /* Counter-act try_nregs-- in loop. */
+ if (try_nregs > RUN_MAXREGS) {
+ try_nregs = RUN_MAXREGS
+ + 1; /* Counter-act try_nregs-- in loop. */
+ }
+ do {
+ try_nregs--;
+ try_hdr_size = sizeof(arena_run_t);
+ /* Pad to a long boundary. */
+ try_hdr_size = LONG_CEILING(try_hdr_size);
+ try_bitmap_offset = try_hdr_size;
+ /* Add space for bitmap. */
+ try_hdr_size += bitmap_size(try_nregs);
+ if (config_prof && opt_prof && prof_promote == false) {
+ /* Pad to a quantum boundary. */
+ try_hdr_size = QUANTUM_CEILING(try_hdr_size);
+ try_ctx0_offset = try_hdr_size;
+ /*
+ * Add space for one (prof_ctx_t *) per region.
+ */
+ try_hdr_size += try_nregs *
+ sizeof(prof_ctx_t *);
+ }
+ try_redzone0_offset = try_run_size - (try_nregs *
+ bin_info->reg_interval) - pad_size;
+ } while (try_hdr_size > try_redzone0_offset);
+ } while (try_run_size <= arena_maxclass
+ && try_run_size <= arena_maxclass
+ && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
+ RUN_MAX_OVRHD_RELAX
+ && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
+ && try_nregs < RUN_MAXREGS);
- prev_run_size = arena_bin_run_size_calc(bin, prev_run_size);
+ assert(good_hdr_size <= good_redzone0_offset);
-#ifdef JEMALLOC_STATS
- memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
-#endif
- }
+ /* Copy final settings. */
+ bin_info->run_size = good_run_size;
+ bin_info->nregs = good_nregs;
+ bin_info->bitmap_offset = good_bitmap_offset;
+ bin_info->ctx0_offset = good_ctx0_offset;
+ bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
-#ifdef JEMALLOC_DEBUG
- arena->magic = ARENA_MAGIC;
-#endif
+ assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
+ * bin_info->reg_interval) + pad_size == bin_info->run_size);
- return (false);
+ return (good_run_size);
}
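
The inner do..while above starts from an optimistic region count and shrinks it until the run header, whose bitmap grows with the count, no longer overlaps region 0. A standalone sketch of that fixpoint with made-up header and bitmap sizes, not jemalloc's real layout:

#include <assert.h>
#include <stdio.h>

#define RUN_SIZE     4096u
#define REG_INTERVAL  128u
#define BASE_HDR       32u

static unsigned bitmap_bytes(unsigned nregs)
{
    return (nregs + 7) / 8;                 /* one bit per region */
}

int main(void)
{
    unsigned try_nregs = RUN_SIZE / REG_INTERVAL + 1; /* counter-act -- below */
    unsigned hdr_size, reg0_offset;

    do {
        try_nregs--;
        hdr_size = BASE_HDR + bitmap_bytes(try_nregs);
        reg0_offset = RUN_SIZE - try_nregs * REG_INTERVAL;
    } while (hdr_size > reg0_offset);       /* shrink until the header fits */

    assert(hdr_size <= reg0_offset);        /* header fits before region 0 */
    printf("nregs=%u hdr=%u reg0_offset=%u\n",
        try_nregs, hdr_size, reg0_offset);
    return 0;
}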
-#ifdef JEMALLOC_DEBUG
static void
-small_size2bin_validate(void)
+bin_info_init(void)
{
- size_t i, size, binind;
-
- assert(small_size2bin[0] == 0xffU);
- i = 1;
-# ifdef JEMALLOC_TINY
- /* Tiny. */
- for (; i < (1U << LG_TINY_MIN); i++) {
- size = pow2_ceil(1U << LG_TINY_MIN);
- binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
- assert(small_size2bin[i] == binind);
- }
- for (; i < qspace_min; i++) {
- size = pow2_ceil(i);
- binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
- assert(small_size2bin[i] == binind);
- }
-# endif
- /* Quantum-spaced. */
- for (; i <= qspace_max; i++) {
- size = QUANTUM_CEILING(i);
- binind = ntbins + (size >> LG_QUANTUM) - 1;
- assert(small_size2bin[i] == binind);
- }
- /* Cacheline-spaced. */
- for (; i <= cspace_max; i++) {
- size = CACHELINE_CEILING(i);
- binind = ntbins + nqbins + ((size - cspace_min) >>
- LG_CACHELINE);
- assert(small_size2bin[i] == binind);
- }
- /* Sub-page. */
- for (; i <= sspace_max; i++) {
- size = SUBPAGE_CEILING(i);
- binind = ntbins + nqbins + ncbins + ((size - sspace_min)
- >> LG_SUBPAGE);
- assert(small_size2bin[i] == binind);
- }
+ arena_bin_info_t *bin_info;
+ size_t prev_run_size = PAGE;
+
+#define SIZE_CLASS(bin, delta, size) \
+ bin_info = &arena_bin_info[bin]; \
+ bin_info->reg_size = size; \
+ prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
+ bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
+ SIZE_CLASSES
+#undef SIZE_CLASS
}
-#endif
-static bool
-small_size2bin_init(void)
-{
-
- if (opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
- || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
- || sizeof(const_small_size2bin) != small_maxclass + 1)
- return (small_size2bin_init_hard());
-
- small_size2bin = const_small_size2bin;
-#ifdef JEMALLOC_DEBUG
- assert(sizeof(const_small_size2bin) == small_maxclass + 1);
- small_size2bin_validate();
-#endif
- return (false);
-}
-
-static bool
-small_size2bin_init_hard(void)
-{
- size_t i, size, binind;
- uint8_t *custom_small_size2bin;
-
- assert(opt_lg_qspace_max != LG_QSPACE_MAX_DEFAULT
- || opt_lg_cspace_max != LG_CSPACE_MAX_DEFAULT
- || sizeof(const_small_size2bin) != small_maxclass + 1);
-
- custom_small_size2bin = (uint8_t *)base_alloc(small_maxclass + 1);
- if (custom_small_size2bin == NULL)
- return (true);
-
- custom_small_size2bin[0] = 0xffU;
- i = 1;
-#ifdef JEMALLOC_TINY
- /* Tiny. */
- for (; i < (1U << LG_TINY_MIN); i++) {
- size = pow2_ceil(1U << LG_TINY_MIN);
- binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
- custom_small_size2bin[i] = binind;
- }
- for (; i < qspace_min; i++) {
- size = pow2_ceil(i);
- binind = ffs((int)(size >> (LG_TINY_MIN + 1)));
- custom_small_size2bin[i] = binind;
- }
-#endif
- /* Quantum-spaced. */
- for (; i <= qspace_max; i++) {
- size = QUANTUM_CEILING(i);
- binind = ntbins + (size >> LG_QUANTUM) - 1;
- custom_small_size2bin[i] = binind;
- }
- /* Cacheline-spaced. */
- for (; i <= cspace_max; i++) {
- size = CACHELINE_CEILING(i);
- binind = ntbins + nqbins + ((size - cspace_min) >>
- LG_CACHELINE);
- custom_small_size2bin[i] = binind;
- }
- /* Sub-page. */
- for (; i <= sspace_max; i++) {
- size = SUBPAGE_CEILING(i);
- binind = ntbins + nqbins + ncbins + ((size - sspace_min) >>
- LG_SUBPAGE);
- custom_small_size2bin[i] = binind;
- }
-
- small_size2bin = custom_small_size2bin;
-#ifdef JEMALLOC_DEBUG
- small_size2bin_validate();
-#endif
- return (false);
-}
-
-bool
+void
arena_boot(void)
{
size_t header_size;
unsigned i;
- /* Set variables according to the value of opt_lg_[qc]space_max. */
- qspace_max = (1U << opt_lg_qspace_max);
- cspace_min = CACHELINE_CEILING(qspace_max);
- if (cspace_min == qspace_max)
- cspace_min += CACHELINE;
- cspace_max = (1U << opt_lg_cspace_max);
- sspace_min = SUBPAGE_CEILING(cspace_max);
- if (sspace_min == cspace_max)
- sspace_min += SUBPAGE;
- assert(sspace_min < PAGE_SIZE);
- sspace_max = PAGE_SIZE - SUBPAGE;
-
-#ifdef JEMALLOC_TINY
- assert(LG_QUANTUM >= LG_TINY_MIN);
-#endif
- assert(ntbins <= LG_QUANTUM);
- nqbins = qspace_max >> LG_QUANTUM;
- ncbins = ((cspace_max - cspace_min) >> LG_CACHELINE) + 1;
- nsbins = ((sspace_max - sspace_min) >> LG_SUBPAGE) + 1;
- nbins = ntbins + nqbins + ncbins + nsbins;
-
- /*
- * The small_size2bin lookup table uses uint8_t to encode each bin
- * index, so we cannot support more than 256 small size classes. This
- * limit is difficult to exceed (not even possible with 16B quantum and
- * 4KiB pages), and such configurations are impractical, but
- * nonetheless we need to protect against this case in order to avoid
- * undefined behavior.
- *
- * Further constrain nbins to 255 if prof_promote is true, since all
- * small size classes, plus a "not small" size class must be stored in
- * 8 bits of arena_chunk_map_t's bits field.
- */
-#ifdef JEMALLOC_PROF
- if (opt_prof && prof_promote) {
- if (nbins > 255) {
- char line_buf[UMAX2S_BUFSIZE];
- malloc_write("<jemalloc>: Too many small size classes (");
- malloc_write(u2s(nbins, 10, line_buf));
- malloc_write(" > max 255)\n");
- abort();
- }
- } else
-#endif
- if (nbins > 256) {
- char line_buf[UMAX2S_BUFSIZE];
- malloc_write("<jemalloc>: Too many small size classes (");
- malloc_write(u2s(nbins, 10, line_buf));
- malloc_write(" > max 256)\n");
- abort();
- }
-
- if (small_size2bin_init())
- return (true);
-
/*
* Compute the header size such that it is large enough to contain the
* page map. The page map is biased to omit entries for the header
@@ -2562,14 +2322,44 @@ arena_boot(void)
*/
map_bias = 0;
for (i = 0; i < 3; i++) {
- header_size = offsetof(arena_chunk_t, map)
- + (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
- map_bias = (header_size >> PAGE_SHIFT) + ((header_size &
- PAGE_MASK) != 0);
+ header_size = offsetof(arena_chunk_t, map) +
+ (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
+ map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
+ != 0);
}
assert(map_bias > 0);
- arena_maxclass = chunksize - (map_bias << PAGE_SHIFT);
+ arena_maxclass = chunksize - (map_bias << LG_PAGE);
- return (false);
+ bin_info_init();
+}
+
+void
+arena_prefork(arena_t *arena)
+{
+ unsigned i;
+
+ malloc_mutex_prefork(&arena->lock);
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_prefork(&arena->bins[i].lock);
+}
+
+void
+arena_postfork_parent(arena_t *arena)
+{
+ unsigned i;
+
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_postfork_parent(&arena->bins[i].lock);
+ malloc_mutex_postfork_parent(&arena->lock);
+}
+
+void
+arena_postfork_child(arena_t *arena)
+{
+ unsigned i;
+
+ for (i = 0; i < NBINS; i++)
+ malloc_mutex_postfork_child(&arena->bins[i].lock);
+ malloc_mutex_postfork_child(&arena->lock);
}
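
The bin_info_run_size_calc() logic above packs an arena_run_t header, a per-region bitmap and, when heap profiling is enabled, one prof_ctx_t pointer per region at the front of each run, then checks that the header stops short of the first region's redzone. The following standalone sketch reproduces only that inner fitting loop; the helper names (fit_regions, BITMAP_BYTES), the fixed 64-bit bitmap groups and the omission of the profiling/redzone bookkeeping are simplifications, not jemalloc's API.

    /* Sketch of the inner do/while above: fit as many fixed-size regions
     * as possible after a (padded) header holding the run struct and a
     * one-bit-per-region bitmap. */
    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define LONG_CEILING(s)  (((s) + sizeof(long) - 1) & ~(sizeof(long) - 1))
    #define BITMAP_BYTES(n)  ((((n) + 63) / 64) * sizeof(uint64_t))

    static unsigned
    fit_regions(size_t run_size, size_t reg_size, size_t hdr_struct,
        size_t *hdr_out)
    {
        unsigned nregs = (unsigned)(run_size / reg_size) + 1;
        size_t hdr, reg0_offset;

        do {
            nregs--;
            hdr = LONG_CEILING(hdr_struct);  /* pad to a long boundary */
            hdr += BITMAP_BYTES(nregs);      /* space for the region bitmap */
            reg0_offset = run_size - nregs * reg_size;
        } while (hdr > reg0_offset);         /* header must not reach region 0 */

        *hdr_out = hdr;
        return (nregs);
    }

    int
    main(void)
    {
        size_t hdr;
        unsigned nregs = fit_regions(4096, 64, 32, &hdr);

        printf("run of 4096 bytes, 64-byte regions: %u regions, %zu-byte header\n",
            nregs, hdr);
        assert(hdr <= 4096 - nregs * 64);
        return (0);
    }

The outer loop in the diff repeats this fit for larger trial run sizes, one page at a time, while the leftover space at the end of the run still exceeds the RUN_MAX_OVRHD threshold, and finally keeps the settings last copied into the good_* variables.
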
diff --git a/dep/jemalloc/src/atomic.c b/dep/jemalloc/src/atomic.c
new file mode 100644
index 00000000000..77ee313113b
--- /dev/null
+++ b/dep/jemalloc/src/atomic.c
@@ -0,0 +1,2 @@
+#define JEMALLOC_ATOMIC_C_
+#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/dep/jemalloc/src/base.c b/dep/jemalloc/src/base.c
index cc85e8494ec..4e62e8fa918 100644
--- a/dep/jemalloc/src/base.c
+++ b/dep/jemalloc/src/base.c
@@ -4,7 +4,7 @@
/******************************************************************************/
/* Data. */
-malloc_mutex_t base_mtx;
+static malloc_mutex_t base_mtx;
/*
* Current pages that are being used for internal memory allocations. These
@@ -32,7 +32,8 @@ base_pages_alloc(size_t minsize)
assert(minsize != 0);
csize = CHUNK_CEILING(minsize);
zero = false;
- base_pages = chunk_alloc(csize, true, &zero);
+ base_pages = chunk_alloc(csize, chunksize, true, &zero,
+ chunk_dss_prec_get());
if (base_pages == NULL)
return (true);
base_next_addr = base_pages;
@@ -62,6 +63,18 @@ base_alloc(size_t size)
ret = base_next_addr;
base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
malloc_mutex_unlock(&base_mtx);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);
+
+ return (ret);
+}
+
+void *
+base_calloc(size_t number, size_t size)
+{
+ void *ret = base_alloc(number * size);
+
+ if (ret != NULL)
+ memset(ret, 0, number * size);
return (ret);
}
@@ -76,6 +89,7 @@ base_node_alloc(void)
ret = base_nodes;
base_nodes = *(extent_node_t **)ret;
malloc_mutex_unlock(&base_mtx);
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t));
} else {
malloc_mutex_unlock(&base_mtx);
ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
@@ -88,6 +102,7 @@ void
base_node_dealloc(extent_node_t *node)
{
+ VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
malloc_mutex_lock(&base_mtx);
*(extent_node_t **)node = base_nodes;
base_nodes = node;
@@ -104,3 +119,24 @@ base_boot(void)
return (false);
}
+
+void
+base_prefork(void)
+{
+
+ malloc_mutex_prefork(&base_mtx);
+}
+
+void
+base_postfork_parent(void)
+{
+
+ malloc_mutex_postfork_parent(&base_mtx);
+}
+
+void
+base_postfork_child(void)
+{
+
+ malloc_mutex_postfork_child(&base_mtx);
+}
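
base_prefork()/base_postfork_parent()/base_postfork_child() follow the usual pthread_atfork() discipline: acquire the allocator mutex before fork() so no other thread can hold it at the instant the child is created, then release it on both sides afterwards. A minimal sketch of that pattern with one mutex; the plain unlock in the child is a simplification (jemalloc's malloc_mutex_postfork_child() reinitializes the mutex on most platforms):

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;

    static void prefork(void)         { pthread_mutex_lock(&base_lock); }
    static void postfork_parent(void) { pthread_mutex_unlock(&base_lock); }
    static void postfork_child(void)  { pthread_mutex_unlock(&base_lock); }

    int
    main(void)
    {
        /* Registered once; fork() then runs the hooks automatically, so the
         * child never starts life with base_lock held by a thread that no
         * longer exists in its address space. */
        pthread_atfork(prefork, postfork_parent, postfork_child);

        pid_t pid = fork();
        if (pid == 0) {
            pthread_mutex_lock(&base_lock);   /* safe in the child */
            pthread_mutex_unlock(&base_lock);
            _exit(0);
        }
        waitpid(pid, NULL, 0);
        puts("fork-safe lock handoff done");
        return (0);
    }
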
diff --git a/dep/jemalloc/src/bitmap.c b/dep/jemalloc/src/bitmap.c
new file mode 100644
index 00000000000..b47e2629093
--- /dev/null
+++ b/dep/jemalloc/src/bitmap.c
@@ -0,0 +1,90 @@
+#define JEMALLOC_BITMAP_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static size_t bits2groups(size_t nbits);
+
+/******************************************************************************/
+
+static size_t
+bits2groups(size_t nbits)
+{
+
+ return ((nbits >> LG_BITMAP_GROUP_NBITS) +
+ !!(nbits & BITMAP_GROUP_NBITS_MASK));
+}
+
+void
+bitmap_info_init(bitmap_info_t *binfo, size_t nbits)
+{
+ unsigned i;
+ size_t group_count;
+
+ assert(nbits > 0);
+ assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS));
+
+ /*
+ * Compute the number of groups necessary to store nbits bits, and
+ * progressively work upward through the levels until reaching a level
+ * that requires only one group.
+ */
+ binfo->levels[0].group_offset = 0;
+ group_count = bits2groups(nbits);
+ for (i = 1; group_count > 1; i++) {
+ assert(i < BITMAP_MAX_LEVELS);
+ binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ + group_count;
+ group_count = bits2groups(group_count);
+ }
+ binfo->levels[i].group_offset = binfo->levels[i-1].group_offset
+ + group_count;
+ binfo->nlevels = i;
+ binfo->nbits = nbits;
+}
+
+size_t
+bitmap_info_ngroups(const bitmap_info_t *binfo)
+{
+
+ return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP);
+}
+
+size_t
+bitmap_size(size_t nbits)
+{
+ bitmap_info_t binfo;
+
+ bitmap_info_init(&binfo, nbits);
+ return (bitmap_info_ngroups(&binfo));
+}
+
+void
+bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo)
+{
+ size_t extra;
+ unsigned i;
+
+ /*
+ * Bits are actually inverted with regard to the external bitmap
+ * interface, so the bitmap starts out with all 1 bits, except for
+ * trailing unused bits (if any). Note that each group uses bit 0 to
+ * correspond to the first logical bit in the group, so extra bits
+ * are the most significant bits of the last group.
+ */
+ memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset <<
+ LG_SIZEOF_BITMAP);
+ extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK))
+ & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->levels[1].group_offset - 1] >>= extra;
+ for (i = 1; i < binfo->nlevels; i++) {
+ size_t group_count = binfo->levels[i].group_offset -
+ binfo->levels[i-1].group_offset;
+ extra = (BITMAP_GROUP_NBITS - (group_count &
+ BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK;
+ if (extra != 0)
+ bitmap[binfo->levels[i+1].group_offset - 1] >>= extra;
+ }
+}
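
bitmap_info_init() above builds a hierarchical bitmap: level 0 keeps one bit per region, and every level above it keeps one summary bit per group of the level below, so searching for a free slot scans a handful of words instead of the whole bitmap. A small worked example of the group arithmetic, assuming 64-bit groups (the names are illustrative, not the jemalloc interface):

    #include <stdio.h>
    #include <stddef.h>

    #define GROUP_NBITS 64

    static size_t
    bits2groups(size_t nbits)
    {
        /* Number of 64-bit words needed to hold nbits bits. */
        return ((nbits + GROUP_NBITS - 1) / GROUP_NBITS);
    }

    int
    main(void)
    {
        size_t nbits = 10000;   /* e.g. bits tracked for one large run */
        size_t groups = bits2groups(nbits), total = 0;
        unsigned level = 0;

        for (;;) {
            printf("level %u: %zu group(s)\n", level, groups);
            total += groups;
            if (groups == 1)
                break;                       /* top level reached */
            groups = bits2groups(groups);    /* one summary bit per group */
            level++;
        }
        printf("total: %zu groups (%zu bytes)\n", total, total * 8);
        return (0);
    }
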
diff --git a/dep/jemalloc/src/chunk.c b/dep/jemalloc/src/chunk.c
index 301519e8042..044f76be96c 100644
--- a/dep/jemalloc/src/chunk.c
+++ b/dep/jemalloc/src/chunk.c
@@ -4,19 +4,24 @@
/******************************************************************************/
/* Data. */
-size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
-#ifdef JEMALLOC_SWAP
-bool opt_overcommit = true;
-#endif
+const char *opt_dss = DSS_DEFAULT;
+size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
malloc_mutex_t chunks_mtx;
chunk_stats_t stats_chunks;
-#endif
-#ifdef JEMALLOC_IVSALLOC
+/*
+ * Trees of chunks that were previously allocated (trees differ only in node
+ * ordering). These are used when allocating chunks, in an attempt to re-use
+ * address space. Depending on function, different tree orderings are needed,
+ * which is why there are two trees with the same contents.
+ */
+static extent_tree_t chunks_szad_mmap;
+static extent_tree_t chunks_ad_mmap;
+static extent_tree_t chunks_szad_dss;
+static extent_tree_t chunks_ad_dss;
+
rtree_t *chunks_rtree;
-#endif
/* Various chunk-related settings. */
size_t chunksize;
@@ -26,6 +31,109 @@ size_t map_bias;
size_t arena_maxclass; /* Max size class for arenas. */
/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void *chunk_recycle(extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
+ bool *zero);
+static void chunk_record(extent_tree_t *chunks_szad,
+ extent_tree_t *chunks_ad, void *chunk, size_t size);
+
+/******************************************************************************/
+
+static void *
+chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
+ size_t alignment, bool base, bool *zero)
+{
+ void *ret;
+ extent_node_t *node;
+ extent_node_t key;
+ size_t alloc_size, leadsize, trailsize;
+ bool zeroed;
+
+ if (base) {
+ /*
+ * This function may need to call base_node_{,de}alloc(), but
+ * the current chunk allocation request is on behalf of the
+ * base allocator. Avoid deadlock (and if that weren't an
+ * issue, potential for infinite recursion) by returning NULL.
+ */
+ return (NULL);
+ }
+
+ alloc_size = size + alignment - chunksize;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
+ key.addr = NULL;
+ key.size = alloc_size;
+ malloc_mutex_lock(&chunks_mtx);
+ node = extent_tree_szad_nsearch(chunks_szad, &key);
+ if (node == NULL) {
+ malloc_mutex_unlock(&chunks_mtx);
+ return (NULL);
+ }
+ leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
+ (uintptr_t)node->addr;
+ assert(node->size >= leadsize + size);
+ trailsize = node->size - leadsize - size;
+ ret = (void *)((uintptr_t)node->addr + leadsize);
+ zeroed = node->zeroed;
+ if (zeroed)
+ *zero = true;
+ /* Remove node from the tree. */
+ extent_tree_szad_remove(chunks_szad, node);
+ extent_tree_ad_remove(chunks_ad, node);
+ if (leadsize != 0) {
+ /* Insert the leading space as a smaller chunk. */
+ node->size = leadsize;
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
+ node = NULL;
+ }
+ if (trailsize != 0) {
+ /* Insert the trailing space as a smaller chunk. */
+ if (node == NULL) {
+ /*
+ * An additional node is required, but
+ * base_node_alloc() can cause a new base chunk to be
+ * allocated. Drop chunks_mtx in order to avoid
+ * deadlock, and if node allocation fails, deallocate
+ * the result before returning an error.
+ */
+ malloc_mutex_unlock(&chunks_mtx);
+ node = base_node_alloc();
+ if (node == NULL) {
+ chunk_dealloc(ret, size, true);
+ return (NULL);
+ }
+ malloc_mutex_lock(&chunks_mtx);
+ }
+ node->addr = (void *)((uintptr_t)(ret) + size);
+ node->size = trailsize;
+ node->zeroed = zeroed;
+ extent_tree_szad_insert(chunks_szad, node);
+ extent_tree_ad_insert(chunks_ad, node);
+ node = NULL;
+ }
+ malloc_mutex_unlock(&chunks_mtx);
+
+ if (node != NULL)
+ base_node_dealloc(node);
+ if (*zero) {
+ if (zeroed == false)
+ memset(ret, 0, size);
+ else if (config_debug) {
+ size_t i;
+ size_t *p = (size_t *)(uintptr_t)ret;
+
+ VALGRIND_MAKE_MEM_DEFINED(ret, size);
+ for (i = 0; i < size / sizeof(size_t); i++)
+ assert(p[i] == 0);
+ }
+ }
+ return (ret);
+}
/*
* If the caller specifies (*zero == false), it is still possible to receive
@@ -34,81 +142,167 @@ size_t arena_maxclass; /* Max size class for arenas. */
* advantage of them if they are returned.
*/
void *
-chunk_alloc(size_t size, bool base, bool *zero)
+chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
+ dss_prec_t dss_prec)
{
void *ret;
assert(size != 0);
assert((size & chunksize_mask) == 0);
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
-#ifdef JEMALLOC_SWAP
- if (swap_enabled) {
- ret = chunk_alloc_swap(size, zero);
- if (ret != NULL)
- goto RETURN;
+ /* "primary" dss. */
+ if (config_dss && dss_prec == dss_prec_primary) {
+ if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
+ alignment, base, zero)) != NULL)
+ goto label_return;
+ if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
+ goto label_return;
}
-
- if (swap_enabled == false || opt_overcommit) {
-#endif
-#ifdef JEMALLOC_DSS
- ret = chunk_alloc_dss(size, zero);
- if (ret != NULL)
- goto RETURN;
-#endif
- ret = chunk_alloc_mmap(size);
- if (ret != NULL) {
- *zero = true;
- goto RETURN;
- }
-#ifdef JEMALLOC_SWAP
+ /* mmap. */
+ if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
+ alignment, base, zero)) != NULL)
+ goto label_return;
+ if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
+ goto label_return;
+ /* "secondary" dss. */
+ if (config_dss && dss_prec == dss_prec_secondary) {
+ if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
+ alignment, base, zero)) != NULL)
+ goto label_return;
+ if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
+ goto label_return;
}
-#endif
/* All strategies for allocation failed. */
ret = NULL;
-RETURN:
-#ifdef JEMALLOC_IVSALLOC
- if (base == false && ret != NULL) {
- if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
- chunk_dealloc(ret, size);
- return (NULL);
- }
- }
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
+label_return:
if (ret != NULL) {
-# ifdef JEMALLOC_PROF
- bool gdump;
-# endif
- malloc_mutex_lock(&chunks_mtx);
-# ifdef JEMALLOC_STATS
- stats_chunks.nchunks += (size / chunksize);
-# endif
- stats_chunks.curchunks += (size / chunksize);
- if (stats_chunks.curchunks > stats_chunks.highchunks) {
- stats_chunks.highchunks = stats_chunks.curchunks;
-# ifdef JEMALLOC_PROF
- gdump = true;
-# endif
+ if (config_ivsalloc && base == false) {
+ if (rtree_set(chunks_rtree, (uintptr_t)ret, ret)) {
+ chunk_dealloc(ret, size, true);
+ return (NULL);
+ }
}
-# ifdef JEMALLOC_PROF
- else
- gdump = false;
-# endif
- malloc_mutex_unlock(&chunks_mtx);
-# ifdef JEMALLOC_PROF
- if (opt_prof && opt_prof_gdump && gdump)
- prof_gdump();
-# endif
+ if (config_stats || config_prof) {
+ bool gdump;
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_stats)
+ stats_chunks.nchunks += (size / chunksize);
+ stats_chunks.curchunks += (size / chunksize);
+ if (stats_chunks.curchunks > stats_chunks.highchunks) {
+ stats_chunks.highchunks =
+ stats_chunks.curchunks;
+ if (config_prof)
+ gdump = true;
+ } else if (config_prof)
+ gdump = false;
+ malloc_mutex_unlock(&chunks_mtx);
+ if (config_prof && opt_prof && opt_prof_gdump && gdump)
+ prof_gdump();
+ }
+ if (config_valgrind)
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
}
-#endif
-
assert(CHUNK_ADDR2BASE(ret) == ret);
return (ret);
}
+static void
+chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
+ size_t size)
+{
+ bool unzeroed;
+ extent_node_t *xnode, *node, *prev, key;
+
+ unzeroed = pages_purge(chunk, size);
+ VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+
+ /*
+ * Allocate a node before acquiring chunks_mtx even though it might not
+ * be needed, because base_node_alloc() may cause a new base chunk to
+ * be allocated, which could cause deadlock if chunks_mtx were already
+ * held.
+ */
+ xnode = base_node_alloc();
+
+ malloc_mutex_lock(&chunks_mtx);
+ key.addr = (void *)((uintptr_t)chunk + size);
+ node = extent_tree_ad_nsearch(chunks_ad, &key);
+ /* Try to coalesce forward. */
+ if (node != NULL && node->addr == key.addr) {
+ /*
+ * Coalesce chunk with the following address range. This does
+ * not change the position within chunks_ad, so only
+ * remove/insert from/into chunks_szad.
+ */
+ extent_tree_szad_remove(chunks_szad, node);
+ node->addr = chunk;
+ node->size += size;
+ node->zeroed = (node->zeroed && (unzeroed == false));
+ extent_tree_szad_insert(chunks_szad, node);
+ if (xnode != NULL)
+ base_node_dealloc(xnode);
+ } else {
+ /* Coalescing forward failed, so insert a new node. */
+ if (xnode == NULL) {
+ /*
+ * base_node_alloc() failed, which is an exceedingly
+ * unlikely failure. Leak chunk; its pages have
+ * already been purged, so this is only a virtual
+ * memory leak.
+ */
+ malloc_mutex_unlock(&chunks_mtx);
+ return;
+ }
+ node = xnode;
+ node->addr = chunk;
+ node->size = size;
+ node->zeroed = (unzeroed == false);
+ extent_tree_ad_insert(chunks_ad, node);
+ extent_tree_szad_insert(chunks_szad, node);
+ }
+
+ /* Try to coalesce backward. */
+ prev = extent_tree_ad_prev(chunks_ad, node);
+ if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
+ chunk) {
+ /*
+ * Coalesce chunk with the previous address range. This does
+ * not change the position within chunks_ad, so only
+ * remove/insert node from/into chunks_szad.
+ */
+ extent_tree_szad_remove(chunks_szad, prev);
+ extent_tree_ad_remove(chunks_ad, prev);
+
+ extent_tree_szad_remove(chunks_szad, node);
+ node->addr = prev->addr;
+ node->size += prev->size;
+ node->zeroed = (node->zeroed && prev->zeroed);
+ extent_tree_szad_insert(chunks_szad, node);
+
+ base_node_dealloc(prev);
+ }
+ malloc_mutex_unlock(&chunks_mtx);
+}
+
void
-chunk_dealloc(void *chunk, size_t size)
+chunk_unmap(void *chunk, size_t size)
+{
+ assert(chunk != NULL);
+ assert(CHUNK_ADDR2BASE(chunk) == chunk);
+ assert(size != 0);
+ assert((size & chunksize_mask) == 0);
+
+ if (config_dss && chunk_in_dss(chunk))
+ chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
+ else if (chunk_dealloc_mmap(chunk, size))
+ chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
+}
+
+void
+chunk_dealloc(void *chunk, size_t size, bool unmap)
{
assert(chunk != NULL);
@@ -116,24 +310,17 @@ chunk_dealloc(void *chunk, size_t size)
assert(size != 0);
assert((size & chunksize_mask) == 0);
-#ifdef JEMALLOC_IVSALLOC
- rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
-#endif
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- malloc_mutex_lock(&chunks_mtx);
- stats_chunks.curchunks -= (size / chunksize);
- malloc_mutex_unlock(&chunks_mtx);
-#endif
-
-#ifdef JEMALLOC_SWAP
- if (swap_enabled && chunk_dealloc_swap(chunk, size) == false)
- return;
-#endif
-#ifdef JEMALLOC_DSS
- if (chunk_dealloc_dss(chunk, size) == false)
- return;
-#endif
- chunk_dealloc_mmap(chunk, size);
+ if (config_ivsalloc)
+ rtree_set(chunks_rtree, (uintptr_t)chunk, NULL);
+ if (config_stats || config_prof) {
+ malloc_mutex_lock(&chunks_mtx);
+ assert(stats_chunks.curchunks >= (size / chunksize));
+ stats_chunks.curchunks -= (size / chunksize);
+ malloc_mutex_unlock(&chunks_mtx);
+ }
+
+ if (unmap)
+ chunk_unmap(chunk, size);
}
bool
@@ -142,30 +329,57 @@ chunk_boot(void)
/* Set variables according to the value of opt_lg_chunk. */
chunksize = (ZU(1) << opt_lg_chunk);
- assert(chunksize >= PAGE_SIZE);
+ assert(chunksize >= PAGE);
chunksize_mask = chunksize - 1;
- chunk_npages = (chunksize >> PAGE_SHIFT);
+ chunk_npages = (chunksize >> LG_PAGE);
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- if (malloc_mutex_init(&chunks_mtx))
- return (true);
- memset(&stats_chunks, 0, sizeof(chunk_stats_t));
-#endif
-#ifdef JEMALLOC_SWAP
- if (chunk_swap_boot())
- return (true);
-#endif
- if (chunk_mmap_boot())
- return (true);
-#ifdef JEMALLOC_DSS
- if (chunk_dss_boot())
- return (true);
-#endif
-#ifdef JEMALLOC_IVSALLOC
- chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - opt_lg_chunk);
- if (chunks_rtree == NULL)
+ if (config_stats || config_prof) {
+ if (malloc_mutex_init(&chunks_mtx))
+ return (true);
+ memset(&stats_chunks, 0, sizeof(chunk_stats_t));
+ }
+ if (config_dss && chunk_dss_boot())
return (true);
-#endif
+ extent_tree_szad_new(&chunks_szad_mmap);
+ extent_tree_ad_new(&chunks_ad_mmap);
+ extent_tree_szad_new(&chunks_szad_dss);
+ extent_tree_ad_new(&chunks_ad_dss);
+ if (config_ivsalloc) {
+ chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
+ opt_lg_chunk);
+ if (chunks_rtree == NULL)
+ return (true);
+ }
return (false);
}
+
+void
+chunk_prefork(void)
+{
+
+ malloc_mutex_lock(&chunks_mtx);
+ if (config_ivsalloc)
+ rtree_prefork(chunks_rtree);
+ chunk_dss_prefork();
+}
+
+void
+chunk_postfork_parent(void)
+{
+
+ chunk_dss_postfork_parent();
+ if (config_ivsalloc)
+ rtree_postfork_parent(chunks_rtree);
+ malloc_mutex_postfork_parent(&chunks_mtx);
+}
+
+void
+chunk_postfork_child(void)
+{
+
+ chunk_dss_postfork_child();
+ if (config_ivsalloc)
+ rtree_postfork_child(chunks_rtree);
+ malloc_mutex_postfork_child(&chunks_mtx);
+}
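
chunk_recycle() above asks the size/address tree for an extent of at least size + alignment - chunksize bytes, the smallest request that is guaranteed to contain an aligned range of the wanted size when cached extents are themselves chunk-aligned; the unaligned lead and trail pieces are reinserted as smaller extents. A worked sketch of that split arithmetic with made-up numbers (4 MiB chunks and 16 MiB alignment assumed):

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define ALIGNMENT_CEILING(a, align) \
        (((a) + ((align) - 1)) & ~((uintptr_t)(align) - 1))

    int
    main(void)
    {
        size_t chunksize = 4 << 20;                   /* 4 MiB chunks */
        size_t size = 8 << 20, alignment = 16 << 20;  /* 8 MiB, 16 MiB aligned */

        /* Worst case needed so an aligned range of `size` always fits. */
        size_t alloc_size = size + alignment - chunksize;

        /* Pretend the tree returned a chunk-aligned cached extent here. */
        uintptr_t extent_addr = 0x40c00000UL;
        size_t extent_size = alloc_size;

        uintptr_t ret = ALIGNMENT_CEILING(extent_addr, alignment);
        size_t leadsize = ret - extent_addr;
        size_t trailsize = extent_size - leadsize - size;

        assert(extent_size >= leadsize + size);
        printf("lead %zu, aligned block %zu, trail %zu (bytes)\n",
            leadsize, size, trailsize);
        return (0);
    }
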
diff --git a/dep/jemalloc/src/chunk_dss.c b/dep/jemalloc/src/chunk_dss.c
index 5c0e290e441..24781cc52dc 100644
--- a/dep/jemalloc/src/chunk_dss.c
+++ b/dep/jemalloc/src/chunk_dss.c
@@ -1,82 +1,77 @@
#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_DSS
/******************************************************************************/
/* Data. */
-malloc_mutex_t dss_mtx;
+const char *dss_prec_names[] = {
+ "disabled",
+ "primary",
+ "secondary",
+ "N/A"
+};
-/* Base address of the DSS. */
-static void *dss_base;
-/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
-static void *dss_prev;
-/* Current upper limit on DSS addresses. */
-static void *dss_max;
+/* Current dss precedence default, used when creating new arenas. */
+static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;
/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering). These are used when allocating chunks, in an attempt to re-use
- * address space. Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
+ * Protects sbrk() calls. This avoids malloc races among threads, though it
+ * does not protect against races with threads that call sbrk() directly.
*/
-static extent_tree_t dss_chunks_szad;
-static extent_tree_t dss_chunks_ad;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
+static malloc_mutex_t dss_mtx;
-static void *chunk_recycle_dss(size_t size, bool *zero);
-static extent_node_t *chunk_dealloc_dss_record(void *chunk, size_t size);
+/* Base address of the DSS. */
+static void *dss_base;
+/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
+static void *dss_prev;
+/* Current upper limit on DSS addresses. */
+static void *dss_max;
/******************************************************************************/
+#ifndef JEMALLOC_HAVE_SBRK
static void *
-chunk_recycle_dss(size_t size, bool *zero)
+sbrk(intptr_t increment)
+{
+
+ not_implemented();
+
+ return (NULL);
+}
+#endif
+
+dss_prec_t
+chunk_dss_prec_get(void)
{
- extent_node_t *node, key;
+ dss_prec_t ret;
- key.addr = NULL;
- key.size = size;
+ if (config_dss == false)
+ return (dss_prec_disabled);
malloc_mutex_lock(&dss_mtx);
- node = extent_tree_szad_nsearch(&dss_chunks_szad, &key);
- if (node != NULL) {
- void *ret = node->addr;
-
- /* Remove node from the tree. */
- extent_tree_szad_remove(&dss_chunks_szad, node);
- if (node->size == size) {
- extent_tree_ad_remove(&dss_chunks_ad, node);
- base_node_dealloc(node);
- } else {
- /*
- * Insert the remainder of node's address range as a
- * smaller chunk. Its position within dss_chunks_ad
- * does not change.
- */
- assert(node->size > size);
- node->addr = (void *)((uintptr_t)node->addr + size);
- node->size -= size;
- extent_tree_szad_insert(&dss_chunks_szad, node);
- }
- malloc_mutex_unlock(&dss_mtx);
-
- if (*zero)
- memset(ret, 0, size);
- return (ret);
- }
+ ret = dss_prec_default;
malloc_mutex_unlock(&dss_mtx);
+ return (ret);
+}
- return (NULL);
+bool
+chunk_dss_prec_set(dss_prec_t dss_prec)
+{
+
+ if (config_dss == false)
+ return (true);
+ malloc_mutex_lock(&dss_mtx);
+ dss_prec_default = dss_prec;
+ malloc_mutex_unlock(&dss_mtx);
+ return (false);
}
void *
-chunk_alloc_dss(size_t size, bool *zero)
+chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
void *ret;
- ret = chunk_recycle_dss(size, zero);
- if (ret != NULL)
- return (ret);
+ cassert(config_dss);
+ assert(size > 0 && (size & chunksize_mask) == 0);
+ assert(alignment > 0 && (alignment & chunksize_mask) == 0);
/*
* sbrk() uses a signed increment argument, so take care not to
@@ -87,6 +82,8 @@ chunk_alloc_dss(size_t size, bool *zero)
malloc_mutex_lock(&dss_mtx);
if (dss_prev != (void *)-1) {
+ size_t gap_size, cpad_size;
+ void *cpad, *dss_next;
intptr_t incr;
/*
@@ -97,26 +94,40 @@ chunk_alloc_dss(size_t size, bool *zero)
do {
/* Get the current end of the DSS. */
dss_max = sbrk(0);
-
/*
* Calculate how much padding is necessary to
* chunk-align the end of the DSS.
*/
- incr = (intptr_t)size
- - (intptr_t)CHUNK_ADDR2OFFSET(dss_max);
- if (incr == (intptr_t)size)
- ret = dss_max;
- else {
- ret = (void *)((intptr_t)dss_max + incr);
- incr += size;
+ gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
+ chunksize_mask;
+ /*
+ * Compute how much chunk-aligned pad space (if any) is
+ * necessary to satisfy alignment. This space can be
+ * recycled for later use.
+ */
+ cpad = (void *)((uintptr_t)dss_max + gap_size);
+ ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
+ alignment);
+ cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
+ dss_next = (void *)((uintptr_t)ret + size);
+ if ((uintptr_t)ret < (uintptr_t)dss_max ||
+ (uintptr_t)dss_next < (uintptr_t)dss_max) {
+ /* Wrap-around. */
+ malloc_mutex_unlock(&dss_mtx);
+ return (NULL);
}
-
+ incr = gap_size + cpad_size + size;
dss_prev = sbrk(incr);
if (dss_prev == dss_max) {
/* Success. */
- dss_max = (void *)((intptr_t)dss_prev + incr);
+ dss_max = dss_next;
malloc_mutex_unlock(&dss_mtx);
- *zero = true;
+ if (cpad_size != 0)
+ chunk_unmap(cpad, cpad_size);
+ if (*zero) {
+ VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+ memset(ret, 0, size);
+ }
return (ret);
}
} while (dss_prev != (void *)-1);
@@ -126,84 +137,13 @@ chunk_alloc_dss(size_t size, bool *zero)
return (NULL);
}
-static extent_node_t *
-chunk_dealloc_dss_record(void *chunk, size_t size)
-{
- extent_node_t *xnode, *node, *prev, key;
-
- xnode = NULL;
- while (true) {
- key.addr = (void *)((uintptr_t)chunk + size);
- node = extent_tree_ad_nsearch(&dss_chunks_ad, &key);
- /* Try to coalesce forward. */
- if (node != NULL && node->addr == key.addr) {
- /*
- * Coalesce chunk with the following address range.
- * This does not change the position within
- * dss_chunks_ad, so only remove/insert from/into
- * dss_chunks_szad.
- */
- extent_tree_szad_remove(&dss_chunks_szad, node);
- node->addr = chunk;
- node->size += size;
- extent_tree_szad_insert(&dss_chunks_szad, node);
- break;
- } else if (xnode == NULL) {
- /*
- * It is possible that base_node_alloc() will cause a
- * new base chunk to be allocated, so take care not to
- * deadlock on dss_mtx, and recover if another thread
- * deallocates an adjacent chunk while this one is busy
- * allocating xnode.
- */
- malloc_mutex_unlock(&dss_mtx);
- xnode = base_node_alloc();
- malloc_mutex_lock(&dss_mtx);
- if (xnode == NULL)
- return (NULL);
- } else {
- /* Coalescing forward failed, so insert a new node. */
- node = xnode;
- xnode = NULL;
- node->addr = chunk;
- node->size = size;
- extent_tree_ad_insert(&dss_chunks_ad, node);
- extent_tree_szad_insert(&dss_chunks_szad, node);
- break;
- }
- }
- /* Discard xnode if it ended up unused do to a race. */
- if (xnode != NULL)
- base_node_dealloc(xnode);
-
- /* Try to coalesce backward. */
- prev = extent_tree_ad_prev(&dss_chunks_ad, node);
- if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
- chunk) {
- /*
- * Coalesce chunk with the previous address range. This does
- * not change the position within dss_chunks_ad, so only
- * remove/insert node from/into dss_chunks_szad.
- */
- extent_tree_szad_remove(&dss_chunks_szad, prev);
- extent_tree_ad_remove(&dss_chunks_ad, prev);
-
- extent_tree_szad_remove(&dss_chunks_szad, node);
- node->addr = prev->addr;
- node->size += prev->size;
- extent_tree_szad_insert(&dss_chunks_szad, node);
-
- base_node_dealloc(prev);
- }
-
- return (node);
-}
-
bool
chunk_in_dss(void *chunk)
{
bool ret;
+ cassert(config_dss);
+
malloc_mutex_lock(&dss_mtx);
if ((uintptr_t)chunk >= (uintptr_t)dss_base
&& (uintptr_t)chunk < (uintptr_t)dss_max)
@@ -216,69 +156,42 @@ chunk_in_dss(void *chunk)
}
bool
-chunk_dealloc_dss(void *chunk, size_t size)
+chunk_dss_boot(void)
{
- bool ret;
- malloc_mutex_lock(&dss_mtx);
- if ((uintptr_t)chunk >= (uintptr_t)dss_base
- && (uintptr_t)chunk < (uintptr_t)dss_max) {
- extent_node_t *node;
+ cassert(config_dss);
- /* Try to coalesce with other unused chunks. */
- node = chunk_dealloc_dss_record(chunk, size);
- if (node != NULL) {
- chunk = node->addr;
- size = node->size;
- }
-
- /* Get the current end of the DSS. */
- dss_max = sbrk(0);
+ if (malloc_mutex_init(&dss_mtx))
+ return (true);
+ dss_base = sbrk(0);
+ dss_prev = dss_base;
+ dss_max = dss_base;
- /*
- * Try to shrink the DSS if this chunk is at the end of the
- * DSS. The sbrk() call here is subject to a race condition
- * with threads that use brk(2) or sbrk(2) directly, but the
- * alternative would be to leak memory for the sake of poorly
- * designed multi-threaded programs.
- */
- if ((void *)((uintptr_t)chunk + size) == dss_max
- && (dss_prev = sbrk(-(intptr_t)size)) == dss_max) {
- /* Success. */
- dss_max = (void *)((intptr_t)dss_prev - (intptr_t)size);
-
- if (node != NULL) {
- extent_tree_szad_remove(&dss_chunks_szad, node);
- extent_tree_ad_remove(&dss_chunks_ad, node);
- base_node_dealloc(node);
- }
- } else
- madvise(chunk, size, MADV_DONTNEED);
+ return (false);
+}
- ret = false;
- goto RETURN;
- }
+void
+chunk_dss_prefork(void)
+{
- ret = true;
-RETURN:
- malloc_mutex_unlock(&dss_mtx);
- return (ret);
+ if (config_dss)
+ malloc_mutex_prefork(&dss_mtx);
}
-bool
-chunk_dss_boot(void)
+void
+chunk_dss_postfork_parent(void)
{
- if (malloc_mutex_init(&dss_mtx))
- return (true);
- dss_base = sbrk(0);
- dss_prev = dss_base;
- dss_max = dss_base;
- extent_tree_szad_new(&dss_chunks_szad);
- extent_tree_ad_new(&dss_chunks_ad);
+ if (config_dss)
+ malloc_mutex_postfork_parent(&dss_mtx);
+}
- return (false);
+void
+chunk_dss_postfork_child(void)
+{
+
+ if (config_dss)
+ malloc_mutex_postfork_child(&dss_mtx);
}
/******************************************************************************/
-#endif /* JEMALLOC_DSS */
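
The rewritten chunk_alloc_dss() grows the heap with a single sbrk() covering three parts: a gap that chunk-aligns the current break, a chunk-aligned pad (cpad) that is handed back to chunk_unmap() for recycling, and the aligned block itself. A worked example of that sizing with a made-up break address (4 MiB chunks and 8 MiB alignment assumed):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define ALIGNMENT_CEILING(a, align) \
        (((a) + ((align) - 1)) & ~((uintptr_t)(align) - 1))

    int
    main(void)
    {
        size_t chunksize = 4 << 20;                  /* 4 MiB */
        size_t size = 4 << 20, alignment = 8 << 20;  /* 4 MiB block, 8 MiB aligned */
        uintptr_t dss_max = 0x00a01234;              /* pretend end of the heap */

        /* Pad so the break becomes chunk-aligned... */
        size_t gap_size = (chunksize - (dss_max & (chunksize - 1))) &
            (chunksize - 1);
        /* ...then pad further, in whole chunks, up to the alignment. */
        uintptr_t cpad = dss_max + gap_size;
        uintptr_t ret = ALIGNMENT_CEILING(dss_max, alignment);
        size_t cpad_size = ret - cpad;
        intptr_t incr = (intptr_t)(gap_size + cpad_size + size);

        printf("sbrk(%ld): gap=%zu recyclable pad=%zu block at %#lx\n",
            (long)incr, gap_size, cpad_size, (unsigned long)ret);
        return (0);
    }
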
diff --git a/dep/jemalloc/src/chunk_mmap.c b/dep/jemalloc/src/chunk_mmap.c
index bc367559774..8a42e75915f 100644
--- a/dep/jemalloc/src/chunk_mmap.c
+++ b/dep/jemalloc/src/chunk_mmap.c
@@ -2,53 +2,36 @@
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
-/* Data. */
-
-/*
- * Used by chunk_alloc_mmap() to decide whether to attempt the fast path and
- * potentially avoid some system calls.
- */
-#ifndef NO_TLS
-static __thread bool mmap_unaligned_tls
- JEMALLOC_ATTR(tls_model("initial-exec"));
-#define MMAP_UNALIGNED_GET() mmap_unaligned_tls
-#define MMAP_UNALIGNED_SET(v) do { \
- mmap_unaligned_tls = (v); \
-} while (0)
-#else
-static pthread_key_t mmap_unaligned_tsd;
-#define MMAP_UNALIGNED_GET() ((bool)pthread_getspecific(mmap_unaligned_tsd))
-#define MMAP_UNALIGNED_SET(v) do { \
- pthread_setspecific(mmap_unaligned_tsd, (void *)(v)); \
-} while (0)
-#endif
-
-/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static void *pages_map(void *addr, size_t size, bool noreserve);
+static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
-static void *chunk_alloc_mmap_slow(size_t size, bool unaligned,
- bool noreserve);
-static void *chunk_alloc_mmap_internal(size_t size, bool noreserve);
+static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
+ bool *zero);
/******************************************************************************/
static void *
-pages_map(void *addr, size_t size, bool noreserve)
+pages_map(void *addr, size_t size)
{
void *ret;
+ assert(size != 0);
+
+#ifdef _WIN32
+ /*
+ * If VirtualAlloc can't allocate at the given address when one is
+ * given, it fails and returns NULL.
+ */
+ ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
+ PAGE_READWRITE);
+#else
/*
* We don't use MAP_FIXED here, because it can cause the *replacement*
* of existing mappings, and we only want to create new mappings.
*/
- int flags = MAP_PRIVATE | MAP_ANON;
-#ifdef MAP_NORESERVE
- if (noreserve)
- flags |= MAP_NORESERVE;
-#endif
- ret = mmap(addr, size, PROT_READ | PROT_WRITE, flags, -1, 0);
+ ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
+ -1, 0);
assert(ret != NULL);
if (ret == MAP_FAILED)
@@ -60,16 +43,15 @@ pages_map(void *addr, size_t size, bool noreserve)
if (munmap(ret, size) == -1) {
char buf[BUFERROR_BUF];
- buferror(errno, buf, sizeof(buf));
- malloc_write("<jemalloc>: Error in munmap(): ");
- malloc_write(buf);
- malloc_write("\n");
+ buferror(buf, sizeof(buf));
+ malloc_printf("<jemalloc: Error in munmap(): %s\n",
+ buf);
if (opt_abort)
abort();
}
ret = NULL;
}
-
+#endif
assert(ret == NULL || (addr == NULL && ret != addr)
|| (addr != NULL && ret == addr));
return (ret);
@@ -79,159 +61,150 @@ static void
pages_unmap(void *addr, size_t size)
{
- if (munmap(addr, size) == -1) {
+#ifdef _WIN32
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
+#else
+ if (munmap(addr, size) == -1)
+#endif
+ {
char buf[BUFERROR_BUF];
- buferror(errno, buf, sizeof(buf));
- malloc_write("<jemalloc>: Error in munmap(): ");
- malloc_write(buf);
- malloc_write("\n");
+ buferror(buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in "
+#ifdef _WIN32
+ "VirtualFree"
+#else
+ "munmap"
+#endif
+ "(): %s\n", buf);
if (opt_abort)
abort();
}
}
static void *
-chunk_alloc_mmap_slow(size_t size, bool unaligned, bool noreserve)
+pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
- void *ret;
- size_t offset;
-
- /* Beware size_t wrap-around. */
- if (size + chunksize <= size)
+ void *ret = (void *)((uintptr_t)addr + leadsize);
+
+ assert(alloc_size >= leadsize + size);
+#ifdef _WIN32
+ {
+ void *new_addr;
+
+ pages_unmap(addr, alloc_size);
+ new_addr = pages_map(ret, size);
+ if (new_addr == ret)
+ return (ret);
+ if (new_addr)
+ pages_unmap(new_addr, size);
return (NULL);
+ }
+#else
+ {
+ size_t trailsize = alloc_size - leadsize - size;
+
+ if (leadsize != 0)
+ pages_unmap(addr, leadsize);
+ if (trailsize != 0)
+ pages_unmap((void *)((uintptr_t)ret + size), trailsize);
+ return (ret);
+ }
+#endif
+}
- ret = pages_map(NULL, size + chunksize, noreserve);
- if (ret == NULL)
- return (NULL);
+bool
+pages_purge(void *addr, size_t length)
+{
+ bool unzeroed;
- /* Clean up unneeded leading/trailing space. */
- offset = CHUNK_ADDR2OFFSET(ret);
- if (offset != 0) {
- /* Note that mmap() returned an unaligned mapping. */
- unaligned = true;
-
- /* Leading space. */
- pages_unmap(ret, chunksize - offset);
-
- ret = (void *)((uintptr_t)ret +
- (chunksize - offset));
-
- /* Trailing space. */
- pages_unmap((void *)((uintptr_t)ret + size),
- offset);
- } else {
- /* Trailing space only. */
- pages_unmap((void *)((uintptr_t)ret + size),
- chunksize);
- }
+#ifdef _WIN32
+ VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
+ unzeroed = true;
+#else
+# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
+# define JEMALLOC_MADV_PURGE MADV_DONTNEED
+# define JEMALLOC_MADV_ZEROS true
+# elif defined(JEMALLOC_PURGE_MADVISE_FREE)
+# define JEMALLOC_MADV_PURGE MADV_FREE
+# define JEMALLOC_MADV_ZEROS false
+# else
+# error "No method defined for purging unused dirty pages."
+# endif
+ int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
+ unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
+# undef JEMALLOC_MADV_PURGE
+# undef JEMALLOC_MADV_ZEROS
+#endif
+ return (unzeroed);
+}
- /*
- * If mmap() returned an aligned mapping, reset mmap_unaligned so that
- * the next chunk_alloc_mmap() execution tries the fast allocation
- * method.
- */
- if (unaligned == false)
- MMAP_UNALIGNED_SET(false);
+static void *
+chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
+{
+ void *ret, *pages;
+ size_t alloc_size, leadsize;
+ alloc_size = size + alignment - PAGE;
+ /* Beware size_t wrap-around. */
+ if (alloc_size < size)
+ return (NULL);
+ do {
+ pages = pages_map(NULL, alloc_size);
+ if (pages == NULL)
+ return (NULL);
+ leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
+ (uintptr_t)pages;
+ ret = pages_trim(pages, alloc_size, leadsize, size);
+ } while (ret == NULL);
+
+ assert(ret != NULL);
+ *zero = true;
return (ret);
}
-static void *
-chunk_alloc_mmap_internal(size_t size, bool noreserve)
+void *
+chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
void *ret;
+ size_t offset;
/*
* Ideally, there would be a way to specify alignment to mmap() (like
* NetBSD has), but in the absence of such a feature, we have to work
* hard to efficiently create aligned mappings. The reliable, but
* slow method is to create a mapping that is over-sized, then trim the
- * excess. However, that always results in at least one call to
+ * excess. However, that always results in one or two calls to
* pages_unmap().
*
- * A more optimistic approach is to try mapping precisely the right
- * amount, then try to append another mapping if alignment is off. In
- * practice, this works out well as long as the application is not
- * interleaving mappings via direct mmap() calls. If we do run into a
- * situation where there is an interleaved mapping and we are unable to
- * extend an unaligned mapping, our best option is to switch to the
- * slow method until mmap() returns another aligned mapping. This will
- * tend to leave a gap in the memory map that is too small to cause
- * later problems for the optimistic method.
- *
- * Another possible confounding factor is address space layout
- * randomization (ASLR), which causes mmap(2) to disregard the
- * requested address. mmap_unaligned tracks whether the previous
- * chunk_alloc_mmap() execution received any unaligned or relocated
- * mappings, and if so, the current execution will immediately fall
- * back to the slow method. However, we keep track of whether the fast
- * method would have succeeded, and if so, we make a note to try the
- * fast method next time.
+ * Optimistically try mapping precisely the right amount before falling
+ * back to the slow method, with the expectation that the optimistic
+ * approach works most of the time.
*/
- if (MMAP_UNALIGNED_GET() == false) {
- size_t offset;
+ assert(alignment != 0);
+ assert((alignment & chunksize_mask) == 0);
- ret = pages_map(NULL, size, noreserve);
- if (ret == NULL)
- return (NULL);
-
- offset = CHUNK_ADDR2OFFSET(ret);
- if (offset != 0) {
- MMAP_UNALIGNED_SET(true);
- /* Try to extend chunk boundary. */
- if (pages_map((void *)((uintptr_t)ret + size),
- chunksize - offset, noreserve) == NULL) {
- /*
- * Extension failed. Clean up, then revert to
- * the reliable-but-expensive method.
- */
- pages_unmap(ret, size);
- ret = chunk_alloc_mmap_slow(size, true,
- noreserve);
- } else {
- /* Clean up unneeded leading space. */
- pages_unmap(ret, chunksize - offset);
- ret = (void *)((uintptr_t)ret + (chunksize -
- offset));
- }
- }
- } else
- ret = chunk_alloc_mmap_slow(size, false, noreserve);
+ ret = pages_map(NULL, size);
+ if (ret == NULL)
+ return (NULL);
+ offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
+ if (offset != 0) {
+ pages_unmap(ret, size);
+ return (chunk_alloc_mmap_slow(size, alignment, zero));
+ }
+ assert(ret != NULL);
+ *zero = true;
return (ret);
}
-void *
-chunk_alloc_mmap(size_t size)
-{
- return chunk_alloc_mmap_internal(size, false);
-}
-
-void *
-chunk_alloc_mmap_noreserve(size_t size)
-{
- return chunk_alloc_mmap_internal(size, true);
-}
-
-void
-chunk_dealloc_mmap(void *chunk, size_t size)
-{
-
- pages_unmap(chunk, size);
-}
-
bool
-chunk_mmap_boot(void)
+chunk_dealloc_mmap(void *chunk, size_t size)
{
-#ifdef NO_TLS
- if (pthread_key_create(&mmap_unaligned_tsd, NULL) != 0) {
- malloc_write("<jemalloc>: Error in pthread_key_create()\n");
- return (true);
- }
-#endif
+ if (config_munmap)
+ pages_unmap(chunk, size);
- return (false);
+ return (config_munmap == false);
}
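
chunk_alloc_mmap() above first tries a plain mapping of exactly the requested size and only falls back to chunk_alloc_mmap_slow() when the result happens to be misaligned; the slow path over-allocates by alignment - page bytes and unmaps the unaligned head and tail. A standalone sketch of that trim on the POSIX path (the Windows branch must unmap and remap instead, because VirtualFree() cannot release part of a reservation):

    #include <assert.h>
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t size = 1 << 20, alignment = 1 << 21;  /* 1 MiB block, 2 MiB aligned */
        size_t alloc_size = size + alignment - page;

        char *pages = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
        assert(pages != MAP_FAILED);

        uintptr_t aligned = ((uintptr_t)pages + alignment - 1) &
            ~(uintptr_t)(alignment - 1);
        size_t leadsize = aligned - (uintptr_t)pages;
        size_t trailsize = alloc_size - leadsize - size;

        if (leadsize != 0)
            munmap(pages, leadsize);                      /* trim the head */
        if (trailsize != 0)
            munmap((void *)(aligned + size), trailsize);  /* trim the tail */

        printf("aligned mapping at %p (trimmed %zu + %zu bytes)\n",
            (void *)aligned, leadsize, trailsize);
        munmap((void *)aligned, size);
        return (0);
    }
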
diff --git a/dep/jemalloc/src/chunk_swap.c b/dep/jemalloc/src/chunk_swap.c
deleted file mode 100644
index cb25ae0dde2..00000000000
--- a/dep/jemalloc/src/chunk_swap.c
+++ /dev/null
@@ -1,402 +0,0 @@
-#define JEMALLOC_CHUNK_SWAP_C_
-#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_SWAP
-/******************************************************************************/
-/* Data. */
-
-malloc_mutex_t swap_mtx;
-bool swap_enabled;
-bool swap_prezeroed;
-size_t swap_nfds;
-int *swap_fds;
-#ifdef JEMALLOC_STATS
-size_t swap_avail;
-#endif
-
-/* Base address of the mmap()ed file(s). */
-static void *swap_base;
-/* Current end of the space in use (<= swap_max). */
-static void *swap_end;
-/* Absolute upper limit on file-backed addresses. */
-static void *swap_max;
-
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering). These are used when allocating chunks, in an attempt to re-use
- * address space. Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t swap_chunks_szad;
-static extent_tree_t swap_chunks_ad;
-
-/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-
-static void *chunk_recycle_swap(size_t size, bool *zero);
-static extent_node_t *chunk_dealloc_swap_record(void *chunk, size_t size);
-
-/******************************************************************************/
-
-static void *
-chunk_recycle_swap(size_t size, bool *zero)
-{
- extent_node_t *node, key;
-
- key.addr = NULL;
- key.size = size;
- malloc_mutex_lock(&swap_mtx);
- node = extent_tree_szad_nsearch(&swap_chunks_szad, &key);
- if (node != NULL) {
- void *ret = node->addr;
-
- /* Remove node from the tree. */
- extent_tree_szad_remove(&swap_chunks_szad, node);
- if (node->size == size) {
- extent_tree_ad_remove(&swap_chunks_ad, node);
- base_node_dealloc(node);
- } else {
- /*
- * Insert the remainder of node's address range as a
- * smaller chunk. Its position within swap_chunks_ad
- * does not change.
- */
- assert(node->size > size);
- node->addr = (void *)((uintptr_t)node->addr + size);
- node->size -= size;
- extent_tree_szad_insert(&swap_chunks_szad, node);
- }
-#ifdef JEMALLOC_STATS
- swap_avail -= size;
-#endif
- malloc_mutex_unlock(&swap_mtx);
-
- if (*zero)
- memset(ret, 0, size);
- return (ret);
- }
- malloc_mutex_unlock(&swap_mtx);
-
- return (NULL);
-}
-
-void *
-chunk_alloc_swap(size_t size, bool *zero)
-{
- void *ret;
-
- assert(swap_enabled);
-
- ret = chunk_recycle_swap(size, zero);
- if (ret != NULL)
- return (ret);
-
- malloc_mutex_lock(&swap_mtx);
- if ((uintptr_t)swap_end + size <= (uintptr_t)swap_max) {
- ret = swap_end;
- swap_end = (void *)((uintptr_t)swap_end + size);
-#ifdef JEMALLOC_STATS
- swap_avail -= size;
-#endif
- malloc_mutex_unlock(&swap_mtx);
-
- if (swap_prezeroed)
- *zero = true;
- else if (*zero)
- memset(ret, 0, size);
- } else {
- malloc_mutex_unlock(&swap_mtx);
- return (NULL);
- }
-
- return (ret);
-}
-
-static extent_node_t *
-chunk_dealloc_swap_record(void *chunk, size_t size)
-{
- extent_node_t *xnode, *node, *prev, key;
-
- xnode = NULL;
- while (true) {
- key.addr = (void *)((uintptr_t)chunk + size);
- node = extent_tree_ad_nsearch(&swap_chunks_ad, &key);
- /* Try to coalesce forward. */
- if (node != NULL && node->addr == key.addr) {
- /*
- * Coalesce chunk with the following address range.
- * This does not change the position within
- * swap_chunks_ad, so only remove/insert from/into
- * swap_chunks_szad.
- */
- extent_tree_szad_remove(&swap_chunks_szad, node);
- node->addr = chunk;
- node->size += size;
- extent_tree_szad_insert(&swap_chunks_szad, node);
- break;
- } else if (xnode == NULL) {
- /*
- * It is possible that base_node_alloc() will cause a
- * new base chunk to be allocated, so take care not to
- * deadlock on swap_mtx, and recover if another thread
- * deallocates an adjacent chunk while this one is busy
- * allocating xnode.
- */
- malloc_mutex_unlock(&swap_mtx);
- xnode = base_node_alloc();
- malloc_mutex_lock(&swap_mtx);
- if (xnode == NULL)
- return (NULL);
- } else {
- /* Coalescing forward failed, so insert a new node. */
- node = xnode;
- xnode = NULL;
- node->addr = chunk;
- node->size = size;
- extent_tree_ad_insert(&swap_chunks_ad, node);
- extent_tree_szad_insert(&swap_chunks_szad, node);
- break;
- }
- }
- /* Discard xnode if it ended up unused do to a race. */
- if (xnode != NULL)
- base_node_dealloc(xnode);
-
- /* Try to coalesce backward. */
- prev = extent_tree_ad_prev(&swap_chunks_ad, node);
- if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
- chunk) {
- /*
- * Coalesce chunk with the previous address range. This does
- * not change the position within swap_chunks_ad, so only
- * remove/insert node from/into swap_chunks_szad.
- */
- extent_tree_szad_remove(&swap_chunks_szad, prev);
- extent_tree_ad_remove(&swap_chunks_ad, prev);
-
- extent_tree_szad_remove(&swap_chunks_szad, node);
- node->addr = prev->addr;
- node->size += prev->size;
- extent_tree_szad_insert(&swap_chunks_szad, node);
-
- base_node_dealloc(prev);
- }
-
- return (node);
-}
-
-bool
-chunk_in_swap(void *chunk)
-{
- bool ret;
-
- assert(swap_enabled);
-
- malloc_mutex_lock(&swap_mtx);
- if ((uintptr_t)chunk >= (uintptr_t)swap_base
- && (uintptr_t)chunk < (uintptr_t)swap_max)
- ret = true;
- else
- ret = false;
- malloc_mutex_unlock(&swap_mtx);
-
- return (ret);
-}
-
-bool
-chunk_dealloc_swap(void *chunk, size_t size)
-{
- bool ret;
-
- assert(swap_enabled);
-
- malloc_mutex_lock(&swap_mtx);
- if ((uintptr_t)chunk >= (uintptr_t)swap_base
- && (uintptr_t)chunk < (uintptr_t)swap_max) {
- extent_node_t *node;
-
- /* Try to coalesce with other unused chunks. */
- node = chunk_dealloc_swap_record(chunk, size);
- if (node != NULL) {
- chunk = node->addr;
- size = node->size;
- }
-
- /*
- * Try to shrink the in-use memory if this chunk is at the end
- * of the in-use memory.
- */
- if ((void *)((uintptr_t)chunk + size) == swap_end) {
- swap_end = (void *)((uintptr_t)swap_end - size);
-
- if (node != NULL) {
- extent_tree_szad_remove(&swap_chunks_szad,
- node);
- extent_tree_ad_remove(&swap_chunks_ad, node);
- base_node_dealloc(node);
- }
- } else
- madvise(chunk, size, MADV_DONTNEED);
-
-#ifdef JEMALLOC_STATS
- swap_avail += size;
-#endif
- ret = false;
- goto RETURN;
- }
-
- ret = true;
-RETURN:
- malloc_mutex_unlock(&swap_mtx);
- return (ret);
-}
-
-bool
-chunk_swap_enable(const int *fds, unsigned nfds, bool prezeroed)
-{
- bool ret;
- unsigned i;
- off_t off;
- void *vaddr;
- size_t cumsize, voff;
- size_t sizes[nfds];
-
- malloc_mutex_lock(&swap_mtx);
-
- /* Get file sizes. */
- for (i = 0, cumsize = 0; i < nfds; i++) {
- off = lseek(fds[i], 0, SEEK_END);
- if (off == ((off_t)-1)) {
- ret = true;
- goto RETURN;
- }
- if (PAGE_CEILING(off) != off) {
- /* Truncate to a multiple of the page size. */
- off &= ~PAGE_MASK;
- if (ftruncate(fds[i], off) != 0) {
- ret = true;
- goto RETURN;
- }
- }
- sizes[i] = off;
- if (cumsize + off < cumsize) {
- /*
- * Cumulative file size is greater than the total
- * address space. Bail out while it's still obvious
- * what the problem is.
- */
- ret = true;
- goto RETURN;
- }
- cumsize += off;
- }
-
- /* Round down to a multiple of the chunk size. */
- cumsize &= ~chunksize_mask;
- if (cumsize == 0) {
- ret = true;
- goto RETURN;
- }
-
- /*
- * Allocate a chunk-aligned region of anonymous memory, which will
- * be the final location for the memory-mapped files.
- */
- vaddr = chunk_alloc_mmap_noreserve(cumsize);
- if (vaddr == NULL) {
- ret = true;
- goto RETURN;
- }
-
- /* Overlay the files onto the anonymous mapping. */
- for (i = 0, voff = 0; i < nfds; i++) {
- void *addr = mmap((void *)((uintptr_t)vaddr + voff), sizes[i],
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fds[i], 0);
- if (addr == MAP_FAILED) {
- char buf[BUFERROR_BUF];
-
-
- buferror(errno, buf, sizeof(buf));
- malloc_write(
- "<jemalloc>: Error in mmap(..., MAP_FIXED, ...): ");
- malloc_write(buf);
- malloc_write("\n");
- if (opt_abort)
- abort();
- if (munmap(vaddr, voff) == -1) {
- buferror(errno, buf, sizeof(buf));
- malloc_write("<jemalloc>: Error in munmap(): ");
- malloc_write(buf);
- malloc_write("\n");
- }
- ret = true;
- goto RETURN;
- }
- assert(addr == (void *)((uintptr_t)vaddr + voff));
-
- /*
- * Tell the kernel that the mapping will be accessed randomly,
- * and that it should not gratuitously sync pages to the
- * filesystem.
- */
-#ifdef MADV_RANDOM
- madvise(addr, sizes[i], MADV_RANDOM);
-#endif
-#ifdef MADV_NOSYNC
- madvise(addr, sizes[i], MADV_NOSYNC);
-#endif
-
- voff += sizes[i];
- }
-
- swap_prezeroed = prezeroed;
- swap_base = vaddr;
- swap_end = swap_base;
- swap_max = (void *)((uintptr_t)vaddr + cumsize);
-
- /* Copy the fds array for mallctl purposes. */
- swap_fds = (int *)base_alloc(nfds * sizeof(int));
- if (swap_fds == NULL) {
- ret = true;
- goto RETURN;
- }
- memcpy(swap_fds, fds, nfds * sizeof(int));
- swap_nfds = nfds;
-
-#ifdef JEMALLOC_STATS
- swap_avail = cumsize;
-#endif
-
- swap_enabled = true;
-
- ret = false;
-RETURN:
- malloc_mutex_unlock(&swap_mtx);
- return (ret);
-}
-
-bool
-chunk_swap_boot(void)
-{
-
- if (malloc_mutex_init(&swap_mtx))
- return (true);
-
- swap_enabled = false;
- swap_prezeroed = false; /* swap.* mallctl's depend on this. */
- swap_nfds = 0;
- swap_fds = NULL;
-#ifdef JEMALLOC_STATS
- swap_avail = 0;
-#endif
- swap_base = NULL;
- swap_end = NULL;
- swap_max = NULL;
-
- extent_tree_szad_new(&swap_chunks_szad);
- extent_tree_ad_new(&swap_chunks_ad);
-
- return (false);
-}
-
-/******************************************************************************/
-#endif /* JEMALLOC_SWAP */
diff --git a/dep/jemalloc/src/ckh.c b/dep/jemalloc/src/ckh.c
index 682a8db65bf..2f38348bb85 100644
--- a/dep/jemalloc/src/ckh.c
+++ b/dep/jemalloc/src/ckh.c
@@ -34,7 +34,7 @@
* respectively.
*
******************************************************************************/
-#define CKH_C_
+#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
@@ -70,21 +70,20 @@ ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key)
JEMALLOC_INLINE size_t
ckh_isearch(ckh_t *ckh, const void *key)
{
- size_t hash1, hash2, bucket, cell;
+ size_t hashes[2], bucket, cell;
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
- ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+ ckh->hash(key, hashes);
/* Search primary bucket. */
- bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
if (cell != SIZE_T_MAX)
return (cell);
/* Search secondary bucket. */
- bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
cell = ckh_bucket_search(ckh, bucket, key);
return (cell);
}
@@ -100,7 +99,7 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key,
* Cycle through the cells in the bucket, starting at a random position.
* The randomness avoids worst-case search overhead as buckets fill up.
*/
- prn32(offset, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C);
+ prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) {
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) +
((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))];
@@ -127,7 +126,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
{
const void *key, *data, *tkey, *tdata;
ckhc_t *cell;
- size_t hash1, hash2, bucket, tbucket;
+ size_t hashes[2], bucket, tbucket;
unsigned i;
bucket = argbucket;
@@ -142,7 +141,7 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
* were an item for which both hashes indicated the same
* bucket.
*/
- prn32(i, LG_CKH_BUCKET_CELLS, ckh->prn_state, CKH_A, CKH_C);
+ prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C);
cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i];
assert(cell->key != NULL);
@@ -156,10 +155,11 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
#endif
/* Find the alternate bucket for the evicted item. */
- ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
- tbucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ ckh->hash(key, hashes);
+ tbucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (tbucket == bucket) {
- tbucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ tbucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets)
+ - 1);
/*
* It may be that (tbucket == bucket) still, if the
* item's hashes both indicate this bucket. However,
@@ -193,19 +193,19 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey,
JEMALLOC_INLINE bool
ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata)
{
- size_t hash1, hash2, bucket;
+ size_t hashes[2], bucket;
const void *key = *argkey;
const void *data = *argdata;
- ckh->hash(key, ckh->lg_curbuckets, &hash1, &hash2);
+ ckh->hash(key, hashes);
/* Try to insert in primary bucket. */
- bucket = hash1 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
/* Try to insert in secondary bucket. */
- bucket = hash2 & ((ZU(1) << ckh->lg_curbuckets) - 1);
+ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1);
if (ckh_try_bucket_insert(ckh, bucket, key, data) == false)
return (false);
@@ -262,12 +262,18 @@ ckh_grow(ckh_t *ckh)
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS;
while (true) {
+ size_t usize;
+
lg_curcells++;
- tab = (ckhc_t *)ipalloc(sizeof(ckhc_t) << lg_curcells,
- ZU(1) << LG_CACHELINE, true);
+ usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (usize == 0) {
+ ret = true;
+ goto label_return;
+ }
+ tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
ret = true;
- goto RETURN;
+ goto label_return;
}
/* Swap in new table. */
ttab = ckh->tab;
@@ -287,7 +293,7 @@ ckh_grow(ckh_t *ckh)
}
ret = false;
-RETURN:
+label_return:
return (ret);
}
@@ -295,7 +301,7 @@ static void
ckh_shrink(ckh_t *ckh)
{
ckhc_t *tab, *ttab;
- size_t lg_curcells;
+ size_t lg_curcells, usize;
unsigned lg_prevbuckets;
/*
@@ -304,8 +310,10 @@ ckh_shrink(ckh_t *ckh)
*/
lg_prevbuckets = ckh->lg_curbuckets;
lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1;
- tab = (ckhc_t *)ipalloc(sizeof(ckhc_t) << lg_curcells,
- ZU(1) << LG_CACHELINE, true);
+ usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE);
+ if (usize == 0)
+ return;
+ tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (tab == NULL) {
/*
* An OOM error isn't worth propagating, since it doesn't
@@ -340,7 +348,7 @@ bool
ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
{
bool ret;
- size_t mincells;
+ size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
@@ -354,7 +362,7 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh->ninserts = 0;
ckh->nrelocs = 0;
#endif
- ckh->prn_state = 42; /* Value doesn't really matter. */
+ ckh->prng_state = 42; /* Value doesn't really matter. */
ckh->count = 0;
/*
@@ -375,19 +383,19 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp)
ckh->hash = hash;
ckh->keycomp = keycomp;
- ckh->tab = (ckhc_t *)ipalloc(sizeof(ckhc_t) << lg_mincells,
- (ZU(1) << LG_CACHELINE), true);
+ usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
+ if (usize == 0) {
+ ret = true;
+ goto label_return;
+ }
+ ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true);
if (ckh->tab == NULL) {
ret = true;
- goto RETURN;
+ goto label_return;
}
-#ifdef JEMALLOC_DEBUG
- ckh->magic = CKH_MAGIG;
-#endif
-
ret = false;
-RETURN:
+label_return:
return (ret);
}
@@ -396,7 +404,6 @@ ckh_delete(ckh_t *ckh)
{
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
#ifdef CKH_VERBOSE
malloc_printf(
@@ -411,9 +418,8 @@ ckh_delete(ckh_t *ckh)
#endif
idalloc(ckh->tab);
-#ifdef JEMALLOC_DEBUG
- memset(ckh, 0x5a, sizeof(ckh_t));
-#endif
+ if (config_debug)
+ memset(ckh, 0x5a, sizeof(ckh_t));
}
size_t
@@ -421,7 +427,6 @@ ckh_count(ckh_t *ckh)
{
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
return (ckh->count);
}
@@ -452,7 +457,6 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
bool ret;
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
assert(ckh_search(ckh, key, NULL, NULL));
#ifdef CKH_COUNT
@@ -462,12 +466,12 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data)
while (ckh_try_insert(ckh, &key, &data)) {
if (ckh_grow(ckh)) {
ret = true;
- goto RETURN;
+ goto label_return;
}
}
ret = false;
-RETURN:
+label_return:
return (ret);
}
@@ -477,7 +481,6 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data)
size_t cell;
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
@@ -509,7 +512,6 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
size_t cell;
assert(ckh != NULL);
- assert(ckh->magic = CKH_MAGIG);
cell = ckh_isearch(ckh, searchkey);
if (cell != SIZE_T_MAX) {
@@ -524,31 +526,10 @@ ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data)
}
void
-ckh_string_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+ckh_string_hash(const void *key, size_t r_hash[2])
{
- size_t ret1, ret2;
- uint64_t h;
-
- assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
- assert(hash1 != NULL);
- assert(hash2 != NULL);
- h = hash(key, strlen((const char *)key), 0x94122f335b332aeaLLU);
- if (minbits <= 32) {
- /*
- * Avoid doing multiple hashes, since a single hash provides
- * enough bits.
- */
- ret1 = h & ZU(0xffffffffU);
- ret2 = h >> 32;
- } else {
- ret1 = h;
- ret2 = hash(key, strlen((const char *)key),
- 0x8432a476666bbc13U);
- }
-
- *hash1 = ret1;
- *hash2 = ret2;
+ hash(key, strlen((const char *)key), 0x94122f33U, r_hash);
}
bool
@@ -562,41 +543,16 @@ ckh_string_keycomp(const void *k1, const void *k2)
}
void
-ckh_pointer_hash(const void *key, unsigned minbits, size_t *hash1,
- size_t *hash2)
+ckh_pointer_hash(const void *key, size_t r_hash[2])
{
- size_t ret1, ret2;
- uint64_t h;
union {
const void *v;
- uint64_t i;
+ size_t i;
} u;
- assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
- assert(hash1 != NULL);
- assert(hash2 != NULL);
-
assert(sizeof(u.v) == sizeof(u.i));
-#if (LG_SIZEOF_PTR != LG_SIZEOF_INT)
- u.i = 0;
-#endif
u.v = key;
- h = hash(&u.i, sizeof(u.i), 0xd983396e68886082LLU);
- if (minbits <= 32) {
- /*
- * Avoid doing multiple hashes, since a single hash provides
- * enough bits.
- */
- ret1 = h & ZU(0xffffffffU);
- ret2 = h >> 32;
- } else {
- assert(SIZEOF_PTR == 8);
- ret1 = h;
- ret2 = hash(&u.i, sizeof(u.i), 0x5e2be9aff8709a5dLLU);
- }
-
- *hash1 = ret1;
- *hash2 = ret2;
+ hash(&u.i, sizeof(u.i), 0xd983396eU, r_hash);
}
bool
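Both ckh hash callbacks now share a single contract, matching the ckh_hash_t parameter taken by ckh_new() above: the callback writes the primary and secondary bucket hashes to r_hash[0] and r_hash[1]. A minimal sketch of a caller-supplied callback under that contract; the FNV-style mixing below is a stand-in for illustration, not the hash() used by ckh_string_hash above:

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical user callback for ckh_new(): fill both bucket hashes. */
    static void
    example_string_hash(const void *key, size_t r_hash[2])
    {
        const unsigned char *s = (const unsigned char *)key;
        uint64_t h = 0xcbf29ce484222325ULL;           /* FNV-1a offset basis */

        while (*s != '\0')
            h = (h ^ *s++) * 0x100000001b3ULL;        /* FNV-1a prime */
        r_hash[0] = (size_t)h;                        /* primary bucket hash */
        r_hash[1] = (size_t)((h >> 32) | (h << 32)); /* derived secondary hash */
    }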
diff --git a/dep/jemalloc/src/ctl.c b/dep/jemalloc/src/ctl.c
index 3c8adab90a3..f2ef4e60611 100644
--- a/dep/jemalloc/src/ctl.c
+++ b/dep/jemalloc/src/ctl.c
@@ -8,8 +8,6 @@
* ctl_mtx protects the following:
* - ctl_stats.*
* - opt_prof_active
- * - swap_enabled
- * - swap_prezeroed
*/
static malloc_mutex_t ctl_mtx;
static bool ctl_initialized;
@@ -17,6 +15,32 @@ static uint64_t ctl_epoch;
static ctl_stats_t ctl_stats;
/******************************************************************************/
+/* Helpers for named and indexed nodes. */
+
+static inline const ctl_named_node_t *
+ctl_named_node(const ctl_node_t *node)
+{
+
+ return ((node->named) ? (const ctl_named_node_t *)node : NULL);
+}
+
+static inline const ctl_named_node_t *
+ctl_named_children(const ctl_named_node_t *node, int index)
+{
+ const ctl_named_node_t *children = ctl_named_node(node->children);
+
+ return (children ? &children[index] : NULL);
+}
+
+static inline const ctl_indexed_node_t *
+ctl_indexed_node(const ctl_node_t *node)
+{
+
+ return ((node->named == false) ? (const ctl_indexed_node_t *)node :
+ NULL);
+}
+
+/******************************************************************************/
/* Function prototypes for non-inline static functions. */
#define CTL_PROTO(n) \
@@ -24,20 +48,17 @@ static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \
size_t *oldlenp, void *newp, size_t newlen);
#define INDEX_PROTO(n) \
-const ctl_node_t *n##_index(const size_t *mib, size_t miblen, \
- size_t i);
+static const ctl_named_node_t *n##_index(const size_t *mib, \
+ size_t miblen, size_t i);
-#ifdef JEMALLOC_STATS
static bool ctl_arena_init(ctl_arena_stats_t *astats);
-#endif
static void ctl_arena_clear(ctl_arena_stats_t *astats);
-#ifdef JEMALLOC_STATS
static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
arena_t *arena);
static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
ctl_arena_stats_t *astats);
-#endif
static void ctl_arena_refresh(arena_t *arena, unsigned i);
+static bool ctl_grow(void);
static void ctl_refresh(void);
static bool ctl_init(void);
static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
@@ -45,67 +66,56 @@ static int ctl_lookup(const char *name, ctl_node_t const **nodesp,
CTL_PROTO(version)
CTL_PROTO(epoch)
-#ifdef JEMALLOC_TCACHE
-CTL_PROTO(tcache_flush)
-#endif
+CTL_PROTO(thread_tcache_enabled)
+CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_arena)
-#ifdef JEMALLOC_STATS
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
-#endif
CTL_PROTO(config_debug)
CTL_PROTO(config_dss)
-CTL_PROTO(config_dynamic_page_shift)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
+CTL_PROTO(config_mremap)
+CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
-CTL_PROTO(config_swap)
-CTL_PROTO(config_sysv)
CTL_PROTO(config_tcache)
-CTL_PROTO(config_tiny)
CTL_PROTO(config_tls)
+CTL_PROTO(config_utrace)
+CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
-CTL_PROTO(opt_lg_qspace_max)
-CTL_PROTO(opt_lg_cspace_max)
+CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
-#ifdef JEMALLOC_FILL
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
-#endif
-#ifdef JEMALLOC_SYSV
-CTL_PROTO(opt_sysv)
-#endif
-#ifdef JEMALLOC_XMALLOC
+CTL_PROTO(opt_quarantine)
+CTL_PROTO(opt_redzone)
+CTL_PROTO(opt_utrace)
+CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
-#endif
-#ifdef JEMALLOC_TCACHE
CTL_PROTO(opt_tcache)
-CTL_PROTO(opt_lg_tcache_gc_sweep)
-#endif
-#ifdef JEMALLOC_PROF
+CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
-CTL_PROTO(opt_lg_prof_bt_max)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
+CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
-CTL_PROTO(opt_lg_prof_tcmax)
-#endif
-#ifdef JEMALLOC_SWAP
-CTL_PROTO(opt_overcommit)
-#endif
+CTL_PROTO(arena_i_purge)
+static void arena_purge(unsigned arena_ind);
+CTL_PROTO(arena_i_dss)
+INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
@@ -115,39 +125,16 @@ INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_quantum)
-CTL_PROTO(arenas_cacheline)
-CTL_PROTO(arenas_subpage)
-CTL_PROTO(arenas_pagesize)
-CTL_PROTO(arenas_chunksize)
-#ifdef JEMALLOC_TINY
-CTL_PROTO(arenas_tspace_min)
-CTL_PROTO(arenas_tspace_max)
-#endif
-CTL_PROTO(arenas_qspace_min)
-CTL_PROTO(arenas_qspace_max)
-CTL_PROTO(arenas_cspace_min)
-CTL_PROTO(arenas_cspace_max)
-CTL_PROTO(arenas_sspace_min)
-CTL_PROTO(arenas_sspace_max)
-#ifdef JEMALLOC_TCACHE
+CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
-#endif
-CTL_PROTO(arenas_ntbins)
-CTL_PROTO(arenas_nqbins)
-CTL_PROTO(arenas_ncbins)
-CTL_PROTO(arenas_nsbins)
CTL_PROTO(arenas_nbins)
-#ifdef JEMALLOC_TCACHE
CTL_PROTO(arenas_nhbins)
-#endif
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
-#ifdef JEMALLOC_PROF
+CTL_PROTO(arenas_extend)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
-#endif
-#ifdef JEMALLOC_STATS
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
@@ -166,44 +153,30 @@ CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
-#ifdef JEMALLOC_TCACHE
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
-#endif
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
-CTL_PROTO(stats_arenas_i_bins_j_highruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
-CTL_PROTO(stats_arenas_i_lruns_j_highruns)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
-#endif
+CTL_PROTO(stats_arenas_i_nthreads)
+CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
-#ifdef JEMALLOC_STATS
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
-#endif
INDEX_PROTO(stats_arenas_i)
-#ifdef JEMALLOC_STATS
+CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)
-#endif
-#ifdef JEMALLOC_SWAP
-# ifdef JEMALLOC_STATS
-CTL_PROTO(swap_avail)
-# endif
-CTL_PROTO(swap_prezeroed)
-CTL_PROTO(swap_nfds)
-CTL_PROTO(swap_fds)
-#endif
/******************************************************************************/
/* mallctl tree. */
@@ -211,294 +184,239 @@ CTL_PROTO(swap_fds)
/* Maximum tree depth. */
#define CTL_MAX_DEPTH 6
-#define NAME(n) true, {.named = {n
-#define CHILD(c) sizeof(c##_node) / sizeof(ctl_node_t), c##_node}}, NULL
-#define CTL(c) 0, NULL}}, c##_ctl
+#define NAME(n) {true}, n
+#define CHILD(t, c) \
+ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \
+ (ctl_node_t *)c##_node, \
+ NULL
+#define CTL(c) 0, NULL, c##_ctl
/*
* Only handles internal indexed nodes, since there are currently no external
* ones.
*/
-#define INDEX(i) false, {.indexed = {i##_index}}, NULL
+#define INDEX(i) {false}, i##_index
-#ifdef JEMALLOC_TCACHE
-static const ctl_node_t tcache_node[] = {
- {NAME("flush"), CTL(tcache_flush)}
+static const ctl_named_node_t tcache_node[] = {
+ {NAME("enabled"), CTL(thread_tcache_enabled)},
+ {NAME("flush"), CTL(thread_tcache_flush)}
};
-#endif
-static const ctl_node_t thread_node[] = {
- {NAME("arena"), CTL(thread_arena)}
-#ifdef JEMALLOC_STATS
- ,
+static const ctl_named_node_t thread_node[] = {
+ {NAME("arena"), CTL(thread_arena)},
{NAME("allocated"), CTL(thread_allocated)},
{NAME("allocatedp"), CTL(thread_allocatedp)},
{NAME("deallocated"), CTL(thread_deallocated)},
- {NAME("deallocatedp"), CTL(thread_deallocatedp)}
-#endif
+ {NAME("deallocatedp"), CTL(thread_deallocatedp)},
+ {NAME("tcache"), CHILD(named, tcache)}
};
-static const ctl_node_t config_node[] = {
+static const ctl_named_node_t config_node[] = {
{NAME("debug"), CTL(config_debug)},
{NAME("dss"), CTL(config_dss)},
- {NAME("dynamic_page_shift"), CTL(config_dynamic_page_shift)},
{NAME("fill"), CTL(config_fill)},
{NAME("lazy_lock"), CTL(config_lazy_lock)},
+ {NAME("mremap"), CTL(config_mremap)},
+ {NAME("munmap"), CTL(config_munmap)},
{NAME("prof"), CTL(config_prof)},
{NAME("prof_libgcc"), CTL(config_prof_libgcc)},
{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
{NAME("stats"), CTL(config_stats)},
- {NAME("swap"), CTL(config_swap)},
- {NAME("sysv"), CTL(config_sysv)},
{NAME("tcache"), CTL(config_tcache)},
- {NAME("tiny"), CTL(config_tiny)},
{NAME("tls"), CTL(config_tls)},
+ {NAME("utrace"), CTL(config_utrace)},
+ {NAME("valgrind"), CTL(config_valgrind)},
{NAME("xmalloc"), CTL(config_xmalloc)}
};
-static const ctl_node_t opt_node[] = {
+static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
- {NAME("lg_qspace_max"), CTL(opt_lg_qspace_max)},
- {NAME("lg_cspace_max"), CTL(opt_lg_cspace_max)},
+ {NAME("dss"), CTL(opt_dss)},
{NAME("lg_chunk"), CTL(opt_lg_chunk)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)},
- {NAME("stats_print"), CTL(opt_stats_print)}
-#ifdef JEMALLOC_FILL
- ,
+ {NAME("stats_print"), CTL(opt_stats_print)},
{NAME("junk"), CTL(opt_junk)},
- {NAME("zero"), CTL(opt_zero)}
-#endif
-#ifdef JEMALLOC_SYSV
- ,
- {NAME("sysv"), CTL(opt_sysv)}
-#endif
-#ifdef JEMALLOC_XMALLOC
- ,
- {NAME("xmalloc"), CTL(opt_xmalloc)}
-#endif
-#ifdef JEMALLOC_TCACHE
- ,
+ {NAME("zero"), CTL(opt_zero)},
+ {NAME("quarantine"), CTL(opt_quarantine)},
+ {NAME("redzone"), CTL(opt_redzone)},
+ {NAME("utrace"), CTL(opt_utrace)},
+ {NAME("valgrind"), CTL(opt_valgrind)},
+ {NAME("xmalloc"), CTL(opt_xmalloc)},
{NAME("tcache"), CTL(opt_tcache)},
- {NAME("lg_tcache_gc_sweep"), CTL(opt_lg_tcache_gc_sweep)}
-#endif
-#ifdef JEMALLOC_PROF
- ,
+ {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
- {NAME("lg_prof_bt_max"), CTL(opt_lg_prof_bt_max)},
{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
+ {NAME("prof_final"), CTL(opt_prof_final)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)},
- {NAME("lg_prof_tcmax"), CTL(opt_lg_prof_tcmax)}
-#endif
-#ifdef JEMALLOC_SWAP
- ,
- {NAME("overcommit"), CTL(opt_overcommit)}
-#endif
+ {NAME("prof_accum"), CTL(opt_prof_accum)}
};
-static const ctl_node_t arenas_bin_i_node[] = {
+static const ctl_named_node_t arena_i_node[] = {
+ {NAME("purge"), CTL(arena_i_purge)},
+ {NAME("dss"), CTL(arena_i_dss)}
+};
+static const ctl_named_node_t super_arena_i_node[] = {
+ {NAME(""), CHILD(named, arena_i)}
+};
+
+static const ctl_indexed_node_t arena_node[] = {
+ {INDEX(arena_i)}
+};
+
+static const ctl_named_node_t arenas_bin_i_node[] = {
{NAME("size"), CTL(arenas_bin_i_size)},
{NAME("nregs"), CTL(arenas_bin_i_nregs)},
{NAME("run_size"), CTL(arenas_bin_i_run_size)}
};
-static const ctl_node_t super_arenas_bin_i_node[] = {
- {NAME(""), CHILD(arenas_bin_i)}
+static const ctl_named_node_t super_arenas_bin_i_node[] = {
+ {NAME(""), CHILD(named, arenas_bin_i)}
};
-static const ctl_node_t arenas_bin_node[] = {
+static const ctl_indexed_node_t arenas_bin_node[] = {
{INDEX(arenas_bin_i)}
};
-static const ctl_node_t arenas_lrun_i_node[] = {
+static const ctl_named_node_t arenas_lrun_i_node[] = {
{NAME("size"), CTL(arenas_lrun_i_size)}
};
-static const ctl_node_t super_arenas_lrun_i_node[] = {
- {NAME(""), CHILD(arenas_lrun_i)}
+static const ctl_named_node_t super_arenas_lrun_i_node[] = {
+ {NAME(""), CHILD(named, arenas_lrun_i)}
};
-static const ctl_node_t arenas_lrun_node[] = {
+static const ctl_indexed_node_t arenas_lrun_node[] = {
{INDEX(arenas_lrun_i)}
};
-static const ctl_node_t arenas_node[] = {
+static const ctl_named_node_t arenas_node[] = {
{NAME("narenas"), CTL(arenas_narenas)},
{NAME("initialized"), CTL(arenas_initialized)},
{NAME("quantum"), CTL(arenas_quantum)},
- {NAME("cacheline"), CTL(arenas_cacheline)},
- {NAME("subpage"), CTL(arenas_subpage)},
- {NAME("pagesize"), CTL(arenas_pagesize)},
- {NAME("chunksize"), CTL(arenas_chunksize)},
-#ifdef JEMALLOC_TINY
- {NAME("tspace_min"), CTL(arenas_tspace_min)},
- {NAME("tspace_max"), CTL(arenas_tspace_max)},
-#endif
- {NAME("qspace_min"), CTL(arenas_qspace_min)},
- {NAME("qspace_max"), CTL(arenas_qspace_max)},
- {NAME("cspace_min"), CTL(arenas_cspace_min)},
- {NAME("cspace_max"), CTL(arenas_cspace_max)},
- {NAME("sspace_min"), CTL(arenas_sspace_min)},
- {NAME("sspace_max"), CTL(arenas_sspace_max)},
-#ifdef JEMALLOC_TCACHE
+ {NAME("page"), CTL(arenas_page)},
{NAME("tcache_max"), CTL(arenas_tcache_max)},
-#endif
- {NAME("ntbins"), CTL(arenas_ntbins)},
- {NAME("nqbins"), CTL(arenas_nqbins)},
- {NAME("ncbins"), CTL(arenas_ncbins)},
- {NAME("nsbins"), CTL(arenas_nsbins)},
{NAME("nbins"), CTL(arenas_nbins)},
-#ifdef JEMALLOC_TCACHE
{NAME("nhbins"), CTL(arenas_nhbins)},
-#endif
- {NAME("bin"), CHILD(arenas_bin)},
+ {NAME("bin"), CHILD(indexed, arenas_bin)},
{NAME("nlruns"), CTL(arenas_nlruns)},
- {NAME("lrun"), CHILD(arenas_lrun)},
- {NAME("purge"), CTL(arenas_purge)}
+ {NAME("lrun"), CHILD(indexed, arenas_lrun)},
+ {NAME("purge"), CTL(arenas_purge)},
+ {NAME("extend"), CTL(arenas_extend)}
};
-#ifdef JEMALLOC_PROF
-static const ctl_node_t prof_node[] = {
+static const ctl_named_node_t prof_node[] = {
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("interval"), CTL(prof_interval)}
};
-#endif
-#ifdef JEMALLOC_STATS
-static const ctl_node_t stats_chunks_node[] = {
+static const ctl_named_node_t stats_chunks_node[] = {
{NAME("current"), CTL(stats_chunks_current)},
{NAME("total"), CTL(stats_chunks_total)},
{NAME("high"), CTL(stats_chunks_high)}
};
-static const ctl_node_t stats_huge_node[] = {
+static const ctl_named_node_t stats_huge_node[] = {
{NAME("allocated"), CTL(stats_huge_allocated)},
{NAME("nmalloc"), CTL(stats_huge_nmalloc)},
{NAME("ndalloc"), CTL(stats_huge_ndalloc)}
};
-static const ctl_node_t stats_arenas_i_small_node[] = {
+static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)}
};
-static const ctl_node_t stats_arenas_i_large_node[] = {
+static const ctl_named_node_t stats_arenas_i_large_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_large_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)}
};
-static const ctl_node_t stats_arenas_i_bins_j_node[] = {
+static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)},
-#ifdef JEMALLOC_TCACHE
{NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)},
{NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)},
-#endif
{NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)},
{NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)},
- {NAME("highruns"), CTL(stats_arenas_i_bins_j_highruns)},
{NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)}
};
-static const ctl_node_t super_stats_arenas_i_bins_j_node[] = {
- {NAME(""), CHILD(stats_arenas_i_bins_j)}
+static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_bins_j)}
};
-static const ctl_node_t stats_arenas_i_bins_node[] = {
+static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
{INDEX(stats_arenas_i_bins_j)}
};
-static const ctl_node_t stats_arenas_i_lruns_j_node[] = {
+static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
{NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)},
{NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)},
{NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)},
- {NAME("highruns"), CTL(stats_arenas_i_lruns_j_highruns)},
{NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)}
};
-static const ctl_node_t super_stats_arenas_i_lruns_j_node[] = {
- {NAME(""), CHILD(stats_arenas_i_lruns_j)}
+static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i_lruns_j)}
};
-static const ctl_node_t stats_arenas_i_lruns_node[] = {
+static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
{INDEX(stats_arenas_i_lruns_j)}
};
-#endif
-static const ctl_node_t stats_arenas_i_node[] = {
+static const ctl_named_node_t stats_arenas_i_node[] = {
+ {NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
+ {NAME("dss"), CTL(stats_arenas_i_dss)},
{NAME("pactive"), CTL(stats_arenas_i_pactive)},
- {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}
-#ifdef JEMALLOC_STATS
- ,
+ {NAME("pdirty"), CTL(stats_arenas_i_pdirty)},
{NAME("mapped"), CTL(stats_arenas_i_mapped)},
{NAME("npurge"), CTL(stats_arenas_i_npurge)},
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)},
{NAME("purged"), CTL(stats_arenas_i_purged)},
- {NAME("small"), CHILD(stats_arenas_i_small)},
- {NAME("large"), CHILD(stats_arenas_i_large)},
- {NAME("bins"), CHILD(stats_arenas_i_bins)},
- {NAME("lruns"), CHILD(stats_arenas_i_lruns)}
-#endif
+ {NAME("small"), CHILD(named, stats_arenas_i_small)},
+ {NAME("large"), CHILD(named, stats_arenas_i_large)},
+ {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
+ {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)}
};
-static const ctl_node_t super_stats_arenas_i_node[] = {
- {NAME(""), CHILD(stats_arenas_i)}
+static const ctl_named_node_t super_stats_arenas_i_node[] = {
+ {NAME(""), CHILD(named, stats_arenas_i)}
};
-static const ctl_node_t stats_arenas_node[] = {
+static const ctl_indexed_node_t stats_arenas_node[] = {
{INDEX(stats_arenas_i)}
};
-static const ctl_node_t stats_node[] = {
-#ifdef JEMALLOC_STATS
+static const ctl_named_node_t stats_node[] = {
+ {NAME("cactive"), CTL(stats_cactive)},
{NAME("allocated"), CTL(stats_allocated)},
{NAME("active"), CTL(stats_active)},
{NAME("mapped"), CTL(stats_mapped)},
- {NAME("chunks"), CHILD(stats_chunks)},
- {NAME("huge"), CHILD(stats_huge)},
-#endif
- {NAME("arenas"), CHILD(stats_arenas)}
-};
-
-#ifdef JEMALLOC_SWAP
-static const ctl_node_t swap_node[] = {
-# ifdef JEMALLOC_STATS
- {NAME("avail"), CTL(swap_avail)},
-# endif
- {NAME("prezeroed"), CTL(swap_prezeroed)},
- {NAME("nfds"), CTL(swap_nfds)},
- {NAME("fds"), CTL(swap_fds)}
+ {NAME("chunks"), CHILD(named, stats_chunks)},
+ {NAME("huge"), CHILD(named, stats_huge)},
+ {NAME("arenas"), CHILD(indexed, stats_arenas)}
};
-#endif
-static const ctl_node_t root_node[] = {
+static const ctl_named_node_t root_node[] = {
{NAME("version"), CTL(version)},
{NAME("epoch"), CTL(epoch)},
-#ifdef JEMALLOC_TCACHE
- {NAME("tcache"), CHILD(tcache)},
-#endif
- {NAME("thread"), CHILD(thread)},
- {NAME("config"), CHILD(config)},
- {NAME("opt"), CHILD(opt)},
- {NAME("arenas"), CHILD(arenas)},
-#ifdef JEMALLOC_PROF
- {NAME("prof"), CHILD(prof)},
-#endif
- {NAME("stats"), CHILD(stats)}
-#ifdef JEMALLOC_SWAP
- ,
- {NAME("swap"), CHILD(swap)}
-#endif
+ {NAME("thread"), CHILD(named, thread)},
+ {NAME("config"), CHILD(named, config)},
+ {NAME("opt"), CHILD(named, opt)},
+ {NAME("arena"), CHILD(indexed, arena)},
+ {NAME("arenas"), CHILD(named, arenas)},
+ {NAME("prof"), CHILD(named, prof)},
+ {NAME("stats"), CHILD(named, stats)}
};
-static const ctl_node_t super_root_node[] = {
- {NAME(""), CHILD(root)}
+static const ctl_named_node_t super_root_node[] = {
+ {NAME(""), CHILD(named, root)}
};
#undef NAME
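For reference, with the reworked NAME/CHILD/CTL/INDEX macros, a leaf entry, an interior entry and an indexed entry expand as follows (field order exactly as the macro bodies above define it):

    /* {NAME("abort"), CTL(opt_abort)}    -> {{true},  "abort", 0, NULL, opt_abort_ctl} */
    /* {NAME("opt"),   CHILD(named, opt)} -> {{true},  "opt",
     *     sizeof(opt_node) / sizeof(ctl_named_node_t), (ctl_node_t *)opt_node, NULL} */
    /* {INDEX(arena_i)}                   -> {{false}, arena_i_index} */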
@@ -508,17 +426,10 @@ static const ctl_node_t super_root_node[] = {
/******************************************************************************/
-#ifdef JEMALLOC_STATS
static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{
- if (astats->bstats == NULL) {
- astats->bstats = (malloc_bin_stats_t *)base_alloc(nbins *
- sizeof(malloc_bin_stats_t));
- if (astats->bstats == NULL)
- return (true);
- }
if (astats->lstats == NULL) {
astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
sizeof(malloc_large_stats_t));
@@ -528,35 +439,35 @@ ctl_arena_init(ctl_arena_stats_t *astats)
return (false);
}
-#endif
static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{
+ astats->dss = dss_prec_names[dss_prec_limit];
astats->pactive = 0;
astats->pdirty = 0;
-#ifdef JEMALLOC_STATS
- memset(&astats->astats, 0, sizeof(arena_stats_t));
- astats->allocated_small = 0;
- astats->nmalloc_small = 0;
- astats->ndalloc_small = 0;
- astats->nrequests_small = 0;
- memset(astats->bstats, 0, nbins * sizeof(malloc_bin_stats_t));
- memset(astats->lstats, 0, nlclasses * sizeof(malloc_large_stats_t));
-#endif
+ if (config_stats) {
+ memset(&astats->astats, 0, sizeof(arena_stats_t));
+ astats->allocated_small = 0;
+ astats->nmalloc_small = 0;
+ astats->ndalloc_small = 0;
+ astats->nrequests_small = 0;
+ memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
+ memset(astats->lstats, 0, nlclasses *
+ sizeof(malloc_large_stats_t));
+ }
}
-#ifdef JEMALLOC_STATS
static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
unsigned i;
- arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
- &cstats->astats, cstats->bstats, cstats->lstats);
+ arena_stats_merge(arena, &cstats->dss, &cstats->pactive,
+ &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats);
- for (i = 0; i < nbins; i++) {
+ for (i = 0; i < NBINS; i++) {
cstats->allocated_small += cstats->bstats[i].allocated;
cstats->nmalloc_small += cstats->bstats[i].nmalloc;
cstats->ndalloc_small += cstats->bstats[i].ndalloc;
@@ -591,78 +502,145 @@ ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
- sstats->lstats[i].highruns += astats->lstats[i].highruns;
sstats->lstats[i].curruns += astats->lstats[i].curruns;
}
- for (i = 0; i < nbins; i++) {
+ for (i = 0; i < NBINS; i++) {
sstats->bstats[i].allocated += astats->bstats[i].allocated;
sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
-#ifdef JEMALLOC_TCACHE
- sstats->bstats[i].nfills += astats->bstats[i].nfills;
- sstats->bstats[i].nflushes += astats->bstats[i].nflushes;
-#endif
+ if (config_tcache) {
+ sstats->bstats[i].nfills += astats->bstats[i].nfills;
+ sstats->bstats[i].nflushes +=
+ astats->bstats[i].nflushes;
+ }
sstats->bstats[i].nruns += astats->bstats[i].nruns;
sstats->bstats[i].reruns += astats->bstats[i].reruns;
- sstats->bstats[i].highruns += astats->bstats[i].highruns;
sstats->bstats[i].curruns += astats->bstats[i].curruns;
}
}
-#endif
static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
- ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];
+ ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];
ctl_arena_clear(astats);
-#ifdef JEMALLOC_STATS
- ctl_arena_stats_amerge(astats, arena);
- /* Merge into sum stats as well. */
- ctl_arena_stats_smerge(sstats, astats);
-#else
- astats->pactive += arena->nactive;
- astats->pdirty += arena->ndirty;
- /* Merge into sum stats as well. */
- sstats->pactive += arena->nactive;
- sstats->pdirty += arena->ndirty;
-#endif
+ sstats->nthreads += astats->nthreads;
+ if (config_stats) {
+ ctl_arena_stats_amerge(astats, arena);
+ /* Merge into sum stats as well. */
+ ctl_arena_stats_smerge(sstats, astats);
+ } else {
+ astats->pactive += arena->nactive;
+ astats->pdirty += arena->ndirty;
+ /* Merge into sum stats as well. */
+ sstats->pactive += arena->nactive;
+ sstats->pdirty += arena->ndirty;
+ }
+}
+
+static bool
+ctl_grow(void)
+{
+ size_t astats_size;
+ ctl_arena_stats_t *astats;
+ arena_t **tarenas;
+
+ /* Extend arena stats and arenas arrays. */
+ astats_size = (ctl_stats.narenas + 2) * sizeof(ctl_arena_stats_t);
+ if (ctl_stats.narenas == narenas_auto) {
+ /* ctl_stats.arenas and arenas came from base_alloc(). */
+ astats = (ctl_arena_stats_t *)imalloc(astats_size);
+ if (astats == NULL)
+ return (true);
+ memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
+ sizeof(ctl_arena_stats_t));
+
+ tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
+ sizeof(arena_t *));
+ if (tarenas == NULL) {
+ idalloc(astats);
+ return (true);
+ }
+ memcpy(tarenas, arenas, ctl_stats.narenas * sizeof(arena_t *));
+ } else {
+ astats = (ctl_arena_stats_t *)iralloc(ctl_stats.arenas,
+ astats_size, 0, 0, false, false);
+ if (astats == NULL)
+ return (true);
+
+ tarenas = (arena_t **)iralloc(arenas, (ctl_stats.narenas + 1) *
+ sizeof(arena_t *), 0, 0, false, false);
+ if (tarenas == NULL)
+ return (true);
+ }
+ /* Initialize the new astats and arenas elements. */
+ memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
+ if (ctl_arena_init(&astats[ctl_stats.narenas + 1]))
+ return (true);
+ tarenas[ctl_stats.narenas] = NULL;
+ /* Swap merged stats to their new location. */
+ {
+ ctl_arena_stats_t tstats;
+ memcpy(&tstats, &astats[ctl_stats.narenas],
+ sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas],
+ &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
+ memcpy(&astats[ctl_stats.narenas + 1], &tstats,
+ sizeof(ctl_arena_stats_t));
+ }
+ ctl_stats.arenas = astats;
+ ctl_stats.narenas++;
+ malloc_mutex_lock(&arenas_lock);
+ arenas = tarenas;
+ narenas_total++;
+ arenas_extend(narenas_total - 1);
+ malloc_mutex_unlock(&arenas_lock);
+
+ return (false);
}
static void
ctl_refresh(void)
{
unsigned i;
- arena_t *tarenas[narenas];
-
-#ifdef JEMALLOC_STATS
- malloc_mutex_lock(&chunks_mtx);
- ctl_stats.chunks.current = stats_chunks.curchunks;
- ctl_stats.chunks.total = stats_chunks.nchunks;
- ctl_stats.chunks.high = stats_chunks.highchunks;
- malloc_mutex_unlock(&chunks_mtx);
-
- malloc_mutex_lock(&huge_mtx);
- ctl_stats.huge.allocated = huge_allocated;
- ctl_stats.huge.nmalloc = huge_nmalloc;
- ctl_stats.huge.ndalloc = huge_ndalloc;
- malloc_mutex_unlock(&huge_mtx);
-#endif
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+
+ if (config_stats) {
+ malloc_mutex_lock(&chunks_mtx);
+ ctl_stats.chunks.current = stats_chunks.curchunks;
+ ctl_stats.chunks.total = stats_chunks.nchunks;
+ ctl_stats.chunks.high = stats_chunks.highchunks;
+ malloc_mutex_unlock(&chunks_mtx);
+
+ malloc_mutex_lock(&huge_mtx);
+ ctl_stats.huge.allocated = huge_allocated;
+ ctl_stats.huge.nmalloc = huge_nmalloc;
+ ctl_stats.huge.ndalloc = huge_ndalloc;
+ malloc_mutex_unlock(&huge_mtx);
+ }
/*
* Clear sum stats, since they will be merged into by
* ctl_arena_refresh().
*/
- ctl_arena_clear(&ctl_stats.arenas[narenas]);
+ ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
+ ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);
malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
+ memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ if (arenas[i] != NULL)
+ ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
+ else
+ ctl_stats.arenas[i].nthreads = 0;
+ }
malloc_mutex_unlock(&arenas_lock);
- for (i = 0; i < narenas; i++) {
+ for (i = 0; i < ctl_stats.narenas; i++) {
bool initialized = (tarenas[i] != NULL);
ctl_stats.arenas[i].initialized = initialized;
@@ -670,20 +648,16 @@ ctl_refresh(void)
ctl_arena_refresh(tarenas[i], i);
}
-#ifdef JEMALLOC_STATS
- ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
- + ctl_stats.arenas[narenas].astats.allocated_large
- + ctl_stats.huge.allocated;
- ctl_stats.active = (ctl_stats.arenas[narenas].pactive << PAGE_SHIFT)
- + ctl_stats.huge.allocated;
- ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
-
-# ifdef JEMALLOC_SWAP
- malloc_mutex_lock(&swap_mtx);
- ctl_stats.swap_avail = swap_avail;
- malloc_mutex_unlock(&swap_mtx);
-# endif
-#endif
+ if (config_stats) {
+ ctl_stats.allocated =
+ ctl_stats.arenas[ctl_stats.narenas].allocated_small
+ + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
+ + ctl_stats.huge.allocated;
+ ctl_stats.active =
+ (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
+ + ctl_stats.huge.allocated;
+ ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
+ }
ctl_epoch++;
}
@@ -695,21 +669,19 @@ ctl_init(void)
malloc_mutex_lock(&ctl_mtx);
if (ctl_initialized == false) {
-#ifdef JEMALLOC_STATS
- unsigned i;
-#endif
-
/*
* Allocate space for one extra arena stats element, which
* contains summed stats across all arenas.
*/
+ assert(narenas_auto == narenas_total_get());
+ ctl_stats.narenas = narenas_auto;
ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
- (narenas + 1) * sizeof(ctl_arena_stats_t));
+ (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
if (ctl_stats.arenas == NULL) {
ret = true;
- goto RETURN;
+ goto label_return;
}
- memset(ctl_stats.arenas, 0, (narenas + 1) *
+ memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
sizeof(ctl_arena_stats_t));
/*
@@ -717,15 +689,16 @@ ctl_init(void)
* ever get used. Lazy initialization would allow errors to
* cause inconsistent state to be viewable by the application.
*/
-#ifdef JEMALLOC_STATS
- for (i = 0; i <= narenas; i++) {
- if (ctl_arena_init(&ctl_stats.arenas[i])) {
- ret = true;
- goto RETURN;
+ if (config_stats) {
+ unsigned i;
+ for (i = 0; i <= ctl_stats.narenas; i++) {
+ if (ctl_arena_init(&ctl_stats.arenas[i])) {
+ ret = true;
+ goto label_return;
+ }
}
}
-#endif
- ctl_stats.arenas[narenas].initialized = true;
+ ctl_stats.arenas[ctl_stats.narenas].initialized = true;
ctl_epoch = 0;
ctl_refresh();
@@ -733,7 +706,7 @@ ctl_init(void)
}
ret = false;
-RETURN:
+label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -745,7 +718,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
- const ctl_node_t *node;
+ const ctl_named_node_t *node;
elm = name;
/* Equivalent to strchrnul(). */
@@ -753,54 +726,53 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
if (elen == 0) {
ret = ENOENT;
- goto RETURN;
+ goto label_return;
}
node = super_root_node;
for (i = 0; i < *depthp; i++) {
- assert(node->named);
- assert(node->u.named.nchildren > 0);
- if (node->u.named.children[0].named) {
- const ctl_node_t *pnode = node;
+ assert(node);
+ assert(node->nchildren > 0);
+ if (ctl_named_node(node->children) != NULL) {
+ const ctl_named_node_t *pnode = node;
/* Children are named. */
- for (j = 0; j < node->u.named.nchildren; j++) {
- const ctl_node_t *child =
- &node->u.named.children[j];
- if (strlen(child->u.named.name) == elen
- && strncmp(elm, child->u.named.name,
- elen) == 0) {
+ for (j = 0; j < node->nchildren; j++) {
+ const ctl_named_node_t *child =
+ ctl_named_children(node, j);
+ if (strlen(child->name) == elen &&
+ strncmp(elm, child->name, elen) == 0) {
node = child;
if (nodesp != NULL)
- nodesp[i] = node;
+ nodesp[i] =
+ (const ctl_node_t *)node;
mibp[i] = j;
break;
}
}
if (node == pnode) {
ret = ENOENT;
- goto RETURN;
+ goto label_return;
}
} else {
- unsigned long index;
- const ctl_node_t *inode;
+ uintmax_t index;
+ const ctl_indexed_node_t *inode;
/* Children are indexed. */
- index = strtoul(elm, NULL, 10);
- if (index == ULONG_MAX) {
+ index = malloc_strtoumax(elm, NULL, 10);
+ if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
ret = ENOENT;
- goto RETURN;
+ goto label_return;
}
- inode = &node->u.named.children[0];
- node = inode->u.indexed.index(mibp, *depthp,
- index);
+ inode = ctl_indexed_node(node->children);
+ node = inode->index(mibp, *depthp, (size_t)index);
if (node == NULL) {
ret = ENOENT;
- goto RETURN;
+ goto label_return;
}
if (nodesp != NULL)
- nodesp[i] = node;
+ nodesp[i] = (const ctl_node_t *)node;
mibp[i] = (size_t)index;
}
@@ -812,7 +784,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
* in this path through the tree.
*/
ret = ENOENT;
- goto RETURN;
+ goto label_return;
}
/* Complete lookup successful. */
*depthp = i + 1;
@@ -823,7 +795,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
if (*dot == '\0') {
/* No more elements. */
ret = ENOENT;
- goto RETURN;
+ goto label_return;
}
elm = &dot[1];
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
@@ -832,7 +804,7 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
}
ret = 0;
-RETURN:
+label_return:
return (ret);
}
@@ -844,25 +816,27 @@ ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t depth;
ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
+ const ctl_named_node_t *node;
if (ctl_initialized == false && ctl_init()) {
ret = EAGAIN;
- goto RETURN;
+ goto label_return;
}
depth = CTL_MAX_DEPTH;
ret = ctl_lookup(name, nodes, mib, &depth);
if (ret != 0)
- goto RETURN;
+ goto label_return;
- if (nodes[depth-1]->ctl == NULL) {
+ node = ctl_named_node(nodes[depth-1]);
+ if (node != NULL && node->ctl)
+ ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
+ else {
/* The name refers to a partial path through the ctl tree. */
ret = ENOENT;
- goto RETURN;
}
- ret = nodes[depth-1]->ctl(mib, depth, oldp, oldlenp, newp, newlen);
-RETURN:
+label_return:
return(ret);
}
@@ -873,11 +847,11 @@ ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
if (ctl_initialized == false && ctl_init()) {
ret = EAGAIN;
- goto RETURN;
+ goto label_return;
}
ret = ctl_lookup(name, NULL, mibp, miblenp);
-RETURN:
+label_return:
return(ret);
}
@@ -886,46 +860,48 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
- const ctl_node_t *node;
+ const ctl_named_node_t *node;
size_t i;
if (ctl_initialized == false && ctl_init()) {
ret = EAGAIN;
- goto RETURN;
+ goto label_return;
}
/* Iterate down the tree. */
node = super_root_node;
for (i = 0; i < miblen; i++) {
- if (node->u.named.children[0].named) {
+ assert(node);
+ assert(node->nchildren > 0);
+ if (ctl_named_node(node->children) != NULL) {
/* Children are named. */
- if (node->u.named.nchildren <= mib[i]) {
+ if (node->nchildren <= mib[i]) {
ret = ENOENT;
- goto RETURN;
+ goto label_return;
}
- node = &node->u.named.children[mib[i]];
+ node = ctl_named_children(node, mib[i]);
} else {
- const ctl_node_t *inode;
+ const ctl_indexed_node_t *inode;
/* Indexed element. */
- inode = &node->u.named.children[0];
- node = inode->u.indexed.index(mib, miblen, mib[i]);
+ inode = ctl_indexed_node(node->children);
+ node = inode->index(mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
- goto RETURN;
+ goto label_return;
}
}
}
/* Call the ctl function. */
- if (node->ctl == NULL) {
+ if (node && node->ctl)
+ ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
+ else {
/* Partial MIB. */
ret = ENOENT;
- goto RETURN;
}
- ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
-RETURN:
+label_return:
return(ret);
}
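The named/indexed traversal implemented by ctl_lookup() and ctl_bymib() is what the public mallctlnametomib()/mallctlbymib() pair exposes; the usual pattern for repeated indexed reads is to translate the name once and patch the index component (unprefixed symbols assumed):

    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    /* Read stats.arenas.<i>.pactive through a cached MIB. */
    static size_t
    arena_pactive(unsigned i)
    {
        size_t mib[6];
        size_t miblen = sizeof(mib) / sizeof(mib[0]);
        size_t pactive = 0, sz = sizeof(pactive);

        if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) != 0)
            return (0);
        mib[2] = i;     /* replace the indexed "arenas.<i>" component */
        if (mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0) != 0)
            return (0);
        return (pactive);
    }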
@@ -941,38 +917,54 @@ ctl_boot(void)
return (false);
}
+void
+ctl_prefork(void)
+{
+
+ malloc_mutex_lock(&ctl_mtx);
+}
+
+void
+ctl_postfork_parent(void)
+{
+
+ malloc_mutex_postfork_parent(&ctl_mtx);
+}
+
+void
+ctl_postfork_child(void)
+{
+
+ malloc_mutex_postfork_child(&ctl_mtx);
+}
+
/******************************************************************************/
/* *_ctl() functions. */
#define READONLY() do { \
if (newp != NULL || newlen != 0) { \
ret = EPERM; \
- goto RETURN; \
+ goto label_return; \
} \
} while (0)
#define WRITEONLY() do { \
if (oldp != NULL || oldlenp != NULL) { \
ret = EPERM; \
- goto RETURN; \
+ goto label_return; \
} \
} while (0)
-#define VOID() do { \
- READONLY(); \
- WRITEONLY(); \
-} while (0)
-
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
- memcpy(oldp, (void *)&v, copylen); \
+ memcpy(oldp, (void *)&(v), copylen); \
ret = EINVAL; \
- goto RETURN; \
+ goto label_return; \
} else \
- *(t *)oldp = v; \
+ *(t *)oldp = (v); \
} \
} while (0)
@@ -980,12 +972,60 @@ ctl_boot(void)
if (newp != NULL) { \
if (newlen != sizeof(t)) { \
ret = EINVAL; \
- goto RETURN; \
+ goto label_return; \
} \
- v = *(t *)newp; \
+ (v) = *(t *)newp; \
} \
} while (0)
+/*
+ * There's a lot of code duplication in the following macros due to limitations
+ * in how nested cpp macros are expanded.
+ */
+#define CTL_RO_CLGEN(c, l, n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if ((c) == false) \
+ return (ENOENT); \
+ if (l) \
+ malloc_mutex_lock(&ctl_mtx); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ if (l) \
+ malloc_mutex_unlock(&ctl_mtx); \
+ return (ret); \
+}
+
+#define CTL_RO_CGEN(c, n, v, t) \
+static int \
+n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
+ void *newp, size_t newlen) \
+{ \
+ int ret; \
+ t oldval; \
+ \
+ if ((c) == false) \
+ return (ENOENT); \
+ malloc_mutex_lock(&ctl_mtx); \
+ READONLY(); \
+ oldval = (v); \
+ READ(oldval, t); \
+ \
+ ret = 0; \
+label_return: \
+ malloc_mutex_unlock(&ctl_mtx); \
+ return (ret); \
+}
+
#define CTL_RO_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
@@ -996,11 +1036,11 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
\
malloc_mutex_lock(&ctl_mtx); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
-RETURN: \
+label_return: \
malloc_mutex_unlock(&ctl_mtx); \
return (ret); \
}
@@ -1009,7 +1049,7 @@ RETURN: \
* ctl_mtx is not acquired, under the assumption that no pertinent data will
* mutate during the call.
*/
-#define CTL_RO_NL_GEN(n, v, t) \
+#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
@@ -1017,33 +1057,35 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
int ret; \
t oldval; \
\
+ if ((c) == false) \
+ return (ENOENT); \
READONLY(); \
- oldval = v; \
+ oldval = (v); \
READ(oldval, t); \
\
ret = 0; \
-RETURN: \
+label_return: \
return (ret); \
}
-#define CTL_RO_TRUE_GEN(n) \
+#define CTL_RO_NL_GEN(n, v, t) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
{ \
int ret; \
- bool oldval; \
+ t oldval; \
\
READONLY(); \
- oldval = true; \
- READ(oldval, bool); \
+ oldval = (v); \
+ READ(oldval, t); \
\
ret = 0; \
-RETURN: \
+label_return: \
return (ret); \
}
-#define CTL_RO_FALSE_GEN(n) \
+#define CTL_RO_BOOL_CONFIG_GEN(n) \
static int \
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
void *newp, size_t newlen) \
@@ -1052,11 +1094,11 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \
bool oldval; \
\
READONLY(); \
- oldval = false; \
+ oldval = n; \
READ(oldval, bool); \
\
ret = 0; \
-RETURN: \
+label_return: \
return (ret); \
}
@@ -1070,41 +1112,60 @@ epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
uint64_t newval;
malloc_mutex_lock(&ctl_mtx);
- newval = 0;
WRITE(newval, uint64_t);
- if (newval != 0)
+ if (newp != NULL)
ctl_refresh();
READ(ctl_epoch, uint64_t);
ret = 0;
-RETURN:
+label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
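epoch_ctl() now refreshes the cached statistics whenever anything is written, not only when a non-zero value is written. The usual refresh-then-read idiom from application code looks like this (unprefixed symbol assumed):

    #include <stddef.h>
    #include <stdint.h>
    #include <jemalloc/jemalloc.h>

    /* Advance the epoch so that subsequent stats.* reads are current. */
    static void
    stats_refresh(void)
    {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);

        (void)mallctl("epoch", &epoch, &sz, &epoch, sz);
    }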
-#ifdef JEMALLOC_TCACHE
static int
-tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
+thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
{
int ret;
- tcache_t *tcache;
+ bool oldval;
- VOID();
+ if (config_tcache == false)
+ return (ENOENT);
- tcache = TCACHE_GET();
- if (tcache == NULL) {
- ret = 0;
- goto RETURN;
+ oldval = tcache_enabled_get();
+ if (newp != NULL) {
+ if (newlen != sizeof(bool)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ tcache_enabled_set(*(bool *)newp);
}
- tcache_destroy(tcache);
- TCACHE_SET(NULL);
+ READ(oldval, bool);
ret = 0;
-RETURN:
+label_return:
+ return (ret);
+}
+
+static int
+thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+
+ if (config_tcache == false)
+ return (ENOENT);
+
+ READONLY();
+ WRITEONLY();
+
+ tcache_flush();
+
+ ret = 0;
+label_return:
return (ret);
}
-#endif
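The old tcache.flush control is replaced by thread.tcache.enabled (read/write bool) and thread.tcache.flush (no old or new value, as the READONLY()/WRITEONLY() pair above enforces). Typical calls from a thread that wants its cache drained and kept off (unprefixed symbol assumed):

    #include <stdbool.h>
    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    static void
    disable_this_threads_tcache(void)
    {
        bool off = false;

        /* Flush the calling thread's tcache back to its arena... */
        (void)mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
        /* ...and keep the tcache disabled from here on. */
        (void)mallctl("thread.tcache.enabled", NULL, NULL, &off, sizeof(off));
    }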
static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
@@ -1113,191 +1174,231 @@ thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
unsigned newind, oldind;
- newind = oldind = choose_arena()->ind;
- WRITE(oldind, unsigned);
- READ(newind, unsigned);
+ malloc_mutex_lock(&ctl_mtx);
+ newind = oldind = choose_arena(NULL)->ind;
+ WRITE(newind, unsigned);
+ READ(oldind, unsigned);
if (newind != oldind) {
arena_t *arena;
- if (newind >= narenas) {
+ if (newind >= ctl_stats.narenas) {
/* New arena index is out of range. */
ret = EFAULT;
- goto RETURN;
+ goto label_return;
}
/* Initialize arena if necessary. */
malloc_mutex_lock(&arenas_lock);
- if ((arena = arenas[newind]) == NULL)
- arena = arenas_extend(newind);
- malloc_mutex_unlock(&arenas_lock);
- if (arena == NULL) {
+ if ((arena = arenas[newind]) == NULL && (arena =
+ arenas_extend(newind)) == NULL) {
+ malloc_mutex_unlock(&arenas_lock);
ret = EAGAIN;
- goto RETURN;
+ goto label_return;
}
+ assert(arena == arenas[newind]);
+ arenas[oldind]->nthreads--;
+ arenas[newind]->nthreads++;
+ malloc_mutex_unlock(&arenas_lock);
/* Set new arena association. */
- ARENA_SET(arena);
+ if (config_tcache) {
+ tcache_t *tcache;
+ if ((uintptr_t)(tcache = *tcache_tsd_get()) >
+ (uintptr_t)TCACHE_STATE_MAX) {
+ tcache_arena_dissociate(tcache);
+ tcache_arena_associate(tcache, arena);
+ }
+ }
+ arenas_tsd_set(&arena);
}
ret = 0;
-RETURN:
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
-#ifdef JEMALLOC_STATS
-CTL_RO_NL_GEN(thread_allocated, ALLOCATED_GET(), uint64_t);
-CTL_RO_NL_GEN(thread_allocatedp, &ALLOCATED_GET(), uint64_t *);
-CTL_RO_NL_GEN(thread_deallocated, DEALLOCATED_GET(), uint64_t);
-CTL_RO_NL_GEN(thread_deallocatedp, &DEALLOCATED_GET(), uint64_t *);
-#endif
+CTL_RO_NL_CGEN(config_stats, thread_allocated,
+ thread_allocated_tsd_get()->allocated, uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
+ &thread_allocated_tsd_get()->allocated, uint64_t *)
+CTL_RO_NL_CGEN(config_stats, thread_deallocated,
+ thread_allocated_tsd_get()->deallocated, uint64_t)
+CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
+ &thread_allocated_tsd_get()->deallocated, uint64_t *)
/******************************************************************************/
-#ifdef JEMALLOC_DEBUG
-CTL_RO_TRUE_GEN(config_debug)
-#else
-CTL_RO_FALSE_GEN(config_debug)
-#endif
-
-#ifdef JEMALLOC_DSS
-CTL_RO_TRUE_GEN(config_dss)
-#else
-CTL_RO_FALSE_GEN(config_dss)
-#endif
-
-#ifdef JEMALLOC_DYNAMIC_PAGE_SHIFT
-CTL_RO_TRUE_GEN(config_dynamic_page_shift)
-#else
-CTL_RO_FALSE_GEN(config_dynamic_page_shift)
-#endif
-
-#ifdef JEMALLOC_FILL
-CTL_RO_TRUE_GEN(config_fill)
-#else
-CTL_RO_FALSE_GEN(config_fill)
-#endif
-
-#ifdef JEMALLOC_LAZY_LOCK
-CTL_RO_TRUE_GEN(config_lazy_lock)
-#else
-CTL_RO_FALSE_GEN(config_lazy_lock)
-#endif
-
-#ifdef JEMALLOC_PROF
-CTL_RO_TRUE_GEN(config_prof)
-#else
-CTL_RO_FALSE_GEN(config_prof)
-#endif
-
-#ifdef JEMALLOC_PROF_LIBGCC
-CTL_RO_TRUE_GEN(config_prof_libgcc)
-#else
-CTL_RO_FALSE_GEN(config_prof_libgcc)
-#endif
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-CTL_RO_TRUE_GEN(config_prof_libunwind)
-#else
-CTL_RO_FALSE_GEN(config_prof_libunwind)
-#endif
-
-#ifdef JEMALLOC_STATS
-CTL_RO_TRUE_GEN(config_stats)
-#else
-CTL_RO_FALSE_GEN(config_stats)
-#endif
-
-#ifdef JEMALLOC_SWAP
-CTL_RO_TRUE_GEN(config_swap)
-#else
-CTL_RO_FALSE_GEN(config_swap)
-#endif
-
-#ifdef JEMALLOC_SYSV
-CTL_RO_TRUE_GEN(config_sysv)
-#else
-CTL_RO_FALSE_GEN(config_sysv)
-#endif
-
-#ifdef JEMALLOC_TCACHE
-CTL_RO_TRUE_GEN(config_tcache)
-#else
-CTL_RO_FALSE_GEN(config_tcache)
-#endif
-
-#ifdef JEMALLOC_TINY
-CTL_RO_TRUE_GEN(config_tiny)
-#else
-CTL_RO_FALSE_GEN(config_tiny)
-#endif
-
-#ifdef JEMALLOC_TLS
-CTL_RO_TRUE_GEN(config_tls)
-#else
-CTL_RO_FALSE_GEN(config_tls)
-#endif
-
-#ifdef JEMALLOC_XMALLOC
-CTL_RO_TRUE_GEN(config_xmalloc)
-#else
-CTL_RO_FALSE_GEN(config_xmalloc)
-#endif
+CTL_RO_BOOL_CONFIG_GEN(config_debug)
+CTL_RO_BOOL_CONFIG_GEN(config_dss)
+CTL_RO_BOOL_CONFIG_GEN(config_fill)
+CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
+CTL_RO_BOOL_CONFIG_GEN(config_mremap)
+CTL_RO_BOOL_CONFIG_GEN(config_munmap)
+CTL_RO_BOOL_CONFIG_GEN(config_prof)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
+CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
+CTL_RO_BOOL_CONFIG_GEN(config_stats)
+CTL_RO_BOOL_CONFIG_GEN(config_tcache)
+CTL_RO_BOOL_CONFIG_GEN(config_tls)
+CTL_RO_BOOL_CONFIG_GEN(config_utrace)
+CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
+CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)
/******************************************************************************/
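CTL_RO_BOOL_CONFIG_GEN collapses the former per-#ifdef TRUE/FALSE generators into one generator that reports each compile-time config_* flag as a read-only bool. Checking one of them at run time (unprefixed symbol assumed):

    #include <stdbool.h>
    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    /* Was jemalloc built with statistics support? */
    static bool
    built_with_stats(void)
    {
        bool v = false;
        size_t sz = sizeof(v);

        (void)mallctl("config.stats", &v, &sz, NULL, 0);
        return (v);
    }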
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
-CTL_RO_NL_GEN(opt_lg_qspace_max, opt_lg_qspace_max, size_t)
-CTL_RO_NL_GEN(opt_lg_cspace_max, opt_lg_cspace_max, size_t)
+CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
-#ifdef JEMALLOC_FILL
-CTL_RO_NL_GEN(opt_junk, opt_junk, bool)
-CTL_RO_NL_GEN(opt_zero, opt_zero, bool)
-#endif
-#ifdef JEMALLOC_SYSV
-CTL_RO_NL_GEN(opt_sysv, opt_sysv, bool)
-#endif
-#ifdef JEMALLOC_XMALLOC
-CTL_RO_NL_GEN(opt_xmalloc, opt_xmalloc, bool)
-#endif
-#ifdef JEMALLOC_TCACHE
-CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
-CTL_RO_NL_GEN(opt_lg_tcache_gc_sweep, opt_lg_tcache_gc_sweep, ssize_t)
-#endif
-#ifdef JEMALLOC_PROF
-CTL_RO_NL_GEN(opt_prof, opt_prof, bool)
-CTL_RO_NL_GEN(opt_prof_prefix, opt_prof_prefix, const char *)
-CTL_RO_GEN(opt_prof_active, opt_prof_active, bool) /* Mutable. */
-CTL_RO_NL_GEN(opt_lg_prof_bt_max, opt_lg_prof_bt_max, size_t)
-CTL_RO_NL_GEN(opt_lg_prof_sample, opt_lg_prof_sample, size_t)
-CTL_RO_NL_GEN(opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
-CTL_RO_NL_GEN(opt_prof_gdump, opt_prof_gdump, bool)
-CTL_RO_NL_GEN(opt_prof_leak, opt_prof_leak, bool)
-CTL_RO_NL_GEN(opt_prof_accum, opt_prof_accum, bool)
-CTL_RO_NL_GEN(opt_lg_prof_tcmax, opt_lg_prof_tcmax, ssize_t)
-#endif
-#ifdef JEMALLOC_SWAP
-CTL_RO_NL_GEN(opt_overcommit, opt_overcommit, bool)
-#endif
+CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
+CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
+CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
+CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
+CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
+CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
+CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
+CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
+CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
+CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
/******************************************************************************/
-CTL_RO_NL_GEN(arenas_bin_i_size, arenas[0]->bins[mib[2]].reg_size, size_t)
-CTL_RO_NL_GEN(arenas_bin_i_nregs, arenas[0]->bins[mib[2]].nregs, uint32_t)
-CTL_RO_NL_GEN(arenas_bin_i_run_size, arenas[0]->bins[mib[2]].run_size, size_t)
-const ctl_node_t *
+/* ctl_mtx must be held during execution of this function. */
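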
+static void
+arena_purge(unsigned arena_ind)
+{
+ VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);
+
+ malloc_mutex_lock(&arenas_lock);
+ memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
+ malloc_mutex_unlock(&arenas_lock);
+
+ if (arena_ind == ctl_stats.narenas) {
+ unsigned i;
+ for (i = 0; i < ctl_stats.narenas; i++) {
+ if (tarenas[i] != NULL)
+ arena_purge_all(tarenas[i]);
+ }
+ } else {
+ assert(arena_ind < ctl_stats.narenas);
+ if (tarenas[arena_ind] != NULL)
+ arena_purge_all(tarenas[arena_ind]);
+ }
+}
+
+static int
+arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+
+ READONLY();
+ WRITEONLY();
+ malloc_mutex_lock(&ctl_mtx);
+ arena_purge(mib[1]);
+ malloc_mutex_unlock(&ctl_mtx);
+
+ ret = 0;
+label_return:
+ return (ret);
+}
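arena_i_purge_ctl() takes the arena index from the MIB, and arena_purge() treats an index equal to the number of arenas as "purge all arenas". Forcing a purge of a single arena from application code (name built at run time; unprefixed symbol assumed):

    #include <stdio.h>
    #include <stddef.h>
    #include <jemalloc/jemalloc.h>

    /* Release unused dirty pages held by arena `ind`. */
    static void
    purge_arena(unsigned ind)
    {
        char name[64];

        snprintf(name, sizeof(name), "arena.%u.purge", ind);
        (void)mallctl(name, NULL, NULL, NULL, 0);
    }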
+
+static int
+arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret, i;
+ bool match, err;
+ const char *dss;
+ unsigned arena_ind = mib[1];
+ dss_prec_t dss_prec_old = dss_prec_limit;
+ dss_prec_t dss_prec = dss_prec_limit;
+
+ malloc_mutex_lock(&ctl_mtx);
+ WRITE(dss, const char *);
+ match = false;
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strcmp(dss_prec_names[i], dss) == 0) {
+ dss_prec = i;
+ match = true;
+ break;
+ }
+ }
+ if (match == false) {
+ ret = EINVAL;
+ goto label_return;
+ }
+
+ if (arena_ind < ctl_stats.narenas) {
+ arena_t *arena = arenas[arena_ind];
+ if (arena != NULL) {
+ dss_prec_old = arena_dss_prec_get(arena);
+ arena_dss_prec_set(arena, dss_prec);
+ err = false;
+ } else
+ err = true;
+ } else {
+ dss_prec_old = chunk_dss_prec_get();
+ err = chunk_dss_prec_set(dss_prec);
+ }
+ dss = dss_prec_names[dss_prec_old];
+ READ(dss, const char *);
+ if (err) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+static const ctl_named_node_t *
+arena_i_index(const size_t *mib, size_t miblen, size_t i)
+{
+ const ctl_named_node_t * ret;
+
+ malloc_mutex_lock(&ctl_mtx);
+ if (i > ctl_stats.narenas) {
+ ret = NULL;
+ goto label_return;
+ }
+
+ ret = super_arena_i_node;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
+
+
+/******************************************************************************/
+
+CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
+CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
+CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
+static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{
- if (i > nbins)
+ if (i > NBINS)
return (NULL);
return (super_arenas_bin_i_node);
}
-CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << PAGE_SHIFT), size_t)
-const ctl_node_t *
+CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
+static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{
@@ -1306,7 +1407,27 @@ arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
return (super_arenas_lrun_i_node);
}
-CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)
+static int
+arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen)
+{
+ int ret;
+ unsigned narenas;
+
+ malloc_mutex_lock(&ctl_mtx);
+ READONLY();
+ if (*oldlenp != sizeof(unsigned)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ narenas = ctl_stats.narenas;
+ READ(narenas, unsigned);
+
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
@@ -1317,49 +1438,28 @@ arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
malloc_mutex_lock(&ctl_mtx);
READONLY();
- if (*oldlenp != narenas * sizeof(bool)) {
+ if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
ret = EINVAL;
- nread = (*oldlenp < narenas * sizeof(bool))
- ? (*oldlenp / sizeof(bool)) : narenas;
+ nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
+ ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
} else {
ret = 0;
- nread = narenas;
+ nread = ctl_stats.narenas;
}
for (i = 0; i < nread; i++)
((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;
-RETURN:
+label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
-CTL_RO_NL_GEN(arenas_cacheline, CACHELINE, size_t)
-CTL_RO_NL_GEN(arenas_subpage, SUBPAGE, size_t)
-CTL_RO_NL_GEN(arenas_pagesize, PAGE_SIZE, size_t)
-CTL_RO_NL_GEN(arenas_chunksize, chunksize, size_t)
-#ifdef JEMALLOC_TINY
-CTL_RO_NL_GEN(arenas_tspace_min, (1U << LG_TINY_MIN), size_t)
-CTL_RO_NL_GEN(arenas_tspace_max, (qspace_min >> 1), size_t)
-#endif
-CTL_RO_NL_GEN(arenas_qspace_min, qspace_min, size_t)
-CTL_RO_NL_GEN(arenas_qspace_max, qspace_max, size_t)
-CTL_RO_NL_GEN(arenas_cspace_min, cspace_min, size_t)
-CTL_RO_NL_GEN(arenas_cspace_max, cspace_max, size_t)
-CTL_RO_NL_GEN(arenas_sspace_min, sspace_min, size_t)
-CTL_RO_NL_GEN(arenas_sspace_max, sspace_max, size_t)
-#ifdef JEMALLOC_TCACHE
-CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
-#endif
-CTL_RO_NL_GEN(arenas_ntbins, ntbins, unsigned)
-CTL_RO_NL_GEN(arenas_nqbins, nqbins, unsigned)
-CTL_RO_NL_GEN(arenas_ncbins, ncbins, unsigned)
-CTL_RO_NL_GEN(arenas_nsbins, nsbins, unsigned)
-CTL_RO_NL_GEN(arenas_nbins, nbins, unsigned)
-#ifdef JEMALLOC_TCACHE
-CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
-#endif
+CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
+CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
+CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
+CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)
static int
@@ -1367,42 +1467,50 @@ arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
{
int ret;
- unsigned arena;
+ unsigned arena_ind;
+ malloc_mutex_lock(&ctl_mtx);
WRITEONLY();
- arena = UINT_MAX;
- WRITE(arena, unsigned);
- if (newp != NULL && arena >= narenas) {
+ arena_ind = UINT_MAX;
+ WRITE(arena_ind, unsigned);
+ if (newp != NULL && arena_ind >= ctl_stats.narenas)
ret = EFAULT;
- goto RETURN;
- } else {
- arena_t *tarenas[narenas];
+ else {
+ if (arena_ind == UINT_MAX)
+ arena_ind = ctl_stats.narenas;
+ arena_purge(arena_ind);
+ ret = 0;
+ }
- malloc_mutex_lock(&arenas_lock);
- memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
- malloc_mutex_unlock(&arenas_lock);
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
+ return (ret);
+}
- if (arena == UINT_MAX) {
- unsigned i;
- for (i = 0; i < narenas; i++) {
- if (tarenas[i] != NULL)
- arena_purge_all(tarenas[i]);
- }
- } else {
- assert(arena < narenas);
- if (tarenas[arena] != NULL)
- arena_purge_all(tarenas[arena]);
- }
+static int
+arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
+{
+ int ret;
+ unsigned narenas;
+
+ malloc_mutex_lock(&ctl_mtx);
+ READONLY();
+ if (ctl_grow()) {
+ ret = EAGAIN;
+ goto label_return;
}
+ narenas = ctl_stats.narenas - 1;
+ READ(narenas, unsigned);
ret = 0;
-RETURN:
+label_return:
+ malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
/******************************************************************************/
-#ifdef JEMALLOC_PROF
static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen)
@@ -1410,6 +1518,9 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
bool oldval;
+ if (config_prof == false)
+ return (ENOENT);
+
malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
oldval = opt_prof_active;
if (newp != NULL) {
@@ -1424,7 +1535,7 @@ prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
READ(oldval, bool);
ret = 0;
-RETURN:
+label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
@@ -1436,92 +1547,88 @@ prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
int ret;
const char *filename = NULL;
+ if (config_prof == false)
+ return (ENOENT);
+
WRITEONLY();
WRITE(filename, const char *);
if (prof_mdump(filename)) {
ret = EFAULT;
- goto RETURN;
+ goto label_return;
}
ret = 0;
-RETURN:
+label_return:
return (ret);
}
-CTL_RO_NL_GEN(prof_interval, prof_interval, uint64_t)
-#endif
+CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
/******************************************************************************/
-#ifdef JEMALLOC_STATS
-CTL_RO_GEN(stats_chunks_current, ctl_stats.chunks.current, size_t)
-CTL_RO_GEN(stats_chunks_total, ctl_stats.chunks.total, uint64_t)
-CTL_RO_GEN(stats_chunks_high, ctl_stats.chunks.high, size_t)
-CTL_RO_GEN(stats_huge_allocated, huge_allocated, size_t)
-CTL_RO_GEN(stats_huge_nmalloc, huge_nmalloc, uint64_t)
-CTL_RO_GEN(stats_huge_ndalloc, huge_ndalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_small_allocated,
+CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
+ size_t)
+CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
+CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
ctl_stats.arenas[mib[2]].allocated_small, size_t)
-CTL_RO_GEN(stats_arenas_i_small_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
-CTL_RO_GEN(stats_arenas_i_small_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
-CTL_RO_GEN(stats_arenas_i_small_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
-CTL_RO_GEN(stats_arenas_i_large_allocated,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
-CTL_RO_GEN(stats_arenas_i_large_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
-CTL_RO_GEN(stats_arenas_i_large_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
-CTL_RO_GEN(stats_arenas_i_large_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_allocated,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
-#ifdef JEMALLOC_TCACHE
-CTL_RO_GEN(stats_arenas_i_bins_j_nfills,
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nflushes,
+CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
-#endif
-CTL_RO_GEN(stats_arenas_i_bins_j_nruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_nreruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_highruns,
- ctl_stats.arenas[mib[2]].bstats[mib[4]].highruns, size_t)
-CTL_RO_GEN(stats_arenas_i_bins_j_curruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
-const ctl_node_t *
+static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{
- if (j > nbins)
+ if (j > NBINS)
return (NULL);
return (super_stats_arenas_i_bins_j_node);
}
-CTL_RO_GEN(stats_arenas_i_lruns_j_nmalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_ndalloc,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_nrequests,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_curruns,
+CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)
-CTL_RO_GEN(stats_arenas_i_lruns_j_highruns,
- ctl_stats.arenas[mib[2]].lstats[mib[4]].highruns, size_t)
-const ctl_node_t *
+static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{
@@ -1530,118 +1637,37 @@ stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
return (super_stats_arenas_i_lruns_j_node);
}
-#endif
+CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
+CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
-#ifdef JEMALLOC_STATS
-CTL_RO_GEN(stats_arenas_i_mapped, ctl_stats.arenas[mib[2]].astats.mapped,
- size_t)
-CTL_RO_GEN(stats_arenas_i_npurge, ctl_stats.arenas[mib[2]].astats.npurge,
- uint64_t)
-CTL_RO_GEN(stats_arenas_i_nmadvise, ctl_stats.arenas[mib[2]].astats.nmadvise,
- uint64_t)
-CTL_RO_GEN(stats_arenas_i_purged, ctl_stats.arenas[mib[2]].astats.purged,
- uint64_t)
-#endif
-
-const ctl_node_t *
+CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
+ ctl_stats.arenas[mib[2]].astats.mapped, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
+ ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
+ ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
+ ctl_stats.arenas[mib[2]].astats.purged, uint64_t)
+
+static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
- const ctl_node_t * ret;
+ const ctl_named_node_t * ret;
malloc_mutex_lock(&ctl_mtx);
- if (ctl_stats.arenas[i].initialized == false) {
+ if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
ret = NULL;
- goto RETURN;
+ goto label_return;
}
ret = super_stats_arenas_i_node;
-RETURN:
+label_return:
malloc_mutex_unlock(&ctl_mtx);
return (ret);
}
-#ifdef JEMALLOC_STATS
-CTL_RO_GEN(stats_allocated, ctl_stats.allocated, size_t)
-CTL_RO_GEN(stats_active, ctl_stats.active, size_t)
-CTL_RO_GEN(stats_mapped, ctl_stats.mapped, size_t)
-#endif
-
-/******************************************************************************/
-
-#ifdef JEMALLOC_SWAP
-# ifdef JEMALLOC_STATS
-CTL_RO_GEN(swap_avail, ctl_stats.swap_avail, size_t)
-# endif
-
-static int
-swap_prezeroed_ctl(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
-{
- int ret;
-
- malloc_mutex_lock(&ctl_mtx);
- if (swap_enabled) {
- READONLY();
- } else {
- /*
- * swap_prezeroed isn't actually used by the swap code until it
- * is set during a successful chunk_swap_enabled() call. We
- * use it here to store the value that we'll pass to
- * chunk_swap_enable() in a swap.fds mallctl(). This is not
- * very clean, but the obvious alternatives are even worse.
- */
- WRITE(swap_prezeroed, bool);
- }
-
- READ(swap_prezeroed, bool);
-
- ret = 0;
-RETURN:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-
-CTL_RO_GEN(swap_nfds, swap_nfds, size_t)
-
-static int
-swap_fds_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
- void *newp, size_t newlen)
-{
- int ret;
-
- malloc_mutex_lock(&ctl_mtx);
- if (swap_enabled) {
- READONLY();
- } else if (newp != NULL) {
- size_t nfds = newlen / sizeof(int);
-
- {
- int fds[nfds];
-
- memcpy(fds, newp, nfds * sizeof(int));
- if (chunk_swap_enable(fds, nfds, swap_prezeroed)) {
- ret = EFAULT;
- goto RETURN;
- }
- }
- }
-
- if (oldp != NULL && oldlenp != NULL) {
- if (*oldlenp != swap_nfds * sizeof(int)) {
- size_t copylen = (swap_nfds * sizeof(int) <= *oldlenp)
- ? swap_nfds * sizeof(int) : *oldlenp;
-
- memcpy(oldp, swap_fds, copylen);
- ret = EINVAL;
- goto RETURN;
- } else
- memcpy(oldp, swap_fds, *oldlenp);
- }
-
- ret = 0;
-RETURN:
- malloc_mutex_unlock(&ctl_mtx);
- return (ret);
-}
-#endif
+CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
+CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
+CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
+CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)
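
The reworked ctl.c above gates every stats/options node on the relevant config_* flag and adds "arena.<i>.purge", "arena.<i>.dss" and "arenas.extend" nodes. A usage sketch (illustration only, not part of the patch), assuming an unprefixed build that exposes mallctl(); a --with-jemalloc-prefix build exposes je_mallctl() instead, and ctl_demo() plus its variable names are purely illustrative:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
ctl_demo(void)
{
	size_t allocated, sz;
	unsigned narenas, new_ind;
	const char *dss = "secondary";
	char cmd[64];

	/* stats.* nodes answer only when built with --enable-stats. */
	sz = sizeof(size_t);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		printf("allocated: %zu bytes\n", allocated);

	/* arenas.narenas is now function-backed by ctl_stats.narenas. */
	sz = sizeof(unsigned);
	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);

	/* arena.<i>.dss selects dss/mmap precedence for a single arena. */
	mallctl("arena.0.dss", NULL, NULL, &dss, sizeof(const char *));

	/* arena.<i>.purge with i == arenas.narenas purges every arena. */
	snprintf(cmd, sizeof(cmd), "arena.%u.purge", narenas);
	mallctl(cmd, NULL, NULL, NULL, 0);

	/* arenas.extend grows the arena array and returns the new index. */
	sz = sizeof(unsigned);
	mallctl("arenas.extend", &new_ind, &sz, NULL, 0);
}
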
diff --git a/dep/jemalloc/src/extent.c b/dep/jemalloc/src/extent.c
index 3c04d3aa5d1..8c09b486ed8 100644
--- a/dep/jemalloc/src/extent.c
+++ b/dep/jemalloc/src/extent.c
@@ -3,7 +3,6 @@
/******************************************************************************/
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
static inline int
extent_szad_comp(extent_node_t *a, extent_node_t *b)
{
@@ -25,7 +24,6 @@ extent_szad_comp(extent_node_t *a, extent_node_t *b)
/* Generate red-black tree functions. */
rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad,
extent_szad_comp)
-#endif
static inline int
extent_ad_comp(extent_node_t *a, extent_node_t *b)
diff --git a/dep/jemalloc/src/hash.c b/dep/jemalloc/src/hash.c
index 6a13d7a03c0..cfa4da0275c 100644
--- a/dep/jemalloc/src/hash.c
+++ b/dep/jemalloc/src/hash.c
@@ -1,2 +1,2 @@
-#define HASH_C_
+#define JEMALLOC_HASH_C_
#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/dep/jemalloc/src/huge.c b/dep/jemalloc/src/huge.c
index 0aadc4339a9..aa08d43d362 100644
--- a/dep/jemalloc/src/huge.c
+++ b/dep/jemalloc/src/huge.c
@@ -4,11 +4,9 @@
/******************************************************************************/
/* Data. */
-#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
-#endif
malloc_mutex_t huge_mtx;
@@ -20,9 +18,17 @@ static extent_tree_t huge;
void *
huge_malloc(size_t size, bool zero)
{
+
+ return (huge_palloc(size, chunksize, zero));
+}
+
+void *
+huge_palloc(size_t size, size_t alignment, bool zero)
+{
void *ret;
size_t csize;
extent_node_t *node;
+ bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
@@ -37,7 +43,13 @@ huge_malloc(size_t size, bool zero)
if (node == NULL)
return (NULL);
- ret = chunk_alloc(csize, false, &zero);
+ /*
+ * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+ * it is possible to make correct junk/zero fill decisions below.
+ */
+ is_zeroed = zero;
+ ret = chunk_alloc(csize, alignment, false, &is_zeroed,
+ chunk_dss_prec_get());
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
@@ -49,104 +61,19 @@ huge_malloc(size_t size, bool zero)
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
- huge_nmalloc++;
- huge_allocated += csize;
-#endif
+ if (config_stats) {
+ stats_cactive_add(csize);
+ huge_nmalloc++;
+ huge_allocated += csize;
+ }
malloc_mutex_unlock(&huge_mtx);
-#ifdef JEMALLOC_FILL
- if (zero == false) {
+ if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
- else if (opt_zero)
+ else if (opt_zero && is_zeroed == false)
memset(ret, 0, csize);
}
-#endif
-
- return (ret);
-}
-
-/* Only handles large allocations that require more than chunk alignment. */
-void *
-huge_palloc(size_t size, size_t alignment, bool zero)
-{
- void *ret;
- size_t alloc_size, chunk_size, offset;
- extent_node_t *node;
-
- /*
- * This allocation requires alignment that is even larger than chunk
- * alignment. This means that huge_malloc() isn't good enough.
- *
- * Allocate almost twice as many chunks as are demanded by the size or
- * alignment, in order to assure the alignment can be achieved, then
- * unmap leading and trailing chunks.
- */
- assert(alignment >= chunksize);
-
- chunk_size = CHUNK_CEILING(size);
-
- if (size >= alignment)
- alloc_size = chunk_size + alignment - chunksize;
- else
- alloc_size = (alignment << 1) - chunksize;
-
- /* Allocate an extent node with which to track the chunk. */
- node = base_node_alloc();
- if (node == NULL)
- return (NULL);
-
- ret = chunk_alloc(alloc_size, false, &zero);
- if (ret == NULL) {
- base_node_dealloc(node);
- return (NULL);
- }
-
- offset = (uintptr_t)ret & (alignment - 1);
- assert((offset & chunksize_mask) == 0);
- assert(offset < alloc_size);
- if (offset == 0) {
- /* Trim trailing space. */
- chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
- - chunk_size);
- } else {
- size_t trailsize;
-
- /* Trim leading space. */
- chunk_dealloc(ret, alignment - offset);
-
- ret = (void *)((uintptr_t)ret + (alignment - offset));
-
- trailsize = alloc_size - (alignment - offset) - chunk_size;
- if (trailsize != 0) {
- /* Trim trailing space. */
- assert(trailsize < alloc_size);
- chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
- trailsize);
- }
- }
-
- /* Insert node into huge. */
- node->addr = ret;
- node->size = chunk_size;
-
- malloc_mutex_lock(&huge_mtx);
- extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
- huge_nmalloc++;
- huge_allocated += chunk_size;
-#endif
- malloc_mutex_unlock(&huge_mtx);
-
-#ifdef JEMALLOC_FILL
- if (zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, chunk_size);
- else if (opt_zero)
- memset(ret, 0, chunk_size);
- }
-#endif
return (ret);
}
@@ -162,12 +89,10 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
-#ifdef JEMALLOC_FILL
- if (opt_junk && size < oldsize) {
+ if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
-#endif
return (ptr);
}
@@ -177,7 +102,7 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
void *
huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
- size_t alignment, bool zero)
+ size_t alignment, bool zero, bool try_tcache_dalloc)
{
void *ret;
size_t copysize;
@@ -192,7 +117,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
* different size class. In that case, fall back to allocating new
* space and copying.
*/
- if (alignment != 0)
+ if (alignment > chunksize)
ret = huge_palloc(size + extra, alignment, zero);
else
ret = huge_malloc(size + extra, zero);
@@ -201,7 +126,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
if (extra == 0)
return (NULL);
/* Try again, this time without extra. */
- if (alignment != 0)
+ if (alignment > chunksize)
ret = huge_palloc(size, alignment, zero);
else
ret = huge_malloc(size, zero);
@@ -216,22 +141,22 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
copysize = (size < oldsize) ? size : oldsize;
+#ifdef JEMALLOC_MREMAP
/*
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
- * source nor the destination are in swap or dss.
+ * source nor the destination are in dss.
*/
-#ifdef JEMALLOC_MREMAP_FIXED
- if (oldsize >= chunksize
-# ifdef JEMALLOC_SWAP
- && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
- chunk_in_swap(ret) == false))
-# endif
-# ifdef JEMALLOC_DSS
- && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
-# endif
- ) {
+ if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
+ == false && chunk_in_dss(ret) == false))) {
size_t newsize = huge_salloc(ret);
+ /*
+ * Remove ptr from the tree of huge allocations before
+ * performing the remap operation, in order to avoid the
+ * possibility of another thread acquiring that mapping before
+ * this one removes it from the tree.
+ */
+ huge_dalloc(ptr, false);
if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
ret) == MAP_FAILED) {
/*
@@ -244,21 +169,19 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
*/
char buf[BUFERROR_BUF];
- buferror(errno, buf, sizeof(buf));
- malloc_write("<jemalloc>: Error in mremap(): ");
- malloc_write(buf);
- malloc_write("\n");
+ buferror(buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in mremap(): %s\n",
+ buf);
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
- idalloc(ptr);
- } else
- huge_dalloc(ptr, false);
+ chunk_dealloc_mmap(ptr, oldsize);
+ }
} else
#endif
{
memcpy(ret, ptr, copysize);
- idalloc(ptr);
+ iqallocx(ptr, try_tcache_dalloc);
}
return (ret);
}
@@ -277,23 +200,18 @@ huge_dalloc(void *ptr, bool unmap)
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
-#ifdef JEMALLOC_STATS
- huge_ndalloc++;
- huge_allocated -= node->size;
-#endif
+ if (config_stats) {
+ stats_cactive_sub(node->size);
+ huge_ndalloc++;
+ huge_allocated -= node->size;
+ }
malloc_mutex_unlock(&huge_mtx);
- if (unmap) {
- /* Unmap chunk. */
-#ifdef JEMALLOC_FILL
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
- if (opt_junk)
- memset(node->addr, 0x5a, node->size);
-#endif
-#endif
- chunk_dealloc(node->addr, node->size);
- }
+ if (unmap && config_fill && config_dss && opt_junk)
+ memset(node->addr, 0x5a, node->size);
+
+ chunk_dealloc(node->addr, node->size, unmap);
base_node_dealloc(node);
}
@@ -318,7 +236,6 @@ huge_salloc(const void *ptr)
return (ret);
}
-#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
@@ -355,7 +272,6 @@ huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
malloc_mutex_unlock(&huge_mtx);
}
-#endif
bool
huge_boot(void)
@@ -366,11 +282,32 @@ huge_boot(void)
return (true);
extent_tree_ad_new(&huge);
-#ifdef JEMALLOC_STATS
- huge_nmalloc = 0;
- huge_ndalloc = 0;
- huge_allocated = 0;
-#endif
+ if (config_stats) {
+ huge_nmalloc = 0;
+ huge_ndalloc = 0;
+ huge_allocated = 0;
+ }
return (false);
}
+
+void
+huge_prefork(void)
+{
+
+ malloc_mutex_prefork(&huge_mtx);
+}
+
+void
+huge_postfork_parent(void)
+{
+
+ malloc_mutex_postfork_parent(&huge_mtx);
+}
+
+void
+huge_postfork_child(void)
+{
+
+ malloc_mutex_postfork_child(&huge_mtx);
+}
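
huge.c now exports huge_prefork(), huge_postfork_parent() and huge_postfork_child(), mirroring the pthread_atfork() registration added in jemalloc.c further down. A minimal sketch of that lock-across-fork pattern, with a plain pthread mutex standing in for jemalloc's malloc_mutex_t; the demo_* names are illustrative only:

#include <pthread.h>

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Take the lock before fork() so the child never inherits a mutex held by
 * a thread that does not survive the fork; release it on both sides. */
static void demo_prefork(void)         { pthread_mutex_lock(&demo_mtx); }
static void demo_postfork_parent(void) { pthread_mutex_unlock(&demo_mtx); }
static void demo_postfork_child(void)  { pthread_mutex_unlock(&demo_mtx); }

static void
demo_install_fork_hooks(void)
{
	pthread_atfork(demo_prefork, demo_postfork_parent, demo_postfork_child);
}
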
diff --git a/dep/jemalloc/src/jemalloc.c b/dep/jemalloc/src/jemalloc.c
index 2aebc51dd19..bc350ed953b 100644
--- a/dep/jemalloc/src/jemalloc.c
+++ b/dep/jemalloc/src/jemalloc.c
@@ -4,105 +4,114 @@
/******************************************************************************/
/* Data. */
-malloc_mutex_t arenas_lock;
-arena_t **arenas;
-unsigned narenas;
-static unsigned next_arena;
+malloc_tsd_data(, arenas, arena_t *, NULL)
+malloc_tsd_data(, thread_allocated, thread_allocated_t,
+ THREAD_ALLOCATED_INITIALIZER)
-#ifndef NO_TLS
-__thread arena_t *arenas_tls JEMALLOC_ATTR(tls_model("initial-exec"));
+/* Runtime configuration options. */
+const char *je_malloc_conf;
+bool opt_abort =
+#ifdef JEMALLOC_DEBUG
+ true
#else
-pthread_key_t arenas_tsd;
+ false
#endif
-
-#ifdef JEMALLOC_STATS
-# ifndef NO_TLS
-__thread thread_allocated_t thread_allocated_tls;
-# else
-pthread_key_t thread_allocated_tsd;
-# endif
+ ;
+bool opt_junk =
+#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
+ true
+#else
+ false
#endif
+ ;
+size_t opt_quarantine = ZU(0);
+bool opt_redzone = false;
+bool opt_utrace = false;
+bool opt_valgrind = false;
+bool opt_xmalloc = false;
+bool opt_zero = false;
+size_t opt_narenas = 0;
+
+unsigned ncpus;
+
+malloc_mutex_t arenas_lock;
+arena_t **arenas;
+unsigned narenas_total;
+unsigned narenas_auto;
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
+#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
-static pthread_t malloc_initializer = (unsigned long)0;
+# define NO_INITIALIZER ((unsigned long)0)
+# define INITIALIZER pthread_self()
+# define IS_INITIALIZER (malloc_initializer == pthread_self())
+static pthread_t malloc_initializer = NO_INITIALIZER;
+#else
+# define NO_INITIALIZER false
+# define INITIALIZER true
+# define IS_INITIALIZER malloc_initializer
+static bool malloc_initializer = NO_INITIALIZER;
+#endif
/* Used to avoid initialization races. */
-static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
+#ifdef _WIN32
+static malloc_mutex_t init_lock;
-#ifdef DYNAMIC_PAGE_SHIFT
-size_t pagesize;
-size_t pagesize_mask;
-size_t lg_pagesize;
-#endif
+JEMALLOC_ATTR(constructor)
+static void WINAPI
+_init_init_lock(void)
+{
-unsigned ncpus;
+ malloc_mutex_init(&init_lock);
+}
-/* Runtime configuration options. */
-const char *JEMALLOC_P(malloc_conf) JEMALLOC_ATTR(visibility("default"));
-#ifdef JEMALLOC_DEBUG
-bool opt_abort = true;
-# ifdef JEMALLOC_FILL
-bool opt_junk = true;
-# endif
-#else
-bool opt_abort = false;
-# ifdef JEMALLOC_FILL
-bool opt_junk = false;
-# endif
-#endif
-#ifdef JEMALLOC_SYSV
-bool opt_sysv = false;
+#ifdef _MSC_VER
+# pragma section(".CRT$XCU", read)
+JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
+static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
-#ifdef JEMALLOC_XMALLOC
-bool opt_xmalloc = false;
+
+#else
+static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
-#ifdef JEMALLOC_FILL
-bool opt_zero = false;
+
+typedef struct {
+ void *p; /* Input pointer (as in realloc(p, s)). */
+ size_t s; /* Request size. */
+ void *r; /* Result pointer. */
+} malloc_utrace_t;
+
+#ifdef JEMALLOC_UTRACE
+# define UTRACE(a, b, c) do { \
+ if (opt_utrace) { \
+ int utrace_serrno = errno; \
+ malloc_utrace_t ut; \
+ ut.p = (a); \
+ ut.s = (b); \
+ ut.r = (c); \
+ utrace(&ut, sizeof(ut)); \
+ errno = utrace_serrno; \
+ } \
+} while (0)
+#else
+# define UTRACE(a, b, c)
#endif
-size_t opt_narenas = 0;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static void wrtmessage(void *cbopaque, const char *s);
static void stats_print_atexit(void);
static unsigned malloc_ncpus(void);
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
-static void thread_allocated_cleanup(void *arg);
-#endif
static bool malloc_conf_next(char const **opts_p, char const **k_p,
size_t *klen_p, char const **v_p, size_t *vlen_p);
static void malloc_conf_error(const char *msg, const char *k, size_t klen,
const char *v, size_t vlen);
static void malloc_conf_init(void);
static bool malloc_init_hard(void);
-
-/******************************************************************************/
-/* malloc_message() setup. */
-
-#ifdef JEMALLOC_HAVE_ATTR
-JEMALLOC_ATTR(visibility("hidden"))
-#else
-static
-#endif
-void
-wrtmessage(void *cbopaque, const char *s)
-{
-#ifdef JEMALLOC_CC_SILENCE
- int result =
-#endif
- write(STDERR_FILENO, s, strlen(s));
-#ifdef JEMALLOC_CC_SILENCE
- if (result < 0)
- result = errno;
-#endif
-}
-
-void (*JEMALLOC_P(malloc_message))(void *, const char *s)
- JEMALLOC_ATTR(visibility("default")) = wrtmessage;
+static int imemalign(void **memptr, size_t alignment, size_t size,
+ size_t min_alignment);
/******************************************************************************/
/*
@@ -115,9 +124,7 @@ arenas_extend(unsigned ind)
{
arena_t *ret;
- /* Allocate enough space for trailing bins. */
- ret = (arena_t *)base_alloc(offsetof(arena_t, bins)
- + (sizeof(arena_bin_t) * nbins));
+ ret = (arena_t *)base_alloc(sizeof(arena_t));
if (ret != NULL && arena_new(ret, ind) == false) {
arenas[ind] = ret;
return (ret);
@@ -137,80 +144,101 @@ arenas_extend(unsigned ind)
return (arenas[0]);
}
-/*
- * Choose an arena based on a per-thread value (slow-path code only, called
- * only by choose_arena()).
- */
+/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
arena_t *ret;
- if (narenas > 1) {
+ if (narenas_auto > 1) {
+ unsigned i, choose, first_null;
+
+ choose = 0;
+ first_null = narenas_auto;
malloc_mutex_lock(&arenas_lock);
- if ((ret = arenas[next_arena]) == NULL)
- ret = arenas_extend(next_arena);
- next_arena = (next_arena + 1) % narenas;
+ assert(arenas[0] != NULL);
+ for (i = 1; i < narenas_auto; i++) {
+ if (arenas[i] != NULL) {
+ /*
+ * Choose the first arena that has the lowest
+ * number of threads assigned to it.
+ */
+ if (arenas[i]->nthreads <
+ arenas[choose]->nthreads)
+ choose = i;
+ } else if (first_null == narenas_auto) {
+ /*
+ * Record the index of the first uninitialized
+ * arena, in case all extant arenas are in use.
+ *
+ * NB: It is possible for there to be
+ * discontinuities in terms of initialized
+ * versus uninitialized arenas, due to the
+ * "thread.arena" mallctl.
+ */
+ first_null = i;
+ }
+ }
+
+ if (arenas[choose]->nthreads == 0
+ || first_null == narenas_auto) {
+ /*
+ * Use an unloaded arena, or the least loaded arena if
+ * all arenas are already initialized.
+ */
+ ret = arenas[choose];
+ } else {
+ /* Initialize a new arena. */
+ ret = arenas_extend(first_null);
+ }
+ ret->nthreads++;
malloc_mutex_unlock(&arenas_lock);
- } else
+ } else {
ret = arenas[0];
+ malloc_mutex_lock(&arenas_lock);
+ ret->nthreads++;
+ malloc_mutex_unlock(&arenas_lock);
+ }
- ARENA_SET(ret);
+ arenas_tsd_set(&ret);
return (ret);
}
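
The NB in the comment above concerns the "thread.arena" mallctl, which can bind a thread to an arbitrary arena index and thereby create gaps in the initialized-arena sequence. A hedged sketch of such a rebind (unprefixed mallctl() assumed; bind_to_arena() is an illustrative name):

#include <jemalloc/jemalloc.h>

static int
bind_to_arena(unsigned ind)
{
	/* Writing an index rebinds the calling thread to that arena. */
	return (mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind)));
}
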
-/*
- * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
- * provide a wrapper.
- */
-int
-buferror(int errnum, char *buf, size_t buflen)
-{
-#ifdef _GNU_SOURCE
- char *b = strerror_r(errno, buf, buflen);
- if (b != buf) {
- strncpy(buf, b, buflen);
- buf[buflen-1] = '\0';
- }
- return (0);
-#else
- return (strerror_r(errno, buf, buflen));
-#endif
-}
-
static void
stats_print_atexit(void)
{
-#if (defined(JEMALLOC_TCACHE) && defined(JEMALLOC_STATS))
- unsigned i;
+ if (config_tcache && config_stats) {
+ unsigned narenas, i;
- /*
- * Merge stats from extant threads. This is racy, since individual
- * threads do not lock when recording tcache stats events. As a
- * consequence, the final stats may be slightly out of date by the time
- * they are reported, if other threads continue to allocate.
- */
- for (i = 0; i < narenas; i++) {
- arena_t *arena = arenas[i];
- if (arena != NULL) {
- tcache_t *tcache;
+ /*
+ * Merge stats from extant threads. This is racy, since
+ * individual threads do not lock when recording tcache stats
+ * events. As a consequence, the final stats may be slightly
+ * out of date by the time they are reported, if other threads
+ * continue to allocate.
+ */
+ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
+ arena_t *arena = arenas[i];
+ if (arena != NULL) {
+ tcache_t *tcache;
- /*
- * tcache_stats_merge() locks bins, so if any code is
- * introduced that acquires both arena and bin locks in
- * the opposite order, deadlocks may result.
- */
- malloc_mutex_lock(&arena->lock);
- ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tcache, arena);
+ /*
+ * tcache_stats_merge() locks bins, so if any
+ * code is introduced that acquires both arena
+ * and bin locks in the opposite order,
+ * deadlocks may result.
+ */
+ malloc_mutex_lock(&arena->lock);
+ ql_foreach(tcache, &arena->tcache_ql, link) {
+ tcache_stats_merge(tcache, arena);
+ }
+ malloc_mutex_unlock(&arena->lock);
}
- malloc_mutex_unlock(&arena->lock);
}
}
-#endif
- JEMALLOC_P(malloc_stats_print)(NULL, NULL, NULL);
+ je_malloc_stats_print(NULL, NULL, NULL);
}
/*
@@ -227,38 +255,57 @@ malloc_ncpus(void)
unsigned ret;
long result;
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ result = si.dwNumberOfProcessors;
+#else
result = sysconf(_SC_NPROCESSORS_ONLN);
+#endif
if (result == -1) {
/* Error. */
ret = 1;
- }
- ret = (unsigned)result;
+ } else {
+ ret = (unsigned)result;
+ }
return (ret);
}
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
-static void
-thread_allocated_cleanup(void *arg)
+void
+arenas_cleanup(void *arg)
{
- uint64_t *allocated = (uint64_t *)arg;
+ arena_t *arena = *(arena_t **)arg;
- if (allocated != NULL)
- idalloc(allocated);
+ malloc_mutex_lock(&arenas_lock);
+ arena->nthreads--;
+ malloc_mutex_unlock(&arenas_lock);
}
-#endif
-/*
- * FreeBSD's pthreads implementation calls malloc(3), so the malloc
- * implementation has to take pains to avoid infinite recursion during
- * initialization.
- */
-static inline bool
+static JEMALLOC_ATTR(always_inline) void
+malloc_thread_init(void)
+{
+
+ /*
+ * TSD initialization can't be safely done as a side effect of
+ * deallocation, because it is possible for a thread to do nothing but
+ * deallocate its TLS data via free(), in which case writing to TLS
+ * would cause write-after-free memory corruption. The quarantine
+ * facility *only* gets used as a side effect of deallocation, so make
+ * a best effort attempt at initializing its TSD by hooking all
+ * allocation events.
+ */
+ if (config_fill && opt_quarantine)
+ quarantine_alloc_hook();
+}
+
+static JEMALLOC_ATTR(always_inline) bool
malloc_init(void)
{
- if (malloc_initialized == false)
- return (malloc_init_hard());
+ if (malloc_initialized == false && malloc_init_hard())
+ return (true);
+ malloc_thread_init();
return (false);
}
@@ -274,68 +321,64 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
for (accept = false; accept == false;) {
switch (*opts) {
- case 'A': case 'B': case 'C': case 'D': case 'E':
- case 'F': case 'G': case 'H': case 'I': case 'J':
- case 'K': case 'L': case 'M': case 'N': case 'O':
- case 'P': case 'Q': case 'R': case 'S': case 'T':
- case 'U': case 'V': case 'W': case 'X': case 'Y':
- case 'Z':
- case 'a': case 'b': case 'c': case 'd': case 'e':
- case 'f': case 'g': case 'h': case 'i': case 'j':
- case 'k': case 'l': case 'm': case 'n': case 'o':
- case 'p': case 'q': case 'r': case 's': case 't':
- case 'u': case 'v': case 'w': case 'x': case 'y':
- case 'z':
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- case '_':
- opts++;
- break;
- case ':':
- opts++;
- *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
- *v_p = opts;
- accept = true;
- break;
- case '\0':
- if (opts != *opts_p) {
- malloc_write("<jemalloc>: Conf string "
- "ends with key\n");
- }
- return (true);
- default:
- malloc_write("<jemalloc>: Malformed conf "
- "string\n");
- return (true);
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
+ case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
+ case 'Y': case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
+ case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
+ case 's': case 't': case 'u': case 'v': case 'w': case 'x':
+ case 'y': case 'z':
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ case '_':
+ opts++;
+ break;
+ case ':':
+ opts++;
+ *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
+ *v_p = opts;
+ accept = true;
+ break;
+ case '\0':
+ if (opts != *opts_p) {
+ malloc_write("<jemalloc>: Conf string ends "
+ "with key\n");
+ }
+ return (true);
+ default:
+ malloc_write("<jemalloc>: Malformed conf string\n");
+ return (true);
}
}
for (accept = false; accept == false;) {
switch (*opts) {
- case ',':
- opts++;
- /*
- * Look ahead one character here, because the
- * next time this function is called, it will
- * assume that end of input has been cleanly
- * reached if no input remains, but we have
- * optimistically already consumed the comma if
- * one exists.
- */
- if (*opts == '\0') {
- malloc_write("<jemalloc>: Conf string "
- "ends with comma\n");
- }
- *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
- accept = true;
- break;
- case '\0':
- *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
- accept = true;
- break;
- default:
- opts++;
- break;
+ case ',':
+ opts++;
+ /*
+ * Look ahead one character here, because the next time
+ * this function is called, it will assume that end of
+ * input has been cleanly reached if no input remains,
+ * but we have optimistically already consumed the
+ * comma if one exists.
+ */
+ if (*opts == '\0') {
+ malloc_write("<jemalloc>: Conf string ends "
+ "with comma\n");
+ }
+ *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ case '\0':
+ *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
+ accept = true;
+ break;
+ default:
+ opts++;
+ break;
}
}
@@ -347,17 +390,9 @@ static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
size_t vlen)
{
- char buf[PATH_MAX + 1];
- malloc_write("<jemalloc>: ");
- malloc_write(msg);
- malloc_write(": ");
- memcpy(buf, k, klen);
- memcpy(&buf[klen], ":", 1);
- memcpy(&buf[klen+1], v, vlen);
- buf[klen+1+vlen] = '\0';
- malloc_write(buf);
- malloc_write("\n");
+ malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
+ (int)vlen, v);
}
static void
@@ -368,16 +403,32 @@ malloc_conf_init(void)
const char *opts, *k, *v;
size_t klen, vlen;
+ /*
+ * Automatically configure valgrind before processing options. The
+ * valgrind option remains in jemalloc 3.x for compatibility reasons.
+ */
+ if (config_valgrind) {
+ opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
+ if (config_fill && opt_valgrind) {
+ opt_junk = false;
+ assert(opt_zero == false);
+ opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
+ opt_redzone = true;
+ }
+ if (config_tcache && opt_valgrind)
+ opt_tcache = false;
+ }
+
for (i = 0; i < 3; i++) {
/* Get runtime configuration. */
switch (i) {
case 0:
- if (JEMALLOC_P(malloc_conf) != NULL) {
+ if (je_malloc_conf != NULL) {
/*
* Use options that were compiled into the
* program.
*/
- opts = JEMALLOC_P(malloc_conf);
+ opts = je_malloc_conf;
} else {
/* No configuration specified. */
buf[0] = '\0';
@@ -385,13 +436,14 @@ malloc_conf_init(void)
}
break;
case 1: {
+#ifndef _WIN32
int linklen;
const char *linkname =
-#ifdef JEMALLOC_PREFIX
+# ifdef JEMALLOC_PREFIX
"/etc/"JEMALLOC_PREFIX"malloc.conf"
-#else
+# else
"/etc/malloc.conf"
-#endif
+# endif
;
if ((linklen = readlink(linkname, buf,
@@ -402,14 +454,15 @@ malloc_conf_init(void)
*/
buf[linklen] = '\0';
opts = buf;
- } else {
+ } else
+#endif
+ {
/* No configuration specified. */
buf[0] = '\0';
opts = buf;
}
break;
- }
- case 2: {
+ } case 2: {
const char *envname =
#ifdef JEMALLOC_PREFIX
JEMALLOC_CPREFIX"MALLOC_CONF"
@@ -421,8 +474,8 @@ malloc_conf_init(void)
if ((opts = getenv(envname)) != NULL) {
/*
* Do nothing; opts is already initialized to
- * the value of the JEMALLOC_OPTIONS
- * environment variable.
+ * the value of the MALLOC_CONF environment
+ * variable.
*/
} else {
/* No configuration specified. */
@@ -430,8 +483,7 @@ malloc_conf_init(void)
opts = buf;
}
break;
- }
- default:
+ } default:
/* NOTREACHED */
assert(false);
buf[0] = '\0';
@@ -440,15 +492,15 @@ malloc_conf_init(void)
while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
&vlen) == false) {
-#define CONF_HANDLE_BOOL(n) \
- if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+#define CONF_HANDLE_BOOL(o, n) \
+ if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
if (strncmp("true", v, vlen) == 0 && \
vlen == sizeof("true")-1) \
- opt_##n = true; \
+ o = true; \
else if (strncmp("false", v, vlen) == \
0 && vlen == sizeof("false")-1) \
- opt_##n = false; \
+ o = false; \
else { \
malloc_conf_error( \
"Invalid conf value", \
@@ -456,36 +508,46 @@ malloc_conf_init(void)
} \
continue; \
}
-#define CONF_HANDLE_SIZE_T(n, min, max) \
- if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
+ if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
- unsigned long ul; \
+ uintmax_t um; \
char *end; \
\
- errno = 0; \
- ul = strtoul(v, &end, 0); \
- if (errno != 0 || (uintptr_t)end - \
+ set_errno(0); \
+ um = malloc_strtoumax(v, &end, 0); \
+ if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
malloc_conf_error( \
"Invalid conf value", \
k, klen, v, vlen); \
- } else if (ul < min || ul > max) { \
- malloc_conf_error( \
- "Out-of-range conf value", \
- k, klen, v, vlen); \
- } else \
- opt_##n = ul; \
+ } else if (clip) { \
+ if (um < min) \
+ o = min; \
+ else if (um > max) \
+ o = max; \
+ else \
+ o = um; \
+ } else { \
+ if (um < min || um > max) { \
+ malloc_conf_error( \
+ "Out-of-range " \
+ "conf value", \
+ k, klen, v, vlen); \
+ } else \
+ o = um; \
+ } \
continue; \
}
-#define CONF_HANDLE_SSIZE_T(n, min, max) \
- if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
+ if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
long l; \
char *end; \
\
- errno = 0; \
+ set_errno(0); \
l = strtol(v, &end, 0); \
- if (errno != 0 || (uintptr_t)end - \
+ if (get_errno() != 0 || (uintptr_t)end -\
(uintptr_t)v != vlen) { \
malloc_conf_error( \
"Invalid conf value", \
@@ -496,70 +558,98 @@ malloc_conf_init(void)
"Out-of-range conf value", \
k, klen, v, vlen); \
} else \
- opt_##n = l; \
+ o = l; \
continue; \
}
-#define CONF_HANDLE_CHAR_P(n, d) \
- if (sizeof(#n)-1 == klen && strncmp(#n, k, \
+#define CONF_HANDLE_CHAR_P(o, n, d) \
+ if (sizeof(n)-1 == klen && strncmp(n, k, \
klen) == 0) { \
size_t cpylen = (vlen <= \
- sizeof(opt_##n)-1) ? vlen : \
- sizeof(opt_##n)-1; \
- strncpy(opt_##n, v, cpylen); \
- opt_##n[cpylen] = '\0'; \
+ sizeof(o)-1) ? vlen : \
+ sizeof(o)-1; \
+ strncpy(o, v, cpylen); \
+ o[cpylen] = '\0'; \
continue; \
}
- CONF_HANDLE_BOOL(abort)
- CONF_HANDLE_SIZE_T(lg_qspace_max, LG_QUANTUM,
- PAGE_SHIFT-1)
- CONF_HANDLE_SIZE_T(lg_cspace_max, LG_QUANTUM,
- PAGE_SHIFT-1)
+ CONF_HANDLE_BOOL(opt_abort, "abort")
/*
- * Chunks always require at least one * header page,
- * plus one data page.
+ * Chunks always require at least one header page, plus
+ * one data page in the absence of redzones, or three
+ * pages in the presence of redzones. In order to
+ * simplify options processing, fix the limit based on
+ * config_fill.
*/
- CONF_HANDLE_SIZE_T(lg_chunk, PAGE_SHIFT+1,
- (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_SIZE_T(narenas, 1, SIZE_T_MAX)
- CONF_HANDLE_SSIZE_T(lg_dirty_mult, -1,
- (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_BOOL(stats_print)
-#ifdef JEMALLOC_FILL
- CONF_HANDLE_BOOL(junk)
- CONF_HANDLE_BOOL(zero)
-#endif
-#ifdef JEMALLOC_SYSV
- CONF_HANDLE_BOOL(sysv)
-#endif
-#ifdef JEMALLOC_XMALLOC
- CONF_HANDLE_BOOL(xmalloc)
-#endif
-#ifdef JEMALLOC_TCACHE
- CONF_HANDLE_BOOL(tcache)
- CONF_HANDLE_SSIZE_T(lg_tcache_gc_sweep, -1,
- (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_SSIZE_T(lg_tcache_max, -1,
- (sizeof(size_t) << 3) - 1)
-#endif
-#ifdef JEMALLOC_PROF
- CONF_HANDLE_BOOL(prof)
- CONF_HANDLE_CHAR_P(prof_prefix, "jeprof")
- CONF_HANDLE_SIZE_T(lg_prof_bt_max, 0, LG_PROF_BT_MAX)
- CONF_HANDLE_BOOL(prof_active)
- CONF_HANDLE_SSIZE_T(lg_prof_sample, 0,
- (sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(prof_accum)
- CONF_HANDLE_SSIZE_T(lg_prof_tcmax, -1,
- (sizeof(size_t) << 3) - 1)
- CONF_HANDLE_SSIZE_T(lg_prof_interval, -1,
- (sizeof(uint64_t) << 3) - 1)
- CONF_HANDLE_BOOL(prof_gdump)
- CONF_HANDLE_BOOL(prof_leak)
-#endif
-#ifdef JEMALLOC_SWAP
- CONF_HANDLE_BOOL(overcommit)
-#endif
+ CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
+ (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
+ true)
+ if (strncmp("dss", k, klen) == 0) {
+ int i;
+ bool match = false;
+ for (i = 0; i < dss_prec_limit; i++) {
+ if (strncmp(dss_prec_names[i], v, vlen)
+ == 0) {
+ if (chunk_dss_prec_set(i)) {
+ malloc_conf_error(
+ "Error setting dss",
+ k, klen, v, vlen);
+ } else {
+ opt_dss =
+ dss_prec_names[i];
+ match = true;
+ break;
+ }
+ }
+ }
+ if (match == false) {
+ malloc_conf_error("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ continue;
+ }
+ CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
+ SIZE_T_MAX, false)
+ CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
+ -1, (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
+ if (config_fill) {
+ CONF_HANDLE_BOOL(opt_junk, "junk")
+ CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
+ 0, SIZE_T_MAX, false)
+ CONF_HANDLE_BOOL(opt_redzone, "redzone")
+ CONF_HANDLE_BOOL(opt_zero, "zero")
+ }
+ if (config_utrace) {
+ CONF_HANDLE_BOOL(opt_utrace, "utrace")
+ }
+ if (config_valgrind) {
+ CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
+ }
+ if (config_xmalloc) {
+ CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
+ }
+ if (config_tcache) {
+ CONF_HANDLE_BOOL(opt_tcache, "tcache")
+ CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
+ "lg_tcache_max", -1,
+ (sizeof(size_t) << 3) - 1)
+ }
+ if (config_prof) {
+ CONF_HANDLE_BOOL(opt_prof, "prof")
+ CONF_HANDLE_CHAR_P(opt_prof_prefix,
+ "prof_prefix", "jeprof")
+ CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
+ CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
+ "lg_prof_sample", 0,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
+ CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
+ "lg_prof_interval", -1,
+ (sizeof(uint64_t) << 3) - 1)
+ CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
+ CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
+ CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
+ }
malloc_conf_error("Invalid conf pair", k, klen, v,
vlen);
#undef CONF_HANDLE_BOOL
@@ -567,14 +657,6 @@ malloc_conf_init(void)
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
}
-
- /* Validate configuration of options that are inter-related. */
- if (opt_lg_qspace_max+1 >= opt_lg_cspace_max) {
- malloc_write("<jemalloc>: Invalid lg_[qc]space_max "
- "relationship; restoring defaults\n");
- opt_lg_qspace_max = LG_QSPACE_MAX_DEFAULT;
- opt_lg_cspace_max = LG_CSPACE_MAX_DEFAULT;
- }
}
}
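
The option table above is filled from three sources in order: the compiled-in malloc_conf string, the /etc/malloc.conf symlink, and the MALLOC_CONF environment variable. A hedged example of the first source with purely illustrative values (a --with-jemalloc-prefix build prefixes both the symbol and the environment variable accordingly):

/* Compiled-in defaults, overridable by /etc/malloc.conf and MALLOC_CONF. */
const char *malloc_conf =
    "narenas:4,lg_chunk:22,dss:secondary,quarantine:0,redzone:false";
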
@@ -584,7 +666,7 @@ malloc_init_hard(void)
arena_t *init_arenas[1];
malloc_mutex_lock(&init_lock);
- if (malloc_initialized || malloc_initializer == pthread_self()) {
+ if (malloc_initialized || IS_INITIALIZER) {
/*
* Another thread initialized the allocator before this one
* acquired init_lock, or this thread is the initializing
@@ -593,7 +675,8 @@ malloc_init_hard(void)
malloc_mutex_unlock(&init_lock);
return (false);
}
- if (malloc_initializer != (unsigned long)0) {
+#ifdef JEMALLOC_THREADED_INIT
+ if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
/* Busy-wait until the initializing thread completes. */
do {
malloc_mutex_unlock(&init_lock);
@@ -603,44 +686,25 @@ malloc_init_hard(void)
malloc_mutex_unlock(&init_lock);
return (false);
}
-
-#ifdef DYNAMIC_PAGE_SHIFT
- /* Get page size. */
- {
- long result;
-
- result = sysconf(_SC_PAGESIZE);
- assert(result != -1);
- pagesize = (unsigned)result;
-
- /*
- * We assume that pagesize is a power of 2 when calculating
- * pagesize_mask and lg_pagesize.
- */
- assert(((result - 1) & result) == 0);
- pagesize_mask = result - 1;
- lg_pagesize = ffs((int)result) - 1;
- }
#endif
+ malloc_initializer = INITIALIZER;
-#ifdef JEMALLOC_PROF
- prof_boot0();
-#endif
+ malloc_tsd_boot();
+ if (config_prof)
+ prof_boot0();
malloc_conf_init();
+#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
+ && !defined(_WIN32))
/* Register fork handlers. */
- if (pthread_atfork(jemalloc_prefork, jemalloc_postfork,
- jemalloc_postfork) != 0) {
+ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
+ jemalloc_postfork_child) != 0) {
malloc_write("<jemalloc>: Error in pthread_atfork()\n");
if (opt_abort)
abort();
}
-
- if (ctl_boot()) {
- malloc_mutex_unlock(&init_lock);
- return (true);
- }
+#endif
if (opt_stats_print) {
/* Print statistics at exit. */
@@ -651,50 +715,46 @@ malloc_init_hard(void)
}
}
- if (chunk_boot()) {
+ if (base_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
- if (base_boot()) {
+ if (chunk_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
-#ifdef JEMALLOC_PROF
- prof_boot1();
-#endif
-
- if (arena_boot()) {
+ if (ctl_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
-#ifdef JEMALLOC_TCACHE
- tcache_boot();
-#endif
+ if (config_prof)
+ prof_boot1();
- if (huge_boot()) {
+ arena_boot();
+
+ if (config_tcache && tcache_boot0()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
-#if (defined(JEMALLOC_STATS) && defined(NO_TLS))
- /* Initialize allocation counters before any allocations can occur. */
- if (pthread_key_create(&thread_allocated_tsd, thread_allocated_cleanup)
- != 0) {
+ if (huge_boot()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
-#endif
+
+ if (malloc_mutex_init(&arenas_lock))
+ return (true);
/*
* Create enough scaffolding to allow recursive allocation in
* malloc_ncpus().
*/
- narenas = 1;
+ narenas_total = narenas_auto = 1;
arenas = init_arenas;
- memset(arenas, 0, sizeof(arena_t *) * narenas);
+ memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
/*
* Initialize one arena here. The rest are lazily created in
@@ -706,28 +766,42 @@ malloc_init_hard(void)
return (true);
}
- /*
- * Assign the initial arena to the initial thread, in order to avoid
- * spurious creation of an extra arena if the application switches to
- * threaded mode.
- */
- ARENA_SET(arenas[0]);
+ /* Initialize allocation counters before any allocations can occur. */
+ if (config_stats && thread_allocated_tsd_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
- malloc_mutex_init(&arenas_lock);
+ if (arenas_tsd_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
-#ifdef JEMALLOC_PROF
- if (prof_boot2()) {
+ if (config_tcache && tcache_boot1()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (config_fill && quarantine_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
+ if (config_prof && prof_boot2()) {
malloc_mutex_unlock(&init_lock);
return (true);
}
-#endif
/* Get number of CPUs. */
- malloc_initializer = pthread_self();
malloc_mutex_unlock(&init_lock);
ncpus = malloc_ncpus();
malloc_mutex_lock(&init_lock);
+ if (mutex_boot()) {
+ malloc_mutex_unlock(&init_lock);
+ return (true);
+ }
+
if (opt_narenas == 0) {
/*
* For SMP systems, create more than one arena per CPU by
@@ -738,32 +812,21 @@ malloc_init_hard(void)
else
opt_narenas = 1;
}
- narenas = opt_narenas;
+ narenas_auto = opt_narenas;
/*
* Make sure that the arenas array can be allocated. In practice, this
* limit is enough to allow the allocator to function, but the ctl
* machinery will fail to allocate memory at far lower limits.
*/
- if (narenas > chunksize / sizeof(arena_t *)) {
- char buf[UMAX2S_BUFSIZE];
-
- narenas = chunksize / sizeof(arena_t *);
- malloc_write("<jemalloc>: Reducing narenas to limit (");
- malloc_write(u2s(narenas, 10, buf));
- malloc_write(")\n");
- }
-
- next_arena = (narenas > 0) ? 1 : 0;
-
-#ifdef NO_TLS
- if (pthread_key_create(&arenas_tsd, NULL) != 0) {
- malloc_mutex_unlock(&init_lock);
- return (true);
+ if (narenas_auto > chunksize / sizeof(arena_t *)) {
+ narenas_auto = chunksize / sizeof(arena_t *);
+ malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
+ narenas_auto);
}
-#endif
+ narenas_total = narenas_auto;
/* Allocate and initialize arenas. */
- arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
+ arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
if (arenas == NULL) {
malloc_mutex_unlock(&init_lock);
return (true);
@@ -772,39 +835,15 @@ malloc_init_hard(void)
* Zero the array. In practice, this should always be pre-zeroed,
* since it was just mmap()ed, but let's be sure.
*/
- memset(arenas, 0, sizeof(arena_t *) * narenas);
+ memset(arenas, 0, sizeof(arena_t *) * narenas_total);
/* Copy the pointer to the one arena that was already initialized. */
arenas[0] = init_arenas[0];
-#ifdef JEMALLOC_ZONE
- /* Register the custom zone. */
- malloc_zone_register(create_zone());
-
- /*
- * Convert the default szone to an "overlay zone" that is capable of
- * deallocating szone-allocated objects, but allocating new objects
- * from jemalloc.
- */
- szone2ozone(malloc_default_zone());
-#endif
-
malloc_initialized = true;
malloc_mutex_unlock(&init_lock);
return (false);
}
-
-#ifdef JEMALLOC_ZONE
-JEMALLOC_ATTR(constructor)
-void
-jemalloc_darwin_init(void)
-{
-
- if (malloc_init_hard())
- abort();
-}
-#endif
-
/*
* End initialization functions.
*/
@@ -813,265 +852,201 @@ jemalloc_darwin_init(void)
* Begin malloc(3)-compatible functions.
*/
-JEMALLOC_ATTR(malloc)
-JEMALLOC_ATTR(visibility("default"))
void *
-JEMALLOC_P(malloc)(size_t size)
+je_malloc(size_t size)
{
void *ret;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- size_t usize
-# ifdef JEMALLOC_CC_SILENCE
- = 0
-# endif
- ;
-#endif
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
-#endif
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
if (malloc_init()) {
ret = NULL;
- goto OOM;
+ goto label_oom;
}
- if (size == 0) {
-#ifdef JEMALLOC_SYSV
- if (opt_sysv == false)
-#endif
- size = 1;
-#ifdef JEMALLOC_SYSV
- else {
-# ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
- malloc_write("<jemalloc>: Error in malloc(): "
- "invalid size 0\n");
- abort();
- }
-# endif
- ret = NULL;
- goto RETURN;
- }
-#endif
- }
+ if (size == 0)
+ size = 1;
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
usize = s2u(size);
- if ((cnt = prof_alloc_prep(usize)) == NULL) {
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL) {
ret = NULL;
- goto OOM;
+ goto label_oom;
}
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- small_maxclass) {
- ret = imalloc(small_maxclass+1);
+ SMALL_MAXCLASS) {
+ ret = imalloc(SMALL_MAXCLASS+1);
if (ret != NULL)
arena_prof_promoted(ret, usize);
} else
ret = imalloc(size);
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- usize = s2u(size);
-#endif
+ } else {
+ if (config_stats || (config_valgrind && opt_valgrind))
+ usize = s2u(size);
ret = imalloc(size);
}
-OOM:
+label_oom:
if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in malloc(): "
"out of memory\n");
abort();
}
-#endif
- errno = ENOMEM;
+ set_errno(ENOMEM);
}
-
-#ifdef JEMALLOC_SYSV
-RETURN:
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof && ret != NULL)
+ if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
-#endif
-#ifdef JEMALLOC_STATS
- if (ret != NULL) {
- assert(usize == isalloc(ret));
- ALLOCATED_ADD(usize, 0);
+ if (config_stats && ret != NULL) {
+ assert(usize == isalloc(ret, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
}
-#endif
+ UTRACE(0, size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
return (ret);
}
JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
-int
-JEMALLOC_P(posix_memalign)(void **memptr, size_t alignment, size_t size)
+#ifdef JEMALLOC_PROF
+/*
+ * Avoid any uncertainty as to how many backtrace frames to ignore in
+ * PROF_ALLOC_PREP().
+ */
+JEMALLOC_NOINLINE
+#endif
+static int
+imemalign(void **memptr, size_t alignment, size_t size,
+ size_t min_alignment)
{
int ret;
+ size_t usize;
void *result;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- size_t usize
-# ifdef JEMALLOC_CC_SILENCE
- = 0
-# endif
- ;
-#endif
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
-#endif
+ prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+
+ assert(min_alignment != 0);
if (malloc_init())
result = NULL;
else {
- if (size == 0) {
-#ifdef JEMALLOC_SYSV
- if (opt_sysv == false)
-#endif
- size = 1;
-#ifdef JEMALLOC_SYSV
- else {
-# ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
- malloc_write("<jemalloc>: Error in "
- "posix_memalign(): invalid size "
- "0\n");
- abort();
- }
-# endif
- result = NULL;
- *memptr = NULL;
- ret = 0;
- goto RETURN;
- }
-#endif
- }
+ if (size == 0)
+ size = 1;
/* Make sure that alignment is a large enough power of 2. */
if (((alignment - 1) & alignment) != 0
- || alignment < sizeof(void *)) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
- malloc_write("<jemalloc>: Error in "
- "posix_memalign(): invalid alignment\n");
+ || (alignment < min_alignment)) {
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error allocating "
+ "aligned memory: invalid alignment\n");
abort();
}
-#endif
result = NULL;
ret = EINVAL;
- goto RETURN;
+ goto label_return;
}
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
- usize = sa2u(size, alignment, NULL);
- if ((cnt = prof_alloc_prep(usize)) == NULL) {
+ usize = sa2u(size, alignment);
+ if (usize == 0) {
+ result = NULL;
+ ret = ENOMEM;
+ goto label_return;
+ }
+
+ if (config_prof && opt_prof) {
+ PROF_ALLOC_PREP(2, usize, cnt);
+ if (cnt == NULL) {
result = NULL;
ret = EINVAL;
} else {
if (prof_promote && (uintptr_t)cnt !=
- (uintptr_t)1U && usize <= small_maxclass) {
- result = ipalloc(small_maxclass+1,
- alignment, false);
+ (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
+ assert(sa2u(SMALL_MAXCLASS+1,
+ alignment) != 0);
+ result = ipalloc(sa2u(SMALL_MAXCLASS+1,
+ alignment), alignment, false);
if (result != NULL) {
arena_prof_promoted(result,
usize);
}
} else {
- result = ipalloc(size, alignment,
+ result = ipalloc(usize, alignment,
false);
}
}
} else
-#endif
- {
-#ifdef JEMALLOC_STATS
- usize = sa2u(size, alignment, NULL);
-#endif
- result = ipalloc(size, alignment, false);
- }
+ result = ipalloc(usize, alignment, false);
}
if (result == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
- malloc_write("<jemalloc>: Error in posix_memalign(): "
- "out of memory\n");
+ if (config_xmalloc && opt_xmalloc) {
+ malloc_write("<jemalloc>: Error allocating aligned "
+ "memory: out of memory\n");
abort();
}
-#endif
ret = ENOMEM;
- goto RETURN;
+ goto label_return;
}
*memptr = result;
ret = 0;
-RETURN:
-#ifdef JEMALLOC_STATS
- if (result != NULL) {
- assert(usize == isalloc(result));
- ALLOCATED_ADD(usize, 0);
+label_return:
+ if (config_stats && result != NULL) {
+ assert(usize == isalloc(result, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
}
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof && result != NULL)
+ if (config_prof && opt_prof && result != NULL)
prof_malloc(result, usize, cnt);
-#endif
+ UTRACE(0, size, result);
+ return (ret);
+}
+
+int
+je_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+ int ret = imemalign(memptr, alignment, size, sizeof(void *));
+ JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
+ config_prof), false);
+ return (ret);
+}
+
+void *
+je_aligned_alloc(size_t alignment, size_t size)
+{
+ void *ret;
+ int err;
+
+ if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
+ ret = NULL;
+ set_errno(err);
+ }
+ JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
+ false);
return (ret);
}
-JEMALLOC_ATTR(malloc)
-JEMALLOC_ATTR(visibility("default"))
void *
-JEMALLOC_P(calloc)(size_t num, size_t size)
+je_calloc(size_t num, size_t size)
{
void *ret;
size_t num_size;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- size_t usize
-# ifdef JEMALLOC_CC_SILENCE
- = 0
-# endif
- ;
-#endif
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
-#endif
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
+ prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
if (malloc_init()) {
num_size = 0;
ret = NULL;
- goto RETURN;
+ goto label_return;
}
num_size = num * size;
if (num_size == 0) {
-#ifdef JEMALLOC_SYSV
- if ((opt_sysv == false) && ((num == 0) || (size == 0)))
-#endif
+ if (num == 0 || size == 0)
num_size = 1;
-#ifdef JEMALLOC_SYSV
else {
ret = NULL;
- goto RETURN;
+ goto label_return;
}
-#endif
/*
* Try to avoid division here. We know that it isn't possible to
* overflow during multiplication if neither operand uses any of the
@@ -1081,181 +1056,159 @@ JEMALLOC_P(calloc)(size_t num, size_t size)
&& (num_size / size != num)) {
/* size_t overflow. */
ret = NULL;
- goto RETURN;
+ goto label_return;
}
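The comment above describes the fast path of the overflow guard: if neither operand touches the most significant half of size_t's bits, the product cannot overflow, so the division is only paid in the rare wide case. A minimal standalone sketch of that test (illustration only, using the standard SIZE_MAX rather than jemalloc's SIZE_T_MAX):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: returns true iff num * size would overflow a size_t. */
static bool
mul_overflows(size_t num, size_t size)
{
    /* Mask covering the high half of size_t's bits. */
    const size_t high_bits = (size_t)SIZE_MAX << (sizeof(size_t) << 2);

    if (((num | size) & high_bits) == 0)
        return (false);    /* Both operands fit in the low half. */
    return (size != 0 && (num * size) / size != num);
}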
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
usize = s2u(num_size);
- if ((cnt = prof_alloc_prep(usize)) == NULL) {
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL) {
ret = NULL;
- goto RETURN;
+ goto label_return;
}
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
- <= small_maxclass) {
- ret = icalloc(small_maxclass+1);
+ <= SMALL_MAXCLASS) {
+ ret = icalloc(SMALL_MAXCLASS+1);
if (ret != NULL)
arena_prof_promoted(ret, usize);
} else
ret = icalloc(num_size);
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- usize = s2u(num_size);
-#endif
+ } else {
+ if (config_stats || (config_valgrind && opt_valgrind))
+ usize = s2u(num_size);
ret = icalloc(num_size);
}
-RETURN:
+label_return:
if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in calloc(): out of "
"memory\n");
abort();
}
-#endif
- errno = ENOMEM;
+ set_errno(ENOMEM);
}
-#ifdef JEMALLOC_PROF
- if (opt_prof && ret != NULL)
+ if (config_prof && opt_prof && ret != NULL)
prof_malloc(ret, usize, cnt);
-#endif
-#ifdef JEMALLOC_STATS
- if (ret != NULL) {
- assert(usize == isalloc(ret));
- ALLOCATED_ADD(usize, 0);
+ if (config_stats && ret != NULL) {
+ assert(usize == isalloc(ret, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
}
-#endif
+ UTRACE(0, num_size, ret);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
return (ret);
}
-JEMALLOC_ATTR(visibility("default"))
void *
-JEMALLOC_P(realloc)(void *ptr, size_t size)
+je_realloc(void *ptr, size_t size)
{
void *ret;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- size_t usize
-# ifdef JEMALLOC_CC_SILENCE
- = 0
-# endif
- ;
+ size_t usize JEMALLOC_CC_SILENCE_INIT(0);
size_t old_size = 0;
-#endif
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
- prof_ctx_t *old_ctx
-# ifdef JEMALLOC_CC_SILENCE
- = NULL
-# endif
- ;
-#endif
+ size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
+ prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
if (size == 0) {
-#ifdef JEMALLOC_SYSV
- if (opt_sysv == false)
-#endif
- size = 1;
-#ifdef JEMALLOC_SYSV
- else {
- if (ptr != NULL) {
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- old_size = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
- old_ctx = prof_ctx_get(ptr);
- cnt = NULL;
- }
-#endif
- idalloc(ptr);
+ if (ptr != NULL) {
+ /* realloc(ptr, 0) is equivalent to free(ptr). */
+ assert(malloc_initialized || IS_INITIALIZER);
+ if (config_prof) {
+ old_size = isalloc(ptr, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(ptr);
+ } else if (config_stats) {
+ old_size = isalloc(ptr, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(ptr, false);
+ old_rzsize = u2rz(old_size);
}
-#ifdef JEMALLOC_PROF
- else if (opt_prof) {
- old_ctx = NULL;
+ if (config_prof && opt_prof) {
+ old_ctx = prof_ctx_get(ptr);
cnt = NULL;
}
-#endif
+ iqalloc(ptr);
ret = NULL;
- goto RETURN;
- }
-#endif
+ goto label_return;
+ } else
+ size = 1;
}
if (ptr != NULL) {
- assert(malloc_initialized || malloc_initializer ==
- pthread_self());
-
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- old_size = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
+
+ if (config_prof) {
+ old_size = isalloc(ptr, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(ptr);
+ } else if (config_stats) {
+ old_size = isalloc(ptr, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(ptr, false);
+ old_rzsize = u2rz(old_size);
+ }
+ if (config_prof && opt_prof) {
usize = s2u(size);
old_ctx = prof_ctx_get(ptr);
- if ((cnt = prof_alloc_prep(usize)) == NULL) {
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL) {
+ old_ctx = NULL;
ret = NULL;
- goto OOM;
+ goto label_oom;
}
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
- usize <= small_maxclass) {
- ret = iralloc(ptr, small_maxclass+1, 0, 0,
+ usize <= SMALL_MAXCLASS) {
+ ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
false, false);
if (ret != NULL)
arena_prof_promoted(ret, usize);
- } else
+ else
+ old_ctx = NULL;
+ } else {
ret = iralloc(ptr, size, 0, 0, false, false);
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- usize = s2u(size);
-#endif
+ if (ret == NULL)
+ old_ctx = NULL;
+ }
+ } else {
+ if (config_stats || (config_valgrind && opt_valgrind))
+ usize = s2u(size);
ret = iralloc(ptr, size, 0, 0, false, false);
}
-#ifdef JEMALLOC_PROF
-OOM:
-#endif
+label_oom:
if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
-#endif
- errno = ENOMEM;
+ set_errno(ENOMEM);
}
} else {
-#ifdef JEMALLOC_PROF
- if (opt_prof)
+ /* realloc(NULL, size) is equivalent to malloc(size). */
+ if (config_prof && opt_prof)
old_ctx = NULL;
-#endif
if (malloc_init()) {
-#ifdef JEMALLOC_PROF
- if (opt_prof)
+ if (config_prof && opt_prof)
cnt = NULL;
-#endif
ret = NULL;
} else {
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
usize = s2u(size);
- if ((cnt = prof_alloc_prep(usize)) == NULL)
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL)
ret = NULL;
else {
if (prof_promote && (uintptr_t)cnt !=
(uintptr_t)1U && usize <=
- small_maxclass) {
- ret = imalloc(small_maxclass+1);
+ SMALL_MAXCLASS) {
+ ret = imalloc(SMALL_MAXCLASS+1);
if (ret != NULL) {
arena_prof_promoted(ret,
usize);
@@ -1263,72 +1216,61 @@ OOM:
} else
ret = imalloc(size);
}
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- usize = s2u(size);
-#endif
+ } else {
+ if (config_stats || (config_valgrind &&
+ opt_valgrind))
+ usize = s2u(size);
ret = imalloc(size);
}
}
if (ret == NULL) {
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in realloc(): "
"out of memory\n");
abort();
}
-#endif
- errno = ENOMEM;
+ set_errno(ENOMEM);
}
}
-#ifdef JEMALLOC_SYSV
-RETURN:
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof)
+label_return:
+ if (config_prof && opt_prof)
prof_realloc(ret, usize, cnt, old_size, old_ctx);
-#endif
-#ifdef JEMALLOC_STATS
- if (ret != NULL) {
- assert(usize == isalloc(ret));
- ALLOCATED_ADD(usize, old_size);
+ if (config_stats && ret != NULL) {
+ thread_allocated_t *ta;
+ assert(usize == isalloc(ret, config_prof));
+ ta = thread_allocated_tsd_get();
+ ta->allocated += usize;
+ ta->deallocated += old_size;
}
-#endif
+ UTRACE(ptr, size, ret);
+ JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
return (ret);
}
-JEMALLOC_ATTR(visibility("default"))
void
-JEMALLOC_P(free)(void *ptr)
+je_free(void *ptr)
{
+ UTRACE(ptr, 0, 0);
if (ptr != NULL) {
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t usize;
-#endif
+ size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
- assert(malloc_initialized || malloc_initializer ==
- pthread_self());
+ assert(malloc_initialized || IS_INITIALIZER);
-#ifdef JEMALLOC_STATS
- usize = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
-# ifndef JEMALLOC_STATS
- usize = isalloc(ptr);
-# endif
+ if (config_prof && opt_prof) {
+ usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
- }
-#endif
-#ifdef JEMALLOC_STATS
- ALLOCATED_ADD(0, usize);
-#endif
- idalloc(ptr);
+ } else if (config_stats || config_valgrind)
+ usize = isalloc(ptr, config_prof);
+ if (config_stats)
+ thread_allocated_tsd_get()->deallocated += usize;
+ if (config_valgrind && opt_valgrind)
+ rzsize = p2rz(ptr);
+ iqalloc(ptr);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
}
@@ -1338,51 +1280,55 @@ JEMALLOC_P(free)(void *ptr)
/******************************************************************************/
/*
* Begin non-standard override functions.
- *
- * These overrides are omitted if the JEMALLOC_PREFIX is defined, since the
- * entire point is to avoid accidental mixed allocator usage.
*/
-#ifndef JEMALLOC_PREFIX
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
-JEMALLOC_ATTR(malloc)
-JEMALLOC_ATTR(visibility("default"))
void *
-JEMALLOC_P(memalign)(size_t alignment, size_t size)
+je_memalign(size_t alignment, size_t size)
{
- void *ret;
-#ifdef JEMALLOC_CC_SILENCE
- int result =
-#endif
- JEMALLOC_P(posix_memalign)(&ret, alignment, size);
-#ifdef JEMALLOC_CC_SILENCE
- if (result != 0)
- return (NULL);
-#endif
+ void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ imemalign(&ret, alignment, size, 1);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
-JEMALLOC_ATTR(malloc)
-JEMALLOC_ATTR(visibility("default"))
void *
-JEMALLOC_P(valloc)(size_t size)
+je_valloc(size_t size)
{
- void *ret;
-#ifdef JEMALLOC_CC_SILENCE
- int result =
-#endif
- JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);
-#ifdef JEMALLOC_CC_SILENCE
- if (result != 0)
- return (NULL);
-#endif
+ void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
+ imemalign(&ret, PAGE, size, 1);
+ JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
return (ret);
}
#endif
-#endif /* JEMALLOC_PREFIX */
+/*
+ * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
+ * #define je_malloc malloc
+ */
+#define malloc_is_malloc 1
+#define is_malloc_(a) malloc_is_ ## a
+#define is_malloc(a) is_malloc_(a)
+
+#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
+/*
+ * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
+ * to inconsistently reference libc's malloc(3)-compatible functions
+ * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
+ *
+ * These definitions interpose hooks in glibc. The functions are actually
+ * passed an extra argument for the caller return address, which will be
+ * ignored.
+ */
+JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
+JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
+JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
+JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
+ je_memalign;
+#endif
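The token-pasting macros above exist to answer one question at preprocessing time: did jemalloc_defs.h map je_malloc onto plain malloc? A tiny standalone demo of the expansion (not part of the patch; the #define of je_malloc below stands in for what jemalloc_defs.h may provide):

#include <stdio.h>
#include <stdlib.h>

#define je_malloc malloc          /* stand-in for jemalloc_defs.h */

#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

int
main(void)
{
#if (is_malloc(je_malloc) == 1)
    /* je_malloc -> malloc -> malloc_is_malloc -> 1: the hooks get installed. */
    puts("je_malloc is exported as malloc");
#else
    /* The paste dead-ends in an undefined token, which #if evaluates as 0. */
    puts("je_malloc keeps its prefix");
#endif
    return (0);
}

When the test holds on glibc, assigning the __*_hook pointers as done above routes allocations through jemalloc even for callers loaded with RTLD_DEEPBIND.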
+
/*
* End non-standard override functions.
*/
@@ -1391,36 +1337,32 @@ JEMALLOC_P(valloc)(size_t size)
* Begin non-standard functions.
*/
-JEMALLOC_ATTR(visibility("default"))
size_t
-JEMALLOC_P(malloc_usable_size)(const void *ptr)
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
size_t ret;
- assert(malloc_initialized || malloc_initializer == pthread_self());
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
-#ifdef JEMALLOC_IVSALLOC
- ret = ivsalloc(ptr);
-#else
- assert(ptr != NULL);
- ret = isalloc(ptr);
-#endif
+ if (config_ivsalloc)
+ ret = ivsalloc(ptr, config_prof);
+ else
+ ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
return (ret);
}
-JEMALLOC_ATTR(visibility("default"))
void
-JEMALLOC_P(malloc_stats_print)(void (*write_cb)(void *, const char *),
- void *cbopaque, const char *opts)
+je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *opts)
{
stats_print(write_cb, cbopaque, opts);
}
-JEMALLOC_ATTR(visibility("default"))
int
-JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
+je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
size_t newlen)
{
@@ -1430,9 +1372,8 @@ JEMALLOC_P(mallctl)(const char *name, void *oldp, size_t *oldlenp, void *newp,
return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}
-JEMALLOC_ATTR(visibility("default"))
int
-JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
+je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
if (malloc_init())
@@ -1441,10 +1382,9 @@ JEMALLOC_P(mallctlnametomib)(const char *name, size_t *mibp, size_t *miblenp)
return (ctl_nametomib(name, mibp, miblenp));
}
-JEMALLOC_ATTR(visibility("default"))
int
-JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen)
+je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+ void *newp, size_t newlen)
{
if (malloc_init())
@@ -1453,307 +1393,476 @@ JEMALLOC_P(mallctlbymib)(const size_t *mib, size_t miblen, void *oldp,
return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
-JEMALLOC_INLINE void *
-iallocm(size_t size, size_t alignment, bool zero)
+/*
+ * End non-standard functions.
+ */
+/******************************************************************************/
+/*
+ * Begin experimental functions.
+ */
+#ifdef JEMALLOC_EXPERIMENTAL
+
+static JEMALLOC_ATTR(always_inline) void *
+iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
+ arena_t *arena)
{
+ assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
+ alignment)));
+
if (alignment != 0)
- return (ipalloc(size, alignment, zero));
+ return (ipallocx(usize, alignment, zero, try_tcache, arena));
else if (zero)
- return (icalloc(size));
+ return (icallocx(usize, try_tcache, arena));
else
- return (imalloc(size));
+ return (imallocx(usize, try_tcache, arena));
}
-JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
int
-JEMALLOC_P(allocm)(void **ptr, size_t *rsize, size_t size, int flags)
+je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
void *p;
size_t usize;
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt;
-#endif
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ arena_t *arena;
+ bool try_tcache;
assert(ptr != NULL);
assert(size != 0);
if (malloc_init())
- goto OOM;
+ goto label_oom;
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
- usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment,
- NULL);
- if ((cnt = prof_alloc_prep(usize)) == NULL)
- goto OOM;
+ if (arena_ind != UINT_MAX) {
+ arena = arenas[arena_ind];
+ try_tcache = false;
+ } else {
+ arena = NULL;
+ try_tcache = true;
+ }
+
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ if (usize == 0)
+ goto label_oom;
+
+ if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
+ PROF_ALLOC_PREP(1, usize, cnt);
+ if (cnt == NULL)
+ goto label_oom;
if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
- small_maxclass) {
- p = iallocm(small_maxclass+1, alignment, zero);
+ SMALL_MAXCLASS) {
+ size_t usize_promoted = (alignment == 0) ?
+ s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
+ alignment);
+ assert(usize_promoted != 0);
+ p = iallocm(usize_promoted, alignment, zero,
+ try_tcache, arena);
if (p == NULL)
- goto OOM;
+ goto label_oom;
arena_prof_promoted(p, usize);
} else {
- p = iallocm(size, alignment, zero);
+ p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
- goto OOM;
+ goto label_oom;
}
-
- if (rsize != NULL)
- *rsize = usize;
- } else
-#endif
- {
- p = iallocm(size, alignment, zero);
+ prof_malloc(p, usize, cnt);
+ } else {
+ p = iallocm(usize, alignment, zero, try_tcache, arena);
if (p == NULL)
- goto OOM;
-#ifndef JEMALLOC_STATS
- if (rsize != NULL)
-#endif
- {
- usize = (alignment == 0) ? s2u(size) : sa2u(size,
- alignment, NULL);
-#ifdef JEMALLOC_STATS
- if (rsize != NULL)
-#endif
- *rsize = usize;
- }
+ goto label_oom;
}
+ if (rsize != NULL)
+ *rsize = usize;
*ptr = p;
-#ifdef JEMALLOC_STATS
- assert(usize == isalloc(p));
- ALLOCATED_ADD(usize, 0);
-#endif
+ if (config_stats) {
+ assert(usize == isalloc(p, config_prof));
+ thread_allocated_tsd_get()->allocated += usize;
+ }
+ UTRACE(0, size, p);
+ JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
return (ALLOCM_SUCCESS);
-OOM:
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+label_oom:
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in allocm(): "
"out of memory\n");
abort();
}
-#endif
*ptr = NULL;
+ UTRACE(0, size, 0);
return (ALLOCM_ERR_OOM);
}
-JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
int
-JEMALLOC_P(rallocm)(void **ptr, size_t *rsize, size_t size, size_t extra,
- int flags)
+je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
void *p, *q;
size_t usize;
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t old_size;
-#endif
+ size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
& (SIZE_T_MAX-1));
bool zero = flags & ALLOCM_ZERO;
bool no_move = flags & ALLOCM_NO_MOVE;
-#ifdef JEMALLOC_PROF
- prof_thr_cnt_t *cnt;
- prof_ctx_t *old_ctx;
-#endif
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ bool try_tcache_alloc, try_tcache_dalloc;
+ arena_t *arena;
assert(ptr != NULL);
assert(*ptr != NULL);
assert(size != 0);
assert(SIZE_T_MAX - size >= extra);
- assert(malloc_initialized || malloc_initializer == pthread_self());
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
+
+ if (arena_ind != UINT_MAX) {
+ arena_chunk_t *chunk;
+ try_tcache_alloc = true;
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
+ try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
+ arenas[arena_ind]);
+ arena = arenas[arena_ind];
+ } else {
+ try_tcache_alloc = true;
+ try_tcache_dalloc = true;
+ arena = NULL;
+ }
p = *ptr;
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
+ if (config_prof && opt_prof) {
+ prof_thr_cnt_t *cnt;
+
/*
* usize isn't knowable before iralloc() returns when extra is
* non-zero. Therefore, compute its maximum possible value and
- * use that in prof_alloc_prep() to decide whether to capture a
+ * use that in PROF_ALLOC_PREP() to decide whether to capture a
* backtrace. prof_realloc() will use the actual usize to
* decide whether to sample.
*/
size_t max_usize = (alignment == 0) ? s2u(size+extra) :
- sa2u(size+extra, alignment, NULL);
- old_size = isalloc(p);
- old_ctx = prof_ctx_get(p);
- if ((cnt = prof_alloc_prep(max_usize)) == NULL)
- goto OOM;
- if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && max_usize
- <= small_maxclass) {
- q = iralloc(p, small_maxclass+1, (small_maxclass+1 >=
- size+extra) ? 0 : size+extra - (small_maxclass+1),
- alignment, zero, no_move);
+ sa2u(size+extra, alignment);
+ prof_ctx_t *old_ctx = prof_ctx_get(p);
+ old_size = isalloc(p, true);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = p2rz(p);
+ PROF_ALLOC_PREP(1, max_usize, cnt);
+ if (cnt == NULL)
+ goto label_oom;
+ /*
+ * Use minimum usize to determine whether promotion may happen.
+ */
+ if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
+ && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
+ <= SMALL_MAXCLASS) {
+ q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
+ size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
+ alignment, zero, no_move, try_tcache_alloc,
+ try_tcache_dalloc, arena);
if (q == NULL)
- goto ERR;
- usize = isalloc(q);
- arena_prof_promoted(q, usize);
+ goto label_err;
+ if (max_usize < PAGE) {
+ usize = max_usize;
+ arena_prof_promoted(q, usize);
+ } else
+ usize = isalloc(q, config_prof);
} else {
- q = iralloc(p, size, extra, alignment, zero, no_move);
+ q = irallocx(p, size, extra, alignment, zero, no_move,
+ try_tcache_alloc, try_tcache_dalloc, arena);
if (q == NULL)
- goto ERR;
- usize = isalloc(q);
+ goto label_err;
+ usize = isalloc(q, config_prof);
}
prof_realloc(q, usize, cnt, old_size, old_ctx);
- } else
-#endif
- {
-#ifdef JEMALLOC_STATS
- old_size = isalloc(p);
-#endif
- q = iralloc(p, size, extra, alignment, zero, no_move);
- if (q == NULL)
- goto ERR;
-#ifndef JEMALLOC_STATS
if (rsize != NULL)
-#endif
- {
- usize = isalloc(q);
-#ifdef JEMALLOC_STATS
- if (rsize != NULL)
-#endif
- *rsize = usize;
+ *rsize = usize;
+ } else {
+ if (config_stats) {
+ old_size = isalloc(p, false);
+ if (config_valgrind && opt_valgrind)
+ old_rzsize = u2rz(old_size);
+ } else if (config_valgrind && opt_valgrind) {
+ old_size = isalloc(p, false);
+ old_rzsize = u2rz(old_size);
+ }
+ q = irallocx(p, size, extra, alignment, zero, no_move,
+ try_tcache_alloc, try_tcache_dalloc, arena);
+ if (q == NULL)
+ goto label_err;
+ if (config_stats)
+ usize = isalloc(q, config_prof);
+ if (rsize != NULL) {
+ if (config_stats == false)
+ usize = isalloc(q, config_prof);
+ *rsize = usize;
}
}
*ptr = q;
-#ifdef JEMALLOC_STATS
- ALLOCATED_ADD(usize, old_size);
-#endif
+ if (config_stats) {
+ thread_allocated_t *ta;
+ ta = thread_allocated_tsd_get();
+ ta->allocated += usize;
+ ta->deallocated += old_size;
+ }
+ UTRACE(p, size, q);
+ JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
return (ALLOCM_SUCCESS);
-ERR:
- if (no_move)
+label_err:
+ if (no_move) {
+ UTRACE(p, size, q);
return (ALLOCM_ERR_NOT_MOVED);
-#ifdef JEMALLOC_PROF
-OOM:
-#endif
-#ifdef JEMALLOC_XMALLOC
- if (opt_xmalloc) {
+ }
+label_oom:
+ if (config_xmalloc && opt_xmalloc) {
malloc_write("<jemalloc>: Error in rallocm(): "
"out of memory\n");
abort();
}
-#endif
+ UTRACE(p, size, 0);
return (ALLOCM_ERR_OOM);
}
-JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
int
-JEMALLOC_P(sallocm)(const void *ptr, size_t *rsize, int flags)
+je_sallocm(const void *ptr, size_t *rsize, int flags)
{
size_t sz;
- assert(malloc_initialized || malloc_initializer == pthread_self());
+ assert(malloc_initialized || IS_INITIALIZER);
+ malloc_thread_init();
-#ifdef JEMALLOC_IVSALLOC
- sz = ivsalloc(ptr);
-#else
- assert(ptr != NULL);
- sz = isalloc(ptr);
-#endif
+ if (config_ivsalloc)
+ sz = ivsalloc(ptr, config_prof);
+ else {
+ assert(ptr != NULL);
+ sz = isalloc(ptr, config_prof);
+ }
assert(rsize != NULL);
*rsize = sz;
return (ALLOCM_SUCCESS);
}
-JEMALLOC_ATTR(nonnull(1))
-JEMALLOC_ATTR(visibility("default"))
int
-JEMALLOC_P(dallocm)(void *ptr, int flags)
+je_dallocm(void *ptr, int flags)
{
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
size_t usize;
-#endif
+ size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
+ unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
+ bool try_tcache;
assert(ptr != NULL);
- assert(malloc_initialized || malloc_initializer == pthread_self());
+ assert(malloc_initialized || IS_INITIALIZER);
-#ifdef JEMALLOC_STATS
- usize = isalloc(ptr);
-#endif
-#ifdef JEMALLOC_PROF
- if (opt_prof) {
-# ifndef JEMALLOC_STATS
- usize = isalloc(ptr);
-# endif
+ if (arena_ind != UINT_MAX) {
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ try_tcache = (chunk == ptr || chunk->arena !=
+ arenas[arena_ind]);
+ } else
+ try_tcache = true;
+
+ UTRACE(ptr, 0, 0);
+ if (config_stats || config_valgrind)
+ usize = isalloc(ptr, config_prof);
+ if (config_prof && opt_prof) {
+ if (config_stats == false && config_valgrind == false)
+ usize = isalloc(ptr, config_prof);
prof_free(ptr, usize);
}
-#endif
-#ifdef JEMALLOC_STATS
- ALLOCATED_ADD(0, usize);
-#endif
- idalloc(ptr);
+ if (config_stats)
+ thread_allocated_tsd_get()->deallocated += usize;
+ if (config_valgrind && opt_valgrind)
+ rzsize = p2rz(ptr);
+ iqallocx(ptr, try_tcache);
+ JEMALLOC_VALGRIND_FREE(ptr, rzsize);
+
+ return (ALLOCM_SUCCESS);
+}
+
+int
+je_nallocm(size_t *rsize, size_t size, int flags)
+{
+ size_t usize;
+ size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
+ & (SIZE_T_MAX-1));
+ assert(size != 0);
+
+ if (malloc_init())
+ return (ALLOCM_ERR_OOM);
+
+ usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
+ if (usize == 0)
+ return (ALLOCM_ERR_OOM);
+
+ if (rsize != NULL)
+ *rsize = usize;
return (ALLOCM_SUCCESS);
}
+#endif
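A usage sketch for the experimental entry points above, assuming a je_-prefixed build configured with --enable-experimental and the ALLOCM_LG_ALIGN()/ALLOCM_ZERO macros from jemalloc's public header (illustrative only; error handling trimmed):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
    void *p;
    size_t usable;

    /* At least 100 bytes, 64-byte aligned, zero-filled. */
    if (je_allocm(&p, &usable, 100, ALLOCM_LG_ALIGN(6) | ALLOCM_ZERO) !=
        ALLOCM_SUCCESS)
        return (1);
    printf("usable size: %zu\n", usable);
    return (je_dallocm(p, 0) != ALLOCM_SUCCESS);
}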
/*
- * End non-standard functions.
+ * End experimental functions.
*/
/******************************************************************************/
-
/*
* The following functions are used by threading libraries for protection of
* malloc during fork().
*/
+/*
+ * If an application creates a thread before doing any allocation in the main
+ * thread, then calls fork(2) in the main thread followed by memory allocation
+ * in the child process, a race can occur that results in deadlock within the
+ * child: the main thread may have forked while the created thread had
+ * partially initialized the allocator. Ordinarily jemalloc prevents
+ * fork/malloc races via the following functions it registers during
+ * initialization using pthread_atfork(), but of course that does no good if
+ * the allocator isn't fully initialized at fork time. The following library
+ * constructor is a partial solution to this problem. It may still possible to
+ * trigger the deadlock described above, but doing so would involve forking via
+ * a library constructor that runs before jemalloc's runs.
+ */
+JEMALLOC_ATTR(constructor)
+static void
+jemalloc_constructor(void)
+{
+
+ malloc_init();
+}
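The comment above notes that the fork handlers are registered during initialization via pthread_atfork(). A sketch of that registration pattern (not the patch's own registration code, which is not shown in this hunk):

#include <pthread.h>

/* Handlers defined below (non-JEMALLOC_MUTEX_INIT_CB names shown). */
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);

/* prepare runs in the parent before fork(); the other two run after it. */
static int
register_fork_handlers(void)
{
    return (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child));
}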
+
+#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
+#else
+JEMALLOC_EXPORT void
+_malloc_prefork(void)
+#endif
{
unsigned i;
- /* Acquire all mutexes in a safe order. */
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ if (malloc_initialized == false)
+ return;
+#endif
+ assert(malloc_initialized);
- malloc_mutex_lock(&arenas_lock);
- for (i = 0; i < narenas; i++) {
+ /* Acquire all mutexes in a safe order. */
+ ctl_prefork();
+ prof_prefork();
+ malloc_mutex_prefork(&arenas_lock);
+ for (i = 0; i < narenas_total; i++) {
if (arenas[i] != NULL)
- malloc_mutex_lock(&arenas[i]->lock);
+ arena_prefork(arenas[i]);
}
+ chunk_prefork();
+ base_prefork();
+ huge_prefork();
+}
- malloc_mutex_lock(&base_mtx);
-
- malloc_mutex_lock(&huge_mtx);
-
-#ifdef JEMALLOC_DSS
- malloc_mutex_lock(&dss_mtx);
+#ifndef JEMALLOC_MUTEX_INIT_CB
+void
+jemalloc_postfork_parent(void)
+#else
+JEMALLOC_EXPORT void
+_malloc_postfork(void)
#endif
+{
+ unsigned i;
-#ifdef JEMALLOC_SWAP
- malloc_mutex_lock(&swap_mtx);
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ if (malloc_initialized == false)
+ return;
#endif
+ assert(malloc_initialized);
+
+ /* Release all mutexes, now that fork() has completed. */
+ huge_postfork_parent();
+ base_postfork_parent();
+ chunk_postfork_parent();
+ for (i = 0; i < narenas_total; i++) {
+ if (arenas[i] != NULL)
+ arena_postfork_parent(arenas[i]);
+ }
+ malloc_mutex_postfork_parent(&arenas_lock);
+ prof_postfork_parent();
+ ctl_postfork_parent();
}
void
-jemalloc_postfork(void)
+jemalloc_postfork_child(void)
{
unsigned i;
+ assert(malloc_initialized);
+
/* Release all mutexes, now that fork() has completed. */
+ huge_postfork_child();
+ base_postfork_child();
+ chunk_postfork_child();
+ for (i = 0; i < narenas_total; i++) {
+ if (arenas[i] != NULL)
+ arena_postfork_child(arenas[i]);
+ }
+ malloc_mutex_postfork_child(&arenas_lock);
+ prof_postfork_child();
+ ctl_postfork_child();
+}
-#ifdef JEMALLOC_SWAP
- malloc_mutex_unlock(&swap_mtx);
-#endif
+/******************************************************************************/
+/*
+ * The following functions are used for TLS allocation/deallocation in static
+ * binaries on FreeBSD. The primary difference between these and i[mcd]alloc()
+ * is that these avoid accessing TLS variables.
+ */
-#ifdef JEMALLOC_DSS
- malloc_mutex_unlock(&dss_mtx);
-#endif
+static void *
+a0alloc(size_t size, bool zero)
+{
- malloc_mutex_unlock(&huge_mtx);
+ if (malloc_init())
+ return (NULL);
- malloc_mutex_unlock(&base_mtx);
+ if (size == 0)
+ size = 1;
- for (i = 0; i < narenas; i++) {
- if (arenas[i] != NULL)
- malloc_mutex_unlock(&arenas[i]->lock);
- }
- malloc_mutex_unlock(&arenas_lock);
+ if (size <= arena_maxclass)
+ return (arena_malloc(arenas[0], size, zero, false));
+ else
+ return (huge_malloc(size, zero));
+}
+
+void *
+a0malloc(size_t size)
+{
+
+ return (a0alloc(size, false));
+}
+
+void *
+a0calloc(size_t num, size_t size)
+{
+
+ return (a0alloc(num * size, true));
+}
+
+void
+a0free(void *ptr)
+{
+ arena_chunk_t *chunk;
+
+ if (ptr == NULL)
+ return;
+
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ if (chunk != ptr)
+ arena_dalloc(chunk->arena, chunk, ptr, false);
+ else
+ huge_dalloc(ptr, true);
}
/******************************************************************************/
diff --git a/dep/jemalloc/src/mb.c b/dep/jemalloc/src/mb.c
index 30a1a2e997a..dc2c0a256fd 100644
--- a/dep/jemalloc/src/mb.c
+++ b/dep/jemalloc/src/mb.c
@@ -1,2 +1,2 @@
-#define MB_C_
+#define JEMALLOC_MB_C_
#include "jemalloc/internal/jemalloc_internal.h"
diff --git a/dep/jemalloc/src/mutex.c b/dep/jemalloc/src/mutex.c
index 3ecb18a340e..55e18c23713 100644
--- a/dep/jemalloc/src/mutex.c
+++ b/dep/jemalloc/src/mutex.c
@@ -1,14 +1,26 @@
#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_internal.h"
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
+#include <dlfcn.h>
+#endif
+
+#ifndef _CRT_SPINCOUNT
+#define _CRT_SPINCOUNT 4000
+#endif
+
/******************************************************************************/
/* Data. */
#ifdef JEMALLOC_LAZY_LOCK
bool isthreaded = false;
#endif
+#ifdef JEMALLOC_MUTEX_INIT_CB
+static bool postpone_init = true;
+static malloc_mutex_t *postponed_mutexes = NULL;
+#endif
-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static void pthread_create_once(void);
#endif
@@ -18,7 +30,7 @@ static void pthread_create_once(void);
* process goes multi-threaded.
*/
-#ifdef JEMALLOC_LAZY_LOCK
+#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32)
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
@@ -36,8 +48,7 @@ pthread_create_once(void)
isthreaded = true;
}
-JEMALLOC_ATTR(visibility("default"))
-int
+JEMALLOC_EXPORT int
pthread_create(pthread_t *__restrict thread,
const pthread_attr_t *__restrict attr, void *(*start_routine)(void *),
void *__restrict arg)
@@ -52,33 +63,87 @@ pthread_create(pthread_t *__restrict thread,
/******************************************************************************/
+#ifdef JEMALLOC_MUTEX_INIT_CB
+JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
+ void *(calloc_cb)(size_t, size_t));
+#endif
+
bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
+
+#ifdef _WIN32
+ if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
+ _CRT_SPINCOUNT))
+ return (true);
+#elif (defined(JEMALLOC_OSSPIN))
+ mutex->lock = 0;
+#elif (defined(JEMALLOC_MUTEX_INIT_CB))
+ if (postpone_init) {
+ mutex->postponed_next = postponed_mutexes;
+ postponed_mutexes = mutex;
+ } else {
+ if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
+ 0)
+ return (true);
+ }
+#else
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr) != 0)
return (true);
-#ifdef PTHREAD_MUTEX_ADAPTIVE_NP
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
-#else
- pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_DEFAULT);
-#endif
- if (pthread_mutex_init(mutex, &attr) != 0) {
+ pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE);
+ if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
pthread_mutexattr_destroy(&attr);
return (true);
}
pthread_mutexattr_destroy(&attr);
-
+#endif
return (false);
}
void
-malloc_mutex_destroy(malloc_mutex_t *mutex)
+malloc_mutex_prefork(malloc_mutex_t *mutex)
{
- if (pthread_mutex_destroy(mutex) != 0) {
- malloc_write("<jemalloc>: Error in pthread_mutex_destroy()\n");
- abort();
+ malloc_mutex_lock(mutex);
+}
+
+void
+malloc_mutex_postfork_parent(malloc_mutex_t *mutex)
+{
+
+ malloc_mutex_unlock(mutex);
+}
+
+void
+malloc_mutex_postfork_child(malloc_mutex_t *mutex)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ malloc_mutex_unlock(mutex);
+#else
+ if (malloc_mutex_init(mutex)) {
+ malloc_printf("<jemalloc>: Error re-initializing mutex in "
+ "child\n");
+ if (opt_abort)
+ abort();
}
+#endif
+}
+
+bool
+mutex_boot(void)
+{
+
+#ifdef JEMALLOC_MUTEX_INIT_CB
+ postpone_init = false;
+ while (postponed_mutexes != NULL) {
+ if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
+ base_calloc) != 0)
+ return (true);
+ postponed_mutexes = postponed_mutexes->postponed_next;
+ }
+#endif
+ return (false);
}
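The JEMALLOC_MUTEX_INIT_CB branch above queues mutexes created before _pthread_mutex_init_calloc_cb() is usable and drains that queue in mutex_boot(). A standalone sketch of the same postponed-initialization pattern, using plain pthread_mutex_init() for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct lazy_mutex_s lazy_mutex_t;
struct lazy_mutex_s {
    pthread_mutex_t lock;
    lazy_mutex_t *postponed_next;
};

static bool postpone_init = true;
static lazy_mutex_t *postponed_mutexes = NULL;

static bool
lazy_mutex_init(lazy_mutex_t *m)
{
    if (postpone_init) {
        /* Too early to initialize; queue on the intrusive list. */
        m->postponed_next = postponed_mutexes;
        postponed_mutexes = m;
        return (false);
    }
    return (pthread_mutex_init(&m->lock, NULL) != 0);
}

static bool
lazy_mutex_boot(void)
{
    postpone_init = false;
    while (postponed_mutexes != NULL) {
        if (pthread_mutex_init(&postponed_mutexes->lock, NULL) != 0)
            return (true);
        postponed_mutexes = postponed_mutexes->postponed_next;
    }
    return (false);
}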
diff --git a/dep/jemalloc/src/prof.c b/dep/jemalloc/src/prof.c
index 636cccef52a..c133b95c2c6 100644
--- a/dep/jemalloc/src/prof.c
+++ b/dep/jemalloc/src/prof.c
@@ -1,41 +1,43 @@
#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_PROF
/******************************************************************************/
-#ifdef JEMALLOC_PROF_LIBGCC
-#include <unwind.h>
-#endif
-
#ifdef JEMALLOC_PROF_LIBUNWIND
#define UNW_LOCAL_ONLY
#include <libunwind.h>
#endif
+#ifdef JEMALLOC_PROF_LIBGCC
+#include <unwind.h>
+#endif
+
/******************************************************************************/
/* Data. */
+malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL)
+
bool opt_prof = false;
bool opt_prof_active = true;
-size_t opt_lg_prof_bt_max = LG_PROF_BT_MAX_DEFAULT;
size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool opt_prof_gdump = false;
+bool opt_prof_final = true;
bool opt_prof_leak = false;
-bool opt_prof_accum = true;
-ssize_t opt_lg_prof_tcmax = LG_PROF_TCMAX_DEFAULT;
+bool opt_prof_accum = false;
char opt_prof_prefix[PATH_MAX + 1];
-uint64_t prof_interval;
+uint64_t prof_interval = 0;
bool prof_promote;
-unsigned prof_bt_max;
-
-#ifndef NO_TLS
-__thread prof_tdata_t *prof_tdata_tls
- JEMALLOC_ATTR(tls_model("initial-exec"));
-#endif
-pthread_key_t prof_tdata_tsd;
+/*
+ * Table of mutexes that are shared among ctx's. These are leaf locks, so
+ * there is no problem with using them for more than one ctx at the same time.
+ * The primary motivation for this sharing though is that ctx's are ephemeral,
+ * and destroying mutexes causes complications for systems that allocate when
+ * creating/destroying mutexes.
+ */
+static malloc_mutex_t *ctx_locks;
+static unsigned cum_ctxs; /* Atomic counter. */
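One common way to index such a shared table is round-robin from an atomic counter, so every new ctx borrows a long-lived leaf lock instead of creating its own. A standalone sketch of that assignment scheme (C11 atomics and an assumed table size; not the patch's prof_ctx_mutex_choose() itself):

#include <pthread.h>
#include <stdatomic.h>

#define NCTX_LOCKS 1024               /* assumed table size */

static pthread_mutex_t ctx_lock_table[NCTX_LOCKS];
static atomic_uint ctx_seq;

static void
ctx_locks_boot(void)
{
    for (unsigned i = 0; i < NCTX_LOCKS; i++)
        pthread_mutex_init(&ctx_lock_table[i], NULL);
}

/* Hand out leaf locks round-robin; ctx's never create or destroy mutexes. */
static pthread_mutex_t *
ctx_mutex_choose(void)
{
    unsigned seq = atomic_fetch_add_explicit(&ctx_seq, 1,
        memory_order_relaxed);

    return (&ctx_lock_table[seq % NCTX_LOCKS]);
}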
/*
* Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data
@@ -55,18 +57,13 @@ static uint64_t prof_dump_useq;
* all profile dumps. The buffer is implicitly protected by bt2ctx_mtx, since
* it must be locked anyway during dumping.
*/
-static char prof_dump_buf[PROF_DUMP_BUF_SIZE];
+static char prof_dump_buf[PROF_DUMP_BUFSIZE];
static unsigned prof_dump_buf_end;
static int prof_dump_fd;
/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;
-static malloc_mutex_t enq_mtx;
-static bool enq;
-static bool enq_idump;
-static bool enq_gdump;
-
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
@@ -79,22 +76,23 @@ static _Unwind_Reason_Code prof_unwind_callback(
struct _Unwind_Context *context, void *arg);
#endif
static bool prof_flush(bool propagate_err);
-static bool prof_write(const char *s, bool propagate_err);
+static bool prof_write(bool propagate_err, const char *s);
+static bool prof_printf(bool propagate_err, const char *format, ...)
+ JEMALLOC_ATTR(format(printf, 2, 3));
static void prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all,
size_t *leak_nctx);
static void prof_ctx_destroy(prof_ctx_t *ctx);
static void prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt);
-static bool prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt,
- bool propagate_err);
+static bool prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx,
+ prof_bt_t *bt);
static bool prof_dump_maps(bool propagate_err);
-static bool prof_dump(const char *filename, bool leakcheck,
- bool propagate_err);
+static bool prof_dump(bool propagate_err, const char *filename,
+ bool leakcheck);
static void prof_dump_filename(char *filename, char v, int64_t vseq);
static void prof_fdump(void);
-static void prof_bt_hash(const void *key, unsigned minbits, size_t *hash1,
- size_t *hash2);
+static void prof_bt_hash(const void *key, size_t r_hash[2]);
static bool prof_bt_keycomp(const void *k1, const void *k2);
-static void prof_tdata_cleanup(void *arg);
+static malloc_mutex_t *prof_ctx_mutex_choose(void);
/******************************************************************************/
@@ -102,6 +100,8 @@ void
bt_init(prof_bt_t *bt, void **vec)
{
+ cassert(config_prof);
+
bt->vec = vec;
bt->len = 0;
}
@@ -110,6 +110,8 @@ static void
bt_destroy(prof_bt_t *bt)
{
+ cassert(config_prof);
+
idalloc(bt);
}
@@ -118,6 +120,8 @@ bt_dup(prof_bt_t *bt)
{
prof_bt_t *ret;
+ cassert(config_prof);
+
/*
* Create a single allocation that has space for vec immediately
* following the prof_bt_t structure. The backtraces that get
@@ -138,30 +142,32 @@ bt_dup(prof_bt_t *bt)
}
static inline void
-prof_enter(void)
+prof_enter(prof_tdata_t *prof_tdata)
{
- malloc_mutex_lock(&enq_mtx);
- enq = true;
- malloc_mutex_unlock(&enq_mtx);
+ cassert(config_prof);
+
+ assert(prof_tdata->enq == false);
+ prof_tdata->enq = true;
malloc_mutex_lock(&bt2ctx_mtx);
}
static inline void
-prof_leave(void)
+prof_leave(prof_tdata_t *prof_tdata)
{
bool idump, gdump;
+ cassert(config_prof);
+
malloc_mutex_unlock(&bt2ctx_mtx);
- malloc_mutex_lock(&enq_mtx);
- enq = false;
- idump = enq_idump;
- enq_idump = false;
- gdump = enq_gdump;
- enq_gdump = false;
- malloc_mutex_unlock(&enq_mtx);
+ assert(prof_tdata->enq);
+ prof_tdata->enq = false;
+ idump = prof_tdata->enq_idump;
+ prof_tdata->enq_idump = false;
+ gdump = prof_tdata->enq_gdump;
+ prof_tdata->enq_gdump = false;
if (idump)
prof_idump();
@@ -169,50 +175,18 @@ prof_leave(void)
prof_gdump();
}
-#ifdef JEMALLOC_PROF_LIBGCC
-static _Unwind_Reason_Code
-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
-{
-
- return (_URC_NO_REASON);
-}
-
-static _Unwind_Reason_Code
-prof_unwind_callback(struct _Unwind_Context *context, void *arg)
-{
- prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
-
- if (data->nignore > 0)
- data->nignore--;
- else {
- data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
- data->bt->len++;
- if (data->bt->len == data->max)
- return (_URC_END_OF_STACK);
- }
-
- return (_URC_NO_REASON);
-}
-
-void
-prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
-{
- prof_unwind_data_t data = {bt, nignore, max};
-
- _Unwind_Backtrace(prof_unwind_callback, &data);
-}
-#elif defined(JEMALLOC_PROF_LIBUNWIND)
+#ifdef JEMALLOC_PROF_LIBUNWIND
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
unw_context_t uc;
unw_cursor_t cursor;
unsigned i;
int err;
+ cassert(config_prof);
assert(bt->len == 0);
assert(bt->vec != NULL);
- assert(max <= (1U << opt_lg_prof_bt_max));
unw_getcontext(&uc);
unw_init_local(&cursor, &uc);
@@ -228,7 +202,7 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
* Iterate over stack frames until there are no more, or until no space
* remains in bt.
*/
- for (i = 0; i < max; i++) {
+ for (i = 0; i < PROF_BT_MAX; i++) {
unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]);
bt->len++;
err = unw_step(&cursor);
@@ -236,12 +210,50 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
break;
}
}
-#else
+#elif (defined(JEMALLOC_PROF_LIBGCC))
+static _Unwind_Reason_Code
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
+{
+
+ cassert(config_prof);
+
+ return (_URC_NO_REASON);
+}
+
+static _Unwind_Reason_Code
+prof_unwind_callback(struct _Unwind_Context *context, void *arg)
+{
+ prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+
+ cassert(config_prof);
+
+ if (data->nignore > 0)
+ data->nignore--;
+ else {
+ data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context);
+ data->bt->len++;
+ if (data->bt->len == data->max)
+ return (_URC_END_OF_STACK);
+ }
+
+ return (_URC_NO_REASON);
+}
+
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+ prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX};
+
+ cassert(config_prof);
+
+ _Unwind_Backtrace(prof_unwind_callback, &data);
+}
+#elif (defined(JEMALLOC_PROF_GCC))
void
-prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
{
#define BT_FRAME(i) \
- if ((i) < nignore + max) { \
+ if ((i) < nignore + PROF_BT_MAX) { \
void *p; \
if (__builtin_frame_address(i) == 0) \
return; \
@@ -255,8 +267,8 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
} else \
return;
+ cassert(config_prof);
assert(nignore <= 3);
- assert(max <= (1U << opt_lg_prof_bt_max));
BT_FRAME(0)
BT_FRAME(1)
@@ -405,6 +417,14 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore, unsigned max)
BT_FRAME(130)
#undef BT_FRAME
}
+#else
+void
+prof_backtrace(prof_bt_t *bt, unsigned nignore)
+{
+
+ cassert(config_prof);
+ assert(false);
+}
#endif
prof_thr_cnt_t *
@@ -416,12 +436,11 @@ prof_lookup(prof_bt_t *bt)
} ret;
prof_tdata_t *prof_tdata;
- prof_tdata = PROF_TCACHE_GET();
- if (prof_tdata == NULL) {
- prof_tdata = prof_tdata_init();
- if (prof_tdata == NULL)
- return (NULL);
- }
+ cassert(config_prof);
+
+ prof_tdata = prof_tdata_get(false);
+ if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ return (NULL);
if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) {
union {
@@ -432,53 +451,57 @@ prof_lookup(prof_bt_t *bt)
prof_ctx_t *p;
void *v;
} ctx;
+ bool new_ctx;
/*
* This thread's cache lacks bt. Look for it in the global
* cache.
*/
- prof_enter();
+ prof_enter(prof_tdata);
if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) {
/* bt has never been seen before. Insert it. */
ctx.v = imalloc(sizeof(prof_ctx_t));
if (ctx.v == NULL) {
- prof_leave();
+ prof_leave(prof_tdata);
return (NULL);
}
btkey.p = bt_dup(bt);
if (btkey.v == NULL) {
- prof_leave();
+ prof_leave(prof_tdata);
idalloc(ctx.v);
return (NULL);
}
ctx.p->bt = btkey.p;
- if (malloc_mutex_init(&ctx.p->lock)) {
- prof_leave();
- idalloc(btkey.v);
- idalloc(ctx.v);
- return (NULL);
- }
+ ctx.p->lock = prof_ctx_mutex_choose();
+ /*
+ * Set nlimbo to 1, in order to avoid a race condition
+ * with prof_ctx_merge()/prof_ctx_destroy().
+ */
+ ctx.p->nlimbo = 1;
memset(&ctx.p->cnt_merged, 0, sizeof(prof_cnt_t));
ql_new(&ctx.p->cnts_ql);
if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) {
/* OOM. */
- prof_leave();
- malloc_mutex_destroy(&ctx.p->lock);
+ prof_leave(prof_tdata);
idalloc(btkey.v);
idalloc(ctx.v);
return (NULL);
}
+ new_ctx = true;
+ } else {
+ /*
+ * Increment nlimbo, in order to avoid a race condition
+ * with prof_ctx_merge()/prof_ctx_destroy().
+ */
+ malloc_mutex_lock(ctx.p->lock);
+ ctx.p->nlimbo++;
+ malloc_mutex_unlock(ctx.p->lock);
+ new_ctx = false;
}
- /*
- * Acquire ctx's lock before releasing bt2ctx_mtx, in order to
- * avoid a race condition with prof_ctx_destroy().
- */
- malloc_mutex_lock(&ctx.p->lock);
- prof_leave();
+ prof_leave(prof_tdata);
/* Link a prof_thd_cnt_t into ctx for this thread. */
- if (opt_lg_prof_tcmax >= 0 && ckh_count(&prof_tdata->bt2cnt)
- == (ZU(1) << opt_lg_prof_tcmax)) {
+ if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) {
assert(ckh_count(&prof_tdata->bt2cnt) > 0);
/*
* Flush the least recently used cnt in order to keep
@@ -486,19 +509,19 @@ prof_lookup(prof_bt_t *bt)
*/
ret.p = ql_last(&prof_tdata->lru_ql, lru_link);
assert(ret.v != NULL);
- ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, NULL,
- NULL);
+ if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt,
+ NULL, NULL))
+ assert(false);
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
prof_ctx_merge(ret.p->ctx, ret.p);
/* ret can now be re-used. */
} else {
- assert(opt_lg_prof_tcmax < 0 ||
- ckh_count(&prof_tdata->bt2cnt) < (ZU(1) <<
- opt_lg_prof_tcmax));
+ assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX);
/* Allocate and partially initialize a new cnt. */
ret.v = imalloc(sizeof(prof_thr_cnt_t));
if (ret.p == NULL) {
- malloc_mutex_unlock(&ctx.p->lock);
+ if (new_ctx)
+ prof_ctx_destroy(ctx.p);
return (NULL);
}
ql_elm_new(ret.p, cnts_link);
@@ -509,13 +532,16 @@ prof_lookup(prof_bt_t *bt)
ret.p->epoch = 0;
memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
if (ckh_insert(&prof_tdata->bt2cnt, btkey.v, ret.v)) {
- malloc_mutex_unlock(&ctx.p->lock);
+ if (new_ctx)
+ prof_ctx_destroy(ctx.p);
idalloc(ret.v);
return (NULL);
}
ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link);
+ malloc_mutex_lock(ctx.p->lock);
ql_tail_insert(&ctx.p->cnts_ql, ret.p, cnts_link);
- malloc_mutex_unlock(&ctx.p->lock);
+ ctx.p->nlimbo--;
+ malloc_mutex_unlock(ctx.p->lock);
} else {
/* Move ret to the front of the LRU. */
ql_remove(&prof_tdata->lru_ql, ret.p, lru_link);
@@ -531,6 +557,8 @@ prof_flush(bool propagate_err)
bool ret = false;
ssize_t err;
+ cassert(config_prof);
+
err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
if (err == -1) {
if (propagate_err == false) {
@@ -547,24 +575,26 @@ prof_flush(bool propagate_err)
}
static bool
-prof_write(const char *s, bool propagate_err)
+prof_write(bool propagate_err, const char *s)
{
unsigned i, slen, n;
+ cassert(config_prof);
+
i = 0;
slen = strlen(s);
while (i < slen) {
/* Flush the buffer if it is full. */
- if (prof_dump_buf_end == PROF_DUMP_BUF_SIZE)
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
if (prof_flush(propagate_err) && propagate_err)
return (true);
- if (prof_dump_buf_end + slen <= PROF_DUMP_BUF_SIZE) {
+ if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
/* Finish writing. */
n = slen - i;
} else {
/* Write as much of s as will fit. */
- n = PROF_DUMP_BUF_SIZE - prof_dump_buf_end;
+ n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
}
memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
prof_dump_buf_end += n;
@@ -574,13 +604,31 @@ prof_write(const char *s, bool propagate_err)
return (false);
}
+JEMALLOC_ATTR(format(printf, 2, 3))
+static bool
+prof_printf(bool propagate_err, const char *format, ...)
+{
+ bool ret;
+ va_list ap;
+ char buf[PROF_PRINTF_BUFSIZE];
+
+ va_start(ap, format);
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ ret = prof_write(propagate_err, buf);
+
+ return (ret);
+}
+
static void
prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
{
prof_thr_cnt_t *thr_cnt;
prof_cnt_t tcnt;
- malloc_mutex_lock(&ctx->lock);
+ cassert(config_prof);
+
+ malloc_mutex_lock(ctx->lock);
memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t));
ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) {
@@ -619,40 +667,48 @@ prof_ctx_sum(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx)
cnt_all->accumbytes += ctx->cnt_summed.accumbytes;
}
- malloc_mutex_unlock(&ctx->lock);
+ malloc_mutex_unlock(ctx->lock);
}
static void
prof_ctx_destroy(prof_ctx_t *ctx)
{
+ prof_tdata_t *prof_tdata;
+
+ cassert(config_prof);
/*
* Check that ctx is still unused by any thread cache before destroying
- * it. prof_lookup() interlocks bt2ctx_mtx and ctx->lock in order to
- * avoid a race condition with this function, and prof_ctx_merge()
- * artificially raises ctx->cnt_merged.curobjs in order to avoid a race
- * between the main body of prof_ctx_merge() and entry into this
- * function.
+ * it. prof_lookup() increments ctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_ctx_merge() in order to
+ * avoid a race between the main body of prof_ctx_merge() and entry
+ * into this function.
*/
- prof_enter();
- malloc_mutex_lock(&ctx->lock);
- if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 1) {
+ prof_tdata = prof_tdata_get(false);
+ assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX);
+ prof_enter(prof_tdata);
+ malloc_mutex_lock(ctx->lock);
+ if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 &&
+ ctx->nlimbo == 1) {
assert(ctx->cnt_merged.curbytes == 0);
assert(ctx->cnt_merged.accumobjs == 0);
assert(ctx->cnt_merged.accumbytes == 0);
/* Remove ctx from bt2ctx. */
- ckh_remove(&bt2ctx, ctx->bt, NULL, NULL);
- prof_leave();
+ if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL))
+ assert(false);
+ prof_leave(prof_tdata);
/* Destroy ctx. */
- malloc_mutex_unlock(&ctx->lock);
+ malloc_mutex_unlock(ctx->lock);
bt_destroy(ctx->bt);
- malloc_mutex_destroy(&ctx->lock);
idalloc(ctx);
} else {
- /* Compensate for increment in prof_ctx_merge(). */
- ctx->cnt_merged.curobjs--;
- malloc_mutex_unlock(&ctx->lock);
- prof_leave();
+ /*
+ * Compensate for increment in prof_ctx_merge() or
+ * prof_lookup().
+ */
+ ctx->nlimbo--;
+ malloc_mutex_unlock(ctx->lock);
+ prof_leave(prof_tdata);
}
}
@@ -661,20 +717,22 @@ prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
{
bool destroy;
+ cassert(config_prof);
+
/* Merge cnt stats and detach from ctx. */
- malloc_mutex_lock(&ctx->lock);
+ malloc_mutex_lock(ctx->lock);
ctx->cnt_merged.curobjs += cnt->cnts.curobjs;
ctx->cnt_merged.curbytes += cnt->cnts.curbytes;
ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs;
ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes;
ql_remove(&ctx->cnts_ql, cnt, cnts_link);
if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL &&
- ctx->cnt_merged.curobjs == 0) {
+ ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) {
/*
- * Artificially raise ctx->cnt_merged.curobjs in order to keep
- * another thread from winning the race to destroy ctx while
- * this one has ctx->lock dropped. Without this, it would be
- * possible for another thread to:
+ * Increment ctx->nlimbo in order to keep another thread from
+ * winning the race to destroy ctx while this one has ctx->lock
+ * dropped. Without this, it would be possible for another
+ * thread to:
*
* 1) Sample an allocation associated with ctx.
* 2) Deallocate the sampled object.
@@ -683,49 +741,51 @@ prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt)
* The result would be that ctx no longer exists by the time
* this thread accesses it in prof_ctx_destroy().
*/
- ctx->cnt_merged.curobjs++;
+ ctx->nlimbo++;
destroy = true;
} else
destroy = false;
- malloc_mutex_unlock(&ctx->lock);
+ malloc_mutex_unlock(ctx->lock);
if (destroy)
prof_ctx_destroy(ctx);
}
static bool
-prof_dump_ctx(prof_ctx_t *ctx, prof_bt_t *bt, bool propagate_err)
+prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, prof_bt_t *bt)
{
- char buf[UMAX2S_BUFSIZE];
unsigned i;
- if (opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) {
+ cassert(config_prof);
+
+ /*
+ * Current statistics can sum to 0 as a result of unmerged per thread
+ * statistics. Additionally, interval- and growth-triggered dumps can
+ * occur between the time a ctx is created and when its statistics are
+ * filled in. Avoid dumping any ctx that is an artifact of either
+ * implementation detail.
+ */
+ if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) ||
+ (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) {
+ assert(ctx->cnt_summed.curobjs == 0);
assert(ctx->cnt_summed.curbytes == 0);
assert(ctx->cnt_summed.accumobjs == 0);
assert(ctx->cnt_summed.accumbytes == 0);
return (false);
}
- if (prof_write(u2s(ctx->cnt_summed.curobjs, 10, buf), propagate_err)
- || prof_write(": ", propagate_err)
- || prof_write(u2s(ctx->cnt_summed.curbytes, 10, buf),
- propagate_err)
- || prof_write(" [", propagate_err)
- || prof_write(u2s(ctx->cnt_summed.accumobjs, 10, buf),
- propagate_err)
- || prof_write(": ", propagate_err)
- || prof_write(u2s(ctx->cnt_summed.accumbytes, 10, buf),
- propagate_err)
- || prof_write("] @", propagate_err))
+ if (prof_printf(propagate_err, "%"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @",
+ ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes,
+ ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes))
return (true);
for (i = 0; i < bt->len; i++) {
- if (prof_write(" 0x", propagate_err)
- || prof_write(u2s((uintptr_t)bt->vec[i], 16, buf),
- propagate_err))
+ if (prof_printf(propagate_err, " %#"PRIxPTR,
+ (uintptr_t)bt->vec[i]))
return (true);
}
- if (prof_write("\n", propagate_err))
+ if (prof_write(propagate_err, "\n"))
return (true);
return (false);
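For reference, the prof_printf() calls above emit one record per ctx of the form "<curobjs>: <curbytes> [<accumobjs>: <accumbytes>] @" followed by one hex frame address per backtrace entry, e.g. (values illustrative):

6: 12288 [40: 81920] @ 0x7f3a2c011234 0x7f3a2c00a1b0 0x400d52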
@@ -735,49 +795,29 @@ static bool
prof_dump_maps(bool propagate_err)
{
int mfd;
- char buf[UMAX2S_BUFSIZE];
- char *s;
- unsigned i, slen;
- /* /proc/<pid>/maps\0 */
- char mpath[6 + UMAX2S_BUFSIZE
- + 5 + 1];
-
- i = 0;
-
- s = "/proc/";
- slen = strlen(s);
- memcpy(&mpath[i], s, slen);
- i += slen;
-
- s = u2s(getpid(), 10, buf);
- slen = strlen(s);
- memcpy(&mpath[i], s, slen);
- i += slen;
-
- s = "/maps";
- slen = strlen(s);
- memcpy(&mpath[i], s, slen);
- i += slen;
+ char filename[PATH_MAX + 1];
- mpath[i] = '\0';
+ cassert(config_prof);
- mfd = open(mpath, O_RDONLY);
+ malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps",
+ (int)getpid());
+ mfd = open(filename, O_RDONLY);
if (mfd != -1) {
ssize_t nread;
- if (prof_write("\nMAPPED_LIBRARIES:\n", propagate_err) &&
+ if (prof_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
propagate_err)
return (true);
nread = 0;
do {
prof_dump_buf_end += nread;
- if (prof_dump_buf_end == PROF_DUMP_BUF_SIZE) {
+ if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
/* Make space in prof_dump_buf before read(). */
if (prof_flush(propagate_err) && propagate_err)
return (true);
}
nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
- PROF_DUMP_BUF_SIZE - prof_dump_buf_end);
+ PROF_DUMP_BUFSIZE - prof_dump_buf_end);
} while (nread > 0);
close(mfd);
} else
@@ -787,8 +827,9 @@ prof_dump_maps(bool propagate_err)
}
static bool
-prof_dump(const char *filename, bool leakcheck, bool propagate_err)
+prof_dump(bool propagate_err, const char *filename, bool leakcheck)
{
+ prof_tdata_t *prof_tdata;
prof_cnt_t cnt_all;
size_t tabind;
union {
@@ -799,20 +840,24 @@ prof_dump(const char *filename, bool leakcheck, bool propagate_err)
prof_ctx_t *p;
void *v;
} ctx;
- char buf[UMAX2S_BUFSIZE];
size_t leak_nctx;
- prof_enter();
+ cassert(config_prof);
+
+ prof_tdata = prof_tdata_get(false);
+ if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ return (true);
+ prof_enter(prof_tdata);
prof_dump_fd = creat(filename, 0644);
if (prof_dump_fd == -1) {
if (propagate_err == false) {
- malloc_write("<jemalloc>: creat(\"");
- malloc_write(filename);
- malloc_write("\", 0644) failed\n");
+ malloc_printf(
+ "<jemalloc>: creat(\"%s\"), 0644) failed\n",
+ filename);
if (opt_abort)
abort();
}
- goto ERROR;
+ goto label_error;
}
/* Merge per thread profile stats, and sum them in cnt_all. */
@@ -822,131 +867,75 @@ prof_dump(const char *filename, bool leakcheck, bool propagate_err)
prof_ctx_sum(ctx.p, &cnt_all, &leak_nctx);
/* Dump profile header. */
- if (prof_write("heap profile: ", propagate_err)
- || prof_write(u2s(cnt_all.curobjs, 10, buf), propagate_err)
- || prof_write(": ", propagate_err)
- || prof_write(u2s(cnt_all.curbytes, 10, buf), propagate_err)
- || prof_write(" [", propagate_err)
- || prof_write(u2s(cnt_all.accumobjs, 10, buf), propagate_err)
- || prof_write(": ", propagate_err)
- || prof_write(u2s(cnt_all.accumbytes, 10, buf), propagate_err))
- goto ERROR;
-
if (opt_lg_prof_sample == 0) {
- if (prof_write("] @ heapprofile\n", propagate_err))
- goto ERROR;
+ if (prof_printf(propagate_err,
+ "heap profile: %"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @ heapprofile\n",
+ cnt_all.curobjs, cnt_all.curbytes,
+ cnt_all.accumobjs, cnt_all.accumbytes))
+ goto label_error;
} else {
- if (prof_write("] @ heap_v2/", propagate_err)
- || prof_write(u2s((uint64_t)1U << opt_lg_prof_sample, 10,
- buf), propagate_err)
- || prof_write("\n", propagate_err))
- goto ERROR;
+ if (prof_printf(propagate_err,
+ "heap profile: %"PRId64": %"PRId64
+ " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n",
+ cnt_all.curobjs, cnt_all.curbytes,
+ cnt_all.accumobjs, cnt_all.accumbytes,
+ ((uint64_t)1U << opt_lg_prof_sample)))
+ goto label_error;
}
/* Dump per ctx profile stats. */
for (tabind = 0; ckh_iter(&bt2ctx, &tabind, &bt.v, &ctx.v)
== false;) {
- if (prof_dump_ctx(ctx.p, bt.p, propagate_err))
- goto ERROR;
+ if (prof_dump_ctx(propagate_err, ctx.p, bt.p))
+ goto label_error;
}
/* Dump /proc/<pid>/maps if possible. */
if (prof_dump_maps(propagate_err))
- goto ERROR;
+ goto label_error;
if (prof_flush(propagate_err))
- goto ERROR;
+ goto label_error;
close(prof_dump_fd);
- prof_leave();
+ prof_leave(prof_tdata);
if (leakcheck && cnt_all.curbytes != 0) {
- malloc_write("<jemalloc>: Leak summary: ");
- malloc_write(u2s(cnt_all.curbytes, 10, buf));
- malloc_write((cnt_all.curbytes != 1) ? " bytes, " : " byte, ");
- malloc_write(u2s(cnt_all.curobjs, 10, buf));
- malloc_write((cnt_all.curobjs != 1) ? " objects, " :
- " object, ");
- malloc_write(u2s(leak_nctx, 10, buf));
- malloc_write((leak_nctx != 1) ? " contexts\n" : " context\n");
- malloc_write("<jemalloc>: Run pprof on \"");
- malloc_write(filename);
- malloc_write("\" for leak detail\n");
+ malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %"
+ PRId64" object%s, %zu context%s\n",
+ cnt_all.curbytes, (cnt_all.curbytes != 1) ? "s" : "",
+ cnt_all.curobjs, (cnt_all.curobjs != 1) ? "s" : "",
+ leak_nctx, (leak_nctx != 1) ? "s" : "");
+ malloc_printf(
+ "<jemalloc>: Run pprof on \"%s\" for leak detail\n",
+ filename);
}
return (false);
-ERROR:
- prof_leave();
+label_error:
+ prof_leave(prof_tdata);
return (true);
}
-#define DUMP_FILENAME_BUFSIZE (PATH_MAX+ UMAX2S_BUFSIZE \
- + 1 \
- + UMAX2S_BUFSIZE \
- + 2 \
- + UMAX2S_BUFSIZE \
- + 5 + 1)
+#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
static void
prof_dump_filename(char *filename, char v, int64_t vseq)
{
- char buf[UMAX2S_BUFSIZE];
- char *s;
- unsigned i, slen;
-
- /*
- * Construct a filename of the form:
- *
- * <prefix>.<pid>.<seq>.v<vseq>.heap\0
- */
- i = 0;
-
- s = opt_prof_prefix;
- slen = strlen(s);
- memcpy(&filename[i], s, slen);
- i += slen;
-
- s = ".";
- slen = strlen(s);
- memcpy(&filename[i], s, slen);
- i += slen;
-
- s = u2s(getpid(), 10, buf);
- slen = strlen(s);
- memcpy(&filename[i], s, slen);
- i += slen;
+ cassert(config_prof);
- s = ".";
- slen = strlen(s);
- memcpy(&filename[i], s, slen);
- i += slen;
-
- s = u2s(prof_dump_seq, 10, buf);
- prof_dump_seq++;
- slen = strlen(s);
- memcpy(&filename[i], s, slen);
- i += slen;
-
- s = ".";
- slen = strlen(s);
- memcpy(&filename[i], s, slen);
- i += slen;
-
- filename[i] = v;
- i++;
-
- if (vseq != 0xffffffffffffffffLLU) {
- s = u2s(vseq, 10, buf);
- slen = strlen(s);
- memcpy(&filename[i], s, slen);
- i += slen;
+ if (vseq != UINT64_C(0xffffffffffffffff)) {
+ /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"PRIu64".%c%"PRId64".heap",
+ opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
+ } else {
+ /* "<prefix>.<pid>.<seq>.<v>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"PRIu64".%c.heap",
+ opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
}
-
- s = ".heap";
- slen = strlen(s);
- memcpy(&filename[i], s, slen);
- i += slen;
-
- filename[i] = '\0';
+ prof_dump_seq++;
}
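
The rewrite above replaces manual u2s()/memcpy() string assembly with a single format call. For comparison, the same "<prefix>.<pid>.<seq>.v<vseq>.heap" layout built with the standard library (plain snprintf()/getpid() instead of jemalloc's allocation-free malloc_snprintf(); -1 stands in for the "no vseq" sentinel, purely for brevity):

#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>

static void
dump_filename(char *buf, size_t bufsize, const char *prefix, char v,
    uint64_t seq, int64_t vseq)
{
    if (vseq != -1) {
        snprintf(buf, bufsize, "%s.%d.%" PRIu64 ".%c%" PRId64 ".heap",
            prefix, (int)getpid(), seq, v, vseq);
    } else {
        snprintf(buf, bufsize, "%s.%d.%" PRIu64 ".%c.heap",
            prefix, (int)getpid(), seq, v);
    }
}
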
static void
@@ -954,38 +943,43 @@ prof_fdump(void)
{
char filename[DUMP_FILENAME_BUFSIZE];
+ cassert(config_prof);
+
if (prof_booted == false)
return;
- if (opt_prof_prefix[0] != '\0') {
+ if (opt_prof_final && opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', 0xffffffffffffffffLLU);
+ prof_dump_filename(filename, 'f', UINT64_C(0xffffffffffffffff));
malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(filename, opt_prof_leak, false);
+ prof_dump(false, filename, opt_prof_leak);
}
}
void
prof_idump(void)
{
- char filename[DUMP_FILENAME_BUFSIZE];
+ prof_tdata_t *prof_tdata;
+ char filename[PATH_MAX + 1];
+
+ cassert(config_prof);
if (prof_booted == false)
return;
- malloc_mutex_lock(&enq_mtx);
- if (enq) {
- enq_idump = true;
- malloc_mutex_unlock(&enq_mtx);
+ prof_tdata = prof_tdata_get(false);
+ if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ return;
+ if (prof_tdata->enq) {
+ prof_tdata->enq_idump = true;
return;
}
- malloc_mutex_unlock(&enq_mtx);
if (opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'i', prof_dump_iseq);
prof_dump_iseq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(filename, false, false);
+ prof_dump(false, filename, false);
}
}
@@ -994,6 +988,8 @@ prof_mdump(const char *filename)
{
char filename_buf[DUMP_FILENAME_BUFSIZE];
+ cassert(config_prof);
+
if (opt_prof == false || prof_booted == false)
return (true);
@@ -1007,60 +1003,44 @@ prof_mdump(const char *filename)
malloc_mutex_unlock(&prof_dump_seq_mtx);
filename = filename_buf;
}
- return (prof_dump(filename, false, true));
+ return (prof_dump(true, filename, false));
}
void
prof_gdump(void)
{
+ prof_tdata_t *prof_tdata;
char filename[DUMP_FILENAME_BUFSIZE];
+ cassert(config_prof);
+
if (prof_booted == false)
return;
- malloc_mutex_lock(&enq_mtx);
- if (enq) {
- enq_gdump = true;
- malloc_mutex_unlock(&enq_mtx);
+ prof_tdata = prof_tdata_get(false);
+ if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
+ return;
+ if (prof_tdata->enq) {
+ prof_tdata->enq_gdump = true;
return;
}
- malloc_mutex_unlock(&enq_mtx);
if (opt_prof_prefix[0] != '\0') {
malloc_mutex_lock(&prof_dump_seq_mtx);
prof_dump_filename(filename, 'u', prof_dump_useq);
prof_dump_useq++;
malloc_mutex_unlock(&prof_dump_seq_mtx);
- prof_dump(filename, false, false);
+ prof_dump(false, filename, false);
}
}
static void
-prof_bt_hash(const void *key, unsigned minbits, size_t *hash1, size_t *hash2)
+prof_bt_hash(const void *key, size_t r_hash[2])
{
- size_t ret1, ret2;
- uint64_t h;
prof_bt_t *bt = (prof_bt_t *)key;
- assert(minbits <= 32 || (SIZEOF_PTR == 8 && minbits <= 64));
- assert(hash1 != NULL);
- assert(hash2 != NULL);
-
- h = hash(bt->vec, bt->len * sizeof(void *), 0x94122f335b332aeaLLU);
- if (minbits <= 32) {
- /*
- * Avoid doing multiple hashes, since a single hash provides
- * enough bits.
- */
- ret1 = h & ZU(0xffffffffU);
- ret2 = h >> 32;
- } else {
- ret1 = h;
- ret2 = hash(bt->vec, bt->len * sizeof(void *),
- 0x8432a476666bbc13U);
- }
+ cassert(config_prof);
- *hash1 = ret1;
- *hash2 = ret2;
+ hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
}
static bool
@@ -1069,16 +1049,28 @@ prof_bt_keycomp(const void *k1, const void *k2)
const prof_bt_t *bt1 = (prof_bt_t *)k1;
const prof_bt_t *bt2 = (prof_bt_t *)k2;
+ cassert(config_prof);
+
if (bt1->len != bt2->len)
return (false);
return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
}
+static malloc_mutex_t *
+prof_ctx_mutex_choose(void)
+{
+ unsigned nctxs = atomic_add_u(&cum_ctxs, 1);
+
+ return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]);
+}
+
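
prof_ctx_mutex_choose() above is plain lock striping: instead of one mutex per ctx (or a single global one), new objects are handed a lock from a fixed pool, round-robin. A self-contained sketch of the same scheme, where GCC/Clang __atomic builtins stand in for jemalloc's atomic_add_u() and all names are illustrative:

#include <pthread.h>

#define NLOCKS 16

static pthread_mutex_t lock_pool[NLOCKS];
static unsigned lock_seq;       /* updated atomically */

static void
lock_pool_boot(void)
{
    unsigned i;

    for (i = 0; i < NLOCKS; i++)
        pthread_mutex_init(&lock_pool[i], NULL);
}

/* Pick a lock for a newly created object, round-robin over the pool. */
static pthread_mutex_t *
lock_pool_choose(void)
{
    unsigned n = __atomic_add_fetch(&lock_seq, 1, __ATOMIC_RELAXED);

    return (&lock_pool[(n - 1) % NLOCKS]);
}
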
prof_tdata_t *
prof_tdata_init(void)
{
prof_tdata_t *prof_tdata;
+ cassert(config_prof);
+
/* Initialize an empty cache for this thread. */
prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t));
if (prof_tdata == NULL)
@@ -1091,52 +1083,68 @@ prof_tdata_init(void)
}
ql_new(&prof_tdata->lru_ql);
- prof_tdata->vec = imalloc(sizeof(void *) * prof_bt_max);
+ prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX);
if (prof_tdata->vec == NULL) {
-
ckh_delete(&prof_tdata->bt2cnt);
idalloc(prof_tdata);
return (NULL);
}
- prof_tdata->prn_state = 0;
+ prof_tdata->prng_state = 0;
prof_tdata->threshold = 0;
prof_tdata->accum = 0;
- PROF_TCACHE_SET(prof_tdata);
+ prof_tdata->enq = false;
+ prof_tdata->enq_idump = false;
+ prof_tdata->enq_gdump = false;
+
+ prof_tdata_tsd_set(&prof_tdata);
return (prof_tdata);
}
-static void
+void
prof_tdata_cleanup(void *arg)
{
- prof_tdata_t *prof_tdata;
+ prof_thr_cnt_t *cnt;
+ prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg;
- prof_tdata = PROF_TCACHE_GET();
- if (prof_tdata != NULL) {
- prof_thr_cnt_t *cnt;
+ cassert(config_prof);
+ if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) {
+ /*
+ * Another destructor deallocated memory after this destructor
+ * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY
+ * in order to receive another callback.
+ */
+ prof_tdata = PROF_TDATA_STATE_PURGATORY;
+ prof_tdata_tsd_set(&prof_tdata);
+ } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) {
+ /*
+ * The previous time this destructor was called, we set the key
+ * to PROF_TDATA_STATE_PURGATORY so that other destructors
+ * wouldn't cause re-creation of the prof_tdata. This time, do
+ * nothing, so that the destructor will not be called again.
+ */
+ } else if (prof_tdata != NULL) {
/*
* Delete the hash table. All of its contents can still be
* iterated over via the LRU.
*/
ckh_delete(&prof_tdata->bt2cnt);
-
/*
* Iteratively merge cnt's into the global stats and delete
* them.
*/
while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) {
- prof_ctx_merge(cnt->ctx, cnt);
ql_remove(&prof_tdata->lru_ql, cnt, lru_link);
+ prof_ctx_merge(cnt->ctx, cnt);
idalloc(cnt);
}
-
idalloc(prof_tdata->vec);
-
idalloc(prof_tdata);
- PROF_TCACHE_SET(NULL);
+ prof_tdata = PROF_TDATA_STATE_PURGATORY;
+ prof_tdata_tsd_set(&prof_tdata);
}
}
@@ -1144,6 +1152,8 @@ void
prof_boot0(void)
{
+ cassert(config_prof);
+
memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
sizeof(PROF_PREFIX_DEFAULT));
}
@@ -1152,6 +1162,8 @@ void
prof_boot1(void)
{
+ cassert(config_prof);
+
/*
* opt_prof and prof_promote must be in their final state before any
* arenas are initialized, so this function must be executed early.
@@ -1164,50 +1176,53 @@ prof_boot1(void)
*/
opt_prof = true;
opt_prof_gdump = false;
- prof_interval = 0;
} else if (opt_prof) {
if (opt_lg_prof_interval >= 0) {
prof_interval = (((uint64_t)1U) <<
opt_lg_prof_interval);
- } else
- prof_interval = 0;
+ }
}
- prof_promote = (opt_prof && opt_lg_prof_sample > PAGE_SHIFT);
+ prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE);
}
bool
prof_boot2(void)
{
+ cassert(config_prof);
+
if (opt_prof) {
+ unsigned i;
+
if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash,
prof_bt_keycomp))
return (true);
if (malloc_mutex_init(&bt2ctx_mtx))
return (true);
- if (pthread_key_create(&prof_tdata_tsd, prof_tdata_cleanup)
- != 0) {
+ if (prof_tdata_tsd_boot()) {
malloc_write(
"<jemalloc>: Error in pthread_key_create()\n");
abort();
}
- prof_bt_max = (1U << opt_lg_prof_bt_max);
if (malloc_mutex_init(&prof_dump_seq_mtx))
return (true);
- if (malloc_mutex_init(&enq_mtx))
- return (true);
- enq = false;
- enq_idump = false;
- enq_gdump = false;
-
if (atexit(prof_fdump) != 0) {
malloc_write("<jemalloc>: Error in atexit()\n");
if (opt_abort)
abort();
}
+
+ ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
+ sizeof(malloc_mutex_t));
+ if (ctx_locks == NULL)
+ return (true);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ if (malloc_mutex_init(&ctx_locks[i]))
+ return (true);
+ }
}
#ifdef JEMALLOC_PROF_LIBGCC
@@ -1223,5 +1238,46 @@ prof_boot2(void)
return (false);
}
+void
+prof_prefork(void)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ malloc_mutex_lock(&bt2ctx_mtx);
+ malloc_mutex_lock(&prof_dump_seq_mtx);
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_lock(&ctx_locks[i]);
+ }
+}
+
+void
+prof_postfork_parent(void)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_parent(&ctx_locks[i]);
+ malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(&bt2ctx_mtx);
+ }
+}
+
+void
+prof_postfork_child(void)
+{
+
+ if (opt_prof) {
+ unsigned i;
+
+ for (i = 0; i < PROF_NCTX_LOCKS; i++)
+ malloc_mutex_postfork_child(&ctx_locks[i]);
+ malloc_mutex_postfork_child(&prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(&bt2ctx_mtx);
+ }
+}
+
/******************************************************************************/
-#endif /* JEMALLOC_PROF */
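
The new prof_prefork()/prof_postfork_*() hooks follow the usual pthread_atfork() discipline: acquire every mutex the subsystem owns before fork() so the child never inherits a lock held by a thread that does not exist there, then release on both sides afterwards. A minimal illustration of wiring such hooks up for a hypothetical subsystem (this is not jemalloc's registration code, which lives in jemalloc.c):

#include <pthread.h>

static pthread_mutex_t subsys_mtx = PTHREAD_MUTEX_INITIALIZER;

static void subsys_prefork(void)         { pthread_mutex_lock(&subsys_mtx); }
static void subsys_postfork_parent(void) { pthread_mutex_unlock(&subsys_mtx); }
static void subsys_postfork_child(void)  { pthread_mutex_unlock(&subsys_mtx); }

static void
subsys_boot(void)
{
    pthread_atfork(subsys_prefork, subsys_postfork_parent,
        subsys_postfork_child);
}
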
diff --git a/dep/jemalloc/src/quarantine.c b/dep/jemalloc/src/quarantine.c
new file mode 100644
index 00000000000..f96a948d5c7
--- /dev/null
+++ b/dep/jemalloc/src/quarantine.c
@@ -0,0 +1,190 @@
+#define JEMALLOC_QUARANTINE_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/*
+ * quarantine pointers close to NULL are used to encode state information that
+ * is used for cleaning up during thread shutdown.
+ */
+#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1)
+#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2)
+#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY
+
+/******************************************************************************/
+/* Data. */
+
+malloc_tsd_data(, quarantine, quarantine_t *, NULL)
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static quarantine_t *quarantine_grow(quarantine_t *quarantine);
+static void quarantine_drain_one(quarantine_t *quarantine);
+static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound);
+
+/******************************************************************************/
+
+quarantine_t *
+quarantine_init(size_t lg_maxobjs)
+{
+ quarantine_t *quarantine;
+
+ quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) +
+ ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t)));
+ if (quarantine == NULL)
+ return (NULL);
+ quarantine->curbytes = 0;
+ quarantine->curobjs = 0;
+ quarantine->first = 0;
+ quarantine->lg_maxobjs = lg_maxobjs;
+
+ quarantine_tsd_set(&quarantine);
+
+ return (quarantine);
+}
+
+static quarantine_t *
+quarantine_grow(quarantine_t *quarantine)
+{
+ quarantine_t *ret;
+
+ ret = quarantine_init(quarantine->lg_maxobjs + 1);
+ if (ret == NULL) {
+ quarantine_drain_one(quarantine);
+ return (quarantine);
+ }
+
+ ret->curbytes = quarantine->curbytes;
+ ret->curobjs = quarantine->curobjs;
+ if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
+ quarantine->lg_maxobjs)) {
+ /* objs ring buffer data are contiguous. */
+ memcpy(ret->objs, &quarantine->objs[quarantine->first],
+ quarantine->curobjs * sizeof(quarantine_obj_t));
+ } else {
+ /* objs ring buffer data wrap around. */
+ size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
+ quarantine->first;
+ size_t ncopy_b = quarantine->curobjs - ncopy_a;
+
+ memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a
+ * sizeof(quarantine_obj_t));
+ memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
+ sizeof(quarantine_obj_t));
+ }
+ idalloc(quarantine);
+
+ return (ret);
+}
+
+static void
+quarantine_drain_one(quarantine_t *quarantine)
+{
+ quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
+ assert(obj->usize == isalloc(obj->ptr, config_prof));
+ idalloc(obj->ptr);
+ quarantine->curbytes -= obj->usize;
+ quarantine->curobjs--;
+ quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
+ quarantine->lg_maxobjs) - 1);
+}
+
+static void
+quarantine_drain(quarantine_t *quarantine, size_t upper_bound)
+{
+
+ while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
+ quarantine_drain_one(quarantine);
+}
+
+void
+quarantine(void *ptr)
+{
+ quarantine_t *quarantine;
+ size_t usize = isalloc(ptr, config_prof);
+
+ cassert(config_fill);
+ assert(opt_quarantine);
+
+ quarantine = *quarantine_tsd_get();
+ if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) {
+ if (quarantine == QUARANTINE_STATE_PURGATORY) {
+ /*
+ * Make a note that quarantine() was called after
+ * quarantine_cleanup() was called.
+ */
+ quarantine = QUARANTINE_STATE_REINCARNATED;
+ quarantine_tsd_set(&quarantine);
+ }
+ idalloc(ptr);
+ return;
+ }
+ /*
+ * Drain one or more objects if the quarantine size limit would be
+ * exceeded by appending ptr.
+ */
+ if (quarantine->curbytes + usize > opt_quarantine) {
+ size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
+ - usize : 0;
+ quarantine_drain(quarantine, upper_bound);
+ }
+ /* Grow the quarantine ring buffer if it's full. */
+ if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
+ quarantine = quarantine_grow(quarantine);
+ /* quarantine_grow() must free a slot if it fails to grow. */
+ assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
+ /* Append ptr if its size doesn't exceed the quarantine size. */
+ if (quarantine->curbytes + usize <= opt_quarantine) {
+ size_t offset = (quarantine->first + quarantine->curobjs) &
+ ((ZU(1) << quarantine->lg_maxobjs) - 1);
+ quarantine_obj_t *obj = &quarantine->objs[offset];
+ obj->ptr = ptr;
+ obj->usize = usize;
+ quarantine->curbytes += usize;
+ quarantine->curobjs++;
+ if (opt_junk)
+ memset(ptr, 0x5a, usize);
+ } else {
+ assert(quarantine->curbytes == 0);
+ idalloc(ptr);
+ }
+}
+
+void
+quarantine_cleanup(void *arg)
+{
+ quarantine_t *quarantine = *(quarantine_t **)arg;
+
+ if (quarantine == QUARANTINE_STATE_REINCARNATED) {
+ /*
+ * Another destructor deallocated memory after this destructor
+ * was called. Reset quarantine to QUARANTINE_STATE_PURGATORY
+ * in order to receive another callback.
+ */
+ quarantine = QUARANTINE_STATE_PURGATORY;
+ quarantine_tsd_set(&quarantine);
+ } else if (quarantine == QUARANTINE_STATE_PURGATORY) {
+ /*
+ * The previous time this destructor was called, we set the key
+ * to QUARANTINE_STATE_PURGATORY so that other destructors
+ * wouldn't cause re-creation of the quarantine. This time, do
+ * nothing, so that the destructor will not be called again.
+ */
+ } else if (quarantine != NULL) {
+ quarantine_drain(quarantine, 0);
+ idalloc(quarantine);
+ quarantine = QUARANTINE_STATE_PURGATORY;
+ quarantine_tsd_set(&quarantine);
+ }
+}
+
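
The REINCARNATED/PURGATORY dance here (and in prof_tdata_cleanup() above) exists because pthread TSD destructors run in an unspecified order and may allocate through the very slot being torn down; near-NULL sentinel values record where in that shutdown sequence the slot is. A minimal pthread_key sketch of the same state machine (illustrative only, not jemalloc's TSD layer):

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

#define STATE_PURGATORY     ((void *)(uintptr_t)1)
#define STATE_REINCARNATED  ((void *)(uintptr_t)2)

static pthread_key_t key;

static void
cleanup(void *arg)
{
    if (arg == STATE_REINCARNATED) {
        /* The slot was used after cleanup ran; request another callback. */
        pthread_setspecific(key, STATE_PURGATORY);
    } else if (arg == STATE_PURGATORY) {
        /* Second call with nothing new; leave the value NULL and finish. */
    } else {
        free(arg);
        /* Mark the slot so late users can tell cleanup already ran. */
        pthread_setspecific(key, STATE_PURGATORY);
    }
}

static void
tsd_boot(void)
{
    pthread_key_create(&key, cleanup);
}
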
+bool
+quarantine_boot(void)
+{
+
+ cassert(config_fill);
+
+ if (quarantine_tsd_boot())
+ return (true);
+
+ return (false);
+}
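
The quarantine is driven entirely by run-time options and only exists in builds with fill support (config_fill). Assuming such a build, something like the following exercises it; the option spelling follows the 3.x MALLOC_CONF syntax and the sizes are purely illustrative:

/* cc demo.c -ljemalloc
 * MALLOC_CONF="quarantine:16384,junk:true" ./a.out
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    unsigned char *p = malloc(64);

    memset(p, 0xab, 64);
    free(p);
    /*
     * With the quarantine enabled, the freed object is junk-filled (0x5a)
     * and held back from reuse until roughly 16 KiB of freed memory has
     * accumulated, which makes use-after-free bugs much easier to spot.
     */
    printf("first freed byte: 0x%02x\n", p[0]); /* read-after-free, for demonstration only */
    return (0);
}
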
diff --git a/dep/jemalloc/src/rtree.c b/dep/jemalloc/src/rtree.c
index 7753743c5e6..90c6935a0ed 100644
--- a/dep/jemalloc/src/rtree.c
+++ b/dep/jemalloc/src/rtree.c
@@ -1,4 +1,4 @@
-#define RTREE_C_
+#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"
rtree_t *
@@ -20,7 +20,10 @@ rtree_new(unsigned bits)
memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) *
height));
- malloc_mutex_init(&ret->mutex);
+ if (malloc_mutex_init(&ret->mutex)) {
+ /* Leak the rtree. */
+ return (NULL);
+ }
ret->height = height;
if (bits_per_level * height > bits)
ret->level2bits[0] = bits % bits_per_level;
@@ -41,3 +44,24 @@ rtree_new(unsigned bits)
return (ret);
}
+
+void
+rtree_prefork(rtree_t *rtree)
+{
+
+ malloc_mutex_prefork(&rtree->mutex);
+}
+
+void
+rtree_postfork_parent(rtree_t *rtree)
+{
+
+ malloc_mutex_postfork_parent(&rtree->mutex);
+}
+
+void
+rtree_postfork_child(rtree_t *rtree)
+{
+
+ malloc_mutex_postfork_child(&rtree->mutex);
+}
diff --git a/dep/jemalloc/src/stats.c b/dep/jemalloc/src/stats.c
index 3dfe0d232a6..43f87af6700 100644
--- a/dep/jemalloc/src/stats.c
+++ b/dep/jemalloc/src/stats.c
@@ -39,136 +39,40 @@
bool opt_stats_print = false;
+size_t stats_cactive = 0;
+
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-#ifdef JEMALLOC_STATS
-static void malloc_vcprintf(void (*write_cb)(void *, const char *),
- void *cbopaque, const char *format, va_list ap);
static void stats_arena_bins_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_lruns_print(void (*write_cb)(void *, const char *),
void *cbopaque, unsigned i);
static void stats_arena_print(void (*write_cb)(void *, const char *),
- void *cbopaque, unsigned i);
-#endif
+ void *cbopaque, unsigned i, bool bins, bool large);
/******************************************************************************/
-/*
- * We don't want to depend on vsnprintf() for production builds, since that can
- * cause unnecessary bloat for static binaries. u2s() provides minimal integer
- * printing functionality, so that malloc_printf() use can be limited to
- * JEMALLOC_STATS code.
- */
-char *
-u2s(uint64_t x, unsigned base, char *s)
-{
- unsigned i;
-
- i = UMAX2S_BUFSIZE - 1;
- s[i] = '\0';
- switch (base) {
- case 10:
- do {
- i--;
- s[i] = "0123456789"[x % (uint64_t)10];
- x /= (uint64_t)10;
- } while (x > 0);
- break;
- case 16:
- do {
- i--;
- s[i] = "0123456789abcdef"[x & 0xf];
- x >>= 4;
- } while (x > 0);
- break;
- default:
- do {
- i--;
- s[i] = "0123456789abcdefghijklmnopqrstuvwxyz"[x %
- (uint64_t)base];
- x /= (uint64_t)base;
- } while (x > 0);
- }
-
- return (&s[i]);
-}
-
-#ifdef JEMALLOC_STATS
-static void
-malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap)
-{
- char buf[4096];
-
- if (write_cb == NULL) {
- /*
- * The caller did not provide an alternate write_cb callback
- * function, so use the default one. malloc_write() is an
- * inline function, so use malloc_message() directly here.
- */
- write_cb = JEMALLOC_P(malloc_message);
- cbopaque = NULL;
- }
-
- vsnprintf(buf, sizeof(buf), format, ap);
- write_cb(cbopaque, buf);
-}
-
-/*
- * Print to a callback function in such a way as to (hopefully) avoid memory
- * allocation.
- */
-JEMALLOC_ATTR(format(printf, 3, 4))
-void
-malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
- malloc_vcprintf(write_cb, cbopaque, format, ap);
- va_end(ap);
-}
-
-/*
- * Print to stderr in such a way as to (hopefully) avoid memory allocation.
- */
-JEMALLOC_ATTR(format(printf, 1, 2))
-void
-malloc_printf(const char *format, ...)
-{
- va_list ap;
-
- va_start(ap, format);
- malloc_vcprintf(NULL, NULL, format, ap);
- va_end(ap);
-}
-#endif
-
-#ifdef JEMALLOC_STATS
static void
stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
- size_t pagesize;
+ size_t page;
bool config_tcache;
unsigned nbins, j, gap_start;
- CTL_GET("arenas.pagesize", &pagesize, size_t);
+ CTL_GET("arenas.page", &page, size_t);
CTL_GET("config.tcache", &config_tcache, bool);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
+ "bins: bin size regs pgs allocated nmalloc"
" ndalloc nrequests nfills nflushes"
- " newruns reruns maxruns curruns\n");
+ " newruns reruns curruns\n");
} else {
malloc_cprintf(write_cb, cbopaque,
- "bins: bin size regs pgs allocated nmalloc"
- " ndalloc newruns reruns maxruns"
- " curruns\n");
+ "bins: bin size regs pgs allocated nmalloc"
+ " ndalloc newruns reruns curruns\n");
}
CTL_GET("arenas.nbins", &nbins, unsigned);
for (j = 0, gap_start = UINT_MAX; j < nbins; j++) {
@@ -179,12 +83,11 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
if (gap_start == UINT_MAX)
gap_start = j;
} else {
- unsigned ntbins_, nqbins, ncbins, nsbins;
size_t reg_size, run_size, allocated;
uint32_t nregs;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t reruns;
- size_t highruns, curruns;
+ size_t curruns;
if (gap_start != UINT_MAX) {
if (j > gap_start + 1) {
@@ -199,10 +102,6 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
gap_start = UINT_MAX;
}
- CTL_GET("arenas.ntbins", &ntbins_, unsigned);
- CTL_GET("arenas.nqbins", &nqbins, unsigned);
- CTL_GET("arenas.ncbins", &ncbins, unsigned);
- CTL_GET("arenas.nsbins", &nsbins, unsigned);
CTL_J_GET("arenas.bin.0.size", &reg_size, size_t);
CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t);
CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t);
@@ -222,36 +121,25 @@ stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns,
uint64_t);
- CTL_IJ_GET("stats.arenas.0.bins.0.highruns", &highruns,
- size_t);
CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns,
size_t);
if (config_tcache) {
malloc_cprintf(write_cb, cbopaque,
- "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
+ "%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu %12zu\n",
- j,
- j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
- "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
- "S",
- reg_size, nregs, run_size / pagesize,
+ " %12zu\n",
+ j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nrequests,
- nfills, nflushes, nruns, reruns, highruns,
- curruns);
+ nfills, nflushes, nruns, reruns, curruns);
} else {
malloc_cprintf(write_cb, cbopaque,
- "%13u %1s %5zu %4u %3zu %12zu %12"PRIu64
+ "%13u %5zu %4u %3zu %12zu %12"PRIu64
" %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu %12zu\n",
- j,
- j < ntbins_ ? "T" : j < ntbins_ + nqbins ?
- "Q" : j < ntbins_ + nqbins + ncbins ? "C" :
- "S",
- reg_size, nregs, run_size / pagesize,
+ " %12zu\n",
+ j, reg_size, nregs, run_size / page,
allocated, nmalloc, ndalloc, nruns, reruns,
- highruns, curruns);
+ curruns);
}
}
}
@@ -271,18 +159,18 @@ static void
stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
unsigned i)
{
- size_t pagesize, nlruns, j;
+ size_t page, nlruns, j;
ssize_t gap_start;
- CTL_GET("arenas.pagesize", &pagesize, size_t);
+ CTL_GET("arenas.page", &page, size_t);
malloc_cprintf(write_cb, cbopaque,
"large: size pages nmalloc ndalloc nrequests"
- " maxruns curruns\n");
+ " curruns\n");
CTL_GET("arenas.nlruns", &nlruns, size_t);
for (j = 0, gap_start = -1; j < nlruns; j++) {
uint64_t nmalloc, ndalloc, nrequests;
- size_t run_size, highruns, curruns;
+ size_t run_size, curruns;
CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc,
uint64_t);
@@ -295,8 +183,6 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
gap_start = j;
} else {
CTL_J_GET("arenas.lrun.0.size", &run_size, size_t);
- CTL_IJ_GET("stats.arenas.0.lruns.0.highruns", &highruns,
- size_t);
CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns,
size_t);
if (gap_start != -1) {
@@ -306,9 +192,9 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
malloc_cprintf(write_cb, cbopaque,
"%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64
- " %12zu %12zu\n",
- run_size, run_size / pagesize, nmalloc, ndalloc,
- nrequests, highruns, curruns);
+ " %12zu\n",
+ run_size, run_size / page, nmalloc, ndalloc,
+ nrequests, curruns);
}
}
if (gap_start != -1)
@@ -317,17 +203,25 @@ stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque,
static void
stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
- unsigned i)
+ unsigned i, bool bins, bool large)
{
- size_t pagesize, pactive, pdirty, mapped;
+ unsigned nthreads;
+ const char *dss;
+ size_t page, pactive, pdirty, mapped;
uint64_t npurge, nmadvise, purged;
size_t small_allocated;
uint64_t small_nmalloc, small_ndalloc, small_nrequests;
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests;
- CTL_GET("arenas.pagesize", &pagesize, size_t);
+ CTL_GET("arenas.page", &page, size_t);
+ CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned);
+ malloc_cprintf(write_cb, cbopaque,
+ "assigned threads: %u\n", nthreads);
+ CTL_I_GET("stats.arenas.0.dss", &dss, const char *);
+ malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n",
+ dss);
CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t);
CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t);
CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t);
@@ -361,15 +255,15 @@ stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque,
small_nmalloc + large_nmalloc,
small_ndalloc + large_ndalloc,
small_nrequests + large_nrequests);
- malloc_cprintf(write_cb, cbopaque, "active: %12zu\n",
- pactive * pagesize );
+ malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page);
CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped);
- stats_arena_bins_print(write_cb, cbopaque, i);
- stats_arena_lruns_print(write_cb, cbopaque, i);
+ if (bins)
+ stats_arena_bins_print(write_cb, cbopaque, i);
+ if (large)
+ stats_arena_lruns_print(write_cb, cbopaque, i);
}
-#endif
void
stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
@@ -378,7 +272,6 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
int err;
uint64_t epoch;
size_t u64sz;
- char s[UMAX2S_BUFSIZE];
bool general = true;
bool merged = true;
bool unmerged = true;
@@ -394,8 +287,7 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
* */
epoch = 1;
u64sz = sizeof(uint64_t);
- err = JEMALLOC_P(mallctl)("epoch", &epoch, &u64sz, &epoch,
- sizeof(uint64_t));
+ err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t));
if (err != 0) {
if (err == EAGAIN) {
malloc_write("<jemalloc>: Memory allocation failure in "
@@ -407,42 +299,33 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
abort();
}
- if (write_cb == NULL) {
- /*
- * The caller did not provide an alternate write_cb callback
- * function, so use the default one. malloc_write() is an
- * inline function, so use malloc_message() directly here.
- */
- write_cb = JEMALLOC_P(malloc_message);
- cbopaque = NULL;
- }
-
if (opts != NULL) {
unsigned i;
for (i = 0; opts[i] != '\0'; i++) {
switch (opts[i]) {
- case 'g':
- general = false;
- break;
- case 'm':
- merged = false;
- break;
- case 'a':
- unmerged = false;
- break;
- case 'b':
- bins = false;
- break;
- case 'l':
- large = false;
- break;
- default:;
+ case 'g':
+ general = false;
+ break;
+ case 'm':
+ merged = false;
+ break;
+ case 'a':
+ unmerged = false;
+ break;
+ case 'b':
+ bins = false;
+ break;
+ case 'l':
+ large = false;
+ break;
+ default:;
}
}
}
- write_cb(cbopaque, "___ Begin jemalloc statistics ___\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "___ Begin jemalloc statistics ___\n");
if (general) {
int err;
const char *cpv;
@@ -457,256 +340,146 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
cpsz = sizeof(const char *);
CTL_GET("version", &cpv, const char *);
- write_cb(cbopaque, "Version: ");
- write_cb(cbopaque, cpv);
- write_cb(cbopaque, "\n");
+ malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv);
CTL_GET("config.debug", &bv, bool);
- write_cb(cbopaque, "Assertions ");
- write_cb(cbopaque, bv ? "enabled" : "disabled");
- write_cb(cbopaque, "\n");
+ malloc_cprintf(write_cb, cbopaque, "Assertions %s\n",
+ bv ? "enabled" : "disabled");
#define OPT_WRITE_BOOL(n) \
- if ((err = JEMALLOC_P(mallctl)("opt."#n, &bv, &bsz, \
- NULL, 0)) == 0) { \
- write_cb(cbopaque, " opt."#n": "); \
- write_cb(cbopaque, bv ? "true" : "false"); \
- write_cb(cbopaque, "\n"); \
+ if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \
+ == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %s\n", bv ? "true" : "false"); \
}
#define OPT_WRITE_SIZE_T(n) \
- if ((err = JEMALLOC_P(mallctl)("opt."#n, &sv, &ssz, \
- NULL, 0)) == 0) { \
- write_cb(cbopaque, " opt."#n": "); \
- write_cb(cbopaque, u2s(sv, 10, s)); \
- write_cb(cbopaque, "\n"); \
+ if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \
+ == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zu\n", sv); \
}
#define OPT_WRITE_SSIZE_T(n) \
- if ((err = JEMALLOC_P(mallctl)("opt."#n, &ssv, &sssz, \
- NULL, 0)) == 0) { \
- if (ssv >= 0) { \
- write_cb(cbopaque, " opt."#n": "); \
- write_cb(cbopaque, u2s(ssv, 10, s)); \
- } else { \
- write_cb(cbopaque, " opt."#n": -"); \
- write_cb(cbopaque, u2s(-ssv, 10, s)); \
- } \
- write_cb(cbopaque, "\n"); \
+ if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \
+ == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": %zd\n", ssv); \
}
#define OPT_WRITE_CHAR_P(n) \
- if ((err = JEMALLOC_P(mallctl)("opt."#n, &cpv, &cpsz, \
- NULL, 0)) == 0) { \
- write_cb(cbopaque, " opt."#n": \""); \
- write_cb(cbopaque, cpv); \
- write_cb(cbopaque, "\"\n"); \
+ if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \
+ == 0) { \
+ malloc_cprintf(write_cb, cbopaque, \
+ " opt."#n": \"%s\"\n", cpv); \
}
- write_cb(cbopaque, "Run-time option settings:\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "Run-time option settings:\n");
OPT_WRITE_BOOL(abort)
- OPT_WRITE_SIZE_T(lg_qspace_max)
- OPT_WRITE_SIZE_T(lg_cspace_max)
OPT_WRITE_SIZE_T(lg_chunk)
+ OPT_WRITE_CHAR_P(dss)
OPT_WRITE_SIZE_T(narenas)
OPT_WRITE_SSIZE_T(lg_dirty_mult)
OPT_WRITE_BOOL(stats_print)
OPT_WRITE_BOOL(junk)
+ OPT_WRITE_SIZE_T(quarantine)
+ OPT_WRITE_BOOL(redzone)
OPT_WRITE_BOOL(zero)
- OPT_WRITE_BOOL(sysv)
+ OPT_WRITE_BOOL(utrace)
+ OPT_WRITE_BOOL(valgrind)
OPT_WRITE_BOOL(xmalloc)
OPT_WRITE_BOOL(tcache)
- OPT_WRITE_SSIZE_T(lg_tcache_gc_sweep)
OPT_WRITE_SSIZE_T(lg_tcache_max)
OPT_WRITE_BOOL(prof)
OPT_WRITE_CHAR_P(prof_prefix)
- OPT_WRITE_SIZE_T(lg_prof_bt_max)
OPT_WRITE_BOOL(prof_active)
OPT_WRITE_SSIZE_T(lg_prof_sample)
OPT_WRITE_BOOL(prof_accum)
- OPT_WRITE_SSIZE_T(lg_prof_tcmax)
OPT_WRITE_SSIZE_T(lg_prof_interval)
OPT_WRITE_BOOL(prof_gdump)
+ OPT_WRITE_BOOL(prof_final)
OPT_WRITE_BOOL(prof_leak)
- OPT_WRITE_BOOL(overcommit)
#undef OPT_WRITE_BOOL
#undef OPT_WRITE_SIZE_T
#undef OPT_WRITE_SSIZE_T
#undef OPT_WRITE_CHAR_P
- write_cb(cbopaque, "CPUs: ");
- write_cb(cbopaque, u2s(ncpus, 10, s));
- write_cb(cbopaque, "\n");
+ malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus);
CTL_GET("arenas.narenas", &uv, unsigned);
- write_cb(cbopaque, "Max arenas: ");
- write_cb(cbopaque, u2s(uv, 10, s));
- write_cb(cbopaque, "\n");
+ malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv);
- write_cb(cbopaque, "Pointer size: ");
- write_cb(cbopaque, u2s(sizeof(void *), 10, s));
- write_cb(cbopaque, "\n");
+ malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n",
+ sizeof(void *));
CTL_GET("arenas.quantum", &sv, size_t);
- write_cb(cbopaque, "Quantum size: ");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "\n");
-
- CTL_GET("arenas.cacheline", &sv, size_t);
- write_cb(cbopaque, "Cacheline size (assumed): ");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "\n");
-
- CTL_GET("arenas.subpage", &sv, size_t);
- write_cb(cbopaque, "Subpage spacing: ");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "\n");
-
- if ((err = JEMALLOC_P(mallctl)("arenas.tspace_min", &sv, &ssz,
- NULL, 0)) == 0) {
- write_cb(cbopaque, "Tiny 2^n-spaced sizes: [");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "..");
-
- CTL_GET("arenas.tspace_max", &sv, size_t);
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "]\n");
- }
+ malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv);
- CTL_GET("arenas.qspace_min", &sv, size_t);
- write_cb(cbopaque, "Quantum-spaced sizes: [");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "..");
- CTL_GET("arenas.qspace_max", &sv, size_t);
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "]\n");
-
- CTL_GET("arenas.cspace_min", &sv, size_t);
- write_cb(cbopaque, "Cacheline-spaced sizes: [");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "..");
- CTL_GET("arenas.cspace_max", &sv, size_t);
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "]\n");
-
- CTL_GET("arenas.sspace_min", &sv, size_t);
- write_cb(cbopaque, "Subpage-spaced sizes: [");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "..");
- CTL_GET("arenas.sspace_max", &sv, size_t);
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "]\n");
+ CTL_GET("arenas.page", &sv, size_t);
+ malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv);
CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t);
if (ssv >= 0) {
- write_cb(cbopaque,
- "Min active:dirty page ratio per arena: ");
- write_cb(cbopaque, u2s((1U << ssv), 10, s));
- write_cb(cbopaque, ":1\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "Min active:dirty page ratio per arena: %u:1\n",
+ (1U << ssv));
} else {
- write_cb(cbopaque,
+ malloc_cprintf(write_cb, cbopaque,
"Min active:dirty page ratio per arena: N/A\n");
}
- if ((err = JEMALLOC_P(mallctl)("arenas.tcache_max", &sv,
- &ssz, NULL, 0)) == 0) {
- write_cb(cbopaque,
- "Maximum thread-cached size class: ");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, "\n");
- }
- if ((err = JEMALLOC_P(mallctl)("opt.lg_tcache_gc_sweep", &ssv,
- &ssz, NULL, 0)) == 0) {
- size_t tcache_gc_sweep = (1U << ssv);
- bool tcache_enabled;
- CTL_GET("opt.tcache", &tcache_enabled, bool);
- write_cb(cbopaque, "Thread cache GC sweep interval: ");
- write_cb(cbopaque, tcache_enabled && ssv >= 0 ?
- u2s(tcache_gc_sweep, 10, s) : "N/A");
- write_cb(cbopaque, "\n");
+ if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0))
+ == 0) {
+ malloc_cprintf(write_cb, cbopaque,
+ "Maximum thread-cached size class: %zu\n", sv);
}
- if ((err = JEMALLOC_P(mallctl)("opt.prof", &bv, &bsz, NULL, 0))
- == 0 && bv) {
- CTL_GET("opt.lg_prof_bt_max", &sv, size_t);
- write_cb(cbopaque, "Maximum profile backtrace depth: ");
- write_cb(cbopaque, u2s((1U << sv), 10, s));
- write_cb(cbopaque, "\n");
-
- CTL_GET("opt.lg_prof_tcmax", &ssv, ssize_t);
- write_cb(cbopaque,
- "Maximum per thread backtrace cache: ");
- if (ssv >= 0) {
- write_cb(cbopaque, u2s((1U << ssv), 10, s));
- write_cb(cbopaque, " (2^");
- write_cb(cbopaque, u2s(ssv, 10, s));
- write_cb(cbopaque, ")\n");
- } else
- write_cb(cbopaque, "N/A\n");
-
+ if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 &&
+ bv) {
CTL_GET("opt.lg_prof_sample", &sv, size_t);
- write_cb(cbopaque, "Average profile sample interval: ");
- write_cb(cbopaque, u2s((((uint64_t)1U) << sv), 10, s));
- write_cb(cbopaque, " (2^");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, ")\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "Average profile sample interval: %"PRIu64
+ " (2^%zu)\n", (((uint64_t)1U) << sv), sv);
CTL_GET("opt.lg_prof_interval", &ssv, ssize_t);
- write_cb(cbopaque, "Average profile dump interval: ");
if (ssv >= 0) {
- write_cb(cbopaque, u2s((((uint64_t)1U) << ssv),
- 10, s));
- write_cb(cbopaque, " (2^");
- write_cb(cbopaque, u2s(ssv, 10, s));
- write_cb(cbopaque, ")\n");
- } else
- write_cb(cbopaque, "N/A\n");
+ malloc_cprintf(write_cb, cbopaque,
+ "Average profile dump interval: %"PRIu64
+ " (2^%zd)\n",
+ (((uint64_t)1U) << ssv), ssv);
+ } else {
+ malloc_cprintf(write_cb, cbopaque,
+ "Average profile dump interval: N/A\n");
+ }
}
- CTL_GET("arenas.chunksize", &sv, size_t);
- write_cb(cbopaque, "Chunk size: ");
- write_cb(cbopaque, u2s(sv, 10, s));
CTL_GET("opt.lg_chunk", &sv, size_t);
- write_cb(cbopaque, " (2^");
- write_cb(cbopaque, u2s(sv, 10, s));
- write_cb(cbopaque, ")\n");
+ malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n",
+ (ZU(1) << sv), sv);
}
-#ifdef JEMALLOC_STATS
- {
- int err;
- size_t ssz;
+ if (config_stats) {
+ size_t *cactive;
size_t allocated, active, mapped;
- size_t chunks_current, chunks_high, swap_avail;
+ size_t chunks_current, chunks_high;
uint64_t chunks_total;
size_t huge_allocated;
uint64_t huge_nmalloc, huge_ndalloc;
- ssz = sizeof(size_t);
-
+ CTL_GET("stats.cactive", &cactive, size_t *);
CTL_GET("stats.allocated", &allocated, size_t);
CTL_GET("stats.active", &active, size_t);
CTL_GET("stats.mapped", &mapped, size_t);
malloc_cprintf(write_cb, cbopaque,
- "Allocated: %zu, active: %zu, mapped: %zu\n", allocated,
- active, mapped);
+ "Allocated: %zu, active: %zu, mapped: %zu\n",
+ allocated, active, mapped);
+ malloc_cprintf(write_cb, cbopaque,
+ "Current active ceiling: %zu\n", atomic_read_z(cactive));
/* Print chunk stats. */
CTL_GET("stats.chunks.total", &chunks_total, uint64_t);
CTL_GET("stats.chunks.high", &chunks_high, size_t);
CTL_GET("stats.chunks.current", &chunks_current, size_t);
- if ((err = JEMALLOC_P(mallctl)("swap.avail", &swap_avail, &ssz,
- NULL, 0)) == 0) {
- size_t lg_chunk;
-
- malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
- "highchunks curchunks swap_avail\n");
- CTL_GET("opt.lg_chunk", &lg_chunk, size_t);
- malloc_cprintf(write_cb, cbopaque,
- " %13"PRIu64"%13zu%13zu%13zu\n",
- chunks_total, chunks_high, chunks_current,
- swap_avail << lg_chunk);
- } else {
- malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
- "highchunks curchunks\n");
- malloc_cprintf(write_cb, cbopaque,
- " %13"PRIu64"%13zu%13zu\n",
- chunks_total, chunks_high, chunks_current);
- }
+ malloc_cprintf(write_cb, cbopaque, "chunks: nchunks "
+ "highchunks curchunks\n");
+ malloc_cprintf(write_cb, cbopaque,
+ " %13"PRIu64" %12zu %12zu\n",
+ chunks_total, chunks_high, chunks_current);
/* Print huge stats. */
CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t);
@@ -723,11 +496,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("arenas.narenas", &narenas, unsigned);
{
- bool initialized[narenas];
+ VARIABLE_ARRAY(bool, initialized, narenas);
size_t isz;
unsigned i, ninitialized;
- isz = sizeof(initialized);
+ isz = sizeof(bool) * narenas;
xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
for (i = ninitialized = 0; i < narenas; i++) {
@@ -735,12 +508,12 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
ninitialized++;
}
- if (ninitialized > 1) {
+ if (ninitialized > 1 || unmerged == false) {
/* Print merged arena stats. */
malloc_cprintf(write_cb, cbopaque,
"\nMerged arenas stats:\n");
stats_arena_print(write_cb, cbopaque,
- narenas);
+ narenas, bins, large);
}
}
}
@@ -752,11 +525,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
CTL_GET("arenas.narenas", &narenas, unsigned);
{
- bool initialized[narenas];
+ VARIABLE_ARRAY(bool, initialized, narenas);
size_t isz;
unsigned i;
- isz = sizeof(initialized);
+ isz = sizeof(bool) * narenas;
xmallctl("arenas.initialized", initialized,
&isz, NULL, 0);
@@ -766,12 +539,11 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
cbopaque,
"\narenas[%u]:\n", i);
stats_arena_print(write_cb,
- cbopaque, i);
+ cbopaque, i, bins, large);
}
}
}
}
}
-#endif /* #ifdef JEMALLOC_STATS */
- write_cb(cbopaque, "--- End jemalloc statistics ---\n");
+ malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n");
}
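
stats_print() is reached through the public malloc_stats_print() entry point, and the option characters parsed above ('g', 'm', 'a', 'b', 'l') suppress the corresponding report sections. A small caller, assuming the unprefixed public names; a je_-prefixed build (as this embedded copy is configured) exposes je_malloc_stats_print() instead:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
write_to_stderr(void *cbopaque, const char *s)
{
    (void)cbopaque;
    fputs(s, stderr);
}

int
main(void)
{
    /* Skip the per-bin ("b") and large-run ("l") tables, keep the rest. */
    malloc_stats_print(write_to_stderr, NULL, "bl");
    return (0);
}
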
diff --git a/dep/jemalloc/src/tcache.c b/dep/jemalloc/src/tcache.c
index cbbe7a113a9..98ed19edd52 100644
--- a/dep/jemalloc/src/tcache.c
+++ b/dep/jemalloc/src/tcache.c
@@ -1,105 +1,131 @@
#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"
-#ifdef JEMALLOC_TCACHE
+
/******************************************************************************/
/* Data. */
+malloc_tsd_data(, tcache, tcache_t *, NULL)
+malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default)
+
bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
-ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
-
-/* Map of thread-specific caches. */
-#ifndef NO_TLS
-__thread tcache_t *tcache_tls JEMALLOC_ATTR(tls_model("initial-exec"));
-#endif
-/*
- * Same contents as tcache, but initialized such that the TSD destructor is
- * called when a thread exits, so that the cache can be cleaned up.
- */
-pthread_key_t tcache_tsd;
+tcache_bin_info_t *tcache_bin_info;
+static unsigned stack_nelms; /* Total stack elms per tcache. */
-size_t nhbins;
-size_t tcache_maxclass;
-unsigned tcache_gc_incr;
+size_t nhbins;
+size_t tcache_maxclass;
/******************************************************************************/
-/* Function prototypes for non-inline static functions. */
-static void tcache_thread_cleanup(void *arg);
+size_t tcache_salloc(const void *ptr)
+{
-/******************************************************************************/
+ return (arena_salloc(ptr, false));
+}
+
+void
+tcache_event_hard(tcache_t *tcache)
+{
+ size_t binind = tcache->next_gc_bin;
+ tcache_bin_t *tbin = &tcache->tbins[binind];
+ tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+
+ if (tbin->low_water > 0) {
+ /*
+ * Flush (ceiling) 3/4 of the objects below the low water mark.
+ */
+ if (binind < NBINS) {
+ tcache_bin_flush_small(tbin, binind, tbin->ncached -
+ tbin->low_water + (tbin->low_water >> 2), tcache);
+ } else {
+ tcache_bin_flush_large(tbin, binind, tbin->ncached -
+ tbin->low_water + (tbin->low_water >> 2), tcache);
+ }
+ /*
+ * Reduce fill count by 2X. Limit lg_fill_div such that the
+ * fill count is always at least 1.
+ */
+ if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
+ tbin->lg_fill_div++;
+ } else if (tbin->low_water < 0) {
+ /*
+ * Increase fill count by 2X. Make sure lg_fill_div stays
+ * greater than 0.
+ */
+ if (tbin->lg_fill_div > 1)
+ tbin->lg_fill_div--;
+ }
+ tbin->low_water = tbin->ncached;
+
+ tcache->next_gc_bin++;
+ if (tcache->next_gc_bin == nhbins)
+ tcache->next_gc_bin = 0;
+ tcache->ev_cnt = 0;
+}
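
The GC above tunes how aggressively a bin refills: the refill count is ncached_max >> lg_fill_div, so incrementing lg_fill_div halves it (the bin looks over-provisioned) and decrementing doubles it (the bin keeps running dry). A toy illustration of that arithmetic with made-up numbers:

#include <stdio.h>

int
main(void)
{
    unsigned ncached_max = 200; /* hypothetical slots in one tcache bin */
    unsigned lg_fill_div;

    for (lg_fill_div = 1; lg_fill_div <= 4; lg_fill_div++) {
        printf("lg_fill_div=%u -> refill %u objects per miss\n",
            lg_fill_div, ncached_max >> lg_fill_div);
    }
    return (0);
}
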
void *
tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind)
{
void *ret;
- arena_tcache_fill_small(tcache->arena, tbin, binind
-#ifdef JEMALLOC_PROF
- , tcache->prof_accumbytes
-#endif
- );
-#ifdef JEMALLOC_PROF
- tcache->prof_accumbytes = 0;
-#endif
+ arena_tcache_fill_small(tcache->arena, tbin, binind,
+ config_prof ? tcache->prof_accumbytes : 0);
+ if (config_prof)
+ tcache->prof_accumbytes = 0;
ret = tcache_alloc_easy(tbin);
return (ret);
}
void
-tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- , tcache_t *tcache
-#endif
- )
+tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem,
+ tcache_t *tcache)
{
- void *flush, *deferred, *ptr;
+ void *ptr;
unsigned i, nflush, ndeferred;
- bool first_pass;
+ bool merged_stats = false;
- assert(binind < nbins);
+ assert(binind < NBINS);
assert(rem <= tbin->ncached);
- assert(tbin->ncached > 0 || tbin->avail == NULL);
- for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
- true; flush != NULL; flush = deferred, nflush = ndeferred) {
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(flush);
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ tbin->avail[0]);
arena_t *arena = chunk->arena;
arena_bin_t *bin = &arena->bins[binind];
-#ifdef JEMALLOC_PROF
- if (arena == tcache->arena) {
- malloc_mutex_lock(&arena->lock);
- arena_prof_accum(arena, tcache->prof_accumbytes);
- malloc_mutex_unlock(&arena->lock);
+ if (config_prof && arena == tcache->arena) {
+ if (arena_prof_accum(arena, tcache->prof_accumbytes))
+ prof_idump();
tcache->prof_accumbytes = 0;
}
-#endif
malloc_mutex_lock(&bin->lock);
-#ifdef JEMALLOC_STATS
- if (arena == tcache->arena) {
+ if (config_stats && arena == tcache->arena) {
+ assert(merged_stats == false);
+ merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
-#endif
- deferred = NULL;
ndeferred = 0;
for (i = 0; i < nflush; i++) {
- ptr = flush;
+ ptr = tbin->avail[i];
assert(ptr != NULL);
- flush = *(void **)ptr;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena) {
size_t pageind = ((uintptr_t)ptr -
- (uintptr_t)chunk) >> PAGE_SHIFT;
+ (uintptr_t)chunk) >> LG_PAGE;
arena_chunk_map_t *mapelm =
- &chunk->map[pageind-map_bias];
- arena_dalloc_bin(arena, chunk, ptr, mapelm);
+ arena_mapp_get(chunk, pageind);
+ if (config_fill && opt_junk) {
+ arena_alloc_junk_small(ptr,
+ &arena_bin_info[binind], true);
+ }
+ arena_dalloc_bin_locked(arena, chunk, ptr,
+ mapelm);
} else {
/*
* This object was allocated via a different
@@ -107,71 +133,75 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
- *(void **)ptr = deferred;
- deferred = ptr;
+ tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&bin->lock);
-
- if (first_pass) {
- tbin->avail = flush;
- first_pass = false;
- }
+ }
+ if (config_stats && merged_stats == false) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ arena_bin_t *bin = &tcache->arena->bins[binind];
+ malloc_mutex_lock(&bin->lock);
+ bin->stats.nflushes++;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(&bin->lock);
}
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water)
+ if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
void
-tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- , tcache_t *tcache
-#endif
- )
+tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem,
+ tcache_t *tcache)
{
- void *flush, *deferred, *ptr;
+ void *ptr;
unsigned i, nflush, ndeferred;
- bool first_pass;
+ bool merged_stats = false;
assert(binind < nhbins);
assert(rem <= tbin->ncached);
- assert(tbin->ncached > 0 || tbin->avail == NULL);
- for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
- true; flush != NULL; flush = deferred, nflush = ndeferred) {
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(flush);
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ tbin->avail[0]);
arena_t *arena = chunk->arena;
+ UNUSED bool idump;
+ if (config_prof)
+ idump = false;
malloc_mutex_lock(&arena->lock);
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
- if (arena == tcache->arena) {
-#endif
-#ifdef JEMALLOC_PROF
- arena_prof_accum(arena, tcache->prof_accumbytes);
- tcache->prof_accumbytes = 0;
-#endif
-#ifdef JEMALLOC_STATS
- arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[binind - nbins].nrequests +=
- tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
-#endif
-#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
+ if ((config_prof || config_stats) && arena == tcache->arena) {
+ if (config_prof) {
+ idump = arena_prof_accum_locked(arena,
+ tcache->prof_accumbytes);
+ tcache->prof_accumbytes = 0;
+ }
+ if (config_stats) {
+ merged_stats = true;
+ arena->stats.nrequests_large +=
+ tbin->tstats.nrequests;
+ arena->stats.lstats[binind - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ }
}
-#endif
- deferred = NULL;
ndeferred = 0;
for (i = 0; i < nflush; i++) {
- ptr = flush;
+ ptr = tbin->avail[i];
assert(ptr != NULL);
- flush = *(void **)ptr;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena)
- arena_dalloc_large(arena, chunk, ptr);
+ arena_dalloc_large_locked(arena, chunk, ptr);
else {
/*
* This object was allocated via a different
@@ -179,32 +209,74 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
* Stash the object, so that it can be handled
* in a future pass.
*/
- *(void **)ptr = deferred;
- deferred = ptr;
+ tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&arena->lock);
-
- if (first_pass) {
- tbin->avail = flush;
- first_pass = false;
- }
+ if (config_prof && idump)
+ prof_idump();
+ }
+ if (config_stats && merged_stats == false) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ arena_t *arena = tcache->arena;
+ malloc_mutex_lock(&arena->lock);
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ arena->stats.lstats[binind - NBINS].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(&arena->lock);
}
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water)
+ if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
+void
+tcache_arena_associate(tcache_t *tcache, arena_t *arena)
+{
+
+ if (config_stats) {
+ /* Link into list of extant tcaches. */
+ malloc_mutex_lock(&arena->lock);
+ ql_elm_new(tcache, link);
+ ql_tail_insert(&arena->tcache_ql, tcache, link);
+ malloc_mutex_unlock(&arena->lock);
+ }
+ tcache->arena = arena;
+}
+
+void
+tcache_arena_dissociate(tcache_t *tcache)
+{
+
+ if (config_stats) {
+ /* Unlink from list of extant tcaches. */
+ malloc_mutex_lock(&tcache->arena->lock);
+ ql_remove(&tcache->arena->tcache_ql, tcache, link);
+ malloc_mutex_unlock(&tcache->arena->lock);
+ tcache_stats_merge(tcache, tcache->arena);
+ }
+}
+
tcache_t *
tcache_create(arena_t *arena)
{
tcache_t *tcache;
- size_t size;
+ size_t size, stack_offset;
unsigned i;
size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
+ /* Naturally align the pointer stacks. */
+ size = PTR_CEILING(size);
+ stack_offset = size;
+ size += stack_nelms * sizeof(void *);
/*
* Round up to the nearest multiple of the cacheline size, in order to
* avoid the possibility of false cacheline sharing.
@@ -215,35 +287,27 @@ tcache_create(arena_t *arena)
*/
size = (size + CACHELINE_MASK) & (-CACHELINE);
- if (size <= small_maxclass)
+ if (size <= SMALL_MAXCLASS)
tcache = (tcache_t *)arena_malloc_small(arena, size, true);
+ else if (size <= tcache_maxclass)
+ tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
- tcache = (tcache_t *)icalloc(size);
+ tcache = (tcache_t *)icallocx(size, false, arena);
if (tcache == NULL)
return (NULL);
-#ifdef JEMALLOC_STATS
- /* Link into list of extant tcaches. */
- malloc_mutex_lock(&arena->lock);
- ql_elm_new(tcache, link);
- ql_tail_insert(&arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&arena->lock);
-#endif
+ tcache_arena_associate(tcache, arena);
- tcache->arena = arena;
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
- for (i = 0; i < nbins; i++) {
- if ((arena->bins[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
- tcache->tbins[i].ncached_max = (arena->bins[i].nregs <<
- 1);
- } else
- tcache->tbins[i].ncached_max = TCACHE_NSLOTS_SMALL_MAX;
+ for (i = 0; i < nhbins; i++) {
+ tcache->tbins[i].lg_fill_div = 1;
+ tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
+ (uintptr_t)stack_offset);
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
- for (; i < nhbins; i++)
- tcache->tbins[i].ncached_max = TCACHE_NSLOTS_LARGE;
- TCACHE_SET(tcache);
+ tcache_tsd_set(&tcache);
return (tcache);
}
@@ -252,114 +316,96 @@ void
tcache_destroy(tcache_t *tcache)
{
unsigned i;
+ size_t tcache_size;
-#ifdef JEMALLOC_STATS
- /* Unlink from list of extant tcaches. */
- malloc_mutex_lock(&tcache->arena->lock);
- ql_remove(&tcache->arena->tcache_ql, tcache, link);
- malloc_mutex_unlock(&tcache->arena->lock);
- tcache_stats_merge(tcache, tcache->arena);
-#endif
+ tcache_arena_dissociate(tcache);
- for (i = 0; i < nbins; i++) {
+ for (i = 0; i < NBINS; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_small(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- , tcache
-#endif
- );
-
-#ifdef JEMALLOC_STATS
- if (tbin->tstats.nrequests != 0) {
+ tcache_bin_flush_small(tbin, i, 0, tcache);
+
+ if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
arena_bin_t *bin = &arena->bins[i];
malloc_mutex_lock(&bin->lock);
bin->stats.nrequests += tbin->tstats.nrequests;
malloc_mutex_unlock(&bin->lock);
}
-#endif
}
for (; i < nhbins; i++) {
tcache_bin_t *tbin = &tcache->tbins[i];
- tcache_bin_flush_large(tbin, i, 0
-#if (defined(JEMALLOC_STATS) || defined(JEMALLOC_PROF))
- , tcache
-#endif
- );
-
-#ifdef JEMALLOC_STATS
- if (tbin->tstats.nrequests != 0) {
+ tcache_bin_flush_large(tbin, i, 0, tcache);
+
+ if (config_stats && tbin->tstats.nrequests != 0) {
arena_t *arena = tcache->arena;
malloc_mutex_lock(&arena->lock);
arena->stats.nrequests_large += tbin->tstats.nrequests;
- arena->stats.lstats[i - nbins].nrequests +=
+ arena->stats.lstats[i - NBINS].nrequests +=
tbin->tstats.nrequests;
malloc_mutex_unlock(&arena->lock);
}
-#endif
}
-#ifdef JEMALLOC_PROF
- if (tcache->prof_accumbytes > 0) {
- malloc_mutex_lock(&tcache->arena->lock);
- arena_prof_accum(tcache->arena, tcache->prof_accumbytes);
- malloc_mutex_unlock(&tcache->arena->lock);
- }
-#endif
+ if (config_prof && tcache->prof_accumbytes > 0 &&
+ arena_prof_accum(tcache->arena, tcache->prof_accumbytes))
+ prof_idump();
- if (arena_salloc(tcache) <= small_maxclass) {
+ tcache_size = arena_salloc(tcache, false);
+ if (tcache_size <= SMALL_MAXCLASS) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
- PAGE_SHIFT;
- arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];
- arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
- (uintptr_t)((pageind - (mapelm->bits >> PAGE_SHIFT)) <<
- PAGE_SHIFT));
- arena_bin_t *bin = run->bin;
+ LG_PAGE;
+ arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
- malloc_mutex_lock(&bin->lock);
- arena_dalloc_bin(arena, chunk, tcache, mapelm);
- malloc_mutex_unlock(&bin->lock);
+ arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm);
+ } else if (tcache_size <= tcache_maxclass) {
+ arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
+ arena_t *arena = chunk->arena;
+
+ arena_dalloc_large(arena, chunk, tcache);
} else
- idalloc(tcache);
+ idallocx(tcache, false);
}
-static void
+void
tcache_thread_cleanup(void *arg)
{
- tcache_t *tcache = (tcache_t *)arg;
+ tcache_t *tcache = *(tcache_t **)arg;
- if (tcache == (void *)(uintptr_t)1) {
+ if (tcache == TCACHE_STATE_DISABLED) {
+ /* Do nothing. */
+ } else if (tcache == TCACHE_STATE_REINCARNATED) {
/*
- * The previous time this destructor was called, we set the key
- * to 1 so that other destructors wouldn't cause re-creation of
- * the tcache. This time, do nothing, so that the destructor
- * will not be called again.
+ * Another destructor called an allocator function after this
+ * destructor was called. Reset tcache to
+ * TCACHE_STATE_PURGATORY in order to receive another callback.
*/
- } else if (tcache == (void *)(uintptr_t)2) {
+ tcache = TCACHE_STATE_PURGATORY;
+ tcache_tsd_set(&tcache);
+ } else if (tcache == TCACHE_STATE_PURGATORY) {
/*
- * Another destructor called an allocator function after this
- * destructor was called. Reset tcache to 1 in order to
- * receive another callback.
+ * The previous time this destructor was called, we set the key
+ * to TCACHE_STATE_PURGATORY so that other destructors wouldn't
+ * cause re-creation of the tcache. This time, do nothing, so
+ * that the destructor will not be called again.
*/
- TCACHE_SET((uintptr_t)1);
} else if (tcache != NULL) {
- assert(tcache != (void *)(uintptr_t)1);
+ assert(tcache != TCACHE_STATE_PURGATORY);
tcache_destroy(tcache);
- TCACHE_SET((uintptr_t)1);
+ tcache = TCACHE_STATE_PURGATORY;
+ tcache_tsd_set(&tcache);
}
}
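
The sentinel values above replace the old magic constants 1 and 2; the destructor effectively walks a small per-thread state machine. A sketch of the transitions (the REINCARNATED marking itself is set on the allocation path, as in upstream jemalloc, not in this function):

	/*
	 * Sketch of the per-thread tcache TSD states (not part of the patch):
	 *
	 *   live tcache  -- thread exit ----------------> destroyed, slot = PURGATORY
	 *   PURGATORY    -- later destructor allocates --> slot = REINCARNATED
	 *   REINCARNATED -- next cleanup pass -----------> slot = PURGATORY (one more callback)
	 *   PURGATORY    -- next cleanup pass -----------> no-op, destructor chain ends
	 *   DISABLED     -- cleanup ---------------------> no-op
	 */
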
-#ifdef JEMALLOC_STATS
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
unsigned i;
/* Merge and reset tcache stats. */
- for (i = 0; i < nbins; i++) {
+ for (i = 0; i < NBINS; i++) {
arena_bin_t *bin = &arena->bins[i];
tcache_bin_t *tbin = &tcache->tbins[i];
malloc_mutex_lock(&bin->lock);
@@ -369,49 +415,62 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
}
for (; i < nhbins; i++) {
- malloc_large_stats_t *lstats = &arena->stats.lstats[i - nbins];
+ malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
tcache_bin_t *tbin = &tcache->tbins[i];
arena->stats.nrequests_large += tbin->tstats.nrequests;
lstats->nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
}
-#endif
-void
-tcache_boot(void)
+bool
+tcache_boot0(void)
{
+ unsigned i;
- if (opt_tcache) {
- /*
- * If necessary, clamp opt_lg_tcache_max, now that
- * small_maxclass and arena_maxclass are known.
- */
- if (opt_lg_tcache_max < 0 || (1U <<
- opt_lg_tcache_max) < small_maxclass)
- tcache_maxclass = small_maxclass;
- else if ((1U << opt_lg_tcache_max) > arena_maxclass)
- tcache_maxclass = arena_maxclass;
- else
- tcache_maxclass = (1U << opt_lg_tcache_max);
-
- nhbins = nbins + (tcache_maxclass >> PAGE_SHIFT);
-
- /* Compute incremental GC event threshold. */
- if (opt_lg_tcache_gc_sweep >= 0) {
- tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
- nbins) + (((1U << opt_lg_tcache_gc_sweep) % nbins ==
- 0) ? 0 : 1);
- } else
- tcache_gc_incr = 0;
-
- if (pthread_key_create(&tcache_tsd, tcache_thread_cleanup) !=
- 0) {
- malloc_write(
- "<jemalloc>: Error in pthread_key_create()\n");
- abort();
+ /*
+ * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
+ * known.
+ */
+ if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
+ tcache_maxclass = SMALL_MAXCLASS;
+ else if ((1U << opt_lg_tcache_max) > arena_maxclass)
+ tcache_maxclass = arena_maxclass;
+ else
+ tcache_maxclass = (1U << opt_lg_tcache_max);
+
+ nhbins = NBINS + (tcache_maxclass >> LG_PAGE);
+
+ /* Initialize tcache_bin_info. */
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
+ sizeof(tcache_bin_info_t));
+ if (tcache_bin_info == NULL)
+ return (true);
+ stack_nelms = 0;
+ for (i = 0; i < NBINS; i++) {
+ if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
+ tcache_bin_info[i].ncached_max =
+ (arena_bin_info[i].nregs << 1);
+ } else {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MAX;
}
+ stack_nelms += tcache_bin_info[i].ncached_max;
+ }
+ for (; i < nhbins; i++) {
+ tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
+ stack_nelms += tcache_bin_info[i].ncached_max;
}
+
+ return (false);
+}
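
tcache_boot0 first clamps the configured maximum cached size between SMALL_MAXCLASS and arena_maxclass, then derives the number of cached bins from it. A worked example, assuming 4 KiB pages and the usual default of opt_lg_tcache_max = 15:

	/* Illustrative arithmetic only (LG_PAGE == 12, opt_lg_tcache_max == 15 assumed). */
	tcache_maxclass = 1U << 15;                     /* 32 KiB */
	nhbins = NBINS + (tcache_maxclass >> LG_PAGE);  /* small bins + 8 large bins (4K..32K) */
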
+
+bool
+tcache_boot1(void)
+{
+
+ if (tcache_tsd_boot() || tcache_enabled_tsd_boot())
+ return (true);
+
+ return (false);
}
-/******************************************************************************/
-#endif /* JEMALLOC_TCACHE */
diff --git a/dep/jemalloc/src/tsd.c b/dep/jemalloc/src/tsd.c
new file mode 100644
index 00000000000..961a546329c
--- /dev/null
+++ b/dep/jemalloc/src/tsd.c
@@ -0,0 +1,107 @@
+#define JEMALLOC_TSD_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Data. */
+
+static unsigned ncleanups;
+static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+
+/******************************************************************************/
+
+void *
+malloc_tsd_malloc(size_t size)
+{
+
+ /* Avoid choose_arena() in order to dodge bootstrapping issues. */
+ return (arena_malloc(arenas[0], size, false, false));
+}
+
+void
+malloc_tsd_dalloc(void *wrapper)
+{
+
+ idalloc(wrapper);
+}
+
+void
+malloc_tsd_no_cleanup(void *arg)
+{
+
+ not_reached();
+}
+
+#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
+#ifndef _WIN32
+JEMALLOC_EXPORT
+#endif
+void
+_malloc_thread_cleanup(void)
+{
+ bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
+ unsigned i;
+
+ for (i = 0; i < ncleanups; i++)
+ pending[i] = true;
+
+ do {
+ again = false;
+ for (i = 0; i < ncleanups; i++) {
+ if (pending[i]) {
+ pending[i] = cleanups[i]();
+ if (pending[i])
+ again = true;
+ }
+ }
+ } while (again);
+}
+#endif
+
+void
+malloc_tsd_cleanup_register(bool (*f)(void))
+{
+
+ assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
+ cleanups[ncleanups] = f;
+ ncleanups++;
+}
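
Cleanup hooks registered here are driven by _malloc_thread_cleanup() above on platforms that use it (FreeBSD, or DLL_THREAD_DETACH on Windows); a hook returns true to request another pass and false once its thread-local state is fully torn down. A minimal sketch of a registration, with the hook name invented for illustration:

	static bool
	example_tsd_cleanup(void)		/* hypothetical subsystem hook */
	{

		/* Tear down this thread's state here. */
		return (false);			/* false: nothing left to do */
	}

	/* During bootstrap, typically right after malloc_tsd_boot(): */
	malloc_tsd_cleanup_register(example_tsd_cleanup);
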
+
+void
+malloc_tsd_boot(void)
+{
+
+ ncleanups = 0;
+}
+
+#ifdef _WIN32
+static BOOL WINAPI
+_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
+{
+
+ switch (fdwReason) {
+#ifdef JEMALLOC_LAZY_LOCK
+ case DLL_THREAD_ATTACH:
+ isthreaded = true;
+ break;
+#endif
+ case DLL_THREAD_DETACH:
+ _malloc_thread_cleanup();
+ break;
+ default:
+ break;
+ }
+ return (true);
+}
+
+#ifdef _MSC_VER
+# ifdef _M_IX86
+# pragma comment(linker, "/INCLUDE:__tls_used")
+# else
+# pragma comment(linker, "/INCLUDE:_tls_used")
+# endif
+# pragma section(".CRT$XLY",long,read)
+#endif
+JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
+static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL,
+ DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
+#endif
diff --git a/dep/jemalloc/src/util.c b/dep/jemalloc/src/util.c
new file mode 100644
index 00000000000..b3a01143698
--- /dev/null
+++ b/dep/jemalloc/src/util.c
@@ -0,0 +1,641 @@
+#define assert(e) do { \
+ if (config_debug && !(e)) { \
+ malloc_write("<jemalloc>: Failed assertion\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_reached() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Unreachable code reached\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define not_implemented() do { \
+ if (config_debug) { \
+ malloc_write("<jemalloc>: Not implemented\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define JEMALLOC_UTIL_C_
+#include "jemalloc/internal/jemalloc_internal.h"
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static void wrtmessage(void *cbopaque, const char *s);
+#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
+static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
+ size_t *slen_p);
+#define D2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p);
+#define O2S_BUFSIZE (1 + U2S_BUFSIZE)
+static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p);
+#define X2S_BUFSIZE (2 + U2S_BUFSIZE)
+static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
+ size_t *slen_p);
+
+/******************************************************************************/
+
+/* malloc_message() setup. */
+static void
+wrtmessage(void *cbopaque, const char *s)
+{
+
+#ifdef SYS_write
+ /*
+ * Use syscall(2) rather than write(2) when possible in order to avoid
+ * the possibility of memory allocation within libc. This is necessary
+ * on FreeBSD; most operating systems do not have this problem though.
+ */
+ UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s));
+#else
+ UNUSED int result = write(STDERR_FILENO, s, strlen(s));
+#endif
+}
+
+JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s);
+
+/*
+ * Wrapper around malloc_message() that avoids the need for
+ * je_malloc_message(...) throughout the code.
+ */
+void
+malloc_write(const char *s)
+{
+
+ if (je_malloc_message != NULL)
+ je_malloc_message(NULL, s);
+ else
+ wrtmessage(NULL, s);
+}
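
je_malloc_message is the exported hook behind malloc_write(); when an embedding application assigns it, every diagnostic jemalloc emits is routed through the callback instead of being written straight to STDERR_FILENO. A minimal sketch of an override (the logging target is just an example):

	#include <stdio.h>

	static void
	app_malloc_message(void *cbopaque, const char *s)
	{

		/* Forward jemalloc diagnostics to the application's log stream. */
		fputs(s, stderr);
	}

	/* During application startup: */
	je_malloc_message = app_malloc_message;
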
+
+/*
+ * glibc provides a non-standard strerror_r() when _GNU_SOURCE is defined, so
+ * provide a wrapper.
+ */
+int
+buferror(char *buf, size_t buflen)
+{
+
+#ifdef _WIN32
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0,
+ (LPSTR)buf, buflen, NULL);
+ return (0);
+#elif defined(_GNU_SOURCE)
+ char *b = strerror_r(errno, buf, buflen);
+ if (b != buf) {
+ strncpy(buf, b, buflen);
+ buf[buflen-1] = '\0';
+ }
+ return (0);
+#else
+ return (strerror_r(errno, buf, buflen));
+#endif
+}
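
buferror hides the difference between the XSI and GNU strerror_r variants and the Win32 FormatMessageA path, so call sites can format the last error uniformly. A typical use looks roughly like this (buffer size is illustrative):

	char ebuf[64];

	buferror(ebuf, sizeof(ebuf));
	malloc_printf("<jemalloc>: operation failed: %s\n", ebuf);
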
+
+uintmax_t
+malloc_strtoumax(const char *nptr, char **endptr, int base)
+{
+ uintmax_t ret, digit;
+ int b;
+ bool neg;
+ const char *p, *ns;
+
+ if (base < 0 || base == 1 || base > 36) {
+ set_errno(EINVAL);
+ return (UINTMAX_MAX);
+ }
+ b = base;
+
+ /* Swallow leading whitespace and get sign, if any. */
+ neg = false;
+ p = nptr;
+ while (true) {
+ switch (*p) {
+ case '\t': case '\n': case '\v': case '\f': case '\r': case ' ':
+ p++;
+ break;
+ case '-':
+ neg = true;
+ /* Fall through. */
+ case '+':
+ p++;
+ /* Fall through. */
+ default:
+ goto label_prefix;
+ }
+ }
+
+ /* Get prefix, if any. */
+ label_prefix:
+ /*
+ * Note where the first non-whitespace/sign character is so that it is
+ * possible to tell whether any digits are consumed (e.g., " 0" vs.
+ * " -x").
+ */
+ ns = p;
+ if (*p == '0') {
+ switch (p[1]) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7':
+ if (b == 0)
+ b = 8;
+ if (b == 8)
+ p++;
+ break;
+ case 'x':
+ switch (p[2]) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'A': case 'B': case 'C': case 'D': case 'E':
+ case 'F':
+ case 'a': case 'b': case 'c': case 'd': case 'e':
+ case 'f':
+ if (b == 0)
+ b = 16;
+ if (b == 16)
+ p += 2;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ if (b == 0)
+ b = 10;
+
+ /* Convert. */
+ ret = 0;
+ while ((*p >= '0' && *p <= '9' && (digit = *p - '0') < b)
+ || (*p >= 'A' && *p <= 'Z' && (digit = 10 + *p - 'A') < b)
+ || (*p >= 'a' && *p <= 'z' && (digit = 10 + *p - 'a') < b)) {
+ uintmax_t pret = ret;
+ ret *= b;
+ ret += digit;
+ if (ret < pret) {
+ /* Overflow. */
+ set_errno(ERANGE);
+ return (UINTMAX_MAX);
+ }
+ p++;
+ }
+ if (neg)
+ ret = -ret;
+
+ if (endptr != NULL) {
+ if (p == ns) {
+ /* No characters were converted. */
+ *endptr = (char *)nptr;
+ } else
+ *endptr = (char *)p;
+ }
+
+ return (ret);
+}
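
malloc_strtoumax mirrors strtoumax(3) without relying on libc's locale machinery; base 0 auto-detects the "0x" and leading-zero prefixes, and endptr reports how far parsing advanced. For example:

	char *end;
	uintmax_t v;

	v = malloc_strtoumax("0x1f tail", &end, 0);	/* v == 31, *end == ' ' */
	v = malloc_strtoumax("010", &end, 0);		/* octal: v == 8 */
	v = malloc_strtoumax("12abc", &end, 10);	/* v == 12, end points at "abc" */
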
+
+static char *
+u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p)
+{
+ unsigned i;
+
+ i = U2S_BUFSIZE - 1;
+ s[i] = '\0';
+ switch (base) {
+ case 10:
+ do {
+ i--;
+ s[i] = "0123456789"[x % (uint64_t)10];
+ x /= (uint64_t)10;
+ } while (x > 0);
+ break;
+ case 16: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEF"
+ : "0123456789abcdef";
+
+ do {
+ i--;
+ s[i] = digits[x & 0xf];
+ x >>= 4;
+ } while (x > 0);
+ break;
+ } default: {
+ const char *digits = (uppercase)
+ ? "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ : "0123456789abcdefghijklmnopqrstuvwxyz";
+
+ assert(base >= 2 && base <= 36);
+ do {
+ i--;
+ s[i] = digits[x % (uint64_t)base];
+ x /= (uint64_t)base;
+ } while (x > 0);
+ }}
+
+ *slen_p = U2S_BUFSIZE - 1 - i;
+ return (&s[i]);
+}
+
+static char *
+d2s(intmax_t x, char sign, char *s, size_t *slen_p)
+{
+ bool neg;
+
+ if ((neg = (x < 0)))
+ x = -x;
+ s = u2s(x, 10, false, s, slen_p);
+ if (neg)
+ sign = '-';
+ switch (sign) {
+ case '-':
+ if (neg == false)
+ break;
+ /* Fall through. */
+ case ' ':
+ case '+':
+ s--;
+ (*slen_p)++;
+ *s = sign;
+ break;
+ default: not_reached();
+ }
+ return (s);
+}
+
+static char *
+o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p)
+{
+
+ s = u2s(x, 8, false, s, slen_p);
+ if (alt_form && *s != '0') {
+ s--;
+ (*slen_p)++;
+ *s = '0';
+ }
+ return (s);
+}
+
+static char *
+x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p)
+{
+
+ s = u2s(x, 16, uppercase, s, slen_p);
+ if (alt_form) {
+ s -= 2;
+ (*slen_p) += 2;
+ memcpy(s, uppercase ? "0X" : "0x", 2);
+ }
+ return (s);
+}
+
+int
+malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap)
+{
+ int ret;
+ size_t i;
+ const char *f;
+
+#define APPEND_C(c) do { \
+ if (i < size) \
+ str[i] = (c); \
+ i++; \
+} while (0)
+#define APPEND_S(s, slen) do { \
+ if (i < size) { \
+ size_t cpylen = (slen <= size - i) ? slen : size - i; \
+ memcpy(&str[i], s, cpylen); \
+ } \
+ i += slen; \
+} while (0)
+#define APPEND_PADDED_S(s, slen, width, left_justify) do { \
+ /* Left padding. */ \
+ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \
+ (size_t)width - slen : 0); \
+ if (left_justify == false && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) \
+ APPEND_C(' '); \
+ } \
+ /* Value. */ \
+ APPEND_S(s, slen); \
+ /* Right padding. */ \
+ if (left_justify && pad_len != 0) { \
+ size_t j; \
+ for (j = 0; j < pad_len; j++) \
+ APPEND_C(' '); \
+ } \
+} while (0)
+#define GET_ARG_NUMERIC(val, len) do { \
+ switch (len) { \
+ case '?': \
+ val = va_arg(ap, int); \
+ break; \
+ case '?' | 0x80: \
+ val = va_arg(ap, unsigned int); \
+ break; \
+ case 'l': \
+ val = va_arg(ap, long); \
+ break; \
+ case 'l' | 0x80: \
+ val = va_arg(ap, unsigned long); \
+ break; \
+ case 'q': \
+ val = va_arg(ap, long long); \
+ break; \
+ case 'q' | 0x80: \
+ val = va_arg(ap, unsigned long long); \
+ break; \
+ case 'j': \
+ val = va_arg(ap, intmax_t); \
+ break; \
+ case 't': \
+ val = va_arg(ap, ptrdiff_t); \
+ break; \
+ case 'z': \
+ val = va_arg(ap, ssize_t); \
+ break; \
+ case 'z' | 0x80: \
+ val = va_arg(ap, size_t); \
+ break; \
+ case 'p': /* Synthetic; used for %p. */ \
+ val = va_arg(ap, uintptr_t); \
+ break; \
+ default: not_reached(); \
+ } \
+} while (0)
+
+ i = 0;
+ f = format;
+ while (true) {
+ switch (*f) {
+ case '\0': goto label_out;
+ case '%': {
+ bool alt_form = false;
+ bool left_justify = false;
+ bool plus_space = false;
+ bool plus_plus = false;
+ int prec = -1;
+ int width = -1;
+ unsigned char len = '?';
+
+ f++;
+ if (*f == '%') {
+ /* %% */
+ APPEND_C(*f);
+ break;
+ }
+ /* Flags. */
+ while (true) {
+ switch (*f) {
+ case '#':
+ assert(alt_form == false);
+ alt_form = true;
+ break;
+ case '-':
+ assert(left_justify == false);
+ left_justify = true;
+ break;
+ case ' ':
+ assert(plus_space == false);
+ plus_space = true;
+ break;
+ case '+':
+ assert(plus_plus == false);
+ plus_plus = true;
+ break;
+ default: goto label_width;
+ }
+ f++;
+ }
+ /* Width. */
+ label_width:
+ switch (*f) {
+ case '*':
+ width = va_arg(ap, int);
+ f++;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uwidth;
+ set_errno(0);
+ uwidth = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uwidth != UINTMAX_MAX || get_errno() !=
+ ERANGE);
+ width = (int)uwidth;
+ if (*f == '.') {
+ f++;
+ goto label_precision;
+ } else
+ goto label_length;
+ break;
+ } case '.':
+ f++;
+ goto label_precision;
+ default: goto label_length;
+ }
+ /* Precision. */
+ label_precision:
+ switch (*f) {
+ case '*':
+ prec = va_arg(ap, int);
+ f++;
+ break;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9': {
+ uintmax_t uprec;
+ set_errno(0);
+ uprec = malloc_strtoumax(f, (char **)&f, 10);
+ assert(uprec != UINTMAX_MAX || get_errno() !=
+ ERANGE);
+ prec = (int)uprec;
+ break;
+ }
+ default: break;
+ }
+ /* Length. */
+ label_length:
+ switch (*f) {
+ case 'l':
+ f++;
+ if (*f == 'l') {
+ len = 'q';
+ f++;
+ } else
+ len = 'l';
+ break;
+ case 'j':
+ len = 'j';
+ f++;
+ break;
+ case 't':
+ len = 't';
+ f++;
+ break;
+ case 'z':
+ len = 'z';
+ f++;
+ break;
+ default: break;
+ }
+ /* Conversion specifier. */
+ switch (*f) {
+ char *s;
+ size_t slen;
+ case 'd': case 'i': {
+ intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[D2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len);
+ s = d2s(val, (plus_plus ? '+' : (plus_space ?
+ ' ' : '-')), buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'o': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[O2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = o2s(val, alt_form, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'u': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[U2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = u2s(val, 10, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'x': case 'X': {
+ uintmax_t val JEMALLOC_CC_SILENCE_INIT(0);
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, len | 0x80);
+ s = x2s(val, alt_form, *f == 'X', buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ } case 'c': {
+ unsigned char val;
+ char buf[2];
+
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ val = va_arg(ap, int);
+ buf[0] = val;
+ buf[1] = '\0';
+ APPEND_PADDED_S(buf, 1, width, left_justify);
+ f++;
+ break;
+ } case 's':
+ assert(len == '?' || len == 'l');
+ assert_not_implemented(len != 'l');
+ s = va_arg(ap, char *);
+ slen = (prec == -1) ? strlen(s) : prec;
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ case 'p': {
+ uintmax_t val;
+ char buf[X2S_BUFSIZE];
+
+ GET_ARG_NUMERIC(val, 'p');
+ s = x2s(val, true, false, buf, &slen);
+ APPEND_PADDED_S(s, slen, width, left_justify);
+ f++;
+ break;
+ }
+ default: not_implemented();
+ }
+ break;
+ } default: {
+ APPEND_C(*f);
+ f++;
+ break;
+ }}
+ }
+ label_out:
+ if (i < size)
+ str[i] = '\0';
+ else
+ str[size - 1] = '\0';
+ ret = i;
+
+#undef APPEND_C
+#undef APPEND_S
+#undef APPEND_PADDED_S
+#undef GET_ARG_NUMERIC
+ return (ret);
+}
+
+JEMALLOC_ATTR(format(printf, 3, 4))
+int
+malloc_snprintf(char *str, size_t size, const char *format, ...)
+{
+ int ret;
+ va_list ap;
+
+ va_start(ap, format);
+ ret = malloc_vsnprintf(str, size, format, ap);
+ va_end(ap);
+
+ return (ret);
+}
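
malloc_vsnprintf implements just the conversions jemalloc itself needs (d, i, u, o, x, X, c, s, and p, with the #, -, space, and + flags, width, precision, and the l/ll/j/t/z length modifiers), so formatting never calls back into libc's allocating printf machinery. A small usage sketch:

	char buf[128];

	malloc_snprintf(buf, sizeof(buf), "size: %zu, ptr: %p, flags: %#x\n",
	    (size_t)4096, (void *)buf, 0x1fU);
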
+
+void
+malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, va_list ap)
+{
+ char buf[MALLOC_PRINTF_BUFSIZE];
+
+ if (write_cb == NULL) {
+ /*
+ * The caller did not provide an alternate write_cb callback
+ * function, so use the default one. malloc_write() is an
+ * inline function, so use malloc_message() directly here.
+ */
+ write_cb = (je_malloc_message != NULL) ? je_malloc_message :
+ wrtmessage;
+ cbopaque = NULL;
+ }
+
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ write_cb(cbopaque, buf);
+}
+
+/*
+ * Print to a callback function in such a way as to (hopefully) avoid memory
+ * allocation.
+ */
+JEMALLOC_ATTR(format(printf, 3, 4))
+void
+malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
+ const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(write_cb, cbopaque, format, ap);
+ va_end(ap);
+}
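
malloc_vcprintf/malloc_cprintf format into a fixed on-stack buffer and hand the result to a caller-supplied sink, which is how jemalloc streams multi-line output (e.g. stats) without allocating. A hedged sketch of a custom sink, assuming stdio is acceptable in the caller's context:

	#include <stdio.h>

	static void
	stderr_write_cb(void *cbopaque, const char *s)
	{

		/* cbopaque carries whatever opaque state the caller passed. */
		fputs(s, (FILE *)cbopaque);
	}

	/* ... */
	malloc_cprintf(stderr_write_cb, (void *)stderr, "cached bins: %u\n", 36U);
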
+
+/* Print to stderr in such a way as to avoid memory allocation. */
+JEMALLOC_ATTR(format(printf, 1, 2))
+void
+malloc_printf(const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ malloc_vcprintf(NULL, NULL, format, ap);
+ va_end(ap);
+}
diff --git a/dep/jemalloc/src/zone.c b/dep/jemalloc/src/zone.c
new file mode 100644
index 00000000000..c62c183f65e
--- /dev/null
+++ b/dep/jemalloc/src/zone.c
@@ -0,0 +1,258 @@
+#include "jemalloc/internal/jemalloc_internal.h"
+#ifndef JEMALLOC_ZONE
+# error "This source file is for zones on Darwin (OS X)."
+#endif
+
+/*
+ * The malloc_default_purgeable_zone function is only available on >= 10.6.
+ * We need to check whether it is present at runtime, thus the weak_import.
+ */
+extern malloc_zone_t *malloc_default_purgeable_zone(void)
+JEMALLOC_ATTR(weak_import);
+
+/******************************************************************************/
+/* Data. */
+
+static malloc_zone_t zone;
+static struct malloc_introspection_t zone_introspect;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static size_t zone_size(malloc_zone_t *zone, void *ptr);
+static void *zone_malloc(malloc_zone_t *zone, size_t size);
+static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
+static void *zone_valloc(malloc_zone_t *zone, size_t size);
+static void zone_free(malloc_zone_t *zone, void *ptr);
+static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
+#if (JEMALLOC_ZONE_VERSION >= 5)
+static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
+    size_t size);
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 6)
+static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
+ size_t size);
+#endif
+static void *zone_destroy(malloc_zone_t *zone);
+static size_t zone_good_size(malloc_zone_t *zone, size_t size);
+static void zone_force_lock(malloc_zone_t *zone);
+static void zone_force_unlock(malloc_zone_t *zone);
+
+/******************************************************************************/
+/*
+ * Functions.
+ */
+
+static size_t
+zone_size(malloc_zone_t *zone, void *ptr)
+{
+
+ /*
+ * There appear to be places within Darwin (such as setenv(3)) that
+ * cause calls to this function with pointers that *no* zone owns. If
+ * we knew that all pointers were owned by *some* zone, we could split
+ * our zone into two parts, and use one as the default allocator and
+ * the other as the default deallocator/reallocator. Since that will
+ * not work in practice, we must check all pointers to assure that they
+ * reside within a mapped chunk before determining size.
+ */
+ return (ivsalloc(ptr, config_prof));
+}
+
+static void *
+zone_malloc(malloc_zone_t *zone, size_t size)
+{
+
+ return (je_malloc(size));
+}
+
+static void *
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
+{
+
+ return (je_calloc(num, size));
+}
+
+static void *
+zone_valloc(malloc_zone_t *zone, size_t size)
+{
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ je_posix_memalign(&ret, PAGE, size);
+
+ return (ret);
+}
+
+static void
+zone_free(malloc_zone_t *zone, void *ptr)
+{
+
+ if (ivsalloc(ptr, config_prof) != 0) {
+ je_free(ptr);
+ return;
+ }
+
+ free(ptr);
+}
+
+static void *
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
+{
+
+ if (ivsalloc(ptr, config_prof) != 0)
+ return (je_realloc(ptr, size));
+
+ return (realloc(ptr, size));
+}
+
+#if (JEMALLOC_ZONE_VERSION >= 5)
+static void *
+zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
+{
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ je_posix_memalign(&ret, alignment, size);
+
+ return (ret);
+}
+#endif
+
+#if (JEMALLOC_ZONE_VERSION >= 6)
+static void
+zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
+{
+
+ if (ivsalloc(ptr, config_prof) != 0) {
+ assert(ivsalloc(ptr, config_prof) == size);
+ je_free(ptr);
+ return;
+ }
+
+ free(ptr);
+}
+#endif
+
+static void *
+zone_destroy(malloc_zone_t *zone)
+{
+
+ /* This function should never be called. */
+ assert(false);
+ return (NULL);
+}
+
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size)
+{
+
+ if (size == 0)
+ size = 1;
+ return (s2u(size));
+}
+
+static void
+zone_force_lock(malloc_zone_t *zone)
+{
+
+ if (isthreaded)
+ jemalloc_prefork();
+}
+
+static void
+zone_force_unlock(malloc_zone_t *zone)
+{
+
+ if (isthreaded)
+ jemalloc_postfork_parent();
+}
+
+JEMALLOC_ATTR(constructor)
+void
+register_zone(void)
+{
+
+ /*
+ * If something else replaced the system default zone allocator, don't
+ * register jemalloc's.
+ */
+ malloc_zone_t *default_zone = malloc_default_zone();
+ if (!default_zone->zone_name ||
+ strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
+ return;
+ }
+
+ zone.size = (void *)zone_size;
+ zone.malloc = (void *)zone_malloc;
+ zone.calloc = (void *)zone_calloc;
+ zone.valloc = (void *)zone_valloc;
+ zone.free = (void *)zone_free;
+ zone.realloc = (void *)zone_realloc;
+ zone.destroy = (void *)zone_destroy;
+ zone.zone_name = "jemalloc_zone";
+ zone.batch_malloc = NULL;
+ zone.batch_free = NULL;
+ zone.introspect = &zone_introspect;
+ zone.version = JEMALLOC_ZONE_VERSION;
+#if (JEMALLOC_ZONE_VERSION >= 5)
+ zone.memalign = zone_memalign;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 6)
+ zone.free_definite_size = zone_free_definite_size;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 8)
+ zone.pressure_relief = NULL;
+#endif
+
+ zone_introspect.enumerator = NULL;
+ zone_introspect.good_size = (void *)zone_good_size;
+ zone_introspect.check = NULL;
+ zone_introspect.print = NULL;
+ zone_introspect.log = NULL;
+ zone_introspect.force_lock = (void *)zone_force_lock;
+ zone_introspect.force_unlock = (void *)zone_force_unlock;
+ zone_introspect.statistics = NULL;
+#if (JEMALLOC_ZONE_VERSION >= 6)
+ zone_introspect.zone_locked = NULL;
+#endif
+#if (JEMALLOC_ZONE_VERSION >= 7)
+ zone_introspect.enable_discharge_checking = NULL;
+ zone_introspect.disable_discharge_checking = NULL;
+ zone_introspect.discharge = NULL;
+#ifdef __BLOCKS__
+ zone_introspect.enumerate_discharged_pointers = NULL;
+#else
+ zone_introspect.enumerate_unavailable_without_blocks = NULL;
+#endif
+#endif
+
+ /*
+ * The default purgeable zone is created lazily by OSX's libc. It uses
+ * the default zone when it is created for "small" allocations
+ * (< 15 KiB), but assumes the default zone is a scalable_zone. This
+ * obviously fails when the default zone is the jemalloc zone, so
+ * malloc_default_purgeable_zone is called beforehand so that the
+ * default purgeable zone is created when the default zone is still
+ * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
+ * to check for the existence of malloc_default_purgeable_zone() at
+ * run time.
+ */
+ if (malloc_default_purgeable_zone != NULL)
+ malloc_default_purgeable_zone();
+
+ /* Register the custom zone. At this point it won't be the default. */
+ malloc_zone_register(&zone);
+
+ /*
+ * Unregister and reregister the default zone. On OSX >= 10.6,
+ * unregistering takes the last registered zone and places it at the
+ * location of the specified zone. Unregistering the default zone thus
+ * makes the last registered one the default. On OSX < 10.6,
+ * unregistering shifts all registered zones. The first registered zone
+ * then becomes the default.
+ */
+ do {
+ default_zone = malloc_default_zone();
+ malloc_zone_unregister(default_zone);
+ malloc_zone_register(default_zone);
+ } while (malloc_default_zone() != &zone);
+}
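
Once the unregister/reregister loop above terminates, jemalloc's zone is what malloc_default_zone() returns, so the system malloc/free entry points route through it. A quick, illustrative sanity check on Darwin:

	#include <malloc/malloc.h>
	#include <string.h>
	#include <assert.h>

	static void
	check_jemalloc_is_default_zone(void)
	{

		malloc_zone_t *dz = malloc_default_zone();
		assert(dz->zone_name != NULL &&
		    strcmp(dz->zone_name, "jemalloc_zone") == 0);
	}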