path: root/dep/jemalloc/src/tcache.c
Diffstat (limited to 'dep/jemalloc/src/tcache.c')
-rw-r--r--  dep/jemalloc/src/tcache.c | 153
1 file changed, 108 insertions(+), 45 deletions(-)
diff --git a/dep/jemalloc/src/tcache.c b/dep/jemalloc/src/tcache.c
index cbbe7a113a9..31c329e1613 100644
--- a/dep/jemalloc/src/tcache.c
+++ b/dep/jemalloc/src/tcache.c
@@ -8,6 +8,9 @@ bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
ssize_t opt_lg_tcache_gc_sweep = LG_TCACHE_GC_SWEEP_DEFAULT;
+tcache_bin_info_t *tcache_bin_info;
+static unsigned stack_nelms; /* Total stack elms per tcache. */
+
/* Map of thread-specific caches. */
#ifndef NO_TLS
__thread tcache_t *tcache_tls JEMALLOC_ATTR(tls_model("initial-exec"));
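
Note: the new global above, tcache_bin_info, moves the per-bin cache limits
out of each tcache_t into a single table computed once in tcache_boot(). The
only member this diff exercises is ncached_max; a minimal sketch of the
record, assuming the header declares it along these lines (the authoritative
definition lives in jemalloc's internal tcache header):

    /* Sketch only; ncached_max is the sole field this diff relies on. */
    typedef struct tcache_bin_info_s {
        unsigned ncached_max; /* Upper limit on cached objects per bin. */
    } tcache_bin_info_t;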
@@ -55,18 +58,19 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
#endif
)
{
- void *flush, *deferred, *ptr;
+ void *ptr;
unsigned i, nflush, ndeferred;
- bool first_pass;
+#ifdef JEMALLOC_STATS
+ bool merged_stats = false;
+#endif
assert(binind < nbins);
assert(rem <= tbin->ncached);
- assert(tbin->ncached > 0 || tbin->avail == NULL);
- for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
- true; flush != NULL; flush = deferred, nflush = ndeferred) {
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena bin associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(flush);
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ tbin->avail[0]);
arena_t *arena = chunk->arena;
arena_bin_t *bin = &arena->bins[binind];
@@ -82,17 +86,17 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
malloc_mutex_lock(&bin->lock);
#ifdef JEMALLOC_STATS
if (arena == tcache->arena) {
+ assert(merged_stats == false);
+ merged_stats = true;
bin->stats.nflushes++;
bin->stats.nrequests += tbin->tstats.nrequests;
tbin->tstats.nrequests = 0;
}
#endif
- deferred = NULL;
ndeferred = 0;
for (i = 0; i < nflush; i++) {
- ptr = flush;
+ ptr = tbin->avail[i];
assert(ptr != NULL);
- flush = *(void **)ptr;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena) {
size_t pageind = ((uintptr_t)ptr -
@@ -107,21 +111,31 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem
* locked. Stash the object, so that it can be
* handled in a future pass.
*/
- *(void **)ptr = deferred;
- deferred = ptr;
+ tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&bin->lock);
-
- if (first_pass) {
- tbin->avail = flush;
- first_pass = false;
- }
}
+#ifdef JEMALLOC_STATS
+ if (merged_stats == false) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ arena_bin_t *bin = &tcache->arena->bins[binind];
+ malloc_mutex_lock(&bin->lock);
+ bin->stats.nflushes++;
+ bin->stats.nrequests += tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(&bin->lock);
+ }
+#endif
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water)
+ if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
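
Note: the core change in tcache_bin_flush_small() is that tbin->avail is now
a flat pointer array rather than the head of an intrusive free list threaded
through the cached objects themselves. Objects belonging to a different arena
than the one locked for the current pass are compacted to the front of the
array and retried on the next pass. A standalone sketch of that
deferred-compaction pattern, with owner_of() and release() standing in for
jemalloc's chunk->arena lookup and bin-local free (illustrative placeholders,
not jemalloc APIs):

    #include <string.h>

    extern void *owner_of(void *ptr);             /* hypothetical */
    extern void release(void *owner, void *ptr);  /* hypothetical */

    static void
    flush_array(void **avail, unsigned ncached, unsigned rem)
    {
        unsigned nflush, ndeferred, i;

        for (nflush = ncached - rem; nflush > 0; nflush = ndeferred) {
            /* Lock target for this pass: owner of the first object. */
            void *owner = owner_of(avail[0]);
            ndeferred = 0;
            for (i = 0; i < nflush; i++) {
                void *ptr = avail[i];
                if (owner_of(ptr) == owner)
                    release(owner, ptr);
                else
                    avail[ndeferred++] = ptr; /* Retry on a later pass. */
            }
        }
        /* Slide the rem survivors down to the base of the stack. */
        memmove(avail, &avail[ncached - rem], rem * sizeof(void *));
    }

Each pass acquires at most one lock, so a flush touching k distinct arenas
makes k passes instead of chasing per-object list links under the lock.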
@@ -132,18 +146,19 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#endif
)
{
- void *flush, *deferred, *ptr;
+ void *ptr;
unsigned i, nflush, ndeferred;
- bool first_pass;
+#ifdef JEMALLOC_STATS
+ bool merged_stats = false;
+#endif
assert(binind < nhbins);
assert(rem <= tbin->ncached);
- assert(tbin->ncached > 0 || tbin->avail == NULL);
- for (flush = tbin->avail, nflush = tbin->ncached - rem, first_pass =
- true; flush != NULL; flush = deferred, nflush = ndeferred) {
+ for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
/* Lock the arena associated with the first object. */
- arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(flush);
+ arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
+ tbin->avail[0]);
arena_t *arena = chunk->arena;
malloc_mutex_lock(&arena->lock);
@@ -155,6 +170,7 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
tcache->prof_accumbytes = 0;
#endif
#ifdef JEMALLOC_STATS
+ merged_stats = true;
arena->stats.nrequests_large += tbin->tstats.nrequests;
arena->stats.lstats[binind - nbins].nrequests +=
tbin->tstats.nrequests;
@@ -163,12 +179,10 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
#if (defined(JEMALLOC_PROF) || defined(JEMALLOC_STATS))
}
#endif
- deferred = NULL;
ndeferred = 0;
for (i = 0; i < nflush; i++) {
- ptr = flush;
+ ptr = tbin->avail[i];
assert(ptr != NULL);
- flush = *(void **)ptr;
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk->arena == arena)
arena_dalloc_large(arena, chunk, ptr);
@@ -179,21 +193,32 @@ tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem
* Stash the object, so that it can be handled
* in a future pass.
*/
- *(void **)ptr = deferred;
- deferred = ptr;
+ tbin->avail[ndeferred] = ptr;
ndeferred++;
}
}
malloc_mutex_unlock(&arena->lock);
-
- if (first_pass) {
- tbin->avail = flush;
- first_pass = false;
- }
}
+#ifdef JEMALLOC_STATS
+ if (merged_stats == false) {
+ /*
+ * The flush loop didn't happen to flush to this thread's
+ * arena, so the stats didn't get merged. Manually do so now.
+ */
+ arena_t *arena = tcache->arena;
+ malloc_mutex_lock(&arena->lock);
+ arena->stats.nrequests_large += tbin->tstats.nrequests;
+ arena->stats.lstats[binind - nbins].nrequests +=
+ tbin->tstats.nrequests;
+ tbin->tstats.nrequests = 0;
+ malloc_mutex_unlock(&arena->lock);
+ }
+#endif
+ memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
+ rem * sizeof(void *));
tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water)
+ if ((int)tbin->ncached < tbin->low_water)
tbin->low_water = tbin->ncached;
}
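
Note: both flush paths end identically: surviving pointers slide to the stack
base and the low-water mark is updated. The added (int) cast matters because
ncached is unsigned while low_water is a signed int in this version; the
allocation fast path appears to set low_water to -1 when a bin runs
completely dry, and an unsigned comparison would clobber that sentinel. A
small illustration of the hazard, under that assumption:

    /* Illustration of the hazard the (int) cast avoids. */
    static void
    update_low_water(unsigned ncached, int *low_water)
    {
        /* Without the cast, *low_water would convert to unsigned for the
         * comparison; a -1 sentinel becomes UINT_MAX, the test is wrongly
         * true, and the sentinel is overwritten with 0. */
        if ((int)ncached < *low_water)
            *low_water = (int)ncached;
    }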
@@ -201,10 +226,14 @@ tcache_t *
tcache_create(arena_t *arena)
{
tcache_t *tcache;
- size_t size;
+ size_t size, stack_offset;
unsigned i;
size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
+ /* Naturally align the pointer stacks. */
+ size = PTR_CEILING(size);
+ stack_offset = size;
+ size += stack_nelms * sizeof(void *);
/*
* Round up to the nearest multiple of the cacheline size, in order to
* avoid the possibility of false cacheline sharing.
@@ -217,6 +246,8 @@ tcache_create(arena_t *arena)
if (size <= small_maxclass)
tcache = (tcache_t *)arena_malloc_small(arena, size, true);
+ else if (size <= tcache_maxclass)
+ tcache = (tcache_t *)arena_malloc_large(arena, size, true);
else
tcache = (tcache_t *)icalloc(size);
@@ -233,15 +264,12 @@ tcache_create(arena_t *arena)
tcache->arena = arena;
assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
- for (i = 0; i < nbins; i++) {
- if ((arena->bins[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) {
- tcache->tbins[i].ncached_max = (arena->bins[i].nregs <<
- 1);
- } else
- tcache->tbins[i].ncached_max = TCACHE_NSLOTS_SMALL_MAX;
+ for (i = 0; i < nhbins; i++) {
+ tcache->tbins[i].lg_fill_div = 1;
+ tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
+ (uintptr_t)stack_offset);
+ stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
}
- for (; i < nhbins; i++)
- tcache->tbins[i].ncached_max = TCACHE_NSLOTS_LARGE;
TCACHE_SET(tcache);
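
Note: tcache_create() now carves every bin's pointer stack out of the same
allocation as the tcache_t header: the header size is padded to pointer
alignment (PTR_CEILING), then the stacks follow back to back in bin order. A
hypothetical helper showing the address arithmetic, equivalent to the running
stack_offset in the loop above (assumes the diff's tcache_bin_info global):

    #include <stdint.h>

    /* Layout within the one allocation:
     *   [ tcache_t header | pad | bin 0 stack | bin 1 stack | ... ]    */
    static void **
    tbin_stack(tcache_t *tcache, unsigned i, size_t stacks_base)
    {
        size_t off = stacks_base; /* PTR_CEILING'd header size. */
        unsigned j;

        for (j = 0; j < i; j++)
            off += tcache_bin_info[j].ncached_max * sizeof(void *);
        return ((void **)((uintptr_t)tcache + off));
    }

One allocation instead of nhbins separate ones keeps the stacks adjacent to
the header and lets tcache_destroy() free everything with a single size
lookup.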
@@ -252,6 +280,7 @@ void
tcache_destroy(tcache_t *tcache)
{
unsigned i;
+ size_t tcache_size;
#ifdef JEMALLOC_STATS
/* Unlink from list of extant tcaches. */
@@ -308,7 +337,8 @@ tcache_destroy(tcache_t *tcache)
}
#endif
- if (arena_salloc(tcache) <= small_maxclass) {
+ tcache_size = arena_salloc(tcache);
+ if (tcache_size <= small_maxclass) {
arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
arena_t *arena = chunk->arena;
size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >>
@@ -322,6 +352,13 @@ tcache_destroy(tcache_t *tcache)
malloc_mutex_lock(&bin->lock);
arena_dalloc_bin(arena, chunk, tcache, mapelm);
malloc_mutex_unlock(&bin->lock);
+ } else if (tcache_size <= tcache_maxclass) {
+ arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache);
+ arena_t *arena = chunk->arena;
+
+ malloc_mutex_lock(&arena->lock);
+ arena_dalloc_large(arena, chunk, tcache);
+ malloc_mutex_unlock(&arena->lock);
} else
idalloc(tcache);
}
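
Note: because the header-plus-stacks allocation can now exceed
small_maxclass, tcache_create() gained an arena_malloc_large() path, and
tcache_destroy() must mirror it when freeing the tcache itself. The dispatch,
reduced to its shape (small_free/large_free/huge_free are placeholders for
the arena_dalloc_bin(), arena_dalloc_large(), and idalloc() sequences above):

    extern void small_free(void *), large_free(void *), huge_free(void *);
    extern size_t small_maxclass, tcache_maxclass;

    /* Sketch of the three-way teardown dispatch added by this change. */
    static void
    tcache_free_self(void *tcache, size_t sz)
    {
        if (sz <= small_maxclass)
            small_free(tcache);     /* arena_dalloc_bin() path. */
        else if (sz <= tcache_maxclass)
            large_free(tcache);     /* New: arena_dalloc_large() path. */
        else
            huge_free(tcache);      /* idalloc() fallback. */
    }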
@@ -378,11 +415,13 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena)
}
#endif
-void
+bool
tcache_boot(void)
{
if (opt_tcache) {
+ unsigned i;
+
/*
* If necessary, clamp opt_lg_tcache_max, now that
* small_maxclass and arena_maxclass are known.
@@ -397,6 +436,28 @@ tcache_boot(void)
nhbins = nbins + (tcache_maxclass >> PAGE_SHIFT);
+ /* Initialize tcache_bin_info. */
+ tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
+ sizeof(tcache_bin_info_t));
+ if (tcache_bin_info == NULL)
+ return (true);
+ stack_nelms = 0;
+ for (i = 0; i < nbins; i++) {
+ if ((arena_bin_info[i].nregs << 1) <=
+ TCACHE_NSLOTS_SMALL_MAX) {
+ tcache_bin_info[i].ncached_max =
+ (arena_bin_info[i].nregs << 1);
+ } else {
+ tcache_bin_info[i].ncached_max =
+ TCACHE_NSLOTS_SMALL_MAX;
+ }
+ stack_nelms += tcache_bin_info[i].ncached_max;
+ }
+ for (; i < nhbins; i++) {
+ tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
+ stack_nelms += tcache_bin_info[i].ncached_max;
+ }
+
/* Compute incremental GC event threshold. */
if (opt_lg_tcache_gc_sweep >= 0) {
tcache_gc_incr = ((1U << opt_lg_tcache_gc_sweep) /
@@ -412,6 +473,8 @@ tcache_boot(void)
abort();
}
}
+
+ return (false);
}
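
Note: tcache_boot() also changes signature from void to bool so the
base_alloc() failure for the new table can propagate to the caller. The
per-bin sizing rule it applies is min(2 * nregs, TCACHE_NSLOTS_SMALL_MAX) for
small bins and a fixed TCACHE_NSLOTS_LARGE for large bins, with stack_nelms
accumulating the total so tcache_create() can size one allocation. The same
rule as a single hypothetical function (names as in the diff's context):

    /* Hypothetical restatement of the per-bin limits computed above. */
    static unsigned
    bin_ncached_max(unsigned binind)
    {
        if (binind < nbins) {
            unsigned n = arena_bin_info[binind].nregs << 1;
            return ((n <= TCACHE_NSLOTS_SMALL_MAX) ? n :
                TCACHE_NSLOTS_SMALL_MAX);
        }
        return (TCACHE_NSLOTS_LARGE);
    }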
/******************************************************************************/
#endif /* JEMALLOC_TCACHE */