|   |   |   |
|---|---|---|
| author | Viste <viste02@gmail.com> | 2019-11-14 23:17:38 +0300 |
| committer | Kargatum <dowlandtop@yandex.com> | 2019-11-15 03:17:38 +0700 |
| commit | 685538b01b27ba38c605448e3a0de225bed4bb29 | (patch) |
| tree | 36196f0965c5fc2fccdbc45a86a8155f2c986e4d | /deps/jemalloc/src/tcache.c |
| parent | fae7ae95a373530e0b206814662df557882c8f1a | (diff) |
feat(Deps/Jemalloc): update Jemalloc to 5.2.1 (#2413)
Diffstat (limited to 'deps/jemalloc/src/tcache.c')
|   |   |   |
|---|---|---|
| -rw-r--r-- | deps/jemalloc/src/tcache.c | 226 |

1 file changed, 158 insertions, 68 deletions
diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c
index 936ef3140d..50099a9f2c 100644
--- a/deps/jemalloc/src/tcache.c
+++ b/deps/jemalloc/src/tcache.c
@@ -4,7 +4,8 @@

 #include "jemalloc/internal/assert.h"
 #include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/safety_check.h"
+#include "jemalloc/internal/sc.h"

 /******************************************************************************/
 /* Data. */
@@ -12,7 +13,7 @@
 bool opt_tcache = true;
 ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

-tcache_bin_info_t *tcache_bin_info;
+cache_bin_info_t *tcache_bin_info;
 static unsigned stack_nelms; /* Total stack elms per tcache. */

 unsigned nhbins;
@@ -40,8 +41,8 @@ void
 tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
 	szind_t binind = tcache->next_gc_bin;

-	tcache_bin_t *tbin;
-	if (binind < NBINS) {
+	cache_bin_t *tbin;
+	if (binind < SC_NBINS) {
 		tbin = tcache_small_bin_get(tcache, binind);
 	} else {
 		tbin = tcache_large_bin_get(tcache, binind);
@@ -50,7 +51,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
 		/*
 		 * Flush (ceiling) 3/4 of the objects below the low water mark.
 		 */
-		if (binind < NBINS) {
+		if (binind < SC_NBINS) {
 			tcache_bin_flush_small(tsd, tcache, tbin, binind,
 			    tbin->ncached - tbin->low_water +
 			    (tbin->low_water >> 2));
@@ -58,7 +59,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
 			 * Reduce fill count by 2X. Limit lg_fill_div such that
 			 * the fill count is always at least 1.
 			 */
-			tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
+			cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
 			if ((tbin_info->ncached_max >>
 			    (tcache->lg_fill_div[binind] + 1)) >= 1) {
 				tcache->lg_fill_div[binind]++;
@@ -72,7 +73,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
 		 * Increase fill count by 2X for small bins. Make sure
 		 * lg_fill_div stays greater than 0.
 		 */
-		if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
+		if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
 			tcache->lg_fill_div[binind]--;
 		}
 	}
@@ -86,7 +87,7 @@ tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {

 void *
 tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
-    tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
+    cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
 	void *ret;

 	assert(tcache->arena != NULL);
@@ -95,33 +96,72 @@ tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
 	if (config_prof) {
 		tcache->prof_accumbytes = 0;
 	}
-	ret = tcache_alloc_easy(tbin, tcache_success);
+	ret = cache_bin_alloc_easy(tbin, tcache_success);

 	return ret;
 }

+/* Enabled with --enable-extra-size-check. */
+static void
+tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
+    size_t nflush, extent_t **extents){
+	rtree_ctx_t rtree_ctx_fallback;
+	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+
+	/*
+	 * Verify that the items in the tcache all have the correct size; this
+	 * is useful for catching sized deallocation bugs, also to fail early
+	 * instead of corrupting metadata. Since this can be turned on for opt
+	 * builds, avoid the branch in the loop.
+	 */
+	szind_t szind;
+	size_t sz_sum = binind * nflush;
+	for (unsigned i = 0 ; i < nflush; i++) {
+		rtree_extent_szind_read(tsdn, &extents_rtree,
+		    rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true,
+		    &extents[i], &szind);
+		sz_sum -= szind;
+	}
+	if (sz_sum != 0) {
+		safety_check_fail("<jemalloc>: size mismatch in thread cache "
+		    "detected, likely caused by sized deallocation bugs by "
+		    "application. Abort.\n");
+		abort();
+	}
+}
+
 void
-tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
     szind_t binind, unsigned rem) {
 	bool merged_stats = false;

-	assert(binind < NBINS);
-	assert(rem <= tbin->ncached);
+	assert(binind < SC_NBINS);
+	assert((cache_bin_sz_t)rem <= tbin->ncached);

 	arena_t *arena = tcache->arena;
 	assert(arena != NULL);
 	unsigned nflush = tbin->ncached - rem;
 	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+
 	/* Look up extent once per item. */
-	for (unsigned i = 0 ; i < nflush; i++) {
-		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
+	if (config_opt_safety_checks) {
+		tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
+		    nflush, item_extent);
+	} else {
+		for (unsigned i = 0 ; i < nflush; i++) {
+			item_extent[i] = iealloc(tsd_tsdn(tsd),
+			    *(tbin->avail - 1 - i));
+		}
 	}
-
 	while (nflush > 0) {
 		/* Lock the arena bin associated with the first object. */
 		extent_t *extent = item_extent[0];
-		arena_t *bin_arena = extent_arena_get(extent);
-		arena_bin_t *bin = &bin_arena->bins[binind];
+		unsigned bin_arena_ind = extent_arena_ind_get(extent);
+		arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
+		    false);
+		unsigned binshard = extent_binshard_get(extent);
+		assert(binshard < bin_infos[binind].n_shards);
+		bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];

 		if (config_prof && bin_arena == arena) {
 			if (arena_prof_accum(tsd_tsdn(tsd), arena,
@@ -132,8 +172,7 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		}

 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
-		if (config_stats && bin_arena == arena) {
-			assert(!merged_stats);
+		if (config_stats && bin_arena == arena && !merged_stats) {
 			merged_stats = true;
 			bin->stats.nflushes++;
 			bin->stats.nrequests += tbin->tstats.nrequests;
@@ -145,9 +184,10 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 			extent = item_extent[i];
 			assert(ptr != NULL && extent != NULL);

-			if (extent_arena_get(extent) == bin_arena) {
+			if (extent_arena_ind_get(extent) == bin_arena_ind
+			    && extent_binshard_get(extent) == binshard) {
 				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
-				    bin_arena, extent, ptr);
+				    bin_arena, bin, binind, extent, ptr);
 			} else {
 				/*
 				 * This object was allocated via a different
@@ -169,8 +209,9 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged. Manually do so now.
 		 */
-		arena_bin_t *bin = &arena->bins[binind];
-		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
+		unsigned binshard;
+		bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind,
+		    &binshard);
 		bin->stats.nflushes++;
 		bin->stats.nrequests += tbin->tstats.nrequests;
 		tbin->tstats.nrequests = 0;
@@ -180,63 +221,76 @@ tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
 	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
 	    sizeof(void *));
 	tbin->ncached = rem;
-	if ((low_water_t)tbin->ncached < tbin->low_water) {
+	if (tbin->ncached < tbin->low_water) {
 		tbin->low_water = tbin->ncached;
 	}
 }

 void
-tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
+tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
     unsigned rem, tcache_t *tcache) {
 	bool merged_stats = false;

 	assert(binind < nhbins);
-	assert(rem <= tbin->ncached);
+	assert((cache_bin_sz_t)rem <= tbin->ncached);

-	arena_t *arena = tcache->arena;
-	assert(arena != NULL);
+	arena_t *tcache_arena = tcache->arena;
+	assert(tcache_arena != NULL);
 	unsigned nflush = tbin->ncached - rem;
 	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+
+#ifndef JEMALLOC_EXTRA_SIZE_CHECK
 	/* Look up extent once per item. */
 	for (unsigned i = 0 ; i < nflush; i++) {
 		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
 	}
-
+#else
+	tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
+	    item_extent);
+#endif
 	while (nflush > 0) {
 		/* Lock the arena associated with the first object. */
 		extent_t *extent = item_extent[0];
-		arena_t *locked_arena = extent_arena_get(extent);
-		UNUSED bool idump;
+		unsigned locked_arena_ind = extent_arena_ind_get(extent);
+		arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
+		    locked_arena_ind, false);
+		bool idump;

 		if (config_prof) {
 			idump = false;
 		}

-		malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+		bool lock_large = !arena_is_auto(locked_arena);
+		if (lock_large) {
+			malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+		}
 		for (unsigned i = 0; i < nflush; i++) {
 			void *ptr = *(tbin->avail - 1 - i);
 			assert(ptr != NULL);
 			extent = item_extent[i];
-			if (extent_arena_get(extent) == locked_arena) {
+			if (extent_arena_ind_get(extent) == locked_arena_ind) {
 				large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
 				    extent);
 			}
 		}
-		if ((config_prof || config_stats) && locked_arena == arena) {
+		if ((config_prof || config_stats) &&
+		    (locked_arena == tcache_arena)) {
 			if (config_prof) {
-				idump = arena_prof_accum(tsd_tsdn(tsd), arena,
-				    tcache->prof_accumbytes);
+				idump = arena_prof_accum(tsd_tsdn(tsd),
+				    tcache_arena, tcache->prof_accumbytes);
 				tcache->prof_accumbytes = 0;
 			}
 			if (config_stats) {
 				merged_stats = true;
-				arena_stats_large_nrequests_add(tsd_tsdn(tsd),
-				    &arena->stats, binind,
+				arena_stats_large_flush_nrequests_add(
+				    tsd_tsdn(tsd), &tcache_arena->stats, binind,
 				    tbin->tstats.nrequests);
 				tbin->tstats.nrequests = 0;
 			}
 		}
-		malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+		if (lock_large) {
+			malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+		}

 		unsigned ndeferred = 0;
 		for (unsigned i = 0; i < nflush; i++) {
@@ -244,7 +298,7 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 			extent = item_extent[i];
 			assert(ptr != NULL && extent != NULL);

-			if (extent_arena_get(extent) == locked_arena) {
+			if (extent_arena_ind_get(extent) == locked_arena_ind) {
 				large_dalloc_finish(tsd_tsdn(tsd), extent);
 			} else {
 				/*
@@ -270,15 +324,15 @@ tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
 		 * The flush loop didn't happen to flush to this thread's
 		 * arena, so the stats didn't get merged. Manually do so now.
 		 */
-		arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats,
-		    binind, tbin->tstats.nrequests);
+		arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd),
+		    &tcache_arena->stats, binind, tbin->tstats.nrequests);
 		tbin->tstats.nrequests = 0;
 	}

 	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
 	    sizeof(void *));
 	tbin->ncached = rem;
-	if ((low_water_t)tbin->ncached < tbin->low_water) {
+	if (tbin->ncached < tbin->low_water) {
 		tbin->low_water = tbin->ncached;
 	}
 }
@@ -291,8 +345,15 @@ tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
 	if (config_stats) {
 		/* Link into list of extant tcaches. */
 		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
+
 		ql_elm_new(tcache, link);
 		ql_tail_insert(&arena->tcache_ql, tcache, link);
+		cache_bin_array_descriptor_init(
+		    &tcache->cache_bin_array_descriptor, tcache->bins_small,
+		    tcache->bins_large);
+		ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
+		    &tcache->cache_bin_array_descriptor, link);
+
 		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
 	}
 }
@@ -316,6 +377,8 @@ tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
 			assert(in_ql);
 		}
 		ql_remove(&arena->tcache_ql, tcache, link);
+		ql_remove(&arena->cache_bin_array_descriptor_ql,
+		    &tcache->cache_bin_array_descriptor, link);
 		tcache_stats_merge(tsdn, tcache, arena);
 		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
 	}
@@ -354,10 +417,10 @@ tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {

 	size_t stack_offset = 0;
 	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
-	memset(tcache->tbins_small, 0, sizeof(tcache_bin_t) * NBINS);
-	memset(tcache->tbins_large, 0, sizeof(tcache_bin_t) * (nhbins - NBINS));
+	memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
+	memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS));
 	unsigned i = 0;
-	for (; i < NBINS; i++) {
+	for (; i < SC_NBINS; i++) {
 		tcache->lg_fill_div[i] = 1;
 		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
 		/*
@@ -449,16 +512,16 @@ static void
 tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
 	assert(tcache->arena != NULL);

-	for (unsigned i = 0; i < NBINS; i++) {
-		tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
+	for (unsigned i = 0; i < SC_NBINS; i++) {
+		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
 		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

 		if (config_stats) {
 			assert(tbin->tstats.nrequests == 0);
 		}
 	}
-	for (unsigned i = NBINS; i < nhbins; i++) {
-		tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+	for (unsigned i = SC_NBINS; i < nhbins; i++) {
+		cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
 		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

 		if (config_stats) {
@@ -482,6 +545,7 @@ tcache_flush(tsd_t *tsd) {
 static void
 tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
 	tcache_flush_cache(tsd, tcache);
+	arena_t *arena = tcache->arena;
 	tcache_arena_dissociate(tsd_tsdn(tsd), tcache);

 	if (tsd_tcache) {
@@ -494,6 +558,23 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
 		/* Release both the tcache struct and avail array. */
 		idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
 	}
+
+	/*
+	 * The deallocation and tcache flush above may not trigger decay since
+	 * we are on the tcache shutdown path (potentially with non-nominal
+	 * tsd). Manually trigger decay to avoid pathological cases. Also
+	 * include arena 0 because the tcache array is allocated from it.
+	 */
+	arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false),
+	    false, false);
+
+	if (arena_nthreads_get(arena, false) == 0 &&
+	    !background_thread_enabled()) {
+		/* Force purging when no threads assigned to the arena anymore. */
+		arena_decay(tsd_tsdn(tsd), arena, false, true);
+	} else {
+		arena_decay(tsd_tsdn(tsd), arena, false, false);
+	}
 }

 /* For auto tcache (embedded in TSD) only. */
@@ -523,18 +604,18 @@ tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
 	cassert(config_stats);

 	/* Merge and reset tcache stats. */
-	for (i = 0; i < NBINS; i++) {
-		arena_bin_t *bin = &arena->bins[i];
-		tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
-		malloc_mutex_lock(tsdn, &bin->lock);
+	for (i = 0; i < SC_NBINS; i++) {
+		cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
+		unsigned binshard;
+		bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard);
 		bin->stats.nrequests += tbin->tstats.nrequests;
 		malloc_mutex_unlock(tsdn, &bin->lock);
 		tbin->tstats.nrequests = 0;
 	}

 	for (; i < nhbins; i++) {
-		tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
-		arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
+		cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
+		arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i,
 		    tbin->tstats.nrequests);
 		tbin->tstats.nrequests = 0;
 	}
@@ -605,23 +686,32 @@ label_return:
 }

 static tcache_t *
-tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) {
+tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm, bool allow_reinit) {
 	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);

 	if (elm->tcache == NULL) {
 		return NULL;
 	}
 	tcache_t *tcache = elm->tcache;
-	elm->tcache = NULL;
+	if (allow_reinit) {
+		elm->tcache = TCACHES_ELM_NEED_REINIT;
+	} else {
+		elm->tcache = NULL;
+	}
+
+	if (tcache == TCACHES_ELM_NEED_REINIT) {
+		return NULL;
+	}
 	return tcache;
 }

 void
 tcaches_flush(tsd_t *tsd, unsigned ind) {
 	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
-	tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
+	tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind], true);
 	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);

 	if (tcache != NULL) {
+		/* Destroy the tcache; recreate in tcaches_get() if needed. */
 		tcache_destroy(tsd, tcache, false);
 	}
 }
@@ -630,7 +720,7 @@ void
 tcaches_destroy(tsd_t *tsd, unsigned ind) {
 	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
 	tcaches_t *elm = &tcaches[ind];
-	tcache_t *tcache = tcaches_elm_remove(tsd, elm);
+	tcache_t *tcache = tcaches_elm_remove(tsd, elm, false);
 	elm->next = tcaches_avail;
 	tcaches_avail = elm;
 	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
@@ -643,8 +733,8 @@ bool
 tcache_boot(tsdn_t *tsdn) {
 	/* If necessary, clamp opt_lg_tcache_max. */
 	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
-	    SMALL_MAXCLASS) {
-		tcache_maxclass = SMALL_MAXCLASS;
+	    SC_SMALL_MAXCLASS) {
+		tcache_maxclass = SC_SMALL_MAXCLASS;
 	} else {
 		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
 	}
@@ -657,21 +747,21 @@ tcache_boot(tsdn_t *tsdn) {
 	nhbins = sz_size2index(tcache_maxclass) + 1;

 	/* Initialize tcache_bin_info. */
-	tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
-	    * sizeof(tcache_bin_info_t), CACHELINE);
+	tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
+	    * sizeof(cache_bin_info_t), CACHELINE);
 	if (tcache_bin_info == NULL) {
 		return true;
 	}
 	stack_nelms = 0;
 	unsigned i;
-	for (i = 0; i < NBINS; i++) {
-		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
+	for (i = 0; i < SC_NBINS; i++) {
+		if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
 			tcache_bin_info[i].ncached_max =
 			    TCACHE_NSLOTS_SMALL_MIN;
-		} else if ((arena_bin_info[i].nregs << 1) <=
+		} else if ((bin_infos[i].nregs << 1) <=
 		    TCACHE_NSLOTS_SMALL_MAX) {
 			tcache_bin_info[i].ncached_max =
-			    (arena_bin_info[i].nregs << 1);
+			    (bin_infos[i].nregs << 1);
 		} else {
 			tcache_bin_info[i].ncached_max =
 			    TCACHE_NSLOTS_SMALL_MAX;