Diffstat (limited to 'dep/jemalloc/src/extent.c')
-rw-r--r-- | dep/jemalloc/src/extent.c | 211
1 file changed, 158 insertions, 53 deletions
diff --git a/dep/jemalloc/src/extent.c b/dep/jemalloc/src/extent.c
index 09d6d771817..62086c7d7b5 100644
--- a/dep/jemalloc/src/extent.c
+++ b/dep/jemalloc/src/extent.c
@@ -20,7 +20,7 @@ mutex_pool_t extent_mutex_pool;
 size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
 
 static const bitmap_info_t extents_bitmap_info =
-    BITMAP_INFO_INITIALIZER(NPSIZES+1);
+    BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
 
 static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
     size_t size, size_t alignment, bool *zero, bool *commit,
@@ -119,9 +119,13 @@ static void extent_record(tsdn_t *tsdn, arena_t *arena,
 
 /******************************************************************************/
 
-ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
+#define ATTR_NONE /* does nothing */
+
+ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
     extent_esnead_comp)
 
+#undef ATTR_NONE
+
 typedef enum {
 	lock_result_success,
 	lock_result_failure,
@@ -130,13 +134,16 @@ typedef enum {
 
 static lock_result_t
 extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
-    extent_t **result) {
+    extent_t **result, bool inactive_only) {
 	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
 	    elm, true);
 
-	if (extent1 == NULL) {
+	/* Slab implies active extents and should be skipped. */
+	if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
+	    &extents_rtree, elm, true))) {
 		return lock_result_no_extent;
 	}
+
 	/*
 	 * It's possible that the extent changed out from under us, and with it
 	 * the leaf->extent mapping.  We have to recheck while holding the lock.
@@ -159,7 +166,8 @@ extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
  * address, and NULL otherwise.
  */
 static extent_t *
-extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
+extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
+    bool inactive_only) {
 	extent_t *ret = NULL;
 	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
 	    rtree_ctx, (uintptr_t)addr, false, false);
@@ -168,7 +176,8 @@ extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
 	}
 	lock_result_t lock_result;
 	do {
-		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
+		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
+		    inactive_only);
 	} while (lock_result == lock_result_failure);
 	return ret;
 }
@@ -182,6 +191,7 @@ extent_alloc(tsdn_t *tsdn, arena_t *arena) {
 		return base_alloc_extent(tsdn, arena->base);
 	}
 	extent_avail_remove(&arena->extent_avail, extent);
+	atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
 	return extent;
 }
@@ -190,6 +200,7 @@ void
 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
 	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
 	extent_avail_insert(&arena->extent_avail, extent);
+	atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
 }
 
@@ -255,7 +266,7 @@ extent_size_quantize_ceil(size_t size) {
 	size_t ret;
 
 	assert(size > 0);
-	assert(size - sz_large_pad <= LARGE_MAXCLASS);
+	assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
 	assert((size & PAGE_MASK) == 0);
 
 	ret = extent_size_quantize_floor(size);
@@ -284,7 +295,7 @@ extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
 	    malloc_mutex_rank_exclusive)) {
 		return true;
 	}
-	for (unsigned i = 0; i < NPSIZES+1; i++) {
+	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
 		extent_heap_new(&extents->heaps[i]);
 	}
 	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
@@ -305,6 +316,32 @@ extents_npages_get(extents_t *extents) {
 	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
 }
 
+size_t
+extents_nextents_get(extents_t *extents, pszind_t pind) {
+	return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
+}
+
+size_t
+extents_nbytes_get(extents_t *extents, pszind_t pind) {
+	return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
+}
+
+static void
+extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
+	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
+	atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
+	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
+	atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
+}
+
+static void
+extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
+	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
+	atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
+	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
+	atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
+}
+
 static void
 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
@@ -318,6 +355,11 @@ extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
 		    (size_t)pind);
 	}
 	extent_heap_insert(&extents->heaps[pind], extent);
+
+	if (config_stats) {
+		extents_stats_add(extents, pind, size);
+	}
+
 	extent_list_append(&extents->lru, extent);
 	size_t npages = size >> LG_PAGE;
 	/*
@@ -340,6 +382,11 @@ extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
 	size_t psz = extent_size_quantize_floor(size);
 	pszind_t pind = sz_psz2ind(psz);
 	extent_heap_remove(&extents->heaps[pind], extent);
+
+	if (config_stats) {
+		extents_stats_sub(extents, pind, size);
+	}
+
 	if (extent_heap_empty(&extents->heaps[pind])) {
 		bitmap_set(extents->bitmap, &extents_bitmap_info,
 		    (size_t)pind);
@@ -371,7 +418,7 @@ extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
 	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
 	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
 	    (size_t)i+1)) {
-		assert(i < NPSIZES);
+		assert(i < SC_NPSIZES);
 		assert(!extent_heap_empty(&extents->heaps[i]));
 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
 		uintptr_t base = (uintptr_t)extent_base_get(extent);
@@ -401,7 +448,7 @@ extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
 	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
 	    &extents_bitmap_info, (size_t)pind);
-	if (i < NPSIZES+1) {
+	if (i < SC_NPSIZES + 1) {
 		/*
 		 * In order to reduce fragmentation, avoid reusing and splitting
 		 * large extents for much smaller sizes.
@@ -429,8 +476,9 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
 
 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
-	    &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
-	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
+	    &extents_bitmap_info, (size_t)pind);
+	    i < SC_NPSIZES + 1;
+	    i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
 	    (size_t)i+1)) {
 		assert(!extent_heap_empty(&extents->heaps[i]));
 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
@@ -438,10 +486,10 @@ extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
 		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
 			ret = extent;
 		}
-		if (i == NPSIZES) {
+		if (i == SC_NPSIZES) {
 			break;
 		}
-		assert(i < NPSIZES);
+		assert(i < SC_NPSIZES);
 	}
 
 	return ret;
@@ -748,6 +796,7 @@ extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
 
 	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
 	    &elm_a, &elm_b)) {
+		extent_unlock(tsdn, extent);
 		return true;
 	}
 
@@ -817,7 +866,7 @@ extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
 
 	extent_lock(tsdn, extent);
 
-	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
+	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
 	if (extent_slab_get(extent)) {
 		extent_interior_deregister(tsdn, rtree_ctx, extent);
 		extent_slab_set(extent, false);
@@ -874,7 +923,8 @@ extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
 	extent_hooks_assure_initialized(arena, r_extent_hooks);
 	extent_t *extent;
 	if (new_addr != NULL) {
-		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
+		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
+		    false);
 		if (extent != NULL) {
 			/*
 			 * We might null-out extent to report an error, but we
@@ -958,7 +1008,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
 	if (leadsize != 0) {
 		*lead = *extent;
 		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
-		    *lead, leadsize, NSIZES, false, esize + trailsize, szind,
+		    *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
 		    slab, growing_retained);
 		if (*extent == NULL) {
 			*to_leak = *lead;
@@ -970,7 +1020,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
 	/* Split the trail. */
 	if (trailsize != 0) {
 		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
-		    esize, szind, slab, trailsize, NSIZES, false,
+		    esize, szind, slab, trailsize, SC_NSIZES, false,
 		    growing_retained);
 		if (*trail == NULL) {
 			*to_leak = *extent;
@@ -987,7 +1037,7 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
 		 * splitting occurred.
 		 */
 		extent_szind_set(*extent, szind);
-		if (szind != NSIZES) {
+		if (szind != SC_NSIZES) {
 			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
 			    (uintptr_t)extent_addr_get(*extent), szind, slab);
 			if (slab && extent_size_get(*extent) > PAGE) {
@@ -1045,14 +1095,25 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
 			extent_deregister_no_gdump_sub(tsdn, to_leak);
 			extents_leak(tsdn, arena, r_extent_hooks, extents,
 			    to_leak, growing_retained);
-			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
-			    == NULL);
+			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
+			    false) == NULL);
 		}
 		return NULL;
 	}
 	unreachable();
 }
 
+static bool
+extent_need_manual_zero(arena_t *arena) {
+	/*
+	 * Need to manually zero the extent on repopulating if either; 1) non
+	 * default extent hooks installed (in which case the purge semantics may
+	 * change); or 2) transparent huge pages enabled.
+	 */
+	return (!arena_has_default_hooks(arena) ||
+	    (opt_thp == thp_mode_always));
+}
+
 /*
  * Tries to satisfy the given allocation request by reusing one of the extents
  * in the given extents_t.
@@ -1092,7 +1153,9 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 			    extent, growing_retained);
 			return NULL;
 		}
-		extent_zeroed_set(extent, true);
+		if (!extent_need_manual_zero(arena)) {
+			extent_zeroed_set(extent, true);
+		}
 	}
 
 	if (extent_committed_get(extent)) {
@@ -1113,14 +1176,16 @@ extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 
 	if (*zero) {
 		void *addr = extent_base_get(extent);
-		size_t size = extent_size_get(extent);
 		if (!extent_zeroed_get(extent)) {
-			if (pages_purge_forced(addr, size)) {
+			size_t size = extent_size_get(extent);
+			if (extent_need_manual_zero(arena) ||
+			    pages_purge_forced(addr, size)) {
 				memset(addr, 0, size);
 			}
 		} else if (config_debug) {
 			size_t *p = (size_t *)(uintptr_t)addr;
-			for (size_t i = 0; i < size / sizeof(size_t); i++) {
+			/* Check the first page only. */
+			for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
 				assert(p[i] == 0);
 			}
 		}
@@ -1244,11 +1309,11 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
 	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
 	while (alloc_size < alloc_size_min) {
 		egn_skip++;
-		if (arena->extent_grow_next + egn_skip == NPSIZES) {
+		if (arena->extent_grow_next + egn_skip >=
+		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
 			/* Outside legal range. */
 			goto label_err;
 		}
-		assert(arena->extent_grow_next + egn_skip < NPSIZES);
 		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
 	}
 
@@ -1271,7 +1336,7 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
 		extent_hook_post_reentrancy(tsdn);
 	}
 
-	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
+	extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
 	    arena_extent_sn_next(arena), extent_state_active, zeroed,
 	    committed, true);
 	if (ptr == NULL) {
@@ -1341,7 +1406,9 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
 			    &arena->extents_retained, extent, true);
 			goto label_err;
 		}
-		extent_zeroed_set(extent, true);
+		if (!extent_need_manual_zero(arena)) {
+			extent_zeroed_set(extent, true);
+		}
 	}
 
 	/*
@@ -1375,7 +1442,8 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
 	if (*zero && !extent_zeroed_get(extent)) {
 		void *addr = extent_base_get(extent);
 		size_t size = extent_size_get(extent);
-		if (pages_purge_forced(addr, size)) {
+		if (extent_need_manual_zero(arena) ||
+		    pages_purge_forced(addr, size)) {
 			memset(addr, 0, size);
 		}
 	}
@@ -1524,9 +1592,15 @@ extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 }
 
 static extent_t *
-extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
-    extent_t *extent, bool *coalesced, bool growing_retained) {
+    extent_t *extent, bool *coalesced, bool growing_retained,
+    bool inactive_only) {
+	/*
+	 * We avoid checking / locking inactive neighbors for large size
+	 * classes, since they are eagerly coalesced on deallocation which can
+	 * cause lock contention.
+	 */
 	/*
 	 * Continue attempting to coalesce until failure, to protect against
 	 * races with other threads that are thwarted by this one.
@@ -1537,7 +1611,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
 
 		/* Try to coalesce forward. */
 		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
-		    extent_past_get(extent));
+		    extent_past_get(extent), inactive_only);
 		if (next != NULL) {
 			/*
 			 * extents->mtx only protects against races for
@@ -1563,7 +1637,7 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
 
 		/* Try to coalesce backward. */
 		extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
-		    extent_before_get(extent));
+		    extent_before_get(extent), inactive_only);
 		if (prev != NULL) {
 			bool can_coalesce = extent_can_coalesce(arena, extents,
 			    extent, prev);
@@ -1589,6 +1663,22 @@ extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
 	return extent;
 }
 
+static extent_t *
+extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+    extent_t *extent, bool *coalesced, bool growing_retained) {
+	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
+	    extents, extent, coalesced, growing_retained, false);
+}
+
+static extent_t *
+extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
+    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
+    extent_t *extent, bool *coalesced, bool growing_retained) {
+	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
+	    extents, extent, coalesced, growing_retained, true);
+}
+
 /*
  * Does the metadata management portions of putting an unused extent into the
  * given extents_t (coalesces, deregisters slab interiors, the heap operations).
@@ -1606,7 +1696,7 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 	malloc_mutex_lock(tsdn, &extents->mtx);
 	extent_hooks_assure_initialized(arena, r_extent_hooks);
 
-	extent_szind_set(extent, NSIZES);
+	extent_szind_set(extent, SC_NSIZES);
 	if (extent_slab_get(extent)) {
 		extent_interior_deregister(tsdn, rtree_ctx, extent);
 		extent_slab_set(extent, false);
@@ -1618,18 +1708,22 @@ extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
 	if (!extents->delay_coalesce) {
 		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
 		    rtree_ctx, extents, extent, NULL, growing_retained);
-	} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
+	} else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
+		assert(extents == &arena->extents_dirty);
 		/* Always coalesce large extents eagerly. */
 		bool coalesced;
-		size_t prev_size;
 		do {
-			prev_size = extent_size_get(extent);
 			assert(extent_state_get(extent) == extent_state_active);
-			extent = extent_try_coalesce(tsdn, arena,
+			extent = extent_try_coalesce_large(tsdn, arena,
 			    r_extent_hooks, rtree_ctx, extents, extent,
 			    &coalesced, growing_retained);
-		} while (coalesced &&
-		    extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
+		} while (coalesced);
+		if (extent_size_get(extent) >= oversize_threshold) {
+			/* Shortcut to purge the oversize extent eagerly. */
+			malloc_mutex_unlock(tsdn, &extents->mtx);
+			arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
+			return;
+		}
 	}
 	extent_deactivate_locked(tsdn, arena, extents, extent);
 
@@ -1652,6 +1746,12 @@ extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
 }
 
 static bool
+extent_may_dalloc(void) {
+	/* With retain enabled, the default dalloc always fails. */
+	return !opt_retain;
+}
+
+static bool
 extent_dalloc_default_impl(void *addr, size_t size) {
 	if (!have_dss || !extent_in_dss(addr)) {
 		return extent_dalloc_mmap(addr, size);
 	}
@@ -1706,16 +1806,20 @@ extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
 	    WITNESS_RANK_CORE, 0);
 
-	/*
-	 * Deregister first to avoid a race with other allocating threads, and
-	 * reregister if deallocation fails.
-	 */
-	extent_deregister(tsdn, extent);
-	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
-		return;
+	/* Avoid calling the default extent_dalloc unless have to. */
+	if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
+		/*
+		 * Deregister first to avoid a race with other allocating
+		 * threads, and reregister if deallocation fails.
+		 */
+		extent_deregister(tsdn, extent);
+		if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
+		    extent)) {
+			return;
+		}
+		extent_reregister(tsdn, extent);
 	}
-	extent_reregister(tsdn, extent);
 
 	if (*r_extent_hooks != &extent_hooks_default) {
 		extent_hook_pre_reentrancy(tsdn, arena);
 	}
@@ -2128,22 +2232,23 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
 
 	if (a_elm_b != NULL) {
 		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
-		    NSIZES, false);
+		    SC_NSIZES, false);
 	}
 	if (b_elm_b != NULL) {
 		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
-		    NSIZES, false);
+		    SC_NSIZES, false);
 	} else {
 		b_elm_b = b_elm_a;
 	}
 
 	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
-	extent_szind_set(a, NSIZES);
+	extent_szind_set(a, SC_NSIZES);
 	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
 	    extent_sn_get(a) : extent_sn_get(b));
 	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
 
-	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
+	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
+	    false);
 
 	extent_unlock2(tsdn, a, b);
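
The new inactive_only flag threads through extent_lock_from_addr() into extent_rtree_leaf_elm_try_lock(), which keeps the existing lock-then-recheck shape: read the extent pointer, skip it if it is a slab (slab implies active), lock it, then verify the leaf still maps to the same extent and retry on mismatch. Below is a simplified standalone analogue of that pattern, a sketch only: it uses a single atomic slot in place of the rtree leaf and a pthread mutex in place of jemalloc's mutex pool, and every name in it is illustrative rather than jemalloc's API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

typedef struct extent_s {
	pthread_mutex_t mtx;
	bool slab;	/* slab implies active; skipped when inactive_only */
} extent_t;

/* Stand-in for an rtree leaf: a slot another thread may retarget. */
static _Atomic(extent_t *) slot;

static lock_result_t
try_lock_slot(extent_t **result, bool inactive_only) {
	extent_t *e = atomic_load(&slot);
	if (e == NULL || (inactive_only && e->slab)) {
		return lock_result_no_extent;
	}
	pthread_mutex_lock(&e->mtx);
	/* The slot may have changed before the lock was acquired; recheck. */
	if (atomic_load(&slot) != e) {
		pthread_mutex_unlock(&e->mtx);
		return lock_result_failure;	/* caller retries */
	}
	*result = e;
	return lock_result_success;
}

int
main(void) {
	extent_t e = { PTHREAD_MUTEX_INITIALIZER, false };
	atomic_store(&slot, &e);

	extent_t *locked = NULL;
	lock_result_t r;
	do {	/* same retry loop shape as extent_lock_from_addr() */
		r = try_lock_slot(&locked, true);
	} while (r == lock_result_failure);
	if (r == lock_result_success) {
		pthread_mutex_unlock(&locked->mtx);
	}
	return 0;
}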
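The new extents_stats_add()/extents_stats_sub() pair uses relaxed load-then-store rather than atomic read-modify-write: writers are already serialized by extents->mtx, so the atomics only make the counters safely readable outside the lock. A minimal sketch of the same pattern in portable C11 follows; stats_t and the size-class count are stand-ins, not jemalloc's types.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define NPSIZES_ILLUSTRATIVE 8	/* stand-in for the page size-class count */

typedef struct {
	/* Updated only under the owning mutex; read lock-free by stats. */
	atomic_size_t nextents[NPSIZES_ILLUSTRATIVE];
	atomic_size_t nbytes[NPSIZES_ILLUSTRATIVE];
} stats_t;

/* Mirrors extents_stats_add(): relaxed load + store, no RMW needed. */
static void
stats_add(stats_t *stats, size_t pind, size_t sz) {
	size_t cur = atomic_load_explicit(&stats->nextents[pind],
	    memory_order_relaxed);
	atomic_store_explicit(&stats->nextents[pind], cur + 1,
	    memory_order_relaxed);
	cur = atomic_load_explicit(&stats->nbytes[pind], memory_order_relaxed);
	atomic_store_explicit(&stats->nbytes[pind], cur + sz,
	    memory_order_relaxed);
}

int
main(void) {
	stats_t stats = {{0}};
	stats_add(&stats, 3, 4096);
	printf("%zu extent(s), %zu byte(s)\n",
	    atomic_load_explicit(&stats.nextents[3], memory_order_relaxed),
	    atomic_load_explicit(&stats.nbytes[3], memory_order_relaxed));
	return 0;
}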
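extent_need_manual_zero() centralizes the zeroing decision: with custom extent hooks the purge semantics are unknown, and with transparent huge pages always enabled a purge may not hand back zeroed pages, so extent_recycle() and extent_grow_retained() fall back to memset(). A condensed sketch of that control flow, with trivial stand-in implementations for both predicates (assumptions for illustration, not jemalloc's code):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Stand-in: cf. extent_need_manual_zero() above. */
static bool
need_manual_zero(void) {
	return true;	/* e.g. custom extent hooks, or THP always-on */
}

/* Stand-in: reports true when the purge did NOT zero the range. */
static bool
pages_purge_forced(void *addr, size_t size) {
	(void)addr;
	(void)size;
	return true;	/* pretend the OS-level purge failed */
}

/*
 * Same shape as the repopulation paths in extent_recycle() and
 * extent_grow_retained(): memset() whenever the purge either cannot be
 * trusted or fails outright.
 */
static void
ensure_zeroed(void *addr, size_t size) {
	if (need_manual_zero() || pages_purge_forced(addr, size)) {
		memset(addr, 0, size);
	}
}

int
main(void) {
	char buf[64] = "dirty";
	ensure_zeroed(buf, sizeof(buf));
	return buf[0];	/* exits 0 iff the buffer was zeroed */
}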
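In extent_record(), the eager path for large extents now loops until a coalesce pass makes no progress (dropping the prev_size bookkeeping), and anything that grows past oversize_threshold is handed straight to arena_decay_extent() instead of being cached dirty. The toy sketch below shows only that fixed-point-then-purge shape; the sizes, the threshold value, and coalesce_pass() are all illustrative placeholders.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative threshold; jemalloc's oversize_threshold is configurable. */
static const size_t oversize_threshold = 8 * 1024 * 1024;

/* Placeholder for one forward+backward coalesce pass over the extent. */
static bool
coalesce_pass(size_t *size) {
	if (*size < 8 * 1024 * 1024) {
		*size *= 2;	/* pretend an equal-sized neighbor merged in */
		return true;
	}
	return false;
}

/*
 * Shape of the new eager path in extent_record(): loop until a pass makes
 * no progress, then purge oversized results immediately rather than
 * leaving them in the dirty extents.
 */
static bool	/* true when the extent should be purged eagerly */
record_large(size_t size) {
	bool coalesced;
	do {
		coalesced = coalesce_pass(&size);
	} while (coalesced);
	return size >= oversize_threshold;
}

int
main(void) {
	/* 1 MiB grows to 8 MiB by repeated merging, then gets purged. */
	return record_large(1024 * 1024) ? 0 : 1;
}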