author    jackpoz <giacomopoz@gmail.com>  2018-05-11 20:36:14 +0200
committer Shauren <shauren.trinity@gmail.com>  2021-09-26 20:50:35 +0200
commit    501bb9dc6702f8130ccd077bad30828705454a01 (patch)
tree      c092752bb73ed7ef212ad31516e08e23e2eb123c /dep/jemalloc/src/background_thread.c
parent    9c65f4e27c360464869dc401453ebc5c7ba3d78b (diff)
Dep/Jemalloc: Update to Jemalloc 5.1.0
(cherry picked from commit 8fe74bf0f90aab0b23d5ff21079cba4201bb4fdf)
Diffstat (limited to 'dep/jemalloc/src/background_thread.c')
-rw-r--r-- dep/jemalloc/src/background_thread.c | 151
1 file changed, 90 insertions(+), 61 deletions(-)
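
The headline change below is the new opt_max_background_threads / max_background_threads pair, which replaces the hard-coded assumption that the background-thread pool is sized by ncpus. For orientation before the hunks: jemalloc 5.1.0 exposes this limit through the mallctl namespace. A minimal sketch of querying it, assuming an unprefixed jemalloc 5.1+ build (error handling trimmed):

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	size_t max_bg, sz = sizeof(max_bg);
	/* Read-only mirror of the option introduced by this update. */
	if (mallctl("opt.max_background_threads", &max_bg, &sz, NULL, 0) == 0) {
		printf("opt.max_background_threads: %zu\n", max_bg);
	}
	/* Background threads themselves are toggled through the
	 * pre-existing writable "background_thread" control. */
	bool enable = true;
	mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));
	return 0;
}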
diff --git a/dep/jemalloc/src/background_thread.c b/dep/jemalloc/src/background_thread.c
index eb30eb5b423..3517a3bb8ed 100644
--- a/dep/jemalloc/src/background_thread.c
+++ b/dep/jemalloc/src/background_thread.c
@@ -11,12 +11,14 @@
#define BACKGROUND_THREAD_DEFAULT false
/* Read-only after initialization. */
bool opt_background_thread = BACKGROUND_THREAD_DEFAULT;
+size_t opt_max_background_threads = MAX_BACKGROUND_THREAD_LIMIT;
/* Used for thread creation, termination and stats. */
malloc_mutex_t background_thread_lock;
/* Indicates global state. Atomic because decay reads this w/o locking. */
atomic_b_t background_thread_enabled_state;
size_t n_background_threads;
+size_t max_background_threads;
/* Thread info per-index. */
background_thread_info_t *background_thread_info;
@@ -30,19 +32,20 @@ bool can_enable_background_thread;
static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
-static pthread_once_t once_control = PTHREAD_ONCE_INIT;
static void
-pthread_create_wrapper_once(void) {
+pthread_create_wrapper_init(void) {
#ifdef JEMALLOC_LAZY_LOCK
- isthreaded = true;
+ if (!isthreaded) {
+ isthreaded = true;
+ }
#endif
}
int
pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
void *(*start_routine)(void *), void *__restrict arg) {
- pthread_once(&once_control, pthread_create_wrapper_once);
+ pthread_create_wrapper_init();
return pthread_create_fptr(thread, attr, start_routine, arg);
}
@@ -286,7 +289,7 @@ background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigne
uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
unsigned narenas = narenas_total_get();
- for (unsigned i = ind; i < narenas; i += ncpus) {
+ for (unsigned i = ind; i < narenas; i += max_background_threads) {
arena_t *arena = arena_get(tsdn, i, false);
if (!arena) {
continue;
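
With the pool now sized by max_background_threads, the stride each thread uses to walk the arena list must match, so that the residue classes still partition all arenas exactly once: arena i belongs to thread i % max_background_threads. A toy illustration of the resulting assignment (values invented):

#include <stdio.h>

int
main(void) {
	unsigned narenas = 8, max_bg = 3;
	for (unsigned ind = 0; ind < max_bg; ind++) {
		printf("thread %u:", ind);
		/* Mirrors the loop in background_work_sleep_once(). */
		for (unsigned i = ind; i < narenas; i += max_bg) {
			printf(" arena %u", i);
		}
		printf("\n");
	}
	return 0;
}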
@@ -379,35 +382,32 @@ background_thread_create_signals_masked(pthread_t *thread,
return create_err;
}
-static void
+static bool
check_background_thread_creation(tsd_t *tsd, unsigned *n_created,
bool *created_threads) {
+ bool ret = false;
if (likely(*n_created == n_background_threads)) {
- return;
+ return ret;
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
-label_restart:
- malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
- for (unsigned i = 1; i < ncpus; i++) {
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ malloc_mutex_unlock(tsdn, &background_thread_info[0].mtx);
+ for (unsigned i = 1; i < max_background_threads; i++) {
if (created_threads[i]) {
continue;
}
background_thread_info_t *info = &background_thread_info[i];
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- assert(info->state != background_thread_paused);
+ malloc_mutex_lock(tsdn, &info->mtx);
+ /*
+ * In case of the background_thread_paused state because of
+ * arena reset, delay the creation.
+ */
bool create = (info->state == background_thread_started);
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ malloc_mutex_unlock(tsdn, &info->mtx);
if (!create) {
continue;
}
- /*
- * To avoid deadlock with prefork handlers (which waits for the
- * mutex held here), unlock before calling pthread_create().
- */
- malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
-
pre_reentrancy(tsd, NULL);
int err = background_thread_create_signals_masked(&info->thread,
NULL, background_thread_entry, (void *)(uintptr_t)i);
@@ -423,19 +423,21 @@ label_restart:
abort();
}
}
- /* Restart since we unlocked. */
- goto label_restart;
+ /* Return to restart the loop since we unlocked. */
+ ret = true;
+ break;
}
- malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_info[0].mtx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
+ malloc_mutex_lock(tsdn, &background_thread_info[0].mtx);
+
+ return ret;
}
static void
background_thread0_work(tsd_t *tsd) {
/* Thread0 is also responsible for launching / terminating threads. */
- VARIABLE_ARRAY(bool, created_threads, ncpus);
+ VARIABLE_ARRAY(bool, created_threads, max_background_threads);
unsigned i;
- for (i = 1; i < ncpus; i++) {
+ for (i = 1; i < max_background_threads; i++) {
created_threads[i] = false;
}
/* Start working, and create more threads when asked. */
@@ -445,8 +447,10 @@ background_thread0_work(tsd_t *tsd) {
&background_thread_info[0])) {
continue;
}
- check_background_thread_creation(tsd, &n_created,
- (bool *)&created_threads);
+ if (check_background_thread_creation(tsd, &n_created,
+ (bool *)&created_threads)) {
+ continue;
+ }
background_work_sleep_once(tsd_tsdn(tsd),
&background_thread_info[0], 0);
}
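
Taken together, these two hunks replace the goto label_restart flow with a boolean contract: the helper creates at most one missing thread per call, necessarily dropping a mutex around the creation, and returns true so the caller restarts its loop instead of trusting state observed before the unlock. A self-contained toy of that contract; registry_lock and the other names are hypothetical, not jemalloc identifiers (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NWORKERS 4

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static bool created[NWORKERS];
static pthread_t workers[NWORKERS];

static void *
worker(void *arg) {
	(void)arg;
	return NULL;
}

/* Create at most one missing worker; true means the lock was dropped
 * and the caller's view of the registry may be stale. */
static bool
create_one_missing(void) {
	for (int i = 1; i < NWORKERS; i++) {
		if (created[i]) {
			continue;
		}
		pthread_mutex_unlock(&registry_lock);
		int err = pthread_create(&workers[i], NULL, worker, NULL);
		pthread_mutex_lock(&registry_lock);
		if (err == 0) {
			created[i] = true;
		}
		return true;	/* restart the scan from a clean slate */
	}
	return false;
}

int
main(void) {
	pthread_mutex_lock(&registry_lock);
	while (create_one_missing()) {
		/* loop again, like background_thread0_work's continue */
	}
	pthread_mutex_unlock(&registry_lock);
	for (int i = 1; i < NWORKERS; i++) {
		if (created[i]) {
			pthread_join(workers[i], NULL);
		}
	}
	puts("all workers created");
	return 0;
}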
@@ -456,15 +460,20 @@ background_thread0_work(tsd_t *tsd) {
* the global background_thread mutex (and is waiting) for us.
*/
assert(!background_thread_enabled());
- for (i = 1; i < ncpus; i++) {
+ for (i = 1; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
assert(info->state != background_thread_paused);
if (created_threads[i]) {
background_threads_disable_single(tsd, info);
} else {
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- /* Clear in case the thread wasn't created. */
- info->state = background_thread_stopped;
+ if (info->state != background_thread_stopped) {
+ /* The thread was not created. */
+ assert(info->state ==
+ background_thread_started);
+ n_background_threads--;
+ info->state = background_thread_stopped;
+ }
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
}
}
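
The teardown hunk above fixes an accounting gap: a slot can be in the started state (and counted in n_background_threads) even though pthread_create never ran for it, and such slots previously flipped to stopped without giving their count back. A toy of the corrected bookkeeping, with invented data:

#include <stdio.h>

enum bg_state { STOPPED, STARTED };

int
main(void) {
	enum bg_state state[4] = { STARTED, STARTED, STOPPED, STARTED };
	int spawned[4] = { 1, 0, 0, 1 };	/* 1 = pthread actually created */
	unsigned n_threads = 3;			/* one per STARTED slot */
	for (int i = 1; i < 4; i++) {
		if (spawned[i]) {
			state[i] = STOPPED;	/* normal disable path */
			n_threads--;
		} else if (state[i] != STOPPED) {
			/* Scheduled but never spawned: the decrement here is
			 * what this hunk adds. */
			state[i] = STOPPED;
			n_threads--;
		}
	}
	printf("threads remaining (thread 0 only): %u\n", n_threads);
	return 0;
}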
@@ -498,7 +507,7 @@ background_work(tsd_t *tsd, unsigned ind) {
static void *
background_thread_entry(void *ind_arg) {
unsigned thread_ind = (unsigned)(uintptr_t)ind_arg;
- assert(thread_ind < ncpus);
+ assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
#endif
@@ -532,7 +541,7 @@ background_thread_create(tsd_t *tsd, unsigned arena_ind) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
/* We create at most NCPUs threads. */
- size_t thread_ind = arena_ind % ncpus;
+ size_t thread_ind = arena_ind % max_background_threads;
background_thread_info_t *info = &background_thread_info[thread_ind];
bool need_new_thread;
@@ -586,26 +595,29 @@ background_threads_enable(tsd_t *tsd) {
assert(background_thread_enabled());
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
- VARIABLE_ARRAY(bool, marked, ncpus);
+ VARIABLE_ARRAY(bool, marked, max_background_threads);
unsigned i, nmarked;
- for (i = 0; i < ncpus; i++) {
+ for (i = 0; i < max_background_threads; i++) {
marked[i] = false;
}
nmarked = 0;
+ /* Thread 0 is required and created at the end. */
+ marked[0] = true;
/* Mark the threads we need to create for thread 0. */
unsigned n = narenas_total_get();
for (i = 1; i < n; i++) {
- if (marked[i % ncpus] ||
+ if (marked[i % max_background_threads] ||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
continue;
}
- background_thread_info_t *info = &background_thread_info[i];
+ background_thread_info_t *info = &background_thread_info[
+ i % max_background_threads];
malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
assert(info->state == background_thread_stopped);
background_thread_init(tsd, info);
malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
- marked[i % ncpus] = true;
- if (++nmarked == ncpus) {
+ marked[i % max_background_threads] = true;
+ if (++nmarked == max_background_threads) {
break;
}
}
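
Besides pre-marking thread 0 as mandatory, this hunk changes the lookup from background_thread_info[i] to background_thread_info[i % max_background_threads]; since i ranges over arena indices, which can exceed the pool size, the old form could index past the end of the array. A toy of the marking pass (numbers invented, every arena assumed live):

#include <stdbool.h>
#include <stdio.h>

int
main(void) {
	enum { MAX_BG = 4, NARENAS = 10 };
	bool marked[MAX_BG] = { true };	/* thread 0 pre-marked */
	unsigned nmarked = 1;
	for (unsigned i = 1; i < NARENAS && nmarked < MAX_BG; i++) {
		unsigned t = i % MAX_BG;	/* designated thread for arena i */
		if (!marked[t]) {
			marked[t] = true;
			nmarked++;
		}
	}
	for (unsigned t = 0; t < MAX_BG; t++) {
		printf("bg thread %u: %s\n", t, marked[t] ? "init" : "skip");
	}
	return 0;
}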
@@ -720,14 +732,14 @@ background_thread_prefork0(tsdn_t *tsdn) {
void
background_thread_prefork1(tsdn_t *tsdn) {
- for (unsigned i = 0; i < ncpus; i++) {
+ for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx);
}
}
void
background_thread_postfork_parent(tsdn_t *tsdn) {
- for (unsigned i = 0; i < ncpus; i++) {
+ for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_postfork_parent(tsdn,
&background_thread_info[i].mtx);
}
@@ -736,7 +748,7 @@ background_thread_postfork_parent(tsdn_t *tsdn) {
void
background_thread_postfork_child(tsdn_t *tsdn) {
- for (unsigned i = 0; i < ncpus; i++) {
+ for (unsigned i = 0; i < max_background_threads; i++) {
malloc_mutex_postfork_child(tsdn,
&background_thread_info[i].mtx);
}
@@ -749,7 +761,7 @@ background_thread_postfork_child(tsdn_t *tsdn) {
malloc_mutex_lock(tsdn, &background_thread_lock);
n_background_threads = 0;
background_thread_enabled_set(tsdn, false);
- for (unsigned i = 0; i < ncpus; i++) {
+ for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsdn, &info->mtx);
info->state = background_thread_stopped;
@@ -773,7 +785,7 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
stats->num_threads = n_background_threads;
uint64_t num_runs = 0;
nstime_init(&stats->run_interval, 0);
- for (unsigned i = 0; i < ncpus; i++) {
+ for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
malloc_mutex_lock(tsdn, &info->mtx);
if (info->state != background_thread_stopped) {
@@ -795,6 +807,26 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
#undef BILLION
#undef BACKGROUND_THREAD_MIN_INTERVAL_NS
+static bool
+pthread_create_fptr_init(void) {
+ if (pthread_create_fptr != NULL) {
+ return false;
+ }
+ pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
+ if (pthread_create_fptr == NULL) {
+ can_enable_background_thread = false;
+ if (config_lazy_lock || opt_background_thread) {
+ malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
+ "\"pthread_create\")\n");
+ abort();
+ }
+ } else {
+ can_enable_background_thread = true;
+ }
+
+ return false;
+}
+
/*
* When lazy lock is enabled, we need to make sure setting isthreaded before
* taking any background_thread locks. This is called early in ctl (instead of
@@ -805,7 +837,8 @@ void
background_thread_ctl_init(tsdn_t *tsdn) {
malloc_mutex_assert_not_owner(tsdn, &background_thread_lock);
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
- pthread_once(&once_control, pthread_create_wrapper_once);
+ pthread_create_fptr_init();
+ pthread_create_wrapper_init();
#endif
}
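
pthread_create_fptr_init() factors the dlsym(RTLD_NEXT, ...) lookup out of background_thread_boot0() so that background_thread_ctl_init() can also perform it lazily, before any background_thread lock is taken. A standalone sketch of the same lookup pattern; the names are illustrative, and RTLD_NEXT requires _GNU_SOURCE (link with -ldl on older glibc):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef int (*pthread_create_t)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

static pthread_create_t real_create;
static bool create_usable;

static void
resolve_pthread_create(void) {
	if (real_create != NULL) {
		return;	/* already resolved; idempotent like the new helper */
	}
	real_create = (pthread_create_t)dlsym(RTLD_NEXT, "pthread_create");
	create_usable = (real_create != NULL);
}

int
main(void) {
	resolve_pthread_create();
	printf("pthread_create resolvable: %s\n", create_usable ? "yes" : "no");
	return 0;
}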
@@ -818,18 +851,10 @@ background_thread_boot0(void) {
"supports pthread only\n");
return true;
}
-
#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
- pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create");
- if (pthread_create_fptr == NULL) {
- can_enable_background_thread = false;
- if (config_lazy_lock || opt_background_thread) {
- malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, "
- "\"pthread_create\")\n");
- abort();
- }
- } else {
- can_enable_background_thread = true;
+ if ((config_lazy_lock || opt_background_thread) &&
+ pthread_create_fptr_init()) {
+ return true;
}
#endif
return false;
@@ -841,6 +866,12 @@ background_thread_boot1(tsdn_t *tsdn) {
assert(have_background_thread);
assert(narenas_total_get() > 0);
+ if (opt_max_background_threads == MAX_BACKGROUND_THREAD_LIMIT &&
+ ncpus < MAX_BACKGROUND_THREAD_LIMIT) {
+ opt_max_background_threads = ncpus;
+ }
+ max_background_threads = opt_max_background_threads;
+
background_thread_enabled_set(tsdn, opt_background_thread);
if (malloc_mutex_init(&background_thread_lock,
"background_thread_global",
@@ -848,17 +879,15 @@ background_thread_boot1(tsdn_t *tsdn) {
malloc_mutex_rank_exclusive)) {
return true;
}
- if (opt_background_thread) {
- background_thread_ctl_init(tsdn);
- }
background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
- b0get(), ncpus * sizeof(background_thread_info_t), CACHELINE);
+ b0get(), opt_max_background_threads *
+ sizeof(background_thread_info_t), CACHELINE);
if (background_thread_info == NULL) {
return true;
}
- for (unsigned i = 0; i < ncpus; i++) {
+ for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
/* Thread mutex is rank_inclusive because of thread0. */
if (malloc_mutex_init(&info->mtx, "background_thread",