Diffstat (limited to 'dep/jemalloc/src/huge.c'):
-rw-r--r--  dep/jemalloc/src/huge.c | 30 ++++++++++++++++----------
1 file changed, 20 insertions(+), 10 deletions(-)
diff --git a/dep/jemalloc/src/huge.c b/dep/jemalloc/src/huge.c
index 0aadc4339a9..a4f9b054ed5 100644
--- a/dep/jemalloc/src/huge.c
+++ b/dep/jemalloc/src/huge.c
@@ -50,6 +50,7 @@ huge_malloc(size_t size, bool zero)
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
 #ifdef JEMALLOC_STATS
+	stats_cactive_add(csize);
 	huge_nmalloc++;
 	huge_allocated += csize;
 #endif
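
The new stats_cactive_add()/stats_cactive_sub() calls keep jemalloc's global count of active bytes in step with huge allocations, alongside the existing huge_nmalloc/huge_allocated counters. As a rough sketch of what such a counter update might look like — assuming a single atomic size_t, and using a GCC builtin as a stand-in for jemalloc's own atomic wrappers — consider:

    #include <stddef.h>

    /* Illustrative only: the real counter is stats_cactive inside
     * jemalloc, which defines its own atomic helpers. Updates must be
     * atomic because huge allocations happen on many threads. */
    static size_t stats_cactive;

    static inline void
    stats_cactive_add(size_t size)
    {
            __sync_add_and_fetch(&stats_cactive, size);
    }

    static inline void
    stats_cactive_sub(size_t size)
    {
            __sync_sub_and_fetch(&stats_cactive, size);
    }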
@@ -83,7 +84,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	 * alignment, in order to assure the alignment can be achieved, then
 	 * unmap leading and trailing chunks.
 	 */
-	assert(alignment >= chunksize);
+	assert(alignment > chunksize);
 
 	chunk_size = CHUNK_CEILING(size);
@@ -109,12 +110,12 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	if (offset == 0) {
 		/* Trim trailing space. */
 		chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
-		    - chunk_size);
+		    - chunk_size, true);
 	} else {
 		size_t trailsize;
 
 		/* Trim leading space. */
-		chunk_dealloc(ret, alignment - offset);
+		chunk_dealloc(ret, alignment - offset, true);
 
 		ret = (void *)((uintptr_t)ret + (alignment - offset));
@@ -123,7 +124,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 			/* Trim trailing space. */
 			assert(trailsize < alloc_size);
 			chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
-			    trailsize);
+			    trailsize, true);
 		}
 	}
 
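
Every chunk_dealloc() call site grows a third argument: a bool saying whether the pages should actually be returned to the system, or only dropped from the allocator's bookkeeping. The trim paths in huge_palloc() always pass true; huge_dalloc() forwards its own unmap flag (see the last hunk). A minimal sketch of the updated shape, assuming the flag simply gates the OS-level release (the real implementation in chunk.c also handles swap/DSS-backed chunks and stats):

    #include <stdbool.h>
    #include <stddef.h>

    void chunk_dealloc_mmap(void *chunk, size_t size); /* chunk_mmap.c */

    /* Sketch only, not the real chunk.c implementation. */
    void
    chunk_dealloc(void *chunk, size_t size, bool unmap)
    {
            /* ... bookkeeping shared by both paths ... */
            if (unmap) {
                    /* Give the pages back to the OS. */
                    chunk_dealloc_mmap(chunk, size);
            }
            /* With unmap == false the mapping stays live for the caller. */
    }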
@@ -134,6 +135,7 @@ huge_palloc(size_t size, size_t alignment, bool zero)
 	malloc_mutex_lock(&huge_mtx);
 	extent_tree_ad_insert(&huge, node);
 #ifdef JEMALLOC_STATS
+	stats_cactive_add(chunk_size);
 	huge_nmalloc++;
 	huge_allocated += chunk_size;
 #endif
@@ -192,7 +194,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	 * different size class.  In that case, fall back to allocating new
 	 * space and copying.
 	 */
-	if (alignment != 0)
+	if (alignment > chunksize)
 		ret = huge_palloc(size + extra, alignment, zero);
 	else
 		ret = huge_malloc(size + extra, zero);
@@ -201,7 +203,7 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 		if (extra == 0)
 			return (NULL);
 		/* Try again, this time without extra. */
-		if (alignment != 0)
+		if (alignment > chunksize)
 			ret = huge_palloc(size, alignment, zero);
 		else
 			ret = huge_malloc(size, zero);
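
These two hunks also explain the tightened assert in huge_palloc() above: the guard changes from alignment != 0 to alignment > chunksize because chunks returned by the huge path are always chunksize-aligned, so any power-of-two alignment up to chunksize comes for free from plain huge_malloc(); only larger alignments need huge_palloc()'s over-allocate-and-trim approach. A small illustration of that invariant (chunksize is a power of two):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* An address aligned to chunksize (a power of two) is also aligned
     * to every smaller power of two, so alignment <= chunksize needs no
     * special handling. */
    static void
    check_chunk_alignment(void *ptr, size_t alignment, size_t chunksize)
    {
            assert(((uintptr_t)ptr & (chunksize - 1)) == 0);
            if (alignment <= chunksize)
                    assert(((uintptr_t)ptr & (alignment - 1)) == 0);
    }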
@@ -232,6 +234,13 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 	    ) {
 		size_t newsize = huge_salloc(ret);
 
+		/*
+		 * Remove ptr from the tree of huge allocations before
+		 * performing the remap operation, in order to avoid the
+		 * possibility of another thread acquiring that mapping before
+		 * this one removes it from the tree.
+		 */
+		huge_dalloc(ptr, false);
 		if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
 		    ret) == MAP_FAILED) {
 			/*
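
The added comment is the heart of this hunk: huge_dalloc(ptr, false) now runs before mremap(), so the old extent is out of the huge tree by the time its address range can be reused, and the false argument keeps the pages mapped for mremap() to move. A condensed sketch of the ordering, assuming Linux mremap(2) (the helper name here is hypothetical, not jemalloc API):

    #define _GNU_SOURCE
    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>
    #include <sys/mman.h>

    void huge_dalloc(void *ptr, bool unmap);    /* jemalloc internal */

    /* 'ret' is a fresh mapping of newsize bytes obtained from
     * huge_malloc()/huge_palloc(); ptr/oldsize describe the old region. */
    static void
    remap_huge(void *ret, size_t newsize, void *ptr, size_t oldsize,
        size_t copysize)
    {
            huge_dalloc(ptr, false);        /* drop tree node, keep pages */
            if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED,
                ret) == MAP_FAILED) {
                    /* Fallback: copy, then unmap the orphaned pages. */
                    memcpy(ret, ptr, copysize);
                    munmap(ptr, oldsize);   /* stand-in for chunk_dealloc_mmap() */
            }
    }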
@@ -251,9 +260,8 @@ huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
 			if (opt_abort)
 				abort();
 			memcpy(ret, ptr, copysize);
-			idalloc(ptr);
-		} else
-			huge_dalloc(ptr, false);
+			chunk_dealloc_mmap(ptr, oldsize);
+		}
 	} else
 #endif
 	{
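
Note the knock-on change in the failure path: because the tree node was already removed up front, the old region can no longer be freed through idalloc()/huge_dalloc() — that would look for a node that is gone and skew the stats. Instead the raw pages are released directly with chunk_dealloc_mmap(ptr, oldsize), bypassing the huge bookkeeping entirely, and the old else arm (which used to call huge_dalloc() on success) disappears.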
@@ -278,6 +286,7 @@ huge_dalloc(void *ptr, bool unmap)
 	extent_tree_ad_remove(&huge, node);
 
 #ifdef JEMALLOC_STATS
+	stats_cactive_sub(node->size);
 	huge_ndalloc++;
 	huge_allocated -= node->size;
 #endif
@@ -292,9 +301,10 @@ huge_dalloc(void *ptr, bool unmap)
 			memset(node->addr, 0x5a, node->size);
 #endif
 #endif
-		chunk_dealloc(node->addr, node->size);
 	}
 
+	chunk_dealloc(node->addr, node->size, unmap);
+
 	base_node_dealloc(node);
 }
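
Finally, chunk_dealloc() moves out of the if (unmap) block in huge_dalloc(): it is now called unconditionally, with the decision about actually unmapping delegated to the new flag, while the junk-fill memset stays conditional. The two call patterns, for illustration:

    huge_dalloc(ptr, true);   /* normal free: remove node and unmap pages */
    huge_dalloc(ptr, false);  /* mremap path: remove node, pages live on  */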