Diffstat (limited to 'deps')
72 files changed, 18038 insertions, 23248 deletions
diff --git a/deps/CMakeLists.txt b/deps/CMakeLists.txt index c64a15ca1c..30ce4fe1fc 100644 --- a/deps/CMakeLists.txt +++ b/deps/CMakeLists.txt @@ -20,6 +20,10 @@ if(CMAKE_SYSTEM_NAME MATCHES "Linux") if(SERVERS AND NOT NOJEM) add_subdirectory(jemalloc) endif() + if(TOOLS) + add_subdirectory(bzip2) + endif() + add_subdirectory(zlib) endif() if(CMAKE_SYSTEM_NAME MATCHES "Windows") diff --git a/deps/PackageList.txt b/deps/PackageList.txt index 41edd5348f..30fcb8dbab 100644 --- a/deps/PackageList.txt +++ b/deps/PackageList.txt @@ -14,7 +14,7 @@ G3D (a commercial-grade C++ 3D engine available as Open Source (BSD License) jemalloc (a general-purpose scalable concurrent malloc-implementation) http://www.canonware.com/jemalloc/ - Version: 3.5.1 + Version: 5.0.1 libMPQ (a library for reading MPQ files) https://libmpq.org/ @@ -41,5 +41,5 @@ gSOAP (a portable development toolkit for C and C++ XML Web services and XML dat Version: 2.8.10 recastnavigation (Recast is state of the art navigation mesh construction toolset for games) - http://code.google.com/p/recastnavigation/ - Version: 1.4 + https://github.com/memononen/recastnavigation + Version: 64385e9ed0822427bca5814d03a3f4c4d7a6db9f diff --git a/deps/jemalloc/CMakeLists.txt b/deps/jemalloc/CMakeLists.txt index b1725f6e19..8e937b67c1 100644 --- a/deps/jemalloc/CMakeLists.txt +++ b/deps/jemalloc/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) +# Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/> # # This file is free software; as a special exception the author gives # unlimited permission to copy and/or distribute it, with or without @@ -8,14 +8,24 @@ # WITHOUT ANY WARRANTY, to the extent permitted by law; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
-# We need to generate the jemalloc_def.h header based on platform-specific settings -if (PLATFORM EQUAL 32) - set(JEM_SIZEDEF 2) - set(JEM_TLSMODEL) -else() - set(JEM_SIZEDEF 3) - set(JEM_TLSMODEL "__attribute__\(\(tls_model\(\"initial-exec\"\)\)\)") -endif() + # We need to generate the jemalloc_def.h header based on platform-specific settings + CHECK_SYMBOL_EXISTS(MADV_FREE "sys/mman.h" HAVE_MADV_FREE) + + if (PLATFORM EQUAL 32) + set(JEM_SIZEDEF 2) + set(JEM_TLSMODEL) + set(JEM_VADDRBITS 32) + else() + set(JEM_SIZEDEF 3) + set(JEM_TLSMODEL "__attribute__\(\(tls_model\(\"initial-exec\"\)\)\)") + set(JEM_VADDRBITS 48) + endif() + + if (HAVE_MADV_FREE) + set(JEM_MADFREE_DEF "#define") + else() + set(JEM_MADFREE_DEF "#undef") + endif() # Create the header, so we can use it configure_file( @@ -24,31 +34,38 @@ configure_file( @ONLY ) -# Done, let's continue -set(jemalloc_STAT_SRC - ${CMAKE_CURRENT_SOURCE_DIR}/src/arena.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/atomic.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/base.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/bitmap.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/chunk.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/chunk_dss.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/chunk_mmap.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/ckh.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/ctl.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/extent.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/hash.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/huge.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/mb.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/mutex.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/prof.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/quarantine.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/rtree.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/stats.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/tcache.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/tsd.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/util.c -) + # Done, let's continue + set(jemalloc_STAT_SRC + ${CMAKE_CURRENT_SOURCE_DIR}/src/arena.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/background_thread.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/base.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/bitmap.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/ckh.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/ctl.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/extent.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/extent_dss.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/extent_mmap.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/hash.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/hooks.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc_cpp.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/large.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/malloc_io.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/mutex.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/mutex_pool.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/nstime.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/pages.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/prng.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/prof.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/rtree.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/spin.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/stats.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/sz.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/tcache.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/ticker.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/tsd.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/witness.c + ) include_directories( ${BUILDDIR}/ @@ -58,3 +75,12 @@ include_directories( add_definitions(-D_GNU_SOURCE -D_REENTRANT) add_library(jemalloc STATIC ${jemalloc_STAT_SRC}) + +target_link_libraries(jemalloc + PUBLIC + ${CMAKE_DL_LIBS}) + +set_target_properties(jemalloc + PROPERTIES + FOLDER + "deps") diff --git a/deps/jemalloc/COPYING b/deps/jemalloc/COPYING index bdda0feb9e..e308632a81 100644 --- a/deps/jemalloc/COPYING 
+++ b/deps/jemalloc/COPYING @@ -1,10 +1,10 @@ Unless otherwise specified, files in the jemalloc source distribution are subject to the following license: -------------------------------------------------------------------------------- -Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>. +Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>. All rights reserved. Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2014 Facebook, Inc. All rights reserved. +Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/deps/jemalloc/ChangeLog b/deps/jemalloc/ChangeLog index d56ee999e6..ee1b7ead39 100644 --- a/deps/jemalloc/ChangeLog +++ b/deps/jemalloc/ChangeLog @@ -1,10 +1,727 @@ Following are change highlights associated with official releases. Important -bug fixes are all mentioned, but internal enhancements are omitted here for -brevity (even though they are more fun to write about). Much more detail can be -found in the git revision history: +bug fixes are all mentioned, but some internal enhancements are omitted here for +brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc +* 5.0.1 (July 1, 2017) + + This bugfix release fixes several issues, most of which are obscure enough + that typical applications are not impacted. + + Bug fixes: + - Update decay->nunpurged before purging, in order to avoid potential update + races and subsequent incorrect purging volume. (@interwq) + - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy + locking and/or background threads). This mitigates an initialization + failure bug for which we still do not have a clear reproduction test case. + (@interwq) + - Modify tsd management so that it neither crashes nor leaks if a thread's + only allocation activity is to call free() after TLS destructors have been + executed. This behavior was observed when operating with GNU libc, and is + unlikely to be an issue with other libc implementations. (@interwq) + - Mask signals during background thread creation. This prevents signals from + being inadvertently delivered to background threads. (@jasone, + @davidgoldblatt, @interwq) + - Avoid inactivity checks within background threads, in order to prevent + recursive mutex acquisition. (@interwq) + - Fix extent_grow_retained() to use the specified hooks when the + arena.<i>.extent_hooks mallctl is used to override the default hooks. + (@interwq) + - Add missing reentrancy support for custom extent hooks which allocate. + (@interwq) + - Post-fork(2), re-initialize the list of tcaches associated with each arena + to contain no tcaches except the forking thread's. (@interwq) + - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This + fixes potential deadlocks after fork(2). (@interwq) + - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to + generate corrupt configure scripts. (@jasone) + - Ensure that the configured page size (--with-lg-page) is no larger than the + configured huge page size (--with-lg-hugepage). (@jasone) + +* 5.0.0 (June 13, 2017) + + Unlike all previous jemalloc releases, this release does not use naturally + aligned "chunks" for virtual memory management, and instead uses page-aligned + "extents". This change has few externally visible effects, but the internal + impacts are... extensive. 
Many other internal changes combine to make this + the most cohesively designed version of jemalloc so far, with ample + opportunity for further enhancements. + + Continuous integration is now an integral aspect of development thanks to the + efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably + stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a + side effect the official release frequency may decrease over time. + + New features: + - Implement optional per-CPU arena support; threads choose which arena to use + based on current CPU rather than on fixed thread-->arena associations. + (@interwq) + - Implement two-phase decay of unused dirty pages. Pages transition from + dirty-->muzzy-->clean, where the first phase transition relies on + madvise(... MADV_FREE) semantics, and the second phase transition discards + pages such that they are replaced with demand-zeroed pages on next access. + (@jasone) + - Increase decay time resolution from seconds to milliseconds. (@jasone) + - Implement opt-in per CPU background threads, and use them for asynchronous + decay-driven unused dirty page purging. (@interwq) + - Add mutex profiling, which collects a variety of statistics useful for + diagnosing overhead/contention issues. (@interwq) + - Add C++ new/delete operator bindings. (@djwatson) + - Support manually created arena destruction, such that all data and metadata + are discarded. Add MALLCTL_ARENAS_DESTROYED for accessing merged stats + associated with destroyed arenas. (@jasone) + - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing + merged/destroyed arena statistics via mallctl. (@jasone) + - Add opt.abort_conf to optionally abort if invalid configuration options are + detected during initialization. (@interwq) + - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the + stats dumped during exit if opt.stats_print is true. (@jasone) + - Add --with-version=VERSION for use when embedding jemalloc into another + project's git repository. (@jasone) + - Add --disable-thp to support cross compiling. (@jasone) + - Add --with-lg-hugepage to support cross compiling. (@jasone) + - Add mallctl interfaces (various authors): + + background_thread + + opt.abort_conf + + opt.retain + + opt.percpu_arena + + opt.background_thread + + opt.{dirty,muzzy}_decay_ms + + opt.stats_print_opts + + arena.<i>.initialized + + arena.<i>.destroy + + arena.<i>.{dirty,muzzy}_decay_ms + + arena.<i>.extent_hooks + + arenas.{dirty,muzzy}_decay_ms + + arenas.bin.<i>.slab_size + + arenas.nlextents + + arenas.lextent.<i>.size + + arenas.create + + stats.background_thread.{num_threads,num_runs,run_interval} + + stats.mutexes.{ctl,background_thread,prof,reset}. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + stats.arenas.<i>.{dirty,muzzy}_decay_ms + + stats.arenas.<i>.uptime + + stats.arenas.<i>.{pmuzzy,base,internal,resident} + + stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged} + + stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs} + + stats.arenas.<i>.bins.<j>.mutex. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents} + + stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy, + extents_retained,decay_dirty,decay_muzzy,base,tcache_list}. 
+ {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + Portability improvements: + - Improve reentrant allocation support, such that deadlock is less likely if + e.g. a system library call in turn allocates memory. (@davidtgoldblatt, + @interwq) + - Support static linking of jemalloc with glibc. (@djwatson) + + Optimizations and refactors: + - Organize virtual memory as "extents" of virtual memory pages, rather than as + naturally aligned "chunks", and store all metadata in arbitrarily distant + locations. This reduces virtual memory external fragmentation, and will + interact better with huge pages (not yet explicitly supported). (@jasone) + - Fold large and huge size classes together; only small and large size classes + remain. (@jasone) + - Unify the allocation paths, and merge most fast-path branching decisions. + (@davidtgoldblatt, @interwq) + - Embed per thread automatic tcache into thread-specific data, which reduces + conditional branches and dereferences. Also reorganize tcache to increase + fast-path data locality. (@interwq) + - Rewrite atomics to closely model the C11 API, convert various + synchronization from mutex-based to atomic, and use the explicit memory + ordering control to resolve various hypothetical races without increasing + synchronization overhead. (@davidtgoldblatt) + - Extensively optimize rtree via various methods: + + Add multiple layers of rtree lookup caching, since rtree lookups are now + part of fast-path deallocation. (@interwq) + + Determine rtree layout at compile time. (@jasone) + + Make the tree shallower for common configurations. (@jasone) + + Embed the root node in the top-level rtree data structure, thus avoiding + one level of indirection. (@jasone) + + Further specialize leaf elements as compared to internal node elements, + and directly embed extent metadata needed for fast-path deallocation. + (@jasone) + + Ignore leading always-zero address bits (architecture-specific). + (@jasone) + - Reorganize headers (ongoing work) to make them hermetic, and disentangle + various module dependencies. (@davidtgoldblatt) + - Convert various internal data structures such as size class metadata from + boot-time-initialized to compile-time-initialized. Propagate resulting data + structure simplifications, such as making arena metadata fixed-size. + (@jasone) + - Simplify size class lookups when constrained to size classes that are + multiples of the page size. This speeds lookups, but the primary benefit is + complexity reduction in code that was the source of numerous regressions. + (@jasone) + - Lock individual extents when possible for localized extent operations, + rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone) + - Use first fit layout policy instead of best fit, in order to improve + packing. (@jasone) + - If munmap(2) is not in use, use an exponential series to grow each arena's + virtual memory, so that the number of disjoint virtual memory mappings + remains low. (@jasone) + - Implement per arena base allocators, so that arenas never share any virtual + memory pages. (@jasone) + - Automatically generate private symbol name mangling macros. (@jasone) + + Incompatible changes: + - Replace chunk hooks with an expanded/normalized set of extent hooks. + (@jasone) + - Remove ratio-based purging. (@jasone) + - Remove --disable-tcache. (@jasone) + - Remove --disable-tls. (@jasone) + - Remove --enable-ivsalloc. (@jasone) + - Remove --with-lg-size-class-group. 
(@jasone) + - Remove --with-lg-tiny-min. (@jasone) + - Remove --disable-cc-silence. (@jasone) + - Remove --enable-code-coverage. (@jasone) + - Remove --disable-munmap (replaced by opt.retain). (@jasone) + - Remove Valgrind support. (@jasone) + - Remove quarantine support. (@jasone) + - Remove redzone support. (@jasone) + - Remove mallctl interfaces (various authors): + + config.munmap + + config.tcache + + config.tls + + config.valgrind + + opt.lg_chunk + + opt.purge + + opt.lg_dirty_mult + + opt.decay_time + + opt.quarantine + + opt.redzone + + opt.thp + + arena.<i>.lg_dirty_mult + + arena.<i>.decay_time + + arena.<i>.chunk_hooks + + arenas.initialized + + arenas.lg_dirty_mult + + arenas.decay_time + + arenas.bin.<i>.run_size + + arenas.nlruns + + arenas.lrun.<i>.size + + arenas.nhchunks + + arenas.hchunk.<i>.size + + arenas.extend + + stats.cactive + + stats.arenas.<i>.lg_dirty_mult + + stats.arenas.<i>.decay_time + + stats.arenas.<i>.metadata.{mapped,allocated} + + stats.arenas.<i>.{npurge,nmadvise,purged} + + stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests} + + stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns} + + stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns} + + stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks} + + Bug fixes: + - Improve interval-based profile dump triggering to dump only one profile when + a single allocation's size exceeds the interval. (@jasone) + - Use prefixed function names (as controlled by --with-jemalloc-prefix) when + pruning backtrace frames in jeprof. (@jasone) + +* 4.5.0 (February 28, 2017) + + This is the first release to benefit from much broader continuous integration + testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure + in place for prior releases, it would have caught all of the most serious + regressions fixed by this release. + + New features: + - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for + transparent huge page integration. (@jasone) + - Update zone allocator integration to work with macOS 10.12. (@glandium) + - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and + EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not + during configuration. (@jasone, @ronawho) + + Bug fixes: + - Fix DSS (sbrk(2)-based) allocation. This regression was first released in + 4.3.0. (@jasone) + - Handle race in per size class utilization computation. This functionality + was first released in 4.0.0. (@interwq) + - Fix lock order reversal during gdump. (@jasone) + - Fix/refactor tcache synchronization. This regression was first released in + 4.0.0. (@jasone) + - Fix various JSON-formatted malloc_stats_print() bugs. This functionality + was first released in 4.3.0. (@jasone) + - Fix huge-aligned allocation. This regression was first released in 4.4.0. + (@jasone) + - When transparent huge page integration is enabled, detect what state pages + start in according to the kernel's current operating mode, and only convert + arena chunks to non-huge during purging if that is not their initial state. + This functionality was first released in 4.4.0. (@jasone) + - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case. + This regression was first released in 4.0.0. (@jasone, @428desmo) + - Properly detect sparc64 when building for Linux. (@glaubitz) + +* 4.4.0 (December 3, 2016) + + New features: + - Add configure support for *-*-linux-android. 
(@cferris1000, @jasone) + - Add the --disable-syscall configure option, for use on systems that place + security-motivated limitations on syscall(2). (@jasone) + - Add support for Debian GNU/kFreeBSD. (@thesam) + + Optimizations: + - Add extent serial numbers and use them where appropriate as a sort key that + is higher priority than address, so that the allocation policy prefers older + extents. This tends to improve locality (decrease fragmentation) when + memory grows downward. (@jasone) + - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized + on Linux 4.5 and newer. (@jasone) + - Mark partially purged arena chunks as non-huge-page. This improves + interaction with Linux's transparent huge page functionality. (@jasone) + + Bug fixes: + - Fix size class computations for edge conditions involving extremely large + allocations. This regression was first released in 4.0.0. (@jasone, + @ingvarha) + - Remove overly restrictive assertions related to the cactive statistic. This + regression was first released in 4.1.0. (@jasone) + - Implement a more reliable detection scheme for os_unfair_lock on macOS. + (@jszakmeister) + +* 4.3.1 (November 7, 2016) + + Bug fixes: + - Fix a severe virtual memory leak. This regression was first released in + 4.3.0. (@interwq, @jasone) + - Refactor atomic and prng APIs to restore support for 32-bit platforms that + use pre-C11 toolchains, e.g. FreeBSD's mips. (@jasone) + +* 4.3.0 (November 4, 2016) + + This is the first release that passes the test suite for multiple Windows + configurations, thanks in large part to @glandium setting up continuous + integration via AppVeyor (and Travis CI for Linux and OS X). + + New features: + - Add "J" (JSON) support to malloc_stats_print(). (@jasone) + - Add Cray compiler support. (@ronawho) + + Optimizations: + - Add/use adaptive spinning for bootstrapping and radix tree node + initialization. (@jasone) + + Bug fixes: + - Fix large allocation to search starting in the optimal size class heap, + which can substantially reduce virtual memory churn and fragmentation. This + regression was first released in 4.0.0. (@mjp41, @jasone) + - Fix stats.arenas.<i>.nthreads accounting. (@interwq) + - Fix and simplify decay-based purging. (@jasone) + - Make DSS (sbrk(2)-related) operations lockless, which resolves potential + deadlocks during thread exit. (@jasone) + - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun, + @jasone) + - Fix over-sized allocation of arena_t (plus associated stats) data + structures. (@jasone, @interwq) + - Fix EXTRA_CFLAGS to not affect configuration. (@jasone) + - Fix a Valgrind integration bug. (@ronawho) + - Disallow 0x5a junk filling when running in Valgrind. (@jasone) + - Fix a file descriptor leak on Linux. This regression was first released in + 4.2.0. (@vsarunas, @jasone) + - Fix static linking of jemalloc with glibc. (@djwatson) + - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This + works around other libraries' system call wrappers performing reentrant + allocation. (@kspinka, @Whissi, @jasone) + - Fix OS X default zone replacement to work with OS X 10.12. (@glandium, + @jasone) + - Fix cached memory management to avoid needless commit/decommit operations + during purging, which resolves permanent virtual memory map fragmentation + issues on Windows. (@mjp41, @jasone) + - Fix TSD fetches to avoid (recursive) allocation. This is relevant to + non-TLS and Windows configurations. 
(@jasone) + - Fix malloc_conf overriding to work on Windows. (@jasone) + - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone) + +* 4.2.1 (June 8, 2016) + + Bug fixes: + - Fix bootstrapping issues for configurations that require allocation during + tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone) + - Fix gettimeofday() version of nstime_update(). (@ronawho) + - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho) + - Fix potential VM map fragmentation regression. (@jasone) + - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone) + - Fix heap profiling context leaks in reallocation edge cases. (@jasone) + +* 4.2.0 (May 12, 2016) + + New features: + - Add the arena.<i>.reset mallctl, which makes it possible to discard all of + an arena's allocations in a single operation. (@jasone) + - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone) + - Add the --with-version configure option. (@jasone) + - Support --with-lg-page values larger than actual page size. (@jasone) + + Optimizations: + - Use pairing heaps rather than red-black trees for various hot data + structures. (@djwatson, @jasone) + - Streamline fast paths of rtree operations. (@jasone) + - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone) + - Decommit unused virtual memory if the OS does not overcommit. (@jasone) + - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order + to avoid unfortunate interactions during fork(2). (@jasone) + + Bug fixes: + - Fix chunk accounting related to triggering gdump profiles. (@jasone) + - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone) + - Scale leak report summary according to sampling probability. (@jasone) + +* 4.1.1 (May 3, 2016) + + This bugfix release resolves a variety of mostly minor issues, though the + bitmap fix is critical for 64-bit Windows. + + Bug fixes: + - Fix the linear scan version of bitmap_sfu() to shift by the proper amount + even when sizeof(long) is not the same as sizeof(void *), as on 64-bit + Windows. (@jasone) + - Fix hashing functions to avoid unaligned memory accesses (and resulting + crashes). This is relevant at least to some ARM-based platforms. + (@rkmisra) + - Fix fork()-related lock rank ordering reversals. These reversals were + unlikely to cause deadlocks in practice except when heap profiling was + enabled and active. (@jasone) + - Fix various chunk leaks in OOM code paths. (@jasone) + - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone) + - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin) + - Fix a variety of test failures that were due to test fragility rather than + core bugs. (@jasone) + +* 4.1.0 (February 28, 2016) + + This release is primarily about optimizations, but it also incorporates a lot + of portability-motivated refactoring and enhancements. Many people worked on + this release, to an extent that even with the omission here of minor changes + (see git revision history), and of the people who reported and diagnosed + issues, so much of the work was contributed that starting with this release, + changes are annotated with author credits to help reflect the collaborative + effort involved. + + New features: + - Implement decay-based unused dirty page purging, a major optimization with + mallctl API impact. This is an alternative to the existing ratio-based + unused dirty page purging, and is intended to eventually become the sole + purging mechanism. 
New mallctls: + + opt.purge + + opt.decay_time + + arena.<i>.decay + + arena.<i>.decay_time + + arenas.decay_time + + stats.arenas.<i>.decay_time + (@jasone, @cevans87) + - Add --with-malloc-conf, which makes it possible to embed a default + options string during configuration. This was motivated by the desire to + specify --with-malloc-conf=purge:decay , since the default must remain + purge:ratio until the 5.0.0 release. (@jasone) + - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin) + - Make *allocx() size class overflow behavior defined. The maximum + size class is now less than PTRDIFF_MAX to protect applications against + numerical overflow, and all allocation functions are guaranteed to indicate + errors rather than potentially crashing if the request size exceeds the + maximum size class. (@jasone) + - jeprof: + + Add raw heap profile support. (@jasone) + + Add --retain and --exclude for backtrace symbol filtering. (@jasone) + + Optimizations: + - Optimize the fast path to combine various bootstrapping and configuration + checks and execute more streamlined code in the common case. (@interwq) + - Use linear scan for small bitmaps (used for small object tracking). In + addition to speeding up bitmap operations on 64-bit systems, this reduces + allocator metadata overhead by approximately 0.2%. (@djwatson) + - Separate arena_avail trees, which substantially speeds up run tree + operations. (@djwatson) + - Use memoization (boot-time-computed table) for run quantization. Separate + arena_avail trees reduced the importance of this optimization. (@jasone) + - Attempt mmap-based in-place huge reallocation. This can dramatically speed + up incremental huge reallocation. (@jasone) + + Incompatible changes: + - Make opt.narenas unsigned rather than size_t. (@jasone) + + Bug fixes: + - Fix stats.cactive accounting regression. (@rustyx, @jasone) + - Handle unaligned keys in hash(). This caused problems for some ARM systems. + (@jasone, @cferris1000) + - Refactor arenas array. In addition to fixing a fork-related deadlock, this + makes arena lookups faster and simpler. (@jasone) + - Move retained memory allocation out of the default chunk allocation + function, to a location that gets executed even if the application installs + a custom chunk allocation function. This resolves a virtual memory leak. + (@buchgr) + - Fix a potential tsd cleanup leak. (@cferris1000, @jasone) + - Fix run quantization. In practice this bug had no impact unless + applications requested memory with alignment exceeding one page. + (@jasone, @djwatson) + - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv) + - jeprof: + + Don't discard curl options if timeout is not defined. (@djwatson) + + Detect failed profile fetches. (@djwatson) + - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for + --disable-stats case. (@jasone) + +* 4.0.4 (October 24, 2015) + + This bugfix release fixes another xallocx() regression. No other regressions + have come to light in over a month, so this is likely a good starting point + for people who prefer to wait for "dot one" releases with all the major issues + shaken out. + + Bug fixes: + - Fix xallocx(..., MALLOCX_ZERO to zero the last full trailing page of large + allocations that have been randomly assigned an offset of 0 when + --enable-cache-oblivious configure option is enabled. + +* 4.0.3 (September 24, 2015) + + This bugfix release continues the trend of xallocx() and heap profiling fixes. 
+ + Bug fixes: + - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large + allocations when --enable-cache-oblivious configure option is enabled. + - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations + when resizing from/to a size class that is not a multiple of the chunk size. + - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap + profile dumping started. + - Work around a potentially bad thread-specific data initialization + interaction with NPTL (glibc's pthreads implementation). + +* 4.0.2 (September 21, 2015) + + This bugfix release addresses a few bugs specific to heap profiling. + + Bug fixes: + - Fix ixallocx_prof_sample() to never modify nor create sampled small + allocations. xallocx() is in general incapable of moving small allocations, + so this fix removes buggy code without loss of generality. + - Fix irallocx_prof_sample() to always allocate large regions, even when + alignment is non-zero. + - Fix prof_alloc_rollback() to read tdata from thread-specific data rather + than dereferencing a potentially invalid tctx. + +* 4.0.1 (September 15, 2015) + + This is a bugfix release that is somewhat high risk due to the amount of + refactoring required to address deep xallocx() problems. As a side effect of + these fixes, xallocx() now tries harder to partially fulfill requests for + optional extra space. Note that a couple of minor heap profiling + optimizations are included, but these are better thought of as performance + fixes that were integral to disovering most of the other bugs. + + Optimizations: + - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the + fast path when heap profiling is enabled. Additionally, split a special + case out into arena_prof_tctx_reset(), which also avoids chunk metadata + reads. + - Optimize irallocx_prof() to optimistically update the sampler state. The + prior implementation appears to have been a holdover from when + rallocx()/xallocx() functionality was combined as rallocm(). + + Bug fixes: + - Fix TLS configuration such that it is enabled by default for platforms on + which it works correctly. + - Fix arenas_cache_cleanup() and arena_get_hard() to handle + allocation/deallocation within the application's thread-specific data + cleanup functions even after arenas_cache is torn down. + - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS. + - Fix chunk purge hook calls for in-place huge shrinking reallocation to + specify the old chunk size rather than the new chunk size. This bug caused + no correctness issues for the default chunk purge function, but was + visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl. + - Fix heap profiling bugs: + + Fix heap profiling to distinguish among otherwise identical sample sites + with interposed resets (triggered via the "prof.reset" mallctl). This bug + could cause data structure corruption that would most likely result in a + segfault. + + Fix irealloc_prof() to prof_alloc_rollback() on OOM. + + Make one call to prof_active_get_unlocked() per allocation event, and use + the result throughout the relevant functions that handle an allocation + event. Also add a missing check in prof_realloc(). These fixes protect + allocation events against concurrent prof_active changes. + + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample() + in the correct order. + + Fix prof_realloc() to call prof_free_sampled_object() after calling + prof_malloc_sample_object(). 
Prior to this fix, if tctx and old_tctx were + the same, the tctx could have been prematurely destroyed. + - Fix portability bugs: + + Don't bitshift by negative amounts when encoding/decoding run sizes in + chunk header maps. This affected systems with page sizes greater than 8 + KiB. + + Rename index_t to szind_t to avoid an existing type on Solaris. + + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to + match glibc and avoid compilation errors when including both + jemalloc/jemalloc.h and malloc.h in C++ code. + + Don't assume that /bin/sh is appropriate when running size_classes.sh + during configuration. + + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM. + + Link tests to librt if it contains clock_gettime(2). + +* 4.0.0 (August 17, 2015) + + This version contains many speed and space optimizations, both minor and + major. The major themes are generalization, unification, and simplification. + Although many of these optimizations cause no visible behavior change, their + cumulative effect is substantial. + + New features: + - Normalize size class spacing to be consistent across the complete size + range. By default there are four size classes per size doubling, but this + is now configurable via the --with-lg-size-class-group option. Also add the + --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and + --with-lg-tiny-min options, which can be used to tweak page and size class + settings. Impacts: + + Worst case performance for incrementally growing/shrinking reallocation + is improved because there are far fewer size classes, and therefore + copying happens less often. + + Internal fragmentation is limited to 20% for all but the smallest size + classes (those less than four times the quantum). (1B + 4 KiB) + and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation. + + Chunk fragmentation tends to be lower because there are fewer distinct run + sizes to pack. + - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and + "tcache.destroy" mallctls control tcache lifetime and flushing, and the + MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API + control which tcache is used for each operation. + - Implement per thread heap profiling, as well as the ability to + enable/disable heap profiling on a per thread basis. Add the "prof.reset", + "prof.lg_sample", "thread.prof.name", "thread.prof.active", + "opt.prof_thread_active_init", "prof.thread_active_init", and + "thread.prof.active" mallctls. + - Add support for per arena application-specified chunk allocators, configured + via the "arena.<i>.chunk_hooks" mallctl. + - Refactor huge allocation to be managed by arenas, so that arenas now + function as general purpose independent allocators. This is important in + the context of user-specified chunk allocators, aside from the scalability + benefits. Related new statistics: + + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc", + "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests" + mallctls provide high level per arena huge allocation statistics. + + The "arenas.nhchunks", "arenas.hchunk.<i>.size", + "stats.arenas.<i>.hchunks.<j>.nmalloc", + "stats.arenas.<i>.hchunks.<j>.ndalloc", + "stats.arenas.<i>.hchunks.<j>.nrequests", and + "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size class + statistics. 
+ - Add the 'util' column to malloc_stats_print() output, which reports the + proportion of available regions that are currently in use for each small + size class. + - Add "alloc" and "free" modes for for junk filling (see the "opt.junk" + mallctl), so that it is possible to separately enable junk filling for + allocation versus deallocation. + - Add the jemalloc-config script, which provides information about how + jemalloc was configured, and how to integrate it into application builds. + - Add metadata statistics, which are accessible via the "stats.metadata", + "stats.arenas.<i>.metadata.mapped", and + "stats.arenas.<i>.metadata.allocated" mallctls. + - Add the "stats.resident" mallctl, which reports the upper limit of + physically resident memory mapped by the allocator. + - Add per arena control over unused dirty page purging, via the + "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and + "stats.arenas.<i>.lg_dirty_mult" mallctls. + - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump + feature on/off during program execution. + - Add sdallocx(), which implements sized deallocation. The primary + optimization over dallocx() is the removal of a metadata read, which often + suffers an L1 cache miss. + - Add missing header includes in jemalloc/jemalloc.h, so that applications + only have to #include <jemalloc/jemalloc.h>. + - Add support for additional platforms: + + Bitrig + + Cygwin + + DragonFlyBSD + + iOS + + OpenBSD + + OpenRISC/or1k + + Optimizations: + - Maintain dirty runs in per arena LRUs rather than in per arena trees of + dirty-run-containing chunks. In practice this change significantly reduces + dirty page purging volume. + - Integrate whole chunks into the unused dirty page purging machinery. This + reduces the cost of repeated huge allocation/deallocation, because it + effectively introduces a cache of chunks. + - Split the arena chunk map into two separate arrays, in order to increase + cache locality for the frequently accessed bits. + - Move small run metadata out of runs, into arena chunk headers. This reduces + run fragmentation, smaller runs reduce external fragmentation for small size + classes, and packed (less uniformly aligned) metadata layout improves CPU + cache set distribution. + - Randomly distribute large allocation base pointer alignment relative to page + boundaries in order to more uniformly utilize CPU cache sets. This can be + disabled via the --disable-cache-oblivious configure option, and queried via + the "config.cache_oblivious" mallctl. + - Micro-optimize the fast paths for the public API functions. + - Refactor thread-specific data to reside in a single structure. This assures + that only a single TLS read is necessary per call into the public API. + - Implement in-place huge allocation growing and shrinking. + - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make + additional optimizations that reduce maximum lookup depth to one or two + levels. This resolves what was a concurrency bottleneck for per arena huge + allocation, because a global data structure is critical for determining + which arenas own which huge allocations. + + Incompatible changes: + - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious + warnings by default. + - Assure that the constness of malloc_usable_size()'s return type matches that + of the system implementation. 
+ - Change the heap profile dump format to support per thread heap profiling, + rename pprof to jeprof, and enhance it with the --thread=<n> option. As a + result, the bundled jeprof must now be used rather than the upstream + (gperftools) pprof. + - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can + internally deadlock on some platforms. + - Change the "arenas.nlruns" mallctl type from size_t to unsigned. + - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with + "stats.arenas.<i>.bins.<j>.curregs". + - Ignore MALLOC_CONF in set{uid,gid,cap} binaries. + - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the + MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage. + + Removed features: + - Remove the *allocm() API, which is superseded by the *allocx() API. + - Remove the --enable-dss options, and make dss non-optional on all platforms + which support sbrk(2). + - Remove the "arenas.purge" mallctl, which was obsoleted by the + "arena.<i>.purge" mallctl in 3.1.0. + - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically + detects whether it is running inside Valgrind. + - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and + "stats.huge.ndalloc" mallctls. + - Remove the --enable-mremap option. + - Remove the "stats.chunks.current", "stats.chunks.total", and + "stats.chunks.high" mallctls. + + Bug fixes: + - Fix the cactive statistic to decrease (rather than increase) when active + memory decreases. This regression was first released in 3.5.0. + - Fix OOM handling in memalign() and valloc(). A variant of this bug existed + in all releases since 2.0.0, which introduced these functions. + - Fix an OOM-related regression in arena_tcache_fill_small(), which could + cause cache corruption on OOM. This regression was present in all releases + from 2.2.0 through 3.6.0. + - Fix size class overflow handling for malloc(), posix_memalign(), memalign(), + calloc(), and realloc() when profiling is enabled. + - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or + "secondary" precedence is specified, but sbrk(2) is not supported. + - Fix fallback lg_floor() implementations to handle extremely large inputs. + - Ensure the default purgeable zone is after the default zone on OS X. + - Fix latent bugs in atomic_*(). + - Fix the "arena.<i>.dss" mallctl to handle read-only calls. + - Fix tls_model configuration to enable the initial-exec model when possible. + - Mark malloc_conf as a weak symbol so that the application can override it. + - Correctly detect glibc's adaptive pthread mutexes. + - Fix the --without-export configure option. + * 3.6.0 (March 31, 2014) This version contains a critical bug fix for a regression present in 3.5.0 and @@ -21,7 +738,7 @@ found in the git revision history: backtracing to be reliable. - Use dss allocation precedence for huge allocations as well as small/large allocations. - - Fix test assertion failure message formatting. This bug did not manifect on + - Fix test assertion failure message formatting. This bug did not manifest on x86_64 systems because of implementation subtleties in va_list. - Fix inconsequential test failures for hash and SFMT code. @@ -516,7 +1233,7 @@ found in the git revision history: - Make it possible for the application to manually flush a thread's cache, via the "tcache.flush" mallctl. - Base maximum dirty page count on proportion of active memory. 
- - Compute various addtional run-time statistics, including per size class + - Compute various additional run-time statistics, including per size class statistics for large objects. - Expose malloc_stats_print(), which can be called repeatedly by the application. diff --git a/deps/jemalloc/README b/deps/jemalloc/README index 9b268f4228..3a6e0d2725 100644 --- a/deps/jemalloc/README +++ b/deps/jemalloc/README @@ -3,12 +3,12 @@ fragmentation avoidance and scalable concurrency support. jemalloc first came into use as the FreeBSD libc allocator in 2005, and since then it has found its way into numerous applications that rely on its predictable behavior. In 2010 jemalloc development efforts broadened to include developer support features -such as heap profiling, Valgrind integration, and extensive monitoring/tuning -hooks. Modern jemalloc releases continue to be integrated back into FreeBSD, -and therefore versatility remains critical. Ongoing development efforts trend -toward making jemalloc among the best allocators for a broad range of demanding -applications, and eliminating/mitigating weaknesses that have practical -repercussions for real world applications. +such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc +releases continue to be integrated back into FreeBSD, and therefore versatility +remains critical. Ongoing development efforts trend toward making jemalloc +among the best allocators for a broad range of demanding applications, and +eliminating/mitigating weaknesses that have practical repercussions for real +world applications. The COPYING file contains copyright and licensing information. @@ -17,4 +17,4 @@ jemalloc. The ChangeLog file contains a brief summary of changes for each release. -URL: http://www.canonware.com/jemalloc/ +URL: http://jemalloc.net/ diff --git a/deps/jemalloc/include/jemalloc/internal/atomic.h b/deps/jemalloc/include/jemalloc/internal/atomic.h index 11a7b47fe0..adadb1a3ac 100644 --- a/deps/jemalloc/include/jemalloc/internal/atomic.h +++ b/deps/jemalloc/include/jemalloc/internal/atomic.h @@ -1,304 +1,77 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#define atomic_read_uint64(p) atomic_add_uint64(p, 0) -#define atomic_read_uint32(p) atomic_add_uint32(p, 0) -#define atomic_read_z(p) atomic_add_z(p, 0) -#define atomic_read_u(p) atomic_add_u(p, 0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); -uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); -uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); -uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); -size_t atomic_add_z(size_t *p, size_t x); -size_t atomic_sub_z(size_t *p, size_t x); -unsigned atomic_add_u(unsigned *p, unsigned x); -unsigned atomic_sub_u(unsigned *p, unsigned x); +#ifndef JEMALLOC_INTERNAL_ATOMIC_H +#define JEMALLOC_INTERNAL_ATOMIC_H + +#define ATOMIC_INLINE static inline + +#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) +# include "jemalloc/internal/atomic_gcc_atomic.h" +#elif defined(JEMALLOC_GCC_SYNC_ATOMICS) +# include 
"jemalloc/internal/atomic_gcc_sync.h" +#elif defined(_MSC_VER) +# include "jemalloc/internal/atomic_msvc.h" +#elif defined(JEMALLOC_C11_ATOMICS) +# include "jemalloc/internal/atomic_c11.h" +#else +# error "Don't have atomics implemented on this platform." #endif -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) -/******************************************************************************/ -/* 64-bit operations. */ +/* + * This header gives more or less a backport of C11 atomics. The user can write + * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate + * counterparts of the C11 atomic functions for type, as so: + * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3); + * and then write things like: + * int *some_ptr; + * atomic_pi_t atomic_ptr_to_int; + * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED); + * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL); + * assert(some_ptr == prev_value); + * and expect things to work in the obvious way. + * + * Also included (with naming differences to avoid conflicts with the standard + * library): + * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence). + * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT). + */ + +/* + * Pure convenience, so that we don't have to type "atomic_memory_order_" + * quite so often. + */ +#define ATOMIC_RELAXED atomic_memory_order_relaxed +#define ATOMIC_ACQUIRE atomic_memory_order_acquire +#define ATOMIC_RELEASE atomic_memory_order_release +#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel +#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst + +/* + * Not all platforms have 64-bit atomics. If we do, this #define exposes that + * fact. + */ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) -# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, -((int64_t)x))); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); -} -# elif (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - x = (uint64_t)(-(int64_t)x); - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} -# elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - /* - * atomic_fetchadd_64() doesn't exist, but we only ever use this - * function on LP64 systems, so atomic_fetchadd_long() will do. 
- */ - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); -} -# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -# else -# error "Missing implementation for 64-bit atomic operations" -# endif +# define JEMALLOC_ATOMIC_U64 #endif -/******************************************************************************/ -/* 32-bit operations. */ -#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ +JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) - return (__sync_add_and_fetch(p, x)); -} +/* + * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only + * platform that actually needs to know the size, MSVC. + */ +JEMALLOC_GENERATE_ATOMICS(bool, b, 0) -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ +JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) - return (__sync_sub_and_fetch(p, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ +JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) - return (InterlockedExchangeAdd(p, x)); -} +JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ +JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) - return (InterlockedExchangeAdd(p, -((int32_t)x))); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); -} -#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - x = (uint32_t)(-(int32_t)x); - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} -#elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); -} -#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#else -# error "Missing implementation for 32-bit atomic operations" -#endif - -/******************************************************************************/ -/* size_t operations. 
*/ -JEMALLOC_INLINE size_t -atomic_add_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE size_t -atomic_sub_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); +#ifdef JEMALLOC_ATOMIC_U64 +JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3) #endif -} -/******************************************************************************/ -/* unsigned operations. */ -JEMALLOC_INLINE unsigned -atomic_add_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE unsigned -atomic_sub_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} -/******************************************************************************/ -#endif +#undef ATOMIC_INLINE -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_ATOMIC_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/bitmap.h b/deps/jemalloc/include/jemalloc/internal/bitmap.h index 605ebac58c..ac990290a5 100644 --- a/deps/jemalloc/include/jemalloc/internal/bitmap.h +++ b/deps/jemalloc/include/jemalloc/internal/bitmap.h @@ -1,37 +1,159 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_BITMAP_H +#define JEMALLOC_INTERNAL_BITMAP_H -/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ -#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/size_classes.h" -typedef struct bitmap_level_s bitmap_level_t; -typedef struct bitmap_info_s bitmap_info_t; typedef unsigned long bitmap_t; -#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG +#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG + +/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ +#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES +/* Maximum bitmap bit count is determined by maximum regions per slab. */ +# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS +#else +/* Maximum bitmap bit count is determined by number of extent size classes. */ +# define LG_BITMAP_MAXBITS LG_CEIL_NSIZES +#endif +#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) /* Number of bits per group. */ -#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) -#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) -#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) +#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) +#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) +#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) + +/* + * Do some analysis on how big the bitmap is before we use a tree. For a brute + * force linear search, if we would have to call ffs_lu() more than 2^3 times, + * use a tree instead. 
+ */ +#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 +# define BITMAP_USE_TREE +#endif + +/* Number of groups required to store a given number of bits. */ +#define BITMAP_BITS2GROUPS(nbits) \ + (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) + +/* + * Number of groups required at a particular level for a given number of bits. + */ +#define BITMAP_GROUPS_L0(nbits) \ + BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_L1(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) +#define BITMAP_GROUPS_L2(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) +#define BITMAP_GROUPS_L3(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS((nbits))))) +#define BITMAP_GROUPS_L4(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))) + +/* + * Assuming the number of levels, number of groups required for a given number + * of bits. + */ +#define BITMAP_GROUPS_1_LEVEL(nbits) \ + BITMAP_GROUPS_L0(nbits) +#define BITMAP_GROUPS_2_LEVEL(nbits) \ + (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) +#define BITMAP_GROUPS_3_LEVEL(nbits) \ + (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) +#define BITMAP_GROUPS_4_LEVEL(nbits) \ + (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) +#define BITMAP_GROUPS_5_LEVEL(nbits) \ + (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits)) + +/* + * Maximum number of groups required to support LG_BITMAP_MAXBITS. + */ +#ifdef BITMAP_USE_TREE -/* Maximum number of levels possible. */ -#define BITMAP_MAX_LEVELS \ - (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ - + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) +#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS) +#else +# error "Unsupported bitmap size" +#endif + +/* + * Maximum number of levels possible. This could be statically computed based + * on LG_BITMAP_MAXBITS: + * + * #define BITMAP_MAX_LEVELS \ + * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) + * + * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so + * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the + * various cascading macros. The only additional cost this incurs is some + * unused trailing entries in bitmap_info_t structures; the bitmaps themselves + * are not impacted. + */ +#define BITMAP_MAX_LEVELS 5 + +#define BITMAP_INFO_INITIALIZER(nbits) { \ + /* nbits. */ \ + nbits, \ + /* nlevels. 
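A worked example of the cascade, again assuming 64-bit groups: for nbits = 2^16, BITMAP_BITS2GROUPS yields 1024 groups at level 0, 16 at level 1 and 1 at level 2. If the compile-time LG_BITMAP_MAXBITS were 16, the selection above maps BITMAP_GROUPS to BITMAP_GROUPS_3_LEVEL (16 <= 3*6), reserving 1024 + 16 + 1 = 1041 groups, and the nlevels expression in BITMAP_INFO_INITIALIZER (continued just below) evaluates to (1024>16) + (16>1) + (1>1) + (1>1) + 1 = 3.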
*/ \ + (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \ + (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \ + (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \ + (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \ + /* levels. */ \ + { \ + {0}, \ + {BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \ + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \ + BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \ + BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \ + + BITMAP_GROUPS_L0(nbits)} \ + } \ +} + +#else /* BITMAP_USE_TREE */ -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) -struct bitmap_level_s { +#define BITMAP_INFO_INITIALIZER(nbits) { \ + /* nbits. */ \ + nbits, \ + /* ngroups. */ \ + BITMAP_BITS2GROUPS(nbits) \ +} + +#endif /* BITMAP_USE_TREE */ + +typedef struct bitmap_level_s { /* Offset of this level's groups within the array of groups. */ size_t group_offset; -}; +} bitmap_level_t; -struct bitmap_info_s { +typedef struct bitmap_info_s { /* Logical number of bits in bitmap (stored at bottom level). */ size_t nbits; +#ifdef BITMAP_USE_TREE /* Number of levels necessary for nbits. */ unsigned nlevels; @@ -40,67 +162,62 @@ struct bitmap_info_s { * bottom to top (e.g. the bottom level is stored in levels[0]). */ bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); -size_t bitmap_info_ngroups(const bitmap_info_t *binfo); -size_t bitmap_size(size_t nbits); -void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); -bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); -void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -#endif +#else /* BITMAP_USE_TREE */ + /* Number of groups necessary for nbits. */ + size_t ngroups; +#endif /* BITMAP_USE_TREE */ +} bitmap_info_t; + +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); +size_t bitmap_size(const bitmap_info_t *binfo); -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) -JEMALLOC_INLINE bool -bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1; +static inline bool +bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { +#ifdef BITMAP_USE_TREE + size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; bitmap_t rg = bitmap[rgoff]; /* The bitmap is full iff the root group is 0. 
*/ return (rg == 0); +#else + size_t i; + + for (i = 0; i < binfo->ngroups; i++) { + if (bitmap[i] != 0) { + return false; + } + } + return true; +#endif } -JEMALLOC_INLINE bool -bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ +static inline bool +bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t g; assert(bit < binfo->nbits); goff = bit >> LG_BITMAP_GROUP_NBITS; g = bitmap[goff]; - return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); + return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); } -JEMALLOC_INLINE void -bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ +static inline void +bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit) == false); + assert(!bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(bitmap_get(bitmap, binfo, bit)); +#ifdef BITMAP_USE_TREE /* Propagate group state transitions up the tree. */ if (g == 0) { unsigned i; @@ -109,45 +226,113 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; - if (g != 0) + if (g != 0) { break; + } } } +#endif +} + +/* ffu: find first unset >= bit. */ +static inline size_t +bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { + assert(min_bit < binfo->nbits); + +#ifdef BITMAP_USE_TREE + size_t bit = 0; + for (unsigned level = binfo->nlevels; level--;) { + size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level + + 1)); + bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit + >> lg_bits_per_group)]; + unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit - + bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS)); + assert(group_nmask <= BITMAP_GROUP_NBITS); + bitmap_t group_mask = ~((1LU << group_nmask) - 1); + bitmap_t group_masked = group & group_mask; + if (group_masked == 0LU) { + if (group == 0LU) { + return binfo->nbits; + } + /* + * min_bit was preceded by one or more unset bits in + * this group, but there are no other unset bits in this + * group. Try again starting at the first bit of the + * next sibling. This will recurse at most once per + * non-root level. 
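In both the tree and linear variants the contract is the same: bitmap_ffu() returns the index of the first logically unset bit at or above min_bit, or binfo->nbits when no such bit exists (recall that in this representation a physical 1 means the logical bit is still unset). For example, with bits 0 and 1 already set and bit 2 free, bitmap_ffu(bitmap, binfo, 0) returns 2, while a query starting past the last free bit returns binfo->nbits.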
+ */ + size_t sib_base = bit + (ZU(1) << lg_bits_per_group); + assert(sib_base > min_bit); + assert(sib_base > bit); + if (sib_base >= binfo->nbits) { + return binfo->nbits; + } + return bitmap_ffu(bitmap, binfo, sib_base); + } + bit += ((size_t)(ffs_lu(group_masked) - 1)) << + (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); + } + assert(bit >= min_bit); + assert(bit < binfo->nbits); + return bit; +#else + size_t i = min_bit >> LG_BITMAP_GROUP_NBITS; + bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) + - 1); + size_t bit; + do { + bit = ffs_lu(g); + if (bit != 0) { + return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); + } + i++; + g = bitmap[i]; + } while (i < binfo->ngroups); + return binfo->nbits; +#endif } /* sfu: set first unset. */ -JEMALLOC_INLINE size_t -bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ +static inline size_t +bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t bit; bitmap_t g; unsigned i; - assert(bitmap_full(bitmap, binfo) == false); + assert(!bitmap_full(bitmap, binfo)); +#ifdef BITMAP_USE_TREE i = binfo->nlevels - 1; g = bitmap[binfo->levels[i].group_offset]; - bit = ffsl(g) - 1; + bit = ffs_lu(g) - 1; while (i > 0) { i--; g = bitmap[binfo->levels[i].group_offset + bit]; - bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1); + bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); } - +#else + i = 0; + g = bitmap[0]; + while ((bit = ffs_lu(g)) == 0) { + i++; + g = bitmap[i]; + } + bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); +#endif bitmap_set(bitmap, binfo, bit); - return (bit); + return bit; } -JEMALLOC_INLINE void -bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ +static inline void +bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; - bool propagate; + UNUSED bool propagate; assert(bit < binfo->nbits); assert(bitmap_get(bitmap, binfo, bit)); @@ -155,10 +340,11 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) gp = &bitmap[goff]; g = *gp; propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; - assert(bitmap_get(bitmap, binfo, bit) == false); + assert(!bitmap_get(bitmap, binfo, bit)); +#ifdef BITMAP_USE_TREE /* Propagate group state transitions up the tree. 
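Taken together, the external surface is bitmap_info_init / bitmap_size / bitmap_init plus the inline set/unset/sfu/ffu accessors. The sketch below is hypothetical wrapper code, not part of this diff; it assumes bitmap_size() reports the required byte count and that passing fill=false reproduces the old behaviour of starting with every bit free -- both assumptions are flagged again in the comments:

#include <assert.h>
#include <stdlib.h>
#include "jemalloc/internal/bitmap.h"	/* jemalloc-internal header */

static bitmap_t *
make_region_bitmap(bitmap_info_t *binfo, size_t nregs) {
	bitmap_info_init(binfo, nregs);
	bitmap_t *bitmap = malloc(bitmap_size(binfo));	/* assumed: size in bytes */
	if (bitmap != NULL) {
		bitmap_init(bitmap, binfo, false);	/* assumed: false == all bits free */
	}
	return bitmap;
}

static size_t
take_region(bitmap_t *bitmap, const bitmap_info_t *binfo) {
	assert(!bitmap_full(bitmap, binfo));
	return bitmap_sfu(bitmap, binfo);	/* claim the first free index */
}

static void
release_region(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t regind) {
	bitmap_unset(bitmap, binfo, regind);
}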
*/ if (propagate) { unsigned i; @@ -168,17 +354,16 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; - if (propagate == false) + if (!propagate) { break; + } } } +#endif /* BITMAP_USE_TREE */ } -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_BITMAP_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/ckh.h b/deps/jemalloc/include/jemalloc/internal/ckh.h index 58712a6a76..7b3850bc16 100644 --- a/deps/jemalloc/include/jemalloc/internal/ckh.h +++ b/deps/jemalloc/include/jemalloc/internal/ckh.h @@ -1,88 +1,101 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_CKH_H +#define JEMALLOC_INTERNAL_CKH_H -typedef struct ckh_s ckh_t; -typedef struct ckhc_s ckhc_t; +#include "jemalloc/internal/tsd.h" -/* Typedefs to allow easy function pointer passing. */ -typedef void ckh_hash_t (const void *, size_t[2]); -typedef bool ckh_keycomp_t (const void *, const void *); +/* Cuckoo hashing implementation. Skip to the end for the interface. */ + +/******************************************************************************/ +/* INTERNAL DEFINITIONS -- IGNORE */ +/******************************************************************************/ /* Maintain counters used to get an idea of performance. */ -/* #define CKH_COUNT */ +/* #define CKH_COUNT */ /* Print counter values in ckh_delete() (requires CKH_COUNT). */ -/* #define CKH_VERBOSE */ +/* #define CKH_VERBOSE */ /* * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit * one bucket per L1 cache line. */ -#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) +#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +/* Typedefs to allow easy function pointer passing. */ +typedef void ckh_hash_t (const void *, size_t[2]); +typedef bool ckh_keycomp_t (const void *, const void *); /* Hash table cell. */ -struct ckhc_s { - const void *key; - const void *data; -}; +typedef struct { + const void *key; + const void *data; +} ckhc_t; -struct ckh_s { +/* The hash table itself. */ +typedef struct { #ifdef CKH_COUNT /* Counters used to get an idea of performance. */ - uint64_t ngrows; - uint64_t nshrinks; - uint64_t nshrinkfails; - uint64_t ninserts; - uint64_t nrelocs; + uint64_t ngrows; + uint64_t nshrinks; + uint64_t nshrinkfails; + uint64_t ninserts; + uint64_t nrelocs; #endif /* Used for pseudo-random number generation. */ -#define CKH_A 1103515241 -#define CKH_C 12347 - uint32_t prng_state; + uint64_t prng_state; /* Total number of items. */ - size_t count; + size_t count; /* * Minimum and current number of hash table buckets. There are * 2^LG_CKH_BUCKET_CELLS cells per bucket. */ - unsigned lg_minbuckets; - unsigned lg_curbuckets; + unsigned lg_minbuckets; + unsigned lg_curbuckets; /* Hash and comparison functions. */ - ckh_hash_t *hash; - ckh_keycomp_t *keycomp; + ckh_hash_t *hash; + ckh_keycomp_t *keycomp; /* Hash table with 2^lg_curbuckets buckets. 
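The bucket sizing comment above works out as follows on a typical 64-bit target: with LG_CACHELINE = 6 (64-byte lines) and LG_SIZEOF_PTR = 3, LG_CKH_BUCKET_CELLS = 6 - 3 - 1 = 2, i.e. four cells per bucket; each ckhc_t is two pointers (16 bytes), so a bucket is exactly 4 * 16 = 64 bytes and occupies a single L1 cache line.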
*/ - ckhc_t *tab; -}; + ckhc_t *tab; +} ckh_t; -#endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS +/* BEGIN PUBLIC API */ +/******************************************************************************/ -bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, +/* Lifetime management. Minitems is the initial capacity. */ +bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp); -void ckh_delete(ckh_t *ckh); -size_t ckh_count(ckh_t *ckh); -bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); -bool ckh_insert(ckh_t *ckh, const void *key, const void *data); -bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key, +void ckh_delete(tsd_t *tsd, ckh_t *ckh); + +/* Get the number of elements in the set. */ +size_t ckh_count(ckh_t *ckh); + +/* + * To iterate over the elements in the table, initialize *tabind to 0 and call + * this function until it returns true. Each call that returns false will + * update *key and *data to the next element in the table, assuming the pointers + * are non-NULL. + */ +bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); + +/* + * Basic hash table operations -- insert, removal, lookup. For ckh_remove and + * ckh_search, key or data can be NULL. The hash-table only stores pointers to + * the key and value, and doesn't do any lifetime management. + */ +bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); +bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data); -bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data); -void ckh_string_hash(const void *key, size_t r_hash[2]); -bool ckh_string_keycomp(const void *k1, const void *k2); -void ckh_pointer_hash(const void *key, size_t r_hash[2]); -bool ckh_pointer_keycomp(const void *k1, const void *k2); +bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +/* Some useful hash and comparison functions for strings and pointers. 
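The reworked public API threads a tsd_t through every allocating or deallocating operation. A hedged usage sketch of the whole lifecycle -- it assumes the usual jemalloc convention that these functions return true on failure, and that the caller obtained tsd the way internal code does (e.g. via tsd_fetch(), which is not part of this diff):

#include <stdint.h>
#include "jemalloc/internal/ckh.h"	/* jemalloc-internal header */

static void
ckh_demo(tsd_t *tsd) {
	ckh_t ckh;
	if (ckh_new(tsd, &ckh, 8, ckh_string_hash, ckh_string_keycomp)) {
		return;	/* assumed: true indicates failure */
	}

	/* Only pointers are stored; the caller owns key and value lifetimes. */
	ckh_insert(tsd, &ckh, "alpha", (void *)(uintptr_t)1);
	ckh_insert(tsd, &ckh, "beta", (void *)(uintptr_t)2);

	void *key, *data;
	if (!ckh_search(&ckh, "alpha", &key, &data)) {
		/* found: key points at "alpha", data is the stored pointer */
	}

	/* Iterate until ckh_iter() reports completion by returning true. */
	size_t tabind = 0;
	while (!ckh_iter(&ckh, &tabind, &key, &data)) {
		/* visit (key, data) */
	}

	ckh_delete(tsd, &ckh);
}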
*/ +void ckh_string_hash(const void *key, size_t r_hash[2]); +bool ckh_string_keycomp(const void *k1, const void *k2); +void ckh_pointer_hash(const void *key, size_t r_hash[2]); +bool ckh_pointer_keycomp(const void *k1, const void *k2); -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_CKH_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/ctl.h b/deps/jemalloc/include/jemalloc/internal/ctl.h index 0ffecc5f2a..a91c4cf556 100644 --- a/deps/jemalloc/include/jemalloc/internal/ctl.h +++ b/deps/jemalloc/include/jemalloc/internal/ctl.h @@ -1,87 +1,106 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ctl_node_s ctl_node_t; -typedef struct ctl_named_node_s ctl_named_node_t; -typedef struct ctl_indexed_node_s ctl_indexed_node_t; -typedef struct ctl_arena_stats_s ctl_arena_stats_t; -typedef struct ctl_stats_s ctl_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct ctl_node_s { - bool named; -}; - -struct ctl_named_node_s { - struct ctl_node_s node; - const char *name; +#ifndef JEMALLOC_INTERNAL_CTL_H +#define JEMALLOC_INTERNAL_CTL_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" + +/* Maximum ctl tree depth. */ +#define CTL_MAX_DEPTH 7 + +typedef struct ctl_node_s { + bool named; +} ctl_node_t; + +typedef struct ctl_named_node_s { + ctl_node_t node; + const char *name; /* If (nchildren == 0), this is a terminal node. */ - unsigned nchildren; - const ctl_node_t *children; - int (*ctl)(const size_t *, size_t, void *, size_t *, - void *, size_t); -}; + size_t nchildren; + const ctl_node_t *children; + int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, + size_t); +} ctl_named_node_t; -struct ctl_indexed_node_s { - struct ctl_node_s node; - const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); -}; +typedef struct ctl_indexed_node_s { + struct ctl_node_s node; + const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, + size_t); +} ctl_indexed_node_t; -struct ctl_arena_stats_s { - bool initialized; - unsigned nthreads; - const char *dss; - size_t pactive; - size_t pdirty; - arena_stats_t astats; +typedef struct ctl_arena_stats_s { + arena_stats_t astats; /* Aggregate stats for small size classes, based on bin stats. */ - size_t allocated_small; - uint64_t nmalloc_small; - uint64_t ndalloc_small; - uint64_t nrequests_small; - - malloc_bin_stats_t bstats[NBINS]; - malloc_large_stats_t *lstats; /* nlclasses elements. 
*/ + size_t allocated_small; + uint64_t nmalloc_small; + uint64_t ndalloc_small; + uint64_t nrequests_small; + + malloc_bin_stats_t bstats[NBINS]; + malloc_large_stats_t lstats[NSIZES - NBINS]; +} ctl_arena_stats_t; + +typedef struct ctl_stats_s { + size_t allocated; + size_t active; + size_t metadata; + size_t resident; + size_t mapped; + size_t retained; + + background_thread_stats_t background_thread; + mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes]; +} ctl_stats_t; + +typedef struct ctl_arena_s ctl_arena_t; +struct ctl_arena_s { + unsigned arena_ind; + bool initialized; + ql_elm(ctl_arena_t) destroyed_link; + + /* Basic stats, supported even if !config_stats. */ + unsigned nthreads; + const char *dss; + ssize_t dirty_decay_ms; + ssize_t muzzy_decay_ms; + size_t pactive; + size_t pdirty; + size_t pmuzzy; + + /* NULL if !config_stats. */ + ctl_arena_stats_t *astats; }; -struct ctl_stats_s { - size_t allocated; - size_t active; - size_t mapped; - struct { - size_t current; /* stats_chunks.curchunks */ - uint64_t total; /* stats_chunks.nchunks */ - size_t high; /* stats_chunks.highchunks */ - } chunks; - struct { - size_t allocated; /* huge_allocated */ - uint64_t nmalloc; /* huge_nmalloc */ - uint64_t ndalloc; /* huge_ndalloc */ - } huge; - unsigned narenas; - ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen); -int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); - -int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, +typedef struct ctl_arenas_s { + uint64_t epoch; + unsigned narenas; + ql_head(ctl_arena_t) destroyed; + + /* + * Element 0 corresponds to merged stats for extant arenas (accessed via + * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for + * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the + * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas. 
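The fields gathered in ctl_stats_s are what back the public "stats.*" mallctl namespace, refreshed whenever the caller writes to "epoch". A sketch of reading them through the public API (names follow jemalloc's documented mallctl tree; depending on build configuration the public symbol may carry the je_ prefix instead):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_global_stats(void) {
	uint64_t epoch = 1;
	size_t sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);	/* refresh the cached stats */

	size_t allocated, active, mapped;
	sz = sizeof(size_t);
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	mallctl("stats.active", &active, &sz, NULL, 0);
	mallctl("stats.mapped", &mapped, &sz, NULL, 0);
	printf("allocated=%zu active=%zu mapped=%zu\n", allocated, active, mapped);
}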
+ */ + ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; +} ctl_arenas_t; + +int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -bool ctl_boot(void); -void ctl_prefork(void); -void ctl_postfork_parent(void); -void ctl_postfork_child(void); +int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); -#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ +int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); +bool ctl_boot(void); +void ctl_prefork(tsdn_t *tsdn); +void ctl_postfork_parent(tsdn_t *tsdn); +void ctl_postfork_child(tsdn_t *tsdn); + +#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ != 0) { \ malloc_printf( \ @@ -91,7 +110,7 @@ void ctl_postfork_child(void); } \ } while (0) -#define xmallctlnametomib(name, mibp, miblenp) do { \ +#define xmallctlnametomib(name, mibp, miblenp) do { \ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ malloc_printf("<jemalloc>: Failure in " \ "xmallctlnametomib(\"%s\", ...)\n", name); \ @@ -99,7 +118,7 @@ void ctl_postfork_child(void); } \ } while (0) -#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ +#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ newlen) != 0) { \ malloc_write( \ @@ -108,10 +127,4 @@ void ctl_postfork_child(void); } \ } while (0) -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - +#endif /* JEMALLOC_INTERNAL_CTL_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/hash.h b/deps/jemalloc/include/jemalloc/internal/hash.h index c7183ede82..188296cf0e 100644 --- a/deps/jemalloc/include/jemalloc/internal/hash.h +++ b/deps/jemalloc/include/jemalloc/internal/hash.h @@ -1,92 +1,76 @@ +#ifndef JEMALLOC_INTERNAL_HASH_H +#define JEMALLOC_INTERNAL_HASH_H + +#include "jemalloc/internal/assert.h" + /* * The following hash function is based on MurmurHash3, placed into the public - * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for + * domain by Austin Appleby. See https://github.com/aappleby/smhasher for * details. */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint32_t hash_x86_32(const void *key, int len, uint32_t seed); -void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]); -void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]); -void hash(const void *key, size_t len, const uint32_t seed, - size_t r_hash[2]); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) /******************************************************************************/ /* Internal implementation. 
*/ -JEMALLOC_INLINE uint32_t -hash_rotl_32(uint32_t x, int8_t r) -{ - - return (x << r) | (x >> (32 - r)); +static inline uint32_t +hash_rotl_32(uint32_t x, int8_t r) { + return ((x << r) | (x >> (32 - r))); } -JEMALLOC_INLINE uint64_t -hash_rotl_64(uint64_t x, int8_t r) -{ - return (x << r) | (x >> (64 - r)); +static inline uint64_t +hash_rotl_64(uint64_t x, int8_t r) { + return ((x << r) | (x >> (64 - r))); } -JEMALLOC_INLINE uint32_t -hash_get_block_32(const uint32_t *p, int i) -{ +static inline uint32_t +hash_get_block_32(const uint32_t *p, int i) { + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { + uint32_t ret; - return (p[i]); + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); + return ret; + } + + return p[i]; } -JEMALLOC_INLINE uint64_t -hash_get_block_64(const uint64_t *p, int i) -{ +static inline uint64_t +hash_get_block_64(const uint64_t *p, int i) { + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { + uint64_t ret; - return (p[i]); -} + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); + return ret; + } -JEMALLOC_INLINE uint32_t -hash_fmix_32(uint32_t h) -{ + return p[i]; +} +static inline uint32_t +hash_fmix_32(uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; - return (h); + return h; } -JEMALLOC_INLINE uint64_t -hash_fmix_64(uint64_t k) -{ - +static inline uint64_t +hash_fmix_64(uint64_t k) { k ^= k >> 33; - k *= QU(0xff51afd7ed558ccdLLU); + k *= KQU(0xff51afd7ed558ccd); k ^= k >> 33; - k *= QU(0xc4ceb9fe1a85ec53LLU); + k *= KQU(0xc4ceb9fe1a85ec53); k ^= k >> 33; - return (k); + return k; } -JEMALLOC_INLINE uint32_t -hash_x86_32(const void *key, int len, uint32_t seed) -{ +static inline uint32_t +hash_x86_32(const void *key, int len, uint32_t seed) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 4; @@ -132,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed) h1 = hash_fmix_32(h1); - return (h1); + return h1; } -UNUSED JEMALLOC_INLINE void +UNUSED static inline void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]) -{ + uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; @@ -237,18 +220,17 @@ hash_x86_128(const void *key, const int len, uint32_t seed, r_out[1] = (((uint64_t) h4) << 32) | h3; } -UNUSED JEMALLOC_INLINE void +UNUSED static inline void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]) -{ + uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; - const uint64_t c1 = QU(0x87c37b91114253d5LLU); - const uint64_t c2 = QU(0x4cf5ad432745937fLLU); + const uint64_t c1 = KQU(0x87c37b91114253d5); + const uint64_t c2 = KQU(0x4cf5ad432745937f); /* body */ { @@ -317,19 +299,20 @@ hash_x64_128(const void *key, const int len, const uint32_t seed, /******************************************************************************/ /* API. */ -JEMALLOC_INLINE void -hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) -{ +static inline void +hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { + assert(len <= INT_MAX); /* Unfortunate implementation limitation. 
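For callers, this file reduces to the four-argument hash() wrapper: it fills a two-element size_t array with a 128-bit MurmurHash3 digest, selecting the x64 or x86 variant at compile time and now rejecting lengths above INT_MAX. A short illustrative call (the key and seed here are arbitrary, not values used by jemalloc itself):

static void
hash_demo(void) {
	const char key[] = "extent:0x7f00";	/* hypothetical key */
	size_t r_hash[2];
	/* Arbitrary seed; jemalloc's own callers each pass their own constant. */
	hash(key, sizeof(key) - 1, 0x12345678U, r_hash);
	/* r_hash[0] is typically used as the primary hash, r_hash[1] as backup. */
}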
*/ + #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) - hash_x64_128(key, len, seed, (uint64_t *)r_hash); + hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); #else - uint64_t hashes[2]; - hash_x86_128(key, len, seed, hashes); - r_hash[0] = (size_t)hashes[0]; - r_hash[1] = (size_t)hashes[1]; + { + uint64_t hashes[2]; + hash_x86_128(key, (int)len, seed, hashes); + r_hash[0] = (size_t)hashes[0]; + r_hash[1] = (size_t)hashes[1]; + } #endif } -#endif -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_HASH_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/mutex.h b/deps/jemalloc/include/jemalloc/internal/mutex.h index de44e1435a..6520c2512d 100644 --- a/deps/jemalloc/include/jemalloc/internal/mutex.h +++ b/deps/jemalloc/include/jemalloc/internal/mutex.h @@ -1,45 +1,123 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_MUTEX_H +#define JEMALLOC_INTERNAL_MUTEX_H -typedef struct malloc_mutex_s malloc_mutex_t; +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/witness.h" + +typedef enum { + /* Can only acquire one mutex of a given witness rank at a time. */ + malloc_mutex_rank_exclusive, + /* + * Can acquire multiple mutexes of the same witness rank, but in + * address-ascending order only. + */ + malloc_mutex_address_ordered +} malloc_mutex_lock_order_t; +typedef struct malloc_mutex_s malloc_mutex_t; +struct malloc_mutex_s { + union { + struct { + /* + * prof_data is defined first to reduce cacheline + * bouncing: the data is not touched by the mutex holder + * during unlocking, while might be modified by + * contenders. Having it before the mutex itself could + * avoid prefetching a modified cacheline (for the + * unlocking thread). + */ + mutex_prof_data_t prof_data; #ifdef _WIN32 -# define MALLOC_MUTEX_INITIALIZER +# if _WIN32_WINNT >= 0x0600 + SRWLOCK lock; +# else + CRITICAL_SECTION lock; +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; #elif (defined(JEMALLOC_OSSPIN)) -# define MALLOC_MUTEX_INITIALIZER {0} + OSSpinLock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} + pthread_mutex_t lock; + malloc_mutex_t *postponed_next; #else -# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ - defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} + pthread_mutex_t lock; +#endif + }; + /* + * We only touch witness when configured w/ debug. However we + * keep the field in a union when !debug so that we don't have + * to pollute the code base with #ifdefs, while avoid paying the + * memory cost. + */ +#if !defined(JEMALLOC_DEBUG) + witness_t witness; + malloc_mutex_lock_order_t lock_order; +#endif + }; + +#if defined(JEMALLOC_DEBUG) + witness_t witness; + malloc_mutex_lock_order_t lock_order; +#endif +}; + +/* + * Based on benchmark results, a fixed spin with this amount of retries works + * well for our critical sections. 
+ */ +#define MALLOC_MUTEX_MAX_SPIN 250 + +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 +# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock)) # else -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} +# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock)) # endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock)) +#elif (defined(JEMALLOC_OSSPIN)) +# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock)) +#else +# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) #endif -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +#define LOCK_PROF_DATA_INITIALIZER \ + {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ + ATOMIC_INIT(0), 0, NULL, 0} -struct malloc_mutex_s { #ifdef _WIN32 - CRITICAL_SECTION lock; +# define MALLOC_MUTEX_INITIALIZER +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_OSSPIN)) - OSSpinLock lock; +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_MUTEX_INIT_CB)) - pthread_mutex_t lock; - malloc_mutex_t *postponed_next; +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #else - pthread_mutex_t lock; +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #endif -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; @@ -48,52 +126,123 @@ extern bool isthreaded; # define isthreaded true #endif -bool malloc_mutex_init(malloc_mutex_t *mutex); -void malloc_mutex_prefork(malloc_mutex_t *mutex); -void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); -void malloc_mutex_postfork_child(malloc_mutex_t *mutex); -bool mutex_boot(void); +bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank, malloc_mutex_lock_order_t lock_order); +void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); +bool malloc_mutex_boot(void); +void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex); -#endif /* JEMALLOC_H_EXTERNS */ 
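Mutex initialization now takes a witness name, rank and lock-ordering mode, and lock/unlock are threaded through a tsdn_t so the witness machinery can verify lock ordering in debug builds. A minimal sketch of the new calling convention; WITNESS_RANK_OMIT is borrowed from the static initializers above, whereas real subsystems pick a meaningful rank from witness.h:

static malloc_mutex_t demo_mtx;

static bool
demo_mtx_boot(void) {
	/* jemalloc convention: a true return indicates failure. */
	return malloc_mutex_init(&demo_mtx, "demo", WITNESS_RANK_OMIT,
	    malloc_mutex_rank_exclusive);
}

static void
demo_critical_section(tsdn_t *tsdn) {
	malloc_mutex_lock(tsdn, &demo_mtx);
	/* ... work that must be serialized ... */
	malloc_mutex_unlock(tsdn, &demo_mtx);
}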
-/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +void malloc_mutex_lock_slow(malloc_mutex_t *mutex); -#ifndef JEMALLOC_ENABLE_INLINE -void malloc_mutex_lock(malloc_mutex_t *mutex); -void malloc_mutex_unlock(malloc_mutex_t *mutex); -#endif +static inline void +malloc_mutex_lock_final(malloc_mutex_t *mutex) { + MALLOC_MUTEX_LOCK(mutex); +} -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) -JEMALLOC_INLINE void -malloc_mutex_lock(malloc_mutex_t *mutex) -{ +static inline bool +malloc_mutex_trylock_final(malloc_mutex_t *mutex) { + return MALLOC_MUTEX_TRYLOCK(mutex); +} +static inline void +mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) { + if (config_stats) { + mutex_prof_data_t *data = &mutex->prof_data; + data->n_lock_ops++; + if (data->prev_owner != tsdn) { + data->prev_owner = tsdn; + data->n_owner_switches++; + } + } +} + +/* Trylock: return false if the lock is successfully acquired. */ +static inline bool +malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { -#ifdef _WIN32 - EnterCriticalSection(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockLock(&mutex->lock); -#else - pthread_mutex_lock(&mutex->lock); -#endif + if (malloc_mutex_trylock_final(mutex)) { + return true; + } + mutex_owner_stats_update(tsdn, mutex); } + witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + + return false; } -JEMALLOC_INLINE void -malloc_mutex_unlock(malloc_mutex_t *mutex) -{ +/* Aggregate lock prof data. */ +static inline void +malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { + nstime_add(&sum->tot_wait_time, &data->tot_wait_time); + if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) { + nstime_copy(&sum->max_wait_time, &data->max_wait_time); + } + + sum->n_wait_times += data->n_wait_times; + sum->n_spin_acquired += data->n_spin_acquired; + + if (sum->max_n_thds < data->max_n_thds) { + sum->max_n_thds = data->max_n_thds; + } + uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds, + ATOMIC_RELAXED); + uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32( + &data->n_waiting_thds, ATOMIC_RELAXED); + atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds, + ATOMIC_RELAXED); + sum->n_owner_switches += data->n_owner_switches; + sum->n_lock_ops += data->n_lock_ops; +} +static inline void +malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { -#ifdef _WIN32 - LeaveCriticalSection(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockUnlock(&mutex->lock); -#else - pthread_mutex_unlock(&mutex->lock); -#endif + if (malloc_mutex_trylock_final(mutex)) { + malloc_mutex_lock_slow(mutex); + } + mutex_owner_stats_update(tsdn, mutex); } + witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +static inline void +malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + if (isthreaded) { + MALLOC_MUTEX_UNLOCK(mutex); + } +} + +static inline void +malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +static inline void +malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +/* Copy the prof 
data from mutex for processing. */ +static inline void +malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, + malloc_mutex_t *mutex) { + mutex_prof_data_t *source = &mutex->prof_data; + /* Can only read holding the mutex. */ + malloc_mutex_assert_owner(tsdn, mutex); + + /* + * Not *really* allowed (we shouldn't be doing non-atomic loads of + * atomic data), but the mutex protection makes this safe, and writing + * a member-for-member copy is tedious for this situation. + */ + *data = *source; + /* n_wait_thds is not reported (modified w/o locking). */ + atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED); } -#endif -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_MUTEX_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/private_namespace.h b/deps/jemalloc/include/jemalloc/internal/private_namespace.h index 35c3b0c6c7..7ebeeba826 100644 --- a/deps/jemalloc/include/jemalloc/internal/private_namespace.h +++ b/deps/jemalloc/include/jemalloc/internal/private_namespace.h @@ -1,147 +1,199 @@ -#define a0calloc JEMALLOC_N(a0calloc) -#define a0free JEMALLOC_N(a0free) +#define a0dalloc JEMALLOC_N(a0dalloc) +#define a0get JEMALLOC_N(a0get) #define a0malloc JEMALLOC_N(a0malloc) +#define arena_aalloc JEMALLOC_N(arena_aalloc) #define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) +#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge) #define arena_bin_index JEMALLOC_N(arena_bin_index) #define arena_bin_info JEMALLOC_N(arena_bin_info) +#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const) +#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable) #define arena_boot JEMALLOC_N(arena_boot) +#define arena_choose JEMALLOC_N(arena_choose) +#define arena_choose_hard JEMALLOC_N(arena_choose_hard) +#define arena_choose_impl JEMALLOC_N(arena_choose_impl) +#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge) +#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert) +#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove) +#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge) +#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand) +#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink) +#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar) +#define arena_cleanup JEMALLOC_N(arena_cleanup) #define arena_dalloc JEMALLOC_N(arena_dalloc) #define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) -#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked) +#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked) #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) #define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) -#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked) +#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked) #define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) +#define arena_decay_tick JEMALLOC_N(arena_decay_tick) +#define arena_decay_ticks JEMALLOC_N(arena_decay_ticks) +#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get) +#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set) +#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get) +#define 
arena_decay_time_set JEMALLOC_N(arena_decay_time_set) #define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) #define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) +#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next) +#define arena_get JEMALLOC_N(arena_get) +#define arena_ichoose JEMALLOC_N(arena_ichoose) +#define arena_init JEMALLOC_N(arena_init) +#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get) +#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set) +#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get) +#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set) #define arena_malloc JEMALLOC_N(arena_malloc) +#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard) #define arena_malloc_large JEMALLOC_N(arena_malloc_large) -#define arena_malloc_small JEMALLOC_N(arena_malloc_small) #define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) #define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) +#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get) #define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) #define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) +#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set) #define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) #define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) #define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) #define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) +#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode) +#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode) #define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) #define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) #define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set) #define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) #define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) #define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) -#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set) -#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get) +#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const) +#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable) #define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) #define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) -#define arena_mapp_get JEMALLOC_N(arena_mapp_get) -#define arena_maxclass JEMALLOC_N(arena_maxclass) +#define arena_maxrun JEMALLOC_N(arena_maxrun) +#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge) +#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add) +#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get) +#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub) +#define arena_migrate JEMALLOC_N(arena_migrate) +#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const) +#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable) +#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind) +#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages) #define arena_new JEMALLOC_N(arena_new) +#define arena_node_alloc JEMALLOC_N(arena_node_alloc) +#define arena_node_dalloc 
JEMALLOC_N(arena_node_dalloc) +#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec) +#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get) +#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc) #define arena_palloc JEMALLOC_N(arena_palloc) #define arena_postfork_child JEMALLOC_N(arena_postfork_child) #define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) -#define arena_prefork JEMALLOC_N(arena_prefork) +#define arena_prefork0 JEMALLOC_N(arena_prefork0) +#define arena_prefork1 JEMALLOC_N(arena_prefork1) +#define arena_prefork2 JEMALLOC_N(arena_prefork2) +#define arena_prefork3 JEMALLOC_N(arena_prefork3) #define arena_prof_accum JEMALLOC_N(arena_prof_accum) #define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl) #define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked) -#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get) -#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set) #define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) +#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get) +#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset) +#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set) #define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) -#define arena_purge_all JEMALLOC_N(arena_purge_all) +#define arena_purge JEMALLOC_N(arena_purge) #define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small) #define arena_ralloc JEMALLOC_N(arena_ralloc) #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) #define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) +#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm) #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) +#define arena_reset JEMALLOC_N(arena_reset) #define arena_run_regind JEMALLOC_N(arena_run_regind) +#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm) #define arena_salloc JEMALLOC_N(arena_salloc) +#define arena_sdalloc JEMALLOC_N(arena_sdalloc) #define arena_stats_merge JEMALLOC_N(arena_stats_merge) #define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) +#define arena_tdata_get JEMALLOC_N(arena_tdata_get) +#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard) #define arenas JEMALLOC_N(arenas) -#define arenas_booted JEMALLOC_N(arenas_booted) -#define arenas_cleanup JEMALLOC_N(arenas_cleanup) -#define arenas_extend JEMALLOC_N(arenas_extend) -#define arenas_initialized JEMALLOC_N(arenas_initialized) -#define arenas_lock JEMALLOC_N(arenas_lock) -#define arenas_tls JEMALLOC_N(arenas_tls) -#define arenas_tsd JEMALLOC_N(arenas_tsd) -#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot) -#define arenas_tsd_cleanup_wrapper JEMALLOC_N(arenas_tsd_cleanup_wrapper) -#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get) -#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper) -#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head) -#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set) +#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup) +#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup) +#define atomic_add_p JEMALLOC_N(atomic_add_p) #define atomic_add_u JEMALLOC_N(atomic_add_u) #define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) #define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) #define atomic_add_z JEMALLOC_N(atomic_add_z) +#define atomic_cas_p JEMALLOC_N(atomic_cas_p) +#define atomic_cas_u JEMALLOC_N(atomic_cas_u) +#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32) +#define atomic_cas_uint64 
JEMALLOC_N(atomic_cas_uint64) +#define atomic_cas_z JEMALLOC_N(atomic_cas_z) +#define atomic_sub_p JEMALLOC_N(atomic_sub_p) #define atomic_sub_u JEMALLOC_N(atomic_sub_u) #define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) #define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) #define atomic_sub_z JEMALLOC_N(atomic_sub_z) +#define atomic_write_p JEMALLOC_N(atomic_write_p) +#define atomic_write_u JEMALLOC_N(atomic_write_u) +#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32) +#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64) +#define atomic_write_z JEMALLOC_N(atomic_write_z) #define base_alloc JEMALLOC_N(base_alloc) #define base_boot JEMALLOC_N(base_boot) -#define base_calloc JEMALLOC_N(base_calloc) -#define base_node_alloc JEMALLOC_N(base_node_alloc) -#define base_node_dealloc JEMALLOC_N(base_node_dealloc) #define base_postfork_child JEMALLOC_N(base_postfork_child) #define base_postfork_parent JEMALLOC_N(base_postfork_parent) #define base_prefork JEMALLOC_N(base_prefork) +#define base_stats_get JEMALLOC_N(base_stats_get) #define bitmap_full JEMALLOC_N(bitmap_full) #define bitmap_get JEMALLOC_N(bitmap_get) #define bitmap_info_init JEMALLOC_N(bitmap_info_init) -#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups) #define bitmap_init JEMALLOC_N(bitmap_init) #define bitmap_set JEMALLOC_N(bitmap_set) #define bitmap_sfu JEMALLOC_N(bitmap_sfu) #define bitmap_size JEMALLOC_N(bitmap_size) #define bitmap_unset JEMALLOC_N(bitmap_unset) +#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc) +#define bootstrap_free JEMALLOC_N(bootstrap_free) +#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc) #define bt_init JEMALLOC_N(bt_init) #define buferror JEMALLOC_N(buferror) -#define choose_arena JEMALLOC_N(choose_arena) -#define choose_arena_hard JEMALLOC_N(choose_arena_hard) -#define chunk_alloc JEMALLOC_N(chunk_alloc) +#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base) +#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache) #define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) #define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) +#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper) #define chunk_boot JEMALLOC_N(chunk_boot) -#define chunk_dealloc JEMALLOC_N(chunk_dealloc) -#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap) +#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache) +#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap) +#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper) +#define chunk_deregister JEMALLOC_N(chunk_deregister) #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) -#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child) -#define chunk_dss_postfork_parent JEMALLOC_N(chunk_dss_postfork_parent) +#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable) #define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get) #define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set) -#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork) +#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default) +#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get) +#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set) #define chunk_in_dss JEMALLOC_N(chunk_in_dss) +#define chunk_lookup JEMALLOC_N(chunk_lookup) #define chunk_npages JEMALLOC_N(chunk_npages) -#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child) -#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent) -#define chunk_prefork JEMALLOC_N(chunk_prefork) -#define chunk_unmap JEMALLOC_N(chunk_unmap) -#define chunks_mtx JEMALLOC_N(chunks_mtx) +#define chunk_purge_wrapper 
JEMALLOC_N(chunk_purge_wrapper) +#define chunk_register JEMALLOC_N(chunk_register) #define chunks_rtree JEMALLOC_N(chunks_rtree) #define chunksize JEMALLOC_N(chunksize) #define chunksize_mask JEMALLOC_N(chunksize_mask) -#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search) #define ckh_count JEMALLOC_N(ckh_count) #define ckh_delete JEMALLOC_N(ckh_delete) -#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert) #define ckh_insert JEMALLOC_N(ckh_insert) -#define ckh_isearch JEMALLOC_N(ckh_isearch) #define ckh_iter JEMALLOC_N(ckh_iter) #define ckh_new JEMALLOC_N(ckh_new) #define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) #define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) -#define ckh_rebuild JEMALLOC_N(ckh_rebuild) #define ckh_remove JEMALLOC_N(ckh_remove) #define ckh_search JEMALLOC_N(ckh_search) #define ckh_string_hash JEMALLOC_N(ckh_string_hash) #define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) -#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert) -#define ckh_try_insert JEMALLOC_N(ckh_try_insert) #define ctl_boot JEMALLOC_N(ctl_boot) #define ctl_bymib JEMALLOC_N(ctl_bymib) #define ctl_byname JEMALLOC_N(ctl_byname) @@ -149,7 +201,33 @@ #define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) #define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) #define ctl_prefork JEMALLOC_N(ctl_prefork) +#define decay_ticker_get JEMALLOC_N(decay_ticker_get) #define dss_prec_names JEMALLOC_N(dss_prec_names) +#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get) +#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set) +#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get) +#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set) +#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get) +#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set) +#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get) +#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set) +#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert) +#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init) +#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove) +#define extent_node_init JEMALLOC_N(extent_node_init) +#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get) +#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set) +#define extent_node_size_get JEMALLOC_N(extent_node_size_get) +#define extent_node_size_set JEMALLOC_N(extent_node_size_set) +#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get) +#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set) +#define extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get) +#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set) +#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil) +#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor) +#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy) +#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse) +#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty) #define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) #define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) #define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) @@ -166,22 +244,31 @@ #define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) #define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) 
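Every entry in this file follows the same pattern: an internal symbol name is rewritten through JEMALLOC_N(), which prepends the configure-time private namespace prefix so that a statically linked jemalloc does not export generic names like arena_boot. The expansion below is illustrative only; the real JEMALLOC_N definition lives in the generated internal headers and the prefix (typically je_) is chosen at configure time:

/* Hypothetical stand-in for the generated definition. */
#define JEMALLOC_N(n) je_##n
/* With it, "#define arena_boot JEMALLOC_N(arena_boot)" makes every use of
 * arena_boot in the internal sources resolve to je_arena_boot. */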
#define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) -#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first) -#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert) -#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter) -#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse) -#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start) -#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last) -#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new) -#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next) -#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch) -#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev) -#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch) -#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove) -#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter) -#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse) -#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start) -#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search) +#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy) +#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse) +#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty) +#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first) +#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert) +#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter) +#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse) +#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start) +#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last) +#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new) +#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next) +#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch) +#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev) +#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch) +#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove) +#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter) +#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse) +#define extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start) +#define extent_tree_szsnad_search JEMALLOC_N(extent_tree_szsnad_search) +#define ffs_llu JEMALLOC_N(ffs_llu) +#define ffs_lu JEMALLOC_N(ffs_lu) +#define ffs_u JEMALLOC_N(ffs_u) +#define ffs_u32 JEMALLOC_N(ffs_u32) +#define ffs_u64 JEMALLOC_N(ffs_u64) +#define ffs_zu JEMALLOC_N(ffs_zu) #define get_errno JEMALLOC_N(get_errno) #define hash JEMALLOC_N(hash) #define hash_fmix_32 JEMALLOC_N(hash_fmix_32) @@ -193,46 +280,51 @@ #define hash_x64_128 JEMALLOC_N(hash_x64_128) #define hash_x86_128 JEMALLOC_N(hash_x86_128) #define hash_x86_32 JEMALLOC_N(hash_x86_32) -#define huge_allocated JEMALLOC_N(huge_allocated) -#define huge_boot JEMALLOC_N(huge_boot) +#define huge_aalloc JEMALLOC_N(huge_aalloc) #define huge_dalloc JEMALLOC_N(huge_dalloc) #define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) -#define huge_dss_prec_get JEMALLOC_N(huge_dss_prec_get) #define huge_malloc JEMALLOC_N(huge_malloc) -#define huge_mtx 
JEMALLOC_N(huge_mtx) -#define huge_ndalloc JEMALLOC_N(huge_ndalloc) -#define huge_nmalloc JEMALLOC_N(huge_nmalloc) #define huge_palloc JEMALLOC_N(huge_palloc) -#define huge_postfork_child JEMALLOC_N(huge_postfork_child) -#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent) -#define huge_prefork JEMALLOC_N(huge_prefork) -#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get) -#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set) +#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get) +#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset) +#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set) #define huge_ralloc JEMALLOC_N(huge_ralloc) #define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) #define huge_salloc JEMALLOC_N(huge_salloc) -#define iallocm JEMALLOC_N(iallocm) -#define icalloc JEMALLOC_N(icalloc) -#define icalloct JEMALLOC_N(icalloct) +#define iaalloc JEMALLOC_N(iaalloc) +#define ialloc JEMALLOC_N(ialloc) +#define iallocztm JEMALLOC_N(iallocztm) +#define iarena_cleanup JEMALLOC_N(iarena_cleanup) #define idalloc JEMALLOC_N(idalloc) -#define idalloct JEMALLOC_N(idalloct) -#define imalloc JEMALLOC_N(imalloc) -#define imalloct JEMALLOC_N(imalloct) +#define idalloctm JEMALLOC_N(idalloctm) +#define in_valgrind JEMALLOC_N(in_valgrind) +#define index2size JEMALLOC_N(index2size) +#define index2size_compute JEMALLOC_N(index2size_compute) +#define index2size_lookup JEMALLOC_N(index2size_lookup) +#define index2size_tab JEMALLOC_N(index2size_tab) #define ipalloc JEMALLOC_N(ipalloc) #define ipalloct JEMALLOC_N(ipalloct) +#define ipallocztm JEMALLOC_N(ipallocztm) #define iqalloc JEMALLOC_N(iqalloc) -#define iqalloct JEMALLOC_N(iqalloct) #define iralloc JEMALLOC_N(iralloc) #define iralloct JEMALLOC_N(iralloct) #define iralloct_realign JEMALLOC_N(iralloct_realign) #define isalloc JEMALLOC_N(isalloc) +#define isdalloct JEMALLOC_N(isdalloct) +#define isqalloc JEMALLOC_N(isqalloc) #define isthreaded JEMALLOC_N(isthreaded) #define ivsalloc JEMALLOC_N(ivsalloc) #define ixalloc JEMALLOC_N(ixalloc) #define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) #define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) #define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) +#define large_maxclass JEMALLOC_N(large_maxclass) +#define lg_floor JEMALLOC_N(lg_floor) +#define lg_prof_sample JEMALLOC_N(lg_prof_sample) #define malloc_cprintf JEMALLOC_N(malloc_cprintf) +#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner) +#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner) +#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot) #define malloc_mutex_init JEMALLOC_N(malloc_mutex_init) #define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) #define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) @@ -242,7 +334,8 @@ #define malloc_printf JEMALLOC_N(malloc_printf) #define malloc_snprintf JEMALLOC_N(malloc_snprintf) #define malloc_strtoumax JEMALLOC_N(malloc_strtoumax) -#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot) +#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0) +#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1) #define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) #define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) #define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) @@ -251,16 +344,35 @@ #define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) #define malloc_write JEMALLOC_N(malloc_write) #define map_bias JEMALLOC_N(map_bias) +#define map_misc_offset 
JEMALLOC_N(map_misc_offset) #define mb_write JEMALLOC_N(mb_write) -#define mutex_boot JEMALLOC_N(mutex_boot) #define narenas_auto JEMALLOC_N(narenas_auto) -#define narenas_total JEMALLOC_N(narenas_total) +#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup) #define narenas_total_get JEMALLOC_N(narenas_total_get) #define ncpus JEMALLOC_N(ncpus) #define nhbins JEMALLOC_N(nhbins) +#define nhclasses JEMALLOC_N(nhclasses) +#define nlclasses JEMALLOC_N(nlclasses) +#define nstime_add JEMALLOC_N(nstime_add) +#define nstime_compare JEMALLOC_N(nstime_compare) +#define nstime_copy JEMALLOC_N(nstime_copy) +#define nstime_divide JEMALLOC_N(nstime_divide) +#define nstime_idivide JEMALLOC_N(nstime_idivide) +#define nstime_imultiply JEMALLOC_N(nstime_imultiply) +#define nstime_init JEMALLOC_N(nstime_init) +#define nstime_init2 JEMALLOC_N(nstime_init2) +#define nstime_monotonic JEMALLOC_N(nstime_monotonic) +#define nstime_ns JEMALLOC_N(nstime_ns) +#define nstime_nsec JEMALLOC_N(nstime_nsec) +#define nstime_sec JEMALLOC_N(nstime_sec) +#define nstime_subtract JEMALLOC_N(nstime_subtract) +#define nstime_update JEMALLOC_N(nstime_update) #define opt_abort JEMALLOC_N(opt_abort) +#define opt_decay_time JEMALLOC_N(opt_decay_time) #define opt_dss JEMALLOC_N(opt_dss) #define opt_junk JEMALLOC_N(opt_junk) +#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc) +#define opt_junk_free JEMALLOC_N(opt_junk_free) #define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) #define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) #define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) @@ -274,140 +386,254 @@ #define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) #define opt_prof_leak JEMALLOC_N(opt_prof_leak) #define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) +#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init) +#define opt_purge JEMALLOC_N(opt_purge) #define opt_quarantine JEMALLOC_N(opt_quarantine) #define opt_redzone JEMALLOC_N(opt_redzone) #define opt_stats_print JEMALLOC_N(opt_stats_print) #define opt_tcache JEMALLOC_N(opt_tcache) +#define opt_thp JEMALLOC_N(opt_thp) #define opt_utrace JEMALLOC_N(opt_utrace) -#define opt_valgrind JEMALLOC_N(opt_valgrind) #define opt_xmalloc JEMALLOC_N(opt_xmalloc) #define opt_zero JEMALLOC_N(opt_zero) #define p2rz JEMALLOC_N(p2rz) +#define pages_boot JEMALLOC_N(pages_boot) +#define pages_commit JEMALLOC_N(pages_commit) +#define pages_decommit JEMALLOC_N(pages_decommit) +#define pages_huge JEMALLOC_N(pages_huge) +#define pages_map JEMALLOC_N(pages_map) +#define pages_nohuge JEMALLOC_N(pages_nohuge) #define pages_purge JEMALLOC_N(pages_purge) -#define pow2_ceil JEMALLOC_N(pow2_ceil) +#define pages_trim JEMALLOC_N(pages_trim) +#define pages_unmap JEMALLOC_N(pages_unmap) +#define pind2sz JEMALLOC_N(pind2sz) +#define pind2sz_compute JEMALLOC_N(pind2sz_compute) +#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup) +#define pind2sz_tab JEMALLOC_N(pind2sz_tab) +#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32) +#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64) +#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu) +#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32) +#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64) +#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu) +#define prng_range_u32 JEMALLOC_N(prng_range_u32) +#define prng_range_u64 JEMALLOC_N(prng_range_u64) +#define prng_range_zu JEMALLOC_N(prng_range_zu) +#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32) +#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64) +#define prng_state_next_zu 
JEMALLOC_N(prng_state_next_zu) +#define prof_active JEMALLOC_N(prof_active) +#define prof_active_get JEMALLOC_N(prof_active_get) +#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked) +#define prof_active_set JEMALLOC_N(prof_active_set) +#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep) +#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback) #define prof_backtrace JEMALLOC_N(prof_backtrace) #define prof_boot0 JEMALLOC_N(prof_boot0) #define prof_boot1 JEMALLOC_N(prof_boot1) #define prof_boot2 JEMALLOC_N(prof_boot2) #define prof_bt_count JEMALLOC_N(prof_bt_count) -#define prof_ctx_get JEMALLOC_N(prof_ctx_get) -#define prof_ctx_set JEMALLOC_N(prof_ctx_set) +#define prof_dump_header JEMALLOC_N(prof_dump_header) #define prof_dump_open JEMALLOC_N(prof_dump_open) #define prof_free JEMALLOC_N(prof_free) +#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object) #define prof_gdump JEMALLOC_N(prof_gdump) +#define prof_gdump_get JEMALLOC_N(prof_gdump_get) +#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked) +#define prof_gdump_set JEMALLOC_N(prof_gdump_set) +#define prof_gdump_val JEMALLOC_N(prof_gdump_val) #define prof_idump JEMALLOC_N(prof_idump) #define prof_interval JEMALLOC_N(prof_interval) #define prof_lookup JEMALLOC_N(prof_lookup) #define prof_malloc JEMALLOC_N(prof_malloc) +#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object) #define prof_mdump JEMALLOC_N(prof_mdump) #define prof_postfork_child JEMALLOC_N(prof_postfork_child) #define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) -#define prof_prefork JEMALLOC_N(prof_prefork) -#define prof_promote JEMALLOC_N(prof_promote) +#define prof_prefork0 JEMALLOC_N(prof_prefork0) +#define prof_prefork1 JEMALLOC_N(prof_prefork1) #define prof_realloc JEMALLOC_N(prof_realloc) +#define prof_reset JEMALLOC_N(prof_reset) #define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) #define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) -#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted) +#define prof_tctx_get JEMALLOC_N(prof_tctx_get) +#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset) +#define prof_tctx_set JEMALLOC_N(prof_tctx_set) #define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) +#define prof_tdata_count JEMALLOC_N(prof_tdata_count) #define prof_tdata_get JEMALLOC_N(prof_tdata_get) #define prof_tdata_init JEMALLOC_N(prof_tdata_init) -#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized) -#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls) -#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd) -#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot) -#define prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper) -#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get) -#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper) -#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head) -#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set) +#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit) +#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get) +#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get) +#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set) +#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set) +#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get) +#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set) +#define psz2ind JEMALLOC_N(psz2ind) +#define psz2u JEMALLOC_N(psz2u) 
+#define purge_mode_names JEMALLOC_N(purge_mode_names) #define quarantine JEMALLOC_N(quarantine) #define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook) -#define quarantine_boot JEMALLOC_N(quarantine_boot) -#define quarantine_booted JEMALLOC_N(quarantine_booted) +#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work) #define quarantine_cleanup JEMALLOC_N(quarantine_cleanup) -#define quarantine_init JEMALLOC_N(quarantine_init) -#define quarantine_tls JEMALLOC_N(quarantine_tls) -#define quarantine_tsd JEMALLOC_N(quarantine_tsd) -#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot) -#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper) -#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get) -#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper) -#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head) -#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set) -#define register_zone JEMALLOC_N(register_zone) +#define rtree_child_read JEMALLOC_N(rtree_child_read) +#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard) +#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread) #define rtree_delete JEMALLOC_N(rtree_delete) #define rtree_get JEMALLOC_N(rtree_get) -#define rtree_get_locked JEMALLOC_N(rtree_get_locked) #define rtree_new JEMALLOC_N(rtree_new) -#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child) -#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent) -#define rtree_prefork JEMALLOC_N(rtree_prefork) +#define rtree_node_valid JEMALLOC_N(rtree_node_valid) #define rtree_set JEMALLOC_N(rtree_set) +#define rtree_start_level JEMALLOC_N(rtree_start_level) +#define rtree_subkey JEMALLOC_N(rtree_subkey) +#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read) +#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard) +#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread) +#define rtree_val_read JEMALLOC_N(rtree_val_read) +#define rtree_val_write JEMALLOC_N(rtree_val_write) +#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) +#define run_quantize_floor JEMALLOC_N(run_quantize_floor) #define s2u JEMALLOC_N(s2u) +#define s2u_compute JEMALLOC_N(s2u_compute) +#define s2u_lookup JEMALLOC_N(s2u_lookup) #define sa2u JEMALLOC_N(sa2u) #define set_errno JEMALLOC_N(set_errno) -#define small_size2bin JEMALLOC_N(small_size2bin) +#define size2index JEMALLOC_N(size2index) +#define size2index_compute JEMALLOC_N(size2index_compute) +#define size2index_lookup JEMALLOC_N(size2index_lookup) +#define size2index_tab JEMALLOC_N(size2index_tab) +#define spin_adaptive JEMALLOC_N(spin_adaptive) +#define spin_init JEMALLOC_N(spin_init) #define stats_cactive JEMALLOC_N(stats_cactive) #define stats_cactive_add JEMALLOC_N(stats_cactive_add) #define stats_cactive_get JEMALLOC_N(stats_cactive_get) #define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) -#define stats_chunks JEMALLOC_N(stats_chunks) #define stats_print JEMALLOC_N(stats_print) #define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) #define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) #define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) #define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) -#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate) -#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate) +#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate) #define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) #define tcache_bin_flush_small 
JEMALLOC_N(tcache_bin_flush_small) #define tcache_bin_info JEMALLOC_N(tcache_bin_info) -#define tcache_boot0 JEMALLOC_N(tcache_boot0) -#define tcache_boot1 JEMALLOC_N(tcache_boot1) -#define tcache_booted JEMALLOC_N(tcache_booted) +#define tcache_boot JEMALLOC_N(tcache_boot) +#define tcache_cleanup JEMALLOC_N(tcache_cleanup) #define tcache_create JEMALLOC_N(tcache_create) #define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large) #define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) -#define tcache_destroy JEMALLOC_N(tcache_destroy) -#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted) +#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup) #define tcache_enabled_get JEMALLOC_N(tcache_enabled_get) -#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized) #define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) -#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls) -#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd) -#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot) -#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper) -#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get) -#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper) -#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head) -#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set) #define tcache_event JEMALLOC_N(tcache_event) #define tcache_event_hard JEMALLOC_N(tcache_event_hard) #define tcache_flush JEMALLOC_N(tcache_flush) #define tcache_get JEMALLOC_N(tcache_get) -#define tcache_initialized JEMALLOC_N(tcache_initialized) +#define tcache_get_hard JEMALLOC_N(tcache_get_hard) #define tcache_maxclass JEMALLOC_N(tcache_maxclass) +#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child) +#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent) +#define tcache_prefork JEMALLOC_N(tcache_prefork) #define tcache_salloc JEMALLOC_N(tcache_salloc) #define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) -#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup) -#define tcache_tls JEMALLOC_N(tcache_tls) -#define tcache_tsd JEMALLOC_N(tcache_tsd) -#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot) -#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper) -#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get) -#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper) -#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head) -#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set) -#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted) -#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized) -#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls) -#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd) -#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot) -#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper) -#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get) -#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper) -#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head) -#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set) +#define tcaches JEMALLOC_N(tcaches) +#define tcaches_create JEMALLOC_N(tcaches_create) +#define tcaches_destroy JEMALLOC_N(tcaches_destroy) +#define tcaches_flush JEMALLOC_N(tcaches_flush) +#define 
tcaches_get JEMALLOC_N(tcaches_get) +#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup) +#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup) +#define ticker_copy JEMALLOC_N(ticker_copy) +#define ticker_init JEMALLOC_N(ticker_init) +#define ticker_read JEMALLOC_N(ticker_read) +#define ticker_tick JEMALLOC_N(ticker_tick) +#define ticker_ticks JEMALLOC_N(ticker_ticks) +#define tsd_arena_get JEMALLOC_N(tsd_arena_get) +#define tsd_arena_set JEMALLOC_N(tsd_arena_set) +#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get) +#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get) +#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set) +#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get) +#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get) +#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set) +#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get) +#define tsd_boot JEMALLOC_N(tsd_boot) +#define tsd_boot0 JEMALLOC_N(tsd_boot0) +#define tsd_boot1 JEMALLOC_N(tsd_boot1) +#define tsd_booted JEMALLOC_N(tsd_booted) +#define tsd_booted_get JEMALLOC_N(tsd_booted_get) +#define tsd_cleanup JEMALLOC_N(tsd_cleanup) +#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper) +#define tsd_fetch JEMALLOC_N(tsd_fetch) +#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl) +#define tsd_get JEMALLOC_N(tsd_get) +#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates) +#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get) +#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set) +#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get) +#define tsd_initialized JEMALLOC_N(tsd_initialized) #define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion) #define tsd_init_finish JEMALLOC_N(tsd_init_finish) +#define tsd_init_head JEMALLOC_N(tsd_init_head) +#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get) +#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set) +#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get) +#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get) +#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set) +#define tsd_nominal JEMALLOC_N(tsd_nominal) +#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get) +#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set) +#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get) +#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get) +#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set) +#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get) +#define tsd_set JEMALLOC_N(tsd_set) +#define tsd_tcache_enabled_get JEMALLOC_N(tsd_tcache_enabled_get) +#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set) +#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get) +#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get) +#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set) +#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get) +#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get) +#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set) +#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get) +#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get) +#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set) +#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get) +#define tsd_tls JEMALLOC_N(tsd_tls) +#define tsd_tsd JEMALLOC_N(tsd_tsd) +#define tsd_tsdn JEMALLOC_N(tsd_tsdn) +#define tsd_witness_fork_get 
JEMALLOC_N(tsd_witness_fork_get) +#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set) +#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get) +#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get) +#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set) +#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get) +#define tsdn_fetch JEMALLOC_N(tsdn_fetch) +#define tsdn_null JEMALLOC_N(tsdn_null) +#define tsdn_tsd JEMALLOC_N(tsdn_tsd) #define u2rz JEMALLOC_N(u2rz) +#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block) +#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined) +#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess) +#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined) +#define witness_assert_depth JEMALLOC_N(witness_assert_depth) +#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank) +#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless) +#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner) +#define witness_assert_owner JEMALLOC_N(witness_assert_owner) +#define witness_depth_error JEMALLOC_N(witness_depth_error) +#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup) +#define witness_init JEMALLOC_N(witness_init) +#define witness_lock JEMALLOC_N(witness_lock) +#define witness_lock_error JEMALLOC_N(witness_lock_error) +#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) +#define witness_owner JEMALLOC_N(witness_owner) +#define witness_owner_error JEMALLOC_N(witness_owner_error) +#define witness_postfork_child JEMALLOC_N(witness_postfork_child) +#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent) +#define witness_prefork JEMALLOC_N(witness_prefork) +#define witness_unlock JEMALLOC_N(witness_unlock) +#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup) +#define zone_register JEMALLOC_N(zone_register) diff --git a/deps/jemalloc/include/jemalloc/internal/prng.h b/deps/jemalloc/include/jemalloc/internal/prng.h index 7b2b06512f..15cc2d18fa 100644 --- a/deps/jemalloc/include/jemalloc/internal/prng.h +++ b/deps/jemalloc/include/jemalloc/internal/prng.h @@ -1,5 +1,8 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_PRNG_H +#define JEMALLOC_INTERNAL_PRNG_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bit_util.h" /* * Simple linear congruential pseudo-random number generator: @@ -15,46 +18,168 @@ * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. * * This choice of m has the disadvantage that the quality of the bits is - * proportional to bit position. For example. the lowest bit has a cycle of 2, + * proportional to bit position. For example, the lowest bit has a cycle of 2, * the next has a cycle of 4, etc. For this reason, we prefer to use the upper * bits. - * - * Macro parameters: - * uint32_t r : Result. - * unsigned lg_range : (0..32], number of least significant bits to return. - * uint32_t state : Seed value. - * const uint32_t a, c : See above discussion. */ -#define prng32(r, lg_range, state, a, c) do { \ - assert(lg_range > 0); \ - assert(lg_range <= 32); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (32 - lg_range); \ -} while (false) - -/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. 
*/ -#define prng64(r, lg_range, state, a, c) do { \ - assert(lg_range > 0); \ - assert(lg_range <= 64); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (64 - lg_range); \ -} while (false) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS -#endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ +/* INTERNAL DEFINITIONS -- IGNORE */ /******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +#define PRNG_A_32 UINT32_C(1103515241) +#define PRNG_C_32 UINT32_C(12347) + +#define PRNG_A_64 UINT64_C(6364136223846793005) +#define PRNG_C_64 UINT64_C(1442695040888963407) + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_state_next_u32(uint32_t state) { + return (state * PRNG_A_32) + PRNG_C_32; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_state_next_u64(uint64_t state) { + return (state * PRNG_A_64) + PRNG_C_64; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_state_next_zu(size_t state) { +#if LG_SIZEOF_PTR == 2 + return (state * PRNG_A_32) + PRNG_C_32; +#elif LG_SIZEOF_PTR == 3 + return (state * PRNG_A_64) + PRNG_C_64; +#else +#error Unsupported pointer size +#endif +} -#endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ +/* BEGIN PUBLIC API */ +/******************************************************************************/ + +/* + * The prng_lg_range functions give a uniform int in the half-open range [0, + * 2**lg_range). If atomic is true, they do so safely from multiple threads. + * Multithreaded 64-bit prngs aren't supported. + */ + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) { + uint32_t ret, state0, state1; + + assert(lg_range > 0); + assert(lg_range <= 32); + + state0 = atomic_load_u32(state, ATOMIC_RELAXED); + + if (atomic) { + do { + state1 = prng_state_next_u32(state0); + } while (!atomic_compare_exchange_weak_u32(state, &state0, + state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); + } else { + state1 = prng_state_next_u32(state0); + atomic_store_u32(state, state1, ATOMIC_RELAXED); + } + ret = state1 >> (32 - lg_range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_lg_range_u64(uint64_t *state, unsigned lg_range) { + uint64_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= 64); + + state1 = prng_state_next_u64(*state); + *state = state1; + ret = state1 >> (64 - lg_range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) { + size_t ret, state0, state1; + + assert(lg_range > 0); + assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); + + state0 = atomic_load_zu(state, ATOMIC_RELAXED); + + if (atomic) { + do { + state1 = prng_state_next_zu(state0); + } while (atomic_compare_exchange_weak_zu(state, &state0, + state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); + } else { + state1 = prng_state_next_zu(state0); + atomic_store_zu(state, state1, ATOMIC_RELAXED); + } + ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); + + return ret; +} + +/* + * The prng_range functions behave like the prng_lg_range, but return a result + * in [0, range) instead of [0, 2**lg_range). 
+ */ + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) { + uint32_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u32(state, lg_range, atomic); + } while (ret >= range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_range_u64(uint64_t *state, uint64_t range) { + uint64_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u64(state, lg_range); + } while (ret >= range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) { + size_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_zu(state, lg_range, atomic); + } while (ret >= range); + + return ret; +} + +#endif /* JEMALLOC_INTERNAL_PRNG_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/ql.h b/deps/jemalloc/include/jemalloc/internal/ql.h index f70c5f6f39..8029040771 100644 --- a/deps/jemalloc/include/jemalloc/internal/ql.h +++ b/deps/jemalloc/include/jemalloc/internal/ql.h @@ -1,61 +1,64 @@ -/* - * List definitions. - */ -#define ql_head(a_type) \ +#ifndef JEMALLOC_INTERNAL_QL_H +#define JEMALLOC_INTERNAL_QL_H + +#include "jemalloc/internal/qr.h" + +/* List definitions. */ +#define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } -#define ql_head_initializer(a_head) {NULL} +#define ql_head_initializer(a_head) {NULL} -#define ql_elm(a_type) qr(a_type) +#define ql_elm(a_type) qr(a_type) /* List functions. */ -#define ql_new(a_head) do { \ +#define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) -#define ql_first(a_head) ((a_head)->qlh_first) +#define ql_first(a_head) ((a_head)->qlh_first) -#define ql_last(a_head, a_field) \ +#define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) -#define ql_next(a_head, a_elm, a_field) \ +#define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? qr_next((a_elm), a_field) : NULL) -#define ql_prev(a_head, a_elm, a_field) \ +#define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? 
qr_prev((a_elm), a_field) \ : NULL) -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) -#define ql_after_insert(a_qlelm, a_elm, a_field) \ +#define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) -#define ql_head_insert(a_head, a_elm, a_field) do { \ +#define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) -#define ql_tail_insert(a_head, a_elm, a_field) do { \ +#define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) -#define ql_remove(a_head, a_elm, a_field) do { \ +#define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ @@ -66,18 +69,20 @@ struct { \ } \ } while (0) -#define ql_head_remove(a_head, a_type, a_field) do { \ +#define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) -#define ql_tail_remove(a_head, a_type, a_field) do { \ +#define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) -#define ql_foreach(a_var, a_head, a_field) \ +#define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) -#define ql_reverse_foreach(a_var, a_head, a_field) \ +#define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field) + +#endif /* JEMALLOC_INTERNAL_QL_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/qr.h b/deps/jemalloc/include/jemalloc/internal/qr.h index 602944b9b4..1e1056b386 100644 --- a/deps/jemalloc/include/jemalloc/internal/qr.h +++ b/deps/jemalloc/include/jemalloc/internal/qr.h @@ -1,38 +1,39 @@ +#ifndef JEMALLOC_INTERNAL_QR_H +#define JEMALLOC_INTERNAL_QR_H + /* Ring definitions. */ -#define qr(a_type) \ +#define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. 
*/ -#define qr_new(a_qr, a_field) do { \ +#define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) +#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) -#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) +#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ +#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qrelm); \ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ (a_qrelm)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_after_insert(a_qrelm, a_qr, a_field) \ - do \ - { \ +#define qr_after_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ (a_qr)->a_field.qre_prev = (a_qrelm); \ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ (a_qrelm)->a_field.qre_next = (a_qr); \ - } while (0) +} while (0) -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ - void *t; \ +#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \ + a_type *t; \ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ t = (a_qr_a)->a_field.qre_prev; \ @@ -40,12 +41,14 @@ struct { \ (a_qr_b)->a_field.qre_prev = t; \ } while (0) -/* qr_meld() and qr_split() are functionally equivalent, so there's no need to - * have two copies of the code. */ -#define qr_split(a_qr_a, a_qr_b, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_field) +/* + * qr_meld() and qr_split() are functionally equivalent, so there's no need to + * have two copies of the code. + */ +#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \ + qr_meld((a_qr_a), (a_qr_b), a_type, a_field) -#define qr_remove(a_qr, a_field) do { \ +#define qr_remove(a_qr, a_field) do { \ (a_qr)->a_field.qre_prev->a_field.qre_next \ = (a_qr)->a_field.qre_next; \ (a_qr)->a_field.qre_next->a_field.qre_prev \ @@ -54,14 +57,16 @@ struct { \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_foreach(var, a_qr, a_field) \ +#define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? (var)->a_field.qre_next : NULL)) -#define qr_reverse_foreach(var, a_qr, a_field) \ +#define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ (var) = (((var) != (a_qr)) \ ? (var)->a_field.qre_prev : NULL)) + +#endif /* JEMALLOC_INTERNAL_QR_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/rb.h b/deps/jemalloc/include/jemalloc/internal/rb.h index 423802eb2d..47fa5ca99b 100644 --- a/deps/jemalloc/include/jemalloc/internal/rb.h +++ b/deps/jemalloc/include/jemalloc/internal/rb.h @@ -20,17 +20,21 @@ */ #ifndef RB_H_ -#define RB_H_ +#define RB_H_ + +#ifndef __PGI +#define RB_COMPACT +#endif #ifdef RB_COMPACT /* Node structure. */ -#define rb_node(a_type) \ +#define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right_red; \ } #else -#define rb_node(a_type) \ +#define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right; \ @@ -39,111 +43,116 @@ struct { \ #endif /* Root structure. */ -#define rb_tree(a_type) \ +#define rb_tree(a_type) \ struct { \ a_type *rbt_root; \ - a_type rbt_nil; \ } /* Left accessors. 
*/ -#define rbtn_left_get(a_type, a_field, a_node) \ +#define rbtn_left_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_left) -#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ +#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ (a_node)->a_field.rbn_left = a_left; \ } while (0) #ifdef RB_COMPACT /* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ +#define rbtn_right_get(a_type, a_field, a_node) \ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ & ((ssize_t)-2))) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ } while (0) /* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ +#define rbtn_red_get(a_type, a_field, a_node) \ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ & ((size_t)1))) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ((ssize_t)a_red)); \ } while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ +#define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ } while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ +#define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ } while (0) + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + /* Bookkeeping bit cannot be used by node pointer. */ \ + assert(((uintptr_t)(a_node) & 0x1) == 0); \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) #else /* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ +#define rbtn_right_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_right) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right = a_right; \ } while (0) /* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ +#define rbtn_red_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_red) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_red = (a_red); \ } while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ +#define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = true; \ } while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ +#define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = false; \ } while (0) -#endif /* Node initializer. 
*/ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ - rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) +#endif /* Tree initializer. */ -#define rb_new(a_type, a_field, a_rbt) do { \ - (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ - rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ - rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ +#define rb_new(a_type, a_field, a_rbt) do { \ + (a_rbt)->rbt_root = NULL; \ } while (0) /* Internal utility macros. */ -#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ +#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ + if ((r_node) != NULL) { \ for (; \ - rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ + rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) -#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ +#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ - for (; rbtn_right_get(a_type, a_field, (r_node)) != \ - &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ - (r_node))) { \ + if ((r_node) != NULL) { \ + for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ + (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) -#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ +#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ rbtn_right_set(a_type, a_field, (a_node), \ rbtn_left_get(a_type, a_field, (r_node))); \ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ } while (0) -#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ +#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ rbtn_left_set(a_type, a_field, (a_node), \ rbtn_right_get(a_type, a_field, (r_node))); \ @@ -155,9 +164,11 @@ struct { \ * functions generated by an equivalently parameterized call to rb_gen(). 
*/ -#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ +#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree); \ a_attr a_type * \ @@ -167,11 +178,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key); \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ a_attr void \ @@ -181,7 +192,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg); \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg); /* * The rb_gen() macro generates a type-specific red-black tree implementation, @@ -198,7 +212,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * int (a_cmp *)(a_type *a_node, a_type *a_other); * ^^^^^^ * or a_key - * Interpretation of comparision function return values: + * Interpretation of comparison function return values: * -1 : a_node < a_other * 0 : a_node == a_other * 1 : a_node > a_other @@ -224,6 +238,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * Args: * tree: Pointer to an uninitialized red-black tree object. * + * static bool + * ex_empty(ex_t *tree); + * Description: Determine whether tree is empty. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: True if tree is empty, false otherwise. + * * static ex_node_t * * ex_first(ex_t *tree); * static ex_node_t * @@ -245,7 +266,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * last/first. * * static ex_node_t * - * ex_search(ex_t *tree, ex_node_t *key); + * ex_search(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. * Args: * tree: Pointer to an initialized red-black tree object. @@ -253,9 +274,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * Ret: Node in tree that matches key, or NULL if no match. * * static ex_node_t * - * ex_nsearch(ex_t *tree, ex_node_t *key); + * ex_nsearch(ex_t *tree, const ex_node_t *key); * static ex_node_t * - * ex_psearch(ex_t *tree, ex_node_t *key); + * ex_psearch(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. If no match is found, * return what would be key's successor/predecessor, were * key in tree. @@ -303,40 +324,52 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * arg : Opaque pointer passed to cb(). * Ret: NULL if iteration completed, or the non-NULL callback return value * that caused termination of the iteration. + * + * static void + * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); + * Description: Iterate over the tree with post-order traversal, remove + * each node, and run the callback if non-null. 
This is + * used for destroying a tree without paying the cost to + * rebalance it. The tree must not be otherwise altered + * during traversal. + * Args: + * tree: Pointer to an initialized red-black tree object. + * cb : Callback function, which, if non-null, is called for each node + * during iteration. There is no way to stop iteration once it + * has begun. + * arg : Opaque pointer passed to cb(). */ -#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ +#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ } \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree) { \ + return (rbtree->rbt_root == NULL); \ +} \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ - if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + if (rbtn_right_get(a_type, a_field, node) != NULL) { \ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ + assert(tnode != NULL); \ + ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ @@ -347,24 +380,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ } else { \ break; \ } \ - assert(tnode != &rbtree->rbt_nil); \ + assert(tnode != NULL); \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ - if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + if (rbtn_left_get(a_type, a_field, node) != NULL) { \ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ + assert(tnode != NULL); \ + ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ @@ -375,20 +405,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ } else { \ break; \ } \ - assert(tnode != &rbtree->rbt_nil); \ + assert(tnode != NULL); \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ int cmp; \ ret = rbtree->rbt_root; \ - while (ret != &rbtree->rbt_nil \ + while (ret != NULL \ && (cmp = (a_cmp)(key, ret)) != 0) { \ if (cmp < 0) { \ ret = rbtn_left_get(a_type, a_field, ret); \ @@ -396,17 +423,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ ret = rbtn_right_get(a_type, a_field, ret); \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ - ret = 
&rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ + ret = NULL; \ + while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ ret = tnode; \ @@ -418,17 +442,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ break; \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ + ret = NULL; \ + while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ @@ -440,10 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ break; \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ @@ -454,7 +472,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbt_node_new(a_type, a_field, rbtree, node); \ /* Wind. */ \ path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ assert(cmp != 0); \ if (cmp < 0) { \ @@ -474,7 +492,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbtn_left_set(a_type, a_field, cnode, left); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ /* Fix up 4-node. */ \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ @@ -489,7 +508,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, cnode, right); \ if (rbtn_red_get(a_type, a_field, right)) { \ a_type *left = rbtn_left_get(a_type, a_field, cnode); \ - if (rbtn_red_get(a_type, a_field, left)) { \ + if (left != NULL && rbtn_red_get(a_type, a_field, \ + left)) { \ /* Split 4-node. */ \ rbtn_black_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, right); \ @@ -522,7 +542,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* Wind. */ \ nodep = NULL; /* Silence compiler warning. */ \ path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ @@ -534,8 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* Find node's successor, in preparation for swap. */ \ pathp->cmp = 1; \ nodep = pathp; \ - for (pathp++; pathp->node != &rbtree->rbt_nil; \ - pathp++) { \ + for (pathp++; pathp->node != NULL; pathp++) { \ pathp->cmp = -1; \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ @@ -577,10 +596,10 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ } \ } else { \ a_type *left = rbtn_left_get(a_type, a_field, node); \ - if (left != &rbtree->rbt_nil) { \ + if (left != NULL) { \ /* node has no successor, but it has a left child. */\ /* Splice node out, without losing the left child. 
*/\ - assert(rbtn_red_get(a_type, a_field, node) == false); \ + assert(!rbtn_red_get(a_type, a_field, node)); \ assert(rbtn_red_get(a_type, a_field, left)); \ rbtn_black_set(a_type, a_field, left); \ if (pathp == path) { \ @@ -597,34 +616,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ return; \ } else if (pathp == path) { \ /* The tree only contained one node. */ \ - rbtree->rbt_root = &rbtree->rbt_nil; \ + rbtree->rbt_root = NULL; \ return; \ } \ } \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. */ \ assert(pathp[-1].cmp < 0); \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - &rbtree->rbt_nil); \ + rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ /* restored. */\ - pathp->node = &rbtree->rbt_nil; \ + pathp->node = NULL; \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ assert(pathp->cmp != 0); \ if (pathp->cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp->node, \ pathp[1].node); \ - assert(rbtn_red_get(a_type, a_field, pathp[1].node) \ - == false); \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ a_type *tnode; \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ /* In the following diagrams, ||, //, and \\ */\ /* indicate the path to the removed node. */\ /* */\ @@ -667,7 +684,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ /* || */\ /* pathp(b) */\ /* // \ */\ @@ -681,7 +699,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ - /* subree root, which may actually be the tree */\ + /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. 
*/ \ @@ -721,7 +739,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ left); \ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ leftright); \ - if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ + if (leftrightleft != NULL && rbtn_red_get(a_type, \ + a_field, leftrightleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ @@ -747,7 +766,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* (b) */\ /* / */\ /* (b) */\ - assert(leftright != &rbtree->rbt_nil); \ + assert(leftright != NULL); \ rbtn_red_set(a_type, a_field, leftright); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ @@ -770,7 +789,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ /* || */\ /* pathp(r) */\ /* / \\ */\ @@ -808,7 +828,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ } \ } else { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ @@ -849,22 +870,22 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ } \ /* Set root. */ \ rbtree->rbt_root = path->node; \ - assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \ + assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ } \ a_attr a_type * \ a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ + if (node == NULL) { \ + return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ - a_field, node), cb, arg)) != &rbtree->rbt_nil \ - || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ + arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -874,22 +895,22 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ if (cmp < 0) { \ a_type *ret; \ if ((ret = a_prefix##iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ } else if (cmp > 0) { \ - return (a_prefix##iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + return ret; \ } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -902,25 +923,22 
@@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ } else { \ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ + if (node == NULL) { \ + return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -931,22 +949,22 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ if (cmp > 0) { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } else if (cmp < 0) { \ - return (a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + return ret; \ } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -960,10 +978,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ cb, arg); \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ + return ret; \ +} \ +a_attr void \ +a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ + a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return; \ } \ - return (ret); \ + a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_left_set(a_type, a_field, (node), NULL); \ + a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_right_set(a_type, a_field, (node), NULL); \ + if (cb) { \ + cb(node, arg); \ + } \ +} \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg) { \ + a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ + rbtree->rbt_root = NULL; \ } #endif /* RB_H_ */ diff --git a/deps/jemalloc/include/jemalloc/internal/rtree.h b/deps/jemalloc/include/jemalloc/internal/rtree.h index bc74769f50..b5d4db3988 100644 --- a/deps/jemalloc/include/jemalloc/internal/rtree.h +++ b/deps/jemalloc/include/jemalloc/internal/rtree.h @@ -1,172 +1,474 @@ 
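The rb.h hunks above replace the embedded rbt_nil sentinel with plain NULL links and add a generated destroy() routine that tears a tree down without rebalancing. A minimal sketch of how the generated API might be used, assuming a hypothetical node_t/tree_t pair and comparator (none of these names appear in the diff):

    #include <stdlib.h>

    typedef struct node_s node_t;
    struct node_s {
        rb_node(node_t) link;   /* embedded red-black linkage */
        int key;
    };
    typedef rb_tree(node_t) tree_t;

    static int
    node_cmp(const node_t *a, const node_t *b) {
        return (a->key > b->key) - (a->key < b->key);
    }

    /* Emits static tree_new(), tree_insert(), tree_remove(), tree_search(),
     * tree_iter() and, new with this change, tree_destroy(). */
    rb_gen(static, tree_, tree_t, node_t, link, node_cmp)

    static void
    node_dalloc(node_t *node, void *arg) {
        (void)arg;
        free(node); /* safe: child links are already NULL at this point */
    }

    /* ... build a tree_t t, insert/remove nodes ... then:             */
    /* tree_destroy(&t, node_dalloc, NULL);  afterwards t.rbt_root == NULL */

The destroy callback may free each node, since the recursion clears both child links before invoking it and finally resets rbt_root to NULL, matching the new NULL-based notion of an empty tree.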
+#ifndef JEMALLOC_INTERNAL_RTREE_H +#define JEMALLOC_INTERNAL_RTREE_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree_tsd.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/tsd.h" + /* * This radix tree implementation is tailored to the singular purpose of - * tracking which chunks are currently owned by jemalloc. This functionality - * is mandatory for OS X, where jemalloc must be able to respond to object - * ownership queries. + * associating metadata with extents that are currently owned by jemalloc. * ******************************************************************************* */ -#ifdef JEMALLOC_H_TYPES + +/* Number of high insignificant bits. */ +#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR) +/* Number of low insigificant bits. */ +#define RTREE_NLIB LG_PAGE +/* Number of significant bits. */ +#define RTREE_NSB (LG_VADDR - RTREE_NLIB) +/* Number of levels in radix tree. */ +#if RTREE_NSB <= 10 +# define RTREE_HEIGHT 1 +#elif RTREE_NSB <= 36 +# define RTREE_HEIGHT 2 +#elif RTREE_NSB <= 52 +# define RTREE_HEIGHT 3 +#else +# error Unsupported number of significant virtual address bits +#endif +/* Use compact leaf representation if virtual address encoding allows. */ +#if RTREE_NHIB >= LG_CEIL_NSIZES +# define RTREE_LEAF_COMPACT +#endif + +/* Needed for initialization only. */ +#define RTREE_LEAFKEY_INVALID ((uintptr_t)1) + +typedef struct rtree_node_elm_s rtree_node_elm_t; +struct rtree_node_elm_s { + atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */ +}; + +struct rtree_leaf_elm_s { +#ifdef RTREE_LEAF_COMPACT + /* + * Single pointer-width field containing all three leaf element fields. + * For example, on a 64-bit x64 system with 48 significant virtual + * memory address bits, the index, extent, and slab fields are packed as + * such: + * + * x: index + * e: extent + * b: slab + * + * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b + */ + atomic_p_t le_bits; +#else + atomic_p_t le_extent; /* (extent_t *) */ + atomic_u_t le_szind; /* (szind_t) */ + atomic_b_t le_slab; /* (bool) */ +#endif +}; + +typedef struct rtree_level_s rtree_level_t; +struct rtree_level_s { + /* Number of key bits distinguished by this level. */ + unsigned bits; + /* + * Cumulative number of key bits distinguished by traversing to + * corresponding tree level. + */ + unsigned cumbits; +}; typedef struct rtree_s rtree_t; +struct rtree_s { + malloc_mutex_t init_lock; + /* Number of elements based on rtree_levels[0].bits. */ +#if RTREE_HEIGHT > 1 + rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; +#else + rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; +#endif +}; /* - * Size of each radix tree node (must be a power of 2). This impacts tree - * depth. + * Split the bits into one to three partitions depending on number of + * significant bits. It the number of bits does not divide evenly into the + * number of levels, place one remainder bit per level starting at the leaf + * level. 
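 *
 * Worked example (illustrative, using the 48-bit virtual address layout
 * and 4 KiB pages assumed in the compact-leaf comment above, i.e.
 * LG_VADDR == 48 and LG_PAGE == 12 with 64-bit pointers):
 *
 *   RTREE_NHIB   = 64 - 48  = 16   (insignificant high bits)
 *   RTREE_NLIB   = LG_PAGE  = 12   (insignificant low bits)
 *   RTREE_NSB    = 48 - 12  = 36   (significant bits)
 *   RTREE_HEIGHT = 2               (since 10 < RTREE_NSB <= 36)
 *
 * The 36 significant bits split evenly into {18, 18}: the root array holds
 * 1 << 18 node elements, and at the leaf level
 * rtree_subkey(key, 1) == (key >> 12) & ((1 << 18) - 1), i.e. bits 29..12
 * of the key select the element within a leaf.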
*/ -#define RTREE_NODESIZE (1U << 16) +static const rtree_level_t rtree_levels[] = { +#if RTREE_HEIGHT == 1 + {RTREE_NSB, RTREE_NHIB + RTREE_NSB} +#elif RTREE_HEIGHT == 2 + {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2}, + {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB} +#elif RTREE_HEIGHT == 3 + {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3}, + {RTREE_NSB/3 + RTREE_NSB%3/2, + RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2}, + {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB} +#else +# error Unsupported rtree height +#endif +}; -typedef void *(rtree_alloc_t)(size_t); -typedef void (rtree_dalloc_t)(void *); +bool rtree_new(rtree_t *rtree, bool zeroed); -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t); +extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc; -struct rtree_s { - rtree_alloc_t *alloc; - rtree_dalloc_t *dalloc; - malloc_mutex_t mutex; - void **root; - unsigned height; - unsigned level2bits[1]; /* Dynamically sized. */ -}; +typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t); +extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc; + +typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *); +extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc; + +typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *); +extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc; +#ifdef JEMALLOC_JET +void rtree_delete(tsdn_t *tsdn, rtree_t *rtree); +#endif +rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, + rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing); + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leafkey(uintptr_t key) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - + rtree_levels[RTREE_HEIGHT-1].bits); + unsigned maskbits = ptrbits - cumbits; + uintptr_t mask = ~((ZU(1) << maskbits) - 1); + return (key & mask); +} + +JEMALLOC_ALWAYS_INLINE size_t +rtree_cache_direct_map(uintptr_t key) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - + rtree_levels[RTREE_HEIGHT-1].bits); + unsigned maskbits = ptrbits - cumbits; + return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1)); +} + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_subkey(uintptr_t key, unsigned level) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = rtree_levels[level].cumbits; + unsigned shiftbits = ptrbits - cumbits; + unsigned maskbits = rtree_levels[level].bits; + uintptr_t mask = (ZU(1) << maskbits) - 1; + return ((key >> shiftbits) & mask); +} + +/* + * Atomic getters. + * + * dependent: Reading a value on behalf of a pointer to a valid allocation + * is guaranteed to be a clean read even without synchronization, + * because the rtree update became visible in memory before the + * pointer came into existence. + * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be + * dependent on a previous rtree write, which means a stale read + * could result if synchronization were omitted here. + */ +# ifdef RTREE_LEAF_COMPACT +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { + return (uintptr_t)atomic_load_p(&elm->le_bits, dependent + ? 
ATOMIC_RELAXED : ATOMIC_ACQUIRE); +} + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_leaf_elm_bits_extent_get(uintptr_t bits) { + /* Restore sign-extended high bits, mask slab bit. */ + return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >> + RTREE_NHIB) & ~((uintptr_t)0x1)); +} -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS +JEMALLOC_ALWAYS_INLINE szind_t +rtree_leaf_elm_bits_szind_get(uintptr_t bits) { + return (szind_t)(bits >> LG_VADDR); +} -rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc); -void rtree_delete(rtree_t *rtree); -void rtree_prefork(rtree_t *rtree); -void rtree_postfork_parent(rtree_t *rtree); -void rtree_postfork_child(rtree_t *rtree); +JEMALLOC_ALWAYS_INLINE bool +rtree_leaf_elm_bits_slab_get(uintptr_t bits) { + return (bool)(bits & (uintptr_t)0x1); +} -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +# endif -#ifndef JEMALLOC_ENABLE_INLINE -#ifdef JEMALLOC_DEBUG -uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key); +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_extent_get(bits); +#else + extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent + ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); + return extent; #endif -uint8_t rtree_get(rtree_t *rtree, uintptr_t key); -bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val); +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_szind_get(bits); +#else + return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED + : ATOMIC_ACQUIRE); #endif +} -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) -#define RTREE_GET_GENERATE(f) \ -/* The least significant bits of the key are ignored. */ \ -JEMALLOC_INLINE uint8_t \ -f(rtree_t *rtree, uintptr_t key) \ -{ \ - uint8_t ret; \ - uintptr_t subkey; \ - unsigned i, lshift, height, bits; \ - void **node, **child; \ - \ - RTREE_LOCK(&rtree->mutex); \ - for (i = lshift = 0, height = rtree->height, node = rtree->root;\ - i < height - 1; \ - i++, lshift += bits, node = child) { \ - bits = rtree->level2bits[i]; \ - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \ - 3)) - bits); \ - child = (void**)node[subkey]; \ - if (child == NULL) { \ - RTREE_UNLOCK(&rtree->mutex); \ - return (0); \ - } \ - } \ - \ - /* \ - * node is a leaf, so it contains values rather than node \ - * pointers. 
\ - */ \ - bits = rtree->level2bits[i]; \ - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \ - bits); \ - { \ - uint8_t *leaf = (uint8_t *)node; \ - ret = leaf[subkey]; \ - } \ - RTREE_UNLOCK(&rtree->mutex); \ - \ - RTREE_GET_VALIDATE \ - return (ret); \ -} - -#ifdef JEMALLOC_DEBUG -# define RTREE_LOCK(l) malloc_mutex_lock(l) -# define RTREE_UNLOCK(l) malloc_mutex_unlock(l) -# define RTREE_GET_VALIDATE -RTREE_GET_GENERATE(rtree_get_locked) -# undef RTREE_LOCK -# undef RTREE_UNLOCK -# undef RTREE_GET_VALIDATE +JEMALLOC_ALWAYS_INLINE bool +rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_slab_get(bits); +#else + return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED : + ATOMIC_ACQUIRE); #endif +} -#define RTREE_LOCK(l) -#define RTREE_UNLOCK(l) -#ifdef JEMALLOC_DEBUG - /* - * Suppose that it were possible for a jemalloc-allocated chunk to be - * munmap()ped, followed by a different allocator in another thread re-using - * overlapping virtual memory, all without invalidating the cached rtree - * value. The result would be a false positive (the rtree would claim that - * jemalloc owns memory that it had actually discarded). This scenario - * seems impossible, but the following assertion is a prudent sanity check. - */ -# define RTREE_GET_VALIDATE \ - assert(rtree_get_locked(rtree, key) == ret); +static inline void +rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + extent_t *extent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); + uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << + LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) + | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else -# define RTREE_GET_VALIDATE + atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE); #endif -RTREE_GET_GENERATE(rtree_get) -#undef RTREE_LOCK -#undef RTREE_UNLOCK -#undef RTREE_GET_VALIDATE - -JEMALLOC_INLINE bool -rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val) -{ - uintptr_t subkey; - unsigned i, lshift, height, bits; - void **node, **child; - - malloc_mutex_lock(&rtree->mutex); - for (i = lshift = 0, height = rtree->height, node = rtree->root; - i < height - 1; - i++, lshift += bits, node = child) { - bits = rtree->level2bits[i]; - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - - bits); - child = (void**)node[subkey]; - if (child == NULL) { - size_t size = ((i + 1 < height - 1) ? 
sizeof(void *) - : (sizeof(uint8_t))) << rtree->level2bits[i+1]; - child = (void**)rtree->alloc(size); - if (child == NULL) { - malloc_mutex_unlock(&rtree->mutex); - return (true); - } - memset(child, 0, size); - node[subkey] = child; - } +} + +static inline void +rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + szind_t szind) { + assert(szind <= NSIZES); + +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, + true); + uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | + ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & + (((uintptr_t)0x1 << LG_VADDR) - 1)) | + ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool slab) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, + true); + uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << + LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & + (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + extent_t *extent, szind_t szind, bool slab) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | + ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | + ((uintptr_t)slab); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); + rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); + /* + * Write extent last, since the element is atomically considered valid + * as soon as the extent field is non-NULL. + */ + rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent); +#endif +} + +static inline void +rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, szind_t szind, bool slab) { + assert(!slab || szind < NBINS); + + /* + * The caller implicitly assures that it is the only writer to the szind + * and slab fields, and that the extent field cannot currently change. + */ + rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); + rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); +} + +JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * +rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, bool init_missing) { + assert(key != 0); + assert(!dependent || !init_missing); + + size_t slot = rtree_cache_direct_map(key); + uintptr_t leafkey = rtree_leafkey(key); + assert(leafkey != RTREE_LEAFKEY_INVALID); + + /* Fast path: L1 direct mapped cache. */ + if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { + rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; + assert(leaf != NULL); + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); + return &leaf[subkey]; } + /* + * Search the L2 LRU cache. On hit, swap the matching element into the + * slot in L1 cache, and move the position in L2 up by 1. + */ +#define RTREE_CACHE_CHECK_L2(i) do { \ + if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \ + rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \ + assert(leaf != NULL); \ + if (i > 0) { \ + /* Bubble up by one. 
*/ \ + rtree_ctx->l2_cache[i].leafkey = \ + rtree_ctx->l2_cache[i - 1].leafkey; \ + rtree_ctx->l2_cache[i].leaf = \ + rtree_ctx->l2_cache[i - 1].leaf; \ + rtree_ctx->l2_cache[i - 1].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[i - 1].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } else { \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \ + return &leaf[subkey]; \ + } \ +} while (0) + /* Check the first cache entry. */ + RTREE_CACHE_CHECK_L2(0); + /* Search the remaining cache elements. */ + for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) { + RTREE_CACHE_CHECK_L2(i); + } +#undef RTREE_CACHE_CHECK_L2 + + return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, + dependent, init_missing); +} + +static inline bool +rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, + extent_t *extent, szind_t szind, bool slab) { + /* Use rtree_clear() to set the extent to NULL. */ + assert(extent != NULL); - /* node is a leaf, so it contains values rather than node pointers. */ - bits = rtree->level2bits[i]; - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits); - { - uint8_t *leaf = (uint8_t *)node; - leaf[subkey] = val; + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, false, true); + if (elm == NULL) { + return true; } - malloc_mutex_unlock(&rtree->mutex); - return (false); + assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL); + rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab); + + return false; +} + +JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * +rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, + bool dependent) { + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, dependent, false); + if (!dependent && elm == NULL) { + return NULL; + } + assert(elm != NULL); + return elm; +} + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return NULL; + } + return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return NSIZES; + } + return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); +} + +/* + * rtree_slab_read() is intentionally omitted because slab is always read in + * conjunction with szind, which makes rtree_szind_slab_read() a better choice. 
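 *
 * Sketch of the combined read (extents_rtree and the per-thread rtree_ctx
 * are assumptions about the calling code, not defined in this header):
 *
 *   szind_t szind;
 *   bool slab;
 *   if (!rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
 *       (uintptr_t)ptr, true, &szind, &slab)) {
 *       ... szind and slab now describe the extent containing ptr ...
 *   }
 *
 * Passing dependent == true assumes ptr refers to a live jemalloc
 * allocation; see the "Atomic getters" comment above.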
+ */ + +JEMALLOC_ALWAYS_INLINE bool +rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return true; + } + *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); + *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return true; + } + *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); + *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent); + return false; +} + +static inline void +rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, szind_t szind, bool slab) { + assert(!slab || szind < NBINS); + + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); + rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab); +} + +static inline void +rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); + assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != + NULL); + rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false); } -#endif -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_RTREE_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/size_classes.h b/deps/jemalloc/include/jemalloc/internal/size_classes.h index 821102e5c1..0b7d3cd369 100644 --- a/deps/jemalloc/include/jemalloc/internal/size_classes.h +++ b/deps/jemalloc/include/jemalloc/internal/size_classes.h @@ -1,690 +1,1416 @@ -/* This file was automatically generated by size_classes.sh. 
*/ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - -#define NBINS 31 -#define SMALL_MAXCLASS 3584 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - SIZE_CLASS(31, 512, 4096) \ - SIZE_CLASS(32, 1024, 5120) \ - SIZE_CLASS(33, 1024, 6144) \ - SIZE_CLASS(34, 1024, 7168) \ - -#define NBINS 35 -#define SMALL_MAXCLASS 7168 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 14) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - SIZE_CLASS(31, 512, 4096) \ - SIZE_CLASS(32, 1024, 5120) \ - SIZE_CLASS(33, 1024, 6144) \ - SIZE_CLASS(34, 1024, 7168) \ - SIZE_CLASS(35, 1024, 8192) \ - SIZE_CLASS(36, 2048, 10240) 
\ - SIZE_CLASS(37, 2048, 12288) \ - SIZE_CLASS(38, 2048, 14336) \ - -#define NBINS 39 -#define SMALL_MAXCLASS 14336 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 15) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - SIZE_CLASS(31, 512, 4096) \ - SIZE_CLASS(32, 1024, 5120) \ - SIZE_CLASS(33, 1024, 6144) \ - SIZE_CLASS(34, 1024, 7168) \ - SIZE_CLASS(35, 1024, 8192) \ - SIZE_CLASS(36, 2048, 10240) \ - SIZE_CLASS(37, 2048, 12288) \ - SIZE_CLASS(38, 2048, 14336) \ - SIZE_CLASS(39, 2048, 16384) \ - SIZE_CLASS(40, 4096, 20480) \ - SIZE_CLASS(41, 4096, 24576) \ - SIZE_CLASS(42, 4096, 28672) \ - -#define NBINS 43 -#define SMALL_MAXCLASS 28672 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - SIZE_CLASS(31, 512, 4096) \ - SIZE_CLASS(32, 1024, 5120) \ - SIZE_CLASS(33, 1024, 6144) \ - SIZE_CLASS(34, 1024, 7168) \ - SIZE_CLASS(35, 1024, 8192) \ - SIZE_CLASS(36, 2048, 10240) \ - SIZE_CLASS(37, 2048, 12288) \ - SIZE_CLASS(38, 2048, 14336) \ - SIZE_CLASS(39, 2048, 16384) \ - SIZE_CLASS(40, 4096, 20480) \ - SIZE_CLASS(41, 4096, 24576) \ - SIZE_CLASS(42, 4096, 28672) \ - SIZE_CLASS(43, 4096, 32768) \ - SIZE_CLASS(44, 8192, 40960) \ - SIZE_CLASS(45, 8192, 49152) \ - SIZE_CLASS(46, 8192, 57344) \ - -#define NBINS 47 -#define SMALL_MAXCLASS 57344 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - 
SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ +#ifndef JEMALLOC_INTERNAL_SIZE_CLASSES_H +#define JEMALLOC_INTERNAL_SIZE_CLASSES_H -#define NBINS 28 -#define SMALL_MAXCLASS 3584 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ - SIZE_CLASS(28, 512, 4096) \ - SIZE_CLASS(29, 1024, 5120) \ - SIZE_CLASS(30, 1024, 6144) \ - SIZE_CLASS(31, 1024, 7168) \ - -#define NBINS 32 -#define SMALL_MAXCLASS 7168 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 14) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ - SIZE_CLASS(28, 512, 4096) \ - SIZE_CLASS(29, 1024, 5120) \ - SIZE_CLASS(30, 1024, 6144) \ - SIZE_CLASS(31, 1024, 7168) \ - SIZE_CLASS(32, 1024, 8192) \ - SIZE_CLASS(33, 2048, 10240) \ - SIZE_CLASS(34, 2048, 12288) \ - SIZE_CLASS(35, 2048, 14336) \ +/* This file was automatically generated by size_classes.sh. 
*/ -#define NBINS 36 -#define SMALL_MAXCLASS 14336 -#endif +#include "jemalloc/internal/jemalloc_internal_types.h" -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 15) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ - SIZE_CLASS(28, 512, 4096) \ - SIZE_CLASS(29, 1024, 5120) \ - SIZE_CLASS(30, 1024, 6144) \ - SIZE_CLASS(31, 1024, 7168) \ - SIZE_CLASS(32, 1024, 8192) \ - SIZE_CLASS(33, 2048, 10240) \ - SIZE_CLASS(34, 2048, 12288) \ - SIZE_CLASS(35, 2048, 14336) \ - SIZE_CLASS(36, 2048, 16384) \ - SIZE_CLASS(37, 4096, 20480) \ - SIZE_CLASS(38, 4096, 24576) \ - SIZE_CLASS(39, 4096, 28672) \ +/* + * This header file defines: + * + * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. + * LG_TINY_MIN: Lg of minimum size class to support. + * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz, + * bin, pgs, lg_delta_lookup) tuples. + * index: Size class index. + * lg_grp: Lg group base size (no deltas added). + * lg_delta: Lg delta to previous size class. + * ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta + * psz: 'yes' if a multiple of the page size, 'no' otherwise. + * bin: 'yes' if a small bin size class, 'no' otherwise. + * pgs: Slab page count if a small bin size class, 0 otherwise. + * lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no' + * otherwise. + * NTBINS: Number of tiny bins. + * NLBINS: Number of bins supported by the lookup table. + * NBINS: Number of small size class bins. + * NSIZES: Number of size classes. + * LG_CEIL_NSIZES: Number of bits required to store NSIZES. + * NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE). + * LG_TINY_MAXCLASS: Lg of maximum tiny size class. + * LOOKUP_MAXCLASS: Maximum size class included in lookup table. + * SMALL_MAXCLASS: Maximum small size class. + * LG_LARGE_MINCLASS: Lg of minimum large size class. + * LARGE_MAXCLASS: Maximum (large) size class. 
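 *
 * Worked example, using values from the tables below: the tuple
 *   SC( 12, 7, 5, 1, no, yes, 5, 5)
 * encodes size == (1 << 7) + (1 << 5) == 160 bytes; it is not a multiple of
 * the page size (psz == no), it is a small bin class (bin == yes) whose
 * slabs span 5 pages, and it is covered by the lookup table with
 * lg_delta_lookup == 5. Similarly, SMALL_MAXCLASS in the LG_PAGE == 12
 * configurations expands to (1 << 13) + (3 << 11) == 14336 bytes.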
+ */ -#define NBINS 40 -#define SMALL_MAXCLASS 28672 -#endif +#define LG_SIZE_CLASS_GROUP 2 +#define LG_TINY_MIN 3 -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ - SIZE_CLASS(28, 512, 4096) \ - SIZE_CLASS(29, 1024, 5120) \ - SIZE_CLASS(30, 1024, 6144) \ - SIZE_CLASS(31, 1024, 7168) \ - SIZE_CLASS(32, 1024, 8192) \ - SIZE_CLASS(33, 2048, 10240) \ - SIZE_CLASS(34, 2048, 12288) \ - SIZE_CLASS(35, 2048, 14336) \ - SIZE_CLASS(36, 2048, 16384) \ - SIZE_CLASS(37, 4096, 20480) \ - SIZE_CLASS(38, 4096, 24576) \ - SIZE_CLASS(39, 4096, 28672) \ - SIZE_CLASS(40, 4096, 32768) \ - SIZE_CLASS(41, 8192, 40960) \ - SIZE_CLASS(42, 8192, 49152) \ - SIZE_CLASS(43, 8192, 57344) \ +#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 3, 3, 0, no, yes, 1, 3) \ + SC( 1, 3, 3, 1, no, yes, 1, 3) \ + SC( 2, 3, 3, 2, no, yes, 3, 3) \ + SC( 3, 3, 3, 3, no, yes, 1, 3) \ + \ + SC( 4, 5, 3, 1, no, yes, 5, 3) \ + SC( 5, 5, 3, 2, no, yes, 3, 3) \ + SC( 6, 5, 3, 3, no, yes, 7, 3) \ + SC( 7, 5, 3, 4, no, yes, 1, 3) \ + \ + SC( 8, 6, 4, 1, no, yes, 5, 4) \ + SC( 9, 6, 4, 2, no, yes, 3, 4) \ + SC( 10, 6, 4, 3, no, yes, 7, 4) \ + SC( 11, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 12, 7, 5, 1, no, yes, 5, 5) \ + SC( 13, 7, 5, 2, no, yes, 3, 5) \ + SC( 14, 7, 5, 3, no, yes, 7, 5) \ + SC( 15, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 16, 8, 6, 1, no, yes, 5, 6) \ + SC( 17, 8, 6, 2, no, yes, 3, 6) \ + SC( 18, 8, 6, 3, no, yes, 7, 6) \ + SC( 19, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 20, 9, 7, 1, no, yes, 5, 7) \ + SC( 21, 9, 7, 2, no, yes, 3, 7) \ + SC( 22, 9, 7, 3, no, yes, 7, 7) \ + SC( 23, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 24, 10, 8, 1, no, yes, 5, 8) \ + SC( 25, 10, 8, 2, no, yes, 3, 8) \ + SC( 26, 10, 8, 3, no, yes, 7, 8) \ + SC( 27, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 28, 11, 9, 1, no, yes, 5, 9) \ + SC( 29, 11, 9, 2, no, yes, 3, 9) \ + SC( 30, 11, 9, 3, no, yes, 7, 9) \ + SC( 31, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 32, 12, 10, 1, no, yes, 5, no) \ + SC( 33, 12, 10, 2, no, yes, 3, no) \ + SC( 34, 12, 10, 3, no, yes, 7, no) \ + SC( 35, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 36, 13, 11, 1, no, yes, 5, no) \ + SC( 37, 13, 11, 2, yes, yes, 3, no) \ + SC( 38, 13, 11, 3, no, yes, 7, no) \ + SC( 39, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 40, 14, 12, 1, yes, no, 0, no) \ + SC( 41, 14, 12, 2, yes, no, 0, no) \ + SC( 42, 14, 12, 3, yes, no, 0, no) \ + SC( 43, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 44, 15, 13, 1, yes, no, 0, no) \ + SC( 45, 15, 13, 2, yes, no, 0, no) \ + SC( 46, 15, 13, 3, yes, no, 0, no) \ + SC( 47, 15, 13, 4, yes, no, 0, no) \ + 
\ + SC( 48, 16, 14, 1, yes, no, 0, no) \ + SC( 49, 16, 14, 2, yes, no, 0, no) \ + SC( 50, 16, 14, 3, yes, no, 0, no) \ + SC( 51, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 52, 17, 15, 1, yes, no, 0, no) \ + SC( 53, 17, 15, 2, yes, no, 0, no) \ + SC( 54, 17, 15, 3, yes, no, 0, no) \ + SC( 55, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 56, 18, 16, 1, yes, no, 0, no) \ + SC( 57, 18, 16, 2, yes, no, 0, no) \ + SC( 58, 18, 16, 3, yes, no, 0, no) \ + SC( 59, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 60, 19, 17, 1, yes, no, 0, no) \ + SC( 61, 19, 17, 2, yes, no, 0, no) \ + SC( 62, 19, 17, 3, yes, no, 0, no) \ + SC( 63, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 64, 20, 18, 1, yes, no, 0, no) \ + SC( 65, 20, 18, 2, yes, no, 0, no) \ + SC( 66, 20, 18, 3, yes, no, 0, no) \ + SC( 67, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 68, 21, 19, 1, yes, no, 0, no) \ + SC( 69, 21, 19, 2, yes, no, 0, no) \ + SC( 70, 21, 19, 3, yes, no, 0, no) \ + SC( 71, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 72, 22, 20, 1, yes, no, 0, no) \ + SC( 73, 22, 20, 2, yes, no, 0, no) \ + SC( 74, 22, 20, 3, yes, no, 0, no) \ + SC( 75, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 76, 23, 21, 1, yes, no, 0, no) \ + SC( 77, 23, 21, 2, yes, no, 0, no) \ + SC( 78, 23, 21, 3, yes, no, 0, no) \ + SC( 79, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 80, 24, 22, 1, yes, no, 0, no) \ + SC( 81, 24, 22, 2, yes, no, 0, no) \ + SC( 82, 24, 22, 3, yes, no, 0, no) \ + SC( 83, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 84, 25, 23, 1, yes, no, 0, no) \ + SC( 85, 25, 23, 2, yes, no, 0, no) \ + SC( 86, 25, 23, 3, yes, no, 0, no) \ + SC( 87, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 88, 26, 24, 1, yes, no, 0, no) \ + SC( 89, 26, 24, 2, yes, no, 0, no) \ + SC( 90, 26, 24, 3, yes, no, 0, no) \ + SC( 91, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 92, 27, 25, 1, yes, no, 0, no) \ + SC( 93, 27, 25, 2, yes, no, 0, no) \ + SC( 94, 27, 25, 3, yes, no, 0, no) \ + SC( 95, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 96, 28, 26, 1, yes, no, 0, no) \ + SC( 97, 28, 26, 2, yes, no, 0, no) \ + SC( 98, 28, 26, 3, yes, no, 0, no) \ + SC( 99, 28, 26, 4, yes, no, 0, no) \ + \ + SC(100, 29, 27, 1, yes, no, 0, no) \ + SC(101, 29, 27, 2, yes, no, 0, no) \ + SC(102, 29, 27, 3, yes, no, 0, no) \ + SC(103, 29, 27, 4, yes, no, 0, no) \ + \ + SC(104, 30, 28, 1, yes, no, 0, no) \ + SC(105, 30, 28, 2, yes, no, 0, no) \ + SC(106, 30, 28, 3, yes, no, 0, no) \ -#define NBINS 44 -#define SMALL_MAXCLASS 57344 +#define SIZE_CLASSES_DEFINED +#define NTBINS 0 +#define NLBINS 32 +#define NBINS 39 +#define NSIZES 107 +#define LG_CEIL_NSIZES 7 +#define NPSIZES 71 +#define LG_TINY_MAXCLASS "NA" +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - 
SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ +#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 3, 3, 0, no, yes, 1, 3) \ + \ + SC( 1, 3, 3, 1, no, yes, 1, 3) \ + SC( 2, 4, 4, 1, no, yes, 1, 4) \ + SC( 3, 4, 4, 2, no, yes, 3, 4) \ + SC( 4, 4, 4, 3, no, yes, 1, 4) \ + \ + SC( 5, 6, 4, 1, no, yes, 5, 4) \ + SC( 6, 6, 4, 2, no, yes, 3, 4) \ + SC( 7, 6, 4, 3, no, yes, 7, 4) \ + SC( 8, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 9, 7, 5, 1, no, yes, 5, 5) \ + SC( 10, 7, 5, 2, no, yes, 3, 5) \ + SC( 11, 7, 5, 3, no, yes, 7, 5) \ + SC( 12, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 13, 8, 6, 1, no, yes, 5, 6) \ + SC( 14, 8, 6, 2, no, yes, 3, 6) \ + SC( 15, 8, 6, 3, no, yes, 7, 6) \ + SC( 16, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 17, 9, 7, 1, no, yes, 5, 7) \ + SC( 18, 9, 7, 2, no, yes, 3, 7) \ + SC( 19, 9, 7, 3, no, yes, 7, 7) \ + SC( 20, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 21, 10, 8, 1, no, yes, 5, 8) \ + SC( 22, 10, 8, 2, no, yes, 3, 8) \ + SC( 23, 10, 8, 3, no, yes, 7, 8) \ + SC( 24, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 25, 11, 9, 1, no, yes, 5, 9) \ + SC( 26, 11, 9, 2, no, yes, 3, 9) \ + SC( 27, 11, 9, 3, no, yes, 7, 9) \ + SC( 28, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 29, 12, 10, 1, no, yes, 5, no) \ + SC( 30, 12, 10, 2, no, yes, 3, no) \ + SC( 31, 12, 10, 3, no, yes, 7, no) \ + SC( 32, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 33, 13, 11, 1, no, yes, 5, no) \ + SC( 34, 13, 11, 2, yes, yes, 3, no) \ + SC( 35, 13, 11, 3, no, yes, 7, no) \ + SC( 36, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 37, 14, 12, 1, yes, no, 0, no) \ + SC( 38, 14, 12, 2, yes, no, 0, no) \ + SC( 39, 14, 12, 3, yes, no, 0, no) \ + SC( 40, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 41, 15, 13, 1, yes, no, 0, no) \ + SC( 42, 15, 13, 2, yes, no, 0, no) \ + SC( 43, 15, 13, 3, yes, no, 0, no) \ + SC( 44, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 45, 16, 14, 1, yes, no, 0, no) \ + SC( 46, 16, 14, 2, yes, no, 0, no) \ + SC( 47, 16, 14, 3, yes, no, 0, no) \ + SC( 48, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 49, 17, 15, 1, yes, no, 0, no) \ + SC( 50, 17, 15, 2, yes, no, 0, no) \ + SC( 51, 17, 15, 3, yes, no, 0, no) \ + SC( 52, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 53, 18, 16, 1, yes, no, 0, no) \ + SC( 54, 18, 16, 2, yes, no, 0, no) \ + SC( 55, 18, 16, 3, yes, no, 0, no) \ + SC( 56, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 57, 19, 17, 1, yes, no, 0, no) \ + SC( 58, 19, 17, 2, yes, no, 0, no) \ + SC( 59, 19, 17, 3, yes, no, 0, no) \ + SC( 60, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 61, 20, 18, 1, yes, no, 0, no) \ + SC( 62, 20, 18, 2, yes, no, 0, no) \ + SC( 63, 20, 18, 3, yes, no, 0, no) \ + SC( 64, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 65, 21, 19, 1, yes, no, 0, no) \ + SC( 66, 21, 19, 2, yes, no, 0, no) \ + SC( 67, 21, 19, 3, yes, no, 0, no) \ + SC( 68, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 69, 22, 20, 1, yes, no, 0, no) \ + SC( 70, 22, 20, 2, yes, no, 0, no) \ + SC( 71, 22, 20, 3, yes, no, 0, no) \ + SC( 72, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 73, 23, 21, 1, yes, no, 0, no) \ + SC( 74, 23, 21, 2, yes, no, 0, no) \ + SC( 75, 23, 21, 3, yes, no, 0, no) \ + SC( 76, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 77, 24, 22, 1, yes, no, 0, no) \ + SC( 78, 24, 22, 2, yes, no, 0, no) \ + SC( 79, 24, 22, 3, yes, no, 0, no) \ + SC( 80, 24, 22, 4, 
yes, no, 0, no) \ + \ + SC( 81, 25, 23, 1, yes, no, 0, no) \ + SC( 82, 25, 23, 2, yes, no, 0, no) \ + SC( 83, 25, 23, 3, yes, no, 0, no) \ + SC( 84, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 85, 26, 24, 1, yes, no, 0, no) \ + SC( 86, 26, 24, 2, yes, no, 0, no) \ + SC( 87, 26, 24, 3, yes, no, 0, no) \ + SC( 88, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 89, 27, 25, 1, yes, no, 0, no) \ + SC( 90, 27, 25, 2, yes, no, 0, no) \ + SC( 91, 27, 25, 3, yes, no, 0, no) \ + SC( 92, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 93, 28, 26, 1, yes, no, 0, no) \ + SC( 94, 28, 26, 2, yes, no, 0, no) \ + SC( 95, 28, 26, 3, yes, no, 0, no) \ + SC( 96, 28, 26, 4, yes, no, 0, no) \ + \ + SC( 97, 29, 27, 1, yes, no, 0, no) \ + SC( 98, 29, 27, 2, yes, no, 0, no) \ + SC( 99, 29, 27, 3, yes, no, 0, no) \ + SC(100, 29, 27, 4, yes, no, 0, no) \ + \ + SC(101, 30, 28, 1, yes, no, 0, no) \ + SC(102, 30, 28, 2, yes, no, 0, no) \ + SC(103, 30, 28, 3, yes, no, 0, no) \ -#define NBINS 27 -#define SMALL_MAXCLASS 3584 +#define SIZE_CLASSES_DEFINED +#define NTBINS 1 +#define NLBINS 29 +#define NBINS 36 +#define NSIZES 104 +#define LG_CEIL_NSIZES 7 +#define NPSIZES 71 +#define LG_TINY_MAXCLASS 3 +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ - SIZE_CLASS(27, 512, 4096) \ - SIZE_CLASS(28, 1024, 5120) \ - SIZE_CLASS(29, 1024, 6144) \ - SIZE_CLASS(30, 1024, 7168) \ +#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 4, 4, 0, no, yes, 1, 4) \ + SC( 1, 4, 4, 1, no, yes, 1, 4) \ + SC( 2, 4, 4, 2, no, yes, 3, 4) \ + SC( 3, 4, 4, 3, no, yes, 1, 4) \ + \ + SC( 4, 6, 4, 1, no, yes, 5, 4) \ + SC( 5, 6, 4, 2, no, yes, 3, 4) \ + SC( 6, 6, 4, 3, no, yes, 7, 4) \ + SC( 7, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 8, 7, 5, 1, no, yes, 5, 5) \ + SC( 9, 7, 5, 2, no, yes, 3, 5) \ + SC( 10, 7, 5, 3, no, yes, 7, 5) \ + SC( 11, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 12, 8, 6, 1, no, yes, 5, 6) \ + SC( 13, 8, 6, 2, no, yes, 3, 6) \ + SC( 14, 8, 6, 3, no, yes, 7, 6) \ + SC( 15, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 16, 9, 7, 1, no, yes, 5, 7) \ + SC( 17, 9, 7, 2, no, yes, 3, 7) \ + SC( 18, 9, 7, 3, no, yes, 7, 7) \ + SC( 19, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 20, 10, 8, 1, no, yes, 5, 8) \ + SC( 21, 10, 8, 2, no, yes, 3, 8) \ + SC( 22, 10, 8, 3, no, yes, 7, 8) \ + SC( 23, 10, 8, 4, no, yes, 1, 8) \ + \ + 
SC( 24, 11, 9, 1, no, yes, 5, 9) \ + SC( 25, 11, 9, 2, no, yes, 3, 9) \ + SC( 26, 11, 9, 3, no, yes, 7, 9) \ + SC( 27, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 28, 12, 10, 1, no, yes, 5, no) \ + SC( 29, 12, 10, 2, no, yes, 3, no) \ + SC( 30, 12, 10, 3, no, yes, 7, no) \ + SC( 31, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 32, 13, 11, 1, no, yes, 5, no) \ + SC( 33, 13, 11, 2, yes, yes, 3, no) \ + SC( 34, 13, 11, 3, no, yes, 7, no) \ + SC( 35, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 36, 14, 12, 1, yes, no, 0, no) \ + SC( 37, 14, 12, 2, yes, no, 0, no) \ + SC( 38, 14, 12, 3, yes, no, 0, no) \ + SC( 39, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 40, 15, 13, 1, yes, no, 0, no) \ + SC( 41, 15, 13, 2, yes, no, 0, no) \ + SC( 42, 15, 13, 3, yes, no, 0, no) \ + SC( 43, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 44, 16, 14, 1, yes, no, 0, no) \ + SC( 45, 16, 14, 2, yes, no, 0, no) \ + SC( 46, 16, 14, 3, yes, no, 0, no) \ + SC( 47, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 48, 17, 15, 1, yes, no, 0, no) \ + SC( 49, 17, 15, 2, yes, no, 0, no) \ + SC( 50, 17, 15, 3, yes, no, 0, no) \ + SC( 51, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 52, 18, 16, 1, yes, no, 0, no) \ + SC( 53, 18, 16, 2, yes, no, 0, no) \ + SC( 54, 18, 16, 3, yes, no, 0, no) \ + SC( 55, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 56, 19, 17, 1, yes, no, 0, no) \ + SC( 57, 19, 17, 2, yes, no, 0, no) \ + SC( 58, 19, 17, 3, yes, no, 0, no) \ + SC( 59, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 60, 20, 18, 1, yes, no, 0, no) \ + SC( 61, 20, 18, 2, yes, no, 0, no) \ + SC( 62, 20, 18, 3, yes, no, 0, no) \ + SC( 63, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 64, 21, 19, 1, yes, no, 0, no) \ + SC( 65, 21, 19, 2, yes, no, 0, no) \ + SC( 66, 21, 19, 3, yes, no, 0, no) \ + SC( 67, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 68, 22, 20, 1, yes, no, 0, no) \ + SC( 69, 22, 20, 2, yes, no, 0, no) \ + SC( 70, 22, 20, 3, yes, no, 0, no) \ + SC( 71, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 72, 23, 21, 1, yes, no, 0, no) \ + SC( 73, 23, 21, 2, yes, no, 0, no) \ + SC( 74, 23, 21, 3, yes, no, 0, no) \ + SC( 75, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 76, 24, 22, 1, yes, no, 0, no) \ + SC( 77, 24, 22, 2, yes, no, 0, no) \ + SC( 78, 24, 22, 3, yes, no, 0, no) \ + SC( 79, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 80, 25, 23, 1, yes, no, 0, no) \ + SC( 81, 25, 23, 2, yes, no, 0, no) \ + SC( 82, 25, 23, 3, yes, no, 0, no) \ + SC( 83, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 84, 26, 24, 1, yes, no, 0, no) \ + SC( 85, 26, 24, 2, yes, no, 0, no) \ + SC( 86, 26, 24, 3, yes, no, 0, no) \ + SC( 87, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 88, 27, 25, 1, yes, no, 0, no) \ + SC( 89, 27, 25, 2, yes, no, 0, no) \ + SC( 90, 27, 25, 3, yes, no, 0, no) \ + SC( 91, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 92, 28, 26, 1, yes, no, 0, no) \ + SC( 93, 28, 26, 2, yes, no, 0, no) \ + SC( 94, 28, 26, 3, yes, no, 0, no) \ + SC( 95, 28, 26, 4, yes, no, 0, no) \ + \ + SC( 96, 29, 27, 1, yes, no, 0, no) \ + SC( 97, 29, 27, 2, yes, no, 0, no) \ + SC( 98, 29, 27, 3, yes, no, 0, no) \ + SC( 99, 29, 27, 4, yes, no, 0, no) \ + \ + SC(100, 30, 28, 1, yes, no, 0, no) \ + SC(101, 30, 28, 2, yes, no, 0, no) \ + SC(102, 30, 28, 3, yes, no, 0, no) \ -#define NBINS 31 -#define SMALL_MAXCLASS 7168 +#define SIZE_CLASSES_DEFINED +#define NTBINS 0 +#define NLBINS 28 +#define NBINS 35 +#define NSIZES 103 +#define LG_CEIL_NSIZES 7 +#define NPSIZES 71 +#define LG_TINY_MAXCLASS "NA" +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define 
LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 14) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ - SIZE_CLASS(27, 512, 4096) \ - SIZE_CLASS(28, 1024, 5120) \ - SIZE_CLASS(29, 1024, 6144) \ - SIZE_CLASS(30, 1024, 7168) \ - SIZE_CLASS(31, 1024, 8192) \ - SIZE_CLASS(32, 2048, 10240) \ - SIZE_CLASS(33, 2048, 12288) \ - SIZE_CLASS(34, 2048, 14336) \ +#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 3, 3, 0, no, yes, 1, 3) \ + SC( 1, 3, 3, 1, no, yes, 1, 3) \ + SC( 2, 3, 3, 2, no, yes, 3, 3) \ + SC( 3, 3, 3, 3, no, yes, 1, 3) \ + \ + SC( 4, 5, 3, 1, no, yes, 5, 3) \ + SC( 5, 5, 3, 2, no, yes, 3, 3) \ + SC( 6, 5, 3, 3, no, yes, 7, 3) \ + SC( 7, 5, 3, 4, no, yes, 1, 3) \ + \ + SC( 8, 6, 4, 1, no, yes, 5, 4) \ + SC( 9, 6, 4, 2, no, yes, 3, 4) \ + SC( 10, 6, 4, 3, no, yes, 7, 4) \ + SC( 11, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 12, 7, 5, 1, no, yes, 5, 5) \ + SC( 13, 7, 5, 2, no, yes, 3, 5) \ + SC( 14, 7, 5, 3, no, yes, 7, 5) \ + SC( 15, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 16, 8, 6, 1, no, yes, 5, 6) \ + SC( 17, 8, 6, 2, no, yes, 3, 6) \ + SC( 18, 8, 6, 3, no, yes, 7, 6) \ + SC( 19, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 20, 9, 7, 1, no, yes, 5, 7) \ + SC( 21, 9, 7, 2, no, yes, 3, 7) \ + SC( 22, 9, 7, 3, no, yes, 7, 7) \ + SC( 23, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 24, 10, 8, 1, no, yes, 5, 8) \ + SC( 25, 10, 8, 2, no, yes, 3, 8) \ + SC( 26, 10, 8, 3, no, yes, 7, 8) \ + SC( 27, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 28, 11, 9, 1, no, yes, 5, 9) \ + SC( 29, 11, 9, 2, no, yes, 3, 9) \ + SC( 30, 11, 9, 3, no, yes, 7, 9) \ + SC( 31, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 32, 12, 10, 1, no, yes, 5, no) \ + SC( 33, 12, 10, 2, no, yes, 3, no) \ + SC( 34, 12, 10, 3, no, yes, 7, no) \ + SC( 35, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 36, 13, 11, 1, no, yes, 5, no) \ + SC( 37, 13, 11, 2, yes, yes, 3, no) \ + SC( 38, 13, 11, 3, no, yes, 7, no) \ + SC( 39, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 40, 14, 12, 1, yes, no, 0, no) \ + SC( 41, 14, 12, 2, yes, no, 0, no) \ + SC( 42, 14, 12, 3, yes, no, 0, no) \ + SC( 43, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 44, 15, 13, 1, yes, no, 0, no) \ + SC( 45, 15, 13, 2, yes, no, 0, no) \ + SC( 46, 15, 13, 3, yes, no, 0, no) \ + SC( 47, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 48, 16, 14, 1, yes, no, 0, no) \ + SC( 49, 16, 14, 2, yes, no, 0, no) \ + SC( 50, 16, 14, 3, yes, no, 0, no) \ + SC( 51, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 52, 17, 15, 1, yes, no, 0, no) \ + SC( 53, 17, 15, 2, yes, no, 0, no) \ + SC( 54, 17, 15, 3, yes, no, 0, 
no) \ + SC( 55, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 56, 18, 16, 1, yes, no, 0, no) \ + SC( 57, 18, 16, 2, yes, no, 0, no) \ + SC( 58, 18, 16, 3, yes, no, 0, no) \ + SC( 59, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 60, 19, 17, 1, yes, no, 0, no) \ + SC( 61, 19, 17, 2, yes, no, 0, no) \ + SC( 62, 19, 17, 3, yes, no, 0, no) \ + SC( 63, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 64, 20, 18, 1, yes, no, 0, no) \ + SC( 65, 20, 18, 2, yes, no, 0, no) \ + SC( 66, 20, 18, 3, yes, no, 0, no) \ + SC( 67, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 68, 21, 19, 1, yes, no, 0, no) \ + SC( 69, 21, 19, 2, yes, no, 0, no) \ + SC( 70, 21, 19, 3, yes, no, 0, no) \ + SC( 71, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 72, 22, 20, 1, yes, no, 0, no) \ + SC( 73, 22, 20, 2, yes, no, 0, no) \ + SC( 74, 22, 20, 3, yes, no, 0, no) \ + SC( 75, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 76, 23, 21, 1, yes, no, 0, no) \ + SC( 77, 23, 21, 2, yes, no, 0, no) \ + SC( 78, 23, 21, 3, yes, no, 0, no) \ + SC( 79, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 80, 24, 22, 1, yes, no, 0, no) \ + SC( 81, 24, 22, 2, yes, no, 0, no) \ + SC( 82, 24, 22, 3, yes, no, 0, no) \ + SC( 83, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 84, 25, 23, 1, yes, no, 0, no) \ + SC( 85, 25, 23, 2, yes, no, 0, no) \ + SC( 86, 25, 23, 3, yes, no, 0, no) \ + SC( 87, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 88, 26, 24, 1, yes, no, 0, no) \ + SC( 89, 26, 24, 2, yes, no, 0, no) \ + SC( 90, 26, 24, 3, yes, no, 0, no) \ + SC( 91, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 92, 27, 25, 1, yes, no, 0, no) \ + SC( 93, 27, 25, 2, yes, no, 0, no) \ + SC( 94, 27, 25, 3, yes, no, 0, no) \ + SC( 95, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 96, 28, 26, 1, yes, no, 0, no) \ + SC( 97, 28, 26, 2, yes, no, 0, no) \ + SC( 98, 28, 26, 3, yes, no, 0, no) \ + SC( 99, 28, 26, 4, yes, no, 0, no) \ + \ + SC(100, 29, 27, 1, yes, no, 0, no) \ + SC(101, 29, 27, 2, yes, no, 0, no) \ + SC(102, 29, 27, 3, yes, no, 0, no) \ + SC(103, 29, 27, 4, yes, no, 0, no) \ + \ + SC(104, 30, 28, 1, yes, no, 0, no) \ + SC(105, 30, 28, 2, yes, no, 0, no) \ + SC(106, 30, 28, 3, yes, no, 0, no) \ + SC(107, 30, 28, 4, yes, no, 0, no) \ + \ + SC(108, 31, 29, 1, yes, no, 0, no) \ + SC(109, 31, 29, 2, yes, no, 0, no) \ + SC(110, 31, 29, 3, yes, no, 0, no) \ + SC(111, 31, 29, 4, yes, no, 0, no) \ + \ + SC(112, 32, 30, 1, yes, no, 0, no) \ + SC(113, 32, 30, 2, yes, no, 0, no) \ + SC(114, 32, 30, 3, yes, no, 0, no) \ + SC(115, 32, 30, 4, yes, no, 0, no) \ + \ + SC(116, 33, 31, 1, yes, no, 0, no) \ + SC(117, 33, 31, 2, yes, no, 0, no) \ + SC(118, 33, 31, 3, yes, no, 0, no) \ + SC(119, 33, 31, 4, yes, no, 0, no) \ + \ + SC(120, 34, 32, 1, yes, no, 0, no) \ + SC(121, 34, 32, 2, yes, no, 0, no) \ + SC(122, 34, 32, 3, yes, no, 0, no) \ + SC(123, 34, 32, 4, yes, no, 0, no) \ + \ + SC(124, 35, 33, 1, yes, no, 0, no) \ + SC(125, 35, 33, 2, yes, no, 0, no) \ + SC(126, 35, 33, 3, yes, no, 0, no) \ + SC(127, 35, 33, 4, yes, no, 0, no) \ + \ + SC(128, 36, 34, 1, yes, no, 0, no) \ + SC(129, 36, 34, 2, yes, no, 0, no) \ + SC(130, 36, 34, 3, yes, no, 0, no) \ + SC(131, 36, 34, 4, yes, no, 0, no) \ + \ + SC(132, 37, 35, 1, yes, no, 0, no) \ + SC(133, 37, 35, 2, yes, no, 0, no) \ + SC(134, 37, 35, 3, yes, no, 0, no) \ + SC(135, 37, 35, 4, yes, no, 0, no) \ + \ + SC(136, 38, 36, 1, yes, no, 0, no) \ + SC(137, 38, 36, 2, yes, no, 0, no) \ + SC(138, 38, 36, 3, yes, no, 0, no) \ + SC(139, 38, 36, 4, yes, no, 0, no) \ + \ + SC(140, 39, 37, 1, yes, no, 0, no) \ + SC(141, 39, 37, 2, yes, no, 0, no) \ + SC(142, 39, 37, 3, yes, no, 0, no) \ + SC(143, 39, 37, 4, yes, 
no, 0, no) \ + \ + SC(144, 40, 38, 1, yes, no, 0, no) \ + SC(145, 40, 38, 2, yes, no, 0, no) \ + SC(146, 40, 38, 3, yes, no, 0, no) \ + SC(147, 40, 38, 4, yes, no, 0, no) \ + \ + SC(148, 41, 39, 1, yes, no, 0, no) \ + SC(149, 41, 39, 2, yes, no, 0, no) \ + SC(150, 41, 39, 3, yes, no, 0, no) \ + SC(151, 41, 39, 4, yes, no, 0, no) \ + \ + SC(152, 42, 40, 1, yes, no, 0, no) \ + SC(153, 42, 40, 2, yes, no, 0, no) \ + SC(154, 42, 40, 3, yes, no, 0, no) \ + SC(155, 42, 40, 4, yes, no, 0, no) \ + \ + SC(156, 43, 41, 1, yes, no, 0, no) \ + SC(157, 43, 41, 2, yes, no, 0, no) \ + SC(158, 43, 41, 3, yes, no, 0, no) \ + SC(159, 43, 41, 4, yes, no, 0, no) \ + \ + SC(160, 44, 42, 1, yes, no, 0, no) \ + SC(161, 44, 42, 2, yes, no, 0, no) \ + SC(162, 44, 42, 3, yes, no, 0, no) \ + SC(163, 44, 42, 4, yes, no, 0, no) \ + \ + SC(164, 45, 43, 1, yes, no, 0, no) \ + SC(165, 45, 43, 2, yes, no, 0, no) \ + SC(166, 45, 43, 3, yes, no, 0, no) \ + SC(167, 45, 43, 4, yes, no, 0, no) \ + \ + SC(168, 46, 44, 1, yes, no, 0, no) \ + SC(169, 46, 44, 2, yes, no, 0, no) \ + SC(170, 46, 44, 3, yes, no, 0, no) \ + SC(171, 46, 44, 4, yes, no, 0, no) \ + \ + SC(172, 47, 45, 1, yes, no, 0, no) \ + SC(173, 47, 45, 2, yes, no, 0, no) \ + SC(174, 47, 45, 3, yes, no, 0, no) \ + SC(175, 47, 45, 4, yes, no, 0, no) \ + \ + SC(176, 48, 46, 1, yes, no, 0, no) \ + SC(177, 48, 46, 2, yes, no, 0, no) \ + SC(178, 48, 46, 3, yes, no, 0, no) \ + SC(179, 48, 46, 4, yes, no, 0, no) \ + \ + SC(180, 49, 47, 1, yes, no, 0, no) \ + SC(181, 49, 47, 2, yes, no, 0, no) \ + SC(182, 49, 47, 3, yes, no, 0, no) \ + SC(183, 49, 47, 4, yes, no, 0, no) \ + \ + SC(184, 50, 48, 1, yes, no, 0, no) \ + SC(185, 50, 48, 2, yes, no, 0, no) \ + SC(186, 50, 48, 3, yes, no, 0, no) \ + SC(187, 50, 48, 4, yes, no, 0, no) \ + \ + SC(188, 51, 49, 1, yes, no, 0, no) \ + SC(189, 51, 49, 2, yes, no, 0, no) \ + SC(190, 51, 49, 3, yes, no, 0, no) \ + SC(191, 51, 49, 4, yes, no, 0, no) \ + \ + SC(192, 52, 50, 1, yes, no, 0, no) \ + SC(193, 52, 50, 2, yes, no, 0, no) \ + SC(194, 52, 50, 3, yes, no, 0, no) \ + SC(195, 52, 50, 4, yes, no, 0, no) \ + \ + SC(196, 53, 51, 1, yes, no, 0, no) \ + SC(197, 53, 51, 2, yes, no, 0, no) \ + SC(198, 53, 51, 3, yes, no, 0, no) \ + SC(199, 53, 51, 4, yes, no, 0, no) \ + \ + SC(200, 54, 52, 1, yes, no, 0, no) \ + SC(201, 54, 52, 2, yes, no, 0, no) \ + SC(202, 54, 52, 3, yes, no, 0, no) \ + SC(203, 54, 52, 4, yes, no, 0, no) \ + \ + SC(204, 55, 53, 1, yes, no, 0, no) \ + SC(205, 55, 53, 2, yes, no, 0, no) \ + SC(206, 55, 53, 3, yes, no, 0, no) \ + SC(207, 55, 53, 4, yes, no, 0, no) \ + \ + SC(208, 56, 54, 1, yes, no, 0, no) \ + SC(209, 56, 54, 2, yes, no, 0, no) \ + SC(210, 56, 54, 3, yes, no, 0, no) \ + SC(211, 56, 54, 4, yes, no, 0, no) \ + \ + SC(212, 57, 55, 1, yes, no, 0, no) \ + SC(213, 57, 55, 2, yes, no, 0, no) \ + SC(214, 57, 55, 3, yes, no, 0, no) \ + SC(215, 57, 55, 4, yes, no, 0, no) \ + \ + SC(216, 58, 56, 1, yes, no, 0, no) \ + SC(217, 58, 56, 2, yes, no, 0, no) \ + SC(218, 58, 56, 3, yes, no, 0, no) \ + SC(219, 58, 56, 4, yes, no, 0, no) \ + \ + SC(220, 59, 57, 1, yes, no, 0, no) \ + SC(221, 59, 57, 2, yes, no, 0, no) \ + SC(222, 59, 57, 3, yes, no, 0, no) \ + SC(223, 59, 57, 4, yes, no, 0, no) \ + \ + SC(224, 60, 58, 1, yes, no, 0, no) \ + SC(225, 60, 58, 2, yes, no, 0, no) \ + SC(226, 60, 58, 3, yes, no, 0, no) \ + SC(227, 60, 58, 4, yes, no, 0, no) \ + \ + SC(228, 61, 59, 1, yes, no, 0, no) \ + SC(229, 61, 59, 2, yes, no, 0, no) \ + SC(230, 61, 59, 3, yes, no, 0, no) \ + SC(231, 61, 59, 4, yes, no, 0, no) \ + \ + SC(232, 62, 60, 
1, yes, no, 0, no) \ + SC(233, 62, 60, 2, yes, no, 0, no) \ + SC(234, 62, 60, 3, yes, no, 0, no) \ -#define NBINS 35 -#define SMALL_MAXCLASS 14336 +#define SIZE_CLASSES_DEFINED +#define NTBINS 0 +#define NLBINS 32 +#define NBINS 39 +#define NSIZES 235 +#define LG_CEIL_NSIZES 8 +#define NPSIZES 199 +#define LG_TINY_MAXCLASS "NA" +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 15) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ - SIZE_CLASS(27, 512, 4096) \ - SIZE_CLASS(28, 1024, 5120) \ - SIZE_CLASS(29, 1024, 6144) \ - SIZE_CLASS(30, 1024, 7168) \ - SIZE_CLASS(31, 1024, 8192) \ - SIZE_CLASS(32, 2048, 10240) \ - SIZE_CLASS(33, 2048, 12288) \ - SIZE_CLASS(34, 2048, 14336) \ - SIZE_CLASS(35, 2048, 16384) \ - SIZE_CLASS(36, 4096, 20480) \ - SIZE_CLASS(37, 4096, 24576) \ - SIZE_CLASS(38, 4096, 28672) \ +#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 3, 3, 0, no, yes, 1, 3) \ + \ + SC( 1, 3, 3, 1, no, yes, 1, 3) \ + SC( 2, 4, 4, 1, no, yes, 1, 4) \ + SC( 3, 4, 4, 2, no, yes, 3, 4) \ + SC( 4, 4, 4, 3, no, yes, 1, 4) \ + \ + SC( 5, 6, 4, 1, no, yes, 5, 4) \ + SC( 6, 6, 4, 2, no, yes, 3, 4) \ + SC( 7, 6, 4, 3, no, yes, 7, 4) \ + SC( 8, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 9, 7, 5, 1, no, yes, 5, 5) \ + SC( 10, 7, 5, 2, no, yes, 3, 5) \ + SC( 11, 7, 5, 3, no, yes, 7, 5) \ + SC( 12, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 13, 8, 6, 1, no, yes, 5, 6) \ + SC( 14, 8, 6, 2, no, yes, 3, 6) \ + SC( 15, 8, 6, 3, no, yes, 7, 6) \ + SC( 16, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 17, 9, 7, 1, no, yes, 5, 7) \ + SC( 18, 9, 7, 2, no, yes, 3, 7) \ + SC( 19, 9, 7, 3, no, yes, 7, 7) \ + SC( 20, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 21, 10, 8, 1, no, yes, 5, 8) \ + SC( 22, 10, 8, 2, no, yes, 3, 8) \ + SC( 23, 10, 8, 3, no, yes, 7, 8) \ + SC( 24, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 25, 11, 9, 1, no, yes, 5, 9) \ + SC( 26, 11, 9, 2, no, yes, 3, 9) \ + SC( 27, 11, 9, 3, no, yes, 7, 9) \ + SC( 28, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 29, 12, 10, 1, no, yes, 5, no) \ + SC( 30, 12, 10, 2, no, yes, 3, no) \ + SC( 31, 12, 10, 3, no, yes, 7, no) \ + SC( 32, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 33, 13, 11, 1, no, yes, 5, no) \ + SC( 34, 13, 11, 2, yes, yes, 3, no) \ + SC( 35, 13, 11, 3, no, yes, 7, no) \ + SC( 36, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 37, 14, 12, 1, yes, no, 0, no) \ + SC( 38, 14, 12, 2, yes, no, 
0, no) \ + SC( 39, 14, 12, 3, yes, no, 0, no) \ + SC( 40, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 41, 15, 13, 1, yes, no, 0, no) \ + SC( 42, 15, 13, 2, yes, no, 0, no) \ + SC( 43, 15, 13, 3, yes, no, 0, no) \ + SC( 44, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 45, 16, 14, 1, yes, no, 0, no) \ + SC( 46, 16, 14, 2, yes, no, 0, no) \ + SC( 47, 16, 14, 3, yes, no, 0, no) \ + SC( 48, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 49, 17, 15, 1, yes, no, 0, no) \ + SC( 50, 17, 15, 2, yes, no, 0, no) \ + SC( 51, 17, 15, 3, yes, no, 0, no) \ + SC( 52, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 53, 18, 16, 1, yes, no, 0, no) \ + SC( 54, 18, 16, 2, yes, no, 0, no) \ + SC( 55, 18, 16, 3, yes, no, 0, no) \ + SC( 56, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 57, 19, 17, 1, yes, no, 0, no) \ + SC( 58, 19, 17, 2, yes, no, 0, no) \ + SC( 59, 19, 17, 3, yes, no, 0, no) \ + SC( 60, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 61, 20, 18, 1, yes, no, 0, no) \ + SC( 62, 20, 18, 2, yes, no, 0, no) \ + SC( 63, 20, 18, 3, yes, no, 0, no) \ + SC( 64, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 65, 21, 19, 1, yes, no, 0, no) \ + SC( 66, 21, 19, 2, yes, no, 0, no) \ + SC( 67, 21, 19, 3, yes, no, 0, no) \ + SC( 68, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 69, 22, 20, 1, yes, no, 0, no) \ + SC( 70, 22, 20, 2, yes, no, 0, no) \ + SC( 71, 22, 20, 3, yes, no, 0, no) \ + SC( 72, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 73, 23, 21, 1, yes, no, 0, no) \ + SC( 74, 23, 21, 2, yes, no, 0, no) \ + SC( 75, 23, 21, 3, yes, no, 0, no) \ + SC( 76, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 77, 24, 22, 1, yes, no, 0, no) \ + SC( 78, 24, 22, 2, yes, no, 0, no) \ + SC( 79, 24, 22, 3, yes, no, 0, no) \ + SC( 80, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 81, 25, 23, 1, yes, no, 0, no) \ + SC( 82, 25, 23, 2, yes, no, 0, no) \ + SC( 83, 25, 23, 3, yes, no, 0, no) \ + SC( 84, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 85, 26, 24, 1, yes, no, 0, no) \ + SC( 86, 26, 24, 2, yes, no, 0, no) \ + SC( 87, 26, 24, 3, yes, no, 0, no) \ + SC( 88, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 89, 27, 25, 1, yes, no, 0, no) \ + SC( 90, 27, 25, 2, yes, no, 0, no) \ + SC( 91, 27, 25, 3, yes, no, 0, no) \ + SC( 92, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 93, 28, 26, 1, yes, no, 0, no) \ + SC( 94, 28, 26, 2, yes, no, 0, no) \ + SC( 95, 28, 26, 3, yes, no, 0, no) \ + SC( 96, 28, 26, 4, yes, no, 0, no) \ + \ + SC( 97, 29, 27, 1, yes, no, 0, no) \ + SC( 98, 29, 27, 2, yes, no, 0, no) \ + SC( 99, 29, 27, 3, yes, no, 0, no) \ + SC(100, 29, 27, 4, yes, no, 0, no) \ + \ + SC(101, 30, 28, 1, yes, no, 0, no) \ + SC(102, 30, 28, 2, yes, no, 0, no) \ + SC(103, 30, 28, 3, yes, no, 0, no) \ + SC(104, 30, 28, 4, yes, no, 0, no) \ + \ + SC(105, 31, 29, 1, yes, no, 0, no) \ + SC(106, 31, 29, 2, yes, no, 0, no) \ + SC(107, 31, 29, 3, yes, no, 0, no) \ + SC(108, 31, 29, 4, yes, no, 0, no) \ + \ + SC(109, 32, 30, 1, yes, no, 0, no) \ + SC(110, 32, 30, 2, yes, no, 0, no) \ + SC(111, 32, 30, 3, yes, no, 0, no) \ + SC(112, 32, 30, 4, yes, no, 0, no) \ + \ + SC(113, 33, 31, 1, yes, no, 0, no) \ + SC(114, 33, 31, 2, yes, no, 0, no) \ + SC(115, 33, 31, 3, yes, no, 0, no) \ + SC(116, 33, 31, 4, yes, no, 0, no) \ + \ + SC(117, 34, 32, 1, yes, no, 0, no) \ + SC(118, 34, 32, 2, yes, no, 0, no) \ + SC(119, 34, 32, 3, yes, no, 0, no) \ + SC(120, 34, 32, 4, yes, no, 0, no) \ + \ + SC(121, 35, 33, 1, yes, no, 0, no) \ + SC(122, 35, 33, 2, yes, no, 0, no) \ + SC(123, 35, 33, 3, yes, no, 0, no) \ + SC(124, 35, 33, 4, yes, no, 0, no) \ + \ + SC(125, 36, 34, 1, yes, no, 0, no) \ + SC(126, 36, 34, 2, yes, no, 0, no) \ + SC(127, 36, 34, 3, yes, 
no, 0, no) \ + SC(128, 36, 34, 4, yes, no, 0, no) \ + \ + SC(129, 37, 35, 1, yes, no, 0, no) \ + SC(130, 37, 35, 2, yes, no, 0, no) \ + SC(131, 37, 35, 3, yes, no, 0, no) \ + SC(132, 37, 35, 4, yes, no, 0, no) \ + \ + SC(133, 38, 36, 1, yes, no, 0, no) \ + SC(134, 38, 36, 2, yes, no, 0, no) \ + SC(135, 38, 36, 3, yes, no, 0, no) \ + SC(136, 38, 36, 4, yes, no, 0, no) \ + \ + SC(137, 39, 37, 1, yes, no, 0, no) \ + SC(138, 39, 37, 2, yes, no, 0, no) \ + SC(139, 39, 37, 3, yes, no, 0, no) \ + SC(140, 39, 37, 4, yes, no, 0, no) \ + \ + SC(141, 40, 38, 1, yes, no, 0, no) \ + SC(142, 40, 38, 2, yes, no, 0, no) \ + SC(143, 40, 38, 3, yes, no, 0, no) \ + SC(144, 40, 38, 4, yes, no, 0, no) \ + \ + SC(145, 41, 39, 1, yes, no, 0, no) \ + SC(146, 41, 39, 2, yes, no, 0, no) \ + SC(147, 41, 39, 3, yes, no, 0, no) \ + SC(148, 41, 39, 4, yes, no, 0, no) \ + \ + SC(149, 42, 40, 1, yes, no, 0, no) \ + SC(150, 42, 40, 2, yes, no, 0, no) \ + SC(151, 42, 40, 3, yes, no, 0, no) \ + SC(152, 42, 40, 4, yes, no, 0, no) \ + \ + SC(153, 43, 41, 1, yes, no, 0, no) \ + SC(154, 43, 41, 2, yes, no, 0, no) \ + SC(155, 43, 41, 3, yes, no, 0, no) \ + SC(156, 43, 41, 4, yes, no, 0, no) \ + \ + SC(157, 44, 42, 1, yes, no, 0, no) \ + SC(158, 44, 42, 2, yes, no, 0, no) \ + SC(159, 44, 42, 3, yes, no, 0, no) \ + SC(160, 44, 42, 4, yes, no, 0, no) \ + \ + SC(161, 45, 43, 1, yes, no, 0, no) \ + SC(162, 45, 43, 2, yes, no, 0, no) \ + SC(163, 45, 43, 3, yes, no, 0, no) \ + SC(164, 45, 43, 4, yes, no, 0, no) \ + \ + SC(165, 46, 44, 1, yes, no, 0, no) \ + SC(166, 46, 44, 2, yes, no, 0, no) \ + SC(167, 46, 44, 3, yes, no, 0, no) \ + SC(168, 46, 44, 4, yes, no, 0, no) \ + \ + SC(169, 47, 45, 1, yes, no, 0, no) \ + SC(170, 47, 45, 2, yes, no, 0, no) \ + SC(171, 47, 45, 3, yes, no, 0, no) \ + SC(172, 47, 45, 4, yes, no, 0, no) \ + \ + SC(173, 48, 46, 1, yes, no, 0, no) \ + SC(174, 48, 46, 2, yes, no, 0, no) \ + SC(175, 48, 46, 3, yes, no, 0, no) \ + SC(176, 48, 46, 4, yes, no, 0, no) \ + \ + SC(177, 49, 47, 1, yes, no, 0, no) \ + SC(178, 49, 47, 2, yes, no, 0, no) \ + SC(179, 49, 47, 3, yes, no, 0, no) \ + SC(180, 49, 47, 4, yes, no, 0, no) \ + \ + SC(181, 50, 48, 1, yes, no, 0, no) \ + SC(182, 50, 48, 2, yes, no, 0, no) \ + SC(183, 50, 48, 3, yes, no, 0, no) \ + SC(184, 50, 48, 4, yes, no, 0, no) \ + \ + SC(185, 51, 49, 1, yes, no, 0, no) \ + SC(186, 51, 49, 2, yes, no, 0, no) \ + SC(187, 51, 49, 3, yes, no, 0, no) \ + SC(188, 51, 49, 4, yes, no, 0, no) \ + \ + SC(189, 52, 50, 1, yes, no, 0, no) \ + SC(190, 52, 50, 2, yes, no, 0, no) \ + SC(191, 52, 50, 3, yes, no, 0, no) \ + SC(192, 52, 50, 4, yes, no, 0, no) \ + \ + SC(193, 53, 51, 1, yes, no, 0, no) \ + SC(194, 53, 51, 2, yes, no, 0, no) \ + SC(195, 53, 51, 3, yes, no, 0, no) \ + SC(196, 53, 51, 4, yes, no, 0, no) \ + \ + SC(197, 54, 52, 1, yes, no, 0, no) \ + SC(198, 54, 52, 2, yes, no, 0, no) \ + SC(199, 54, 52, 3, yes, no, 0, no) \ + SC(200, 54, 52, 4, yes, no, 0, no) \ + \ + SC(201, 55, 53, 1, yes, no, 0, no) \ + SC(202, 55, 53, 2, yes, no, 0, no) \ + SC(203, 55, 53, 3, yes, no, 0, no) \ + SC(204, 55, 53, 4, yes, no, 0, no) \ + \ + SC(205, 56, 54, 1, yes, no, 0, no) \ + SC(206, 56, 54, 2, yes, no, 0, no) \ + SC(207, 56, 54, 3, yes, no, 0, no) \ + SC(208, 56, 54, 4, yes, no, 0, no) \ + \ + SC(209, 57, 55, 1, yes, no, 0, no) \ + SC(210, 57, 55, 2, yes, no, 0, no) \ + SC(211, 57, 55, 3, yes, no, 0, no) \ + SC(212, 57, 55, 4, yes, no, 0, no) \ + \ + SC(213, 58, 56, 1, yes, no, 0, no) \ + SC(214, 58, 56, 2, yes, no, 0, no) \ + SC(215, 58, 56, 3, yes, no, 0, no) \ + SC(216, 58, 56, 4, 
yes, no, 0, no) \ + \ + SC(217, 59, 57, 1, yes, no, 0, no) \ + SC(218, 59, 57, 2, yes, no, 0, no) \ + SC(219, 59, 57, 3, yes, no, 0, no) \ + SC(220, 59, 57, 4, yes, no, 0, no) \ + \ + SC(221, 60, 58, 1, yes, no, 0, no) \ + SC(222, 60, 58, 2, yes, no, 0, no) \ + SC(223, 60, 58, 3, yes, no, 0, no) \ + SC(224, 60, 58, 4, yes, no, 0, no) \ + \ + SC(225, 61, 59, 1, yes, no, 0, no) \ + SC(226, 61, 59, 2, yes, no, 0, no) \ + SC(227, 61, 59, 3, yes, no, 0, no) \ + SC(228, 61, 59, 4, yes, no, 0, no) \ + \ + SC(229, 62, 60, 1, yes, no, 0, no) \ + SC(230, 62, 60, 2, yes, no, 0, no) \ + SC(231, 62, 60, 3, yes, no, 0, no) \ -#define NBINS 39 -#define SMALL_MAXCLASS 28672 +#define SIZE_CLASSES_DEFINED +#define NTBINS 1 +#define NLBINS 29 +#define NBINS 36 +#define NSIZES 232 +#define LG_CEIL_NSIZES 8 +#define NPSIZES 199 +#define LG_TINY_MAXCLASS 3 +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ - SIZE_CLASS(27, 512, 4096) \ - SIZE_CLASS(28, 1024, 5120) \ - SIZE_CLASS(29, 1024, 6144) \ - SIZE_CLASS(30, 1024, 7168) \ - SIZE_CLASS(31, 1024, 8192) \ - SIZE_CLASS(32, 2048, 10240) \ - SIZE_CLASS(33, 2048, 12288) \ - SIZE_CLASS(34, 2048, 14336) \ - SIZE_CLASS(35, 2048, 16384) \ - SIZE_CLASS(36, 4096, 20480) \ - SIZE_CLASS(37, 4096, 24576) \ - SIZE_CLASS(38, 4096, 28672) \ - SIZE_CLASS(39, 4096, 32768) \ - SIZE_CLASS(40, 8192, 40960) \ - SIZE_CLASS(41, 8192, 49152) \ - SIZE_CLASS(42, 8192, 57344) \ +#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 4, 4, 0, no, yes, 1, 4) \ + SC( 1, 4, 4, 1, no, yes, 1, 4) \ + SC( 2, 4, 4, 2, no, yes, 3, 4) \ + SC( 3, 4, 4, 3, no, yes, 1, 4) \ + \ + SC( 4, 6, 4, 1, no, yes, 5, 4) \ + SC( 5, 6, 4, 2, no, yes, 3, 4) \ + SC( 6, 6, 4, 3, no, yes, 7, 4) \ + SC( 7, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 8, 7, 5, 1, no, yes, 5, 5) \ + SC( 9, 7, 5, 2, no, yes, 3, 5) \ + SC( 10, 7, 5, 3, no, yes, 7, 5) \ + SC( 11, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 12, 8, 6, 1, no, yes, 5, 6) \ + SC( 13, 8, 6, 2, no, yes, 3, 6) \ + SC( 14, 8, 6, 3, no, yes, 7, 6) \ + SC( 15, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 16, 9, 7, 1, no, yes, 5, 7) \ + SC( 17, 9, 7, 2, no, yes, 3, 7) \ + SC( 18, 9, 7, 3, no, yes, 7, 7) \ + SC( 19, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 20, 10, 8, 1, no, yes, 5, 8) \ + SC( 21, 10, 8, 2, no, yes, 3, 8) \ + SC( 22, 10, 8, 
3, no, yes, 7, 8) \ + SC( 23, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 24, 11, 9, 1, no, yes, 5, 9) \ + SC( 25, 11, 9, 2, no, yes, 3, 9) \ + SC( 26, 11, 9, 3, no, yes, 7, 9) \ + SC( 27, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 28, 12, 10, 1, no, yes, 5, no) \ + SC( 29, 12, 10, 2, no, yes, 3, no) \ + SC( 30, 12, 10, 3, no, yes, 7, no) \ + SC( 31, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 32, 13, 11, 1, no, yes, 5, no) \ + SC( 33, 13, 11, 2, yes, yes, 3, no) \ + SC( 34, 13, 11, 3, no, yes, 7, no) \ + SC( 35, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 36, 14, 12, 1, yes, no, 0, no) \ + SC( 37, 14, 12, 2, yes, no, 0, no) \ + SC( 38, 14, 12, 3, yes, no, 0, no) \ + SC( 39, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 40, 15, 13, 1, yes, no, 0, no) \ + SC( 41, 15, 13, 2, yes, no, 0, no) \ + SC( 42, 15, 13, 3, yes, no, 0, no) \ + SC( 43, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 44, 16, 14, 1, yes, no, 0, no) \ + SC( 45, 16, 14, 2, yes, no, 0, no) \ + SC( 46, 16, 14, 3, yes, no, 0, no) \ + SC( 47, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 48, 17, 15, 1, yes, no, 0, no) \ + SC( 49, 17, 15, 2, yes, no, 0, no) \ + SC( 50, 17, 15, 3, yes, no, 0, no) \ + SC( 51, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 52, 18, 16, 1, yes, no, 0, no) \ + SC( 53, 18, 16, 2, yes, no, 0, no) \ + SC( 54, 18, 16, 3, yes, no, 0, no) \ + SC( 55, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 56, 19, 17, 1, yes, no, 0, no) \ + SC( 57, 19, 17, 2, yes, no, 0, no) \ + SC( 58, 19, 17, 3, yes, no, 0, no) \ + SC( 59, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 60, 20, 18, 1, yes, no, 0, no) \ + SC( 61, 20, 18, 2, yes, no, 0, no) \ + SC( 62, 20, 18, 3, yes, no, 0, no) \ + SC( 63, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 64, 21, 19, 1, yes, no, 0, no) \ + SC( 65, 21, 19, 2, yes, no, 0, no) \ + SC( 66, 21, 19, 3, yes, no, 0, no) \ + SC( 67, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 68, 22, 20, 1, yes, no, 0, no) \ + SC( 69, 22, 20, 2, yes, no, 0, no) \ + SC( 70, 22, 20, 3, yes, no, 0, no) \ + SC( 71, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 72, 23, 21, 1, yes, no, 0, no) \ + SC( 73, 23, 21, 2, yes, no, 0, no) \ + SC( 74, 23, 21, 3, yes, no, 0, no) \ + SC( 75, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 76, 24, 22, 1, yes, no, 0, no) \ + SC( 77, 24, 22, 2, yes, no, 0, no) \ + SC( 78, 24, 22, 3, yes, no, 0, no) \ + SC( 79, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 80, 25, 23, 1, yes, no, 0, no) \ + SC( 81, 25, 23, 2, yes, no, 0, no) \ + SC( 82, 25, 23, 3, yes, no, 0, no) \ + SC( 83, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 84, 26, 24, 1, yes, no, 0, no) \ + SC( 85, 26, 24, 2, yes, no, 0, no) \ + SC( 86, 26, 24, 3, yes, no, 0, no) \ + SC( 87, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 88, 27, 25, 1, yes, no, 0, no) \ + SC( 89, 27, 25, 2, yes, no, 0, no) \ + SC( 90, 27, 25, 3, yes, no, 0, no) \ + SC( 91, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 92, 28, 26, 1, yes, no, 0, no) \ + SC( 93, 28, 26, 2, yes, no, 0, no) \ + SC( 94, 28, 26, 3, yes, no, 0, no) \ + SC( 95, 28, 26, 4, yes, no, 0, no) \ + \ + SC( 96, 29, 27, 1, yes, no, 0, no) \ + SC( 97, 29, 27, 2, yes, no, 0, no) \ + SC( 98, 29, 27, 3, yes, no, 0, no) \ + SC( 99, 29, 27, 4, yes, no, 0, no) \ + \ + SC(100, 30, 28, 1, yes, no, 0, no) \ + SC(101, 30, 28, 2, yes, no, 0, no) \ + SC(102, 30, 28, 3, yes, no, 0, no) \ + SC(103, 30, 28, 4, yes, no, 0, no) \ + \ + SC(104, 31, 29, 1, yes, no, 0, no) \ + SC(105, 31, 29, 2, yes, no, 0, no) \ + SC(106, 31, 29, 3, yes, no, 0, no) \ + SC(107, 31, 29, 4, yes, no, 0, no) \ + \ + SC(108, 32, 30, 1, yes, no, 0, no) \ + SC(109, 32, 30, 2, yes, no, 0, no) \ + SC(110, 32, 30, 3, yes, no, 0, no) \ + SC(111, 32, 30, 4, 
yes, no, 0, no) \ + \ + SC(112, 33, 31, 1, yes, no, 0, no) \ + SC(113, 33, 31, 2, yes, no, 0, no) \ + SC(114, 33, 31, 3, yes, no, 0, no) \ + SC(115, 33, 31, 4, yes, no, 0, no) \ + \ + SC(116, 34, 32, 1, yes, no, 0, no) \ + SC(117, 34, 32, 2, yes, no, 0, no) \ + SC(118, 34, 32, 3, yes, no, 0, no) \ + SC(119, 34, 32, 4, yes, no, 0, no) \ + \ + SC(120, 35, 33, 1, yes, no, 0, no) \ + SC(121, 35, 33, 2, yes, no, 0, no) \ + SC(122, 35, 33, 3, yes, no, 0, no) \ + SC(123, 35, 33, 4, yes, no, 0, no) \ + \ + SC(124, 36, 34, 1, yes, no, 0, no) \ + SC(125, 36, 34, 2, yes, no, 0, no) \ + SC(126, 36, 34, 3, yes, no, 0, no) \ + SC(127, 36, 34, 4, yes, no, 0, no) \ + \ + SC(128, 37, 35, 1, yes, no, 0, no) \ + SC(129, 37, 35, 2, yes, no, 0, no) \ + SC(130, 37, 35, 3, yes, no, 0, no) \ + SC(131, 37, 35, 4, yes, no, 0, no) \ + \ + SC(132, 38, 36, 1, yes, no, 0, no) \ + SC(133, 38, 36, 2, yes, no, 0, no) \ + SC(134, 38, 36, 3, yes, no, 0, no) \ + SC(135, 38, 36, 4, yes, no, 0, no) \ + \ + SC(136, 39, 37, 1, yes, no, 0, no) \ + SC(137, 39, 37, 2, yes, no, 0, no) \ + SC(138, 39, 37, 3, yes, no, 0, no) \ + SC(139, 39, 37, 4, yes, no, 0, no) \ + \ + SC(140, 40, 38, 1, yes, no, 0, no) \ + SC(141, 40, 38, 2, yes, no, 0, no) \ + SC(142, 40, 38, 3, yes, no, 0, no) \ + SC(143, 40, 38, 4, yes, no, 0, no) \ + \ + SC(144, 41, 39, 1, yes, no, 0, no) \ + SC(145, 41, 39, 2, yes, no, 0, no) \ + SC(146, 41, 39, 3, yes, no, 0, no) \ + SC(147, 41, 39, 4, yes, no, 0, no) \ + \ + SC(148, 42, 40, 1, yes, no, 0, no) \ + SC(149, 42, 40, 2, yes, no, 0, no) \ + SC(150, 42, 40, 3, yes, no, 0, no) \ + SC(151, 42, 40, 4, yes, no, 0, no) \ + \ + SC(152, 43, 41, 1, yes, no, 0, no) \ + SC(153, 43, 41, 2, yes, no, 0, no) \ + SC(154, 43, 41, 3, yes, no, 0, no) \ + SC(155, 43, 41, 4, yes, no, 0, no) \ + \ + SC(156, 44, 42, 1, yes, no, 0, no) \ + SC(157, 44, 42, 2, yes, no, 0, no) \ + SC(158, 44, 42, 3, yes, no, 0, no) \ + SC(159, 44, 42, 4, yes, no, 0, no) \ + \ + SC(160, 45, 43, 1, yes, no, 0, no) \ + SC(161, 45, 43, 2, yes, no, 0, no) \ + SC(162, 45, 43, 3, yes, no, 0, no) \ + SC(163, 45, 43, 4, yes, no, 0, no) \ + \ + SC(164, 46, 44, 1, yes, no, 0, no) \ + SC(165, 46, 44, 2, yes, no, 0, no) \ + SC(166, 46, 44, 3, yes, no, 0, no) \ + SC(167, 46, 44, 4, yes, no, 0, no) \ + \ + SC(168, 47, 45, 1, yes, no, 0, no) \ + SC(169, 47, 45, 2, yes, no, 0, no) \ + SC(170, 47, 45, 3, yes, no, 0, no) \ + SC(171, 47, 45, 4, yes, no, 0, no) \ + \ + SC(172, 48, 46, 1, yes, no, 0, no) \ + SC(173, 48, 46, 2, yes, no, 0, no) \ + SC(174, 48, 46, 3, yes, no, 0, no) \ + SC(175, 48, 46, 4, yes, no, 0, no) \ + \ + SC(176, 49, 47, 1, yes, no, 0, no) \ + SC(177, 49, 47, 2, yes, no, 0, no) \ + SC(178, 49, 47, 3, yes, no, 0, no) \ + SC(179, 49, 47, 4, yes, no, 0, no) \ + \ + SC(180, 50, 48, 1, yes, no, 0, no) \ + SC(181, 50, 48, 2, yes, no, 0, no) \ + SC(182, 50, 48, 3, yes, no, 0, no) \ + SC(183, 50, 48, 4, yes, no, 0, no) \ + \ + SC(184, 51, 49, 1, yes, no, 0, no) \ + SC(185, 51, 49, 2, yes, no, 0, no) \ + SC(186, 51, 49, 3, yes, no, 0, no) \ + SC(187, 51, 49, 4, yes, no, 0, no) \ + \ + SC(188, 52, 50, 1, yes, no, 0, no) \ + SC(189, 52, 50, 2, yes, no, 0, no) \ + SC(190, 52, 50, 3, yes, no, 0, no) \ + SC(191, 52, 50, 4, yes, no, 0, no) \ + \ + SC(192, 53, 51, 1, yes, no, 0, no) \ + SC(193, 53, 51, 2, yes, no, 0, no) \ + SC(194, 53, 51, 3, yes, no, 0, no) \ + SC(195, 53, 51, 4, yes, no, 0, no) \ + \ + SC(196, 54, 52, 1, yes, no, 0, no) \ + SC(197, 54, 52, 2, yes, no, 0, no) \ + SC(198, 54, 52, 3, yes, no, 0, no) \ + SC(199, 54, 52, 4, yes, no, 0, no) \ + \ + SC(200, 
55, 53, 1, yes, no, 0, no) \ + SC(201, 55, 53, 2, yes, no, 0, no) \ + SC(202, 55, 53, 3, yes, no, 0, no) \ + SC(203, 55, 53, 4, yes, no, 0, no) \ + \ + SC(204, 56, 54, 1, yes, no, 0, no) \ + SC(205, 56, 54, 2, yes, no, 0, no) \ + SC(206, 56, 54, 3, yes, no, 0, no) \ + SC(207, 56, 54, 4, yes, no, 0, no) \ + \ + SC(208, 57, 55, 1, yes, no, 0, no) \ + SC(209, 57, 55, 2, yes, no, 0, no) \ + SC(210, 57, 55, 3, yes, no, 0, no) \ + SC(211, 57, 55, 4, yes, no, 0, no) \ + \ + SC(212, 58, 56, 1, yes, no, 0, no) \ + SC(213, 58, 56, 2, yes, no, 0, no) \ + SC(214, 58, 56, 3, yes, no, 0, no) \ + SC(215, 58, 56, 4, yes, no, 0, no) \ + \ + SC(216, 59, 57, 1, yes, no, 0, no) \ + SC(217, 59, 57, 2, yes, no, 0, no) \ + SC(218, 59, 57, 3, yes, no, 0, no) \ + SC(219, 59, 57, 4, yes, no, 0, no) \ + \ + SC(220, 60, 58, 1, yes, no, 0, no) \ + SC(221, 60, 58, 2, yes, no, 0, no) \ + SC(222, 60, 58, 3, yes, no, 0, no) \ + SC(223, 60, 58, 4, yes, no, 0, no) \ + \ + SC(224, 61, 59, 1, yes, no, 0, no) \ + SC(225, 61, 59, 2, yes, no, 0, no) \ + SC(226, 61, 59, 3, yes, no, 0, no) \ + SC(227, 61, 59, 4, yes, no, 0, no) \ + \ + SC(228, 62, 60, 1, yes, no, 0, no) \ + SC(229, 62, 60, 2, yes, no, 0, no) \ + SC(230, 62, 60, 3, yes, no, 0, no) \ -#define NBINS 43 -#define SMALL_MAXCLASS 57344 +#define SIZE_CLASSES_DEFINED +#define NTBINS 0 +#define NLBINS 28 +#define NBINS 35 +#define NSIZES 231 +#define LG_CEIL_NSIZES 8 +#define NPSIZES 199 +#define LG_TINY_MAXCLASS "NA" +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60)) #endif #ifndef SIZE_CLASSES_DEFINED @@ -692,30 +1418,11 @@ #endif #undef SIZE_CLASSES_DEFINED /* - * The small_size2bin lookup table uses uint8_t to encode each bin index, so we - * cannot support more than 256 small size classes. Further constrain NBINS to - * 255 to support prof_promote, since all small size classes, plus a "not - * small" size class must be stored in 8 bits of arena_chunk_map_t's bits - * field. + * The size2index_tab lookup table uses uint8_t to encode each bin index, so we + * cannot support more than 256 small size classes. 
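Each SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) row above encodes a size class as a base group plus a multiple of a delta: size = (1 << lg_grp) + ndelta * (1 << lg_delta). The derived constants under each table follow the same arithmetic. A minimal sketch that recomputes a few of them for the 64-bit, LG_QUANTUM == 4, LG_PAGE == 12 table; the helper name is illustrative and not part of jemalloc:

    #include <assert.h>
    #include <stddef.h>

    /* Size of a class from its SC() row: (1 << lg_grp) + ndelta * (1 << lg_delta). */
    static size_t sc_size(unsigned lg_grp, unsigned lg_delta, unsigned ndelta) {
        return ((size_t)1 << lg_grp) + (size_t)ndelta * ((size_t)1 << lg_delta);
    }

    int main(void) {
        /* SC(34, 13, 11, 3, ...) is the largest bin-backed (small) class. */
        assert(sc_size(13, 11, 3) == 14336);   /* SMALL_MAXCLASS */
        /* SC(27, 11, 9, 4, ...) is the largest lookup-table class. */
        assert(sc_size(11, 9, 4) == 4096);     /* LOOKUP_MAXCLASS */
        /* The first class past the small range starts the large range. */
        assert(((size_t)1 << 14) == 16384);    /* LARGE_MINCLASS, LG_LARGE_MINCLASS == 14 */
        return 0;
    }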
*/ -#if (NBINS > 255) +#if (NBINS > 256) # error "Too many small size classes" #endif -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/stats.h b/deps/jemalloc/include/jemalloc/internal/stats.h index 27f68e3681..1198779ab9 100644 --- a/deps/jemalloc/include/jemalloc/internal/stats.h +++ b/deps/jemalloc/include/jemalloc/internal/stats.h @@ -1,31 +1,51 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_stats_s tcache_bin_stats_t; -typedef struct malloc_bin_stats_s malloc_bin_stats_t; -typedef struct malloc_large_stats_s malloc_large_stats_t; -typedef struct arena_stats_s arena_stats_t; -typedef struct chunk_stats_s chunk_stats_t; +#ifndef JEMALLOC_INTERNAL_STATS_H +#define JEMALLOC_INTERNAL_STATS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats_tsd.h" + +/* OPTION(opt, var_name, default, set_value_to) */ +#define STATS_PRINT_OPTIONS \ + OPTION('J', json, false, true) \ + OPTION('g', general, true, false) \ + OPTION('m', merged, config_stats, false) \ + OPTION('d', destroyed, config_stats, false) \ + OPTION('a', unmerged, config_stats, false) \ + OPTION('b', bins, true, false) \ + OPTION('l', large, true, false) \ + OPTION('x', mutex, true, false) + +enum { +#define OPTION(o, v, d, s) stats_print_option_num_##v, + STATS_PRINT_OPTIONS +#undef OPTION + stats_print_tot_num_options +}; -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +/* Options for stats_print. */ +extern bool opt_stats_print; +extern char opt_stats_print_opts[stats_print_tot_num_options+1]; -struct tcache_bin_stats_s { - /* - * Number of allocation requests that corresponded to the size of this - * bin. - */ - uint64_t nrequests; -}; +/* Implements je_malloc_stats_print. */ +void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts); -struct malloc_bin_stats_s { - /* - * Current number of bytes allocated, including objects currently - * cached by tcache. - */ - size_t allocated; +/* + * In those architectures that support 64-bit atomics, we use atomic updates for + * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize + * externally. + */ +#ifdef JEMALLOC_ATOMIC_U64 +typedef atomic_u64_t arena_stats_u64_t; +#else +/* Must hold the arena stats mutex while reading atomically. */ +typedef uint64_t arena_stats_u64_t; +#endif +typedef struct malloc_bin_stats_s { /* * Total number of allocation/deallocation requests served directly by * the bin. 
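The STATS_PRINT_OPTIONS table above drives the opts string accepted by je_malloc_stats_print(): each OPTION(char, name, default, set_value_to) entry flips name from its default to set_value_to when its character appears in opts, so 'J' switches the output to JSON while most other letters suppress a section that is printed by default. A hedged usage sketch against the public API, assuming jemalloc is installed without a symbol prefix; a NULL write_cb sends output to the default write callback:

    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* JSON output ('J'), with the per-bin ('b') and per-large-class ('l')
         * tables suppressed; the other sections keep their defaults. */
        malloc_stats_print(NULL, NULL, "Jbl");
        return 0;
    }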
Note that tcache may allocate an object, then recycle it @@ -42,132 +62,103 @@ struct malloc_bin_stats_s { */ uint64_t nrequests; + /* + * Current number of regions of this size class, including regions + * currently cached by tcache. + */ + size_t curregs; + /* Number of tcache fills from this bin. */ uint64_t nfills; /* Number of tcache flushes to this bin. */ uint64_t nflushes; - /* Total number of runs created for this bin's size class. */ - uint64_t nruns; + /* Total number of slabs created for this bin's size class. */ + uint64_t nslabs; /* - * Total number of runs reused by extracting them from the runs tree for - * this bin's size class. + * Total number of slabs reused by extracting them from the slabs heap + * for this bin's size class. */ - uint64_t reruns; + uint64_t reslabs; - /* Current number of runs in this bin. */ - size_t curruns; -}; + /* Current number of slabs in this bin. */ + size_t curslabs; -struct malloc_large_stats_s { + mutex_prof_data_t mutex_data; +} malloc_bin_stats_t; + +typedef struct malloc_large_stats_s { /* * Total number of allocation/deallocation requests served directly by - * the arena. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. + * the arena. */ - uint64_t nmalloc; - uint64_t ndalloc; + arena_stats_u64_t nmalloc; + arena_stats_u64_t ndalloc; /* * Number of allocation requests that correspond to this size class. * This includes requests served by tcache, though tcache only * periodically merges into this counter. */ - uint64_t nrequests; - - /* Current number of runs of this size class. */ - size_t curruns; -}; - -struct arena_stats_s { - /* Number of bytes currently mapped. */ - size_t mapped; - - /* - * Total number of purge sweeps, total number of madvise calls made, - * and total pages purged in order to keep dirty unused memory under - * control. - */ - uint64_t npurge; - uint64_t nmadvise; - uint64_t purged; - - /* Per-size-category statistics. */ - size_t allocated_large; - uint64_t nmalloc_large; - uint64_t ndalloc_large; - uint64_t nrequests_large; - - /* - * One element for each possible size class, including sizes that - * overlap with bin size classes. This is necessary because ipalloc() - * sometimes has to use such large objects in order to assure proper - * alignment. - */ - malloc_large_stats_t *lstats; -}; - -struct chunk_stats_s { - /* Number of chunks that were allocated. */ - uint64_t nchunks; + arena_stats_u64_t nrequests; /* Partially derived. */ + + /* Current number of allocations of this size class. */ + size_t curlextents; /* Derived. */ +} malloc_large_stats_t; + +typedef struct decay_stats_s { + /* Total number of purge sweeps. */ + arena_stats_u64_t npurge; + /* Total number of madvise calls made. */ + arena_stats_u64_t nmadvise; + /* Total number of pages purged. */ + arena_stats_u64_t purged; +} decay_stats_t; + +/* + * Arena stats. Note that fields marked "derived" are not directly maintained + * within the arena code; rather their values are derived during stats merge + * requests. + */ +typedef struct arena_stats_s { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_t mtx; +#endif - /* High-water mark for number of chunks allocated. */ - size_t highchunks; + /* Number of bytes currently mapped, excluding retained memory. */ + atomic_zu_t mapped; /* Partially derived. */ /* - * Current number of chunks allocated. 
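arena_stats_u64_t above is either a genuinely atomic 64-bit counter or a plain uint64_t that the arena stats mutex protects, selected by JEMALLOC_ATOMIC_U64. The same pattern in portable C11, with illustrative names rather than jemalloc's own atomic wrappers:

    #include <stdint.h>

    #ifdef HAVE_64BIT_ATOMICS              /* stand-in for JEMALLOC_ATOMIC_U64 */
    #include <stdatomic.h>
    typedef _Atomic uint64_t stats_u64_t;

    static void stats_add_u64(stats_u64_t *p, uint64_t x) {
        /* Lock-free update; no external synchronization needed. */
        atomic_fetch_add_explicit(p, x, memory_order_relaxed);
    }
    #else
    typedef uint64_t stats_u64_t;

    /* Caller must hold the owning stats mutex (arena_stats_s.mtx above). */
    static void stats_add_u64(stats_u64_t *p, uint64_t x) {
        *p += x;
    }
    #endif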
This value isn't maintained for - * any other purpose, so keep track of it in order to be able to set - * highchunks. + * Number of unused virtual memory bytes currently retained. Retained + * bytes are technically mapped (though always decommitted or purged), + * but they are excluded from the mapped statistic (above). */ - size_t curchunks; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_stats_print; - -extern size_t stats_cactive; + atomic_zu_t retained; /* Derived. */ -void stats_print(void (*write)(void *, const char *), void *cbopaque, - const char *opts); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES + decay_stats_t decay_dirty; + decay_stats_t decay_muzzy; -#ifndef JEMALLOC_ENABLE_INLINE -size_t stats_cactive_get(void); -void stats_cactive_add(size_t size); -void stats_cactive_sub(size_t size); -#endif + atomic_zu_t base; /* Derived. */ + atomic_zu_t internal; + atomic_zu_t resident; /* Derived. */ -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) -JEMALLOC_INLINE size_t -stats_cactive_get(void) -{ + atomic_zu_t allocated_large; /* Derived. */ + arena_stats_u64_t nmalloc_large; /* Derived. */ + arena_stats_u64_t ndalloc_large; /* Derived. */ + arena_stats_u64_t nrequests_large; /* Derived. */ - return (atomic_read_z(&stats_cactive)); -} + /* Number of bytes cached in tcache associated with this arena. */ + atomic_zu_t tcache_bytes; /* Derived. */ -JEMALLOC_INLINE void -stats_cactive_add(size_t size) -{ + mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]; - atomic_add_z(&stats_cactive, size); -} + /* One element for each large size class. */ + malloc_large_stats_t lstats[NSIZES - NBINS]; -JEMALLOC_INLINE void -stats_cactive_sub(size_t size) -{ - - atomic_sub_z(&stats_cactive, size); -} -#endif + /* Arena uptime. */ + nstime_t uptime; +} arena_stats_t; -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_STATS_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/tsd.h b/deps/jemalloc/include/jemalloc/internal/tsd.h index 9fb4a23ec6..155a2ec6c4 100644 --- a/deps/jemalloc/include/jemalloc/internal/tsd.h +++ b/deps/jemalloc/include/jemalloc/internal/tsd.h @@ -1,434 +1,324 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_TSD_H +#define JEMALLOC_INTERNAL_TSD_H -/* Maximum number of malloc_tsd users with cleanup functions. 
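The lstats array above reserves one malloc_large_stats_t per large size class. Plugging in the 64-bit, 4 KiB-page constants from the size-class tables earlier in this diff gives its length; a compile-time check of that arithmetic, using example constants rather than the real NSIZES/NBINS macros:

    #include <assert.h>

    enum {
        EX_NSIZES = 231,   /* total size classes, 64-bit / LG_PAGE == 12 table */
        EX_NBINS  = 35     /* small (bin-backed) classes in the same table */
    };

    /* One large-stats slot per class above SMALL_MAXCLASS. */
    static_assert(EX_NSIZES - EX_NBINS == 196, "large size-class slot count");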
*/ -#define MALLOC_TSD_CLEANUPS_MAX 8 +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/jemalloc_internal_externs.h" +#include "jemalloc/internal/prof_types.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/rtree_tsd.h" +#include "jemalloc/internal/tcache_types.h" +#include "jemalloc/internal/tcache_structs.h" +#include "jemalloc/internal/util.h" +#include "jemalloc/internal/witness.h" -typedef bool (*malloc_tsd_cleanup_t)(void); - -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -typedef struct tsd_init_block_s tsd_init_block_t; -typedef struct tsd_init_head_s tsd_init_head_t; +/* + * Thread-Specific-Data layout + * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof --- + * s: state + * e: tcache_enabled + * m: thread_allocated (config_stats) + * f: thread_deallocated (config_stats) + * p: prof_tdata (config_prof) + * c: rtree_ctx (rtree cache accessed on deallocation) + * t: tcache + * --- data not accessed on tcache fast path: arena-related fields --- + * d: arenas_tdata_bypass + * r: reentrancy_level + * x: narenas_tdata + * i: iarena + * a: arena + * o: arenas_tdata + * Loading TSD data is on the critical path of basically all malloc operations. + * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective. + * Use a compact layout to reduce cache footprint. + * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+ + * |---------------------------- 1st cacheline ----------------------------| + * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] | + * |---------------------------- 2nd cacheline ----------------------------| + * | [c * 64 ........ ........ ........ ........ ........ ........ .......] | + * |---------------------------- 3nd cacheline ----------------------------| + * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... | + * +-------------------------------------------------------------------------+ + * Note: the entire tcache is embedded into TSD and spans multiple cachelines. + * + * The last 3 members (i, a and o) before tcache isn't really needed on tcache + * fast path. However we have a number of unused tcache bins and witnesses + * (never touched unless config_debug) at the end of tcache, so we place them + * there to avoid breaking the cachelines and possibly paging in an extra page. 
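The layout comment above keeps the state byte, the stats counters, prof_tdata and the start of rtree_ctx inside the first 64-byte cacheline so the tcache fast path touches as little memory as possible. One generic way to enforce that kind of assumption at compile time, using a hypothetical struct rather than jemalloc's real tsd_s:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct hot_tsd_s {
        uint8_t  state;               /* s */
        uint8_t  tcache_enabled;      /* e */
        int8_t   reentrancy_level;    /* r */
        uint8_t  pad;
        uint32_t narenas_tdata;       /* x */
        uint64_t thread_allocated;    /* m */
        uint64_t thread_deallocated;  /* f */
        void    *prof_tdata;          /* p */
    } hot_tsd_t;

    /* The fast-path fields must not spill past the first cacheline. */
    static_assert(offsetof(hot_tsd_t, prof_tdata) + sizeof(void *) <= 64,
        "hot TSD fields exceed one 64-byte cacheline");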
+ */ +#ifdef JEMALLOC_JET +typedef void (*test_callback_t)(int *); +# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 +# define MALLOC_TEST_TSD \ + O(test_data, int, int) \ + O(test_callback, test_callback_t, int) +# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL +#else +# define MALLOC_TEST_TSD +# define MALLOC_TEST_TSD_INITIALIZER #endif +/* O(name, type, nullable type */ +#define MALLOC_TSD \ + O(tcache_enabled, bool, bool) \ + O(arenas_tdata_bypass, bool, bool) \ + O(reentrancy_level, int8_t, int8_t) \ + O(narenas_tdata, uint32_t, uint32_t) \ + O(thread_allocated, uint64_t, uint64_t) \ + O(thread_deallocated, uint64_t, uint64_t) \ + O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ + O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \ + O(iarena, arena_t *, arena_t *) \ + O(arena, arena_t *, arena_t *) \ + O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\ + O(tcache, tcache_t, tcache_t) \ + O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ + MALLOC_TEST_TSD + +#define TSD_INITIALIZER { \ + tsd_state_uninitialized, \ + TCACHE_ENABLED_ZERO_INITIALIZER, \ + false, \ + 0, \ + 0, \ + 0, \ + 0, \ + NULL, \ + RTREE_CTX_ZERO_INITIALIZER, \ + NULL, \ + NULL, \ + NULL, \ + TCACHE_ZERO_INITIALIZER, \ + WITNESS_TSD_INITIALIZER \ + MALLOC_TEST_TSD_INITIALIZER \ +} + +enum { + tsd_state_nominal = 0, /* Common case --> jnz. */ + tsd_state_nominal_slow = 1, /* Initialized but on slow path. */ + /* the above 2 nominal states should be lower values. */ + tsd_state_nominal_max = 1, /* used for comparison only. */ + tsd_state_minimal_initialized = 2, + tsd_state_purgatory = 3, + tsd_state_reincarnated = 4, + tsd_state_uninitialized = 5 +}; + +/* Manually limit tsd_state_t to a single byte. */ +typedef uint8_t tsd_state_t; + +/* The actual tsd. */ +struct tsd_s { + /* + * The contents should be treated as totally opaque outside the tsd + * module. Access any thread-local state through the getters and + * setters below. + */ + tsd_state_t state; +#define O(n, t, nt) \ + t use_a_getter_or_setter_instead_##n; +MALLOC_TSD +#undef O +}; + /* - * TLS/TSD-agnostic macro-based implementation of thread-specific data. There - * are four macros that support (at least) three use cases: file-private, - * library-private, and library-private inlined. Following is an example - * library-private tsd variable: - * - * In example.h: - * typedef struct { - * int x; - * int y; - * } example_t; - * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) - * malloc_tsd_protos(, example, example_t *) - * malloc_tsd_externs(example, example_t *) - * In example.c: - * malloc_tsd_data(, example, example_t *, EX_INITIALIZER) - * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER, - * example_tsd_cleanup) - * - * The result is a set of generated functions, e.g.: - * - * bool example_tsd_boot(void) {...} - * example_t **example_tsd_get() {...} - * void example_tsd_set(example_t **val) {...} - * - * Note that all of the functions deal in terms of (a_type *) rather than - * (a_type) so that it is possible to support non-pointer types (unlike - * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is - * cast to (void *). This means that the cleanup function needs to cast *and* - * dereference the function argument, e.g.: - * - * void - * example_tsd_cleanup(void *arg) - * { - * example_t *example = *(example_t **)arg; - * - * [...] 
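MALLOC_TSD above is an X-macro: the same O(name, type, nullable_type) list expands once to declare the struct tsd_s members (behind the deliberately unwieldy use_a_getter_or_setter_instead_ prefix) and again, further down, to generate an accessor per field. A stripped-down sketch of the technique with made-up fields:

    #include <stdint.h>

    /* The field list is written once... */
    #define EXAMPLE_TSD                      \
        O(thread_allocated,  uint64_t)       \
        O(reentrancy_level,  int8_t)

    /* ...expanded once for the struct members... */
    typedef struct example_tsd_s {
    #define O(n, t) t n;
    EXAMPLE_TSD
    #undef O
    } example_tsd_t;

    /* ...and once more for the accessors. */
    #define O(n, t)                                              \
    static inline t example_tsd_##n##_get(example_tsd_t *tsd) {  \
        return tsd->n;                                           \
    }
    EXAMPLE_TSD
    #undef O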
- * if ([want the cleanup function to be called again]) { - * example_tsd_set(&example); - * } - * } - * - * If example_tsd_set() is called within example_tsd_cleanup(), it will be - * called again. This is similar to how pthreads TSD destruction works, except - * that pthreads only calls the cleanup function again if the value was set to - * non-NULL. + * Wrapper around tsd_t that makes it possible to avoid implicit conversion + * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be + * explicitly converted to tsd_t, which is non-nullable. */ +struct tsdn_s { + tsd_t tsd; +}; +#define TSDN_NULL ((tsdn_t *)0) +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsd_tsdn(tsd_t *tsd) { + return (tsdn_t *)tsd; +} -/* malloc_tsd_protos(). */ -#define malloc_tsd_protos(a_attr, a_name, a_type) \ -a_attr bool \ -a_name##_tsd_boot(void); \ -a_attr a_type * \ -a_name##_tsd_get(void); \ -a_attr void \ -a_name##_tsd_set(a_type *val); +JEMALLOC_ALWAYS_INLINE bool +tsdn_null(const tsdn_t *tsdn) { + return tsdn == NULL; +} -/* malloc_tsd_externs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##_tls; \ -extern __thread bool a_name##_initialized; \ -extern bool a_name##_booted; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##_tls; \ -extern pthread_key_t a_name##_tsd; \ -extern bool a_name##_booted; -#elif (defined(_WIN32)) -#define malloc_tsd_externs(a_name, a_type) \ -extern DWORD a_name##_tsd; \ -extern bool a_name##_booted; -#else -#define malloc_tsd_externs(a_name, a_type) \ -extern pthread_key_t a_name##_tsd; \ -extern tsd_init_head_t a_name##_tsd_init_head; \ -extern bool a_name##_booted; -#endif +JEMALLOC_ALWAYS_INLINE tsd_t * +tsdn_tsd(tsdn_t *tsdn) { + assert(!tsdn_null(tsdn)); + + return &tsdn->tsd; +} + +void *malloc_tsd_malloc(size_t size); +void malloc_tsd_dalloc(void *wrapper); +void malloc_tsd_cleanup_register(bool (*f)(void)); +tsd_t *malloc_tsd_boot0(void); +void malloc_tsd_boot1(void); +void tsd_cleanup(void *arg); +tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal); +void tsd_slow_update(tsd_t *tsd); -/* malloc_tsd_data(). */ +/* + * We put the platform-specific data declarations and inlines into their own + * header files to avoid cluttering this file. They define tsd_boot0, + * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set. 
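tsdn_t above exists purely so that "may be NULL" thread state is a distinct type from tsd_t, which callers may dereference unconditionally. A hedged sketch of the resulting call pattern, built from the inlines in this header plus a made-up helper name; it assumes the surrounding internal jemalloc headers:

    /* Hypothetical helper: read the calling thread's allocation counter,
     * tolerating a NULL tsdn (e.g. very early in bootstrap). */
    static uint64_t
    example_thread_allocated(tsdn_t *tsdn) {
        if (tsdn_null(tsdn)) {
            return 0;
        }
        tsd_t *tsd = tsdn_tsd(tsdn);             /* asserts non-NULL */
        return tsd_thread_allocated_get(tsd);    /* generated accessor */
    }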
+ */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##_tls = a_initializer; \ -a_attr __thread bool JEMALLOC_TLS_MODEL \ - a_name##_initialized = false; \ -a_attr bool a_name##_booted = false; +#include "jemalloc/internal/tsd_malloc_thread_cleanup.h" #elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##_tls = a_initializer; \ -a_attr pthread_key_t a_name##_tsd; \ -a_attr bool a_name##_booted = false; +#include "jemalloc/internal/tsd_tls.h" #elif (defined(_WIN32)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr DWORD a_name##_tsd; \ -a_attr bool a_name##_booted = false; +#include "jemalloc/internal/tsd_win.h" #else -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr pthread_key_t a_name##_tsd; \ -a_attr tsd_init_head_t a_name##_tsd_init_head = { \ - ql_head_initializer(blocks), \ - MALLOC_MUTEX_INITIALIZER \ -}; \ -a_attr bool a_name##_booted = false; +#include "jemalloc/internal/tsd_generic.h" #endif -/* malloc_tsd_funcs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_cleanup_wrapper(void) \ -{ \ - \ - if (a_name##_initialized) { \ - a_name##_initialized = false; \ - a_cleanup(&a_name##_tls); \ - } \ - return (a_name##_initialized); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##_tsd_cleanup_wrapper); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - \ - assert(a_name##_booted); \ - return (&a_name##_tls); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##_booted); \ - a_name##_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - a_name##_initialized = true; \ +/* + * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of + * foo. This omits some safety checks, and so can be used during tsd + * initialization and cleanup. + */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t * \ +tsd_##n##p_get_unsafe(tsd_t *tsd) { \ + return &tsd->use_a_getter_or_setter_instead_##n; \ } -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \ - return (true); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - \ - assert(a_name##_booted); \ - return (&a_name##_tls); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##_booted); \ - a_name##_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)(&a_name##_tls))) { \ - malloc_write("<jemalloc>: Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - } \ +MALLOC_TSD +#undef O + +/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. 
*/ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t * \ +tsd_##n##p_get(tsd_t *tsd) { \ + assert(tsd->state == tsd_state_nominal || \ + tsd->state == tsd_state_nominal_slow || \ + tsd->state == tsd_state_reincarnated || \ + tsd->state == tsd_state_minimal_initialized); \ + return tsd_##n##p_get_unsafe(tsd); \ } -#elif (defined(_WIN32)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Data structure. */ \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##_tsd_wrapper_t; \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_cleanup_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \ - if (wrapper == NULL) \ - return (false); \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - a_type val = wrapper->val; \ - a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - a_cleanup(&val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - return (true); \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ - return (false); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - a_name##_tsd = TlsAlloc(); \ - if (a_name##_tsd == TLS_OUT_OF_INDEXES) \ - return (true); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##_tsd_cleanup_wrapper); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_name##_tsd_wrapper_t * \ -a_name##_tsd_get_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ - TlsGetValue(a_name##_tsd); \ - \ - if (wrapper == NULL) { \ - wrapper = (a_name##_tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write("<jemalloc>: Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - static a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - } \ - if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \ - malloc_write("<jemalloc>: Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ +MALLOC_TSD +#undef O + +/* + * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn + * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type. + */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE nt * \ +tsdn_##n##p_get(tsdn_t *tsdn) { \ + if (tsdn_null(tsdn)) { \ + return NULL; \ } \ - return (wrapper); \ -} \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ + tsd_t *tsd = tsdn_tsd(tsdn); \ + return (nt *)tsd_##n##p_get(tsd); \ } -#else -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Data structure. */ \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##_tsd_wrapper_t; \ -/* Initialization/cleanup. 
*/ \ -a_attr void \ -a_name##_tsd_cleanup_wrapper(void *arg) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\ - \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - wrapper->initialized = false; \ - a_cleanup(&wrapper->val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)wrapper)) { \ - malloc_write("<jemalloc>: Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - return; \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (pthread_key_create(&a_name##_tsd, \ - a_name##_tsd_cleanup_wrapper) != 0) \ - return (true); \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_name##_tsd_wrapper_t * \ -a_name##_tsd_get_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ - pthread_getspecific(a_name##_tsd); \ - \ - if (wrapper == NULL) { \ - tsd_init_block_t block; \ - wrapper = tsd_init_check_recursion( \ - &a_name##_tsd_init_head, &block); \ - if (wrapper) \ - return (wrapper); \ - wrapper = (a_name##_tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ - block.data = wrapper; \ - if (wrapper == NULL) { \ - malloc_write("<jemalloc>: Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - static a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - } \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)wrapper)) { \ - malloc_write("<jemalloc>: Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - tsd_init_finish(&a_name##_tsd_init_head, &block); \ - } \ - return (wrapper); \ -} \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ +MALLOC_TSD +#undef O + +/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t \ +tsd_##n##_get(tsd_t *tsd) { \ + return *tsd_##n##p_get(tsd); \ } -#endif +MALLOC_TSD +#undef O -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. 
*/ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE void \ +tsd_##n##_set(tsd_t *tsd, t val) { \ + assert(tsd->state != tsd_state_reincarnated && \ + tsd->state != tsd_state_minimal_initialized); \ + *tsd_##n##p_get(tsd) = val; \ +} +MALLOC_TSD +#undef O -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -struct tsd_init_block_s { - ql_elm(tsd_init_block_t) link; - pthread_t thread; - void *data; -}; -struct tsd_init_head_s { - ql_head(tsd_init_block_t) blocks; - malloc_mutex_t lock; -}; -#endif +JEMALLOC_ALWAYS_INLINE void +tsd_assert_fast(tsd_t *tsd) { + assert(!malloc_slow && tsd_tcache_enabled_get(tsd) && + tsd_reentrancy_level_get(tsd) == 0); +} -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *malloc_tsd_malloc(size_t size); -void malloc_tsd_dalloc(void *wrapper); -void malloc_tsd_no_cleanup(void *); -void malloc_tsd_cleanup_register(bool (*f)(void)); -void malloc_tsd_boot(void); -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void *tsd_init_check_recursion(tsd_init_head_t *head, - tsd_init_block_t *block); -void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); -#endif +JEMALLOC_ALWAYS_INLINE bool +tsd_fast(tsd_t *tsd) { + bool fast = (tsd->state == tsd_state_nominal); + if (fast) { + tsd_assert_fast(tsd); + } + + return fast; +} -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_impl(bool init, bool minimal) { + tsd_t *tsd = tsd_get(init); + + if (!init && tsd_get_allocates() && tsd == NULL) { + return NULL; + } + assert(tsd != NULL); + + if (unlikely(tsd->state != tsd_state_nominal)) { + return tsd_fetch_slow(tsd, minimal); + } + assert(tsd_fast(tsd)); + tsd_assert_fast(tsd); + + return tsd; +} + +/* Get a minimal TSD that requires no cleanup. See comments in free(). */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_min(void) { + return tsd_fetch_impl(true, true); +} + +/* For internal background threads use only. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_internal_fetch(void) { + tsd_t *tsd = tsd_fetch_min(); + /* Use reincarnated state to prevent full initialization. */ + tsd->state = tsd_state_reincarnated; + + return tsd; +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch(void) { + return tsd_fetch_impl(true, false); +} + +static inline bool +tsd_nominal(tsd_t *tsd) { + return (tsd->state <= tsd_state_nominal_max); +} + +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsdn_fetch(void) { + if (!tsd_booted_get()) { + return NULL; + } + + return tsd_tsdn(tsd_fetch_impl(false, false)); +} + +JEMALLOC_ALWAYS_INLINE rtree_ctx_t * +tsd_rtree_ctx(tsd_t *tsd) { + return tsd_rtree_ctxp_get(tsd); +} + +JEMALLOC_ALWAYS_INLINE rtree_ctx_t * +tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) { + /* + * If tsd cannot be accessed, initialize the fallback rtree_ctx and + * return a pointer to it. 
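For readers new to the reworked tsd layout: each entry in the MALLOC_TSD list is stamped into a family of accessors by the O() macros above. As an illustration only (there is no tsd field named foo), an entry O(foo, int, int) would expand roughly to the following; the checked pointer getter tsd_foop_get() and the tsdn_ variant are generated the same way, with the state asserts shown above.

JEMALLOC_ALWAYS_INLINE int *
tsd_foop_get_unsafe(tsd_t *tsd) {
	/* Raw access; legal during tsd init/cleanup. */
	return &tsd->use_a_getter_or_setter_instead_foo;
}

JEMALLOC_ALWAYS_INLINE int
tsd_foo_get(tsd_t *tsd) {
	/* Goes through the checked getter generated above. */
	return *tsd_foop_get(tsd);
}

JEMALLOC_ALWAYS_INLINE void
tsd_foo_set(tsd_t *tsd, int val) {
	assert(tsd->state != tsd_state_reincarnated &&
	    tsd->state != tsd_state_minimal_initialized);
	*tsd_foop_get(tsd) = val;
}

Call sites obtain a tsd_t via tsd_fetch() (or tsdn_fetch() where a NULL tsdn is possible) and then touch fields only through these generated accessors.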
+ */ + if (unlikely(tsdn_null(tsdn))) { + rtree_ctx_data_init(fallback); + return fallback; + } + return tsd_rtree_ctx(tsdn_tsd(tsdn)); +} -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_TSD_H */ diff --git a/deps/jemalloc/include/jemalloc/internal/util.h b/deps/jemalloc/include/jemalloc/internal/util.h index 6b938f7468..304cb545af 100644 --- a/deps/jemalloc/include/jemalloc/internal/util.h +++ b/deps/jemalloc/include/jemalloc/internal/util.h @@ -1,143 +1,50 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_UTIL_H +#define JEMALLOC_INTERNAL_UTIL_H -/* Size of stack-allocated buffer passed to buferror(). */ -#define BUFERROR_BUF 64 +#define UTIL_INLINE static inline -/* - * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be - * large enough for all possible uses within jemalloc. - */ -#define MALLOC_PRINTF_BUFSIZE 4096 +/* Junk fill patterns. */ +#ifndef JEMALLOC_ALLOC_JUNK +# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) +#endif +#ifndef JEMALLOC_FREE_JUNK +# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) +#endif /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ -#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ +#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ + +/* cpp macro definition stringification. */ +#define STRINGIFY_HELPER(x) #x +#define STRINGIFY(x) STRINGIFY_HELPER(x) /* * Silence compiler warnings due to uninitialized values. This is used * wherever the compiler fails to recognize that the variable is never used * uninitialized. */ -#ifdef JEMALLOC_CC_SILENCE -# define JEMALLOC_CC_SILENCE_INIT(v) = v -#else -# define JEMALLOC_CC_SILENCE_INIT(v) -#endif - -/* - * Define a custom assert() in order to reduce the chances of deadlock during - * assertion failure. - */ -#ifndef assert -#define assert(e) do { \ - if (config_debug && !(e)) { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef not_reached -#define not_reached() do { \ - if (config_debug) { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef not_implemented -#define not_implemented() do { \ - if (config_debug) { \ - malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif +#define JEMALLOC_CC_SILENCE_INIT(v) = v -#ifndef assert_not_implemented -#define assert_not_implemented(e) do { \ - if (config_debug && !(e)) \ - not_implemented(); \ -} while (0) -#endif - -/* Use to assert a particular configuration, e.g., cassert(config_debug). */ -#define cassert(c) do { \ - if ((c) == false) \ - not_reached(); \ -} while (0) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int buferror(int err, char *buf, size_t buflen); -uintmax_t malloc_strtoumax(const char *restrict nptr, - char **restrict endptr, int base); -void malloc_write(const char *s); - -/* - * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating - * point math. 
- */ -int malloc_vsnprintf(char *str, size_t size, const char *format, - va_list ap); -int malloc_snprintf(char *str, size_t size, const char *format, ...) - JEMALLOC_ATTR(format(printf, 3, 4)); -void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap); -void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, - const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); -void malloc_printf(const char *format, ...) - JEMALLOC_ATTR(format(printf, 1, 2)); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -size_t pow2_ceil(size_t x); -void set_errno(int errnum); -int get_errno(void); +#ifdef __GNUC__ +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#else +# define likely(x) !!(x) +# define unlikely(x) !!(x) #endif -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) -/* Compute the smallest power of 2 that is >= x. */ -JEMALLOC_INLINE size_t -pow2_ceil(size_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; -#if (LG_SIZEOF_PTR == 3) - x |= x >> 32; +#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) +# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure #endif - x++; - return (x); -} -/* Sets error code */ -JEMALLOC_INLINE void -set_errno(int errnum) -{ +#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() +/* Set error code. */ +UTIL_INLINE void +set_errno(int errnum) { #ifdef _WIN32 SetLastError(errnum); #else @@ -145,18 +52,16 @@ set_errno(int errnum) #endif } -/* Get last error code */ -JEMALLOC_INLINE int -get_errno(void) -{ - +/* Get last error code. */ +UTIL_INLINE int +get_errno(void) { #ifdef _WIN32 - return (GetLastError()); + return GetLastError(); #else - return (errno); + return errno; #endif } -#endif -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#undef UTIL_INLINE + +#endif /* JEMALLOC_INTERNAL_UTIL_H */ diff --git a/deps/jemalloc/include/jemalloc/jemalloc.h b/deps/jemalloc/include/jemalloc/jemalloc.h index b8ea851e52..6ffe5c71b3 100644 --- a/deps/jemalloc/include/jemalloc/jemalloc.h +++ b/deps/jemalloc/include/jemalloc/jemalloc.h @@ -1,45 +1,203 @@ #ifndef JEMALLOC_H_ -#define JEMALLOC_H_ +#define JEMALLOC_H_ #ifdef __cplusplus extern "C" { #endif +/* Defined if __attribute__((...)) syntax is supported. */ +#define JEMALLOC_HAVE_ATTR + +/* Defined if alloc_size attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE + +/* Defined if format(gnu_printf, ...) attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF + +/* Defined if format(printf, ...) attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE_MEMALIGN +#define JEMALLOC_OVERRIDE_VALLOC + +/* + * At least Linux omits the "const" in: + * + * size_t malloc_usable_size(const void *ptr); + * + * Match the operating system's prototype. + */ +#define JEMALLOC_USABLE_SIZE_CONST + +/* + * If defined, specify throw() for the public function prototypes when compiling + * with C++. The only justification for this is to match the prototypes that + * glibc defines. 
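A small usage sketch (hypothetical helper, not from the patch) showing how the new util.h pieces are meant to compose inside jemalloc-internal code: unlikely() only hints branch layout, and set_errno() picks the platform-appropriate error channel.

#include <stdbool.h>
#include "jemalloc/internal/util.h"

/* Record an error code and tell the caller whether anything went wrong. */
static inline bool
example_set_error(int err) {
	if (unlikely(err != 0)) {
		set_errno(err);
		return true;
	}
	return false;
}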
+ */ +#define JEMALLOC_USE_CXX_THROW + +#ifdef _MSC_VER +# ifdef _WIN64 +# define LG_SIZEOF_PTR_WIN 3 +# else +# define LG_SIZEOF_PTR_WIN 2 +# endif +#endif + +/* + * Name mangling for public symbols is controlled by --with-mangling and + * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by + * these macro definitions. + */ +#ifndef JEMALLOC_NO_RENAME +# define je_aligned_alloc aligned_alloc +# define je_calloc calloc +# define je_dallocx dallocx +# define je_free free +# define je_mallctl mallctl +# define je_mallctlbymib mallctlbymib +# define je_mallctlnametomib mallctlnametomib +# define je_malloc malloc +# define je_malloc_conf malloc_conf +# define je_malloc_message malloc_message +# define je_malloc_stats_print malloc_stats_print +# define je_malloc_usable_size malloc_usable_size +# define je_mallocx mallocx +# define je_nallocx nallocx +# define je_posix_memalign posix_memalign +# define je_rallocx rallocx +# define je_realloc realloc +# define je_sallocx sallocx +# define je_sdallocx sdallocx +# define je_xallocx xallocx +# define je_memalign memalign +# define je_valloc valloc +#endif + +#include <stdlib.h> +#include <stdbool.h> +#include <stdint.h> #include <limits.h> #include <strings.h> -#define JEMALLOC_VERSION "3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340" -#define JEMALLOC_VERSION_MAJOR 3 -#define JEMALLOC_VERSION_MINOR 6 -#define JEMALLOC_VERSION_BUGFIX 0 -#define JEMALLOC_VERSION_NREV 0 -#define JEMALLOC_VERSION_GID "46c0af68bd248b04df75e4f92d5fb804c3d75340" +#define JEMALLOC_VERSION "5.0.1-0-g896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb" +#define JEMALLOC_VERSION_MAJOR 5 +#define JEMALLOC_VERSION_MINOR 0 +#define JEMALLOC_VERSION_BUGFIX 1 +#define JEMALLOC_VERSION_NREV 0 +#define JEMALLOC_VERSION_GID "896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb" + +#define MALLOCX_LG_ALIGN(la) ((int)(la)) +#if LG_SIZEOF_PTR == 2 +# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) +#else +# define MALLOCX_ALIGN(a) \ + ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ + ffs((int)(((size_t)(a))>>32))+31)) +#endif +#define MALLOCX_ZERO ((int)0x40) +/* + * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 + * encodes MALLOCX_TCACHE_NONE. + */ +#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) +#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) +/* + * Bias arena index bits so that 0 encodes "use an automatically chosen arena". + */ +#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +/* + * Use as arena index in "arena.<i>.{purge,decay,dss}" and + * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This + * definition is intentionally specified in raw decimal format to support + * cpp-based string concatenation, e.g. + * + * #define STRINGIFY_HELPER(x) #x + * #define STRINGIFY(x) STRINGIFY_HELPER(x) + * + * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, + * 0); + */ +#define MALLCTL_ARENAS_ALL 4096 +/* + * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select + * destroyed arenas. 
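A consumer-side sketch of the new flag macros and MALLCTL_ARENAS_ALL (illustrative; the include path and error handling are simplified, and the purge call is the header's own example spelled out):

#include <jemalloc/jemalloc.h>

#define STRINGIFY_HELPER(x) #x
#define STRINGIFY(x) STRINGIFY_HELPER(x)

int main(void) {
	/* 1 MiB, 64-byte aligned, zero-filled, bypassing the thread cache. */
	void *p = mallocx(1 << 20,
	    MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
	if (p == NULL) {
		return 1;
	}
	dallocx(p, MALLOCX_TCACHE_NONE);

	/* Purge unused dirty pages in every arena. */
	mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge",
	    NULL, NULL, NULL, 0);
	return 0;
}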
+ */ +#define MALLCTL_ARENAS_DESTROYED 4097 -# define MALLOCX_LG_ALIGN(la) (la) -# if LG_SIZEOF_PTR == 2 -# define MALLOCX_ALIGN(a) (ffs(a)-1) +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#if defined(_MSC_VER) +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# endif +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) # else -# define MALLOCX_ALIGN(a) \ - ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # endif -# define MALLOCX_ZERO ((int)0x40) -/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */ -# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8)) - -#ifdef JEMALLOC_EXPERIMENTAL -# define ALLOCM_LG_ALIGN(la) (la) -# if LG_SIZEOF_PTR == 2 -# define ALLOCM_ALIGN(a) (ffs(a)-1) +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) # else -# define ALLOCM_ALIGN(a) \ - ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) +# define JEMALLOC_FORMAT_PRINTF(s, i) # endif -# define ALLOCM_ZERO ((int)0x40) -# define ALLOCM_NO_MOVE ((int)0x80) -/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". 
*/ -# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8)) -# define ALLOCM_SUCCESS 0 -# define ALLOCM_ERR_OOM 1 -# define ALLOCM_ERR_NOT_MOVED 2 +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR #endif /* @@ -51,55 +209,141 @@ extern JEMALLOC_EXPORT const char *je_malloc_conf; extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, const char *s); -JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size) - JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment, - size_t size) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size) - JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size); -JEMALLOC_EXPORT void je_free(void *ptr); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_malloc(size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, + size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, + size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) + JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) + JEMALLOC_CXX_THROW; -JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags); -JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags); -JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra, +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, + int flags) JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, + size_t extra, int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, + int flags) JEMALLOC_ATTR(pure); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, int flags); -JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags); -JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags); -JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags); - -JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp, - size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, 
size_t *mibp, - size_t *miblenp); -JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen, +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) + JEMALLOC_ATTR(pure); + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *, - const char *), void *je_cbopaque, const char *opts); -JEMALLOC_EXPORT size_t je_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, + size_t *mibp, size_t *miblenp); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( + void (*write_cb)(void *, const char *), void *je_cbopaque, + const char *opts); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( + JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; #ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size) - JEMALLOC_ATTR(malloc); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); #endif #ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW + JEMALLOC_ATTR(malloc); #endif -#ifdef JEMALLOC_EXPERIMENTAL -JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size, - int flags) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size, - size_t extra, int flags) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags) - JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags) - JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); -#endif +typedef struct extent_hooks_s extent_hooks_t; + +/* + * void * + * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + * size_t alignment, bool *zero, bool *commit, unsigned arena_ind); + */ +typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *, + bool *, unsigned); + +/* + * bool + * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * void + * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * bool + * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t, + size_t, unsigned); + +/* + * bool + * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned 
arena_ind); + */ +typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t size_a, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + bool, unsigned); + +/* + * bool + * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + * void *addr_b, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t, + bool, unsigned); + +struct extent_hooks_s { + extent_alloc_t *alloc; + extent_dalloc_t *dalloc; + extent_destroy_t *destroy; + extent_commit_t *commit; + extent_decommit_t *decommit; + extent_purge_t *purge_lazy; + extent_purge_t *purge_forced; + extent_split_t *split; + extent_merge_t *merge; +}; /* * By default application code must explicitly refer to mangled symbol names, @@ -112,32 +356,28 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); # ifndef JEMALLOC_NO_DEMANGLE # define JEMALLOC_NO_DEMANGLE # endif -# define malloc_conf je_malloc_conf -# define malloc_message je_malloc_message -# define malloc je_malloc -# define calloc je_calloc -# define posix_memalign je_posix_memalign # define aligned_alloc je_aligned_alloc -# define realloc je_realloc -# define free je_free -# define mallocx je_mallocx -# define rallocx je_rallocx -# define xallocx je_xallocx -# define sallocx je_sallocx +# define calloc je_calloc # define dallocx je_dallocx -# define nallocx je_nallocx +# define free je_free # define mallctl je_mallctl -# define mallctlnametomib je_mallctlnametomib # define mallctlbymib je_mallctlbymib +# define mallctlnametomib je_mallctlnametomib +# define malloc je_malloc +# define malloc_conf je_malloc_conf +# define malloc_message je_malloc_message # define malloc_stats_print je_malloc_stats_print # define malloc_usable_size je_malloc_usable_size +# define mallocx je_mallocx +# define nallocx je_nallocx +# define posix_memalign je_posix_memalign +# define rallocx je_rallocx +# define realloc je_realloc +# define sallocx je_sallocx +# define sdallocx je_sdallocx +# define xallocx je_xallocx # define memalign je_memalign # define valloc je_valloc -# define allocm je_allocm -# define dallocm je_dallocm -# define nallocm je_nallocm -# define rallocm je_rallocm -# define sallocm je_sallocm #endif /* @@ -148,35 +388,31 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); * and/or --with-jemalloc-prefix. 
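For orientation, a minimal sketch of a user-supplied hook table conforming to the typedefs above. The stub bodies are placeholders only (a real alloc hook must return memory of the requested size and alignment and honor *zero/*commit); a table is installed, and the previous one read back, through the "arena.<i>.extent_hooks" mallctl.

#include <stddef.h>
#include <stdbool.h>
#include <jemalloc/jemalloc.h>

/* Stub alloc hook: returning NULL reports that the hook could not satisfy
 * the request. */
static void *
stub_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	return NULL;
}

/* Stub dalloc hook: returning true opts out, so jemalloc retains the
 * extent for later reuse. */
static bool
stub_dalloc(extent_hooks_t *hooks, void *addr, size_t size, bool committed,
    unsigned arena_ind) {
	return true;
}

/* The remaining members follow the same pattern and are omitted from this
 * sketch. */
static extent_hooks_t stub_hooks = {
	.alloc = stub_alloc,
	.dalloc = stub_dalloc,
};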
*/ #ifndef JEMALLOC_NO_DEMANGLE -# undef je_malloc_conf -# undef je_malloc_message -# undef je_malloc -# undef je_calloc -# undef je_posix_memalign # undef je_aligned_alloc -# undef je_realloc -# undef je_free -# undef je_mallocx -# undef je_rallocx -# undef je_xallocx -# undef je_sallocx +# undef je_calloc # undef je_dallocx -# undef je_nallocx +# undef je_free # undef je_mallctl -# undef je_mallctlnametomib # undef je_mallctlbymib +# undef je_mallctlnametomib +# undef je_malloc +# undef je_malloc_conf +# undef je_malloc_message # undef je_malloc_stats_print # undef je_malloc_usable_size +# undef je_mallocx +# undef je_nallocx +# undef je_posix_memalign +# undef je_rallocx +# undef je_realloc +# undef je_sallocx +# undef je_sdallocx +# undef je_xallocx # undef je_memalign # undef je_valloc -# undef je_allocm -# undef je_dallocm -# undef je_nallocm -# undef je_rallocm -# undef je_sallocm #endif #ifdef __cplusplus -}; +} #endif #endif /* JEMALLOC_H_ */ diff --git a/deps/jemalloc/include/msvc_compat/strings.h b/deps/jemalloc/include/msvc_compat/strings.h index c84975b6b8..996f256ce8 100644 --- a/deps/jemalloc/include/msvc_compat/strings.h +++ b/deps/jemalloc/include/msvc_compat/strings.h @@ -3,21 +3,56 @@ /* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided * for both */ -#include <intrin.h> -#pragma intrinsic(_BitScanForward) -static __forceinline int ffsl(long x) -{ +#ifdef _MSC_VER +# include <intrin.h> +# pragma intrinsic(_BitScanForward) +static __forceinline int ffsl(long x) { unsigned long i; - if (_BitScanForward(&i, x)) - return (i + 1); - return (0); + if (_BitScanForward(&i, x)) { + return i + 1; + } + return 0; } -static __forceinline int ffs(int x) -{ +static __forceinline int ffs(int x) { + return ffsl(x); +} + +# ifdef _M_X64 +# pragma intrinsic(_BitScanForward64) +# endif + +static __forceinline int ffsll(unsigned __int64 x) { + unsigned long i; +#ifdef _M_X64 + if (_BitScanForward64(&i, x)) { + return i + 1; + } + return 0; +#else +// Fallback for 32-bit build where 64-bit version not available +// assuming little endian + union { + unsigned __int64 ll; + unsigned long l[2]; + } s; - return (ffsl(x)); + s.ll = x; + + if (_BitScanForward(&i, s.l[0])) { + return i + 1; + } else if(_BitScanForward(&i, s.l[1])) { + return i + 33; + } + return 0; +#endif } +#else +# define ffsll(x) __builtin_ffsll(x) +# define ffsl(x) __builtin_ffsl(x) +# define ffs(x) __builtin_ffs(x) #endif + +#endif /* strings_h */ diff --git a/deps/jemalloc/jemalloc_defs.h.in.cmake b/deps/jemalloc/jemalloc_defs.h.in.cmake index 04512ccf6a..769aa227c7 100644 --- a/deps/jemalloc/jemalloc_defs.h.in.cmake +++ b/deps/jemalloc/jemalloc_defs.h.in.cmake @@ -1,4 +1,6 @@ -/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ +/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ /* * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all * public APIs to be prefixed. This makes it possible, with some care, to use @@ -8,30 +10,16 @@ /* #undef JEMALLOC_CPREFIX */ /* - * Name mangling for public symbols is controlled by --with-mangling and - * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by - * these macro definitions. 
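A quick sanity check of the shim's 64-bit scan, and in particular the 32-bit fallback path above: bit 0 of the high word is overall bit 32, so ffsll must report 33. This small test (not part of the patch) assumes the msvc_compat shim is reachable as "strings.h"; on other compilers it simply exercises the builtin.

#include <assert.h>
#include "strings.h"	/* msvc_compat shim above, or the platform builtin */

int main(void) {
	assert(ffsll(0) == 0);
	assert(ffsll(1) == 1);			/* lowest bit of the low word */
	assert(ffsll(0x100000000ULL) == 33);	/* lowest bit of the high word */
	return 0;
}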
- */ -#define je_malloc_conf malloc_conf -#define je_malloc_message malloc_message -#define je_malloc malloc -#define je_calloc calloc -#define je_posix_memalign posix_memalign -#define je_aligned_alloc aligned_alloc -#define je_realloc realloc -#define je_free free -#define je_malloc_usable_size malloc_usable_size -#define je_malloc_stats_print malloc_stats_print -#define je_mallctl mallctl -#define je_mallctlnametomib mallctlnametomib -#define je_mallctlbymib mallctlbymib -#define je_memalign memalign -#define je_valloc valloc -#define je_allocm allocm -#define je_rallocm rallocm -#define je_sallocm sallocm -#define je_dallocm dallocm -#define je_nallocm nallocm + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE___LIBC_CALLOC +#define JEMALLOC_OVERRIDE___LIBC_FREE +#define JEMALLOC_OVERRIDE___LIBC_MALLOC +#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN +#define JEMALLOC_OVERRIDE___LIBC_REALLOC +#define JEMALLOC_OVERRIDE___LIBC_VALLOC +/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */ /* * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. @@ -39,8 +27,7 @@ * from being exported, but for static libraries, naming collisions are a real * possibility. */ -#define JEMALLOC_PRIVATE_NAMESPACE "" -#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix +#define JEMALLOC_PRIVATE_NAMESPACE je_ /* * Hyper-threaded CPUs may need a special instruction inside spin loops in @@ -48,20 +35,27 @@ */ #define CPU_SPINWAIT __asm__ volatile("pause") -/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ -/* #undef JEMALLOC_ATOMIC9 */ - /* - * Defined if OSAtomic*() functions are available, as provided by Darwin, and - * documented in the atomic(3) manual page. + * Number of significant bits in virtual addresses. This may be less than the + * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 + * bits are the same as bit 47. */ -/* #undef JEMALLOC_OSATOMIC */ +#define LG_VADDR @JEM_VADDRBITS@ + +/* Defined if C11 atomics are available. */ +#define JEMALLOC_C11_ATOMICS 1 + +/* Defined if GCC __atomic atomics are available. */ +#define JEMALLOC_GCC_ATOMIC_ATOMICS 1 + +/* Defined if GCC __sync atomics are available. */ +#define JEMALLOC_GCC_SYNC_ATOMICS 1 /* * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the - * functions are defined in libgcc instead of being inlines) + * functions are defined in libgcc instead of being inlines). */ /* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ @@ -69,16 +63,60 @@ * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the - * functions are defined in libgcc instead of being inlines) + * functions are defined in libgcc instead of being inlines). */ /* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ /* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#define JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. 
+ */ +/* #undef JEMALLOC_OS_UNFAIR_LOCK */ + +/* * Defined if OSSpin*() functions are available, as provided by Darwin, and * documented in the spinlock(3) manual page. */ /* #undef JEMALLOC_OSSPIN */ +/* Defined if syscall(2) is usable. */ +#define JEMALLOC_USE_SYSCALL + +/* + * Defined if secure_getenv(3) is available. + */ +#define JEMALLOC_HAVE_SECURE_GETENV + +/* + * Defined if issetugid(2) is available. + */ +/* #undef JEMALLOC_HAVE_ISSETUGID */ + +/* Defined if pthread_atfork(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_ATFORK + +/* Defined if pthread_setname_np(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1 + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1 + +/* + * Defined if mach_absolute_time() is available. + */ +/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ + /* * Defined if _malloc_thread_cleanup() exists. At least in the case of * FreeBSD, pthread_key_create() allocates, which if used during malloc @@ -102,41 +140,9 @@ */ /* #undef JEMALLOC_MUTEX_INIT_CB */ -/* Defined if __attribute__((...)) syntax is supported. */ -#define JEMALLOC_HAVE_ATTR -#ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ATTR(s) __attribute__((s)) -# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) -# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) -# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) -# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) -#elif _MSC_VER -# define JEMALLOC_ATTR(s) -# ifdef DLLEXPORT -# define JEMALLOC_EXPORT __declspec(dllexport) -# else -# define JEMALLOC_EXPORT __declspec(dllimport) -# endif -# define JEMALLOC_ALIGNED(s) __declspec(align(s)) -# define JEMALLOC_SECTION(s) __declspec(allocate(s)) -# define JEMALLOC_NOINLINE __declspec(noinline) -#else -# define JEMALLOC_ATTR(s) -# define JEMALLOC_EXPORT -# define JEMALLOC_ALIGNED(s) -# define JEMALLOC_SECTION(s) -# define JEMALLOC_NOINLINE -#endif - -/* Defined if sbrk() is supported. */ -#define JEMALLOC_HAVE_SBRK - /* Non-empty if the tls_model attribute is supported. */ #define JEMALLOC_TLS_MODEL @JEM_TLSMODEL@ -/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. */ -/* #undef JEMALLOC_CC_SILENCE */ - /* * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables * inline functions. @@ -144,7 +150,7 @@ /* #undef JEMALLOC_DEBUG */ /* JEMALLOC_STATS enables statistics calculation. */ -#define JEMALLOC_STATS +/* #undef JEMALLOC_STATS */ /* JEMALLOC_PROF enables allocation profiling. */ /* #undef JEMALLOC_PROF */ @@ -159,104 +165,127 @@ /* #undef JEMALLOC_PROF_GCC */ /* - * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. - * This makes it possible to allocate/deallocate objects without any locking - * when the cache is in the steady state. - */ -#define JEMALLOC_TCACHE - -/* - * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage + * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage * segment (DSS). */ -/* #undef JEMALLOC_DSS */ +#define JEMALLOC_DSS -/* Support memory filling (junk/zero/quarantine/redzone). */ +/* Support memory filling (junk/zero). */ #define JEMALLOC_FILL -/* Support the experimental API. */ -#define JEMALLOC_EXPERIMENTAL - /* Support utrace(2)-based tracing. */ /* #undef JEMALLOC_UTRACE */ -/* Support Valgrind. 
*/ -/* #undef JEMALLOC_VALGRIND */ - /* Support optional abort() on OOM. */ /* #undef JEMALLOC_XMALLOC */ /* Support lazy locking (avoid locking unless a second thread is launched). */ /* #undef JEMALLOC_LAZY_LOCK */ -/* One page is 2^STATIC_PAGE_SHIFT bytes. */ -#define STATIC_PAGE_SHIFT 12 +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +/* #undef LG_QUANTUM */ + +/* One page is 2^LG_PAGE bytes. */ +#define LG_PAGE 12 /* - * If defined, use munmap() to unmap freed chunks, rather than storing them for - * later reuse. This is disabled by default on Linux because common sequences - * of mmap()/munmap() calls will cause virtual memory map holes. + * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the + * system does not explicitly support huge pages; system calls that require + * explicit huge page support are separately configured. */ -/* #undef JEMALLOC_MUNMAP */ +#define LG_HUGEPAGE 21 /* - * If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is - * disabled by default because it is Linux-specific and it will cause virtual - * memory map holes, much like munmap(2) does. + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. */ -/* #undef JEMALLOC_MREMAP */ +#define JEMALLOC_MAPS_COALESCE + +/* + * If defined, retain memory for later reuse by default rather than using e.g. + * munmap() to unmap freed extents. This is enabled on 64-bit Linux because + * common sequences of mmap()/munmap() calls will cause virtual memory map + * holes. + */ +#define JEMALLOC_RETAIN /* TLS is used to map arenas and magazine caches to threads. */ #define JEMALLOC_TLS /* - * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside - * within jemalloc-owned chunks before dereferencing them. + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h */ -/* #undef JEMALLOC_IVSALLOC */ +#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable /* - * Define overrides for non-standard allocator-related functions if they - * are present on the system. + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. */ -#define JEMALLOC_OVERRIDE_MEMALIGN -#define JEMALLOC_OVERRIDE_VALLOC +#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll +#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl +#define JEMALLOC_INTERNAL_FFS __builtin_ffs /* - * At least Linux omits the "const" in: - * - * size_t malloc_usable_size(const void *ptr); - * - * Match the operating system's prototype. + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. */ -#define JEMALLOC_USABLE_SIZE_CONST +#define JEMALLOC_CACHE_OBLIVIOUS /* * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. */ /* #undef JEMALLOC_ZONE */ -/* #undef JEMALLOC_ZONE_VERSION */ + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. 
+ */ +/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ +#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#define JEMALLOC_HAVE_MADVISE /* * Methods for purging unused pages differ between operating systems. * - * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is + * defined, this immediately discards pages, * such that new pages will be demand-zeroed if - * the address region is later touched. - * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being - * unused, such that they will be discarded rather - * than swapped out. + * the address region is later touched; + * otherwise this behaves similarly to + * MADV_FREE, though typically with higher + * system overhead. */ +@JEM_MADFREE_DEF@ JEMALLOC_PURGE_MADVISE_FREE #define JEMALLOC_PURGE_MADVISE_DONTNEED -/* #undef JEMALLOC_PURGE_MADVISE_FREE */ +#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS 1 /* - * Define if operating system has alloca.h header. + * Defined if transparent huge pages (THPs) are supported via the + * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. */ -#define JEMALLOC_HAS_ALLOCA_H +#define JEMALLOC_THP -/* sizeof(void *) == 2^LG_SIZEOF_PTR. */ -#define LG_SIZEOF_PTR @JEM_SIZEDEF@ +/* Define if operating system has alloca.h header. */ +#define JEMALLOC_HAS_ALLOCA_H 1 + +/* C99 restrict keyword supported. */ +#define JEMALLOC_HAS_RESTRICT 1 + +/* For use by hash code. */ +/* #undef JEMALLOC_BIG_ENDIAN */ /* sizeof(int) == 2^LG_SIZEOF_INT. */ #define LG_SIZEOF_INT 2 @@ -264,11 +293,51 @@ /* sizeof(long) == 2^LG_SIZEOF_LONG. */ #define LG_SIZEOF_LONG @JEM_SIZEDEF@ +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#define LG_SIZEOF_LONG_LONG 3 + /* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ #define LG_SIZEOF_INTMAX_T 3 -/* C99 restrict keyword supported. */ -/*#define JEMALLOC_HAS_RESTRICT*/ +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#define JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. */ +#define JEMALLOC_GLIBC_MEMALIGN_HOOK + +/* pthread support */ +#define JEMALLOC_HAVE_PTHREAD + +/* dlsym() support */ +#define JEMALLOC_HAVE_DLSYM + +/* Adaptive mutex support in pthreads. */ +#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP + +/* GNU specific sched_getcpu support */ +#define JEMALLOC_HAVE_SCHED_GETCPU + +/* GNU specific sched_setaffinity support */ +#define JEMALLOC_HAVE_SCHED_SETAFFINITY + +/* + * If defined, all the features necessary for background threads are present. + */ +#define JEMALLOC_BACKGROUND_THREAD 1 + +/* + * If defined, jemalloc symbols are not exported (doesn't work when + * JEMALLOC_PREFIX is not defined). + */ +/* #undef JEMALLOC_EXPORT */ + +/* config.malloc_conf options string. */ +#define JEMALLOC_CONFIG_MALLOC_CONF "" + +/* If defined, jemalloc takes the malloc/free/etc. symbol names. */ +#define JEMALLOC_IS_MALLOC 1 + +/* sizeof(void *) == 2^LG_SIZEOF_PTR. */ +#define LG_SIZEOF_PTR @JEM_SIZEDEF@ -/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ -#undef JEMALLOC_CODE_COVERAGE +#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
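To make the template concrete: on a typical 64-bit Linux build where MADV_FREE is available, the placeholder-bearing lines above would be rendered roughly as follows (values are illustrative, not copied from a generated header; when MADV_FREE is absent the purge line becomes #undef JEMALLOC_PURGE_MADVISE_FREE instead).

#define LG_VADDR 48
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
#define JEMALLOC_PURGE_MADVISE_FREE
#define LG_SIZEOF_LONG 3
#define LG_SIZEOF_PTR 3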
\ No newline at end of file diff --git a/deps/jemalloc/src/arena.c b/deps/jemalloc/src/arena.c index dad707b63d..632fce5233 100644 --- a/deps/jemalloc/src/arena.c +++ b/deps/jemalloc/src/arena.c @@ -1,40 +1,57 @@ -#define JEMALLOC_ARENA_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_ARENA_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. */ -ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; -arena_bin_info_t arena_bin_info[NBINS]; - -JEMALLOC_ALIGNED(CACHELINE) -const uint8_t small_size2bin[] = { -#define S2B_8(i) i, -#define S2B_16(i) S2B_8(i) S2B_8(i) -#define S2B_32(i) S2B_16(i) S2B_16(i) -#define S2B_64(i) S2B_32(i) S2B_32(i) -#define S2B_128(i) S2B_64(i) S2B_64(i) -#define S2B_256(i) S2B_128(i) S2B_128(i) -#define S2B_512(i) S2B_256(i) S2B_256(i) -#define S2B_1024(i) S2B_512(i) S2B_512(i) -#define S2B_2048(i) S2B_1024(i) S2B_1024(i) -#define S2B_4096(i) S2B_2048(i) S2B_2048(i) -#define S2B_8192(i) S2B_4096(i) S2B_4096(i) -#define SIZE_CLASS(bin, delta, size) \ - S2B_##delta(bin) +/* + * Define names for both unininitialized and initialized phases, so that + * options and mallctl processing are straightforward. + */ +const char *percpu_arena_mode_names[] = { + "percpu", + "phycpu", + "disabled", + "percpu", + "phycpu" +}; +percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; + +ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; +ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; + +static atomic_zd_t dirty_decay_ms_default; +static atomic_zd_t muzzy_decay_ms_default; + +const arena_bin_info_t arena_bin_info[NBINS] = { +#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \ + {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)}, +#define BIN_INFO_bin_no(reg_size, slab_size, nregs) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ + lg_delta_lookup) \ + BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \ + (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \ + (ndelta<<lg_delta))) SIZE_CLASSES -#undef S2B_8 -#undef S2B_16 -#undef S2B_32 -#undef S2B_64 -#undef S2B_128 -#undef S2B_256 -#undef S2B_512 -#undef S2B_1024 -#undef S2B_2048 -#undef S2B_4096 -#undef S2B_8192 -#undef SIZE_CLASS +#undef BIN_INFO_bin_yes +#undef BIN_INFO_bin_no +#undef SC +}; + +const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP }; /******************************************************************************/ @@ -43,2535 +60,2120 @@ const uint8_t small_size2bin[] = { * definition. 
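The opt_dirty_decay_ms / opt_muzzy_decay_ms defaults declared above replace the old lg_dirty_mult purging knob and are configurable at startup; as a hedged example (option names follow jemalloc's opt.<name> convention), an application could set a 10 second dirty-page decay and have muzzy pages purged immediately:

#include <jemalloc/jemalloc.h>

/* Read by jemalloc during initialization. */
const char *malloc_conf = "dirty_decay_ms:10000,muzzy_decay_ms:0";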
*/ -static void arena_purge(arena_t *arena, bool all); -static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned); -static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); -static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); +static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, + bool is_background_thread); +static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread, bool all); +static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + arena_bin_t *bin); +static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + arena_bin_t *bin); /******************************************************************************/ -static inline int -arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - uintptr_t a_mapelm = (uintptr_t)a; - uintptr_t b_mapelm = (uintptr_t)b; - - assert(a != NULL); - assert(b != NULL); - - return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); -} - -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, - u.rb_link, arena_run_comp) - -static inline int -arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - int ret; - size_t a_size = a->bits & ~PAGE_MASK; - size_t b_size = b->bits & ~PAGE_MASK; - - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_mapelm, b_mapelm; - - if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY) - a_mapelm = (uintptr_t)a; - else { - /* - * Treat keys as though they are lower than anything - * else. - */ - a_mapelm = 0; +static bool +arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) { + if (config_debug) { + for (size_t i = 0; i < sizeof(arena_stats_t); i++) { + assert(((char *)arena_stats)[i] == 0); } - b_mapelm = (uintptr_t)b; - - ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); } - - return (ret); +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&arena_stats->mtx, "arena_stats", + WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { + return true; + } +#endif + /* Memory is zeroed, so there is no need to clear stats. */ + return false; } -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, - u.rb_link, arena_avail_comp) - -static inline int -arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b) -{ - - assert(a != NULL); - assert(b != NULL); - - /* - * Short-circuit for self comparison. The following comparison code - * would come to the same result, but at the cost of executing the slow - * path. - */ - if (a == b) - return (0); - - /* - * Order such that chunks with higher fragmentation are "less than" - * those with lower fragmentation -- purging order is from "least" to - * "greatest". Fragmentation is measured as: - * - * mean current avail run size - * -------------------------------- - * mean defragmented avail run size - * - * navail - * ----------- - * nruns_avail nruns_avail-nruns_adjac - * = ========================= = ----------------------- - * navail nruns_avail - * ----------------------- - * nruns_avail-nruns_adjac - * - * The following code multiplies away the denominator prior to - * comparison, in order to avoid division. 
- * - */ - { - size_t a_val = (a->nruns_avail - a->nruns_adjac) * - b->nruns_avail; - size_t b_val = (b->nruns_avail - b->nruns_adjac) * - a->nruns_avail; +static void +arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_lock(tsdn, &arena_stats->mtx); +#endif +} - if (a_val < b_val) - return (1); - if (a_val > b_val) - return (-1); - } - /* - * Break ties by chunk address. For fragmented chunks, report lower - * addresses as "lower", so that fragmentation reduction happens first - * at lower addresses. However, use the opposite ordering for - * unfragmented chunks, in order to increase the chances of - * re-allocating dirty runs. - */ - { - uintptr_t a_chunk = (uintptr_t)a; - uintptr_t b_chunk = (uintptr_t)b; - int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk)); - if (a->nruns_adjac == 0) { - assert(b->nruns_adjac == 0); - ret = -ret; - } - return (ret); - } +static void +arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_unlock(tsdn, &arena_stats->mtx); +#endif } -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t, - dirty_link, arena_chunk_dirty_comp) +static uint64_t +arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, + arena_stats_u64_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_u64(p, ATOMIC_RELAXED); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + return *p; +#endif +} -static inline bool -arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind) -{ - bool ret; +static void +arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, + arena_stats_u64_t *p, uint64_t x) { +#ifdef JEMALLOC_ATOMIC_U64 + atomic_fetch_add_u64(p, x, ATOMIC_RELAXED); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + *p += x; +#endif +} - if (pageind-1 < map_bias) - ret = false; - else { - ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0); - assert(ret == false || arena_mapbits_dirty_get(chunk, - pageind-1) != arena_mapbits_dirty_get(chunk, pageind)); - } - return (ret); +UNUSED static void +arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, + arena_stats_u64_t *p, uint64_t x) { +#ifdef JEMALLOC_ATOMIC_U64 + UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED); + assert(r - x <= r); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + *p -= x; + assert(*p + x >= *p); +#endif } -static inline bool -arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages) -{ - bool ret; +/* + * Non-atomically sets *dst += src. *dst needs external synchronization. + * This lets us avoid the cost of a fetch_add when its unnecessary (note that + * the types here are atomic). 
+ */ +static void +arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) { +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); + atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED); +#else + *dst += src; +#endif +} - if (pageind+npages == chunk_npages) - ret = false; - else { - assert(pageind+npages < chunk_npages); - ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0); - assert(ret == false || arena_mapbits_dirty_get(chunk, pageind) - != arena_mapbits_dirty_get(chunk, pageind+npages)); - } - return (ret); +static size_t +arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_zu(p, ATOMIC_RELAXED); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + return atomic_load_zu(p, ATOMIC_RELAXED); +#endif } -static inline bool -arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages) -{ +static void +arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, + size_t x) { +#ifdef JEMALLOC_ATOMIC_U64 + atomic_fetch_add_zu(p, x, ATOMIC_RELAXED); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); + atomic_store_zu(p, cur + x, ATOMIC_RELAXED); +#endif +} - return (arena_avail_adjac_pred(chunk, pageind) || - arena_avail_adjac_succ(chunk, pageind, npages)); +static void +arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, + size_t x) { +#ifdef JEMALLOC_ATOMIC_U64 + UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED); + assert(r - x <= r); +#else + malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); + size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); + atomic_store_zu(p, cur - x, ATOMIC_RELAXED); +#endif } +/* Like the _u64 variant, needs an externally synchronized *dst. */ static void -arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) -{ +arena_stats_accum_zu(atomic_zu_t *dst, size_t src) { + size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); + atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED); +} - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); +void +arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, + szind_t szind, uint64_t nrequests) { + arena_stats_lock(tsdn, arena_stats); + arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind - + NBINS].nrequests, nrequests); + arena_stats_unlock(tsdn, arena_stats); +} - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be inserted is clean. 
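As a usage note: on builds without JEMALLOC_ATOMIC_U64 the add/sub helpers assert that the stats mutex is held, so related counters are updated under a single lock/unlock pair, as arena_stats_large_nrequests_add() above does. A hypothetical helper (not in the patch) bumping a large size class's nmalloc and nrequests together would look like:

static void
arena_stats_large_alloc_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_u64(tsdn, arena_stats,
	    &arena_stats->lstats[szind - NBINS].nmalloc, 1);
	arena_stats_add_u64(tsdn, arena_stats,
	    &arena_stats->lstats[szind - NBINS].nrequests, nrequests);
	arena_stats_unlock(tsdn, arena_stats);
}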
- */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); +void +arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) { + arena_stats_lock(tsdn, arena_stats); + arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size); + arena_stats_unlock(tsdn, arena_stats); +} - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac++; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac++; - chunk->nruns_avail++; - assert(chunk->nruns_avail > chunk->nruns_adjac); +void +arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy) { + *nthreads += arena_nthreads_get(arena, false); + *dss = dss_prec_names[arena_dss_prec_get(arena)]; + *dirty_decay_ms = arena_dirty_decay_ms_get(arena); + *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); + *ndirty += extents_npages_get(&arena->extents_dirty); + *nmuzzy += extents_npages_get(&arena->extents_muzzy); +} - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty += npages; - chunk->ndirty += npages; +void +arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) { + cassert(config_stats); + + arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, + muzzy_decay_ms, nactive, ndirty, nmuzzy); + + size_t base_allocated, base_resident, base_mapped; + base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, + &base_mapped); + + arena_stats_lock(tsdn, &arena->stats); + + arena_stats_accum_zu(&astats->mapped, base_mapped + + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); + arena_stats_accum_zu(&astats->retained, + extents_npages_get(&arena->extents_retained) << LG_PAGE); + + arena_stats_accum_u64(&astats->decay_dirty.npurge, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.npurge)); + arena_stats_accum_u64(&astats->decay_dirty.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.nmadvise)); + arena_stats_accum_u64(&astats->decay_dirty.purged, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.purged)); + + arena_stats_accum_u64(&astats->decay_muzzy.npurge, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.npurge)); + arena_stats_accum_u64(&astats->decay_muzzy.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.nmadvise)); + arena_stats_accum_u64(&astats->decay_muzzy.purged, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.purged)); + + arena_stats_accum_zu(&astats->base, base_allocated); + arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); + arena_stats_accum_zu(&astats->resident, base_resident + + (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + + extents_npages_get(&arena->extents_dirty) + + extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); + + for (szind_t i = 0; i < NSIZES - NBINS; i++) { + uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nmalloc); + arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); + arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); + + 
uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].ndalloc); + arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); + arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); + + uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nrequests); + arena_stats_accum_u64(&lstats[i].nrequests, + nmalloc + nrequests); + arena_stats_accum_u64(&astats->nrequests_large, + nmalloc + nrequests); + + assert(nmalloc >= ndalloc); + assert(nmalloc - ndalloc <= SIZE_T_MAX); + size_t curlextents = (size_t)(nmalloc - ndalloc); + lstats[i].curlextents += curlextents; + arena_stats_accum_zu(&astats->allocated_large, + curlextents * sz_index2size(NBINS + i)); + } + + arena_stats_unlock(tsdn, &arena->stats); + + /* tcache_bytes counts currently cached bytes. */ + atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + tcache_t *tcache; + ql_foreach(tcache, &arena->tcache_ql, link) { + szind_t i = 0; + for (; i < NBINS; i++) { + tcache_bin_t *tbin = tcache_small_bin_get(tcache, i); + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } + for (; i < nhbins; i++) { + tcache_bin_t *tbin = tcache_large_bin_get(tcache, i); + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); + malloc_mutex_prof_read(tsdn, + &astats->mutex_prof_data[arena_prof_mutex_tcache_list], + &arena->tcache_ql_mtx); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + +#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ + malloc_mutex_lock(tsdn, &arena->mtx); \ + malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ + &arena->mtx); \ + malloc_mutex_unlock(tsdn, &arena->mtx); + + /* Gather per arena mutex profiling data. 
*/ + READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); + READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, + arena_prof_mutex_extent_avail) + READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx, + arena_prof_mutex_extents_dirty) + READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx, + arena_prof_mutex_extents_muzzy) + READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx, + arena_prof_mutex_extents_retained) + READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx, + arena_prof_mutex_decay_dirty) + READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx, + arena_prof_mutex_decay_muzzy) + READ_ARENA_MUTEX_PROF_DATA(base->mtx, + arena_prof_mutex_base) +#undef READ_ARENA_MUTEX_PROF_DATA + + nstime_copy(&astats->uptime, &arena->create_time); + nstime_update(&astats->uptime); + nstime_subtract(&astats->uptime, &arena->create_time); + + for (szind_t i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; - arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); + malloc_mutex_lock(tsdn, &bin->lock); + malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock); + bstats[i].nmalloc += bin->stats.nmalloc; + bstats[i].ndalloc += bin->stats.ndalloc; + bstats[i].nrequests += bin->stats.nrequests; + bstats[i].curregs += bin->stats.curregs; + bstats[i].nfills += bin->stats.nfills; + bstats[i].nflushes += bin->stats.nflushes; + bstats[i].nslabs += bin->stats.nslabs; + bstats[i].reslabs += bin->stats.reslabs; + bstats[i].curslabs += bin->stats.curslabs; + malloc_mutex_unlock(tsdn, &bin->lock); + } } -static void -arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) -{ - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be removed is clean. 
- */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); - - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac--; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac--; - chunk->nruns_avail--; - assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail - == 0 && chunk->nruns_adjac == 0)); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty -= npages; - chunk->ndirty -= npages; +void +arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, + extent); + if (arena_dirty_decay_ms_get(arena) == 0) { + arena_decay_dirty(tsdn, arena, false, true); + } else { + arena_background_thread_inactivity_check(tsdn, arena, false); } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); - - arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); } -static inline void * -arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) -{ +static void * +arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab, + const arena_bin_info_t *bin_info) { void *ret; - unsigned regind; - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - assert(run->nfree > 0); - assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); - - regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); - ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + - (uintptr_t)(bin_info->reg_interval * regind)); - run->nfree--; - if (regind == run->nextind) - run->nextind++; - assert(regind < run->nextind); - return (ret); -} - -static inline void -arena_run_reg_dalloc(arena_run_t *run, void *ptr) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - size_t binind = arena_ptr_small_binind_get(ptr, mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind = arena_run_regind(run, bin_info, ptr); - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - assert(run->nfree < bin_info->nregs); - /* Freeing an interior pointer can cause assertion failure. */ - assert(((uintptr_t)ptr - ((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset)) % - (uintptr_t)bin_info->reg_interval == 0); - assert((uintptr_t)ptr >= (uintptr_t)run + - (uintptr_t)bin_info->reg0_offset); - /* Freeing an unallocated pointer can cause assertion failure. 
*/ - assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + size_t regind; + + assert(extent_nfree_get(slab) > 0); + assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); - bitmap_unset(bitmap, &bin_info->bitmap_info, regind); - run->nfree++; + regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); + ret = (void *)((uintptr_t)extent_addr_get(slab) + + (uintptr_t)(bin_info->reg_size * regind)); + extent_nfree_dec(slab); + return ret; } -static inline void -arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) -{ +#ifndef JEMALLOC_JET +static +#endif +size_t +arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { + size_t diff, regind; - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), (npages << LG_PAGE)); - memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, - (npages << LG_PAGE)); -} + /* Freeing a pointer outside the slab can cause assertion failure. */ + assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); + assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); + /* Freeing an interior pointer can cause assertion failure. */ + assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % + (uintptr_t)arena_bin_info[binind].reg_size == 0); + + /* Avoid doing division with a variable divisor. */ + diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); + switch (binind) { +#define REGIND_bin_yes(index, reg_size) \ + case index: \ + regind = diff / (reg_size); \ + assert(diff == regind * (reg_size)); \ + break; +#define REGIND_bin_no(index, reg_size) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ + lg_delta_lookup) \ + REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta<<lg_delta)) + SIZE_CLASSES +#undef REGIND_bin_yes +#undef REGIND_bin_no +#undef SC + default: not_reached(); + } -static inline void -arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ + assert(regind < arena_bin_info[binind].nregs); - VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), PAGE); + return regind; } -static inline void -arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - size_t i; - UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); +static void +arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab, + arena_slab_data_t *slab_data, void *ptr) { + szind_t binind = extent_szind_get(slab); + const arena_bin_info_t *bin_info = &arena_bin_info[binind]; + size_t regind = arena_slab_regind(slab, binind, ptr); + + assert(extent_nfree_get(slab) < bin_info->nregs); + /* Freeing an unallocated pointer can cause assertion failure. 
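arena_slab_regind() above recovers a region index from a freed pointer without dividing by a runtime value: the SIZE_CLASSES expansion emits one switch case per bin, so every diff / reg_size sees a literal divisor the compiler can strength-reduce to a multiply and shift. A reduced sketch of the same trick with a made-up three-entry bin table (BIN_SIZES and regind_compute are invented names, not jemalloc API):

/* regind_sketch.c: illustrative reduction of the constant-divisor trick. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bin sizes; jemalloc generates its cases from SIZE_CLASSES. */
#define BIN_SIZES(X) X(0, 8) X(1, 48) X(2, 80)

static size_t
regind_compute(size_t binind, uintptr_t slab_base, uintptr_t ptr) {
    size_t diff = (size_t)(ptr - slab_base);
    switch (binind) {
    /* Each case divides by a literal, so the compiler can replace the
     * division with a multiply-and-shift on the hot deallocation path. */
#define CASE(index, reg_size)                                           \
    case index:                                                         \
        assert(diff % (reg_size) == 0);                                 \
        return diff / (reg_size);
    BIN_SIZES(CASE)
#undef CASE
    default:
        assert(0 && "unknown bin");
        return 0;
    }
}

int
main(void) {
    uintptr_t slab = 0x1000;
    /* The third region of the 48-byte bin starts at slab + 2 * 48. */
    printf("%zu\n", regind_compute(1, slab, slab + 96));    /* prints 2 */
    return 0;
}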
*/ + assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); - arena_run_page_mark_zeroed(chunk, run_ind); - for (i = 0; i < PAGE / sizeof(size_t); i++) - assert(p[i] == 0); + bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); + extent_nfree_inc(slab); } static void -arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) -{ +arena_nactive_add(arena_t *arena, size_t add_pages) { + atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); +} - if (config_stats) { - ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + - add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive - - sub_pages) << LG_PAGE); - if (cactive_diff != 0) - stats_cactive_add(cactive_diff); - } +static void +arena_nactive_sub(arena_t *arena, size_t sub_pages) { + assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); + atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); } static void -arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, - size_t flag_dirty, size_t need_pages) -{ - size_t total_pages, rem_pages; - - total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> - LG_PAGE; - assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == - flag_dirty); - assert(need_pages <= total_pages); - rem_pages = total_pages - need_pages; - - arena_avail_remove(arena, chunk, run_ind, total_pages, true, true); - arena_cactive_update(arena, need_pages, 0); - arena->nactive += need_pages; - - /* Keep track of trailing unused pages for later use. */ - if (rem_pages > 0) { - if (flag_dirty != 0) { - arena_mapbits_unallocated_set(chunk, - run_ind+need_pages, (rem_pages << LG_PAGE), - flag_dirty); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - flag_dirty); - } else { - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages)); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+total_pages-1)); - } - arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages, - false, true); +arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; + + cassert(config_stats); + + if (usize < LARGE_MINCLASS) { + usize = LARGE_MINCLASS; } + index = sz_size2index(usize); + hindex = (index >= NBINS) ? index - NBINS : 0; + + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].nmalloc, 1); } static void -arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, - bool remove, bool zero) -{ - arena_chunk_t *chunk; - size_t flag_dirty, run_ind, need_pages, i; +arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); + cassert(config_stats); - if (remove) { - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - need_pages); + if (usize < LARGE_MINCLASS) { + usize = LARGE_MINCLASS; } + index = sz_size2index(usize); + hindex = (index >= NBINS) ? 
index - NBINS : 0; - if (zero) { - if (flag_dirty == 0) { + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].ndalloc, 1); +} + +static void +arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, + size_t usize) { + arena_large_dalloc_stats_update(tsdn, arena, oldusize); + arena_large_malloc_stats_update(tsdn, arena, usize); +} + +extent_t * +arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool *zero) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + szind_t szind = sz_size2index(usize); + size_t mapped_add; + bool commit = true; + extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false, + szind, zero, &commit); + if (extent == NULL) { + extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, + false, szind, zero, &commit); + } + size_t size = usize + sz_large_pad; + if (extent == NULL) { + extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, + usize, sz_large_pad, alignment, false, szind, zero, + &commit); + if (config_stats) { /* - * The run is clean, so some pages may be zeroed (i.e. - * never before touched). + * extent may be NULL on OOM, but in that case + * mapped_add isn't used below, so there's no need to + * conditionlly set it to 0 here. */ - for (i = 0; i < need_pages; i++) { - if (arena_mapbits_unzeroed_get(chunk, run_ind+i) - != 0) - arena_run_zero(chunk, run_ind+i, 1); - else if (config_debug) { - arena_run_page_validate_zeroed(chunk, - run_ind+i); - } else { - arena_run_page_mark_zeroed(chunk, - run_ind+i); - } + mapped_add = size; + } + } else if (config_stats) { + mapped_add = 0; + } + + if (extent != NULL) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_malloc_stats_update(tsdn, arena, usize); + if (mapped_add != 0) { + arena_stats_add_zu(tsdn, &arena->stats, + &arena->stats.mapped, mapped_add); } - } else { - /* The run is dirty, so all pages must be zeroed. */ - arena_run_zero(chunk, run_ind, need_pages); + arena_stats_unlock(tsdn, &arena->stats); } - } else { - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); + arena_nactive_add(arena, size >> LG_PAGE); } - /* - * Set the last element first, in case the run only contains one page - * (i.e. both statements set the same element). 
- */ - arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); + return extent; } -static void -arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - arena_run_split_large_helper(arena, run, size, true, zero); +void +arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_dalloc_stats_update(tsdn, arena, + extent_usize_get(extent)); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); } -static void -arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ +void +arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = oldusize - usize; - arena_run_split_large_helper(arena, run, size, false, zero); + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_sub(arena, udiff >> LG_PAGE); } -static void -arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, - size_t binind) -{ - arena_chunk_t *chunk; - size_t flag_dirty, run_ind, need_pages, i; +void +arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = usize - oldusize; - assert(binind != BININD_INVALID); + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_add(arena, udiff >> LG_PAGE); +} - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); +static ssize_t +arena_decay_ms_read(arena_decay_t *decay) { + return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); +} - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); +static void +arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { + atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); +} +static void +arena_decay_deadline_init(arena_decay_t *decay) { /* - * Propagate the dirty and unzeroed flags to the allocated small run, - * so that arena_dalloc_bin_run() has the ability to conditionally trim - * clean pages. - */ - arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); - /* - * The first page will always be dirtied during small run - * initialization, so a validation failure here would not actually - * cause an observable failure. + * Generate a new deadline that is uniformly random within the next + * epoch after the current one. 
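arena_extent_alloc_large() above works through a fixed fallback order: reuse an extent from extents_dirty, then from extents_muzzy, and only then map fresh memory through extent_alloc_wrapper(), which is also the only branch that grows the mapped statistic. A stripped-down sketch of that cascade (cache_t, cache_take, os_map and alloc_large are invented stand-ins, not jemalloc functions):

/* alloc_cascade_sketch.c: reuse cached extents before mapping new memory. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { void *slots[8]; int n; } cache_t;

static void *
cache_take(cache_t *c, size_t size) {
    (void)size;                         /* a real cache would match sizes */
    return c->n > 0 ? c->slots[--c->n] : NULL;
}

static void *
os_map(size_t size, size_t *mapped_add) {
    *mapped_add = size;                 /* only fresh mappings grow "mapped" */
    return malloc(size);                /* stand-in for an mmap()-backed extent */
}

static void *
alloc_large(cache_t *dirty, cache_t *muzzy, size_t size, size_t *mapped_add) {
    *mapped_add = 0;
    void *p = cache_take(dirty, size);  /* 1. reuse a dirty extent */
    if (p == NULL) {
        p = cache_take(muzzy, size);    /* 2. reuse a muzzy extent */
    }
    if (p == NULL) {
        p = os_map(size, mapped_add);   /* 3. fall back to a new mapping */
    }
    return p;
}

int
main(void) {
    cache_t dirty = { {NULL}, 0 }, muzzy = { {NULL}, 0 };
    size_t mapped_add;
    void *p = alloc_large(&dirty, &muzzy, 4096, &mapped_add);
    printf("mapped_add = %zu\n", mapped_add);   /* 4096: nothing was cached */
    free(p);
    return 0;
}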
*/ - if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, - run_ind) == 0) - arena_run_page_validate_zeroed(chunk, run_ind); - for (i = 1; i < need_pages - 1; i++) { - arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) - arena_run_page_validate_zeroed(chunk, run_ind+i); - } - arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1, - binind, flag_dirty); - if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages-1) == 0) - arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1); - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); -} - -static arena_chunk_t * -arena_chunk_init_spare(arena_t *arena) -{ - arena_chunk_t *chunk; - - assert(arena->spare != NULL); - - chunk = arena->spare; - arena->spare = NULL; - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxclass); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_init_hard(arena_t *arena) -{ - arena_chunk_t *chunk; - bool zero; - size_t unzeroed, i; - - assert(arena->spare == NULL); - - zero = false; - malloc_mutex_unlock(&arena->lock); - chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false, - &zero, arena->dss_prec); - malloc_mutex_lock(&arena->lock); - if (chunk == NULL) - return (NULL); - if (config_stats) - arena->stats.mapped += chunksize; + nstime_copy(&decay->deadline, &decay->epoch); + nstime_add(&decay->deadline, &decay->interval); + if (arena_decay_ms_read(decay) > 0) { + nstime_t jitter; - chunk->arena = arena; + nstime_init(&jitter, prng_range_u64(&decay->jitter_state, + nstime_ns(&decay->interval))); + nstime_add(&decay->deadline, &jitter); + } +} - /* - * Claim that no pages are in use, since the header is merely overhead. - */ - chunk->ndirty = 0; +static bool +arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { + return (nstime_compare(&decay->deadline, time) <= 0); +} - chunk->nruns_avail = 0; - chunk->nruns_adjac = 0; +static size_t +arena_decay_backlog_npages_limit(const arena_decay_t *decay) { + uint64_t sum; + size_t npages_limit_backlog; + unsigned i; /* - * Initialize the map to contain one maximal free untouched run. Mark - * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. - */ - unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; - arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, - unzeroed); - /* - * There is no need to initialize the internal page map entries unless - * the chunk is not zeroed. + * For each element of decay_backlog, multiply by the corresponding + * fixed-point smoothstep decay factor. Sum the products, then divide + * to round down to the nearest whole number of pages. 
*/ - if (zero == false) { - VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk, - map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - for (i = map_bias+1; i < chunk_npages-1; i++) - arena_mapbits_unzeroed_set(chunk, i, unzeroed); - } else { - VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk, - map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - if (config_debug) { - for (i = map_bias+1; i < chunk_npages-1; i++) { - assert(arena_mapbits_unzeroed_get(chunk, i) == - unzeroed); - } - } + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * h_steps[i]; } - arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass, - unzeroed); + npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); - return (chunk); + return npages_limit_backlog; } -static arena_chunk_t * -arena_chunk_alloc(arena_t *arena) -{ - arena_chunk_t *chunk; +static void +arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { + size_t npages_delta = (current_npages > decay->nunpurged) ? + current_npages - decay->nunpurged : 0; + decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; - if (arena->spare != NULL) - chunk = arena_chunk_init_spare(arena); - else { - chunk = arena_chunk_init_hard(arena); - if (chunk == NULL) - return (NULL); + if (config_debug) { + if (current_npages > decay->ceil_npages) { + decay->ceil_npages = current_npages; + } + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + assert(decay->ceil_npages >= npages_limit); + if (decay->ceil_npages > npages_limit) { + decay->ceil_npages = npages_limit; + } } - - /* Insert the run into the runs_avail tree. */ - arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); - - return (chunk); } static void -arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) -{ - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxclass); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); +arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, + size_t current_npages) { + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; - /* - * Remove run from the runs_avail tree, so that the arena does not use - * it. 
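The backlog code above reduces the decay curve to a fixed-point dot product: each backlog slot records how many pages became unused during one past epoch, h_steps[] gives the smoothstep weight for how much of that cohort may still be kept, and shifting the sum right by SMOOTHSTEP_BFP discards the fractional bits. A toy version with an invented three-step weight table (jemalloc's real table has SMOOTHSTEP_NSTEPS precomputed entries):

/* backlog_sketch.c: toy fixed-point decay limit; the weights are invented. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NSTEPS 3
#define BFP    24                 /* binary fixed point, like SMOOTHSTEP_BFP */

/* Fraction of each cohort that may still be retained, oldest cohort first.
 * 0x01000000 is 1.0 in this 8.24 fixed-point format. */
static const uint64_t h_steps[NSTEPS] = {
    0x00400000,                   /* oldest cohort: keep 25% */
    0x00A00000,                   /* keep 62.5% */
    0x01000000,                   /* newest cohort: keep 100% */
};

static size_t
npages_limit(const size_t backlog[NSTEPS]) {
    uint64_t sum = 0;
    for (unsigned i = 0; i < NSTEPS; i++) {
        sum += (uint64_t)backlog[i] * h_steps[i];
    }
    return (size_t)(sum >> BFP);  /* round down to whole pages */
}

int
main(void) {
    /* 100 pages went unused three epochs ago, 40 two epochs ago, 8 last epoch. */
    size_t backlog[NSTEPS] = { 100, 40, 8 };
    /* 100*0.25 + 40*0.625 + 8*1.0 = 58 pages may stay unpurged. */
    printf("limit = %zu pages\n", npages_limit(backlog));
    return 0;
}

arena_decay_backlog_update() then ages the cohorts by moving them toward index 0 each time the epoch advances, so a cohort's weight keeps shrinking until it is shifted out entirely and its remaining pages become fully purgeable.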
- */ - arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); + assert((uint64_t)nadvance_z == nadvance_u64); - if (arena->spare != NULL) { - arena_chunk_t *spare = arena->spare; + memmove(decay->backlog, &decay->backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&decay->backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + } + } - arena->spare = chunk; - malloc_mutex_unlock(&arena->lock); - chunk_dealloc((void *)spare, chunksize, true); - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena->stats.mapped -= chunksize; - } else - arena->spare = chunk; + arena_decay_backlog_update_last(decay, current_npages); } -static arena_run_t * -arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) -{ - arena_run_t *run; - arena_chunk_map_t *mapelm, key; - - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = (((uintptr_t)mapelm - - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) - + map_bias; - - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << - LG_PAGE)); - arena_run_split_large(arena, run, size, zero); - return (run); +static void +arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, size_t current_npages, size_t npages_limit, + bool is_background_thread) { + if (current_npages > npages_limit) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + npages_limit, is_background_thread); } - - return (NULL); } -static arena_run_t * -arena_run_alloc_large(arena_t *arena, size_t size, bool zero) -{ - arena_chunk_t *chunk; - arena_run_t *run; +static void +arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, + size_t current_npages) { + assert(arena_decay_deadline_reached(decay, time)); - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); + nstime_t delta; + nstime_copy(&delta, time); + nstime_subtract(&delta, &decay->epoch); - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_large_helper(arena, size, zero); - if (run != NULL) - return (run); + uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); + assert(nadvance_u64 > 0); - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split_large(arena, run, size, zero); - return (run); - } + /* Add nadvance_u64 decay intervals to epoch. */ + nstime_copy(&delta, &decay->interval); + nstime_imultiply(&delta, nadvance_u64); + nstime_add(&decay->epoch, &delta); - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. - */ - return (arena_run_alloc_large_helper(arena, size, zero)); + /* Set a new deadline. */ + arena_decay_deadline_init(decay); + + /* Update the backlog. 
*/ + arena_decay_backlog_update(decay, nadvance_u64, current_npages); } -static arena_run_t * -arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind) -{ - arena_run_t *run; - arena_chunk_map_t *mapelm, key; +static void +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, const nstime_t *time, bool is_background_thread) { + size_t current_npages = extents_npages_get(extents); + arena_decay_epoch_advance_helper(decay, time, current_npages); - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = (((uintptr_t)mapelm - - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) - + map_bias; + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + /* We may unlock decay->mtx when try_purge(). Finish logging first. */ + decay->nunpurged = (npages_limit > current_npages) ? npages_limit : + current_npages; - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << - LG_PAGE)); - arena_run_split_small(arena, run, size, binind); - return (run); + if (!background_thread_enabled() || is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + current_npages, npages_limit, is_background_thread); } - - return (NULL); } -static arena_run_t * -arena_run_alloc_small(arena_t *arena, size_t size, size_t binind) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); - assert(binind != BININD_INVALID); +static void +arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) { + arena_decay_ms_write(decay, decay_ms); + if (decay_ms > 0) { + nstime_init(&decay->interval, (uint64_t)decay_ms * + KQU(1000000)); + nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); + } - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_small_helper(arena, size, binind); - if (run != NULL) - return (run); + nstime_init(&decay->epoch, 0); + nstime_update(&decay->epoch); + decay->jitter_state = (uint64_t)(uintptr_t)decay; + arena_decay_deadline_init(decay); + decay->nunpurged = 0; + memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); +} - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split_small(arena, run, size, binind); - return (run); +static bool +arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms, + decay_stats_t *stats) { + if (config_debug) { + for (size_t i = 0; i < sizeof(arena_decay_t); i++) { + assert(((char *)decay)[i] == 0); + } + decay->ceil_npages = 0; + } + if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, + malloc_mutex_rank_exclusive)) { + return true; + } + decay->purging = false; + arena_decay_reinit(decay, extents, decay_ms); + /* Memory is zeroed, so there is no need to clear stats. */ + if (config_stats) { + decay->stats = stats; } + return false; +} - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. 
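arena_decay_reinit() above splits the configured decay time into SMOOTHSTEP_NSTEPS equal intervals, and arena_decay_epoch_advance_helper() moves the epoch forward only by whole intervals, however late it is called. The same arithmetic in plain integers (the 10 s decay time and 200-step count are example inputs for this sketch, not a claim about the shipped defaults):

/* epoch_sketch.c: decay epoch bookkeeping as plain nanosecond arithmetic. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int
main(void) {
    uint64_t decay_ns    = UINT64_C(10000) * 1000000;   /* 10 s decay time  */
    uint64_t interval_ns = decay_ns / 200;              /* 50 ms per epoch  */

    uint64_t epoch_ns = 0;                              /* last epoch start */
    uint64_t now_ns   = UINT64_C(237) * 1000000;        /* 237 ms later     */

    /* Whole intervals elapsed since the last epoch: 237 / 50 = 4. */
    uint64_t nadvance = (now_ns - epoch_ns) / interval_ns;

    /* Advance by whole intervals only, so the next deadline stays on the
     * interval grid (jemalloc then adds random jitter to the deadline). */
    epoch_ns += nadvance * interval_ns;                 /* epoch now 200 ms */

    printf("advanced %" PRIu64 " epochs, epoch now at %" PRIu64 " ms\n",
        nadvance, epoch_ns / 1000000);
    return 0;
}

Advancing by a whole number of intervals also tells arena_decay_backlog_update() how many slots to shift, which keeps the backlog and the clock in lockstep even though the calls into this code are event-driven and can arrive long after a deadline has passed.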
- */ - return (arena_run_alloc_small_helper(arena, size, binind)); +static bool +arena_decay_ms_valid(ssize_t decay_ms) { + if (decay_ms < -1) { + return false; + } + if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * + KQU(1000)) { + return true; + } + return false; } -static inline void -arena_maybe_purge(arena_t *arena) -{ - size_t npurgeable, threshold; +static bool +arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread) { + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + /* Purge all or nothing if the option is disabled. */ + ssize_t decay_ms = arena_decay_ms_read(decay); + if (decay_ms <= 0) { + if (decay_ms == 0) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + 0, is_background_thread); + } + return false; + } + + nstime_t time; + nstime_init(&time, 0); + nstime_update(&time); + if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time) + > 0)) { + /* + * Time went backwards. Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. + */ + nstime_copy(&decay->epoch, &time); + arena_decay_deadline_init(decay); + } else { + /* Verify that time does not go backwards. */ + assert(nstime_compare(&decay->epoch, &time) <= 0); + } - /* Don't purge if the option is disabled. */ - if (opt_lg_dirty_mult < 0) - return; - /* Don't purge if all dirty pages are already being purged. */ - if (arena->ndirty <= arena->npurgatory) - return; - npurgeable = arena->ndirty - arena->npurgatory; - threshold = (arena->nactive >> opt_lg_dirty_mult); /* - * Don't purge unless the number of purgeable pages exceeds the - * threshold. + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances, or + * being triggered by background threads (scheduled event). 
*/ - if (npurgeable <= threshold) - return; + bool advance_epoch = arena_decay_deadline_reached(decay, &time); + if (advance_epoch) { + arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, + is_background_thread); + } else if (is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + extents_npages_get(extents), + arena_decay_backlog_npages_limit(decay), + is_background_thread); + } - arena_purge(arena, false); + return advance_epoch; } -static arena_chunk_t * -chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) -{ - size_t *ndirty = (size_t *)arg; +static ssize_t +arena_decay_ms_get(arena_decay_t *decay) { + return arena_decay_ms_read(decay); +} - assert(chunk->ndirty != 0); - *ndirty += chunk->ndirty; - return (NULL); +ssize_t +arena_dirty_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_dirty); } -static size_t -arena_compute_npurgatory(arena_t *arena, bool all) -{ - size_t npurgatory, npurgeable; +ssize_t +arena_muzzy_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_muzzy); +} +static bool +arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + + malloc_mutex_lock(tsdn, &decay->mtx); /* - * Compute the minimum number of pages that this thread should try to - * purge. + * Restart decay backlog from scratch, which may cause many dirty pages + * to be immediately purged. It would conceptually be possible to map + * the old backlog onto the new backlog, but there is no justification + * for such complexity since decay_ms changes are intended to be + * infrequent, either between the {-1, 0, >0} states, or a one-time + * arbitrary change during initial arena configuration. */ - npurgeable = arena->ndirty - arena->npurgatory; + arena_decay_reinit(decay, extents, decay_ms); + arena_maybe_decay(tsdn, arena, decay, extents, false); + malloc_mutex_unlock(tsdn, &decay->mtx); - if (all == false) { - size_t threshold = (arena->nactive >> opt_lg_dirty_mult); + return false; +} - npurgatory = npurgeable - threshold; - } else - npurgatory = npurgeable; +bool +arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, decay_ms); +} - return (npurgatory); +bool +arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, decay_ms); } -static void -arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all, - arena_chunk_mapelms_t *mapelms) -{ - size_t pageind, npages; +static size_t +arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, + extent_list_t *decay_extents) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); - /* - * Temporarily allocate free dirty runs within chunk. If all is false, - * only operate on dirty runs that are fragments; otherwise operate on - * all dirty runs. 
- */ - for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - if (arena_mapbits_allocated_get(chunk, pageind) == 0) { - size_t run_size = - arena_mapbits_unallocated_size_get(chunk, pageind); - - npages = run_size >> LG_PAGE; - assert(pageind + npages <= chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+npages-1)); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0 && - (all || arena_avail_adjac(chunk, pageind, - npages))) { - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - arena_run_split_large(arena, run, run_size, - false); - /* Append to list for later processing. */ - ql_elm_new(mapelm, u.ql_link); - ql_tail_insert(mapelms, mapelm, u.ql_link); - } - } else { - /* Skip run. */ - if (arena_mapbits_large_get(chunk, pageind) != 0) { - npages = arena_mapbits_large_size_get(chunk, - pageind) >> LG_PAGE; - } else { - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - assert(arena_mapbits_small_runind_get(chunk, - pageind) == 0); - binind = arena_bin_index(arena, run->bin); - bin_info = &arena_bin_info[binind]; - npages = bin_info->run_size >> LG_PAGE; - } - } + /* Stash extents according to npages_limit. */ + size_t nstashed = 0; + extent_t *extent; + while ((extent = extents_evict(tsdn, arena, r_extent_hooks, extents, + npages_limit)) != NULL) { + extent_list_append(decay_extents, extent); + nstashed += extent_size_get(extent) >> LG_PAGE; } - assert(pageind == chunk_npages); - assert(chunk->ndirty == 0 || all == false); - assert(chunk->nruns_adjac == 0); + return nstashed; } static size_t -arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk, - arena_chunk_mapelms_t *mapelms) -{ - size_t npurged, pageind, npages, nmadvise; - arena_chunk_map_t *mapelm; - - malloc_mutex_unlock(&arena->lock); - if (config_stats) +arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, + bool all, extent_list_t *decay_extents, bool is_background_thread) { + UNUSED size_t nmadvise, nunmapped; + size_t npurged; + + if (config_stats) { nmadvise = 0; + nunmapped = 0; + } npurged = 0; - ql_foreach(mapelm, mapelms, u.ql_link) { - bool unzeroed; - size_t flag_unzeroed, i; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - npages = arena_mapbits_large_size_get(chunk, pageind) >> - LG_PAGE; - assert(pageind + npages <= chunk_npages); - unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << - LG_PAGE)), (npages << LG_PAGE)); - flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; - /* - * Set the unzeroed flag for all pages, now that pages_purge() - * has returned whether the pages were zeroed as a side effect - * of purging. This chunk map modification is safe even though - * the arena mutex isn't currently owned by this thread, - * because the run is marked as allocated, thus protecting it - * from being modified by any other thread. As long as these - * writes don't perturb the first and last elements' - * CHUNK_MAP_ALLOCATED bits, behavior is well defined. 
- */ - for (i = 0; i < npages; i++) { - arena_mapbits_unzeroed_set(chunk, pageind+i, - flag_unzeroed); + + ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + for (extent_t *extent = extent_list_first(decay_extents); extent != + NULL; extent = extent_list_first(decay_extents)) { + if (config_stats) { + nmadvise++; } + size_t npages = extent_size_get(extent) >> LG_PAGE; npurged += npages; - if (config_stats) - nmadvise++; + extent_list_remove(decay_extents, extent); + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + if (!all && muzzy_decay_ms != 0 && + !extent_purge_lazy_wrapper(tsdn, arena, + r_extent_hooks, extent, 0, + extent_size_get(extent))) { + extents_dalloc(tsdn, arena, r_extent_hooks, + &arena->extents_muzzy, extent); + arena_background_thread_inactivity_check(tsdn, + arena, is_background_thread); + break; + } + /* Fall through. */ + case extent_state_muzzy: + extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, + extent); + if (config_stats) { + nunmapped += npages; + } + break; + case extent_state_retained: + default: + not_reached(); + } } - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena->stats.nmadvise += nmadvise; - return (npurged); + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, + 1); + arena_stats_add_u64(tsdn, &arena->stats, + &decay->stats->nmadvise, nmadvise); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged, + npurged); + arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, + nunmapped << LG_PAGE); + arena_stats_unlock(tsdn, &arena->stats); + } + + return npurged; } +/* + * npages_limit: Decay as many dirty extents as possible without violating the + * invariant: (extents_npages_get(extents) >= npages_limit) + */ static void -arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk, - arena_chunk_mapelms_t *mapelms) -{ - arena_chunk_map_t *mapelm; - size_t pageind; - - /* Deallocate runs. */ - for (mapelm = ql_first(mapelms); mapelm != NULL; - mapelm = ql_first(mapelms)) { - arena_run_t *run; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << - LG_PAGE)); - ql_remove(mapelms, mapelm, u.ql_link); - arena_run_dalloc(arena, run, false, true); +arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool all, size_t npages_limit, + bool is_background_thread) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 1); + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + if (decay->purging) { + return; } -} + decay->purging = true; + malloc_mutex_unlock(tsdn, &decay->mtx); -static inline size_t -arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) -{ - size_t npurged; - arena_chunk_mapelms_t mapelms; + extent_hooks_t *extent_hooks = extent_hooks_get(arena); - ql_new(&mapelms); + extent_list_t decay_extents; + extent_list_init(&decay_extents); - /* - * If chunk is the spare, temporarily re-allocate it, 1) so that its - * run is reinserted into runs_avail, and 2) so that it cannot be - * completely discarded by another thread while arena->lock is dropped - * by this thread. Note that the arena_run_dalloc() call will - * implicitly deallocate the chunk, so no explicit action is required - * in this function to deallocate the chunk. 
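arena_decay_stashed() above is where the two-phase decay shows up: a dirty extent is first purged lazily (for example via MADV_FREE) and handed to extents_muzzy, and only a later muzzy decay pass returns it to the system for good; a purge-everything request, a muzzy decay time of zero, or a failed lazy purge skips the intermediate state and unmaps immediately. The progression, reduced to an enum and a transition function (every name below is illustrative):

/* decay_state_sketch.c: the dirty -> muzzy -> unmapped progression. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { EXT_DIRTY, EXT_MUZZY, EXT_UNMAPPED } ext_state_t;

/* One decay pass over a single extent. The real code also falls through to
 * unmapping when the lazy purge itself fails; that case is omitted here. */
static ext_state_t
decay_step(ext_state_t s, bool purge_all, bool muzzy_enabled) {
    switch (s) {
    case EXT_DIRTY:
        return (!purge_all && muzzy_enabled) ? EXT_MUZZY : EXT_UNMAPPED;
    case EXT_MUZZY:
    default:
        return EXT_UNMAPPED;
    }
}

int
main(void) {
    ext_state_t s = EXT_DIRTY;
    s = decay_step(s, false, true);         /* dirty -> muzzy          */
    s = decay_step(s, false, true);         /* muzzy -> unmapped       */
    printf("final state = %d\n", (int)s);   /* prints 2 (EXT_UNMAPPED) */
    return 0;
}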
- * - * Note that once a chunk contains dirty pages, it cannot again contain - * a single run unless 1) it is a dirty run, or 2) this function purges - * dirty pages and causes the transition to a single clean run. Thus - * (chunk == arena->spare) is possible, but it is not possible for - * this function to be called on the spare unless it contains a dirty - * run. - */ - if (chunk == arena->spare) { - assert(arena_mapbits_dirty_get(chunk, map_bias) != 0); - assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0); - - arena_chunk_alloc(arena); + size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, + npages_limit, &decay_extents); + if (npurge != 0) { + UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, + &extent_hooks, decay, extents, all, &decay_extents, + is_background_thread); + assert(npurged == npurge); } - if (config_stats) - arena->stats.purged += chunk->ndirty; - - /* - * Operate on all dirty runs if there is no clean/dirty run - * fragmentation. - */ - if (chunk->nruns_adjac == 0) - all = true; - - arena_chunk_stash_dirty(arena, chunk, all, &mapelms); - npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms); - arena_chunk_unstash_purged(arena, chunk, &mapelms); - - return (npurged); + malloc_mutex_lock(tsdn, &decay->mtx); + decay->purging = false; } -static void -arena_purge(arena_t *arena, bool all) -{ - arena_chunk_t *chunk; - size_t npurgatory; - if (config_debug) { - size_t ndirty = 0; +static bool +arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread, bool all) { + if (all) { + malloc_mutex_lock(tsdn, &decay->mtx); + arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, + is_background_thread); + malloc_mutex_unlock(tsdn, &decay->mtx); - arena_chunk_dirty_iter(&arena->chunks_dirty, NULL, - chunks_dirty_iter_cb, (void *)&ndirty); - assert(ndirty == arena->ndirty); + return false; } - assert(arena->ndirty > arena->npurgatory || all); - assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - - arena->npurgatory) || all); - if (config_stats) - arena->stats.npurge++; + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* No need to wait if another thread is in progress. */ + return true; + } - /* - * Add the minimum number of pages this thread should try to purge to - * arena->npurgatory. This will keep multiple threads from racing to - * reduce ndirty below the threshold. - */ - npurgatory = arena_compute_npurgatory(arena, all); - arena->npurgatory += npurgatory; + bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, + is_background_thread); + size_t npages_new; + if (epoch_advanced) { + /* Backlog is updated on epoch advance. */ + npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; + } + malloc_mutex_unlock(tsdn, &decay->mtx); - while (npurgatory > 0) { - size_t npurgeable, npurged, nunpurged; + if (have_background_thread && background_thread_enabled() && + epoch_advanced && !is_background_thread) { + background_thread_interval_check(tsdn, arena, decay, npages_new); + } - /* Get next chunk with dirty pages. */ - chunk = arena_chunk_dirty_first(&arena->chunks_dirty); - if (chunk == NULL) { - /* - * This thread was unable to purge as many pages as - * originally intended, due to races with other threads - * that either did some of the purging work, or re-used - * dirty pages. 
- */ - arena->npurgatory -= npurgatory; - return; - } - npurgeable = chunk->ndirty; - assert(npurgeable != 0); + return false; +} - if (npurgeable > npurgatory && chunk->nruns_adjac == 0) { - /* - * This thread will purge all the dirty pages in chunk, - * so set npurgatory to reflect this thread's intent to - * purge the pages. This tends to reduce the chances - * of the following scenario: - * - * 1) This thread sets arena->npurgatory such that - * (arena->ndirty - arena->npurgatory) is at the - * threshold. - * 2) This thread drops arena->lock. - * 3) Another thread causes one or more pages to be - * dirtied, and immediately determines that it must - * purge dirty pages. - * - * If this scenario *does* play out, that's okay, - * because all of the purging work being done really - * needs to happen. - */ - arena->npurgatory += npurgeable - npurgatory; - npurgatory = npurgeable; - } +static bool +arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, is_background_thread, all); +} - /* - * Keep track of how many pages are purgeable, versus how many - * actually get purged, and adjust counters accordingly. - */ - arena->npurgatory -= npurgeable; - npurgatory -= npurgeable; - npurged = arena_chunk_purge(arena, chunk, all); - nunpurged = npurgeable - npurged; - arena->npurgatory += nunpurged; - npurgatory += nunpurged; - } +static bool +arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, is_background_thread, all); } void -arena_purge_all(arena_t *arena) -{ - - malloc_mutex_lock(&arena->lock); - arena_purge(arena, true); - malloc_mutex_unlock(&arena->lock); +arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { + if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { + return; + } + arena_decay_muzzy(tsdn, arena, is_background_thread, all); } static void -arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, - size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) -{ - size_t size = *p_size; - size_t run_ind = *p_run_ind; - size_t run_pages = *p_run_pages; - - /* Try to coalesce forward. */ - if (run_ind + run_pages < chunk_npages && - arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && - arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { - size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages); - size_t nrun_pages = nrun_size >> LG_PAGE; - - /* - * Remove successor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages+nrun_pages-1) == nrun_size); - assert(arena_mapbits_dirty_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages, - false, true); +arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { + arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); - size += nrun_size; - run_pages += nrun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - /* Try to coalesce backward. 
*/ - if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, - run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == - flag_dirty) { - size_t prun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind-1); - size_t prun_pages = prun_size >> LG_PAGE; - - run_ind -= prun_pages; + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); +} - /* - * Remove predecessor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - prun_size); - assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind, prun_pages, true, - false); +static void +arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) { + assert(extent_nfree_get(slab) > 0); + extent_heap_insert(&bin->slabs_nonfull, slab); +} - size += prun_size; - run_pages += prun_pages; +static void +arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) { + extent_heap_remove(&bin->slabs_nonfull, slab); +} - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); +static extent_t * +arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) { + extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); + if (slab == NULL) { + return NULL; } - - *p_size = size; - *p_run_ind = run_ind; - *p_run_pages = run_pages; + if (config_stats) { + bin->stats.reslabs++; + } + return slab; } static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) -{ - arena_chunk_t *chunk; - size_t size, run_ind, run_pages, flag_dirty; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - if (arena_mapbits_large_get(chunk, run_ind) != 0) { - size = arena_mapbits_large_size_get(chunk, run_ind); - assert(size == PAGE || - arena_mapbits_large_size_get(chunk, - run_ind+(size>>LG_PAGE)-1) == 0); - } else { - size_t binind = arena_bin_index(arena, run->bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - size = bin_info->run_size; - } - run_pages = (size >> LG_PAGE); - arena_cactive_update(arena, 0, run_pages); - arena->nactive -= run_pages; - +arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) { + assert(extent_nfree_get(slab) == 0); /* - * The run is dirty if the caller claims to have dirtied it, as well as - * if it was already dirty before being allocated and the caller - * doesn't claim to have cleaned it. + * Tracking extents is required by arena_reset, which is not allowed + * for auto arenas. Bypass this step to avoid touching the extent + * linkage (often results in cache misses) for auto arenas. */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0) - dirty = true; - flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; - - /* Mark pages as unallocated in the chunk map. 
*/ - if (dirty) { - arena_mapbits_unallocated_set(chunk, run_ind, size, - CHUNK_MAP_DIRTY); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - CHUNK_MAP_DIRTY); - } else { - arena_mapbits_unallocated_set(chunk, run_ind, size, - arena_mapbits_unzeroed_get(chunk, run_ind)); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); + if (arena_is_auto(arena)) { + return; } + extent_list_append(&bin->slabs_full, slab); +} - arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, - flag_dirty); - - /* Insert into runs_avail, now that coalescing is complete. */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - arena_avail_insert(arena, chunk, run_ind, run_pages, true, true); - - /* Deallocate chunk if it is now completely unused. */ - if (size == arena_maxclass) { - assert(run_ind == map_bias); - assert(run_pages == (arena_maxclass >> LG_PAGE)); - arena_chunk_dealloc(arena, chunk); +static void +arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) { + if (arena_is_auto(arena)) { + return; } + extent_list_remove(&bin->slabs_full, slab); +} +void +arena_reset(tsd_t *tsd, arena_t *arena) { /* - * It is okay to do dirty page processing here even if the chunk was - * deallocated above, since in that case it is the spare. Waiting - * until after possible chunk deallocation to do dirty processing - * allows for an old spare to be fully deallocated, thus decreasing the - * chances of spuriously crossing the dirty page purging threshold. + * Locking in this function is unintuitive. The caller guarantees that + * no concurrent operations are happening in this arena, but there are + * still reasons that some locking is necessary: + * + * - Some of the functions in the transitive closure of calls assume + * appropriate locks are held, and in some cases these locks are + * temporarily dropped to avoid lock order reversal or deadlock due to + * reentry. + * - mallctl("epoch", ...) may concurrently refresh stats. While + * strictly speaking this is a "concurrent operation", disallowing + * stats refreshes would impose an inconvenient burden. */ - if (dirty) - arena_maybe_purge(arena); -} -static void -arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - size_t head_npages = (oldsize - newsize) >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); + /* Large allocations. */ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); - assert(oldsize > newsize); + for (extent_t *extent = extent_list_first(&arena->large); extent != + NULL; extent = extent_list_first(&arena->large)) { + void *ptr = extent_base_get(extent); + size_t usize; - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * leading run as separately allocated. Set the last element of each - * run first, in case of single-page runs. 
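arena_reset() above drains the large-extent list and the per-bin full-slab lists with one recurring pattern: take the first element while holding the list lock, drop the lock for the deallocation itself (which may take other locks), then reacquire the lock and re-read the head. A small pthread sketch of that pattern (node_t, heavy_dalloc and drain are invented names):

/* drain_sketch.c: drop the list lock around each heavy deallocation. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct node { struct node *next; } node_t;

static pthread_mutex_t list_mtx = PTHREAD_MUTEX_INITIALIZER;
static node_t *head;

static void
heavy_dalloc(node_t *n) {
    /* Stand-in for large_dalloc() or arena_slab_dalloc(), which may acquire
     * other locks and therefore must not run while list_mtx is held. */
    free(n);
}

static void
drain(void) {
    pthread_mutex_lock(&list_mtx);
    while (head != NULL) {
        node_t *n = head;               /* always re-read under the lock */
        head = n->next;
        pthread_mutex_unlock(&list_mtx);
        heavy_dalloc(n);                /* heavy work without the lock   */
        pthread_mutex_lock(&list_mtx);
    }
    pthread_mutex_unlock(&list_mtx);
}

int
main(void) {
    for (int i = 0; i < 3; i++) {
        node_t *n = malloc(sizeof(*n));
        n->next = head;
        head = n;
    }
    drain();
    printf("drained\n");
    return 0;
}

Re-reading the head after reacquiring the lock is what keeps the loop correct even though the deallocation may itself touch the list indirectly; as the comment in arena_reset() explains, the caller rules out concurrent allocations, but some locking is still required for lock-order and stats-refresh reasons.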
- */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); - if (config_debug) { - UNUSED size_t tail_npages = newsize >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); + if (config_stats || (config_prof && opt_prof)) { + usize = sz_index2size(alloc_ctx.szind); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + } + /* Remove large allocation from prof sample set. */ + if (config_prof && opt_prof) { + prof_free(tsd, ptr, usize, &alloc_ctx); + } + large_dalloc(tsd_tsdn(tsd), extent); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + + /* Bins. */ + for (unsigned i = 0; i < NBINS; i++) { + extent_t *slab; + arena_bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (bin->slabcur != NULL) { + slab = bin->slabcur; + bin->slabcur = NULL; + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != + NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + for (slab = extent_list_first(&bin->slabs_full); slab != NULL; + slab = extent_list_first(&bin->slabs_full)) { + arena_bin_slabs_full_remove(arena, bin, slab); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + if (config_stats) { + bin->stats.curregs = 0; + bin->stats.curslabs = 0; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } - arena_mapbits_large_set(chunk, pageind+head_npages, newsize, - flag_dirty); - arena_run_dalloc(arena, run, false, false); + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); } static void -arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize, bool dirty) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - size_t head_npages = newsize >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - - assert(oldsize > newsize); - +arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { /* - * Update the chunk map so that arena_run_dalloc() can treat the - * trailing run as separately allocated. Set the last element of each - * run first, in case of single-page runs. + * Iterate over the retained extents and destroy them. This gives the + * extent allocator underlying the extent hooks an opportunity to unmap + * all retained memory without having to keep its own metadata + * structures. In practice, virtual memory for dss-allocated extents is + * leaked here, so best practice is to avoid dss for arenas to be + * destroyed, or provide custom extent hooks that track retained + * dss-based extents for later reuse. 
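The arena_reset() loops above deliberately re-fetch the first element after every deallocation instead of iterating in place: the bin lock or large_mtx is dropped around each large_dalloc()/arena_slab_dalloc() call because those paths take other locks, and holding the container lock across them would risk the lock-order reversals the function comment warns about. A minimal sketch of that pattern, using a hypothetical node_t list and a plain pthread mutex rather than jemalloc's malloc_mutex_t:

/* Illustrative sketch only, not jemalloc code. */
#include <pthread.h>

typedef struct node_s {
    struct node_s *next;
} node_t;

static void
drain_list(pthread_mutex_t *lock, node_t **head, void (*destroy)(node_t *)) {
    pthread_mutex_lock(lock);
    for (node_t *n = *head; n != NULL; n = *head) {
        *head = n->next;             /* unlink while the lock is held */
        pthread_mutex_unlock(lock);  /* destroy() may acquire other locks */
        destroy(n);
        pthread_mutex_lock(lock);    /* retake before re-reading *head */
    }
    pthread_mutex_unlock(lock);
}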
*/ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); - - if (config_debug) { - UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + extent_t *extent; + while ((extent = extents_evict(tsdn, arena, &extent_hooks, + &arena->extents_retained, 0)) != NULL) { + extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); } - arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, - flag_dirty); - - arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), - dirty, false); } -static arena_run_t * -arena_bin_runs_first(arena_bin_t *bin) -{ - arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs); - if (mapelm != NULL) { - arena_chunk_t *chunk; - size_t pageind; - arena_run_t *run; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); - pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t))) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << - LG_PAGE)); - return (run); - } +void +arena_destroy(tsd_t *tsd, arena_t *arena) { + assert(base_ind_get(arena->base) >= narenas_auto); + assert(arena_nthreads_get(arena, false) == 0); + assert(arena_nthreads_get(arena, true) == 0); - return (NULL); -} + /* + * No allocations have occurred since arena_reset() was called. + * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached + * extents, so only retained extents may remain. + */ + assert(extents_npages_get(&arena->extents_dirty) == 0); + assert(extents_npages_get(&arena->extents_muzzy) == 0); -static void -arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_t *chunk = CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); + /* Deallocate retained memory. */ + arena_destroy_retained(tsd_tsdn(tsd), arena); - assert(arena_run_tree_search(&bin->runs, mapelm) == NULL); + /* + * Remove the arena pointer from the arenas array. We rely on the fact + * that there is no way for the application to get a dirty read from the + * arenas array unless there is an inherent race in the application + * involving access of an arena being concurrently destroyed. The + * application must synchronize knowledge of the arena's validity, so as + * long as we use an atomic write to update the arenas array, the + * application will get a clean read any time after it synchronizes + * knowledge that the arena is no longer valid. + */ + arena_set(base_ind_get(arena->base), NULL); - arena_run_tree_insert(&bin->runs, mapelm); + /* + * Destroy the base allocator, which manages all metadata ever mapped by + * this arena. 
+ */ + base_delete(tsd_tsdn(tsd), arena->base); } -static void -arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); +static extent_t * +arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info, + szind_t szind) { + extent_t *slab; + bool zero, commit; - assert(arena_run_tree_search(&bin->runs, mapelm) != NULL); - - arena_run_tree_remove(&bin->runs, mapelm); -} + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); -static arena_run_t * -arena_bin_nonfull_run_tryget(arena_bin_t *bin) -{ - arena_run_t *run = arena_bin_runs_first(bin); - if (run != NULL) { - arena_bin_runs_remove(bin, run); - if (config_stats) - bin->stats.reruns++; + zero = false; + commit = true; + slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, + bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); + + if (config_stats && slab != NULL) { + arena_stats_mapped_add(tsdn, &arena->stats, + bin_info->slab_size); + } + + return slab; +} + +static extent_t * +arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, + const arena_bin_info_t *bin_info) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + szind_t szind = sz_size2index(bin_info->reg_size); + bool zero = false; + bool commit = true; + extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, + binind, &zero, &commit); + if (slab == NULL) { + slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, + true, binind, &zero, &commit); + } + if (slab == NULL) { + slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, + bin_info, szind); + if (slab == NULL) { + return NULL; + } } - return (run); + assert(extent_slab_get(slab)); + + /* Initialize slab internals. */ + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + extent_nfree_set(slab, bin_info->nregs); + bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); + + arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); + + return slab; } -static arena_run_t * -arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) -{ - arena_run_t *run; - size_t binind; - arena_bin_info_t *bin_info; +static extent_t * +arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, + szind_t binind) { + extent_t *slab; + const arena_bin_info_t *bin_info; - /* Look for a usable run. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - /* No existing runs have any space available. */ + /* Look for a usable slab. */ + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } + /* No existing slabs have any space available. */ - binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; - /* Allocate a new run. */ - malloc_mutex_unlock(&bin->lock); + /* Allocate a new slab. */ + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_small(arena, bin_info->run_size, binind); - if (run != NULL) { - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - /* Initialize run internals. 
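arena_destroy() above is only reachable for manually created arenas (note the base_ind_get(arena->base) >= narenas_auto assertion), and its caller is arena_i_destroy_ctl(), i.e. the mallctl interface. A hedged sketch of the public-API sequence that ends up here, assuming a jemalloc 5.x build with unprefixed public symbols:

/* Illustrative sketch only; assumes unprefixed jemalloc 5.x public symbols. */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

int
create_use_destroy_arena(void) {
    unsigned ind;
    size_t sz = sizeof(ind);
    if (mallctl("arenas.create", &ind, &sz, NULL, 0) != 0) {
        return -1;
    }

    /* Allocate from the explicit arena, bypassing the tcache so the memory
     * stays attributable to that arena. */
    void *p = mallocx(4096, MALLOCX_ARENA(ind) | MALLOCX_TCACHE_NONE);
    if (p != NULL) {
        dallocx(p, MALLOCX_TCACHE_NONE);
    }

    /* "arena.<i>.destroy" frees everything the arena still owns and then
     * releases retained memory, reaching arena_destroy() above. */
    char cmd[64];
    snprintf(cmd, sizeof(cmd), "arena.%u.destroy", ind);
    return mallctl(cmd, NULL, NULL, NULL, 0);
}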
*/ - run->bin = bin; - run->nextind = 0; - run->nfree = bin_info->nregs; - bitmap_init(bitmap, &bin_info->bitmap_info); - } - malloc_mutex_unlock(&arena->lock); + slab = arena_slab_alloc(tsdn, arena, binind, bin_info); /********************************/ - malloc_mutex_lock(&bin->lock); - if (run != NULL) { + malloc_mutex_lock(tsdn, &bin->lock); + if (slab != NULL) { if (config_stats) { - bin->stats.nruns++; - bin->stats.curruns++; + bin->stats.nslabs++; + bin->stats.curslabs++; } - return (run); + return slab; } /* - * arena_run_alloc_small() failed, but another thread may have made + * arena_slab_alloc() failed, but another thread may have made * sufficient memory available while this one dropped bin->lock above, * so search one more time. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } - return (NULL); + return NULL; } -/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ +/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ static void * -arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) -{ - void *ret; - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run; +arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, + szind_t binind) { + const arena_bin_info_t *bin_info; + extent_t *slab; - binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; - bin->runcur = NULL; - run = arena_bin_nonfull_run_get(arena, bin); - if (bin->runcur != NULL && bin->runcur->nfree > 0) { + if (!arena_is_auto(arena) && bin->slabcur != NULL) { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } + slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind); + if (bin->slabcur != NULL) { /* - * Another thread updated runcur while this one ran without the - * bin lock in arena_bin_nonfull_run_get(). + * Another thread updated slabcur while this one ran without the + * bin lock in arena_bin_nonfull_slab_get(). */ - assert(bin->runcur->nfree > 0); - ret = arena_run_reg_alloc(bin->runcur, bin_info); - if (run != NULL) { - arena_chunk_t *chunk; - - /* - * arena_run_alloc_small() may have allocated run, or - * it may have pulled run from the bin's run tree. - * Therefore it is unsafe to make any assumptions about - * how run has previously been used, and - * arena_bin_lower_run() must be called, as if a region - * were just deallocated from the run. - */ - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - if (run->nfree == bin_info->nregs) - arena_dalloc_bin_run(arena, chunk, run, bin); - else - arena_bin_lower_run(arena, chunk, run, bin); + if (extent_nfree_get(bin->slabcur) > 0) { + void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur, + bin_info); + if (slab != NULL) { + /* + * arena_slab_alloc() may have allocated slab, + * or it may have been pulled from + * slabs_nonfull. Therefore it is unsafe to + * make any assumptions about how slab has + * previously been used, and + * arena_bin_lower_slab() must be called, as if + * a region were just deallocated from the slab. 
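arena_slab_alloc() above seeds each new slab with nregs free regions and a per-region bitmap (extent_nfree_set() plus bitmap_init()); handing out a region then amounts to picking a free bit and decrementing the free count. A toy model of that bookkeeping, not jemalloc's bitmap_t (whose internal layout and bit convention differ):

/* Toy illustration only, not jemalloc's bitmap representation. */
#include <assert.h>
#include <stdint.h>

typedef struct {
    uint64_t free_bits; /* bit i set => region i is free (toy convention) */
    unsigned nfree;
} toy_slab_t;

static void
toy_slab_init(toy_slab_t *slab, unsigned nregs) {
    assert(nregs >= 1 && nregs <= 64);
    slab->free_bits = (nregs == 64) ?
        ~UINT64_C(0) : ((UINT64_C(1) << nregs) - 1);
    slab->nfree = nregs;
}

static int
toy_slab_reg_alloc(toy_slab_t *slab) {
    if (slab->nfree == 0) {
        return -1; /* caller would fall back to another slab */
    }
    int reg = __builtin_ctzll(slab->free_bits); /* lowest free region first */
    slab->free_bits &= ~(UINT64_C(1) << reg);
    slab->nfree--;
    return reg;
}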
+ */ + if (extent_nfree_get(slab) == bin_info->nregs) { + arena_dalloc_bin_slab(tsdn, arena, slab, + bin); + } else { + arena_bin_lower_slab(tsdn, arena, slab, + bin); + } + } + return ret; } - return (ret); - } - if (run == NULL) - return (NULL); + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } - bin->runcur = run; + if (slab == NULL) { + return NULL; + } + bin->slabcur = slab; - assert(bin->runcur->nfree > 0); + assert(extent_nfree_get(bin->slabcur) > 0); - return (arena_run_reg_alloc(bin->runcur, bin_info)); + return arena_slab_reg_alloc(tsdn, slab, bin_info); } void -arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, - uint64_t prof_accumbytes) -{ +arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; arena_bin_t *bin; - arena_run_t *run; - void *ptr; assert(tbin->ncached == 0); - if (config_prof && arena_prof_accum(arena, prof_accumbytes)) - prof_idump(); + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { + prof_idump(tsdn); + } bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> - tbin->lg_fill_div); i < nfill; i++) { - if ((run = bin->runcur) != NULL && run->nfree > 0) - ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ptr = arena_bin_malloc_hard(arena, bin); - if (ptr == NULL) + tcache->lg_fill_div[binind]); i < nfill; i++) { + extent_t *slab; + void *ptr; + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > + 0) { + ptr = arena_slab_reg_alloc(tsdn, slab, + &arena_bin_info[binind]); + } else { + ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } + if (ptr == NULL) { + /* + * OOM. tbin->avail isn't yet filled down to its first + * element, so the successful allocations (if any) must + * be moved just before tbin->avail before bailing out. + */ + if (i > 0) { + memmove(tbin->avail - i, tbin->avail - nfill, + i * sizeof(void *)); + } break; - if (config_fill && opt_junk) { + } + if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ptr, &arena_bin_info[binind], true); } /* Insert such that low regions get used first. 
*/ - tbin->avail[nfill - 1 - i] = ptr; + *(tbin->avail - nfill + i) = ptr; } if (config_stats) { - bin->stats.allocated += i * arena_bin_info[binind].reg_size; bin->stats.nmalloc += i; bin->stats.nrequests += tbin->tstats.nrequests; + bin->stats.curregs += i; bin->stats.nfills++; tbin->tstats.nrequests = 0; } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->ncached = i; + arena_decay_tick(tsdn, arena); } void -arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) -{ - - if (zero) { - size_t redzone_size = bin_info->redzone_size; - memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, - redzone_size); - memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, - redzone_size); - } else { - memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, - bin_info->reg_interval); +arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) { + if (!zero) { + memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); } } -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) -#endif -static void -arena_redzone_corruption(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " - "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", - after ? "after" : "before", ptr, usize, byte); -} -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) -arena_redzone_corruption_t *arena_redzone_corruption = - JEMALLOC_N(arena_redzone_corruption_impl); -#endif - static void -arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) -{ - size_t size = bin_info->reg_size; - size_t redzone_size = bin_info->redzone_size; - size_t i; - bool error = false; - - for (i = 1; i <= redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, false, i, *byte); - if (reset) - *byte = 0xa5; - } - } - for (i = 0; i < redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, true, i, *byte); - if (reset) - *byte = 0xa5; - } - } - if (opt_abort && error) - abort(); +arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) { + memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); } +arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = + arena_dalloc_junk_small_impl; -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) -#endif -void -arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) -{ - size_t redzone_size = bin_info->redzone_size; - - arena_redzones_validate(ptr, bin_info, false); - memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, - bin_info->reg_interval); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -arena_dalloc_junk_small_t *arena_dalloc_junk_small = - JEMALLOC_N(arena_dalloc_junk_small_impl); -#endif - -void -arena_quarantine_junk_small(void *ptr, size_t usize) -{ - size_t binind; - arena_bin_info_t *bin_info; - cassert(config_fill); - assert(opt_junk); - assert(opt_quarantine); - assert(usize <= SMALL_MAXCLASS); - - binind = SMALL_SIZE2BIN(usize); - bin_info = &arena_bin_info[binind]; - arena_redzones_validate(ptr, bin_info, true); 
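The fill loop above writes its nfill results into the slots ending at tbin->avail[-1], so when an allocation fails partway through, the i successful pointers sit at the wrong offset; the memmove in the OOM branch slides them up so that exactly ncached = i entries end at avail[-1]. A standalone sketch of that compaction, with avail standing in for tbin->avail:

/* Illustrative sketch only, mirroring the OOM branch above. */
#include <string.h>

static unsigned
toy_fill_compact(void **avail, unsigned nfill, unsigned i) {
    /* The fill wrote i pointers at avail[-nfill .. -nfill+i-1]; move them to
     * avail[-i .. -1] so the cache sees i contiguous entries at the top. */
    if (i > 0 && i < nfill) {
        memmove(avail - i, avail - nfill, i * sizeof(void *));
    }
    return i; /* becomes tbin->ncached */
}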
-} - -void * -arena_malloc_small(arena_t *arena, size_t size, bool zero) -{ +static void * +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; arena_bin_t *bin; - arena_run_t *run; - size_t binind; + size_t usize; + extent_t *slab; - binind = SMALL_SIZE2BIN(size); assert(binind < NBINS); bin = &arena->bins[binind]; - size = arena_bin_info[binind].reg_size; + usize = sz_index2size(binind); - malloc_mutex_lock(&bin->lock); - if ((run = bin->runcur) != NULL && run->nfree > 0) - ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ret = arena_bin_malloc_hard(arena, bin); + malloc_mutex_lock(tsdn, &bin->lock); + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { + ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]); + } else { + ret = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } if (ret == NULL) { - malloc_mutex_unlock(&bin->lock); - return (NULL); + malloc_mutex_unlock(tsdn, &bin->lock); + return NULL; } if (config_stats) { - bin->stats.allocated += size; bin->stats.nmalloc++; bin->stats.nrequests++; + bin->stats.curregs++; + } + malloc_mutex_unlock(tsdn, &bin->lock); + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { + prof_idump(tsdn); } - malloc_mutex_unlock(&bin->lock); - if (config_prof && isthreaded == false && arena_prof_accum(arena, size)) - prof_idump(); - if (zero == false) { + if (!zero) { if (config_fill) { - if (opt_junk) { + if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); - } else if (opt_zero) - memset(ret, 0, size); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); } else { - if (config_fill && opt_junk) { + if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); + memset(ret, 0, usize); } - return (ret); + arena_decay_tick(tsdn, arena); + return ret; } void * -arena_malloc_large(arena_t *arena, size_t size, bool zero) -{ - void *ret; - UNUSED bool idump; +arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero) { + assert(!tsdn_null(tsdn) || arena != NULL); - /* Large allocation. */ - size = PAGE_CEILING(size); - malloc_mutex_lock(&arena->lock); - ret = (void *)arena_run_alloc_large(arena, size, zero); - if (ret == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); + if (likely(!tsdn_null(tsdn))) { + arena = arena_choose(tsdn_tsd(tsdn), arena); } - if (config_stats) { - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - if (config_prof) - idump = arena_prof_accum_locked(arena, size); - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); - - if (zero == false) { - if (config_fill) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } + if (unlikely(arena == NULL)) { + return NULL; } - return (ret); + if (likely(size <= SMALL_MAXCLASS)) { + return arena_malloc_small(tsdn, arena, ind, zero); + } + return large_malloc(tsdn, arena, sz_index2size(ind), zero); } -/* Only handles large allocations that require more than page alignment. 
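arena_malloc_hard() above routes requests at or below SMALL_MAXCLASS through arena_malloc_small() and everything else through large_malloc(), and the usable size is always the size class (sz_index2size(binind)) rather than the requested byte count. From the public API that rounding can be observed without allocating, for example with nallocx(); the sketch assumes unprefixed jemalloc 5.x symbols:

/* Illustrative sketch only. */
#include <jemalloc/jemalloc.h>
#include <stdio.h>

static void
show_size_class_rounding(void) {
    /* A 100-byte request is served from a small bin and rounded up to that
     * bin's region size; a 1 MiB request takes the large path. */
    printf("100 B  -> usize %zu\n", nallocx(100, 0));
    printf("1 MiB  -> usize %zu\n", nallocx((size_t)1 << 20, 0));
}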
*/ void * -arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) -{ +arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero, tcache_t *tcache) { void *ret; - size_t alloc_size, leadsize, trailsize; - arena_run_t *run; - arena_chunk_t *chunk; - assert((size & PAGE_MASK) == 0); + if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE + && (usize & PAGE_MASK) == 0))) { + /* Small; alignment doesn't require special slab placement. */ + ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); + } else { + if (likely(alignment <= CACHELINE)) { + ret = large_malloc(tsdn, arena, usize, zero); + } else { + ret = large_palloc(tsdn, arena, usize, alignment, zero); + } + } + return ret; +} - alignment = PAGE_CEILING(alignment); - alloc_size = size + alignment - PAGE; +void +arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { + cassert(config_prof); + assert(ptr != NULL); + assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); + assert(usize <= SMALL_MAXCLASS); - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_large(arena, alloc_size, false); - if (run == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) - - (uintptr_t)run; - assert(alloc_size >= leadsize + size); - trailsize = alloc_size - leadsize - size; - ret = (void *)((uintptr_t)run + leadsize); - if (leadsize != 0) { - arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size - - leadsize); - } - if (trailsize != 0) { - arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, - false); - } - arena_run_init_large(arena, (arena_run_t *)ret, size, zero); + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); + arena_t *arena = extent_arena_get(extent); - if (config_stats) { - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - malloc_mutex_unlock(&arena->lock); + szind_t szind = sz_size2index(usize); + extent_szind_set(extent, szind); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + szind, false); - if (config_fill && zero == false) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - return (ret); -} + prof_accum_cancel(tsdn, &arena->prof_accum, usize); -void -arena_prof_promoted(const void *ptr, size_t size) -{ - arena_chunk_t *chunk; - size_t pageind, binind; + assert(isalloc(tsdn, ptr) == usize); +} +static size_t +arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { cassert(config_prof); assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(ptr, false) == PAGE); - assert(isalloc(ptr, true) == PAGE); - assert(size <= SMALL_MAXCLASS); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - binind = SMALL_SIZE2BIN(size); - assert(binind < NBINS); - arena_mapbits_large_binind_set(chunk, pageind, binind); - assert(isalloc(ptr, false) == PAGE); - assert(isalloc(ptr, true) == size); + extent_szind_set(extent, NBINS); + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = 
tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + NBINS, false); + + assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); + + return LARGE_MINCLASS; +} + +void +arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + bool slow_path) { + cassert(config_prof); + assert(opt_prof); + + extent_t *extent = iealloc(tsdn, ptr); + size_t usize = arena_prof_demote(tsdn, extent, ptr); + if (usize <= tcache_maxclass) { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + sz_size2index(usize), slow_path); + } else { + large_dalloc(tsdn, extent); + } } static void -arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* Dissociate run from bin. */ - if (run == bin->runcur) - bin->runcur = NULL; - else { - size_t binind = arena_bin_index(chunk->arena, bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - - if (bin_info->nregs != 1) { - /* - * This block's conditional is necessary because if the - * run only contains one region, then it never gets - * inserted into the non-full runs tree. - */ - arena_bin_runs_remove(bin, run); +arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) { + /* Dissociate slab from bin. */ + if (slab == bin->slabcur) { + bin->slabcur = NULL; + } else { + szind_t binind = extent_szind_get(slab); + const arena_bin_info_t *bin_info = &arena_bin_info[binind]; + + /* + * The following block's conditional is necessary because if the + * slab only contains one region, then it never gets inserted + * into the non-full slabs heap. + */ + if (bin_info->nregs == 1) { + arena_bin_slabs_full_remove(arena, bin, slab); + } else { + arena_bin_slabs_nonfull_remove(bin, slab); } } } static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - size_t binind; - arena_bin_info_t *bin_info; - size_t npages, run_ind, past; - - assert(run != bin->runcur); - assert(arena_run_tree_search(&bin->runs, - arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) - == NULL); - - binind = arena_bin_index(chunk->arena, run->bin); - bin_info = &arena_bin_info[binind]; +arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + arena_bin_t *bin) { + assert(slab != bin->slabcur); - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - npages = bin_info->run_size >> LG_PAGE; - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - past = (size_t)(PAGE_CEILING((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * - bin_info->reg_interval - bin_info->redzone_size) - - (uintptr_t)chunk) >> LG_PAGE); - malloc_mutex_lock(&arena->lock); - - /* - * If the run was originally clean, and some pages were never touched, - * trim the clean pages before deallocating the dirty portion of the - * run. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+npages-1)); - if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind < - npages) { - /* Trim clean pages. Convert to large run beforehand. 
*/ - assert(npages > 0); - arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0); - arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0); - arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE), - ((past - run_ind) << LG_PAGE), false); - /* npages = past - run_ind; */ - } - arena_run_dalloc(arena, run, true, false); - malloc_mutex_unlock(&arena->lock); + arena_slab_dalloc(tsdn, arena, slab); /****************************/ - malloc_mutex_lock(&bin->lock); - if (config_stats) - bin->stats.curruns--; + malloc_mutex_lock(tsdn, &bin->lock); + if (config_stats) { + bin->stats.curslabs--; + } } static void -arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ +arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + arena_bin_t *bin) { + assert(extent_nfree_get(slab) > 0); /* - * Make sure that if bin->runcur is non-NULL, it refers to the lowest - * non-full run. It is okay to NULL runcur out rather than proactively - * keeping it pointing at the lowest non-full run. + * Make sure that if bin->slabcur is non-NULL, it refers to the + * oldest/lowest non-full slab. It is okay to NULL slabcur out rather + * than proactively keeping it pointing at the oldest/lowest non-full + * slab. */ - if ((uintptr_t)run < (uintptr_t)bin->runcur) { - /* Switch runcur. */ - if (bin->runcur->nfree > 0) - arena_bin_runs_insert(bin, bin->runcur); - bin->runcur = run; - if (config_stats) - bin->stats.reruns++; - } else - arena_bin_runs_insert(bin, run); + if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { + /* Switch slabcur. */ + if (extent_nfree_get(bin->slabcur) > 0) { + arena_bin_slabs_nonfull_insert(bin, bin->slabcur); + } else { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + } + bin->slabcur = slab; + if (config_stats) { + bin->stats.reslabs++; + } + } else { + arena_bin_slabs_nonfull_insert(bin, slab); + } } -void -arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_t *mapelm) -{ - size_t pageind; - arena_run_t *run; - arena_bin_t *bin; - arena_bin_info_t *bin_info; - size_t size, binind; - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; - binind = arena_ptr_small_binind_get(ptr, mapelm->bits); - bin_info = &arena_bin_info[binind]; - if (config_fill || config_stats) - size = bin_info->reg_size; - - if (config_fill && opt_junk) +static void +arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + void *ptr, bool junked) { + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + szind_t binind = extent_szind_get(slab); + arena_bin_t *bin = &arena->bins[binind]; + const arena_bin_info_t *bin_info = &arena_bin_info[binind]; + + if (!junked && config_fill && unlikely(opt_junk_free)) { arena_dalloc_junk_small(ptr, bin_info); + } - arena_run_reg_dalloc(run, ptr); - if (run->nfree == bin_info->nregs) { - arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(arena, chunk, run, bin); - } else if (run->nfree == 1 && run != bin->runcur) - arena_bin_lower_run(arena, chunk, run, bin); + arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr); + unsigned nfree = extent_nfree_get(slab); + if (nfree == bin_info->nregs) { + arena_dissociate_bin_slab(arena, slab, bin); + arena_dalloc_bin_slab(tsdn, arena, slab, bin); + } else if (nfree == 1 && slab != bin->slabcur) { + 
arena_bin_slabs_full_remove(arena, bin, slab); + arena_bin_lower_slab(tsdn, arena, slab, bin); + } if (config_stats) { - bin->stats.allocated -= size; bin->stats.ndalloc++; + bin->stats.curregs--; } } void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_t *mapelm) -{ - arena_run_t *run; - arena_bin_t *bin; - - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin_locked(arena, chunk, ptr, mapelm); - malloc_mutex_unlock(&bin->lock); -} - -void -arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind) -{ - arena_chunk_map_t *mapelm; - - if (config_debug) { - /* arena_ptr_small_binind_get() does extra sanity checking. */ - assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) != BININD_INVALID); - } - mapelm = arena_mapp_get(chunk, pageind); - arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm); +arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + void *ptr) { + arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true); } -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) -#endif static void -arena_dalloc_junk_large(void *ptr, size_t usize) -{ - - if (config_fill && opt_junk) - memset(ptr, 0x5a, usize); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) -arena_dalloc_junk_large_t *arena_dalloc_junk_large = - JEMALLOC_N(arena_dalloc_junk_large_impl); -#endif - -void -arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { + szind_t binind = extent_szind_get(extent); + arena_bin_t *bin = &arena->bins[binind]; - if (config_fill || config_stats) { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t usize = arena_mapbits_large_size_get(chunk, pageind); - - arena_dalloc_junk_large(ptr, usize); - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= usize; - arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--; - } - } - - arena_run_dalloc(arena, (arena_run_t *)ptr, true, false); + malloc_mutex_lock(tsdn, &bin->lock); + arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false); + malloc_mutex_unlock(tsdn, &bin->lock); } void -arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ +arena_dalloc_small(tsdn_t *tsdn, void *ptr) { + extent_t *extent = iealloc(tsdn, ptr); + arena_t *arena = extent_arena_get(extent); - malloc_mutex_lock(&arena->lock); - arena_dalloc_large_locked(arena, chunk, ptr); - malloc_mutex_unlock(&arena->lock); + arena_dalloc_bin(tsdn, arena, extent, ptr); + arena_decay_tick(tsdn, arena); } -static void -arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size) -{ - - assert(size < oldsize); - - /* - * Shrink the run, and make trailing pages available for other - * allocations. 
- */ - malloc_mutex_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, - true); - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; +bool +arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero) { + /* Calls with non-zero extra had to clamp extra. */ + assert(extra == 0 || size + extra <= LARGE_MAXCLASS); - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + if (unlikely(size > LARGE_MAXCLASS)) { + return true; } - malloc_mutex_unlock(&arena->lock); -} -static bool -arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size, size_t extra, bool zero) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t npages = oldsize >> LG_PAGE; - size_t followsize; - - assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); - - /* Try to extend the run. */ - assert(size + extra > oldsize); - malloc_mutex_lock(&arena->lock); - if (pageind + npages < chunk_npages && - arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && - (followsize = arena_mapbits_unallocated_size_get(chunk, - pageind+npages)) >= size - oldsize) { + extent_t *extent = iealloc(tsdn, ptr); + size_t usize_min = sz_s2u(size); + size_t usize_max = sz_s2u(size + extra); + if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) { /* - * The next run is available and sufficiently large. Split the - * following run, then merge the first part with the existing - * allocation. + * Avoid moving the allocation if the size class can be left the + * same. */ - size_t flag_dirty; - size_t splitsize = (oldsize + followsize <= size + extra) - ? followsize : size + extra - oldsize; - arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk + - ((pageind+npages) << LG_PAGE)), splitsize, zero); - - size = oldsize + splitsize; - npages = size >> LG_PAGE; - - /* - * Mark the extended run as dirty if either portion of the run - * was dirty before allocation. This is rather pedantic, - * because there's not actually any sequence of events that - * could cause the resulting run to be passed to - * arena_run_dalloc() with the dirty argument set to false - * (which is when dirty flag consistency would really matter). 
- */ - flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | - arena_mapbits_dirty_get(chunk, pageind+npages-1); - arena_mapbits_large_set(chunk, pageind, size, flag_dirty); - arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); - - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + assert(arena_bin_info[sz_size2index(oldsize)].reg_size == + oldsize); + if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) != + sz_size2index(oldsize)) && (size > oldsize || usize_max < + oldsize)) { + return true; } - malloc_mutex_unlock(&arena->lock); - return (false); + + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) { + return large_ralloc_no_move(tsdn, extent, usize_min, usize_max, + zero); } - malloc_mutex_unlock(&arena->lock); - return (true); + return true; } -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) -#endif -static void -arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) -{ - - if (config_fill && opt_junk) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, - old_usize - usize); +static void * +arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) { + if (alignment == 0) { + return arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); } -} -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) -arena_ralloc_junk_large_t *arena_ralloc_junk_large = - JEMALLOC_N(arena_ralloc_junk_large_impl); -#endif - -/* - * Try to resize a large allocation, in order to avoid copying. This will - * always fail if growing an object, and the following run is already in use. - */ -static bool -arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ - size_t psize; - - psize = PAGE_CEILING(size + extra); - if (psize == oldsize) { - /* Same size class. */ - return (false); - } else { - arena_chunk_t *chunk; - arena_t *arena; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - - if (psize < oldsize) { - /* Fill before shrinking in order avoid a race. 
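arena_ralloc_no_move() above succeeds only when the old and new requests land in the same small size class, or when large_ralloc_no_move() can grow or shrink the extent in place. The public entry point that exercises it is xallocx(), which never moves the allocation; a hedged sketch:

/* Illustrative sketch only; assumes unprefixed jemalloc 5.x public symbols. */
#include <jemalloc/jemalloc.h>
#include <stdbool.h>

static bool
try_resize_in_place(void *ptr, size_t want) {
    /* xallocx() leaves ptr where it is; the return value is the resulting
     * usable size, which reaches want only if the in-place resize (the
     * arena_ralloc_no_move() path) succeeded. */
    return xallocx(ptr, want, 0, 0) >= want;
}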
*/ - arena_ralloc_junk_large(ptr, oldsize, psize); - arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, - psize); - return (false); - } else { - bool ret = arena_ralloc_large_grow(arena, chunk, ptr, - oldsize, PAGE_CEILING(size), - psize - PAGE_CEILING(size), zero); - if (config_fill && ret == false && zero == false) { - if (opt_junk) { - memset((void *)((uintptr_t)ptr + - oldsize), 0xa5, isalloc(ptr, - config_prof) - oldsize); - } else if (opt_zero) { - memset((void *)((uintptr_t)ptr + - oldsize), 0, isalloc(ptr, - config_prof) - oldsize); - } - } - return (ret); - } + usize = sz_sa2u(usize, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + return NULL; } + return ipalloct(tsdn, usize, alignment, zero, tcache, arena); } -bool -arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ +void * +arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache) { + size_t usize = sz_s2u(size); + if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) { + return NULL; + } - /* - * Avoid moving the allocation if the size class can be left the same. - */ - if (oldsize <= arena_maxclass) { - if (oldsize <= SMALL_MAXCLASS) { - assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size - == oldsize); - if ((size + extra <= SMALL_MAXCLASS && - SMALL_SIZE2BIN(size + extra) == - SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && - size + extra >= oldsize)) - return (false); - } else { - assert(size <= arena_maxclass); - if (size + extra > SMALL_MAXCLASS) { - if (arena_ralloc_large(ptr, oldsize, size, - extra, zero) == false) - return (false); - } + if (likely(usize <= SMALL_MAXCLASS)) { + /* Try to avoid moving the allocation. */ + if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) { + return ptr; } } - /* Reallocation would require a move. */ - return (true); -} - -void * -arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, - bool try_tcache_dalloc) -{ - void *ret; - size_t copysize; - - /* Try to avoid moving the allocation. */ - if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false) - return (ptr); + if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) { + return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize, + alignment, zero, tcache); + } /* * size and oldsize are different enough that we need to move the - * object. In that case, fall back to allocating new space and - * copying. + * object. In that case, fall back to allocating new space and copying. */ - if (alignment != 0) { - size_t usize = sa2u(size + extra, alignment); - if (usize == 0) - return (NULL); - ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); - } else - ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc); - + void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, + zero, tcache); if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, this time without extra. */ - if (alignment != 0) { - size_t usize = sa2u(size, alignment); - if (usize == 0) - return (NULL); - ret = ipalloct(usize, alignment, zero, try_tcache_alloc, - arena); - } else - ret = arena_malloc(arena, size, zero, try_tcache_alloc); - - if (ret == NULL) - return (NULL); + return NULL; } - /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). 
*/ - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. + * Junk/zero-filling were already done by + * ipalloc()/arena_malloc(). */ - copysize = (size < oldsize) ? size : oldsize; - VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); + + size_t copysize = (usize < oldsize) ? usize : oldsize; memcpy(ret, ptr, copysize); - iqalloct(ptr, try_tcache_dalloc); - return (ret); + isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); + return ret; } dss_prec_t -arena_dss_prec_get(arena_t *arena) -{ - dss_prec_t ret; +arena_dss_prec_get(arena_t *arena) { + return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); +} - malloc_mutex_lock(&arena->lock); - ret = arena->dss_prec; - malloc_mutex_unlock(&arena->lock); - return (ret); +bool +arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { + if (!have_dss) { + return (dss_prec != dss_prec_disabled); + } + atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); + return false; } -void -arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) -{ +ssize_t +arena_dirty_decay_ms_default_get(void) { + return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); +} - malloc_mutex_lock(&arena->lock); - arena->dss_prec = dss_prec; - malloc_mutex_unlock(&arena->lock); +bool +arena_dirty_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; } -void -arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, - size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats) -{ - unsigned i; +ssize_t +arena_muzzy_decay_ms_default_get(void) { + return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); +} - malloc_mutex_lock(&arena->lock); - *dss = dss_prec_names[arena->dss_prec]; - *nactive += arena->nactive; - *ndirty += arena->ndirty; +bool +arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; +} - astats->mapped += arena->stats.mapped; - astats->npurge += arena->stats.npurge; - astats->nmadvise += arena->stats.nmadvise; - astats->purged += arena->stats.purged; - astats->allocated_large += arena->stats.allocated_large; - astats->nmalloc_large += arena->stats.nmalloc_large; - astats->ndalloc_large += arena->stats.ndalloc_large; - astats->nrequests_large += arena->stats.nrequests_large; +unsigned +arena_nthreads_get(arena_t *arena, bool internal) { + return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); +} - for (i = 0; i < nlclasses; i++) { - lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; - lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; - lstats[i].nrequests += arena->stats.lstats[i].nrequests; - lstats[i].curruns += arena->stats.lstats[i].curruns; - } - malloc_mutex_unlock(&arena->lock); +void +arena_nthreads_inc(arena_t *arena, bool internal) { + atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; +void +arena_nthreads_dec(arena_t *arena, bool internal) { + atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} - malloc_mutex_lock(&bin->lock); - bstats[i].allocated += bin->stats.allocated; - bstats[i].nmalloc += bin->stats.nmalloc; - bstats[i].ndalloc += bin->stats.ndalloc; - bstats[i].nrequests += 
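The dirty/muzzy decay defaults above are seeded from the opt_dirty_decay_ms and opt_muzzy_decay_ms options; applications normally tune them through jemalloc's option string rather than these internals. A hedged example using the documented malloc_conf global (the MALLOC_CONF environment variable works the same way):

/* Illustrative only: compiled into the application, read by jemalloc at
 * startup.  Decay times are in milliseconds; -1 disables decay-driven
 * purging for that state. */
const char *malloc_conf = "dirty_decay_ms:1000,muzzy_decay_ms:0";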
bin->stats.nrequests; - if (config_tcache) { - bstats[i].nfills += bin->stats.nfills; - bstats[i].nflushes += bin->stats.nflushes; - } - bstats[i].nruns += bin->stats.nruns; - bstats[i].reruns += bin->stats.reruns; - bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(&bin->lock); - } +size_t +arena_extent_sn_next(arena_t *arena) { + return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); } -bool -arena_new(arena_t *arena, unsigned ind) -{ +arena_t * +arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + base_t *base; unsigned i; - arena_bin_t *bin; - arena->ind = ind; - arena->nthreads = 0; + if (ind == 0) { + base = b0get(); + } else { + base = base_new(tsdn, ind, extent_hooks); + if (base == NULL) { + return NULL; + } + } + + arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE); + if (arena == NULL) { + goto label_error; + } - if (malloc_mutex_init(&arena->lock)) - return (true); + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + arena->last_thd = NULL; if (config_stats) { - memset(&arena->stats, 0, sizeof(arena_stats_t)); - arena->stats.lstats = - (malloc_large_stats_t *)base_alloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (arena->stats.lstats == NULL) - return (true); - memset(arena->stats.lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - if (config_tcache) - ql_new(&arena->tcache_ql); + if (arena_stats_init(tsdn, &arena->stats)) { + goto label_error; + } + + ql_new(&arena->tcache_ql); + if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", + WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { + goto label_error; + } } - if (config_prof) - arena->prof_accumbytes = 0; + if (config_prof) { + if (prof_accum_init(tsdn, &arena->prof_accum)) { + goto label_error; + } + } - arena->dss_prec = chunk_dss_prec_get(); + if (config_cache_oblivious) { + /* + * A nondeterministic seed based on the address of arena reduces + * the likelihood of lockstep non-uniform cache index + * utilization among identical concurrent processes, but at the + * cost of test repeatability. For debug builds, instead use a + * deterministic seed. + */ + atomic_store_zu(&arena->offset_state, config_debug ? ind : + (size_t)(uintptr_t)arena, ATOMIC_RELAXED); + } - /* Initialize chunks. */ - arena_chunk_dirty_new(&arena->chunks_dirty); - arena->spare = NULL; + atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); - arena->nactive = 0; - arena->ndirty = 0; - arena->npurgatory = 0; + atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), + ATOMIC_RELAXED); - arena_avail_tree_new(&arena->runs_avail); + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); - /* Initialize bins. */ - for (i = 0; i < NBINS; i++) { - bin = &arena->bins[i]; - if (malloc_mutex_init(&bin->lock)) - return (true); - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - if (config_stats) - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); + extent_list_init(&arena->large); + if (malloc_mutex_init(&arena->large_mtx, "arena_large", + WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { + goto label_error; } - return (false); -} - -/* - * Calculate bin_info->run_size such that it meets the following constraints: - * - * *) bin_info->run_size >= min_run_size - * *) bin_info->run_size <= arena_maxclass - * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). 
- * *) bin_info->nregs <= RUN_MAXREGS - * - * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also - * calculated here, since these settings are all interdependent. - */ -static size_t -bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size) -{ - size_t pad_size; - size_t try_run_size, good_run_size; - uint32_t try_nregs, good_nregs; - uint32_t try_hdr_size, good_hdr_size; - uint32_t try_bitmap_offset, good_bitmap_offset; - uint32_t try_ctx0_offset, good_ctx0_offset; - uint32_t try_redzone0_offset, good_redzone0_offset; - - assert(min_run_size >= PAGE); - assert(min_run_size <= arena_maxclass); - /* - * Determine redzone size based on minimum alignment and minimum - * redzone size. Add padding to the end of the run if it is needed to - * align the regions. The padding allows each redzone to be half the - * minimum alignment; without the padding, each redzone would have to - * be twice as large in order to maintain alignment. + * Delay coalescing for dirty extents despite the disruptive effect on + * memory layout for best-fit extent allocation, since cached extents + * are likely to be reused soon after deallocation, and the cost of + * merging/splitting extents is non-trivial. */ - if (config_fill && opt_redzone) { - size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1); - if (align_min <= REDZONE_MINSIZE) { - bin_info->redzone_size = REDZONE_MINSIZE; - pad_size = 0; - } else { - bin_info->redzone_size = align_min >> 1; - pad_size = bin_info->redzone_size; - } - } else { - bin_info->redzone_size = 0; - pad_size = 0; + if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, + true)) { + goto label_error; } - bin_info->reg_interval = bin_info->reg_size + - (bin_info->redzone_size << 1); - /* - * Calculate known-valid settings before entering the run_size - * expansion loop, so that the first part of the loop always copies - * valid settings. - * - * The do..while loop iteratively reduces the number of regions until - * the run header and the regions no longer overlap. A closed formula - * would be quite messy, since there is an interdependency between the - * header's mask length and the number of regions. + * Coalesce muzzy extents immediately, because operations on them are in + * the critical path much less often than for dirty extents. */ - try_run_size = min_run_size; - try_nregs = ((try_run_size - sizeof(arena_run_t)) / - bin_info->reg_interval) - + 1; /* Counter-act try_nregs-- in loop. */ - if (try_nregs > RUN_MAXREGS) { - try_nregs = RUN_MAXREGS - + 1; /* Counter-act try_nregs-- in loop. */ - } - do { - try_nregs--; - try_hdr_size = sizeof(arena_run_t); - /* Pad to a long boundary. */ - try_hdr_size = LONG_CEILING(try_hdr_size); - try_bitmap_offset = try_hdr_size; - /* Add space for bitmap. */ - try_hdr_size += bitmap_size(try_nregs); - if (config_prof && opt_prof && prof_promote == false) { - /* Pad to a quantum boundary. */ - try_hdr_size = QUANTUM_CEILING(try_hdr_size); - try_ctx0_offset = try_hdr_size; - /* Add space for one (prof_ctx_t *) per region. */ - try_hdr_size += try_nregs * sizeof(prof_ctx_t *); - } else - try_ctx0_offset = 0; - try_redzone0_offset = try_run_size - (try_nregs * - bin_info->reg_interval) - pad_size; - } while (try_hdr_size > try_redzone0_offset); - - /* run_size expansion loop. */ - do { - /* - * Copy valid settings before trying more aggressive settings. 
- */ - good_run_size = try_run_size; - good_nregs = try_nregs; - good_hdr_size = try_hdr_size; - good_bitmap_offset = try_bitmap_offset; - good_ctx0_offset = try_ctx0_offset; - good_redzone0_offset = try_redzone0_offset; - - /* Try more aggressive settings. */ - try_run_size += PAGE; - try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) / - bin_info->reg_interval) - + 1; /* Counter-act try_nregs-- in loop. */ - if (try_nregs > RUN_MAXREGS) { - try_nregs = RUN_MAXREGS - + 1; /* Counter-act try_nregs-- in loop. */ + if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, + false)) { + goto label_error; + } + /* + * Coalesce retained extents immediately, in part because they will + * never be evicted (and therefore there's no opportunity for delayed + * coalescing), but also because operations on retained extents are not + * in the critical path. + */ + if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, + false)) { + goto label_error; + } + + if (arena_decay_init(&arena->decay_dirty, &arena->extents_dirty, + arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { + goto label_error; + } + if (arena_decay_init(&arena->decay_muzzy, &arena->extents_muzzy, + arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { + goto label_error; + } + + arena->extent_grow_next = sz_psz2ind(HUGEPAGE); + if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", + WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + extent_avail_new(&arena->extent_avail); + if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", + WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + /* Initialize bins. */ + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock, "arena_bin", + WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) { + goto label_error; } - do { - try_nregs--; - try_hdr_size = sizeof(arena_run_t); - /* Pad to a long boundary. */ - try_hdr_size = LONG_CEILING(try_hdr_size); - try_bitmap_offset = try_hdr_size; - /* Add space for bitmap. */ - try_hdr_size += bitmap_size(try_nregs); - if (config_prof && opt_prof && prof_promote == false) { - /* Pad to a quantum boundary. */ - try_hdr_size = QUANTUM_CEILING(try_hdr_size); - try_ctx0_offset = try_hdr_size; - /* - * Add space for one (prof_ctx_t *) per region. - */ - try_hdr_size += try_nregs * - sizeof(prof_ctx_t *); - } - try_redzone0_offset = try_run_size - (try_nregs * - bin_info->reg_interval) - pad_size; - } while (try_hdr_size > try_redzone0_offset); - } while (try_run_size <= arena_maxclass - && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) > - RUN_MAX_OVRHD_RELAX - && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size - && try_nregs < RUN_MAXREGS); + bin->slabcur = NULL; + extent_heap_new(&bin->slabs_nonfull); + extent_list_init(&bin->slabs_full); + if (config_stats) { + memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); + } + } - assert(good_hdr_size <= good_redzone0_offset); + arena->base = base; + /* Set arena before creating background threads. */ + arena_set(ind, arena); - /* Copy final settings. 
*/ - bin_info->run_size = good_run_size; - bin_info->nregs = good_nregs; - bin_info->bitmap_offset = good_bitmap_offset; - bin_info->ctx0_offset = good_ctx0_offset; - bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size; + nstime_init(&arena->create_time, 0); + nstime_update(&arena->create_time); - assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs - * bin_info->reg_interval) + pad_size == bin_info->run_size); + /* We don't support reentrancy for arena 0 bootstrapping. */ + if (ind != 0) { + /* + * If we're here, then arena 0 already exists, so bootstrapping + * is done enough that we should have tsd. + */ + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn), arena); + if (hooks_arena_new_hook) { + hooks_arena_new_hook(); + } + post_reentrancy(tsdn_tsd(tsdn)); + } - return (good_run_size); + return arena; +label_error: + if (ind != 0) { + base_delete(tsdn, base); + } + return NULL; } -static void -bin_info_init(void) -{ - arena_bin_info_t *bin_info; - size_t prev_run_size = PAGE; - -#define SIZE_CLASS(bin, delta, size) \ - bin_info = &arena_bin_info[bin]; \ - bin_info->reg_size = size; \ - prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\ - bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); - SIZE_CLASSES -#undef SIZE_CLASS +void +arena_boot(void) { + arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); + arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); } void -arena_boot(void) -{ - size_t header_size; - unsigned i; +arena_prefork0(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); +} - /* - * Compute the header size such that it is large enough to contain the - * page map. The page map is biased to omit entries for the header - * itself, so some iteration is necessary to compute the map bias. - * - * 1) Compute safe header_size and map_bias values that include enough - * space for an unbiased page map. - * 2) Refine map_bias based on (1) to omit the header pages in the page - * map. The resulting map_bias may be one too small. - * 3) Refine map_bias based on (2). The result will be >= the result - * from (2), and will always be correct. 
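The arena_prefork*() / arena_postfork_*() functions appearing in this part of the hunk follow the standard fork-safety discipline for a threaded allocator: take every internal lock before fork() and release (or reinitialize) them afterwards, always in one fixed order so the fork handlers themselves cannot deadlock. A generic sketch of that pattern with hypothetical lock names, wired up through pthread_atfork(); real allocators usually reinitialize rather than unlock in the child:

    #include <pthread.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

    static void prepare(void) {        /* runs in the parent, before fork() */
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
    }

    static void release(void) {        /* runs in parent and child, after fork() */
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
    }

    static void install_fork_handlers(void) {
        pthread_atfork(prepare, release, release);
    }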
- */ - map_bias = 0; - for (i = 0; i < 3; i++) { - header_size = offsetof(arena_chunk_t, map) + - (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias)); - map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK) - != 0); +void +arena_prefork1(tsdn_t *tsdn, arena_t *arena) { + if (config_stats) { + malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); } - assert(map_bias > 0); +} - arena_maxclass = chunksize - (map_bias << LG_PAGE); +void +arena_prefork2(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); +} - bin_info_init(); +void +arena_prefork3(tsdn_t *tsdn, arena_t *arena) { + extents_prefork(tsdn, &arena->extents_dirty); + extents_prefork(tsdn, &arena->extents_muzzy); + extents_prefork(tsdn, &arena->extents_retained); } void -arena_prefork(arena_t *arena) -{ - unsigned i; +arena_prefork4(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); +} - malloc_mutex_prefork(&arena->lock); - for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(&arena->bins[i].lock); +void +arena_prefork5(tsdn_t *tsdn, arena_t *arena) { + base_prefork(tsdn, arena->base); +} + +void +arena_prefork6(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->large_mtx); +} + +void +arena_prefork7(tsdn_t *tsdn, arena_t *arena) { + for (unsigned i = 0; i < NBINS; i++) { + malloc_mutex_prefork(tsdn, &arena->bins[i].lock); + } } void -arena_postfork_parent(arena_t *arena) -{ +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(&arena->bins[i].lock); - malloc_mutex_postfork_parent(&arena->lock); + for (i = 0; i < NBINS; i++) { + malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); + } + malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); + base_postfork_parent(tsdn, arena->base); + malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx); + extents_postfork_parent(tsdn, &arena->extents_dirty); + extents_postfork_parent(tsdn, &arena->extents_muzzy); + extents_postfork_parent(tsdn, &arena->extents_retained); + malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); + } } void -arena_postfork_child(arena_t *arena) -{ +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(&arena->bins[i].lock); - malloc_mutex_postfork_child(&arena->lock); + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, false); + } + if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, true); + } + if (config_stats) { + ql_new(&arena->tcache_ql); + tcache_t *tcache = tcache_get(tsdn_tsd(tsdn)); + if (tcache != NULL && tcache->arena == arena) { + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + } + } + + for (i = 0; i < NBINS; i++) { + malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); + } + malloc_mutex_postfork_child(tsdn, &arena->large_mtx); + base_postfork_child(tsdn, arena->base); + malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx); + extents_postfork_child(tsdn, &arena->extents_dirty); + extents_postfork_child(tsdn, &arena->extents_muzzy); + extents_postfork_child(tsdn, &arena->extents_retained); + 
malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); + } } diff --git a/deps/jemalloc/src/base.c b/deps/jemalloc/src/base.c index 4e62e8fa91..97078b134d 100644 --- a/deps/jemalloc/src/base.c +++ b/deps/jemalloc/src/base.c @@ -1,142 +1,402 @@ -#define JEMALLOC_BASE_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_BASE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/sz.h" /******************************************************************************/ /* Data. */ -static malloc_mutex_t base_mtx; - -/* - * Current pages that are being used for internal memory allocations. These - * pages are carved up in cacheline-size quanta, so that there is no chance of - * false cache line sharing. - */ -static void *base_pages; -static void *base_next_addr; -static void *base_past_addr; /* Addr immediately past base_pages. */ -static extent_node_t *base_nodes; +static base_t *b0; /******************************************************************************/ -/* Function prototypes for non-inline static functions. */ -static bool base_pages_alloc(size_t minsize); +static void * +base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { + void *addr; + bool zero = true; + bool commit = true; -/******************************************************************************/ + assert(size == HUGEPAGE_CEILING(size)); + + if (extent_hooks == &extent_hooks_default) { + addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit); + } else { + /* No arena context as we are creating new arenas. */ + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE, + &zero, &commit, ind); + post_reentrancy(tsd); + } + + return addr; +} + +static void +base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, + size_t size) { + /* + * Cascade through dalloc, decommit, purge_forced, and purge_lazy, + * stopping at first success. This cascade is performed for consistency + * with the cascade in extent_dalloc_wrapper() because an application's + * custom hooks may not support e.g. dalloc. This function is only ever + * called as a side effect of arena destruction, so although it might + * seem pointless to do anything besides dalloc here, the application + * may in fact want the end state of all associated virtual memory to be + * in some consistent-but-allocated state. + */ + if (extent_hooks == &extent_hooks_default) { + if (!extent_dalloc_mmap(addr, size)) { + return; + } + if (!pages_decommit(addr, size)) { + return; + } + if (!pages_purge_forced(addr, size)) { + return; + } + if (!pages_purge_lazy(addr, size)) { + return; + } + /* Nothing worked. This should never happen. */ + not_reached(); + } else { + tsd_t *tsd = tsdn_null(tsdn) ? 
tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + if (extent_hooks->dalloc != NULL && + !extent_hooks->dalloc(extent_hooks, addr, size, true, + ind)) { + goto label_done; + } + if (extent_hooks->decommit != NULL && + !extent_hooks->decommit(extent_hooks, addr, size, 0, size, + ind)) { + goto label_done; + } + if (extent_hooks->purge_forced != NULL && + !extent_hooks->purge_forced(extent_hooks, addr, size, 0, + size, ind)) { + goto label_done; + } + if (extent_hooks->purge_lazy != NULL && + !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, + ind)) { + goto label_done; + } + /* Nothing worked. That's the application's problem. */ + label_done: + post_reentrancy(tsd); + return; + } +} -static bool -base_pages_alloc(size_t minsize) -{ - size_t csize; - bool zero; +static void +base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, + size_t size) { + size_t sn; - assert(minsize != 0); - csize = CHUNK_CEILING(minsize); - zero = false; - base_pages = chunk_alloc(csize, chunksize, true, &zero, - chunk_dss_prec_get()); - if (base_pages == NULL) - return (true); - base_next_addr = base_pages; - base_past_addr = (void *)((uintptr_t)base_pages + csize); + sn = *extent_sn_next; + (*extent_sn_next)++; - return (false); + extent_binit(extent, addr, size, sn); } -void * -base_alloc(size_t size) -{ +static void * +base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, + size_t alignment) { void *ret; - size_t csize; - /* Round size up to nearest multiple of the cacheline size. */ - csize = CACHELINE_CEILING(size); + assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); + assert(size == ALIGNMENT_CEILING(size, alignment)); - malloc_mutex_lock(&base_mtx); - /* Make sure there's enough space for the allocation. */ - if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { - if (base_pages_alloc(csize)) { - malloc_mutex_unlock(&base_mtx); - return (NULL); - } + *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent), + alignment) - (uintptr_t)extent_addr_get(extent); + ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size); + assert(extent_bsize_get(extent) >= *gap_size + size); + extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) + + *gap_size + size), extent_bsize_get(extent) - *gap_size - size, + extent_sn_get(extent)); + return ret; +} + +static void +base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent, + size_t gap_size, void *addr, size_t size) { + if (extent_bsize_get(extent) > 0) { + /* + * Compute the index for the largest size class that does not + * exceed extent's size. + */ + szind_t index_floor = + sz_size2index(extent_bsize_get(extent) + 1) - 1; + extent_heap_insert(&base->avail[index_floor], extent); } - /* Allocate. */ - ret = base_next_addr; - base_next_addr = (void *)((uintptr_t)base_next_addr + csize); - malloc_mutex_unlock(&base_mtx); - VALGRIND_MAKE_MEM_UNDEFINED(ret, csize); - return (ret); + if (config_stats) { + base->allocated += size; + /* + * Add one PAGE to base_resident for every page boundary that is + * crossed by the new allocation. 
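base_extent_bump_alloc_helper() in the hunk above is a bump allocator with alignment: it skips a gap up to the requested alignment, hands out the aligned region, and shrinks the remaining block. A self-contained sketch of the same idea, assuming power-of-two alignments; the type and names here are hypothetical:

    #include <stddef.h>
    #include <stdint.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN_UP(x, a) (((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))

    typedef struct {
        uintptr_t cur;   /* first unused byte */
        uintptr_t end;   /* one past the end of the block */
    } bump_block_t;

    static void *bump_alloc(bump_block_t *b, size_t size, size_t alignment) {
        uintptr_t aligned = ALIGN_UP(b->cur, alignment);  /* the skipped gap */
        if (aligned + size > b->end) {
            return NULL;                                  /* block exhausted */
        }
        b->cur = aligned + size;                          /* trim what remains */
        return (void *)aligned;
    }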
+ */ + base->resident += PAGE_CEILING((uintptr_t)addr + size) - + PAGE_CEILING((uintptr_t)addr - gap_size); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + } } -void * -base_calloc(size_t number, size_t size) -{ - void *ret = base_alloc(number * size); +static void * +base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent, + size_t size, size_t alignment) { + void *ret; + size_t gap_size; + + ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); + base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size); + return ret; +} + +/* + * Allocate a block of virtual memory that is large enough to start with a + * base_block_t header, followed by an object of specified size and alignment. + * On success a pointer to the initialized base_block_t header is returned. + */ +static base_block_t * +base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, + pszind_t *pind_last, size_t *extent_sn_next, size_t size, + size_t alignment) { + alignment = ALIGNMENT_CEILING(alignment, QUANTUM); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t header_size = sizeof(base_block_t); + size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) - + header_size; + /* + * Create increasingly larger blocks in order to limit the total number + * of disjoint virtual memory ranges. Choose the next size in the page + * size class series (skipping size classes that are not a multiple of + * HUGEPAGE), or a size large enough to satisfy the requested size and + * alignment, whichever is larger. + */ + size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size + + usize)); + pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 : + *pind_last; + size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); + size_t block_size = (min_block_size > next_block_size) ? min_block_size + : next_block_size; + base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, + block_size); + if (block == NULL) { + return NULL; + } + *pind_last = sz_psz2ind(block_size); + block->size = block_size; + block->next = NULL; + assert(block_size >= header_size); + base_extent_init(extent_sn_next, &block->extent, + (void *)((uintptr_t)block + header_size), block_size - header_size); + return block; +} - if (ret != NULL) - memset(ret, 0, number * size); +/* + * Allocate an extent that is at least as large as specified size, with + * specified alignment. + */ +static extent_t * +base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + malloc_mutex_assert_owner(tsdn, &base->mtx); - return (ret); + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + /* + * Drop mutex during base_block_alloc(), because an extent hook will be + * called. 
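The comment just above spells out a classic re-entrancy rule: a user-supplied extent hook may call back into the allocator, so it must never run while an internal mutex is held. Stripped of the jemalloc specifics, the pattern is simply unlock, call, relock; the mutex and hook types below are hypothetical, and the caller is assumed to hold the lock on entry:

    #include <pthread.h>
    #include <stddef.h>

    typedef void *(*alloc_hook_t)(size_t size);

    static void *call_hook_unlocked(pthread_mutex_t *mtx, alloc_hook_t hook,
        size_t size) {
        void *ret;
        pthread_mutex_unlock(mtx);  /* never hold the lock across the callback */
        ret = hook(size);
        pthread_mutex_lock(mtx);    /* reacquire before touching shared state */
        return ret;
    }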
+ */ + malloc_mutex_unlock(tsdn, &base->mtx); + base_block_t *block = base_block_alloc(tsdn, extent_hooks, + base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, + alignment); + malloc_mutex_lock(tsdn, &base->mtx); + if (block == NULL) { + return NULL; + } + block->next = base->blocks; + base->blocks = block; + if (config_stats) { + base->allocated += sizeof(base_block_t); + base->resident += PAGE_CEILING(sizeof(base_block_t)); + base->mapped += block->size; + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + } + return &block->extent; } -extent_node_t * -base_node_alloc(void) -{ - extent_node_t *ret; +base_t * +b0get(void) { + return b0; +} - malloc_mutex_lock(&base_mtx); - if (base_nodes != NULL) { - ret = base_nodes; - base_nodes = *(extent_node_t **)ret; - malloc_mutex_unlock(&base_mtx); - VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t)); - } else { - malloc_mutex_unlock(&base_mtx); - ret = (extent_node_t *)base_alloc(sizeof(extent_node_t)); +base_t * +base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + pszind_t pind_last = 0; + size_t extent_sn_next = 0; + base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind, + &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); + if (block == NULL) { + return NULL; } - return (ret); + size_t gap_size; + size_t base_alignment = CACHELINE; + size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); + base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent, + &gap_size, base_size, base_alignment); + base->ind = ind; + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); + if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, + malloc_mutex_rank_exclusive)) { + base_unmap(tsdn, extent_hooks, ind, block, block->size); + return NULL; + } + base->pind_last = pind_last; + base->extent_sn_next = extent_sn_next; + base->blocks = block; + for (szind_t i = 0; i < NSIZES; i++) { + extent_heap_new(&base->avail[i]); + } + if (config_stats) { + base->allocated = sizeof(base_block_t); + base->resident = PAGE_CEILING(sizeof(base_block_t)); + base->mapped = block->size; + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + } + base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base, + base_size); + + return base; } void -base_node_dealloc(extent_node_t *node) -{ +base_delete(tsdn_t *tsdn, base_t *base) { + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + base_block_t *next = base->blocks; + do { + base_block_t *block = next; + next = block->next; + base_unmap(tsdn, extent_hooks, base_ind_get(base), block, + block->size); + } while (next != NULL); +} - VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - malloc_mutex_lock(&base_mtx); - *(extent_node_t **)node = base_nodes; - base_nodes = node; - malloc_mutex_unlock(&base_mtx); +extent_hooks_t * +base_extent_hooks_get(base_t *base) { + return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, + ATOMIC_ACQUIRE); } -bool -base_boot(void) -{ +extent_hooks_t * +base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { + extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); + return old_extent_hooks; +} + +static void * +base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, + size_t *esn) { + alignment = QUANTUM_CEILING(alignment); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t asize = usize + 
alignment - QUANTUM; + + extent_t *extent = NULL; + malloc_mutex_lock(tsdn, &base->mtx); + for (szind_t i = sz_size2index(asize); i < NSIZES; i++) { + extent = extent_heap_remove_first(&base->avail[i]); + if (extent != NULL) { + /* Use existing space. */ + break; + } + } + if (extent == NULL) { + /* Try to allocate more space. */ + extent = base_extent_alloc(tsdn, base, usize, alignment); + } + void *ret; + if (extent == NULL) { + ret = NULL; + goto label_return; + } - base_nodes = NULL; - if (malloc_mutex_init(&base_mtx)) - return (true); + ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment); + if (esn != NULL) { + *esn = extent_sn_get(extent); + } +label_return: + malloc_mutex_unlock(tsdn, &base->mtx); + return ret; +} - return (false); +/* + * base_alloc() returns zeroed memory, which is always demand-zeroed for the + * auto arenas, in order to make multi-page sparse data structures such as radix + * tree nodes efficient with respect to physical memory usage. Upon success a + * pointer to at least size bytes with specified alignment is returned. Note + * that size is rounded up to the nearest multiple of alignment to avoid false + * sharing. + */ +void * +base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + return base_alloc_impl(tsdn, base, size, alignment, NULL); +} + +extent_t * +base_alloc_extent(tsdn_t *tsdn, base_t *base) { + size_t esn; + extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t), + CACHELINE, &esn); + if (extent == NULL) { + return NULL; + } + extent_esn_set(extent, esn); + return extent; } void -base_prefork(void) -{ +base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, + size_t *mapped) { + cassert(config_stats); - malloc_mutex_prefork(&base_mtx); + malloc_mutex_lock(tsdn, &base->mtx); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + *allocated = base->allocated; + *resident = base->resident; + *mapped = base->mapped; + malloc_mutex_unlock(tsdn, &base->mtx); } void -base_postfork_parent(void) -{ +base_prefork(tsdn_t *tsdn, base_t *base) { + malloc_mutex_prefork(tsdn, &base->mtx); +} - malloc_mutex_postfork_parent(&base_mtx); +void +base_postfork_parent(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_parent(tsdn, &base->mtx); } void -base_postfork_child(void) -{ +base_postfork_child(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_child(tsdn, &base->mtx); +} - malloc_mutex_postfork_child(&base_mtx); +bool +base_boot(tsdn_t *tsdn) { + b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); + return (b0 == NULL); } diff --git a/deps/jemalloc/src/bitmap.c b/deps/jemalloc/src/bitmap.c index e2bd907d55..468b3178eb 100644 --- a/deps/jemalloc/src/bitmap.c +++ b/deps/jemalloc/src/bitmap.c @@ -1,24 +1,15 @@ -#define JEMALLOC_BITMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_BITMAP_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -static size_t bits2groups(size_t nbits); +#include "jemalloc/internal/assert.h" /******************************************************************************/ -static size_t -bits2groups(size_t nbits) -{ - - return ((nbits >> LG_BITMAP_GROUP_NBITS) + - !!(nbits & BITMAP_GROUP_NBITS_MASK)); -} +#ifdef BITMAP_USE_TREE void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { unsigned i; size_t group_count; @@ -31,60 +22,100 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) * that requires only one group. */ binfo->levels[0].group_offset = 0; - group_count = bits2groups(nbits); + group_count = BITMAP_BITS2GROUPS(nbits); for (i = 1; group_count > 1; i++) { assert(i < BITMAP_MAX_LEVELS); binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; - group_count = bits2groups(group_count); + group_count = BITMAP_BITS2GROUPS(group_count); } binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; + assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); binfo->nlevels = i; binfo->nbits = nbits; } -size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ - - return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); -} - -size_t -bitmap_size(size_t nbits) -{ - bitmap_info_t binfo; - - bitmap_info_init(&binfo, nbits); - return (bitmap_info_ngroups(&binfo)); +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->levels[binfo->nlevels].group_offset; } void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { size_t extra; unsigned i; /* * Bits are actually inverted with regard to the external bitmap - * interface, so the bitmap starts out with all 1 bits, except for - * trailing unused bits (if any). Note that each group uses bit 0 to - * correspond to the first logical bit in the group, so extra bits - * are the most significant bits of the last group. + * interface. + */ + + if (fill) { + /* The "filled" bitmap starts out with all 0 bits. */ + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + /* + * The "empty" bitmap starts out with all 1 bits, except for trailing + * unused bits (if any). Note that each group uses bit 0 to correspond + * to the first logical bit in the group, so extra bits are the most + * significant bits of the last group. 
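The `extra` arithmetic in bitmap_init() clears the bits of the last group that lie past nbits, after the whole bitmap has been filled with 0xff. The same computation in isolation, with a fixed 64-bit group standing in for BITMAP_GROUP_NBITS; this is a sketch, not the file's code:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define GROUP_NBITS 64

    static void bitmap_fill_all_free(uint64_t *groups, size_t ngroups, size_t nbits) {
        /* Bits are inverted relative to the external interface: 1 means "free". */
        memset(groups, 0xff, ngroups * sizeof(uint64_t));
        /* Shift off the unused high-order bits of the final group. */
        size_t extra = (GROUP_NBITS - (nbits % GROUP_NBITS)) % GROUP_NBITS;
        if (extra != 0) {
            groups[ngroups - 1] >>= extra;
        }
    }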
*/ - memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << - LG_SIZEOF_BITMAP); + memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) + if (extra != 0) { bitmap[binfo->levels[1].group_offset - 1] >>= extra; + } for (i = 1; i < binfo->nlevels; i++) { size_t group_count = binfo->levels[i].group_offset - binfo->levels[i-1].group_offset; extra = (BITMAP_GROUP_NBITS - (group_count & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) + if (extra != 0) { bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } } } + +#else /* BITMAP_USE_TREE */ + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + binfo->ngroups = BITMAP_BITS2GROUPS(nbits); + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->ngroups; +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { + size_t extra; + + if (fill) { + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) { + bitmap[binfo->ngroups - 1] >>= extra; + } +} + +#endif /* BITMAP_USE_TREE */ + +size_t +bitmap_size(const bitmap_info_t *binfo) { + return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); +} diff --git a/deps/jemalloc/src/ckh.c b/deps/jemalloc/src/ckh.c index 04c5296619..e95e0a3ed5 100644 --- a/deps/jemalloc/src/ckh.c +++ b/deps/jemalloc/src/ckh.c @@ -34,14 +34,24 @@ * respectively. * ******************************************************************************/ -#define JEMALLOC_CKH_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_CKH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/ckh.h" + +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. */ -static bool ckh_grow(ckh_t *ckh); -static void ckh_shrink(ckh_t *ckh); +static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); +static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); /******************************************************************************/ @@ -49,27 +59,26 @@ static void ckh_shrink(ckh_t *ckh); * Search bucket for key and return the cell number if found; SIZE_T_MAX * otherwise. */ -JEMALLOC_INLINE_C size_t -ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) -{ +static size_t +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { ckhc_t *cell; unsigned i; for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - if (cell->key != NULL && ckh->keycomp(key, cell->key)) - return ((bucket << LG_CKH_BUCKET_CELLS) + i); + if (cell->key != NULL && ckh->keycomp(key, cell->key)) { + return (bucket << LG_CKH_BUCKET_CELLS) + i; + } } - return (SIZE_T_MAX); + return SIZE_T_MAX; } /* * Search table for key and return cell number if found; SIZE_T_MAX otherwise. 
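ckh_isearch() above relies on the defining property of (2+,2) cuckoo hashing: a key can only ever live in the bucket picked by its first hash or the bucket picked by its second, so probing those two buckets is a complete lookup. A simplified, self-contained version of that probe (four cells per bucket, power-of-two bucket count); everything here is a hypothetical reduction, not the ckh API:

    #include <stdbool.h>
    #include <stddef.h>

    #define CELLS_PER_BUCKET 4

    typedef struct {
        const void *key;
        const void *data;
    } cell_t;

    static const void *cuckoo_lookup(const cell_t *tab, size_t nbuckets,
        const size_t hashes[2], const void *key,
        bool (*keyeq)(const void *, const void *)) {
        for (unsigned h = 0; h < 2; h++) {
            size_t bucket = hashes[h] & (nbuckets - 1);   /* nbuckets is a power of two */
            for (unsigned i = 0; i < CELLS_PER_BUCKET; i++) {
                const cell_t *cell = &tab[bucket * CELLS_PER_BUCKET + i];
                if (cell->key != NULL && keyeq(key, cell->key)) {
                    return cell->data;   /* hit in the primary or secondary bucket */
                }
            }
        }
        return NULL;                     /* a miss in both buckets is definitive */
    }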
*/ -JEMALLOC_INLINE_C size_t -ckh_isearch(ckh_t *ckh, const void *key) -{ +static size_t +ckh_isearch(ckh_t *ckh, const void *key) { size_t hashes[2], bucket, cell; assert(ckh != NULL); @@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key) /* Search primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - if (cell != SIZE_T_MAX) - return (cell); + if (cell != SIZE_T_MAX) { + return cell; + } /* Search secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - return (cell); + return cell; } -JEMALLOC_INLINE_C bool +static bool ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, - const void *data) -{ + const void *data) { ckhc_t *cell; unsigned offset, i; @@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. */ - prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; @@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, cell->key = key; cell->data = data; ckh->count++; - return (false); + return false; } } - return (true); + return true; } /* @@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * eviction/relocation procedure until either success or detection of an * eviction/relocation bucket cycle. */ -JEMALLOC_INLINE_C bool +static bool ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, - void const **argdata) -{ + void const **argdata) { const void *key, *data, *tkey, *tdata; ckhc_t *cell; size_t hashes[2], bucket, tbucket; @@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, * were an item for which both hashes indicated the same * bucket. */ - prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + i = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); @@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, if (tbucket == argbucket) { *argkey = key; *argdata = data; - return (true); + return true; } bucket = tbucket; - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } } } -JEMALLOC_INLINE_C bool -ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) -{ +static bool +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { size_t hashes[2], bucket; const void *key = *argkey; const void *data = *argdata; @@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) /* Try to insert in primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } /* Try to insert in secondary bucket. 
*/ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } /* * Try to find a place for this item via iterative eviction/relocation. */ - return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); + return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata); } /* * Try to rebuild the hash table from scratch by inserting all items from the * old table into the new. */ -JEMALLOC_INLINE_C bool -ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) -{ +static bool +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { size_t count, i, nins; const void *key, *data; @@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) data = aTab[i].data; if (ckh_try_insert(ckh, &key, &data)) { ckh->count = count; - return (true); + return true; } nins++; } } - return (false); + return false; } static bool -ckh_grow(ckh_t *ckh) -{ +ckh_grow(tsd_t *tsd, ckh_t *ckh) { bool ret; ckhc_t *tab, *ttab; - size_t lg_curcells; - unsigned lg_prevbuckets; + unsigned lg_prevbuckets, lg_curcells; #ifdef CKH_COUNT ckh->ngrows++; @@ -265,12 +274,13 @@ ckh_grow(ckh_t *ckh) size_t usize; lg_curcells++; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) { + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } - tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, + true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { ret = true; goto label_return; @@ -281,28 +291,27 @@ ckh_grow(ckh_t *ckh) tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - if (ckh_rebuild(ckh, tab) == false) { - idalloc(tab); + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ - idalloc(ckh->tab); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } ret = false; label_return: - return (ret); + return ret; } static void -ckh_shrink(ckh_t *ckh) -{ +ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ckhc_t *tab, *ttab; - size_t lg_curcells, usize; - unsigned lg_prevbuckets; + size_t usize; + unsigned lg_prevbuckets, lg_curcells; /* * It is possible (though unlikely, given well behaved hashes) that the @@ -310,10 +319,12 @@ ckh_shrink(ckh_t *ckh) */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return; - tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + } + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, + true, arena_ichoose(tsd, NULL)); if (tab == NULL) { /* * An OOM error isn't worth propagating, since it doesn't @@ -327,8 +338,8 @@ ckh_shrink(ckh_t *ckh) tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - if (ckh_rebuild(ckh, tab) == false) { - idalloc(tab); + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); #ifdef CKH_COUNT ckh->nshrinks++; #endif @@ -336,7 +347,7 @@ ckh_shrink(ckh_t *ckh) } /* Rebuilding failed, so back out partially rebuilt table. 
*/ - idalloc(ckh->tab); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT @@ -345,8 +356,8 @@ ckh_shrink(ckh_t *ckh) } bool -ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) -{ +ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp) { bool ret; size_t mincells, usize; unsigned lg_mincells; @@ -366,29 +377,31 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) ckh->count = 0; /* - * Find the minimum power of 2 that is large enough to fit aBaseCount + * Find the minimum power of 2 that is large enough to fit minitems * entries. We are using (2+,2) cuckoo hashing, which has an expected * maximum load factor of at least ~0.86, so 0.75 is a conservative load - * factor that will typically allow 2^aLgMinItems to fit without ever + * factor that will typically allow mincells items to fit without ever * growing the table. */ assert(LG_CKH_BUCKET_CELLS > 0); mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; for (lg_mincells = LG_CKH_BUCKET_CELLS; (ZU(1) << lg_mincells) < mincells; - lg_mincells++) - ; /* Do nothing. */ + lg_mincells++) { + /* Do nothing. */ + } ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->hash = hash; ckh->keycomp = keycomp; - usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (usize == 0) { + usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } - ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, + NULL, true, arena_ichoose(tsd, NULL)); if (ckh->tab == NULL) { ret = true; goto label_return; @@ -396,20 +409,18 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) ret = false; label_return: - return (ret); + return ret; } void -ckh_delete(ckh_t *ckh) -{ - +ckh_delete(tsd_t *tsd, ckh_t *ckh) { assert(ckh != NULL); #ifdef CKH_VERBOSE malloc_printf( - "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64"," - " nshrinkfails: %"PRIu64", ninserts: %"PRIu64"," - " nrelocs: %"PRIu64"\n", __func__, ckh, + "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," + " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," + " nrelocs: %"FMTu64"\n", __func__, ckh, (unsigned long long)ckh->ngrows, (unsigned long long)ckh->nshrinks, (unsigned long long)ckh->nshrinkfails, @@ -417,43 +428,42 @@ ckh_delete(ckh_t *ckh) (unsigned long long)ckh->nrelocs); #endif - idalloc(ckh->tab); - if (config_debug) - memset(ckh, 0x5a, sizeof(ckh_t)); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); + if (config_debug) { + memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); + } } size_t -ckh_count(ckh_t *ckh) -{ - +ckh_count(ckh_t *ckh) { assert(ckh != NULL); - return (ckh->count); + return ckh->count; } bool -ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) -{ +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { size_t i, ncells; for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { if (ckh->tab[i].key != NULL) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[i].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[i].data; + } *tabind = i + 1; - return (false); + return false; } } - return (true); + return true; } bool -ckh_insert(ckh_t *ckh, const void *key, 
const void *data) -{ +ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { bool ret; assert(ckh != NULL); @@ -464,7 +474,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data) #endif while (ckh_try_insert(ckh, &key, &data)) { - if (ckh_grow(ckh)) { + if (ckh_grow(tsd, ckh)) { ret = true; goto label_return; } @@ -472,22 +482,24 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data) ret = false; label_return: - return (ret); + return ret; } bool -ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ +ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, + void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; + } ckh->tab[cell].key = NULL; ckh->tab[cell].data = NULL; /* Not necessary. */ @@ -497,54 +509,50 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data) + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets > ckh->lg_minbuckets) { /* Ignore error due to OOM. */ - ckh_shrink(ckh); + ckh_shrink(tsd, ckh); } - return (false); + return false; } - return (true); + return true; } bool -ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; - return (false); + } + return false; } - return (true); + return true; } void -ckh_string_hash(const void *key, size_t r_hash[2]) -{ - +ckh_string_hash(const void *key, size_t r_hash[2]) { hash(key, strlen((const char *)key), 0x94122f33U, r_hash); } bool -ckh_string_keycomp(const void *k1, const void *k2) -{ - - assert(k1 != NULL); - assert(k2 != NULL); +ckh_string_keycomp(const void *k1, const void *k2) { + assert(k1 != NULL); + assert(k2 != NULL); - return (strcmp((char *)k1, (char *)k2) ? false : true); + return !strcmp((char *)k1, (char *)k2); } void -ckh_pointer_hash(const void *key, size_t r_hash[2]) -{ +ckh_pointer_hash(const void *key, size_t r_hash[2]) { union { const void *v; size_t i; @@ -556,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2]) } bool -ckh_pointer_keycomp(const void *k1, const void *k2) -{ - - return ((k1 == k2) ? true : false); +ckh_pointer_keycomp(const void *k1, const void *k2) { + return (k1 == k2); } diff --git a/deps/jemalloc/src/ctl.c b/deps/jemalloc/src/ctl.c index cc2c5aef57..36bc8fb5b7 100644 --- a/deps/jemalloc/src/ctl.c +++ b/deps/jemalloc/src/ctl.c @@ -1,146 +1,146 @@ -#define JEMALLOC_CTL_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_CTL_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. 
*/ /* * ctl_mtx protects the following: - * - ctl_stats.* - * - opt_prof_active + * - ctl_stats->* */ static malloc_mutex_t ctl_mtx; static bool ctl_initialized; -static uint64_t ctl_epoch; -static ctl_stats_t ctl_stats; +static ctl_stats_t *ctl_stats; +static ctl_arenas_t *ctl_arenas; /******************************************************************************/ /* Helpers for named and indexed nodes. */ -static inline const ctl_named_node_t * -ctl_named_node(const ctl_node_t *node) -{ - +static const ctl_named_node_t * +ctl_named_node(const ctl_node_t *node) { return ((node->named) ? (const ctl_named_node_t *)node : NULL); } -static inline const ctl_named_node_t * -ctl_named_children(const ctl_named_node_t *node, int index) -{ +static const ctl_named_node_t * +ctl_named_children(const ctl_named_node_t *node, size_t index) { const ctl_named_node_t *children = ctl_named_node(node->children); return (children ? &children[index] : NULL); } -static inline const ctl_indexed_node_t * -ctl_indexed_node(const ctl_node_t *node) -{ - - return ((node->named == false) ? (const ctl_indexed_node_t *)node : - NULL); +static const ctl_indexed_node_t * +ctl_indexed_node(const ctl_node_t *node) { + return (!node->named ? (const ctl_indexed_node_t *)node : NULL); } /******************************************************************************/ /* Function prototypes for non-inline static functions. */ -#define CTL_PROTO(n) \ -static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen); - -#define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(const size_t *mib, \ - size_t miblen, size_t i); - -static bool ctl_arena_init(ctl_arena_stats_t *astats); -static void ctl_arena_clear(ctl_arena_stats_t *astats); -static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, - arena_t *arena); -static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, - ctl_arena_stats_t *astats); -static void ctl_arena_refresh(arena_t *arena, unsigned i); -static bool ctl_grow(void); -static void ctl_refresh(void); -static bool ctl_init(void); -static int ctl_lookup(const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp); +#define CTL_PROTO(n) \ +static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ + const size_t *mib, size_t miblen, size_t i); CTL_PROTO(version) CTL_PROTO(epoch) +CTL_PROTO(background_thread) CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_flush) +CTL_PROTO(thread_prof_name) +CTL_PROTO(thread_prof_active) CTL_PROTO(thread_arena) CTL_PROTO(thread_allocated) CTL_PROTO(thread_allocatedp) CTL_PROTO(thread_deallocated) CTL_PROTO(thread_deallocatedp) +CTL_PROTO(config_cache_oblivious) CTL_PROTO(config_debug) -CTL_PROTO(config_dss) CTL_PROTO(config_fill) CTL_PROTO(config_lazy_lock) -CTL_PROTO(config_mremap) -CTL_PROTO(config_munmap) +CTL_PROTO(config_malloc_conf) CTL_PROTO(config_prof) CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libunwind) CTL_PROTO(config_stats) -CTL_PROTO(config_tcache) -CTL_PROTO(config_tls) +CTL_PROTO(config_thp) CTL_PROTO(config_utrace) -CTL_PROTO(config_valgrind) CTL_PROTO(config_xmalloc) CTL_PROTO(opt_abort) +CTL_PROTO(opt_abort_conf) +CTL_PROTO(opt_retain) CTL_PROTO(opt_dss) -CTL_PROTO(opt_lg_chunk) CTL_PROTO(opt_narenas) -CTL_PROTO(opt_lg_dirty_mult) +CTL_PROTO(opt_percpu_arena) +CTL_PROTO(opt_background_thread) 
+CTL_PROTO(opt_dirty_decay_ms) +CTL_PROTO(opt_muzzy_decay_ms) CTL_PROTO(opt_stats_print) +CTL_PROTO(opt_stats_print_opts) CTL_PROTO(opt_junk) CTL_PROTO(opt_zero) -CTL_PROTO(opt_quarantine) -CTL_PROTO(opt_redzone) CTL_PROTO(opt_utrace) -CTL_PROTO(opt_valgrind) CTL_PROTO(opt_xmalloc) CTL_PROTO(opt_tcache) CTL_PROTO(opt_lg_tcache_max) CTL_PROTO(opt_prof) CTL_PROTO(opt_prof_prefix) CTL_PROTO(opt_prof_active) +CTL_PROTO(opt_prof_thread_active_init) CTL_PROTO(opt_lg_prof_sample) CTL_PROTO(opt_lg_prof_interval) CTL_PROTO(opt_prof_gdump) CTL_PROTO(opt_prof_final) CTL_PROTO(opt_prof_leak) CTL_PROTO(opt_prof_accum) +CTL_PROTO(tcache_create) +CTL_PROTO(tcache_flush) +CTL_PROTO(tcache_destroy) +CTL_PROTO(arena_i_initialized) +CTL_PROTO(arena_i_decay) CTL_PROTO(arena_i_purge) -static void arena_purge(unsigned arena_ind); +CTL_PROTO(arena_i_reset) +CTL_PROTO(arena_i_destroy) CTL_PROTO(arena_i_dss) +CTL_PROTO(arena_i_dirty_decay_ms) +CTL_PROTO(arena_i_muzzy_decay_ms) +CTL_PROTO(arena_i_extent_hooks) INDEX_PROTO(arena_i) CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_nregs) -CTL_PROTO(arenas_bin_i_run_size) +CTL_PROTO(arenas_bin_i_slab_size) INDEX_PROTO(arenas_bin_i) -CTL_PROTO(arenas_lrun_i_size) -INDEX_PROTO(arenas_lrun_i) +CTL_PROTO(arenas_lextent_i_size) +INDEX_PROTO(arenas_lextent_i) CTL_PROTO(arenas_narenas) -CTL_PROTO(arenas_initialized) +CTL_PROTO(arenas_dirty_decay_ms) +CTL_PROTO(arenas_muzzy_decay_ms) CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_page) CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nhbins) -CTL_PROTO(arenas_nlruns) -CTL_PROTO(arenas_purge) -CTL_PROTO(arenas_extend) +CTL_PROTO(arenas_nlextents) +CTL_PROTO(arenas_create) +CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_active) CTL_PROTO(prof_dump) +CTL_PROTO(prof_gdump) +CTL_PROTO(prof_reset) CTL_PROTO(prof_interval) -CTL_PROTO(stats_chunks_current) -CTL_PROTO(stats_chunks_total) -CTL_PROTO(stats_chunks_high) -CTL_PROTO(stats_huge_allocated) -CTL_PROTO(stats_huge_nmalloc) -CTL_PROTO(stats_huge_ndalloc) +CTL_PROTO(lg_prof_sample) CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_ndalloc) @@ -149,119 +149,177 @@ CTL_PROTO(stats_arenas_i_large_allocated) CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_allocated) CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_curregs) CTL_PROTO(stats_arenas_i_bins_j_nfills) CTL_PROTO(stats_arenas_i_bins_j_nflushes) -CTL_PROTO(stats_arenas_i_bins_j_nruns) -CTL_PROTO(stats_arenas_i_bins_j_nreruns) -CTL_PROTO(stats_arenas_i_bins_j_curruns) +CTL_PROTO(stats_arenas_i_bins_j_nslabs) +CTL_PROTO(stats_arenas_i_bins_j_nreslabs) +CTL_PROTO(stats_arenas_i_bins_j_curslabs) INDEX_PROTO(stats_arenas_i_bins_j) -CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) -CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) -CTL_PROTO(stats_arenas_i_lruns_j_nrequests) -CTL_PROTO(stats_arenas_i_lruns_j_curruns) -INDEX_PROTO(stats_arenas_i_lruns_j) +CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) +CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) +CTL_PROTO(stats_arenas_i_lextents_j_nrequests) +CTL_PROTO(stats_arenas_i_lextents_j_curlextents) +INDEX_PROTO(stats_arenas_i_lextents_j) CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_uptime) CTL_PROTO(stats_arenas_i_dss) +CTL_PROTO(stats_arenas_i_dirty_decay_ms) 
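The opt.*, arena.<i>.* and stats.* entries being added to this control tree are what applications reach through mallctl(). A short usage sketch for the new decay knobs, assuming the jemalloc 5 public header and mallctl name strings corresponding to the prototypes in this hunk (error handling elided):

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>
    #include <sys/types.h>

    int main(void) {
        ssize_t decay_ms;
        size_t sz = sizeof(decay_ms);

        /* Read the configured default for dirty page decay (in milliseconds). */
        mallctl("opt.dirty_decay_ms", &decay_ms, &sz, NULL, 0);
        printf("default dirty_decay_ms: %zd\n", decay_ms);

        /* Make arena 0 purge dirty pages eagerly by setting its decay time to 0. */
        decay_ms = 0;
        mallctl("arena.0.dirty_decay_ms", NULL, NULL, &decay_ms, sizeof(decay_ms));
        return 0;
    }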
+CTL_PROTO(stats_arenas_i_muzzy_decay_ms) CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pdirty) +CTL_PROTO(stats_arenas_i_pmuzzy) CTL_PROTO(stats_arenas_i_mapped) -CTL_PROTO(stats_arenas_i_npurge) -CTL_PROTO(stats_arenas_i_nmadvise) -CTL_PROTO(stats_arenas_i_purged) +CTL_PROTO(stats_arenas_i_retained) +CTL_PROTO(stats_arenas_i_dirty_npurge) +CTL_PROTO(stats_arenas_i_dirty_nmadvise) +CTL_PROTO(stats_arenas_i_dirty_purged) +CTL_PROTO(stats_arenas_i_muzzy_npurge) +CTL_PROTO(stats_arenas_i_muzzy_nmadvise) +CTL_PROTO(stats_arenas_i_muzzy_purged) +CTL_PROTO(stats_arenas_i_base) +CTL_PROTO(stats_arenas_i_internal) +CTL_PROTO(stats_arenas_i_tcache_bytes) +CTL_PROTO(stats_arenas_i_resident) INDEX_PROTO(stats_arenas_i) -CTL_PROTO(stats_cactive) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) +CTL_PROTO(stats_background_thread_num_threads) +CTL_PROTO(stats_background_thread_num_runs) +CTL_PROTO(stats_background_thread_run_interval) +CTL_PROTO(stats_metadata) +CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) +CTL_PROTO(stats_retained) + +#define MUTEX_STATS_CTL_PROTO_GEN(n) \ +CTL_PROTO(stats_##n##_num_ops) \ +CTL_PROTO(stats_##n##_num_wait) \ +CTL_PROTO(stats_##n##_num_spin_acq) \ +CTL_PROTO(stats_##n##_num_owner_switch) \ +CTL_PROTO(stats_##n##_total_wait_time) \ +CTL_PROTO(stats_##n##_max_wait_time) \ +CTL_PROTO(stats_##n##_max_num_thds) + +/* Global mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* Arena bin mutexes. */ +MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex) +#undef MUTEX_STATS_CTL_PROTO_GEN + +CTL_PROTO(stats_mutexes_reset) /******************************************************************************/ /* mallctl tree. */ -/* Maximum tree depth. */ -#define CTL_MAX_DEPTH 6 - -#define NAME(n) {true}, n -#define CHILD(t, c) \ +#define NAME(n) {true}, n +#define CHILD(t, c) \ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ (ctl_node_t *)c##_node, \ NULL -#define CTL(c) 0, NULL, c##_ctl +#define CTL(c) 0, NULL, c##_ctl /* * Only handles internal indexed nodes, since there are currently no external * ones. 
*/ -#define INDEX(i) {false}, i##_index +#define INDEX(i) {false}, i##_index -static const ctl_named_node_t tcache_node[] = { +static const ctl_named_node_t thread_tcache_node[] = { {NAME("enabled"), CTL(thread_tcache_enabled)}, {NAME("flush"), CTL(thread_tcache_flush)} }; +static const ctl_named_node_t thread_prof_node[] = { + {NAME("name"), CTL(thread_prof_name)}, + {NAME("active"), CTL(thread_prof_active)} +}; + static const ctl_named_node_t thread_node[] = { {NAME("arena"), CTL(thread_arena)}, {NAME("allocated"), CTL(thread_allocated)}, {NAME("allocatedp"), CTL(thread_allocatedp)}, {NAME("deallocated"), CTL(thread_deallocated)}, {NAME("deallocatedp"), CTL(thread_deallocatedp)}, - {NAME("tcache"), CHILD(named, tcache)} + {NAME("tcache"), CHILD(named, thread_tcache)}, + {NAME("prof"), CHILD(named, thread_prof)} }; static const ctl_named_node_t config_node[] = { - {NAME("debug"), CTL(config_debug)}, - {NAME("dss"), CTL(config_dss)}, - {NAME("fill"), CTL(config_fill)}, - {NAME("lazy_lock"), CTL(config_lazy_lock)}, - {NAME("mremap"), CTL(config_mremap)}, - {NAME("munmap"), CTL(config_munmap)}, - {NAME("prof"), CTL(config_prof)}, - {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, - {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, - {NAME("stats"), CTL(config_stats)}, - {NAME("tcache"), CTL(config_tcache)}, - {NAME("tls"), CTL(config_tls)}, - {NAME("utrace"), CTL(config_utrace)}, - {NAME("valgrind"), CTL(config_valgrind)}, - {NAME("xmalloc"), CTL(config_xmalloc)} + {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, + {NAME("debug"), CTL(config_debug)}, + {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("malloc_conf"), CTL(config_malloc_conf)}, + {NAME("prof"), CTL(config_prof)}, + {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, + {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, + {NAME("stats"), CTL(config_stats)}, + {NAME("thp"), CTL(config_thp)}, + {NAME("utrace"), CTL(config_utrace)}, + {NAME("xmalloc"), CTL(config_xmalloc)} }; static const ctl_named_node_t opt_node[] = { - {NAME("abort"), CTL(opt_abort)}, - {NAME("dss"), CTL(opt_dss)}, - {NAME("lg_chunk"), CTL(opt_lg_chunk)}, - {NAME("narenas"), CTL(opt_narenas)}, - {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, - {NAME("stats_print"), CTL(opt_stats_print)}, - {NAME("junk"), CTL(opt_junk)}, - {NAME("zero"), CTL(opt_zero)}, - {NAME("quarantine"), CTL(opt_quarantine)}, - {NAME("redzone"), CTL(opt_redzone)}, - {NAME("utrace"), CTL(opt_utrace)}, - {NAME("valgrind"), CTL(opt_valgrind)}, - {NAME("xmalloc"), CTL(opt_xmalloc)}, - {NAME("tcache"), CTL(opt_tcache)}, - {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, - {NAME("prof"), CTL(opt_prof)}, - {NAME("prof_prefix"), CTL(opt_prof_prefix)}, - {NAME("prof_active"), CTL(opt_prof_active)}, - {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, - {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, - {NAME("prof_gdump"), CTL(opt_prof_gdump)}, - {NAME("prof_final"), CTL(opt_prof_final)}, - {NAME("prof_leak"), CTL(opt_prof_leak)}, - {NAME("prof_accum"), CTL(opt_prof_accum)} + {NAME("abort"), CTL(opt_abort)}, + {NAME("abort_conf"), CTL(opt_abort_conf)}, + {NAME("retain"), CTL(opt_retain)}, + {NAME("dss"), CTL(opt_dss)}, + {NAME("narenas"), CTL(opt_narenas)}, + {NAME("percpu_arena"), CTL(opt_percpu_arena)}, + {NAME("background_thread"), CTL(opt_background_thread)}, + {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, + {NAME("stats_print"), CTL(opt_stats_print)}, + {NAME("stats_print_opts"), 
CTL(opt_stats_print_opts)}, + {NAME("junk"), CTL(opt_junk)}, + {NAME("zero"), CTL(opt_zero)}, + {NAME("utrace"), CTL(opt_utrace)}, + {NAME("xmalloc"), CTL(opt_xmalloc)}, + {NAME("tcache"), CTL(opt_tcache)}, + {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, + {NAME("prof"), CTL(opt_prof)}, + {NAME("prof_prefix"), CTL(opt_prof_prefix)}, + {NAME("prof_active"), CTL(opt_prof_active)}, + {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, + {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, + {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, + {NAME("prof_gdump"), CTL(opt_prof_gdump)}, + {NAME("prof_final"), CTL(opt_prof_final)}, + {NAME("prof_leak"), CTL(opt_prof_leak)}, + {NAME("prof_accum"), CTL(opt_prof_accum)} +}; + +static const ctl_named_node_t tcache_node[] = { + {NAME("create"), CTL(tcache_create)}, + {NAME("flush"), CTL(tcache_flush)}, + {NAME("destroy"), CTL(tcache_destroy)} }; static const ctl_named_node_t arena_i_node[] = { - {NAME("purge"), CTL(arena_i_purge)}, - {NAME("dss"), CTL(arena_i_dss)} + {NAME("initialized"), CTL(arena_i_initialized)}, + {NAME("decay"), CTL(arena_i_decay)}, + {NAME("purge"), CTL(arena_i_purge)}, + {NAME("reset"), CTL(arena_i_reset)}, + {NAME("destroy"), CTL(arena_i_destroy)}, + {NAME("dss"), CTL(arena_i_dss)}, + {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, + {NAME("extent_hooks"), CTL(arena_i_extent_hooks)} }; static const ctl_named_node_t super_arena_i_node[] = { - {NAME(""), CHILD(named, arena_i)} + {NAME(""), CHILD(named, arena_i)} }; static const ctl_indexed_node_t arena_node[] = { @@ -269,147 +327,208 @@ static const ctl_indexed_node_t arena_node[] = { }; static const ctl_named_node_t arenas_bin_i_node[] = { - {NAME("size"), CTL(arenas_bin_i_size)}, - {NAME("nregs"), CTL(arenas_bin_i_nregs)}, - {NAME("run_size"), CTL(arenas_bin_i_run_size)} + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("slab_size"), CTL(arenas_bin_i_slab_size)} }; static const ctl_named_node_t super_arenas_bin_i_node[] = { - {NAME(""), CHILD(named, arenas_bin_i)} + {NAME(""), CHILD(named, arenas_bin_i)} }; static const ctl_indexed_node_t arenas_bin_node[] = { {INDEX(arenas_bin_i)} }; -static const ctl_named_node_t arenas_lrun_i_node[] = { - {NAME("size"), CTL(arenas_lrun_i_size)} +static const ctl_named_node_t arenas_lextent_i_node[] = { + {NAME("size"), CTL(arenas_lextent_i_size)} }; -static const ctl_named_node_t super_arenas_lrun_i_node[] = { - {NAME(""), CHILD(named, arenas_lrun_i)} +static const ctl_named_node_t super_arenas_lextent_i_node[] = { + {NAME(""), CHILD(named, arenas_lextent_i)} }; -static const ctl_indexed_node_t arenas_lrun_node[] = { - {INDEX(arenas_lrun_i)} +static const ctl_indexed_node_t arenas_lextent_node[] = { + {INDEX(arenas_lextent_i)} }; static const ctl_named_node_t arenas_node[] = { - {NAME("narenas"), CTL(arenas_narenas)}, - {NAME("initialized"), CTL(arenas_initialized)}, - {NAME("quantum"), CTL(arenas_quantum)}, - {NAME("page"), CTL(arenas_page)}, - {NAME("tcache_max"), CTL(arenas_tcache_max)}, - {NAME("nbins"), CTL(arenas_nbins)}, - {NAME("nhbins"), CTL(arenas_nhbins)}, - {NAME("bin"), CHILD(indexed, arenas_bin)}, - {NAME("nlruns"), CTL(arenas_nlruns)}, - {NAME("lrun"), CHILD(indexed, arenas_lrun)}, - {NAME("purge"), CTL(arenas_purge)}, - {NAME("extend"), CTL(arenas_extend)} + {NAME("narenas"), CTL(arenas_narenas)}, + {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), 
CTL(arenas_muzzy_decay_ms)}, + {NAME("quantum"), CTL(arenas_quantum)}, + {NAME("page"), CTL(arenas_page)}, + {NAME("tcache_max"), CTL(arenas_tcache_max)}, + {NAME("nbins"), CTL(arenas_nbins)}, + {NAME("nhbins"), CTL(arenas_nhbins)}, + {NAME("bin"), CHILD(indexed, arenas_bin)}, + {NAME("nlextents"), CTL(arenas_nlextents)}, + {NAME("lextent"), CHILD(indexed, arenas_lextent)}, + {NAME("create"), CTL(arenas_create)} }; static const ctl_named_node_t prof_node[] = { + {NAME("thread_active_init"), CTL(prof_thread_active_init)}, {NAME("active"), CTL(prof_active)}, {NAME("dump"), CTL(prof_dump)}, - {NAME("interval"), CTL(prof_interval)} + {NAME("gdump"), CTL(prof_gdump)}, + {NAME("reset"), CTL(prof_reset)}, + {NAME("interval"), CTL(prof_interval)}, + {NAME("lg_sample"), CTL(lg_prof_sample)} }; -static const ctl_named_node_t stats_chunks_node[] = { - {NAME("current"), CTL(stats_chunks_current)}, - {NAME("total"), CTL(stats_chunks_total)}, - {NAME("high"), CTL(stats_chunks_high)} +static const ctl_named_node_t stats_arenas_i_small_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} }; -static const ctl_named_node_t stats_huge_node[] = { - {NAME("allocated"), CTL(stats_huge_allocated)}, - {NAME("nmalloc"), CTL(stats_huge_nmalloc)}, - {NAME("ndalloc"), CTL(stats_huge_ndalloc)} +static const ctl_named_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} }; -static const ctl_named_node_t stats_arenas_i_small_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} +#define MUTEX_PROF_DATA_NODE(prefix) \ +static const ctl_named_node_t stats_##prefix##_node[] = { \ + {NAME("num_ops"), \ + CTL(stats_##prefix##_num_ops)}, \ + {NAME("num_wait"), \ + CTL(stats_##prefix##_num_wait)}, \ + {NAME("num_spin_acq"), \ + CTL(stats_##prefix##_num_spin_acq)}, \ + {NAME("num_owner_switch"), \ + CTL(stats_##prefix##_num_owner_switch)}, \ + {NAME("total_wait_time"), \ + CTL(stats_##prefix##_total_wait_time)}, \ + {NAME("max_wait_time"), \ + CTL(stats_##prefix##_max_wait_time)}, \ + {NAME("max_num_thds"), \ + CTL(stats_##prefix##_max_num_thds)} \ + /* Note that # of current waiting thread not provided. 
*/ \ }; -static const ctl_named_node_t stats_arenas_i_large_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} -}; +MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex) static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, - {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, - {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, - {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, - {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, - {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} + {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, + {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, + {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, + {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, + {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, + {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} }; + static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_bins_j)} + {NAME(""), CHILD(named, stats_arenas_i_bins_j)} }; static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { {INDEX(stats_arenas_i_bins_j)} }; -static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, - {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} +static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, + {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} }; -static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} +static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} }; -static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { - {INDEX(stats_arenas_i_lruns_j)} +static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { + {INDEX(stats_arenas_i_lextents_j)} +}; + +#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +static const ctl_named_node_t stats_arenas_i_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)}, +MUTEX_PROF_ARENA_MUTEXES +#undef OP }; static const ctl_named_node_t stats_arenas_i_node[] = { - {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, - {NAME("dss"), CTL(stats_arenas_i_dss)}, - {NAME("pactive"), CTL(stats_arenas_i_pactive)}, - {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, - {NAME("mapped"), CTL(stats_arenas_i_mapped)}, - {NAME("npurge"), CTL(stats_arenas_i_npurge)}, - {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, - 
{NAME("purged"), CTL(stats_arenas_i_purged)}, - {NAME("small"), CHILD(named, stats_arenas_i_small)}, - {NAME("large"), CHILD(named, stats_arenas_i_large)}, - {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, - {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)} + {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("uptime"), CTL(stats_arenas_i_uptime)}, + {NAME("dss"), CTL(stats_arenas_i_dss)}, + {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)}, + {NAME("pactive"), CTL(stats_arenas_i_pactive)}, + {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, + {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("retained"), CTL(stats_arenas_i_retained)}, + {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, + {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)}, + {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, + {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)}, + {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)}, + {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)}, + {NAME("base"), CTL(stats_arenas_i_base)}, + {NAME("internal"), CTL(stats_arenas_i_internal)}, + {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, + {NAME("resident"), CTL(stats_arenas_i_resident)}, + {NAME("small"), CHILD(named, stats_arenas_i_small)}, + {NAME("large"), CHILD(named, stats_arenas_i_large)}, + {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, + {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, + {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} }; static const ctl_named_node_t super_stats_arenas_i_node[] = { - {NAME(""), CHILD(named, stats_arenas_i)} + {NAME(""), CHILD(named, stats_arenas_i)} }; static const ctl_indexed_node_t stats_arenas_node[] = { {INDEX(stats_arenas_i)} }; +static const ctl_named_node_t stats_background_thread_node[] = { + {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, + {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, + {NAME("run_interval"), CTL(stats_background_thread_run_interval)} +}; + +#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +static const ctl_named_node_t stats_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)}, +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + {NAME("reset"), CTL(stats_mutexes_reset)} +}; +#undef MUTEX_PROF_DATA_NODE + static const ctl_named_node_t stats_node[] = { - {NAME("cactive"), CTL(stats_cactive)}, - {NAME("allocated"), CTL(stats_allocated)}, - {NAME("active"), CTL(stats_active)}, - {NAME("mapped"), CTL(stats_mapped)}, - {NAME("chunks"), CHILD(named, stats_chunks)}, - {NAME("huge"), CHILD(named, stats_huge)}, - {NAME("arenas"), CHILD(indexed, stats_arenas)} + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("metadata"), CTL(stats_metadata)}, + {NAME("resident"), CTL(stats_resident)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("retained"), CTL(stats_retained)}, + {NAME("background_thread"), + CHILD(named, stats_background_thread)}, + {NAME("mutexes"), CHILD(named, stats_mutexes)}, + {NAME("arenas"), CHILD(indexed, stats_arenas)} }; static const ctl_named_node_t root_node[] = { {NAME("version"), CTL(version)}, {NAME("epoch"), CTL(epoch)}, + {NAME("background_thread"), CTL(background_thread)}, {NAME("thread"), CHILD(named, thread)}, {NAME("config"), CHILD(named, config)}, {NAME("opt"), CHILD(named, opt)}, + 
{NAME("tcache"), CHILD(named, tcache)}, {NAME("arena"), CHILD(indexed, arena)}, {NAME("arenas"), CHILD(named, arenas)}, {NAME("prof"), CHILD(named, prof)}, @@ -426,303 +545,514 @@ static const ctl_named_node_t super_root_node[] = { /******************************************************************************/ -static bool -ctl_arena_init(ctl_arena_stats_t *astats) -{ +/* + * Sets *dst + *src non-atomically. This is safe, since everything is + * synchronized by the ctl mutex. + */ +static void +accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); + uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED); + atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED); +#else + *dst += *src; +#endif +} - if (astats->lstats == NULL) { - astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (astats->lstats == NULL) - return (true); +/* Likewise: with ctl mutex synchronization, reading is simple. */ +static uint64_t +arena_stats_read_u64(arena_stats_u64_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_u64(p, ATOMIC_RELAXED); +#else + return *p; +#endif +} + +static void accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { + size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); + size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED); + atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED); +} + +/******************************************************************************/ + +static unsigned +arenas_i2a_impl(size_t i, bool compat, bool validate) { + unsigned a; + + switch (i) { + case MALLCTL_ARENAS_ALL: + a = 0; + break; + case MALLCTL_ARENAS_DESTROYED: + a = 1; + break; + default: + if (compat && i == ctl_arenas->narenas) { + /* + * Provide deprecated backward compatibility for + * accessing the merged stats at index narenas rather + * than via MALLCTL_ARENAS_ALL. This is scheduled for + * removal in 6.0.0. + */ + a = 0; + } else if (validate && i >= ctl_arenas->narenas) { + a = UINT_MAX; + } else { + /* + * This function should never be called for an index + * more than one past the range of indices that have + * initialized ctl data. 
+ */ + assert(i < ctl_arenas->narenas || (!validate && i == + ctl_arenas->narenas)); + a = (unsigned)i + 2; + } + break; } - return (false); + return a; } -static void -ctl_arena_clear(ctl_arena_stats_t *astats) -{ +static unsigned +arenas_i2a(size_t i) { + return arenas_i2a_impl(i, true, false); +} + +static ctl_arena_t * +arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) { + ctl_arena_t *ret; - astats->dss = dss_prec_names[dss_prec_limit]; - astats->pactive = 0; - astats->pdirty = 0; + assert(!compat || !init); + + ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)]; + if (init && ret == NULL) { + if (config_stats) { + struct container_s { + ctl_arena_t ctl_arena; + ctl_arena_stats_t astats; + }; + struct container_s *cont = + (struct container_s *)base_alloc(tsd_tsdn(tsd), + b0get(), sizeof(struct container_s), QUANTUM); + if (cont == NULL) { + return NULL; + } + ret = &cont->ctl_arena; + ret->astats = &cont->astats; + } else { + ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(), + sizeof(ctl_arena_t), QUANTUM); + if (ret == NULL) { + return NULL; + } + } + ret->arena_ind = (unsigned)i; + ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret; + } + + assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i)); + return ret; +} + +static ctl_arena_t * +arenas_i(size_t i) { + ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false); + assert(ret != NULL); + return ret; +} + +static void +ctl_arena_clear(ctl_arena_t *ctl_arena) { + ctl_arena->nthreads = 0; + ctl_arena->dss = dss_prec_names[dss_prec_limit]; + ctl_arena->dirty_decay_ms = -1; + ctl_arena->muzzy_decay_ms = -1; + ctl_arena->pactive = 0; + ctl_arena->pdirty = 0; + ctl_arena->pmuzzy = 0; if (config_stats) { - memset(&astats->astats, 0, sizeof(arena_stats_t)); - astats->allocated_small = 0; - astats->nmalloc_small = 0; - astats->ndalloc_small = 0; - astats->nrequests_small = 0; - memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); - memset(astats->lstats, 0, nlclasses * + memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t)); + ctl_arena->astats->allocated_small = 0; + ctl_arena->astats->nmalloc_small = 0; + ctl_arena->astats->ndalloc_small = 0; + ctl_arena->astats->nrequests_small = 0; + memset(ctl_arena->astats->bstats, 0, NBINS * + sizeof(malloc_bin_stats_t)); + memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) * sizeof(malloc_large_stats_t)); } } static void -ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) -{ +ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { unsigned i; - arena_stats_merge(arena, &cstats->dss, &cstats->pactive, - &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats); - - for (i = 0; i < NBINS; i++) { - cstats->allocated_small += cstats->bstats[i].allocated; - cstats->nmalloc_small += cstats->bstats[i].nmalloc; - cstats->ndalloc_small += cstats->bstats[i].ndalloc; - cstats->nrequests_small += cstats->bstats[i].nrequests; + if (config_stats) { + arena_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy, + &ctl_arena->astats->astats, ctl_arena->astats->bstats, + ctl_arena->astats->lstats); + + for (i = 0; i < NBINS; i++) { + ctl_arena->astats->allocated_small += + ctl_arena->astats->bstats[i].curregs * + sz_index2size(i); + ctl_arena->astats->nmalloc_small += + ctl_arena->astats->bstats[i].nmalloc; + ctl_arena->astats->ndalloc_small += + 
ctl_arena->astats->bstats[i].ndalloc; + ctl_arena->astats->nrequests_small += + ctl_arena->astats->bstats[i].nrequests; + } + } else { + arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy); } } static void -ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) -{ +ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, + bool destroyed) { unsigned i; - sstats->pactive += astats->pactive; - sstats->pdirty += astats->pdirty; - - sstats->astats.mapped += astats->astats.mapped; - sstats->astats.npurge += astats->astats.npurge; - sstats->astats.nmadvise += astats->astats.nmadvise; - sstats->astats.purged += astats->astats.purged; - - sstats->allocated_small += astats->allocated_small; - sstats->nmalloc_small += astats->nmalloc_small; - sstats->ndalloc_small += astats->ndalloc_small; - sstats->nrequests_small += astats->nrequests_small; - - sstats->astats.allocated_large += astats->astats.allocated_large; - sstats->astats.nmalloc_large += astats->astats.nmalloc_large; - sstats->astats.ndalloc_large += astats->astats.ndalloc_large; - sstats->astats.nrequests_large += astats->astats.nrequests_large; - - for (i = 0; i < nlclasses; i++) { - sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; - sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; - sstats->lstats[i].nrequests += astats->lstats[i].nrequests; - sstats->lstats[i].curruns += astats->lstats[i].curruns; - } - - for (i = 0; i < NBINS; i++) { - sstats->bstats[i].allocated += astats->bstats[i].allocated; - sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; - sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; - sstats->bstats[i].nrequests += astats->bstats[i].nrequests; - if (config_tcache) { - sstats->bstats[i].nfills += astats->bstats[i].nfills; - sstats->bstats[i].nflushes += + if (!destroyed) { + ctl_sdarena->nthreads += ctl_arena->nthreads; + ctl_sdarena->pactive += ctl_arena->pactive; + ctl_sdarena->pdirty += ctl_arena->pdirty; + ctl_sdarena->pmuzzy += ctl_arena->pmuzzy; + } else { + assert(ctl_arena->nthreads == 0); + assert(ctl_arena->pactive == 0); + assert(ctl_arena->pdirty == 0); + assert(ctl_arena->pmuzzy == 0); + } + + if (config_stats) { + ctl_arena_stats_t *sdstats = ctl_sdarena->astats; + ctl_arena_stats_t *astats = ctl_arena->astats; + + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.mapped, + &astats->astats.mapped); + accum_atomic_zu(&sdstats->astats.retained, + &astats->astats.retained); + } + + accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, + &astats->astats.decay_dirty.npurge); + accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, + &astats->astats.decay_dirty.nmadvise); + accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, + &astats->astats.decay_dirty.purged); + + accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, + &astats->astats.decay_muzzy.npurge); + accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, + &astats->astats.decay_muzzy.nmadvise); + accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, + &astats->astats.decay_muzzy.purged); + +#define OP(mtx) malloc_mutex_prof_merge( \ + &(sdstats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx]), \ + &(astats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx])); +MUTEX_PROF_ARENA_MUTEXES +#undef OP + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.base, + &astats->astats.base); + 
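		/*
		 * The base/internal/resident counters merged here are the
		 * per-arena metadata statistics surfaced as
		 * stats.arenas.<i>.base, .internal and .resident in the node
		 * table above; only live arenas contribute them to the
		 * summary, while the destroyed branch merely asserts that the
		 * arena's internal allocations have already dropped to zero.
		 * A minimal read, assuming the library is built without a
		 * symbol prefix on the public mallctl() entry point:
		 *
		 *   size_t base_bytes, sz = sizeof(base_bytes);
		 *   mallctl("stats.arenas.0.base", &base_bytes, &sz, NULL, 0);
		 */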
accum_atomic_zu(&sdstats->astats.internal, + &astats->astats.internal); + accum_atomic_zu(&sdstats->astats.resident, + &astats->astats.resident); + } else { + assert(atomic_load_zu( + &astats->astats.internal, ATOMIC_RELAXED) == 0); + } + + if (!destroyed) { + sdstats->allocated_small += astats->allocated_small; + } else { + assert(astats->allocated_small == 0); + } + sdstats->nmalloc_small += astats->nmalloc_small; + sdstats->ndalloc_small += astats->ndalloc_small; + sdstats->nrequests_small += astats->nrequests_small; + + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.allocated_large, + &astats->astats.allocated_large); + } else { + assert(atomic_load_zu(&astats->astats.allocated_large, + ATOMIC_RELAXED) == 0); + } + accum_arena_stats_u64(&sdstats->astats.nmalloc_large, + &astats->astats.nmalloc_large); + accum_arena_stats_u64(&sdstats->astats.ndalloc_large, + &astats->astats.ndalloc_large); + accum_arena_stats_u64(&sdstats->astats.nrequests_large, + &astats->astats.nrequests_large); + + accum_atomic_zu(&sdstats->astats.tcache_bytes, + &astats->astats.tcache_bytes); + + if (ctl_arena->arena_ind == 0) { + sdstats->astats.uptime = astats->astats.uptime; + } + + for (i = 0; i < NBINS; i++) { + sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sdstats->bstats[i].nrequests += + astats->bstats[i].nrequests; + if (!destroyed) { + sdstats->bstats[i].curregs += + astats->bstats[i].curregs; + } else { + assert(astats->bstats[i].curregs == 0); + } + sdstats->bstats[i].nfills += astats->bstats[i].nfills; + sdstats->bstats[i].nflushes += astats->bstats[i].nflushes; + sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; + sdstats->bstats[i].reslabs += astats->bstats[i].reslabs; + if (!destroyed) { + sdstats->bstats[i].curslabs += + astats->bstats[i].curslabs; + } else { + assert(astats->bstats[i].curslabs == 0); + } + malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, + &astats->bstats[i].mutex_data); + } + + for (i = 0; i < NSIZES - NBINS; i++) { + accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, + &astats->lstats[i].nmalloc); + accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, + &astats->lstats[i].ndalloc); + accum_arena_stats_u64(&sdstats->lstats[i].nrequests, + &astats->lstats[i].nrequests); + if (!destroyed) { + sdstats->lstats[i].curlextents += + astats->lstats[i].curlextents; + } else { + assert(astats->lstats[i].curlextents == 0); + } } - sstats->bstats[i].nruns += astats->bstats[i].nruns; - sstats->bstats[i].reruns += astats->bstats[i].reruns; - sstats->bstats[i].curruns += astats->bstats[i].curruns; } } static void -ctl_arena_refresh(arena_t *arena, unsigned i) -{ - ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; - ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; +ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, + unsigned i, bool destroyed) { + ctl_arena_t *ctl_arena = arenas_i(i); + + ctl_arena_clear(ctl_arena); + ctl_arena_stats_amerge(tsdn, ctl_arena, arena); + /* Merge into sum stats as well. */ + ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed); +} - ctl_arena_clear(astats); +static unsigned +ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { + unsigned arena_ind; + ctl_arena_t *ctl_arena; - sstats->nthreads += astats->nthreads; - if (config_stats) { - ctl_arena_stats_amerge(astats, arena); - /* Merge into sum stats as well. 
*/ - ctl_arena_stats_smerge(sstats, astats); + if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) != + NULL) { + ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_ind = ctl_arena->arena_ind; } else { - astats->pactive += arena->nactive; - astats->pdirty += arena->ndirty; - /* Merge into sum stats as well. */ - sstats->pactive += arena->nactive; - sstats->pdirty += arena->ndirty; + arena_ind = ctl_arenas->narenas; } -} -static bool -ctl_grow(void) -{ - ctl_arena_stats_t *astats; - arena_t **tarenas; - - /* Allocate extended arena stats and arenas arrays. */ - astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) * - sizeof(ctl_arena_stats_t)); - if (astats == NULL) - return (true); - tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) * - sizeof(arena_t *)); - if (tarenas == NULL) { - idalloc(astats); - return (true); - } - - /* Initialize the new astats element. */ - memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); - if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { - idalloc(tarenas); - idalloc(astats); - return (true); - } - /* Swap merged stats to their new location. */ - { - ctl_arena_stats_t tstats; - memcpy(&tstats, &astats[ctl_stats.narenas], - sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas], - &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas + 1], &tstats, - sizeof(ctl_arena_stats_t)); - } - /* Initialize the new arenas element. */ - tarenas[ctl_stats.narenas] = NULL; - { - arena_t **arenas_old = arenas; - /* - * Swap extended arenas array into place. Although ctl_mtx - * protects this function from other threads extending the - * array, it does not protect from other threads mutating it - * (i.e. initializing arenas and setting array elements to - * point to them). Therefore, array copying must happen under - * the protection of arenas_lock. - */ - malloc_mutex_lock(&arenas_lock); - arenas = tarenas; - memcpy(arenas, arenas_old, ctl_stats.narenas * - sizeof(arena_t *)); - narenas_total++; - arenas_extend(narenas_total - 1); - malloc_mutex_unlock(&arenas_lock); - /* - * Deallocate arenas_old only if it came from imalloc() (not - * base_alloc()). - */ - if (ctl_stats.narenas != narenas_auto) - idalloc(arenas_old); + /* Trigger stats allocation. */ + if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) { + return UINT_MAX; } - ctl_stats.arenas = astats; - ctl_stats.narenas++; - return (false); -} + /* Initialize new arena. 
*/ + if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { + return UINT_MAX; + } -static void -ctl_refresh(void) -{ - unsigned i; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); + if (arena_ind == ctl_arenas->narenas) { + ctl_arenas->narenas++; + } - if (config_stats) { - malloc_mutex_lock(&chunks_mtx); - ctl_stats.chunks.current = stats_chunks.curchunks; - ctl_stats.chunks.total = stats_chunks.nchunks; - ctl_stats.chunks.high = stats_chunks.highchunks; - malloc_mutex_unlock(&chunks_mtx); + return arena_ind; +} - malloc_mutex_lock(&huge_mtx); - ctl_stats.huge.allocated = huge_allocated; - ctl_stats.huge.nmalloc = huge_nmalloc; - ctl_stats.huge.ndalloc = huge_ndalloc; - malloc_mutex_unlock(&huge_mtx); +static void +ctl_background_thread_stats_read(tsdn_t *tsdn) { + background_thread_stats_t *stats = &ctl_stats->background_thread; + if (!have_background_thread || + background_thread_stats_read(tsdn, stats)) { + memset(stats, 0, sizeof(background_thread_stats_t)); + nstime_init(&stats->run_interval, 0); } +} + +static void +ctl_refresh(tsdn_t *tsdn) { + unsigned i; + ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL); + VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas); /* * Clear sum stats, since they will be merged into by * ctl_arena_refresh(). */ - ctl_stats.arenas[ctl_stats.narenas].nthreads = 0; - ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); - - malloc_mutex_lock(&arenas_lock); - memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas); - for (i = 0; i < ctl_stats.narenas; i++) { - if (arenas[i] != NULL) - ctl_stats.arenas[i].nthreads = arenas[i]->nthreads; - else - ctl_stats.arenas[i].nthreads = 0; - } - malloc_mutex_unlock(&arenas_lock); - for (i = 0; i < ctl_stats.narenas; i++) { + ctl_arena_clear(ctl_sarena); + + for (i = 0; i < ctl_arenas->narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); + } + + for (i = 0; i < ctl_arenas->narenas; i++) { + ctl_arena_t *ctl_arena = arenas_i(i); bool initialized = (tarenas[i] != NULL); - ctl_stats.arenas[i].initialized = initialized; - if (initialized) - ctl_arena_refresh(tarenas[i], i); + ctl_arena->initialized = initialized; + if (initialized) { + ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i, + false); + } } if (config_stats) { - ctl_stats.allocated = - ctl_stats.arenas[ctl_stats.narenas].allocated_small - + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large - + ctl_stats.huge.allocated; - ctl_stats.active = - (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE) - + ctl_stats.huge.allocated; - ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk); + ctl_stats->allocated = ctl_sarena->astats->allocated_small + + atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, + ATOMIC_RELAXED); + ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); + ctl_stats->metadata = atomic_load_zu( + &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) + + atomic_load_zu(&ctl_sarena->astats->astats.internal, + ATOMIC_RELAXED); + ctl_stats->resident = atomic_load_zu( + &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED); + ctl_stats->mapped = atomic_load_zu( + &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED); + ctl_stats->retained = atomic_load_zu( + &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED); + + ctl_background_thread_stats_read(tsdn); + +#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); + + if (config_prof && opt_prof) { + 
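		/*
		 * Contention data for the profiling bookkeeping mutex is only
		 * captured when heap profiling is both compiled in and
		 * enabled; the background_thread and ctl mutexes are handled
		 * right after, with the background_thread slot zeroed out
		 * when that feature is unavailable.
		 */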
READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof, + bt2gctx_mtx); + } + if (have_background_thread) { + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_background_thread, + background_thread_lock); + } else { + memset(&ctl_stats->mutex_prof_data[ + global_prof_mutex_background_thread], 0, + sizeof(mutex_prof_data_t)); + } + /* We own ctl mutex already. */ + malloc_mutex_prof_read(tsdn, + &ctl_stats->mutex_prof_data[global_prof_mutex_ctl], + &ctl_mtx); +#undef READ_GLOBAL_MUTEX_PROF_DATA } - - ctl_epoch++; + ctl_arenas->epoch++; } static bool -ctl_init(void) -{ +ctl_init(tsd_t *tsd) { bool ret; + tsdn_t *tsdn = tsd_tsdn(tsd); + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (!ctl_initialized) { + ctl_arena_t *ctl_sarena, *ctl_darena; + unsigned i; - malloc_mutex_lock(&ctl_mtx); - if (ctl_initialized == false) { /* - * Allocate space for one extra arena stats element, which - * contains summed stats across all arenas. + * Allocate demand-zeroed space for pointers to the full + * range of supported arena indices. */ - assert(narenas_auto == narenas_total_get()); - ctl_stats.narenas = narenas_auto; - ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc( - (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); - if (ctl_stats.arenas == NULL) { + if (ctl_arenas == NULL) { + ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, + b0get(), sizeof(ctl_arenas_t), QUANTUM); + if (ctl_arenas == NULL) { + ret = true; + goto label_return; + } + } + + if (config_stats && ctl_stats == NULL) { + ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(), + sizeof(ctl_stats_t), QUANTUM); + if (ctl_stats == NULL) { + ret = true; + goto label_return; + } + } + + /* + * Allocate space for the current full range of arenas + * here rather than doing it lazily elsewhere, in order + * to limit when OOM-caused errors can occur. + */ + if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false, + true)) == NULL) { ret = true; goto label_return; } - memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); + ctl_sarena->initialized = true; + if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED, + false, true)) == NULL) { + ret = true; + goto label_return; + } + ctl_arena_clear(ctl_darena); /* - * Initialize all stats structures, regardless of whether they - * ever get used. Lazy initialization would allow errors to - * cause inconsistent state to be viewable by the application. + * Don't toggle ctl_darena to initialized until an arena is + * actually destroyed, so that arena.<i>.initialized can be used + * to query whether the stats are relevant. 
*/ - if (config_stats) { - unsigned i; - for (i = 0; i <= ctl_stats.narenas; i++) { - if (ctl_arena_init(&ctl_stats.arenas[i])) { - ret = true; - goto label_return; - } + + ctl_arenas->narenas = narenas_total_get(); + for (i = 0; i < ctl_arenas->narenas; i++) { + if (arenas_i_impl(tsd, i, false, true) == NULL) { + ret = true; + goto label_return; } } - ctl_stats.arenas[ctl_stats.narenas].initialized = true; - ctl_epoch = 0; - ctl_refresh(); + ql_new(&ctl_arenas->destroyed); + ctl_refresh(tsdn); + ctl_initialized = true; } ret = false; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; } static int -ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, - size_t *depthp) -{ +ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; @@ -750,9 +1080,10 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, if (strlen(child->name) == elen && strncmp(elm, child->name, elen) == 0) { node = child; - if (nodesp != NULL) + if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; + } mibp[i] = j; break; } @@ -773,14 +1104,15 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, } inode = ctl_indexed_node(node->children); - node = inode->index(mibp, *depthp, (size_t)index); + node = inode->index(tsdn, mibp, *depthp, (size_t)index); if (node == NULL) { ret = ENOENT; goto label_return; } - if (nodesp != NULL) + if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; + } mibp[i] = (size_t)index; } @@ -813,33 +1145,33 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, ret = 0; label_return: - return (ret); + return ret; } int -ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ +ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { int ret; size_t depth; ctl_node_t const *nodes[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; - if (ctl_initialized == false && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } depth = CTL_MAX_DEPTH; - ret = ctl_lookup(name, nodes, mib, &depth); - if (ret != 0) + ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); + if (ret != 0) { goto label_return; + } node = ctl_named_node(nodes[depth-1]); - if (node != NULL && node->ctl) - ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); - else { + if (node != NULL && node->ctl) { + ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); + } else { /* The name refers to a partial path through the ctl tree. 
*/ ret = ENOENT; } @@ -849,29 +1181,27 @@ label_return: } int -ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) -{ +ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { int ret; - if (ctl_initialized == false && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } - ret = ctl_lookup(name, NULL, mibp, miblenp); + ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); label_return: return(ret); } int -ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const ctl_named_node_t *node; size_t i; - if (ctl_initialized == false && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } @@ -893,7 +1223,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, /* Indexed element. */ inode = ctl_indexed_node(node->children); - node = inode->index(mib, miblen, mib[i]); + node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; @@ -902,9 +1232,9 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, } /* Call the ctl function. */ - if (node && node->ctl) - ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); - else { + if (node && node->ctl) { + ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + } else { /* Partial MIB. */ ret = ENOENT; } @@ -914,56 +1244,58 @@ label_return: } bool -ctl_boot(void) -{ - - if (malloc_mutex_init(&ctl_mtx)) - return (true); +ctl_boot(void) { + if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, + malloc_mutex_rank_exclusive)) { + return true; + } ctl_initialized = false; - return (false); + return false; } void -ctl_prefork(void) -{ - - malloc_mutex_prefork(&ctl_mtx); +ctl_prefork(tsdn_t *tsdn) { + malloc_mutex_prefork(tsdn, &ctl_mtx); } void -ctl_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&ctl_mtx); +ctl_postfork_parent(tsdn_t *tsdn) { + malloc_mutex_postfork_parent(tsdn, &ctl_mtx); } void -ctl_postfork_child(void) -{ - - malloc_mutex_postfork_child(&ctl_mtx); +ctl_postfork_child(tsdn_t *tsdn) { + malloc_mutex_postfork_child(tsdn, &ctl_mtx); } /******************************************************************************/ /* *_ctl() functions. 
*/ -#define READONLY() do { \ +#define READONLY() do { \ if (newp != NULL || newlen != 0) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) -#define WRITEONLY() do { \ +#define WRITEONLY() do { \ if (oldp != NULL || oldlenp != NULL) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) -#define READ(v, t) do { \ +#define READ_XOR_WRITE() do { \ + if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ + newlen != 0)) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ(v, t) do { \ if (oldp != NULL && oldlenp != NULL) { \ if (*oldlenp != sizeof(t)) { \ size_t copylen = (sizeof(t) <= *oldlenp) \ @@ -971,12 +1303,12 @@ ctl_postfork_child(void) memcpy(oldp, (void *)&(v), copylen); \ ret = EINVAL; \ goto label_return; \ - } else \ - *(t *)oldp = (v); \ + } \ + *(t *)oldp = (v); \ } \ } while (0) -#define WRITE(v, t) do { \ +#define WRITE(v, t) do { \ if (newp != NULL) { \ if (newlen != sizeof(t)) { \ ret = EINVAL; \ @@ -986,101 +1318,109 @@ ctl_postfork_child(void) } \ } while (0) +#define MIB_UNSIGNED(v, i) do { \ + if (mib[i] > UINT_MAX) { \ + ret = EFAULT; \ + goto label_return; \ + } \ + v = (unsigned)mib[i]; \ +} while (0) + /* * There's a lot of code duplication in the following macros due to limitations * in how nested cpp macros are expanded. */ -#define CTL_RO_CLGEN(c, l, n, v, t) \ +#define CTL_RO_CLGEN(c, l, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if ((c) == false) \ - return (ENOENT); \ - if (l) \ - malloc_mutex_lock(&ctl_mtx); \ + if (!(c)) { \ + return ENOENT; \ + } \ + if (l) { \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - if (l) \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ + if (l) { \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ + return ret; \ } -#define CTL_RO_CGEN(c, n, v, t) \ +#define CTL_RO_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if ((c) == false) \ - return (ENOENT); \ - malloc_mutex_lock(&ctl_mtx); \ + if (!(c)) { \ + return ENOENT; \ + } \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ } -#define CTL_RO_GEN(n, v, t) \ +#define CTL_RO_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - malloc_mutex_lock(&ctl_mtx); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ } /* * ctl_mtx is not acquired, under the assumption that no pertinent data will * mutate during the call. 
*/ -#define CTL_RO_NL_CGEN(c, n, v, t) \ +#define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if ((c) == false) \ - return (ENOENT); \ + if (!(c)) { \ + return ENOENT; \ + } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } -#define CTL_RO_NL_GEN(n, v, t) \ +#define CTL_RO_NL_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1090,24 +1430,42 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } -#define CTL_RO_BOOL_CONFIG_GEN(n) \ +#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ - bool oldval; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + READONLY(); \ + oldval = (m(tsd)); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +#define CTL_RO_CONFIG_GEN(n, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ \ READONLY(); \ oldval = n; \ - READ(oldval, bool); \ + READ(oldval, t); \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } /******************************************************************************/ @@ -1115,62 +1473,122 @@ label_return: \ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int -epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(newval, uint64_t); - if (newp != NULL) - ctl_refresh(); - READ(ctl_epoch, uint64_t); + if (newp != NULL) { + ctl_refresh(tsd_tsdn(tsd)); + } + READ(ctl_arenas->epoch, uint64_t); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!have_background_thread) { + return ENOENT; + } + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (newp == NULL) { + oldval = background_thread_enabled(); + READ(oldval, bool); + } else { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = background_thread_enabled(); + READ(oldval, bool); + + bool newval = *(bool *)newp; + if (newval == oldval) { + ret = 0; + goto label_return; + } + + background_thread_enabled_set(tsd_tsdn(tsd), newval); + if (newval) { + if 
(!can_enable_background_thread) { + malloc_printf("<jemalloc>: Error in dlsym(" + "RTLD_NEXT, \"pthread_create\"). Cannot " + "enable background_thread\n"); + ret = EFAULT; + goto label_return; + } + if (background_threads_enable(tsd)) { + ret = EFAULT; + goto label_return; + } + } else { + if (background_threads_disable(tsd)) { + ret = EFAULT; + goto label_return; + } + } + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + + return ret; } /******************************************************************************/ -CTL_RO_BOOL_CONFIG_GEN(config_debug) -CTL_RO_BOOL_CONFIG_GEN(config_dss) -CTL_RO_BOOL_CONFIG_GEN(config_fill) -CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) -CTL_RO_BOOL_CONFIG_GEN(config_mremap) -CTL_RO_BOOL_CONFIG_GEN(config_munmap) -CTL_RO_BOOL_CONFIG_GEN(config_prof) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) -CTL_RO_BOOL_CONFIG_GEN(config_stats) -CTL_RO_BOOL_CONFIG_GEN(config_tcache) -CTL_RO_BOOL_CONFIG_GEN(config_tls) -CTL_RO_BOOL_CONFIG_GEN(config_utrace) -CTL_RO_BOOL_CONFIG_GEN(config_valgrind) -CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) +CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) +CTL_RO_CONFIG_GEN(config_debug, bool) +CTL_RO_CONFIG_GEN(config_fill, bool) +CTL_RO_CONFIG_GEN(config_lazy_lock, bool) +CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) +CTL_RO_CONFIG_GEN(config_prof, bool) +CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) +CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) +CTL_RO_CONFIG_GEN(config_stats, bool) +CTL_RO_CONFIG_GEN(config_thp, bool) +CTL_RO_CONFIG_GEN(config_utrace, bool) +CTL_RO_CONFIG_GEN(config_xmalloc, bool) /******************************************************************************/ CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) +CTL_RO_NL_GEN(opt_retain, opt_retain, bool) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) -CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) -CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) -CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) +CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], + const char *) +CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) +CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) +CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) -CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool) -CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) -CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) +CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *) +CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) -CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool) CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) -CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) -CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) +CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) +CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) -CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* 
Mutable. */ +CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, + opt_prof_thread_active_init, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) @@ -1181,504 +1599,1100 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) /******************************************************************************/ static int -thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; + arena_t *oldarena; unsigned newind, oldind; - malloc_mutex_lock(&ctl_mtx); - newind = oldind = choose_arena(NULL)->ind; + oldarena = arena_choose(tsd, NULL); + if (oldarena == NULL) { + return EAGAIN; + } + newind = oldind = arena_ind_get(oldarena); WRITE(newind, unsigned); READ(oldind, unsigned); + if (newind != oldind) { - arena_t *arena; + arena_t *newarena; - if (newind >= ctl_stats.narenas) { + if (newind >= narenas_total_get()) { /* New arena index is out of range. */ ret = EFAULT; goto label_return; } + if (have_percpu_arena && + PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + if (newind < percpu_arena_ind_limit(opt_percpu_arena)) { + /* + * If perCPU arena is enabled, thread_arena + * control is not allowed for the auto arena + * range. + */ + ret = EPERM; + goto label_return; + } + } + /* Initialize arena if necessary. */ - malloc_mutex_lock(&arenas_lock); - if ((arena = arenas[newind]) == NULL && (arena = - arenas_extend(newind)) == NULL) { - malloc_mutex_unlock(&arenas_lock); + newarena = arena_get(tsd_tsdn(tsd), newind, true); + if (newarena == NULL) { ret = EAGAIN; goto label_return; } - assert(arena == arenas[newind]); - arenas[oldind]->nthreads--; - arenas[newind]->nthreads++; - malloc_mutex_unlock(&arenas_lock); - - /* Set new arena association. */ - if (config_tcache) { - tcache_t *tcache; - if ((uintptr_t)(tcache = *tcache_tsd_get()) > - (uintptr_t)TCACHE_STATE_MAX) { - tcache_arena_dissociate(tcache); - tcache_arena_associate(tcache, arena); - } + /* Set new arena/tcache associations. 
*/ + arena_migrate(tsd, oldind, newind); + if (tcache_available(tsd)) { + tcache_arena_reassociate(tsd_tsdn(tsd), + tsd_tcachep_get(tsd), newarena); } - arenas_tsd_set(&arena); } ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } -CTL_RO_NL_CGEN(config_stats, thread_allocated, - thread_allocated_tsd_get()->allocated, uint64_t) -CTL_RO_NL_CGEN(config_stats, thread_allocatedp, - &thread_allocated_tsd_get()->allocated, uint64_t *) -CTL_RO_NL_CGEN(config_stats, thread_deallocated, - thread_allocated_tsd_get()->deallocated, uint64_t) -CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, - &thread_allocated_tsd_get()->deallocated, uint64_t *) +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, + uint64_t *) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, + tsd_thread_deallocatedp_get, uint64_t *) static int -thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (config_tcache == false) - return (ENOENT); - - oldval = tcache_enabled_get(); + oldval = tcache_enabled_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - tcache_enabled_set(*(bool *)newp); + tcache_enabled_set(tsd, *(bool *)newp); } READ(oldval, bool); ret = 0; label_return: - return (ret); + return ret; +} + +static int +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (!tcache_available(tsd)) { + ret = EFAULT; + goto label_return; + } + + READONLY(); + WRITEONLY(); + + tcache_flush(tsd); + + ret = 0; +label_return: + return ret; +} + +static int +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (!config_prof) { + return ENOENT; + } + + READ_XOR_WRITE(); + + if (newp != NULL) { + if (newlen != sizeof(const char *)) { + ret = EINVAL; + goto label_return; + } + + if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != + 0) { + goto label_return; + } + } else { + const char *oldname = prof_thread_name_get(tsd); + READ(oldname, const char *); + } + + ret = 0; +label_return: + return ret; } static int -thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; + bool oldval; - if (config_tcache == false) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } + + oldval = prof_thread_active_get(tsd); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + if (prof_thread_active_set(tsd, *(bool *)newp)) { + ret = EAGAIN; + goto label_return; + } + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +/******************************************************************************/ + +static int +tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; READONLY(); + if 
(tcaches_create(tsd, &tcache_ind)) { + ret = EFAULT; + goto label_return; + } + READ(tcache_ind, unsigned); + + ret = 0; +label_return: + return ret; +} + +static int +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_flush(tsd, tcache_ind); + + ret = 0; +label_return: + return ret; +} + +static int +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; - tcache_flush(); + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_destroy(tsd, tcache_ind); ret = 0; label_return: - return (ret); + return ret; } /******************************************************************************/ -/* ctl_mutex must be held during execution of this function. */ +static int +arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + tsdn_t *tsdn = tsd_tsdn(tsd); + unsigned arena_ind; + bool initialized; + + READONLY(); + MIB_UNSIGNED(arena_ind, 1); + + malloc_mutex_lock(tsdn, &ctl_mtx); + initialized = arenas_i(arena_ind)->initialized; + malloc_mutex_unlock(tsdn, &ctl_mtx); + + READ(initialized, bool); + + ret = 0; +label_return: + return ret; +} + static void -arena_purge(unsigned arena_ind) -{ - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); +arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { + malloc_mutex_lock(tsdn, &ctl_mtx); + { + unsigned narenas = ctl_arenas->narenas; - malloc_mutex_lock(&arenas_lock); - memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas); - malloc_mutex_unlock(&arenas_lock); + /* + * Access via index narenas is deprecated, and scheduled for + * removal in 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) { + unsigned i; + VARIABLE_ARRAY(arena_t *, tarenas, narenas); - if (arena_ind == ctl_stats.narenas) { - unsigned i; - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - arena_purge_all(tarenas[i]); + for (i = 0; i < narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); + } + + /* + * No further need to hold ctl_mtx, since narenas and + * tarenas contain everything needed below. + */ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + for (i = 0; i < narenas; i++) { + if (tarenas[i] != NULL) { + arena_decay(tsdn, tarenas[i], false, + all); + } + } + } else { + arena_t *tarena; + + assert(arena_ind < narenas); + + tarena = arena_get(tsdn, arena_ind, false); + + /* No further need to hold ctl_mtx. 
*/ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + if (tarena != NULL) { + arena_decay(tsdn, tarena, false, all); + } } - } else { - assert(arena_ind < ctl_stats.narenas); - if (tarenas[arena_ind] != NULL) - arena_purge_all(tarenas[arena_ind]); } } static int -arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; + unsigned arena_ind; READONLY(); WRITEONLY(); - malloc_mutex_lock(&ctl_mtx); - arena_purge(mib[1]); - malloc_mutex_unlock(&ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, false); ret = 0; label_return: - return (ret); + return ret; } static int -arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret, i; - bool match, err; - const char *dss; - unsigned arena_ind = mib[1]; +arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, true); + + ret = 0; +label_return: + return ret; +} + +static int +arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind, + arena_t **arena) { + int ret; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(*arena_ind, 1); + + *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false); + if (*arena == NULL || arena_is_auto(*arena)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return ret; +} + +static void +arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) { + /* Temporarily disable the background thread during arena reset. 
*/ + if (have_background_thread) { + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (background_thread_enabled()) { + unsigned ind = arena_ind % ncpus; + background_thread_info_t *info = + &background_thread_info[ind]; + assert(info->state == background_thread_started); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_paused; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + } +} + +static void +arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) { + if (have_background_thread) { + if (background_thread_enabled()) { + unsigned ind = arena_ind % ncpus; + background_thread_info_t *info = + &background_thread_info[ind]; + assert(info->state == background_thread_paused); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_started; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + } +} + +static int +arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + return ret; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + arena_reset(tsd, arena); + arena_reset_finish_background_thread(tsd, arena_ind); + + return ret; +} + +static int +arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + ctl_arena_t *ctl_darena, *ctl_arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + goto label_return; + } + + if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena, + true) != 0) { + ret = EFAULT; + goto label_return; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + /* Merge stats after resetting and purging arena. */ + arena_reset(tsd, arena); + arena_decay(tsd_tsdn(tsd), arena, false, true); + ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED); + ctl_darena->initialized = true; + ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true); + /* Destroy arena. */ + arena_destroy(tsd, arena); + ctl_arena = arenas_i(arena_ind); + ctl_arena->initialized = false; + /* Record arena index for later recycling via arenas.create. 
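"arena.<i>.reset" discards everything allocated from a manually created arena while keeping the arena usable, whereas "arena.<i>.destroy" additionally retires it and, as noted above, queues its index for recycling by "arenas.create". A minimal sketch of the destroy path, assuming the index came from an earlier "arenas.create" call:

#include <jemalloc/jemalloc.h>
#include <stdio.h>

/* Retire a manually created arena. Fails with EFAULT if the arena is an
 * automatic one or if threads are still attached to it. */
static int
destroy_arena(unsigned arena_ind) {
	char cmd[48];
	snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena_ind);
	return mallctl(cmd, NULL, NULL, NULL, 0);
}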
*/ + ql_elm_new(ctl_arena, destroyed_link); + ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_reset_finish_background_thread(tsd, arena_ind); + + assert(ret == 0); +label_return: + return ret; +} + +static int +arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const char *dss = NULL; + unsigned arena_ind; dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(dss, const char *); - match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strcmp(dss_prec_names[i], dss) == 0) { - dss_prec = i; - match = true; - break; + MIB_UNSIGNED(arena_ind, 1); + if (dss != NULL) { + int i; + bool match = false; + + for (i = 0; i < dss_prec_limit; i++) { + if (strcmp(dss_prec_names[i], dss) == 0) { + dss_prec = i; + match = true; + break; + } + } + + if (!match) { + ret = EINVAL; + goto label_return; } - } - if (match == false) { - ret = EINVAL; - goto label_return; } - if (arena_ind < ctl_stats.narenas) { - arena_t *arena = arenas[arena_ind]; - if (arena != NULL) { - dss_prec_old = arena_dss_prec_get(arena); - arena_dss_prec_set(arena, dss_prec); - err = false; - } else - err = true; + /* + * Access via index narenas is deprecated, and scheduled for removal in + * 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == + ctl_arenas->narenas) { + if (dss_prec != dss_prec_limit && + extent_dss_prec_set(dss_prec)) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = extent_dss_prec_get(); } else { - dss_prec_old = chunk_dss_prec_get(); - err = chunk_dss_prec_set(dss_prec); + arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL || (dss_prec != dss_prec_limit && + arena_dss_prec_set(arena, dss_prec))) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = arena_dss_prec_get(arena); } + dss = dss_prec_names[dss_prec_old]; READ(dss, const char *); - if (err) { + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { + int ret; + unsigned arena_ind; + arena_t *arena; + + MIB_UNSIGNED(arena_ind, 1); + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { ret = EFAULT; goto label_return; } + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) : + arena_muzzy_decay_ms_get(arena); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (dirty ? 
arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd), + arena, *(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } -static const ctl_named_node_t * -arena_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; +static int +arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas) { - ret = NULL; +static int +arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); +} + +static int +arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + if (arena_ind < narenas_total_get() && (arena = + arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { + if (newp != NULL) { + extent_hooks_t *old_extent_hooks; + extent_hooks_t *new_extent_hooks + JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_extent_hooks, extent_hooks_t *); + old_extent_hooks = extent_hooks_set(tsd, arena, + new_extent_hooks); + READ(old_extent_hooks, extent_hooks_t *); + } else { + extent_hooks_t *old_extent_hooks = + extent_hooks_get(arena); + READ(old_extent_hooks, extent_hooks_t *); + } + } else { + ret = EFAULT; goto label_return; } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static const ctl_named_node_t * +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + const ctl_named_node_t *ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + switch (i) { + case MALLCTL_ARENAS_ALL: + case MALLCTL_ARENAS_DESTROYED: + break; + default: + if (i > ctl_arenas->narenas) { + ret = NULL; + goto label_return; + } + break; + } ret = super_arena_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; } /******************************************************************************/ static int -arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (*oldlenp != sizeof(unsigned)) { ret = EINVAL; goto label_return; } - narenas = ctl_stats.narenas; + narenas = ctl_arenas->narenas; READ(narenas, unsigned); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } static int -arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; - unsigned nread, i; - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { - ret = 
EINVAL; - nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) - ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas; - } else { - ret = 0; - nread = ctl_stats.narenas; + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() : + arena_muzzy_decay_ms_default_get()); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp) + : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } } - for (i = 0; i < nread; i++) - ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; - + ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; +} + +static int +arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} + +static int +arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); } CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t) -CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) +CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) -CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) +CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) -CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t) static const ctl_named_node_t * -arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > NBINS) - return (NULL); - return (super_arenas_bin_i_node); +arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + if (i > NBINS) { + return NULL; + } + return super_arenas_bin_i_node; } -CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t) -CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t) +CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) +CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]), + size_t) static const ctl_named_node_t * -arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nlclasses) - return (NULL); - return (super_arenas_lrun_i_node); +arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t i) { + if (i > NSIZES - NBINS) { + return NULL; + } + return super_arenas_lextent_i_node; } static int -arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; + extent_hooks_t *extent_hooks; unsigned arena_ind; - malloc_mutex_lock(&ctl_mtx); - WRITEONLY(); - arena_ind = UINT_MAX; - WRITE(arena_ind, unsigned); - if (newp != NULL && arena_ind >= ctl_stats.narenas) - ret = EFAULT; - else { - if (arena_ind == UINT_MAX) - arena_ind = ctl_stats.narenas; - arena_purge(arena_ind); - ret = 0; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + + extent_hooks = (extent_hooks_t 
*)&extent_hooks_default; + WRITE(extent_hooks, extent_hooks_t *); + if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) { + ret = EAGAIN; + goto label_return; } + READ(arena_ind, unsigned); + ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } +/******************************************************************************/ + static int -arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - unsigned narenas; + bool oldval; - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (ctl_grow()) { - ret = EAGAIN; - goto label_return; + if (!config_prof) { + return ENOENT; } - narenas = ctl_stats.narenas - 1; - READ(narenas, unsigned); + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_thread_active_init_set(tsd_tsdn(tsd), + *(bool *)newp); + } else { + oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } -/******************************************************************************/ - static int -prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (config_prof == false) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } - malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */ - oldval = opt_prof_active; if (newp != NULL) { - /* - * The memory barriers will tend to make opt_prof_active - * propagate faster on systems with weak memory ordering. 
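The "arenas.create" control above replaces the old "arenas.extend": it returns the index of a newly initialized arena and optionally accepts an extent_hooks_t * as its write value. A minimal sketch that creates an arena with the default hooks and allocates from it explicitly (the error-handling style is an assumption of the example):

#include <jemalloc/jemalloc.h>
#include <stddef.h>

static void *
alloc_from_private_arena(size_t size) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);

	/* No new value supplied, so the arena gets the default extent hooks. */
	if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0) {
		return NULL;
	}
	/* Bypass the thread cache so the allocation is attributed to the new
	 * arena immediately in the stats. */
	return mallocx(size, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
}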
- */ - mb_write(); - WRITE(opt_prof_active, bool); - mb_write(); + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); + } else { + oldval = prof_active_get(tsd_tsdn(tsd)); } READ(oldval, bool); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } static int -prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; - if (config_prof == false) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } WRITEONLY(); WRITE(filename, const char *); - if (prof_mdump(filename)) { + if (prof_mdump(tsd, filename)) { ret = EFAULT; goto label_return; } ret = 0; label_return: - return (ret); + return ret; +} + +static int +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); + } else { + oldval = prof_gdump_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + size_t lg_sample = lg_prof_sample; + + if (!config_prof) { + return ENOENT; + } + + WRITEONLY(); + WRITE(lg_sample, size_t); + if (lg_sample >= (sizeof(uint64_t) << 3)) { + lg_sample = (sizeof(uint64_t) << 3) - 1; + } + + prof_reset(tsd, lg_sample); + + ret = 0; +label_return: + return ret; } CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) +CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) /******************************************************************************/ -CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) -CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) -CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) -CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) +CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t) +CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t) +CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t) +CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t) +CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t) + +CTL_RO_CGEN(config_stats, stats_background_thread_num_threads, + ctl_stats->background_thread.num_threads, size_t) +CTL_RO_CGEN(config_stats, stats_background_thread_num_runs, + ctl_stats->background_thread.num_runs, uint64_t) +CTL_RO_CGEN(config_stats, stats_background_thread_run_interval, + nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t) + +CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) +CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, + ssize_t) +CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms, + ssize_t) +CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_uptime, + nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), 
uint64_t) +CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t) +CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t) +CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_retained, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED), + size_t) -CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current, +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.npurge), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise, + arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged, + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.purged), + uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge, + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise, + arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged, + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.purged), + uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_base, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_internal, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_resident, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), size_t) -CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t) -CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t) -CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t) -CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t) - -CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) -CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) -CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) -CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, - ctl_stats.arenas[mib[2]].astats.mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, - ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, - ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_purged, - ctl_stats.arenas[mib[2]].astats.purged, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, - ctl_stats.arenas[mib[2]].allocated_small, size_t) + arenas_i(mib[2])->astats->allocated_small, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, - ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) + arenas_i(mib[2])->astats->nmalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, - ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) + 
arenas_i(mib[2])->astats->ndalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, - ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) + arenas_i(mib[2])->astats->nrequests_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) + atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, + ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.ndalloc_large), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, - ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large), + uint64_t) /* Intentional. */ + +/* Lock profiling related APIs below. */ +#define RO_MUTEX_CTL_GEN(n, l) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \ + l.n_lock_ops, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \ + l.n_wait_times, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \ + l.n_spin_acquired, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ + l.n_owner_switches, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ + nstime_ns(&l.tot_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ + nstime_ns(&l.max_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \ + l.max_n_thds, uint32_t) + +/* Global mutexes. */ +#define OP(mtx) \ + RO_MUTEX_CTL_GEN(mutexes_##mtx, \ + ctl_stats->mutex_prof_data[global_prof_mutex_##mtx]) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes */ +#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \ + arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx]) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* tcache bin mutex */ +RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, + arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data) +#undef RO_MUTEX_CTL_GEN + +/* Resets all mutex stats, including global, arena and bin mutexes. */ +static int +stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + if (!config_stats) { + return ENOENT; + } + + tsdn_t *tsdn = tsd_tsdn(tsd); + +#define MUTEX_PROF_RESET(mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_data_reset(tsdn, &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); + + /* Global mutexes: ctl and prof. */ + MUTEX_PROF_RESET(ctl_mtx); + if (have_background_thread) { + MUTEX_PROF_RESET(background_thread_lock); + } + if (config_prof && opt_prof) { + MUTEX_PROF_RESET(bt2gctx_mtx); + } + + + /* Per arena mutexes. 
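The lock-profiling counters generated above are exposed under "stats.mutexes.*" and "stats.arenas.<i>.mutexes.*", and like the other statistics they are snapshots that only refresh when "epoch" is advanced; "stats.mutexes.reset" zeroes all of them. A minimal read-and-reset sketch, assuming <jemalloc/jemalloc.h> (the printed fields are just examples):

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

static void
report_and_reset_stats(void) {
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	/* Writing "epoch" refreshes all cached statistics. */
	mallctl("epoch", &epoch, &esz, &epoch, esz);

	size_t allocated, resident;
	size_t sz = sizeof(size_t);
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	mallctl("stats.resident", &resident, &sz, NULL, 0);
	printf("allocated=%zu resident=%zu\n", allocated, resident);

	/* Zero the lock-profiling counters: global, per-arena and per-bin. */
	mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
}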
*/ + unsigned n = narenas_total_get(); + + for (unsigned i = 0; i < n; i++) { + arena_t *arena = arena_get(tsdn, i, false); + if (!arena) { + continue; + } + MUTEX_PROF_RESET(arena->large_mtx); + MUTEX_PROF_RESET(arena->extent_avail_mtx); + MUTEX_PROF_RESET(arena->extents_dirty.mtx); + MUTEX_PROF_RESET(arena->extents_muzzy.mtx); + MUTEX_PROF_RESET(arena->extents_retained.mtx); + MUTEX_PROF_RESET(arena->decay_dirty.mtx); + MUTEX_PROF_RESET(arena->decay_muzzy.mtx); + MUTEX_PROF_RESET(arena->tcache_ql_mtx); + MUTEX_PROF_RESET(arena->base->mtx); + + for (szind_t i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + MUTEX_PROF_RESET(bin->lock); + } + } +#undef MUTEX_PROF_RESET + return 0; +} -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated, - ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) + arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, + arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills, + arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes, + arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) static const ctl_named_node_t * -stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > NBINS) - return (NULL); - return (super_stats_arenas_i_bins_j_node); +stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { + if (j > NBINS) { + return NULL; + } + return super_stats_arenas_i_bins_j_node; } -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, - ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, 
size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, + arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc, + arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests, + arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, + arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t) static const ctl_named_node_t * -stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > nlclasses) - return (NULL); - return (super_stats_arenas_i_lruns_j_node); +stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { + if (j > NSIZES - NBINS) { + return NULL; + } + return super_stats_arenas_i_lextents_j_node; } static const ctl_named_node_t * -stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + const ctl_named_node_t *ret; + size_t a; - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) { + malloc_mutex_lock(tsdn, &ctl_mtx); + a = arenas_i2a_impl(i, true, true); + if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) { ret = NULL; goto label_return; } ret = super_stats_arenas_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; } diff --git a/deps/jemalloc/src/extent.c b/deps/jemalloc/src/extent.c index 8c09b486ed..fa45c84d34 100644 --- a/deps/jemalloc/src/extent.c +++ b/deps/jemalloc/src/extent.c @@ -1,39 +1,1987 @@ -#define JEMALLOC_EXTENT_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_EXTENT_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" + +/******************************************************************************/ +/* Data. */ + +rtree_t extents_rtree; +/* Keyed by the address of the extent_t being protected. 
*/ +mutex_pool_t extent_mutex_pool; + +static const bitmap_info_t extents_bitmap_info = + BITMAP_INFO_INITIALIZER(NPSIZES+1); + +static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit, + unsigned arena_ind); +static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +static bool extent_decommit_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#ifdef PAGES_CAN_PURGE_LAZY +static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +#ifdef PAGES_CAN_PURGE_FORCED +static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +#ifdef JEMALLOC_MAPS_COALESCE +static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t size_a, size_t size_b, bool committed, + unsigned arena_ind); +#endif +static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained); +#ifdef JEMALLOC_MAPS_COALESCE +static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); +#endif +static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained); + +const extent_hooks_t extent_hooks_default = { + extent_alloc_default, + extent_dalloc_default, + extent_destroy_default, + extent_commit_default, + extent_decommit_default +#ifdef PAGES_CAN_PURGE_LAZY + , + extent_purge_lazy_default +#else + , + NULL +#endif +#ifdef PAGES_CAN_PURGE_FORCED + , + extent_purge_forced_default +#else + , + NULL +#endif +#ifdef JEMALLOC_MAPS_COALESCE + , + extent_split_default, + extent_merge_default +#endif +}; + +/* Used exclusively for gdump triggering. */ +static atomic_zu_t curpages; +static atomic_zu_t highpages; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
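extent_hooks_default is the callback table an arena uses unless the application installs its own, either as the write value of "arenas.create" or later through "arena.<i>.extent_hooks" (see arena_i_extent_hooks_ctl above). A minimal sketch that wraps only the alloc hook to log extent growth while keeping the previously installed callbacks for everything else (the logging itself is just an example):

#include <jemalloc/jemalloc.h>
#include <stdbool.h>
#include <stdio.h>

static extent_hooks_t *orig_hooks;   /* table previously installed on the arena */
static extent_hooks_t logging_hooks; /* copy with the alloc callback overridden */

static void *
logging_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	(void)hooks;
	fprintf(stderr, "arena %u maps %zu bytes\n", arena_ind, size);
	/* Defer the actual mapping to the hooks that were in place before. */
	return orig_hooks->alloc(orig_hooks, new_addr, size, alignment, zero,
	    commit, arena_ind);
}

static int
install_logging_hooks(unsigned arena_ind) {
	char cmd[64];
	size_t sz = sizeof(orig_hooks);
	extent_hooks_t *new_hooks = &logging_hooks;

	snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_ind);
	/* Read the current table, then write back a modified copy. */
	if (mallctl(cmd, &orig_hooks, &sz, NULL, 0) != 0) {
		return -1;
	}
	logging_hooks = *orig_hooks;
	logging_hooks.alloc = logging_alloc;
	return mallctl(cmd, NULL, NULL, &new_hooks, sizeof(new_hooks));
}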
+ */ + +static void extent_deregister(tsdn_t *tsdn, extent_t *extent); +static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, + size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind, + bool *zero, bool *commit, bool growing_retained); +static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained); +static void extent_record(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, + bool growing_retained); /******************************************************************************/ -static inline int -extent_szad_comp(extent_node_t *a, extent_node_t *b) -{ - int ret; - size_t a_size = a->size; - size_t b_size = b->size; +rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link, + extent_esnead_comp) + +typedef enum { + lock_result_success, + lock_result_failure, + lock_result_no_extent +} lock_result_t; + +static lock_result_t +extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, + extent_t **result) { + extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, + elm, true); + + if (extent1 == NULL) { + return lock_result_no_extent; + } + /* + * It's possible that the extent changed out from under us, and with it + * the leaf->extent mapping. We have to recheck while holding the lock. + */ + extent_lock(tsdn, extent1); + extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, + &extents_rtree, elm, true); + + if (extent1 == extent2) { + *result = extent1; + return lock_result_success; + } else { + extent_unlock(tsdn, extent1); + return lock_result_failure; + } +} + +/* + * Returns a pool-locked extent_t * if there's one associated with the given + * address, and NULL otherwise. 
+ */ +static extent_t * +extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) { + extent_t *ret = NULL; + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)addr, false, false); + if (elm == NULL) { + return NULL; + } + lock_result_t lock_result; + do { + lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret); + } while (lock_result == lock_result_failure); + return ret; +} + +extent_t * +extent_alloc(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_t *extent = extent_avail_first(&arena->extent_avail); + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return base_alloc_extent(tsdn, arena->base); + } + extent_avail_remove(&arena->extent_avail, extent); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return extent; +} + +void +extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_avail_insert(&arena->extent_avail, extent); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); +} + +extent_hooks_t * +extent_hooks_get(arena_t *arena) { + return base_extent_hooks_get(arena->base); +} + +extent_hooks_t * +extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { + background_thread_info_t *info; + if (have_background_thread) { + info = arena_background_thread_info_get(arena); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + } + extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); + if (have_background_thread) { + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + + return ret; +} + +static void +extent_hooks_assure_initialized(arena_t *arena, + extent_hooks_t **r_extent_hooks) { + if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { + *r_extent_hooks = extent_hooks_get(arena); + } +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_floor(size_t size) { + size_t ret; + pszind_t pind; + + assert(size > 0); + assert((size & PAGE_MASK) == 0); + + pind = sz_psz2ind(size - sz_large_pad + 1); + if (pind == 0) { + /* + * Avoid underflow. This short-circuit would also do the right + * thing for all sizes in the range for which there are + * PAGE-spaced size classes, but it's simplest to just handle + * the one case that would cause erroneous results. + */ + return size; + } + ret = sz_pind2sz(pind - 1) + sz_large_pad; + assert(ret <= size); + return ret; +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_ceil(size_t size) { + size_t ret; - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; + assert(size > 0); + assert(size - sz_large_pad <= LARGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); - ret = (a_addr > b_addr) - (a_addr < b_addr); + ret = extent_size_quantize_floor(size); + if (ret < size) { + /* + * Skip a quantization that may have an adequately large extent, + * because under-sized extents may be mixed in. This only + * happens when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + + sz_large_pad; } + return ret; +} + +/* Generate pairing heap functions. 
*/ +ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) + +bool +extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, + bool delay_coalesce) { + if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, + malloc_mutex_rank_exclusive)) { + return true; + } + for (unsigned i = 0; i < NPSIZES+1; i++) { + extent_heap_new(&extents->heaps[i]); + } + bitmap_init(extents->bitmap, &extents_bitmap_info, true); + extent_list_init(&extents->lru); + atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); + extents->state = state; + extents->delay_coalesce = delay_coalesce; + return false; +} + +extent_state_t +extents_state_get(const extents_t *extents) { + return extents->state; +} - return (ret); +size_t +extents_npages_get(extents_t *extents) { + return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); } -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad, - extent_szad_comp) +static void +extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent, + bool preserve_lru) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); -static inline int -extent_ad_comp(extent_node_t *a, extent_node_t *b) -{ - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_unset(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + extent_heap_insert(&extents->heaps[pind], extent); + if (!preserve_lru) { + extent_list_append(&extents->lru, extent); + } + size_t npages = size >> LG_PAGE; + /* + * All modifications to npages hold the mutex (as asserted above), so we + * don't need an atomic fetch-add; we can get by with a load followed by + * a store. + */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + atomic_store_zu(&extents->npages, cur_extents_npages + npages, + ATOMIC_RELAXED); +} - return ((a_addr > b_addr) - (a_addr < b_addr)); +static void +extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent, + bool preserve_lru) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); + + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + extent_heap_remove(&extents->heaps[pind], extent); + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_set(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + if (!preserve_lru) { + extent_list_remove(&extents->lru, extent); + } + size_t npages = size >> LG_PAGE; + /* + * As in extents_insert_locked, we hold extents->mtx and so don't need + * atomic operations for updating extents->npages. + */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + assert(cur_extents_npages >= npages); + atomic_store_zu(&extents->npages, + cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); } -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, - extent_ad_comp) +/* Do any-best-fit extent selection, i.e. select any extent that best fits. 
*/ +static extent_t * +extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + if (i < NPSIZES+1) { + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_any(&extents->heaps[i]); + assert(extent_size_get(extent) >= size); + return extent; + } + + return NULL; +} + +/* + * Do first-fit extent selection, i.e. select the oldest/lowest extent that is + * large enough. + */ +static extent_t * +extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + extent_t *ret = NULL; + + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, + &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i = + (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)i+1)) { + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + assert(extent_size_get(extent) >= size); + if (ret == NULL || extent_snad_comp(extent, ret) < 0) { + ret = extent; + } + if (i == NPSIZES) { + break; + } + assert(i < NPSIZES); + } + + return ret; +} + +/* + * Do {best,first}-fit extent selection, where the selection policy choice is + * based on extents->delay_coalesce. Best-fit selection requires less + * searching, but its layout policy is less stable and may cause higher virtual + * memory fragmentation as a side effect. + */ +static extent_t * +extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + + return extents->delay_coalesce ? 
extents_best_fit_locked(tsdn, arena, + extents, size) : extents_first_fit_locked(tsdn, arena, extents, + size); +} + +static bool +extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent) { + extent_state_set(extent, extent_state_active); + bool coalesced; + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, extent, &coalesced, false); + extent_state_set(extent, extents_state_get(extents)); + + if (!coalesced) { + return true; + } + extents_insert_locked(tsdn, extents, extent, true); + return false; +} + +extent_t * +extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size + pad != 0); + assert(alignment != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr, + size, pad, alignment, slab, szind, zero, commit, false); +} + +void +extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + extent_zeroed_set(extent, false); + + extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); +} + +extent_t * +extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, size_t npages_min) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + malloc_mutex_lock(tsdn, &extents->mtx); + + /* + * Get the LRU coalesced extent, if any. If coalescing was delayed, + * the loop will iterate until the LRU extent is fully coalesced. + */ + extent_t *extent; + while (true) { + /* Get the LRU extent, if any. */ + extent = extent_list_first(&extents->lru); + if (extent == NULL) { + goto label_return; + } + /* Check the eviction limit. */ + size_t npages = extent_size_get(extent) >> LG_PAGE; + size_t extents_npages = atomic_load_zu(&extents->npages, + ATOMIC_RELAXED); + if (extents_npages - npages < npages_min) { + extent = NULL; + goto label_return; + } + extents_remove_locked(tsdn, extents, extent, false); + if (!extents->delay_coalesce) { + break; + } + /* Try to coalesce. */ + if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent)) { + break; + } + /* + * The LRU extent was just coalesced and the result placed in + * the LRU at its neighbor's position. Start over. + */ + } + + /* + * Either mark the extent active or deregister it to protect against + * concurrent operations. 
+ */ + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + case extent_state_muzzy: + extent_state_set(extent, extent_state_active); + break; + case extent_state_retained: + extent_deregister(tsdn, extent); + break; + default: + not_reached(); + } + +label_return: + malloc_mutex_unlock(tsdn, &extents->mtx); + return extent; +} + +static void +extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + /* + * Leak extent after making sure its pages have already been purged, so + * that this is only a virtual memory leak. + */ + if (extents_state_get(extents) == extent_state_dirty) { + if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, + extent, 0, extent_size_get(extent), growing_retained)) { + extent_purge_forced_impl(tsdn, arena, r_extent_hooks, + extent, 0, extent_size_get(extent), + growing_retained); + } + } + extent_dalloc(tsdn, arena, extent); +} + +void +extents_prefork(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_prefork(tsdn, &extents->mtx); +} + +void +extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_parent(tsdn, &extents->mtx); +} + +void +extents_postfork_child(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_child(tsdn, &extents->mtx); +} + +static void +extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent, bool preserve_lru) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extent_state_active); + + extent_state_set(extent, extents_state_get(extents)); + extents_insert_locked(tsdn, extents, extent, preserve_lru); +} + +static void +extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent, bool preserve_lru) { + malloc_mutex_lock(tsdn, &extents->mtx); + extent_deactivate_locked(tsdn, arena, extents, extent, preserve_lru); + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +static void +extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent, bool preserve_lru) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extents_state_get(extents)); + + extents_remove_locked(tsdn, extents, extent, preserve_lru); + extent_state_set(extent, extent_state_active); +} + +static bool +extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, + const extent_t *extent, bool dependent, bool init_missing, + rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { + *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), dependent, init_missing); + if (!dependent && *r_elm_a == NULL) { + return true; + } + assert(*r_elm_a != NULL); + + *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_last_get(extent), dependent, init_missing); + if (!dependent && *r_elm_b == NULL) { + return true; + } + assert(*r_elm_b != NULL); + + return false; +} + +static void +extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, + rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); + if (elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, + slab); + } +} + +static void +extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, + szind_t szind) { + assert(extent_slab_get(extent)); + + /* Register interior. 
*/ + for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { + rtree_write(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << + LG_PAGE), extent, szind, true); + } +} + +static void +extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { + cassert(config_prof); + /* prof_gdump() requirement. */ + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (opt_prof && extent_state_get(extent) == extent_state_active) { + size_t nadd = extent_size_get(extent) >> LG_PAGE; + size_t cur = atomic_fetch_add_zu(&curpages, nadd, + ATOMIC_RELAXED) + nadd; + size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); + while (cur > high && !atomic_compare_exchange_weak_zu( + &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) { + /* + * Don't refresh cur, because it may have decreased + * since this thread lost the highpages update race. + * Note that high is updated in case of CAS failure. + */ + } + if (cur > high && prof_gdump_get_unlocked()) { + prof_gdump(tsdn); + } + } +} + +static void +extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { + cassert(config_prof); + + if (opt_prof && extent_state_get(extent) == extent_state_active) { + size_t nsub = extent_size_get(extent) >> LG_PAGE; + assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); + atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); + } +} + +static bool +extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *elm_a, *elm_b; + + /* + * We need to hold the lock to protect against a concurrent coalesce + * operation that sees us in a partial state. + */ + extent_lock(tsdn, extent); + + if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, + &elm_a, &elm_b)) { + return true; + } + + szind_t szind = extent_szind_get_maybe_invalid(extent); + bool slab = extent_slab_get(extent); + extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); + if (slab) { + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + + extent_unlock(tsdn, extent); + + if (config_prof && gdump_add) { + extent_gdump_add(tsdn, extent); + } + + return false; +} + +static bool +extent_register(tsdn_t *tsdn, extent_t *extent) { + return extent_register_impl(tsdn, extent, true); +} + +static bool +extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { + return extent_register_impl(tsdn, extent, false); +} + +static void +extent_reregister(tsdn_t *tsdn, extent_t *extent) { + bool err = extent_register(tsdn, extent); + assert(!err); +} + +static void +extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, + extent_t *extent) { + size_t i; + + assert(extent_slab_get(extent)); + + for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { + rtree_clear(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << + LG_PAGE)); + } +} + +static void +extent_deregister(tsdn_t *tsdn, extent_t *extent) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *elm_a, *elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, + &elm_a, &elm_b); + + extent_lock(tsdn, extent); + + extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + 
extent_unlock(tsdn, extent); + + if (config_prof) { + extent_gdump_sub(tsdn, extent); + } +} + +static extent_t * +extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + bool *zero, bool *commit, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + assert(alignment > 0); + if (config_debug && new_addr != NULL) { + /* + * Non-NULL new_addr has two use cases: + * + * 1) Recycle a known-extant extent, e.g. during purging. + * 2) Perform in-place expanding reallocation. + * + * Regardless of use case, new_addr must either refer to a + * non-existing extent, or to the base of an extant extent, + * since only active slabs support interior lookups (which of + * course cannot be recycled). + */ + assert(PAGE_ADDR2BASE(new_addr) == new_addr); + assert(pad == 0); + assert(alignment <= PAGE); + } + + size_t esize = size + pad; + size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (alloc_size < esize) { + return NULL; + } + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + extent_t *extent; + if (new_addr != NULL) { + extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr); + if (extent != NULL) { + /* + * We might null-out extent to report an error, but we + * still need to unlock the associated mutex after. + */ + extent_t *unlock_extent = extent; + assert(extent_base_get(extent) == new_addr); + if (extent_arena_get(extent) != arena || + extent_size_get(extent) < esize || + extent_state_get(extent) != + extents_state_get(extents)) { + extent = NULL; + } + extent_unlock(tsdn, unlock_extent); + } + } else { + extent = extents_fit_locked(tsdn, arena, extents, alloc_size); + } + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &extents->mtx); + return NULL; + } + + extent_activate_locked(tsdn, arena, extents, extent, false); + malloc_mutex_unlock(tsdn, &extents->mtx); + + if (extent_zeroed_get(extent)) { + *zero = true; + } + if (extent_committed_get(extent)) { + *commit = true; + } + + return extent; +} + +static extent_t * +extent_recycle_split(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + szind_t szind, extent_t *extent, bool growing_retained) { + size_t esize = size + pad; + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent), + PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent); + assert(new_addr == NULL || leadsize == 0); + assert(extent_size_get(extent) >= leadsize + esize); + size_t trailsize = extent_size_get(extent) - leadsize - esize; + + /* Split the lead. */ + if (leadsize != 0) { + extent_t *lead = extent; + extent = extent_split_impl(tsdn, arena, r_extent_hooks, + lead, leadsize, NSIZES, false, esize + trailsize, szind, + slab, growing_retained); + if (extent == NULL) { + extent_deregister(tsdn, lead); + extents_leak(tsdn, arena, r_extent_hooks, extents, + lead, growing_retained); + return NULL; + } + extent_deactivate(tsdn, arena, extents, lead, false); + } + + /* Split the trail. 
*/ + if (trailsize != 0) { + extent_t *trail = extent_split_impl(tsdn, arena, + r_extent_hooks, extent, esize, szind, slab, trailsize, + NSIZES, false, growing_retained); + if (trail == NULL) { + extent_deregister(tsdn, extent); + extents_leak(tsdn, arena, r_extent_hooks, extents, + extent, growing_retained); + return NULL; + } + extent_deactivate(tsdn, arena, extents, trail, false); + } else if (leadsize == 0) { + /* + * Splitting causes szind to be set as a side effect, but no + * splitting occurred. + */ + extent_szind_set(extent, szind); + if (szind != NSIZES) { + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(extent), szind, slab); + if (slab && extent_size_get(extent) > PAGE) { + rtree_szind_slab_update(tsdn, &extents_rtree, + rtree_ctx, + (uintptr_t)extent_past_get(extent) - + (uintptr_t)PAGE, szind, slab); + } + } + } + + return extent; +} + +static extent_t * +extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + assert(new_addr == NULL || !slab); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + bool committed = false; + extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, new_addr, size, pad, alignment, slab, zero, + &committed, growing_retained); + if (extent == NULL) { + return NULL; + } + if (committed) { + *commit = true; + } + + extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, new_addr, size, pad, alignment, slab, szind, extent, + growing_retained); + if (extent == NULL) { + return NULL; + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent), growing_retained)) { + extent_record(tsdn, arena, r_extent_hooks, extents, + extent, growing_retained); + return NULL; + } + extent_zeroed_set(extent, true); + } + + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + assert(extent_state_get(extent) == extent_state_active); + if (slab) { + extent_slab_set(extent, slab); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + + if (*zero) { + void *addr = extent_base_get(extent); + size_t size = extent_size_get(extent); + if (!extent_zeroed_get(extent)) { + if (pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } else if (config_debug) { + size_t *p = (size_t *)(uintptr_t)addr; + for (size_t i = 0; i < size / sizeof(size_t); i++) { + assert(p[i] == 0); + } + } + } + return extent; +} + +/* + * If the caller specifies (!*zero), it is still possible to receive zeroed + * memory, in which case *zero is toggled to true. arena_extent_alloc() takes + * advantage of this to avoid demanding zeroed extents, but taking advantage of + * them if they are returned. + */ +static void * +extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { + void *ret; + + assert(size != 0); + assert(alignment != 0); + + /* "primary" dss. 
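The lead/trail arithmetic in extent_recycle_split() above is easiest to follow with concrete numbers: the lead is whatever it takes to round the extent base up to the requested alignment, and the trail is whatever remains after the lead and the requested size. A small self-checking sketch with made-up addresses and 4 KiB pages (ALIGN_CEIL stands in for ALIGNMENT_CEILING):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Round 'x' up to a multiple of 'align' (a power of two). */
#define ALIGN_CEIL(x, align) (((x) + (align) - 1) & ~((uintptr_t)(align) - 1))

int
main(void) {
	/* A recycled extent starting at 0x70003000, 6 pages (24 KiB) long. */
	uintptr_t base = 0x70003000;
	size_t extent_size = 6 * 4096;
	/* Request: 3 pages (esize), 16 KiB alignment, no pad. */
	size_t esize = 3 * 4096;
	size_t alignment = 16384;

	size_t leadsize = ALIGN_CEIL(base, alignment) - base;	/* 1 page */
	size_t trailsize = extent_size - leadsize - esize;	/* 2 pages */

	assert(leadsize == 4096 && trailsize == 8192);
	return 0;
}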
*/ + if (have_dss && dss_prec == dss_prec_primary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + /* mmap. */ + if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) + != NULL) { + return ret; + } + /* "secondary" dss. */ + if (have_dss && dss_prec == dss_prec_secondary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + + /* All strategies for allocation failed. */ + return NULL; +} + +static void * +extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit) { + void *ret; + + ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, + commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, + ATOMIC_RELAXED)); + return ret; +} + +static void * +extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + tsdn_t *tsdn; + arena_t *arena; + + tsdn = tsdn_fetch(); + arena = arena_get(tsdn, arena_ind, false); + /* + * The arena we're allocating on behalf of must have been initialized + * already. + */ + assert(arena != NULL); + + return extent_alloc_default_impl(tsdn, arena, new_addr, size, + alignment, zero, commit); +} + +static void +extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, arena); +} + +static void +extent_hook_post_reentrancy(tsdn_t *tsdn) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + post_reentrancy(tsd); +} + +/* + * If virtual memory is retained, create increasingly larger extents from which + * to split requested extents in order to limit the total number of disjoint + * virtual memory ranges retained by each arena. + */ +static extent_t * +extent_grow_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, + bool slab, szind_t szind, bool *zero, bool *commit) { + malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + size_t esize = size + pad; + size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (alloc_size_min < esize) { + goto label_err; + } + /* + * Find the next extent size in the series that would be large enough to + * satisfy this request. + */ + pszind_t egn_skip = 0; + size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + while (alloc_size < alloc_size_min) { + egn_skip++; + if (arena->extent_grow_next + egn_skip == NPSIZES) { + /* Outside legal range. 
*/ + goto label_err; + } + assert(arena->extent_grow_next + egn_skip < NPSIZES); + alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + } + + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + goto label_err; + } + bool zeroed = false; + bool committed = false; + + void *ptr; + if (*r_extent_hooks == &extent_hooks_default) { + ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE, + &zeroed, &committed, (dss_prec_t)atomic_load_u( + &arena->dss_prec, ATOMIC_RELAXED)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, + alloc_size, PAGE, &zeroed, &committed, + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_init(extent, arena, ptr, alloc_size, false, NSIZES, + arena_extent_sn_next(arena), extent_state_active, zeroed, + committed); + if (ptr == NULL) { + extent_dalloc(tsdn, arena, extent); + goto label_err; + } + if (extent_register_no_gdump_add(tsdn, extent)) { + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr, + PAGE_CEILING(alignment)) - (uintptr_t)ptr; + assert(alloc_size >= leadsize + esize); + size_t trailsize = alloc_size - leadsize - esize; + if (extent_zeroed_get(extent) && extent_committed_get(extent)) { + *zero = true; + } + if (extent_committed_get(extent)) { + *commit = true; + } + + /* Split the lead. */ + if (leadsize != 0) { + extent_t *lead = extent; + extent = extent_split_impl(tsdn, arena, r_extent_hooks, lead, + leadsize, NSIZES, false, esize + trailsize, szind, slab, + true); + if (extent == NULL) { + extent_deregister(tsdn, lead); + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, lead, true); + goto label_err; + } + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, lead, true); + } + + /* Split the trail. */ + if (trailsize != 0) { + extent_t *trail = extent_split_impl(tsdn, arena, r_extent_hooks, + extent, esize, szind, slab, trailsize, NSIZES, false, true); + if (trail == NULL) { + extent_deregister(tsdn, extent); + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, trail, true); + } else if (leadsize == 0) { + /* + * Splitting causes szind to be set as a side effect, but no + * splitting occurred. + */ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + extent_szind_set(extent, szind); + if (szind != NSIZES) { + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(extent), szind, slab); + if (slab && extent_size_get(extent) > PAGE) { + rtree_szind_slab_update(tsdn, &extents_rtree, + rtree_ctx, + (uintptr_t)extent_past_get(extent) - + (uintptr_t)PAGE, szind, slab); + } + } + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, + extent_size_get(extent), true)) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + extent_zeroed_set(extent, true); + } + + /* + * Increment extent_grow_next if doing so wouldn't exceed the legal + * range. + */ + if (arena->extent_grow_next + egn_skip + 1 < NPSIZES) { + arena->extent_grow_next += egn_skip + 1; + } else { + arena->extent_grow_next = NPSIZES - 1; + } + /* All opportunities for failure are past. 
*/ + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + + if (config_prof) { + /* Adjust gdump stats now that extent is final size. */ + extent_gdump_add(tsdn, extent); + } + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (slab) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + extent_slab_set(extent, true); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + if (*zero && !extent_zeroed_get(extent)) { + void *addr = extent_base_get(extent); + size_t size = extent_size_get(extent); + if (pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } + + return extent; +label_err: + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + return NULL; +} + +static extent_t * +extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size != 0); + assert(alignment != 0); + + malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); + + extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, + &arena->extents_retained, new_addr, size, pad, alignment, slab, + szind, zero, commit, true); + if (extent != NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + if (config_prof) { + extent_gdump_add(tsdn, extent); + } + } else if (opt_retain && new_addr == NULL) { + extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, + pad, alignment, slab, szind, zero, commit); + /* extent_grow_retained() always releases extent_grow_mtx. */ + } else { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + } + malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); + + return extent; +} + +static extent_t * +extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + size_t esize = size + pad; + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + return NULL; + } + void *addr; + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
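extent_grow_retained() above grows the retained series geometrically: it scans forward from arena->extent_grow_next for the first size class that can hold the request, and on success advances extent_grow_next past the class it used so the next growth maps a strictly larger range, clamping at the largest class. A simplified sketch of that index selection, with a made-up size-class table standing in for sz_pind2sz()/NPSIZES:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define NCLASSES 8
/* Hypothetical, geometrically spaced growth classes (bytes). */
static const size_t grow_classes[NCLASSES] = {
	1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 21, 1 << 22, 1 << 23
};

/*
 * Pick the class used for the next growth and advance *grow_next past it.
 * Returns false (leaving *grow_next untouched) if no class is large enough.
 * Assumes *grow_next < NCLASSES on entry, as the real code does.
 */
static bool
grow_pick(size_t *grow_next, size_t min_size, size_t *alloc_size) {
	size_t skip = 0;
	while (grow_classes[*grow_next + skip] < min_size) {
		skip++;
		if (*grow_next + skip == NCLASSES) {
			return false;	/* Outside the legal range. */
		}
	}
	*alloc_size = grow_classes[*grow_next + skip];
	/* Advance, clamping to the largest class. */
	*grow_next = (*grow_next + skip + 1 < NCLASSES) ?
	    *grow_next + skip + 1 : NCLASSES - 1;
	return true;
}

int
main(void) {
	size_t grow_next = 0, alloc_size;
	assert(grow_pick(&grow_next, 200000, &alloc_size));
	assert(alloc_size == (1 << 18) && grow_next == 3);
	assert(grow_pick(&grow_next, 1, &alloc_size));
	assert(alloc_size == (1 << 19) && grow_next == 4);
	return 0;
}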
*/ + addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, + alignment, zero, commit); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, + esize, alignment, zero, commit, arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + if (addr == NULL) { + extent_dalloc(tsdn, arena, extent); + return NULL; + } + extent_init(extent, arena, addr, esize, slab, szind, + arena_extent_sn_next(arena), extent_state_active, zero, commit); + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (extent_register(tsdn, extent)) { + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, false); + return NULL; + } + + return extent; +} + +extent_t * +extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + if (extent == NULL) { + extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + } + + return extent; +} + +static bool +extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, + const extent_t *outer) { + assert(extent_arena_get(inner) == arena); + if (extent_arena_get(outer) != arena) { + return false; + } + + assert(extent_state_get(inner) == extent_state_active); + if (extent_state_get(outer) != extents->state) { + return false; + } + + if (extent_committed_get(inner) != extent_committed_get(outer)) { + return false; + } + + return true; +} + +static bool +extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *inner, extent_t *outer, bool forward, + bool growing_retained) { + assert(extent_can_coalesce(arena, extents, inner, outer)); + + if (forward && extents->delay_coalesce) { + /* + * The extent that remains after coalescing must occupy the + * outer extent's position in the LRU. For forward coalescing, + * swap the inner extent into the LRU. + */ + extent_list_replace(&extents->lru, outer, inner); + } + extent_activate_locked(tsdn, arena, extents, outer, + extents->delay_coalesce); + + malloc_mutex_unlock(tsdn, &extents->mtx); + bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, + forward ? inner : outer, forward ? outer : inner, growing_retained); + malloc_mutex_lock(tsdn, &extents->mtx); + + if (err) { + if (forward && extents->delay_coalesce) { + extent_list_replace(&extents->lru, inner, outer); + } + extent_deactivate_locked(tsdn, arena, extents, outer, + extents->delay_coalesce); + } + + return err; +} + +static extent_t * +extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained) { + /* + * Continue attempting to coalesce until failure, to protect against + * races with other threads that are thwarted by this one. + */ + bool again; + do { + again = false; + + /* Try to coalesce forward. 
*/ + extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, + extent_past_get(extent)); + if (next != NULL) { + /* + * extents->mtx only protects against races for + * like-state extents, so call extent_can_coalesce() + * before releasing next's pool lock. + */ + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, next); + + extent_unlock(tsdn, next); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, next, true, + growing_retained)) { + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + + /* Try to coalesce backward. */ + extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, + extent_before_get(extent)); + if (prev != NULL) { + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, prev); + extent_unlock(tsdn, prev); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, prev, false, + growing_retained)) { + extent = prev; + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + } while (again); + + if (extents->delay_coalesce) { + *coalesced = false; + } + return extent; +} + +static void +extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + assert((extents_state_get(extents) != extent_state_dirty && + extents_state_get(extents) != extent_state_muzzy) || + !extent_zeroed_get(extent)); + + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_szind_set(extent, NSIZES); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), true) == extent); + + if (!extents->delay_coalesce) { + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent, NULL, growing_retained); + } + + extent_deactivate_locked(tsdn, arena, extents, extent, false); + + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +void +extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (extent_register(tsdn, extent)) { + extents_leak(tsdn, arena, &extent_hooks, + &arena->extents_retained, extent, false); + return; + } + extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); +} + +static bool +extent_dalloc_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + return extent_dalloc_mmap(addr, size); + } + return true; +} + +static bool +extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + return extent_dalloc_default_impl(addr, size); +} + +static bool +extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + bool err; + + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to deallocate. 
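extent_try_coalesce() above keeps looking up the extents that physically follow (extent_past_get) and precede (extent_before_get) the one being recorded, merges whenever arena, state, and committedness agree, and repeats until a full pass makes no progress. The shape of that loop, reduced to plain address ranges with no locking or rtree (everything here is a made-up model, not jemalloc code):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* A free range [addr, addr + size); 'live' marks slots still in use. */
typedef struct {
	uintptr_t addr;
	size_t size;
	bool live;
} range_t;

/* Repeatedly absorb any live neighbor that is physically adjacent to 'r'. */
static range_t *
try_coalesce(range_t *pool, size_t npool, range_t *r) {
	bool again;
	do {
		again = false;
		for (size_t i = 0; i < npool; i++) {
			range_t *o = &pool[i];
			if (!o->live || o == r) {
				continue;
			}
			if (o->addr == r->addr + r->size) {		/* forward */
				r->size += o->size;
				o->live = false;
				again = true;
			} else if (o->addr + o->size == r->addr) {	/* backward */
				o->size += r->size;
				r->live = false;
				r = o;
				again = true;
			}
		}
	} while (again);
	return r;
}

int
main(void) {
	range_t pool[] = {
		{0x10000, 0x1000, true},	/* backward neighbor */
		{0x11000, 0x1000, true},	/* the range being freed */
		{0x12000, 0x2000, true},	/* forward neighbor */
	};
	range_t *r = try_coalesce(pool, 3, &pool[1]);
	assert(r == &pool[0] && r->size == 0x4000);
	return 0;
}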
*/ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + err = extent_dalloc_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = ((*r_extent_hooks)->dalloc == NULL || + (*r_extent_hooks)->dalloc(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena))); + extent_hook_post_reentrancy(tsdn); + } + + if (!err) { + extent_dalloc(tsdn, arena, extent); + } + + return err; +} + +void +extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* + * Deregister first to avoid a race with other allocating threads, and + * reregister if deallocation fails. + */ + extent_deregister(tsdn, extent); + if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { + return; + } + + extent_reregister(tsdn, extent); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + /* Try to decommit; purge if that fails. */ + bool zeroed; + if (!extent_committed_get(extent)) { + zeroed = true; + } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent))) { + zeroed = true; + } else if ((*r_extent_hooks)->purge_forced != NULL && + !(*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena))) { + zeroed = true; + } else if (extent_state_get(extent) == extent_state_muzzy || + ((*r_extent_hooks)->purge_lazy != NULL && + !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena)))) { + zeroed = false; + } else { + zeroed = false; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_zeroed_set(extent, zeroed); + + if (config_prof) { + extent_gdump_sub(tsdn, extent); + } + + extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, + extent, false); +} + +static void +extent_destroy_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + pages_unmap(addr, size); + } +} + +static void +extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + extent_destroy_default_impl(addr, size); +} + +void +extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* Deregister first to avoid a race with other allocating threads. */ + extent_deregister(tsdn, extent); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to destroy; silently fail otherwise. */ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
*/ + extent_destroy_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else if ((*r_extent_hooks)->destroy != NULL) { + extent_hook_pre_reentrancy(tsdn, arena); + (*r_extent_hooks)->destroy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_dalloc(tsdn, arena, extent); +} + +static bool +extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +static bool +extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->commit == NULL || + (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), + extent_size_get(extent), offset, length, arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) || !err); + return err; +} + +bool +extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, + length, false); +} + +static bool +extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +bool +extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->decommit == NULL || + (*r_extent_hooks)->decommit(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) && err); + return err; +} + +#ifdef PAGES_CAN_PURGE_LAZY +static bool +extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} +#endif + +static bool +extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_lazy == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + + return err; +} + +bool +extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +#ifdef PAGES_CAN_PURGE_FORCED +static bool +extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_forced((void *)((uintptr_t)addr + + (uintptr_t)offset), length); +} +#endif + +static bool +extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_forced == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + return err; +} + +bool +extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +#ifdef JEMALLOC_MAPS_COALESCE +static bool +extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { + return !maps_coalesce; +} +#endif + +static extent_t * +extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained) { + assert(extent_size_get(extent) == size_a + size_b); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->split == NULL) { + return NULL; + } + + extent_t *trail = extent_alloc(tsdn, arena); + if (trail == NULL) { + goto label_error_a; + } + + extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + + size_a), size_b, slab_b, szind_b, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent)); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; + { + extent_t lead; + + extent_init(&lead, arena, extent_addr_get(extent), size_a, + slab_a, szind_a, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent)); + + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, + true, &lead_elm_a, &lead_elm_b); + } + rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, + &trail_elm_a, &trail_elm_b); + + if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL + || trail_elm_b == NULL) { + goto label_error_b; + } + + extent_lock2(tsdn, extent, trail); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), + size_a + size_b, size_a, size_b, extent_committed_get(extent), + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + if (err) { + goto label_error_c; + } + + extent_size_set(extent, size_a); + extent_szind_set(extent, szind_a); + + extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, + szind_a, slab_a); + extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, + szind_b, slab_b); + + extent_unlock2(tsdn, extent, trail); + + return trail; +label_error_c: + extent_unlock2(tsdn, extent, trail); +label_error_b: + extent_dalloc(tsdn, arena, trail); +label_error_a: + return NULL; +} + +extent_t * +extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { + return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, + szind_a, slab_a, size_b, szind_b, slab_b, false); +} + +static bool +extent_merge_default_impl(void *addr_a, void *addr_b) { + if (!maps_coalesce) { + return true; + } + if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { + return true; + } + + return false; +} + +#ifdef JEMALLOC_MAPS_COALESCE +static bool +extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + return extent_merge_default_impl(addr_a, addr_b); +} +#endif + +static bool +extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->merge == NULL) { + return true; + } + + bool err; + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
*/ + err = extent_merge_default_impl(extent_base_get(a), + extent_base_get(b)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = (*r_extent_hooks)->merge(*r_extent_hooks, + extent_base_get(a), extent_size_get(a), extent_base_get(b), + extent_size_get(b), extent_committed_get(a), + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + if (err) { + return true; + } + + /* + * The rtree writes must happen while all the relevant elements are + * owned, so the following code uses decomposed helper functions rather + * than extent_{,de}register() to do things in the right order. + */ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, + &a_elm_b); + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, + &b_elm_b); + + extent_lock2(tsdn, a, b); + + if (a_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, + NSIZES, false); + } + if (b_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, + NSIZES, false); + } else { + b_elm_b = b_elm_a; + } + + extent_size_set(a, extent_size_get(a) + extent_size_get(b)); + extent_szind_set(a, NSIZES); + extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? + extent_sn_get(a) : extent_sn_get(b)); + extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); + + extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false); + + extent_unlock2(tsdn, a, b); + + extent_dalloc(tsdn, extent_arena_get(b), b); + + return false; +} + +bool +extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { + return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); +} + +bool +extent_boot(void) { + if (rtree_new(&extents_rtree, true)) { + return true; + } + + if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", + WITNESS_RANK_EXTENT_POOL)) { + return true; + } + + if (have_dss) { + extent_dss_boot(); + } + + return false; +} diff --git a/deps/jemalloc/src/hash.c b/deps/jemalloc/src/hash.c index cfa4da0275..7b2bdc2bd6 100644 --- a/deps/jemalloc/src/hash.c +++ b/deps/jemalloc/src/hash.c @@ -1,2 +1,3 @@ -#define JEMALLOC_HASH_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_HASH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/deps/jemalloc/src/jemalloc.c b/deps/jemalloc/src/jemalloc.c index 204778bc89..0ee8ad48b9 100644 --- a/deps/jemalloc/src/jemalloc.c +++ b/deps/jemalloc/src/jemalloc.c @@ -1,15 +1,31 @@ -#define JEMALLOC_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/util.h" 
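The default hooks above (extent_alloc_default, extent_dalloc_default, extent_commit_default, extent_split_default, extent_merge_default, and so on) implement the extent_hooks_t table that jemalloc 5 exposes publicly, and an application can install its own table per arena. A minimal sketch, assuming jemalloc 5's <jemalloc/jemalloc.h>, the documented "arenas.create" mallctl, and Linux mmap(MAP_ANONYMOUS); the hook bodies are illustrative only, and the hooks left NULL rely on the opt-out handling visible in the wrapper functions above:

#include <jemalloc/jemalloc.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void *
my_alloc(extent_hooks_t *hooks, void *new_addr, size_t size, size_t alignment,
    bool *zero, bool *commit, unsigned arena_ind) {
	(void)hooks; (void)arena_ind;
	if (new_addr != NULL) {
		return NULL;	/* Decline placement requests; jemalloc falls back. */
	}
	/* Over-map, then trim to an 'alignment'-aligned window of 'size' bytes. */
	char *p = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		return NULL;
	}
	size_t lead = (size_t)(-(uintptr_t)p & (alignment - 1));
	if (lead != 0) {
		munmap(p, lead);
	}
	munmap(p + lead + size, alignment - lead);
	*zero = true;	/* Fresh anonymous mappings are zero-filled. */
	*commit = true;
	return p + lead;
}

static bool
my_dalloc(extent_hooks_t *hooks, void *addr, size_t size, bool committed,
    unsigned arena_ind) {
	(void)hooks; (void)committed; (void)arena_ind;
	return munmap(addr, size) != 0;	/* false means success. */
}

static extent_hooks_t my_hooks = {
	.alloc = my_alloc,
	.dalloc = my_dalloc,
	/* Remaining hooks left NULL: treated as opt-outs by the wrappers. */
};

int
main(void) {
	unsigned arena_ind;
	size_t sz = sizeof(arena_ind);
	extent_hooks_t *hooks = &my_hooks;
	if (mallctl("arenas.create", &arena_ind, &sz, &hooks, sizeof(hooks))) {
		return 1;
	}
	void *p = mallocx(4096, MALLOCX_ARENA(arena_ind));
	dallocx(p, MALLOCX_ARENA(arena_ind));
	printf("arena %u served by custom extent hooks\n", arena_ind);
	return 0;
}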
/******************************************************************************/ /* Data. */ -malloc_tsd_data(, arenas, arena_t *, NULL) -malloc_tsd_data(, thread_allocated, thread_allocated_t, - THREAD_ALLOCATED_INITIALIZER) - /* Runtime configuration options. */ -const char *je_malloc_conf; +const char *je_malloc_conf +#ifndef _WIN32 + JEMALLOC_ATTR(weak) +#endif + ; bool opt_abort = #ifdef JEMALLOC_DEBUG true @@ -17,30 +33,80 @@ bool opt_abort = false #endif ; -bool opt_junk = +bool opt_abort_conf = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +const char *opt_junk = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + "true" +#else + "false" +#endif + ; +bool opt_junk_alloc = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; +bool opt_junk_free = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; -size_t opt_quarantine = ZU(0); -bool opt_redzone = false; + bool opt_utrace = false; -bool opt_valgrind = false; bool opt_xmalloc = false; bool opt_zero = false; -size_t opt_narenas = 0; +unsigned opt_narenas = 0; unsigned ncpus; -malloc_mutex_t arenas_lock; -arena_t **arenas; -unsigned narenas_total; -unsigned narenas_auto; - -/* Set to true once the allocator has been initialized. */ -static bool malloc_initialized = false; +/* Protects arenas initialization. */ +malloc_mutex_t arenas_lock; +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + * + * arenas[0..narenas_auto) are used for automatic multiplexing of threads and + * arenas. arenas[narenas_auto..narenas_total) are only used if the application + * takes some action to create them and allocate from them. + * + * Points to an arena_t. + */ +JEMALLOC_ALIGNED(CACHELINE) +atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; +static atomic_u_t narenas_total; /* Use narenas_total_*(). */ +static arena_t *a0; /* arenas[0]; read-only after initialization. */ +unsigned narenas_auto; /* Read-only after initialization. */ + +typedef enum { + malloc_init_uninitialized = 3, + malloc_init_a0_initialized = 2, + malloc_init_recursible = 1, + malloc_init_initialized = 0 /* Common case --> jnz. */ +} malloc_init_t; +static malloc_init_t malloc_init_state = malloc_init_uninitialized; + +/* False should be the common case. Set to true to trigger initialization. */ +bool malloc_slow = true; + +/* When malloc_slow is true, set the corresponding bits for sanity check. */ +enum { + flag_opt_junk_alloc = (1U), + flag_opt_junk_free = (1U << 1), + flag_opt_zero = (1U << 2), + flag_opt_utrace = (1U << 3), + flag_opt_xmalloc = (1U << 4) +}; +static uint8_t malloc_slow_flags; #ifdef JEMALLOC_THREADED_INIT /* Used to let the initializing thread recursively allocate. */ @@ -57,14 +123,30 @@ static bool malloc_initializer = NO_INITIALIZER; /* Used to avoid initialization races. */ #ifdef _WIN32 +#if _WIN32_WINNT >= 0x0600 +static malloc_mutex_t init_lock = SRWLOCK_INIT; +#else static malloc_mutex_t init_lock; +static bool init_lock_initialized = false; JEMALLOC_ATTR(constructor) static void WINAPI -_init_init_lock(void) -{ - - malloc_mutex_init(&init_lock); +_init_init_lock(void) { + /* + * If another constructor in the same binary is using mallctl to e.g. + * set up extent hooks, it may end up running before this one, and + * malloc_init_hard will crash trying to lock the uninitialized lock. So + * we force an initialization of the lock in malloc_init_hard as well. 
+ * We don't try to care about atomicity of the accesses to the + * init_lock_initialized boolean, since it really only matters early in + * the process creation, before any separate thread normally starts + * doing anything. + */ + if (!init_lock_initialized) { + malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT, + malloc_mutex_rank_exclusive); + } + init_lock_initialized = true; } #ifdef _MSC_VER @@ -72,7 +154,7 @@ _init_init_lock(void) JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) static const void (WINAPI *init_init_lock)(void) = _init_init_lock; #endif - +#endif #else static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; #endif @@ -85,7 +167,7 @@ typedef struct { #ifdef JEMALLOC_UTRACE # define UTRACE(a, b, c) do { \ - if (opt_utrace) { \ + if (unlikely(opt_utrace)) { \ int utrace_serrno = errno; \ malloc_utrace_t ut; \ ut.p = (a); \ @@ -99,12 +181,16 @@ typedef struct { # define UTRACE(a, b, c) #endif +/* Whether encountered any invalid config options. */ +static bool had_conf_error = false; + /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ +static bool malloc_init_hard_a0(void); static bool malloc_init_hard(void); /******************************************************************************/ @@ -112,54 +198,337 @@ static bool malloc_init_hard(void); * Begin miscellaneous support functions. */ +bool +malloc_initialized(void) { + return (malloc_init_state == malloc_init_initialized); +} + +JEMALLOC_ALWAYS_INLINE bool +malloc_init_a0(void) { + if (unlikely(malloc_init_state == malloc_init_uninitialized)) { + return malloc_init_hard_a0(); + } + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +malloc_init(void) { + if (unlikely(!malloc_initialized()) && malloc_init_hard()) { + return true; + } + return false; +} + +/* + * The a0*() functions are used instead of i{d,}alloc() in situations that + * cannot tolerate TLS variable access. + */ + +static void * +a0ialloc(size_t size, bool zero, bool is_internal) { + if (unlikely(malloc_init_a0())) { + return NULL; + } + + return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, + is_internal, arena_get(TSDN_NULL, 0, true), true); +} + +static void +a0idalloc(void *ptr, bool is_internal) { + idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); +} + +void * +a0malloc(size_t size) { + return a0ialloc(size, false, true); +} + +void +a0dalloc(void *ptr) { + a0idalloc(ptr, true); +} + +/* + * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive + * situations that cannot tolerate TLS variable access (TLS allocation and very + * early internal data structure initialization).
+ */ + +void * +bootstrap_malloc(size_t size) { + if (unlikely(size == 0)) { + size = 1; + } + + return a0ialloc(size, false, false); +} + +void * +bootstrap_calloc(size_t num, size_t size) { + size_t num_size; + + num_size = num * size; + if (unlikely(num_size == 0)) { + assert(num == 0 || size == 0); + num_size = 1; + } + + return a0ialloc(num_size, true, false); +} + +void +bootstrap_free(void *ptr) { + if (unlikely(ptr == NULL)) { + return; + } + + a0idalloc(ptr, false); +} + +void +arena_set(unsigned ind, arena_t *arena) { + atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); +} + +static void +narenas_total_set(unsigned narenas) { + atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); +} + +static void +narenas_total_inc(void) { + atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); +} + +unsigned +narenas_total_get(void) { + return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); +} + /* Create a new arena and insert it into the arenas array at index ind. */ +static arena_t * +arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + + assert(ind <= narenas_total_get()); + if (ind >= MALLOCX_ARENA_LIMIT) { + return NULL; + } + if (ind == narenas_total_get()) { + narenas_total_inc(); + } + + /* + * Another thread may have already initialized arenas[ind] if it's an + * auto arena. + */ + arena = arena_get(tsdn, ind, false); + if (arena != NULL) { + assert(ind < narenas_auto); + return arena; + } + + /* Actually initialize the arena. */ + arena = arena_new(tsdn, ind, extent_hooks); + + return arena; +} + +static void +arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { + if (ind == 0) { + return; + } + if (have_background_thread) { + bool err; + malloc_mutex_lock(tsdn, &background_thread_lock); + err = background_thread_create(tsdn_tsd(tsdn), ind); + malloc_mutex_unlock(tsdn, &background_thread_lock); + if (err) { + malloc_printf("<jemalloc>: error in background thread " + "creation for arena %u. Abort.\n", ind); + abort(); + } + } +} + arena_t * -arenas_extend(unsigned ind) -{ - arena_t *ret; +arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + + malloc_mutex_lock(tsdn, &arenas_lock); + arena = arena_init_locked(tsdn, ind, extent_hooks); + malloc_mutex_unlock(tsdn, &arenas_lock); + + arena_new_create_background_thread(tsdn, ind); + + return arena; +} + +static void +arena_bind(tsd_t *tsd, unsigned ind, bool internal) { + arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_inc(arena, internal); + + if (internal) { + tsd_iarena_set(tsd, arena); + } else { + tsd_arena_set(tsd, arena); + } +} - ret = (arena_t *)base_alloc(sizeof(arena_t)); - if (ret != NULL && arena_new(ret, ind) == false) { - arenas[ind] = ret; - return (ret); +void +arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { + arena_t *oldarena, *newarena; + + oldarena = arena_get(tsd_tsdn(tsd), oldind, false); + newarena = arena_get(tsd_tsdn(tsd), newind, false); + arena_nthreads_dec(oldarena, false); + arena_nthreads_inc(newarena, false); + tsd_arena_set(tsd, newarena); +} + +static void +arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { + arena_t *arena; + + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_dec(arena, internal); + + if (internal) { + tsd_iarena_set(tsd, NULL); + } else { + tsd_arena_set(tsd, NULL); } - /* Only reached if there is an OOM error. 
*/ +} + +arena_tdata_t * +arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { + arena_tdata_t *tdata, *arenas_tdata_old; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + unsigned narenas_tdata_old, i; + unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); + unsigned narenas_actual = narenas_total_get(); /* - * OOM here is quite inconvenient to propagate, since dealing with it - * would require a check for failure in the fast path. Instead, punt - * by using arenas[0]. In practice, this is an extremely unlikely - * failure. + * Dissociate old tdata array (and set up for deallocation upon return) + * if it's too small. */ - malloc_write("<jemalloc>: Error initializing arena\n"); - if (opt_abort) - abort(); + if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { + arenas_tdata_old = arenas_tdata; + narenas_tdata_old = narenas_tdata; + arenas_tdata = NULL; + narenas_tdata = 0; + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } else { + arenas_tdata_old = NULL; + narenas_tdata_old = 0; + } + + /* Allocate tdata array if it's missing. */ + if (arenas_tdata == NULL) { + bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); + narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; - return (arenas[0]); + if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { + *arenas_tdata_bypassp = true; + arenas_tdata = (arena_tdata_t *)a0malloc( + sizeof(arena_tdata_t) * narenas_tdata); + *arenas_tdata_bypassp = false; + } + if (arenas_tdata == NULL) { + tdata = NULL; + goto label_return; + } + assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } + + /* + * Copy to tdata array. It's possible that the actual number of arenas + * has increased since narenas_total_get() was called above, but that + * causes no correctness issues unless two threads concurrently execute + * the arenas.create mallctl, which we trust mallctl synchronization to + * prevent. + */ + + /* Copy/initialize tickers. */ + for (i = 0; i < narenas_actual; i++) { + if (i < narenas_tdata_old) { + ticker_copy(&arenas_tdata[i].decay_ticker, + &arenas_tdata_old[i].decay_ticker); + } else { + ticker_init(&arenas_tdata[i].decay_ticker, + DECAY_NTICKS_PER_UPDATE); + } + } + if (narenas_tdata > narenas_actual) { + memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) + * (narenas_tdata - narenas_actual)); + } + + /* Read the refreshed tdata array. */ + tdata = &arenas_tdata[ind]; +label_return: + if (arenas_tdata_old != NULL) { + a0dalloc(arenas_tdata_old); + } + return tdata; } -/* Slow path, called only by choose_arena(). */ +/* Slow path, called only by arena_choose(). */ arena_t * -choose_arena_hard(void) -{ - arena_t *ret; +arena_choose_hard(tsd_t *tsd, bool internal) { + arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); + + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + unsigned choose = percpu_arena_choose(); + ret = arena_get(tsd_tsdn(tsd), choose, true); + assert(ret != NULL); + arena_bind(tsd, arena_ind_get(ret), false); + arena_bind(tsd, arena_ind_get(ret), true); + + return ret; + } if (narenas_auto > 1) { - unsigned i, choose, first_null; + unsigned i, j, choose[2], first_null; + bool is_new_arena[2]; + + /* + * Determine binding for both non-internal and internal + * allocation. + * + * choose[0]: For application allocation. + * choose[1]: For internal metadata allocation. 
+ */ + + for (j = 0; j < 2; j++) { + choose[j] = 0; + is_new_arena[j] = false; + } - choose = 0; first_null = narenas_auto; - malloc_mutex_lock(&arenas_lock); - assert(arenas[0] != NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); + assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); for (i = 1; i < narenas_auto; i++) { - if (arenas[i] != NULL) { + if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ - if (arenas[i]->nthreads < - arenas[choose]->nthreads) - choose = i; + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get( + tsd_tsdn(tsd), i, false), !!j) < + arena_nthreads_get(arena_get( + tsd_tsdn(tsd), choose[j], false), + !!j)) { + choose[j] = i; + } + } } else if (first_null == narenas_auto) { /* * Record the index of the first uninitialized @@ -174,38 +543,99 @@ choose_arena_hard(void) } } - if (arenas[choose]->nthreads == 0 - || first_null == narenas_auto) { - /* - * Use an unloaded arena, or the least loaded arena if - * all arenas are already initialized. - */ - ret = arenas[choose]; - } else { - /* Initialize a new arena. */ - ret = arenas_extend(first_null); + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), + choose[j], false), !!j) == 0 || first_null == + narenas_auto) { + /* + * Use an unloaded arena, or the least loaded + * arena if all arenas are already initialized. + */ + if (!!j == internal) { + ret = arena_get(tsd_tsdn(tsd), + choose[j], false); + } + } else { + arena_t *arena; + + /* Initialize a new arena. */ + choose[j] = first_null; + arena = arena_init_locked(tsd_tsdn(tsd), + choose[j], + (extent_hooks_t *)&extent_hooks_default); + if (arena == NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), + &arenas_lock); + return NULL; + } + is_new_arena[j] = true; + if (!!j == internal) { + ret = arena; + } + } + arena_bind(tsd, choose[j], !!j); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); + + for (j = 0; j < 2; j++) { + if (is_new_arena[j]) { + assert(choose[j] > 0); + arena_new_create_background_thread( + tsd_tsdn(tsd), choose[j]); + } } - ret->nthreads++; - malloc_mutex_unlock(&arenas_lock); + } else { - ret = arenas[0]; - malloc_mutex_lock(&arenas_lock); - ret->nthreads++; - malloc_mutex_unlock(&arenas_lock); + ret = arena_get(tsd_tsdn(tsd), 0, false); + arena_bind(tsd, 0, false); + arena_bind(tsd, 0, true); + } + + return ret; +} + +void +iarena_cleanup(tsd_t *tsd) { + arena_t *iarena; + + iarena = tsd_iarena_get(tsd); + if (iarena != NULL) { + arena_unbind(tsd, arena_ind_get(iarena), true); } +} - arenas_tsd_set(&ret); +void +arena_cleanup(tsd_t *tsd) { + arena_t *arena; - return (ret); + arena = tsd_arena_get(tsd); + if (arena != NULL) { + arena_unbind(tsd, arena_ind_get(arena), false); + } } -static void -stats_print_atexit(void) -{ +void +arenas_tdata_cleanup(tsd_t *tsd) { + arena_tdata_t *arenas_tdata; - if (config_tcache && config_stats) { + /* Prevent tsd->arenas_tdata from being (re)created. */ + *tsd_arenas_tdata_bypassp_get(tsd) = true; + + arenas_tdata = tsd_arenas_tdata_get(tsd); + if (arenas_tdata != NULL) { + tsd_arenas_tdata_set(tsd, NULL); + a0dalloc(arenas_tdata); + } +} + +static void +stats_print_atexit(void) { + if (config_stats) { + tsdn_t *tsdn; unsigned narenas, i; + tsdn = tsdn_fetch(); + /* * Merge stats from extant threads. This is racy, since * individual threads do not lock when recording tcache stats @@ -214,25 +644,45 @@ stats_print_atexit(void) * continue to allocate. 
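arena_choose_hard() above binds the calling thread to the automatic arena with the fewest bound threads, preferring an idle arena and otherwise initializing the first empty slot, and it now performs that bookkeeping twice (application and internal allocation). Stripped of the locking and the dual bookkeeping, the selection reduces to something like this (made-up arena table; a negative count marks an uninitialized slot, and slot 0 is assumed initialized as in the real code):

#include <stddef.h>

#define NARENAS_AUTO 4

static int
choose_arena(const int nthreads[NARENAS_AUTO]) {
	int choose = 0;
	int first_empty = NARENAS_AUTO;
	for (int i = 1; i < NARENAS_AUTO; i++) {
		if (nthreads[i] >= 0) {
			if (nthreads[i] < nthreads[choose]) {
				choose = i;	/* Least-loaded initialized arena. */
			}
		} else if (first_empty == NARENAS_AUTO) {
			first_empty = i;	/* First uninitialized slot. */
		}
	}
	/*
	 * Prefer an arena with no threads bound to it; otherwise initialize a
	 * fresh one rather than sharing an already-loaded arena.
	 */
	if (nthreads[choose] == 0 || first_empty == NARENAS_AUTO) {
		return choose;
	}
	return first_empty;	/* Caller would arena_init() this slot. */
}

int
main(void) {
	int nthreads[NARENAS_AUTO] = {2, 1, -1, -1};
	return choose_arena(nthreads) == 2 ? 0 : 1;	/* picks the empty slot */
}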
*/ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arenas[i]; + arena_t *arena = arena_get(tsdn, i, false); if (arena != NULL) { tcache_t *tcache; - /* - * tcache_stats_merge() locks bins, so if any - * code is introduced that acquires both arena - * and bin locks in the opposite order, - * deadlocks may result. - */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tcache, arena); + tcache_stats_merge(tsdn, tcache, arena); } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, + &arena->tcache_ql_mtx); } } } - je_malloc_stats_print(NULL, NULL, NULL); + je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); +} + +/* + * Ensure that we don't hold any locks upon entry to or exit from allocator + * code (in a "broad" sense that doesn't count a reentrant allocation as an + * entrance or exit). + */ +JEMALLOC_ALWAYS_INLINE void +check_entry_exit_locking(tsdn_t *tsdn) { + if (!config_debug) { + return; + } + if (tsdn_null(tsdn)) { + return; + } + tsd_t *tsd = tsdn_tsd(tsdn); + /* + * It's possible we hold locks at entry/exit if we're in a nested + * allocation. + */ + int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); + if (reentrancy_level != 0) { + return; + } + witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); } /* @@ -243,69 +693,82 @@ stats_print_atexit(void) * Begin initialization functions. */ +static char * +jemalloc_secure_getenv(const char *name) { +#ifdef JEMALLOC_HAVE_SECURE_GETENV + return secure_getenv(name); +#else +# ifdef JEMALLOC_HAVE_ISSETUGID + if (issetugid() != 0) { + return NULL; + } +# endif + return getenv(name); +#endif +} + static unsigned -malloc_ncpus(void) -{ +malloc_ncpus(void) { long result; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwNumberOfProcessors; +#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) + /* + * glibc >= 2.6 has the CPU_COUNT macro. + * + * glibc's sysconf() uses isspace(). glibc allocates for the first time + * *before* setting up the isspace tables. Therefore we need a + * different method to get the number of CPUs. + */ + { + cpu_set_t set; + + pthread_getaffinity_np(pthread_self(), sizeof(set), &set); + result = CPU_COUNT(&set); + } #else result = sysconf(_SC_NPROCESSORS_ONLN); #endif return ((result == -1) ? 1 : (unsigned)result); } -void -arenas_cleanup(void *arg) -{ - arena_t *arena = *(arena_t **)arg; - - malloc_mutex_lock(&arenas_lock); - arena->nthreads--; - malloc_mutex_unlock(&arenas_lock); -} - -JEMALLOC_ALWAYS_INLINE_C void -malloc_thread_init(void) -{ - - /* - * TSD initialization can't be safely done as a side effect of - * deallocation, because it is possible for a thread to do nothing but - * deallocate its TLS data via free(), in which case writing to TLS - * would cause write-after-free memory corruption. The quarantine - * facility *only* gets used as a side effect of deallocation, so make - * a best effort attempt at initializing its TSD by hooking all - * allocation events. 
- */ - if (config_fill && opt_quarantine) - quarantine_alloc_hook(); -} - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_init(void) -{ +static void +init_opt_stats_print_opts(const char *v, size_t vlen) { + size_t opts_len = strlen(opt_stats_print_opts); + assert(opts_len <= stats_print_tot_num_options); + + for (size_t i = 0; i < vlen; i++) { + switch (v[i]) { +#define OPTION(o, v, d, s) case o: break; + STATS_PRINT_OPTIONS +#undef OPTION + default: continue; + } - if (malloc_initialized == false && malloc_init_hard()) - return (true); - malloc_thread_init(); + if (strchr(opt_stats_print_opts, v[i]) != NULL) { + /* Ignore repeated. */ + continue; + } - return (false); + opt_stats_print_opts[opts_len++] = v[i]; + opt_stats_print_opts[opts_len] = '\0'; + assert(opts_len <= stats_print_tot_num_options); + } + assert(opts_len == strlen(opt_stats_print_opts)); } static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, - char const **v_p, size_t *vlen_p) -{ + char const **v_p, size_t *vlen_p) { bool accept; const char *opts = *opts_p; *k_p = opts; - for (accept = false; accept == false;) { + for (accept = false; !accept;) { switch (*opts) { case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': @@ -333,14 +796,14 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, malloc_write("<jemalloc>: Conf string ends " "with key\n"); } - return (true); + return true; default: malloc_write("<jemalloc>: Malformed conf string\n"); - return (true); + return true; } } - for (accept = false; accept == false;) { + for (accept = false; !accept;) { switch (*opts) { case ',': opts++; @@ -369,46 +832,57 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, } *opts_p = opts; - return (false); + return false; } static void -malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, - size_t vlen) -{ +malloc_abort_invalid_conf(void) { + assert(opt_abort_conf); + malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf " + "value (see above).\n"); + abort(); +} +static void +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, + size_t vlen) { malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, (int)vlen, v); + had_conf_error = true; + if (opt_abort_conf) { + malloc_abort_invalid_conf(); + } } static void -malloc_conf_init(void) -{ +malloc_slow_flag_init(void) { + /* + * Combine the runtime options into malloc_slow for fast path. Called + * after processing all the options. + */ + malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) + | (opt_junk_free ? flag_opt_junk_free : 0) + | (opt_zero ? flag_opt_zero : 0) + | (opt_utrace ? flag_opt_utrace : 0) + | (opt_xmalloc ? flag_opt_xmalloc : 0); + + malloc_slow = (malloc_slow_flags != 0); +} + +static void +malloc_conf_init(void) { unsigned i; char buf[PATH_MAX + 1]; const char *opts, *k, *v; size_t klen, vlen; - /* - * Automatically configure valgrind before processing options. The - * valgrind option remains in jemalloc 3.x for compatibility reasons. - */ - if (config_valgrind) { - opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; - if (config_fill && opt_valgrind) { - opt_junk = false; - assert(opt_zero == false); - opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; - opt_redzone = true; - } - if (config_tcache && opt_valgrind) - opt_tcache = false; - } - - for (i = 0; i < 3; i++) { + for (i = 0; i < 4; i++) { /* Get runtime configuration. 
*/ switch (i) { case 0: + opts = config_malloc_conf; + break; + case 1: if (je_malloc_conf != NULL) { /* * Use options that were compiled into the @@ -421,8 +895,8 @@ malloc_conf_init(void) opts = buf; } break; - case 1: { - int linklen = 0; + case 2: { + ssize_t linklen = 0; #ifndef _WIN32 int saved_errno = errno; const char *linkname = @@ -441,14 +915,14 @@ malloc_conf_init(void) if (linklen == -1) { /* No configuration specified. */ linklen = 0; - /* restore errno */ + /* Restore errno. */ set_errno(saved_errno); } #endif buf[linklen] = '\0'; opts = buf; break; - } case 2: { + } case 3: { const char *envname = #ifdef JEMALLOC_PREFIX JEMALLOC_CPREFIX"MALLOC_CONF" @@ -457,7 +931,7 @@ malloc_conf_init(void) #endif ; - if ((opts = getenv(envname)) != NULL) { + if ((opts = jemalloc_secure_getenv(envname)) != NULL) { /* * Do nothing; opts is already initialized to * the value of the MALLOC_CONF environment @@ -475,27 +949,31 @@ malloc_conf_init(void) opts = buf; } - while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, - &vlen) == false) { -#define CONF_HANDLE_BOOL(o, n) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - if (strncmp("true", v, vlen) == 0 && \ - vlen == sizeof("true")-1) \ + while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, + &vlen)) { +#define CONF_MATCH(n) \ + (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) +#define CONF_MATCH_VALUE(n) \ + (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) +#define CONF_HANDLE_BOOL(o, n) \ + if (CONF_MATCH(n)) { \ + if (CONF_MATCH_VALUE("true")) { \ o = true; \ - else if (strncmp("false", v, vlen) == \ - 0 && vlen == sizeof("false")-1) \ + } else if (CONF_MATCH_VALUE("false")) { \ o = false; \ - else { \ + } else { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } \ continue; \ } -#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ +#define CONF_MIN_no(um, min) false +#define CONF_MIN_yes(um, min) ((um) < (min)) +#define CONF_MAX_no(um, max) false +#define CONF_MAX_yes(um, max) ((um) > (max)) +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ + if (CONF_MATCH(n)) { \ uintmax_t um; \ char *end; \ \ @@ -507,27 +985,40 @@ malloc_conf_init(void) "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ - if (min != 0 && um < min) \ - o = min; \ - else if (um > max) \ - o = max; \ - else \ - o = um; \ + if (CONF_MIN_##check_min(um, \ + (t)(min))) { \ + o = (t)(min); \ + } else if ( \ + CONF_MAX_##check_max(um, \ + (t)(max))) { \ + o = (t)(max); \ + } else { \ + o = (t)um; \ + } \ } else { \ - if ((min != 0 && um < min) || \ - um > max) { \ + if (CONF_MIN_##check_min(um, \ + (t)(min)) || \ + CONF_MAX_##check_max(um, \ + (t)(max))) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ - } else \ - o = um; \ + } else { \ + o = (t)um; \ + } \ } \ continue; \ } -#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ +#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ + clip) \ + CONF_HANDLE_T_U(unsigned, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(size_t, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ + if (CONF_MATCH(n)) { \ long l; \ char *end; \ \ @@ -538,18 +1029,18 @@ malloc_conf_init(void) malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ - } 
else if (l < (ssize_t)min || l > \ - (ssize_t)max) { \ + } else if (l < (ssize_t)(min) || l > \ + (ssize_t)(max)) { \ malloc_conf_error( \ "Out-of-range conf value", \ k, klen, v, vlen); \ - } else \ + } else { \ o = l; \ + } \ continue; \ } -#define CONF_HANDLE_CHAR_P(o, n, d) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ +#define CONF_HANDLE_CHAR_P(o, n, d) \ + if (CONF_MATCH(n)) { \ size_t cpylen = (vlen <= \ sizeof(o)-1) ? vlen : \ sizeof(o)-1; \ @@ -559,23 +1050,18 @@ malloc_conf_init(void) } CONF_HANDLE_BOOL(opt_abort, "abort") - /* - * Chunks always require at least one header page, plus - * one data page in the absence of redzones, or three - * pages in the presence of redzones. In order to - * simplify options processing, fix the limit based on - * config_fill. - */ - CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + - (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1, - true) + CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") + if (opt_abort_conf && had_conf_error) { + malloc_abort_invalid_conf(); + } + CONF_HANDLE_BOOL(opt_retain, "retain") if (strncmp("dss", k, klen) == 0) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strncmp(dss_prec_names[i], v, vlen) == 0) { - if (chunk_dss_prec_set(i)) { + if (extent_dss_prec_set(i)) { malloc_conf_error( "Error setting dss", k, klen, v, vlen); @@ -587,47 +1073,98 @@ malloc_conf_init(void) } } } - if (match == false) { + if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } - CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, - SIZE_T_MAX, false) - CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", - -1, (sizeof(size_t) << 3) - 1) + CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, + UINT_MAX, yes, no, false) + CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, + "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); + CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, + "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? 
NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); CONF_HANDLE_BOOL(opt_stats_print, "stats_print") + if (CONF_MATCH("stats_print_opts")) { + init_opt_stats_print_opts(v, vlen); + continue; + } if (config_fill) { - CONF_HANDLE_BOOL(opt_junk, "junk") - CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", - 0, SIZE_T_MAX, false) - CONF_HANDLE_BOOL(opt_redzone, "redzone") + if (CONF_MATCH("junk")) { + if (CONF_MATCH_VALUE("true")) { + opt_junk = "true"; + opt_junk_alloc = opt_junk_free = + true; + } else if (CONF_MATCH_VALUE("false")) { + opt_junk = "false"; + opt_junk_alloc = opt_junk_free = + false; + } else if (CONF_MATCH_VALUE("alloc")) { + opt_junk = "alloc"; + opt_junk_alloc = true; + opt_junk_free = false; + } else if (CONF_MATCH_VALUE("free")) { + opt_junk = "free"; + opt_junk_alloc = false; + opt_junk_free = true; + } else { + malloc_conf_error( + "Invalid conf value", k, + klen, v, vlen); + } + continue; + } CONF_HANDLE_BOOL(opt_zero, "zero") } if (config_utrace) { CONF_HANDLE_BOOL(opt_utrace, "utrace") } - if (config_valgrind) { - CONF_HANDLE_BOOL(opt_valgrind, "valgrind") - } if (config_xmalloc) { CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") } - if (config_tcache) { - CONF_HANDLE_BOOL(opt_tcache, "tcache") - CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, - "lg_tcache_max", -1, - (sizeof(size_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_tcache, "tcache") + CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", + -1, (sizeof(size_t) << 3) - 1) + if (strncmp("percpu_arena", k, klen) == 0) { + int i; + bool match = false; + for (i = percpu_arena_mode_names_base; i < + percpu_arena_mode_names_limit; i++) { + if (strncmp(percpu_arena_mode_names[i], + v, vlen) == 0) { + if (!have_percpu_arena) { + malloc_conf_error( + "No getcpu support", + k, klen, v, vlen); + } + opt_percpu_arena = i; + match = true; + break; + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; } + CONF_HANDLE_BOOL(opt_background_thread, + "background_thread"); if (config_prof) { CONF_HANDLE_BOOL(opt_prof, "prof") CONF_HANDLE_CHAR_P(opt_prof_prefix, "prof_prefix", "jeprof") CONF_HANDLE_BOOL(opt_prof_active, "prof_active") - CONF_HANDLE_SSIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, - (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_prof_thread_active_init, + "prof_thread_active_init") + CONF_HANDLE_SIZE_T(opt_lg_prof_sample, + "lg_prof_sample", 0, (sizeof(uint64_t) << 3) + - 1, no, yes, true) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, "lg_prof_interval", -1, @@ -638,7 +1175,15 @@ malloc_conf_init(void) } malloc_conf_error("Invalid conf pair", k, klen, v, vlen); +#undef CONF_MATCH +#undef CONF_MATCH_VALUE #undef CONF_HANDLE_BOOL +#undef CONF_MIN_no +#undef CONF_MIN_yes +#undef CONF_MAX_no +#undef CONF_MAX_yes +#undef CONF_HANDLE_T_U +#undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P @@ -647,195 +1192,325 @@ malloc_conf_init(void) } static bool -malloc_init_hard(void) -{ - arena_t *init_arenas[1]; - - malloc_mutex_lock(&init_lock); - if (malloc_initialized || IS_INITIALIZER) { +malloc_init_hard_needed(void) { + if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == + malloc_init_recursible)) { /* * Another thread initialized the allocator before this one * acquired init_lock, or this thread is the initializing * thread, and it is recursively allocating. 
*/ - malloc_mutex_unlock(&init_lock); - return (false); + return false; } #ifdef JEMALLOC_THREADED_INIT - if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) { + if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { /* Busy-wait until the initializing thread completes. */ + spin_t spinner = SPIN_INITIALIZER; do { - malloc_mutex_unlock(&init_lock); - CPU_SPINWAIT; - malloc_mutex_lock(&init_lock); - } while (malloc_initialized == false); - malloc_mutex_unlock(&init_lock); - return (false); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + spin_adaptive(&spinner); + malloc_mutex_lock(TSDN_NULL, &init_lock); + } while (!malloc_initialized()); + return false; } #endif + return true; +} + +static bool +malloc_init_hard_a0_locked() { malloc_initializer = INITIALIZER; - malloc_tsd_boot(); - if (config_prof) + if (config_prof) { prof_boot0(); - + } malloc_conf_init(); - if (opt_stats_print) { /* Print statistics at exit. */ if (atexit(stats_print_atexit) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } } - - if (base_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + if (pages_boot()) { + return true; } - - if (chunk_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + if (base_boot(TSDN_NULL)) { + return true; + } + if (extent_boot()) { + return true; } - if (ctl_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + return true; } - - if (config_prof) + if (config_prof) { prof_boot1(); - - arena_boot(); - - if (config_tcache && tcache_boot0()) { - malloc_mutex_unlock(&init_lock); - return (true); } - - if (huge_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + arena_boot(); + if (tcache_boot(TSDN_NULL)) { + return true; } - - if (malloc_mutex_init(&arenas_lock)) { - malloc_mutex_unlock(&init_lock); - return (true); + if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, + malloc_mutex_rank_exclusive)) { + return true; } - /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). */ - narenas_total = narenas_auto = 1; - arenas = init_arenas; + narenas_auto = 1; memset(arenas, 0, sizeof(arena_t *) * narenas_auto); - /* * Initialize one arena here. The rest are lazily created in - * choose_arena_hard(). + * arena_choose_hard(). */ - arenas_extend(0); - if (arenas[0] == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - /* Initialize allocation counters before any allocations can occur. */ - if (config_stats && thread_allocated_tsd_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) + == NULL) { + return true; } + a0 = arena_get(TSDN_NULL, 0, false); + malloc_init_state = malloc_init_a0_initialized; - if (arenas_tsd_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_tcache && tcache_boot1()) { - malloc_mutex_unlock(&init_lock); - return (true); - } + return false; +} - if (config_fill && quarantine_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } +static bool +malloc_init_hard_a0(void) { + bool ret; - if (config_prof && prof_boot2()) { - malloc_mutex_unlock(&init_lock); - return (true); - } + malloc_mutex_lock(TSDN_NULL, &init_lock); + ret = malloc_init_hard_a0_locked(); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + return ret; +} - malloc_mutex_unlock(&init_lock); - /**********************************************************************/ - /* Recursive allocation may follow. 
*/ +/* Initialize data structures which may trigger recursive allocation. */ +static bool +malloc_init_hard_recursible(void) { + malloc_init_state = malloc_init_recursible; ncpus = malloc_ncpus(); -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ - && !defined(_WIN32)) - /* LinuxThreads's pthread_atfork() allocates. */ +#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ + && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ + !defined(__native_client__)) + /* LinuxThreads' pthread_atfork() allocates. */ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { malloc_write("<jemalloc>: Error in pthread_atfork()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } + return true; } #endif - /* Done recursively allocating. */ - /**********************************************************************/ - malloc_mutex_lock(&init_lock); + if (background_thread_boot0()) { + return true; + } + + return false; +} + +static unsigned +malloc_narenas_default(void) { + assert(ncpus > 0); + /* + * For SMP systems, create more than one arena per CPU by + * default. + */ + if (ncpus > 1) { + return ncpus << 2; + } else { + return 1; + } +} + +static percpu_arena_mode_t +percpu_arena_as_initialized(percpu_arena_mode_t mode) { + assert(!malloc_initialized()); + assert(mode <= percpu_arena_disabled); - if (mutex_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + if (mode != percpu_arena_disabled) { + mode += percpu_arena_mode_enabled_base; } + return mode; +} + +static bool +malloc_init_narenas(void) { + assert(ncpus > 0); + + if (opt_percpu_arena != percpu_arena_disabled) { + if (!have_percpu_arena || malloc_getcpu() < 0) { + opt_percpu_arena = percpu_arena_disabled; + malloc_printf("<jemalloc>: perCPU arena getcpu() not " + "available. Setting narenas to %u.\n", opt_narenas ? + opt_narenas : malloc_narenas_default()); + if (opt_abort) { + abort(); + } + } else { + if (ncpus >= MALLOCX_ARENA_LIMIT) { + malloc_printf("<jemalloc>: narenas w/ percpu" + "arena beyond limit (%d)\n", ncpus); + if (opt_abort) { + abort(); + } + return true; + } + /* NB: opt_percpu_arena isn't fully initialized yet. */ + if (percpu_arena_as_initialized(opt_percpu_arena) == + per_phycpu_arena && ncpus % 2 != 0) { + malloc_printf("<jemalloc>: invalid " + "configuration -- per physical CPU arena " + "with odd number (%u) of CPUs (no hyper " + "threading?).\n", ncpus); + if (opt_abort) + abort(); + } + unsigned n = percpu_arena_ind_limit( + percpu_arena_as_initialized(opt_percpu_arena)); + if (opt_narenas < n) { + /* + * If narenas is specified with percpu_arena + * enabled, actual narenas is set as the greater + * of the two. percpu_arena_choose will be free + * to use any of the arenas based on CPU + * id. This is conservative (at a small cost) + * but ensures correctness. + * + * If for some reason the ncpus determined at + * boot is not the actual number (e.g. because + * of affinity setting from numactl), reserving + * narenas this way provides a workaround for + * percpu_arena. + */ + opt_narenas = n; + } + } + } if (opt_narenas == 0) { - /* - * For SMP systems, create more than one arena per CPU by - * default. - */ - if (ncpus > 1) - opt_narenas = ncpus << 2; - else - opt_narenas = 1; + opt_narenas = malloc_narenas_default(); } + assert(opt_narenas > 0); + narenas_auto = opt_narenas; /* - * Make sure that the arenas array can be allocated. 
In practice, this - * limit is enough to allow the allocator to function, but the ctl - * machinery will fail to allocate memory at far lower limits. + * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). */ - if (narenas_auto > chunksize / sizeof(arena_t *)) { - narenas_auto = chunksize / sizeof(arena_t *); + if (narenas_auto >= MALLOCX_ARENA_LIMIT) { + narenas_auto = MALLOCX_ARENA_LIMIT - 1; malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", narenas_auto); } - narenas_total = narenas_auto; + narenas_total_set(narenas_auto); - /* Allocate and initialize arenas. */ - arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); - if (arenas == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); + return false; +} + +static void +malloc_init_percpu(void) { + opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); +} + +static bool +malloc_init_hard_finish(void) { + if (malloc_mutex_boot()) { + return true; + } + + malloc_init_state = malloc_init_initialized; + malloc_slow_flag_init(); + + return false; +} + +static void +malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { + malloc_mutex_assert_owner(tsdn, &init_lock); + malloc_mutex_unlock(tsdn, &init_lock); + if (reentrancy_set) { + assert(!tsdn_null(tsdn)); + tsd_t *tsd = tsdn_tsd(tsdn); + assert(tsd_reentrancy_level_get(tsd) > 0); + post_reentrancy(tsd); + } +} + +static bool +malloc_init_hard(void) { + tsd_t *tsd; + +#if defined(_WIN32) && _WIN32_WINNT < 0x0600 + _init_init_lock(); +#endif + malloc_mutex_lock(TSDN_NULL, &init_lock); + +#define UNLOCK_RETURN(tsdn, ret, reentrancy) \ + malloc_init_hard_cleanup(tsdn, reentrancy); \ + return ret; + + if (!malloc_init_hard_needed()) { + UNLOCK_RETURN(TSDN_NULL, false, false) + } + + if (malloc_init_state != malloc_init_a0_initialized && + malloc_init_hard_a0_locked()) { + UNLOCK_RETURN(TSDN_NULL, true, false) + } + + malloc_mutex_unlock(TSDN_NULL, &init_lock); + /* Recursive allocation relies on functional tsd. */ + tsd = malloc_tsd_boot0(); + if (tsd == NULL) { + return true; + } + if (malloc_init_hard_recursible()) { + return true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); + /* Set reentrancy level to 1 during init. */ + pre_reentrancy(tsd, NULL); + /* Initialize narenas before prof_boot2 (for allocation). */ + if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + if (config_prof && prof_boot2(tsd)) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } - /* - * Zero the array. In practice, this should always be pre-zeroed, - * since it was just mmap()ed, but let's be sure. - */ - memset(arenas, 0, sizeof(arena_t *) * narenas_total); - /* Copy the pointer to the one arena that was already initialized. */ - arenas[0] = init_arenas[0]; - malloc_initialized = true; - malloc_mutex_unlock(&init_lock); + malloc_init_percpu(); - return (false); + if (malloc_init_hard_finish()) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + post_reentrancy(tsd); + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); + + malloc_tsd_boot1(); + /* Update TSD after tsd_boot1. */ + tsd = tsd_fetch(); + if (opt_background_thread) { + assert(have_background_thread); + /* + * Need to finish init & unlock first before creating background + * threads (pthread_create depends on malloc). 
+ */ + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + bool err = background_thread_create(tsd, 0); + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + if (err) { + return true; + } + } +#undef UNLOCK_RETURN + return false; } /* @@ -843,469 +1518,779 @@ malloc_init_hard(void) */ /******************************************************************************/ /* - * Begin malloc(3)-compatible functions. + * Begin allocation-path internal functions and data structures. */ -static void * -imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = imalloc(SMALL_MAXCLASS+1); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imalloc(usize); +/* + * Settings determined by the documented behavior of the allocation functions. + */ +typedef struct static_opts_s static_opts_t; +struct static_opts_s { + /* Whether or not allocation size may overflow. */ + bool may_overflow; + /* Whether or not allocations of size 0 should be treated as size 1. */ + bool bump_empty_alloc; + /* + * Whether to assert that allocations are not of size 0 (after any + * bumping). + */ + bool assert_nonempty_alloc; - return (p); -} + /* + * Whether or not to modify the 'result' argument to malloc in case of + * error. + */ + bool null_out_result_on_error; + /* Whether to set errno when we encounter an error condition. */ + bool set_errno_on_error; -JEMALLOC_ALWAYS_INLINE_C void * -imalloc_prof(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; + /* + * The minimum valid alignment for functions requesting aligned storage. + */ + size_t min_alignment; - if ((uintptr_t)cnt != (uintptr_t)1U) - p = imalloc_prof_sample(usize, cnt); - else - p = imalloc(usize); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); + /* The error string to use if we oom. */ + const char *oom_string; + /* The error string to use if the passed-in alignment is invalid. */ + const char *invalid_alignment_string; - return (p); + /* + * False if we're configured to skip some time-consuming operations. + * + * This isn't really a malloc "behavior", but it acts as a useful + * summary of several other static (or at least, static after program + * initialization) options. + */ + bool slow; +}; + +JEMALLOC_ALWAYS_INLINE void +static_opts_init(static_opts_t *static_opts) { + static_opts->may_overflow = false; + static_opts->bump_empty_alloc = false; + static_opts->assert_nonempty_alloc = false; + static_opts->null_out_result_on_error = false; + static_opts->set_errno_on_error = false; + static_opts->min_alignment = 0; + static_opts->oom_string = ""; + static_opts->invalid_alignment_string = ""; + static_opts->slow = false; } /* - * MALLOC_BODY() is a macro rather than a function because its contents are in - * the fast path, but inlining would cause reliability issues when determining - * how many frames to discard from heap profiling backtraces. + * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we + * should have one constant here per magic value there. Note however that the + * representations need not be related. 
*/ -#define MALLOC_BODY(ret, size, usize) do { \ - if (malloc_init()) \ - ret = NULL; \ - else { \ - if (config_prof && opt_prof) { \ - prof_thr_cnt_t *cnt; \ - \ - usize = s2u(size); \ - /* \ - * Call PROF_ALLOC_PREP() here rather than in \ - * imalloc_prof() so that imalloc_prof() can be \ - * inlined without introducing uncertainty \ - * about the number of backtrace frames to \ - * ignore. imalloc_prof() is in the fast path \ - * when heap profiling is enabled, so inlining \ - * is critical to performance. (For \ - * consistency all callers of PROF_ALLOC_PREP() \ - * are structured similarly, even though e.g. \ - * realloc() isn't called enough for inlining \ - * to be critical.) \ - */ \ - PROF_ALLOC_PREP(1, usize, cnt); \ - ret = imalloc_prof(usize, cnt); \ - } else { \ - if (config_stats || (config_valgrind && \ - opt_valgrind)) \ - usize = s2u(size); \ - ret = imalloc(size); \ - } \ - } \ -} while (0) - -void * -je_malloc(size_t size) -{ - void *ret; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - if (size == 0) - size = 1; +#define TCACHE_IND_NONE ((unsigned)-1) +#define TCACHE_IND_AUTOMATIC ((unsigned)-2) +#define ARENA_IND_AUTOMATIC ((unsigned)-1) + +typedef struct dynamic_opts_s dynamic_opts_t; +struct dynamic_opts_s { + void **result; + size_t num_items; + size_t item_size; + size_t alignment; + bool zero; + unsigned tcache_ind; + unsigned arena_ind; +}; + +JEMALLOC_ALWAYS_INLINE void +dynamic_opts_init(dynamic_opts_t *dynamic_opts) { + dynamic_opts->result = NULL; + dynamic_opts->num_items = 0; + dynamic_opts->item_size = 0; + dynamic_opts->alignment = 0; + dynamic_opts->zero = false; + dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; + dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; +} - MALLOC_BODY(ret, size, usize); +/* ind is ignored if dopts->alignment > 0. */ +JEMALLOC_ALWAYS_INLINE void * +imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t size, size_t usize, szind_t ind) { + tcache_t *tcache; + arena_t *arena; - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in malloc(): " - "out of memory\n"); - abort(); + /* Fill in the tcache. */ + if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { + if (likely(!sopts->slow)) { + /* Getting tcache ptr unconditionally. */ + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + tcache = tcache_get(tsd); } - set_errno(ENOMEM); - } - if (config_stats && ret != NULL) { - assert(usize == isalloc(ret, config_prof)); - thread_allocated_tsd_get()->allocated += usize; + } else if (dopts->tcache_ind == TCACHE_IND_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, dopts->tcache_ind); } - UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); - return (ret); -} -static void * -imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; + /* Fill in the arena. */ + if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { + /* + * In case of automatic arena management, we defer arena + * computation until as late as we can, hoping to fill the + * allocation out of the tcache. 
+ */ + arena = NULL; + } else { + arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); + } - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0); - p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment, - false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = ipalloc(usize, alignment, false); + if (unlikely(dopts->alignment != 0)) { + return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, + dopts->zero, tcache, arena); + } - return (p); + return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, + arena, sopts->slow); } -JEMALLOC_ALWAYS_INLINE_C void * -imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; +JEMALLOC_ALWAYS_INLINE void * +imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t usize, szind_t ind) { + void *ret; - if ((uintptr_t)cnt != (uintptr_t)1U) - p = imemalign_prof_sample(alignment, usize, cnt); - else - p = ipalloc(usize, alignment, false); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); + /* + * For small allocations, sampling bumps the usize. If so, we allocate + * from the ind_large bucket. + */ + szind_t ind_large; + size_t bumped_usize = usize; + + if (usize <= SMALL_MAXCLASS) { + assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : + sz_sa2u(LARGE_MINCLASS, dopts->alignment)) + == LARGE_MINCLASS); + ind_large = sz_size2index(LARGE_MINCLASS); + bumped_usize = sz_s2u(LARGE_MINCLASS); + ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, + bumped_usize, ind_large); + if (unlikely(ret == NULL)) { + return NULL; + } + arena_prof_promote(tsd_tsdn(tsd), ret, usize); + } else { + ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); + } - return (p); + return ret; } -JEMALLOC_ATTR(nonnull(1)) -#ifdef JEMALLOC_PROF /* - * Avoid any uncertainty as to how many backtrace frames to ignore in - * PROF_ALLOC_PREP(). + * Returns true if the allocation will overflow, and false otherwise. Sets + * *size to the product either way. */ -JEMALLOC_NOINLINE -#endif -static int -imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) -{ - int ret; - size_t usize; - void *result; +JEMALLOC_ALWAYS_INLINE bool +compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, + size_t *size) { + /* + * This function is just num_items * item_size, except that we may have + * to check for overflow. + */ - assert(min_alignment != 0); + if (!may_overflow) { + assert(dopts->num_items == 1); + *size = dopts->item_size; + return false; + } + + /* A size_t with its high-half bits all set to 1. */ + const static size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); + + *size = dopts->item_size * dopts->num_items; + + if (unlikely(*size == 0)) { + return (dopts->num_items != 0 && dopts->item_size != 0); + } + + /* + * We got a non-zero size, but we don't know if we overflowed to get + * there. To avoid having to do a divide, we'll be clever and note that + * if both A and B can be represented in N/2 bits, then their product + * can be represented in N bits (without the possibility of overflow). + */ + if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { + return false; + } + if (likely(*size / dopts->item_size == dopts->num_items)) { + return false; + } + return true; +} + +JEMALLOC_ALWAYS_INLINE int +imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { + /* Where the actual allocated memory will live. 
*/ + void *allocation = NULL; + /* Filled in by compute_size_with_overflow below. */ + size_t size = 0; + /* + * For unaligned allocations, we need only ind. For aligned + * allocations, or in case of stats or profiling we need usize. + * + * These are actually dead stores, in that their values are reset before + * any branch on their value is taken. Sometimes though, it's + * convenient to pass them as arguments before this point. To avoid + * undefined behavior then, we initialize them with dummy stores. + */ + szind_t ind = 0; + size_t usize = 0; - if (malloc_init()) { - result = NULL; + /* Reentrancy is only checked on slow path. */ + int8_t reentrancy_level; + + /* Compute the amount of memory the user wants. */ + if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, + &size))) { goto label_oom; - } else { - if (size == 0) - size = 1; + } - /* Make sure that alignment is a large enough power of 2. */ - if (((alignment - 1) & alignment) != 0 - || (alignment < min_alignment)) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error allocating " - "aligned memory: invalid alignment\n"); - abort(); - } - result = NULL; - ret = EINVAL; - goto label_return; + /* Validate the user input. */ + if (sopts->bump_empty_alloc) { + if (unlikely(size == 0)) { + size = 1; } + } + + if (sopts->assert_nonempty_alloc) { + assert (size != 0); + } + + if (unlikely(dopts->alignment < sopts->min_alignment + || (dopts->alignment & (dopts->alignment - 1)) != 0)) { + goto label_invalid_alignment; + } + + /* This is the beginning of the "core" algorithm. */ - usize = sa2u(size, alignment); - if (usize == 0) { - result = NULL; + if (dopts->alignment == 0) { + ind = sz_size2index(size); + if (unlikely(ind >= NSIZES)) { goto label_oom; } + if (config_stats || (config_prof && opt_prof)) { + usize = sz_index2size(ind); + assert(usize > 0 && usize <= LARGE_MAXCLASS); + } + } else { + usize = sz_sa2u(size, dopts->alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + goto label_oom; + } + } - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; + check_entry_exit_locking(tsd_tsdn(tsd)); + + /* + * If we need to handle reentrancy, we can do it out of a + * known-initialized arena (i.e. arena 0). + */ + reentrancy_level = tsd_reentrancy_level_get(tsd); + if (sopts->slow && unlikely(reentrancy_level > 0)) { + /* + * We should never specify particular arenas or tcaches from + * within our internal allocations. + */ + assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || + dopts->tcache_ind == TCACHE_IND_NONE); + assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); + dopts->tcache_ind = TCACHE_IND_NONE; + /* We know that arena 0 has already been initialized. */ + dopts->arena_ind = 0; + } + + /* If profiling is on, get our profiling context. */ + if (config_prof && opt_prof) { + /* + * Note that if we're going down this path, usize must have been + * initialized in the previous if statement. + */ + prof_tctx_t *tctx = prof_alloc_prep( + tsd, usize, prof_active_get_unlocked(), true); + + alloc_ctx_t alloc_ctx; + if (likely((uintptr_t)tctx == (uintptr_t)1U)) { + alloc_ctx.slab = (usize <= SMALL_MAXCLASS); + allocation = imalloc_no_sample( + sopts, dopts, tsd, usize, usize, ind); + } else if ((uintptr_t)tctx > (uintptr_t)1U) { + /* + * Note that ind might still be 0 here. This is fine; + * imalloc_sample ignores ind if dopts->alignment > 0. 
+ */ + allocation = imalloc_sample( + sopts, dopts, tsd, usize, ind); + alloc_ctx.slab = false; + } else { + allocation = NULL; + } - PROF_ALLOC_PREP(2, usize, cnt); - result = imemalign_prof(alignment, usize, cnt); - } else - result = ipalloc(usize, alignment, false); - if (result == NULL) + if (unlikely(allocation == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + goto label_oom; + } + prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); + } else { + /* + * If dopts->alignment > 0, then ind is still 0, but usize was + * computed in the previous if statement. Down the positive + * alignment path, imalloc_no_sample ignores ind and size + * (relying only on usize). + */ + allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, + ind); + if (unlikely(allocation == NULL)) { goto label_oom; + } } - *memptr = result; - ret = 0; -label_return: - if (config_stats && result != NULL) { - assert(usize == isalloc(result, config_prof)); - thread_allocated_tsd_get()->allocated += usize; + /* + * Allocation has been done at this point. We still have some + * post-allocation work to do though. + */ + assert(dopts->alignment == 0 + || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); + + if (config_stats) { + assert(usize == isalloc(tsd_tsdn(tsd), allocation)); + *tsd_thread_allocatedp_get(tsd) += usize; } - UTRACE(0, size, result); - return (ret); + + if (sopts->slow) { + UTRACE(0, size, allocation); + } + + /* Success! */ + check_entry_exit_locking(tsd_tsdn(tsd)); + *dopts->result = allocation; + return 0; + label_oom: - assert(result == NULL); - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error allocating aligned memory: " - "out of memory\n"); + if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); abort(); } - ret = ENOMEM; - goto label_return; -} -int -je_posix_memalign(void **memptr, size_t alignment, size_t size) -{ - int ret = imemalign(memptr, alignment, size, sizeof(void *)); - JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, - config_prof), false); - return (ret); + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->set_errno_on_error) { + set_errno(ENOMEM); + } + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return ENOMEM; + + /* + * This label is only jumped to by one goto; we move it out of line + * anyways to avoid obscuring the non-error paths, and for symmetry with + * the oom case. + */ +label_invalid_alignment: + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->invalid_alignment_string); + abort(); + } + + if (sopts->set_errno_on_error) { + set_errno(EINVAL); + } + + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return EINVAL; } -void * -je_aligned_alloc(size_t alignment, size_t size) -{ - void *ret; - int err; +/* Returns the errno-style error code of the allocation. 
*/ +JEMALLOC_ALWAYS_INLINE int +imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { + if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); + abort(); + } + UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); + set_errno(ENOMEM); + *dopts->result = NULL; + + return ENOMEM; + } - if ((err = imemalign(&ret, alignment, size, 1)) != 0) { - ret = NULL; - set_errno(err); + /* We always need the tsd. Let's grab it right away. */ + tsd_t *tsd = tsd_fetch(); + assert(tsd); + if (likely(tsd_fast(tsd))) { + /* Fast and common path. */ + tsd_assert_fast(tsd); + sopts->slow = false; + return imalloc_body(sopts, dopts, tsd); + } else { + sopts->slow = true; + return imalloc_body(sopts, dopts, tsd); } - JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), - false); - return (ret); } +/******************************************************************************/ +/* + * Begin malloc(3)-compatible functions. + */ -static void * -icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_malloc(size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = icalloc(SMALL_MAXCLASS+1); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = icalloc(usize); + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; - return (p); + imalloc(&sopts, &dopts); + + return ret; } -JEMALLOC_ALWAYS_INLINE_C void * -icalloc_prof(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +JEMALLOC_ATTR(nonnull(1)) +je_posix_memalign(void **memptr, size_t alignment, size_t size) { + int ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.min_alignment = sizeof(void *); + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + + dopts.result = memptr; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + ret = imalloc(&sopts, &dopts); + return ret; +} - if ((uintptr_t)cnt != (uintptr_t)1U) - p = icalloc_prof_sample(usize, cnt); - else - p = icalloc(usize); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) +je_aligned_alloc(size_t alignment, size_t size) { + void *ret; - return (p); + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.min_alignment = 1; + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + + dopts.result = &ret; + dopts.num_items = 
1; + dopts.item_size = size; + dopts.alignment = alignment; + + imalloc(&sopts, &dopts); + return ret; } -void * -je_calloc(size_t num, size_t size) -{ +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) +je_calloc(size_t num, size_t size) { void *ret; - size_t num_size; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); + static_opts_t sopts; + dynamic_opts_t dopts; - if (malloc_init()) { - num_size = 0; - ret = NULL; - goto label_return; - } + static_opts_init(&sopts); + dynamic_opts_init(&dopts); - num_size = num * size; - if (num_size == 0) { - if (num == 0 || size == 0) - num_size = 1; - else { - ret = NULL; - goto label_return; - } - /* - * Try to avoid division here. We know that it isn't possible to - * overflow during multiplication if neither operand uses any of the - * most significant half of the bits in a size_t. - */ - } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) - && (num_size / size != num)) { - /* size_t overflow. */ - ret = NULL; - goto label_return; - } + sopts.may_overflow = true; + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; + dopts.result = &ret; + dopts.num_items = num; + dopts.item_size = size; + dopts.zero = true; - usize = s2u(num_size); - PROF_ALLOC_PREP(1, usize, cnt); - ret = icalloc_prof(usize, cnt); - } else { - if (config_stats || (config_valgrind && opt_valgrind)) - usize = s2u(num_size); - ret = icalloc(num_size); - } + imalloc(&sopts, &dopts); -label_return: - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in calloc(): out of " - "memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && ret != NULL) { - assert(usize == isalloc(ret, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); - return (ret); + return ret; } static void * -irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt) -{ +irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + prof_tctx_t *tctx) { void *p; - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = iralloc(oldptr, usize, 0, 0, false); + if (tctx == NULL) { + return NULL; + } + if (usize <= SMALL_MAXCLASS) { + p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); + if (p == NULL) { + return NULL; + } + arena_prof_promote(tsd_tsdn(tsd), p, usize); + } else { + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + } - return (p); + return p; } -JEMALLOC_ALWAYS_INLINE_C void * -irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt) -{ +JEMALLOC_ALWAYS_INLINE void * +irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + alloc_ctx_t *alloc_ctx) { void *p; - prof_ctx_t *old_ctx; - - old_ctx = prof_ctx_get(oldptr); - if ((uintptr_t)cnt != (uintptr_t)1U) - p = irealloc_prof_sample(oldptr, usize, cnt); - else - p = iralloc(oldptr, usize, 0, 0, false); - if (p == NULL) - return (NULL); - prof_realloc(p, usize, cnt, old_usize, old_ctx); + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); 
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); + tctx = prof_alloc_prep(tsd, usize, prof_active, true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); + } else { + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return NULL; + } + prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, + old_tctx); - return (p); + return p; } -JEMALLOC_INLINE_C void -ifree(void *ptr) -{ +JEMALLOC_ALWAYS_INLINE void +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + if (config_prof && opt_prof) { + usize = sz_index2size(alloc_ctx.szind); + prof_free(tsd, ptr, usize, &alloc_ctx); + } else if (config_stats) { + usize = sz_index2size(alloc_ctx.szind); + } + if (config_stats) { + *tsd_thread_deallocatedp_get(tsd) += usize; + } + + if (likely(!slow_path)) { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + false); + } else { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + true); + } +} + +JEMALLOC_ALWAYS_INLINE void +isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); + assert(malloc_initialized() || IS_INITIALIZER); + alloc_ctx_t alloc_ctx, *ctx; if (config_prof && opt_prof) { - usize = isalloc(ptr, config_prof); - prof_free(ptr, usize); - } else if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_stats) - thread_allocated_tsd_get()->deallocated += usize; - if (config_valgrind && opt_valgrind) - rzsize = p2rz(ptr); - iqalloc(ptr); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind == sz_size2index(usize)); + ctx = &alloc_ctx; + prof_free(tsd, ptr, usize, ctx); + } else { + ctx = NULL; + } + + if (config_stats) { + *tsd_thread_deallocatedp_get(tsd) += usize; + } + + if (likely(!slow_path)) { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); + } else { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); + } } -void * -je_realloc(void *ptr, size_t size) -{ +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_realloc(void *ptr, size_t size) { void *ret; + tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t old_usize = 0; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - if (size == 0) { + if (unlikely(size == 0)) { if (ptr != NULL) { /* realloc(ptr, 0) is equivalent to free(ptr). 
*/ UTRACE(ptr, 0, 0); - ifree(ptr); - return (NULL); + tcache_t *tcache; + tsd_t *tsd = tsd_fetch(); + if (tsd_reentrancy_level_get(tsd) == 0) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + ifree(tsd, ptr, tcache, true); + return NULL; } size = 1; } - if (ptr != NULL) { - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); + if (likely(ptr != NULL)) { + assert(malloc_initialized() || IS_INITIALIZER); + tsd_t *tsd = tsd_fetch(); - if ((config_prof && opt_prof) || config_stats || - (config_valgrind && opt_valgrind)) - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && opt_valgrind) - old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize); + check_entry_exit_locking(tsd_tsdn(tsd)); + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - usize = s2u(size); - PROF_ALLOC_PREP(1, usize, cnt); - ret = irealloc_prof(ptr, old_usize, usize, cnt); + usize = sz_s2u(size); + ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? + NULL : irealloc_prof(tsd, ptr, old_usize, usize, + &alloc_ctx); } else { - if (config_stats || (config_valgrind && opt_valgrind)) - usize = s2u(size); - ret = iralloc(ptr, size, 0, 0, false); + if (config_stats) { + usize = sz_s2u(size); + } + ret = iralloc(tsd, ptr, old_usize, size, 0, false); } + tsdn = tsd_tsdn(tsd); } else { /* realloc(NULL, size) is equivalent to malloc(size). */ - MALLOC_BODY(ret, size, usize); + return je_malloc(size); } - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { + if (unlikely(ret == NULL)) { + if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in realloc(): " "out of memory\n"); abort(); } set_errno(ENOMEM); } - if (config_stats && ret != NULL) { - thread_allocated_t *ta; - assert(usize == isalloc(ret, config_prof)); - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_usize; + if (config_stats && likely(ret != NULL)) { + tsd_t *tsd; + + assert(usize == isalloc(tsdn, ret)); + tsd = tsdn_tsd(tsdn); + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize, - false); - return (ret); + check_entry_exit_locking(tsdn); + return ret; } -void -je_free(void *ptr) -{ - +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_free(void *ptr) { UTRACE(ptr, 0, 0); - if (ptr != NULL) - ifree(ptr); + if (likely(ptr != NULL)) { + /* + * We avoid setting up tsd fully (e.g. tcache, arena binding) + * based on only free() calls -- other activities trigger the + * minimal to full transition. This is because free() may + * happen during thread shutdown after tls deallocation: if a + * thread never had any malloc activities until then, a + * fully-setup tsd won't be destructed properly. + */ + tsd_t *tsd = tsd_fetch_min(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (likely(tsd_fast(tsd))) { + tsd_assert_fast(tsd); + /* Unconditionally get tcache ptr on fast path. 
*/ + tcache = tsd_tcachep_get(tsd); + ifree(tsd, ptr, tcache, false); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + } } /* @@ -1317,36 +2302,68 @@ je_free(void *ptr) */ #ifdef JEMALLOC_OVERRIDE_MEMALIGN -void * -je_memalign(size_t alignment, size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - imemalign(&ret, alignment, size, 1); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_memalign(size_t alignment, size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.min_alignment = 1; + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + sopts.null_out_result_on_error = true; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + imalloc(&sopts, &dopts); + return ret; } #endif #ifdef JEMALLOC_OVERRIDE_VALLOC -void * -je_valloc(size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - imemalign(&ret, PAGE, size, 1); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_valloc(size_t size) { + void *ret; + + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.min_alignment = PAGE; + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = PAGE; + + imalloc(&sopts, &dopts); + + return ret; } #endif -/* - * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has - * #define je_malloc malloc - */ -#define malloc_is_malloc 1 -#define is_malloc_(a) malloc_is_ ## a -#define is_malloc(a) is_malloc_(a) - -#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__)) +#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) /* * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible * to inconsistently reference libc's malloc(3)-compatible functions @@ -1356,11 +2373,47 @@ je_valloc(size_t size) * passed an extra argument for the caller return address, which will be * ignored. 
*/ -JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free; -JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc; -JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc; -JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = +JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; +JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; +JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; +# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK +JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = je_memalign; +# endif + +# ifdef CPU_COUNT +/* + * To enable static linking with glibc, the libc specific malloc interface must + * be implemented also, so none of glibc's malloc.o functions are added to the + * link. + */ +# define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) +/* To force macro expansion of je_ prefix before stringification. */ +# define PREALIAS(je_fn) ALIAS(je_fn) +# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC +void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_FREE +void __libc_free(void* ptr) PREALIAS(je_free); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC +void *__libc_malloc(size_t size) PREALIAS(je_malloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN +void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC +void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC +void *__libc_valloc(size_t size) PREALIAS(je_valloc); +# endif +# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN +int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); +# endif +# undef PREALIAS +# undef ALIAS +# endif #endif /* @@ -1371,162 +2424,98 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = * Begin non-standard functions. */ -JEMALLOC_ALWAYS_INLINE_C void * -imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena) -{ - - assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, - alignment))); - - if (alignment != 0) - return (ipalloct(usize, alignment, zero, try_tcache, arena)); - else if (zero) - return (icalloct(usize, try_tcache, arena)); - else - return (imalloct(usize, try_tcache, arena)); -} - -static void * -imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena, prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - size_t usize_promoted = (alignment == 0) ? 
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment); - assert(usize_promoted != 0); - p = imallocx(usize_promoted, alignment, zero, try_tcache, - arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imallocx(usize, alignment, zero, try_tcache, arena); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena, prof_thr_cnt_t *cnt) -{ - void *p; - - if ((uintptr_t)cnt != (uintptr_t)1U) { - p = imallocx_prof_sample(usize, alignment, zero, try_tcache, - arena, cnt); - } else - p = imallocx(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); - - return (p); -} - -void * -je_mallocx(size_t size, int flags) -{ - void *p; - size_t usize; - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - bool zero = flags & MALLOCX_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - arena_t *arena; - bool try_tcache; +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_mallocx(size_t size, int flags) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.assert_nonempty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + if (unlikely(flags != 0)) { + if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { + dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); + } - assert(size != 0); + dopts.zero = MALLOCX_ZERO_GET(flags); - if (malloc_init()) - goto label_oom; + if ((flags & MALLOCX_TCACHE_MASK) != 0) { + if ((flags & MALLOCX_TCACHE_MASK) + == MALLOCX_TCACHE_NONE) { + dopts.tcache_ind = TCACHE_IND_NONE; + } else { + dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); + } + } else { + dopts.tcache_ind = TCACHE_IND_AUTOMATIC; + } - if (arena_ind != UINT_MAX) { - arena = arenas[arena_ind]; - try_tcache = false; - } else { - arena = NULL; - try_tcache = true; + if ((flags & MALLOCX_ARENA_MASK) != 0) + dopts.arena_ind = MALLOCX_ARENA_GET(flags); } - usize = (alignment == 0) ? 
s2u(size) : sa2u(size, alignment); - assert(usize != 0); - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - PROF_ALLOC_PREP(1, usize, cnt); - p = imallocx_prof(usize, alignment, zero, try_tcache, arena, - cnt); - } else - p = imallocx(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - goto label_oom; - - if (config_stats) { - assert(usize == isalloc(p, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); - return (p); -label_oom: - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in mallocx(): out of memory\n"); - abort(); - } - UTRACE(0, size, 0); - return (NULL); + imalloc(&sopts, &dopts); + return ret; } static void * -irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize, - bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena, - prof_thr_cnt_t *cnt) -{ +irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, + prof_tctx_t *tctx) { void *p; - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= - size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero, - try_tcache_alloc, try_tcache_dalloc, arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); + if (tctx == NULL) { + return NULL; + } + if (usize <= SMALL_MAXCLASS) { + p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, + alignment, zero, tcache, arena); + if (p == NULL) { + return NULL; + } + arena_prof_promote(tsdn, p, usize); } else { - p = iralloct(oldptr, size, 0, alignment, zero, - try_tcache_alloc, try_tcache_dalloc, arena); + p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, + tcache, arena); } - return (p); + return p; } -JEMALLOC_ALWAYS_INLINE_C void * -irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment, - size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, - arena_t *arena, prof_thr_cnt_t *cnt) -{ +JEMALLOC_ALWAYS_INLINE void * +irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, + size_t alignment, size_t *usize, bool zero, tcache_t *tcache, + arena_t *arena, alloc_ctx_t *alloc_ctx) { void *p; - prof_ctx_t *old_ctx; - - old_ctx = prof_ctx_get(oldptr); - if ((uintptr_t)cnt != (uintptr_t)1U) - p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero, - try_tcache_alloc, try_tcache_dalloc, arena, cnt); - else { - p = iralloct(oldptr, size, 0, alignment, zero, - try_tcache_alloc, try_tcache_dalloc, arena); + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); + tctx = prof_alloc_prep(tsd, *usize, prof_active, false); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, + *usize, alignment, zero, tcache, arena, tctx); + } else { + p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, + zero, tcache, arena); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, false); + return NULL; } - if (p == NULL) - return (NULL); - if (p == oldptr && alignment != 0) { + if (p == old_ptr && alignment != 0) { /* * The allocation did not move, so it is possible that the size * class is smaller than would guarantee the requested @@ -1535,421 +2524,467 @@ irallocx_prof(void *oldptr, size_t old_usize, 
size_t size, size_t alignment, * be the same as the current usize because of in-place large * reallocation. Therefore, query the actual value of usize. */ - *usize = isalloc(p, config_prof); + *usize = isalloc(tsd_tsdn(tsd), p); } - prof_realloc(p, *usize, cnt, old_usize, old_ctx); + prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, + old_usize, old_tctx); - return (p); + return p; } -void * -je_rallocx(void *ptr, size_t size, int flags) -{ +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_rallocx(void *ptr, size_t size, int flags) { void *p; - size_t usize, old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); + tsd_t *tsd; + size_t usize; + size_t old_usize; + size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache_alloc, try_tcache_dalloc; arena_t *arena; + tcache_t *tcache; assert(ptr != NULL); assert(size != 0); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk; - try_tcache_alloc = false; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - try_tcache_dalloc = (chunk == ptr || chunk->arena != - arenas[arena_ind]); - arena = arenas[arena_ind]; + assert(malloc_initialized() || IS_INITIALIZER); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { + unsigned arena_ind = MALLOCX_ARENA_GET(flags); + arena = arena_get(tsd_tsdn(tsd), arena_ind, true); + if (unlikely(arena == NULL)) { + goto label_oom; + } } else { - try_tcache_alloc = true; - try_tcache_dalloc = true; arena = NULL; } - if ((config_prof && opt_prof) || config_stats || - (config_valgrind && opt_valgrind)) - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_usize); + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + tcache = tcache_get(tsd); + } + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - assert(usize != 0); - PROF_ALLOC_PREP(1, usize, cnt); - p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero, - try_tcache_alloc, try_tcache_dalloc, arena, cnt); - if (p == NULL) + usize = (alignment == 0) ? 
+ sz_s2u(size) : sz_sa2u(size, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + goto label_oom; + } + p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, + zero, tcache, arena, &alloc_ctx); + if (unlikely(p == NULL)) { goto label_oom; + } } else { - p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc, - try_tcache_dalloc, arena); - if (p == NULL) + p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, + zero, tcache, arena); + if (unlikely(p == NULL)) { goto label_oom; - if (config_stats || (config_valgrind && opt_valgrind)) - usize = isalloc(p, config_prof); + } + if (config_stats) { + usize = isalloc(tsd_tsdn(tsd), p); + } } + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); if (config_stats) { - thread_allocated_t *ta; - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_usize; + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero); - return (p); + check_entry_exit_locking(tsd_tsdn(tsd)); + return p; label_oom: - if (config_xmalloc && opt_xmalloc) { + if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); abort(); } UTRACE(ptr, size, 0); - return (NULL); + check_entry_exit_locking(tsd_tsdn(tsd)); + return NULL; } -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, bool zero, arena_t *arena) -{ +JEMALLOC_ALWAYS_INLINE size_t +ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero) { size_t usize; - if (ixalloc(ptr, size, extra, alignment, zero)) - return (old_usize); - usize = isalloc(ptr, config_prof); + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) { + return old_usize; + } + usize = isalloc(tsdn, ptr); - return (usize); + return usize; } static size_t -ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, size_t max_usize, bool zero, arena_t *arena, - prof_thr_cnt_t *cnt) -{ +ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { size_t usize; - if (cnt == NULL) - return (old_usize); - /* Use minimum usize to determine whether promotion may happen. */ - if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size, - alignment)) <= SMALL_MAXCLASS) { - if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= - size+extra) ? 
0 : size+extra - (SMALL_MAXCLASS+1), - alignment, zero)) - return (old_usize); - usize = isalloc(ptr, config_prof); - if (max_usize < PAGE) - arena_prof_promoted(ptr, usize); - } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero, arena); + if (tctx == NULL) { + return old_usize; } + usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, + zero); - return (usize); + return usize; } -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, size_t max_usize, bool zero, arena_t *arena, - prof_thr_cnt_t *cnt) -{ - size_t usize; - prof_ctx_t *old_ctx; +JEMALLOC_ALWAYS_INLINE size_t +ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { + size_t usize_max, usize; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; - old_ctx = prof_ctx_get(ptr); - if ((uintptr_t)cnt != (uintptr_t)1U) { - usize = ixallocx_prof_sample(ptr, old_usize, size, extra, - alignment, zero, max_usize, arena, cnt); + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); + /* + * usize isn't knowable before ixalloc() returns when extra is non-zero. + * Therefore, compute its maximum possible value and use that in + * prof_alloc_prep() to decide whether to capture a backtrace. + * prof_realloc() will use the actual usize to decide whether to sample. + */ + if (alignment == 0) { + usize_max = sz_s2u(size+extra); + assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); + } else { + usize_max = sz_sa2u(size+extra, alignment); + if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { + /* + * usize_max is out of range, and chances are that + * allocation will fail, but use the maximum possible + * value and carry on with prof_alloc_prep(), just in + * case allocation succeeds. 
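
For readers following the new xallocx path: the clamping idea in the comment above, and in je_xallocx() below where (size + extra) is capped so it cannot exceed LARGE_MAXCLASS or wrap around, reduces to the following standalone sketch. Every identifier here (MAX_CLASS, clamp_extra) is invented for the illustration and is not jemalloc API.

#include <stdint.h>
#include <stdio.h>

#define MAX_CLASS ((size_t)1 << 30)   /* stand-in for LARGE_MAXCLASS */

/* Reduce extra so that size + extra can neither wrap around nor pass the
 * largest size class; when size alone is already too large the caller is
 * expected to bail out early, as je_xallocx() does. */
static size_t
clamp_extra(size_t size, size_t extra) {
    if (size > MAX_CLASS) {
        return 0;
    }
    if (MAX_CLASS - size < extra) {
        extra = MAX_CLASS - size;
    }
    return extra;
}

int
main(void) {
    printf("%zu\n", clamp_extra(100, 50));             /* 50 */
    printf("%zu\n", clamp_extra(MAX_CLASS - 10, 100)); /* 10 */
    printf("%zu\n", clamp_extra(SIZE_MAX, 1));         /* 0  */
    return 0;
}
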
+ */ + usize_max = LARGE_MAXCLASS; + } + } + tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); + + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, + size, extra, alignment, zero, tctx); } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero, arena); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } - if (usize == old_usize) - return (usize); - prof_realloc(ptr, usize, cnt, old_usize, old_ctx); + if (usize == old_usize) { + prof_alloc_rollback(tsd, tctx, false); + return usize; + } + prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, + old_tctx); - return (usize); + return usize; } -size_t -je_xallocx(void *ptr, size_t size, size_t extra, int flags) -{ +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_xallocx(void *ptr, size_t size, size_t extra, int flags) { + tsd_t *tsd; size_t usize, old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); + size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - arena_t *arena; assert(ptr != NULL); assert(size != 0); assert(SIZE_T_MAX - size >= extra); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (arena_ind != UINT_MAX) - arena = arenas[arena_ind]; - else - arena = NULL; - - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_usize); + assert(malloc_initialized() || IS_INITIALIZER); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + /* + * The API explicitly absolves itself of protecting against (size + + * extra) numerical overflow, but we may need to clamp extra to avoid + * exceeding LARGE_MAXCLASS. + * + * Ordinarily, size limit checking is handled deeper down, but here we + * have to check as part of (size + extra) clamping, since we need the + * clamped value in the above helper functions. + */ + if (unlikely(size > LARGE_MAXCLASS)) { + usize = old_usize; + goto label_not_resized; + } + if (unlikely(LARGE_MAXCLASS - size < extra)) { + extra = LARGE_MAXCLASS - size; + } if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - /* - * usize isn't knowable before ixalloc() returns when extra is - * non-zero. Therefore, compute its maximum possible value and - * use that in PROF_ALLOC_PREP() to decide whether to capture a - * backtrace. prof_realloc() will use the actual usize to - * decide whether to sample. - */ - size_t max_usize = (alignment == 0) ? 
s2u(size+extra) : - sa2u(size+extra, alignment); - PROF_ALLOC_PREP(1, max_usize, cnt); - usize = ixallocx_prof(ptr, old_usize, size, extra, alignment, - max_usize, zero, arena, cnt); + usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, + alignment, zero, &alloc_ctx); } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero, arena); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } - if (usize == old_usize) + if (unlikely(usize == old_usize)) { goto label_not_resized; + } if (config_stats) { - thread_allocated_t *ta; - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_usize; + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; } - JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero); label_not_resized: UTRACE(ptr, size, ptr); - return (usize); + check_entry_exit_locking(tsd_tsdn(tsd)); + return usize; } -size_t -je_sallocx(const void *ptr, int flags) -{ +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_sallocx(const void *ptr, int flags) { size_t usize; + tsdn_t *tsdn; + + assert(malloc_initialized() || IS_INITIALIZER); + assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); - if (config_ivsalloc) - usize = ivsalloc(ptr, config_prof); - else { - assert(ptr != NULL); - usize = isalloc(ptr, config_prof); + if (config_debug || force_ivsalloc) { + usize = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || usize != 0); + } else { + usize = isalloc(tsdn, ptr); } - return (usize); + check_entry_exit_locking(tsdn); + return usize; } -void -je_dallocx(void *ptr, int flags) -{ +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_dallocx(void *ptr, int flags) { + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + /* Not allowed to be reentrant and specify a custom tcache. 
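
The tcache selection in je_dallocx()/je_sdallocx() above decodes a bit-field out of the MALLOCX flags: an empty field means "use the thread's automatic cache", an all-ones field means "no cache", and anything else names an explicitly created cache. A rough standalone sketch of that decoding follows; the EX_* encodings and TC_* names are made up for the example and do not match the real MALLOCX_TCACHE macros.

#include <stdio.h>

/* Illustrative encodings only. */
#define EX_TCACHE_MASK   0x3f00u
#define EX_TCACHE_SHIFT  8
#define EX_TCACHE_NONE   EX_TCACHE_MASK                 /* all ones: bypass caching */
#define EX_TCACHE(ind)   (((unsigned)(ind) + 1) << EX_TCACHE_SHIFT)

enum tcache_choice { TC_AUTOMATIC, TC_NONE, TC_EXPLICIT };

static enum tcache_choice
decode_tcache(unsigned flags, unsigned *ind) {
    if ((flags & EX_TCACHE_MASK) == 0) {
        return TC_AUTOMATIC;        /* no field: use the thread's own cache */
    }
    if ((flags & EX_TCACHE_MASK) == EX_TCACHE_NONE) {
        return TC_NONE;             /* caller asked to skip caching */
    }
    *ind = ((flags & EX_TCACHE_MASK) >> EX_TCACHE_SHIFT) - 1;
    return TC_EXPLICIT;             /* caller named a specific cache */
}

int
main(void) {
    unsigned ind = 0;
    printf("%d\n", decode_tcache(0, &ind));                    /* 0 */
    printf("%d\n", decode_tcache(EX_TCACHE_NONE, &ind));       /* 1 */
    printf("%d %u\n", decode_tcache(EX_TCACHE(5), &ind), ind); /* 2 5 */
    return 0;
}
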
*/ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } + + UTRACE(ptr, 0, 0); + if (likely(fast)) { + tsd_assert_fast(tsd); + ifree(tsd, ptr, tcache, false); + } else { + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); +} + +JEMALLOC_ALWAYS_INLINE size_t +inallocx(tsdn_t *tsdn, size_t size, int flags) { + check_entry_exit_locking(tsdn); + size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache; + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { + usize = sz_s2u(size); + } else { + usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); + } + check_entry_exit_locking(tsdn); + return usize; +} +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_sdallocx(void *ptr, size_t size, int flags) { assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); - - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - try_tcache = (chunk == ptr || chunk->arena != - arenas[arena_ind]); - } else - try_tcache = true; + assert(malloc_initialized() || IS_INITIALIZER); + + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + size_t usize = inallocx(tsd_tsdn(tsd), size, flags); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + /* Not allowed to be reentrant and specify a custom tcache. */ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } UTRACE(ptr, 0, 0); - if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_prof && opt_prof) { - if (config_stats == false && config_valgrind == false) - usize = isalloc(ptr, config_prof); - prof_free(ptr, usize); + if (likely(fast)) { + tsd_assert_fast(tsd); + isfree(tsd, ptr, usize, tcache, false); + } else { + isfree(tsd, ptr, usize, tcache, true); } - if (config_stats) - thread_allocated_tsd_get()->deallocated += usize; - if (config_valgrind && opt_valgrind) - rzsize = p2rz(ptr); - iqalloct(ptr, try_tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); + check_entry_exit_locking(tsd_tsdn(tsd)); } -size_t -je_nallocx(size_t size, int flags) -{ +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_nallocx(size_t size, int flags) { size_t usize; - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); + tsdn_t *tsdn; assert(size != 0); - if (malloc_init()) - return (0); + if (unlikely(malloc_init())) { + return 0; + } + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); - usize = (alignment == 0) ? 
s2u(size) : sa2u(size, alignment); - assert(usize != 0); - return (usize); + usize = inallocx(tsdn, size, flags); + if (unlikely(usize > LARGE_MAXCLASS)) { + return 0; + } + + check_entry_exit_locking(tsdn); + return usize; } -int +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ + size_t newlen) { + int ret; + tsd_t *tsd; - if (malloc_init()) - return (EAGAIN); + if (unlikely(malloc_init())) { + return EAGAIN; + } - return (ctl_byname(name, oldp, oldlenp, newp, newlen)); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); + return ret; } -int -je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) -{ +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { + int ret; - if (malloc_init()) - return (EAGAIN); + if (unlikely(malloc_init())) { + return EAGAIN; + } - return (ctl_nametomib(name, mibp, miblenp)); + tsd_t *tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_nametomib(tsd, name, mibp, miblenp); + check_entry_exit_locking(tsd_tsdn(tsd)); + return ret; } -int +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ + void *newp, size_t newlen) { + int ret; + tsd_t *tsd; - if (malloc_init()) - return (EAGAIN); + if (unlikely(malloc_init())) { + return EAGAIN; + } - return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); + return ret; } -void +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ + const char *opts) { + tsdn_t *tsdn; + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); stats_print(write_cb, cbopaque, opts); + check_entry_exit_locking(tsdn); } -size_t -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) -{ +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; + tsdn_t *tsdn; - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); - else - ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; - - return (ret); -} - -/* - * End non-standard functions. - */ -/******************************************************************************/ -/* - * Begin experimental functions. - */ -#ifdef JEMALLOC_EXPERIMENTAL - -int -je_allocm(void **ptr, size_t *rsize, size_t size, int flags) -{ - void *p; - - assert(ptr != NULL); - - p = je_mallocx(size, flags); - if (p == NULL) - return (ALLOCM_ERR_OOM); - if (rsize != NULL) - *rsize = isalloc(p, config_prof); - *ptr = p; - return (ALLOCM_SUCCESS); -} - -int -je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) -{ - int ret; - bool no_move = flags & ALLOCM_NO_MOVE; + assert(malloc_initialized() || IS_INITIALIZER); - assert(ptr != NULL); - assert(*ptr != NULL); - assert(size != 0); - assert(SIZE_T_MAX - size >= extra); + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); - if (no_move) { - size_t usize = je_xallocx(*ptr, size, extra, flags); - ret = (usize >= size) ? 
ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; - if (rsize != NULL) - *rsize = usize; + if (unlikely(ptr == NULL)) { + ret = 0; } else { - void *p = je_rallocx(*ptr, size+extra, flags); - if (p != NULL) { - *ptr = p; - ret = ALLOCM_SUCCESS; - } else - ret = ALLOCM_ERR_OOM; - if (rsize != NULL) - *rsize = isalloc(*ptr, config_prof); + if (config_debug || force_ivsalloc) { + ret = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || ret != 0); + } else { + ret = isalloc(tsdn, ptr); + } } - return (ret); -} - -int -je_sallocm(const void *ptr, size_t *rsize, int flags) -{ - - assert(rsize != NULL); - *rsize = je_sallocx(ptr, flags); - return (ALLOCM_SUCCESS); -} - -int -je_dallocm(void *ptr, int flags) -{ - je_dallocx(ptr, flags); - return (ALLOCM_SUCCESS); + check_entry_exit_locking(tsdn); + return ret; } -int -je_nallocm(size_t *rsize, size_t size, int flags) -{ - size_t usize; - - usize = je_nallocx(size, flags); - if (usize == 0) - return (ALLOCM_ERR_OOM); - if (rsize != NULL) - *rsize = usize; - return (ALLOCM_SUCCESS); -} - -#endif /* - * End experimental functions. + * End non-standard functions. */ /******************************************************************************/ /* @@ -1966,17 +3001,17 @@ je_nallocm(size_t *rsize, size_t size, int flags) * fork/malloc races via the following functions it registers during * initialization using pthread_atfork(), but of course that does no good if * the allocator isn't fully initialized at fork time. The following library - * constructor is a partial solution to this problem. It may still possible to - * trigger the deadlock described above, but doing so would involve forking via - * a library constructor that runs before jemalloc's runs. + * constructor is a partial solution to this problem. It may still be possible + * to trigger the deadlock described above, but doing so would involve forking + * via a library constructor that runs before jemalloc's runs. */ +#ifndef JEMALLOC_JET JEMALLOC_ATTR(constructor) static void -jemalloc_constructor(void) -{ - +jemalloc_constructor(void) { malloc_init(); } +#endif #ifndef JEMALLOC_MUTEX_INIT_CB void @@ -1986,25 +3021,69 @@ JEMALLOC_EXPORT void _malloc_prefork(void) #endif { - unsigned i; + tsd_t *tsd; + unsigned i, j, narenas; + arena_t *arena; #ifdef JEMALLOC_MUTEX_INIT_CB - if (malloc_initialized == false) + if (!malloc_initialized()) { return; + } #endif - assert(malloc_initialized); + assert(malloc_initialized()); + tsd = tsd_fetch(); + + narenas = narenas_total_get(); + + witness_prefork(tsd_witness_tsdp_get(tsd)); /* Acquire all mutexes in a safe order. */ - ctl_prefork(); - prof_prefork(); - malloc_mutex_prefork(&arenas_lock); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_prefork(arenas[i]); + ctl_prefork(tsd_tsdn(tsd)); + tcache_prefork(tsd_tsdn(tsd)); + malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); + if (have_background_thread) { + background_thread_prefork0(tsd_tsdn(tsd)); + } + prof_prefork0(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_prefork1(tsd_tsdn(tsd)); + } + /* Break arena prefork into stages to preserve lock order. 
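
The prefork/postfork trio above is the standard pthread_atfork() pattern: take every allocator lock in one fixed order before fork() and release them in the parent, or re-initialize them in the child, afterwards, so the child never inherits a lock held by a thread that no longer exists. A minimal self-contained sketch, with two stand-in locks in place of the staged arena_prefork0..7 ordering:

#include <pthread.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void) {
    pthread_mutex_lock(&lock_a);   /* fixed order: always a before b */
    pthread_mutex_lock(&lock_b);
}

static void postfork_parent(void) {
    pthread_mutex_unlock(&lock_b);
    pthread_mutex_unlock(&lock_a);
}

static void postfork_child(void) {
    /* Mirror the patch's malloc_mutex_postfork_child(): the child
     * re-initializes instead of unlocking state inherited from the parent. */
    pthread_mutex_init(&lock_b, NULL);
    pthread_mutex_init(&lock_a, NULL);
}

int
main(void) {
    pthread_atfork(prefork, postfork_parent, postfork_child);
    pid_t pid = fork();
    if (pid == 0) {
        pthread_mutex_lock(&lock_a);   /* child does not deadlock */
        pthread_mutex_unlock(&lock_a);
        _exit(0);
    }
    if (pid > 0) {
        waitpid(pid, NULL, 0);
        pthread_mutex_lock(&lock_a);   /* parent's locks were released too */
        pthread_mutex_unlock(&lock_a);
        puts("fork-safe");
    }
    return 0;
}
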
*/ + for (i = 0; i < 8; i++) { + for (j = 0; j < narenas; j++) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != + NULL) { + switch (i) { + case 0: + arena_prefork0(tsd_tsdn(tsd), arena); + break; + case 1: + arena_prefork1(tsd_tsdn(tsd), arena); + break; + case 2: + arena_prefork2(tsd_tsdn(tsd), arena); + break; + case 3: + arena_prefork3(tsd_tsdn(tsd), arena); + break; + case 4: + arena_prefork4(tsd_tsdn(tsd), arena); + break; + case 5: + arena_prefork5(tsd_tsdn(tsd), arena); + break; + case 6: + arena_prefork6(tsd_tsdn(tsd), arena); + break; + case 7: + arena_prefork7(tsd_tsdn(tsd), arena); + break; + default: not_reached(); + } + } + } } - chunk_prefork(); - base_prefork(); - huge_prefork(); + prof_prefork1(tsd_tsdn(tsd)); } #ifndef JEMALLOC_MUTEX_INIT_CB @@ -2015,97 +3094,61 @@ JEMALLOC_EXPORT void _malloc_postfork(void) #endif { - unsigned i; + tsd_t *tsd; + unsigned i, narenas; #ifdef JEMALLOC_MUTEX_INIT_CB - if (malloc_initialized == false) + if (!malloc_initialized()) { return; -#endif - assert(malloc_initialized); - - /* Release all mutexes, now that fork() has completed. */ - huge_postfork_parent(); - base_postfork_parent(); - chunk_postfork_parent(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_parent(arenas[i]); } - malloc_mutex_postfork_parent(&arenas_lock); - prof_postfork_parent(); - ctl_postfork_parent(); -} - -void -jemalloc_postfork_child(void) -{ - unsigned i; +#endif + assert(malloc_initialized()); - assert(malloc_initialized); + tsd = tsd_fetch(); + witness_postfork_parent(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. */ - huge_postfork_child(); - base_postfork_child(); - chunk_postfork_child(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_child(arenas[i]); - } - malloc_mutex_postfork_child(&arenas_lock); - prof_postfork_child(); - ctl_postfork_child(); -} + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; -/******************************************************************************/ -/* - * The following functions are used for TLS allocation/deallocation in static - * binaries on FreeBSD. The primary difference between these and i[mcd]alloc() - * is that these avoid accessing TLS variables. - */ - -static void * -a0alloc(size_t size, bool zero) -{ - - if (malloc_init()) - return (NULL); - - if (size == 0) - size = 1; - - if (size <= arena_maxclass) - return (arena_malloc(arenas[0], size, zero, false)); - else - return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0]))); -} - -void * -a0malloc(size_t size) -{ - - return (a0alloc(size, false)); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { + arena_postfork_parent(tsd_tsdn(tsd), arena); + } + } + prof_postfork_parent(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_postfork_parent(tsd_tsdn(tsd)); + } + malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); + tcache_postfork_parent(tsd_tsdn(tsd)); + ctl_postfork_parent(tsd_tsdn(tsd)); } -void * -a0calloc(size_t num, size_t size) -{ +void +jemalloc_postfork_child(void) { + tsd_t *tsd; + unsigned i, narenas; - return (a0alloc(num * size, true)); -} + assert(malloc_initialized()); -void -a0free(void *ptr) -{ - arena_chunk_t *chunk; + tsd = tsd_fetch(); - if (ptr == NULL) - return; + witness_postfork_child(tsd_witness_tsdp_get(tsd)); + /* Release all mutexes, now that fork() has completed. 
*/ + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - arena_dalloc(chunk->arena, chunk, ptr, false); - else - huge_dalloc(ptr, true); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { + arena_postfork_child(tsd_tsdn(tsd), arena); + } + } + prof_postfork_child(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_postfork_child(tsd_tsdn(tsd)); + } + malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); + tcache_postfork_child(tsd_tsdn(tsd)); + ctl_postfork_child(tsd_tsdn(tsd)); } /******************************************************************************/ diff --git a/deps/jemalloc/src/mutex.c b/deps/jemalloc/src/mutex.c index 788eca3870..a528ef0c24 100644 --- a/deps/jemalloc/src/mutex.c +++ b/deps/jemalloc/src/mutex.c @@ -1,12 +1,12 @@ -#define JEMALLOC_MUTEX_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_MUTEX_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -#include <dlfcn.h> -#endif +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" #ifndef _CRT_SPINCOUNT -#define _CRT_SPINCOUNT 4000 +#define _CRT_SPINCOUNT 4000 #endif /******************************************************************************/ @@ -20,10 +20,6 @@ static bool postpone_init = true; static malloc_mutex_t *postponed_mutexes = NULL; #endif -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static void pthread_create_once(void); -#endif - /******************************************************************************/ /* * We intercept pthread_create() calls in order to toggle isthreaded if the @@ -31,33 +27,11 @@ static void pthread_create_once(void); */ #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, - void *(*)(void *), void *__restrict); - -static void -pthread_create_once(void) -{ - - pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); - if (pthread_create_fptr == NULL) { - malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " - "\"pthread_create\")\n"); - abort(); - } - - isthreaded = true; -} - JEMALLOC_EXPORT int pthread_create(pthread_t *__restrict thread, const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), - void *__restrict arg) -{ - static pthread_once_t once_control = PTHREAD_ONCE_INIT; - - pthread_once(&once_control, pthread_create_once); - - return (pthread_create_fptr(thread, attr, start_routine, arg)); + void *__restrict arg) { + return pthread_create_wrapper(thread, attr, start_routine, arg); } #endif @@ -68,14 +42,108 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); #endif -bool -malloc_mutex_init(malloc_mutex_t *mutex) -{ +void +malloc_mutex_lock_slow(malloc_mutex_t *mutex) { + mutex_prof_data_t *data = &mutex->prof_data; + UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER; + + if (ncpus == 1) { + goto label_spin_done; + } + + int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN; + do { + CPU_SPINWAIT; + if (!malloc_mutex_trylock_final(mutex)) { + data->n_spin_acquired++; + return; + } + } while (cnt++ < max_cnt); + + if (!config_stats) { + /* Only spin is useful when stats is off. */ + malloc_mutex_lock_final(mutex); + return; + } +label_spin_done: + nstime_update(&before); + /* Copy before to after to avoid clock skews. 
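
malloc_mutex_lock_slow() above spins a bounded number of times, then falls back to a blocking lock while timing how long it waited and folding the result into the mutex's profiling counters. A simplified standalone version of that spin-then-block-with-stats pattern follows; stat_mutex_t and its fields are invented names, and jemalloc's nstime_t is replaced here by clock_gettime().

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAX_SPIN 250   /* bounded busy-wait before blocking */

typedef struct {
    pthread_mutex_t lock;
    uint64_t n_spin_acquired;  /* acquired while still spinning */
    uint64_t n_wait_times;     /* times we actually blocked */
    uint64_t tot_wait_ns;      /* total time spent blocked */
    uint64_t max_wait_ns;      /* worst single wait */
} stat_mutex_t;

static uint64_t
now_ns(void) {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static void
stat_mutex_lock(stat_mutex_t *m) {
    for (int i = 0; i < MAX_SPIN; i++) {
        if (pthread_mutex_trylock(&m->lock) == 0) {
            m->n_spin_acquired++;   /* fast path: never blocked */
            return;
        }
    }
    uint64_t before = now_ns();
    pthread_mutex_lock(&m->lock);   /* true slow path */
    uint64_t delta = now_ns() - before;
    /* Counters are updated while holding the lock, so no extra atomics. */
    m->n_wait_times++;
    m->tot_wait_ns += delta;
    if (delta > m->max_wait_ns) {
        m->max_wait_ns = delta;
    }
}

int
main(void) {
    static stat_mutex_t m = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0, 0 };
    stat_mutex_lock(&m);
    pthread_mutex_unlock(&m.lock);
    printf("spin-acquired=%llu blocked=%llu\n",
        (unsigned long long)m.n_spin_acquired,
        (unsigned long long)m.n_wait_times);
    return 0;
}
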
*/ + nstime_t after; + nstime_copy(&after, &before); + uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1, + ATOMIC_RELAXED) + 1; + /* One last try as above two calls may take quite some cycles. */ + if (!malloc_mutex_trylock_final(mutex)) { + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + data->n_spin_acquired++; + return; + } + + /* True slow path. */ + malloc_mutex_lock_final(mutex); + /* Update more slow-path only counters. */ + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + nstime_update(&after); + + nstime_t delta; + nstime_copy(&delta, &after); + nstime_subtract(&delta, &before); + data->n_wait_times++; + nstime_add(&data->tot_wait_time, &delta); + if (nstime_compare(&data->max_wait_time, &delta) < 0) { + nstime_copy(&data->max_wait_time, &delta); + } + if (n_thds > data->max_n_thds) { + data->max_n_thds = n_thds; + } +} + +static void +mutex_prof_data_init(mutex_prof_data_t *data) { + memset(data, 0, sizeof(mutex_prof_data_t)); + nstime_init(&data->max_wait_time, 0); + nstime_init(&data->tot_wait_time, 0); + data->prev_owner = NULL; +} + +void +malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_assert_owner(tsdn, mutex); + mutex_prof_data_init(&mutex->prof_data); +} + +static int +mutex_addr_comp(const witness_t *witness1, void *mutex1, + const witness_t *witness2, void *mutex2) { + assert(mutex1 != NULL); + assert(mutex2 != NULL); + uintptr_t mu1int = (uintptr_t)mutex1; + uintptr_t mu2int = (uintptr_t)mutex2; + if (mu1int < mu2int) { + return -1; + } else if (mu1int == mu2int) { + return 0; + } else { + return 1; + } +} + +bool +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { + mutex_prof_data_init(&mutex->prof_data); #ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + InitializeSRWLock(&mutex->lock); +# else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, - _CRT_SPINCOUNT)) - return (true); + _CRT_SPINCOUNT)) { + return true; + } +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mutex->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mutex->lock = 0; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) @@ -83,67 +151,73 @@ malloc_mutex_init(malloc_mutex_t *mutex) mutex->postponed_next = postponed_mutexes; postponed_mutexes = mutex; } else { - if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) != - 0) - return (true); + if (_pthread_mutex_init_calloc_cb(&mutex->lock, + bootstrap_calloc) != 0) { + return true; + } } #else pthread_mutexattr_t attr; - if (pthread_mutexattr_init(&attr) != 0) - return (true); + if (pthread_mutexattr_init(&attr) != 0) { + return true; + } pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); - return (true); + return true; } pthread_mutexattr_destroy(&attr); #endif - return (false); + if (config_debug) { + mutex->lock_order = lock_order; + if (lock_order == malloc_mutex_address_ordered) { + witness_init(&mutex->witness, name, rank, + mutex_addr_comp, &mutex); + } else { + witness_init(&mutex->witness, name, rank, NULL, NULL); + } + } + return false; } void -malloc_mutex_prefork(malloc_mutex_t *mutex) -{ - - malloc_mutex_lock(mutex); +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_lock(tsdn, mutex); } void -malloc_mutex_postfork_parent(malloc_mutex_t *mutex) -{ - - malloc_mutex_unlock(mutex); +malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { + 
malloc_mutex_unlock(tsdn, mutex); } void -malloc_mutex_postfork_child(malloc_mutex_t *mutex) -{ - +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB - malloc_mutex_unlock(mutex); + malloc_mutex_unlock(tsdn, mutex); #else - if (malloc_mutex_init(mutex)) { + if (malloc_mutex_init(mutex, mutex->witness.name, + mutex->witness.rank, mutex->lock_order)) { malloc_printf("<jemalloc>: Error re-initializing mutex in " "child\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } #endif } bool -mutex_boot(void) -{ - +malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, - base_calloc) != 0) - return (true); + bootstrap_calloc) != 0) { + return true; + } postponed_mutexes = postponed_mutexes->postponed_next; } #endif - return (false); + return false; } diff --git a/deps/jemalloc/src/prof.c b/deps/jemalloc/src/prof.c index 7722b7b437..975722c4c3 100644 --- a/deps/jemalloc/src/prof.c +++ b/deps/jemalloc/src/prof.c @@ -1,27 +1,41 @@ -#define JEMALLOC_PROF_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" + /******************************************************************************/ #ifdef JEMALLOC_PROF_LIBUNWIND -#define UNW_LOCAL_ONLY +#define UNW_LOCAL_ONLY #include <libunwind.h> #endif #ifdef JEMALLOC_PROF_LIBGCC +/* + * We have a circular dependency -- jemalloc_internal.h tells us if we should + * use libgcc's unwinding functionality, but after we've included that, we've + * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. + */ +#undef _Unwind_Backtrace #include <unwind.h> +#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook) #endif /******************************************************************************/ /* Data. */ -malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL) - bool opt_prof = false; bool opt_prof_active = true; +bool opt_prof_thread_active_init = true; size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; bool opt_prof_gdump = false; -bool opt_prof_final = true; +bool opt_prof_final = false; bool opt_prof_leak = false; bool opt_prof_accum = false; char opt_prof_prefix[ @@ -31,25 +45,66 @@ char opt_prof_prefix[ #endif 1]; +/* + * Initialized as opt_prof_active, and accessed via + * prof_active_[gs]et{_unlocked,}(). + */ +bool prof_active; +static malloc_mutex_t prof_active_mtx; + +/* + * Initialized as opt_prof_thread_active_init, and accessed via + * prof_thread_active_init_[gs]et(). + */ +static bool prof_thread_active_init; +static malloc_mutex_t prof_thread_active_init_mtx; + +/* + * Initialized as opt_prof_gdump, and accessed via + * prof_gdump_[gs]et{_unlocked,}(). + */ +bool prof_gdump_val; +static malloc_mutex_t prof_gdump_mtx; + uint64_t prof_interval = 0; -bool prof_promote; + +size_t lg_prof_sample; /* - * Table of mutexes that are shared among ctx's. These are leaf locks, so - * there is no problem with using them for more than one ctx at the same time. 
- * The primary motivation for this sharing though is that ctx's are ephemeral, + * Table of mutexes that are shared among gctx's. These are leaf locks, so + * there is no problem with using them for more than one gctx at the same time. + * The primary motivation for this sharing though is that gctx's are ephemeral, * and destroying mutexes causes complications for systems that allocate when * creating/destroying mutexes. */ -static malloc_mutex_t *ctx_locks; -static unsigned cum_ctxs; /* Atomic counter. */ +static malloc_mutex_t *gctx_locks; +static atomic_u_t cum_gctxs; /* Atomic counter. */ /* - * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data + * Table of mutexes that are shared among tdata's. No operations require + * holding multiple tdata locks, so there is no problem with using them for more + * than one tdata at the same time, even though a gctx lock may be acquired + * while holding a tdata lock. + */ +static malloc_mutex_t *tdata_locks; + +/* + * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data * structure that knows about all backtraces currently captured. */ -static ckh_t bt2ctx; -static malloc_mutex_t bt2ctx_mtx; +static ckh_t bt2gctx; +/* Non static to enable profiling. */ +malloc_mutex_t bt2gctx_mtx; + +/* + * Tree of all extant prof_tdata_t structures, regardless of state, + * {attached,detached,expired}. + */ +static prof_tdata_tree_t tdatas; +static malloc_mutex_t tdatas_mtx; + +static uint64_t next_thr_uid; +static malloc_mutex_t next_thr_uid_mtx; static malloc_mutex_t prof_dump_seq_mtx; static uint64_t prof_dump_seq; @@ -70,161 +125,242 @@ static char prof_dump_buf[ 1 #endif ]; -static unsigned prof_dump_buf_end; +static size_t prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ static bool prof_booted = false; /******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ -void -bt_init(prof_bt_t *bt, void **vec) -{ +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); +static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached); +static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); - cassert(config_prof); +/******************************************************************************/ +/* Red-black trees. 
*/ - bt->vec = vec; - bt->len = 0; +static int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { + uint64_t a_thr_uid = a->thr_uid; + uint64_t b_thr_uid = b->thr_uid; + int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); + if (ret == 0) { + uint64_t a_thr_discrim = a->thr_discrim; + uint64_t b_thr_discrim = b->thr_discrim; + ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < + b_thr_discrim); + if (ret == 0) { + uint64_t a_tctx_uid = a->tctx_uid; + uint64_t b_tctx_uid = b->tctx_uid; + ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < + b_tctx_uid); + } + } + return ret; } -static void -bt_destroy(prof_bt_t *bt) -{ +rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, + tctx_link, prof_tctx_comp) - cassert(config_prof); +static int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { + unsigned a_len = a->bt.len; + unsigned b_len = b->bt.len; + unsigned comp_len = (a_len < b_len) ? a_len : b_len; + int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); + if (ret == 0) { + ret = (a_len > b_len) - (a_len < b_len); + } + return ret; +} + +rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, + prof_gctx_comp) + +static int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { + int ret; + uint64_t a_uid = a->thr_uid; + uint64_t b_uid = b->thr_uid; + + ret = ((a_uid > b_uid) - (a_uid < b_uid)); + if (ret == 0) { + uint64_t a_discrim = a->thr_discrim; + uint64_t b_discrim = b->thr_discrim; - idalloc(bt); + ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); + } + return ret; } -static prof_bt_t * -bt_dup(prof_bt_t *bt) -{ - prof_bt_t *ret; +rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, + prof_tdata_comp) + +/******************************************************************************/ + +void +prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { + prof_tdata_t *tdata; cassert(config_prof); - /* - * Create a single allocation that has space for vec immediately - * following the prof_bt_t structure. The backtraces that get - * stored in the backtrace caches are copied from stack-allocated - * temporary variables, so size is known at creation time. Making this - * a contiguous object improves cache locality. - */ - ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) + - (bt->len * sizeof(void *))); - if (ret == NULL) - return (NULL); - ret->vec = (void **)((uintptr_t)ret + - QUANTUM_CEILING(sizeof(prof_bt_t))); - memcpy(ret->vec, bt->vec, bt->len * sizeof(void *)); - ret->len = bt->len; + if (updated) { + /* + * Compute a new sample threshold. This isn't very important in + * practice, because this function is rarely executed, so the + * potential for sample bias is minimal except in contrived + * programs. 
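
The rb_gen() comparators above (prof_tctx_comp(), prof_gctx_comp(), prof_tdata_comp()) all rely on the (a > b) - (a < b) idiom to get a -1/0/1 result without any risk of unsigned subtraction wrap-around, falling through to the next key on ties. A tiny standalone example of the same idiom; node_t and its fields are illustrative only.

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t thr_uid;     /* primary key   */
    uint64_t thr_discrim; /* secondary key */
} node_t;

/* -1, 0 or 1; ties on the primary key fall through to the secondary key,
 * exactly as prof_tctx_comp() does with its three keys. */
static int
node_comp(const node_t *a, const node_t *b) {
    int ret = (a->thr_uid > b->thr_uid) - (a->thr_uid < b->thr_uid);
    if (ret == 0) {
        ret = (a->thr_discrim > b->thr_discrim) -
            (a->thr_discrim < b->thr_discrim);
    }
    return ret;
}

int
main(void) {
    node_t x = {1, 7}, y = {1, 9};
    printf("%d %d %d\n", node_comp(&x, &y), node_comp(&y, &x),
        node_comp(&x, &x));   /* -1 1 0 */
    return 0;
}
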
+ */ + tdata = prof_tdata_get(tsd, true); + if (tdata != NULL) { + prof_sample_threshold_update(tdata); + } + } - return (ret); + if ((uintptr_t)tctx > (uintptr_t)1U) { + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + tctx->prepared = false; + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { + prof_tctx_destroy(tsd, tctx); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } + } } -static inline void -prof_enter(prof_tdata_t *prof_tdata) -{ +void +prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) { + prof_tctx_set(tsdn, ptr, usize, NULL, tctx); - cassert(config_prof); + malloc_mutex_lock(tsdn, tctx->tdata->lock); + tctx->cnts.curobjs++; + tctx->cnts.curbytes += usize; + if (opt_prof_accum) { + tctx->cnts.accumobjs++; + tctx->cnts.accumbytes += usize; + } + tctx->prepared = false; + malloc_mutex_unlock(tsdn, tctx->tdata->lock); +} + +void +prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + assert(tctx->cnts.curobjs > 0); + assert(tctx->cnts.curbytes >= usize); + tctx->cnts.curobjs--; + tctx->cnts.curbytes -= usize; + + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { + prof_tctx_destroy(tsd, tctx); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } +} - assert(prof_tdata->enq == false); - prof_tdata->enq = true; +void +bt_init(prof_bt_t *bt, void **vec) { + cassert(config_prof); - malloc_mutex_lock(&bt2ctx_mtx); + bt->vec = vec; + bt->len = 0; } -static inline void -prof_leave(prof_tdata_t *prof_tdata) -{ - bool idump, gdump; +static void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + if (tdata != NULL) { + assert(!tdata->enq); + tdata->enq = true; + } + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); +} + +static void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); - malloc_mutex_unlock(&bt2ctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - assert(prof_tdata->enq); - prof_tdata->enq = false; - idump = prof_tdata->enq_idump; - prof_tdata->enq_idump = false; - gdump = prof_tdata->enq_gdump; - prof_tdata->enq_gdump = false; + if (tdata != NULL) { + bool idump, gdump; - if (idump) - prof_idump(); - if (gdump) - prof_gdump(); + assert(tdata->enq); + tdata->enq = false; + idump = tdata->enq_idump; + tdata->enq_idump = false; + gdump = tdata->enq_gdump; + tdata->enq_gdump = false; + + if (idump) { + prof_idump(tsd_tsdn(tsd)); + } + if (gdump) { + prof_gdump(tsd_tsdn(tsd)); + } + } } #ifdef JEMALLOC_PROF_LIBUNWIND void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - unw_context_t uc; - unw_cursor_t cursor; - unsigned i; - int err; +prof_backtrace(prof_bt_t *bt) { + int nframes; cassert(config_prof); assert(bt->len == 0); assert(bt->vec != NULL); - unw_getcontext(&uc); - unw_init_local(&cursor, &uc); - - /* Throw away (nignore+1) stack frames, if that many exist. */ - for (i = 0; i < nignore + 1; i++) { - err = unw_step(&cursor); - if (err <= 0) - return; - } - - /* - * Iterate over stack frames until there are no more, or until no space - * remains in bt. 
- */ - for (i = 0; i < PROF_BT_MAX; i++) { - unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); - bt->len++; - err = unw_step(&cursor); - if (err <= 0) - break; + nframes = unw_backtrace(bt->vec, PROF_BT_MAX); + if (nframes <= 0) { + return; } + bt->len = nframes; } #elif (defined(JEMALLOC_PROF_LIBGCC)) static _Unwind_Reason_Code -prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) -{ - +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); - return (_URC_NO_REASON); + return _URC_NO_REASON; } static _Unwind_Reason_Code -prof_unwind_callback(struct _Unwind_Context *context, void *arg) -{ +prof_unwind_callback(struct _Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + void *ip; cassert(config_prof); - if (data->nignore > 0) - data->nignore--; - else { - data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context); - data->bt->len++; - if (data->bt->len == data->max) - return (_URC_END_OF_STACK); + ip = (void *)_Unwind_GetIP(context); + if (ip == NULL) { + return _URC_END_OF_STACK; + } + data->bt->vec[data->bt->len] = ip; + data->bt->len++; + if (data->bt->len == data->max) { + return _URC_END_OF_STACK; } - return (_URC_NO_REASON); + return _URC_NO_REASON; } void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX}; +prof_backtrace(prof_bt_t *bt) { + prof_unwind_data_t data = {bt, PROF_BT_MAX}; cassert(config_prof); @@ -232,25 +368,24 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore) } #elif (defined(JEMALLOC_PROF_GCC)) void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ -#define BT_FRAME(i) \ - if ((i) < nignore + PROF_BT_MAX) { \ +prof_backtrace(prof_bt_t *bt) { +#define BT_FRAME(i) \ + if ((i) < PROF_BT_MAX) { \ void *p; \ - if (__builtin_frame_address(i) == 0) \ + if (__builtin_frame_address(i) == 0) { \ return; \ + } \ p = __builtin_return_address(i); \ - if (p == NULL) \ + if (p == NULL) { \ return; \ - if (i >= nignore) { \ - bt->vec[(i) - nignore] = p; \ - bt->len = (i) - nignore + 1; \ } \ - } else \ - return; + bt->vec[(i)] = p; \ + bt->len = (i) + 1; \ + } else { \ + return; \ + } cassert(config_prof); - assert(nignore <= 3); BT_FRAME(0) BT_FRAME(1) @@ -392,307 +527,452 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore) BT_FRAME(125) BT_FRAME(126) BT_FRAME(127) - - /* Extras to compensate for nignore. */ - BT_FRAME(128) - BT_FRAME(129) - BT_FRAME(130) #undef BT_FRAME } #else void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - +prof_backtrace(prof_bt_t *bt) { cassert(config_prof); not_reached(); } #endif static malloc_mutex_t * -prof_ctx_mutex_choose(void) -{ - unsigned nctxs = atomic_add_u(&cum_ctxs, 1); +prof_gctx_mutex_choose(void) { + unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); - return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]); + return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; } -static void -prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt) -{ +static malloc_mutex_t * +prof_tdata_mutex_choose(uint64_t thr_uid) { + return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; +} - ctx->bt = bt; - ctx->lock = prof_ctx_mutex_choose(); +static prof_gctx_t * +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { + /* + * Create a single allocation that has space for vec of length bt->len. 
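
prof_gctx_create() below sizes one allocation as offsetof(prof_gctx_t, vec) plus bt->len pointers, so the duplicated backtrace lives in the same block as its header. The same layout trick, reduced to a standalone sketch with invented names (bt_copy_t, bt_copy_create), looks like this:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    unsigned len;
    void *vec[];          /* flexible array member: frames stored in-line */
} bt_copy_t;

static bt_copy_t *
bt_copy_create(void **frames, unsigned len) {
    /* One allocation sized for the header plus len pointers, so the copy is
     * released with a single free() and stays cache-local. */
    bt_copy_t *copy = malloc(offsetof(bt_copy_t, vec) + len * sizeof(void *));
    if (copy == NULL) {
        return NULL;
    }
    copy->len = len;
    memcpy(copy->vec, frames, len * sizeof(void *));
    return copy;
}

int
main(void) {
    int a, b;
    void *frames[2] = { &a, &b };
    bt_copy_t *copy = bt_copy_create(frames, 2);
    if (copy != NULL) {
        printf("copied %u frames\n", copy->len);
        free(copy);
    }
    return 0;
}
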
+ */ + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); + if (gctx == NULL) { + return NULL; + } + gctx->lock = prof_gctx_mutex_choose(); /* * Set nlimbo to 1, in order to avoid a race condition with - * prof_ctx_merge()/prof_ctx_destroy(). + * prof_tctx_destroy()/prof_gctx_try_destroy(). */ - ctx->nlimbo = 1; - ql_elm_new(ctx, dump_link); - memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t)); - ql_new(&ctx->cnts_ql); + gctx->nlimbo = 1; + tctx_tree_new(&gctx->tctxs); + /* Duplicate bt. */ + memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); + gctx->bt.vec = gctx->vec; + gctx->bt.len = bt->len; + return gctx; } static void -prof_ctx_destroy(prof_ctx_t *ctx) -{ - prof_tdata_t *prof_tdata; - +prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, + prof_tdata_t *tdata) { cassert(config_prof); /* - * Check that ctx is still unused by any thread cache before destroying - * it. prof_lookup() increments ctx->nlimbo in order to avoid a race - * condition with this function, as does prof_ctx_merge() in order to - * avoid a race between the main body of prof_ctx_merge() and entry + * Check that gctx is still unused by any thread cache before destroying + * it. prof_lookup() increments gctx->nlimbo in order to avoid a race + * condition with this function, as does prof_tctx_destroy() in order to + * avoid a race between the main body of prof_tctx_destroy() and entry * into this function. */ - prof_tdata = prof_tdata_get(false); - assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX); - prof_enter(prof_tdata); - malloc_mutex_lock(ctx->lock); - if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 && - ctx->nlimbo == 1) { - assert(ctx->cnt_merged.curbytes == 0); - assert(ctx->cnt_merged.accumobjs == 0); - assert(ctx->cnt_merged.accumbytes == 0); - /* Remove ctx from bt2ctx. */ - if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL)) + prof_enter(tsd, tdata_self); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + assert(gctx->nlimbo != 0); + if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { + /* Remove gctx from bt2gctx. */ + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { not_reached(); - prof_leave(prof_tdata); - /* Destroy ctx. */ - malloc_mutex_unlock(ctx->lock); - bt_destroy(ctx->bt); - idalloc(ctx); + } + prof_leave(tsd, tdata_self); + /* Destroy gctx. */ + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); } else { /* - * Compensate for increment in prof_ctx_merge() or + * Compensate for increment in prof_tctx_destroy() or * prof_lookup(). 
*/ - ctx->nlimbo--; - malloc_mutex_unlock(ctx->lock); - prof_leave(prof_tdata); + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_leave(tsd, tdata_self); } } -static void -prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt) -{ - bool destroy; +static bool +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - cassert(config_prof); + if (opt_prof_accum) { + return false; + } + if (tctx->cnts.curobjs != 0) { + return false; + } + if (tctx->prepared) { + return false; + } + return true; +} + +static bool +prof_gctx_should_destroy(prof_gctx_t *gctx) { + if (opt_prof_accum) { + return false; + } + if (!tctx_tree_empty(&gctx->tctxs)) { + return false; + } + if (gctx->nlimbo != 0) { + return false; + } + return true; +} - /* Merge cnt stats and detach from ctx. */ - malloc_mutex_lock(ctx->lock); - ctx->cnt_merged.curobjs += cnt->cnts.curobjs; - ctx->cnt_merged.curbytes += cnt->cnts.curbytes; - ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs; - ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes; - ql_remove(&ctx->cnts_ql, cnt, cnts_link); - if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL && - ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) { +static void +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { + prof_tdata_t *tdata = tctx->tdata; + prof_gctx_t *gctx = tctx->gctx; + bool destroy_tdata, destroy_tctx, destroy_gctx; + + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + assert(tctx->cnts.curobjs == 0); + assert(tctx->cnts.curbytes == 0); + assert(!opt_prof_accum); + assert(tctx->cnts.accumobjs == 0); + assert(tctx->cnts.accumbytes == 0); + + ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + switch (tctx->state) { + case prof_tctx_state_nominal: + tctx_tree_remove(&gctx->tctxs, tctx); + destroy_tctx = true; + if (prof_gctx_should_destroy(gctx)) { + /* + * Increment gctx->nlimbo in order to keep another + * thread from winning the race to destroy gctx while + * this one has gctx->lock dropped. Without this, it + * would be possible for another thread to: + * + * 1) Sample an allocation associated with gctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_gctx_try_destroy(gctx). + * + * The result would be that gctx no longer exists by the + * time this thread accesses it in + * prof_gctx_try_destroy(). + */ + gctx->nlimbo++; + destroy_gctx = true; + } else { + destroy_gctx = false; + } + break; + case prof_tctx_state_dumping: /* - * Increment ctx->nlimbo in order to keep another thread from - * winning the race to destroy ctx while this one has ctx->lock - * dropped. Without this, it would be possible for another - * thread to: - * - * 1) Sample an allocation associated with ctx. - * 2) Deallocate the sampled object. - * 3) Successfully prof_ctx_destroy(ctx). - * - * The result would be that ctx no longer exists by the time - * this thread accesses it in prof_ctx_destroy(). + * A dumping thread needs tctx to remain valid until dumping + * has finished. Change state such that the dumping thread will + * complete destruction during a late dump iteration phase. 
*/ - ctx->nlimbo++; - destroy = true; - } else - destroy = false; - malloc_mutex_unlock(ctx->lock); - if (destroy) - prof_ctx_destroy(ctx); + tctx->state = prof_tctx_state_purgatory; + destroy_tctx = false; + destroy_gctx = false; + break; + default: + not_reached(); + destroy_tctx = false; + destroy_gctx = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + if (destroy_gctx) { + prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, + tdata); + } + + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, false); + } + + if (destroy_tctx) { + idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); + } } static bool -prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey, - prof_ctx_t **p_ctx, bool *p_new_ctx) -{ +prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, + void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { union { - prof_ctx_t *p; + prof_gctx_t *p; void *v; - } ctx; + } gctx, tgctx; union { prof_bt_t *p; void *v; } btkey; - bool new_ctx; + bool new_gctx; - prof_enter(prof_tdata); - if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) { + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { /* bt has never been seen before. Insert it. */ - ctx.v = imalloc(sizeof(prof_ctx_t)); - if (ctx.v == NULL) { - prof_leave(prof_tdata); - return (true); - } - btkey.p = bt_dup(bt); - if (btkey.v == NULL) { - prof_leave(prof_tdata); - idalloc(ctx.v); - return (true); + prof_leave(tsd, tdata); + tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); + if (tgctx.v == NULL) { + return true; } - prof_ctx_init(ctx.p, btkey.p); - if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) { - /* OOM. */ - prof_leave(prof_tdata); - idalloc(btkey.v); - idalloc(ctx.v); - return (true); + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + gctx.p = tgctx.p; + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, + true, true); + return true; + } + new_gctx = true; + } else { + new_gctx = false; } - new_ctx = true; } else { + tgctx.v = NULL; + new_gctx = false; + } + + if (!new_gctx) { /* * Increment nlimbo, in order to avoid a race condition with - * prof_ctx_merge()/prof_ctx_destroy(). + * prof_tctx_destroy()/prof_gctx_try_destroy(). */ - malloc_mutex_lock(ctx.p->lock); - ctx.p->nlimbo++; - malloc_mutex_unlock(ctx.p->lock); - new_ctx = false; + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); + gctx.p->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); + new_gctx = false; + + if (tgctx.v != NULL) { + /* Lost race to insert. 
*/ + idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, + true); + } } - prof_leave(prof_tdata); + prof_leave(tsd, tdata); *p_btkey = btkey.v; - *p_ctx = ctx.p; - *p_new_ctx = new_ctx; - return (false); + *p_gctx = gctx.p; + *p_new_gctx = new_gctx; + return false; } -prof_thr_cnt_t * -prof_lookup(prof_bt_t *bt) -{ +prof_tctx_t * +prof_lookup(tsd_t *tsd, prof_bt_t *bt) { union { - prof_thr_cnt_t *p; + prof_tctx_t *p; void *v; } ret; - prof_tdata_t *prof_tdata; + prof_tdata_t *tdata; + bool not_found; cassert(config_prof); - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (NULL); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return NULL; + } - if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) { + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); + if (!not_found) { /* Note double negative! */ + ret.p->prepared = true; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (not_found) { void *btkey; - prof_ctx_t *ctx; - bool new_ctx; + prof_gctx_t *gctx; + bool new_gctx, error; /* * This thread's cache lacks bt. Look for it in the global * cache. */ - if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx)) - return (NULL); + if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, + &new_gctx)) { + return NULL; + } - /* Link a prof_thd_cnt_t into ctx for this thread. */ - if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) { - assert(ckh_count(&prof_tdata->bt2cnt) > 0); - /* - * Flush the least recently used cnt in order to keep - * bt2cnt from becoming too large. - */ - ret.p = ql_last(&prof_tdata->lru_ql, lru_link); - assert(ret.v != NULL); - if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, - NULL, NULL)) - not_reached(); - ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); - prof_ctx_merge(ret.p->ctx, ret.p); - /* ret can now be re-used. */ - } else { - assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX); - /* Allocate and partially initialize a new cnt. */ - ret.v = imalloc(sizeof(prof_thr_cnt_t)); - if (ret.p == NULL) { - if (new_ctx) - prof_ctx_destroy(ctx); - return (NULL); + /* Link a prof_tctx_t into gctx for this thread. */ + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); + if (ret.p == NULL) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } - ql_elm_new(ret.p, cnts_link); - ql_elm_new(ret.p, lru_link); + return NULL; } - /* Finish initializing ret. 
*/ - ret.p->ctx = ctx; - ret.p->epoch = 0; + ret.p->tdata = tdata; + ret.p->thr_uid = tdata->thr_uid; + ret.p->thr_discrim = tdata->thr_discrim; memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); - if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) { - if (new_ctx) - prof_ctx_destroy(ctx); - idalloc(ret.v); - return (NULL); + ret.p->gctx = gctx; + ret.p->tctx_uid = tdata->tctx_uid_next++; + ret.p->prepared = true; + ret.p->state = prof_tctx_state_initializing; + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (error) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } + idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); + return NULL; } - ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); - malloc_mutex_lock(ctx->lock); - ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link); - ctx->nlimbo--; - malloc_mutex_unlock(ctx->lock); - } else { - /* Move ret to the front of the LRU. */ - ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); - ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + ret.p->state = prof_tctx_state_nominal; + tctx_tree_insert(&gctx->tctxs, ret.p); + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } - return (ret.p); + return ret.p; +} + +/* + * The bodies of this function and prof_leakcheck() are compiled out unless heap + * profiling is enabled, so that it is possible to compile jemalloc with + * floating point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a workaround for + * versions of glibc that don't properly save/restore floating point registers + * during dynamic lazy symbol loading (which internally calls into whatever + * malloc implementation happens to be integrated into the application). Note + * that some compilers (e.g. gcc 4.8) may use floating point registers for fast + * memory moves, so jemalloc must be compiled with such optimizations disabled + * (e.g. + * -mno-sse) in order for the workaround to be complete. + */ +void +prof_sample_threshold_update(prof_tdata_t *tdata) { +#ifdef JEMALLOC_PROF + uint64_t r; + double u; + + if (!config_prof) { + return; + } + + if (lg_prof_sample == 0) { + tdata->bytes_until_sample = 0; + return; + } + + /* + * Compute sample interval as a geometrically distributed random + * variable with mean (2^lg_prof_sample). 
+ * + * __ __ + * | log(u) | 1 + * tdata->bytes_until_sample = | -------- |, where p = --------------- + * | log(1-p) | lg_prof_sample + * 2 + * + * For more information on the math, see: + * + * Non-Uniform Random Variate Generation + * Luc Devroye + * Springer-Verlag, New York, 1986 + * pp 500 + * (http://luc.devroye.org/rnbookindex.html) + */ + r = prng_lg_range_u64(&tdata->prng_state, 53); + u = (double)r * (1.0/9007199254740992.0L); + tdata->bytes_until_sample = (uint64_t)(log(u) / + log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + + (uint64_t)1U; +#endif } #ifdef JEMALLOC_JET +static prof_tdata_t * +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + size_t *tdata_count = (size_t *)arg; + + (*tdata_count)++; + + return NULL; +} + size_t -prof_bt_count(void) -{ +prof_tdata_count(void) { + size_t tdata_count = 0; + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, + (void *)&tdata_count); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + + return tdata_count; +} + +size_t +prof_bt_count(void) { size_t bt_count; - prof_tdata_t *prof_tdata; + tsd_t *tsd; + prof_tdata_t *tdata; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (0); + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return 0; + } - prof_enter(prof_tdata); - bt_count = ckh_count(&bt2ctx); - prof_leave(prof_tdata); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); + bt_count = ckh_count(&bt2gctx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - return (bt_count); + return bt_count; } #endif -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open_impl) -#endif static int -prof_dump_open(bool propagate_err, const char *filename) -{ +prof_dump_open_impl(bool propagate_err, const char *filename) { int fd; fd = creat(filename, 0644); - if (fd == -1 && propagate_err == false) { + if (fd == -1 && !propagate_err) { malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n", filename); - if (opt_abort) + if (opt_abort) { abort(); + } } - return (fd); + return fd; } -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open) -prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl); -#endif +prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; static bool -prof_dump_flush(bool propagate_err) -{ +prof_dump_flush(bool propagate_err) { bool ret = false; ssize_t err; @@ -700,22 +980,22 @@ prof_dump_flush(bool propagate_err) err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); if (err == -1) { - if (propagate_err == false) { + if (!propagate_err) { malloc_write("<jemalloc>: write() failed during heap " "profile flush\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } ret = true; } prof_dump_buf_end = 0; - return (ret); + return ret; } static bool -prof_dump_close(bool propagate_err) -{ +prof_dump_close(bool propagate_err) { bool ret; assert(prof_dump_fd != -1); @@ -723,13 +1003,12 @@ prof_dump_close(bool propagate_err) close(prof_dump_fd); prof_dump_fd = -1; - return (ret); + return ret; } static bool -prof_dump_write(bool propagate_err, const char *s) -{ - unsigned i, slen, n; +prof_dump_write(bool propagate_err, const char *s) { + size_t i, slen, n; cassert(config_prof); @@ -737,9 +1016,11 @@ prof_dump_write(bool propagate_err, const char *s) slen = strlen(s); while (i < slen) { /* Flush the buffer if it 
is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) - if (prof_dump_flush(propagate_err) && propagate_err) - return (true); + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + if (prof_dump_flush(propagate_err) && propagate_err) { + return true; + } + } if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { /* Finish writing. */ @@ -753,13 +1034,12 @@ prof_dump_write(bool propagate_err, const char *s) i += n; } - return (false); + return false; } -JEMALLOC_ATTR(format(printf, 2, 3)) +JEMALLOC_FORMAT_PRINTF(2, 3) static bool -prof_dump_printf(bool propagate_err, const char *format, ...) -{ +prof_dump_printf(bool propagate_err, const char *format, ...) { bool ret; va_list ap; char buf[PROF_PRINTF_BUFSIZE]; @@ -769,179 +1049,401 @@ prof_dump_printf(bool propagate_err, const char *format, ...) va_end(ap); ret = prof_dump_write(propagate_err, buf); - return (ret); + return ret; +} + +static void +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + malloc_mutex_lock(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + return; + case prof_tctx_state_nominal: + tctx->state = prof_tctx_state_dumping; + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + + memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); + + tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + tdata->cnt_summed.accumobjs += + tctx->dump_cnts.accumobjs; + tdata->cnt_summed.accumbytes += + tctx->dump_cnts.accumbytes; + } + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + not_reached(); + } } static void -prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx, - prof_ctx_list_t *ctx_ql) -{ - prof_thr_cnt_t *thr_cnt; - prof_cnt_t tcnt; +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); + + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; + gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; + } +} + +static prof_tctx_t * +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); + break; + default: + not_reached(); + } + + return NULL; +} + +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_tctx_t * +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + case prof_tctx_state_nominal: + /* Not captured by this dump. 
*/ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + if (prof_dump_printf(arg->propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " + "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, + tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, + tctx->dump_cnts.accumbytes)) { + return tctx; + } + break; + default: + not_reached(); + } + return NULL; +} +static prof_tctx_t * +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + prof_tctx_t *ret; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + tctx->state = prof_tctx_state_nominal; + break; + case prof_tctx_state_purgatory: + ret = tctx; + goto label_return; + default: + not_reached(); + } + + ret = NULL; +label_return: + return ret; +} + +static void +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); - malloc_mutex_lock(ctx->lock); + malloc_mutex_lock(tsdn, gctx->lock); /* - * Increment nlimbo so that ctx won't go away before dump. - * Additionally, link ctx into the dump list so that it is included in + * Increment nlimbo so that gctx won't go away before dump. + * Additionally, link gctx into the dump list so that it is included in * prof_dump()'s second pass. */ - ctx->nlimbo++; - ql_tail_insert(ctx_ql, ctx, dump_link); + gctx->nlimbo++; + gctx_tree_insert(gctxs, gctx); - memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t)); - ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) { - volatile unsigned *epoch = &thr_cnt->epoch; + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - while (true) { - unsigned epoch0 = *epoch; + malloc_mutex_unlock(tsdn, gctx->lock); +} - /* Make sure epoch is even. */ - if (epoch0 & 1U) - continue; +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; + +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); + if (gctx->cnt_summed.curobjs != 0) { + arg->leak_ngctx++; + } + malloc_mutex_unlock(arg->tsdn, gctx->lock); - memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t)); + return NULL; +} - /* Terminate if epoch didn't change while reading. */ - if (*epoch == epoch0) - break; - } +static void +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { + prof_tdata_t *tdata = prof_tdata_get(tsd, false); + prof_gctx_t *gctx; - ctx->cnt_summed.curobjs += tcnt.curobjs; - ctx->cnt_summed.curbytes += tcnt.curbytes; - if (opt_prof_accum) { - ctx->cnt_summed.accumobjs += tcnt.accumobjs; - ctx->cnt_summed.accumbytes += tcnt.accumbytes; + /* + * Standard tree iteration won't work here, because as soon as we + * decrement gctx->nlimbo and unlock gctx, another thread can + * concurrently destroy it, which will corrupt the tree. Therefore, + * tear down the tree one node at a time during iteration. 
+ */ + while ((gctx = gctx_tree_first(gctxs)) != NULL) { + gctx_tree_remove(gctxs, gctx); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + { + prof_tctx_t *next; + + next = NULL; + do { + prof_tctx_t *to_destroy = + tctx_tree_iter(&gctx->tctxs, next, + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); + if (to_destroy != NULL) { + next = tctx_tree_next(&gctx->tctxs, + to_destroy); + tctx_tree_remove(&gctx->tctxs, + to_destroy); + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, NULL, true, true); + } else { + next = NULL; + } + } while (next != NULL); + } + gctx->nlimbo--; + if (prof_gctx_should_destroy(gctx)) { + gctx->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } } +} - if (ctx->cnt_summed.curobjs != 0) - (*leak_nctx)++; +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + +static prof_tdata_t * +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) { + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, tdata->lock); + if (!tdata->expired) { + size_t tabind; + union { + prof_tctx_t *p; + void *v; + } tctx; + + tdata->dumping = true; + memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); + for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, + &tctx.v);) { + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + } - /* Add to cnt_all. */ - cnt_all->curobjs += ctx->cnt_summed.curobjs; - cnt_all->curbytes += ctx->cnt_summed.curbytes; - if (opt_prof_accum) { - cnt_all->accumobjs += ctx->cnt_summed.accumobjs; - cnt_all->accumbytes += ctx->cnt_summed.accumbytes; + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; + if (opt_prof_accum) { + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; + } + } else { + tdata->dumping = false; } + malloc_mutex_unlock(arg->tsdn, tdata->lock); - malloc_mutex_unlock(ctx->lock); + return NULL; } -static bool -prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) -{ - - if (opt_lg_prof_sample == 0) { - if (prof_dump_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heapprofile\n", - cnt_all->curobjs, cnt_all->curbytes, - cnt_all->accumobjs, cnt_all->accumbytes)) - return (true); - } else { - if (prof_dump_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n", - cnt_all->curobjs, cnt_all->curbytes, - cnt_all->accumobjs, cnt_all->accumbytes, - ((uint64_t)1U << opt_lg_prof_sample))) - return (true); +static prof_tdata_t * +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + bool propagate_err = *(bool *)arg; + + if (!tdata->dumping) { + return NULL; } - return (false); + if (prof_dump_printf(propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", + tdata->thr_uid, tdata->cnt_summed.curobjs, + tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, + tdata->cnt_summed.accumbytes, + (tdata->thread_name != NULL) ? " " : "", + (tdata->thread_name != NULL) ? 
tdata->thread_name : "")) { + return tdata; + } + return NULL; } -static void -prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql) -{ - - ctx->nlimbo--; - ql_remove(ctx_ql, ctx, dump_link); -} +static bool +prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { + bool ret; -static void -prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql) -{ + if (prof_dump_printf(propagate_err, + "heap_v2/%"FMTu64"\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { + return true; + } - malloc_mutex_lock(ctx->lock); - prof_dump_ctx_cleanup_locked(ctx, ctx_ql); - malloc_mutex_unlock(ctx->lock); + malloc_mutex_lock(tsdn, &tdatas_mtx); + ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, + (void *)&propagate_err) != NULL); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + return ret; } +prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; static bool -prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt, - prof_ctx_list_t *ctx_ql) -{ +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; cassert(config_prof); - - /* - * Current statistics can sum to 0 as a result of unmerged per thread - * statistics. Additionally, interval- and growth-triggered dumps can - * occur between the time a ctx is created and when its statistics are - * filled in. Avoid dumping any ctx that is an artifact of either - * implementation detail. - */ - malloc_mutex_lock(ctx->lock); - if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) || - (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) { - assert(ctx->cnt_summed.curobjs == 0); - assert(ctx->cnt_summed.curbytes == 0); - assert(ctx->cnt_summed.accumobjs == 0); - assert(ctx->cnt_summed.accumbytes == 0); + malloc_mutex_assert_owner(tsdn, gctx->lock); + + /* Avoid dumping such gctx's that have no useful data. 
*/ + if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || + (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { + assert(gctx->cnt_summed.curobjs == 0); + assert(gctx->cnt_summed.curbytes == 0); + assert(gctx->cnt_summed.accumobjs == 0); + assert(gctx->cnt_summed.accumbytes == 0); ret = false; goto label_return; } - if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @", - ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes, - ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) { + if (prof_dump_printf(propagate_err, "@")) { ret = true; goto label_return; } - for (i = 0; i < bt->len; i++) { - if (prof_dump_printf(propagate_err, " %#"PRIxPTR, + if (prof_dump_printf(propagate_err, " %#"FMTxPTR, (uintptr_t)bt->vec[i])) { ret = true; goto label_return; } } - if (prof_dump_write(propagate_err, "\n")) { + if (prof_dump_printf(propagate_err, + "\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, + gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { + ret = true; + goto label_return; + } + + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; + if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, + (void *)&prof_tctx_dump_iter_arg) != NULL) { ret = true; goto label_return; } ret = false; label_return: - prof_dump_ctx_cleanup_locked(ctx, ctx_ql); - malloc_mutex_unlock(ctx->lock); - return (ret); + return ret; +} + +#ifndef _WIN32 +JEMALLOC_FORMAT_PRINTF(1, 2) +static int +prof_open_maps(const char *format, ...) { + int mfd; + va_list ap; + char filename[PATH_MAX + 1]; + + va_start(ap, format); + malloc_vsnprintf(filename, sizeof(filename), format, ap); + va_end(ap); + mfd = open(filename, O_RDONLY | O_CLOEXEC); + + return mfd; +} +#endif + +static int +prof_getpid(void) { +#ifdef _WIN32 + return GetCurrentProcessId(); +#else + return getpid(); +#endif } static bool -prof_dump_maps(bool propagate_err) -{ +prof_dump_maps(bool propagate_err) { bool ret; int mfd; - char filename[PATH_MAX + 1]; cassert(config_prof); #ifdef __FreeBSD__ - malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map"); + mfd = prof_open_maps("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented #else - malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps", - (int)getpid()); + { + int pid = prof_getpid(); + + mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); + if (mfd == -1) { + mfd = prof_open_maps("/proc/%d/maps", pid); + } + } #endif - mfd = open(filename, O_RDONLY); if (mfd != -1) { ssize_t nread; @@ -971,214 +1473,391 @@ prof_dump_maps(bool propagate_err) ret = false; label_return: - if (mfd != -1) + if (mfd != -1) { close(mfd); - return (ret); + } + return ret; } +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ static void -prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx, - const char *filename) -{ - +prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, + const char *filename) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. 
+ */ if (cnt_all->curbytes != 0) { - malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %" - PRId64" object%s, %zu context%s\n", - cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "", - cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "", - leak_nctx, (leak_nctx != 1) ? "s" : ""); + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); malloc_printf( - "<jemalloc>: Run pprof on \"%s\" for leak detail\n", + "<jemalloc>: Run jeprof on \"%s\" for leak detail\n", filename); } +#endif } -static bool -prof_dump(bool propagate_err, const char *filename, bool leakcheck) -{ - prof_tdata_t *prof_tdata; - prof_cnt_t cnt_all; +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_gctx_t * +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + prof_gctx_t *ret; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { + ret = gctx; + goto label_return; + } + + ret = NULL; +label_return: + malloc_mutex_unlock(arg->tsdn, gctx->lock); + return ret; +} + +static void +prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + prof_gctx_tree_t *gctxs) { size_t tabind; union { - prof_ctx_t *p; + prof_gctx_t *p; void *v; - } ctx; - size_t leak_nctx; - prof_ctx_list_t ctx_ql; - - cassert(config_prof); + } gctx; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (true); + prof_enter(tsd, tdata); - malloc_mutex_lock(&prof_dump_mtx); + /* + * Put gctx's in limbo and clear their counters in preparation for + * summing. + */ + gctx_tree_new(gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); + } - /* Merge per thread profile stats, and sum them in cnt_all. */ - memset(&cnt_all, 0, sizeof(prof_cnt_t)); - leak_nctx = 0; - ql_new(&ctx_ql); - prof_enter(prof_tdata); - for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;) - prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql); - prof_leave(prof_tdata); + /* + * Iterate over tdatas, and for the non-expired ones snapshot their tctx + * stats and merge them into the associated gctx's. + */ + prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + /* Merge tctx stats into gctx's. 
*/ + prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg->leak_ngctx = 0; + gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, + (void *)prof_gctx_merge_iter_arg); + + prof_leave(tsd, tdata); +} +static bool +prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, + prof_gctx_tree_t *gctxs) { /* Create dump file. */ - if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) - goto label_open_close_error; + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { + return true; + } /* Dump profile header. */ - if (prof_dump_header(propagate_err, &cnt_all)) + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg->cnt_all)) { goto label_write_error; + } - /* Dump per ctx profile stats. */ - while ((ctx.p = ql_first(&ctx_ql)) != NULL) { - if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql)) - goto label_write_error; + /* Dump per gctx profile stats. */ + prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg->propagate_err = propagate_err; + if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, + (void *)prof_gctx_dump_iter_arg) != NULL) { + goto label_write_error; } /* Dump /proc/<pid>/maps if possible. */ - if (prof_dump_maps(propagate_err)) + if (prof_dump_maps(propagate_err)) { goto label_write_error; + } - if (prof_dump_close(propagate_err)) - goto label_open_close_error; - - malloc_mutex_unlock(&prof_dump_mtx); - - if (leakcheck) - prof_leakcheck(&cnt_all, leak_nctx, filename); + if (prof_dump_close(propagate_err)) { + return true; + } - return (false); + return false; label_write_error: prof_dump_close(propagate_err); -label_open_close_error: - while ((ctx.p = ql_first(&ctx_ql)) != NULL) - prof_dump_ctx_cleanup(ctx.p, &ctx_ql); - malloc_mutex_unlock(&prof_dump_mtx); - return (true); + return true; } -#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) -#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) -static void -prof_dump_filename(char *filename, char v, int64_t vseq) -{ +static bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_tdata_t * tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + + prof_gctx_tree_t gctxs; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, + &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, + &prof_gctx_dump_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + post_reentrancy(tsd); + + if (err) { + return true; + } + + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } + return false; +} + +#ifdef JEMALLOC_JET +void +prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes) { + tsd_t *tsd; + prof_tdata_t *tdata; + struct 
prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + prof_gctx_tree_t gctxs; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + if (curobjs != NULL) { + *curobjs = 0; + } + if (curbytes != NULL) { + *curbytes = 0; + } + if (accumobjs != NULL) { + *accumobjs = 0; + } + if (accumbytes != NULL) { + *accumbytes = 0; + } + return; + } + + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + if (curobjs != NULL) { + *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; + } + if (curbytes != NULL) { + *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; + } + if (accumobjs != NULL) { + *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; + } + if (accumbytes != NULL) { + *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; + } +} +#endif +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +static void +prof_dump_filename(char *filename, char v, uint64_t vseq) { cassert(config_prof); if (vseq != VSEQ_INVALID) { /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"PRIu64".%c%"PRId64".heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); + "%s.%d.%"FMTu64".%c%"FMTu64".heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); } else { /* "<prefix>.<pid>.<seq>.<v>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"PRIu64".%c.heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v); + "%s.%d.%"FMTu64".%c.heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v); } prof_dump_seq++; } static void -prof_fdump(void) -{ +prof_fdump(void) { + tsd_t *tsd; char filename[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); + assert(opt_prof_final); + assert(opt_prof_prefix[0] != '\0'); - if (prof_booted == false) + if (!prof_booted) { return; + } + tsd = tsd_fetch(); + assert(tsd_reentrancy_level_get(tsd) == 0); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename, 'f', VSEQ_INVALID); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, opt_prof_leak); +} - if (opt_prof_final && opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, opt_prof_leak); +bool +prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { + cassert(config_prof); + +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", + WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { + return true; } + prof_accum->accumbytes = 0; +#else + atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); +#endif + return false; } void -prof_idump(void) -{ - prof_tdata_t *prof_tdata; - char filename[PATH_MAX + 1]; +prof_idump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; cassert(config_prof); - if (prof_booted == false) + if (!prof_booted || tsdn_null(tsdn)) { + return; + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { return; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) + } + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { return; - if (prof_tdata->enq) { - prof_tdata->enq_idump = true; + } + if (tdata->enq) { + tdata->enq_idump = true; return; } if (opt_prof_prefix[0] != '\0') { - 
malloc_mutex_lock(&prof_dump_seq_mtx); + char filename[PATH_MAX + 1]; + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_iseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, false); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); } } bool -prof_mdump(const char *filename) -{ - char filename_buf[DUMP_FILENAME_BUFSIZE]; - +prof_mdump(tsd_t *tsd, const char *filename) { cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); - if (opt_prof == false || prof_booted == false) - return (true); - + if (!opt_prof || !prof_booted) { + return true; + } + char filename_buf[DUMP_FILENAME_BUFSIZE]; if (filename == NULL) { /* No filename specified, so automatically generate one. */ - if (opt_prof_prefix[0] == '\0') - return (true); - malloc_mutex_lock(&prof_dump_seq_mtx); + if (opt_prof_prefix[0] == '\0') { + return true; + } + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); filename = filename_buf; } - return (prof_dump(true, filename, false)); + return prof_dump(tsd, true, filename, false); } void -prof_gdump(void) -{ - prof_tdata_t *prof_tdata; - char filename[DUMP_FILENAME_BUFSIZE]; +prof_gdump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; cassert(config_prof); - if (prof_booted == false) + if (!prof_booted || tsdn_null(tsdn)) { + return; + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { return; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) + } + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { return; - if (prof_tdata->enq) { - prof_tdata->enq_gdump = true; + } + if (tdata->enq) { + tdata->enq_gdump = true; return; } if (opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); + char filename[DUMP_FILENAME_BUFSIZE]; + malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, false); + malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); } } static void -prof_bt_hash(const void *key, size_t r_hash[2]) -{ +prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; cassert(config_prof); @@ -1187,106 +1866,389 @@ prof_bt_hash(const void *key, size_t r_hash[2]) } static bool -prof_bt_keycomp(const void *k1, const void *k2) -{ +prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; cassert(config_prof); - if (bt1->len != bt2->len) - return (false); + if (bt1->len != bt2->len) { + return false; + } return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } -prof_tdata_t * -prof_tdata_init(void) -{ - prof_tdata_t *prof_tdata; +static uint64_t +prof_thr_uid_alloc(tsdn_t *tsdn) { + uint64_t thr_uid; + + malloc_mutex_lock(tsdn, &next_thr_uid_mtx); + thr_uid = next_thr_uid; + next_thr_uid++; + malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); + + return thr_uid; +} + +static prof_tdata_t * +prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, + char *thread_name, bool active) { + prof_tdata_t *tdata; cassert(config_prof); /* Initialize an empty cache for this thread. 
*/ - prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t)); - if (prof_tdata == NULL) - return (NULL); + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (tdata == NULL) { + return NULL; + } - if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp)) { - idalloc(prof_tdata); - return (NULL); + tdata->lock = prof_tdata_mutex_choose(thr_uid); + tdata->thr_uid = thr_uid; + tdata->thr_discrim = thr_discrim; + tdata->thread_name = thread_name; + tdata->attached = true; + tdata->expired = false; + tdata->tctx_uid_next = 0; + + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); + return NULL; } - ql_new(&prof_tdata->lru_ql); - prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX); - if (prof_tdata->vec == NULL) { - ckh_delete(&prof_tdata->bt2cnt); - idalloc(prof_tdata); - return (NULL); + tdata->prng_state = (uint64_t)(uintptr_t)tdata; + prof_sample_threshold_update(tdata); + + tdata->enq = false; + tdata->enq_idump = false; + tdata->enq_gdump = false; + + tdata->dumping = false; + tdata->active = active; + + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_insert(&tdatas, tdata); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + return tdata; +} + +prof_tdata_t * +prof_tdata_init(tsd_t *tsd) { + return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); +} + +static bool +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { + if (tdata->attached && !even_if_attached) { + return false; } + if (ckh_count(&tdata->bt2tctx) != 0) { + return false; + } + return true; +} + +static bool +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsdn, tdata->lock); - prof_tdata->prng_state = 0; - prof_tdata->threshold = 0; - prof_tdata->accum = 0; + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); +} + +static void +prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); - prof_tdata->enq = false; - prof_tdata->enq_idump = false; - prof_tdata->enq_gdump = false; + tdata_tree_remove(&tdatas, tdata); - prof_tdata_tsd_set(&prof_tdata); + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); - return (prof_tdata); + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + } + ckh_delete(tsd, &tdata->bt2tctx); + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); } -void -prof_tdata_cleanup(void *arg) -{ - prof_thr_cnt_t *cnt; - prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg; +static void +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + prof_tdata_destroy_locked(tsd, tdata, even_if_attached); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); +} - cassert(config_prof); +static void +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { + bool destroy_tdata; - if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) { - /* - * Another destructor deallocated memory after this destructor - * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY - * in order to receive another callback. 
- */ - prof_tdata = PROF_TDATA_STATE_PURGATORY; - prof_tdata_tsd_set(&prof_tdata); - } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to PROF_TDATA_STATE_PURGATORY so that other destructors - * wouldn't cause re-creation of the prof_tdata. This time, do - * nothing, so that the destructor will not be called again. - */ - } else if (prof_tdata != NULL) { - /* - * Delete the hash table. All of its contents can still be - * iterated over via the LRU. - */ - ckh_delete(&prof_tdata->bt2cnt); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + if (tdata->attached) { + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); /* - * Iteratively merge cnt's into the global stats and delete - * them. + * Only detach if !destroy_tdata, because detaching would allow + * another thread to win the race to destroy tdata. */ - while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) { - ql_remove(&prof_tdata->lru_ql, cnt, lru_link); - prof_ctx_merge(cnt->ctx, cnt); - idalloc(cnt); + if (!destroy_tdata) { + tdata->attached = false; } - idalloc(prof_tdata->vec); - idalloc(prof_tdata); - prof_tdata = PROF_TDATA_STATE_PURGATORY; - prof_tdata_tsd_set(&prof_tdata); + tsd_prof_tdata_set(tsd, NULL); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, true); } } +prof_tdata_t * +prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { + uint64_t thr_uid = tdata->thr_uid; + uint64_t thr_discrim = tdata->thr_discrim + 1; + char *thread_name = (tdata->thread_name != NULL) ? + prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; + bool active = tdata->active; + + prof_tdata_detach(tsd, tdata); + return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, + active); +} + +static bool +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsdn, tdata->lock); + if (!tdata->expired) { + tdata->expired = true; + destroy_tdata = tdata->attached ? false : + prof_tdata_should_destroy(tsdn, tdata, false); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsdn, tdata->lock); + + return destroy_tdata; +} + +static prof_tdata_t * +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + return (prof_tdata_expire(tsdn, tdata) ? 
tdata : NULL); +} + void -prof_boot0(void) -{ +prof_reset(tsd_t *tsd, size_t lg_sample) { + prof_tdata_t *next; + + assert(lg_sample < (sizeof(uint64_t) << 3)); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + + lg_prof_sample = lg_sample; + + next = NULL; + do { + prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, + prof_tdata_reset_iter, (void *)tsd); + if (to_destroy != NULL) { + next = tdata_tree_next(&tdatas, to_destroy); + prof_tdata_destroy_locked(tsd, to_destroy, false); + } else { + next = NULL; + } + } while (next != NULL); + + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); +} + +void +prof_tdata_cleanup(tsd_t *tsd) { + prof_tdata_t *tdata; + + if (!config_prof) { + return; + } + + tdata = tsd_prof_tdata_get(tsd); + if (tdata != NULL) { + prof_tdata_detach(tsd, tdata); + } +} + +bool +prof_active_get(tsdn_t *tsdn) { + bool prof_active_current; + + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_current = prof_active; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_current; +} +bool +prof_active_set(tsdn_t *tsdn, bool active) { + bool prof_active_old; + + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_old = prof_active; + prof_active = active; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_old; +} + +const char * +prof_thread_name_get(tsd_t *tsd) { + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return ""; + } + return (tdata->thread_name != NULL ? tdata->thread_name : ""); +} + +static char * +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { + char *ret; + size_t size; + + if (thread_name == NULL) { + return NULL; + } + + size = strlen(thread_name) + 1; + if (size == 1) { + return ""; + } + + ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (ret == NULL) { + return NULL; + } + memcpy(ret, thread_name, size); + return ret; +} + +int +prof_thread_name_set(tsd_t *tsd, const char *thread_name) { + prof_tdata_t *tdata; + unsigned i; + char *s; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return EAGAIN; + } + + /* Validate input. 
*/ + if (thread_name == NULL) { + return EFAULT; + } + for (i = 0; thread_name[i] != '\0'; i++) { + char c = thread_name[i]; + if (!isgraph(c) && !isblank(c)) { + return EFAULT; + } + } + + s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); + if (s == NULL) { + return EAGAIN; + } + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + tdata->thread_name = NULL; + } + if (strlen(s) > 0) { + tdata->thread_name = s; + } + return 0; +} + +bool +prof_thread_active_get(tsd_t *tsd) { + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return false; + } + return tdata->active; +} + +bool +prof_thread_active_set(tsd_t *tsd, bool active) { + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + tdata->active = active; + return false; +} + +bool +prof_thread_active_init_get(tsdn_t *tsdn) { + bool active_init; + + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); + active_init = prof_thread_active_init; + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init; +} + +bool +prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { + bool active_init_old; + + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); + active_init_old = prof_thread_active_init; + prof_thread_active_init = active_init; + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init_old; +} + +bool +prof_gdump_get(tsdn_t *tsdn) { + bool prof_gdump_current; + + malloc_mutex_lock(tsdn, &prof_gdump_mtx); + prof_gdump_current = prof_gdump_val; + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_current; +} + +bool +prof_gdump_set(tsdn_t *tsdn, bool gdump) { + bool prof_gdump_old; + + malloc_mutex_lock(tsdn, &prof_gdump_mtx); + prof_gdump_old = prof_gdump_val; + prof_gdump_val = gdump; + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_old; +} + +void +prof_boot0(void) { cassert(config_prof); memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, @@ -1294,17 +2256,15 @@ prof_boot0(void) } void -prof_boot1(void) -{ - +prof_boot1(void) { cassert(config_prof); /* - * opt_prof and prof_promote must be in their final state before any - * arenas are initialized, so this function must be executed early. + * opt_prof must be in its final state before any arenas are + * initialized, so this function must be executed early. */ - if (opt_prof_leak && opt_prof == false) { + if (opt_prof_leak && !opt_prof) { /* * Enable opt_prof, but in such a way that profiles are never * automatically dumped. 
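The prof_active_get()/prof_active_set(), prof_gdump_get()/prof_gdump_set(), and prof_thread_active_init_get()/prof_thread_active_init_set() pairs added in the hunks above all follow the same idiom: each runtime flag gets its own mutex, a read copies the value under the lock, and a write swaps in the new value and returns the previous one. A minimal standalone sketch of that idiom, using plain pthreads in place of jemalloc's malloc_mutex_t/tsdn_t plumbing (stand-ins for illustration only, not code from this patch):

    /*
     * Sketch of the lock-protected read/swap idiom; pthreads stand in for
     * jemalloc's malloc_mutex_t, and the flag models prof_gdump_val.
     */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t flag_mtx = PTHREAD_MUTEX_INITIALIZER;
    static bool flag_val = false;

    /* Read the current value under the lock (cf. prof_gdump_get()). */
    static bool
    flag_get(void) {
        bool cur;

        pthread_mutex_lock(&flag_mtx);
        cur = flag_val;
        pthread_mutex_unlock(&flag_mtx);
        return cur;
    }

    /* Install a new value and hand back the old one (cf. prof_gdump_set()). */
    static bool
    flag_set(bool val) {
        bool old;

        pthread_mutex_lock(&flag_mtx);
        old = flag_val;
        flag_val = val;
        pthread_mutex_unlock(&flag_mtx);
        return old;
    }

    int
    main(void) {
        bool old = flag_set(true);
        printf("old=%d now=%d\n", (int)old, (int)flag_get());
        return 0;
    }

Returning the previous value from the setter, as the patch's prof_*_set() functions do, lets a caller report or restore the old setting without taking the lock a second time.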
@@ -1317,48 +2277,101 @@ prof_boot1(void) opt_lg_prof_interval); } } - - prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE); } bool -prof_boot2(void) -{ - +prof_boot2(tsd_t *tsd) { cassert(config_prof); if (opt_prof) { unsigned i; - if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) - return (true); - if (malloc_mutex_init(&bt2ctx_mtx)) - return (true); - if (prof_tdata_tsd_boot()) { - malloc_write( - "<jemalloc>: Error in pthread_key_create()\n"); - abort(); + lg_prof_sample = opt_lg_prof_sample; + + prof_active = opt_prof_active; + if (malloc_mutex_init(&prof_active_mtx, "prof_active", + WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { + return true; + } + + prof_gdump_val = opt_prof_gdump; + if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", + WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { + return true; + } + + prof_thread_active_init = opt_prof_thread_active_init; + if (malloc_mutex_init(&prof_thread_active_init_mtx, + "prof_thread_active_init", + WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, + malloc_mutex_rank_exclusive)) { + return true; + } + + if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + return true; + } + if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", + WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { + return true; + } + + tdata_tree_new(&tdatas); + if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", + WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { + return true; } - if (malloc_mutex_init(&prof_dump_seq_mtx)) - return (true); - if (malloc_mutex_init(&prof_dump_mtx)) - return (true); + next_thr_uid = 0; + if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", + WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { + return true; + } + + if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", + WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", + WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { + return true; + } - if (atexit(prof_fdump) != 0) { + if (opt_prof_final && opt_prof_prefix[0] != '\0' && + atexit(prof_fdump) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } - ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * - sizeof(malloc_mutex_t)); - if (ctx_locks == NULL) - return (true); + gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (gctx_locks == NULL) { + return true; + } for (i = 0; i < PROF_NCTX_LOCKS; i++) { - if (malloc_mutex_init(&ctx_locks[i])) - return (true); + if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", + WITNESS_RANK_PROF_GCTX, + malloc_mutex_rank_exclusive)) { + return true; + } + } + + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (tdata_locks == NULL) { + return true; + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", + WITNESS_RANK_PROF_TDATA, + malloc_mutex_rank_exclusive)) { + return true; + } } } @@ -1372,48 +2385,79 @@ prof_boot2(void) prof_booted = true; - return (false); + return false; } void -prof_prefork(void) -{ - - if (opt_prof) { +prof_prefork0(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - malloc_mutex_prefork(&bt2ctx_mtx); - malloc_mutex_prefork(&prof_dump_seq_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - 
malloc_mutex_prefork(&ctx_locks[i]); + malloc_mutex_prefork(tsdn, &prof_dump_mtx); + malloc_mutex_prefork(tsdn, &bt2gctx_mtx); + malloc_mutex_prefork(tsdn, &tdatas_mtx); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &tdata_locks[i]); + } + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &gctx_locks[i]); + } } } void -prof_postfork_parent(void) -{ +prof_prefork1(tsdn_t *tsdn) { + if (config_prof && opt_prof) { + malloc_mutex_prefork(tsdn, &prof_active_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); + malloc_mutex_prefork(tsdn, &prof_gdump_mtx); + malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); + malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); + } +} - if (opt_prof) { +void +prof_postfork_parent(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_parent(&ctx_locks[i]); - malloc_mutex_postfork_parent(&prof_dump_seq_mtx); - malloc_mutex_postfork_parent(&bt2ctx_mtx); + malloc_mutex_postfork_parent(tsdn, + &prof_thread_active_init_mtx); + malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); } } void -prof_postfork_child(void) -{ - - if (opt_prof) { +prof_postfork_child(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_child(&ctx_locks[i]); - malloc_mutex_postfork_child(&prof_dump_seq_mtx); - malloc_mutex_postfork_child(&bt2ctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); + malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_child(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_child(tsdn, &tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); } } diff --git a/deps/jemalloc/src/rtree.c b/deps/jemalloc/src/rtree.c index 205957ac4e..53702cf723 100644 --- a/deps/jemalloc/src/rtree.c +++ b/deps/jemalloc/src/rtree.c @@ -1,105 +1,320 @@ -#define JEMALLOC_RTREE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -rtree_t * -rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc) -{ - rtree_t *ret; - unsigned bits_per_level, bits_in_leaf, height, i; - - assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); - - bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1; - bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1; - if (bits > bits_in_leaf) { - height = 1 + (bits - bits_in_leaf) / bits_per_level; - if ((height-1) * bits_per_level + bits_in_leaf != bits) - height++; - } else { - height = 1; - } - assert((height-1) * bits_per_level + bits_in_leaf >= bits); - - ret = (rtree_t*)alloc(offsetof(rtree_t, 
level2bits) + - (sizeof(unsigned) * height)); - if (ret == NULL) - return (NULL); - memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) * - height)); - - ret->alloc = alloc; - ret->dalloc = dalloc; - if (malloc_mutex_init(&ret->mutex)) { - if (dalloc != NULL) - dalloc(ret); - return (NULL); - } - ret->height = height; - if (height > 1) { - if ((height-1) * bits_per_level + bits_in_leaf > bits) { - ret->level2bits[0] = (bits - bits_in_leaf) % - bits_per_level; - } else - ret->level2bits[0] = bits_per_level; - for (i = 1; i < height-1; i++) - ret->level2bits[i] = bits_per_level; - ret->level2bits[height-1] = bits_in_leaf; - } else - ret->level2bits[0] = bits; - - ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]); - if (ret->root == NULL) { - if (dalloc != NULL) - dalloc(ret); - return (NULL); - } - memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]); - - return (ret); +#define JEMALLOC_RTREE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" + +/* + * Only the most significant bits of keys passed to rtree_{read,write}() are + * used. + */ +bool +rtree_new(rtree_t *rtree, bool zeroed) { +#ifdef JEMALLOC_JET + if (!zeroed) { + memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */ + } +#else + assert(zeroed); +#endif + + if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, + malloc_mutex_rank_exclusive)) { + return true; + } + + return false; +} + +static rtree_node_elm_t * +rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_node_elm_t), CACHELINE); } +rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl; static void -rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level) -{ +rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { + /* Nodes are never deleted during normal operation. */ + not_reached(); +} +UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = + rtree_node_dalloc_impl; + +static rtree_leaf_elm_t * +rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_leaf_elm_t), CACHELINE); +} +rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl; - if (level < rtree->height - 1) { - size_t nchildren, i; +static void +rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { + /* Leaves are never deleted during normal operation. 
*/ + not_reached(); +} +UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = + rtree_leaf_dalloc_impl; - nchildren = ZU(1) << rtree->level2bits[level]; - for (i = 0; i < nchildren; i++) { - void **child = (void **)node[i]; - if (child != NULL) - rtree_delete_subtree(rtree, child, level + 1); +#ifdef JEMALLOC_JET +# if RTREE_HEIGHT > 1 +static void +rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree, + unsigned level) { + size_t nchildren = ZU(1) << rtree_levels[level].bits; + if (level + 2 < RTREE_HEIGHT) { + for (size_t i = 0; i < nchildren; i++) { + rtree_node_elm_t *node = + (rtree_node_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (node != NULL) { + rtree_delete_subtree(tsdn, rtree, node, level + + 1); + } + } + } else { + for (size_t i = 0; i < nchildren; i++) { + rtree_leaf_elm_t *leaf = + (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (leaf != NULL) { + rtree_leaf_dalloc(tsdn, rtree, leaf); + } } } - rtree->dalloc(node); + + if (subtree != rtree->root) { + rtree_node_dalloc(tsdn, rtree, subtree); + } } +# endif void -rtree_delete(rtree_t *rtree) -{ +rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { +# if RTREE_HEIGHT > 1 + rtree_delete_subtree(tsdn, rtree, rtree->root, 0); +# endif +} +#endif - rtree_delete_subtree(rtree, rtree->root, 0); - rtree->dalloc(rtree); +static rtree_node_elm_t * +rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, + atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. + */ + rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED); + if (node == NULL) { + node = rtree_node_alloc(tsdn, rtree, ZU(1) << + rtree_levels[level].bits); + if (node == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. + */ + atomic_store_p(elmp, node, ATOMIC_RELEASE); + } + malloc_mutex_unlock(tsdn, &rtree->init_lock); + + return node; } -void -rtree_prefork(rtree_t *rtree) -{ +static rtree_leaf_elm_t * +rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. + */ + rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED); + if (leaf == NULL) { + leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) << + rtree_levels[RTREE_HEIGHT-1].bits); + if (leaf == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. 
+ */ + atomic_store_p(elmp, leaf, ATOMIC_RELEASE); + } + malloc_mutex_unlock(tsdn, &rtree->init_lock); - malloc_mutex_prefork(&rtree->mutex); + return leaf; } -void -rtree_postfork_parent(rtree_t *rtree) -{ +static bool +rtree_node_valid(rtree_node_elm_t *node) { + return ((uintptr_t)node != (uintptr_t)0); +} - malloc_mutex_postfork_parent(&rtree->mutex); +static bool +rtree_leaf_valid(rtree_leaf_elm_t *leaf) { + return ((uintptr_t)leaf != (uintptr_t)0); } -void -rtree_postfork_child(rtree_t *rtree) -{ +static rtree_node_elm_t * +rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { + rtree_node_elm_t *node; + + if (dependent) { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); + } else { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); + } - malloc_mutex_postfork_child(&rtree->mutex); + assert(!dependent || node != NULL); + return node; +} + +static rtree_node_elm_t * +rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_node_elm_t *node; + + node = rtree_child_node_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_node_valid(node))) { + node = rtree_node_init(tsdn, rtree, level + 1, &elm->child); + } + assert(!dependent || node != NULL); + return node; +} + +static rtree_leaf_elm_t * +rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { + rtree_leaf_elm_t *leaf; + + if (dependent) { + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); + } else { + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); + } + + assert(!dependent || leaf != NULL); + return leaf; +} + +static rtree_leaf_elm_t * +rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_leaf_elm_t *leaf; + + leaf = rtree_child_leaf_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { + leaf = rtree_leaf_init(tsdn, rtree, &elm->child); + } + assert(!dependent || leaf != NULL); + return leaf; +} + +rtree_leaf_elm_t * +rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, bool init_missing) { + rtree_node_elm_t *node; + rtree_leaf_elm_t *leaf; +#if RTREE_HEIGHT > 1 + node = rtree->root; +#else + leaf = rtree->root; +#endif + + if (config_debug) { + uintptr_t leafkey = rtree_leafkey(key); + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + assert(rtree_ctx->cache[i].leafkey != leafkey); + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + assert(rtree_ctx->l2_cache[i].leafkey != leafkey); + } + } + +#define RTREE_GET_CHILD(level) { \ + assert(level < RTREE_HEIGHT-1); \ + if (level != 0 && !dependent && \ + unlikely(!rtree_node_valid(node))) { \ + return NULL; \ + } \ + uintptr_t subkey = rtree_subkey(key, level); \ + if (level + 2 < RTREE_HEIGHT) { \ + node = init_missing ? \ + rtree_child_node_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_node_tryread(&node[subkey], \ + dependent); \ + } else { \ + leaf = init_missing ? \ + rtree_child_leaf_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_leaf_tryread(&node[subkey], \ + dependent); \ + } \ + } + /* + * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss): + * (1) evict last entry in L2 cache; (2) move the collision slot from L1 + * cache down to L2; and 3) fill L1. 
+ */ +#define RTREE_GET_LEAF(level) { \ + assert(level == RTREE_HEIGHT-1); \ + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \ + return NULL; \ + } \ + if (RTREE_CTX_NCACHE_L2 > 1) { \ + memmove(&rtree_ctx->l2_cache[1], \ + &rtree_ctx->l2_cache[0], \ + sizeof(rtree_ctx_cache_elm_t) * \ + (RTREE_CTX_NCACHE_L2 - 1)); \ + } \ + size_t slot = rtree_cache_direct_map(key); \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + uintptr_t leafkey = rtree_leafkey(key); \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, level); \ + return &leaf[subkey]; \ + } + if (RTREE_HEIGHT > 1) { + RTREE_GET_CHILD(0) + } + if (RTREE_HEIGHT > 2) { + RTREE_GET_CHILD(1) + } + if (RTREE_HEIGHT > 3) { + for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) { + RTREE_GET_CHILD(i) + } + } + RTREE_GET_LEAF(RTREE_HEIGHT-1) +#undef RTREE_GET_CHILD +#undef RTREE_GET_LEAF + not_reached(); +} + +void +rtree_ctx_data_init(rtree_ctx_t *ctx) { + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } } diff --git a/deps/jemalloc/src/stats.c b/deps/jemalloc/src/stats.c index bef2ab33cd..087df7676e 100644 --- a/deps/jemalloc/src/stats.c +++ b/deps/jemalloc/src/stats.c @@ -1,549 +1,1285 @@ -#define JEMALLOC_STATS_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_STATS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" -#define CTL_GET(n, v, t) do { \ - size_t sz = sizeof(t); \ - xmallctl(n, v, &sz, NULL, 0); \ -} while (0) +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_prof.h" -#define CTL_I_GET(n, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ +const char *global_mutex_names[mutex_prof_num_global_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_GLOBAL_MUTEXES +#undef OP +}; + +const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_ARENA_MUTEXES +#undef OP +}; + +#define CTL_GET(n, v, t) do { \ size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = i; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + xmallctl(n, (void *)v, &sz, NULL, 0); \ } while (0) -#define CTL_J_GET(n, v, t) do { \ - size_t mib[6]; \ +#define CTL_M2_GET(n, i, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ - mib[2] = j; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + mib[2] = (i); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) -#define CTL_IJ_GET(n, v, t) do { \ - size_t mib[6]; \ +#define CTL_M2_M4_GET(n, i, j, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ - mib[2] = i; \ - mib[4] = j; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + mib[2] = (i); \ + mib[4] = (j); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) 
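As an illustration (not part of the patch itself): the CTL_GET / CTL_M2_GET / CTL_M2_M4_GET macros defined above wrap jemalloc's MIB interface. A control name such as "stats.arenas.0.bins.0.nmalloc" is translated to a MIB once with mallctlnametomib(), the wildcard components at positions 2 (arena) and 4 (bin or extent) are overwritten with the desired indices, and the value is then read with mallctlbymib(). The sketch below shows the same pattern against jemalloc's public API; it assumes a default build without a function prefix (the in-tree code uses je_-prefixed calls and the xmallctl* wrappers), and the helper name arena_pdirty plus the choice of the stats.arenas.<i>.pdirty control are purely illustrative.

/*
 * Sketch only: read stats.arenas.<i>.pdirty for one arena the way the
 * CTL_M2_GET macro does, resolving the name to a MIB once and re-indexing it.
 */
#include <stddef.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

static size_t
arena_pdirty(unsigned arena_ind) {
	/* Refresh the cached statistics first, as stats_print() does via "epoch". */
	uint64_t epoch = 1;
	size_t esz = sizeof(epoch);
	mallctl("epoch", &epoch, &esz, &epoch, sizeof(epoch));

	/* "stats.arenas.0.pdirty" has four components; component 2 is the arena. */
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	if (mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen) != 0) {
		return 0;
	}
	mib[2] = (size_t)arena_ind;	/* same trick as CTL_M2_GET's mib[2] = (i) */

	size_t pdirty;
	size_t sz = sizeof(pdirty);
	if (mallctlbymib(mib, miblen, &pdirty, &sz, NULL, 0) != 0) {
		return 0;
	}
	return pdirty;
}

Resolving the name once and reusing the MIB avoids repeated string parsing when the same statistic is read for many arenas or bins; the CTL_M2_* / CTL_M2_M4_* names simply make explicit which MIB positions get overwritten.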
/******************************************************************************/ /* Data. */ -bool opt_stats_print = false; - -size_t stats_cactive = 0; +bool opt_stats_print = false; +char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; /******************************************************************************/ -/* Function prototypes for non-inline static functions. */ -static void stats_arena_bins_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i, bool bins, bool large); +/* Calculate x.yyy and output a string (takes a fixed sized char array). */ +static bool +get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { + if (divisor == 0 || dividend > divisor) { + /* The rate is not supposed to be greater than 1. */ + return true; + } + if (dividend > 0) { + assert(UINT64_MAX / dividend >= 1000); + } -/******************************************************************************/ + unsigned n = (unsigned)((dividend * 1000) / divisor); + if (n < 10) { + malloc_snprintf(str, 6, "0.00%u", n); + } else if (n < 100) { + malloc_snprintf(str, 6, "0.0%u", n); + } else if (n < 1000) { + malloc_snprintf(str, 6, "0.%u", n); + } else { + malloc_snprintf(str, 6, "1"); + } + + return false; +} + +#define MUTEX_CTL_STR_MAX_LENGTH 128 +static void +gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix, + const char *mutex, const char *counter) { + malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter); +} + +static void +read_arena_bin_mutex_stats(unsigned arena_ind, unsigned bin_ind, + uint64_t results[mutex_prof_num_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; +#define OP(c, t) \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.bins.0","mutex", #c); \ + CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \ + (t *)&results[mutex_counter_##c], t); +MUTEX_PROF_COUNTERS +#undef OP +} + +static void +mutex_stats_output_json(void (*write_cb)(void *, const char *), void *cbopaque, + const char *name, uint64_t stats[mutex_prof_num_counters], + const char *json_indent, bool last) { + malloc_cprintf(write_cb, cbopaque, "%s\"%s\": {\n", json_indent, name); + + mutex_prof_counter_ind_t k = 0; + char *fmt_str[2] = {"%s\t\"%s\": %"FMTu32"%s\n", + "%s\t\"%s\": %"FMTu64"%s\n"}; +#define OP(c, t) \ + malloc_cprintf(write_cb, cbopaque, \ + fmt_str[sizeof(t) / sizeof(uint32_t) - 1], \ + json_indent, #c, (t)stats[mutex_counter_##c], \ + (++k == mutex_prof_num_counters) ? "" : ","); +MUTEX_PROF_COUNTERS +#undef OP + malloc_cprintf(write_cb, cbopaque, "%s}%s\n", json_indent, + last ? 
"" : ","); +} static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ + bool json, bool large, bool mutex, unsigned i) { size_t page; - bool config_tcache; - unsigned nbins, j, gap_start; + bool in_gap, in_gap_prev; + unsigned nbins, j; CTL_GET("arenas.page", &page, size_t); - CTL_GET("config.tcache", &config_tcache, bool); - if (config_tcache) { + CTL_GET("arenas.nbins", &nbins, unsigned); + if (json) { malloc_cprintf(write_cb, cbopaque, - "bins: bin size regs pgs allocated nmalloc" - " ndalloc nrequests nfills nflushes" - " newruns reruns curruns\n"); + "\t\t\t\t\"bins\": [\n"); } else { + char *mutex_counters = " n_lock_ops n_waiting" + " n_spin_acq total_wait_ns max_wait_ns\n"; malloc_cprintf(write_cb, cbopaque, - "bins: bin size regs pgs allocated nmalloc" - " ndalloc newruns reruns curruns\n"); + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs curslabs regs" + " pgs util nfills nflushes newslabs" + " reslabs%s", mutex ? mutex_counters : "\n"); } - CTL_GET("arenas.nbins", &nbins, unsigned); - for (j = 0, gap_start = UINT_MAX; j < nbins; j++) { - uint64_t nruns; + for (j = 0, in_gap = false; j < nbins; j++) { + uint64_t nslabs; + size_t reg_size, slab_size, curregs; + size_t curslabs; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nreslabs; - CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t); - if (nruns == 0) { - if (gap_start == UINT_MAX) - gap_start = j; - } else { - size_t reg_size, run_size, allocated; - uint32_t nregs; - uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t reruns; - size_t curruns; - - if (gap_start != UINT_MAX) { - if (j > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write_cb, cbopaque, - "[%u..%u]\n", gap_start, - j - 1); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs, + uint64_t); + in_gap_prev = in_gap; + in_gap = (nslabs == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); + CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); + CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t); + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, + &nrequests, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs, + size_t); + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curregs\": %zu,\n" + "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n" + "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n" + "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n" + "\t\t\t\t\t\t\"nreslabs\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curslabs\": %zu%s\n", + nmalloc, ndalloc, curregs, nrequests, nfills, + nflushes, nreslabs, curslabs, mutex ? 
"," : ""); + if (mutex) { + uint64_t mutex_stats[mutex_prof_num_counters]; + read_arena_bin_mutex_stats(i, j, mutex_stats); + mutex_stats_output_json(write_cb, cbopaque, + "mutex", mutex_stats, "\t\t\t\t\t\t", true); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t}%s\n", + (j + 1 < nbins) ? "," : ""); + } else if (!in_gap) { + size_t availregs = nregs * curslabs; + char util[6]; + if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, + util)) { + if (availregs == 0) { + malloc_snprintf(util, sizeof(util), + "1"); + } else if (curregs > availregs) { + /* + * Race detected: the counters were read + * in separate mallctl calls and + * concurrent operations happened in + * between. In this case no meaningful + * utilization can be computed. + */ + malloc_snprintf(util, sizeof(util), + " race"); } else { - /* Gap of one size class. */ - malloc_cprintf(write_cb, cbopaque, - "[%u]\n", gap_start); + not_reached(); } - gap_start = UINT_MAX; } - CTL_J_GET("arenas.bin.0.size", ®_size, size_t); - CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t); - CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t); - CTL_IJ_GET("stats.arenas.0.bins.0.allocated", - &allocated, size_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc", - &nmalloc, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc", - &ndalloc, uint64_t); - if (config_tcache) { - CTL_IJ_GET("stats.arenas.0.bins.0.nrequests", - &nrequests, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nfills", - &nfills, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nflushes", - &nflushes, uint64_t); + uint64_t mutex_stats[mutex_prof_num_counters]; + if (mutex) { + read_arena_bin_mutex_stats(i, j, mutex_stats); } - CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns, - uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns, - size_t); - if (config_tcache) { + + malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12" + FMTu64" %12"FMTu64" %12"FMTu64" %12zu %12zu %4u" + " %3zu %-5s %12"FMTu64" %12"FMTu64" %12"FMTu64 + " %12"FMTu64, reg_size, j, curregs * reg_size, + nmalloc, ndalloc, nrequests, curregs, curslabs, + nregs, slab_size / page, util, nfills, nflushes, + nslabs, nreslabs); + + /* Output less info for bin mutexes to save space. */ + if (mutex) { malloc_cprintf(write_cb, cbopaque, - "%13u %5zu %4u %3zu %12zu %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - j, reg_size, nregs, run_size / page, - allocated, nmalloc, ndalloc, nrequests, - nfills, nflushes, nruns, reruns, curruns); + " %12"FMTu64" %12"FMTu64" %12"FMTu64 + " %14"FMTu64" %12"FMTu64"\n", + mutex_stats[mutex_counter_num_ops], + mutex_stats[mutex_counter_num_wait], + mutex_stats[mutex_counter_num_spin_acq], + mutex_stats[mutex_counter_total_wait_time], + mutex_stats[mutex_counter_max_wait_time]); } else { - malloc_cprintf(write_cb, cbopaque, - "%13u %5zu %4u %3zu %12zu %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - j, reg_size, nregs, run_size / page, - allocated, nmalloc, ndalloc, nruns, reruns, - curruns); + malloc_cprintf(write_cb, cbopaque, "\n"); } } } - if (gap_start != UINT_MAX) { - if (j > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n", - gap_start, j - 1); - } else { - /* Gap of one size class. */ - malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t]%s\n", large ? 
"," : ""); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); } } } static void -stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ - size_t page, nlruns, j; - ssize_t gap_start; - - CTL_GET("arenas.page", &page, size_t); +stats_arena_lextents_print(void (*write_cb)(void *, const char *), + void *cbopaque, bool json, unsigned i) { + unsigned nbins, nlextents, j; + bool in_gap, in_gap_prev; - malloc_cprintf(write_cb, cbopaque, - "large: size pages nmalloc ndalloc nrequests" - " curruns\n"); - CTL_GET("arenas.nlruns", &nlruns, size_t); - for (j = 0, gap_start = -1; j < nlruns; j++) { + CTL_GET("arenas.nbins", &nbins, unsigned); + CTL_GET("arenas.nlextents", &nlextents, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"lextents\": [\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: size ind allocated nmalloc" + " ndalloc nrequests curlextents\n"); + } + for (j = 0, in_gap = false; j < nlextents; j++) { uint64_t nmalloc, ndalloc, nrequests; - size_t run_size, curruns; + size_t lextent_size, curlextents; - CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc, - uint64_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc, - uint64_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests, - uint64_t); - if (nrequests == 0) { - if (gap_start == -1) - gap_start = j; - } else { - CTL_J_GET("arenas.lrun.0.size", &run_size, size_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns, - size_t); - if (gap_start != -1) { - malloc_cprintf(write_cb, cbopaque, "[%zu]\n", - j - gap_start); - gap_start = -1; - } + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, + &nmalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, + &ndalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j, + &nrequests, uint64_t); + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j, + &curlextents, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"curlextents\": %zu\n" + "\t\t\t\t\t}%s\n", + curlextents, + (j + 1 < nlextents) ? 
"," : ""); + } else if (!in_gap) { + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64" %12zu\n", + lextent_size, nbins + j, + curlextents * lextent_size, nmalloc, ndalloc, + nrequests, curlextents); + } + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t]\n"); + } else { + if (in_gap) { malloc_cprintf(write_cb, cbopaque, - "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - run_size, run_size / page, nmalloc, ndalloc, - nrequests, curruns); + " ---\n"); + } + } +} + +static void +read_arena_mutex_stats(unsigned arena_ind, + uint64_t results[mutex_prof_num_arena_mutexes][mutex_prof_num_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + mutex_prof_arena_ind_t i; + for (i = 0; i < mutex_prof_num_arena_mutexes; i++) { +#define OP(c, t) \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.mutexes", arena_mutex_names[i], #c); \ + CTL_M2_GET(cmd, arena_ind, \ + (t *)&results[i][mutex_counter_##c], t); +MUTEX_PROF_COUNTERS +#undef OP + } +} + +static void +mutex_stats_output(void (*write_cb)(void *, const char *), void *cbopaque, + const char *name, uint64_t stats[mutex_prof_num_counters], + bool first_mutex) { + if (first_mutex) { + /* Print title. */ + malloc_cprintf(write_cb, cbopaque, + " n_lock_ops n_waiting" + " n_spin_acq n_owner_switch total_wait_ns" + " max_wait_ns max_n_thds\n"); + } + + malloc_cprintf(write_cb, cbopaque, "%s", name); + malloc_cprintf(write_cb, cbopaque, ":%*c", + (int)(20 - strlen(name)), ' '); + + char *fmt_str[2] = {"%12"FMTu32, "%16"FMTu64}; +#define OP(c, t) \ + malloc_cprintf(write_cb, cbopaque, \ + fmt_str[sizeof(t) / sizeof(uint32_t) - 1], \ + (t)stats[mutex_counter_##c]); +MUTEX_PROF_COUNTERS +#undef OP + malloc_cprintf(write_cb, cbopaque, "\n"); +} + +static void +stats_arena_mutexes_print(void (*write_cb)(void *, const char *), + void *cbopaque, bool json, bool json_end, unsigned arena_ind) { + uint64_t mutex_stats[mutex_prof_num_arena_mutexes][mutex_prof_num_counters]; + read_arena_mutex_stats(arena_ind, mutex_stats); + + /* Output mutex stats. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, "\t\t\t\t\"mutexes\": {\n"); + mutex_prof_arena_ind_t i, last_mutex; + last_mutex = mutex_prof_num_arena_mutexes - 1; + for (i = 0; i < mutex_prof_num_arena_mutexes; i++) { + mutex_stats_output_json(write_cb, cbopaque, + arena_mutex_names[i], mutex_stats[i], + "\t\t\t\t\t", (i == last_mutex)); + } + malloc_cprintf(write_cb, cbopaque, "\t\t\t\t}%s\n", + json_end ? 
"" : ","); + } else { + mutex_prof_arena_ind_t i; + for (i = 0; i < mutex_prof_num_arena_mutexes; i++) { + mutex_stats_output(write_cb, cbopaque, + arena_mutex_names[i], mutex_stats[i], i == 0); } } - if (gap_start != -1) - malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start); } static void stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i, bool bins, bool large) -{ + bool json, unsigned i, bool bins, bool large, bool mutex) { unsigned nthreads; const char *dss; - size_t page, pactive, pdirty, mapped; - uint64_t npurge, nmadvise, purged; + ssize_t dirty_decay_ms, muzzy_decay_ms; + size_t page, pactive, pdirty, pmuzzy, mapped, retained; + size_t base, internal, resident; + uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; + uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests; size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests; + size_t tcache_bytes; + uint64_t uptime; CTL_GET("arenas.page", &page, size_t); - CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned); - malloc_cprintf(write_cb, cbopaque, - "assigned threads: %u\n", nthreads); - CTL_I_GET("stats.arenas.0.dss", &dss, const char *); - malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", - dss); - CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t); - CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t); - CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t); - CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t); - CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s," - " %"PRIu64" madvise%s, %"PRIu64" purged\n", - pactive, pdirty, npurge, npurge == 1 ? "" : "s", - nmadvise, nmadvise == 1 ? 
"" : "s", purged); - - malloc_cprintf(write_cb, cbopaque, - " allocated nmalloc ndalloc nrequests\n"); - CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t); - CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t); - CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t); - CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - small_allocated, small_nmalloc, small_ndalloc, small_nrequests); - CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t); - CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t); - CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t); - CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - large_allocated, large_nmalloc, large_ndalloc, large_nrequests); - malloc_cprintf(write_cb, cbopaque, - "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - small_allocated + large_allocated, - small_nmalloc + large_nmalloc, - small_ndalloc + large_ndalloc, - small_nrequests + large_nrequests); - malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page); - CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped); - - if (bins) - stats_arena_bins_print(write_cb, cbopaque, i); - if (large) - stats_arena_lruns_print(write_cb, cbopaque, i); -} + CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"nthreads\": %u,\n", nthreads); + } else { + malloc_cprintf(write_cb, cbopaque, + "assigned threads: %u\n", nthreads); + } -void -stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - int err; - uint64_t epoch; - size_t u64sz; - bool general = true; - bool merged = true; - bool unmerged = true; - bool bins = true; - bool large = true; + CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"uptime_ns\": %"FMTu64",\n", uptime); + } else { + malloc_cprintf(write_cb, cbopaque, + "uptime: %"FMTu64"\n", uptime); + } - /* - * Refresh stats, in case mallctl() was called by the application. - * - * Check for OOM here, since refreshing the ctl cache can trigger - * allocation. In practice, none of the subsequent mallctl()-related - * calls in this function will cause OOM if this one succeeds. 
- * */ - epoch = 1; - u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); - if (err != 0) { - if (err == EAGAIN) { - malloc_write("<jemalloc>: Memory allocation failure in " - "mallctl(\"epoch\", ...)\n"); - return; + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dss\": \"%s\",\n", dss); + } else { + malloc_cprintf(write_cb, cbopaque, + "dss allocation precedence: %s\n", dss); + } + + CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms, + ssize_t); + CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms, + ssize_t); + CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); + CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); + CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t); + CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise, + uint64_t); + CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise, + uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dirty_decay_ms\": %zd,\n", dirty_decay_ms); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"muzzy_decay_ms\": %zd,\n", muzzy_decay_ms); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pactive\": %zu,\n", pactive); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pdirty\": %zu,\n", pdirty); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pmuzzy\": %zu,\n", pmuzzy); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dirty_npurge\": %"FMTu64",\n", dirty_npurge); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dirty_nmadvise\": %"FMTu64",\n", dirty_nmadvise); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dirty_purged\": %"FMTu64",\n", dirty_purged); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"muzzy_npurge\": %"FMTu64",\n", muzzy_npurge); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"muzzy_nmadvise\": %"FMTu64",\n", muzzy_nmadvise); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"muzzy_purged\": %"FMTu64",\n", muzzy_purged); + } else { + malloc_cprintf(write_cb, cbopaque, + "decaying: time npages sweeps madvises" + " purged\n"); + if (dirty_decay_ms >= 0) { + malloc_cprintf(write_cb, cbopaque, + " dirty: %5zd %12zu %12"FMTu64" %12"FMTu64" %12" + FMTu64"\n", dirty_decay_ms, pdirty, dirty_npurge, + dirty_nmadvise, dirty_purged); + } else { + malloc_cprintf(write_cb, cbopaque, + " dirty: N/A %12zu %12"FMTu64" %12"FMTu64" %12" + FMTu64"\n", pdirty, dirty_npurge, dirty_nmadvise, + dirty_purged); + } + if (muzzy_decay_ms >= 0) { + malloc_cprintf(write_cb, cbopaque, + " muzzy: %5zd %12zu %12"FMTu64" %12"FMTu64" %12" + FMTu64"\n", muzzy_decay_ms, pmuzzy, muzzy_npurge, + muzzy_nmadvise, muzzy_purged); + } else { + malloc_cprintf(write_cb, cbopaque, + " muzzy: N/A %12zu %12"FMTu64" %12"FMTu64" %12" + FMTu64"\n", pmuzzy, muzzy_npurge, muzzy_nmadvise, + muzzy_purged); } - malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " - "...)\n"); - abort(); } - if (opts != NULL) { - unsigned i; + CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, + size_t); + CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); + 
CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, + uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"small\": {\n"); - for (i = 0; opts[i] != '\0'; i++) { - switch (opts[i]) { - case 'g': - general = false; - break; - case 'm': - merged = false; - break; - case 'a': - unmerged = false; - break; - case 'b': - bins = false; - break; - case 'l': - large = false; - break; - default:; - } - } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + " allocated nmalloc" + " ndalloc nrequests\n"); + malloc_cprintf(write_cb, cbopaque, + "small: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated, small_nmalloc, small_ndalloc, + small_nrequests); } - malloc_cprintf(write_cb, cbopaque, - "___ Begin jemalloc statistics ___\n"); - if (general) { - int err; - const char *cpv; - bool bv; - unsigned uv; - ssize_t ssv; - size_t sv, bsz, ssz, sssz, cpsz; - - bsz = sizeof(bool); - ssz = sizeof(size_t); - sssz = sizeof(ssize_t); - cpsz = sizeof(const char *); - - CTL_GET("version", &cpv, const char *); + CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, + size_t); + CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, + uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"large\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + large_allocated, large_nmalloc, large_ndalloc, + large_nrequests); + malloc_cprintf(write_cb, cbopaque, + "total: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated + large_allocated, small_nmalloc + + large_nmalloc, small_ndalloc + large_ndalloc, + small_nrequests + large_nrequests); + } + if (!json) { + malloc_cprintf(write_cb, cbopaque, + "active: %12zu\n", pactive * page); + } + + CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"mapped\": %zu,\n", mapped); + } else { + malloc_cprintf(write_cb, cbopaque, + "mapped: %12zu\n", mapped); + } + + CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"retained\": %zu,\n", retained); + } else { + malloc_cprintf(write_cb, cbopaque, + "retained: %12zu\n", retained); + } + + CTL_M2_GET("stats.arenas.0.base", i, &base, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"base\": %zu,\n", base); + } else { + malloc_cprintf(write_cb, cbopaque, + "base: %12zu\n", base); + } + + 
CTL_M2_GET("stats.arenas.0.internal", i, &internal, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"internal\": %zu,\n", internal); + } else { + malloc_cprintf(write_cb, cbopaque, + "internal: %12zu\n", internal); + } + + CTL_M2_GET("stats.arenas.0.tcache_bytes", i, &tcache_bytes, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"tcache\": %zu,\n", tcache_bytes); + } else { + malloc_cprintf(write_cb, cbopaque, + "tcache: %12zu\n", tcache_bytes); + } + + CTL_M2_GET("stats.arenas.0.resident", i, &resident, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"resident\": %zu%s\n", resident, + (bins || large || mutex) ? "," : ""); + } else { + malloc_cprintf(write_cb, cbopaque, + "resident: %12zu\n", resident); + } + + if (mutex) { + stats_arena_mutexes_print(write_cb, cbopaque, json, + !(bins || large), i); + } + if (bins) { + stats_arena_bins_print(write_cb, cbopaque, json, large, mutex, + i); + } + if (large) { + stats_arena_lextents_print(write_cb, cbopaque, json, i); + } +} + +static void +stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool more) { + const char *cpv; + bool bv; + unsigned uv; + uint32_t u32v; + uint64_t u64v; + ssize_t ssv; + size_t sv, bsz, usz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + usz = sizeof(unsigned); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"version\": \"%s\",\n", cpv); + } else { malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - CTL_GET("config.debug", &bv, bool); + } + + /* config. */ +#define CONFIG_WRITE_BOOL_JSON(n, c) \ + if (json) { \ + CTL_GET("config."#n, &bv, bool); \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \ + (c)); \ + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"config\": {\n"); + } + + CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",") + + CTL_GET("config.debug", &bv, bool); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"debug\": %s,\n", bv ? "true" : "false"); + } else { malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", bv ? "enabled" : "disabled"); + } + + CONFIG_WRITE_BOOL_JSON(fill, ",") + CONFIG_WRITE_BOOL_JSON(lazy_lock, ",") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"malloc_conf\": \"%s\",\n", + config_malloc_conf); + } else { + malloc_cprintf(write_cb, cbopaque, + "config.malloc_conf: \"%s\"\n", config_malloc_conf); + } + + CONFIG_WRITE_BOOL_JSON(prof, ",") + CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",") + CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",") + CONFIG_WRITE_BOOL_JSON(stats, ",") + CONFIG_WRITE_BOOL_JSON(thp, ",") + CONFIG_WRITE_BOOL_JSON(utrace, ",") + CONFIG_WRITE_BOOL_JSON(xmalloc, "") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } +#undef CONFIG_WRITE_BOOL_JSON -#define OPT_WRITE_BOOL(n) \ - if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \ - == 0) { \ + /* opt. */ +#define OPT_WRITE_BOOL(n, c) \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ + "false", (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %s\n", bv ? 
"true" : "false"); \ - } -#define OPT_WRITE_SIZE_T(n) \ - if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \ - == 0) { \ + } \ + } +#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \ + bool bv2; \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \ + je_mallctl(#m, (void *)&bv2, &bsz, NULL, 0) == 0) { \ + if (json) { \ malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zu\n", sv); \ - } -#define OPT_WRITE_SSIZE_T(n) \ - if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \ - == 0) { \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ + "false", (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %s ("#m": %s)\n", bv ? "true" \ + : "false", bv2 ? "true" : "false"); \ + } \ + } \ +} +#define OPT_WRITE_UNSIGNED(n, c) \ + if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %u%s\n", uv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %u\n", uv); \ + } \ + } +#define OPT_WRITE_SSIZE_T(n, c) \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zd\n", ssv); \ - } -#define OPT_WRITE_CHAR_P(n) \ - if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \ - == 0) { \ + } \ + } +#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \ + ssize_t ssv2; \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \ + je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zd ("#m": %zd)\n", \ + ssv, ssv2); \ + } \ + } \ +} +#define OPT_WRITE_CHAR_P(n, c) \ + if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": \"%s\"\n", cpv); \ - } + } \ + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"opt\": {\n"); + } else { malloc_cprintf(write_cb, cbopaque, "Run-time option settings:\n"); - OPT_WRITE_BOOL(abort) - OPT_WRITE_SIZE_T(lg_chunk) - OPT_WRITE_CHAR_P(dss) - OPT_WRITE_SIZE_T(narenas) - OPT_WRITE_SSIZE_T(lg_dirty_mult) - OPT_WRITE_BOOL(stats_print) - OPT_WRITE_BOOL(junk) - OPT_WRITE_SIZE_T(quarantine) - OPT_WRITE_BOOL(redzone) - OPT_WRITE_BOOL(zero) - OPT_WRITE_BOOL(utrace) - OPT_WRITE_BOOL(valgrind) - OPT_WRITE_BOOL(xmalloc) - OPT_WRITE_BOOL(tcache) - OPT_WRITE_SSIZE_T(lg_tcache_max) - OPT_WRITE_BOOL(prof) - OPT_WRITE_CHAR_P(prof_prefix) - OPT_WRITE_BOOL(prof_active) - OPT_WRITE_SSIZE_T(lg_prof_sample) - OPT_WRITE_BOOL(prof_accum) - OPT_WRITE_SSIZE_T(lg_prof_interval) - OPT_WRITE_BOOL(prof_gdump) - OPT_WRITE_BOOL(prof_final) - OPT_WRITE_BOOL(prof_leak) + } + OPT_WRITE_BOOL(abort, ",") + OPT_WRITE_BOOL(abort_conf, ",") + OPT_WRITE_BOOL(retain, ",") + OPT_WRITE_CHAR_P(dss, ",") + OPT_WRITE_UNSIGNED(narenas, ",") + OPT_WRITE_CHAR_P(percpu_arena, ",") + OPT_WRITE_BOOL_MUTABLE(background_thread, background_thread, ",") + OPT_WRITE_SSIZE_T_MUTABLE(dirty_decay_ms, arenas.dirty_decay_ms, ",") + OPT_WRITE_SSIZE_T_MUTABLE(muzzy_decay_ms, arenas.muzzy_decay_ms, ",") + OPT_WRITE_CHAR_P(junk, ",") + OPT_WRITE_BOOL(zero, ",") + OPT_WRITE_BOOL(utrace, ",") + OPT_WRITE_BOOL(xmalloc, ",") + OPT_WRITE_BOOL(tcache, ",") + OPT_WRITE_SSIZE_T(lg_tcache_max, ",") + 
OPT_WRITE_BOOL(prof, ",") + OPT_WRITE_CHAR_P(prof_prefix, ",") + OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",") + OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init, + ",") + OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",") + OPT_WRITE_BOOL(prof_accum, ",") + OPT_WRITE_SSIZE_T(lg_prof_interval, ",") + OPT_WRITE_BOOL(prof_gdump, ",") + OPT_WRITE_BOOL(prof_final, ",") + OPT_WRITE_BOOL(prof_leak, ",") + OPT_WRITE_BOOL(stats_print, ",") + if (json || opt_stats_print) { + /* + * stats_print_opts is always emitted for JSON, so as long as it + * comes last it's safe to unconditionally omit the comma here + * (rather than having to conditionally omit it elsewhere + * depending on configuration). + */ + OPT_WRITE_CHAR_P(stats_print_opts, "") + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } #undef OPT_WRITE_BOOL -#undef OPT_WRITE_SIZE_T +#undef OPT_WRITE_BOOL_MUTABLE #undef OPT_WRITE_SSIZE_T #undef OPT_WRITE_CHAR_P - malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); + /* arenas. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"arenas\": {\n"); + } - CTL_GET("arenas.narenas", &uv, unsigned); + CTL_GET("arenas.narenas", &uv, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"narenas\": %u,\n", uv); + } else { malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); + } + + if (json) { + CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"dirty_decay_ms\": %zd,\n", ssv); - malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", - sizeof(void *)); + CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"muzzy_decay_ms\": %zd,\n", ssv); + } - CTL_GET("arenas.quantum", &sv, size_t); + CTL_GET("arenas.quantum", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"quantum\": %zu,\n", sv); + } else { malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); + } - CTL_GET("arenas.page", &sv, size_t); + CTL_GET("arenas.page", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"page\": %zu,\n", sv); + } else { malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); + } - CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t); - if (ssv >= 0) { + if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { + if (json) { malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: %u:1\n", - (1U << ssv)); + "\t\t\t\"tcache_max\": %zu,\n", sv); } else { malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: N/A\n"); + "Maximum thread-cached size class: %zu\n", sv); } - if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0)) - == 0) { + } + + if (json) { + unsigned nbins, nlextents, i; + + CTL_GET("arenas.nbins", &nbins, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nbins\": %u,\n", nbins); + + CTL_GET("arenas.nhbins", &uv, unsigned); + malloc_cprintf(write_cb, cbopaque, "\t\t\t\"nhbins\": %u,\n", + uv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"bin\": [\n"); + for (i = 0; i < nbins; i++) { malloc_cprintf(write_cb, cbopaque, - "Maximum thread-cached size class: %zu\n", sv); + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu,\n", sv); + + CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v); + + CTL_M2_GET("arenas.bin.0.slab_size", i, 
&sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"slab_size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : ""); } - if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 && - bv) { - CTL_GET("opt.lg_prof_sample", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t],\n"); + + CTL_GET("arenas.nlextents", &nlextents, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nlextents\": %u,\n", nlextents); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lextent\": [\n"); + for (i = 0; i < nlextents; i++) { malloc_cprintf(write_cb, cbopaque, - "Average profile sample interval: %"PRIu64 - " (2^%zu)\n", (((uint64_t)1U) << sv), sv); + "\t\t\t\t{\n"); - CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: %"PRIu64 - " (2^%zd)\n", - (((uint64_t)1U) << ssv), ssv); - } else { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: N/A\n"); + CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nlextents) ? "," : ""); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t]\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (config_prof || more) ? "," : ""); + } + + /* prof. */ + if (config_prof && json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"prof\": {\n"); + + CTL_GET("prof.thread_active_init", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" : + "false"); + + CTL_GET("prof.active", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %s,\n", bv ? "true" : "false"); + + CTL_GET("prof.gdump", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false"); + + CTL_GET("prof.interval", &u64v, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"interval\": %"FMTu64",\n", u64v); + + CTL_GET("prof.lg_sample", &ssv, ssize_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lg_sample\": %zd\n", ssv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", more ? 
"," : ""); + } +} + +static void +read_global_mutex_stats( + uint64_t results[mutex_prof_num_global_mutexes][mutex_prof_num_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + mutex_prof_global_ind_t i; + for (i = 0; i < mutex_prof_num_global_mutexes; i++) { +#define OP(c, t) \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "mutexes", global_mutex_names[i], #c); \ + CTL_GET(cmd, (t *)&results[i][mutex_counter_##c], t); +MUTEX_PROF_COUNTERS +#undef OP + } +} + +static void +stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool merged, bool destroyed, bool unmerged, bool bins, + bool large, bool mutex) { + size_t allocated, active, metadata, resident, mapped, retained; + size_t num_background_threads; + uint64_t background_thread_num_runs, background_thread_run_interval; + + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.resident", &resident, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + CTL_GET("stats.retained", &retained, size_t); + + uint64_t mutex_stats[mutex_prof_num_global_mutexes][mutex_prof_num_counters]; + if (mutex) { + read_global_mutex_stats(mutex_stats); + } + + if (have_background_thread) { + CTL_GET("stats.background_thread.num_threads", + &num_background_threads, size_t); + CTL_GET("stats.background_thread.num_runs", + &background_thread_num_runs, uint64_t); + CTL_GET("stats.background_thread.run_interval", + &background_thread_run_interval, uint64_t); + } else { + num_background_threads = 0; + background_thread_num_runs = 0; + background_thread_run_interval = 0; + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"allocated\": %zu,\n", allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %zu,\n", active); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"metadata\": %zu,\n", metadata); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"resident\": %zu,\n", resident); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"mapped\": %zu,\n", mapped); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"retained\": %zu,\n", retained); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"background_thread\": {\n"); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"num_threads\": %zu,\n", num_background_threads); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"num_runs\": %"FMTu64",\n", + background_thread_num_runs); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"run_interval\": %"FMTu64"\n", + background_thread_run_interval); + malloc_cprintf(write_cb, cbopaque, "\t\t\t}%s\n", + mutex ? "," : ""); + + if (mutex) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"mutexes\": {\n"); + mutex_prof_global_ind_t i; + for (i = 0; i < mutex_prof_num_global_mutexes; i++) { + mutex_stats_output_json(write_cb, cbopaque, + global_mutex_names[i], mutex_stats[i], + "\t\t\t\t", + i == mutex_prof_num_global_mutexes - 1); + } + malloc_cprintf(write_cb, cbopaque, "\t\t\t}\n"); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (merged || unmerged || destroyed) ? 
"," : ""); + } else { + malloc_cprintf(write_cb, cbopaque, + "Allocated: %zu, active: %zu, metadata: %zu," + " resident: %zu, mapped: %zu, retained: %zu\n", + allocated, active, metadata, resident, mapped, retained); + + if (have_background_thread && num_background_threads > 0) { + malloc_cprintf(write_cb, cbopaque, + "Background threads: %zu, num_runs: %"FMTu64", " + "run_interval: %"FMTu64" ns\n", + num_background_threads, + background_thread_num_runs, + background_thread_run_interval); + } + if (mutex) { + mutex_prof_global_ind_t i; + for (i = 0; i < mutex_prof_num_global_mutexes; i++) { + mutex_stats_output(write_cb, cbopaque, + global_mutex_names[i], mutex_stats[i], + i == 0); } } - CTL_GET("opt.lg_chunk", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n", - (ZU(1) << sv), sv); } - if (config_stats) { - size_t *cactive; - size_t allocated, active, mapped; - size_t chunks_current, chunks_high; - uint64_t chunks_total; - size_t huge_allocated; - uint64_t huge_nmalloc, huge_ndalloc; - - CTL_GET("stats.cactive", &cactive, size_t *); - CTL_GET("stats.allocated", &allocated, size_t); - CTL_GET("stats.active", &active, size_t); - CTL_GET("stats.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "Allocated: %zu, active: %zu, mapped: %zu\n", - allocated, active, mapped); - malloc_cprintf(write_cb, cbopaque, - "Current active ceiling: %zu\n", atomic_read_z(cactive)); - - /* Print chunk stats. */ - CTL_GET("stats.chunks.total", &chunks_total, uint64_t); - CTL_GET("stats.chunks.high", &chunks_high, size_t); - CTL_GET("stats.chunks.current", &chunks_current, size_t); - malloc_cprintf(write_cb, cbopaque, "chunks: nchunks " - "highchunks curchunks\n"); - malloc_cprintf(write_cb, cbopaque, - " %13"PRIu64" %12zu %12zu\n", - chunks_total, chunks_high, chunks_current); - - /* Print huge stats. */ - CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t); - CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t); - CTL_GET("stats.huge.allocated", &huge_allocated, size_t); - malloc_cprintf(write_cb, cbopaque, - "huge: nmalloc ndalloc allocated\n"); - malloc_cprintf(write_cb, cbopaque, - " %12"PRIu64" %12"PRIu64" %12zu\n", - huge_nmalloc, huge_ndalloc, huge_allocated); - - if (merged) { - unsigned narenas; - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i, ninitialized; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - for (i = ninitialized = 0; i < narenas; i++) { - if (initialized[i]) - ninitialized++; + if (merged || destroyed || unmerged) { + unsigned narenas; + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats.arenas\": {\n"); + } + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + size_t sz; + VARIABLE_ARRAY(bool, initialized, narenas); + bool destroyed_initialized; + unsigned i, j, ninitialized; + + xmallctlnametomib("arena.0.initialized", mib, &miblen); + for (i = ninitialized = 0; i < narenas; i++) { + mib[1] = i; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &initialized[i], &sz, + NULL, 0); + if (initialized[i]) { + ninitialized++; } + } + mib[1] = MALLCTL_ARENAS_DESTROYED; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &destroyed_initialized, &sz, + NULL, 0); - if (ninitialized > 1 || unmerged == false) { - /* Print merged arena stats. */ + /* Merged stats. 
*/ + if (merged && (ninitialized > 1 || !unmerged)) { + /* Print merged arena stats. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"merged\": {\n"); + } else { malloc_cprintf(write_cb, cbopaque, "\nMerged arenas stats:\n"); - stats_arena_print(write_cb, cbopaque, - narenas, bins, large); + } + stats_arena_print(write_cb, cbopaque, json, + MALLCTL_ARENAS_ALL, bins, large, mutex); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t}%s\n", + ((destroyed_initialized && + destroyed) || unmerged) ? "," : + ""); } } - } - - if (unmerged) { - unsigned narenas; - /* Print stats for each arena. */ - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); + /* Destroyed stats. */ + if (destroyed_initialized && destroyed) { + /* Print destroyed arena stats. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"destroyed\": {\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "\nDestroyed arenas stats:\n"); + } + stats_arena_print(write_cb, cbopaque, json, + MALLCTL_ARENAS_DESTROYED, bins, large, + mutex); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t}%s\n", unmerged ? "," : + ""); + } + } - for (i = 0; i < narenas; i++) { + /* Unmerged stats. */ + if (unmerged) { + for (i = j = 0; i < narenas; i++) { if (initialized[i]) { - malloc_cprintf(write_cb, - cbopaque, - "\narenas[%u]:\n", i); + if (json) { + j++; + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t\"%u\": {\n", + i); + } else { + malloc_cprintf(write_cb, + cbopaque, + "\narenas[%u]:\n", + i); + } stats_arena_print(write_cb, - cbopaque, i, bins, large); + cbopaque, json, i, bins, + large, mutex); + if (json) { + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t}%s\n", (j < + ninitialized) ? "," + : ""); + } } } } } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t}\n"); + } + } +} + +void +stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) { + int err; + uint64_t epoch; + size_t u64sz; +#define OPTION(o, v, d, s) bool v = d; + STATS_PRINT_OPTIONS +#undef OPTION + + /* + * Refresh stats, in case mallctl() was called by the application. + * + * Check for OOM here, since refreshing the ctl cache can trigger + * allocation. In practice, none of the subsequent mallctl()-related + * calls in this function will cause OOM if this one succeeds. 
+ * */ + epoch = 1; + u64sz = sizeof(uint64_t); + err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, + sizeof(uint64_t)); + if (err != 0) { + if (err == EAGAIN) { + malloc_write("<jemalloc>: Memory allocation failure in " + "mallctl(\"epoch\", ...)\n"); + return; + } + malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " + "...)\n"); + abort(); + } + + if (opts != NULL) { + for (unsigned i = 0; opts[i] != '\0'; i++) { + switch (opts[i]) { +#define OPTION(o, v, d, s) case o: v = s; break; + STATS_PRINT_OPTIONS +#undef OPTION + default:; + } + } + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "{\n" + "\t\"jemalloc\": {\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "___ Begin jemalloc statistics ___\n"); + } + + if (general) { + stats_general_print(write_cb, cbopaque, json, config_stats); + } + if (config_stats) { + stats_print_helper(write_cb, cbopaque, json, merged, destroyed, + unmerged, bins, large, mutex); + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t}\n" + "}\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "--- End jemalloc statistics ---\n"); } - malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); } diff --git a/deps/jemalloc/src/tcache.c b/deps/jemalloc/src/tcache.c index 6de92960b2..936ef3140d 100644 --- a/deps/jemalloc/src/tcache.c +++ b/deps/jemalloc/src/tcache.c @@ -1,131 +1,153 @@ -#define JEMALLOC_TCACHE_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_TCACHE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/size_classes.h" /******************************************************************************/ /* Data. */ -malloc_tsd_data(, tcache, tcache_t *, NULL) -malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default) - bool opt_tcache = true; ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; tcache_bin_info_t *tcache_bin_info; static unsigned stack_nelms; /* Total stack elms per tcache. */ -size_t nhbins; +unsigned nhbins; size_t tcache_maxclass; -/******************************************************************************/ +tcaches_t *tcaches; + +/* Index of first element within tcaches that has never been used. */ +static unsigned tcaches_past; + +/* Head of singly linked list tracking available tcaches elements. */ +static tcaches_t *tcaches_avail; -size_t tcache_salloc(const void *ptr) -{ +/* Protects tcaches{,_past,_avail}. */ +static malloc_mutex_t tcaches_mtx; - return (arena_salloc(ptr, false)); +/******************************************************************************/ + +size_t +tcache_salloc(tsdn_t *tsdn, const void *ptr) { + return arena_salloc(tsdn, ptr); } void -tcache_event_hard(tcache_t *tcache) -{ - size_t binind = tcache->next_gc_bin; - tcache_bin_t *tbin = &tcache->tbins[binind]; - tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; - +tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { + szind_t binind = tcache->next_gc_bin; + + tcache_bin_t *tbin; + if (binind < NBINS) { + tbin = tcache_small_bin_get(tcache, binind); + } else { + tbin = tcache_large_bin_get(tcache, binind); + } if (tbin->low_water > 0) { /* * Flush (ceiling) 3/4 of the objects below the low water mark. 
*/ if (binind < NBINS) { - tcache_bin_flush_small(tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); + tcache_bin_flush_small(tsd, tcache, tbin, binind, + tbin->ncached - tbin->low_water + (tbin->low_water + >> 2)); + /* + * Reduce fill count by 2X. Limit lg_fill_div such that + * the fill count is always at least 1. + */ + tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; + if ((tbin_info->ncached_max >> + (tcache->lg_fill_div[binind] + 1)) >= 1) { + tcache->lg_fill_div[binind]++; + } } else { - tcache_bin_flush_large(tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); + tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached + - tbin->low_water + (tbin->low_water >> 2), tcache); } - /* - * Reduce fill count by 2X. Limit lg_fill_div such that the - * fill count is always at least 1. - */ - if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) - tbin->lg_fill_div++; } else if (tbin->low_water < 0) { /* - * Increase fill count by 2X. Make sure lg_fill_div stays - * greater than 0. + * Increase fill count by 2X for small bins. Make sure + * lg_fill_div stays greater than 0. */ - if (tbin->lg_fill_div > 1) - tbin->lg_fill_div--; + if (binind < NBINS && tcache->lg_fill_div[binind] > 1) { + tcache->lg_fill_div[binind]--; + } } tbin->low_water = tbin->ncached; tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) + if (tcache->next_gc_bin == nhbins) { tcache->next_gc_bin = 0; - tcache->ev_cnt = 0; + } } void * -tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind) -{ +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, bool *tcache_success) { void *ret; - arena_tcache_fill_small(tcache->arena, tbin, binind, + assert(tcache->arena != NULL); + arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind, config_prof ? tcache->prof_accumbytes : 0); - if (config_prof) + if (config_prof) { tcache->prof_accumbytes = 0; - ret = tcache_alloc_easy(tbin); + } + ret = tcache_alloc_easy(tbin, tcache_success); - return (ret); + return ret; } void -tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache) -{ - void *ptr; - unsigned i, nflush, ndeferred; +tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, + szind_t binind, unsigned rem) { bool merged_stats = false; assert(binind < NBINS); assert(rem <= tbin->ncached); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena bin associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *arena = chunk->arena; - arena_bin_t *bin = &arena->bins[binind]; + arena_t *arena = tcache->arena; + assert(arena != NULL); + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + /* Look up extent once per item. */ + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + } - if (config_prof && arena == tcache->arena) { - if (arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); + while (nflush > 0) { + /* Lock the arena bin associated with the first object. 
*/ + extent_t *extent = item_extent[0]; + arena_t *bin_arena = extent_arena_get(extent); + arena_bin_t *bin = &bin_arena->bins[binind]; + + if (config_prof && bin_arena == arena) { + if (arena_prof_accum(tsd_tsdn(tsd), arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } tcache->prof_accumbytes = 0; } - malloc_mutex_lock(&bin->lock); - if (config_stats && arena == tcache->arena) { - assert(merged_stats == false); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (config_stats && bin_arena == arena) { + assert(!merged_stats); merged_stats = true; bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk->arena == arena) { - size_t pageind = ((uintptr_t)ptr - - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = - arena_mapp_get(chunk, pageind); - if (config_fill && opt_junk) { - arena_alloc_junk_small(ptr, - &arena_bin_info[binind], true); - } - arena_dalloc_bin_locked(arena, chunk, ptr, - mapelm); + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_get(extent) == bin_arena) { + arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), + bin_arena, extent, ptr); } else { /* * This object was allocated via a different @@ -133,276 +155,369 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, * locked. Stash the object, so that it can be * handled in a future pass. */ - tbin->avail[ndeferred] = ptr; + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; ndeferred++; } } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); + nflush = ndeferred; } - if (config_stats && merged_stats == false) { + if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ - arena_bin_t *bin = &tcache->arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + arena_bin_t *bin = &arena->bins[binind]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) + if ((low_water_t)tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; + } } void -tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache) -{ - void *ptr; - unsigned i, nflush, ndeferred; +tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache) { bool merged_stats = false; assert(binind < nhbins); assert(rem <= tbin->ncached); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + arena_t *arena = tcache->arena; + assert(arena != NULL); + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + /* Look up extent once per item. 
*/ + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + } + + while (nflush > 0) { /* Lock the arena associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *arena = chunk->arena; + extent_t *extent = item_extent[0]; + arena_t *locked_arena = extent_arena_get(extent); UNUSED bool idump; - if (config_prof) + if (config_prof) { idump = false; - malloc_mutex_lock(&arena->lock); - if ((config_prof || config_stats) && arena == tcache->arena) { + } + + malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx); + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + assert(ptr != NULL); + extent = item_extent[i]; + if (extent_arena_get(extent) == locked_arena) { + large_dalloc_prep_junked_locked(tsd_tsdn(tsd), + extent); + } + } + if ((config_prof || config_stats) && locked_arena == arena) { if (config_prof) { - idump = arena_prof_accum_locked(arena, + idump = arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes); tcache->prof_accumbytes = 0; } if (config_stats) { merged_stats = true; - arena->stats.nrequests_large += - tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; + arena_stats_large_nrequests_add(tsd_tsdn(tsd), + &arena->stats, binind, + tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk->arena == arena) - arena_dalloc_large_locked(arena, chunk, ptr); - else { + malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx); + + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_get(extent) == locked_arena) { + large_dalloc_finish(tsd_tsdn(tsd), extent); + } else { /* * This object was allocated via a different * arena than the one that is currently locked. * Stash the object, so that it can be handled * in a future pass. */ - tbin->avail[ndeferred] = ptr; + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; ndeferred++; } } - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); + if (config_prof && idump) { + prof_idump(tsd_tsdn(tsd)); + } + arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - + ndeferred); + nflush = ndeferred; } - if (config_stats && merged_stats == false) { + if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. 
*/ - arena_t *arena = tcache->arena; - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; + arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats, + binind, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&arena->lock); } - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) + if ((low_water_t)tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; + } } void -tcache_arena_associate(tcache_t *tcache, arena_t *arena) -{ +tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + assert(tcache->arena == NULL); + tcache->arena = arena; if (config_stats) { /* Link into list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } - tcache->arena = arena; } -void -tcache_arena_dissociate(tcache_t *tcache) -{ - +static void +tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) { + arena_t *arena = tcache->arena; + assert(arena != NULL); if (config_stats) { /* Unlink from list of extant tcaches. */ - malloc_mutex_lock(&tcache->arena->lock); - ql_remove(&tcache->arena->tcache_ql, tcache, link); - tcache_stats_merge(tcache, tcache->arena); - malloc_mutex_unlock(&tcache->arena->lock); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + if (config_debug) { + bool in_ql = false; + tcache_t *iter; + ql_foreach(iter, &arena->tcache_ql, link) { + if (iter == tcache) { + in_ql = true; + break; + } + } + assert(in_ql); + } + ql_remove(&arena->tcache_ql, tcache, link); + tcache_stats_merge(tsdn, tcache, arena); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } + tcache->arena = NULL; } +void +tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + tcache_arena_dissociate(tsdn, tcache); + tcache_arena_associate(tsdn, tcache, arena); +} + +bool +tsd_tcache_enabled_data_init(tsd_t *tsd) { + /* Called upon tsd initialization. */ + tsd_tcache_enabled_set(tsd, opt_tcache); + tsd_slow_update(tsd); + + if (opt_tcache) { + /* Trigger tcache init. */ + tsd_tcache_data_init(tsd); + } + + return false; +} + +/* Initialize auto tcache (embedded in TSD). */ +static void +tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) { + memset(&tcache->link, 0, sizeof(ql_elm(tcache_t))); + tcache->prof_accumbytes = 0; + tcache->next_gc_bin = 0; + tcache->arena = NULL; + + ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); + + size_t stack_offset = 0; + assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); + memset(tcache->tbins_small, 0, sizeof(tcache_bin_t) * NBINS); + memset(tcache->tbins_large, 0, sizeof(tcache_bin_t) * (nhbins - NBINS)); + unsigned i = 0; + for (; i < NBINS; i++) { + tcache->lg_fill_div[i] = 1; + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + /* + * avail points past the available space. Allocations will + * access the slots toward higher addresses (for the benefit of + * prefetch). 
+ */ + tcache_small_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + } + for (; i < nhbins; i++) { + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + tcache_large_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + } + assert(stack_offset == stack_nelms * sizeof(void *)); +} + +/* Initialize auto tcache (embedded in TSD). */ +bool +tsd_tcache_data_init(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get_unsafe(tsd); + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + size_t size = stack_nelms * sizeof(void *); + /* Avoid false cacheline sharing. */ + size = sz_sa2u(size, CACHELINE); + + void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, + NULL, true, arena_get(TSDN_NULL, 0, true)); + if (avail_array == NULL) { + return true; + } + + tcache_init(tsd, tcache, avail_array); + /* + * Initialization is a bit tricky here. After malloc init is done, all + * threads can rely on arena_choose and associate tcache accordingly. + * However, the thread that does actual malloc bootstrapping relies on + * functional tsd, and it can only rely on a0. In that case, we + * associate its tcache to a0 temporarily, and later on + * arena_choose_hard() will re-associate properly. + */ + tcache->arena = NULL; + arena_t *arena; + if (!malloc_initialized()) { + /* If in initialization, assign to a0. */ + arena = arena_get(tsd_tsdn(tsd), 0, false); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } else { + arena = arena_choose(tsd, NULL); + /* This may happen if thread.tcache.enabled is used. */ + if (tcache->arena == NULL) { + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } + } + assert(arena == tcache->arena); + + return false; +} + +/* Created manual tcache for tcache.create mallctl. */ tcache_t * -tcache_create(arena_t *arena) -{ +tcache_create_explicit(tsd_t *tsd) { tcache_t *tcache; size_t size, stack_offset; - unsigned i; - size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); + size = sizeof(tcache_t); /* Naturally align the pointer stacks. */ size = PTR_CEILING(size); stack_offset = size; size += stack_nelms * sizeof(void *); - /* - * Round up to the nearest multiple of the cacheline size, in order to - * avoid the possibility of false cacheline sharing. - * - * That this works relies on the same logic as in ipalloc(), but we - * cannot directly call ipalloc() here due to tcache bootstrapping - * issues. - */ - size = (size + CACHELINE_MASK) & (-CACHELINE); - - if (size <= SMALL_MAXCLASS) - tcache = (tcache_t *)arena_malloc_small(arena, size, true); - else if (size <= tcache_maxclass) - tcache = (tcache_t *)arena_malloc_large(arena, size, true); - else - tcache = (tcache_t *)icalloct(size, false, arena); - - if (tcache == NULL) - return (NULL); - - tcache_arena_associate(tcache, arena); + /* Avoid false cacheline sharing. 
*/ + size = sz_sa2u(size, CACHELINE); - assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); - for (i = 0; i < nhbins; i++) { - tcache->tbins[i].lg_fill_div = 1; - tcache->tbins[i].avail = (void **)((uintptr_t)tcache + - (uintptr_t)stack_offset); - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true, + arena_get(TSDN_NULL, 0, true)); + if (tcache == NULL) { + return NULL; } - tcache_tsd_set(&tcache); + tcache_init(tsd, tcache, + (void *)((uintptr_t)tcache + (uintptr_t)stack_offset)); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL)); - return (tcache); + return tcache; } -void -tcache_destroy(tcache_t *tcache) -{ - unsigned i; - size_t tcache_size; +static void +tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) { + assert(tcache->arena != NULL); - tcache_arena_dissociate(tcache); + for (unsigned i = 0; i < NBINS; i++) { + tcache_bin_t *tbin = tcache_small_bin_get(tcache, i); + tcache_bin_flush_small(tsd, tcache, tbin, i, 0); - for (i = 0; i < NBINS; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_small(tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_t *arena = tcache->arena; - arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + if (config_stats) { + assert(tbin->tstats.nrequests == 0); } } + for (unsigned i = NBINS; i < nhbins; i++) { + tcache_bin_t *tbin = tcache_large_bin_get(tcache, i); + tcache_bin_flush_large(tsd, tbin, i, 0, tcache); - for (; i < nhbins; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_large(tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_t *arena = tcache->arena; - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[i - NBINS].nrequests += - tbin->tstats.nrequests; - malloc_mutex_unlock(&arena->lock); + if (config_stats) { + assert(tbin->tstats.nrequests == 0); } } if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(tcache->arena, tcache->prof_accumbytes)) - prof_idump(); - - tcache_size = arena_salloc(tcache, false); - if (tcache_size <= SMALL_MAXCLASS) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); - arena_t *arena = chunk->arena; - size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >> - LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - - arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm); - } else if (tcache_size <= tcache_maxclass) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); - arena_t *arena = chunk->arena; - - arena_dalloc_large(arena, chunk, tcache); - } else - idalloct(tcache, false); + arena_prof_accum(tsd_tsdn(tsd), tcache->arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } } void -tcache_thread_cleanup(void *arg) -{ - tcache_t *tcache = *(tcache_t **)arg; +tcache_flush(tsd_t *tsd) { + assert(tcache_available(tsd)); + tcache_flush_cache(tsd, tsd_tcachep_get(tsd)); +} - if (tcache == TCACHE_STATE_DISABLED) { - /* Do nothing. */ - } else if (tcache == TCACHE_STATE_REINCARNATED) { - /* - * Another destructor called an allocator function after this - * destructor was called. Reset tcache to - * TCACHE_STATE_PURGATORY in order to receive another callback. 
- */ - tcache = TCACHE_STATE_PURGATORY; - tcache_tsd_set(&tcache); - } else if (tcache == TCACHE_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to TCACHE_STATE_PURGATORY so that other destructors wouldn't - * cause re-creation of the tcache. This time, do nothing, so - * that the destructor will not be called again. - */ - } else if (tcache != NULL) { - assert(tcache != TCACHE_STATE_PURGATORY); - tcache_destroy(tcache); - tcache = TCACHE_STATE_PURGATORY; - tcache_tsd_set(&tcache); +static void +tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { + tcache_flush_cache(tsd, tcache); + tcache_arena_dissociate(tsd_tsdn(tsd), tcache); + + if (tsd_tcache) { + /* Release the avail array for the TSD embedded auto tcache. */ + void *avail_array = + (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail - + (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *)); + idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true); + } else { + /* Release both the tcache struct and avail array. */ + idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true); } } -/* Caller must own arena->lock. */ +/* For auto tcache (embedded in TSD) only. */ void -tcache_stats_merge(tcache_t *tcache, arena_t *arena) -{ +tcache_cleanup(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get(tsd); + if (!tcache_available(tsd)) { + assert(tsd_tcache_enabled_get(tsd) == false); + if (config_debug) { + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + } + return; + } + assert(tsd_tcache_enabled_get(tsd)); + assert(tcache_small_bin_get(tcache, 0)->avail != NULL); + + tcache_destroy(tsd, tcache, true); + if (config_debug) { + tcache_small_bin_get(tcache, 0)->avail = NULL; + } +} + +void +tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { unsigned i; cassert(config_stats); @@ -410,48 +525,151 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena) /* Merge and reset tcache stats. */ for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; - tcache_bin_t *tbin = &tcache->tbins[i]; - malloc_mutex_lock(&bin->lock); + tcache_bin_t *tbin = tcache_small_bin_get(tcache, i); + malloc_mutex_lock(tsdn, &bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->tstats.nrequests = 0; } for (; i < nhbins; i++) { - malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; - tcache_bin_t *tbin = &tcache->tbins[i]; - arena->stats.nrequests_large += tbin->tstats.nrequests; - lstats->nrequests += tbin->tstats.nrequests; + tcache_bin_t *tbin = tcache_large_bin_get(tcache, i); + arena_stats_large_nrequests_add(tsdn, &arena->stats, i, + tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } } +static bool +tcaches_create_prep(tsd_t *tsd) { + bool err; + + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + + if (tcaches == NULL) { + tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) + * (MALLOCX_TCACHE_MAX+1), CACHELINE); + if (tcaches == NULL) { + err = true; + goto label_return; + } + } + + if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { + err = true; + goto label_return; + } + + err = false; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + return err; +} + bool -tcache_boot0(void) -{ - unsigned i; +tcaches_create(tsd_t *tsd, unsigned *r_ind) { + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); - /* - * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is - * known. 
- */ - if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) + bool err; + + if (tcaches_create_prep(tsd)) { + err = true; + goto label_return; + } + + tcache_t *tcache = tcache_create_explicit(tsd); + if (tcache == NULL) { + err = true; + goto label_return; + } + + tcaches_t *elm; + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcaches_avail != NULL) { + elm = tcaches_avail; + tcaches_avail = tcaches_avail->next; + elm->tcache = tcache; + *r_ind = (unsigned)(elm - tcaches); + } else { + elm = &tcaches[tcaches_past]; + elm->tcache = tcache; + *r_ind = tcaches_past; + tcaches_past++; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + + err = false; +label_return: + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); + return err; +} + +static tcache_t * +tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); + + if (elm->tcache == NULL) { + return NULL; + } + tcache_t *tcache = elm->tcache; + elm->tcache = NULL; + return tcache; +} + +void +tcaches_flush(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]); + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + tcache_destroy(tsd, tcache, false); + } +} + +void +tcaches_destroy(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcaches_t *elm = &tcaches[ind]; + tcache_t *tcache = tcaches_elm_remove(tsd, elm); + elm->next = tcaches_avail; + tcaches_avail = elm; + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + tcache_destroy(tsd, tcache, false); + } +} + +bool +tcache_boot(tsdn_t *tsdn) { + /* If necessary, clamp opt_lg_tcache_max. */ + if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < + SMALL_MAXCLASS) { tcache_maxclass = SMALL_MAXCLASS; - else if ((1U << opt_lg_tcache_max) > arena_maxclass) - tcache_maxclass = arena_maxclass; - else - tcache_maxclass = (1U << opt_lg_tcache_max); + } else { + tcache_maxclass = (ZU(1) << opt_lg_tcache_max); + } - nhbins = NBINS + (tcache_maxclass >> LG_PAGE); + if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES, + malloc_mutex_rank_exclusive)) { + return true; + } + + nhbins = sz_size2index(tcache_maxclass) + 1; /* Initialize tcache_bin_info. 
*/ - tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * - sizeof(tcache_bin_info_t)); - if (tcache_bin_info == NULL) - return (true); + tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins + * sizeof(tcache_bin_info_t), CACHELINE); + if (tcache_bin_info == NULL) { + return true; + } stack_nelms = 0; + unsigned i; for (i = 0; i < NBINS; i++) { - if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { + if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MIN; + } else if ((arena_bin_info[i].nregs << 1) <= + TCACHE_NSLOTS_SMALL_MAX) { tcache_bin_info[i].ncached_max = (arena_bin_info[i].nregs << 1); } else { @@ -465,15 +683,26 @@ tcache_boot0(void) stack_nelms += tcache_bin_info[i].ncached_max; } - return (false); + return false; } -bool -tcache_boot1(void) -{ +void +tcache_prefork(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_prefork(tsdn, &tcaches_mtx); + } +} - if (tcache_tsd_boot() || tcache_enabled_tsd_boot()) - return (true); +void +tcache_postfork_parent(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); + } +} - return (false); +void +tcache_postfork_child(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_child(tsdn, &tcaches_mtx); + } } diff --git a/deps/jemalloc/src/tsd.c b/deps/jemalloc/src/tsd.c index 700caabfe4..f968992f2b 100644 --- a/deps/jemalloc/src/tsd.c +++ b/deps/jemalloc/src/tsd.c @@ -1,5 +1,10 @@ -#define JEMALLOC_TSD_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_TSD_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" /******************************************************************************/ /* Data. */ @@ -7,28 +12,148 @@ static unsigned ncleanups; static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; +__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false; +bool tsd_booted = false; +#elif (defined(JEMALLOC_TLS)) +__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; +pthread_key_t tsd_tsd; +bool tsd_booted = false; +#elif (defined(_WIN32)) +DWORD tsd_tsd; +tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER}; +bool tsd_booted = false; +#else + +/* + * This contains a mutex, but it's pretty convenient to allow the mutex code to + * have a dependency on tsd. So we define the struct here, and only refer to it + * by pointer in the header. 
+ */ +struct tsd_init_head_s { + ql_head(tsd_init_block_t) blocks; + malloc_mutex_t lock; +}; + +pthread_key_t tsd_tsd; +tsd_init_head_t tsd_init_head = { + ql_head_initializer(blocks), + MALLOC_MUTEX_INITIALIZER +}; +tsd_wrapper_t tsd_boot_wrapper = { + false, + TSD_INITIALIZER +}; +bool tsd_booted = false; +#endif + + /******************************************************************************/ -void * -malloc_tsd_malloc(size_t size) -{ +void +tsd_slow_update(tsd_t *tsd) { + if (tsd_nominal(tsd)) { + if (malloc_slow || !tsd_tcache_enabled_get(tsd) || + tsd_reentrancy_level_get(tsd) > 0) { + tsd->state = tsd_state_nominal_slow; + } else { + tsd->state = tsd_state_nominal; + } + } +} + +static bool +tsd_data_init(tsd_t *tsd) { + /* + * We initialize the rtree context first (before the tcache), since the + * tcache initialization depends on it. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); - /* Avoid choose_arena() in order to dodge bootstrapping issues. */ - return (arena_malloc(arenas[0], size, false, false)); + return tsd_tcache_enabled_data_init(tsd); } -void -malloc_tsd_dalloc(void *wrapper) -{ +static void +assert_tsd_data_cleanup_done(tsd_t *tsd) { + assert(!tsd_nominal(tsd)); + assert(*tsd_arenap_get_unsafe(tsd) == NULL); + assert(*tsd_iarenap_get_unsafe(tsd) == NULL); + assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true); + assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL); + assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false); + assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL); +} + +static bool +tsd_data_init_nocleanup(tsd_t *tsd) { + assert(tsd->state == tsd_state_reincarnated || + tsd->state == tsd_state_minimal_initialized); + /* + * During reincarnation, there is no guarantee that the cleanup function + * will be called (deallocation may happen after all tsd destructors). + * We set up tsd in a way that no cleanup is needed. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + *tsd_tcache_enabledp_get_unsafe(tsd) = false; + *tsd_reentrancy_levelp_get(tsd) = 1; + assert_tsd_data_cleanup_done(tsd); - idalloct(wrapper, false); + return false; } -void -malloc_tsd_no_cleanup(void *arg) -{ +tsd_t * +tsd_fetch_slow(tsd_t *tsd, bool minimal) { + assert(!tsd_fast(tsd)); + + if (tsd->state == tsd_state_nominal_slow) { + /* On slow path but no work needed. */ + assert(malloc_slow || !tsd_tcache_enabled_get(tsd) || + tsd_reentrancy_level_get(tsd) > 0 || + *tsd_arenas_tdata_bypassp_get(tsd)); + } else if (tsd->state == tsd_state_uninitialized) { + if (!minimal) { + tsd->state = tsd_state_nominal; + tsd_slow_update(tsd); + /* Trigger cleanup handler registration. */ + tsd_set(tsd); + tsd_data_init(tsd); + } else { + tsd->state = tsd_state_minimal_initialized; + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } + } else if (tsd->state == tsd_state_minimal_initialized) { + if (!minimal) { + /* Switch to fully initialized. 
*/ + tsd->state = tsd_state_nominal; + assert(*tsd_reentrancy_levelp_get(tsd) >= 1); + (*tsd_reentrancy_levelp_get(tsd))--; + tsd_slow_update(tsd); + tsd_data_init(tsd); + } else { + assert_tsd_data_cleanup_done(tsd); + } + } else if (tsd->state == tsd_state_purgatory) { + tsd->state = tsd_state_reincarnated; + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } else { + assert(tsd->state == tsd_state_reincarnated); + } + + return tsd; +} + +void * +malloc_tsd_malloc(size_t size) { + return a0malloc(CACHELINE_CEILING(size)); +} - not_reached(); +void +malloc_tsd_dalloc(void *wrapper) { + a0dalloc(wrapper); } #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) @@ -36,21 +161,22 @@ malloc_tsd_no_cleanup(void *arg) JEMALLOC_EXPORT #endif void -_malloc_thread_cleanup(void) -{ +_malloc_thread_cleanup(void) { bool pending[MALLOC_TSD_CLEANUPS_MAX], again; unsigned i; - for (i = 0; i < ncleanups; i++) + for (i = 0; i < ncleanups; i++) { pending[i] = true; + } do { again = false; for (i = 0; i < ncleanups; i++) { if (pending[i]) { pending[i] = cleanups[i](); - if (pending[i]) + if (pending[i]) { again = true; + } } } } while (again); @@ -58,26 +184,92 @@ _malloc_thread_cleanup(void) #endif void -malloc_tsd_cleanup_register(bool (*f)(void)) -{ - +malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); cleanups[ncleanups] = f; ncleanups++; } +static void +tsd_do_data_cleanup(tsd_t *tsd) { + prof_tdata_cleanup(tsd); + iarena_cleanup(tsd); + arena_cleanup(tsd); + arenas_tdata_cleanup(tsd); + tcache_cleanup(tsd); + witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd)); +} + void -malloc_tsd_boot(void) -{ +tsd_cleanup(void *arg) { + tsd_t *tsd = (tsd_t *)arg; + + switch (tsd->state) { + case tsd_state_uninitialized: + /* Do nothing. */ + break; + case tsd_state_minimal_initialized: + /* This implies the thread only did free() in its life time. */ + /* Fall through. */ + case tsd_state_reincarnated: + /* + * Reincarnated means another destructor deallocated memory + * after the destructor was called. Cleanup isn't required but + * is still called for testing and completeness. + */ + assert_tsd_data_cleanup_done(tsd); + /* Fall through. */ + case tsd_state_nominal: + case tsd_state_nominal_slow: + tsd_do_data_cleanup(tsd); + tsd->state = tsd_state_purgatory; + tsd_set(tsd); + break; + case tsd_state_purgatory: + /* + * The previous time this destructor was called, we set the + * state to tsd_state_purgatory so that other destructors + * wouldn't cause re-creation of the tsd. This time, do + * nothing, and do not request another callback. + */ + break; + default: + not_reached(); + } +#ifdef JEMALLOC_JET + test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd); + int *data = tsd_test_datap_get_unsafe(tsd); + if (test_callback != NULL) { + test_callback(data); + } +#endif +} + +tsd_t * +malloc_tsd_boot0(void) { + tsd_t *tsd; ncleanups = 0; + if (tsd_boot0()) { + return NULL; + } + tsd = tsd_fetch(); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + return tsd; +} + +void +malloc_tsd_boot1(void) { + tsd_boot1(); + tsd_t *tsd = tsd_fetch(); + /* malloc_slow has been set properly. Update tsd_slow. 
*/ + tsd_slow_update(tsd); + *tsd_arenas_tdata_bypassp_get(tsd) = false; } #ifdef _WIN32 static BOOL WINAPI -_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) -{ - +_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { switch (fdwReason) { #ifdef JEMALLOC_LAZY_LOCK case DLL_THREAD_ATTACH: @@ -90,52 +282,60 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) default: break; } - return (true); + return true; } +/* + * We need to be able to say "read" here (in the "pragma section"), but have + * hooked "read". We won't read for the rest of the file, so we can get away + * with unhooking. + */ +#ifdef read +# undef read +#endif + #ifdef _MSC_VER # ifdef _M_IX86 # pragma comment(linker, "/INCLUDE:__tls_used") +# pragma comment(linker, "/INCLUDE:_tls_callback") # else # pragma comment(linker, "/INCLUDE:_tls_used") +# pragma comment(linker, "/INCLUDE:tls_callback") # endif # pragma section(".CRT$XLY",long,read) #endif JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) -static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL, +BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; #endif #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void * -tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) -{ +tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { pthread_t self = pthread_self(); tsd_init_block_t *iter; /* Check whether this thread has already inserted into the list. */ - malloc_mutex_lock(&head->lock); + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_foreach(iter, &head->blocks, link) { if (iter->thread == self) { - malloc_mutex_unlock(&head->lock); - return (iter->data); + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return iter->data; } } /* Insert block into list. */ ql_elm_new(block, link); block->thread = self; ql_tail_insert(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); - return (NULL); + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return NULL; } void -tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) -{ - - malloc_mutex_lock(&head->lock); +tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_remove(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); + malloc_mutex_unlock(TSDN_NULL, &head->lock); } #endif diff --git a/deps/jemalloc/src/zone.c b/deps/jemalloc/src/zone.c index e0302ef4ed..9d3b7b4952 100644 --- a/deps/jemalloc/src/zone.c +++ b/deps/jemalloc/src/zone.c @@ -1,10 +1,83 @@ -#include "jemalloc/internal/jemalloc_internal.h" +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + #ifndef JEMALLOC_ZONE # error "This source file is for zones on Darwin (OS X)." #endif +/* Definitions of the following structs in malloc/malloc.h might be too old + * for the built binary to run on newer versions of OSX. So use the newest + * possible version of those structs. 
+ */ +typedef struct _malloc_zone_t { + void *reserved1; + void *reserved2; + size_t (*size)(struct _malloc_zone_t *, const void *); + void *(*malloc)(struct _malloc_zone_t *, size_t); + void *(*calloc)(struct _malloc_zone_t *, size_t, size_t); + void *(*valloc)(struct _malloc_zone_t *, size_t); + void (*free)(struct _malloc_zone_t *, void *); + void *(*realloc)(struct _malloc_zone_t *, void *, size_t); + void (*destroy)(struct _malloc_zone_t *); + const char *zone_name; + unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); + void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); + struct malloc_introspection_t *introspect; + unsigned version; + void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); + void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); + size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); +} malloc_zone_t; + +typedef struct { + vm_address_t address; + vm_size_t size; +} vm_range_t; + +typedef struct malloc_statistics_t { + unsigned blocks_in_use; + size_t size_in_use; + size_t max_size_in_use; + size_t size_allocated; +} malloc_statistics_t; + +typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); + +typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); + +typedef struct malloc_introspection_t { + kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); + size_t (*good_size)(malloc_zone_t *, size_t); + boolean_t (*check)(malloc_zone_t *); + void (*print)(malloc_zone_t *, boolean_t); + void (*log)(malloc_zone_t *, void *); + void (*force_lock)(malloc_zone_t *); + void (*force_unlock)(malloc_zone_t *); + void (*statistics)(malloc_zone_t *, malloc_statistics_t *); + boolean_t (*zone_locked)(malloc_zone_t *); + boolean_t (*enable_discharge_checking)(malloc_zone_t *); + boolean_t (*disable_discharge_checking)(malloc_zone_t *); + void (*discharge)(malloc_zone_t *, void *); +#ifdef __BLOCKS__ + void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); +#else + void *enumerate_unavailable_without_blocks; +#endif + void (*reinit_lock)(malloc_zone_t *); +} malloc_introspection_t; + +extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); + +extern malloc_zone_t *malloc_default_zone(void); + +extern void malloc_zone_register(malloc_zone_t *zone); + +extern void malloc_zone_unregister(malloc_zone_t *zone); + /* - * The malloc_default_purgeable_zone function is only available on >= 10.6. + * The malloc_default_purgeable_zone() function is only available on >= 10.6. * We need to check whether it is present at runtime, thus the weak_import. */ extern malloc_zone_t *malloc_default_purgeable_zone(void) @@ -13,30 +86,42 @@ JEMALLOC_ATTR(weak_import); /******************************************************************************/ /* Data. */ -static malloc_zone_t zone; -static struct malloc_introspection_t zone_introspect; +static malloc_zone_t *default_zone, *purgeable_zone; +static malloc_zone_t jemalloc_zone; +static struct malloc_introspection_t jemalloc_zone_introspect; /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ -static size_t zone_size(malloc_zone_t *zone, void *ptr); +static size_t zone_size(malloc_zone_t *zone, const void *ptr); static void *zone_malloc(malloc_zone_t *zone, size_t size); static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); static void *zone_valloc(malloc_zone_t *zone, size_t size); static void zone_free(malloc_zone_t *zone, void *ptr); static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); -#if (JEMALLOC_ZONE_VERSION >= 5) static void *zone_memalign(malloc_zone_t *zone, size_t alignment, -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) size_t size); static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size); -#endif -static void *zone_destroy(malloc_zone_t *zone); +static void zone_destroy(malloc_zone_t *zone); +static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, + void **results, unsigned num_requested); +static void zone_batch_free(struct _malloc_zone_t *zone, + void **to_be_freed, unsigned num_to_be_freed); +static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); static size_t zone_good_size(malloc_zone_t *zone, size_t size); +static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder); +static boolean_t zone_check(malloc_zone_t *zone); +static void zone_print(malloc_zone_t *zone, boolean_t verbose); +static void zone_log(malloc_zone_t *zone, void *address); static void zone_force_lock(malloc_zone_t *zone); static void zone_force_unlock(malloc_zone_t *zone); +static void zone_statistics(malloc_zone_t *zone, + malloc_statistics_t *stats); +static boolean_t zone_locked(malloc_zone_t *zone); +static void zone_reinit_lock(malloc_zone_t *zone); /******************************************************************************/ /* @@ -44,9 +129,7 @@ static void zone_force_unlock(malloc_zone_t *zone); */ static size_t -zone_size(malloc_zone_t *zone, void *ptr) -{ - +zone_size(malloc_zone_t *zone, const void *ptr) { /* * There appear to be places within Darwin (such as setenv(3)) that * cause calls to this function with pointers that *no* zone owns. If @@ -54,40 +137,33 @@ zone_size(malloc_zone_t *zone, void *ptr) * our zone into two parts, and use one as the default allocator and * the other as the default deallocator/reallocator. Since that will * not work in practice, we must check all pointers to assure that they - * reside within a mapped chunk before determining size. + * reside within a mapped extent before determining size. */ - return (ivsalloc(ptr, config_prof)); + return ivsalloc(tsdn_fetch(), ptr); } static void * -zone_malloc(malloc_zone_t *zone, size_t size) -{ - - return (je_malloc(size)); +zone_malloc(malloc_zone_t *zone, size_t size) { + return je_malloc(size); } static void * -zone_calloc(malloc_zone_t *zone, size_t num, size_t size) -{ - - return (je_calloc(num, size)); +zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { + return je_calloc(num, size); } static void * -zone_valloc(malloc_zone_t *zone, size_t size) -{ +zone_valloc(malloc_zone_t *zone, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. 
*/ je_posix_memalign(&ret, PAGE, size); - return (ret); + return ret; } static void -zone_free(malloc_zone_t *zone, void *ptr) -{ - - if (ivsalloc(ptr, config_prof) != 0) { +zone_free(malloc_zone_t *zone, void *ptr) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { je_free(ptr); return; } @@ -96,163 +172,280 @@ zone_free(malloc_zone_t *zone, void *ptr) } static void * -zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(ptr, config_prof) != 0) - return (je_realloc(ptr, size)); +zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { + return je_realloc(ptr, size); + } - return (realloc(ptr, size)); + return realloc(ptr, size); } -#if (JEMALLOC_ZONE_VERSION >= 5) static void * -zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ +zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, alignment, size); - return (ret); + return ret; } -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) static void -zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ +zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { + size_t alloc_size; - if (ivsalloc(ptr, config_prof) != 0) { - assert(ivsalloc(ptr, config_prof) == size); + alloc_size = ivsalloc(tsdn_fetch(), ptr); + if (alloc_size != 0) { + assert(alloc_size == size); je_free(ptr); return; } free(ptr); } -#endif - -static void * -zone_destroy(malloc_zone_t *zone) -{ +static void +zone_destroy(malloc_zone_t *zone) { /* This function should never be called. */ not_reached(); - return (NULL); +} + +static unsigned +zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, + unsigned num_requested) { + unsigned i; + + for (i = 0; i < num_requested; i++) { + results[i] = je_malloc(size); + if (!results[i]) + break; + } + + return i; +} + +static void +zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, + unsigned num_to_be_freed) { + unsigned i; + + for (i = 0; i < num_to_be_freed; i++) { + zone_free(zone, to_be_freed[i]); + to_be_freed[i] = NULL; + } } static size_t -zone_good_size(malloc_zone_t *zone, size_t size) -{ +zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) { + return 0; +} - if (size == 0) +static size_t +zone_good_size(malloc_zone_t *zone, size_t size) { + if (size == 0) { size = 1; - return (s2u(size)); + } + return sz_s2u(size); +} + +static kern_return_t +zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder) { + return KERN_SUCCESS; +} + +static boolean_t +zone_check(malloc_zone_t *zone) { + return true; +} + +static void +zone_print(malloc_zone_t *zone, boolean_t verbose) { } static void -zone_force_lock(malloc_zone_t *zone) -{ +zone_log(malloc_zone_t *zone, void *address) { +} - if (isthreaded) +static void +zone_force_lock(malloc_zone_t *zone) { + if (isthreaded) { jemalloc_prefork(); + } +} + +static void +zone_force_unlock(malloc_zone_t *zone) { + /* + * Call jemalloc_postfork_child() rather than + * jemalloc_postfork_parent(), because this function is executed by both + * parent and child. The parent can tolerate having state + * reinitialized, but the child cannot unlock mutexes that were locked + * by the parent. 
+ */ + if (isthreaded) { + jemalloc_postfork_child(); + } +} + +static void +zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { + /* We make no effort to actually fill the values */ + stats->blocks_in_use = 0; + stats->size_in_use = 0; + stats->max_size_in_use = 0; + stats->size_allocated = 0; +} + +static boolean_t +zone_locked(malloc_zone_t *zone) { + /* Pretend no lock is being held */ + return false; +} + +static void +zone_reinit_lock(malloc_zone_t *zone) { + /* As of OSX 10.12, this function is only used when force_unlock would + * be used if the zone version were < 9. So just use force_unlock. */ + zone_force_unlock(zone); +} + +static void +zone_init(void) { + jemalloc_zone.size = zone_size; + jemalloc_zone.malloc = zone_malloc; + jemalloc_zone.calloc = zone_calloc; + jemalloc_zone.valloc = zone_valloc; + jemalloc_zone.free = zone_free; + jemalloc_zone.realloc = zone_realloc; + jemalloc_zone.destroy = zone_destroy; + jemalloc_zone.zone_name = "jemalloc_zone"; + jemalloc_zone.batch_malloc = zone_batch_malloc; + jemalloc_zone.batch_free = zone_batch_free; + jemalloc_zone.introspect = &jemalloc_zone_introspect; + jemalloc_zone.version = 9; + jemalloc_zone.memalign = zone_memalign; + jemalloc_zone.free_definite_size = zone_free_definite_size; + jemalloc_zone.pressure_relief = zone_pressure_relief; + + jemalloc_zone_introspect.enumerator = zone_enumerator; + jemalloc_zone_introspect.good_size = zone_good_size; + jemalloc_zone_introspect.check = zone_check; + jemalloc_zone_introspect.print = zone_print; + jemalloc_zone_introspect.log = zone_log; + jemalloc_zone_introspect.force_lock = zone_force_lock; + jemalloc_zone_introspect.force_unlock = zone_force_unlock; + jemalloc_zone_introspect.statistics = zone_statistics; + jemalloc_zone_introspect.zone_locked = zone_locked; + jemalloc_zone_introspect.enable_discharge_checking = NULL; + jemalloc_zone_introspect.disable_discharge_checking = NULL; + jemalloc_zone_introspect.discharge = NULL; +#ifdef __BLOCKS__ + jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; +#else + jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; +#endif + jemalloc_zone_introspect.reinit_lock = zone_reinit_lock; +} + +static malloc_zone_t * +zone_default_get(void) { + malloc_zone_t **zones = NULL; + unsigned int num_zones = 0; + + /* + * On OSX 10.12, malloc_default_zone returns a special zone that is not + * present in the list of registered zones. That zone uses a "lite zone" + * if one is present (apparently enabled when malloc stack logging is + * enabled), or the first registered zone otherwise. In practice this + * means unless malloc stack logging is enabled, the first registered + * zone is the default. So get the list of zones to get the first one, + * instead of relying on malloc_default_zone. + */ + if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, + (vm_address_t**)&zones, &num_zones)) { + /* + * Reset the value in case the failure happened after it was + * set. + */ + num_zones = 0; + } + + if (num_zones) { + return zones[0]; + } + + return malloc_default_zone(); } +/* As written, this function can only promote jemalloc_zone. */ static void -zone_force_unlock(malloc_zone_t *zone) -{ +zone_promote(void) { + malloc_zone_t *zone; + + do { + /* + * Unregister and reregister the default zone. On OSX >= 10.6, + * unregistering takes the last registered zone and places it + * at the location of the specified zone. Unregistering the + * default zone thus makes the last registered one the default. 
+ * On OSX < 10.6, unregistering shifts all registered zones. + * The first registered zone then becomes the default. + */ + malloc_zone_unregister(default_zone); + malloc_zone_register(default_zone); - if (isthreaded) - jemalloc_postfork_parent(); + /* + * On OSX 10.6, having the default purgeable zone appear before + * the default zone makes some things crash because it thinks it + * owns the default zone allocated pointers. We thus + * unregister/re-register it in order to ensure it's always + * after the default zone. On OSX < 10.6, there is no purgeable + * zone, so this does nothing. On OSX >= 10.6, unregistering + * replaces the purgeable zone with the last registered zone + * above, i.e. the default zone. Registering it again then puts + * it at the end, obviously after the default zone. + */ + if (purgeable_zone != NULL) { + malloc_zone_unregister(purgeable_zone); + malloc_zone_register(purgeable_zone); + } + + zone = zone_default_get(); + } while (zone != &jemalloc_zone); } JEMALLOC_ATTR(constructor) void -register_zone(void) -{ - +zone_register(void) { /* * If something else replaced the system default zone allocator, don't * register jemalloc's. */ - malloc_zone_t *default_zone = malloc_default_zone(); - if (!default_zone->zone_name || - strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { + default_zone = zone_default_get(); + if (!default_zone->zone_name || strcmp(default_zone->zone_name, + "DefaultMallocZone") != 0) { return; } - zone.size = (void *)zone_size; - zone.malloc = (void *)zone_malloc; - zone.calloc = (void *)zone_calloc; - zone.valloc = (void *)zone_valloc; - zone.free = (void *)zone_free; - zone.realloc = (void *)zone_realloc; - zone.destroy = (void *)zone_destroy; - zone.zone_name = "jemalloc_zone"; - zone.batch_malloc = NULL; - zone.batch_free = NULL; - zone.introspect = &zone_introspect; - zone.version = JEMALLOC_ZONE_VERSION; -#if (JEMALLOC_ZONE_VERSION >= 5) - zone.memalign = zone_memalign; -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) - zone.free_definite_size = zone_free_definite_size; -#endif -#if (JEMALLOC_ZONE_VERSION >= 8) - zone.pressure_relief = NULL; -#endif - - zone_introspect.enumerator = NULL; - zone_introspect.good_size = (void *)zone_good_size; - zone_introspect.check = NULL; - zone_introspect.print = NULL; - zone_introspect.log = NULL; - zone_introspect.force_lock = (void *)zone_force_lock; - zone_introspect.force_unlock = (void *)zone_force_unlock; - zone_introspect.statistics = NULL; -#if (JEMALLOC_ZONE_VERSION >= 6) - zone_introspect.zone_locked = NULL; -#endif -#if (JEMALLOC_ZONE_VERSION >= 7) - zone_introspect.enable_discharge_checking = NULL; - zone_introspect.disable_discharge_checking = NULL; - zone_introspect.discharge = NULL; -#ifdef __BLOCKS__ - zone_introspect.enumerate_discharged_pointers = NULL; -#else - zone_introspect.enumerate_unavailable_without_blocks = NULL; -#endif -#endif - /* * The default purgeable zone is created lazily by OSX's libc. It uses * the default zone when it is created for "small" allocations * (< 15 KiB), but assumes the default zone is a scalable_zone. This * obviously fails when the default zone is the jemalloc zone, so - * malloc_default_purgeable_zone is called beforehand so that the + * malloc_default_purgeable_zone() is called beforehand so that the * default purgeable zone is created when the default zone is still * a scalable_zone. As purgeable zones only exist on >= 10.6, we need * to check for the existence of malloc_default_purgeable_zone() at * run time. 
*/ - if (malloc_default_purgeable_zone != NULL) - malloc_default_purgeable_zone(); + purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : + malloc_default_purgeable_zone(); /* Register the custom zone. At this point it won't be the default. */ - malloc_zone_register(&zone); + zone_init(); + malloc_zone_register(&jemalloc_zone); - /* - * Unregister and reregister the default zone. On OSX >= 10.6, - * unregistering takes the last registered zone and places it at the - * location of the specified zone. Unregistering the default zone thus - * makes the last registered one the default. On OSX < 10.6, - * unregistering shifts all registered zones. The first registered zone - * then becomes the default. - */ - do { - default_zone = malloc_default_zone(); - malloc_zone_unregister(default_zone); - malloc_zone_register(default_zone); - } while (malloc_default_zone() != &zone); + /* Promote the custom zone to be default. */ + zone_promote(); } diff --git a/deps/recastnavigation/CMakeLists.txt b/deps/recastnavigation/CMakeLists.txt index 9a4e725c52..dd28884eb1 100644 --- a/deps/recastnavigation/CMakeLists.txt +++ b/deps/recastnavigation/CMakeLists.txt @@ -1,5 +1,5 @@ -# -# Copyright (C) +# Copyright (C) 2008-2016 TrinityCore <http://www.trinitycore.org/> +# Copyright (C) 2005-2011 MaNGOS project <http://getmangos.com/> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by diff --git a/deps/recastnavigation/Detour/CMakeLists.txt b/deps/recastnavigation/Detour/CMakeLists.txt index 233d123434..be271211b3 100644 --- a/deps/recastnavigation/Detour/CMakeLists.txt +++ b/deps/recastnavigation/Detour/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) +# Copyright (C) 2008-2016 TrinityCore <http://www.trinitycore.org/> # # This file is free software; as a special exception the author gives # unlimited permission to copy and/or distribute it, with or without @@ -9,12 +9,12 @@ # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. set(Detour_STAT_SRCS - DetourAlloc.cpp - DetourCommon.cpp - DetourNavMesh.cpp - DetourNavMeshBuilder.cpp - DetourNavMeshQuery.cpp - DetourNode.cpp + Source/DetourAlloc.cpp + Source/DetourCommon.cpp + Source/DetourNavMesh.cpp + Source/DetourNavMeshBuilder.cpp + Source/DetourNavMeshQuery.cpp + Source/DetourNode.cpp ) if(WIN32) @@ -25,4 +25,15 @@ endif() add_library(Detour STATIC ${Detour_STAT_SRCS}) -target_link_libraries(Detour ${ZLIB_LIBRARIES}) +target_include_directories(Detour + PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/Include) + +target_link_libraries(Detour + PUBLIC + zlib) + +set_target_properties(Detour + PROPERTIES + FOLDER + "dep") diff --git a/deps/recastnavigation/Detour/DetourAlloc.cpp b/deps/recastnavigation/Detour/DetourAlloc.cpp deleted file mode 100644 index 5f671df5bd..0000000000 --- a/deps/recastnavigation/Detour/DetourAlloc.cpp +++ /dev/null @@ -1,50 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. 
If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#include <stdlib.h> -#include "DetourAlloc.h" - -static void *dtAllocDefault(int size, dtAllocHint) -{ - return malloc(size); -} - -static void dtFreeDefault(void *ptr) -{ - free(ptr); -} - -static dtAllocFunc* sAllocFunc = dtAllocDefault; -static dtFreeFunc* sFreeFunc = dtFreeDefault; - -void dtAllocSetCustom(dtAllocFunc *allocFunc, dtFreeFunc *freeFunc) -{ - sAllocFunc = allocFunc ? allocFunc : dtAllocDefault; - sFreeFunc = freeFunc ? freeFunc : dtFreeDefault; -} - -void* dtAlloc(int size, dtAllocHint hint) -{ - return sAllocFunc(size, hint); -} - -void dtFree(void* ptr) -{ - if (ptr) - sFreeFunc(ptr); -} diff --git a/deps/recastnavigation/Detour/DetourAlloc.h b/deps/recastnavigation/Detour/DetourAlloc.h deleted file mode 100644 index 8693475419..0000000000 --- a/deps/recastnavigation/Detour/DetourAlloc.h +++ /dev/null @@ -1,36 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef DETOURALLOCATOR_H -#define DETOURALLOCATOR_H - -enum dtAllocHint -{ - DT_ALLOC_PERM, // Memory persist after a function call. - DT_ALLOC_TEMP // Memory used temporarily within a function. -}; - -typedef void* (dtAllocFunc)(int size, dtAllocHint hint); -typedef void (dtFreeFunc)(void* ptr); - -void dtAllocSetCustom(dtAllocFunc *allocFunc, dtFreeFunc *freeFunc); - -void* dtAlloc(int size, dtAllocHint hint); -void dtFree(void* ptr); - -#endif diff --git a/deps/recastnavigation/Detour/DetourAssert.h b/deps/recastnavigation/Detour/DetourAssert.h deleted file mode 100644 index 3cf652288f..0000000000 --- a/deps/recastnavigation/Detour/DetourAssert.h +++ /dev/null @@ -1,33 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. 
Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef DETOURASSERT_H -#define DETOURASSERT_H - -// Note: This header file's only purpose is to include define assert. -// Feel free to change the file and include your own implementation instead. - -#ifdef NDEBUG -// From http://cnicholson.net/2009/02/stupid-c-tricks-adventures-in-assert/ -# define dtAssert(x) do { (void)sizeof(x); } while((void)(__LINE__==-1),false) -#else -# include <assert.h> -# define dtAssert assert -#endif - -#endif // DETOURASSERT_H diff --git a/deps/recastnavigation/Detour/DetourCommon.cpp b/deps/recastnavigation/Detour/DetourCommon.cpp deleted file mode 100644 index c0b973e4a7..0000000000 --- a/deps/recastnavigation/Detour/DetourCommon.cpp +++ /dev/null @@ -1,329 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. 
-// - -#include <math.h> -#include "DetourCommon.h" - -////////////////////////////////////////////////////////////////////////////////////////// - -float dtSqrt(float x) -{ - return sqrtf(x); -} - -void dtClosestPtPointTriangle(float* closest, const float* p, - const float* a, const float* b, const float* c) -{ - // Check if P in vertex region outside A - float ab[3], ac[3], ap[3]; - dtVsub(ab, b, a); - dtVsub(ac, c, a); - dtVsub(ap, p, a); - float d1 = dtVdot(ab, ap); - float d2 = dtVdot(ac, ap); - if (d1 <= 0.0f && d2 <= 0.0f) - { - // barycentric coordinates (1,0,0) - dtVcopy(closest, a); - return; - } - - // Check if P in vertex region outside B - float bp[3]; - dtVsub(bp, p, b); - float d3 = dtVdot(ab, bp); - float d4 = dtVdot(ac, bp); - if (d3 >= 0.0f && d4 <= d3) - { - // barycentric coordinates (0,1,0) - dtVcopy(closest, b); - return; - } - - // Check if P in edge region of AB, if so return projection of P onto AB - float vc = d1*d4 - d3*d2; - if (vc <= 0.0f && d1 >= 0.0f && d3 <= 0.0f) - { - // barycentric coordinates (1-v,v,0) - float v = d1 / (d1 - d3); - closest[0] = a[0] + v * ab[0]; - closest[1] = a[1] + v * ab[1]; - closest[2] = a[2] + v * ab[2]; - return; - } - - // Check if P in vertex region outside C - float cp[3]; - dtVsub(cp, p, c); - float d5 = dtVdot(ab, cp); - float d6 = dtVdot(ac, cp); - if (d6 >= 0.0f && d5 <= d6) - { - // barycentric coordinates (0,0,1) - dtVcopy(closest, c); - return; - } - - // Check if P in edge region of AC, if so return projection of P onto AC - float vb = d5*d2 - d1*d6; - if (vb <= 0.0f && d2 >= 0.0f && d6 <= 0.0f) - { - // barycentric coordinates (1-w,0,w) - float w = d2 / (d2 - d6); - closest[0] = a[0] + w * ac[0]; - closest[1] = a[1] + w * ac[1]; - closest[2] = a[2] + w * ac[2]; - return; - } - - // Check if P in edge region of BC, if so return projection of P onto BC - float va = d3*d6 - d5*d4; - if (va <= 0.0f && (d4 - d3) >= 0.0f && (d5 - d6) >= 0.0f) - { - // barycentric coordinates (0,1-w,w) - float w = (d4 - d3) / ((d4 - d3) + (d5 - d6)); - closest[0] = b[0] + w * (c[0] - b[0]); - closest[1] = b[1] + w * (c[1] - b[1]); - closest[2] = b[2] + w * (c[2] - b[2]); - return; - } - - // P inside face region. 
Compute Q through its barycentric coordinates (u,v,w) - float denom = 1.0f / (va + vb + vc); - float v = vb * denom; - float w = vc * denom; - closest[0] = a[0] + ab[0] * v + ac[0] * w; - closest[1] = a[1] + ab[1] * v + ac[1] * w; - closest[2] = a[2] + ab[2] * v + ac[2] * w; -} - -bool dtIntersectSegmentPoly2D(const float* p0, const float* p1, - const float* verts, int nverts, - float& tmin, float& tmax, - int& segMin, int& segMax) -{ - static const float EPS = 0.00000001f; - - tmin = 0; - tmax = 1; - segMin = -1; - segMax = -1; - - float dir[3]; - dtVsub(dir, p1, p0); - - for (int i = 0, j = nverts-1; i < nverts; j=i++) - { - float edge[3], diff[3]; - dtVsub(edge, &verts[i*3], &verts[j*3]); - dtVsub(diff, p0, &verts[j*3]); - const float n = dtVperp2D(edge, diff); - const float d = dtVperp2D(dir, edge); - if (fabsf(d) < EPS) - { - // S is nearly parallel to this edge - if (n < 0) - return false; - else - continue; - } - const float t = n / d; - if (d < 0) - { - // segment S is entering across this edge - if (t > tmin) - { - tmin = t; - segMin = j; - // S enters after leaving polygon - if (tmin > tmax) - return false; - } - } - else - { - // segment S is leaving across this edge - if (t < tmax) - { - tmax = t; - segMax = j; - // S leaves before entering polygon - if (tmax < tmin) - return false; - } - } - } - - return true; -} - -float dtDistancePtSegSqr2D(const float* pt, const float* p, const float* q, float& t) -{ - float pqx = q[0] - p[0]; - float pqz = q[2] - p[2]; - float dx = pt[0] - p[0]; - float dz = pt[2] - p[2]; - float d = pqx*pqx + pqz*pqz; - t = pqx*dx + pqz*dz; - if (d > 0) t /= d; - if (t < 0) t = 0; - else if (t > 1) t = 1; - dx = p[0] + t*pqx - pt[0]; - dz = p[2] + t*pqz - pt[2]; - return dx*dx + dz*dz; -} - -void dtCalcPolyCenter(float* tc, const unsigned short* idx, int nidx, const float* verts) -{ - tc[0] = 0.0f; - tc[1] = 0.0f; - tc[2] = 0.0f; - for (int j = 0; j < nidx; ++j) - { - const float* v = &verts[idx[j]*3]; - tc[0] += v[0]; - tc[1] += v[1]; - tc[2] += v[2]; - } - const float s = 1.0f / nidx; - tc[0] *= s; - tc[1] *= s; - tc[2] *= s; -} - -bool dtClosestHeightPointTriangle(const float* p, const float* a, const float* b, const float* c, float& h) -{ - float v0[3], v1[3], v2[3]; - dtVsub(v0, c,a); - dtVsub(v1, b,a); - dtVsub(v2, p,a); - - const float dot00 = dtVdot2D(v0, v0); - const float dot01 = dtVdot2D(v0, v1); - const float dot02 = dtVdot2D(v0, v2); - const float dot11 = dtVdot2D(v1, v1); - const float dot12 = dtVdot2D(v1, v2); - - // Compute barycentric coordinates - const float invDenom = 1.0f / (dot00 * dot11 - dot01 * dot01); - const float u = (dot11 * dot02 - dot01 * dot12) * invDenom; - const float v = (dot00 * dot12 - dot01 * dot02) * invDenom; - - // The (sloppy) epsilon is needed to allow to get height of points which - // are interpolated along the edges of the triangles. - static const float EPS = 1e-4f; - - // If point lies inside the triangle, return interpolated ycoord. - if (u >= -EPS && v >= -EPS && (u+v) <= 1+EPS) - { - h = a[1] + v0[1]*u + v1[1]*v; - return true; - } - - return false; -} - -bool dtPointInPolygon(const float* pt, const float* verts, const int nverts) -{ - // TODO: Replace pnpoly with triArea2D tests? 
- int i, j; - bool c = false; - for (i = 0, j = nverts-1; i < nverts; j = i++) - { - const float* vi = &verts[i*3]; - const float* vj = &verts[j*3]; - if (((vi[2] > pt[2]) != (vj[2] > pt[2])) && - (pt[0] < (vj[0]-vi[0]) * (pt[2]-vi[2]) / (vj[2]-vi[2]) + vi[0]) ) - c = !c; - } - return c; -} - -bool dtDistancePtPolyEdgesSqr(const float* pt, const float* verts, const int nverts, - float* ed, float* et) -{ - // TODO: Replace pnpoly with triArea2D tests? - int i, j; - bool c = false; - for (i = 0, j = nverts-1; i < nverts; j = i++) - { - const float* vi = &verts[i*3]; - const float* vj = &verts[j*3]; - if (((vi[2] > pt[2]) != (vj[2] > pt[2])) && - (pt[0] < (vj[0]-vi[0]) * (pt[2]-vi[2]) / (vj[2]-vi[2]) + vi[0]) ) - c = !c; - ed[j] = dtDistancePtSegSqr2D(pt, vj, vi, et[j]); - } - return c; -} - -static void projectPoly(const float* axis, const float* poly, const int npoly, - float& rmin, float& rmax) -{ - rmin = rmax = dtVdot2D(axis, &poly[0]); - for (int i = 1; i < npoly; ++i) - { - const float d = dtVdot2D(axis, &poly[i*3]); - rmin = dtMin(rmin, d); - rmax = dtMax(rmax, d); - } -} - -inline bool overlapRange(const float amin, const float amax, - const float bmin, const float bmax, - const float eps) -{ - return ((amin+eps) > bmax || (amax-eps) < bmin) ? false : true; -} - -bool dtOverlapPolyPoly2D(const float* polya, const int npolya, - const float* polyb, const int npolyb) -{ - const float eps = 1e-4f; - - for (int i = 0, j = npolya-1; i < npolya; j=i++) - { - const float* va = &polya[j*3]; - const float* vb = &polya[i*3]; - const float n[3] = { vb[2]-va[2], 0, -(vb[0]-va[0]) }; - float amin,amax,bmin,bmax; - projectPoly(n, polya, npolya, amin,amax); - projectPoly(n, polyb, npolyb, bmin,bmax); - if (!overlapRange(amin,amax, bmin,bmax, eps)) - { - // Found separating axis - return false; - } - } - for (int i = 0, j = npolyb-1; i < npolyb; j=i++) - { - const float* va = &polyb[j*3]; - const float* vb = &polyb[i*3]; - const float n[3] = { vb[2]-va[2], 0, -(vb[0]-va[0]) }; - float amin,amax,bmin,bmax; - projectPoly(n, polya, npolya, amin,amax); - projectPoly(n, polyb, npolyb, bmin,bmax); - if (!overlapRange(amin,amax, bmin,bmax, eps)) - { - // Found separating axis - return false; - } - } - return true; -} - diff --git a/deps/recastnavigation/Detour/DetourCommon.h b/deps/recastnavigation/Detour/DetourCommon.h deleted file mode 100644 index 3cee3f6351..0000000000 --- a/deps/recastnavigation/Detour/DetourCommon.h +++ /dev/null @@ -1,248 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. 
-// - -#ifndef DETOURCOMMON_H -#define DETOURCOMMON_H - -template<class T> inline void dtSwap(T& a, T& b) { T t = a; a = b; b = t; } -template<class T> inline T dtMin(T a, T b) { return a < b ? a : b; } -template<class T> inline T dtMax(T a, T b) { return a > b ? a : b; } -template<class T> inline T dtAbs(T a) { return a < 0 ? -a : a; } -template<class T> inline T dtSqr(T a) { return a*a; } -template<class T> inline T dtClamp(T v, T mn, T mx) { return v < mn ? mn : (v > mx ? mx : v); } - -float dtSqrt(float x); - -inline void dtVcross(float* dest, const float* v1, const float* v2) -{ - dest[0] = v1[1]*v2[2] - v1[2]*v2[1]; - dest[1] = v1[2]*v2[0] - v1[0]*v2[2]; - dest[2] = v1[0]*v2[1] - v1[1]*v2[0]; -} - -inline float dtVdot(const float* v1, const float* v2) -{ - return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]; -} - -inline void dtVmad(float* dest, const float* v1, const float* v2, const float s) -{ - dest[0] = v1[0]+v2[0]*s; - dest[1] = v1[1]+v2[1]*s; - dest[2] = v1[2]+v2[2]*s; -} - -inline void dtVlerp(float* dest, const float* v1, const float* v2, const float t) -{ - dest[0] = v1[0]+(v2[0]-v1[0])*t; - dest[1] = v1[1]+(v2[1]-v1[1])*t; - dest[2] = v1[2]+(v2[2]-v1[2])*t; -} - -inline void dtVadd(float* dest, const float* v1, const float* v2) -{ - dest[0] = v1[0]+v2[0]; - dest[1] = v1[1]+v2[1]; - dest[2] = v1[2]+v2[2]; -} - -inline void dtVsub(float* dest, const float* v1, const float* v2) -{ - dest[0] = v1[0]-v2[0]; - dest[1] = v1[1]-v2[1]; - dest[2] = v1[2]-v2[2]; -} - -inline void dtVscale(float* dest, const float* v, const float t) -{ - dest[0] = v[0]*t; - dest[1] = v[1]*t; - dest[2] = v[2]*t; -} - -inline void dtVmin(float* mn, const float* v) -{ - mn[0] = dtMin(mn[0], v[0]); - mn[1] = dtMin(mn[1], v[1]); - mn[2] = dtMin(mn[2], v[2]); -} - -inline void dtVmax(float* mx, const float* v) -{ - mx[0] = dtMax(mx[0], v[0]); - mx[1] = dtMax(mx[1], v[1]); - mx[2] = dtMax(mx[2], v[2]); -} - -inline void dtVset(float* dest, const float x, const float y, const float z) -{ - dest[0] = x; dest[1] = y; dest[2] = z; -} - -inline void dtVcopy(float* dest, const float* a) -{ - dest[0] = a[0]; - dest[1] = a[1]; - dest[2] = a[2]; -} - -inline float dtVlen(const float* v) -{ - return dtSqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2]); -} - -inline float dtVlenSqr(const float* v) -{ - return v[0]*v[0] + v[1]*v[1] + v[2]*v[2]; -} - -inline float dtVdist(const float* v1, const float* v2) -{ - const float dx = v2[0] - v1[0]; - const float dy = v2[1] - v1[1]; - const float dz = v2[2] - v1[2]; - return dtSqrt(dx*dx + dy*dy + dz*dz); -} - -inline float dtVdistSqr(const float* v1, const float* v2) -{ - const float dx = v2[0] - v1[0]; - const float dy = v2[1] - v1[1]; - const float dz = v2[2] - v1[2]; - return dx*dx + dy*dy + dz*dz; -} - -inline float dtVdist2D(const float* v1, const float* v2) -{ - const float dx = v2[0] - v1[0]; - const float dz = v2[2] - v1[2]; - return dtSqrt(dx*dx + dz*dz); -} - -inline float dtVdist2DSqr(const float* v1, const float* v2) -{ - const float dx = v2[0] - v1[0]; - const float dz = v2[2] - v1[2]; - return dx*dx + dz*dz; -} - -inline void dtVnormalize(float* v) -{ - float d = 1.0f / dtSqrt(dtSqr(v[0]) + dtSqr(v[1]) + dtSqr(v[2])); - v[0] *= d; - v[1] *= d; - v[2] *= d; -} - -inline bool dtVequal(const float* p0, const float* p1) -{ - static const float thr = dtSqr(1.0f/16384.0f); - const float d = dtVdistSqr(p0, p1); - return d < thr; -} - -inline unsigned int dtNextPow2(unsigned int v) -{ - v--; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - v++; - return v; -} 
- -inline unsigned int dtIlog2(unsigned int v) -{ - unsigned int r; - unsigned int shift; - r = (v > 0xffff) << 4; v >>= r; - shift = (v > 0xff) << 3; v >>= shift; r |= shift; - shift = (v > 0xf) << 2; v >>= shift; r |= shift; - shift = (v > 0x3) << 1; v >>= shift; r |= shift; - r |= (v >> 1); - return r; -} - -inline int dtAlign4(int x) { return (x+3) & ~3; } - -inline int dtOppositeTile(int side) { return (side+4) & 0x7; } - -inline float dtVdot2D(const float* u, const float* v) -{ - return u[0]*v[0] + u[2]*v[2]; -} - -inline float dtVperp2D(const float* u, const float* v) -{ - return u[2]*v[0] - u[0]*v[2]; -} - -inline float dtTriArea2D(const float* a, const float* b, const float* c) -{ - const float abx = b[0] - a[0]; - const float abz = b[2] - a[2]; - const float acx = c[0] - a[0]; - const float acz = c[2] - a[2]; - return acx*abz - abx*acz; -} - -inline bool dtOverlapQuantBounds(const unsigned short amin[3], const unsigned short amax[3], - const unsigned short bmin[3], const unsigned short bmax[3]) -{ - bool overlap = true; - overlap = (amin[0] > bmax[0] || amax[0] < bmin[0]) ? false : overlap; - overlap = (amin[1] > bmax[1] || amax[1] < bmin[1]) ? false : overlap; - overlap = (amin[2] > bmax[2] || amax[2] < bmin[2]) ? false : overlap; - return overlap; -} - -inline bool dtOverlapBounds(const float* amin, const float* amax, - const float* bmin, const float* bmax) -{ - bool overlap = true; - overlap = (amin[0] > bmax[0] || amax[0] < bmin[0]) ? false : overlap; - overlap = (amin[1] > bmax[1] || amax[1] < bmin[1]) ? false : overlap; - overlap = (amin[2] > bmax[2] || amax[2] < bmin[2]) ? false : overlap; - return overlap; -} - -void dtClosestPtPointTriangle(float* closest, const float* p, - const float* a, const float* b, const float* c); - -bool dtClosestHeightPointTriangle(const float* p, const float* a, const float* b, const float* c, float& h); - -bool dtIntersectSegmentPoly2D(const float* p0, const float* p1, - const float* verts, int nverts, - float& tmin, float& tmax, - int& segMin, int& segMax); - -bool dtPointInPolygon(const float* pt, const float* verts, const int nverts); - -bool dtDistancePtPolyEdgesSqr(const float* pt, const float* verts, const int nverts, - float* ed, float* et); - -float dtDistancePtSegSqr2D(const float* pt, const float* p, const float* q, float& t); - -void dtCalcPolyCenter(float* tc, const unsigned short* idx, int nidx, const float* verts); - -bool dtOverlapPolyPoly2D(const float* polya, const int npolya, - const float* polyb, const int npolyb); - -#endif // DETOURCOMMON_H diff --git a/deps/recastnavigation/Detour/DetourNavMesh.cpp b/deps/recastnavigation/Detour/DetourNavMesh.cpp deleted file mode 100644 index 95af28797d..0000000000 --- a/deps/recastnavigation/Detour/DetourNavMesh.cpp +++ /dev/null @@ -1,1239 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. 
Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#include <math.h> -#include <float.h> -#include <string.h> -#include <stdio.h> -#include "DetourNavMesh.h" -#include "DetourNode.h" -#include "DetourCommon.h" -#include "DetourAlloc.h" -#include "DetourAssert.h" -#include <new> - - -inline bool overlapSlabs(const float* amin, const float* amax, - const float* bmin, const float* bmax, - const float px, const float py) -{ - // Check for horizontal overlap. - // The segment is shrunken a little so that slabs which touch - // at end points are not connected. - const float minx = dtMax(amin[0]+px,bmin[0]+px); - const float maxx = dtMin(amax[0]-px,bmax[0]-px); - if (minx > maxx) - return false; - - // Check vertical overlap. - const float ad = (amax[1]-amin[1]) / (amax[0]-amin[0]); - const float ak = amin[1] - ad*amin[0]; - const float bd = (bmax[1]-bmin[1]) / (bmax[0]-bmin[0]); - const float bk = bmin[1] - bd*bmin[0]; - const float aminy = ad*minx + ak; - const float amaxy = ad*maxx + ak; - const float bminy = bd*minx + bk; - const float bmaxy = bd*maxx + bk; - const float dmin = bminy - aminy; - const float dmax = bmaxy - amaxy; - - // Crossing segments always overlap. - if (dmin*dmax < 0) - return true; - - // Check for overlap at endpoints. - const float thr = dtSqr(py*2); - if (dmin*dmin <= thr || dmax*dmax <= thr) - return true; - - return false; -} - -static void calcSlabEndPoints(const float* va, const float* vb, float* bmin, float* bmax, const int side) -{ - if (side == 0 || side == 4) - { - if (va[2] < vb[2]) - { - bmin[0] = va[2]; - bmin[1] = va[1]; - bmax[0] = vb[2]; - bmax[1] = vb[1]; - } - else - { - bmin[0] = vb[2]; - bmin[1] = vb[1]; - bmax[0] = va[2]; - bmax[1] = va[1]; - } - } - else if (side == 2 || side == 6) - { - if (va[0] < vb[0]) - { - bmin[0] = va[0]; - bmin[1] = va[1]; - bmax[0] = vb[0]; - bmax[1] = vb[1]; - } - else - { - bmin[0] = vb[0]; - bmin[1] = vb[1]; - bmax[0] = va[0]; - bmax[1] = va[1]; - } - } -} - -inline int computeTileHash(int x, int y, const int mask) -{ - const unsigned int h1 = 0x8da6b343; // Large multiplicative constants; - const unsigned int h2 = 0xd8163841; // here arbitrarily chosen primes - unsigned int n = h1 * x + h2 * y; - return (int)(n & mask); -} - -inline unsigned int allocLink(dtMeshTile* tile) -{ - if (tile->linksFreeList == DT_NULL_LINK) - return DT_NULL_LINK; - unsigned int link = tile->linksFreeList; - tile->linksFreeList = tile->links[link].next; - return link; -} - -inline void freeLink(dtMeshTile* tile, unsigned int link) -{ - tile->links[link].next = tile->linksFreeList; - tile->linksFreeList = link; -} - - -dtNavMesh* dtAllocNavMesh() -{ - void* mem = dtAlloc(sizeof(dtNavMesh), DT_ALLOC_PERM); - if (!mem) return 0; - return new(mem) dtNavMesh; -} - -void dtFreeNavMesh(dtNavMesh* navmesh) -{ - if (!navmesh) return; - navmesh->~dtNavMesh(); - dtFree(navmesh); -} - -////////////////////////////////////////////////////////////////////////////////////////// -dtNavMesh::dtNavMesh() : - m_tileWidth(0), - m_tileHeight(0), - m_maxTiles(0), - m_tileLutSize(0), - m_tileLutMask(0), - m_posLookup(0), - m_nextFree(0), - m_tiles(0), - m_saltBits(0), - m_tileBits(0), - m_polyBits(0) -{ - memset(&m_params, 0, sizeof(dtNavMeshParams)); - m_orig[0] = 0; - m_orig[1] = 0; - m_orig[2] = 0; -} - -dtNavMesh::~dtNavMesh() -{ - for (int i = 0; i < m_maxTiles; ++i) - { - if (m_tiles[i].flags & 
DT_TILE_FREE_DATA) - { - dtFree(m_tiles[i].data); - m_tiles[i].data = 0; - m_tiles[i].dataSize = 0; - } - } - dtFree(m_posLookup); - dtFree(m_tiles); -} - -dtStatus dtNavMesh::init(const dtNavMeshParams* params) -{ - memcpy(&m_params, params, sizeof(dtNavMeshParams)); - dtVcopy(m_orig, params->orig); - m_tileWidth = params->tileWidth; - m_tileHeight = params->tileHeight; - - // Init tiles - m_maxTiles = params->maxTiles; - m_tileLutSize = dtNextPow2(params->maxTiles/4); - if (!m_tileLutSize) m_tileLutSize = 1; - m_tileLutMask = m_tileLutSize-1; - - m_tiles = (dtMeshTile*)dtAlloc(sizeof(dtMeshTile)*m_maxTiles, DT_ALLOC_PERM); - if (!m_tiles) - return DT_FAILURE; - m_posLookup = (dtMeshTile**)dtAlloc(sizeof(dtMeshTile*)*m_tileLutSize, DT_ALLOC_PERM); - if (!m_posLookup) - return DT_FAILURE; - memset(m_tiles, 0, sizeof(dtMeshTile)*m_maxTiles); - memset(m_posLookup, 0, sizeof(dtMeshTile*)*m_tileLutSize); - m_nextFree = 0; - for (int i = m_maxTiles-1; i >= 0; --i) - { - m_tiles[i].salt = 1; - m_tiles[i].next = m_nextFree; - m_nextFree = &m_tiles[i]; - } - - // Init ID generator values. - m_tileBits = STATIC_TILE_BITS; //dtIlog2(dtNextPow2((unsigned int)params->maxTiles)); - m_polyBits = STATIC_POLY_BITS; //dtIlog2(dtNextPow2((unsigned int)params->maxPolys)); - m_saltBits = STATIC_SALT_BITS; //sizeof(dtPolyRef)*8 - m_tileBits - m_polyBits; - //if (m_saltBits < SALT_MIN_BITS) - //return DT_FAILURE; - - return DT_SUCCESS; -} - -dtStatus dtNavMesh::init(unsigned char* data, const int dataSize, const int flags) -{ - // Make sure the data is in right format. - dtMeshHeader* header = (dtMeshHeader*)data; - if (header->magic != DT_NAVMESH_MAGIC) - return DT_FAILURE; - if (header->version != DT_NAVMESH_VERSION) - return DT_FAILURE; - - dtNavMeshParams params; - dtVcopy(params.orig, header->bmin); - params.tileWidth = header->bmax[0] - header->bmin[0]; - params.tileHeight = header->bmax[2] - header->bmin[2]; - params.maxTiles = 1; - params.maxPolys = header->polyCount; - - dtStatus res = init(¶ms); - if (res != DT_SUCCESS) - return res; - - return addTile(data, dataSize, flags, 0, 0); -} - -const dtNavMeshParams* dtNavMesh::getParams() const -{ - return &m_params; -} - -////////////////////////////////////////////////////////////////////////////////////////// -int dtNavMesh::findConnectingPolys(const float* va, const float* vb, - const dtMeshTile* tile, int side, - dtPolyRef* con, float* conarea, int maxcon) const -{ - if (!tile) return 0; - - float amin[2], amax[2]; - calcSlabEndPoints(va,vb, amin,amax, side); - - // Remove links pointing to 'side' and compact the links array. - float bmin[2], bmax[2]; - unsigned short m = DT_EXT_LINK | (unsigned short)side; - int n = 0; - - dtPolyRef base = getPolyRefBase(tile); - - for (int i = 0; i < tile->header->polyCount; ++i) - { - dtPoly* poly = &tile->polys[i]; - const int nv = poly->vertCount; - for (int j = 0; j < nv; ++j) - { - // Skip edges which do not point to the right side. - if (poly->neis[j] != m) continue; - // Check if the segments touch. - const float* vc = &tile->verts[poly->verts[j]*3]; - const float* vd = &tile->verts[poly->verts[(j+1) % nv]*3]; - calcSlabEndPoints(vc,vd, bmin,bmax, side); - - if (!overlapSlabs(amin,amax, bmin,bmax, 0.01f, tile->header->walkableClimb)) continue; - - // Add return value. 
- if (n < maxcon) - { - conarea[n*2+0] = dtMax(amin[0], bmin[0]); - conarea[n*2+1] = dtMin(amax[0], bmax[0]); - con[n] = base | (dtPolyRef)i; - n++; - } - break; - } - } - return n; -} - -void dtNavMesh::unconnectExtLinks(dtMeshTile* tile, int side) -{ - if (!tile) return; - - for (int i = 0; i < tile->header->polyCount; ++i) - { - dtPoly* poly = &tile->polys[i]; - unsigned int j = poly->firstLink; - unsigned int pj = DT_NULL_LINK; - while (j != DT_NULL_LINK) - { - if (tile->links[j].side == side) - { - // Revove link. - unsigned int nj = tile->links[j].next; - if (pj == DT_NULL_LINK) - poly->firstLink = nj; - else - tile->links[pj].next = nj; - freeLink(tile, j); - j = nj; - } - else - { - // Advance - pj = j; - j = tile->links[j].next; - } - } - } -} - -void dtNavMesh::connectExtLinks(dtMeshTile* tile, dtMeshTile* target, int side) -{ - if (!tile) return; - - // Connect border links. - for (int i = 0; i < tile->header->polyCount; ++i) - { - dtPoly* poly = &tile->polys[i]; - - // Create new links. - unsigned short m = DT_EXT_LINK | (unsigned short)side; - const int nv = poly->vertCount; - for (int j = 0; j < nv; ++j) - { - // Skip edges which do not point to the right side. - if (poly->neis[j] != m) continue; - - // Create new links - const float* va = &tile->verts[poly->verts[j]*3]; - const float* vb = &tile->verts[poly->verts[(j+1) % nv]*3]; - dtPolyRef nei[4]; - float neia[4*2]; - int nnei = findConnectingPolys(va,vb, target, dtOppositeTile(side), nei,neia,4); - for (int k = 0; k < nnei; ++k) - { - unsigned int idx = allocLink(tile); - if (idx != DT_NULL_LINK) - { - dtLink* link = &tile->links[idx]; - link->ref = nei[k]; - link->edge = (unsigned char)j; - link->side = (unsigned char)side; - - link->next = poly->firstLink; - poly->firstLink = idx; - - // Compress portal limits to a byte value. - if (side == 0 || side == 4) - { - float tmin = (neia[k*2+0]-va[2]) / (vb[2]-va[2]); - float tmax = (neia[k*2+1]-va[2]) / (vb[2]-va[2]); - if (tmin > tmax) - dtSwap(tmin,tmax); - link->bmin = (unsigned char)(dtClamp(tmin, 0.0f, 1.0f)*255.0f); - link->bmax = (unsigned char)(dtClamp(tmax, 0.0f, 1.0f)*255.0f); - } - else if (side == 2 || side == 6) - { - float tmin = (neia[k*2+0]-va[0]) / (vb[0]-va[0]); - float tmax = (neia[k*2+1]-va[0]) / (vb[0]-va[0]); - if (tmin > tmax) - dtSwap(tmin,tmax); - link->bmin = (unsigned char)(dtClamp(tmin, 0.0f, 1.0f)*255.0f); - link->bmax = (unsigned char)(dtClamp(tmax, 0.0f, 1.0f)*255.0f); - } - } - } - } - } -} - -void dtNavMesh::connectExtOffMeshLinks(dtMeshTile* tile, dtMeshTile* target, int side) -{ - if (!tile) return; - - // Connect off-mesh links. - // We are interested on links which land from target tile to this tile. - const unsigned char oppositeSide = (unsigned char)dtOppositeTile(side); - - for (int i = 0; i < target->header->offMeshConCount; ++i) - { - dtOffMeshConnection* targetCon = &target->offMeshCons[i]; - if (targetCon->side != oppositeSide) - continue; - - dtPoly* targetPoly = &target->polys[targetCon->poly]; - - const float ext[3] = { targetCon->rad, target->header->walkableClimb, targetCon->rad }; - - // Find polygon to connect to. - const float* p = &targetCon->pos[3]; - float nearestPt[3]; - dtPolyRef ref = findNearestPolyInTile(tile, p, ext, nearestPt); - if (!ref) continue; - // findNearestPoly may return too optimistic results, further check to make sure. - if (dtSqr(nearestPt[0]-p[0])+dtSqr(nearestPt[2]-p[2]) > dtSqr(targetCon->rad)) - continue; - // Make sure the location is on current mesh. 
- float* v = &target->verts[targetPoly->verts[1]*3]; - dtVcopy(v, nearestPt); - - // Link off-mesh connection to target poly. - unsigned int idx = allocLink(target); - if (idx != DT_NULL_LINK) - { - dtLink* link = &target->links[idx]; - link->ref = ref; - link->edge = (unsigned char)1; - link->side = oppositeSide; - link->bmin = link->bmax = 0; - // Add to linked list. - link->next = targetPoly->firstLink; - targetPoly->firstLink = idx; - } - - // Link target poly to off-mesh connection. - if (targetCon->flags & DT_OFFMESH_CON_BIDIR) - { - unsigned int idx = allocLink(tile); - if (idx != DT_NULL_LINK) - { - const unsigned short landPolyIdx = (unsigned short)decodePolyIdPoly(ref); - dtPoly* landPoly = &tile->polys[landPolyIdx]; - dtLink* link = &tile->links[idx]; - link->ref = getPolyRefBase(target) | (dtPolyRef)(targetCon->poly); - link->edge = 0xff; - link->side = (unsigned char)side; - link->bmin = link->bmax = 0; - // Add to linked list. - link->next = landPoly->firstLink; - landPoly->firstLink = idx; - } - } - } - -} - -void dtNavMesh::connectIntLinks(dtMeshTile* tile) -{ - if (!tile) return; - - dtPolyRef base = getPolyRefBase(tile); - - for (int i = 0; i < tile->header->polyCount; ++i) - { - dtPoly* poly = &tile->polys[i]; - poly->firstLink = DT_NULL_LINK; - - if (poly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - continue; - - // Build edge links backwards so that the links will be - // in the linked list from lowest index to highest. - for (int j = poly->vertCount-1; j >= 0; --j) - { - // Skip hard and non-internal edges. - if (poly->neis[j] == 0 || (poly->neis[j] & DT_EXT_LINK)) continue; - - unsigned int idx = allocLink(tile); - if (idx != DT_NULL_LINK) - { - dtLink* link = &tile->links[idx]; - link->ref = base | (dtPolyRef)(poly->neis[j]-1); - link->edge = (unsigned char)j; - link->side = 0xff; - link->bmin = link->bmax = 0; - // Add to linked list. - link->next = poly->firstLink; - poly->firstLink = idx; - } - } - } -} - -void dtNavMesh::connectIntOffMeshLinks(dtMeshTile* tile) -{ - if (!tile) return; - - dtPolyRef base = getPolyRefBase(tile); - - // Find Off-mesh connection end points. - for (int i = 0; i < tile->header->offMeshConCount; ++i) - { - dtOffMeshConnection* con = &tile->offMeshCons[i]; - dtPoly* poly = &tile->polys[con->poly]; - - const float ext[3] = { con->rad, tile->header->walkableClimb, con->rad }; - - for (int j = 0; j < 2; ++j) - { - unsigned char side = j == 0 ? 0xff : con->side; - - if (side == 0xff) - { - // Find polygon to connect to. - const float* p = &con->pos[j*3]; - float nearestPt[3]; - dtPolyRef ref = findNearestPolyInTile(tile, p, ext, nearestPt); - if (!ref) continue; - // findNearestPoly may return too optimistic results, further check to make sure. - if (dtSqr(nearestPt[0]-p[0])+dtSqr(nearestPt[2]-p[2]) > dtSqr(con->rad)) - continue; - // Make sure the location is on current mesh. - float* v = &tile->verts[poly->verts[j]*3]; - dtVcopy(v, nearestPt); - - // Link off-mesh connection to target poly. - unsigned int idx = allocLink(tile); - if (idx != DT_NULL_LINK) - { - dtLink* link = &tile->links[idx]; - link->ref = ref; - link->edge = (unsigned char)j; - link->side = 0xff; - link->bmin = link->bmax = 0; - // Add to linked list. - link->next = poly->firstLink; - poly->firstLink = idx; - } - - // Start end-point is always connect back to off-mesh connection, - // Destination end-point only if it is bidirectional link. - if (j == 0 || (j == 1 && (con->flags & DT_OFFMESH_CON_BIDIR))) - { - // Link target poly to off-mesh connection. 
- unsigned int idx = allocLink(tile); - if (idx != DT_NULL_LINK) - { - const unsigned short landPolyIdx = (unsigned short)decodePolyIdPoly(ref); - dtPoly* landPoly = &tile->polys[landPolyIdx]; - dtLink* link = &tile->links[idx]; - link->ref = base | (dtPolyRef)(con->poly); - link->edge = 0xff; - link->side = 0xff; - link->bmin = link->bmax = 0; - // Add to linked list. - link->next = landPoly->firstLink; - landPoly->firstLink = idx; - } - } - - } - } - } -} - -dtStatus dtNavMesh::closestPointOnPolyInTile(const dtMeshTile* tile, unsigned int ip, - const float* pos, float* closest) const -{ - const dtPoly* poly = &tile->polys[ip]; - - float closestDistSqr = FLT_MAX; - const dtPolyDetail* pd = &tile->detailMeshes[ip]; - - for (int j = 0; j < pd->triCount; ++j) - { - const unsigned char* t = &tile->detailTris[(pd->triBase+j)*4]; - const float* v[3]; - for (int k = 0; k < 3; ++k) - { - if (t[k] < poly->vertCount) - v[k] = &tile->verts[poly->verts[t[k]]*3]; - else - v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3]; - } - float pt[3]; - dtClosestPtPointTriangle(pt, pos, v[0], v[1], v[2]); - float d = dtVdistSqr(pos, pt); - if (d < closestDistSqr) - { - dtVcopy(closest, pt); - closestDistSqr = d; - } - } - - return DT_SUCCESS; -} - -dtPolyRef dtNavMesh::findNearestPolyInTile(const dtMeshTile* tile, - const float* center, const float* extents, - float* nearestPt) const -{ - float bmin[3], bmax[3]; - dtVsub(bmin, center, extents); - dtVadd(bmax, center, extents); - - // Get nearby polygons from proximity grid. - dtPolyRef polys[128]; - int polyCount = queryPolygonsInTile(tile, bmin, bmax, polys, 128); - - // Find nearest polygon amongst the nearby polygons. - dtPolyRef nearest = 0; - float nearestDistanceSqr = FLT_MAX; - for (int i = 0; i < polyCount; ++i) - { - dtPolyRef ref = polys[i]; - float closestPtPoly[3]; - if (closestPointOnPolyInTile(tile, decodePolyIdPoly(ref), center, closestPtPoly) != DT_SUCCESS) - continue; - float d = dtVdistSqr(center, closestPtPoly); - if (d < nearestDistanceSqr) - { - if (nearestPt) - dtVcopy(nearestPt, closestPtPoly); - nearestDistanceSqr = d; - nearest = ref; - } - } - - return nearest; -} - -int dtNavMesh::queryPolygonsInTile(const dtMeshTile* tile, const float* qmin, const float* qmax, - dtPolyRef* polys, const int maxPolys) const -{ - if (tile->bvTree) - { - const dtBVNode* node = &tile->bvTree[0]; - const dtBVNode* end = &tile->bvTree[tile->header->bvNodeCount]; - const float* tbmin = tile->header->bmin; - const float* tbmax = tile->header->bmax; - const float qfac = tile->header->bvQuantFactor; - - // Calculate quantized box - unsigned short bmin[3], bmax[3]; - // dtClamp query box to world box. 
- float minx = dtClamp(qmin[0], tbmin[0], tbmax[0]) - tbmin[0]; - float miny = dtClamp(qmin[1], tbmin[1], tbmax[1]) - tbmin[1]; - float minz = dtClamp(qmin[2], tbmin[2], tbmax[2]) - tbmin[2]; - float maxx = dtClamp(qmax[0], tbmin[0], tbmax[0]) - tbmin[0]; - float maxy = dtClamp(qmax[1], tbmin[1], tbmax[1]) - tbmin[1]; - float maxz = dtClamp(qmax[2], tbmin[2], tbmax[2]) - tbmin[2]; - // Quantize - bmin[0] = (unsigned short)(qfac * minx) & 0xfffe; - bmin[1] = (unsigned short)(qfac * miny) & 0xfffe; - bmin[2] = (unsigned short)(qfac * minz) & 0xfffe; - bmax[0] = (unsigned short)(qfac * maxx + 1) | 1; - bmax[1] = (unsigned short)(qfac * maxy + 1) | 1; - bmax[2] = (unsigned short)(qfac * maxz + 1) | 1; - - // Traverse tree - dtPolyRef base = getPolyRefBase(tile); - int n = 0; - while (node < end) - { - const bool overlap = dtOverlapQuantBounds(bmin, bmax, node->bmin, node->bmax); - const bool isLeafNode = node->i >= 0; - - if (isLeafNode && overlap) - { - if (n < maxPolys) - polys[n++] = base | (dtPolyRef)node->i; - } - - if (overlap || isLeafNode) - node++; - else - { - const int escapeIndex = -node->i; - node += escapeIndex; - } - } - - return n; - } - else - { - float bmin[3], bmax[3]; - int n = 0; - dtPolyRef base = getPolyRefBase(tile); - for (int i = 0; i < tile->header->polyCount; ++i) - { - dtPoly* p = &tile->polys[i]; - // Do not return off-mesh connection polygons. - if (p->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - continue; - // Calc polygon bounds. - const float* v = &tile->verts[p->verts[0]*3]; - dtVcopy(bmin, v); - dtVcopy(bmax, v); - for (int j = 1; j < p->vertCount; ++j) - { - v = &tile->verts[p->verts[j]*3]; - dtVmin(bmin, v); - dtVmax(bmax, v); - } - if (dtOverlapBounds(qmin,qmax, bmin,bmax)) - { - if (n < maxPolys) - polys[n++] = base | (dtPolyRef)i; - } - } - return n; - } -} - -dtStatus dtNavMesh::addTile(unsigned char* data, int dataSize, int flags, - dtTileRef lastRef, dtTileRef* result) -{ - // Make sure the data is in right format. - dtMeshHeader* header = (dtMeshHeader*)data; - if (header->magic != DT_NAVMESH_MAGIC) - return DT_FAILURE_DATA_MAGIC; - if (header->version != DT_NAVMESH_VERSION) - return DT_FAILURE_DATA_VERSION; - - // Make sure the location is free. - if (getTileAt(header->x, header->y)) - return DT_FAILURE; - - // Allocate a tile. - dtMeshTile* tile = 0; - if (!lastRef) - { - if (m_nextFree) - { - tile = m_nextFree; - m_nextFree = tile->next; - tile->next = 0; - } - } - else - { - // Try to relocate the tile to specific index with same salt. - int tileIndex = (int)decodePolyIdTile((dtPolyRef)lastRef); - if (tileIndex >= m_maxTiles) - return DT_FAILURE_OUT_OF_MEMORY; - // Try to find the specific tile id from the free list. - dtMeshTile* target = &m_tiles[tileIndex]; - dtMeshTile* prev = 0; - tile = m_nextFree; - while (tile && tile != target) - { - prev = tile; - tile = tile->next; - } - // Could not find the correct location. - if (tile != target) - return DT_FAILURE_OUT_OF_MEMORY; - // Remove from freelist - if (!prev) - m_nextFree = tile->next; - else - prev->next = tile->next; - - // Restore salt. - tile->salt = decodePolyIdSalt((dtPolyRef)lastRef); - } - - // Make sure we could allocate a tile. - if (!tile) - return DT_FAILURE_OUT_OF_MEMORY; - - // Insert tile into the position lut. - int h = computeTileHash(header->x, header->y, m_tileLutMask); - tile->next = m_posLookup[h]; - m_posLookup[h] = tile; - - // Patch header pointers. 
- const int headerSize = dtAlign4(sizeof(dtMeshHeader)); - const int vertsSize = dtAlign4(sizeof(float)*3*header->vertCount); - const int polysSize = dtAlign4(sizeof(dtPoly)*header->polyCount); - const int linksSize = dtAlign4(sizeof(dtLink)*(header->maxLinkCount)); - const int detailMeshesSize = dtAlign4(sizeof(dtPolyDetail)*header->detailMeshCount); - const int detailVertsSize = dtAlign4(sizeof(float)*3*header->detailVertCount); - const int detailTrisSize = dtAlign4(sizeof(unsigned char)*4*header->detailTriCount); - const int bvtreeSize = dtAlign4(sizeof(dtBVNode)*header->bvNodeCount); - const int offMeshLinksSize = dtAlign4(sizeof(dtOffMeshConnection)*header->offMeshConCount); - - unsigned char* d = data + headerSize; - tile->verts = (float*)d; d += vertsSize; - tile->polys = (dtPoly*)d; d += polysSize; - tile->links = (dtLink*)d; d += linksSize; - tile->detailMeshes = (dtPolyDetail*)d; d += detailMeshesSize; - tile->detailVerts = (float*)d; d += detailVertsSize; - tile->detailTris = (unsigned char*)d; d += detailTrisSize; - tile->bvTree = (dtBVNode*)d; d += bvtreeSize; - tile->offMeshCons = (dtOffMeshConnection*)d; d += offMeshLinksSize; - - // Build links freelist - tile->linksFreeList = 0; - tile->links[header->maxLinkCount-1].next = DT_NULL_LINK; - for (int i = 0; i < header->maxLinkCount-1; ++i) - tile->links[i].next = i+1; - - // Init tile. - tile->header = header; - tile->data = data; - tile->dataSize = dataSize; - tile->flags = flags; - - connectIntLinks(tile); - connectIntOffMeshLinks(tile); - - // Create connections connections. - for (int i = 0; i < 8; ++i) - { - dtMeshTile* nei = getNeighbourTileAt(header->x, header->y, i); - if (nei) - { - connectExtLinks(tile, nei, i); - connectExtLinks(nei, tile, dtOppositeTile(i)); - connectExtOffMeshLinks(tile, nei, i); - connectExtOffMeshLinks(nei, tile, dtOppositeTile(i)); - } - } - - if (result) - *result = getTileRef(tile); - - return DT_SUCCESS; -} - -const dtMeshTile* dtNavMesh::getTileAt(int x, int y) const -{ - // Find tile based on hash. - int h = computeTileHash(x,y,m_tileLutMask); - dtMeshTile* tile = m_posLookup[h]; - while (tile) - { - if (tile->header && tile->header->x == x && tile->header->y == y) - return tile; - tile = tile->next; - } - return 0; -} - -dtMeshTile* dtNavMesh::getNeighbourTileAt(int x, int y, int side) const -{ - switch (side) - { - case 0: x++; break; - case 1: x++; y++; break; - case 2: y++; break; - case 3: x--; y++; break; - case 4: x--; break; - case 5: x--; y--; break; - case 6: y--; break; - case 7: x++; y--; break; - }; - - // Find tile based on hash. - int h = computeTileHash(x,y,m_tileLutMask); - dtMeshTile* tile = m_posLookup[h]; - while (tile) - { - if (tile->header && tile->header->x == x && tile->header->y == y) - return tile; - tile = tile->next; - } - return 0; -} - -dtTileRef dtNavMesh::getTileRefAt(int x, int y) const -{ - // Find tile based on hash. 
- int h = computeTileHash(x,y,m_tileLutMask); - dtMeshTile* tile = m_posLookup[h]; - while (tile) - { - if (tile->header && tile->header->x == x && tile->header->y == y) - return getTileRef(tile); - tile = tile->next; - } - return 0; -} - -const dtMeshTile* dtNavMesh::getTileByRef(dtTileRef ref) const -{ - if (!ref) - return 0; - unsigned int tileIndex = decodePolyIdTile((dtPolyRef)ref); - unsigned int tileSalt = decodePolyIdSalt((dtPolyRef)ref); - if ((int)tileIndex >= m_maxTiles) - return 0; - const dtMeshTile* tile = &m_tiles[tileIndex]; - if (tile->salt != tileSalt) - return 0; - return tile; -} - -int dtNavMesh::getMaxTiles() const -{ - return m_maxTiles; -} - -dtMeshTile* dtNavMesh::getTile(int i) -{ - return &m_tiles[i]; -} - -const dtMeshTile* dtNavMesh::getTile(int i) const -{ - return &m_tiles[i]; -} - -void dtNavMesh::calcTileLoc(const float* pos, int* tx, int* ty) const -{ - *tx = (int)floorf((pos[0]-m_orig[0]) / m_tileWidth); - *ty = (int)floorf((pos[2]-m_orig[2]) / m_tileHeight); -} - -dtStatus dtNavMesh::getTileAndPolyByRef(const dtPolyRef ref, const dtMeshTile** tile, const dtPoly** poly) const -{ - unsigned int salt, it, ip; - decodePolyId(ref, salt, it, ip); - if (it >= (unsigned int)m_maxTiles) return DT_FAILURE; - if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return DT_FAILURE; - if (ip >= (unsigned int)m_tiles[it].header->polyCount) return DT_FAILURE; - *tile = &m_tiles[it]; - *poly = &m_tiles[it].polys[ip]; - return DT_SUCCESS; -} - -void dtNavMesh::getTileAndPolyByRefUnsafe(const dtPolyRef ref, const dtMeshTile** tile, const dtPoly** poly) const -{ - unsigned int salt, it, ip; - decodePolyId(ref, salt, it, ip); - *tile = &m_tiles[it]; - *poly = &m_tiles[it].polys[ip]; -} - -bool dtNavMesh::isValidPolyRef(dtPolyRef ref) const -{ - unsigned int salt, it, ip; - decodePolyId(ref, salt, it, ip); - if (it >= (unsigned int)m_maxTiles) return false; - if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return false; - if (ip >= (unsigned int)m_tiles[it].header->polyCount) return false; - return true; -} - -dtStatus dtNavMesh::removeTile(dtTileRef ref, unsigned char** data, int* dataSize) -{ - if (!ref) - return DT_FAILURE; - unsigned int tileIndex = decodePolyIdTile((dtPolyRef)ref); - unsigned int tileSalt = decodePolyIdSalt((dtPolyRef)ref); - if ((int)tileIndex >= m_maxTiles) - return DT_FAILURE; - dtMeshTile* tile = &m_tiles[tileIndex]; - if (tile->salt != tileSalt) - return DT_FAILURE; - - // Remove tile from hash lookup. - int h = computeTileHash(tile->header->x,tile->header->y,m_tileLutMask); - dtMeshTile* prev = 0; - dtMeshTile* cur = m_posLookup[h]; - while (cur) - { - if (cur == tile) - { - if (prev) - prev->next = cur->next; - else - m_posLookup[h] = cur->next; - break; - } - prev = cur; - cur = cur->next; - } - - // Remove connections to neighbour tiles. - for (int i = 0; i < 8; ++i) - { - dtMeshTile* nei = getNeighbourTileAt(tile->header->x,tile->header->y,i); - if (!nei) continue; - unconnectExtLinks(nei, dtOppositeTile(i)); - } - - - // Reset tile. 
- if (tile->flags & DT_TILE_FREE_DATA) - { - // Owns data - dtFree(tile->data); - tile->data = 0; - tile->dataSize = 0; - if (data) *data = 0; - if (dataSize) *dataSize = 0; - } - else - { - if (data) *data = tile->data; - if (dataSize) *dataSize = tile->dataSize; - } - - tile->header = 0; - tile->flags = 0; - tile->linksFreeList = 0; - tile->polys = 0; - tile->verts = 0; - tile->links = 0; - tile->detailMeshes = 0; - tile->detailVerts = 0; - tile->detailTris = 0; - tile->bvTree = 0; - tile->offMeshCons = 0; - - // Update salt, salt should never be zero. - tile->salt = (tile->salt+1) & ((1<<m_saltBits)-1); - if (tile->salt == 0) - tile->salt++; - - // Add to free list. - tile->next = m_nextFree; - m_nextFree = tile; - - return DT_SUCCESS; -} - -dtTileRef dtNavMesh::getTileRef(const dtMeshTile* tile) const -{ - if (!tile) return 0; - const unsigned int it = (unsigned int)(tile - m_tiles); - return (dtTileRef)encodePolyId(tile->salt, it, 0); -} - -dtPolyRef dtNavMesh::getPolyRefBase(const dtMeshTile* tile) const -{ - if (!tile) return 0; - const unsigned int it = (unsigned int)(tile - m_tiles); - return encodePolyId(tile->salt, it, 0); -} - -struct dtTileState -{ - int magic; // Magic number, used to identify the data. - int version; // Data version number. - dtTileRef ref; // Tile ref at the time of storing the data. -}; - -struct dtPolyState -{ - unsigned short flags; // Flags (see dtPolyFlags). - unsigned char area; // Area ID of the polygon. -}; - -int dtNavMesh::getTileStateSize(const dtMeshTile* tile) const -{ - if (!tile) return 0; - const int headerSize = dtAlign4(sizeof(dtTileState)); - const int polyStateSize = dtAlign4(sizeof(dtPolyState) * tile->header->polyCount); - return headerSize + polyStateSize; -} - -dtStatus dtNavMesh::storeTileState(const dtMeshTile* tile, unsigned char* data, const int maxDataSize) const -{ - // Make sure there is enough space to store the state. - const int sizeReq = getTileStateSize(tile); - if (maxDataSize < sizeReq) - return DT_FAILURE; - - dtTileState* tileState = (dtTileState*)data; data += dtAlign4(sizeof(dtTileState)); - dtPolyState* polyStates = (dtPolyState*)data; data += dtAlign4(sizeof(dtPolyState) * tile->header->polyCount); - - // Store tile state. - tileState->magic = DT_NAVMESH_STATE_MAGIC; - tileState->version = DT_NAVMESH_STATE_VERSION; - tileState->ref = getTileRef(tile); - - // Store per poly state. - for (int i = 0; i < tile->header->polyCount; ++i) - { - const dtPoly* p = &tile->polys[i]; - dtPolyState* s = &polyStates[i]; - s->flags = p->flags; - s->area = p->getArea(); - } - - return DT_SUCCESS; -} - -dtStatus dtNavMesh::restoreTileState(dtMeshTile* tile, const unsigned char* data, const int maxDataSize) -{ - // Make sure there is enough space to store the state. - const int sizeReq = getTileStateSize(tile); - if (maxDataSize < sizeReq) - return DT_FAILURE; - - const dtTileState* tileState = (const dtTileState*)data; data += dtAlign4(sizeof(dtTileState)); - const dtPolyState* polyStates = (const dtPolyState*)data; data += dtAlign4(sizeof(dtPolyState) * tile->header->polyCount); - - // Check that the restore is possible. - if (tileState->magic != DT_NAVMESH_STATE_MAGIC) - return DT_FAILURE_DATA_MAGIC; - if (tileState->version != DT_NAVMESH_STATE_VERSION) - return DT_FAILURE_DATA_VERSION; - if (tileState->ref != getTileRef(tile)) - return DT_FAILURE; - - // Restore per poly state. 
- for (int i = 0; i < tile->header->polyCount; ++i) - { - dtPoly* p = &tile->polys[i]; - const dtPolyState* s = &polyStates[i]; - p->flags = s->flags; - p->setArea(s->area); - } - - return DT_SUCCESS; -} - -// Returns start and end location of an off-mesh link polygon. -dtStatus dtNavMesh::getOffMeshConnectionPolyEndPoints(dtPolyRef prevRef, dtPolyRef polyRef, float* startPos, float* endPos) const -{ - unsigned int salt, it, ip; - - // Get current polygon - decodePolyId(polyRef, salt, it, ip); - if (it >= (unsigned int)m_maxTiles) return DT_FAILURE; - if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return DT_FAILURE; - const dtMeshTile* tile = &m_tiles[it]; - if (ip >= (unsigned int)tile->header->polyCount) return DT_FAILURE; - const dtPoly* poly = &tile->polys[ip]; - - // Make sure that the current poly is indeed off-mesh link. - if (poly->getType() != DT_POLYTYPE_OFFMESH_CONNECTION) - return DT_FAILURE; - - // Figure out which way to hand out the vertices. - int idx0 = 0, idx1 = 1; - - // Find link that points to first vertex. - for (unsigned int i = poly->firstLink; i != DT_NULL_LINK; i = tile->links[i].next) - { - if (tile->links[i].edge == 0) - { - if (tile->links[i].ref != prevRef) - { - idx0 = 1; - idx1 = 0; - } - break; - } - } - - dtVcopy(startPos, &tile->verts[poly->verts[idx0]*3]); - dtVcopy(endPos, &tile->verts[poly->verts[idx1]*3]); - - return DT_SUCCESS; -} - - -const dtOffMeshConnection* dtNavMesh::getOffMeshConnectionByRef(dtPolyRef ref) const -{ - unsigned int salt, it, ip; - - // Get current polygon - decodePolyId(ref, salt, it, ip); - if (it >= (unsigned int)m_maxTiles) return 0; - if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return 0; - const dtMeshTile* tile = &m_tiles[it]; - if (ip >= (unsigned int)tile->header->polyCount) return 0; - const dtPoly* poly = &tile->polys[ip]; - - // Make sure that the current poly is indeed off-mesh link. - if (poly->getType() != DT_POLYTYPE_OFFMESH_CONNECTION) - return 0; - - const unsigned int idx = ip - tile->header->offMeshBase; - dtAssert(idx < (unsigned int)tile->header->offMeshConCount); - return &tile->offMeshCons[idx]; -} - - -dtStatus dtNavMesh::setPolyFlags(dtPolyRef ref, unsigned short flags) -{ - unsigned int salt, it, ip; - decodePolyId(ref, salt, it, ip); - if (it >= (unsigned int)m_maxTiles) return DT_FAILURE; - if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return DT_FAILURE; - dtMeshTile* tile = &m_tiles[it]; - if (ip >= (unsigned int)tile->header->polyCount) return DT_FAILURE; - dtPoly* poly = &tile->polys[ip]; - - // Change flags. 
- poly->flags = flags; - - return DT_SUCCESS; -} - -dtStatus dtNavMesh::getPolyFlags(dtPolyRef ref, unsigned short* resultFlags) const -{ - unsigned int salt, it, ip; - decodePolyId(ref, salt, it, ip); - if (it >= (unsigned int)m_maxTiles) return DT_FAILURE; - if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return DT_FAILURE; - const dtMeshTile* tile = &m_tiles[it]; - if (ip >= (unsigned int)tile->header->polyCount) return DT_FAILURE; - const dtPoly* poly = &tile->polys[ip]; - - *resultFlags = poly->flags; - - return DT_SUCCESS; -} - -dtStatus dtNavMesh::setPolyArea(dtPolyRef ref, unsigned char area) -{ - unsigned int salt, it, ip; - decodePolyId(ref, salt, it, ip); - if (it >= (unsigned int)m_maxTiles) return DT_FAILURE; - if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return DT_FAILURE; - dtMeshTile* tile = &m_tiles[it]; - if (ip >= (unsigned int)tile->header->polyCount) return DT_FAILURE; - dtPoly* poly = &tile->polys[ip]; - - poly->setArea(area); - - return DT_SUCCESS; -} - -dtStatus dtNavMesh::getPolyArea(dtPolyRef ref, unsigned char* resultArea) const -{ - unsigned int salt, it, ip; - decodePolyId(ref, salt, it, ip); - if (it >= (unsigned int)m_maxTiles) return DT_FAILURE; - if (m_tiles[it].salt != salt || m_tiles[it].header == 0) return DT_FAILURE; - const dtMeshTile* tile = &m_tiles[it]; - if (ip >= (unsigned int)tile->header->polyCount) return DT_FAILURE; - const dtPoly* poly = &tile->polys[ip]; - - *resultArea = poly->getArea(); - - return DT_SUCCESS; -} - diff --git a/deps/recastnavigation/Detour/DetourNavMesh.h b/deps/recastnavigation/Detour/DetourNavMesh.h deleted file mode 100644 index 6f2db04004..0000000000 --- a/deps/recastnavigation/Detour/DetourNavMesh.h +++ /dev/null @@ -1,428 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef DETOURNAVMESH_H -#define DETOURNAVMESH_H - -#include "DetourAlloc.h" - -#ifdef WIN32 - typedef unsigned __int64 uint64; -#else -#include <stdint.h> -#ifndef uint64_t -#ifdef __linux__ -#include <linux/types.h> -#endif -#endif - typedef uint64_t uint64; -#endif - -// Note: If you want to use 64-bit refs, change the types of both dtPolyRef & dtTileRef. -// It is also recommended to change dtHashRef() to proper 64-bit hash too. - -// Reference to navigation polygon. -typedef uint64 dtPolyRef; - -// Reference to navigation mesh tile. -typedef uint64 dtTileRef; - -// Maximum number of vertices per navigation polygon. 
-static const int DT_VERTS_PER_POLYGON = 6; - -static const int DT_NAVMESH_MAGIC = 'D'<<24 | 'N'<<16 | 'A'<<8 | 'V'; //'DNAV'; -static const int DT_NAVMESH_VERSION = 6; - -static const int DT_NAVMESH_STATE_MAGIC = 'D'<<24 | 'N'<<16 | 'M'<<8 | 'S'; //'DNMS'; -static const int DT_NAVMESH_STATE_VERSION = 1; - -static const unsigned short DT_EXT_LINK = 0x8000; -static const unsigned int DT_NULL_LINK = 0xffffffff; -static const unsigned int DT_OFFMESH_CON_BIDIR = 1; - -static const int DT_MAX_AREAS = 64; - -static const int STATIC_SALT_BITS = 12; -static const int STATIC_TILE_BITS = 21; -static const int STATIC_POLY_BITS = 31; -// we cannot have over 31 bits for either tile nor poly -// without changing polyCount to use 64bits too. - -// Flags for addTile -enum dtTileFlags -{ - DT_TILE_FREE_DATA = 0x01, // Navmesh owns the tile memory and should free it. -}; - -// Flags returned by findStraightPath(). -enum dtStraightPathFlags -{ - DT_STRAIGHTPATH_START = 0x01, // The vertex is the start position. - DT_STRAIGHTPATH_END = 0x02, // The vertex is the end position. - DT_STRAIGHTPATH_OFFMESH_CONNECTION = 0x04, // The vertex is start of an off-mesh link. -}; - -// Flags describing polygon properties. -enum dtPolyTypes -{ - DT_POLYTYPE_GROUND = 0, // Regular ground polygons. - DT_POLYTYPE_OFFMESH_CONNECTION = 1, // Off-mesh connections. -}; - -enum dtStatus -{ - DT_FAILURE = 0, // Operation failed. - DT_FAILURE_DATA_MAGIC, - DT_FAILURE_DATA_VERSION, - DT_FAILURE_OUT_OF_MEMORY, - DT_SUCCESS, // Operation succeed. - DT_IN_PROGRESS, // Operation still in progress. -}; - - -// Structure describing the navigation polygon data. -struct dtPoly -{ - unsigned int firstLink; // Index to first link in linked list. - unsigned short verts[DT_VERTS_PER_POLYGON]; // Indices to vertices of the poly. - unsigned short neis[DT_VERTS_PER_POLYGON]; // Refs to neighbours of the poly. - unsigned short flags; // Flags (see dtPolyFlags). - unsigned char vertCount; // Number of vertices. - unsigned char areaAndtype; // Bit packed: Area ID of the polygon, and Polygon type, see dtPolyTypes.. - inline void setArea(unsigned char a) { areaAndtype = (areaAndtype & 0xc0) | (a & 0x3f); } - inline void setType(unsigned char t) { areaAndtype = (areaAndtype & 0x3f) | (t << 6); } - inline unsigned char getArea() const { return areaAndtype & 0x3f; } - inline unsigned char getType() const { return areaAndtype >> 6; } -}; - -// Stucture describing polygon detail triangles. -struct dtPolyDetail -{ - unsigned int vertBase; // Offset to detail vertex array. - unsigned int triBase; // Offset to detail triangle array. - unsigned char vertCount; // Number of vertices in the detail mesh. - unsigned char triCount; // Number of triangles. -}; - -// Stucture describing a link to another polygon. -struct dtLink -{ - dtPolyRef ref; // Neighbour reference. - unsigned int next; // Index to next link. - unsigned char edge; // Index to polygon edge which owns this link. - unsigned char side; // If boundary link, defines on which side the link is. - unsigned char bmin, bmax; // If boundary link, defines the sub edge area. -}; - -struct dtBVNode -{ - unsigned short bmin[3], bmax[3]; // BVnode bounds - int i; // Index to item or if negative, escape index. -}; - -struct dtOffMeshConnection -{ - float pos[6]; // Both end point locations. - float rad; // Link connection radius. - unsigned short poly; // Poly Id - unsigned char flags; // Link flags - unsigned char side; // End point side. - unsigned int userId; // User ID to identify this connection. 
-}; - -struct dtMeshHeader -{ - int magic; // Magic number, used to identify the data. - int version; // Data version number. - int x, y; // Location of the time on the grid. - unsigned int userId; // User ID of the tile. - int polyCount; // Number of polygons in the tile. - int vertCount; // Number of vertices in the tile. - int maxLinkCount; // Number of allocated links. - int detailMeshCount; // Number of detail meshes. - int detailVertCount; // Number of detail vertices. - int detailTriCount; // Number of detail triangles. - int bvNodeCount; // Number of BVtree nodes. - int offMeshConCount; // Number of Off-Mesh links. - int offMeshBase; // Index to first polygon which is Off-Mesh link. - float walkableHeight; // Height of the agent. - float walkableRadius; // Radius of the agent - float walkableClimb; // Max climb height of the agent. - float bmin[3], bmax[3]; // Bounding box of the tile. - float bvQuantFactor; // BVtree quantization factor (world to bvnode coords) -}; - -struct dtMeshTile -{ - unsigned int salt; // Counter describing modifications to the tile. - - unsigned int linksFreeList; // Index to next free link. - dtMeshHeader* header; // Pointer to tile header. - dtPoly* polys; // Pointer to the polygons (will be updated when tile is added). - float* verts; // Pointer to the vertices (will be updated when tile added). - dtLink* links; // Pointer to the links (will be updated when tile added). - dtPolyDetail* detailMeshes; // Pointer to detail meshes (will be updated when tile added). - float* detailVerts; // Pointer to detail vertices (will be updated when tile added). - unsigned char* detailTris; // Pointer to detail triangles (will be updated when tile added). - dtBVNode* bvTree; // Pointer to BVtree nodes (will be updated when tile added). - dtOffMeshConnection* offMeshCons; // Pointer to Off-Mesh links. (will be updated when tile added). - - unsigned char* data; // Pointer to tile data. - int dataSize; // Size of the tile data. - int flags; // Tile flags, see dtTileFlags. - dtMeshTile* next; // Next free tile or, next tile in spatial grid. -}; - -struct dtNavMeshParams -{ - float orig[3]; // Origin of the nav mesh tile space. - float tileWidth, tileHeight; // Width and height of each tile. - int maxTiles; // Maximum number of tiles the navmesh can contain. - int maxPolys; // Maximum number of polygons each tile can contain. -}; - - -class dtNavMesh -{ -public: - dtNavMesh(); - ~dtNavMesh(); - - // Initializes the nav mesh for tiled use. - // Params: - // params - (in) navmesh initialization params, see dtNavMeshParams. - // Returns: True if succeed, else false. - dtStatus init(const dtNavMeshParams* params); - - // Initializes the nav mesh for single tile use. - // Params: - // data - (in) Data of the new tile mesh. - // dataSize - (in) Data size of the new tile mesh. - // flags - (in) Tile flags, see dtTileFlags. - // Returns: True if succeed, else false. - dtStatus init(unsigned char* data, const int dataSize, const int flags); - - // Returns pointer to navmesh initialization params. - const dtNavMeshParams* getParams() const; - - // Adds new tile into the navmesh. - // The add will fail if the data is in wrong format, - // there is not enough tiles left, or if there is a tile already at the location. - // Params: - // data - (in) Data of the new tile mesh. - // dataSize - (in) Data size of the new tile mesh. - // flags - (in) Tile flags, see dtTileFlags. 
- // lastRef - (in,optional) Last tile ref, the tile will be restored so that - // the reference (as well as poly references) will be the same. Default: 0. - // result - (out,optional) tile ref if the tile was succesfully added. - dtStatus addTile(unsigned char* data, int dataSize, int flags, dtTileRef lastRef, dtTileRef* result); - - // Removes specified tile. - // Params: - // ref - (in) Reference to the tile to remove. - // data - (out) Data associated with deleted tile. - // dataSize - (out) Size of the data associated with deleted tile. - dtStatus removeTile(dtTileRef ref, unsigned char** data, int* dataSize); - - // Calculates tile location based in input world position. - // Params: - // pos - (in) world position of the query. - // tx - (out) tile x location. - // ty - (out) tile y location. - void calcTileLoc(const float* pos, int* tx, int* ty) const; - - // Returns pointer to tile at specified location. - // Params: - // x,y - (in) Location of the tile to get. - // Returns: pointer to tile if tile exists or 0 tile does not exists. - const dtMeshTile* getTileAt(int x, int y) const; - - // Returns reference to tile at specified location. - // Params: - // x,y - (in) Location of the tile to get. - // Returns: reference to tile if tile exists or 0 tile does not exists. - dtTileRef getTileRefAt(int x, int y) const; - - // Returns tile references of a tile based on tile pointer. - dtTileRef getTileRef(const dtMeshTile* tile) const; - - // Returns tile based on references. - const dtMeshTile* getTileByRef(dtTileRef ref) const; - - // Returns max number of tiles. - int getMaxTiles() const; - - // Returns pointer to tile in the tile array. - // Params: - // i - (in) Index to the tile to retrieve, max index is getMaxTiles()-1. - // Returns: Pointer to specified tile. - const dtMeshTile* getTile(int i) const; - - // Returns pointer to tile and polygon pointed by the polygon reference. - // Params: - // ref - (in) reference to a polygon. - // tile - (out) pointer to the tile containing the polygon. - // poly - (out) pointer to the polygon. - dtStatus getTileAndPolyByRef(const dtPolyRef ref, const dtMeshTile** tile, const dtPoly** poly) const; - - // Returns pointer to tile and polygon pointed by the polygon reference. - // Note: this function does not check if 'ref' s valid, and is thus faster. Use only with valid refs! - // Params: - // ref - (in) reference to a polygon. - // tile - (out) pointer to the tile containing the polygon. - // poly - (out) pointer to the polygon. - void getTileAndPolyByRefUnsafe(const dtPolyRef ref, const dtMeshTile** tile, const dtPoly** poly) const; - - // Returns true if polygon reference points to valid data. - bool isValidPolyRef(dtPolyRef ref) const; - - // Returns base poly id for specified tile, polygon refs can be deducted from this. - dtPolyRef getPolyRefBase(const dtMeshTile* tile) const; - - // Returns start and end location of an off-mesh link polygon. - // Params: - // prevRef - (in) ref to the polygon before the link (used to select direction). - // polyRef - (in) ref to the off-mesh link polygon. - // startPos[3] - (out) start point of the link. - // endPos[3] - (out) end point of the link. - // Returns: true if link is found. - dtStatus getOffMeshConnectionPolyEndPoints(dtPolyRef prevRef, dtPolyRef polyRef, float* startPos, float* endPos) const; - - // Returns pointer to off-mesh connection based on polyref, or null if ref not valid. - const dtOffMeshConnection* getOffMeshConnectionByRef(dtPolyRef ref) const; - - // Sets polygon flags. 
- dtStatus setPolyFlags(dtPolyRef ref, unsigned short flags); - - // Return polygon flags. - dtStatus getPolyFlags(dtPolyRef ref, unsigned short* resultFlags) const; - - // Set polygon type. - dtStatus setPolyArea(dtPolyRef ref, unsigned char area); - - // Return polygon area type. - dtStatus getPolyArea(dtPolyRef ref, unsigned char* resultArea) const; - - - // Returns number of bytes required to store tile state. - int getTileStateSize(const dtMeshTile* tile) const; - - // Stores tile state to buffer. - dtStatus storeTileState(const dtMeshTile* tile, unsigned char* data, const int maxDataSize) const; - - // Restores tile state. - dtStatus restoreTileState(dtMeshTile* tile, const unsigned char* data, const int maxDataSize); - - - // Encodes a tile id. - inline dtPolyRef encodePolyId(unsigned int salt, unsigned int it, unsigned int ip) const - { - return ((dtPolyRef)salt << (m_polyBits+m_tileBits)) | ((dtPolyRef)it << m_polyBits) | (dtPolyRef)ip; - } - - // Decodes a tile id. - inline void decodePolyId(dtPolyRef ref, unsigned int& salt, unsigned int& it, unsigned int& ip) const - { - const dtPolyRef saltMask = ((dtPolyRef)1<<m_saltBits)-1; - const dtPolyRef tileMask = ((dtPolyRef)1<<m_tileBits)-1; - const dtPolyRef polyMask = ((dtPolyRef)1<<m_polyBits)-1; - salt = (unsigned int)((ref >> (m_polyBits+m_tileBits)) & saltMask); - it = (unsigned int)((ref >> m_polyBits) & tileMask); - ip = (unsigned int)(ref & polyMask); - } - - // Decodes a tile salt. - inline unsigned int decodePolyIdSalt(dtPolyRef ref) const - { - const dtPolyRef saltMask = ((dtPolyRef)1<<m_saltBits)-1; - return (unsigned int)((ref >> (m_polyBits+m_tileBits)) & saltMask); - } - - // Decodes a tile id. - inline unsigned int decodePolyIdTile(dtPolyRef ref) const - { - const dtPolyRef tileMask = ((dtPolyRef)1<<m_tileBits)-1; - return (unsigned int)((ref >> m_polyBits) & tileMask); - } - - // Decodes a poly id. - inline unsigned int decodePolyIdPoly(dtPolyRef ref) const - { - const dtPolyRef polyMask = ((dtPolyRef)1<<m_polyBits)-1; - return (unsigned int)(ref & polyMask); - } - -private: - - // Returns pointer to tile in the tile array. - dtMeshTile* getTile(int i); - - // Returns neighbour tile based on side. - dtMeshTile* getNeighbourTileAt(int x, int y, int side) const; - // Returns all polygons in neighbour tile based on portal defined by the segment. - int findConnectingPolys(const float* va, const float* vb, - const dtMeshTile* tile, int side, - dtPolyRef* con, float* conarea, int maxcon) const; - - // Builds internal polygons links for a tile. - void connectIntLinks(dtMeshTile* tile); - // Builds internal polygons links for a tile. - void connectIntOffMeshLinks(dtMeshTile* tile); - - // Builds external polygon links for a tile. - void connectExtLinks(dtMeshTile* tile, dtMeshTile* target, int side); - // Builds external polygon links for a tile. - void connectExtOffMeshLinks(dtMeshTile* tile, dtMeshTile* target, int side); - - // Removes external links at specified side. - void unconnectExtLinks(dtMeshTile* tile, int side); - - - // TODO: These methods are duplicates from dtNavMeshQuery, but are needed for off-mesh connection finding. - - // Queries polygons within a tile. - int queryPolygonsInTile(const dtMeshTile* tile, const float* qmin, const float* qmax, - dtPolyRef* polys, const int maxPolys) const; - // Find nearest polygon within a tile. - dtPolyRef findNearestPolyInTile(const dtMeshTile* tile, const float* center, - const float* extents, float* nearestPt) const; - // Returns closest point on polygon. 
- dtStatus closestPointOnPolyInTile(const dtMeshTile* tile, unsigned int ip, - const float* pos, float* closest) const; - - dtNavMeshParams m_params; // Current initialization params. TODO: do not store this info twice. - float m_orig[3]; // Origin of the tile (0,0) - float m_tileWidth, m_tileHeight; // Dimensions of each tile. - int m_maxTiles; // Max number of tiles. - int m_tileLutSize; // Tile hash lookup size (must be pot). - int m_tileLutMask; // Tile hash lookup mask. - - dtMeshTile** m_posLookup; // Tile hash lookup. - dtMeshTile* m_nextFree; // Freelist of tiles. - dtMeshTile* m_tiles; // List of tiles. - - unsigned int m_saltBits; // Number of salt bits in the tile ID. - unsigned int m_tileBits; // Number of tile bits in the tile ID. - unsigned int m_polyBits; // Number of poly bits in the tile ID. -}; - -// Helper function to allocate navmesh class using Detour allocator. -dtNavMesh* dtAllocNavMesh(); -void dtFreeNavMesh(dtNavMesh* navmesh); - -#endif // DETOURNAVMESH_H diff --git a/deps/recastnavigation/Detour/DetourNavMeshBuilder.cpp b/deps/recastnavigation/Detour/DetourNavMeshBuilder.cpp deleted file mode 100644 index 68f3d730da..0000000000 --- a/deps/recastnavigation/Detour/DetourNavMeshBuilder.cpp +++ /dev/null @@ -1,770 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. 
-// - -#include <math.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include "DetourNavMesh.h" -#include "DetourCommon.h" -#include "DetourNavMeshBuilder.h" -#include "DetourAlloc.h" -#include "DetourAssert.h" - -static unsigned short MESH_NULL_IDX = 0xffff; - - -struct BVItem -{ - unsigned short bmin[3]; - unsigned short bmax[3]; - int i; -}; - -static int compareItemX(const void* va, const void* vb) -{ - const BVItem* a = (const BVItem*)va; - const BVItem* b = (const BVItem*)vb; - if (a->bmin[0] < b->bmin[0]) - return -1; - if (a->bmin[0] > b->bmin[0]) - return 1; - return 0; -} - -static int compareItemY(const void* va, const void* vb) -{ - const BVItem* a = (const BVItem*)va; - const BVItem* b = (const BVItem*)vb; - if (a->bmin[1] < b->bmin[1]) - return -1; - if (a->bmin[1] > b->bmin[1]) - return 1; - return 0; -} - -static int compareItemZ(const void* va, const void* vb) -{ - const BVItem* a = (const BVItem*)va; - const BVItem* b = (const BVItem*)vb; - if (a->bmin[2] < b->bmin[2]) - return -1; - if (a->bmin[2] > b->bmin[2]) - return 1; - return 0; -} - -static void calcExtends(BVItem* items, const int /*nitems*/, const int imin, const int imax, - unsigned short* bmin, unsigned short* bmax) -{ - bmin[0] = items[imin].bmin[0]; - bmin[1] = items[imin].bmin[1]; - bmin[2] = items[imin].bmin[2]; - - bmax[0] = items[imin].bmax[0]; - bmax[1] = items[imin].bmax[1]; - bmax[2] = items[imin].bmax[2]; - - for (int i = imin+1; i < imax; ++i) - { - const BVItem& it = items[i]; - if (it.bmin[0] < bmin[0]) bmin[0] = it.bmin[0]; - if (it.bmin[1] < bmin[1]) bmin[1] = it.bmin[1]; - if (it.bmin[2] < bmin[2]) bmin[2] = it.bmin[2]; - - if (it.bmax[0] > bmax[0]) bmax[0] = it.bmax[0]; - if (it.bmax[1] > bmax[1]) bmax[1] = it.bmax[1]; - if (it.bmax[2] > bmax[2]) bmax[2] = it.bmax[2]; - } -} - -inline int longestAxis(unsigned short x, unsigned short y, unsigned short z) -{ - int axis = 0; - unsigned short maxVal = x; - if (y > maxVal) - { - axis = 1; - maxVal = y; - } - if (z > maxVal) - { - axis = 2; - maxVal = z; - } - return axis; -} - -static void subdivide(BVItem* items, int nitems, int imin, int imax, int& curNode, dtBVNode* nodes) -{ - int inum = imax - imin; - int icur = curNode; - - dtBVNode& node = nodes[curNode++]; - - if (inum == 1) - { - // Leaf - node.bmin[0] = items[imin].bmin[0]; - node.bmin[1] = items[imin].bmin[1]; - node.bmin[2] = items[imin].bmin[2]; - - node.bmax[0] = items[imin].bmax[0]; - node.bmax[1] = items[imin].bmax[1]; - node.bmax[2] = items[imin].bmax[2]; - - node.i = items[imin].i; - } - else - { - // Split - calcExtends(items, nitems, imin, imax, node.bmin, node.bmax); - - int axis = longestAxis(node.bmax[0] - node.bmin[0], - node.bmax[1] - node.bmin[1], - node.bmax[2] - node.bmin[2]); - - if (axis == 0) - { - // Sort along x-axis - qsort(items+imin, inum, sizeof(BVItem), compareItemX); - } - else if (axis == 1) - { - // Sort along y-axis - qsort(items+imin, inum, sizeof(BVItem), compareItemY); - } - else - { - // Sort along z-axis - qsort(items+imin, inum, sizeof(BVItem), compareItemZ); - } - - int isplit = imin+inum/2; - - // Left - subdivide(items, nitems, imin, isplit, curNode, nodes); - // Right - subdivide(items, nitems, isplit, imax, curNode, nodes); - - int iescape = curNode - icur; - // Negative index means escape. 
- node.i = -iescape; - } -} - -static int createBVTree(const unsigned short* verts, const int /*nverts*/, - const unsigned short* polys, const int npolys, const int nvp, - const float cs, const float ch, - const int /*nnodes*/, dtBVNode* nodes) -{ - // Build tree - BVItem* items = (BVItem*)dtAlloc(sizeof(BVItem)*npolys, DT_ALLOC_TEMP); - for (int i = 0; i < npolys; i++) - { - BVItem& it = items[i]; - it.i = i; - // Calc polygon bounds. - const unsigned short* p = &polys[i*nvp*2]; - it.bmin[0] = it.bmax[0] = verts[p[0]*3+0]; - it.bmin[1] = it.bmax[1] = verts[p[0]*3+1]; - it.bmin[2] = it.bmax[2] = verts[p[0]*3+2]; - - for (int j = 1; j < nvp; ++j) - { - if (p[j] == MESH_NULL_IDX) break; - unsigned short x = verts[p[j]*3+0]; - unsigned short y = verts[p[j]*3+1]; - unsigned short z = verts[p[j]*3+2]; - - if (x < it.bmin[0]) it.bmin[0] = x; - if (y < it.bmin[1]) it.bmin[1] = y; - if (z < it.bmin[2]) it.bmin[2] = z; - - if (x > it.bmax[0]) it.bmax[0] = x; - if (y > it.bmax[1]) it.bmax[1] = y; - if (z > it.bmax[2]) it.bmax[2] = z; - } - // Remap y - it.bmin[1] = (unsigned short)floorf((float)it.bmin[1]*ch/cs); - it.bmax[1] = (unsigned short)ceilf((float)it.bmax[1]*ch/cs); - } - - int curNode = 0; - subdivide(items, npolys, 0, npolys, curNode, nodes); - - dtFree(items); - - return curNode; -} - -static unsigned char classifyOffMeshPoint(const float* pt, const float* bmin, const float* bmax) -{ - static const unsigned char XP = 1<<0; - static const unsigned char ZP = 1<<1; - static const unsigned char XM = 1<<2; - static const unsigned char ZM = 1<<3; - - unsigned char outcode = 0; - outcode |= (pt[0] >= bmax[0]) ? XP : 0; - outcode |= (pt[2] >= bmax[2]) ? ZP : 0; - outcode |= (pt[0] < bmin[0]) ? XM : 0; - outcode |= (pt[2] < bmin[2]) ? ZM : 0; - - switch (outcode) - { - case XP: return 0; - case XP|ZP: return 1; - case ZP: return 2; - case XM|ZP: return 3; - case XM: return 4; - case XM|ZM: return 5; - case ZM: return 6; - case XP|ZM: return 7; - }; - return 0xff; -} - -// TODO: Better error handling. - -bool dtCreateNavMeshData(dtNavMeshCreateParams* params, unsigned char** outData, int* outDataSize) -{ - if (params->nvp > DT_VERTS_PER_POLYGON) - return false; - if (params->vertCount >= 0xffff) - return false; - if (!params->vertCount || !params->verts) - return false; - if (!params->polyCount || !params->polys) - return false; - //if (!params->detailMeshes || !params->detailVerts || !params->detailTris) - // return false; - - const int nvp = params->nvp; - - // Classify off-mesh connection points. We store only the connections - // whose start point is inside the tile. - unsigned char* offMeshConClass = 0; - int storedOffMeshConCount = 0; - int offMeshConLinkCount = 0; - - if (params->offMeshConCount > 0) - { - offMeshConClass = (unsigned char*)dtAlloc(sizeof(unsigned char)*params->offMeshConCount*2, DT_ALLOC_TEMP); - if (!offMeshConClass) - return false; - - for (int i = 0; i < params->offMeshConCount; ++i) - { - offMeshConClass[i*2+0] = classifyOffMeshPoint(¶ms->offMeshConVerts[(i*2+0)*3], params->bmin, params->bmax); - offMeshConClass[i*2+1] = classifyOffMeshPoint(¶ms->offMeshConVerts[(i*2+1)*3], params->bmin, params->bmax); - - // Cound how many links should be allocated for off-mesh connections. - if (offMeshConClass[i*2+0] == 0xff) - offMeshConLinkCount++; - if (offMeshConClass[i*2+1] == 0xff) - offMeshConLinkCount++; - - if (offMeshConClass[i*2+0] == 0xff) - storedOffMeshConCount++; - } - } - - // Off-mesh connectionss are stored as polygons, adjust values. 
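// Note on the classification above: classifyOffMeshPoint() returns 0xff when the point
// lies inside the tile's xz bounds and a side index 0..7 otherwise. A connection is
// therefore stored in this tile exactly when its *start* point classifies as 0xff, while
// the class of its *end* point is kept further down as con->side so the cross-tile link
// can be resolved once the neighbouring tile is added. With illustrative bounds
// bmin=(0,0,0)/bmax=(10,5,10), a start point at (5,0,5) classifies as 0xff (stored here)
// and an end point at (12,0,5) classifies as 0 (the +x side).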
- const int totPolyCount = params->polyCount + storedOffMeshConCount; - const int totVertCount = params->vertCount + storedOffMeshConCount*2; - - // Find portal edges which are at tile borders. - int edgeCount = 0; - int portalCount = 0; - for (int i = 0; i < params->polyCount; ++i) - { - const unsigned short* p = ¶ms->polys[i*2*nvp]; - for (int j = 0; j < nvp; ++j) - { - if (p[j] == MESH_NULL_IDX) break; - int nj = j+1; - if (nj >= nvp || p[nj] == MESH_NULL_IDX) nj = 0; - const unsigned short* va = ¶ms->verts[p[j]*3]; - const unsigned short* vb = ¶ms->verts[p[nj]*3]; - - edgeCount++; - - if (params->tileSize > 0) - { - if (va[0] == params->tileSize && vb[0] == params->tileSize) - portalCount++; // x+ - else if (va[2] == params->tileSize && vb[2] == params->tileSize) - portalCount++; // z+ - else if (va[0] == 0 && vb[0] == 0) - portalCount++; // x- - else if (va[2] == 0 && vb[2] == 0) - portalCount++; // z- - } - } - } - - const int maxLinkCount = edgeCount + portalCount*2 + offMeshConLinkCount*2; - - // Find unique detail vertices. - int uniqueDetailVertCount = 0; - int detailTriCount = 0; - if (params->detailMeshes) - { - // Has detail mesh, count unique detail vertex count and use input detail tri count. - detailTriCount = params->detailTriCount; - for (int i = 0; i < params->polyCount; ++i) - { - const unsigned short* p = ¶ms->polys[i*nvp*2]; - int ndv = params->detailMeshes[i*4+1]; - int nv = 0; - for (int j = 0; j < nvp; ++j) - { - if (p[j] == MESH_NULL_IDX) break; - nv++; - } - ndv -= nv; - uniqueDetailVertCount += ndv; - } - } - else - { - // No input detail mesh, build detail mesh from nav polys. - uniqueDetailVertCount = 0; // No extra detail verts. - detailTriCount = 0; - for (int i = 0; i < params->polyCount; ++i) - { - const unsigned short* p = ¶ms->polys[i*nvp*2]; - int nv = 0; - for (int j = 0; j < nvp; ++j) - { - if (p[j] == MESH_NULL_IDX) break; - nv++; - } - detailTriCount += nv-2; - } - } - - // Calculate data size - const int headerSize = dtAlign4(sizeof(dtMeshHeader)); - const int vertsSize = dtAlign4(sizeof(float)*3*totVertCount); - const int polysSize = dtAlign4(sizeof(dtPoly)*totPolyCount); - const int linksSize = dtAlign4(sizeof(dtLink)*maxLinkCount); - const int detailMeshesSize = dtAlign4(sizeof(dtPolyDetail)*params->polyCount); - const int detailVertsSize = dtAlign4(sizeof(float)*3*uniqueDetailVertCount); - const int detailTrisSize = dtAlign4(sizeof(unsigned char)*4*detailTriCount); - const int bvTreeSize = dtAlign4(sizeof(dtBVNode)*params->polyCount*2); - const int offMeshConsSize = dtAlign4(sizeof(dtOffMeshConnection)*storedOffMeshConCount); - - const int dataSize = headerSize + vertsSize + polysSize + linksSize + - detailMeshesSize + detailVertsSize + detailTrisSize + - bvTreeSize + offMeshConsSize; - - unsigned char* data = (unsigned char*)dtAlloc(sizeof(unsigned char)*dataSize, DT_ALLOC_PERM); - if (!data) - { - dtFree(offMeshConClass); - return false; - } - memset(data, 0, dataSize); - - unsigned char* d = data; - dtMeshHeader* header = (dtMeshHeader*)d; d += headerSize; - float* navVerts = (float*)d; d += vertsSize; - dtPoly* navPolys = (dtPoly*)d; d += polysSize; - d += linksSize; - dtPolyDetail* navDMeshes = (dtPolyDetail*)d; d += detailMeshesSize; - float* navDVerts = (float*)d; d += detailVertsSize; - unsigned char* navDTris = (unsigned char*)d; d += detailTrisSize; - dtBVNode* navBvtree = (dtBVNode*)d; d += bvTreeSize; - dtOffMeshConnection* offMeshCons = (dtOffMeshConnection*)d; d += offMeshConsSize; - - - // Store header - header->magic = 
DT_NAVMESH_MAGIC; - header->version = DT_NAVMESH_VERSION; - header->x = params->tileX; - header->y = params->tileY; - header->userId = params->userId; - header->polyCount = totPolyCount; - header->vertCount = totVertCount; - header->maxLinkCount = maxLinkCount; - dtVcopy(header->bmin, params->bmin); - dtVcopy(header->bmax, params->bmax); - header->detailMeshCount = params->polyCount; - header->detailVertCount = uniqueDetailVertCount; - header->detailTriCount = detailTriCount; - header->bvQuantFactor = 1.0f / params->cs; - header->offMeshBase = params->polyCount; - header->walkableHeight = params->walkableHeight; - header->walkableRadius = params->walkableRadius; - header->walkableClimb = params->walkableClimb; - header->offMeshConCount = storedOffMeshConCount; - header->bvNodeCount = params->polyCount*2; - - const int offMeshVertsBase = params->vertCount; - const int offMeshPolyBase = params->polyCount; - - // Store vertices - // Mesh vertices - for (int i = 0; i < params->vertCount; ++i) - { - const unsigned short* iv = ¶ms->verts[i*3]; - float* v = &navVerts[i*3]; - v[0] = params->bmin[0] + iv[0] * params->cs; - v[1] = params->bmin[1] + iv[1] * params->ch; - v[2] = params->bmin[2] + iv[2] * params->cs; - } - // Off-mesh link vertices. - int n = 0; - for (int i = 0; i < params->offMeshConCount; ++i) - { - // Only store connections which start from this tile. - if (offMeshConClass[i*2+0] == 0xff) - { - const float* linkv = ¶ms->offMeshConVerts[i*2*3]; - float* v = &navVerts[(offMeshVertsBase + n*2)*3]; - dtVcopy(&v[0], &linkv[0]); - dtVcopy(&v[3], &linkv[3]); - n++; - } - } - - // Store polygons - // Mesh polys - const unsigned short* src = params->polys; - for (int i = 0; i < params->polyCount; ++i) - { - dtPoly* p = &navPolys[i]; - p->vertCount = 0; - p->flags = params->polyFlags[i]; - p->setArea(params->polyAreas[i]); - p->setType(DT_POLYTYPE_GROUND); - for (int j = 0; j < nvp; ++j) - { - if (src[j] == MESH_NULL_IDX) break; - p->verts[j] = src[j]; - p->neis[j] = (src[nvp+j]+1) & 0xffff; - p->vertCount++; - } - src += nvp*2; - } - // Off-mesh connection vertices. - n = 0; - for (int i = 0; i < params->offMeshConCount; ++i) - { - // Only store connections which start from this tile. - if (offMeshConClass[i*2+0] == 0xff) - { - dtPoly* p = &navPolys[offMeshPolyBase+n]; - p->vertCount = 2; - p->verts[0] = (unsigned short)(offMeshVertsBase + n*2+0); - p->verts[1] = (unsigned short)(offMeshVertsBase + n*2+1); - p->flags = params->offMeshConFlags[i]; - p->setArea(params->offMeshConAreas[i]); - p->setType(DT_POLYTYPE_OFFMESH_CONNECTION); - n++; - } - } - - // Store portal edges. - if (params->tileSize > 0) - { - for (int i = 0; i < params->polyCount; ++i) - { - dtPoly* poly = &navPolys[i]; - for (int j = 0; j < poly->vertCount; ++j) - { - int nj = j+1; - if (nj >= poly->vertCount) nj = 0; - - const unsigned short* va = ¶ms->verts[poly->verts[j]*3]; - const unsigned short* vb = ¶ms->verts[poly->verts[nj]*3]; - - if (va[0] == params->tileSize && vb[0] == params->tileSize) // x+ - poly->neis[j] = DT_EXT_LINK | 0; - else if (va[2] == params->tileSize && vb[2] == params->tileSize) // z+ - poly->neis[j] = DT_EXT_LINK | 2; - else if (va[0] == 0 && vb[0] == 0) // x- - poly->neis[j] = DT_EXT_LINK | 4; - else if (va[2] == 0 && vb[2] == 0) // z- - poly->neis[j] = DT_EXT_LINK | 6; - } - } - } - - // Store detail meshes and vertices. - // The nav polygon vertices are stored as the first vertices on each mesh. - // We compress the mesh data by skipping them and using the navmesh coordinates. 
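// Note: the read side relies on this same compression. getPolyHeight() and
// closestPointOnPolyInTile() in DetourNavMeshQuery.cpp (further down in this diff)
// resolve a detail triangle index t[k] as
//
//   v = (t[k] < poly->vertCount)
//         ? &tile->verts[poly->verts[t[k]]*3]                                // shared nav-poly vertex
//         : &tile->detailVerts[(pd->vertBase + (t[k]-poly->vertCount))*3];   // extra detail vertex
//
// so indices below poly->vertCount never need their own detail-vertex storage.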
- if (params->detailMeshes) - { - unsigned short vbase = 0; - for (int i = 0; i < params->polyCount; ++i) - { - dtPolyDetail& dtl = navDMeshes[i]; - const int vb = (int)params->detailMeshes[i*4+0]; - const int ndv = (int)params->detailMeshes[i*4+1]; - const int nv = navPolys[i].vertCount; - dtl.vertBase = (unsigned int)vbase; - dtl.vertCount = (unsigned char)(ndv-nv); - dtl.triBase = (unsigned int)params->detailMeshes[i*4+2]; - dtl.triCount = (unsigned char)params->detailMeshes[i*4+3]; - // Copy vertices except the first 'nv' verts which are equal to nav poly verts. - if (ndv-nv) - { - memcpy(&navDVerts[vbase*3], ¶ms->detailVerts[(vb+nv)*3], sizeof(float)*3*(ndv-nv)); - vbase += (unsigned short)(ndv-nv); - } - } - // Store triangles. - memcpy(navDTris, params->detailTris, sizeof(unsigned char)*4*params->detailTriCount); - } - else - { - // Create dummy detail mesh by triangulating polys. - int tbase = 0; - for (int i = 0; i < params->polyCount; ++i) - { - dtPolyDetail& dtl = navDMeshes[i]; - const int nv = navPolys[i].vertCount; - dtl.vertBase = 0; - dtl.vertCount = 0; - dtl.triBase = (unsigned int)tbase; - dtl.triCount = (unsigned char)(nv-2); - // Triangulate polygon (local indices). - for (int j = 2; j < nv; ++j) - { - unsigned char* t = &navDTris[tbase*4]; - t[0] = 0; - t[1] = (unsigned char)(j-1); - t[2] = (unsigned char)j; - // Bit for each edge that belongs to poly boundary. - t[3] = (1<<2); - if (j == 2) t[3] |= (1<<0); - if (j == nv-1) t[3] |= (1<<4); - tbase++; - } - } - } - - // Store and create BVtree. - // TODO: take detail mesh into account! use byte per bbox extent? - createBVTree(params->verts, params->vertCount, params->polys, params->polyCount, - nvp, params->cs, params->ch, params->polyCount*2, navBvtree); - - // Store Off-Mesh connections. - n = 0; - for (int i = 0; i < params->offMeshConCount; ++i) - { - // Only store connections which start from this tile. - if (offMeshConClass[i*2+0] == 0xff) - { - dtOffMeshConnection* con = &offMeshCons[n]; - con->poly = (unsigned short)(offMeshPolyBase + n); - // Copy connection end-points. - const float* endPts = ¶ms->offMeshConVerts[i*2*3]; - dtVcopy(&con->pos[0], &endPts[0]); - dtVcopy(&con->pos[3], &endPts[3]); - con->rad = params->offMeshConRad[i]; - con->flags = params->offMeshConDir[i] ? 
DT_OFFMESH_CON_BIDIR : 0; - con->side = offMeshConClass[i*2+1]; - if (params->offMeshConUserID) - con->userId = params->offMeshConUserID[i]; - n++; - } - } - - dtFree(offMeshConClass); - - *outData = data; - *outDataSize = dataSize; - - return true; -} - -inline void swapByte(unsigned char* a, unsigned char* b) -{ - unsigned char tmp = *a; - *a = *b; - *b = tmp; -} - -inline void swapEndian(unsigned short* v) -{ - unsigned char* x = (unsigned char*)v; - swapByte(x+0, x+1); -} - -inline void swapEndian(short* v) -{ - unsigned char* x = (unsigned char*)v; - swapByte(x+0, x+1); -} - -inline void swapEndian(unsigned int* v) -{ - unsigned char* x = (unsigned char*)v; - swapByte(x+0, x+3); swapByte(x+1, x+2); -} - -inline void swapEndian(int* v) -{ - unsigned char* x = (unsigned char*)v; - swapByte(x+0, x+3); swapByte(x+1, x+2); -} - -inline void swapEndian(float* v) -{ - unsigned char* x = (unsigned char*)v; - swapByte(x+0, x+3); swapByte(x+1, x+2); -} - -bool dtNavMeshHeaderSwapEndian(unsigned char* data, const int /*dataSize*/) -{ - dtMeshHeader* header = (dtMeshHeader*)data; - - int swappedMagic = DT_NAVMESH_MAGIC; - int swappedVersion = DT_NAVMESH_VERSION; - swapEndian(&swappedMagic); - swapEndian(&swappedVersion); - - if ((header->magic != DT_NAVMESH_MAGIC || header->version != DT_NAVMESH_VERSION) && - (header->magic != swappedMagic || header->version != swappedVersion)) - { - return false; - } - - swapEndian(&header->magic); - swapEndian(&header->version); - swapEndian(&header->x); - swapEndian(&header->y); - swapEndian(&header->userId); - swapEndian(&header->polyCount); - swapEndian(&header->vertCount); - swapEndian(&header->maxLinkCount); - swapEndian(&header->detailMeshCount); - swapEndian(&header->detailVertCount); - swapEndian(&header->detailTriCount); - swapEndian(&header->bvNodeCount); - swapEndian(&header->offMeshConCount); - swapEndian(&header->offMeshBase); - swapEndian(&header->walkableHeight); - swapEndian(&header->walkableRadius); - swapEndian(&header->walkableClimb); - swapEndian(&header->bmin[0]); - swapEndian(&header->bmin[1]); - swapEndian(&header->bmin[2]); - swapEndian(&header->bmax[0]); - swapEndian(&header->bmax[1]); - swapEndian(&header->bmax[2]); - swapEndian(&header->bvQuantFactor); - - // Freelist index and pointers are updated when tile is added, no need to swap. - - return true; -} - -bool dtNavMeshDataSwapEndian(unsigned char* data, const int /*dataSize*/) -{ - // Make sure the data is in right format. - dtMeshHeader* header = (dtMeshHeader*)data; - if (header->magic != DT_NAVMESH_MAGIC) - return false; - if (header->version != DT_NAVMESH_VERSION) - return false; - - // Patch header pointers. 
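// Layout reminder: the blob produced by dtCreateNavMeshData() above is a single
// allocation whose 4-byte-aligned sections always follow the header in this fixed
// order, which is why the counts in dtMeshHeader are enough to recover every array
// pointer here:
//
//   dtMeshHeader | verts | polys | links | detailMeshes | detailVerts | detailTris | bvTree | offMeshCons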
- const int headerSize = dtAlign4(sizeof(dtMeshHeader)); - const int vertsSize = dtAlign4(sizeof(float)*3*header->vertCount); - const int polysSize = dtAlign4(sizeof(dtPoly)*header->polyCount); - const int linksSize = dtAlign4(sizeof(dtLink)*(header->maxLinkCount)); - const int detailMeshesSize = dtAlign4(sizeof(dtPolyDetail)*header->detailMeshCount); - const int detailVertsSize = dtAlign4(sizeof(float)*3*header->detailVertCount); - const int detailTrisSize = dtAlign4(sizeof(unsigned char)*4*header->detailTriCount); - const int bvtreeSize = dtAlign4(sizeof(dtBVNode)*header->bvNodeCount); - const int offMeshLinksSize = dtAlign4(sizeof(dtOffMeshConnection)*header->offMeshConCount); - - unsigned char* d = data + headerSize; - float* verts = (float*)d; d += vertsSize; - dtPoly* polys = (dtPoly*)d; d += polysSize; - /*dtLink* links = (dtLink*)d;*/ d += linksSize; - dtPolyDetail* detailMeshes = (dtPolyDetail*)d; d += detailMeshesSize; - float* detailVerts = (float*)d; d += detailVertsSize; - /*unsigned char* detailTris = (unsigned char*)d;*/ d += detailTrisSize; - dtBVNode* bvTree = (dtBVNode*)d; d += bvtreeSize; - dtOffMeshConnection* offMeshCons = (dtOffMeshConnection*)d; d += offMeshLinksSize; - - // Vertices - for (int i = 0; i < header->vertCount*3; ++i) - { - swapEndian(&verts[i]); - } - - // Polys - for (int i = 0; i < header->polyCount; ++i) - { - dtPoly* p = &polys[i]; - // poly->firstLink is update when tile is added, no need to swap. - for (int j = 0; j < DT_VERTS_PER_POLYGON; ++j) - { - swapEndian(&p->verts[j]); - swapEndian(&p->neis[j]); - } - swapEndian(&p->flags); - } - - // Links are rebuild when tile is added, no need to swap. - - // Detail meshes - for (int i = 0; i < header->detailMeshCount; ++i) - { - dtPolyDetail* pd = &detailMeshes[i]; - swapEndian(&pd->vertBase); - swapEndian(&pd->triBase); - } - - // Detail verts - for (int i = 0; i < header->detailVertCount*3; ++i) - { - swapEndian(&detailVerts[i]); - } - - // BV-tree - for (int i = 0; i < header->bvNodeCount; ++i) - { - dtBVNode* node = &bvTree[i]; - for (int j = 0; j < 3; ++j) - { - swapEndian(&node->bmin[j]); - swapEndian(&node->bmax[j]); - } - swapEndian(&node->i); - } - - // Off-mesh Connections. - for (int i = 0; i < header->offMeshConCount; ++i) - { - dtOffMeshConnection* con = &offMeshCons[i]; - for (int j = 0; j < 6; ++j) - swapEndian(&con->pos[j]); - swapEndian(&con->rad); - swapEndian(&con->poly); - } - - return true; -} diff --git a/deps/recastnavigation/Detour/DetourNavMeshBuilder.h b/deps/recastnavigation/Detour/DetourNavMeshBuilder.h deleted file mode 100644 index aa802d71cb..0000000000 --- a/deps/recastnavigation/Detour/DetourNavMeshBuilder.h +++ /dev/null @@ -1,79 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. 
-// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef DETOURNAVMESHBUILDER_H -#define DETOURNAVMESHBUILDER_H - -#include "DetourAlloc.h" - - -// The units of the parameters are specified in parenthesis as follows: -// (vx) voxels, (wu) world units -struct dtNavMeshCreateParams -{ - // Navmesh vertices. - const unsigned short* verts; // Array of vertices, each vertex has 3 components. (vx). - int vertCount; // Vertex count - // Navmesh polygons - const unsigned short* polys; // Array of polygons, uses same format as rcPolyMesh. - const unsigned short* polyFlags; // Array of flags per polygon. - const unsigned char* polyAreas; // Array of area ids per polygon. - int polyCount; // Number of polygons - int nvp; // Number of verts per polygon. - // Navmesh Detail - const unsigned int* detailMeshes; // Detail meshes, uses same format as rcPolyMeshDetail. - const float* detailVerts; // Detail mesh vertices, uses same format as rcPolyMeshDetail (wu). - int detailVertsCount; // Total number of detail vertices - const unsigned char* detailTris; // Array of detail tris per detail mesh. - int detailTriCount; // Total number of detail triangles. - // Off-Mesh Connections. - const float* offMeshConVerts; // Off-mesh connection vertices (wu). - const float* offMeshConRad; // Off-mesh connection radii (wu). - const unsigned short* offMeshConFlags; // Off-mesh connection flags. - const unsigned char* offMeshConAreas; // Off-mesh connection area ids. - const unsigned char* offMeshConDir; // Off-mesh connection direction flags (1 = bidir, 0 = oneway). - const unsigned int* offMeshConUserID; // Off-mesh connection user id (optional). - int offMeshConCount; // Number of off-mesh connections - // Tile location - unsigned int userId; // User ID bound to the tile. - int tileX, tileY; // Tile location (tile coords). - float bmin[3], bmax[3]; // Tile bounds (wu). - // Settings - float walkableHeight; // Agent height (wu). - float walkableRadius; // Agent radius (wu). - float walkableClimb; // Agent max climb (wu). - float cs; // Cell size (xz) (wu). - float ch; // Cell height (y) (wu). - int tileSize; // Tile size (width & height) (vx). - int tileLayer; - bool buildBvTree; -}; - -// Build navmesh data from given input data. -bool dtCreateNavMeshData(dtNavMeshCreateParams* params, unsigned char** outData, int* outDataSize); - -// Swaps endianess of navmesh header. -bool dtNavMeshHeaderSwapEndian(unsigned char* data, const int dataSize); - -// Swaps endianess of the navmesh data. This function assumes that the header is in correct -// endianess already. Call dtNavMeshHeaderSwapEndian() first on the data if the data is -// assumed to be in wrong endianess to start with. If converting from native endianess to foreign, -// call dtNavMeshHeaderSwapEndian() after the data has been swapped. -bool dtNavMeshDataSwapEndian(unsigned char* data, const int dataSize); - -#endif // DETOURNAVMESHBUILDER_H diff --git a/deps/recastnavigation/Detour/DetourNavMeshQuery.cpp b/deps/recastnavigation/Detour/DetourNavMeshQuery.cpp deleted file mode 100644 index 33ee87fb95..0000000000 --- a/deps/recastnavigation/Detour/DetourNavMeshQuery.cpp +++ /dev/null @@ -1,2578 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. 
-// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#include <math.h> -#include <float.h> -#include <string.h> -#include "DetourNavMeshQuery.h" -#include "DetourNavMesh.h" -#include "DetourNode.h" -#include "DetourCommon.h" -#include "DetourAlloc.h" -#include "DetourAssert.h" -#include <new> - - -dtQueryFilter::dtQueryFilter() : - m_includeFlags(0xffff), - m_excludeFlags(0) -{ - for (int i = 0; i < DT_MAX_AREAS; ++i) - m_areaCost[i] = 1.0f; -} - -#ifdef DT_VIRTUAL_QUERYFILTER -bool dtQueryFilter::passFilter(const dtPolyRef /*ref*/, - const dtMeshTile* /*tile*/, - const dtPoly* poly) const -{ - return (poly->flags & m_includeFlags) != 0 && (poly->flags & m_excludeFlags) == 0; -} - -float dtQueryFilter::getCost(const float* pa, const float* pb, - const dtPolyRef /*prevRef*/, const dtMeshTile* /*prevTile*/, const dtPoly* /*prevPoly*/, - const dtPolyRef /*curRef*/, const dtMeshTile* /*curTile*/, const dtPoly* curPoly, - const dtPolyRef /*nextRef*/, const dtMeshTile* /*nextTile*/, const dtPoly* /*nextPoly*/) const -{ - return dtVdist(pa, pb) * m_areaCost[curPoly->getArea()]; -} -#else -inline bool dtQueryFilter::passFilter(const dtPolyRef /*ref*/, - const dtMeshTile* /*tile*/, - const dtPoly* poly) const -{ - return (poly->flags & m_includeFlags) != 0 && (poly->flags & m_excludeFlags) == 0; -} - -inline float dtQueryFilter::getCost(const float* pa, const float* pb, - const dtPolyRef /*prevRef*/, const dtMeshTile* /*prevTile*/, const dtPoly* /*prevPoly*/, - const dtPolyRef /*curRef*/, const dtMeshTile* /*curTile*/, const dtPoly* curPoly, - const dtPolyRef /*nextRef*/, const dtMeshTile* /*nextTile*/, const dtPoly* /*nextPoly*/) const -{ - return dtVdist(pa, pb) * m_areaCost[curPoly->getArea()]; -} -#endif - -static const float H_SCALE = 2.0f; // Search heuristic scale. 
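// For orientation, a typical caller drove this (since-removed) query API roughly as in
// the sketch below. The node count, the search extents and the helper name are
// illustrative assumptions, not values taken from this file.

#include "DetourNavMesh.h"
#include "DetourNavMeshQuery.h"

static bool findExamplePath(const dtNavMesh* mesh,
                            const float* startPos, const float* endPos,
                            dtPolyRef* path, const int maxPath, int* pathCount)
{
    dtNavMeshQuery* query = dtAllocNavMeshQuery();
    if (!query || query->init(mesh, 2048 /* maxNodes, assumed */) != DT_SUCCESS)
    {
        dtFreeNavMeshQuery(query);
        return false;
    }

    const float extents[3] = { 2.0f, 4.0f, 2.0f }; // half-extents of the search box (assumed)
    dtQueryFilter filter;                          // defaults: include all flags, exclude none

    // Snap both endpoints to the navmesh, then run A* between the two poly refs.
    dtPolyRef startRef = 0, endRef = 0;
    query->findNearestPoly(startPos, extents, &filter, &startRef, 0);
    query->findNearestPoly(endPos, extents, &filter, &endRef, 0);

    const bool ok = startRef && endRef &&
        query->findPath(startRef, endRef, startPos, endPos, &filter,
                        path, pathCount, maxPath) == DT_SUCCESS;

    dtFreeNavMeshQuery(query);
    return ok;
}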
- - -dtNavMeshQuery* dtAllocNavMeshQuery() -{ - void* mem = dtAlloc(sizeof(dtNavMeshQuery), DT_ALLOC_PERM); - if (!mem) return 0; - return new(mem) dtNavMeshQuery; -} - -void dtFreeNavMeshQuery(dtNavMeshQuery* navmesh) -{ - if (!navmesh) return; - navmesh->~dtNavMeshQuery(); - dtFree(navmesh); -} - -////////////////////////////////////////////////////////////////////////////////////////// -dtNavMeshQuery::dtNavMeshQuery() : - m_nav(0), - m_tinyNodePool(0), - m_nodePool(0), - m_openList(0) -{ - memset(&m_query, 0, sizeof(dtQueryData)); -} - -dtNavMeshQuery::~dtNavMeshQuery() -{ - if (m_tinyNodePool) - m_tinyNodePool->~dtNodePool(); - if (m_nodePool) - m_nodePool->~dtNodePool(); - if (m_openList) - m_openList->~dtNodeQueue(); - dtFree(m_tinyNodePool); - dtFree(m_nodePool); - dtFree(m_openList); -} - -dtStatus dtNavMeshQuery::init(const dtNavMesh* nav, const int maxNodes) -{ - m_nav = nav; - - if (!m_nodePool || m_nodePool->getMaxNodes() < maxNodes) - { - if (m_nodePool) - { - m_nodePool->~dtNodePool(); - dtFree(m_nodePool); - m_nodePool = 0; - } - m_nodePool = new (dtAlloc(sizeof(dtNodePool), DT_ALLOC_PERM)) dtNodePool(maxNodes, dtNextPow2(maxNodes/4)); - if (!m_nodePool) - return DT_FAILURE_OUT_OF_MEMORY; - } - else - { - m_nodePool->clear(); - } - - if (!m_tinyNodePool) - { - m_tinyNodePool = new (dtAlloc(sizeof(dtNodePool), DT_ALLOC_PERM)) dtNodePool(64, 32); - if (!m_tinyNodePool) - return DT_FAILURE_OUT_OF_MEMORY; - } - else - { - m_tinyNodePool->clear(); - } - - // TODO: check the open list size too. - if (!m_openList || m_openList->getCapacity() < maxNodes) - { - if (m_openList) - { - m_openList->~dtNodeQueue(); - dtFree(m_openList); - m_openList = 0; - } - m_openList = new (dtAlloc(sizeof(dtNodeQueue), DT_ALLOC_PERM)) dtNodeQueue(maxNodes); - if (!m_openList) - return DT_FAILURE_OUT_OF_MEMORY; - } - else - { - m_openList->clear(); - } - - return DT_SUCCESS; -} - -////////////////////////////////////////////////////////////////////////////////////////// -dtStatus dtNavMeshQuery::closestPointOnPoly(dtPolyRef ref, const float* pos, float* closest) const -{ - dtAssert(m_nav); - const dtMeshTile* tile = 0; - const dtPoly* poly = 0; - if (m_nav->getTileAndPolyByRef(ref, &tile, &poly) != DT_SUCCESS) - return DT_FAILURE; - if (!tile) return DT_FAILURE; - - if (poly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - return DT_FAILURE; - - if (closestPointOnPolyInTile(tile, poly, pos, closest) != DT_SUCCESS) - return DT_FAILURE; - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::closestPointOnPolyInTile(const dtMeshTile* tile, const dtPoly* poly, - const float* pos, float* closest) const -{ - const unsigned int ip = (unsigned int)(poly - tile->polys); - const dtPolyDetail* pd = &tile->detailMeshes[ip]; - - // TODO: The commented out version finds 'cylinder distance' instead of 'sphere distance' to the navmesh. - // Test and enable. -/* - // Clamp point to be inside the polygon. - float verts[DT_VERTS_PER_POLYGON*3]; - float edged[DT_VERTS_PER_POLYGON]; - float edget[DT_VERTS_PER_POLYGON]; - const int nv = poly->vertCount; - for (int i = 0; i < nv; ++i) - dtVcopy(&verts[i*3], &tile->verts[poly->verts[i]*3]); - - dtVcopy(closest, pos); - if (!dtDistancePtPolyEdgesSqr(pos, verts, nv, edged, edget)) - { - // Point is outside the polygon, dtClamp to nearest edge. 
- float dmin = FLT_MAX; - int imin = -1; - for (int i = 0; i < nv; ++i) - { - if (edged[i] < dmin) - { - dmin = edged[i]; - imin = i; - } - } - const float* va = &verts[imin*3]; - const float* vb = &verts[((imin+1)%nv)*3]; - dtVlerp(closest, va, vb, edget[imin]); - } - - // Find height at the location. - for (int j = 0; j < pd->triCount; ++j) - { - const unsigned char* t = &tile->detailTris[(pd->triBase+j)*4]; - const float* v[3]; - for (int k = 0; k < 3; ++k) - { - if (t[k] < poly->vertCount) - v[k] = &tile->verts[poly->verts[t[k]]*3]; - else - v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3]; - } - float h; - if (dtClosestHeightPointTriangle(pos, v[0], v[1], v[2], h)) - { - closest[1] = h; - break; - } - } -*/ - float closestDistSqr = FLT_MAX; - for (int j = 0; j < pd->triCount; ++j) - { - const unsigned char* t = &tile->detailTris[(pd->triBase+j)*4]; - const float* v[3]; - for (int k = 0; k < 3; ++k) - { - if (t[k] < poly->vertCount) - v[k] = &tile->verts[poly->verts[t[k]]*3]; - else - v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3]; - } - - float pt[3]; - dtClosestPtPointTriangle(pt, pos, v[0], v[1], v[2]); - float d = dtVdistSqr(pos, pt); - - if (d < closestDistSqr) - { - dtVcopy(closest, pt); - closestDistSqr = d; - } - } - - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::closestPointOnPolyBoundary(dtPolyRef ref, const float* pos, float* closest) const -{ - dtAssert(m_nav); - - const dtMeshTile* tile = 0; - const dtPoly* poly = 0; - if (m_nav->getTileAndPolyByRef(ref, &tile, &poly) != DT_SUCCESS) - return DT_FAILURE; - - // Collect vertices. - float verts[DT_VERTS_PER_POLYGON*3]; - float edged[DT_VERTS_PER_POLYGON]; - float edget[DT_VERTS_PER_POLYGON]; - int nv = 0; - for (int i = 0; i < (int)poly->vertCount; ++i) - { - dtVcopy(&verts[nv*3], &tile->verts[poly->verts[i]*3]); - nv++; - } - - bool inside = dtDistancePtPolyEdgesSqr(pos, verts, nv, edged, edget); - if (inside) - { - // Point is inside the polygon, return the point. - dtVcopy(closest, pos); - } - else - { - // Point is outside the polygon, dtClamp to nearest edge. 
- float dmin = FLT_MAX; - int imin = -1; - for (int i = 0; i < nv; ++i) - { - if (edged[i] < dmin) - { - dmin = edged[i]; - imin = i; - } - } - const float* va = &verts[imin*3]; - const float* vb = &verts[((imin+1)%nv)*3]; - dtVlerp(closest, va, vb, edget[imin]); - } - - return DT_SUCCESS; -} - - -dtStatus dtNavMeshQuery::getPolyHeight(dtPolyRef ref, const float* pos, float* height) const -{ - dtAssert(m_nav); - - const dtMeshTile* tile = 0; - const dtPoly* poly = 0; - if (m_nav->getTileAndPolyByRef(ref, &tile, &poly) != DT_SUCCESS) - return DT_FAILURE; - - if (poly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - { - const float* v0 = &tile->verts[poly->verts[0]*3]; - const float* v1 = &tile->verts[poly->verts[1]*3]; - const float d0 = dtVdist(pos, v0); - const float d1 = dtVdist(pos, v1); - const float u = d0 / (d0+d1); - if (height) - *height = v0[1] + (v1[1] - v0[1]) * u; - return DT_SUCCESS; - } - else - { - const unsigned int ip = (unsigned int)(poly - tile->polys); - const dtPolyDetail* pd = &tile->detailMeshes[ip]; - for (int j = 0; j < pd->triCount; ++j) - { - const unsigned char* t = &tile->detailTris[(pd->triBase+j)*4]; - const float* v[3]; - for (int k = 0; k < 3; ++k) - { - if (t[k] < poly->vertCount) - v[k] = &tile->verts[poly->verts[t[k]]*3]; - else - v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3]; - } - float h; - if (dtClosestHeightPointTriangle(pos, v[0], v[1], v[2], h)) - { - if (height) - *height = h; - return DT_SUCCESS; - } - } - } - - return DT_FAILURE; -} - -dtStatus dtNavMeshQuery::findNearestPoly(const float* center, const float* extents, - const dtQueryFilter* filter, - dtPolyRef* nearestRef, float* nearestPt) const -{ - dtAssert(m_nav); - - *nearestRef = 0; - - // Get nearby polygons from proximity grid. - dtPolyRef polys[128]; - int polyCount = 0; - if (queryPolygons(center, extents, filter, polys, &polyCount, 128) != DT_SUCCESS) - return DT_FAILURE; - - // Find nearest polygon amongst the nearby polygons. - dtPolyRef nearest = 0; - float nearestDistanceSqr = FLT_MAX; - for (int i = 0; i < polyCount; ++i) - { - dtPolyRef ref = polys[i]; - float closestPtPoly[3]; - if (closestPointOnPoly(ref, center, closestPtPoly) != DT_SUCCESS) - continue; - float d = dtVdistSqr(center, closestPtPoly); - if (d < nearestDistanceSqr) - { - if (nearestPt) - dtVcopy(nearestPt, closestPtPoly); - nearestDistanceSqr = d; - nearest = ref; - } - } - - if (nearestRef) - *nearestRef = nearest; - - return DT_SUCCESS; -} - -dtPolyRef dtNavMeshQuery::findNearestPolyInTile(const dtMeshTile* tile, const float* center, const float* extents, - const dtQueryFilter* filter, float* nearestPt) const -{ - dtAssert(m_nav); - - float bmin[3], bmax[3]; - dtVsub(bmin, center, extents); - dtVadd(bmax, center, extents); - - // Get nearby polygons from proximity grid. - dtPolyRef polys[128]; - int polyCount = queryPolygonsInTile(tile, bmin, bmax, filter, polys, 128); - - // Find nearest polygon amongst the nearby polygons. 
- dtPolyRef nearest = 0; - float nearestDistanceSqr = FLT_MAX; - for (int i = 0; i < polyCount; ++i) - { - dtPolyRef ref = polys[i]; - const dtPoly* poly = &tile->polys[m_nav->decodePolyIdPoly(ref)]; - float closestPtPoly[3]; - if (closestPointOnPolyInTile(tile, poly, center, closestPtPoly) != DT_SUCCESS) - continue; - - float d = dtVdistSqr(center, closestPtPoly); - if (d < nearestDistanceSqr) - { - if (nearestPt) - dtVcopy(nearestPt, closestPtPoly); - nearestDistanceSqr = d; - nearest = ref; - } - } - - return nearest; -} - -int dtNavMeshQuery::queryPolygonsInTile(const dtMeshTile* tile, const float* qmin, const float* qmax, - const dtQueryFilter* filter, - dtPolyRef* polys, const int maxPolys) const -{ - dtAssert(m_nav); - - if (tile->bvTree) - { - const dtBVNode* node = &tile->bvTree[0]; - const dtBVNode* end = &tile->bvTree[tile->header->bvNodeCount]; - const float* tbmin = tile->header->bmin; - const float* tbmax = tile->header->bmax; - const float qfac = tile->header->bvQuantFactor; - - // Calculate quantized box - unsigned short bmin[3], bmax[3]; - // dtClamp query box to world box. - float minx = dtClamp(qmin[0], tbmin[0], tbmax[0]) - tbmin[0]; - float miny = dtClamp(qmin[1], tbmin[1], tbmax[1]) - tbmin[1]; - float minz = dtClamp(qmin[2], tbmin[2], tbmax[2]) - tbmin[2]; - float maxx = dtClamp(qmax[0], tbmin[0], tbmax[0]) - tbmin[0]; - float maxy = dtClamp(qmax[1], tbmin[1], tbmax[1]) - tbmin[1]; - float maxz = dtClamp(qmax[2], tbmin[2], tbmax[2]) - tbmin[2]; - // Quantize - bmin[0] = (unsigned short)(qfac * minx) & 0xfffe; - bmin[1] = (unsigned short)(qfac * miny) & 0xfffe; - bmin[2] = (unsigned short)(qfac * minz) & 0xfffe; - bmax[0] = (unsigned short)(qfac * maxx + 1) | 1; - bmax[1] = (unsigned short)(qfac * maxy + 1) | 1; - bmax[2] = (unsigned short)(qfac * maxz + 1) | 1; - - // Traverse tree - const dtPolyRef base = m_nav->getPolyRefBase(tile); - int n = 0; - while (node < end) - { - const bool overlap = dtOverlapQuantBounds(bmin, bmax, node->bmin, node->bmax); - const bool isLeafNode = node->i >= 0; - - if (isLeafNode && overlap) - { - dtPolyRef ref = base | (dtPolyRef)node->i; - if (filter->passFilter(ref, tile, &tile->polys[node->i])) - { - if (n < maxPolys) - polys[n++] = ref; - } - } - - if (overlap || isLeafNode) - node++; - else - { - const int escapeIndex = -node->i; - node += escapeIndex; - } - } - - return n; - } - else - { - float bmin[3], bmax[3]; - int n = 0; - const dtPolyRef base = m_nav->getPolyRefBase(tile); - for (int i = 0; i < tile->header->polyCount; ++i) - { - // Calc polygon bounds. - dtPoly* p = &tile->polys[i]; - // Do not return off-mesh connection polygons. - if (p->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - continue; - - const float* v = &tile->verts[p->verts[0]*3]; - dtVcopy(bmin, v); - dtVcopy(bmax, v); - for (int j = 1; j < p->vertCount; ++j) - { - v = &tile->verts[p->verts[j]*3]; - dtVmin(bmin, v); - dtVmax(bmax, v); - } - if (dtOverlapBounds(qmin,qmax, bmin,bmax)) - { - const dtPolyRef ref = base | (dtPolyRef)i; - if (filter->passFilter(ref, tile, p)) - { - if (n < maxPolys) - polys[n++] = ref; - } - } - } - return n; - } -} - -dtStatus dtNavMeshQuery::queryPolygons(const float* center, const float* extents, - const dtQueryFilter* filter, - dtPolyRef* polys, int* polyCount, const int maxPolys) const -{ - dtAssert(m_nav); - - float bmin[3], bmax[3]; - dtVsub(bmin, center, extents); - dtVadd(bmax, center, extents); - - // Find tiles the query touches. 
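// Sketch (assumption; the implementation is not shown in this hunk): calcTileLoc()
// presumably maps a world position onto the tile grid along the lines of
//   *tx = (int)floorf((pos[0] - m_orig[0]) / m_tileWidth);
//   *ty = (int)floorf((pos[2] - m_orig[2]) / m_tileHeight);
// which is why the bmin-derived indices below can come out negative once the extents
// have been subtracted above.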
- int minx, miny, maxx, maxy; - m_nav->calcTileLoc(bmin, &minx, &miny); - m_nav->calcTileLoc(bmax, &maxx, &maxy); - - int n = 0; - - /// pussywizard: additional checks as in PathGenerator::HaveTile - if (minx < 0) minx = 0; if (miny < 0) miny = 0; // min can be negative because we subtract extents (few lines above) - if (maxx < 0 || maxy < 0 || maxx-minx >= 64 /*MAX_NUMBER_OF_GRIDS*/ || maxy-miny >= 64) // max should never be negative - { - *polyCount = n; - return DT_SUCCESS; - } - - for (int y = miny; y <= maxy; ++y) - { - for (int x = minx; x <= maxx; ++x) - { - const dtMeshTile* tile = m_nav->getTileAt(x,y); - if (!tile) continue; - n += queryPolygonsInTile(tile, bmin, bmax, filter, polys+n, maxPolys-n); - if (n >= maxPolys) - { - *polyCount = n; - return DT_SUCCESS; - } - } - } - *polyCount = n; - - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::findPath(dtPolyRef startRef, dtPolyRef endRef, - const float* startPos, const float* endPos, - const dtQueryFilter* filter, - dtPolyRef* path, int* pathCount, const int maxPath) const -{ - dtAssert(m_nav); - dtAssert(m_nodePool); - dtAssert(m_openList); - - *pathCount = 0; - - if (!startRef || !endRef) - return DT_FAILURE; - - if (!maxPath) - return DT_FAILURE; - - // Validate input - if (!m_nav->isValidPolyRef(startRef) || !m_nav->isValidPolyRef(endRef)) - return DT_FAILURE; - - if (startRef == endRef) - { - path[0] = startRef; - *pathCount = 1; - return DT_SUCCESS; - } - - m_nodePool->clear(); - m_openList->clear(); - - dtNode* startNode = m_nodePool->getNode(startRef); - dtVcopy(startNode->pos, startPos); - startNode->pidx = 0; - startNode->cost = 0; - startNode->total = dtVdist(startPos, endPos) * H_SCALE; - startNode->id = startRef; - startNode->flags = DT_NODE_OPEN; - m_openList->push(startNode); - - dtNode* lastBestNode = startNode; - float lastBestNodeCost = startNode->total; - - while (!m_openList->empty()) - { - // Remove node from open list and put it in closed list. - dtNode* bestNode = m_openList->pop(); - bestNode->flags &= ~DT_NODE_OPEN; - bestNode->flags |= DT_NODE_CLOSED; - - // Reached the goal, stop searching. - if (bestNode->id == endRef) - { - lastBestNode = bestNode; - break; - } - - // Get current poly and tile. - // The API input has been cheked already, skip checking internal data. - const dtPolyRef bestRef = bestNode->id; - const dtMeshTile* bestTile = 0; - const dtPoly* bestPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(bestRef, &bestTile, &bestPoly); - - // Get parent poly and tile. - dtPolyRef parentRef = 0; - const dtMeshTile* parentTile = 0; - const dtPoly* parentPoly = 0; - if (bestNode->pidx) - parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id; - if (parentRef) - m_nav->getTileAndPolyByRefUnsafe(parentRef, &parentTile, &parentPoly); - - for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next) - { - dtPolyRef neighbourRef = bestTile->links[i].ref; - - // Skip invalid ids and do not expand back to where we came from. - if (!neighbourRef || neighbourRef == parentRef) - continue; - - // Get neighbour poly and tile. - // The API input has been cheked already, skip checking internal data. 
- const dtMeshTile* neighbourTile = 0; - const dtPoly* neighbourPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly); - - if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly)) - continue; - - dtNode* neighbourNode = m_nodePool->getNode(neighbourRef); - if (!neighbourNode) - continue; - - // If the node is visited the first time, calculate node position. - if (neighbourNode->flags == 0) - { - getEdgeMidPoint(bestRef, bestPoly, bestTile, - neighbourRef, neighbourPoly, neighbourTile, - neighbourNode->pos); - } - - // Calculate cost and heuristic. - float cost = 0; - float heuristic = 0; - - // Special case for last node. - if (neighbourRef == endRef) - { - // Cost - const float curCost = filter->getCost(bestNode->pos, neighbourNode->pos, - parentRef, parentTile, parentPoly, - bestRef, bestTile, bestPoly, - neighbourRef, neighbourTile, neighbourPoly); - const float endCost = filter->getCost(neighbourNode->pos, endPos, - bestRef, bestTile, bestPoly, - neighbourRef, neighbourTile, neighbourPoly, - 0, 0, 0); - - cost = bestNode->cost + curCost + endCost; - heuristic = 0; - } - else - { - // Cost - const float curCost = filter->getCost(bestNode->pos, neighbourNode->pos, - parentRef, parentTile, parentPoly, - bestRef, bestTile, bestPoly, - neighbourRef, neighbourTile, neighbourPoly); - cost = bestNode->cost + curCost; - heuristic = dtVdist(neighbourNode->pos, endPos)*H_SCALE; - } - - const float total = cost + heuristic; - - // The node is already in open list and the new result is worse, skip. - if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total) - continue; - // The node is already visited and process, and the new result is worse, skip. - if ((neighbourNode->flags & DT_NODE_CLOSED) && total >= neighbourNode->total) - continue; - - // Add or update the node. - neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode); - neighbourNode->id = neighbourRef; - neighbourNode->flags &= ~DT_NODE_CLOSED; - neighbourNode->cost = cost; - neighbourNode->total = total; - - if (neighbourNode->flags & DT_NODE_OPEN) - { - // Already in open, update node location. - m_openList->modify(neighbourNode); - } - else - { - // Put the node in open list. - neighbourNode->flags |= DT_NODE_OPEN; - m_openList->push(neighbourNode); - } - - // Update nearest node to target so far. - if (heuristic < lastBestNodeCost) - { - lastBestNodeCost = heuristic; - lastBestNode = neighbourNode; - } - } - } - - // Reverse the path. - dtNode* prev = 0; - dtNode* node = lastBestNode; - do - { - dtNode* next = m_nodePool->getNodeAtIdx(node->pidx); - node->pidx = m_nodePool->getNodeIdx(prev); - prev = node; - node = next; - } - while (node); - - // Store path - node = prev; - int n = 0; - do - { - path[n++] = node->id; - node = m_nodePool->getNodeAtIdx(node->pidx); - } - while (node && n < maxPath); - - *pathCount = n; - - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::initSlicedFindPath(dtPolyRef startRef, dtPolyRef endRef, - const float* startPos, const float* endPos, - const dtQueryFilter* filter) -{ - dtAssert(m_nav); - dtAssert(m_nodePool); - dtAssert(m_openList); - - // Init path state. 
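A minimal driver for the A* search above, written against the declarations in the header further down in this diff: snap the start and end positions to polygons with findNearestPoly(), then request a polygon corridor. Everything outside the Detour API (function name, navMesh, positions, buffer sizes) is a placeholder:

    // startPos/endPos: world-space float[3] positions supplied by the caller.
    static void planCorridor(const dtNavMesh* navMesh, const float* startPos, const float* endPos)
    {
        dtNavMeshQuery query;
        if (query.init(navMesh, 65535) != DT_SUCCESS)   // up to 65535 search nodes
            return;

        const float extents[3] = { 2.0f, 4.0f, 2.0f };  // search box for snapping to the mesh
        dtQueryFilter filter;

        dtPolyRef startRef = 0, endRef = 0;
        query.findNearestPoly(startPos, extents, &filter, &startRef, 0);  // nearestPt not needed
        query.findNearestPoly(endPos, extents, &filter, &endRef, 0);
        if (!startRef || !endRef)
            return;                                     // no polygon near one of the endpoints

        dtPolyRef corridor[256];
        int corridorLen = 0;
        if (query.findPath(startRef, endRef, startPos, endPos, &filter,
                           corridor, &corridorLen, 256) == DT_SUCCESS)
        {
            // If endRef is unreachable the corridor ends at the closest polygon found
            // (lastBestNode above), so corridor[corridorLen-1] may differ from endRef.
        }
    }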
- memset(&m_query, 0, sizeof(dtQueryData)); - m_query.status = DT_FAILURE; - m_query.startRef = startRef; - m_query.endRef = endRef; - dtVcopy(m_query.startPos, startPos); - dtVcopy(m_query.endPos, endPos); - m_query.filter = filter; - - if (!startRef || !endRef) - return DT_FAILURE; - - // Validate input - if (!m_nav->isValidPolyRef(startRef) || !m_nav->isValidPolyRef(endRef)) - return DT_FAILURE; - - if (startRef == endRef) - { - m_query.status = DT_SUCCESS; - return DT_SUCCESS; - } - - m_nodePool->clear(); - m_openList->clear(); - - dtNode* startNode = m_nodePool->getNode(startRef); - dtVcopy(startNode->pos, startPos); - startNode->pidx = 0; - startNode->cost = 0; - startNode->total = dtVdist(startPos, endPos) * H_SCALE; - startNode->id = startRef; - startNode->flags = DT_NODE_OPEN; - m_openList->push(startNode); - - m_query.status = DT_IN_PROGRESS; - m_query.lastBestNode = startNode; - m_query.lastBestNodeCost = startNode->total; - - return m_query.status; -} - -dtStatus dtNavMeshQuery::updateSlicedFindPath(const int maxIter) -{ - if (m_query.status!= DT_IN_PROGRESS) - return m_query.status; - - // Make sure the request is still valid. - if (!m_nav->isValidPolyRef(m_query.startRef) || !m_nav->isValidPolyRef(m_query.endRef)) - { - m_query.status = DT_FAILURE; - return DT_FAILURE; - } - - int iter = 0; - while (iter < maxIter && !m_openList->empty()) - { - iter++; - - // Remove node from open list and put it in closed list. - dtNode* bestNode = m_openList->pop(); - bestNode->flags &= ~DT_NODE_OPEN; - bestNode->flags |= DT_NODE_CLOSED; - - // Reached the goal, stop searching. - if (bestNode->id == m_query.endRef) - { - m_query.lastBestNode = bestNode; - m_query.status = DT_SUCCESS; - return m_query.status; - } - - // Get current poly and tile. - // The API input has been cheked already, skip checking internal data. - const dtPolyRef bestRef = bestNode->id; - const dtMeshTile* bestTile = 0; - const dtPoly* bestPoly = 0; - if (m_nav->getTileAndPolyByRef(bestRef, &bestTile, &bestPoly) != DT_SUCCESS) - { - // The polygon has disappeared during the sliced query, fail. - m_query.status = DT_FAILURE; - return m_query.status; - } - - // Get parent poly and tile. - dtPolyRef parentRef = 0; - const dtMeshTile* parentTile = 0; - const dtPoly* parentPoly = 0; - if (bestNode->pidx) - parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id; - if (parentRef) - { - if (m_nav->getTileAndPolyByRef(parentRef, &parentTile, &parentPoly) != DT_SUCCESS) - { - // The polygon has disappeared during the sliced query, fail. - m_query.status = DT_FAILURE; - return m_query.status; - } - } - - for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next) - { - dtPolyRef neighbourRef = bestTile->links[i].ref; - - // Skip invalid ids and do not expand back to where we came from. - if (!neighbourRef || neighbourRef == parentRef) - continue; - - // Get neighbour poly and tile. - // The API input has been cheked already, skip checking internal data. - const dtMeshTile* neighbourTile = 0; - const dtPoly* neighbourPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly); - - if (!m_query.filter->passFilter(neighbourRef, neighbourTile, neighbourPoly)) - continue; - - dtNode* neighbourNode = m_nodePool->getNode(neighbourRef); - if (!neighbourNode) - continue; - - // If the node is visited the first time, calculate node position. 
- if (neighbourNode->flags == 0) - { - getEdgeMidPoint(bestRef, bestPoly, bestTile, - neighbourRef, neighbourPoly, neighbourTile, - neighbourNode->pos); - } - - // Calculate cost and heuristic. - float cost = 0; - float heuristic = 0; - - // Special case for last node. - if (neighbourRef == m_query.endRef) - { - // Cost - const float curCost = m_query.filter->getCost(bestNode->pos, neighbourNode->pos, - parentRef, parentTile, parentPoly, - bestRef, bestTile, bestPoly, - neighbourRef, neighbourTile, neighbourPoly); - const float endCost = m_query.filter->getCost(neighbourNode->pos, m_query.endPos, - bestRef, bestTile, bestPoly, - neighbourRef, neighbourTile, neighbourPoly, - 0, 0, 0); - - cost = bestNode->cost + curCost + endCost; - heuristic = 0; - } - else - { - // Cost - const float curCost = m_query.filter->getCost(bestNode->pos, neighbourNode->pos, - parentRef, parentTile, parentPoly, - bestRef, bestTile, bestPoly, - neighbourRef, neighbourTile, neighbourPoly); - cost = bestNode->cost + curCost; - heuristic = dtVdist(neighbourNode->pos, m_query.endPos)*H_SCALE; - } - - const float total = cost + heuristic; - - // The node is already in open list and the new result is worse, skip. - if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total) - continue; - // The node is already visited and process, and the new result is worse, skip. - if ((neighbourNode->flags & DT_NODE_CLOSED) && total >= neighbourNode->total) - continue; - - // Add or update the node. - neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode); - neighbourNode->id = neighbourRef; - neighbourNode->flags &= ~DT_NODE_CLOSED; - neighbourNode->cost = cost; - neighbourNode->total = total; - - if (neighbourNode->flags & DT_NODE_OPEN) - { - // Already in open, update node location. - m_openList->modify(neighbourNode); - } - else - { - // Put the node in open list. - neighbourNode->flags |= DT_NODE_OPEN; - m_openList->push(neighbourNode); - } - - // Update nearest node to target so far. - if (heuristic < m_query.lastBestNodeCost) - { - m_query.lastBestNodeCost = heuristic; - m_query.lastBestNode = neighbourNode; - } - } - } - - // Exhausted all nodes, but could not find path. - if (m_openList->empty()) - m_query.status = DT_SUCCESS; - - return m_query.status; -} - -dtStatus dtNavMeshQuery::finalizeSlicedFindPath(dtPolyRef* path, int* pathCount, const int maxPath) -{ - *pathCount = 0; - - if (m_query.status != DT_SUCCESS) - { - // Reset query. - memset(&m_query, 0, sizeof(dtQueryData)); - return DT_FAILURE; - } - - int n = 0; - - if (m_query.startRef == m_query.endRef) - { - // Special case: the search starts and ends at same poly. - path[n++] = m_query.startRef; - } - else - { - // Reverse the path. - dtAssert(m_query.lastBestNode); - dtNode* prev = 0; - dtNode* node = m_query.lastBestNode; - do - { - dtNode* next = m_nodePool->getNodeAtIdx(node->pidx); - node->pidx = m_nodePool->getNodeIdx(prev); - prev = node; - node = next; - } - while (node); - - // Store path - node = prev; - do - { - path[n++] = node->id; - node = m_nodePool->getNodeAtIdx(node->pidx); - } - while (node && n < maxPath); - } - - // Reset query. 
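The sliced variant above spreads the same search over several updates: initSlicedFindPath() seeds it, updateSlicedFindPath() performs a bounded number of node expansions, and finalizeSlicedFindPath() extracts the corridor once the status is no longer DT_IN_PROGRESS. A hypothetical driver; query, filter, the refs and positions are assumed to be set up as in the earlier sketch, and the per-call iteration budget is arbitrary:

    // Start the query. As the header below warns, only the filter pointer is stored,
    // so 'filter' must outlive the sliced query.
    dtStatus status = query.initSlicedFindPath(startRef, endRef, startPos, endPos, &filter);

    // Normally one call per frame; collapsed into a loop here. Budget: 64 expansions per call.
    while (status == DT_IN_PROGRESS)
        status = query.updateSlicedFindPath(64);

    dtPolyRef corridor[256];
    int corridorLen = 0;
    if (status == DT_SUCCESS)
        query.finalizeSlicedFindPath(corridor, &corridorLen, 256);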
- memset(&m_query, 0, sizeof(dtQueryData)); - - *pathCount = n; - - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::finalizeSlicedFindPathPartial(const dtPolyRef* existing, const int existingSize, - dtPolyRef* path, int* pathCount, const int maxPath) -{ - *pathCount = 0; - - if (existingSize == 0) - { - return DT_FAILURE; - } - - if (m_query.status != DT_SUCCESS && m_query.status != DT_IN_PROGRESS) - { - // Reset query. - memset(&m_query, 0, sizeof(dtQueryData)); - return DT_FAILURE; - } - - int n = 0; - - if (m_query.startRef == m_query.endRef) - { - // Special case: the search starts and ends at same poly. - path[n++] = m_query.startRef; - } - else - { - // Find furthest existing node that was visited. - dtNode* prev = 0; - dtNode* node = 0; - for (int i = existingSize-1; i >= 0; --i) - { - node = m_nodePool->findNode(existing[i]); - if (node) - break; - } - - if (!node) - { - return DT_FAILURE; - } - - // Reverse the path. - do - { - dtNode* next = m_nodePool->getNodeAtIdx(node->pidx); - node->pidx = m_nodePool->getNodeIdx(prev); - prev = node; - node = next; - } - while (node); - - // Store path - node = prev; - do - { - path[n++] = node->id; - node = m_nodePool->getNodeAtIdx(node->pidx); - } - while (node && n < maxPath); - } - - // Reset query. - memset(&m_query, 0, sizeof(dtQueryData)); - - *pathCount = n; - - return DT_SUCCESS; -} - - -dtStatus dtNavMeshQuery::findStraightPath(const float* startPos, const float* endPos, - const dtPolyRef* path, const int pathSize, - float* straightPath, unsigned char* straightPathFlags, dtPolyRef* straightPathRefs, - int* straightPathCount, const int maxStraightPath) const -{ - dtAssert(m_nav); - - *straightPathCount = 0; - - if (!maxStraightPath) - return DT_FAILURE; - - if (!path[0]) - return DT_FAILURE; - - int n = 0; - - // TODO: Should this be callers responsibility? - float closestStartPos[3]; - if (closestPointOnPolyBoundary(path[0], startPos, closestStartPos) != DT_SUCCESS) - return DT_FAILURE; - - // Add start point. - dtVcopy(&straightPath[n*3], closestStartPos); - if (straightPathFlags) - straightPathFlags[n] = DT_STRAIGHTPATH_START; - if (straightPathRefs) - straightPathRefs[n] = path[0]; - n++; - if (n >= maxStraightPath) - { - *straightPathCount = n; - return DT_SUCCESS; - } - - float closestEndPos[3]; - if (closestPointOnPolyBoundary(path[pathSize-1], endPos, closestEndPos) != DT_SUCCESS) - return DT_FAILURE; - - if (pathSize > 1) - { - float portalApex[3], portalLeft[3], portalRight[3]; - dtVcopy(portalApex, closestStartPos); - dtVcopy(portalLeft, portalApex); - dtVcopy(portalRight, portalApex); - int apexIndex = 0; - int leftIndex = 0; - int rightIndex = 0; - - unsigned char leftPolyType = 0; - unsigned char rightPolyType = 0; - - dtPolyRef leftPolyRef = path[0]; - dtPolyRef rightPolyRef = path[0]; - - for (int i = 0; i < pathSize; ++i) - { - float left[3], right[3]; - unsigned char fromType, toType; - - if (i+1 < pathSize) - { - // Next portal. - if (getPortalPoints(path[i], path[i+1], left, right, fromType, toType) != DT_SUCCESS) - { - if (closestPointOnPolyBoundary(path[i], endPos, closestEndPos) != DT_SUCCESS) - return DT_FAILURE; - - dtVcopy(&straightPath[n*3], closestEndPos); - if (straightPathFlags) - straightPathFlags[n] = 0; - if (straightPathRefs) - straightPathRefs[n] = path[i]; - n++; - - return DT_SUCCESS; - } - - // If starting really close the portal, advance. - if (i == 0) - { - float t; - if (dtDistancePtSegSqr2D(portalApex, left, right, t) < dtSqr(0.001f)) - continue; - } - } - else - { - // End of the path. 
- dtVcopy(left, closestEndPos); - dtVcopy(right, closestEndPos); - - fromType = toType = DT_POLYTYPE_GROUND; - } - - // Right vertex. - if (dtTriArea2D(portalApex, portalRight, right) <= 0.0f) - { - if (dtVequal(portalApex, portalRight) || dtTriArea2D(portalApex, portalLeft, right) > 0.0f) - { - dtVcopy(portalRight, right); - rightPolyRef = (i+1 < pathSize) ? path[i+1] : 0; - rightPolyType = toType; - rightIndex = i; - } - else - { - dtVcopy(portalApex, portalLeft); - apexIndex = leftIndex; - - unsigned char flags = 0; - if (!leftPolyRef) - flags = DT_STRAIGHTPATH_END; - else if (leftPolyType == DT_POLYTYPE_OFFMESH_CONNECTION) - flags = DT_STRAIGHTPATH_OFFMESH_CONNECTION; - dtPolyRef ref = leftPolyRef; - - if (!dtVequal(&straightPath[(n-1)*3], portalApex)) - { - // Append new vertex. - dtVcopy(&straightPath[n*3], portalApex); - if (straightPathFlags) - straightPathFlags[n] = flags; - if (straightPathRefs) - straightPathRefs[n] = ref; - n++; - // If reached end of path or there is no space to append more vertices, return. - if (flags == DT_STRAIGHTPATH_END || n >= maxStraightPath) - { - *straightPathCount = n; - return DT_SUCCESS; - } - } - else - { - // The vertices are equal, update flags and poly. - if (straightPathFlags) - straightPathFlags[n-1] = flags; - if (straightPathRefs) - straightPathRefs[n-1] = ref; - } - - dtVcopy(portalLeft, portalApex); - dtVcopy(portalRight, portalApex); - leftIndex = apexIndex; - rightIndex = apexIndex; - - // Restart - i = apexIndex; - - continue; - } - } - - // Left vertex. - if (dtTriArea2D(portalApex, portalLeft, left) >= 0.0f) - { - if (dtVequal(portalApex, portalLeft) || dtTriArea2D(portalApex, portalRight, left) < 0.0f) - { - dtVcopy(portalLeft, left); - leftPolyRef = (i+1 < pathSize) ? path[i+1] : 0; - leftPolyType = toType; - leftIndex = i; - } - else - { - dtVcopy(portalApex, portalRight); - apexIndex = rightIndex; - - unsigned char flags = 0; - if (!rightPolyRef) - flags = DT_STRAIGHTPATH_END; - else if (rightPolyType == DT_POLYTYPE_OFFMESH_CONNECTION) - flags = DT_STRAIGHTPATH_OFFMESH_CONNECTION; - dtPolyRef ref = rightPolyRef; - - if (!dtVequal(&straightPath[(n-1)*3], portalApex)) - { - // Append new vertex. - dtVcopy(&straightPath[n*3], portalApex); - if (straightPathFlags) - straightPathFlags[n] = flags; - if (straightPathRefs) - straightPathRefs[n] = ref; - n++; - // If reached end of path or there is no space to append more vertices, return. - if (flags == DT_STRAIGHTPATH_END || n >= maxStraightPath) - { - *straightPathCount = n; - return DT_SUCCESS; - } - } - else - { - // The vertices are equal, update flags and poly. - if (straightPathFlags) - straightPathFlags[n-1] = flags; - if (straightPathRefs) - straightPathRefs[n-1] = ref; - } - - dtVcopy(portalLeft, portalApex); - dtVcopy(portalRight, portalApex); - leftIndex = apexIndex; - rightIndex = apexIndex; - - // Restart - i = apexIndex; - - continue; - } - } - } - } - - // If the point already exists, remove it and add reappend the actual end location. - if (n > 0 && dtVequal(&straightPath[(n-1)*3], closestEndPos)) - n--; - - // Add end point. 
- if (n < maxStraightPath) - { - dtVcopy(&straightPath[n*3], closestEndPos); - if (straightPathFlags) - straightPathFlags[n] = DT_STRAIGHTPATH_END; - if (straightPathRefs) - straightPathRefs[n] = 0; - n++; - } - - *straightPathCount = n; - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::moveAlongSurface(dtPolyRef startRef, const float* startPos, const float* endPos, - const dtQueryFilter* filter, - float* resultPos, dtPolyRef* visited, int* visitedCount, const int maxVisitedSize) const -{ - dtAssert(m_nav); - dtAssert(m_tinyNodePool); - - *visitedCount = 0; - - // Validate input - if (!startRef) return DT_FAILURE; - if (!m_nav->isValidPolyRef(startRef)) return DT_FAILURE; - - static const int MAX_STACK = 48; - dtNode* stack[MAX_STACK]; - int nstack = 0; - - m_tinyNodePool->clear(); - - dtNode* startNode = m_tinyNodePool->getNode(startRef); - startNode->pidx = 0; - startNode->cost = 0; - startNode->total = 0; - startNode->id = startRef; - startNode->flags = DT_NODE_CLOSED; - stack[nstack++] = startNode; - - float bestPos[3]; - float bestDist = FLT_MAX; - dtNode* bestNode = 0; - dtVcopy(bestPos, startPos); - - // Search constraints - float searchPos[3], searchRadSqr; - dtVlerp(searchPos, startPos, endPos, 0.5f); - searchRadSqr = dtSqr(dtVdist(startPos, endPos)/2.0f + 0.001f); - - float verts[DT_VERTS_PER_POLYGON*3]; - - while (nstack) - { - // Pop front. - dtNode* curNode = stack[0]; - for (int i = 0; i < nstack-1; ++i) - stack[i] = stack[i+1]; - nstack--; - - // Get poly and tile. - // The API input has been cheked already, skip checking internal data. - const dtPolyRef curRef = curNode->id; - const dtMeshTile* curTile = 0; - const dtPoly* curPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(curRef, &curTile, &curPoly); - - // Collect vertices. - const int nverts = curPoly->vertCount; - for (int i = 0; i < nverts; ++i) - dtVcopy(&verts[i*3], &curTile->verts[curPoly->verts[i]*3]); - - // If target is inside the poly, stop search. - if (dtPointInPolygon(endPos, verts, nverts)) - { - bestNode = curNode; - dtVcopy(bestPos, endPos); - break; - } - - // Find wall edges and find nearest point inside the walls. - for (int i = 0, j = (int)curPoly->vertCount-1; i < (int)curPoly->vertCount; j = i++) - { - // Find links to neighbours. - static const int MAX_NEIS = 8; - int nneis = 0; - dtPolyRef neis[MAX_NEIS]; - - if (curPoly->neis[j] & DT_EXT_LINK) - { - // Tile border. - for (unsigned int k = curPoly->firstLink; k != DT_NULL_LINK; k = curTile->links[k].next) - { - const dtLink* link = &curTile->links[k]; - if (link->edge == j) - { - if (link->ref != 0) - { - const dtMeshTile* neiTile = 0; - const dtPoly* neiPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(link->ref, &neiTile, &neiPoly); - if (filter->passFilter(link->ref, neiTile, neiPoly)) - { - if (nneis < MAX_NEIS) - neis[nneis++] = link->ref; - } - } - } - } - } - else if (curPoly->neis[j]) - { - const unsigned int idx = (unsigned int)(curPoly->neis[j]-1); - const dtPolyRef ref = m_nav->getPolyRefBase(curTile) | idx; - if (filter->passFilter(ref, curTile, &curTile->polys[idx])) - { - // Internal edge, encode id. - neis[nneis++] = ref; - } - } - - if (!nneis) - { - // Wall edge, calc distance. - const float* vj = &verts[j*3]; - const float* vi = &verts[i*3]; - float tseg; - const float distSqr = dtDistancePtSegSqr2D(endPos, vj, vi, tseg); - if (distSqr < bestDist) - { - // Update nearest distance. 
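Once a corridor exists, findStraightPath(), which ends just above, pulls it tight into world-space waypoints using the funnel pass implemented there. A hypothetical continuation of the earlier corridor sketch; MAX_PTS and the steering hand-off are placeholders:

    static const int MAX_PTS = 64;
    float pts[MAX_PTS * 3];
    unsigned char ptFlags[MAX_PTS];
    dtPolyRef ptRefs[MAX_PTS];
    int nPts = 0;

    if (query.findStraightPath(startPos, endPos, corridor, corridorLen,
                               pts, ptFlags, ptRefs, &nPts, MAX_PTS) == DT_SUCCESS)
    {
        for (int i = 0; i < nPts; ++i)
        {
            const float* p = &pts[i * 3];
            if (ptFlags[i] & DT_STRAIGHTPATH_OFFMESH_CONNECTION)
            {
                // p is the entry point of an off-mesh link; ptRefs[i] is the polygon entered there.
            }
            // ... hand p to the agent's steering (placeholder).
        }
    }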
- dtVlerp(bestPos, vj,vi, tseg); - bestDist = distSqr; - bestNode = curNode; - } - } - else - { - for (int k = 0; k < nneis; ++k) - { - // Skip if no node can be allocated. - dtNode* neighbourNode = m_tinyNodePool->getNode(neis[k]); - if (!neighbourNode) - continue; - // Skip if already visited. - if (neighbourNode->flags & DT_NODE_CLOSED) - continue; - - // Skip the link if it is too far from search constraint. - // TODO: Maybe should use getPortalPoints(), but this one is way faster. - const float* vj = &verts[j*3]; - const float* vi = &verts[i*3]; - float tseg; - float distSqr = dtDistancePtSegSqr2D(searchPos, vj, vi, tseg); - if (distSqr > searchRadSqr) - continue; - - // Mark as the node as visited and push to queue. - if (nstack < MAX_STACK) - { - neighbourNode->pidx = m_tinyNodePool->getNodeIdx(curNode); - neighbourNode->flags |= DT_NODE_CLOSED; - stack[nstack++] = neighbourNode; - } - } - } - } - } - - int n = 0; - if (bestNode) - { - // Reverse the path. - dtNode* prev = 0; - dtNode* node = bestNode; - do - { - dtNode* next = m_tinyNodePool->getNodeAtIdx(node->pidx); - node->pidx = m_tinyNodePool->getNodeIdx(prev); - prev = node; - node = next; - } - while (node); - - // Store result - node = prev; - do - { - visited[n++] = node->id; - node = m_tinyNodePool->getNodeAtIdx(node->pidx); - } - while (node && n < maxVisitedSize); - } - - dtVcopy(resultPos, bestPos); - - *visitedCount = n; - - return DT_SUCCESS; -} - - -dtStatus dtNavMeshQuery::getPortalPoints(dtPolyRef from, dtPolyRef to, float* left, float* right, - unsigned char& fromType, unsigned char& toType) const -{ - dtAssert(m_nav); - - const dtMeshTile* fromTile = 0; - const dtPoly* fromPoly = 0; - if (m_nav->getTileAndPolyByRef(from, &fromTile, &fromPoly) != DT_SUCCESS) - return DT_FAILURE; - fromType = fromPoly->getType(); - - const dtMeshTile* toTile = 0; - const dtPoly* toPoly = 0; - if (m_nav->getTileAndPolyByRef(to, &toTile, &toPoly) != DT_SUCCESS) - return DT_FAILURE; - toType = toPoly->getType(); - - return getPortalPoints(from, fromPoly, fromTile, to, toPoly, toTile, left, right); -} - -// Returns portal points between two polygons. -dtStatus dtNavMeshQuery::getPortalPoints(dtPolyRef from, const dtPoly* fromPoly, const dtMeshTile* fromTile, - dtPolyRef to, const dtPoly* toPoly, const dtMeshTile* toTile, - float* left, float* right) const -{ - // Find the link that points to the 'to' polygon. - const dtLink* link = 0; - for (unsigned int i = fromPoly->firstLink; i != DT_NULL_LINK; i = fromTile->links[i].next) - { - if (fromTile->links[i].ref == to) - { - link = &fromTile->links[i]; - break; - } - } - if (!link) - return DT_FAILURE; - - // Handle off-mesh connections. - if (fromPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - { - // Find link that points to first vertex. 
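moveAlongSurface(), finished above, is the cheap local search used for small per-tick moves: it slides the end position along wall edges instead of running a full path query. A hypothetical per-tick call; curRef, curPos and wantedPos are assumed agent state, and query/filter come from the earlier sketch:

    float newPos[3];
    dtPolyRef visited[16];
    int nVisited = 0;

    if (query.moveAlongSurface(curRef, curPos, wantedPos, &filter,
                               newPos, visited, &nVisited, 16) == DT_SUCCESS)
    {
        // newPos equals wantedPos when it was reachable, otherwise the closest point the
        // local search could slide to. The last entry of visited[] is the polygon the
        // result lies on. As noted above, the result is not height-adjusted, so a caller
        // would typically fix up the height on that polygon afterwards.
    }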
- for (unsigned int i = fromPoly->firstLink; i != DT_NULL_LINK; i = fromTile->links[i].next) - { - if (fromTile->links[i].ref == to) - { - const int v = fromTile->links[i].edge; - dtVcopy(left, &fromTile->verts[fromPoly->verts[v]*3]); - dtVcopy(right, &fromTile->verts[fromPoly->verts[v]*3]); - return DT_SUCCESS; - } - } - return DT_FAILURE; - } - - if (toPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - { - for (unsigned int i = toPoly->firstLink; i != DT_NULL_LINK; i = toTile->links[i].next) - { - if (toTile->links[i].ref == from) - { - const int v = toTile->links[i].edge; - dtVcopy(left, &toTile->verts[toPoly->verts[v]*3]); - dtVcopy(right, &toTile->verts[toPoly->verts[v]*3]); - return DT_SUCCESS; - } - } - return DT_FAILURE; - } - - // Find portal vertices. - const int v0 = fromPoly->verts[link->edge]; - const int v1 = fromPoly->verts[(link->edge+1) % (int)fromPoly->vertCount]; - dtVcopy(left, &fromTile->verts[v0*3]); - dtVcopy(right, &fromTile->verts[v1*3]); - - // If the link is at tile boundary, dtClamp the vertices to - // the link width. - if (link->side != 0xff) - { - // Unpack portal limits. - if (link->bmin != 0 || link->bmax != 255) - { - const float s = 1.0f/255.0f; - const float tmin = link->bmin*s; - const float tmax = link->bmax*s; - dtVlerp(left, &fromTile->verts[v0*3], &fromTile->verts[v1*3], tmin); - dtVlerp(right, &fromTile->verts[v0*3], &fromTile->verts[v1*3], tmax); - } - } - - return DT_SUCCESS; -} - -// Returns edge mid point between two polygons. -dtStatus dtNavMeshQuery::getEdgeMidPoint(dtPolyRef from, dtPolyRef to, float* mid) const -{ - float left[3], right[3]; - unsigned char fromType, toType; - if (!getPortalPoints(from, to, left,right, fromType, toType)) return DT_FAILURE; - mid[0] = (left[0]+right[0])*0.5f; - mid[1] = (left[1]+right[1])*0.5f; - mid[2] = (left[2]+right[2])*0.5f; - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::getEdgeMidPoint(dtPolyRef from, const dtPoly* fromPoly, const dtMeshTile* fromTile, - dtPolyRef to, const dtPoly* toPoly, const dtMeshTile* toTile, - float* mid) const -{ - float left[3], right[3]; - if (getPortalPoints(from, fromPoly, fromTile, to, toPoly, toTile, left, right) != DT_SUCCESS) - return DT_FAILURE; - mid[0] = (left[0]+right[0])*0.5f; - mid[1] = (left[1]+right[1])*0.5f; - mid[2] = (left[2]+right[2])*0.5f; - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::raycast(dtPolyRef startRef, const float* startPos, const float* endPos, - const dtQueryFilter* filter, - float* t, float* hitNormal, dtPolyRef* path, int* pathCount, const int maxPath) const -{ - dtAssert(m_nav); - - *t = 0; - if (pathCount) - *pathCount = 0; - - // Validate input - if (!startRef || !m_nav->isValidPolyRef(startRef)) - return DT_FAILURE; - - dtPolyRef curRef = startRef; - float verts[DT_VERTS_PER_POLYGON*3]; - int n = 0; - - hitNormal[0] = 0; - hitNormal[1] = 0; - hitNormal[2] = 0; - - while (curRef) - { - // Cast ray against current polygon. - - // The API input has been cheked already, skip checking internal data. - const dtMeshTile* tile = 0; - const dtPoly* poly = 0; - m_nav->getTileAndPolyByRefUnsafe(curRef, &tile, &poly); - - // Collect vertices. - int nv = 0; - for (int i = 0; i < (int)poly->vertCount; ++i) - { - dtVcopy(&verts[nv*3], &tile->verts[poly->verts[i]*3]); - nv++; - } - - float tmin, tmax; - int segMin, segMax; - if (!dtIntersectSegmentPoly2D(startPos, endPos, verts, nv, tmin, tmax, segMin, segMax)) - { - // Could not hit the polygon, keep the old t and report hit. 
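Tile-boundary links store their portal limits as bytes, which getPortalPoints() above expands back to a [0,1] parameter before interpolating along the shared edge. A small worked example with made-up values; v0 and v1 are placeholder float[3] edge vertices:

    // A link spanning roughly the middle half of its edge stores bmin = 64, bmax = 191.
    const float s    = 1.0f / 255.0f;
    const float tmin = 64.0f  * s;   // ~0.251
    const float tmax = 191.0f * s;   // ~0.749

    // The portal endpoints are then lerped along the shared edge v0 -> v1.
    float left[3], right[3];
    dtVlerp(left,  v0, v1, tmin);
    dtVlerp(right, v0, v1, tmax);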
- if (pathCount) - *pathCount = n; - return DT_SUCCESS; - } - // Keep track of furthest t so far. - if (tmax > *t) - *t = tmax; - - // Store visited polygons. - if (n < maxPath) - path[n++] = curRef; - - // Ray end is completely inside the polygon. - if (segMax == -1) - { - *t = FLT_MAX; - if (pathCount) - *pathCount = n; - return DT_SUCCESS; - } - - // Follow neighbours. - dtPolyRef nextRef = 0; - - for (unsigned int i = poly->firstLink; i != DT_NULL_LINK; i = tile->links[i].next) - { - const dtLink* link = &tile->links[i]; - - // Find link which contains this edge. - if ((int)link->edge != segMax) - continue; - - // Get pointer to the next polygon. - const dtMeshTile* nextTile = 0; - const dtPoly* nextPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(link->ref, &nextTile, &nextPoly); - - // Skip off-mesh connections. - if (nextPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - continue; - - // Skip links based on filter. - if (!filter->passFilter(link->ref, nextTile, nextPoly)) - continue; - - // If the link is internal, just return the ref. - if (link->side == 0xff) - { - nextRef = link->ref; - break; - } - - // If the link is at tile boundary, - - // Check if the link spans the whole edge, and accept. - if (link->bmin == 0 && link->bmax == 255) - { - nextRef = link->ref; - break; - } - - // Check for partial edge links. - const int v0 = poly->verts[link->edge]; - const int v1 = poly->verts[(link->edge+1) % poly->vertCount]; - const float* left = &tile->verts[v0*3]; - const float* right = &tile->verts[v1*3]; - - // Check that the intersection lies inside the link portal. - if (link->side == 0 || link->side == 4) - { - // Calculate link size. - const float s = 1.0f/255.0f; - float lmin = left[2] + (right[2] - left[2])*(link->bmin*s); - float lmax = left[2] + (right[2] - left[2])*(link->bmax*s); - if (lmin > lmax) dtSwap(lmin, lmax); - - // Find Z intersection. - float z = startPos[2] + (endPos[2]-startPos[2])*tmax; - if (z >= lmin && z <= lmax) - { - nextRef = link->ref; - break; - } - } - else if (link->side == 2 || link->side == 6) - { - // Calculate link size. - const float s = 1.0f/255.0f; - float lmin = left[0] + (right[0] - left[0])*(link->bmin*s); - float lmax = left[0] + (right[0] - left[0])*(link->bmax*s); - if (lmin > lmax) dtSwap(lmin, lmax); - - // Find X intersection. - float x = startPos[0] + (endPos[0]-startPos[0])*tmax; - if (x >= lmin && x <= lmax) - { - nextRef = link->ref; - break; - } - } - } - - if (!nextRef) - { - // No neighbour, we hit a wall. - - // Calculate hit normal. - const int a = segMax; - const int b = segMax+1 < nv ? segMax+1 : 0; - const float* va = &verts[a*3]; - const float* vb = &verts[b*3]; - const float dx = vb[0] - va[0]; - const float dz = vb[2] - va[2]; - hitNormal[0] = dz; - hitNormal[1] = 0; - hitNormal[2] = -dx; - dtVnormalize(hitNormal); - - if (pathCount) - *pathCount = n; - return DT_SUCCESS; - } - - // No hit, advance to neighbour polygon. 
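raycast(), which continues just below, is a 2D walkability test along the mesh surface rather than a geometric ray cast. A hypothetical line-of-movement check built on the earlier query setup; targetPos is a placeholder and FLT_MAX needs <cfloat>:

    float t = 0.0f;
    float hitNormal[3];
    dtPolyRef rayPolys[32];
    int nRayPolys = 0;

    if (query.raycast(startRef, startPos, targetPos, &filter,
                      &t, hitNormal, rayPolys, &nRayPolys, 32) == DT_SUCCESS)
    {
        if (t == FLT_MAX)
        {
            // The end position was reached without hitting a wall: walk straight.
        }
        else
        {
            // Wall hit at startPos + (targetPos - startPos) * t; hitNormal is the wall's 2D normal.
            float hitPos[3];
            dtVlerp(hitPos, startPos, targetPos, t);
        }
    }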
- curRef = nextRef; - } - - if (pathCount) - *pathCount = n; - - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::findPolysAroundCircle(dtPolyRef startRef, const float* centerPos, const float radius, - const dtQueryFilter* filter, - dtPolyRef* resultRef, dtPolyRef* resultParent, float* resultCost, - int* resultCount, const int maxResult) const -{ - dtAssert(m_nav); - dtAssert(m_nodePool); - dtAssert(m_openList); - - *resultCount = 0; - - // Validate input - if (!startRef) return DT_FAILURE; - if (!m_nav->isValidPolyRef(startRef)) return DT_FAILURE; - - m_nodePool->clear(); - m_openList->clear(); - - dtNode* startNode = m_nodePool->getNode(startRef); - dtVcopy(startNode->pos, centerPos); - startNode->pidx = 0; - startNode->cost = 0; - startNode->total = 0; - startNode->id = startRef; - startNode->flags = DT_NODE_OPEN; - m_openList->push(startNode); - - int n = 0; - if (n < maxResult) - { - if (resultRef) - resultRef[n] = startNode->id; - if (resultParent) - resultParent[n] = 0; - if (resultCost) - resultCost[n] = 0; - ++n; - } - - const float radiusSqr = dtSqr(radius); - - while (!m_openList->empty()) - { - dtNode* bestNode = m_openList->pop(); - bestNode->flags &= ~DT_NODE_OPEN; - bestNode->flags |= DT_NODE_CLOSED; - - // Get poly and tile. - // The API input has been cheked already, skip checking internal data. - const dtPolyRef bestRef = bestNode->id; - const dtMeshTile* bestTile = 0; - const dtPoly* bestPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(bestRef, &bestTile, &bestPoly); - - // Get parent poly and tile. - dtPolyRef parentRef = 0; - const dtMeshTile* parentTile = 0; - const dtPoly* parentPoly = 0; - if (bestNode->pidx) - parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id; - if (parentRef) - m_nav->getTileAndPolyByRefUnsafe(parentRef, &parentTile, &parentPoly); - - for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next) - { - const dtLink* link = &bestTile->links[i]; - dtPolyRef neighbourRef = link->ref; - // Skip invalid neighbours and do not follow back to parent. - if (!neighbourRef || neighbourRef == parentRef) - continue; - - // Expand to neighbour - const dtMeshTile* neighbourTile = 0; - const dtPoly* neighbourPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly); - - // Do not advance if the polygon is excluded by the filter. - if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly)) - continue; - - // Find edge and calc distance to the edge. - float va[3], vb[3]; - if (!getPortalPoints(bestRef, bestPoly, bestTile, neighbourRef, neighbourPoly, neighbourTile, va, vb)) - continue; - - // If the circle is not touching the next polygon, skip it. - float tseg; - float distSqr = dtDistancePtSegSqr2D(centerPos, va, vb, tseg); - if (distSqr > radiusSqr) - continue; - - dtNode* neighbourNode = m_nodePool->getNode(neighbourRef); - if (!neighbourNode) - continue; - - if (neighbourNode->flags & DT_NODE_CLOSED) - continue; - - // Cost - if (neighbourNode->flags == 0) - dtVlerp(neighbourNode->pos, va, vb, 0.5f); - - const float total = bestNode->total + dtVdist(bestNode->pos, neighbourNode->pos); - - // The node is already in open list and the new result is worse, skip. 
- if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total) - continue; - - neighbourNode->id = neighbourRef; - neighbourNode->flags &= ~DT_NODE_CLOSED; - neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode); - neighbourNode->total = total; - - if (neighbourNode->flags & DT_NODE_OPEN) - { - m_openList->modify(neighbourNode); - } - else - { - if (n < maxResult) - { - if (resultRef) - resultRef[n] = neighbourNode->id; - if (resultParent) - resultParent[n] = m_nodePool->getNodeAtIdx(neighbourNode->pidx)->id; - if (resultCost) - resultCost[n] = neighbourNode->total; - ++n; - } - neighbourNode->flags = DT_NODE_OPEN; - m_openList->push(neighbourNode); - } - } - } - - *resultCount = n; - - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::findPolysAroundShape(dtPolyRef startRef, const float* verts, const int nverts, - const dtQueryFilter* filter, - dtPolyRef* resultRef, dtPolyRef* resultParent, float* resultCost, - int* resultCount, const int maxResult) const -{ - dtAssert(m_nav); - dtAssert(m_nodePool); - dtAssert(m_openList); - - *resultCount = 0; - - // Validate input - if (!startRef) return DT_FAILURE; - if (!m_nav->isValidPolyRef(startRef)) return DT_FAILURE; - - m_nodePool->clear(); - m_openList->clear(); - - float centerPos[3] = {0,0,0}; - for (int i = 0; i < nverts; ++i) - dtVadd(centerPos,centerPos,&verts[i*3]); - dtVscale(centerPos,centerPos,1.0f/nverts); - - dtNode* startNode = m_nodePool->getNode(startRef); - dtVcopy(startNode->pos, centerPos); - startNode->pidx = 0; - startNode->cost = 0; - startNode->total = 0; - startNode->id = startRef; - startNode->flags = DT_NODE_OPEN; - m_openList->push(startNode); - - int n = 0; - if (n < maxResult) - { - if (resultRef) - resultRef[n] = startNode->id; - if (resultParent) - resultParent[n] = 0; - if (resultCost) - resultCost[n] = 0; - ++n; - } - - while (!m_openList->empty()) - { - dtNode* bestNode = m_openList->pop(); - bestNode->flags &= ~DT_NODE_OPEN; - bestNode->flags |= DT_NODE_CLOSED; - - // Get poly and tile. - // The API input has been cheked already, skip checking internal data. - const dtPolyRef bestRef = bestNode->id; - const dtMeshTile* bestTile = 0; - const dtPoly* bestPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(bestRef, &bestTile, &bestPoly); - - // Get parent poly and tile. - dtPolyRef parentRef = 0; - const dtMeshTile* parentTile = 0; - const dtPoly* parentPoly = 0; - if (bestNode->pidx) - parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id; - if (parentRef) - m_nav->getTileAndPolyByRefUnsafe(parentRef, &parentTile, &parentPoly); - - for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next) - { - const dtLink* link = &bestTile->links[i]; - dtPolyRef neighbourRef = link->ref; - // Skip invalid neighbours and do not follow back to parent. - if (!neighbourRef || neighbourRef == parentRef) - continue; - - // Expand to neighbour - const dtMeshTile* neighbourTile = 0; - const dtPoly* neighbourPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly); - - // Do not advance if the polygon is excluded by the filter. - if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly)) - continue; - - // Find edge and calc distance to the edge. - float va[3], vb[3]; - if (!getPortalPoints(bestRef, bestPoly, bestTile, neighbourRef, neighbourPoly, neighbourTile, va, vb)) - continue; - - // If the poly is not touching the edge to the next polygon, skip the connection it. 
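findPolysAroundCircle(), completed above, floods outward Dijkstra-style and is limited by accumulated travel distance through edge midpoints rather than by straight-line distance. A hypothetical area-of-effect query; centerPos and the radius are placeholders, query/filter/startRef come from the earlier sketch:

    static const int MAX_TOUCHED = 64;
    dtPolyRef touched[MAX_TOUCHED];
    dtPolyRef parents[MAX_TOUCHED];
    float costs[MAX_TOUCHED];
    int nTouched = 0;

    if (query.findPolysAroundCircle(startRef, centerPos, 10.0f, &filter,
                                    touched, parents, costs, &nTouched, MAX_TOUCHED) == DT_SUCCESS)
    {
        // touched[0] is startRef itself with cost 0; costs[i] is the accumulated
        // edge-midpoint travel distance, so it can exceed the Euclidean distance.
    }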
- float tmin, tmax; - int segMin, segMax; - if (!dtIntersectSegmentPoly2D(va, vb, verts, nverts, tmin, tmax, segMin, segMax)) - continue; - if (tmin > 1.0f || tmax < 0.0f) - continue; - - dtNode* neighbourNode = m_nodePool->getNode(neighbourRef); - if (!neighbourNode) - continue; - - if (neighbourNode->flags & DT_NODE_CLOSED) - continue; - - // Cost - if (neighbourNode->flags == 0) - dtVlerp(neighbourNode->pos, va, vb, 0.5f); - - const float total = bestNode->total + dtVdist(bestNode->pos, neighbourNode->pos); - - // The node is already in open list and the new result is worse, skip. - if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total) - continue; - - neighbourNode->id = neighbourRef; - neighbourNode->flags &= ~DT_NODE_CLOSED; - neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode); - neighbourNode->total = total; - - if (neighbourNode->flags & DT_NODE_OPEN) - { - m_openList->modify(neighbourNode); - } - else - { - if (n < maxResult) - { - if (resultRef) - resultRef[n] = neighbourNode->id; - if (resultParent) - resultParent[n] = m_nodePool->getNodeAtIdx(neighbourNode->pidx)->id; - if (resultCost) - resultCost[n] = neighbourNode->total; - ++n; - } - neighbourNode->flags = DT_NODE_OPEN; - m_openList->push(neighbourNode); - } - } - } - - *resultCount = n; - - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::findLocalNeighbourhood(dtPolyRef startRef, const float* centerPos, const float radius, - const dtQueryFilter* filter, - dtPolyRef* resultRef, dtPolyRef* resultParent, - int* resultCount, const int maxResult) const -{ - dtAssert(m_nav); - dtAssert(m_tinyNodePool); - - *resultCount = 0; - - // Validate input - if (!startRef) return DT_FAILURE; - if (!m_nav->isValidPolyRef(startRef)) return DT_FAILURE; - - static const int MAX_STACK = 48; - dtNode* stack[MAX_STACK]; - int nstack = 0; - - m_tinyNodePool->clear(); - - dtNode* startNode = m_tinyNodePool->getNode(startRef); - startNode->pidx = 0; - startNode->id = startRef; - startNode->flags = DT_NODE_CLOSED; - stack[nstack++] = startNode; - - const float radiusSqr = dtSqr(radius); - - float pa[DT_VERTS_PER_POLYGON*3]; - float pb[DT_VERTS_PER_POLYGON*3]; - - int n = 0; - if (n < maxResult) - { - resultRef[n] = startNode->id; - if (resultParent) - resultParent[n] = 0; - ++n; - } - - while (nstack) - { - // Pop front. - dtNode* curNode = stack[0]; - for (int i = 0; i < nstack-1; ++i) - stack[i] = stack[i+1]; - nstack--; - - // Get poly and tile. - // The API input has been cheked already, skip checking internal data. - const dtPolyRef curRef = curNode->id; - const dtMeshTile* curTile = 0; - const dtPoly* curPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(curRef, &curTile, &curPoly); - - for (unsigned int i = curPoly->firstLink; i != DT_NULL_LINK; i = curTile->links[i].next) - { - const dtLink* link = &curTile->links[i]; - dtPolyRef neighbourRef = link->ref; - // Skip invalid neighbours. - if (!neighbourRef) - continue; - - // Skip if cannot alloca more nodes. - dtNode* neighbourNode = m_tinyNodePool->getNode(neighbourRef); - if (!neighbourNode) - continue; - // Skip visited. - if (neighbourNode->flags & DT_NODE_CLOSED) - continue; - - // Expand to neighbour - const dtMeshTile* neighbourTile = 0; - const dtPoly* neighbourPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly); - - // Skip off-mesh connections. - if (neighbourPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - continue; - - // Do not advance if the polygon is excluded by the filter. 
- if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly)) - continue; - - // Find edge and calc distance to the edge. - float va[3], vb[3]; - if (!getPortalPoints(curRef, curPoly, curTile, neighbourRef, neighbourPoly, neighbourTile, va, vb)) - continue; - - // If the circle is not touching the next polygon, skip it. - float tseg; - float distSqr = dtDistancePtSegSqr2D(centerPos, va, vb, tseg); - if (distSqr > radiusSqr) - continue; - - // Mark node visited, this is done before the overlap test so that - // we will not visit the poly again if the test fails. - neighbourNode->flags |= DT_NODE_CLOSED; - neighbourNode->pidx = m_tinyNodePool->getNodeIdx(curNode); - - // Check that the polygon does not collide with existing polygons. - - // Collect vertices of the neighbour poly. - const int npa = neighbourPoly->vertCount; - for (int k = 0; k < npa; ++k) - dtVcopy(&pa[k*3], &neighbourTile->verts[neighbourPoly->verts[k]*3]); - - bool overlap = false; - for (int j = 0; j < n; ++j) - { - dtPolyRef pastRef = resultRef[j]; - - // Connected polys do not overlap. - bool connected = false; - for (unsigned int k = curPoly->firstLink; k != DT_NULL_LINK; k = curTile->links[k].next) - { - if (curTile->links[k].ref == pastRef) - { - connected = true; - break; - } - } - if (connected) - continue; - - // Potentially overlapping. - const dtMeshTile* pastTile = 0; - const dtPoly* pastPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(pastRef, &pastTile, &pastPoly); - - // Get vertices and test overlap - const int npb = pastPoly->vertCount; - for (int k = 0; k < npb; ++k) - dtVcopy(&pb[k*3], &pastTile->verts[pastPoly->verts[k]*3]); - - if (dtOverlapPolyPoly2D(pa,npa, pb,npb)) - { - overlap = true; - break; - } - } - if (overlap) - continue; - - // This poly is fine, store and advance to the poly. - if (n < maxResult) - { - resultRef[n] = neighbourRef; - if (resultParent) - resultParent[n] = curRef; - ++n; - } - - if (nstack < MAX_STACK) - { - stack[nstack++] = neighbourNode; - } - } - } - - *resultCount = n; - - return DT_SUCCESS; -} - - -struct dtSegInterval -{ - short tmin, tmax; -}; - -static void insertInterval(dtSegInterval* ints, int& nints, const int maxInts, - const short tmin, const short tmax) -{ - if (nints+1 > maxInts) return; - // Find insertion point. - int idx = 0; - while (idx < nints) - { - if (tmax <= ints[idx].tmin) - break; - idx++; - } - // Move current results. - if (nints-idx) - memmove(ints+idx+1, ints+idx, sizeof(dtSegInterval)*(nints-idx)); - // Store - ints[idx].tmin = tmin; - ints[idx].tmax = tmax; - nints++; -} - -dtStatus dtNavMeshQuery::getPolyWallSegments(dtPolyRef ref, const dtQueryFilter* filter, - float* segments, int* segmentCount, const int maxSegments) const -{ - dtAssert(m_nav); - - *segmentCount = 0; - - const dtMeshTile* tile = 0; - const dtPoly* poly = 0; - if (m_nav->getTileAndPolyByRef(ref, &tile, &poly) != DT_SUCCESS) - return DT_FAILURE; - - int n = 0; - static const int MAX_INTERVAL = 16; - dtSegInterval ints[MAX_INTERVAL]; - int nints; - - for (int i = 0, j = (int)poly->vertCount-1; i < (int)poly->vertCount; j = i++) - { - // Skip non-solid edges. - nints = 0; - if (poly->neis[j] & DT_EXT_LINK) - { - // Tile border. 
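findLocalNeighbourhood(), finished above, is the non-overlapping variant used for local steering: it walks the graph with the small fixed node pool and drops polygons whose 2D outline overlaps ones already collected. A hypothetical call around an agent; curRef and curPos are assumed agent state:

    dtPolyRef neis[32];
    dtPolyRef neiParents[32];
    int nNeis = 0;

    if (query.findLocalNeighbourhood(curRef, curPos, 3.0f, &filter,
                                     neis, neiParents, &nNeis, 32) == DT_SUCCESS)
    {
        // neis[] is typically fed straight into getPolyWallSegments() below
        // to gather the nearby wall edges for obstacle avoidance.
    }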
- for (unsigned int k = poly->firstLink; k != DT_NULL_LINK; k = tile->links[k].next) - { - const dtLink* link = &tile->links[k]; - if (link->edge == j) - { - if (link->ref != 0) - { - const dtMeshTile* neiTile = 0; - const dtPoly* neiPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(link->ref, &neiTile, &neiPoly); - if (filter->passFilter(link->ref, neiTile, neiPoly)) - { - insertInterval(ints, nints, MAX_INTERVAL, link->bmin, link->bmax); - } - } - } - } - } - else if (poly->neis[j]) - { - // Internal edge - const unsigned int idx = (unsigned int)(poly->neis[j]-1); - const dtPolyRef ref = m_nav->getPolyRefBase(tile) | idx; - if (filter->passFilter(ref, tile, &tile->polys[idx])) - continue; - } - - // Add sentinels - insertInterval(ints, nints, MAX_INTERVAL, -1, 0); - insertInterval(ints, nints, MAX_INTERVAL, 255, 256); - - // Store segment. - const float* vj = &tile->verts[poly->verts[j]*3]; - const float* vi = &tile->verts[poly->verts[i]*3]; - for (int k = 1; k < nints; ++k) - { - // Find the space inbetween the opening areas. - const int imin = ints[k-1].tmax; - const int imax = ints[k].tmin; - if (imin == imax) continue; - if (imin == 0 && imax == 255) - { - if (n < maxSegments) - { - float* seg = &segments[n*6]; - n++; - dtVcopy(seg+0, vj); - dtVcopy(seg+3, vi); - } - } - else - { - const float tmin = imin/255.0f; - const float tmax = imax/255.0f; - if (n < maxSegments) - { - float* seg = &segments[n*6]; - n++; - dtVlerp(seg+0, vj,vi, tmin); - dtVlerp(seg+3, vj,vi, tmax); - } - } - } - } - - *segmentCount = n; - - return DT_SUCCESS; -} - -dtStatus dtNavMeshQuery::findDistanceToWall(dtPolyRef startRef, const float* centerPos, const float maxRadius, - const dtQueryFilter* filter, - float* hitDist, float* hitPos, float* hitNormal) const -{ - dtAssert(m_nav); - dtAssert(m_nodePool); - dtAssert(m_openList); - - // Validate input - if (!startRef) return DT_FAILURE; - if (!m_nav->isValidPolyRef(startRef)) return DT_FAILURE; - - m_nodePool->clear(); - m_openList->clear(); - - dtNode* startNode = m_nodePool->getNode(startRef); - dtVcopy(startNode->pos, centerPos); - startNode->pidx = 0; - startNode->cost = 0; - startNode->total = 0; - startNode->id = startRef; - startNode->flags = DT_NODE_OPEN; - m_openList->push(startNode); - - float radiusSqr = dtSqr(maxRadius); - - while (!m_openList->empty()) - { - dtNode* bestNode = m_openList->pop(); - bestNode->flags &= ~DT_NODE_OPEN; - bestNode->flags |= DT_NODE_CLOSED; - - // Get poly and tile. - // The API input has been cheked already, skip checking internal data. - const dtPolyRef bestRef = bestNode->id; - const dtMeshTile* bestTile = 0; - const dtPoly* bestPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(bestRef, &bestTile, &bestPoly); - - // Get parent poly and tile. - dtPolyRef parentRef = 0; - const dtMeshTile* parentTile = 0; - const dtPoly* parentPoly = 0; - if (bestNode->pidx) - parentRef = m_nodePool->getNodeAtIdx(bestNode->pidx)->id; - if (parentRef) - m_nav->getTileAndPolyByRefUnsafe(parentRef, &parentTile, &parentPoly); - - // Hit test walls. - for (int i = 0, j = (int)bestPoly->vertCount-1; i < (int)bestPoly->vertCount; j = i++) - { - // Skip non-solid edges. - if (bestPoly->neis[j] & DT_EXT_LINK) - { - // Tile border. 
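getPolyWallSegments(), completed above, packs each wall segment as two consecutive xyz endpoints, six floats per segment. A hypothetical continuation of the neighbourhood sketch; the per-polygon segment capacity is a guess:

    static const int MAX_SEGS = 8;        // placeholder capacity per polygon
    float segs[MAX_SEGS * 6];
    int nSegs = 0;

    for (int i = 0; i < nNeis; ++i)
    {
        if (query.getPolyWallSegments(neis[i], &filter, segs, &nSegs, MAX_SEGS) != DT_SUCCESS)
            continue;
        for (int j = 0; j < nSegs; ++j)
        {
            const float* a = &segs[j * 6 + 0];   // segment start
            const float* b = &segs[j * 6 + 3];   // segment end
            // ... add the wall (a, b) to the agent's local boundary (placeholder).
        }
    }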
- bool solid = true; - for (unsigned int k = bestPoly->firstLink; k != DT_NULL_LINK; k = bestTile->links[k].next) - { - const dtLink* link = &bestTile->links[k]; - if (link->edge == j) - { - if (link->ref != 0) - { - const dtMeshTile* neiTile = 0; - const dtPoly* neiPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(link->ref, &neiTile, &neiPoly); - if (filter->passFilter(link->ref, neiTile, neiPoly)) - solid = false; - } - break; - } - } - if (!solid) continue; - } - else if (bestPoly->neis[j]) - { - // Internal edge - const unsigned int idx = (unsigned int)(bestPoly->neis[j]-1); - const dtPolyRef ref = m_nav->getPolyRefBase(bestTile) | idx; - if (filter->passFilter(ref, bestTile, &bestTile->polys[idx])) - continue; - } - - // Calc distance to the edge. - const float* vj = &bestTile->verts[bestPoly->verts[j]*3]; - const float* vi = &bestTile->verts[bestPoly->verts[i]*3]; - float tseg; - float distSqr = dtDistancePtSegSqr2D(centerPos, vj, vi, tseg); - - // Edge is too far, skip. - if (distSqr > radiusSqr) - continue; - - // Hit wall, update radius. - radiusSqr = distSqr; - // Calculate hit pos. - hitPos[0] = vj[0] + (vi[0] - vj[0])*tseg; - hitPos[1] = vj[1] + (vi[1] - vj[1])*tseg; - hitPos[2] = vj[2] + (vi[2] - vj[2])*tseg; - } - - for (unsigned int i = bestPoly->firstLink; i != DT_NULL_LINK; i = bestTile->links[i].next) - { - const dtLink* link = &bestTile->links[i]; - dtPolyRef neighbourRef = link->ref; - // Skip invalid neighbours and do not follow back to parent. - if (!neighbourRef || neighbourRef == parentRef) - continue; - - // Expand to neighbour. - const dtMeshTile* neighbourTile = 0; - const dtPoly* neighbourPoly = 0; - m_nav->getTileAndPolyByRefUnsafe(neighbourRef, &neighbourTile, &neighbourPoly); - - // Skip off-mesh connections. - if (neighbourPoly->getType() == DT_POLYTYPE_OFFMESH_CONNECTION) - continue; - - // Calc distance to the edge. - const float* va = &bestTile->verts[bestPoly->verts[link->edge]*3]; - const float* vb = &bestTile->verts[bestPoly->verts[(link->edge+1) % bestPoly->vertCount]*3]; - float tseg; - float distSqr = dtDistancePtSegSqr2D(centerPos, va, vb, tseg); - - // If the circle is not touching the next polygon, skip it. - if (distSqr > radiusSqr) - continue; - - if (!filter->passFilter(neighbourRef, neighbourTile, neighbourPoly)) - continue; - - dtNode* neighbourNode = m_nodePool->getNode(neighbourRef); - if (!neighbourNode) - continue; - - if (neighbourNode->flags & DT_NODE_CLOSED) - continue; - - // Cost - if (neighbourNode->flags == 0) - { - getEdgeMidPoint(bestRef, bestPoly, bestTile, - neighbourRef, neighbourPoly, neighbourTile, neighbourNode->pos); - } - - const float total = bestNode->total + dtVdist(bestNode->pos, neighbourNode->pos); - - // The node is already in open list and the new result is worse, skip. - if ((neighbourNode->flags & DT_NODE_OPEN) && total >= neighbourNode->total) - continue; - - neighbourNode->id = neighbourRef; - neighbourNode->flags &= ~DT_NODE_CLOSED; - neighbourNode->pidx = m_nodePool->getNodeIdx(bestNode); - neighbourNode->total = total; - - if (neighbourNode->flags & DT_NODE_OPEN) - { - m_openList->modify(neighbourNode); - } - else - { - neighbourNode->flags |= DT_NODE_OPEN; - m_openList->push(neighbourNode); - } - } - } - - // Calc hit normal. 
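findDistanceToWall(), concluded just below, keeps shrinking its search radius each time a closer solid edge is found, so the reported distance is to the nearest wall that the filter treats as solid. A hypothetical clearance check; curRef and curPos are assumed agent state:

    float wallDist = 0.0f;
    float wallHit[3];
    float wallNormal[3];

    if (query.findDistanceToWall(curRef, curPos, 5.0f, &filter,
                                 &wallDist, wallHit, wallNormal) == DT_SUCCESS)
    {
        // wallDist equal to the 5.0f search radius means no wall was found inside it;
        // otherwise wallHit is the closest point on the wall and wallNormal points
        // from the wall back towards curPos (see the normalisation just below).
    }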
- dtVsub(hitNormal, centerPos, hitPos); - dtVnormalize(hitNormal); - - *hitDist = sqrtf(radiusSqr); - - return DT_SUCCESS; -} - -bool dtNavMeshQuery::isInClosedList(dtPolyRef ref) const -{ - if (!m_nodePool) return false; - const dtNode* node = m_nodePool->findNode(ref); - return node && node->flags & DT_NODE_CLOSED; -} diff --git a/deps/recastnavigation/Detour/DetourNavMeshQuery.h b/deps/recastnavigation/Detour/DetourNavMeshQuery.h deleted file mode 100644 index f5046d8329..0000000000 --- a/deps/recastnavigation/Detour/DetourNavMeshQuery.h +++ /dev/null @@ -1,407 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef DETOURNAVMESHQUERY_H -#define DETOURNAVMESHQUERY_H - -#include "DetourNavMesh.h" - - -// Define DT_VIRTUAL_QUERYFILTER if you wish to derive a custom filter from dtQueryFilter. -// On certain platforms indirect or virtual function call is expensive. The default -// setting is to use non-virtual functions, the actualy implementations of the functions -// are declared as inline for maximum speed. - -//#define DT_VIRTUAL_QUERYFILTER 1 - -// Class for polygon filtering and cost calculation during query operations. -// - It is possible to derive a custom query filter from dtQueryFilter by overriding -// the virtual functions passFilter() and getCost(). -// - Both functions should be as fast as possible. Use cached local copy of data -// instead of accessing your own objects where possible. -// - You do not need to adhere to the flags and cost logic provided by the default -// implementation. -// - In order for the A* to work properly, the cost should be proportional to -// the travel distance. Using cost modifier less than 1.0 is likely to lead -// to problems during pathfinding. -class dtQueryFilter -{ - float m_areaCost[DT_MAX_AREAS]; // Array storing cost per area type, used by default implementation. - unsigned short m_includeFlags; // Include poly flags, used by default implementation. - unsigned short m_excludeFlags; // Exclude poly flags, used by default implementation. - -public: - dtQueryFilter(); - - // Returns true if the polygon is can visited. - // Params: - // ref - (in) reference to the polygon test. - // tile - (in) pointer to the tile of the polygon test. - // poly - (in) pointer to the polygon test. -#ifdef DT_VIRTUAL_QUERYFILTER - virtual bool passFilter(const dtPolyRef ref, - const dtMeshTile* tile, - const dtPoly* poly) const; -#else - bool passFilter(const dtPolyRef ref, - const dtMeshTile* tile, - const dtPoly* poly) const; -#endif - - // Returns cost to travel from 'pa' to 'pb'.' - // The segment is fully contained inside 'cur'. 
- // 'pa' lies on the edge between 'prev' and 'cur', - // 'pb' lies on the edge between 'cur' and 'next'. - // Params: - // pa - (in) segment start position. - // pb - (in) segment end position. - // prevRef, prevTile, prevPoly - (in) data describing the previous polygon, can be null. - // curRef, curTile, curPoly - (in) data describing the current polygon. - // nextRef, nextTile, nextPoly - (in) data describing the next polygon, can be null. -#ifdef DT_VIRTUAL_QUERYFILTER - virtual float getCost(const float* pa, const float* pb, - const dtPolyRef prevRef, const dtMeshTile* prevTile, const dtPoly* prevPoly, - const dtPolyRef curRef, const dtMeshTile* curTile, const dtPoly* curPoly, - const dtPolyRef nextRef, const dtMeshTile* nextTile, const dtPoly* nextPoly) const; -#else - float getCost(const float* pa, const float* pb, - const dtPolyRef prevRef, const dtMeshTile* prevTile, const dtPoly* prevPoly, - const dtPolyRef curRef, const dtMeshTile* curTile, const dtPoly* curPoly, - const dtPolyRef nextRef, const dtMeshTile* nextTile, const dtPoly* nextPoly) const; -#endif - - // Getters and setters for the default implementation data. - inline float getAreaCost(const int i) const { return m_areaCost[i]; } - inline void setAreaCost(const int i, const float cost) { m_areaCost[i] = cost; } - - inline unsigned short getIncludeFlags() const { return m_includeFlags; } - inline void setIncludeFlags(const unsigned short flags) { m_includeFlags = flags; } - - inline unsigned short getExcludeFlags() const { return m_excludeFlags; } - inline void setExcludeFlags(const unsigned short flags) { m_excludeFlags = flags; } -}; - -class dtNavMeshQuery -{ -public: - dtNavMeshQuery(); - ~dtNavMeshQuery(); - - // Initializes the nav mesh query. - // Params: - // nav - (in) pointer to navigation mesh data. - // maxNodes - (in) Maximum number of search nodes to use (max 65536). - // Returns: True if succeed, else false. - dtStatus init(const dtNavMesh* nav, const int maxNodes); - - // Finds the nearest navigation polygon around the center location. - // Params: - // center[3] - (in) The center of the search box. - // extents[3] - (in) The extents of the search box. - // filter - (in) path polygon filter. - // nearestRef - (out) Reference to the nearest polygon. - // nearestPt[3] - (out, opt) The nearest point on found polygon, null if not needed. - // Returns: Reference identifier for the polygon, or 0 if no polygons found. - dtStatus findNearestPoly(const float* center, const float* extents, - const dtQueryFilter* filter, - dtPolyRef* nearestRef, float* nearestPt) const; - - // Returns polygons which overlap the query box. - // Params: - // center[3] - (in) the center of the search box. - // extents[3] - (in) the extents of the search box. - // filter - (in) path polygon filter. - // polys - (out) array holding the search result. - // polyCount - (out) Number of polygons in search result array. - // maxPolys - (in) The max number of polygons the polys array can hold. - dtStatus queryPolygons(const float* center, const float* extents, - const dtQueryFilter* filter, - dtPolyRef* polys, int* polyCount, const int maxPolys) const; - - // Finds path from start polygon to end polygon. - // If target polygon canno be reached through the navigation graph, - // the last node on the array is nearest node to the end polygon. - // Start end end positions are needed to calculate more accurate - // traversal cost at start end end polygons. - // Params: - // startRef - (in) ref to path start polygon. 
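The default dtQueryFilter implementation above combines an include/exclude flag test with a per-area cost multiplier. A hypothetical setup; the flag and area constants are application-side placeholders rather than Detour's, and, as the comment above warns, area costs below 1.0 risk making the heuristic misbehave:

    // Application-defined poly flags and area ids (placeholders).
    enum { POLYFLAGS_WALK = 0x01, POLYFLAGS_SWIM = 0x02, POLYFLAGS_DISABLED = 0x10 };
    enum { POLYAREA_GROUND = 0, POLYAREA_WATER = 1 };

    dtQueryFilter filter;
    filter.setIncludeFlags(POLYFLAGS_WALK | POLYFLAGS_SWIM);  // polygons the agent may use
    filter.setExcludeFlags(POLYFLAGS_DISABLED);               // polygons it must never use
    filter.setAreaCost(POLYAREA_GROUND, 1.0f);                // baseline cost
    filter.setAreaCost(POLYAREA_WATER, 10.0f);                // strongly discouraged, still allowed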
- // endRef - (in) ref to path end polygon. - // startPos[3] - (in) Path start location. - // endPos[3] - (in) Path end location. - // filter - (in) path polygon filter. - // path - (out) array holding the search result. - // pathCount - (out) Number of polygons in search result array. - // maxPath - (in) The max number of polygons the path array can hold. Must be at least 1. - dtStatus findPath(dtPolyRef startRef, dtPolyRef endRef, - const float* startPos, const float* endPos, - const dtQueryFilter* filter, - dtPolyRef* path, int* pathCount, const int maxPath) const; - - // Intializes sliced path find query. - // Note 1: calling any other dtNavMeshQuery method before calling findPathEnd() - // may results in corrupted data! - // Note 2: The pointer to filter is store, and used in subsequent - // calls to updateSlicedFindPath(). - // Params: - // startRef - (in) ref to path start polygon. - // endRef - (in) ref to path end polygon. - // startPos[3] - (in) Path start location. - // endPos[3] - (in) Path end location. - // filter - (in) path polygon filter. - dtStatus initSlicedFindPath(dtPolyRef startRef, dtPolyRef endRef, - const float* startPos, const float* endPos, - const dtQueryFilter* filter); - - // Updates sliced path find query. - // Params: - // maxIter - (in) max number of iterations to update. - // Returns: Path query state. - dtStatus updateSlicedFindPath(const int maxIter); - - // Finalizes sliced path find query and returns found path. - // path - (out) array holding the search result. - // pathCount - (out) Number of polygons in search result array. - // maxPath - (in) The max number of polygons the path array can hold. - dtStatus finalizeSlicedFindPath(dtPolyRef* path, int* pathCount, const int maxPath); - - // Finalizes partial sliced path find query and returns path to the furthest - // polygon on the existing path that was visited during the search. - // existing - (out) Array of polygons in the existing path. - // existingSize - (out) Number of polygons in existing path array. - // path - (out) array holding the search result. - // pathCount - (out) Number of polygons in search result array. - // maxPath - (in) The max number of polygons the path array can hold. - dtStatus finalizeSlicedFindPathPartial(const dtPolyRef* existing, const int existingSize, - dtPolyRef* path, int* pathCount, const int maxPath); - - // Finds a straight path from start to end locations within the corridor - // described by the path polygons. - // Start and end locations will be clamped on the corridor. - // The returned polygon references are point to polygon which was entered when - // a path point was added. For the end point, zero will be returned. This allows - // to match for example off-mesh link points to their representative polygons. - // Params: - // startPos[3] - (in) Path start location. - // endPo[3] - (in) Path end location. - // path - (in) Array of connected polygons describing the corridor. - // pathSize - (in) Number of polygons in path array. - // straightPath - (out) Points describing the straight path. - // straightPathFlags - (out, opt) Flags describing each point type, see dtStraightPathFlags. - // straightPathRefs - (out, opt) References to polygons at point locations. - // straightPathCount - (out) Number of points in the path. - // maxStraightPath - (in) The max number of points the straight path array can hold. Must be at least 1. 
- dtStatus findStraightPath(const float* startPos, const float* endPos, - const dtPolyRef* path, const int pathSize, - float* straightPath, unsigned char* straightPathFlags, dtPolyRef* straightPathRefs, - int* straightPathCount, const int maxStraightPath) const; - - // Moves from startPos to endPos constrained to the navmesh. - // If the endPos is reachable, the resultPos will be endPos, - // or else the resultPos will be the nearest point in navmesh. - // Note: The resulting point is not projected to the ground, use getPolyHeight() to get height. - // Note: The algorithm is optimized for small delta movement and small number of polygons. - // Params: - // startRef - (in) ref to the polygon where startPos lies. - // startPos[3] - (in) start position of the mover. - // endPos[3] - (in) desired end position of the mover. - // filter - (in) path polygon filter. - // resultPos[3] - (out) new position of the mover. - // visited - (out) array of visited polygons. - // visitedCount - (out) Number of entries in the visited array. - // maxVisitedSize - (in) max number of polygons in the visited array. - dtStatus moveAlongSurface(dtPolyRef startRef, const float* startPos, const float* endPos, - const dtQueryFilter* filter, - float* resultPos, dtPolyRef* visited, int* visitedCount, const int maxVisitedSize) const; - - // Casts 'walkability' ray along the navmesh surface from startPos towards the endPos. - // Params: - // startRef - (in) ref to the polygon where the start lies. - // startPos[3] - (in) start position of the query. - // endPos[3] - (in) end position of the query. - // t - (out) hit parameter along the segment, FLT_MAX if no hit. - // hitNormal[3] - (out) normal of the nearest hit. - // filter - (in) path polygon filter. - // path - (out,opt) visited path polygons. - // pathCount - (out,opt) Number of polygons visited. - // maxPath - (in) max number of polygons in the path array. - dtStatus raycast(dtPolyRef startRef, const float* startPos, const float* endPos, - const dtQueryFilter* filter, - float* t, float* hitNormal, dtPolyRef* path, int* pathCount, const int maxPath) const; - - // Returns distance to nearest wall from the specified location. - // Params: - // startRef - (in) ref to the polygon where the center lies. - // centerPos[3] - (in) center if the query circle. - // maxRadius - (in) max search radius. - // filter - (in) path polygon filter. - // hitDist - (out) distance to nearest wall from the test location. - // hitPos[3] - (out) location of the nearest hit. - // hitNormal[3] - (out) normal of the nearest hit. - dtStatus findDistanceToWall(dtPolyRef startRef, const float* centerPos, const float maxRadius, - const dtQueryFilter* filter, - float* hitDist, float* hitPos, float* hitNormal) const; - - // Finds polygons found along the navigation graph which touch the specified circle. - // Params: - // startRef - (in) ref to the polygon where the search starts. - // centerPos[3] - (in) center if the query circle. - // radius - (in) radius of the query circle. - // filter - (in) path polygon filter. - // resultRef - (out, opt) refs to the polygons touched by the circle. - // resultParent - (out, opt) parent of each result polygon. - // resultCost - (out, opt) search cost at each result polygon. - // resultCount - (out, opt) Number of results. - // maxResult - (int) maximum capacity of search results. 
- dtStatus findPolysAroundCircle(dtPolyRef startRef, const float* centerPos, const float radius, - const dtQueryFilter* filter, - dtPolyRef* resultRef, dtPolyRef* resultParent, float* resultCost, - int* resultCount, const int maxResult) const; - - // Finds polygons found along the navigation graph which touch the convex polygon shape. - // Params: - // startRef - (in) ref to the polygon where the search starts. - // verts[3*n] - (in) vertices describing convex polygon shape (CCW). - // nverts - (in) number of vertices in the polygon. - // filter - (in) path polygon filter. - // resultRef - (out, opt) refs to the polygons touched by the circle. - // resultParent - (out, opt) parent of each result polygon. - // resultCost - (out, opt) search cost at each result polygon. - // resultCount - (out) number of results. - // maxResult - (int) maximum capacity of search results. - dtStatus findPolysAroundShape(dtPolyRef startRef, const float* verts, const int nverts, - const dtQueryFilter* filter, - dtPolyRef* resultRef, dtPolyRef* resultParent, float* resultCost, - int* resultCount, const int maxResult) const; - - // Finds non-overlapping local neighbourhood around center location. - // Note: The algorithm is optimized for small query radius and small number of polygons. - // Params: - // startRef - (in) ref to the polygon where the search starts. - // centerPos[3] - (in) center if the query circle. - // radius - (in) radius of the query circle. - // filter - (in) path polygon filter. - // resultRef - (out) refs to the polygons touched by the circle. - // resultParent - (out, opt) parent of each result polygon. - // resultCount - (out) number of results. - // maxResult - (int) maximum capacity of search results. - dtStatus findLocalNeighbourhood(dtPolyRef startRef, const float* centerPos, const float radius, - const dtQueryFilter* filter, - dtPolyRef* resultRef, dtPolyRef* resultParent, - int* resultCount, const int maxResult) const; - - // Returns wall segments of specified polygon. - // Params: - // ref - (in) ref to the polygon. - // filter - (in) path polygon filter. - // segments[6*maxSegments] - (out) wall segments (2 endpoints per segment). - // segmentCount - (out) number of wall segments. - // maxSegments - (in) max number of segments that can be stored in 'segments'. - dtStatus getPolyWallSegments(dtPolyRef ref, const dtQueryFilter* filter, - float* segments, int* segmentCount, const int maxSegments) const; - - // Returns closest point on navigation polygon. - // Uses detail polygons to find the closest point to the navigation polygon surface. - // Params: - // ref - (in) ref to the polygon. - // pos[3] - (in) the point to check. - // closest[3] - (out) closest point. - // Returns: true if closest point found. - dtStatus closestPointOnPoly(dtPolyRef ref, const float* pos, float* closest) const; - - // Returns closest point on navigation polygon boundary. - // Uses the navigation polygon boundary to snap the point to poly boundary - // if it is outside the polygon. Much faster than closestPointToPoly. Does not affect height. - // Params: - // ref - (in) ref to the polygon. - // pos[3] - (in) the point to check. - // closest[3] - (out) closest point. - // Returns: true if closest point found. - dtStatus closestPointOnPolyBoundary(dtPolyRef ref, const float* pos, float* closest) const; - - // Returns start and end location of an off-mesh link polygon. - // Params: - // prevRef - (in) ref to the polygon before the link (used to select direction). 
- // polyRef - (in) ref to the off-mesh link polygon. - // startPos[3] - (out) start point of the link. - // endPos[3] - (out) end point of the link. - // Returns: true if link is found. - dtStatus getOffMeshConnectionPolyEndPoints(dtPolyRef prevRef, dtPolyRef polyRef, float* startPos, float* endPos) const; - - // Returns height of the polygon at specified location. - // Params: - // ref - (in) ref to the polygon. - // pos[3] - (in) the point where to locate the height. - // height - (out) height at the location. - // Returns: true if over polygon. - dtStatus getPolyHeight(dtPolyRef ref, const float* pos, float* height) const; - - // Returns true if poly reference ins in closed list. - bool isInClosedList(dtPolyRef ref) const; - - class dtNodePool* getNodePool() const { return m_nodePool; } - -private: - - // Returns neighbour tile based on side. - dtMeshTile* getNeighbourTileAt(int x, int y, int side) const; - - // Queries polygons within a tile. - int queryPolygonsInTile(const dtMeshTile* tile, const float* qmin, const float* qmax, const dtQueryFilter* filter, - dtPolyRef* polys, const int maxPolys) const; - // Find nearest polygon within a tile. - dtPolyRef findNearestPolyInTile(const dtMeshTile* tile, const float* center, const float* extents, - const dtQueryFilter* filter, float* nearestPt) const; - // Returns closest point on polygon. - dtStatus closestPointOnPolyInTile(const dtMeshTile* tile, const dtPoly* poly, const float* pos, float* closest) const; - - // Returns portal points between two polygons. - dtStatus getPortalPoints(dtPolyRef from, dtPolyRef to, float* left, float* right, - unsigned char& fromType, unsigned char& toType) const; - dtStatus getPortalPoints(dtPolyRef from, const dtPoly* fromPoly, const dtMeshTile* fromTile, - dtPolyRef to, const dtPoly* toPoly, const dtMeshTile* toTile, - float* left, float* right) const; - - // Returns edge mid point between two polygons. - dtStatus getEdgeMidPoint(dtPolyRef from, dtPolyRef to, float* mid) const; - dtStatus getEdgeMidPoint(dtPolyRef from, const dtPoly* fromPoly, const dtMeshTile* fromTile, - dtPolyRef to, const dtPoly* toPoly, const dtMeshTile* toTile, - float* mid) const; - - const dtNavMesh* m_nav; // Pointer to navmesh data. - - struct dtQueryData - { - dtStatus status; - struct dtNode* lastBestNode; - float lastBestNodeCost; - dtPolyRef startRef, endRef; - float startPos[3], endPos[3]; - const dtQueryFilter* filter; - }; - dtQueryData m_query; // Sliced query state. - - class dtNodePool* m_tinyNodePool; // Pointer to small node pool. - class dtNodePool* m_nodePool; // Pointer to node pool. - class dtNodeQueue* m_openList; // Pointer to open list queue. -}; - -// Helper function to allocate navmesh query class using Detour allocator. -dtNavMeshQuery* dtAllocNavMeshQuery(); -void dtFreeNavMeshQuery(dtNavMeshQuery* query); - -#endif // DETOURNAVMESHQUERY_H diff --git a/deps/recastnavigation/Detour/DetourNode.cpp b/deps/recastnavigation/Detour/DetourNode.cpp deleted file mode 100644 index f7811e3450..0000000000 --- a/deps/recastnavigation/Detour/DetourNode.cpp +++ /dev/null @@ -1,164 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. 
-// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#include "DetourNode.h" -#include "DetourAlloc.h" -#include "DetourAssert.h" -#include "DetourCommon.h" -#include <string.h> - -inline unsigned int dtHashRef(dtPolyRef a) -{ - a = (~a) + (a << 18); - a = a ^ (a >> 31); - a = a * 21; - a = a ^ (a >> 11); - a = a + (a << 6); - a = a ^ (a >> 22); - return (unsigned int)a; -} - -////////////////////////////////////////////////////////////////////////////////////////// -dtNodePool::dtNodePool(int maxNodes, int hashSize) : - m_nodes(0), - m_first(0), - m_next(0), - m_maxNodes(maxNodes), - m_hashSize(hashSize), - m_nodeCount(0) -{ - dtAssert(dtNextPow2(m_hashSize) == (unsigned int)m_hashSize); - dtAssert(m_maxNodes > 0); - - m_nodes = (dtNode*)dtAlloc(sizeof(dtNode)*m_maxNodes, DT_ALLOC_PERM); - m_next = (dtNodeIndex*)dtAlloc(sizeof(dtNodeIndex)*m_maxNodes, DT_ALLOC_PERM); - m_first = (dtNodeIndex*)dtAlloc(sizeof(dtNodeIndex)*hashSize, DT_ALLOC_PERM); - - dtAssert(m_nodes); - dtAssert(m_next); - dtAssert(m_first); - - memset(m_first, 0xff, sizeof(dtNodeIndex)*m_hashSize); - memset(m_next, 0xff, sizeof(dtNodeIndex)*m_maxNodes); -} - -dtNodePool::~dtNodePool() -{ - dtFree(m_nodes); - dtFree(m_next); - dtFree(m_first); -} - -void dtNodePool::clear() -{ - memset(m_first, 0xff, sizeof(dtNodeIndex)*m_hashSize); - m_nodeCount = 0; -} - -dtNode* dtNodePool::findNode(dtPolyRef id) -{ - unsigned int bucket = dtHashRef(id) & (m_hashSize-1); - dtNodeIndex i = m_first[bucket]; - while (i != DT_NULL_IDX) - { - if (m_nodes[i].id == id) - return &m_nodes[i]; - i = m_next[i]; - } - return 0; -} - -dtNode* dtNodePool::getNode(dtPolyRef id) -{ - unsigned int bucket = dtHashRef(id) & (m_hashSize-1); - dtNodeIndex i = m_first[bucket]; - dtNode* node = 0; - while (i != DT_NULL_IDX) - { - if (m_nodes[i].id == id) - return &m_nodes[i]; - i = m_next[i]; - } - - if (m_nodeCount >= m_maxNodes) - return 0; - - i = (dtNodeIndex)m_nodeCount; - m_nodeCount++; - - // Init node - node = &m_nodes[i]; - node->pidx = 0; - node->cost = 0; - node->total = 0; - node->id = id; - node->flags = 0; - - m_next[i] = m_first[bucket]; - m_first[bucket] = i; - - return node; -} - - -////////////////////////////////////////////////////////////////////////////////////////// -dtNodeQueue::dtNodeQueue(int n) : - m_heap(0), - m_capacity(n), - m_size(0) -{ - dtAssert(m_capacity > 0); - - m_heap = (dtNode**)dtAlloc(sizeof(dtNode*)*(m_capacity+1), DT_ALLOC_PERM); - dtAssert(m_heap); -} - -dtNodeQueue::~dtNodeQueue() -{ - dtFree(m_heap); -} - -void dtNodeQueue::bubbleUp(int i, dtNode* node) -{ - int parent = (i-1)/2; - // note: (index > 0) means there is a parent - while ((i > 0) && (m_heap[parent]->total > node->total)) - { - m_heap[i] = m_heap[parent]; - i = parent; - parent = (i-1)/2; - } - m_heap[i] = node; -} - -void dtNodeQueue::trickleDown(int i, dtNode* node) -{ - int child = (i*2)+1; - while (child < m_size) - { - if (((child+1) < 
m_size) && - (m_heap[child]->total > m_heap[child+1]->total)) - { - child++; - } - m_heap[i] = m_heap[child]; - i = child; - child = (i*2)+1; - } - bubbleUp(i, node); -} diff --git a/deps/recastnavigation/Detour/DetourNode.h b/deps/recastnavigation/Detour/DetourNode.h deleted file mode 100644 index e46254fb50..0000000000 --- a/deps/recastnavigation/Detour/DetourNode.h +++ /dev/null @@ -1,159 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef DETOURNODE_H -#define DETOURNODE_H - -#include "DetourNavMesh.h" - -enum dtNodeFlags -{ - DT_NODE_OPEN = 0x01, - DT_NODE_CLOSED = 0x02, -}; - -typedef unsigned short dtNodeIndex; -static const dtNodeIndex DT_NULL_IDX = ~0; - -struct dtNode -{ - float pos[3]; // Position of the node. - float cost; // Cost from previous node to current node. - float total; // Cost up to the node. - unsigned int pidx : 30; // Index to parent node. - unsigned int flags : 2; // Node flags 0/open/closed. - dtPolyRef id; // Polygon ref the node corresponds to. 
-}; - - -class dtNodePool -{ -public: - dtNodePool(int maxNodes, int hashSize); - ~dtNodePool(); - inline void operator=(const dtNodePool&) {} - void clear(); - dtNode* getNode(dtPolyRef id); - dtNode* findNode(dtPolyRef id); - - inline unsigned int getNodeIdx(const dtNode* node) const - { - if (!node) return 0; - return (unsigned int)(node - m_nodes)+1; - } - - inline dtNode* getNodeAtIdx(unsigned int idx) - { - if (!idx) return 0; - return &m_nodes[idx-1]; - } - - inline const dtNode* getNodeAtIdx(unsigned int idx) const - { - if (!idx) return 0; - return &m_nodes[idx-1]; - } - - inline int getMemUsed() const - { - return sizeof(*this) + - sizeof(dtNode)*m_maxNodes + - sizeof(dtNodeIndex)*m_maxNodes + - sizeof(dtNodeIndex)*m_hashSize; - } - - inline int getMaxNodes() const { return m_maxNodes; } - - inline int getHashSize() const { return m_hashSize; } - inline dtNodeIndex getFirst(int bucket) const { return m_first[bucket]; } - inline dtNodeIndex getNext(int i) const { return m_next[i]; } - -private: - - dtNode* m_nodes; - dtNodeIndex* m_first; - dtNodeIndex* m_next; - const int m_maxNodes; - const int m_hashSize; - int m_nodeCount; -}; - -class dtNodeQueue -{ -public: - dtNodeQueue(int n); - ~dtNodeQueue(); - inline void operator=(dtNodeQueue&) {} - - inline void clear() - { - m_size = 0; - } - - inline dtNode* top() - { - return m_heap[0]; - } - - inline dtNode* pop() - { - dtNode* result = m_heap[0]; - m_size--; - trickleDown(0, m_heap[m_size]); - return result; - } - - inline void push(dtNode* node) - { - m_size++; - bubbleUp(m_size-1, node); - } - - inline void modify(dtNode* node) - { - for (int i = 0; i < m_size; ++i) - { - if (m_heap[i] == node) - { - bubbleUp(i, node); - return; - } - } - } - - inline bool empty() const { return m_size == 0; } - - inline int getMemUsed() const - { - return sizeof(*this) + - sizeof(dtNode*)*(m_capacity+1); - } - - inline int getCapacity() const { return m_capacity; } - -private: - void bubbleUp(int i, dtNode* node); - void trickleDown(int i, dtNode* node); - - dtNode** m_heap; - const int m_capacity; - int m_size; -}; - - -#endif // DETOURNODE_H
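The dtNavMeshQuery, dtQueryFilter and node-pool declarations removed above describe the old Detour pathfinding API fairly completely. As a rough orientation only, a caller would chain findNearestPoly, findPath and findStraightPath along these lines (buffer sizes, extents and flag values are illustrative assumptions, and status checking is elided for brevity):

```cpp
// Minimal usage sketch of the dtNavMeshQuery API declared above. 'navMesh' is
// assumed to be an already-loaded dtNavMesh; sizes and flags are illustrative.
#include "DetourNavMesh.h"
#include "DetourNavMeshQuery.h"

bool findExamplePath(const dtNavMesh* navMesh, const float* startPos, const float* endPos)
{
    dtNavMeshQuery* query = dtAllocNavMeshQuery();
    query->init(navMesh, 2048);                      // up to 2048 search nodes

    dtQueryFilter filter;
    filter.setIncludeFlags(0xffff);                  // accept every poly flag
    filter.setExcludeFlags(0);

    const float extents[3] = { 4.0f, 4.0f, 4.0f };   // search box half-extents
    dtPolyRef startRef = 0, endRef = 0;
    float nearestStart[3], nearestEnd[3];
    query->findNearestPoly(startPos, extents, &filter, &startRef, nearestStart);
    query->findNearestPoly(endPos, extents, &filter, &endRef, nearestEnd);
    if (!startRef || !endRef)
    {
        dtFreeNavMeshQuery(query);
        return false;
    }

    // Polygon corridor from start towards end (or the closest reachable poly).
    dtPolyRef polys[256];
    int npolys = 0;
    query->findPath(startRef, endRef, nearestStart, nearestEnd, &filter, polys, &npolys, 256);

    // String-pulled point path along the corridor.
    float straight[256 * 3];
    unsigned char straightFlags[256];
    dtPolyRef straightRefs[256];
    int nstraight = 0;
    query->findStraightPath(nearestStart, nearestEnd, polys, npolys,
                            straight, straightFlags, straightRefs, &nstraight, 256);

    dtFreeNavMeshQuery(query);
    return nstraight > 0;
}
```

The same filter object can be reused across calls; note the header's warning that initSlicedFindPath stores the filter pointer for later updateSlicedFindPath calls.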
\ No newline at end of file diff --git a/deps/recastnavigation/Detour/DetourObstacleAvoidance.cpp b/deps/recastnavigation/Detour/DetourObstacleAvoidance.cpp deleted file mode 100644 index a255c9b3fd..0000000000 --- a/deps/recastnavigation/Detour/DetourObstacleAvoidance.cpp +++ /dev/null @@ -1,532 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#include "DetourObstacleAvoidance.h" -#include "DetourCommon.h" -#include "DetourAlloc.h" -#include "DetourAssert.h" -#include <string.h> -#include <math.h> -#include <float.h> -#include <new> - - -static int sweepCircleCircle(const float* c0, const float r0, const float* v, - const float* c1, const float r1, - float& tmin, float& tmax) -{ - static const float EPS = 0.0001f; - float s[3]; - dtVsub(s,c1,c0); - float r = r0+r1; - float c = dtVdot2D(s,s) - r*r; - float a = dtVdot2D(v,v); - if (a < EPS) return 0; // not moving - - // Overlap, calc time to exit. - float b = dtVdot2D(v,s); - float d = b*b - a*c; - if (d < 0.0f) return 0; // no intersection. 
- a = 1.0f / a; - const float rd = dtSqrt(d); - tmin = (b - rd) * a; - tmax = (b + rd) * a; - return 1; -} - -static int isectRaySeg(const float* ap, const float* u, - const float* bp, const float* bq, - float& t) -{ - float v[3], w[3]; - dtVsub(v,bq,bp); - dtVsub(w,ap,bp); - float d = dtVperp2D(u,v); - if (fabsf(d) < 1e-6f) return 0; - d = 1.0f/d; - t = dtVperp2D(v,w) * d; - if (t < 0 || t > 1) return 0; - float s = dtVperp2D(u,w) * d; - if (s < 0 || s > 1) return 0; - return 1; -} - - - -dtObstacleAvoidanceDebugData* dtAllocObstacleAvoidanceDebugData() -{ - void* mem = dtAlloc(sizeof(dtObstacleAvoidanceDebugData), DT_ALLOC_PERM); - if (!mem) return 0; - return new(mem) dtObstacleAvoidanceDebugData; -} - -void dtFreeObstacleAvoidanceDebugData(dtObstacleAvoidanceDebugData* ptr) -{ - if (!ptr) return; - ptr->~dtObstacleAvoidanceDebugData(); - dtFree(ptr); -} - - -dtObstacleAvoidanceDebugData::dtObstacleAvoidanceDebugData() : - m_nsamples(0), - m_maxSamples(0), - m_vel(0), - m_ssize(0), - m_pen(0), - m_vpen(0), - m_vcpen(0), - m_spen(0), - m_tpen(0) -{ -} - -dtObstacleAvoidanceDebugData::~dtObstacleAvoidanceDebugData() -{ - dtFree(m_vel); - dtFree(m_ssize); - dtFree(m_pen); - dtFree(m_vpen); - dtFree(m_vcpen); - dtFree(m_spen); - dtFree(m_tpen); -} - -bool dtObstacleAvoidanceDebugData::init(const int maxSamples) -{ - dtAssert(maxSamples); - m_maxSamples = maxSamples; - - m_vel = (float*)dtAlloc(sizeof(float)*3*m_maxSamples, DT_ALLOC_PERM); - if (!m_vel) - return false; - m_pen = (float*)dtAlloc(sizeof(float)*m_maxSamples, DT_ALLOC_PERM); - if (!m_pen) - return false; - m_ssize = (float*)dtAlloc(sizeof(float)*m_maxSamples, DT_ALLOC_PERM); - if (!m_ssize) - return false; - m_vpen = (float*)dtAlloc(sizeof(float)*m_maxSamples, DT_ALLOC_PERM); - if (!m_vpen) - return false; - m_vcpen = (float*)dtAlloc(sizeof(float)*m_maxSamples, DT_ALLOC_PERM); - if (!m_vcpen) - return false; - m_spen = (float*)dtAlloc(sizeof(float)*m_maxSamples, DT_ALLOC_PERM); - if (!m_spen) - return false; - m_tpen = (float*)dtAlloc(sizeof(float)*m_maxSamples, DT_ALLOC_PERM); - if (!m_tpen) - return false; - - return true; -} - -void dtObstacleAvoidanceDebugData::reset() -{ - m_nsamples = 0; -} - -void dtObstacleAvoidanceDebugData::addSample(const float* vel, const float ssize, const float pen, - const float vpen, const float vcpen, const float spen, const float tpen) -{ - if (m_nsamples >= m_maxSamples) - return; - dtAssert(m_vel); - dtAssert(m_ssize); - dtAssert(m_pen); - dtAssert(m_vpen); - dtAssert(m_vcpen); - dtAssert(m_spen); - dtAssert(m_tpen); - dtVcopy(&m_vel[m_nsamples*3], vel); - m_ssize[m_nsamples] = ssize; - m_pen[m_nsamples] = pen; - m_vpen[m_nsamples] = vpen; - m_vcpen[m_nsamples] = vcpen; - m_spen[m_nsamples] = spen; - m_tpen[m_nsamples] = tpen; - m_nsamples++; -} - -static void normalizeArray(float* arr, const int n) -{ - // Normalize penaly range. - float minPen = FLT_MAX; - float maxPen = -FLT_MAX; - for (int i = 0; i < n; ++i) - { - minPen = dtMin(minPen, arr[i]); - maxPen = dtMax(maxPen, arr[i]); - } - const float penRange = maxPen-minPen; - const float s = penRange > 0.001f ? 
(1.0f / penRange) : 1; - for (int i = 0; i < n; ++i) - arr[i] = dtClamp((arr[i]-minPen)*s, 0.0f, 1.0f); -} - -void dtObstacleAvoidanceDebugData::normalizeSamples() -{ - normalizeArray(m_pen, m_nsamples); - normalizeArray(m_vpen, m_nsamples); - normalizeArray(m_vcpen, m_nsamples); - normalizeArray(m_spen, m_nsamples); - normalizeArray(m_tpen, m_nsamples); -} - - -dtObstacleAvoidanceQuery* dtAllocObstacleAvoidanceQuery() -{ - void* mem = dtAlloc(sizeof(dtObstacleAvoidanceQuery), DT_ALLOC_PERM); - if (!mem) return 0; - return new(mem) dtObstacleAvoidanceQuery; -} - -void dtFreeObstacleAvoidanceQuery(dtObstacleAvoidanceQuery* ptr) -{ - if (!ptr) return; - ptr->~dtObstacleAvoidanceQuery(); - dtFree(ptr); -} - - -dtObstacleAvoidanceQuery::dtObstacleAvoidanceQuery() : - m_velBias(0.0f), - m_weightDesVel(0.0f), - m_weightCurVel(0.0f), - m_weightSide(0.0f), - m_weightToi(0.0f), - m_horizTime(0.0f), - m_maxCircles(0), - m_circles(0), - m_ncircles(0), - m_maxSegments(0), - m_segments(0), - m_nsegments(0) -{ -} - -dtObstacleAvoidanceQuery::~dtObstacleAvoidanceQuery() -{ - dtFree(m_circles); - dtFree(m_segments); -} - -bool dtObstacleAvoidanceQuery::init(const int maxCircles, const int maxSegments) -{ - m_maxCircles = maxCircles; - m_ncircles = 0; - m_circles = (dtObstacleCircle*)dtAlloc(sizeof(dtObstacleCircle)*m_maxCircles, DT_ALLOC_PERM); - if (!m_circles) - return false; - memset(m_circles, 0, sizeof(dtObstacleCircle)*m_maxCircles); - - m_maxSegments = maxSegments; - m_nsegments = 0; - m_segments = (dtObstacleSegment*)dtAlloc(sizeof(dtObstacleSegment)*m_maxSegments, DT_ALLOC_PERM); - if (!m_segments) - return false; - memset(m_segments, 0, sizeof(dtObstacleSegment)*m_maxSegments); - - return true; -} - -void dtObstacleAvoidanceQuery::reset() -{ - m_ncircles = 0; - m_nsegments = 0; -} - -void dtObstacleAvoidanceQuery::addCircle(const float* pos, const float rad, - const float* vel, const float* dvel) -{ - if (m_ncircles >= m_maxCircles) - return; - - dtObstacleCircle* cir = &m_circles[m_ncircles++]; - dtVcopy(cir->p, pos); - cir->rad = rad; - dtVcopy(cir->vel, vel); - dtVcopy(cir->dvel, dvel); -} - -void dtObstacleAvoidanceQuery::addSegment(const float* p, const float* q) -{ - if (m_nsegments > m_maxSegments) - return; - - dtObstacleSegment* seg = &m_segments[m_nsegments++]; - dtVcopy(seg->p, p); - dtVcopy(seg->q, q); -} - -void dtObstacleAvoidanceQuery::prepare(const float* pos, const float* dvel) -{ - // Prepare obstacles - for (int i = 0; i < m_ncircles; ++i) - { - dtObstacleCircle* cir = &m_circles[i]; - - // Side - const float* pa = pos; - const float* pb = cir->p; - - const float orig[3] = {0,0}; - float dv[3]; - dtVsub(cir->dp,pb,pa); - dtVnormalize(cir->dp); - dtVsub(dv, cir->dvel, dvel); - - const float a = dtTriArea2D(orig, cir->dp,dv); - if (a < 0.01f) - { - cir->np[0] = -cir->dp[2]; - cir->np[2] = cir->dp[0]; - } - else - { - cir->np[0] = cir->dp[2]; - cir->np[2] = -cir->dp[0]; - } - } - - for (int i = 0; i < m_nsegments; ++i) - { - dtObstacleSegment* seg = &m_segments[i]; - - // Precalc if the agent is really close to the segment. - const float r = 0.01f; - float t; - seg->touch = dtDistancePtSegSqr2D(pos, seg->p, seg->q, t) < dtSqr(r); - } -} - -float dtObstacleAvoidanceQuery::processSample(const float* vcand, const float cs, - const float* pos, const float rad, - const float vmax, const float* vel, const float* dvel, - dtObstacleAvoidanceDebugData* debug) -{ - // Find min time of impact and exit amongst all obstacles. 
- float tmin = m_horizTime; - float side = 0; - int nside = 0; - - for (int i = 0; i < m_ncircles; ++i) - { - const dtObstacleCircle* cir = &m_circles[i]; - - // RVO - float vab[3]; - dtVscale(vab, vcand, 2); - dtVsub(vab, vab, vel); - dtVsub(vab, vab, cir->vel); - - // Side - side += dtClamp(dtMin(dtVdot2D(cir->dp,vab)*0.5f+0.5f, dtVdot2D(cir->np,vab)*2), 0.0f, 1.0f); - nside++; - - float htmin = 0, htmax = 0; - if (!sweepCircleCircle(pos,rad, vab, cir->p,cir->rad, htmin, htmax)) - continue; - - // Handle overlapping obstacles. - if (htmin < 0.0f && htmax > 0.0f) - { - // Avoid more when overlapped. - htmin = -htmin * 0.5f; - } - - if (htmin >= 0.0f) - { - // The closest obstacle is somewhere ahead of us, keep track of nearest obstacle. - if (htmin < tmin) - tmin = htmin; - } - } - - for (int i = 0; i < m_nsegments; ++i) - { - const dtObstacleSegment* seg = &m_segments[i]; - float htmin = 0; - - if (seg->touch) - { - // Special case when the agent is very close to the segment. - float sdir[3], snorm[3]; - dtVsub(sdir, seg->q, seg->p); - snorm[0] = -sdir[2]; - snorm[2] = sdir[0]; - // If the velocity is pointing towards the segment, no collision. - if (dtVdot2D(snorm, vcand) < 0.0f) - continue; - // Else immediate collision. - htmin = 0.0f; - } - else - { - if (!isectRaySeg(pos, vcand, seg->p, seg->q, htmin)) - continue; - } - - // Avoid less when facing walls. - htmin *= 2.0f; - - // The closest obstacle is somewhere ahead of us, keep track of nearest obstacle. - if (htmin < tmin) - tmin = htmin; - } - - // Normalize side bias, to prevent it dominating too much. - if (nside) - side /= nside; - - const float ivmax = 1.0f / vmax; - const float vpen = m_weightDesVel * (dtVdist2D(vcand, dvel) * ivmax); - const float vcpen = m_weightCurVel * (dtVdist2D(vcand, vel) * ivmax); - const float spen = m_weightSide * side; - const float tpen = m_weightToi * (1.0f/(0.1f+tmin / m_horizTime)); - - const float penalty = vpen + vcpen + spen + tpen; - - // Store different penalties for debug viewing - if (debug) - debug->addSample(vcand, cs, penalty, vpen, vcpen, spen, tpen); - - return penalty; -} - -void dtObstacleAvoidanceQuery::sampleVelocityGrid(const float* pos, const float rad, const float vmax, - const float* vel, const float* dvel, - float* nvel, const int gsize, - dtObstacleAvoidanceDebugData* debug) -{ - prepare(pos, dvel); - - dtVset(nvel, 0,0,0); - - if (debug) - debug->reset(); - - const float cvx = dvel[0] * m_velBias; - const float cvz = dvel[2] * m_velBias; - const float cs = vmax * 2 * (1 - m_velBias) / (float)(gsize-1); - const float half = (gsize-1)*cs*0.5f; - - float minPenalty = FLT_MAX; - - for (int y = 0; y < gsize; ++y) - { - for (int x = 0; x < gsize; ++x) - { - float vcand[3]; - vcand[0] = cvx + x*cs - half; - vcand[1] = 0; - vcand[2] = cvz + y*cs - half; - - if (dtSqr(vcand[0])+dtSqr(vcand[2]) > dtSqr(vmax+cs/2)) continue; - - const float penalty = processSample(vcand, cs, pos,rad,vmax,vel,dvel, debug); - if (penalty < minPenalty) - { - minPenalty = penalty; - dtVcopy(nvel, vcand); - } - } - } -} - - -static const float DT_PI = 3.14159265f; - -void dtObstacleAvoidanceQuery::sampleVelocityAdaptive(const float* pos, const float rad, const float vmax, - const float* vel, const float* dvel, float* nvel, - const int ndivs, const int nrings, const int depth, - dtObstacleAvoidanceDebugData* debug) -{ - prepare(pos, dvel); - - dtVset(nvel, 0,0,0); - - if (debug) - debug->reset(); - - // Build sampling pattern aligned to desired velocity. 
- static const int MAX_PATTERN_DIVS = 32; - static const int MAX_PATTERN_RINGS = 4; - float pat[(MAX_PATTERN_DIVS*MAX_PATTERN_RINGS+1)*2]; - int npat = 0; - - const int nd = dtClamp(ndivs, 1, MAX_PATTERN_DIVS); - const int nr = dtClamp(nrings, 1, MAX_PATTERN_RINGS); - const float da = (1.0f/nd) * DT_PI*2; - const float dang = atan2f(dvel[2], dvel[0]); - - // Always add sample at zero - pat[npat*2+0] = 0; - pat[npat*2+1] = 0; - npat++; - - for (int j = 0; j < nr; ++j) - { - const float rad = (float)(nr-j)/(float)nr; - float a = dang + (j&1)*0.5f*da; - for (int i = 0; i < nd; ++i) - { - pat[npat*2+0] = cosf(a)*rad; - pat[npat*2+1] = sinf(a)*rad; - npat++; - a += da; - } - } - - // Start sampling. - float cr = vmax * (1.0f-m_velBias); - float res[3]; - dtVset(res, dvel[0] * m_velBias, 0, dvel[2] * m_velBias); - - for (int k = 0; k < depth; ++k) - { - float minPenalty = FLT_MAX; - float bvel[3]; - dtVset(bvel, 0,0,0); - - for (int i = 0; i < npat; ++i) - { - float vcand[3]; - vcand[0] = res[0] + pat[i*2+0]*cr; - vcand[1] = 0; - vcand[2] = res[2] + pat[i*2+1]*cr; - - if (dtSqr(vcand[0])+dtSqr(vcand[2]) > dtSqr(vmax+0.001f)) continue; - - const float penalty = processSample(vcand,cr/10, pos,rad,vmax,vel,dvel, debug); - if (penalty < minPenalty) - { - minPenalty = penalty; - dtVcopy(bvel, vcand); - } - } - - dtVcopy(res, bvel); - - cr *= 0.5f; - } - - dtVcopy(nvel, res); -} - diff --git a/deps/recastnavigation/Detour/DetourObstacleAvoidance.h b/deps/recastnavigation/Detour/DetourObstacleAvoidance.h deleted file mode 100644 index 4a7187a799..0000000000 --- a/deps/recastnavigation/Detour/DetourObstacleAvoidance.h +++ /dev/null @@ -1,148 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef DETOUROBSTACLEAVOIDANCE_H -#define DETOUROBSTACLEAVOIDANCE_H - -struct dtObstacleCircle -{ - float p[3]; // Position of the obstacle - float vel[3]; // Velocity of the obstacle - float dvel[3]; // Velocity of the obstacle - float rad; // Radius of the obstacle - float dp[3], np[3]; // Use for side selection during sampling. 
-}; - -struct dtObstacleSegment -{ - float p[3], q[3]; // End points of the obstacle segment - bool touch; -}; - -static const int RVO_SAMPLE_RAD = 15; -static const int MAX_RVO_SAMPLES = (RVO_SAMPLE_RAD*2+1)*(RVO_SAMPLE_RAD*2+1) + 100; - -class dtObstacleAvoidanceDebugData -{ -public: - dtObstacleAvoidanceDebugData(); - ~dtObstacleAvoidanceDebugData(); - - bool init(const int maxSamples); - void reset(); - void addSample(const float* vel, const float ssize, const float pen, - const float vpen, const float vcpen, const float spen, const float tpen); - - void normalizeSamples(); - - inline int getSampleCount() const { return m_nsamples; } - inline const float* getSampleVelocity(const int i) const { return &m_vel[i*3]; } - inline float getSampleSize(const int i) const { return m_ssize[i]; } - inline float getSamplePenalty(const int i) const { return m_pen[i]; } - inline float getSampleDesiredVelocityPenalty(const int i) const { return m_vpen[i]; } - inline float getSampleCurrentVelocityPenalty(const int i) const { return m_vcpen[i]; } - inline float getSamplePreferredSidePenalty(const int i) const { return m_spen[i]; } - inline float getSampleCollisionTimePenalty(const int i) const { return m_tpen[i]; } - -private: - int m_nsamples; - int m_maxSamples; - float* m_vel; - float* m_ssize; - float* m_pen; - float* m_vpen; - float* m_vcpen; - float* m_spen; - float* m_tpen; -}; - -dtObstacleAvoidanceDebugData* dtAllocObstacleAvoidanceDebugData(); -void dtFreeObstacleAvoidanceDebugData(dtObstacleAvoidanceDebugData* ptr); - - -class dtObstacleAvoidanceQuery -{ -public: - dtObstacleAvoidanceQuery(); - ~dtObstacleAvoidanceQuery(); - - bool init(const int maxCircles, const int maxSegments); - - void reset(); - - void addCircle(const float* pos, const float rad, - const float* vel, const float* dvel); - - void addSegment(const float* p, const float* q); - - inline void setVelocitySelectionBias(float v) { m_velBias = v; } - inline void setDesiredVelocityWeight(float w) { m_weightDesVel = w; } - inline void setCurrentVelocityWeight(float w) { m_weightCurVel = w; } - inline void setPreferredSideWeight(float w) { m_weightSide = w; } - inline void setCollisionTimeWeight(float w) { m_weightToi = w; } - inline void setTimeHorizon(float t) { m_horizTime = t; } - - void sampleVelocityGrid(const float* pos, const float rad, const float vmax, - const float* vel, const float* dvel, float* nvel, - const int gsize, - dtObstacleAvoidanceDebugData* debug = 0); - - void sampleVelocityAdaptive(const float* pos, const float rad, const float vmax, - const float* vel, const float* dvel, float* nvel, - const int ndivs, const int nrings, const int depth, - dtObstacleAvoidanceDebugData* debug = 0); - - inline int getObstacleCircleCount() const { return m_ncircles; } - const dtObstacleCircle* getObstacleCircle(const int i) { return &m_circles[i]; } - - inline int getObstacleSegmentCount() const { return m_nsegments; } - const dtObstacleSegment* getObstacleSegment(const int i) { return &m_segments[i]; } - -private: - - void prepare(const float* pos, const float* dvel); - - float processSample(const float* vcand, const float cs, - const float* pos, const float rad, - const float vmax, const float* vel, const float* dvel, - dtObstacleAvoidanceDebugData* debug); - - dtObstacleCircle* insertCircle(const float dist); - dtObstacleSegment* insertSegment(const float dist); - - float m_velBias; - float m_weightDesVel; - float m_weightCurVel; - float m_weightSide; - float m_weightToi; - float m_horizTime; - - int m_maxCircles; - 
dtObstacleCircle* m_circles; - int m_ncircles; - - int m_maxSegments; - dtObstacleSegment* m_segments; - int m_nsegments; -}; - -dtObstacleAvoidanceQuery* dtAllocObstacleAvoidanceQuery(); -void dtFreeObstacleAvoidanceQuery(dtObstacleAvoidanceQuery* ptr); - - -#endif // DETOUROBSTACLEAVOIDANCE_H
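The removed DetourObstacleAvoidance.h exposes a small sampling API: register nearby obstacles, set the penalty weights used by processSample, then sample a new velocity. A hedged sketch of how a caller might drive it (all weights, counts and sampling parameters below are illustrative, not values TrinityCore actually used):

```cpp
// Sketch of the removed dtObstacleAvoidanceQuery API with one moving circular
// obstacle. Weight/bias values are placeholders for per-agent tuning.
#include "DetourObstacleAvoidance.h"

void pickVelocity(const float* agentPos, float agentRadius, float maxSpeed,
                  const float* curVel, const float* desiredVel,
                  const float* neighbourPos, const float* neighbourVel,
                  float* outVel)
{
    dtObstacleAvoidanceQuery* oa = dtAllocObstacleAvoidanceQuery();
    oa->init(6 /*maxCircles*/, 8 /*maxSegments*/);

    // Weights controlling the penalty terms combined in processSample() above.
    oa->setVelocitySelectionBias(0.4f);
    oa->setDesiredVelocityWeight(2.0f);
    oa->setCurrentVelocityWeight(0.75f);
    oa->setPreferredSideWeight(0.75f);
    oa->setCollisionTimeWeight(2.5f);
    oa->setTimeHorizon(2.5f);

    oa->reset();
    // The obstacle's desired velocity feeds the RVO term; reused here for both.
    oa->addCircle(neighbourPos, agentRadius, neighbourVel, neighbourVel);

    // Adaptive sampling: 7 divisions, 2 rings, 3 refinement passes (arbitrary).
    oa->sampleVelocityAdaptive(agentPos, agentRadius, maxSpeed,
                               curVel, desiredVel, outVel,
                               7, 2, 3);

    dtFreeObstacleAvoidanceQuery(oa);
}
```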
\ No newline at end of file diff --git a/deps/recastnavigation/Readme.txt b/deps/recastnavigation/Readme.txt deleted file mode 100644 index 0c2f7b1675..0000000000 --- a/deps/recastnavigation/Readme.txt +++ /dev/null @@ -1,120 +0,0 @@ - -Recast & Detour Version 1.4 - - -Recast - -Recast is state of the art navigation mesh construction toolset for games. - - * It is automatic, which means that you can throw any level geometry - at it and you will get robust mesh out - * It is fast which means swift turnaround times for level designers - * It is open source so it comes with full source and you can - customize it to your hearts content. - -The Recast process starts with constructing a voxel mold from a level geometry -and then casting a navigation mesh over it. The process consists of three steps, -building the voxel mold, partitioning the mold into simple regions, peeling off -the regions as simple polygons. - - 1. The voxel mold is build from the input triangle mesh by rasterizing - the triangles into a multi-layer heightfield. Some simple filters are - then applied to the mold to prune out locations where the character - would not be able to move. - 2. The walkable areas described by the mold are divided into simple - overlayed 2D regions. The resulting regions have only one non-overlapping - contour, which simplifies the final step of the process tremendously. - 3. The navigation polygons are peeled off from the regions by first tracing - the boundaries and then simplifying them. The resulting polygons are - finally converted to convex polygons which makes them perfect for - pathfinding and spatial reasoning about the level. - -The toolset code is located in the Recast folder and demo application using the Recast -toolset is located in the RecastDemo folder. - -The project files with this distribution can be compiled with Microsoft Visual C++ 2008 -(you can download it for free) and XCode 3.1. - - -Detour - -Recast is accompanied with Detour, path-finding and spatial reasoning toolkit. You can use any navigation mesh with Detour, but of course the data generated with Recast fits perfectly. - -Detour offers simple static navigation mesh which is suitable for many simple cases, as well as tiled navigation mesh which allows you to plug in and out pieces of the mesh. The tiled mesh allows to create systems where you stream new navigation data in and out as the player progresses the level, or you may regenerate tiles as the world changes. - - -Latest code available at http://code.google.com/p/recastnavigation/ - - --- - -Release Notes - ----------------- -* Recast 1.4 - Released August 24th, 2009 - -- Added detail height mesh generation (RecastDetailMesh.cpp) for single, - tiled statmeshes as well as tilemesh. -- Added feature to contour tracing which detects extra vertices along - tile edges which should be removed later. -- Changed the tiled stat mesh preprocess, so that it first generated - polymeshes per tile and finally combines them. -- Fixed bug in the GUI code where invisible buttons could be pressed. - ----------------- -* Recast 1.31 - Released July 24th, 2009 - -- Better cost and heuristic functions. -- Fixed tile navmesh raycast on tile borders. - ----------------- -* Recast 1.3 - Released July 14th, 2009 - -- Added dtTileNavMesh which allows to dynamically add and remove navmesh pieces at runtime. -- Renamed stat navmesh types to dtStat* (i.e. dtPoly is now dtStatPoly). -- Moved common code used by tile and stat navmesh to DetourNode.h/cpp and DetourCommon.h/cpp. 
-- Refactores the demo code. - ----------------- -* Recast 1.2 - Released June 17th, 2009 - -- Added tiled mesh generation. The tiled generation allows to generate navigation for - much larger worlds, it removes some of the artifacts that comes from distance fields - in open areas, and allows later streaming and dynamic runtime generation -- Improved and added some debug draw modes -- API change: The helper function rcBuildNavMesh does not exists anymore, - had to change few internal things to cope with the tiled processing, - similar API functionality will be added later once the tiled process matures -- The demo is getting way too complicated, need to split demos -- Fixed several filtering functions so that the mesh is tighter to the geometry, - sometimes there could be up error up to tow voxel units close to walls, - now it should be just one. - ----------------- -* Recast 1.1 - Released April 11th, 2009 - -This is the first release of Detour. - ----------------- -* Recast 1.0 - Released March 29th, 2009 - -This is the first release of Recast. - -The process is not always as robust as I would wish. The watershed phase sometimes swallows tiny islands -which are close to edges. These droppings are handled in rcBuildContours, but the code is not -particularly robust either. - -Another non-robust case is when portal contours (contours shared between two regions) are always -assumed to be straight. That can lead to overlapping contours specially when the level has -large open areas. - - - -Mikko Mononen -memon@inside.org diff --git a/deps/recastnavigation/Recast/CMakeLists.txt b/deps/recastnavigation/Recast/CMakeLists.txt index 975b4a9c14..30265cb7e4 100644 --- a/deps/recastnavigation/Recast/CMakeLists.txt +++ b/deps/recastnavigation/Recast/CMakeLists.txt @@ -1,4 +1,4 @@ -# Copyright (C) +# Copyright (C) 2008-2016 TrinityCore <http://www.trinitycore.org/> # # This file is free software; as a special exception the author gives # unlimited permission to copy and/or distribute it, with or without @@ -9,15 +9,16 @@ # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. set(Recast_STAT_SRCS - Recast.cpp - RecastAlloc.cpp - RecastArea.cpp - RecastContour.cpp - RecastFilter.cpp - RecastMesh.cpp - RecastMeshDetail.cpp - RecastRasterization.cpp - RecastRegion.cpp + Source/Recast.cpp + Source/RecastAlloc.cpp + Source/RecastArea.cpp + Source/RecastContour.cpp + Source/RecastFilter.cpp + Source/RecastLayers.cpp + Source/RecastMesh.cpp + Source/RecastMeshDetail.cpp + Source/RecastRasterization.cpp + Source/RecastRegion.cpp ) if(WIN32) @@ -28,4 +29,15 @@ endif() add_library(Recast STATIC ${Recast_STAT_SRCS}) -target_link_libraries(Recast ${ZLIB_LIBRARIES}) +target_include_directories(Recast + PUBLIC + ${CMAKE_CURRENT_SOURCE_DIR}/Include) + +target_link_libraries(Recast + PUBLIC + zlib) + +set_target_properties(Recast + PROPERTIES + FOLDER + "dep") diff --git a/deps/recastnavigation/Recast/Recast.cpp b/deps/recastnavigation/Recast/Recast.cpp deleted file mode 100644 index d051418e81..0000000000 --- a/deps/recastnavigation/Recast/Recast.cpp +++ /dev/null @@ -1,423 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. 
-// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#include <float.h> -#define _USE_MATH_DEFINES -#include <math.h> -#include <string.h> -#include <stdlib.h> -#include <stdio.h> -#include <stdarg.h> -#include "Recast.h" -#include "RecastAlloc.h" -#include "RecastAssert.h" - -float rcSqrt(float x) -{ - return sqrtf(x); -} - - -void rcContext::log(const rcLogCategory category, const char* format, ...) -{ - if (!m_logEnabled) - return; - static const int MSG_SIZE = 512; - char msg[MSG_SIZE]; - va_list ap; - va_start(ap, format); - int len = vsnprintf(msg, MSG_SIZE, format, ap); - if (len >= MSG_SIZE) - { - len = MSG_SIZE-1; - msg[MSG_SIZE-1] = '\0'; - } - va_end(ap); - doLog(category, msg, len); -} - -rcHeightfield* rcAllocHeightfield() -{ - rcHeightfield* hf = (rcHeightfield*)rcAlloc(sizeof(rcHeightfield), RC_ALLOC_PERM); - memset(hf, 0, sizeof(rcHeightfield)); - return hf; -} - -void rcFreeHeightField(rcHeightfield* hf) -{ - if (!hf) return; - // Delete span array. - rcFree(hf->spans); - // Delete span pools. - while (hf->pools) - { - rcSpanPool* next = hf->pools->next; - rcFree(hf->pools); - hf->pools = next; - } - rcFree(hf); -} - -rcCompactHeightfield* rcAllocCompactHeightfield() -{ - rcCompactHeightfield* chf = (rcCompactHeightfield*)rcAlloc(sizeof(rcCompactHeightfield), RC_ALLOC_PERM); - memset(chf, 0, sizeof(rcCompactHeightfield)); - return chf; -} - -void rcFreeCompactHeightfield(rcCompactHeightfield* chf) -{ - if (!chf) return; - rcFree(chf->cells); - rcFree(chf->spans); - rcFree(chf->dist); - rcFree(chf->areas); - rcFree(chf); -} - -rcContourSet* rcAllocContourSet() -{ - rcContourSet* cset = (rcContourSet*)rcAlloc(sizeof(rcContourSet), RC_ALLOC_PERM); - memset(cset, 0, sizeof(rcContourSet)); - return cset; -} - -void rcFreeContourSet(rcContourSet* cset) -{ - if (!cset) return; - for (int i = 0; i < cset->nconts; ++i) - { - rcFree(cset->conts[i].verts); - rcFree(cset->conts[i].rverts); - } - rcFree(cset->conts); - rcFree(cset); -} - -rcPolyMesh* rcAllocPolyMesh() -{ - rcPolyMesh* pmesh = (rcPolyMesh*)rcAlloc(sizeof(rcPolyMesh), RC_ALLOC_PERM); - memset(pmesh, 0, sizeof(rcPolyMesh)); - return pmesh; -} - -void rcFreePolyMesh(rcPolyMesh* pmesh) -{ - if (!pmesh) return; - rcFree(pmesh->verts); - rcFree(pmesh->polys); - rcFree(pmesh->regs); - rcFree(pmesh->flags); - rcFree(pmesh->areas); - rcFree(pmesh); -} - -rcPolyMeshDetail* rcAllocPolyMeshDetail() -{ - rcPolyMeshDetail* dmesh = (rcPolyMeshDetail*)rcAlloc(sizeof(rcPolyMeshDetail), RC_ALLOC_PERM); - memset(dmesh, 0, sizeof(rcPolyMeshDetail)); - return dmesh; -} - -void rcFreePolyMeshDetail(rcPolyMeshDetail* dmesh) -{ - if (!dmesh) return; - rcFree(dmesh->meshes); - rcFree(dmesh->verts); - rcFree(dmesh->tris); - rcFree(dmesh); -} - - -void rcCalcBounds(const float* verts, int nv, float* bmin, float* bmax) -{ - // Calculate bounding box. 
- rcVcopy(bmin, verts); - rcVcopy(bmax, verts); - for (int i = 1; i < nv; ++i) - { - const float* v = &verts[i*3]; - rcVmin(bmin, v); - rcVmax(bmax, v); - } -} - -void rcCalcGridSize(const float* bmin, const float* bmax, float cs, int* w, int* h) -{ - *w = (int)((bmax[0] - bmin[0])/cs+0.5f); - *h = (int)((bmax[2] - bmin[2])/cs+0.5f); -} - -bool rcCreateHeightfield(rcContext* /*ctx*/, rcHeightfield& hf, int width, int height, - const float* bmin, const float* bmax, - float cs, float ch) -{ - // TODO: VC complains about unref formal variable, figure out a way to handle this better. -// rcAssert(ctx); - - hf.width = width; - hf.height = height; - rcVcopy(hf.bmin, bmin); - rcVcopy(hf.bmax, bmax); - hf.cs = cs; - hf.ch = ch; - hf.spans = (rcSpan**)rcAlloc(sizeof(rcSpan*)*hf.width*hf.height, RC_ALLOC_PERM); - if (!hf.spans) - return false; - memset(hf.spans, 0, sizeof(rcSpan*)*hf.width*hf.height); - return true; -} - -static void calcTriNormal(const float* v0, const float* v1, const float* v2, float* norm) -{ - float e0[3], e1[3]; - rcVsub(e0, v1, v0); - rcVsub(e1, v2, v0); - rcVcross(norm, e0, e1); - rcVnormalize(norm); -} - -void rcMarkWalkableTriangles(rcContext* /*ctx*/, const float walkableSlopeAngle, - const float* verts, int /*nv*/, - const int* tris, int nt, - unsigned char* areas) -{ - // TODO: VC complains about unref formal variable, figure out a way to handle this better. -// rcAssert(ctx); - - const float walkableThr = cosf(walkableSlopeAngle/180.0f*RC_PI); - - float norm[3]; - - for (int i = 0; i < nt; ++i) - { - const int* tri = &tris[i*3]; - calcTriNormal(&verts[tri[0]*3], &verts[tri[1]*3], &verts[tri[2]*3], norm); - // Check if the face is walkable. - if (norm[1] > walkableThr) - areas[i] = RC_WALKABLE_AREA; - } -} - -void rcClearUnwalkableTriangles(rcContext* /*ctx*/, const float walkableSlopeAngle, - const float* verts, int /*nv*/, - const int* tris, int nt, - unsigned char* areas) -{ - // TODO: VC complains about unref formal variable, figure out a way to handle this better. -// rcAssert(ctx); - - const float walkableThr = cosf(walkableSlopeAngle/180.0f*RC_PI); - - float norm[3]; - - for (int i = 0; i < nt; ++i) - { - const int* tri = &tris[i*3]; - calcTriNormal(&verts[tri[0]*3], &verts[tri[1]*3], &verts[tri[2]*3], norm); - // Check if the face is walkable. - if (norm[1] <= walkableThr) - areas[i] = RC_NULL_AREA; - } -} - -int rcGetHeightFieldSpanCount(rcContext* /*ctx*/, rcHeightfield& hf) -{ - // TODO: VC complains about unref formal variable, figure out a way to handle this better. -// rcAssert(ctx); - - const int w = hf.width; - const int h = hf.height; - int spanCount = 0; - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - for (rcSpan* s = hf.spans[x + y*w]; s; s = s->next) - { - if (s->area != RC_NULL_AREA) - spanCount++; - } - } - } - return spanCount; -} - -bool rcBuildCompactHeightfield(rcContext* ctx, const int walkableHeight, const int walkableClimb, - rcHeightfield& hf, rcCompactHeightfield& chf) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_BUILD_COMPACTHEIGHTFIELD); - - const int w = hf.width; - const int h = hf.height; - const int spanCount = rcGetHeightFieldSpanCount(ctx, hf); - - // Fill in header. 
- chf.width = w; - chf.height = h; - chf.spanCount = spanCount; - chf.walkableHeight = walkableHeight; - chf.walkableClimb = walkableClimb; - chf.maxRegions = 0; - rcVcopy(chf.bmin, hf.bmin); - rcVcopy(chf.bmax, hf.bmax); - chf.bmax[1] += walkableHeight*hf.ch; - chf.cs = hf.cs; - chf.ch = hf.ch; - chf.cells = (rcCompactCell*)rcAlloc(sizeof(rcCompactCell)*w*h, RC_ALLOC_PERM); - if (!chf.cells) - { - ctx->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Out of memory 'chf.cells' (%d)", w*h); - return false; - } - memset(chf.cells, 0, sizeof(rcCompactCell)*w*h); - chf.spans = (rcCompactSpan*)rcAlloc(sizeof(rcCompactSpan)*spanCount, RC_ALLOC_PERM); - if (!chf.spans) - { - ctx->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Out of memory 'chf.spans' (%d)", spanCount); - return false; - } - memset(chf.spans, 0, sizeof(rcCompactSpan)*spanCount); - chf.areas = (unsigned char*)rcAlloc(sizeof(unsigned char)*spanCount, RC_ALLOC_PERM); - if (!chf.areas) - { - ctx->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Out of memory 'chf.areas' (%d)", spanCount); - return false; - } - memset(chf.areas, RC_NULL_AREA, sizeof(unsigned char)*spanCount); - - const int MAX_HEIGHT = 0xffff; - - // Fill in cells and spans. - int idx = 0; - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcSpan* s = hf.spans[x + y*w]; - // If there are no spans at this cell, just leave the data to index=0, count=0. - if (!s) continue; - rcCompactCell& c = chf.cells[x+y*w]; - c.index = idx; - c.count = 0; - while (s) - { - if (s->area != RC_NULL_AREA) - { - const int bot = (int)s->smax; - const int top = s->next ? (int)s->next->smin : MAX_HEIGHT; - chf.spans[idx].y = (unsigned short)rcClamp(bot, 0, 0xffff); - chf.spans[idx].h = (unsigned char)rcClamp(top - bot, 0, 0xff); - chf.areas[idx] = s->area; - idx++; - c.count++; - } - s = s->next; - } - } - } - - // Find neighbour connections. - const int MAX_LAYERS = RC_NOT_CONNECTED-1; - int tooHighNeighbour = 0; - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - rcCompactSpan& s = chf.spans[i]; - - for (int dir = 0; dir < 4; ++dir) - { - rcSetCon(s, dir, RC_NOT_CONNECTED); - const int nx = x + rcGetDirOffsetX(dir); - const int ny = y + rcGetDirOffsetY(dir); - // First check that the neighbour cell is in bounds. - if (nx < 0 || ny < 0 || nx >= w || ny >= h) - continue; - - // Iterate over all neighbour spans and check if any of the is - // accessible from current cell. - const rcCompactCell& nc = chf.cells[nx+ny*w]; - for (int k = (int)nc.index, nk = (int)(nc.index+nc.count); k < nk; ++k) - { - const rcCompactSpan& ns = chf.spans[k]; - const int bot = rcMax(s.y, ns.y); - const int top = rcMin(s.y+s.h, ns.y+ns.h); - - // Check that the gap between the spans is walkable, - // and that the climb height between the gaps is not too high. - if ((top - bot) >= walkableHeight && rcAbs((int)ns.y - (int)s.y) <= walkableClimb) - { - // Mark direction as walkable. 
- const int idx = k - (int)nc.index; - if (idx < 0 || idx > MAX_LAYERS) - { - tooHighNeighbour = rcMax(tooHighNeighbour, idx); - continue; - } - rcSetCon(s, dir, idx); - break; - } - } - - } - } - } - } - - if (tooHighNeighbour > MAX_LAYERS) - { - ctx->log(RC_LOG_ERROR, "rcBuildCompactHeightfield: Heightfield has too many layers %d (max: %d)", - tooHighNeighbour, MAX_LAYERS); - } - - ctx->stopTimer(RC_TIMER_BUILD_COMPACTHEIGHTFIELD); - - return true; -} - -/* -static int getHeightfieldMemoryUsage(const rcHeightfield& hf) -{ - int size = 0; - size += sizeof(hf); - size += hf.width * hf.height * sizeof(rcSpan*); - - rcSpanPool* pool = hf.pools; - while (pool) - { - size += (sizeof(rcSpanPool) - sizeof(rcSpan)) + sizeof(rcSpan)*RC_SPANS_PER_POOL; - pool = pool->next; - } - return size; -} - -static int getCompactHeightFieldMemoryusage(const rcCompactHeightfield& chf) -{ - int size = 0; - size += sizeof(rcCompactHeightfield); - size += sizeof(rcCompactSpan) * chf.spanCount; - size += sizeof(rcCompactCell) * chf.width * chf.height; - return size; -} -*/
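The removed Recast.cpp implements the front half of the navmesh build: bounds and grid-size calculation, heightfield allocation, walkable-triangle classification, and compaction into rcCompactHeightfield. A rough sketch of how those pieces chain together, with placeholder cell sizes and agent parameters, and with the rasterization/filter steps (implemented in other removed files) only indicated by a comment:

```cpp
// Sketch of the early Recast build stages declared/implemented above. All
// numeric parameters are placeholders, not TrinityCore's actual settings.
#include "Recast.h"

bool buildCompactField(rcContext* ctx,
                       const float* verts, int nverts,
                       const int* tris, int ntris,
                       rcCompactHeightfield& chf)
{
    const float cs = 0.3f, ch = 0.2f;        // cell size / height (wu), placeholders
    const int walkableHeight = 10;           // agent height in cells (vx)
    const int walkableClimb = 4;             // max step height in cells (vx)
    const float walkableSlopeAngle = 45.0f;  // degrees

    float bmin[3], bmax[3];
    rcCalcBounds(verts, nverts, bmin, bmax);

    int width = 0, height = 0;
    rcCalcGridSize(bmin, bmax, cs, &width, &height);

    rcHeightfield* hf = rcAllocHeightfield();
    if (!hf || !rcCreateHeightfield(ctx, *hf, width, height, bmin, bmax, cs, ch))
        return false;

    // Classify triangles by slope; entries stay 0 (non-walkable) unless marked.
    unsigned char* areas = new unsigned char[ntris]();
    rcMarkWalkableTriangles(ctx, walkableSlopeAngle, verts, nverts, tris, ntris, areas);

    // ... rasterize the marked triangles into 'hf' and run the span filters here ...

    // Collapse the span heightfield into the compact form consumed by the
    // region/contour/polymesh stages.
    const bool ok = rcBuildCompactHeightfield(ctx, walkableHeight, walkableClimb, *hf, chf);

    delete[] areas;
    rcFreeHeightField(hf);
    return ok;
}
```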
\ No newline at end of file diff --git a/deps/recastnavigation/Recast/Recast.h b/deps/recastnavigation/Recast/Recast.h deleted file mode 100644 index 0e5f074248..0000000000 --- a/deps/recastnavigation/Recast/Recast.h +++ /dev/null @@ -1,688 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef RECAST_H -#define RECAST_H - -// Some math headers don't have PI defined. -static const float RC_PI = 3.14159265f; - -enum rcLogCategory -{ - RC_LOG_PROGRESS = 1, - RC_LOG_WARNING, - RC_LOG_ERROR, -}; - -enum rcTimerLabel -{ - RC_TIMER_TOTAL, - RC_TIMER_TEMP, - RC_TIMER_RASTERIZE_TRIANGLES, - RC_TIMER_BUILD_COMPACTHEIGHTFIELD, - RC_TIMER_BUILD_CONTOURS, - RC_TIMER_BUILD_CONTOURS_TRACE, - RC_TIMER_BUILD_CONTOURS_SIMPLIFY, - RC_TIMER_FILTER_BORDER, - RC_TIMER_FILTER_WALKABLE, - RC_TIMER_MEDIAN_AREA, - RC_TIMER_FILTER_LOW_OBSTACLES, - RC_TIMER_BUILD_POLYMESH, - RC_TIMER_MERGE_POLYMESH, - RC_TIMER_ERODE_AREA, - RC_TIMER_MARK_BOX_AREA, - RC_TIMER_MARK_CONVEXPOLY_AREA, - RC_TIMER_BUILD_DISTANCEFIELD, - RC_TIMER_BUILD_DISTANCEFIELD_DIST, - RC_TIMER_BUILD_DISTANCEFIELD_BLUR, - RC_TIMER_BUILD_REGIONS, - RC_TIMER_BUILD_REGIONS_WATERSHED, - RC_TIMER_BUILD_REGIONS_EXPAND, - RC_TIMER_BUILD_REGIONS_FLOOD, - RC_TIMER_BUILD_REGIONS_FILTER, - RC_TIMER_BUILD_POLYMESHDETAIL, - RC_TIMER_MERGE_POLYMESHDETAIL, - RC_MAX_TIMERS -}; - -// Build context provides several optional utilities needed for the build process, -// such as timing, logging, and build time collecting. -class rcContext -{ -public: - inline rcContext(bool state = true) : m_logEnabled(state), m_timerEnabled(state) {} - virtual ~rcContext() {} - - // Enables or disables logging. - inline void enableLog(bool state) { m_logEnabled = state; } - // Resets log. - inline void resetLog() { if (m_logEnabled) doResetLog(); } - // Logs a message. - void log(const rcLogCategory category, const char* format, ...); - - // Enables or disables timer. - inline void enableTimer(bool state) { m_timerEnabled = state; } - // Resets all timers. - inline void resetTimers() { if (m_timerEnabled) doResetTimers(); } - // Starts timer, used for performance timing. - inline void startTimer(const rcTimerLabel label) { if (m_timerEnabled) doStartTimer(label); } - // Stops timer, used for performance timing. - inline void stopTimer(const rcTimerLabel label) { if (m_timerEnabled) doStopTimer(label); } - // Returns time accumulated between timer start/stop. - inline int getAccumulatedTime(const rcTimerLabel label) const { return m_timerEnabled ? doGetAccumulatedTime(label) : -1; } - -protected: - // Virtual functions to override for custom implementations. 
- virtual void doResetLog() {} - virtual void doLog(const rcLogCategory /*category*/, const char* /*msg*/, const int /*len*/) {} - virtual void doResetTimers() {} - virtual void doStartTimer(const rcTimerLabel /*label*/) {} - virtual void doStopTimer(const rcTimerLabel /*label*/) {} - virtual int doGetAccumulatedTime(const rcTimerLabel /*label*/) const { return -1; } - - bool m_logEnabled; - bool m_timerEnabled; -}; - - -// The units of the parameters are specified in parenthesis as follows: -// (vx) voxels, (wu) world units -struct rcConfig -{ - int width, height; // Dimensions of the rasterized heightfield (vx) - int tileSize; // Width and Height of a tile (vx) - int borderSize; // Non-navigable Border around the heightfield (vx) - float cs, ch; // Grid cell size and height (wu) - float bmin[3], bmax[3]; // Grid bounds (wu) - float walkableSlopeAngle; // Maximum walkable slope angle in degrees. - int walkableHeight; // Minimum height where the agent can still walk (vx) - int walkableClimb; // Maximum height between grid cells the agent can climb (vx) - int walkableRadius; // Radius of the agent in cells (vx) - int maxEdgeLen; // Maximum contour edge length (vx) - float maxSimplificationError; // Maximum distance error from contour to cells (vx) - int minRegionArea; // Regions whose area is smaller than this threshold will be removed. (vx) - int mergeRegionArea; // Regions whose area is smaller than this threshold will be merged (vx) - int maxVertsPerPoly; // Max number of vertices per polygon - float detailSampleDist; // Detail mesh sample spacing. - float detailSampleMaxError; // Detail mesh simplification max sample error. -}; - -// Define number of bits in the above structure for smin/smax. -// The max height is used for clamping rasterized values. -static const int RC_SPAN_HEIGHT_BITS = 16; -static const int RC_SPAN_MAX_HEIGHT = (1<<RC_SPAN_HEIGHT_BITS)-1; - -// Heightfield span. -struct rcSpan -{ - unsigned int smin : 16; // Span min height. - unsigned int smax : 16; // Span max height. - unsigned char area; // Span area type. - rcSpan* next; // Next span in column. -}; - -// Number of spans allocated per pool. -static const int RC_SPANS_PER_POOL = 2048; - -// Memory pool used for quick span allocation. -struct rcSpanPool -{ - rcSpanPool* next; // Pointer to next pool. - rcSpan items[RC_SPANS_PER_POOL]; // Array of spans. -}; - -// Dynamic span-heightfield. -struct rcHeightfield -{ - int width, height; // Dimension of the heightfield. - float bmin[3], bmax[3]; // Bounding box of the heightfield - float cs, ch; // Cell size and height. - rcSpan** spans; // Heightfield of spans (width*height). - rcSpanPool* pools; // Linked list of span pools. - rcSpan* freelist; // Pointer to next free span. -}; - -rcHeightfield* rcAllocHeightfield(); -void rcFreeHeightField(rcHeightfield* hf); - - -struct rcCompactCell -{ - unsigned int index : 24; // Index to first span in column. - unsigned int count : 8; // Number of spans in this column. -}; - -struct rcCompactSpan -{ - unsigned short y; // Bottom coordinate of the span. - unsigned short reg; - unsigned int con : 24; // Connections to neighbour cells. - unsigned int h : 8; // Height of the span. -}; - -// Compact static heightfield. -struct rcCompactHeightfield -{ - int width, height; // Width and height of the heightfield. - int spanCount; // Number of spans in the heightfield. - int walkableHeight, walkableClimb; // Agent properties. - unsigned short maxDistance; // Maximum distance value stored in heightfield. 
- unsigned short maxRegions; // Maximum Region Id stored in heightfield. - float bmin[3], bmax[3]; // Bounding box of the heightfield. - float cs, ch; // Cell size and height. - rcCompactCell* cells; // Pointer to width*height cells. - rcCompactSpan* spans; // Pointer to spans. - unsigned short* dist; // Pointer to per span distance to border. - unsigned char* areas; // Pointer to per span area ID. -}; - -rcCompactHeightfield* rcAllocCompactHeightfield(); -void rcFreeCompactHeightfield(rcCompactHeightfield* chf); - - -struct rcContour -{ - int* verts; // Vertex coordinates, each vertex contains 4 components. - int nverts; // Number of vertices. - int* rverts; // Raw vertex coordinates, each vertex contains 4 components. - int nrverts; // Number of raw vertices. - unsigned short reg; // Region ID of the contour. - unsigned char area; // Area ID of the contour. -}; - -struct rcContourSet -{ - rcContour* conts; // Pointer to all contours. - int nconts; // Number of contours. - float bmin[3], bmax[3]; // Bounding box of the heightfield. - float cs, ch; // Cell size and height. -}; - -rcContourSet* rcAllocContourSet(); -void rcFreeContourSet(rcContourSet* cset); - - -// Polymesh stores a connected mesh of polygons. -// The polygons are stored in an array where each polygon takes -// 'nvp*2' elements. The first 'nvp' elements are indices to vertices -// and the second 'nvp' elements are indices to neighbour polygons. -// If a polygon has fewer than 'nvp' vertices, the remaining indices -// are set to RC_MESH_NULL_IDX. If a polygon edge does not have a neighbour -// the neighbour index is set to RC_MESH_NULL_IDX. -// Vertices can be transformed into world space as follows: -// x = bmin[0] + verts[i*3+0]*cs; -// y = bmin[1] + verts[i*3+1]*ch; -// z = bmin[2] + verts[i*3+2]*cs; -struct rcPolyMesh -{ - unsigned short* verts; // Vertices of the mesh, 3 elements per vertex. - unsigned short* polys; // Polygons of the mesh, nvp*2 elements per polygon. - unsigned short* regs; // Region ID of the polygons. - unsigned short* flags; // Per polygon flags. - unsigned char* areas; // Area ID of polygons. - int nverts; // Number of vertices. - int npolys; // Number of polygons. - int maxpolys; // Number of allocated polygons. - int nvp; // Max number of vertices per polygon. - float bmin[3], bmax[3]; // Bounding box of the mesh. - float cs, ch; // Cell size and height. -}; - -rcPolyMesh* rcAllocPolyMesh(); -void rcFreePolyMesh(rcPolyMesh* pmesh); - - -// Detail mesh generated from a rcPolyMesh. -// Each submesh represents a polygon in the polymesh and they are stored in -// exactly the same order. Each submesh is described as 4 values: -// base vertex, vertex count, base triangle, triangle count. That is, -// const unsigned char* t = &dmesh.tris[(tbase+i)*3]; and -// const float* v = &dmesh.verts[(vbase+t[j])*3]; -// If the input polygon has 'n' vertices, those vertices are first in the -// submesh vertex list. This allows compressing the mesh by not storing the -// first vertices and using the polymesh vertices instead. -// Max number of vertices per submesh is 127 and -// max number of triangles per submesh is 255. - -struct rcPolyMeshDetail -{ - unsigned int* meshes; // Pointer to all mesh data. - float* verts; // Pointer to all vertex data. - unsigned char* tris; // Pointer to all triangle data. - int nmeshes; // Number of meshes. - int nverts; // Number of total vertices. - int ntris; // Number of triangles.
-}; - -rcPolyMeshDetail* rcAllocPolyMeshDetail(); -void rcFreePolyMeshDetail(rcPolyMeshDetail* dmesh); - - -// If heightfield region ID has the following bit set, the region is on border area -// and excluded from many calculations. -static const unsigned short RC_BORDER_REG = 0x8000; - -// If contour region ID has the following bit set, the vertex will be later -// removed in order to match the segments and vertices at tile boundaries. -static const int RC_BORDER_VERTEX = 0x10000; - -static const int RC_AREA_BORDER = 0x20000; - -enum rcBuildContoursFlags -{ - RC_CONTOUR_TESS_WALL_EDGES = 0x01, // Tessellate wall edges - RC_CONTOUR_TESS_AREA_EDGES = 0x02, // Tessellate edges between areas. -}; - -// Mask used with contours to extract region id. -static const int RC_CONTOUR_REG_MASK = 0xffff; - -// Null index which is used with meshes to mark unset or invalid indices. -static const unsigned short RC_MESH_NULL_IDX = 0xffff; - -// Area ID that is considered empty. -static const unsigned char RC_NULL_AREA = 0; - -// Area ID that is considered generally walkable. -static const unsigned char RC_WALKABLE_AREA = 63; - -// Value returned by rcGetCon() if the direction is not connected. -static const int RC_NOT_CONNECTED = 0x3f; - -// Compact span neighbour helpers. -inline void rcSetCon(rcCompactSpan& s, int dir, int i) -{ - const unsigned int shift = (unsigned int)dir*6; - unsigned int con = s.con; - s.con = (con & ~(0x3f << shift)) | (((unsigned int)i & 0x3f) << shift); -} - -inline int rcGetCon(const rcCompactSpan& s, int dir) -{ - const unsigned int shift = (unsigned int)dir*6; - return (s.con >> shift) & 0x3f; -} - -inline int rcGetDirOffsetX(int dir) -{ - const int offset[4] = { -1, 0, 1, 0, }; - return offset[dir&0x03]; -} - -inline int rcGetDirOffsetY(int dir) -{ - const int offset[4] = { 0, 1, 0, -1 }; - return offset[dir&0x03]; -} - -// Common helper functions -template<class T> inline void rcSwap(T& a, T& b) { T t = a; a = b; b = t; } -template<class T> inline T rcMin(T a, T b) { return a < b ? a : b; } -template<class T> inline T rcMax(T a, T b) { return a > b ? a : b; } -template<class T> inline T rcAbs(T a) { return a < 0 ? -a : a; } -template<class T> inline T rcSqr(T a) { return a*a; } -template<class T> inline T rcClamp(T v, T mn, T mx) { return v < mn ? mn : (v > mx ? mx : v); } -float rcSqrt(float x); - -// Common vector helper functions. 
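// All helpers below operate on float[3] vectors laid out as (x, y, z), with y as
// the height axis, matching the bmin/bmax and cs/ch conventions used throughout
// this header.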
-inline void rcVcross(float* dest, const float* v1, const float* v2) -{ - dest[0] = v1[1]*v2[2] - v1[2]*v2[1]; - dest[1] = v1[2]*v2[0] - v1[0]*v2[2]; - dest[2] = v1[0]*v2[1] - v1[1]*v2[0]; -} - -inline float rcVdot(const float* v1, const float* v2) -{ - return v1[0]*v2[0] + v1[1]*v2[1] + v1[2]*v2[2]; -} - -inline void rcVmad(float* dest, const float* v1, const float* v2, const float s) -{ - dest[0] = v1[0]+v2[0]*s; - dest[1] = v1[1]+v2[1]*s; - dest[2] = v1[2]+v2[2]*s; -} - -inline void rcVadd(float* dest, const float* v1, const float* v2) -{ - dest[0] = v1[0]+v2[0]; - dest[1] = v1[1]+v2[1]; - dest[2] = v1[2]+v2[2]; -} - -inline void rcVsub(float* dest, const float* v1, const float* v2) -{ - dest[0] = v1[0]-v2[0]; - dest[1] = v1[1]-v2[1]; - dest[2] = v1[2]-v2[2]; -} - -inline void rcVmin(float* mn, const float* v) -{ - mn[0] = rcMin(mn[0], v[0]); - mn[1] = rcMin(mn[1], v[1]); - mn[2] = rcMin(mn[2], v[2]); -} - -inline void rcVmax(float* mx, const float* v) -{ - mx[0] = rcMax(mx[0], v[0]); - mx[1] = rcMax(mx[1], v[1]); - mx[2] = rcMax(mx[2], v[2]); -} - -inline void rcVcopy(float* dest, const float* v) -{ - dest[0] = v[0]; - dest[1] = v[1]; - dest[2] = v[2]; -} - -inline float rcVdist(const float* v1, const float* v2) -{ - float dx = v2[0] - v1[0]; - float dy = v2[1] - v1[1]; - float dz = v2[2] - v1[2]; - return rcSqrt(dx*dx + dy*dy + dz*dz); -} - -inline float rcVdistSqr(const float* v1, const float* v2) -{ - float dx = v2[0] - v1[0]; - float dy = v2[1] - v1[1]; - float dz = v2[2] - v1[2]; - return dx*dx + dy*dy + dz*dz; -} - -inline void rcVnormalize(float* v) -{ - float d = 1.0f / rcSqrt(rcSqr(v[0]) + rcSqr(v[1]) + rcSqr(v[2])); - v[0] *= d; - v[1] *= d; - v[2] *= d; -} - -inline bool rcVequal(const float* p0, const float* p1) -{ - static const float thr = rcSqr(1.0f/16384.0f); - const float d = rcVdistSqr(p0, p1); - return d < thr; -} - -// Calculated bounding box of array of vertices. -// Params: -// verts - (in) array of vertices -// nv - (in) vertex count -// bmin, bmax - (out) bounding box -void rcCalcBounds(const float* verts, int nv, float* bmin, float* bmax); - -// Calculates grid size based on bounding box and grid cell size. -// Params: -// bmin, bmax - (in) bounding box -// cs - (in) grid cell size -// w - (out) grid width -// h - (out) grid height -void rcCalcGridSize(const float* bmin, const float* bmax, float cs, int* w, int* h); - -// Creates and initializes new heightfield. -// Params: -// hf - (in/out) heightfield to initialize. -// width - (in) width of the heightfield. -// height - (in) height of the heightfield. -// bmin, bmax - (in) bounding box of the heightfield -// cs - (in) grid cell size -// ch - (in) grid cell height -bool rcCreateHeightfield(rcContext* ctx, rcHeightfield& hf, int width, int height, - const float* bmin, const float* bmax, - float cs, float ch); - -// Sets the RC_WALKABLE_AREA for every triangle whose slope is below -// the maximum walkable slope angle. -// Params: -// walkableSlopeAngle - (in) maximum slope angle in degrees. -// verts - (in) array of vertices -// nv - (in) vertex count -// tris - (in) array of triangle vertex indices -// nt - (in) triangle count -// areas - (out) array of triangle area types -void rcMarkWalkableTriangles(rcContext* ctx, const float walkableSlopeAngle, const float* verts, int nv, - const int* tris, int nt, unsigned char* areas); - -// Sets the RC_NULL_AREA for every triangle whose slope is steeper than -// the maximum walkable slope angle. -// Params: -// walkableSlopeAngle - (in) maximum slope angle in degrees. 
-// verts - (in) array of vertices -// nv - (in) vertex count -// tris - (in) array of triangle vertex indices -// nt - (in) triangle count -// areas - (out) array of triangle are types -void rcClearUnwalkableTriangles(rcContext* ctx, const float walkableSlopeAngle, const float* verts, int nv, - const int* tris, int nt, unsigned char* areas); - -// Adds span to heightfield. -// The span addition can set to favor flags. If the span is merged to -// another span and the new smax is within 'flagMergeThr' units away -// from the existing span the span flags are merged and stored. -// Params: -// solid - (in) heightfield where the spans is added to -// x,y - (in) location on the heightfield where the span is added -// smin,smax - (in) spans min/max height -// flags - (in) span flags (zero or WALKABLE) -// flagMergeThr - (in) merge threshold. -void rcAddSpan(rcContext* ctx, rcHeightfield& solid, const int x, const int y, - const unsigned short smin, const unsigned short smax, - const unsigned short area, const int flagMergeThr); - -// Rasterizes a triangle into heightfield spans. -// Params: -// v0,v1,v2 - (in) the vertices of the triangle. -// area - (in) area type of the triangle. -// solid - (in) heightfield where the triangle is rasterized -// flagMergeThr - (in) distance in voxel where walkable flag is favored over non-walkable. -void rcRasterizeTriangle(rcContext* ctx, const float* v0, const float* v1, const float* v2, - const unsigned char area, rcHeightfield& solid, - const int flagMergeThr = 1); - -// Rasterizes indexed triangle mesh into heightfield spans. -// Params: -// verts - (in) array of vertices -// nv - (in) vertex count -// tris - (in) array of triangle vertex indices -// area - (in) array of triangle area types. -// nt - (in) triangle count -// solid - (in) heightfield where the triangles are rasterized -// flagMergeThr - (in) distance in voxel where walkable flag is favored over non-walkable. -void rcRasterizeTriangles(rcContext* ctx, const float* verts, const int nv, - const int* tris, const unsigned char* areas, const int nt, - rcHeightfield& solid, const int flagMergeThr = 1); - -// Rasterizes indexed triangle mesh into heightfield spans. -// Params: -// verts - (in) array of vertices -// nv - (in) vertex count -// tris - (in) array of triangle vertex indices -// area - (in) array of triangle area types. -// nt - (in) triangle count -// solid - (in) heightfield where the triangles are rasterized -// flagMergeThr - (in) distance in voxel where walkable flag is favored over non-walkable. -void rcRasterizeTriangles(rcContext* ctx, const float* verts, const int nv, - const unsigned short* tris, const unsigned char* areas, const int nt, - rcHeightfield& solid, const int flagMergeThr = 1); - -// Rasterizes the triangles into heightfield spans. -// Params: -// verts - (in) array of vertices -// area - (in) array of triangle area types. -// nt - (in) triangle count -// solid - (in) heightfield where the triangles are rasterized -void rcRasterizeTriangles(rcContext* ctx, const float* verts, const unsigned char* areas, const int nt, - rcHeightfield& solid, const int flagMergeThr = 1); - -// Marks non-walkable low obstacles as walkable if they are closer than walkableClimb -// from a walkable surface. Applying this filter allows to step over low hanging -// low obstacles. 
-// Params: -// walkableHeight - (in) minimum height where the agent can still walk -// solid - (in/out) heightfield describing the solid space -// TODO: Missuses ledge flag, must be called before rcFilterLedgeSpans! -void rcFilterLowHangingWalkableObstacles(rcContext* ctx, const int walkableClimb, rcHeightfield& solid); - -// Removes WALKABLE flag from all spans that are at ledges. This filtering -// removes possible overestimation of the conservative voxelization so that -// the resulting mesh will not have regions hanging in air over ledges. -// Params: -// walkableHeight - (in) minimum height where the agent can still walk -// walkableClimb - (in) maximum height between grid cells the agent can climb -// solid - (in/out) heightfield describing the solid space -void rcFilterLedgeSpans(rcContext* ctx, const int walkableHeight, - const int walkableClimb, rcHeightfield& solid); - -// Removes WALKABLE flag from all spans which have smaller than -// 'walkableHeight' clearance above them. -// Params: -// walkableHeight - (in) minimum height where the agent can still walk -// solid - (in/out) heightfield describing the solid space -void rcFilterWalkableLowHeightSpans(rcContext* ctx, int walkableHeight, rcHeightfield& solid); - -// Returns number of spans contained in a heightfield. -// Params: -// hf - (in) heightfield to be compacted -// Returns number of spans. -int rcGetHeightFieldSpanCount(rcContext* ctx, rcHeightfield& hf); - -// Builds compact representation of the heightfield. -// Params: -// walkableHeight - (in) minimum height where the agent can still walk -// walkableClimb - (in) maximum height between grid cells the agent can climb -// flags - (in) require flags for a cell to be included in the compact heightfield. -// hf - (in) heightfield to be compacted -// chf - (out) compact heightfield representing the open space. -// Returns false if operation ran out of memory. -bool rcBuildCompactHeightfield(rcContext* ctx, const int walkableHeight, const int walkableClimb, - rcHeightfield& hf, rcCompactHeightfield& chf); - -// Erodes walkable area. -// Params: -// radius - (in) radius of erosion (max 255). -// chf - (in/out) compact heightfield to erode. -// Returns false if operation ran out of memory. -bool rcErodeWalkableArea(rcContext* ctx, int radius, rcCompactHeightfield& chf); - -// Applies median filter to walkable area types, removing noise. -// Params: -// chf - (in/out) compact heightfield to erode. -// Returns false if operation ran out of memory. -bool rcMedianFilterWalkableArea(rcContext* ctx, rcCompactHeightfield& chf); - -// Marks the area of the convex polygon into the area type of the compact heightfield. -// Params: -// bmin/bmax - (in) bounds of the axis aligned box. -// areaId - (in) area ID to mark. -// chf - (in/out) compact heightfield to mark. -void rcMarkBoxArea(rcContext* ctx, const float* bmin, const float* bmax, unsigned char areaId, - rcCompactHeightfield& chf); - -// Marks the area of the convex polygon into the area type of the compact heightfield. -// Params: -// verts - (in) vertices of the convex polygon. -// nverts - (in) number of vertices in the polygon. -// hmin/hmax - (in) min and max height of the polygon. -// areaId - (in) area ID to mark. -// chf - (in/out) compact heightfield to mark. -void rcMarkConvexPolyArea(rcContext* ctx, const float* verts, const int nverts, - const float hmin, const float hmax, unsigned char areaId, - rcCompactHeightfield& chf); - -// Builds distance field and stores it into the combat heightfield. 
-// Params: -// chf - (in/out) compact heightfield representing the open space. -// Returns false if operation ran out of memory. -bool rcBuildDistanceField(rcContext* ctx, rcCompactHeightfield& chf); - -// Divides the walkable heighfied into simple regions using watershed partitioning. -// Each region has only one contour and no overlaps. -// The regions are stored in the compact heightfield 'reg' field. -// The process sometimes creates small regions. If the area of a regions is -// smaller than 'mergeRegionArea' then the region will be merged with a neighbour -// region if possible. If multiple regions form an area which is smaller than -// 'minRegionArea' all the regions belonging to that area will be removed. -// Here area means the count of spans in an area. -// Params: -// chf - (in/out) compact heightfield representing the open space. -// minRegionArea - (in) the smallest allowed region area. -// maxMergeRegionArea - (in) the largest allowed region area which can be merged. -// Returns false if operation ran out of memory. -bool rcBuildRegions(rcContext* ctx, rcCompactHeightfield& chf, - const int borderSize, const int minRegionArea, const int mergeRegionArea); - -// Divides the walkable heighfied into simple regions using simple monotone partitioning. -// Each region has only one contour and no overlaps. -// The regions are stored in the compact heightfield 'reg' field. -// The process sometimes creates small regions. If the area of a regions is -// smaller than 'mergeRegionArea' then the region will be merged with a neighbour -// region if possible. If multiple regions form an area which is smaller than -// 'minRegionArea' all the regions belonging to that area will be removed. -// Here area means the count of spans in an area. -// Params: -// chf - (in/out) compact heightfield representing the open space. -// minRegionArea - (in) the smallest allowed regions size. -// maxMergeRegionArea - (in) the largest allowed regions size which can be merged. -// Returns false if operation ran out of memory. -bool rcBuildRegionsMonotone(rcContext* ctx, rcCompactHeightfield& chf, - const int borderSize, const int minRegionArea, const int mergeRegionArea); - -// Builds simplified contours from the regions outlines. -// Params: -// chf - (in) compact heightfield which has regions set. -// maxError - (in) maximum allowed distance between simplified contour and cells. -// maxEdgeLen - (in) maximum allowed contour edge length in cells. -// cset - (out) Resulting contour set. -// flags - (in) build flags, see rcBuildContoursFlags. -// Returns false if operation ran out of memory. -bool rcBuildContours(rcContext* ctx, rcCompactHeightfield& chf, - const float maxError, const int maxEdgeLen, - rcContourSet& cset, const int flags = RC_CONTOUR_TESS_WALL_EDGES); - -// Builds connected convex polygon mesh from contour polygons. -// Params: -// cset - (in) contour set. -// nvp - (in) maximum number of vertices per polygon. -// mesh - (out) poly mesh. -// Returns false if operation ran out of memory. -bool rcBuildPolyMesh(rcContext* ctx, rcContourSet& cset, int nvp, rcPolyMesh& mesh); - -bool rcMergePolyMeshes(rcContext* ctx, rcPolyMesh** meshes, const int nmeshes, rcPolyMesh& mesh); - -// Builds detail triangle mesh for each polygon in the poly mesh. -// Params: -// mesh - (in) poly mesh to detail. -// chf - (in) compact height field, used to query height for new vertices. -// sampleDist - (in) spacing between height samples used to generate more detail into mesh. 
-// sampleMaxError - (in) maximum allowed distance between simplified detail mesh and height sample. -// pmdtl - (out) detail mesh. -// Returns false if operation ran out of memory. -bool rcBuildPolyMeshDetail(rcContext* ctx, const rcPolyMesh& mesh, const rcCompactHeightfield& chf, - const float sampleDist, const float sampleMaxError, - rcPolyMeshDetail& dmesh); - -bool rcMergePolyMeshDetails(rcContext* ctx, rcPolyMeshDetail** meshes, const int nmeshes, rcPolyMeshDetail& mesh); - - -#endif // RECAST_H diff --git a/deps/recastnavigation/Recast/RecastAlloc.cpp b/deps/recastnavigation/Recast/RecastAlloc.cpp deleted file mode 100644 index 2c7396a1bf..0000000000 --- a/deps/recastnavigation/Recast/RecastAlloc.cpp +++ /dev/null @@ -1,67 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#include <stdlib.h> -#include <string.h> -#include "RecastAlloc.h" - -static void *rcAllocDefault(int size, rcAllocHint) -{ - return malloc(size); -} - -static void rcFreeDefault(void *ptr) -{ - free(ptr); -} - -static rcAllocFunc* sRecastAllocFunc = rcAllocDefault; -static rcFreeFunc* sRecastFreeFunc = rcFreeDefault; - -void rcAllocSetCustom(rcAllocFunc *allocFunc, rcFreeFunc *freeFunc) -{ - sRecastAllocFunc = allocFunc ? allocFunc : rcAllocDefault; - sRecastFreeFunc = freeFunc ? freeFunc : rcFreeDefault; -} - -void* rcAlloc(int size, rcAllocHint hint) -{ - return sRecastAllocFunc(size, hint); -} - -void rcFree(void* ptr) -{ - if (ptr) - sRecastFreeFunc(ptr); -} - - -void rcIntArray::resize(int n) -{ - if (n > m_cap) - { - if (!m_cap) m_cap = n; - while (m_cap < n) m_cap *= 2; - int* newData = (int*)rcAlloc(m_cap*sizeof(int), RC_ALLOC_TEMP); - if (m_size && newData) memcpy(newData, m_data, m_size*sizeof(int)); - rcFree(m_data); - m_data = newData; - } - m_size = n; -} - diff --git a/deps/recastnavigation/Recast/RecastAlloc.h b/deps/recastnavigation/Recast/RecastAlloc.h deleted file mode 100644 index 9a316374a7..0000000000 --- a/deps/recastnavigation/Recast/RecastAlloc.h +++ /dev/null @@ -1,69 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. 
If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef RECASTALLOC_H -#define RECASTALLOC_H - -enum rcAllocHint -{ - RC_ALLOC_PERM, // Memory persist after a function call. - RC_ALLOC_TEMP // Memory used temporarily within a function. -}; - -typedef void* (rcAllocFunc)(int size, rcAllocHint hint); -typedef void (rcFreeFunc)(void* ptr); - -void rcAllocSetCustom(rcAllocFunc *allocFunc, rcFreeFunc *freeFunc); - -void* rcAlloc(int size, rcAllocHint hint); -void rcFree(void* ptr); - - - -// Simple dynamic array ints. -class rcIntArray -{ - int* m_data; - int m_size, m_cap; - inline rcIntArray(const rcIntArray&); - inline rcIntArray& operator=(const rcIntArray&); -public: - inline rcIntArray() : m_data(0), m_size(0), m_cap(0) {} - inline rcIntArray(int n) : m_data(0), m_size(0), m_cap(0) { resize(n); } - inline ~rcIntArray() { rcFree(m_data); } - void resize(int n); - inline void push(int item) { resize(m_size+1); m_data[m_size-1] = item; } - inline int pop() { if (m_size > 0) m_size--; return m_data[m_size]; } - inline const int& operator[](int i) const { return m_data[i]; } - inline int& operator[](int i) { return m_data[i]; } - inline int size() const { return m_size; } -}; - -// Simple internal helper class to delete array in scope -template<class T> class rcScopedDelete -{ - T* ptr; - inline T* operator=(T* p); -public: - inline rcScopedDelete() : ptr(0) {} - inline rcScopedDelete(T* p) : ptr(p) {} - inline ~rcScopedDelete() { rcFree(ptr); } - inline operator T*() { return ptr; } -}; - -#endif diff --git a/deps/recastnavigation/Recast/RecastArea.cpp b/deps/recastnavigation/Recast/RecastArea.cpp deleted file mode 100644 index c18277b878..0000000000 --- a/deps/recastnavigation/Recast/RecastArea.cpp +++ /dev/null @@ -1,416 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. 
-// - -#include <float.h> -#define _USE_MATH_DEFINES -#include <math.h> -#include <string.h> -#include <stdlib.h> -#include <stdio.h> -#include "Recast.h" -#include "RecastAlloc.h" -#include "RecastAssert.h" - - -bool rcErodeWalkableArea(rcContext* ctx, int radius, rcCompactHeightfield& chf) -{ - rcAssert(ctx); - - const int w = chf.width; - const int h = chf.height; - - ctx->startTimer(RC_TIMER_ERODE_AREA); - - unsigned char* dist = (unsigned char*)rcAlloc(sizeof(unsigned char)*chf.spanCount, RC_ALLOC_TEMP); - if (!dist) - { - ctx->log(RC_LOG_ERROR, "erodeWalkableArea: Out of memory 'dist' (%d).", chf.spanCount); - return false; - } - - // Init distance. - memset(dist, 0xff, sizeof(unsigned char)*chf.spanCount); - - // Mark boundary cells. - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - if (chf.areas[i] != RC_NULL_AREA) - { - const rcCompactSpan& s = chf.spans[i]; - int nc = 0; - for (int dir = 0; dir < 4; ++dir) - { - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - nc++; - } - // At least one missing neighbour. - if (nc != 4) - dist[i] = 0; - } - } - } - } - - unsigned char nd; - - // Pass 1 - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - const rcCompactSpan& s = chf.spans[i]; - - if (rcGetCon(s, 0) != RC_NOT_CONNECTED) - { - // (-1,0) - const int ax = x + rcGetDirOffsetX(0); - const int ay = y + rcGetDirOffsetY(0); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 0); - const rcCompactSpan& as = chf.spans[ai]; - nd = (unsigned char)rcMin((int)dist[ai]+2, 255); - if (nd < dist[i]) - dist[i] = nd; - - // (-1,-1) - if (rcGetCon(as, 3) != RC_NOT_CONNECTED) - { - const int aax = ax + rcGetDirOffsetX(3); - const int aay = ay + rcGetDirOffsetY(3); - const int aai = (int)chf.cells[aax+aay*w].index + rcGetCon(as, 3); - nd = (unsigned char)rcMin((int)dist[aai]+3, 255); - if (nd < dist[i]) - dist[i] = nd; - } - } - if (rcGetCon(s, 3) != RC_NOT_CONNECTED) - { - // (0,-1) - const int ax = x + rcGetDirOffsetX(3); - const int ay = y + rcGetDirOffsetY(3); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 3); - const rcCompactSpan& as = chf.spans[ai]; - nd = (unsigned char)rcMin((int)dist[ai]+2, 255); - if (nd < dist[i]) - dist[i] = nd; - - // (1,-1) - if (rcGetCon(as, 2) != RC_NOT_CONNECTED) - { - const int aax = ax + rcGetDirOffsetX(2); - const int aay = ay + rcGetDirOffsetY(2); - const int aai = (int)chf.cells[aax+aay*w].index + rcGetCon(as, 2); - nd = (unsigned char)rcMin((int)dist[aai]+3, 255); - if (nd < dist[i]) - dist[i] = nd; - } - } - } - } - } - - // Pass 2 - for (int y = h-1; y >= 0; --y) - { - for (int x = w-1; x >= 0; --x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - const rcCompactSpan& s = chf.spans[i]; - - if (rcGetCon(s, 2) != RC_NOT_CONNECTED) - { - // (1,0) - const int ax = x + rcGetDirOffsetX(2); - const int ay = y + rcGetDirOffsetY(2); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 2); - const rcCompactSpan& as = chf.spans[ai]; - nd = (unsigned char)rcMin((int)dist[ai]+2, 255); - if (nd < dist[i]) - dist[i] = nd; - - // (1,1) - if (rcGetCon(as, 1) != RC_NOT_CONNECTED) - { - const int aax = ax + rcGetDirOffsetX(1); - const int aay = ay + rcGetDirOffsetY(1); - const int aai = 
(int)chf.cells[aax+aay*w].index + rcGetCon(as, 1); - nd = (unsigned char)rcMin((int)dist[aai]+3, 255); - if (nd < dist[i]) - dist[i] = nd; - } - } - if (rcGetCon(s, 1) != RC_NOT_CONNECTED) - { - // (0,1) - const int ax = x + rcGetDirOffsetX(1); - const int ay = y + rcGetDirOffsetY(1); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 1); - const rcCompactSpan& as = chf.spans[ai]; - nd = (unsigned char)rcMin((int)dist[ai]+2, 255); - if (nd < dist[i]) - dist[i] = nd; - - // (-1,1) - if (rcGetCon(as, 0) != RC_NOT_CONNECTED) - { - const int aax = ax + rcGetDirOffsetX(0); - const int aay = ay + rcGetDirOffsetY(0); - const int aai = (int)chf.cells[aax+aay*w].index + rcGetCon(as, 0); - nd = (unsigned char)rcMin((int)dist[aai]+3, 255); - if (nd < dist[i]) - dist[i] = nd; - } - } - } - } - } - - const unsigned char thr = (unsigned char)(radius*2); - for (int i = 0; i < chf.spanCount; ++i) - if (dist[i] < thr) - chf.areas[i] = RC_NULL_AREA; - - rcFree(dist); - - ctx->stopTimer(RC_TIMER_ERODE_AREA); - - return true; -} - -static void insertSort(unsigned char* a, const int n) -{ - int i, j; - for (i = 1; i < n; i++) - { - const unsigned char value = a[i]; - for (j = i - 1; j >= 0 && a[j] > value; j--) - a[j+1] = a[j]; - a[j+1] = value; - } -} - - -bool rcMedianFilterWalkableArea(rcContext* ctx, rcCompactHeightfield& chf) -{ - rcAssert(ctx); - - const int w = chf.width; - const int h = chf.height; - - ctx->startTimer(RC_TIMER_MEDIAN_AREA); - - unsigned char* areas = (unsigned char*)rcAlloc(sizeof(unsigned char)*chf.spanCount, RC_ALLOC_TEMP); - if (!areas) - { - ctx->log(RC_LOG_ERROR, "medianFilterWalkableArea: Out of memory 'areas' (%d).", chf.spanCount); - return false; - } - - // Init distance. - memset(areas, 0xff, sizeof(unsigned char)*chf.spanCount); - - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - const rcCompactSpan& s = chf.spans[i]; - if (chf.areas[i] == RC_NULL_AREA) - { - areas[i] = chf.areas[i]; - continue; - } - - unsigned char nei[9]; - for (int j = 0; j < 9; ++j) - nei[j] = chf.areas[i]; - - for (int dir = 0; dir < 4; ++dir) - { - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir); - if (chf.areas[ai] != RC_NULL_AREA) - nei[dir*2+0] = chf.areas[ai]; - - const rcCompactSpan& as = chf.spans[ai]; - const int dir2 = (dir+1) & 0x3; - if (rcGetCon(as, dir2) != RC_NOT_CONNECTED) - { - const int ax2 = ax + rcGetDirOffsetX(dir2); - const int ay2 = ay + rcGetDirOffsetY(dir2); - const int ai2 = (int)chf.cells[ax2+ay2*w].index + rcGetCon(as, dir2); - if (chf.areas[ai2] != RC_NULL_AREA) - nei[dir*2+1] = chf.areas[ai2]; - } - } - } - insertSort(nei, 9); - areas[i] = nei[4]; - } - } - } - - memcpy(chf.areas, areas, sizeof(unsigned char)*chf.spanCount); - - rcFree(areas); - - ctx->stopTimer(RC_TIMER_MEDIAN_AREA); - - return true; -} - -void rcMarkBoxArea(rcContext* ctx, const float* bmin, const float* bmax, unsigned char areaId, - rcCompactHeightfield& chf) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_MARK_BOX_AREA); - - int minx = (int)((bmin[0]-chf.bmin[0])/chf.cs); - int miny = (int)((bmin[1]-chf.bmin[1])/chf.ch); - int minz = (int)((bmin[2]-chf.bmin[2])/chf.cs); - int maxx = (int)((bmax[0]-chf.bmin[0])/chf.cs); - int maxy = (int)((bmax[1]-chf.bmin[1])/chf.ch); - int maxz = 
(int)((bmax[2]-chf.bmin[2])/chf.cs); - - if (maxx < 0) return; - if (minx >= chf.width) return; - if (maxz < 0) return; - if (minz >= chf.height) return; - - if (minx < 0) minx = 0; - if (maxx >= chf.width) maxx = chf.width-1; - if (minz < 0) minz = 0; - if (maxz >= chf.height) maxz = chf.height-1; - - for (int z = minz; z <= maxz; ++z) - { - for (int x = minx; x <= maxx; ++x) - { - const rcCompactCell& c = chf.cells[x+z*chf.width]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - rcCompactSpan& s = chf.spans[i]; - if ((int)s.y >= miny && (int)s.y <= maxy) - { - if (chf.areas[i] != RC_NULL_AREA) - chf.areas[i] = areaId; - } - } - } - } - - ctx->stopTimer(RC_TIMER_MARK_BOX_AREA); - -} - - -static int pointInPoly(int nvert, const float* verts, const float* p) -{ - int i, j, c = 0; - for (i = 0, j = nvert-1; i < nvert; j = i++) - { - const float* vi = &verts[i*3]; - const float* vj = &verts[j*3]; - if (((vi[2] > p[2]) != (vj[2] > p[2])) && - (p[0] < (vj[0]-vi[0]) * (p[2]-vi[2]) / (vj[2]-vi[2]) + vi[0]) ) - c = !c; - } - return c; -} - -void rcMarkConvexPolyArea(rcContext* ctx, const float* verts, const int nverts, - const float hmin, const float hmax, unsigned char areaId, - rcCompactHeightfield& chf) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_MARK_CONVEXPOLY_AREA); - - float bmin[3], bmax[3]; - rcVcopy(bmin, verts); - rcVcopy(bmax, verts); - for (int i = 1; i < nverts; ++i) - { - rcVmin(bmin, &verts[i*3]); - rcVmax(bmax, &verts[i*3]); - } - bmin[1] = hmin; - bmax[1] = hmax; - - int minx = (int)((bmin[0]-chf.bmin[0])/chf.cs); - int miny = (int)((bmin[1]-chf.bmin[1])/chf.ch); - int minz = (int)((bmin[2]-chf.bmin[2])/chf.cs); - int maxx = (int)((bmax[0]-chf.bmin[0])/chf.cs); - int maxy = (int)((bmax[1]-chf.bmin[1])/chf.ch); - int maxz = (int)((bmax[2]-chf.bmin[2])/chf.cs); - - if (maxx < 0) return; - if (minx >= chf.width) return; - if (maxz < 0) return; - if (minz >= chf.height) return; - - if (minx < 0) minx = 0; - if (maxx >= chf.width) maxx = chf.width-1; - if (minz < 0) minz = 0; - if (maxz >= chf.height) maxz = chf.height-1; - - - // TODO: Optimize. - for (int z = minz; z <= maxz; ++z) - { - for (int x = minx; x <= maxx; ++x) - { - const rcCompactCell& c = chf.cells[x+z*chf.width]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - rcCompactSpan& s = chf.spans[i]; - if (chf.areas[i] == RC_NULL_AREA) - continue; - if ((int)s.y >= miny && (int)s.y <= maxy) - { - float p[3]; - p[0] = chf.bmin[0] + (x+0.5f)*chf.cs; - p[1] = 0; - p[2] = chf.bmin[2] + (z+0.5f)*chf.cs; - - if (pointInPoly(nverts, verts, p)) - { - chf.areas[i] = areaId; - } - } - } - } - } - - ctx->stopTimer(RC_TIMER_MARK_CONVEXPOLY_AREA); -} diff --git a/deps/recastnavigation/Recast/RecastAssert.h b/deps/recastnavigation/Recast/RecastAssert.h deleted file mode 100644 index b58b8fcd28..0000000000 --- a/deps/recastnavigation/Recast/RecastAssert.h +++ /dev/null @@ -1,33 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. 
If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#ifndef RECASTASSERT_H -#define RECASTASSERT_H - -// Note: This header file's only purpose is to include define assert. -// Feel free to change the file and include your own implementation instead. - -#ifdef NDEBUG -// From http://cnicholson.net/2009/02/stupid-c-tricks-adventures-in-assert/ -# define rcAssert(x) do { (void)sizeof(x); } while(__LINE__==-1,false) -#else -# include <assert.h> -# define rcAssert assert -#endif - -#endif // RECASTASSERT_H diff --git a/deps/recastnavigation/Recast/RecastContour.cpp b/deps/recastnavigation/Recast/RecastContour.cpp deleted file mode 100644 index 4ba8deac89..0000000000 --- a/deps/recastnavigation/Recast/RecastContour.cpp +++ /dev/null @@ -1,802 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#define _USE_MATH_DEFINES -#include <math.h> -#include <string.h> -#include <stdio.h> -#include "Recast.h" -#include "RecastAlloc.h" -#include "RecastAssert.h" - - -static int getCornerHeight(int x, int y, int i, int dir, - const rcCompactHeightfield& chf, - bool& isBorderVertex) -{ - const rcCompactSpan& s = chf.spans[i]; - int ch = (int)s.y; - int dirp = (dir+1) & 0x3; - - unsigned int regs[4] = {0,0,0,0}; - - // Combine region and area codes in order to prevent - // border vertices which are in between two areas to be removed. 
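// Each regs[] entry below packs the span's region id into the low 16 bits and its
// area id into the upper bits, so region, border flag and area can all be compared
// with plain integer operations on the four corner cells.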
- regs[0] = chf.spans[i].reg | (chf.areas[i] << 16); - - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(s, dir); - const rcCompactSpan& as = chf.spans[ai]; - ch = rcMax(ch, (int)as.y); - regs[1] = chf.spans[ai].reg | (chf.areas[ai] << 16); - if (rcGetCon(as, dirp) != RC_NOT_CONNECTED) - { - const int ax2 = ax + rcGetDirOffsetX(dirp); - const int ay2 = ay + rcGetDirOffsetY(dirp); - const int ai2 = (int)chf.cells[ax2+ay2*chf.width].index + rcGetCon(as, dirp); - const rcCompactSpan& as2 = chf.spans[ai2]; - ch = rcMax(ch, (int)as2.y); - regs[2] = chf.spans[ai2].reg | (chf.areas[ai2] << 16); - } - } - if (rcGetCon(s, dirp) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dirp); - const int ay = y + rcGetDirOffsetY(dirp); - const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(s, dirp); - const rcCompactSpan& as = chf.spans[ai]; - ch = rcMax(ch, (int)as.y); - regs[3] = chf.spans[ai].reg | (chf.areas[ai] << 16); - if (rcGetCon(as, dir) != RC_NOT_CONNECTED) - { - const int ax2 = ax + rcGetDirOffsetX(dir); - const int ay2 = ay + rcGetDirOffsetY(dir); - const int ai2 = (int)chf.cells[ax2+ay2*chf.width].index + rcGetCon(as, dir); - const rcCompactSpan& as2 = chf.spans[ai2]; - ch = rcMax(ch, (int)as2.y); - regs[2] = chf.spans[ai2].reg | (chf.areas[ai2] << 16); - } - } - - // Check if the vertex is special edge vertex, these vertices will be removed later. - for (int j = 0; j < 4; ++j) - { - const int a = j; - const int b = (j+1) & 0x3; - const int c = (j+2) & 0x3; - const int d = (j+3) & 0x3; - - // The vertex is a border vertex there are two same exterior cells in a row, - // followed by two interior cells and none of the regions are out of bounds. 
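// The loop below checks that pattern from all four possible starting corners
// (a,b,c,d are the four cells around the vertex, rotated by j), so detection does
// not depend on which direction the contour walk reaches the corner from.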
- const bool twoSameExts = (regs[a] & regs[b] & RC_BORDER_REG) != 0 && regs[a] == regs[b]; - const bool twoInts = ((regs[c] | regs[d]) & RC_BORDER_REG) == 0; - const bool intsSameArea = (regs[c]>>16) == (regs[d]>>16); - const bool noZeros = regs[a] != 0 && regs[b] != 0 && regs[c] != 0 && regs[d] != 0; - if (twoSameExts && twoInts && intsSameArea && noZeros) - { - isBorderVertex = true; - break; - } - } - - return ch; -} - -static void walkContour(int x, int y, int i, - rcCompactHeightfield& chf, - unsigned char* flags, rcIntArray& points) -{ - // Choose the first non-connected edge - unsigned char dir = 0; - while ((flags[i] & (1 << dir)) == 0) - dir++; - - unsigned char startDir = dir; - int starti = i; - - const unsigned char area = chf.areas[i]; - - int iter = 0; - while (++iter < 40000) - { - if (flags[i] & (1 << dir)) - { - // Choose the edge corner - bool isBorderVertex = false; - bool isAreaBorder = false; - int px = x; - int py = getCornerHeight(x, y, i, dir, chf, isBorderVertex); - int pz = y; - switch(dir) - { - case 0: pz++; break; - case 1: px++; pz++; break; - case 2: px++; break; - } - int r = 0; - const rcCompactSpan& s = chf.spans[i]; - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(s, dir); - r = (int)chf.spans[ai].reg; - if (area != chf.areas[ai]) - isAreaBorder = true; - } - if (isBorderVertex) - r |= RC_BORDER_VERTEX; - if (isAreaBorder) - r |= RC_AREA_BORDER; - points.push(px); - points.push(py); - points.push(pz); - points.push(r); - - flags[i] &= ~(1 << dir); // Remove visited edges - dir = (dir+1) & 0x3; // Rotate CW - } - else - { - int ni = -1; - const int nx = x + rcGetDirOffsetX(dir); - const int ny = y + rcGetDirOffsetY(dir); - const rcCompactSpan& s = chf.spans[i]; - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const rcCompactCell& nc = chf.cells[nx+ny*chf.width]; - ni = (int)nc.index + rcGetCon(s, dir); - } - if (ni == -1) - { - // Should not happen. - return; - } - x = nx; - y = ny; - i = ni; - dir = (dir+3) & 0x3; // Rotate CCW - } - - if (starti == i && startDir == dir) - { - break; - } - } -} - -static float distancePtSeg(const int x, const int z, - const int px, const int pz, - const int qx, const int qz) -{ -/* float pqx = (float)(qx - px); - float pqy = (float)(qy - py); - float pqz = (float)(qz - pz); - float dx = (float)(x - px); - float dy = (float)(y - py); - float dz = (float)(z - pz); - float d = pqx*pqx + pqy*pqy + pqz*pqz; - float t = pqx*dx + pqy*dy + pqz*dz; - if (d > 0) - t /= d; - if (t < 0) - t = 0; - else if (t > 1) - t = 1; - - dx = px + t*pqx - x; - dy = py + t*pqy - y; - dz = pz + t*pqz - z; - - return dx*dx + dy*dy + dz*dz;*/ - - float pqx = (float)(qx - px); - float pqz = (float)(qz - pz); - float dx = (float)(x - px); - float dz = (float)(z - pz); - float d = pqx*pqx + pqz*pqz; - float t = pqx*dx + pqz*dz; - if (d > 0) - t /= d; - if (t < 0) - t = 0; - else if (t > 1) - t = 1; - - dx = px + t*pqx - x; - dz = pz + t*pqz - z; - - return dx*dx + dz*dz; -} - -static void simplifyContour(rcIntArray& points, rcIntArray& simplified, - const float maxError, const int maxEdgeLen, const int buildFlags) -{ - // Add initial points. - bool hasConnections = false; - for (int i = 0; i < points.size(); i += 4) - { - if ((points[i+3] & RC_CONTOUR_REG_MASK) != 0) - { - hasConnections = true; - break; - } - } - - if (hasConnections) - { - // The contour has some portals to other regions. 
- // Add a new point to every location where the region changes. - for (int i = 0, ni = points.size()/4; i < ni; ++i) - { - int ii = (i+1) % ni; - const bool differentRegs = (points[i*4+3] & RC_CONTOUR_REG_MASK) != (points[ii*4+3] & RC_CONTOUR_REG_MASK); - const bool areaBorders = (points[i*4+3] & RC_AREA_BORDER) != (points[ii*4+3] & RC_AREA_BORDER); - if (differentRegs || areaBorders) - { - simplified.push(points[i*4+0]); - simplified.push(points[i*4+1]); - simplified.push(points[i*4+2]); - simplified.push(i); - } - } - } - - if (simplified.size() == 0) - { - // If there is no connections at all, - // create some initial points for the simplification process. - // Find lower-left and upper-right vertices of the contour. - int llx = points[0]; - int lly = points[1]; - int llz = points[2]; - int lli = 0; - int urx = points[0]; - int ury = points[1]; - int urz = points[2]; - int uri = 0; - for (int i = 0; i < points.size(); i += 4) - { - int x = points[i+0]; - int y = points[i+1]; - int z = points[i+2]; - if (x < llx || (x == llx && z < llz)) - { - llx = x; - lly = y; - llz = z; - lli = i/4; - } - if (x > urx || (x == urx && z > urz)) - { - urx = x; - ury = y; - urz = z; - uri = i/4; - } - } - simplified.push(llx); - simplified.push(lly); - simplified.push(llz); - simplified.push(lli); - - simplified.push(urx); - simplified.push(ury); - simplified.push(urz); - simplified.push(uri); - } - - // Add points until all raw points are within - // error tolerance to the simplified shape. - const int pn = points.size()/4; - for (int i = 0; i < simplified.size()/4; ) - { - int ii = (i+1) % (simplified.size()/4); - - const int ax = simplified[i*4+0]; - const int az = simplified[i*4+2]; - const int ai = simplified[i*4+3]; - - const int bx = simplified[ii*4+0]; - const int bz = simplified[ii*4+2]; - const int bi = simplified[ii*4+3]; - - // Find maximum deviation from the segment. - float maxd = 0; - int maxi = -1; - int ci, cinc, endi; - - // Traverse the segment in lexilogical order so that the - // max deviation is calculated similarly when traversing - // opposite segments. - if (bx > ax || (bx == ax && bz > az)) - { - cinc = 1; - ci = (ai+cinc) % pn; - endi = bi; - } - else - { - cinc = pn-1; - ci = (bi+cinc) % pn; - endi = ai; - } - - // Tessellate only outer edges oredges between areas. - if ((points[ci*4+3] & RC_CONTOUR_REG_MASK) == 0 || - (points[ci*4+3] & RC_AREA_BORDER)) - { - while (ci != endi) - { - float d = distancePtSeg(points[ci*4+0], points[ci*4+2], ax, az, bx, bz); - if (d > maxd) - { - maxd = d; - maxi = ci; - } - ci = (ci+cinc) % pn; - } - } - - - // If the max deviation is larger than accepted error, - // add new point, else continue to next segment. - if (maxi != -1 && maxd > (maxError*maxError)) - { - // Add space for the new point. - simplified.resize(simplified.size()+4); - const int n = simplified.size()/4; - for (int j = n-1; j > i; --j) - { - simplified[j*4+0] = simplified[(j-1)*4+0]; - simplified[j*4+1] = simplified[(j-1)*4+1]; - simplified[j*4+2] = simplified[(j-1)*4+2]; - simplified[j*4+3] = simplified[(j-1)*4+3]; - } - // Add the point. - simplified[(i+1)*4+0] = points[maxi*4+0]; - simplified[(i+1)*4+1] = points[maxi*4+1]; - simplified[(i+1)*4+2] = points[maxi*4+2]; - simplified[(i+1)*4+3] = maxi; - } - else - { - ++i; - } - } - - // Split too long edges. 
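// A wall edge (outside region id 0) or an area-border edge whose xz length exceeds
// maxEdgeLen gets an extra vertex near the middle of its raw-point run; the
// midpoint index is rounded consistently for both traversal directions, as the
// comment below notes.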
- if (maxEdgeLen > 0 && (buildFlags & (RC_CONTOUR_TESS_WALL_EDGES|RC_CONTOUR_TESS_AREA_EDGES)) != 0) - { - for (int i = 0; i < simplified.size()/4; ) - { - const int ii = (i+1) % (simplified.size()/4); - - const int ax = simplified[i*4+0]; - const int az = simplified[i*4+2]; - const int ai = simplified[i*4+3]; - - const int bx = simplified[ii*4+0]; - const int bz = simplified[ii*4+2]; - const int bi = simplified[ii*4+3]; - - // Find maximum deviation from the segment. - int maxi = -1; - int ci = (ai+1) % pn; - - // Tessellate only outer edges or edges between areas. - bool tess = false; - // Wall edges. - if ((buildFlags & RC_CONTOUR_TESS_WALL_EDGES) && (points[ci*4+3] & RC_CONTOUR_REG_MASK) == 0) - tess = true; - // Edges between areas. - if ((buildFlags & RC_CONTOUR_TESS_AREA_EDGES) && (points[ci*4+3] & RC_AREA_BORDER)) - tess = true; - - if (tess) - { - int dx = bx - ax; - int dz = bz - az; - if (dx*dx + dz*dz > maxEdgeLen*maxEdgeLen) - { - // Round based on the segments in lexilogical order so that the - // max tesselation is consistent regardles in which direction - // segments are traversed. - const int n = bi < ai ? (bi+pn - ai) : (bi - ai); - if (n > 1) - { - if (bx > ax || (bx == ax && bz > az)) - maxi = (ai + n/2) % pn; - else - maxi = (ai + (n+1)/2) % pn; - } - } - } - - // If the max deviation is larger than accepted error, - // add new point, else continue to next segment. - if (maxi != -1) - { - // Add space for the new point. - simplified.resize(simplified.size()+4); - const int n = simplified.size()/4; - for (int j = n-1; j > i; --j) - { - simplified[j*4+0] = simplified[(j-1)*4+0]; - simplified[j*4+1] = simplified[(j-1)*4+1]; - simplified[j*4+2] = simplified[(j-1)*4+2]; - simplified[j*4+3] = simplified[(j-1)*4+3]; - } - // Add the point. - simplified[(i+1)*4+0] = points[maxi*4+0]; - simplified[(i+1)*4+1] = points[maxi*4+1]; - simplified[(i+1)*4+2] = points[maxi*4+2]; - simplified[(i+1)*4+3] = maxi; - } - else - { - ++i; - } - } - } - - for (int i = 0; i < simplified.size()/4; ++i) - { - // The edge vertex flag is take from the current raw point, - // and the neighbour region is take from the next raw point. - const int ai = (simplified[i*4+3]+1) % pn; - const int bi = simplified[i*4+3]; - simplified[i*4+3] = (points[ai*4+3] & (RC_CONTOUR_REG_MASK|RC_AREA_BORDER)) | (points[bi*4+3] & RC_BORDER_VERTEX); - } - -} - -static void removeDegenerateSegments(rcIntArray& simplified) -{ - // Remove adjacent vertices which are equal on xz-plane, - // or else the triangulator will get confused. - for (int i = 0; i < simplified.size()/4; ++i) - { - int ni = i+1; - if (ni >= (simplified.size()/4)) - ni = 0; - - if (simplified[i*4+0] == simplified[ni*4+0] && - simplified[i*4+2] == simplified[ni*4+2]) - { - // Degenerate segment, remove. 
- for (int j = i; j < simplified.size()/4-1; ++j) - { - simplified[j*4+0] = simplified[(j+1)*4+0]; - simplified[j*4+1] = simplified[(j+1)*4+1]; - simplified[j*4+2] = simplified[(j+1)*4+2]; - simplified[j*4+3] = simplified[(j+1)*4+3]; - } - simplified.resize(simplified.size()-4); - } - } -} - -static int calcAreaOfPolygon2D(const int* verts, const int nverts) -{ - int area = 0; - for (int i = 0, j = nverts-1; i < nverts; j=i++) - { - const int* vi = &verts[i*4]; - const int* vj = &verts[j*4]; - area += vi[0] * vj[2] - vj[0] * vi[2]; - } - return (area+1) / 2; -} - -inline bool ileft(const int* a, const int* b, const int* c) -{ - return (b[0] - a[0]) * (c[2] - a[2]) - (c[0] - a[0]) * (b[2] - a[2]) <= 0; -} - -static void getClosestIndices(const int* vertsa, const int nvertsa, - const int* vertsb, const int nvertsb, - int& ia, int& ib) -{ - int closestDist = 0xfffffff; - ia = -1, ib = -1; - for (int i = 0; i < nvertsa; ++i) - { - const int in = (i+1) % nvertsa; - const int ip = (i+nvertsa-1) % nvertsa; - const int* va = &vertsa[i*4]; - const int* van = &vertsa[in*4]; - const int* vap = &vertsa[ip*4]; - - for (int j = 0; j < nvertsb; ++j) - { - const int* vb = &vertsb[j*4]; - // vb must be "infront" of va. - if (ileft(vap,va,vb) && ileft(va,van,vb)) - { - const int dx = vb[0] - va[0]; - const int dz = vb[2] - va[2]; - const int d = dx*dx + dz*dz; - if (d < closestDist) - { - ia = i; - ib = j; - closestDist = d; - } - } - } - } -} - -static bool mergeContours(rcContour& ca, rcContour& cb, int ia, int ib) -{ - const int maxVerts = ca.nverts + cb.nverts + 2; - int* verts = (int*)rcAlloc(sizeof(int)*maxVerts*4, RC_ALLOC_PERM); - if (!verts) - return false; - - int nv = 0; - - // Copy contour A. - for (int i = 0; i <= ca.nverts; ++i) - { - int* dst = &verts[nv*4]; - const int* src = &ca.verts[((ia+i)%ca.nverts)*4]; - dst[0] = src[0]; - dst[1] = src[1]; - dst[2] = src[2]; - dst[3] = src[3]; - nv++; - } - - // Copy contour B - for (int i = 0; i <= cb.nverts; ++i) - { - int* dst = &verts[nv*4]; - const int* src = &cb.verts[((ib+i)%cb.nverts)*4]; - dst[0] = src[0]; - dst[1] = src[1]; - dst[2] = src[2]; - dst[3] = src[3]; - nv++; - } - - rcFree(ca.verts); - ca.verts = verts; - ca.nverts = nv; - - rcFree(cb.verts); - cb.verts = 0; - cb.nverts = 0; - - return true; -} - -bool rcBuildContours(rcContext* ctx, rcCompactHeightfield& chf, - const float maxError, const int maxEdgeLen, - rcContourSet& cset, const int buildFlags) -{ - rcAssert(ctx); - - const int w = chf.width; - const int h = chf.height; - - ctx->startTimer(RC_TIMER_BUILD_CONTOURS); - - rcVcopy(cset.bmin, chf.bmin); - rcVcopy(cset.bmax, chf.bmax); - cset.cs = chf.cs; - cset.ch = chf.ch; - - int maxContours = rcMax((int)chf.maxRegions, 8); - cset.conts = (rcContour*)rcAlloc(sizeof(rcContour)*maxContours, RC_ALLOC_PERM); - if (!cset.conts) - return false; - cset.nconts = 0; - - rcScopedDelete<unsigned char> flags = (unsigned char*)rcAlloc(sizeof(unsigned char)*chf.spanCount, RC_ALLOC_TEMP); - if (!flags) - { - ctx->log(RC_LOG_ERROR, "rcBuildContours: Out of memory 'flags' (%d).", chf.spanCount); - return false; - } - - ctx->startTimer(RC_TIMER_BUILD_CONTOURS_TRACE); - - // Mark boundaries. 
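// flags[i] below gets one bit set for each direction whose neighbour lies in a
// different region (res collects same-region edges and is then inverted with ^0xf);
// walkContour() later starts a contour trace from any span whose flags are neither
// 0 nor 0xf.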
- for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - unsigned char res = 0; - const rcCompactSpan& s = chf.spans[i]; - if (!chf.spans[i].reg || (chf.spans[i].reg & RC_BORDER_REG)) - { - flags[i] = 0; - continue; - } - for (int dir = 0; dir < 4; ++dir) - { - unsigned short r = 0; - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir); - r = chf.spans[ai].reg; - } - if (r == chf.spans[i].reg) - res |= (1 << dir); - } - flags[i] = res ^ 0xf; // Inverse, mark non connected edges. - } - } - } - - ctx->stopTimer(RC_TIMER_BUILD_CONTOURS_TRACE); - - ctx->startTimer(RC_TIMER_BUILD_CONTOURS_SIMPLIFY); - - rcIntArray verts(256); - rcIntArray simplified(64); - - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - if (flags[i] == 0 || flags[i] == 0xf) - { - flags[i] = 0; - continue; - } - const unsigned short reg = chf.spans[i].reg; - if (!reg || (reg & RC_BORDER_REG)) - continue; - const unsigned char area = chf.areas[i]; - - verts.resize(0); - simplified.resize(0); - walkContour(x, y, i, chf, flags, verts); - simplifyContour(verts, simplified, maxError, maxEdgeLen, buildFlags); - removeDegenerateSegments(simplified); - - // Store region->contour remap info. - // Create contour. - if (simplified.size()/4 >= 3) - { - if (cset.nconts >= maxContours) - { - // Allocate more contours. - // This can happen when there are tiny holes in the heightfield. - const int oldMax = maxContours; - maxContours *= 2; - rcContour* newConts = (rcContour*)rcAlloc(sizeof(rcContour)*maxContours, RC_ALLOC_PERM); - for (int j = 0; j < cset.nconts; ++j) - { - newConts[j] = cset.conts[j]; - // Reset source pointers to prevent data deletion. - cset.conts[j].verts = 0; - cset.conts[j].rverts = 0; - } - rcFree(cset.conts); - cset.conts = newConts; - - ctx->log(RC_LOG_WARNING, "rcBuildContours: Expanding max contours from %d to %d.", oldMax, maxContours); - } - - rcContour* cont = &cset.conts[cset.nconts++]; - - cont->nverts = simplified.size()/4; - cont->verts = (int*)rcAlloc(sizeof(int)*cont->nverts*4, RC_ALLOC_PERM); - if (!cont->verts) - { - ctx->log(RC_LOG_ERROR, "rcBuildContours: Out of memory 'verts' (%d).", cont->nverts); - return false; - } - memcpy(cont->verts, &simplified[0], sizeof(int)*cont->nverts*4); - - cont->nrverts = verts.size()/4; - cont->rverts = (int*)rcAlloc(sizeof(int)*cont->nrverts*4, RC_ALLOC_PERM); - if (!cont->rverts) - { - ctx->log(RC_LOG_ERROR, "rcBuildContours: Out of memory 'rverts' (%d).", cont->nrverts); - return false; - } - memcpy(cont->rverts, &verts[0], sizeof(int)*cont->nrverts*4); - -/* cont->cx = cont->cy = cont->cz = 0; - for (int i = 0; i < cont->nverts; ++i) - { - cont->cx += cont->verts[i*4+0]; - cont->cy += cont->verts[i*4+1]; - cont->cz += cont->verts[i*4+2]; - } - cont->cx /= cont->nverts; - cont->cy /= cont->nverts; - cont->cz /= cont->nverts;*/ - - cont->reg = reg; - cont->area = area; - } - } - } - } - - // Check and merge droppings. - // Sometimes the previous algorithms can fail and create several contours - // per area. This pass will try to merge the holes into the main region. 
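// Note: calcAreaOfPolygon2D (above) accumulates the shoelace terms on the xz-plane,
// which is twice the signed area, and then roughly halves it, so only its sign matters
// here. For a 2x2 square the accumulated value is +8 for one traversal order and -8 for
// the other. A contour that comes out negative is wound backwards, i.e. it is a hole
// left by the region partitioning; the loop below stitches each such hole into a
// correctly wound contour of the same region id through the closest pair of mutually
// visible vertices (getClosestIndices + mergeContours).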
- for (int i = 0; i < cset.nconts; ++i) - { - rcContour& cont = cset.conts[i]; - // Check if the contour is would backwards. - if (calcAreaOfPolygon2D(cont.verts, cont.nverts) < 0) - { - // Find another contour which has the same region ID. - int mergeIdx = -1; - for (int j = 0; j < cset.nconts; ++j) - { - if (i == j) continue; - if (cset.conts[j].nverts && cset.conts[j].reg == cont.reg) - { - // Make sure the polygon is correctly oriented. - if (calcAreaOfPolygon2D(cset.conts[j].verts, cset.conts[j].nverts)) - { - mergeIdx = j; - break; - } - } - } - if (mergeIdx == -1) - { - ctx->log(RC_LOG_WARNING, "rcBuildContours: Could not find merge target for bad contour %d.", i); - } - else - { - rcContour& mcont = cset.conts[mergeIdx]; - // Merge by closest points. - int ia = 0, ib = 0; - getClosestIndices(mcont.verts, mcont.nverts, cont.verts, cont.nverts, ia, ib); - if (ia == -1 || ib == -1) - { - ctx->log(RC_LOG_WARNING, "rcBuildContours: Failed to find merge points for %d and %d.", i, mergeIdx); - continue; - } - if (!mergeContours(mcont, cont, ia, ib)) - { - ctx->log(RC_LOG_WARNING, "rcBuildContours: Failed to merge contours %d and %d.", i, mergeIdx); - continue; - } - } - } - } - - ctx->stopTimer(RC_TIMER_BUILD_CONTOURS_SIMPLIFY); - - ctx->stopTimer(RC_TIMER_BUILD_CONTOURS); - - return true; -} diff --git a/deps/recastnavigation/Recast/RecastFilter.cpp b/deps/recastnavigation/Recast/RecastFilter.cpp deleted file mode 100644 index 66af37a413..0000000000 --- a/deps/recastnavigation/Recast/RecastFilter.cpp +++ /dev/null @@ -1,181 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#define _USE_MATH_DEFINES -#include <math.h> -#include <stdio.h> -#include "Recast.h" -#include "RecastAssert.h" - - -void rcFilterLowHangingWalkableObstacles(rcContext* ctx, const int walkableClimb, rcHeightfield& solid) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_FILTER_LOW_OBSTACLES); - - const int w = solid.width; - const int h = solid.height; - - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - rcSpan* ps = 0; - bool previousWalkable = false; - unsigned char previousArea = RC_NULL_AREA; - - for (rcSpan* s = solid.spans[x + y*w]; s; ps = s, s = s->next) - { - const bool walkable = s->area != RC_NULL_AREA; - // If current span is not walkable, but there is walkable - // span just below it, mark the span above it walkable too. - if (!walkable && previousWalkable) - { - if (rcAbs((int)s->smax - (int)ps->smax) <= walkableClimb) - s->area = previousArea; - } - // Copy walkable flag so that it cannot propagate - // past multiple non-walkable objects. 
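// Note: the promotion above compares only the tops (smax) of the current span and the
// span below it. With walkableClimb = 4, a non-walkable span topping out at 13 directly
// above a walkable span topping out at 10 (difference 3) inherits the walkable area,
// while a difference of 5 would leave it unwalkable. 'previousWalkable' below is taken
// from the span's original state rather than the possibly promoted one, so a stack of
// thin obstacles cannot be promoted link by link.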
- previousWalkable = walkable; - previousArea = s->area; - } - } - } - - ctx->stopTimer(RC_TIMER_FILTER_LOW_OBSTACLES); -} - -void rcFilterLedgeSpans(rcContext* ctx, const int walkableHeight, const int walkableClimb, - rcHeightfield& solid) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_FILTER_BORDER); - - const int w = solid.width; - const int h = solid.height; - const int MAX_HEIGHT = 0xffff; - - // Mark border spans. - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - for (rcSpan* s = solid.spans[x + y*w]; s; s = s->next) - { - // Skip non walkable spans. - if (s->area == RC_NULL_AREA) - continue; - - const int bot = (int)(s->smax); - const int top = s->next ? (int)(s->next->smin) : MAX_HEIGHT; - - // Find neighbours minimum height. - int minh = MAX_HEIGHT; - - // Min and max height of accessible neighbours. - int asmin = s->smax; - int asmax = s->smax; - - for (int dir = 0; dir < 4; ++dir) - { - int dx = x + rcGetDirOffsetX(dir); - int dy = y + rcGetDirOffsetY(dir); - // Skip neighbours which are out of bounds. - if (dx < 0 || dy < 0 || dx >= w || dy >= h) - { - minh = rcMin(minh, -walkableClimb - bot); - continue; - } - - // From minus infinity to the first span. - rcSpan* ns = solid.spans[dx + dy*w]; - int nbot = -walkableClimb; - int ntop = ns ? (int)ns->smin : MAX_HEIGHT; - // Skip neightbour if the gap between the spans is too small. - if (rcMin(top,ntop) - rcMax(bot,nbot) > walkableHeight) - minh = rcMin(minh, nbot - bot); - - // Rest of the spans. - for (ns = solid.spans[dx + dy*w]; ns; ns = ns->next) - { - nbot = (int)ns->smax; - ntop = ns->next ? (int)ns->next->smin : MAX_HEIGHT; - // Skip neightbour if the gap between the spans is too small. - if (rcMin(top,ntop) - rcMax(bot,nbot) > walkableHeight) - { - minh = rcMin(minh, nbot - bot); - - // Find min/max accessible neighbour height. - if (rcAbs(nbot - bot) <= walkableClimb) - { - if (nbot < asmin) asmin = nbot; - if (nbot > asmax) asmax = nbot; - } - - } - } - } - - // The current span is close to a ledge if the drop to any - // neighbour span is less than the walkableClimb. - if (minh < -walkableClimb) - s->area = RC_NULL_AREA; - - // If the difference between all neighbours is too large, - // we are at steep slope, mark the span as ledge. - if ((asmax - asmin) > walkableClimb) - { - s->area = RC_NULL_AREA; - } - } - } - } - - ctx->stopTimer(RC_TIMER_FILTER_BORDER); -} - -void rcFilterWalkableLowHeightSpans(rcContext* ctx, int walkableHeight, rcHeightfield& solid) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_FILTER_WALKABLE); - - const int w = solid.width; - const int h = solid.height; - const int MAX_HEIGHT = 0xffff; - - // Remove walkable flag from spans which do not have enough - // space above them for the agent to stand there. - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - for (rcSpan* s = solid.spans[x + y*w]; s; s = s->next) - { - const int bot = (int)(s->smax); - const int top = s->next ? (int)(s->next->smin) : MAX_HEIGHT; - if ((top - bot) <= walkableHeight) - s->area = RC_NULL_AREA; - } - } - } - - ctx->stopTimer(RC_TIMER_FILTER_WALKABLE); -} diff --git a/deps/recastnavigation/Recast/RecastMesh.cpp b/deps/recastnavigation/Recast/RecastMesh.cpp deleted file mode 100644 index e7e2397dd6..0000000000 --- a/deps/recastnavigation/Recast/RecastMesh.cpp +++ /dev/null @@ -1,1324 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. 
In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#define _USE_MATH_DEFINES -#include <math.h> -#include <string.h> -#include <stdio.h> -#include "Recast.h" -#include "RecastAlloc.h" -#include "RecastAssert.h" - -struct rcEdge -{ - unsigned short vert[2]; - unsigned short polyEdge[2]; - unsigned short poly[2]; -}; - -static bool buildMeshAdjacency(unsigned short* polys, const int npolys, - const int nverts, const int vertsPerPoly) -{ - // Based on code by Eric Lengyel from: - // http://www.terathon.com/code/edges.php - - int maxEdgeCount = npolys*vertsPerPoly; - unsigned short* firstEdge = (unsigned short*)rcAlloc(sizeof(unsigned short)*(nverts + maxEdgeCount), RC_ALLOC_TEMP); - if (!firstEdge) - return false; - unsigned short* nextEdge = firstEdge + nverts; - int edgeCount = 0; - - rcEdge* edges = (rcEdge*)rcAlloc(sizeof(rcEdge)*maxEdgeCount, RC_ALLOC_TEMP); - if (!edges) - { - rcFree(firstEdge); - return false; - } - - for (int i = 0; i < nverts; i++) - firstEdge[i] = RC_MESH_NULL_IDX; - - for (int i = 0; i < npolys; ++i) - { - unsigned short* t = &polys[i*vertsPerPoly*2]; - for (int j = 0; j < vertsPerPoly; ++j) - { - if (t[j] == RC_MESH_NULL_IDX) break; - unsigned short v0 = t[j]; - unsigned short v1 = (j+1 >= vertsPerPoly || t[j+1] == RC_MESH_NULL_IDX) ? t[0] : t[j+1]; - if (v0 < v1) - { - rcEdge& edge = edges[edgeCount]; - edge.vert[0] = v0; - edge.vert[1] = v1; - edge.poly[0] = (unsigned short)i; - edge.polyEdge[0] = (unsigned short)j; - edge.poly[1] = (unsigned short)i; - edge.polyEdge[1] = 0; - // Insert edge - nextEdge[edgeCount] = firstEdge[v0]; - firstEdge[v0] = (unsigned short)edgeCount; - edgeCount++; - } - } - } - - for (int i = 0; i < npolys; ++i) - { - unsigned short* t = &polys[i*vertsPerPoly*2]; - for (int j = 0; j < vertsPerPoly; ++j) - { - if (t[j] == RC_MESH_NULL_IDX) break; - unsigned short v0 = t[j]; - unsigned short v1 = (j+1 >= vertsPerPoly || t[j+1] == RC_MESH_NULL_IDX) ? 
t[0] : t[j+1]; - if (v0 > v1) - { - for (unsigned short e = firstEdge[v1]; e != RC_MESH_NULL_IDX; e = nextEdge[e]) - { - rcEdge& edge = edges[e]; - if (edge.vert[1] == v0 && edge.poly[0] == edge.poly[1]) - { - edge.poly[1] = (unsigned short)i; - edge.polyEdge[1] = (unsigned short)j; - break; - } - } - } - } - } - - // Store adjacency - for (int i = 0; i < edgeCount; ++i) - { - const rcEdge& e = edges[i]; - if (e.poly[0] != e.poly[1]) - { - unsigned short* p0 = &polys[e.poly[0]*vertsPerPoly*2]; - unsigned short* p1 = &polys[e.poly[1]*vertsPerPoly*2]; - p0[vertsPerPoly + e.polyEdge[0]] = e.poly[1]; - p1[vertsPerPoly + e.polyEdge[1]] = e.poly[0]; - } - } - - rcFree(firstEdge); - rcFree(edges); - - return true; -} - - -static const int VERTEX_BUCKET_COUNT = (1<<12); - -inline int computeVertexHash(int x, int y, int z) -{ - const unsigned int h1 = 0x8da6b343; // Large multiplicative constants; - const unsigned int h2 = 0xd8163841; // here arbitrarily chosen primes - const unsigned int h3 = 0xcb1ab31f; - unsigned int n = h1 * x + h2 * y + h3 * z; - return (int)(n & (VERTEX_BUCKET_COUNT-1)); -} - -static unsigned short addVertex(unsigned short x, unsigned short y, unsigned short z, - unsigned short* verts, int* firstVert, int* nextVert, int& nv) -{ - int bucket = computeVertexHash(x, 0, z); - int i = firstVert[bucket]; - - while (i != -1) - { - const unsigned short* v = &verts[i*3]; - if (v[0] == x && (rcAbs(v[1] - y) <= 2) && v[2] == z) - return (unsigned short)i; - i = nextVert[i]; // next - } - - // Could not find, create new. - i = nv; nv++; - unsigned short* v = &verts[i*3]; - v[0] = x; - v[1] = y; - v[2] = z; - nextVert[i] = firstVert[bucket]; - firstVert[bucket] = i; - - return (unsigned short)i; -} - -inline int prev(int i, int n) { return i-1 >= 0 ? i-1 : n-1; } -inline int next(int i, int n) { return i+1 < n ? i+1 : 0; } - -inline int area2(const int* a, const int* b, const int* c) -{ - return (b[0] - a[0]) * (c[2] - a[2]) - (c[0] - a[0]) * (b[2] - a[2]); -} - -// Exclusive or: true iff exactly one argument is true. -// The arguments are negated to ensure that they are 0/1 -// values. Then the bitwise Xor operator may apply. -// (This idea is due to Michael Baldwin.) -inline bool xorb(bool x, bool y) -{ - return !x ^ !y; -} - -// Returns true iff c is strictly to the left of the directed -// line through a to b. -inline bool left(const int* a, const int* b, const int* c) -{ - return area2(a, b, c) < 0; -} - -inline bool leftOn(const int* a, const int* b, const int* c) -{ - return area2(a, b, c) <= 0; -} - -inline bool collinear(const int* a, const int* b, const int* c) -{ - return area2(a, b, c) == 0; -} - -// Returns true iff ab properly intersects cd: they share -// a point interior to both segments. The properness of the -// intersection is ensured by using strict leftness. -bool intersectProp(const int* a, const int* b, const int* c, const int* d) -{ - // Eliminate improper cases. - if (collinear(a,b,c) || collinear(a,b,d) || - collinear(c,d,a) || collinear(c,d,b)) - return false; - - return xorb(left(a,b,c), left(a,b,d)) && xorb(left(c,d,a), left(c,d,b)); -} - -// Returns T iff (a,b,c) are collinear and point c lies -// on the closed segement ab. -static bool between(const int* a, const int* b, const int* c) -{ - if (!collinear(a, b, c)) - return false; - // If ab not vertical, check betweenness on x; else on y. 
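// Note: collinearity has already been established, so testing a single coordinate is
// enough. The x coordinate (index 0) is used unless the segment is vertical in the
// xz-plane (a[0] == b[0]), in which case z (index 2) is used; y (index 1) never takes
// part, since all of this polygon geometry runs on the xz projection.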
- if (a[0] != b[0]) - return ((a[0] <= c[0]) && (c[0] <= b[0])) || ((a[0] >= c[0]) && (c[0] >= b[0])); - else - return ((a[2] <= c[2]) && (c[2] <= b[2])) || ((a[2] >= c[2]) && (c[2] >= b[2])); -} - -// Returns true iff segments ab and cd intersect, properly or improperly. -static bool intersect(const int* a, const int* b, const int* c, const int* d) -{ - if (intersectProp(a, b, c, d)) - return true; - else if (between(a, b, c) || between(a, b, d) || - between(c, d, a) || between(c, d, b)) - return true; - else - return false; -} - -static bool vequal(const int* a, const int* b) -{ - return a[0] == b[0] && a[2] == b[2]; -} - -// Returns T iff (v_i, v_j) is a proper internal *or* external -// diagonal of P, *ignoring edges incident to v_i and v_j*. -static bool diagonalie(int i, int j, int n, const int* verts, int* indices) -{ - const int* d0 = &verts[(indices[i] & 0x0fffffff) * 4]; - const int* d1 = &verts[(indices[j] & 0x0fffffff) * 4]; - - // For each edge (k,k+1) of P - for (int k = 0; k < n; k++) - { - int k1 = next(k, n); - // Skip edges incident to i or j - if (!((k == i) || (k1 == i) || (k == j) || (k1 == j))) - { - const int* p0 = &verts[(indices[k] & 0x0fffffff) * 4]; - const int* p1 = &verts[(indices[k1] & 0x0fffffff) * 4]; - - if (vequal(d0, p0) || vequal(d1, p0) || vequal(d0, p1) || vequal(d1, p1)) - continue; - - if (intersect(d0, d1, p0, p1)) - return false; - } - } - return true; -} - -// Returns true iff the diagonal (i,j) is strictly internal to the -// polygon P in the neighborhood of the i endpoint. -static bool inCone(int i, int j, int n, const int* verts, int* indices) -{ - const int* pi = &verts[(indices[i] & 0x0fffffff) * 4]; - const int* pj = &verts[(indices[j] & 0x0fffffff) * 4]; - const int* pi1 = &verts[(indices[next(i, n)] & 0x0fffffff) * 4]; - const int* pin1 = &verts[(indices[prev(i, n)] & 0x0fffffff) * 4]; - - // If P[i] is a convex vertex [ i+1 left or on (i-1,i) ]. - if (leftOn(pin1, pi, pi1)) - return left(pi, pj, pin1) && left(pj, pi, pi1); - // Assume (i-1,i,i+1) not collinear. - // else P[i] is reflex. - return !(leftOn(pi, pj, pi1) && leftOn(pj, pi, pin1)); -} - -// Returns T iff (v_i, v_j) is a proper internal -// diagonal of P. -static bool diagonal(int i, int j, int n, const int* verts, int* indices) -{ - return inCone(i, j, n, verts, indices) && diagonalie(i, j, n, verts, indices); -} - -static int triangulate(int n, const int* verts, int* indices, int* tris) -{ - int ntris = 0; - int* dst = tris; - - // The last bit of the index is used to indicate if the vertex can be removed. - for (int i = 0; i < n; i++) - { - int i1 = next(i, n); - int i2 = next(i1, n); - if (diagonal(i, i2, n, verts, indices)) - indices[i1] |= 0x80000000; - } - - while (n > 3) - { - int minLen = -1; - int mini = -1; - for (int i = 0; i < n; i++) - { - int i1 = next(i, n); - if (indices[i1] & 0x80000000) - { - const int* p0 = &verts[(indices[i] & 0x0fffffff) * 4]; - const int* p2 = &verts[(indices[next(i1, n)] & 0x0fffffff) * 4]; - - int dx = p2[0] - p0[0]; - int dy = p2[2] - p0[2]; - int len = dx*dx + dy*dy; - - if (minLen < 0 || len < minLen) - { - minLen = len; - mini = i; - } - } - } - - if (mini == -1) - { - // Should not happen. 
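// Note: failing to find a removable ear means the remaining polygon is not simple
// (typically self-overlapping or otherwise degenerate input). The function bails out by
// returning the triangle count negated; callers such as rcBuildPolyMesh and removeVertex
// negate it back, log a warning and continue with the partial triangulation.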
-/* printf("mini == -1 ntris=%d n=%d\n", ntris, n); - for (int i = 0; i < n; i++) - { - printf("%d ", indices[i] & 0x0fffffff); - } - printf("\n");*/ - return -ntris; - } - - int i = mini; - int i1 = next(i, n); - int i2 = next(i1, n); - - *dst++ = indices[i] & 0x0fffffff; - *dst++ = indices[i1] & 0x0fffffff; - *dst++ = indices[i2] & 0x0fffffff; - ntris++; - - // Removes P[i1] by copying P[i+1]...P[n-1] left one index. - n--; - for (int k = i1; k < n; k++) - indices[k] = indices[k+1]; - - if (i1 >= n) i1 = 0; - i = prev(i1,n); - // Update diagonal flags. - if (diagonal(prev(i, n), i1, n, verts, indices)) - indices[i] |= 0x80000000; - else - indices[i] &= 0x0fffffff; - - if (diagonal(i, next(i1, n), n, verts, indices)) - indices[i1] |= 0x80000000; - else - indices[i1] &= 0x0fffffff; - } - - // Append the remaining triangle. - *dst++ = indices[0] & 0x0fffffff; - *dst++ = indices[1] & 0x0fffffff; - *dst++ = indices[2] & 0x0fffffff; - ntris++; - - return ntris; -} - -static int countPolyVerts(const unsigned short* p, const int nvp) -{ - for (int i = 0; i < nvp; ++i) - if (p[i] == RC_MESH_NULL_IDX) - return i; - return nvp; -} - -inline bool uleft(const unsigned short* a, const unsigned short* b, const unsigned short* c) -{ - return ((int)b[0] - (int)a[0]) * ((int)c[2] - (int)a[2]) - - ((int)c[0] - (int)a[0]) * ((int)b[2] - (int)a[2]) < 0; -} - -static int getPolyMergeValue(unsigned short* pa, unsigned short* pb, - const unsigned short* verts, int& ea, int& eb, - const int nvp) -{ - const int na = countPolyVerts(pa, nvp); - const int nb = countPolyVerts(pb, nvp); - - // If the merged polygon would be too big, do not merge. - if (na+nb-2 > nvp) - return -1; - - // Check if the polygons share an edge. - ea = -1; - eb = -1; - - for (int i = 0; i < na; ++i) - { - unsigned short va0 = pa[i]; - unsigned short va1 = pa[(i+1) % na]; - if (va0 > va1) - rcSwap(va0, va1); - for (int j = 0; j < nb; ++j) - { - unsigned short vb0 = pb[j]; - unsigned short vb1 = pb[(j+1) % nb]; - if (vb0 > vb1) - rcSwap(vb0, vb1); - if (va0 == vb0 && va1 == vb1) - { - ea = i; - eb = j; - break; - } - } - } - - // No common edge, cannot merge. - if (ea == -1 || eb == -1) - return -1; - - // Check to see if the merged polygon would be convex. - unsigned short va, vb, vc; - - va = pa[(ea+na-1) % na]; - vb = pa[ea]; - vc = pb[(eb+2) % nb]; - if (!uleft(&verts[va*3], &verts[vb*3], &verts[vc*3])) - return -1; - - va = pb[(eb+nb-1) % nb]; - vb = pb[eb]; - vc = pa[(ea+2) % na]; - if (!uleft(&verts[va*3], &verts[vb*3], &verts[vc*3])) - return -1; - - va = pa[ea]; - vb = pa[(ea+1)%na]; - - int dx = (int)verts[va*3+0] - (int)verts[vb*3+0]; - int dy = (int)verts[va*3+2] - (int)verts[vb*3+2]; - - return dx*dx + dy*dy; -} - -static void mergePolys(unsigned short* pa, unsigned short* pb, int ea, int eb, - unsigned short* tmp, const int nvp) -{ - const int na = countPolyVerts(pa, nvp); - const int nb = countPolyVerts(pb, nvp); - - // Merge polygons. 
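// Note: the two rings share the edge (pa[ea], pa[ea+1]). The merge copies A starting
// just past ea (na-1 vertices) and then B starting just past eb (nb-1 vertices), which
// keeps the shared edge's endpoints once each and yields na+nb-2 vertices. With abstract
// indices: pa = {A,B,C,D}, pb = {C,B,E} and shared edge B-C (ea = 1, eb = 0) merge into
// {C,D,A,B,E}, i.e. the ring A-B-E-C-D.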
- memset(tmp, 0xff, sizeof(unsigned short)*nvp); - int n = 0; - // Add pa - for (int i = 0; i < na-1; ++i) - tmp[n++] = pa[(ea+1+i) % na]; - // Add pb - for (int i = 0; i < nb-1; ++i) - tmp[n++] = pb[(eb+1+i) % nb]; - - memcpy(pa, tmp, sizeof(unsigned short)*nvp); -} - -static void pushFront(int v, int* arr, int& an) -{ - an++; - for (int i = an-1; i > 0; --i) arr[i] = arr[i-1]; - arr[0] = v; -} - -static void pushBack(int v, int* arr, int& an) -{ - arr[an] = v; - an++; -} - -static bool canRemoveVertex(rcContext* ctx, rcPolyMesh& mesh, const unsigned short rem) -{ - const int nvp = mesh.nvp; - - // Count number of polygons to remove. - int numRemovedVerts = 0; - int numTouchedVerts = 0; - int numRemainingEdges = 0; - for (int i = 0; i < mesh.npolys; ++i) - { - unsigned short* p = &mesh.polys[i*nvp*2]; - const int nv = countPolyVerts(p, nvp); - int numRemoved = 0; - int numVerts = 0; - for (int j = 0; j < nv; ++j) - { - if (p[j] == rem) - { - numTouchedVerts++; - numRemoved++; - } - numVerts++; - } - if (numRemoved) - { - numRemovedVerts += numRemoved; - numRemainingEdges += numVerts-(numRemoved+1); - } - } - - // There would be too few edges remaining to create a polygon. - // This can happen for example when a tip of a triangle is marked - // as deletion, but there are no other polys that share the vertex. - // In this case, the vertex should not be removed. - if (numRemainingEdges <= 2) - return false; - - // Find edges which share the removed vertex. - const int maxEdges = numTouchedVerts*2; - int nedges = 0; - rcScopedDelete<int> edges = (int*)rcAlloc(sizeof(int)*maxEdges*3, RC_ALLOC_TEMP); - if (!edges) - { - ctx->log(RC_LOG_WARNING, "canRemoveVertex: Out of memory 'edges' (%d).", maxEdges*3); - return false; - } - - for (int i = 0; i < mesh.npolys; ++i) - { - unsigned short* p = &mesh.polys[i*nvp*2]; - const int nv = countPolyVerts(p, nvp); - - // Collect edges which touches the removed vertex. - for (int j = 0, k = nv-1; j < nv; k = j++) - { - if (p[j] == rem || p[k] == rem) - { - // Arrange edge so that a=rem. - int a = p[j], b = p[k]; - if (b == rem) - rcSwap(a,b); - - // Check if the edge exists - bool exists = false; - for (int k = 0; k < nedges; ++k) - { - int* e = &edges[k*3]; - if (e[1] == b) - { - // Exists, increment vertex share count. - e[2]++; - exists = true; - } - } - // Add new edge. - if (!exists) - { - int* e = &edges[nedges*3]; - e[0] = a; - e[1] = b; - e[2] = 1; - nedges++; - } - } - } - } - - // There should be no more than 2 open edges. - // This catches the case that two non-adjacent polygons - // share the removed vertex. In that case, do not remove the vertex. - int numOpenEdges = 0; - for (int i = 0; i < nedges; ++i) - { - if (edges[i*3+2] < 2) - numOpenEdges++; - } - if (numOpenEdges > 2) - return false; - - return true; -} - -static bool removeVertex(rcContext* ctx, rcPolyMesh& mesh, const unsigned short rem, const int maxTris) -{ - const int nvp = mesh.nvp; - - // Count number of polygons to remove. 
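// Note: removing a vertex means removing every polygon that uses it, keeping the edges
// of those polygons that did not touch the vertex (together with their region and area
// ids), stitching those edges into one closed "hole" loop, re-triangulating the hole and
// merging the resulting triangles back into polygons. The counting pass below merely
// sizes the temporary edge/hole/region/area buffers for that worst case.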
- int numRemovedVerts = 0; - for (int i = 0; i < mesh.npolys; ++i) - { - unsigned short* p = &mesh.polys[i*nvp*2]; - const int nv = countPolyVerts(p, nvp); - for (int j = 0; j < nv; ++j) - { - if (p[j] == rem) - numRemovedVerts++; - } - } - - int nedges = 0; - rcScopedDelete<int> edges = (int*)rcAlloc(sizeof(int)*numRemovedVerts*nvp*4, RC_ALLOC_TEMP); - if (!edges) - { - ctx->log(RC_LOG_WARNING, "removeVertex: Out of memory 'edges' (%d).", numRemovedVerts*nvp*4); - return false; - } - - int nhole = 0; - rcScopedDelete<int> hole = (int*)rcAlloc(sizeof(int)*numRemovedVerts*nvp, RC_ALLOC_TEMP); - if (!hole) - { - ctx->log(RC_LOG_WARNING, "removeVertex: Out of memory 'hole' (%d).", numRemovedVerts*nvp); - return false; - } - - int nhreg = 0; - rcScopedDelete<int> hreg = (int*)rcAlloc(sizeof(int)*numRemovedVerts*nvp, RC_ALLOC_TEMP); - if (!hreg) - { - ctx->log(RC_LOG_WARNING, "removeVertex: Out of memory 'hreg' (%d).", numRemovedVerts*nvp); - return false; - } - - int nharea = 0; - rcScopedDelete<int> harea = (int*)rcAlloc(sizeof(int)*numRemovedVerts*nvp, RC_ALLOC_TEMP); - if (!harea) - { - ctx->log(RC_LOG_WARNING, "removeVertex: Out of memory 'harea' (%d).", numRemovedVerts*nvp); - return false; - } - - for (int i = 0; i < mesh.npolys; ++i) - { - unsigned short* p = &mesh.polys[i*nvp*2]; - const int nv = countPolyVerts(p, nvp); - bool hasRem = false; - for (int j = 0; j < nv; ++j) - if (p[j] == rem) hasRem = true; - if (hasRem) - { - // Collect edges which does not touch the removed vertex. - for (int j = 0, k = nv-1; j < nv; k = j++) - { - if (p[j] != rem && p[k] != rem) - { - int* e = &edges[nedges*4]; - e[0] = p[k]; - e[1] = p[j]; - e[2] = mesh.regs[i]; - e[3] = mesh.areas[i]; - nedges++; - } - } - // Remove the polygon. - unsigned short* p2 = &mesh.polys[(mesh.npolys-1)*nvp*2]; - memcpy(p,p2,sizeof(unsigned short)*nvp); - memset(p+nvp,0xff,sizeof(unsigned short)*nvp); - mesh.regs[i] = mesh.regs[mesh.npolys-1]; - mesh.areas[i] = mesh.areas[mesh.npolys-1]; - mesh.npolys--; - --i; - } - } - - // Remove vertex. - for (int i = (int)rem; i < mesh.nverts; ++i) - { - mesh.verts[i*3+0] = mesh.verts[(i+1)*3+0]; - mesh.verts[i*3+1] = mesh.verts[(i+1)*3+1]; - mesh.verts[i*3+2] = mesh.verts[(i+1)*3+2]; - } - mesh.nverts--; - - // Adjust indices to match the removed vertex layout. - for (int i = 0; i < mesh.npolys; ++i) - { - unsigned short* p = &mesh.polys[i*nvp*2]; - const int nv = countPolyVerts(p, nvp); - for (int j = 0; j < nv; ++j) - if (p[j] > rem) p[j]--; - } - for (int i = 0; i < nedges; ++i) - { - if (edges[i*4+0] > rem) edges[i*4+0]--; - if (edges[i*4+1] > rem) edges[i*4+1]--; - } - - if (nedges == 0) - return true; - - // Start with one vertex, keep appending connected - // segments to the start and end of the hole. - pushBack(edges[0], hole, nhole); - pushBack(edges[2], hreg, nhreg); - pushBack(edges[3], harea, nharea); - - while (nedges) - { - bool match = false; - - for (int i = 0; i < nedges; ++i) - { - const int ea = edges[i*4+0]; - const int eb = edges[i*4+1]; - const int r = edges[i*4+2]; - const int a = edges[i*4+3]; - bool add = false; - if (hole[0] == eb) - { - // The segment matches the beginning of the hole boundary. - pushFront(ea, hole, nhole); - pushFront(r, hreg, nhreg); - pushFront(a, harea, nharea); - add = true; - } - else if (hole[nhole-1] == ea) - { - // The segment matches the end of the hole boundary. - pushBack(eb, hole, nhole); - pushBack(r, hreg, nhreg); - pushBack(a, harea, nharea); - add = true; - } - if (add) - { - // The edge segment was added, remove it. 
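// Note: the edge list is unordered, so the consumed segment is removed in O(1) by
// overwriting it with the last entry and shrinking the count; the matching '--i' below
// makes the loop re-inspect whatever was swapped into slot i.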
- edges[i*4+0] = edges[(nedges-1)*4+0]; - edges[i*4+1] = edges[(nedges-1)*4+1]; - edges[i*4+2] = edges[(nedges-1)*4+2]; - edges[i*4+3] = edges[(nedges-1)*4+3]; - --nedges; - match = true; - --i; - } - } - - if (!match) - break; - } - - rcScopedDelete<int> tris = (int*)rcAlloc(sizeof(int)*nhole*3, RC_ALLOC_TEMP); - if (!tris) - { - ctx->log(RC_LOG_WARNING, "removeVertex: Out of memory 'tris' (%d).", nhole*3); - return false; - } - - rcScopedDelete<int> tverts = (int*)rcAlloc(sizeof(int)*nhole*4, RC_ALLOC_TEMP); - if (!tverts) - { - ctx->log(RC_LOG_WARNING, "removeVertex: Out of memory 'tverts' (%d).", nhole*4); - return false; - } - - rcScopedDelete<int> thole = (int*)rcAlloc(sizeof(int)*nhole, RC_ALLOC_TEMP); - if (!tverts) - { - ctx->log(RC_LOG_WARNING, "removeVertex: Out of memory 'thole' (%d).", nhole); - return false; - } - - // Generate temp vertex array for triangulation. - for (int i = 0; i < nhole; ++i) - { - const int pi = hole[i]; - tverts[i*4+0] = mesh.verts[pi*3+0]; - tverts[i*4+1] = mesh.verts[pi*3+1]; - tverts[i*4+2] = mesh.verts[pi*3+2]; - tverts[i*4+3] = 0; - thole[i] = i; - } - - // Triangulate the hole. - int ntris = triangulate(nhole, &tverts[0], &thole[0], tris); - if (ntris < 0) - { - ntris = -ntris; - ctx->log(RC_LOG_WARNING, "removeVertex: triangulate() returned bad results."); - } - - // Merge the hole triangles back to polygons. - rcScopedDelete<unsigned short> polys = (unsigned short*)rcAlloc(sizeof(unsigned short)*(ntris+1)*nvp, RC_ALLOC_TEMP); - if (!polys) - { - ctx->log(RC_LOG_ERROR, "removeVertex: Out of memory 'polys' (%d).", (ntris+1)*nvp); - return false; - } - rcScopedDelete<unsigned short> pregs = (unsigned short*)rcAlloc(sizeof(unsigned short)*ntris, RC_ALLOC_TEMP); - if (!pregs) - { - ctx->log(RC_LOG_ERROR, "removeVertex: Out of memory 'pregs' (%d).", ntris); - return false; - } - rcScopedDelete<unsigned char> pareas = (unsigned char*)rcAlloc(sizeof(unsigned char)*ntris, RC_ALLOC_TEMP); - if (!pregs) - { - ctx->log(RC_LOG_ERROR, "removeVertex: Out of memory 'pareas' (%d).", ntris); - return false; - } - - unsigned short* tmpPoly = &polys[ntris*nvp]; - - // Build initial polygons. - int npolys = 0; - memset(polys, 0xff, ntris*nvp*sizeof(unsigned short)); - for (int j = 0; j < ntris; ++j) - { - int* t = &tris[j*3]; - if (t[0] != t[1] && t[0] != t[2] && t[1] != t[2]) - { - polys[npolys*nvp+0] = (unsigned short)hole[t[0]]; - polys[npolys*nvp+1] = (unsigned short)hole[t[1]]; - polys[npolys*nvp+2] = (unsigned short)hole[t[2]]; - pregs[npolys] = (unsigned short)hreg[t[0]]; - pareas[npolys] = (unsigned char)harea[t[0]]; - npolys++; - } - } - if (!npolys) - return true; - - // Merge polygons. - if (nvp > 3) - { - for (;;) - { - // Find best polygons to merge. - int bestMergeVal = 0; - int bestPa = 0, bestPb = 0, bestEa = 0, bestEb = 0; - - for (int j = 0; j < npolys-1; ++j) - { - unsigned short* pj = &polys[j*nvp]; - for (int k = j+1; k < npolys; ++k) - { - unsigned short* pk = &polys[k*nvp]; - int ea, eb; - int v = getPolyMergeValue(pj, pk, mesh.verts, ea, eb, nvp); - if (v > bestMergeVal) - { - bestMergeVal = v; - bestPa = j; - bestPb = k; - bestEa = ea; - bestEb = eb; - } - } - } - - if (bestMergeVal > 0) - { - // Found best, merge. 
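// Note: getPolyMergeValue returns the squared length of the shared edge, or -1 when the
// polygons share no edge, would exceed nvp vertices, or would form a non-convex corner.
// This greedy loop therefore always collapses the longest shared edge first and stops as
// soon as no legal merge remains; rcBuildPolyMesh below uses the same strategy.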
- unsigned short* pa = &polys[bestPa*nvp]; - unsigned short* pb = &polys[bestPb*nvp]; - mergePolys(pa, pb, bestEa, bestEb, tmpPoly, nvp); - memcpy(pb, &polys[(npolys-1)*nvp], sizeof(unsigned short)*nvp); - pregs[bestPb] = pregs[npolys-1]; - pareas[bestPb] = pareas[npolys-1]; - npolys--; - } - else - { - // Could not merge any polygons, stop. - break; - } - } - } - - // Store polygons. - for (int i = 0; i < npolys; ++i) - { - if (mesh.npolys >= maxTris) break; - unsigned short* p = &mesh.polys[mesh.npolys*nvp*2]; - memset(p,0xff,sizeof(unsigned short)*nvp*2); - for (int j = 0; j < nvp; ++j) - p[j] = polys[i*nvp+j]; - mesh.regs[mesh.npolys] = pregs[i]; - mesh.areas[mesh.npolys] = pareas[i]; - mesh.npolys++; - if (mesh.npolys > maxTris) - { - ctx->log(RC_LOG_ERROR, "removeVertex: Too many polygons %d (max:%d).", mesh.npolys, maxTris); - return false; - } - } - - return true; -} - - -bool rcBuildPolyMesh(rcContext* ctx, rcContourSet& cset, int nvp, rcPolyMesh& mesh) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_BUILD_POLYMESH); - - rcVcopy(mesh.bmin, cset.bmin); - rcVcopy(mesh.bmax, cset.bmax); - mesh.cs = cset.cs; - mesh.ch = cset.ch; - - int maxVertices = 0; - int maxTris = 0; - int maxVertsPerCont = 0; - for (int i = 0; i < cset.nconts; ++i) - { - // Skip null contours. - if (cset.conts[i].nverts < 3) continue; - maxVertices += cset.conts[i].nverts; - maxTris += cset.conts[i].nverts - 2; - maxVertsPerCont = rcMax(maxVertsPerCont, cset.conts[i].nverts); - } - - if (maxVertices >= 0xfffe) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Too many vertices %d.", maxVertices); - return false; - } - - rcScopedDelete<unsigned char> vflags = (unsigned char*)rcAlloc(sizeof(unsigned char)*maxVertices, RC_ALLOC_TEMP); - if (!vflags) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'mesh.verts' (%d).", maxVertices); - return false; - } - memset(vflags, 0, maxVertices); - - mesh.verts = (unsigned short*)rcAlloc(sizeof(unsigned short)*maxVertices*3, RC_ALLOC_PERM); - if (!mesh.verts) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'mesh.verts' (%d).", maxVertices); - return false; - } - mesh.polys = (unsigned short*)rcAlloc(sizeof(unsigned short)*maxTris*nvp*2*2, RC_ALLOC_PERM); - if (!mesh.polys) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'mesh.polys' (%d).", maxTris*nvp*2); - return false; - } - mesh.regs = (unsigned short*)rcAlloc(sizeof(unsigned short)*maxTris, RC_ALLOC_PERM); - if (!mesh.regs) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'mesh.regs' (%d).", maxTris); - return false; - } - mesh.areas = (unsigned char*)rcAlloc(sizeof(unsigned char)*maxTris, RC_ALLOC_PERM); - if (!mesh.areas) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'mesh.areas' (%d).", maxTris); - return false; - } - - mesh.nverts = 0; - mesh.npolys = 0; - mesh.nvp = nvp; - mesh.maxpolys = maxTris; - - memset(mesh.verts, 0, sizeof(unsigned short)*maxVertices*3); - memset(mesh.polys, 0xff, sizeof(unsigned short)*maxTris*nvp*2); - memset(mesh.regs, 0, sizeof(unsigned short)*maxTris); - memset(mesh.areas, 0, sizeof(unsigned char)*maxTris); - - rcScopedDelete<int> nextVert = (int*)rcAlloc(sizeof(int)*maxVertices, RC_ALLOC_TEMP); - if (!nextVert) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'nextVert' (%d).", maxVertices); - return false; - } - memset(nextVert, 0, sizeof(int)*maxVertices); - - rcScopedDelete<int> firstVert = (int*)rcAlloc(sizeof(int)*VERTEX_BUCKET_COUNT, RC_ALLOC_TEMP); - if (!firstVert) - { - ctx->log(RC_LOG_ERROR, 
"rcBuildPolyMesh: Out of memory 'firstVert' (%d).", VERTEX_BUCKET_COUNT); - return false; - } - for (int i = 0; i < VERTEX_BUCKET_COUNT; ++i) - firstVert[i] = -1; - - rcScopedDelete<int> indices = (int*)rcAlloc(sizeof(int)*maxVertsPerCont, RC_ALLOC_TEMP); - if (!indices) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'indices' (%d).", maxVertsPerCont); - return false; - } - rcScopedDelete<int> tris = (int*)rcAlloc(sizeof(int)*maxVertsPerCont*3, RC_ALLOC_TEMP); - if (!tris) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'tris' (%d).", maxVertsPerCont*3); - return false; - } - rcScopedDelete<unsigned short> polys = (unsigned short*)rcAlloc(sizeof(unsigned short)*(maxVertsPerCont+1)*nvp, RC_ALLOC_TEMP); - if (!polys) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'polys' (%d).", maxVertsPerCont*nvp); - return false; - } - unsigned short* tmpPoly = &polys[maxVertsPerCont*nvp]; - - for (int i = 0; i < cset.nconts; ++i) - { - rcContour& cont = cset.conts[i]; - - // Skip null contours. - if (cont.nverts < 3) - continue; - - // Triangulate contour - for (int j = 0; j < cont.nverts; ++j) - indices[j] = j; - - int ntris = triangulate(cont.nverts, cont.verts, &indices[0], &tris[0]); - if (ntris <= 0) - { - // Bad triangulation, should not happen. -/* printf("\tconst float bmin[3] = {%ff,%ff,%ff};\n", cset.bmin[0], cset.bmin[1], cset.bmin[2]); - printf("\tconst float cs = %ff;\n", cset.cs); - printf("\tconst float ch = %ff;\n", cset.ch); - printf("\tconst int verts[] = {\n"); - for (int k = 0; k < cont.nverts; ++k) - { - const int* v = &cont.verts[k*4]; - printf("\t\t%d,%d,%d,%d,\n", v[0], v[1], v[2], v[3]); - } - printf("\t};\n\tconst int nverts = sizeof(verts)/(sizeof(int)*4);\n");*/ - ctx->log(RC_LOG_WARNING, "rcBuildPolyMesh: Bad triangulation Contour %d.", i); - ntris = -ntris; - } - - // Add and merge vertices. - for (int j = 0; j < cont.nverts; ++j) - { - const int* v = &cont.verts[j*4]; - indices[j] = addVertex((unsigned short)v[0], (unsigned short)v[1], (unsigned short)v[2], - mesh.verts, firstVert, nextVert, mesh.nverts); - if (v[3] & RC_BORDER_VERTEX) - { - // This vertex should be removed. - vflags[indices[j]] = 1; - } - } - - // Build initial polygons. - int npolys = 0; - memset(polys, 0xff, maxVertsPerCont*nvp*sizeof(unsigned short)); - for (int j = 0; j < ntris; ++j) - { - int* t = &tris[j*3]; - if (t[0] != t[1] && t[0] != t[2] && t[1] != t[2]) - { - polys[npolys*nvp+0] = (unsigned short)indices[t[0]]; - polys[npolys*nvp+1] = (unsigned short)indices[t[1]]; - polys[npolys*nvp+2] = (unsigned short)indices[t[2]]; - npolys++; - } - } - if (!npolys) - continue; - - // Merge polygons. - if (nvp > 3) - { - for(;;) - { - // Find best polygons to merge. - int bestMergeVal = 0; - int bestPa = 0, bestPb = 0, bestEa = 0, bestEb = 0; - - for (int j = 0; j < npolys-1; ++j) - { - unsigned short* pj = &polys[j*nvp]; - for (int k = j+1; k < npolys; ++k) - { - unsigned short* pk = &polys[k*nvp]; - int ea, eb; - int v = getPolyMergeValue(pj, pk, mesh.verts, ea, eb, nvp); - if (v > bestMergeVal) - { - bestMergeVal = v; - bestPa = j; - bestPb = k; - bestEa = ea; - bestEb = eb; - } - } - } - - if (bestMergeVal > 0) - { - // Found best, merge. - unsigned short* pa = &polys[bestPa*nvp]; - unsigned short* pb = &polys[bestPb*nvp]; - mergePolys(pa, pb, bestEa, bestEb, tmpPoly, nvp); - memcpy(pb, &polys[(npolys-1)*nvp], sizeof(unsigned short)*nvp); - npolys--; - } - else - { - // Could not merge any polygons, stop. - break; - } - } - } - - // Store polygons. 
- for (int j = 0; j < npolys; ++j) - { - unsigned short* p = &mesh.polys[mesh.npolys*nvp*2]; - unsigned short* q = &polys[j*nvp]; - for (int k = 0; k < nvp; ++k) - p[k] = q[k]; - mesh.regs[mesh.npolys] = cont.reg; - mesh.areas[mesh.npolys] = cont.area; - mesh.npolys++; - if (mesh.npolys > maxTris) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Too many polygons %d (max:%d).", mesh.npolys, maxTris); - return false; - } - } - } - - - // Remove edge vertices. - for (int i = 0; i < mesh.nverts; ++i) - { - if (vflags[i]) - { - if (!canRemoveVertex(ctx, mesh, (unsigned short)i)) - continue; - if (!removeVertex(ctx, mesh, (unsigned short)i, maxTris)) - { - // Failed to remove vertex - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Failed to remove edge vertex %d.", i); - return false; - } - // Remove vertex - // Note: mesh.nverts is already decremented inside removeVertex()! - for (int j = i; j < mesh.nverts; ++j) - vflags[j] = vflags[j+1]; - --i; - } - } - - // Calculate adjacency. - if (!buildMeshAdjacency(mesh.polys, mesh.npolys, mesh.nverts, nvp)) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Adjacency failed."); - return false; - } - - // Just allocate the mesh flags array. The user is resposible to fill it. - mesh.flags = (unsigned short*)rcAlloc(sizeof(unsigned short)*mesh.npolys, RC_ALLOC_PERM); - if (!mesh.flags) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMesh: Out of memory 'mesh.flags' (%d).", mesh.npolys); - return false; - } - memset(mesh.flags, 0, sizeof(unsigned short) * mesh.npolys); - - if (mesh.nverts > 0xffff) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: The resulting mesh has too many vertices %d (max %d). Data can be corrupted.", mesh.nverts, 0xffff); - } - if (mesh.npolys > 0xffff) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: The resulting mesh has too many polygons %d (max %d). 
Data can be corrupted.", mesh.npolys, 0xffff); - } - - ctx->stopTimer(RC_TIMER_BUILD_POLYMESH); - - return true; -} - -bool rcMergePolyMeshes(rcContext* ctx, rcPolyMesh** meshes, const int nmeshes, rcPolyMesh& mesh) -{ - rcAssert(ctx); - - if (!nmeshes || !meshes) - return true; - - ctx->startTimer(RC_TIMER_MERGE_POLYMESH); - - mesh.nvp = meshes[0]->nvp; - mesh.cs = meshes[0]->cs; - mesh.ch = meshes[0]->ch; - rcVcopy(mesh.bmin, meshes[0]->bmin); - rcVcopy(mesh.bmax, meshes[0]->bmax); - - int maxVerts = 0; - int maxPolys = 0; - int maxVertsPerMesh = 0; - for (int i = 0; i < nmeshes; ++i) - { - rcVmin(mesh.bmin, meshes[i]->bmin); - rcVmax(mesh.bmax, meshes[i]->bmax); - maxVertsPerMesh = rcMax(maxVertsPerMesh, meshes[i]->nverts); - maxVerts += meshes[i]->nverts; - maxPolys += meshes[i]->npolys; - } - - mesh.nverts = 0; - mesh.verts = (unsigned short*)rcAlloc(sizeof(unsigned short)*maxVerts*3, RC_ALLOC_PERM); - if (!mesh.verts) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: Out of memory 'mesh.verts' (%d).", maxVerts*3); - return false; - } - - mesh.npolys = 0; - mesh.polys = (unsigned short*)rcAlloc(sizeof(unsigned short)*maxPolys*2*mesh.nvp, RC_ALLOC_PERM); - if (!mesh.polys) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: Out of memory 'mesh.polys' (%d).", maxPolys*2*mesh.nvp); - return false; - } - memset(mesh.polys, 0xff, sizeof(unsigned short)*maxPolys*2*mesh.nvp); - - mesh.regs = (unsigned short*)rcAlloc(sizeof(unsigned short)*maxPolys, RC_ALLOC_PERM); - if (!mesh.regs) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: Out of memory 'mesh.regs' (%d).", maxPolys); - return false; - } - memset(mesh.regs, 0, sizeof(unsigned short)*maxPolys); - - mesh.areas = (unsigned char*)rcAlloc(sizeof(unsigned char)*maxPolys, RC_ALLOC_PERM); - if (!mesh.areas) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: Out of memory 'mesh.areas' (%d).", maxPolys); - return false; - } - memset(mesh.areas, 0, sizeof(unsigned char)*maxPolys); - - mesh.flags = (unsigned short*)rcAlloc(sizeof(unsigned short)*maxPolys, RC_ALLOC_PERM); - if (!mesh.flags) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: Out of memory 'mesh.flags' (%d).", maxPolys); - return false; - } - memset(mesh.flags, 0, sizeof(unsigned short)*maxPolys); - - rcScopedDelete<int> nextVert = (int*)rcAlloc(sizeof(int)*maxVerts, RC_ALLOC_TEMP); - if (!nextVert) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: Out of memory 'nextVert' (%d).", maxVerts); - return false; - } - memset(nextVert, 0, sizeof(int)*maxVerts); - - rcScopedDelete<int> firstVert = (int*)rcAlloc(sizeof(int)*VERTEX_BUCKET_COUNT, RC_ALLOC_TEMP); - if (!firstVert) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: Out of memory 'firstVert' (%d).", VERTEX_BUCKET_COUNT); - return false; - } - for (int i = 0; i < VERTEX_BUCKET_COUNT; ++i) - firstVert[i] = -1; - - rcScopedDelete<unsigned short> vremap = (unsigned short*)rcAlloc(sizeof(unsigned short)*maxVertsPerMesh, RC_ALLOC_PERM); - if (!vremap) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: Out of memory 'vremap' (%d).", maxVertsPerMesh); - return false; - } - memset(vremap, 0, sizeof(unsigned short)*maxVertsPerMesh); - - for (int i = 0; i < nmeshes; ++i) - { - const rcPolyMesh* pmesh = meshes[i]; - - const unsigned short ox = (unsigned short)floorf((pmesh->bmin[0]-mesh.bmin[0])/mesh.cs+0.5f); - const unsigned short oz = (unsigned short)floorf((pmesh->bmin[2]-mesh.bmin[2])/mesh.cs+0.5f); - - for (int j = 0; j < pmesh->nverts; ++j) - { - unsigned short* v = &pmesh->verts[j*3]; - vremap[j] = addVertex(v[0]+ox, v[1], v[2]+oz, - mesh.verts, 
firstVert, nextVert, mesh.nverts); - } - - for (int j = 0; j < pmesh->npolys; ++j) - { - unsigned short* tgt = &mesh.polys[mesh.npolys*2*mesh.nvp]; - unsigned short* src = &pmesh->polys[j*2*mesh.nvp]; - mesh.regs[mesh.npolys] = pmesh->regs[j]; - mesh.areas[mesh.npolys] = pmesh->areas[j]; - mesh.flags[mesh.npolys] = pmesh->flags[j]; - mesh.npolys++; - for (int k = 0; k < mesh.nvp; ++k) - { - if (src[k] == RC_MESH_NULL_IDX) break; - tgt[k] = vremap[src[k]]; - } - } - } - - // Calculate adjacency. - if (!buildMeshAdjacency(mesh.polys, mesh.npolys, mesh.nverts, mesh.nvp)) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: Adjacency failed."); - return false; - } - - if (mesh.nverts > 0xffff) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: The resulting mesh has too many vertices %d (max %d). Data can be corrupted.", mesh.nverts, 0xffff); - } - if (mesh.npolys > 0xffff) - { - ctx->log(RC_LOG_ERROR, "rcMergePolyMeshes: The resulting mesh has too many polygons %d (max %d). Data can be corrupted.", mesh.npolys, 0xffff); - } - - ctx->stopTimer(RC_TIMER_MERGE_POLYMESH); - - return true; -} diff --git a/deps/recastnavigation/Recast/RecastMeshDetail.cpp b/deps/recastnavigation/Recast/RecastMeshDetail.cpp deleted file mode 100644 index ffb4b58ee9..0000000000 --- a/deps/recastnavigation/Recast/RecastMeshDetail.cpp +++ /dev/null @@ -1,1237 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. 
-// - -#include <float.h> -#define _USE_MATH_DEFINES -#include <math.h> -#include <string.h> -#include <stdlib.h> -#include <stdio.h> -#include "Recast.h" -#include "RecastAlloc.h" -#include "RecastAssert.h" - - -static const unsigned RC_UNSET_HEIGHT = 0xffff; - -struct rcHeightPatch -{ - inline rcHeightPatch() : data(0), xmin(0), ymin(0), width(0), height(0) {} - inline ~rcHeightPatch() { rcFree(data); } - unsigned short* data; - int xmin, ymin, width, height; -}; - - -inline float vdot2(const float* a, const float* b) -{ - return a[0]*b[0] + a[2]*b[2]; -} - -inline float vdistSq2(const float* p, const float* q) -{ - const float dx = q[0] - p[0]; - const float dy = q[2] - p[2]; - return dx*dx + dy*dy; -} - -inline float vdist2(const float* p, const float* q) -{ - return sqrtf(vdistSq2(p,q)); -} - -inline float vcross2(const float* p1, const float* p2, const float* p3) -{ - const float u1 = p2[0] - p1[0]; - const float v1 = p2[2] - p1[2]; - const float u2 = p3[0] - p1[0]; - const float v2 = p3[2] - p1[2]; - return u1 * v2 - v1 * u2; -} - -static bool circumCircle(const float* p1, const float* p2, const float* p3, - float* c, float& r) -{ - static const float EPS = 1e-6f; - - const float cp = vcross2(p1, p2, p3); - if (fabsf(cp) > EPS) - { - const float p1Sq = vdot2(p1,p1); - const float p2Sq = vdot2(p2,p2); - const float p3Sq = vdot2(p3,p3); - c[0] = (p1Sq*(p2[2]-p3[2]) + p2Sq*(p3[2]-p1[2]) + p3Sq*(p1[2]-p2[2])) / (2*cp); - c[2] = (p1Sq*(p3[0]-p2[0]) + p2Sq*(p1[0]-p3[0]) + p3Sq*(p2[0]-p1[0])) / (2*cp); - r = vdist2(c, p1); - return true; - } - - c[0] = p1[0]; - c[2] = p1[2]; - r = 0; - return false; -} - -static float distPtTri(const float* p, const float* a, const float* b, const float* c) -{ - float v0[3], v1[3], v2[3]; - rcVsub(v0, c,a); - rcVsub(v1, b,a); - rcVsub(v2, p,a); - - const float dot00 = vdot2(v0, v0); - const float dot01 = vdot2(v0, v1); - const float dot02 = vdot2(v0, v2); - const float dot11 = vdot2(v1, v1); - const float dot12 = vdot2(v1, v2); - - // Compute barycentric coordinates - const float invDenom = 1.0f / (dot00 * dot11 - dot01 * dot01); - const float u = (dot11 * dot02 - dot01 * dot12) * invDenom; - float v = (dot00 * dot12 - dot01 * dot02) * invDenom; - - // If point lies inside the triangle, return interpolated y-coord. 
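// Note: u and v are the barycentric weights of (c-a) and (b-a) on the xz-plane. The
// point projects inside the triangle when u >= 0, v >= 0 and u + v <= 1 (the EPS below
// adds a small tolerance), and its height on the triangle plane is then
// a[1] + u*(c[1]-a[1]) + v*(b[1]-a[1]), which is compared against the query height p[1].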
- static const float EPS = 1e-4f; - if (u >= -EPS && v >= -EPS && (u+v) <= 1+EPS) - { - const float y = a[1] + v0[1]*u + v1[1]*v; - return fabsf(y-p[1]); - } - return FLT_MAX; -} - -static float distancePtSeg(const float* pt, const float* p, const float* q) -{ - float pqx = q[0] - p[0]; - float pqy = q[1] - p[1]; - float pqz = q[2] - p[2]; - float dx = pt[0] - p[0]; - float dy = pt[1] - p[1]; - float dz = pt[2] - p[2]; - float d = pqx*pqx + pqy*pqy + pqz*pqz; - float t = pqx*dx + pqy*dy + pqz*dz; - if (d > 0) - t /= d; - if (t < 0) - t = 0; - else if (t > 1) - t = 1; - - dx = p[0] + t*pqx - pt[0]; - dy = p[1] + t*pqy - pt[1]; - dz = p[2] + t*pqz - pt[2]; - - return dx*dx + dy*dy + dz*dz; -} - -static float distancePtSeg2d(const float* pt, const float* p, const float* q) -{ - float pqx = q[0] - p[0]; - float pqz = q[2] - p[2]; - float dx = pt[0] - p[0]; - float dz = pt[2] - p[2]; - float d = pqx*pqx + pqz*pqz; - float t = pqx*dx + pqz*dz; - if (d > 0) - t /= d; - if (t < 0) - t = 0; - else if (t > 1) - t = 1; - - dx = p[0] + t*pqx - pt[0]; - dz = p[2] + t*pqz - pt[2]; - - return dx*dx + dz*dz; -} - -static float distToTriMesh(const float* p, const float* verts, const int /*nverts*/, const int* tris, const int ntris) -{ - float dmin = FLT_MAX; - for (int i = 0; i < ntris; ++i) - { - const float* va = &verts[tris[i*4+0]*3]; - const float* vb = &verts[tris[i*4+1]*3]; - const float* vc = &verts[tris[i*4+2]*3]; - float d = distPtTri(p, va,vb,vc); - if (d < dmin) - dmin = d; - } - if (dmin == FLT_MAX) return -1; - return dmin; -} - -static float distToPoly(int nvert, const float* verts, const float* p) -{ - - float dmin = FLT_MAX; - int i, j, c = 0; - for (i = 0, j = nvert-1; i < nvert; j = i++) - { - const float* vi = &verts[i*3]; - const float* vj = &verts[j*3]; - if (((vi[2] > p[2]) != (vj[2] > p[2])) && - (p[0] < (vj[0]-vi[0]) * (p[2]-vi[2]) / (vj[2]-vi[2]) + vi[0]) ) - c = !c; - dmin = rcMin(dmin, distancePtSeg2d(p, vj, vi)); - } - return c ? -dmin : dmin; -} - - -static unsigned short getHeight(const float fx, const float fy, const float fz, - const float /*cs*/, const float ics, const float ch, - const rcHeightPatch& hp) -{ - int ix = (int)floorf(fx*ics + 0.01f); - int iz = (int)floorf(fz*ics + 0.01f); - ix = rcClamp(ix-hp.xmin, 0, hp.width); - iz = rcClamp(iz-hp.ymin, 0, hp.height); - unsigned short h = hp.data[ix+iz*hp.width]; - if (h == RC_UNSET_HEIGHT) - { - // Special case when data might be bad. - // Find nearest neighbour pixel which has valid height. 
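// Note: hp.data may still contain RC_UNSET_HEIGHT for cells the height gathering did not
// cover (for instance just outside the polygon's region). The fallback below scans the
// eight surrounding cells and keeps the neighbour whose stored height is closest to the
// query height fy; a purely distance-based variant is left commented out underneath.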
- const int off[8*2] = { -1,0, -1,-1, 0,-1, 1,-1, 1,0, 1,1, 0,1, -1,1}; - float dmin = FLT_MAX; - for (int i = 0; i < 8; ++i) - { - const int nx = ix+off[i*2+0]; - const int nz = iz+off[i*2+1]; - if (nx < 0 || nz < 0 || nx >= hp.width || nz >= hp.height) continue; - const unsigned short nh = hp.data[nx+nz*hp.width]; - if (nh == RC_UNSET_HEIGHT) continue; - - const float d = fabsf(nh*ch - fy); - if (d < dmin) - { - h = nh; - dmin = d; - } - -/* const float dx = (nx+0.5f)*cs - fx; - const float dz = (nz+0.5f)*cs - fz; - const float d = dx*dx+dz*dz; - if (d < dmin) - { - h = nh; - dmin = d; - } */ - } - } - return h; -} - - -enum EdgeValues -{ - UNDEF = -1, - HULL = -2, -}; - -static int findEdge(const int* edges, int nedges, int s, int t) -{ - for (int i = 0; i < nedges; i++) - { - const int* e = &edges[i*4]; - if ((e[0] == s && e[1] == t) || (e[0] == t && e[1] == s)) - return i; - } - return UNDEF; -} - -static int addEdge(rcContext* ctx, int* edges, int& nedges, const int maxEdges, int s, int t, int l, int r) -{ - if (nedges >= maxEdges) - { - ctx->log(RC_LOG_ERROR, "addEdge: Too many edges (%d/%d).", nedges, maxEdges); - return UNDEF; - } - - // Add edge if not already in the triangulation. - int e = findEdge(edges, nedges, s, t); - if (e == UNDEF) - { - int* e = &edges[nedges*4]; - e[0] = s; - e[1] = t; - e[2] = l; - e[3] = r; - return nedges++; - } - else - { - return UNDEF; - } -} - -static void updateLeftFace(int* e, int s, int t, int f) -{ - if (e[0] == s && e[1] == t && e[2] == UNDEF) - e[2] = f; - else if (e[1] == s && e[0] == t && e[3] == UNDEF) - e[3] = f; -} - -static int overlapSegSeg2d(const float* a, const float* b, const float* c, const float* d) -{ - const float a1 = vcross2(a, b, d); - const float a2 = vcross2(a, b, c); - if (a1*a2 < 0.0f) - { - float a3 = vcross2(c, d, a); - float a4 = a3 + a2 - a1; - if (a3 * a4 < 0.0f) - return 1; - } - return 0; -} - -static bool overlapEdges(const float* pts, const int* edges, int nedges, int s1, int t1) -{ - for (int i = 0; i < nedges; ++i) - { - const int s0 = edges[i*4+0]; - const int t0 = edges[i*4+1]; - // Same or connected edges do not overlap. - if (s0 == s1 || s0 == t1 || t0 == s1 || t0 == t1) - continue; - if (overlapSegSeg2d(&pts[s0*3],&pts[t0*3], &pts[s1*3],&pts[t1*3])) - return true; - } - return false; -} - -static void completeFacet(rcContext* ctx, const float* pts, int npts, int* edges, int& nedges, const int maxEdges, int& nfaces, int e) -{ - static const float EPS = 1e-5f; - - int* edge = &edges[e*4]; - - // Cache s and t. - int s,t; - if (edge[2] == UNDEF) - { - s = edge[0]; - t = edge[1]; - } - else if (edge[3] == UNDEF) - { - s = edge[1]; - t = edge[0]; - } - else - { - // Edge already completed. - return; - } - - // Find best point on left of edge. - int pt = npts; - float c[3] = {0,0,0}; - float r = -1; - for (int u = 0; u < npts; ++u) - { - if (u == s || u == t) continue; - if (vcross2(&pts[s*3], &pts[t*3], &pts[u*3]) > EPS) - { - if (r < 0) - { - // The circle is not updated yet, do it now. - pt = u; - circumCircle(&pts[s*3], &pts[t*3], &pts[u*3], c, r); - continue; - } - const float d = vdist2(c, &pts[u*3]); - const float tol = 0.001f; - if (d > r*(1+tol)) - { - // Outside current circumcircle, skip. - continue; - } - else if (d < r*(1-tol)) - { - // Inside safe circumcircle, update circle. - pt = u; - circumCircle(&pts[s*3], &pts[t*3], &pts[u*3], c, r); - } - else - { - // Inside epsilon circum circle, do extra tests to make sure the edge is valid. 
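// Note: a candidate lying on the circumcircle itself (within the 0.1% tolerance above)
// is ambiguous for the Delaunay in-circle test, so the code falls back to a planarity
// check: the would-be edges s-u and t-u are rejected if they cross any edge already
// committed to the triangulation.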
- // s-u and t-u cannot overlap with s-pt nor t-pt if they exists. - if (overlapEdges(pts, edges, nedges, s,u)) - continue; - if (overlapEdges(pts, edges, nedges, t,u)) - continue; - // Edge is valid. - pt = u; - circumCircle(&pts[s*3], &pts[t*3], &pts[u*3], c, r); - } - } - } - - // Add new triangle or update edge info if s-t is on hull. - if (pt < npts) - { - // Update face information of edge being completed. - updateLeftFace(&edges[e*4], s, t, nfaces); - - // Add new edge or update face info of old edge. - e = findEdge(edges, nedges, pt, s); - if (e == UNDEF) - addEdge(ctx, edges, nedges, maxEdges, pt, s, nfaces, UNDEF); - else - updateLeftFace(&edges[e*4], pt, s, nfaces); - - // Add new edge or update face info of old edge. - e = findEdge(edges, nedges, t, pt); - if (e == UNDEF) - addEdge(ctx, edges, nedges, maxEdges, t, pt, nfaces, UNDEF); - else - updateLeftFace(&edges[e*4], t, pt, nfaces); - - nfaces++; - } - else - { - updateLeftFace(&edges[e*4], s, t, HULL); - } -} - -static void delaunayHull(rcContext* ctx, const int npts, const float* pts, - const int nhull, const int* hull, - rcIntArray& tris, rcIntArray& edges) -{ - int nfaces = 0; - int nedges = 0; - const int maxEdges = npts*10; - edges.resize(maxEdges*4); - - for (int i = 0, j = nhull-1; i < nhull; j=i++) - addEdge(ctx, &edges[0], nedges, maxEdges, hull[j],hull[i], HULL, UNDEF); - - int currentEdge = 0; - while (currentEdge < nedges) - { - if (edges[currentEdge*4+2] == UNDEF) - completeFacet(ctx, pts, npts, &edges[0], nedges, maxEdges, nfaces, currentEdge); - if (edges[currentEdge*4+3] == UNDEF) - completeFacet(ctx, pts, npts, &edges[0], nedges, maxEdges, nfaces, currentEdge); - currentEdge++; - } - - // Create tris - tris.resize(nfaces*4); - for (int i = 0; i < nfaces*4; ++i) - tris[i] = -1; - - for (int i = 0; i < nedges; ++i) - { - const int* e = &edges[i*4]; - if (e[3] >= 0) - { - // Left face - int* t = &tris[e[3]*4]; - if (t[0] == -1) - { - t[0] = e[0]; - t[1] = e[1]; - } - else if (t[0] == e[1]) - t[2] = e[0]; - else if (t[1] == e[0]) - t[2] = e[1]; - } - if (e[2] >= 0) - { - // Right - int* t = &tris[e[2]*4]; - if (t[0] == -1) - { - t[0] = e[1]; - t[1] = e[0]; - } - else if (t[0] == e[0]) - t[2] = e[1]; - else if (t[1] == e[1]) - t[2] = e[0]; - } - } - - for (int i = 0; i < tris.size()/4; ++i) - { - int* t = &tris[i*4]; - if (t[0] == -1 || t[1] == -1 || t[2] == -1) - { - ctx->log(RC_LOG_WARNING, "delaunayHull: Removing dangling face %d [%d,%d,%d].", i, t[0],t[1],t[2]); - t[0] = tris[tris.size()-4]; - t[1] = tris[tris.size()-3]; - t[2] = tris[tris.size()-2]; - t[3] = tris[tris.size()-1]; - tris.resize(tris.size()-4); - --i; - } - } -} - - -inline float getJitterX(const int i) -{ - return (((i * 0x8da6b343) & 0xffff) / 65535.0f * 2.0f) - 1.0f; -} - -inline float getJitterY(const int i) -{ - return (((i * 0xd8163841) & 0xffff) / 65535.0f * 2.0f) - 1.0f; -} - -static bool buildPolyDetail(rcContext* ctx, const float* in, const int nin, - const float sampleDist, const float sampleMaxError, - const rcCompactHeightfield& chf, const rcHeightPatch& hp, - float* verts, int& nverts, rcIntArray& tris, - rcIntArray& edges, rcIntArray& samples) -{ - static const int MAX_VERTS = 127; - static const int MAX_TRIS = 255; // Max tris for delaunay is 2n-2-k (n=num verts, k=num hull verts). 
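// Note: a triangulation of n points with k of them on the convex hull has 2n - 2 - k
// triangles, so with MAX_VERTS = 127 and at least 3 hull vertices the worst case is
// 2*127 - 2 - 3 = 249, which fits the MAX_TRIS = 255 budget. Recorded as a compile-time
// check (C++11 syntax, an editor's addition rather than original Recast code):
static_assert(2*MAX_VERTS - 2 - 3 <= MAX_TRIS, "detail triangle budget covers the Delaunay worst case");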
- static const int MAX_VERTS_PER_EDGE = 32; - float edge[(MAX_VERTS_PER_EDGE+1)*3]; - int hull[MAX_VERTS]; - int nhull = 0; - - nverts = 0; - - for (int i = 0; i < nin; ++i) - rcVcopy(&verts[i*3], &in[i*3]); - nverts = nin; - - const float cs = chf.cs; - const float ics = 1.0f/cs; - - // Tessellate outlines. - // This is done in separate pass in order to ensure - // seamless height values across the ply boundaries. - if (sampleDist > 0) - { - for (int i = 0, j = nin-1; i < nin; j=i++) - { - const float* vj = &in[j*3]; - const float* vi = &in[i*3]; - bool swapped = false; - // Make sure the segments are always handled in same order - // using lexological sort or else there will be seams. - if (fabsf(vj[0]-vi[0]) < 1e-6f) - { - if (vj[2] > vi[2]) - { - rcSwap(vj,vi); - swapped = true; - } - } - else - { - if (vj[0] > vi[0]) - { - rcSwap(vj,vi); - swapped = true; - } - } - // Create samples along the edge. - float dx = vi[0] - vj[0]; - float dy = vi[1] - vj[1]; - float dz = vi[2] - vj[2]; - float d = sqrtf(dx*dx + dz*dz); - int nn = 1 + (int)floorf(d/sampleDist); - if (nn >= MAX_VERTS_PER_EDGE) nn = MAX_VERTS_PER_EDGE-1; - if (nverts+nn >= MAX_VERTS) - nn = MAX_VERTS-1-nverts; - - for (int k = 0; k <= nn; ++k) - { - float u = (float)k/(float)nn; - float* pos = &edge[k*3]; - pos[0] = vj[0] + dx*u; - pos[1] = vj[1] + dy*u; - pos[2] = vj[2] + dz*u; - pos[1] = getHeight(pos[0],pos[1],pos[2], cs, ics, chf.ch, hp)*chf.ch; - } - // Simplify samples. - int idx[MAX_VERTS_PER_EDGE] = {0,nn}; - int nidx = 2; - for (int k = 0; k < nidx-1; ) - { - const int a = idx[k]; - const int b = idx[k+1]; - const float* va = &edge[a*3]; - const float* vb = &edge[b*3]; - // Find maximum deviation along the segment. - float maxd = 0; - int maxi = -1; - for (int m = a+1; m < b; ++m) - { - float d = distancePtSeg(&edge[m*3],va,vb); - if (d > maxd) - { - maxd = d; - maxi = m; - } - } - // If the max deviation is larger than accepted error, - // add new point, else continue to next segment. - if (maxi != -1 && maxd > rcSqr(sampleMaxError)) - { - for (int m = nidx; m > k; --m) - idx[m] = idx[m-1]; - idx[k+1] = maxi; - nidx++; - } - else - { - ++k; - } - } - - hull[nhull++] = j; - // Add new vertices. - if (swapped) - { - for (int k = nidx-2; k > 0; --k) - { - rcVcopy(&verts[nverts*3], &edge[idx[k]*3]); - hull[nhull++] = nverts; - nverts++; - } - } - else - { - for (int k = 1; k < nidx-1; ++k) - { - rcVcopy(&verts[nverts*3], &edge[idx[k]*3]); - hull[nhull++] = nverts; - nverts++; - } - } - } - } - - - // Tessellate the base mesh. - edges.resize(0); - tris.resize(0); - - delaunayHull(ctx, nverts, verts, nhull, hull, tris, edges); - - if (tris.size() == 0) - { - // Could not triangulate the poly, make sure there is some valid data there. - ctx->log(RC_LOG_WARNING, "buildPolyDetail: Could not triangulate polygon, adding default data."); - for (int i = 2; i < nverts; ++i) - { - tris.push(0); - tris.push(i-1); - tris.push(i); - tris.push(0); - } - return true; - } - - if (sampleDist > 0) - { - // Create sample locations in a grid. 
- float bmin[3], bmax[3]; - rcVcopy(bmin, in); - rcVcopy(bmax, in); - for (int i = 1; i < nin; ++i) - { - rcVmin(bmin, &in[i*3]); - rcVmax(bmax, &in[i*3]); - } - int x0 = (int)floorf(bmin[0]/sampleDist); - int x1 = (int)ceilf(bmax[0]/sampleDist); - int z0 = (int)floorf(bmin[2]/sampleDist); - int z1 = (int)ceilf(bmax[2]/sampleDist); - samples.resize(0); - for (int z = z0; z < z1; ++z) - { - for (int x = x0; x < x1; ++x) - { - float pt[3]; - pt[0] = x*sampleDist; - pt[1] = (bmax[1]+bmin[1])*0.5f; - pt[2] = z*sampleDist; - // Make sure the samples are not too close to the edges. - if (distToPoly(nin,in,pt) > -sampleDist/2) continue; - samples.push(x); - samples.push(getHeight(pt[0], pt[1], pt[2], cs, ics, chf.ch, hp)); - samples.push(z); - samples.push(0); // Not added - } - } - - // Add the samples starting from the one that has the most - // error. The procedure stops when all samples are added - // or when the max error is within treshold. - const int nsamples = samples.size()/4; - for (int iter = 0; iter < nsamples; ++iter) - { - if (nverts >= MAX_VERTS) - break; - - // Find sample with most error. - float bestpt[3] = {0,0,0}; - float bestd = 0; - int besti = -1; - for (int i = 0; i < nsamples; ++i) - { - const int* s = &samples[i*4]; - if (s[3]) continue; // skip added. - float pt[3]; - // The sample location is jittered to get rid of some bad triangulations - // which are cause by symmetrical data from the grid structure. - pt[0] = s[0]*sampleDist + getJitterX(i)*cs*0.1f; - pt[1] = s[1]*chf.ch; - pt[2] = s[2]*sampleDist + getJitterY(i)*cs*0.1f; - float d = distToTriMesh(pt, verts, nverts, &tris[0], tris.size()/4); - if (d < 0) continue; // did not hit the mesh. - if (d > bestd) - { - bestd = d; - besti = i; - rcVcopy(bestpt,pt); - } - } - // If the max error is within accepted threshold, stop tesselating. - if (bestd <= sampleMaxError || besti == -1) - break; - // Mark sample as added. - samples[besti*4+3] = 1; - // Add the new sample point. - rcVcopy(&verts[nverts*3],bestpt); - nverts++; - - // Create new triangulation. - // TODO: Incremental add instead of full rebuild. - edges.resize(0); - tris.resize(0); - delaunayHull(ctx, nverts, verts, nhull, hull, tris, edges); - } - } - - const int ntris = tris.size()/4; - if (ntris > MAX_TRIS) - { - tris.resize(MAX_TRIS*4); - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Shrinking triangle count from %d to max %d.", ntris, MAX_TRIS); - } - - return true; -} - -static void getHeightData(const rcCompactHeightfield& chf, - const unsigned short* poly, const int npoly, - const unsigned short* verts, - rcHeightPatch& hp, rcIntArray& stack) -{ - // Floodfill the heightfield to get 2D height data, - // starting at vertex locations as seeds. - - memset(hp.data, 0, sizeof(unsigned short)*hp.width*hp.height); - - stack.resize(0); - - static const int offset[9*2] = - { - 0,0, -1,-1, 0,-1, 1,-1, 1,0, 1,1, 0,1, -1,1, -1,0, - }; - - // Use poly vertices as seed points for the flood fill. 
- for (int j = 0; j < npoly; ++j) - { - int cx = 0, cz = 0, ci =-1; - int dmin = RC_UNSET_HEIGHT; - for (int k = 0; k < 9; ++k) - { - const int ax = (int)verts[poly[j]*3+0] + offset[k*2+0]; - const int ay = (int)verts[poly[j]*3+1]; - const int az = (int)verts[poly[j]*3+2] + offset[k*2+1]; - if (ax < hp.xmin || ax >= hp.xmin+hp.width || - az < hp.ymin || az >= hp.ymin+hp.height) - continue; - - const rcCompactCell& c = chf.cells[ax+az*chf.width]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - const rcCompactSpan& s = chf.spans[i]; - int d = rcAbs(ay - (int)s.y); - if (d < dmin) - { - cx = ax; - cz = az; - ci = i; - dmin = d; - } - } - } - if (ci != -1) - { - stack.push(cx); - stack.push(cz); - stack.push(ci); - } - } - - // Find center of the polygon using flood fill. - int pcx = 0, pcz = 0; - for (int j = 0; j < npoly; ++j) - { - pcx += (int)verts[poly[j]*3+0]; - pcz += (int)verts[poly[j]*3+2]; - } - pcx /= npoly; - pcz /= npoly; - - for (int i = 0; i < stack.size(); i += 3) - { - int cx = stack[i+0]; - int cy = stack[i+1]; - int idx = cx-hp.xmin+(cy-hp.ymin)*hp.width; - hp.data[idx] = 1; - } - - while (stack.size() > 0) - { - int ci = stack.pop(); - int cy = stack.pop(); - int cx = stack.pop(); - - // Check if close to center of the polygon. - if (rcAbs(cx-pcx) <= 1 && rcAbs(cy-pcz) <= 1) - { - stack.resize(0); - stack.push(cx); - stack.push(cy); - stack.push(ci); - break; - } - - const rcCompactSpan& cs = chf.spans[ci]; - - for (int dir = 0; dir < 4; ++dir) - { - if (rcGetCon(cs, dir) == RC_NOT_CONNECTED) continue; - - const int ax = cx + rcGetDirOffsetX(dir); - const int ay = cy + rcGetDirOffsetY(dir); - - if (ax < hp.xmin || ax >= (hp.xmin+hp.width) || - ay < hp.ymin || ay >= (hp.ymin+hp.height)) - continue; - - if (hp.data[ax-hp.xmin+(ay-hp.ymin)*hp.width] != 0) - continue; - - const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(cs, dir); - - int idx = ax-hp.xmin+(ay-hp.ymin)*hp.width; - hp.data[idx] = 1; - - stack.push(ax); - stack.push(ay); - stack.push(ai); - } - } - - memset(hp.data, 0xff, sizeof(unsigned short)*hp.width*hp.height); - - // Mark start locations. 
- for (int i = 0; i < stack.size(); i += 3) - { - int cx = stack[i+0]; - int cy = stack[i+1]; - int ci = stack[i+2]; - int idx = cx-hp.xmin+(cy-hp.ymin)*hp.width; - const rcCompactSpan& cs = chf.spans[ci]; - hp.data[idx] = cs.y; - } - - static const int RETRACT_SIZE = 256; - int head = 0; - - while (head*3 < stack.size()) - { - int cx = stack[head*3+0]; - int cy = stack[head*3+1]; - int ci = stack[head*3+2]; - head++; - if (head >= RETRACT_SIZE) - { - head = 0; - if (stack.size() > RETRACT_SIZE*3) - memmove(&stack[0], &stack[RETRACT_SIZE*3], sizeof(int)*(stack.size()-RETRACT_SIZE*3)); - stack.resize(stack.size()-RETRACT_SIZE*3); - } - - const rcCompactSpan& cs = chf.spans[ci]; - for (int dir = 0; dir < 4; ++dir) - { - if (rcGetCon(cs, dir) == RC_NOT_CONNECTED) continue; - - const int ax = cx + rcGetDirOffsetX(dir); - const int ay = cy + rcGetDirOffsetY(dir); - - if (ax < hp.xmin || ax >= (hp.xmin+hp.width) || - ay < hp.ymin || ay >= (hp.ymin+hp.height)) - continue; - - if (hp.data[ax-hp.xmin+(ay-hp.ymin)*hp.width] != RC_UNSET_HEIGHT) - continue; - - const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(cs, dir); - - const rcCompactSpan& as = chf.spans[ai]; - int idx = ax-hp.xmin+(ay-hp.ymin)*hp.width; - hp.data[idx] = as.y; - - stack.push(ax); - stack.push(ay); - stack.push(ai); - } - } - -} - -static unsigned char getEdgeFlags(const float* va, const float* vb, - const float* vpoly, const int npoly) -{ - // Return true if edge (va,vb) is part of the polygon. - static const float thrSqr = rcSqr(0.001f); - for (int i = 0, j = npoly-1; i < npoly; j=i++) - { - if (distancePtSeg2d(va, &vpoly[j*3], &vpoly[i*3]) < thrSqr && - distancePtSeg2d(vb, &vpoly[j*3], &vpoly[i*3]) < thrSqr) - return 1; - } - return 0; -} - -static unsigned char getTriFlags(const float* va, const float* vb, const float* vc, - const float* vpoly, const int npoly) -{ - unsigned char flags = 0; - flags |= getEdgeFlags(va,vb,vpoly,npoly) << 0; - flags |= getEdgeFlags(vb,vc,vpoly,npoly) << 2; - flags |= getEdgeFlags(vc,va,vpoly,npoly) << 4; - return flags; -} - - - -bool rcBuildPolyMeshDetail(rcContext* ctx, const rcPolyMesh& mesh, const rcCompactHeightfield& chf, - const float sampleDist, const float sampleMaxError, - rcPolyMeshDetail& dmesh) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_BUILD_POLYMESHDETAIL); - - if (mesh.nverts == 0 || mesh.npolys == 0) - return true; - - const int nvp = mesh.nvp; - const float cs = mesh.cs; - const float ch = mesh.ch; - const float* orig = mesh.bmin; - - rcIntArray edges(64); - rcIntArray tris(512); - rcIntArray stack(512); - rcIntArray samples(512); - float verts[256*3]; - rcHeightPatch hp; - int nPolyVerts = 0; - int maxhw = 0, maxhh = 0; - - rcScopedDelete<int> bounds = (int*)rcAlloc(sizeof(int)*mesh.npolys*4, RC_ALLOC_TEMP); - if (!bounds) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'bounds' (%d).", mesh.npolys*4); - return false; - } - rcScopedDelete<float> poly = (float*)rcAlloc(sizeof(float)*nvp*3, RC_ALLOC_TEMP); - if (!poly) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'poly' (%d).", nvp*3); - return false; - } - - // Find max size for a polygon area. 
- for (int i = 0; i < mesh.npolys; ++i) - { - const unsigned short* p = &mesh.polys[i*nvp*2]; - int& xmin = bounds[i*4+0]; - int& xmax = bounds[i*4+1]; - int& ymin = bounds[i*4+2]; - int& ymax = bounds[i*4+3]; - xmin = chf.width; - xmax = 0; - ymin = chf.height; - ymax = 0; - for (int j = 0; j < nvp; ++j) - { - if(p[j] == RC_MESH_NULL_IDX) break; - const unsigned short* v = &mesh.verts[p[j]*3]; - xmin = rcMin(xmin, (int)v[0]); - xmax = rcMax(xmax, (int)v[0]); - ymin = rcMin(ymin, (int)v[2]); - ymax = rcMax(ymax, (int)v[2]); - nPolyVerts++; - } - xmin = rcMax(0,xmin-1); - xmax = rcMin(chf.width,xmax+1); - ymin = rcMax(0,ymin-1); - ymax = rcMin(chf.height,ymax+1); - if (xmin >= xmax || ymin >= ymax) continue; - maxhw = rcMax(maxhw, xmax-xmin); - maxhh = rcMax(maxhh, ymax-ymin); - } - - hp.data = (unsigned short*)rcAlloc(sizeof(unsigned short)*maxhw*maxhh, RC_ALLOC_TEMP); - if (!hp.data) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'hp.data' (%d).", maxhw*maxhh); - return false; - } - - dmesh.nmeshes = mesh.npolys; - dmesh.nverts = 0; - dmesh.ntris = 0; - dmesh.meshes = (unsigned int*)rcAlloc(sizeof(unsigned int)*dmesh.nmeshes*4, RC_ALLOC_PERM); - if (!dmesh.meshes) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'dmesh.meshes' (%d).", dmesh.nmeshes*4); - return false; - } - - int vcap = nPolyVerts+nPolyVerts/2; - int tcap = vcap*2; - - dmesh.nverts = 0; - dmesh.verts = (float*)rcAlloc(sizeof(float)*vcap*3, RC_ALLOC_PERM); - if (!dmesh.verts) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'dmesh.verts' (%d).", vcap*3); - return false; - } - dmesh.ntris = 0; - dmesh.tris = (unsigned char*)rcAlloc(sizeof(unsigned char*)*tcap*4, RC_ALLOC_PERM); - if (!dmesh.tris) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'dmesh.tris' (%d).", tcap*4); - return false; - } - - for (int i = 0; i < mesh.npolys; ++i) - { - const unsigned short* p = &mesh.polys[i*nvp*2]; - - // Store polygon vertices for processing. - int npoly = 0; - for (int j = 0; j < nvp; ++j) - { - if(p[j] == RC_MESH_NULL_IDX) break; - const unsigned short* v = &mesh.verts[p[j]*3]; - poly[j*3+0] = v[0]*cs; - poly[j*3+1] = v[1]*ch; - poly[j*3+2] = v[2]*cs; - npoly++; - } - - // Get the height data from the area of the polygon. - hp.xmin = bounds[i*4+0]; - hp.ymin = bounds[i*4+2]; - hp.width = bounds[i*4+1]-bounds[i*4+0]; - hp.height = bounds[i*4+3]-bounds[i*4+2]; - getHeightData(chf, p, npoly, mesh.verts, hp, stack); - - // Build detail mesh. - int nverts = 0; - if (!buildPolyDetail(ctx, poly, npoly, - sampleDist, sampleMaxError, - chf, hp, verts, nverts, tris, - edges, samples)) - { - return false; - } - - // Move detail verts to world space. - for (int j = 0; j < nverts; ++j) - { - verts[j*3+0] += orig[0]; - verts[j*3+1] += orig[1] + chf.ch; // Is this offset necessary? - verts[j*3+2] += orig[2]; - } - // Offset poly too, will be used to flag checking. - for (int j = 0; j < npoly; ++j) - { - poly[j*3+0] += orig[0]; - poly[j*3+1] += orig[1]; - poly[j*3+2] += orig[2]; - } - - // Store detail submesh. - const int ntris = tris.size()/4; - - dmesh.meshes[i*4+0] = (unsigned int)dmesh.nverts; - dmesh.meshes[i*4+1] = (unsigned int)nverts; - dmesh.meshes[i*4+2] = (unsigned int)dmesh.ntris; - dmesh.meshes[i*4+3] = (unsigned int)ntris; - - // Store vertices, allocate more memory if necessary. 
- if (dmesh.nverts+nverts > vcap) - { - while (dmesh.nverts+nverts > vcap) - vcap += 256; - - float* newv = (float*)rcAlloc(sizeof(float)*vcap*3, RC_ALLOC_PERM); - if (!newv) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'newv' (%d).", vcap*3); - return false; - } - if (dmesh.nverts) - memcpy(newv, dmesh.verts, sizeof(float)*3*dmesh.nverts); - rcFree(dmesh.verts); - dmesh.verts = newv; - } - for (int j = 0; j < nverts; ++j) - { - dmesh.verts[dmesh.nverts*3+0] = verts[j*3+0]; - dmesh.verts[dmesh.nverts*3+1] = verts[j*3+1]; - dmesh.verts[dmesh.nverts*3+2] = verts[j*3+2]; - dmesh.nverts++; - } - - // Store triangles, allocate more memory if necessary. - if (dmesh.ntris+ntris > tcap) - { - while (dmesh.ntris+ntris > tcap) - tcap += 256; - unsigned char* newt = (unsigned char*)rcAlloc(sizeof(unsigned char)*tcap*4, RC_ALLOC_PERM); - if (!newt) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'newt' (%d).", tcap*4); - return false; - } - if (dmesh.ntris) - memcpy(newt, dmesh.tris, sizeof(unsigned char)*4*dmesh.ntris); - rcFree(dmesh.tris); - dmesh.tris = newt; - } - for (int j = 0; j < ntris; ++j) - { - const int* t = &tris[j*4]; - dmesh.tris[dmesh.ntris*4+0] = (unsigned char)t[0]; - dmesh.tris[dmesh.ntris*4+1] = (unsigned char)t[1]; - dmesh.tris[dmesh.ntris*4+2] = (unsigned char)t[2]; - dmesh.tris[dmesh.ntris*4+3] = getTriFlags(&verts[t[0]*3], &verts[t[1]*3], &verts[t[2]*3], poly, npoly); - dmesh.ntris++; - } - } - - ctx->stopTimer(RC_TIMER_BUILD_POLYMESHDETAIL); - - return true; -} - -bool rcMergePolyMeshDetails(rcContext* ctx, rcPolyMeshDetail** meshes, const int nmeshes, rcPolyMeshDetail& mesh) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_MERGE_POLYMESHDETAIL); - - int maxVerts = 0; - int maxTris = 0; - int maxMeshes = 0; - - for (int i = 0; i < nmeshes; ++i) - { - if (!meshes[i]) continue; - maxVerts += meshes[i]->nverts; - maxTris += meshes[i]->ntris; - maxMeshes += meshes[i]->nmeshes; - } - - mesh.nmeshes = 0; - mesh.meshes = (unsigned int*)rcAlloc(sizeof(unsigned int)*maxMeshes*4, RC_ALLOC_PERM); - if (!mesh.meshes) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'pmdtl.meshes' (%d).", maxMeshes*4); - return false; - } - - mesh.ntris = 0; - mesh.tris = (unsigned char*)rcAlloc(sizeof(unsigned char)*maxTris*4, RC_ALLOC_PERM); - if (!mesh.tris) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'dmesh.tris' (%d).", maxTris*4); - return false; - } - - mesh.nverts = 0; - mesh.verts = (float*)rcAlloc(sizeof(float)*maxVerts*3, RC_ALLOC_PERM); - if (!mesh.verts) - { - ctx->log(RC_LOG_ERROR, "rcBuildPolyMeshDetail: Out of memory 'dmesh.verts' (%d).", maxVerts*3); - return false; - } - - // Merge datas. 
- for (int i = 0; i < nmeshes; ++i) - { - rcPolyMeshDetail* dm = meshes[i]; - if (!dm) continue; - for (int j = 0; j < dm->nmeshes; ++j) - { - unsigned int* dst = &mesh.meshes[mesh.nmeshes*4]; - unsigned int* src = &dm->meshes[j*4]; - dst[0] = (unsigned int)mesh.nverts+src[0]; - dst[1] = src[1]; - dst[2] = (unsigned int)mesh.ntris+src[2]; - dst[3] = src[3]; - mesh.nmeshes++; - } - - for (int k = 0; k < dm->nverts; ++k) - { - rcVcopy(&mesh.verts[mesh.nverts*3], &dm->verts[k*3]); - mesh.nverts++; - } - for (int k = 0; k < dm->ntris; ++k) - { - mesh.tris[mesh.ntris*4+0] = dm->tris[k*4+0]; - mesh.tris[mesh.ntris*4+1] = dm->tris[k*4+1]; - mesh.tris[mesh.ntris*4+2] = dm->tris[k*4+2]; - mesh.tris[mesh.ntris*4+3] = dm->tris[k*4+3]; - mesh.ntris++; - } - } - - ctx->stopTimer(RC_TIMER_MERGE_POLYMESHDETAIL); - - return true; -} - diff --git a/deps/recastnavigation/Recast/RecastRasterization.cpp b/deps/recastnavigation/Recast/RecastRasterization.cpp deleted file mode 100644 index 71adfb6732..0000000000 --- a/deps/recastnavigation/Recast/RecastRasterization.cpp +++ /dev/null @@ -1,360 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#define _USE_MATH_DEFINES -#include <math.h> -#include <stdio.h> -#include "Recast.h" -#include "RecastAlloc.h" -#include "RecastAssert.h" - -inline bool overlapBounds(const float* amin, const float* amax, const float* bmin, const float* bmax) -{ - bool overlap = true; - overlap = (amin[0] > bmax[0] || amax[0] < bmin[0]) ? false : overlap; - overlap = (amin[1] > bmax[1] || amax[1] < bmin[1]) ? false : overlap; - overlap = (amin[2] > bmax[2] || amax[2] < bmin[2]) ? false : overlap; - return overlap; -} - -inline bool overlapInterval(unsigned short amin, unsigned short amax, - unsigned short bmin, unsigned short bmax) -{ - if (amax < bmin) return false; - if (amin > bmax) return false; - return true; -} - - -static rcSpan* allocSpan(rcHeightfield& hf) -{ - // If running out of memory, allocate new page and update the freelist. - if (!hf.freelist || !hf.freelist->next) - { - // Create new page. - // Allocate memory for the new pool. - rcSpanPool* pool = (rcSpanPool*)rcAlloc(sizeof(rcSpanPool), RC_ALLOC_PERM); - if (!pool) return 0; - pool->next = 0; - // Add the pool into the list of pools. - pool->next = hf.pools; - hf.pools = pool; - // Add new items to the free list. - rcSpan* freelist = hf.freelist; - rcSpan* head = &pool->items[0]; - rcSpan* it = &pool->items[RC_SPANS_PER_POOL]; - do - { - --it; - it->next = freelist; - freelist = it; - } - while (it != head); - hf.freelist = it; - } - - // Pop item from in front of the free list. 
- rcSpan* it = hf.freelist; - hf.freelist = hf.freelist->next; - return it; -} - -static void freeSpan(rcHeightfield& hf, rcSpan* ptr) -{ - if (!ptr) return; - // Add the node in front of the free list. - ptr->next = hf.freelist; - hf.freelist = ptr; -} - -static void addSpan(rcHeightfield& hf, const int x, const int y, - const unsigned short smin, const unsigned short smax, - const unsigned char area, const int flagMergeThr) -{ - - int idx = x + y*hf.width; - - rcSpan* s = allocSpan(hf); - s->smin = smin; - s->smax = smax; - s->area = area; - s->next = 0; - - // Empty cell, add he first span. - if (!hf.spans[idx]) - { - hf.spans[idx] = s; - return; - } - rcSpan* prev = 0; - rcSpan* cur = hf.spans[idx]; - - // Insert and merge spans. - while (cur) - { - if (cur->smin > s->smax) - { - // Current span is further than the new span, break. - break; - } - else if (cur->smax < s->smin) - { - // Current span is before the new span advance. - prev = cur; - cur = cur->next; - } - else - { - // Merge spans. - if (cur->smin < s->smin) - s->smin = cur->smin; - if (cur->smax > s->smax) - s->smax = cur->smax; - - // Merge flags. - if (rcAbs((int)s->smax - (int)cur->smax) <= flagMergeThr) - s->area = rcMax(s->area, cur->area); - - // Remove current span. - rcSpan* next = cur->next; - freeSpan(hf, cur); - if (prev) - prev->next = next; - else - hf.spans[idx] = next; - cur = next; - } - } - - // Insert new span. - if (prev) - { - s->next = prev->next; - prev->next = s; - } - else - { - s->next = hf.spans[idx]; - hf.spans[idx] = s; - } -} - -void rcAddSpan(rcContext* /*ctx*/, rcHeightfield& hf, const int x, const int y, - const unsigned short smin, const unsigned short smax, - const unsigned char area, const int flagMergeThr) -{ -// rcAssert(ctx); - addSpan(hf, x,y, smin, smax, area, flagMergeThr); -} - -static int clipPoly(const float* in, int n, float* out, float pnx, float pnz, float pd) -{ - float d[12]; - for (int i = 0; i < n; ++i) - d[i] = pnx*in[i*3+0] + pnz*in[i*3+2] + pd; - - int m = 0; - for (int i = 0, j = n-1; i < n; j=i, ++i) - { - bool ina = d[j] >= 0; - bool inb = d[i] >= 0; - if (ina != inb) - { - float s = d[j] / (d[j] - d[i]); - out[m*3+0] = in[j*3+0] + (in[i*3+0] - in[j*3+0])*s; - out[m*3+1] = in[j*3+1] + (in[i*3+1] - in[j*3+1])*s; - out[m*3+2] = in[j*3+2] + (in[i*3+2] - in[j*3+2])*s; - m++; - } - if (inb) - { - out[m*3+0] = in[i*3+0]; - out[m*3+1] = in[i*3+1]; - out[m*3+2] = in[i*3+2]; - m++; - } - } - return m; -} - -static void rasterizeTri(const float* v0, const float* v1, const float* v2, - const unsigned char area, rcHeightfield& hf, - const float* bmin, const float* bmax, - const float cs, const float ics, const float ich, - const int flagMergeThr) -{ - const int w = hf.width; - const int h = hf.height; - float tmin[3], tmax[3]; - const float by = bmax[1] - bmin[1]; - - // Calculate the bounding box of the triangle. - rcVcopy(tmin, v0); - rcVcopy(tmax, v0); - rcVmin(tmin, v1); - rcVmin(tmin, v2); - rcVmax(tmax, v1); - rcVmax(tmax, v2); - - // If the triangle does not touch the bbox of the heightfield, skip the triagle. - if (!overlapBounds(bmin, bmax, tmin, tmax)) - return; - - // Calculate the footpring of the triangle on the grid. - int x0 = (int)((tmin[0] - bmin[0])*ics); - int y0 = (int)((tmin[2] - bmin[2])*ics); - int x1 = (int)((tmax[0] - bmin[0])*ics); - int y1 = (int)((tmax[2] - bmin[2])*ics); - x0 = rcClamp(x0, 0, w-1); - y0 = rcClamp(y0, 0, h-1); - x1 = rcClamp(x1, 0, w-1); - y1 = rcClamp(y1, 0, h-1); - - // Clip the triangle into all grid cells it touches. 
- float in[7*3], out[7*3], inrow[7*3]; - - for (int y = y0; y <= y1; ++y) - { - // Clip polygon to row. - rcVcopy(&in[0], v0); - rcVcopy(&in[1*3], v1); - rcVcopy(&in[2*3], v2); - int nvrow = 3; - const float cz = bmin[2] + y*cs; - nvrow = clipPoly(in, nvrow, out, 0, 1, -cz); - if (nvrow < 3) continue; - nvrow = clipPoly(out, nvrow, inrow, 0, -1, cz+cs); - if (nvrow < 3) continue; - - for (int x = x0; x <= x1; ++x) - { - // Clip polygon to column. - int nv = nvrow; - const float cx = bmin[0] + x*cs; - nv = clipPoly(inrow, nv, out, 1, 0, -cx); - if (nv < 3) continue; - nv = clipPoly(out, nv, in, -1, 0, cx+cs); - if (nv < 3) continue; - - // Calculate min and max of the span. - float smin = in[1], smax = in[1]; - for (int i = 1; i < nv; ++i) - { - smin = rcMin(smin, in[i*3+1]); - smax = rcMax(smax, in[i*3+1]); - } - smin -= bmin[1]; - smax -= bmin[1]; - // Skip the span if it is outside the heightfield bbox - if (smax < 0.0f) continue; - if (smin > by) continue; - // Clamp the span to the heightfield bbox. - if (smin < 0.0f) smin = 0; - if (smax > by) smax = by; - - // Snap the span to the heightfield height grid. - unsigned short ismin = (unsigned short)rcClamp((int)floorf(smin * ich), 0, RC_SPAN_MAX_HEIGHT); - unsigned short ismax = (unsigned short)rcClamp((int)ceilf(smax * ich), (int)ismin+1, RC_SPAN_MAX_HEIGHT); - - addSpan(hf, x, y, ismin, ismax, area, flagMergeThr); - } - } -} - -void rcRasterizeTriangle(rcContext* ctx, const float* v0, const float* v1, const float* v2, - const unsigned char area, rcHeightfield& solid, - const int flagMergeThr) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_RASTERIZE_TRIANGLES); - - const float ics = 1.0f/solid.cs; - const float ich = 1.0f/solid.ch; - rasterizeTri(v0, v1, v2, area, solid, solid.bmin, solid.bmax, solid.cs, ics, ich, flagMergeThr); - - ctx->stopTimer(RC_TIMER_RASTERIZE_TRIANGLES); -} - -void rcRasterizeTriangles(rcContext* ctx, const float* verts, const int /*nv*/, - const int* tris, const unsigned char* areas, const int nt, - rcHeightfield& solid, const int flagMergeThr) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_RASTERIZE_TRIANGLES); - - const float ics = 1.0f/solid.cs; - const float ich = 1.0f/solid.ch; - // Rasterize triangles. - for (int i = 0; i < nt; ++i) - { - const float* v0 = &verts[tris[i*3+0]*3]; - const float* v1 = &verts[tris[i*3+1]*3]; - const float* v2 = &verts[tris[i*3+2]*3]; - // Rasterize. - rasterizeTri(v0, v1, v2, areas[i], solid, solid.bmin, solid.bmax, solid.cs, ics, ich, flagMergeThr); - } - - ctx->stopTimer(RC_TIMER_RASTERIZE_TRIANGLES); -} - -void rcRasterizeTriangles(rcContext* ctx, const float* verts, const int /*nv*/, - const unsigned short* tris, const unsigned char* areas, const int nt, - rcHeightfield& solid, const int flagMergeThr) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_RASTERIZE_TRIANGLES); - - const float ics = 1.0f/solid.cs; - const float ich = 1.0f/solid.ch; - // Rasterize triangles. - for (int i = 0; i < nt; ++i) - { - const float* v0 = &verts[tris[i*3+0]*3]; - const float* v1 = &verts[tris[i*3+1]*3]; - const float* v2 = &verts[tris[i*3+2]*3]; - // Rasterize. 
- rasterizeTri(v0, v1, v2, areas[i], solid, solid.bmin, solid.bmax, solid.cs, ics, ich, flagMergeThr); - } - - ctx->stopTimer(RC_TIMER_RASTERIZE_TRIANGLES); -} - -void rcRasterizeTriangles(rcContext* ctx, const float* verts, const unsigned char* areas, const int nt, - rcHeightfield& solid, const int flagMergeThr) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_RASTERIZE_TRIANGLES); - - const float ics = 1.0f/solid.cs; - const float ich = 1.0f/solid.ch; - // Rasterize triangles. - for (int i = 0; i < nt; ++i) - { - const float* v0 = &verts[(i*3+0)*3]; - const float* v1 = &verts[(i*3+1)*3]; - const float* v2 = &verts[(i*3+2)*3]; - // Rasterize. - rasterizeTri(v0, v1, v2, areas[i], solid, solid.bmin, solid.bmax, solid.cs, ics, ich, flagMergeThr); - } - - ctx->stopTimer(RC_TIMER_RASTERIZE_TRIANGLES); -} diff --git a/deps/recastnavigation/Recast/RecastRegion.cpp b/deps/recastnavigation/Recast/RecastRegion.cpp deleted file mode 100644 index c624bf6619..0000000000 --- a/deps/recastnavigation/Recast/RecastRegion.cpp +++ /dev/null @@ -1,1285 +0,0 @@ -// -// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org -// -// This software is provided 'as-is', without any express or implied -// warranty. In no event will the authors be held liable for any damages -// arising from the use of this software. -// Permission is granted to anyone to use this software for any purpose, -// including commercial applications, and to alter it and redistribute it -// freely, subject to the following restrictions: -// 1. The origin of this software must not be misrepresented; you must not -// claim that you wrote the original software. If you use this software -// in a product, an acknowledgment in the product documentation would be -// appreciated but is not required. -// 2. Altered source versions must be plainly marked as such, and must not be -// misrepresented as being the original software. -// 3. This notice may not be removed or altered from any source distribution. -// - -#include <float.h> -#define _USE_MATH_DEFINES -#include <math.h> -#include <string.h> -#include <stdlib.h> -#include <stdio.h> -#include "Recast.h" -#include "RecastAlloc.h" -#include "RecastAssert.h" -#include <new> - - -static void calculateDistanceField(rcCompactHeightfield& chf, unsigned short* src, unsigned short& maxDist) -{ - const int w = chf.width; - const int h = chf.height; - - // Init distance and points. - for (int i = 0; i < chf.spanCount; ++i) - src[i] = 0xffff; - - // Mark boundary cells. 
- for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - const rcCompactSpan& s = chf.spans[i]; - const unsigned char area = chf.areas[i]; - - int nc = 0; - for (int dir = 0; dir < 4; ++dir) - { - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir); - if (area == chf.areas[ai]) - nc++; - } - } - if (nc != 4) - src[i] = 0; - } - } - } - - - // Pass 1 - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - const rcCompactSpan& s = chf.spans[i]; - - if (rcGetCon(s, 0) != RC_NOT_CONNECTED) - { - // (-1,0) - const int ax = x + rcGetDirOffsetX(0); - const int ay = y + rcGetDirOffsetY(0); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 0); - const rcCompactSpan& as = chf.spans[ai]; - if (src[ai]+2 < src[i]) - src[i] = src[ai]+2; - - // (-1,-1) - if (rcGetCon(as, 3) != RC_NOT_CONNECTED) - { - const int aax = ax + rcGetDirOffsetX(3); - const int aay = ay + rcGetDirOffsetY(3); - const int aai = (int)chf.cells[aax+aay*w].index + rcGetCon(as, 3); - if (src[aai]+3 < src[i]) - src[i] = src[aai]+3; - } - } - if (rcGetCon(s, 3) != RC_NOT_CONNECTED) - { - // (0,-1) - const int ax = x + rcGetDirOffsetX(3); - const int ay = y + rcGetDirOffsetY(3); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 3); - const rcCompactSpan& as = chf.spans[ai]; - if (src[ai]+2 < src[i]) - src[i] = src[ai]+2; - - // (1,-1) - if (rcGetCon(as, 2) != RC_NOT_CONNECTED) - { - const int aax = ax + rcGetDirOffsetX(2); - const int aay = ay + rcGetDirOffsetY(2); - const int aai = (int)chf.cells[aax+aay*w].index + rcGetCon(as, 2); - if (src[aai]+3 < src[i]) - src[i] = src[aai]+3; - } - } - } - } - } - - // Pass 2 - for (int y = h-1; y >= 0; --y) - { - for (int x = w-1; x >= 0; --x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - const rcCompactSpan& s = chf.spans[i]; - - if (rcGetCon(s, 2) != RC_NOT_CONNECTED) - { - // (1,0) - const int ax = x + rcGetDirOffsetX(2); - const int ay = y + rcGetDirOffsetY(2); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 2); - const rcCompactSpan& as = chf.spans[ai]; - if (src[ai]+2 < src[i]) - src[i] = src[ai]+2; - - // (1,1) - if (rcGetCon(as, 1) != RC_NOT_CONNECTED) - { - const int aax = ax + rcGetDirOffsetX(1); - const int aay = ay + rcGetDirOffsetY(1); - const int aai = (int)chf.cells[aax+aay*w].index + rcGetCon(as, 1); - if (src[aai]+3 < src[i]) - src[i] = src[aai]+3; - } - } - if (rcGetCon(s, 1) != RC_NOT_CONNECTED) - { - // (0,1) - const int ax = x + rcGetDirOffsetX(1); - const int ay = y + rcGetDirOffsetY(1); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 1); - const rcCompactSpan& as = chf.spans[ai]; - if (src[ai]+2 < src[i]) - src[i] = src[ai]+2; - - // (-1,1) - if (rcGetCon(as, 0) != RC_NOT_CONNECTED) - { - const int aax = ax + rcGetDirOffsetX(0); - const int aay = ay + rcGetDirOffsetY(0); - const int aai = (int)chf.cells[aax+aay*w].index + rcGetCon(as, 0); - if (src[aai]+3 < src[i]) - src[i] = src[aai]+3; - } - } - } - } - } - - maxDist = 0; - for (int i = 0; i < chf.spanCount; ++i) - maxDist = rcMax(src[i], maxDist); - -} - -static unsigned short* 
boxBlur(rcCompactHeightfield& chf, int thr, - unsigned short* src, unsigned short* dst) -{ - const int w = chf.width; - const int h = chf.height; - - thr *= 2; - - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - const rcCompactSpan& s = chf.spans[i]; - const unsigned short cd = src[i]; - if (cd <= thr) - { - dst[i] = cd; - continue; - } - - int d = (int)cd; - for (int dir = 0; dir < 4; ++dir) - { - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir); - d += (int)src[ai]; - - const rcCompactSpan& as = chf.spans[ai]; - const int dir2 = (dir+1) & 0x3; - if (rcGetCon(as, dir2) != RC_NOT_CONNECTED) - { - const int ax2 = ax + rcGetDirOffsetX(dir2); - const int ay2 = ay + rcGetDirOffsetY(dir2); - const int ai2 = (int)chf.cells[ax2+ay2*w].index + rcGetCon(as, dir2); - d += (int)src[ai2]; - } - else - { - d += cd; - } - } - else - { - d += cd*2; - } - } - dst[i] = (unsigned short)((d+5)/9); - } - } - } - return dst; -} - - -static bool floodRegion(int x, int y, int i, - unsigned short level, unsigned short r, - rcCompactHeightfield& chf, - unsigned short* srcReg, unsigned short* srcDist, - rcIntArray& stack) -{ - const int w = chf.width; - - const unsigned char area = chf.areas[i]; - - // Flood fill mark region. - stack.resize(0); - stack.push((int)x); - stack.push((int)y); - stack.push((int)i); - srcReg[i] = r; - srcDist[i] = 0; - - unsigned short lev = level >= 2 ? level-2 : 0; - int count = 0; - - while (stack.size() > 0) - { - int ci = stack.pop(); - int cy = stack.pop(); - int cx = stack.pop(); - - const rcCompactSpan& cs = chf.spans[ci]; - - // Check if any of the neighbours already have a valid region set. - unsigned short ar = 0; - for (int dir = 0; dir < 4; ++dir) - { - // 8 connected - if (rcGetCon(cs, dir) != RC_NOT_CONNECTED) - { - const int ax = cx + rcGetDirOffsetX(dir); - const int ay = cy + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(cs, dir); - if (chf.areas[ai] != area) - continue; - unsigned short nr = srcReg[ai]; - if (nr & RC_BORDER_REG) // Do not take borders into account. - continue; - if (nr != 0 && nr != r) - ar = nr; - - const rcCompactSpan& as = chf.spans[ai]; - - const int dir2 = (dir+1) & 0x3; - if (rcGetCon(as, dir2) != RC_NOT_CONNECTED) - { - const int ax2 = ax + rcGetDirOffsetX(dir2); - const int ay2 = ay + rcGetDirOffsetY(dir2); - const int ai2 = (int)chf.cells[ax2+ay2*w].index + rcGetCon(as, dir2); - if (chf.areas[ai2] != area) - continue; - unsigned short nr = srcReg[ai2]; - if (nr != 0 && nr != r) - ar = nr; - } - } - } - if (ar != 0) - { - srcReg[ci] = 0; - continue; - } - count++; - - // Expand neighbours. 
- for (int dir = 0; dir < 4; ++dir) - { - if (rcGetCon(cs, dir) != RC_NOT_CONNECTED) - { - const int ax = cx + rcGetDirOffsetX(dir); - const int ay = cy + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(cs, dir); - if (chf.areas[ai] != area) - continue; - if (chf.dist[ai] >= lev) - { - if (srcReg[ai] == 0) - { - srcReg[ai] = r; - srcDist[ai] = 0; - stack.push(ax); - stack.push(ay); - stack.push(ai); - } - } - } - } - } - - return count > 0; -} - -static unsigned short* expandRegions(int maxIter, unsigned short level, - rcCompactHeightfield& chf, - unsigned short* srcReg, unsigned short* srcDist, - unsigned short* dstReg, unsigned short* dstDist, - rcIntArray& stack) -{ - const int w = chf.width; - const int h = chf.height; - - // Find cells revealed by the raised level. - stack.resize(0); - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - if (chf.dist[i] >= level && srcReg[i] == 0 && chf.areas[i] != RC_NULL_AREA) - { - stack.push(x); - stack.push(y); - stack.push(i); - } - } - } - } - - int iter = 0; - while (stack.size() > 0) - { - int failed = 0; - - memcpy(dstReg, srcReg, sizeof(unsigned short)*chf.spanCount); - memcpy(dstDist, srcDist, sizeof(unsigned short)*chf.spanCount); - - for (int j = 0; j < stack.size(); j += 3) - { - int x = stack[j+0]; - int y = stack[j+1]; - int i = stack[j+2]; - if (i < 0) - { - failed++; - continue; - } - - unsigned short r = srcReg[i]; - unsigned short d2 = 0xffff; - const unsigned char area = chf.areas[i]; - const rcCompactSpan& s = chf.spans[i]; - for (int dir = 0; dir < 4; ++dir) - { - if (rcGetCon(s, dir) == RC_NOT_CONNECTED) continue; - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, dir); - if (chf.areas[ai] != area) continue; - if (srcReg[ai] > 0 && (srcReg[ai] & RC_BORDER_REG) == 0) - { - if ((int)srcDist[ai]+2 < (int)d2) - { - r = srcReg[ai]; - d2 = srcDist[ai]+2; - } - } - } - if (r) - { - stack[j+2] = -1; // mark as used - dstReg[i] = r; - dstDist[i] = d2; - } - else - { - failed++; - } - } - - // rcSwap source and dest. - rcSwap(srcReg, dstReg); - rcSwap(srcDist, dstDist); - - if (failed*3 == stack.size()) - break; - - if (level > 0) - { - ++iter; - if (iter >= maxIter) - break; - } - } - - return srcReg; -} - - -struct rcRegion -{ - inline rcRegion(unsigned short i) : - spanCount(0), - id(i), - areaType(0), - remap(false), - visited(false) - {} - - int spanCount; // Number of spans belonging to this region - unsigned short id; // ID of the region - unsigned char areaType; // Are type. - bool remap; - bool visited; - rcIntArray connections; - rcIntArray floors; -}; - -static void removeAdjacentNeighbours(rcRegion& reg) -{ - // Remove adjacent duplicates. 
- for (int i = 0; i < reg.connections.size() && reg.connections.size() > 1; ) - { - int ni = (i+1) % reg.connections.size(); - if (reg.connections[i] == reg.connections[ni]) - { - // Remove duplicate - for (int j = i; j < reg.connections.size()-1; ++j) - reg.connections[j] = reg.connections[j+1]; - reg.connections.pop(); - } - else - ++i; - } -} - -static void replaceNeighbour(rcRegion& reg, unsigned short oldId, unsigned short newId) -{ - bool neiChanged = false; - for (int i = 0; i < reg.connections.size(); ++i) - { - if (reg.connections[i] == oldId) - { - reg.connections[i] = newId; - neiChanged = true; - } - } - for (int i = 0; i < reg.floors.size(); ++i) - { - if (reg.floors[i] == oldId) - reg.floors[i] = newId; - } - if (neiChanged) - removeAdjacentNeighbours(reg); -} - -static bool canMergeWithRegion(const rcRegion& rega, const rcRegion& regb) -{ - if (rega.areaType != regb.areaType) - return false; - int n = 0; - for (int i = 0; i < rega.connections.size(); ++i) - { - if (rega.connections[i] == regb.id) - n++; - } - if (n > 1) - return false; - for (int i = 0; i < rega.floors.size(); ++i) - { - if (rega.floors[i] == regb.id) - return false; - } - return true; -} - -static void addUniqueFloorRegion(rcRegion& reg, int n) -{ - for (int i = 0; i < reg.floors.size(); ++i) - if (reg.floors[i] == n) - return; - reg.floors.push(n); -} - -static bool mergeRegions(rcRegion& rega, rcRegion& regb) -{ - unsigned short aid = rega.id; - unsigned short bid = regb.id; - - // Duplicate current neighbourhood. - rcIntArray acon; - acon.resize(rega.connections.size()); - for (int i = 0; i < rega.connections.size(); ++i) - acon[i] = rega.connections[i]; - rcIntArray& bcon = regb.connections; - - // Find insertion point on A. - int insa = -1; - for (int i = 0; i < acon.size(); ++i) - { - if (acon[i] == bid) - { - insa = i; - break; - } - } - if (insa == -1) - return false; - - // Find insertion point on B. - int insb = -1; - for (int i = 0; i < bcon.size(); ++i) - { - if (bcon[i] == aid) - { - insb = i; - break; - } - } - if (insb == -1) - return false; - - // Merge neighbours. - rega.connections.resize(0); - for (int i = 0, ni = acon.size(); i < ni-1; ++i) - rega.connections.push(acon[(insa+1+i) % ni]); - - for (int i = 0, ni = bcon.size(); i < ni-1; ++i) - rega.connections.push(bcon[(insb+1+i) % ni]); - - removeAdjacentNeighbours(rega); - - for (int j = 0; j < regb.floors.size(); ++j) - addUniqueFloorRegion(rega, regb.floors[j]); - rega.spanCount += regb.spanCount; - regb.spanCount = 0; - regb.connections.resize(0); - - return true; -} - -static bool isRegionConnectedToBorder(const rcRegion& reg) -{ - // Region is connected to border if - // one of the neighbours is null id. 
- for (int i = 0; i < reg.connections.size(); ++i) - { - if (reg.connections[i] == 0) - return true; - } - return false; -} - -static bool isSolidEdge(rcCompactHeightfield& chf, unsigned short* srcReg, - int x, int y, int i, int dir) -{ - const rcCompactSpan& s = chf.spans[i]; - unsigned short r = 0; - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(s, dir); - r = srcReg[ai]; - } - if (r == srcReg[i]) - return false; - return true; -} - -static void walkContour(int x, int y, int i, int dir, - rcCompactHeightfield& chf, - unsigned short* srcReg, - rcIntArray& cont) -{ - int startDir = dir; - int starti = i; - - const rcCompactSpan& ss = chf.spans[i]; - unsigned short curReg = 0; - if (rcGetCon(ss, dir) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(ss, dir); - curReg = srcReg[ai]; - } - cont.push(curReg); - - int iter = 0; - while (++iter < 40000) - { - const rcCompactSpan& s = chf.spans[i]; - - if (isSolidEdge(chf, srcReg, x, y, i, dir)) - { - // Choose the edge corner - unsigned short r = 0; - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(dir); - const int ay = y + rcGetDirOffsetY(dir); - const int ai = (int)chf.cells[ax+ay*chf.width].index + rcGetCon(s, dir); - r = srcReg[ai]; - } - if (r != curReg) - { - curReg = r; - cont.push(curReg); - } - - dir = (dir+1) & 0x3; // Rotate CW - } - else - { - int ni = -1; - const int nx = x + rcGetDirOffsetX(dir); - const int ny = y + rcGetDirOffsetY(dir); - if (rcGetCon(s, dir) != RC_NOT_CONNECTED) - { - const rcCompactCell& nc = chf.cells[nx+ny*chf.width]; - ni = (int)nc.index + rcGetCon(s, dir); - } - if (ni == -1) - { - // Should not happen. - return; - } - x = nx; - y = ny; - i = ni; - dir = (dir+3) & 0x3; // Rotate CCW - } - - if (starti == i && startDir == dir) - { - break; - } - } - - // Remove adjacent duplicates. - if (cont.size() > 1) - { - for (int i = 0; i < cont.size(); ) - { - int ni = (i+1) % cont.size(); - if (cont[i] == cont[ni]) - { - for (int j = i; j < cont.size()-1; ++j) - cont[j] = cont[j+1]; - cont.pop(); - } - else - ++i; - } - } -} - -static bool filterSmallRegions(rcContext* ctx, int minRegionArea, int mergeRegionSize, - unsigned short& maxRegionId, - rcCompactHeightfield& chf, - unsigned short* srcReg) -{ - const int w = chf.width; - const int h = chf.height; - - const int nreg = maxRegionId+1; - rcRegion* regions = (rcRegion*)rcAlloc(sizeof(rcRegion)*nreg, RC_ALLOC_TEMP); - if (!regions) - { - ctx->log(RC_LOG_ERROR, "filterSmallRegions: Out of memory 'regions' (%d).", nreg); - return false; - } - - // Construct regions - for (int i = 0; i < nreg; ++i) - new(®ions[i]) rcRegion((unsigned short)i); - - // Find edge of a region and find connections around the contour. - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - unsigned short r = srcReg[i]; - if (r == 0 || r >= nreg) - continue; - - rcRegion& reg = regions[r]; - reg.spanCount++; - - - // Update floors. 
- for (int j = (int)c.index; j < ni; ++j) - { - if (i == j) continue; - unsigned short floorId = srcReg[j]; - if (floorId == 0 || floorId >= nreg) - continue; - addUniqueFloorRegion(reg, floorId); - } - - // Have found contour - if (reg.connections.size() > 0) - continue; - - reg.areaType = chf.areas[i]; - - // Check if this cell is next to a border. - int ndir = -1; - for (int dir = 0; dir < 4; ++dir) - { - if (isSolidEdge(chf, srcReg, x, y, i, dir)) - { - ndir = dir; - break; - } - } - - if (ndir != -1) - { - // The cell is at border. - // Walk around the contour to find all the neighbours. - walkContour(x, y, i, ndir, chf, srcReg, reg.connections); - } - } - } - } - - // Remove too small regions. - rcIntArray stack(32); - rcIntArray trace(32); - for (int i = 0; i < nreg; ++i) - { - rcRegion& reg = regions[i]; - if (reg.id == 0 || (reg.id & RC_BORDER_REG)) - continue; - if (reg.spanCount == 0) - continue; - if (reg.visited) - continue; - - // Count the total size of all the connected regions. - // Also keep track of the regions connects to a tile border. - bool connectsToBorder = false; - int spanCount = 0; - stack.resize(0); - trace.resize(0); - - reg.visited = true; - stack.push(i); - - while (stack.size()) - { - // Pop - int ri = stack.pop(); - - rcRegion& creg = regions[ri]; - - spanCount += creg.spanCount; - trace.push(ri); - - for (int j = 0; j < creg.connections.size(); ++j) - { - if (creg.connections[j] & RC_BORDER_REG) - { - connectsToBorder = true; - continue; - } - rcRegion& nreg = regions[creg.connections[j]]; - if (nreg.visited) - continue; - if (nreg.id == 0 || (nreg.id & RC_BORDER_REG)) - continue; - // Visit - stack.push(nreg.id); - nreg.visited = true; - } - } - - // If the accumulated regions size is too small, remove it. - // Do not remove areas which connect to tile borders - // as their size cannot be estimated correctly and removing them - // can potentially remove necessary areas. - if (spanCount < minRegionArea && !connectsToBorder) - { - // Kill all visited regions. - for (int j = 0; j < trace.size(); ++j) - { - regions[trace[j]].spanCount = 0; - regions[trace[j]].id = 0; - } - } - } - - // Merge too small regions to neighbour regions. - int mergeCount = 0 ; - do - { - mergeCount = 0; - for (int i = 0; i < nreg; ++i) - { - rcRegion& reg = regions[i]; - if (reg.id == 0 || (reg.id & RC_BORDER_REG)) - continue; - if (reg.spanCount == 0) - continue; - - // Check to see if the region should be merged. - if (reg.spanCount > mergeRegionSize && isRegionConnectedToBorder(reg)) - continue; - - // Small region with more than 1 connection. - // Or region which is not connected to a border at all. - // Find smallest neighbour region that connects to this one. - int smallest = 0xfffffff; - unsigned short mergeId = reg.id; - for (int j = 0; j < reg.connections.size(); ++j) - { - if (reg.connections[j] & RC_BORDER_REG) continue; - rcRegion& mreg = regions[reg.connections[j]]; - if (mreg.id == 0 || (mreg.id & RC_BORDER_REG)) continue; - if (mreg.spanCount < smallest && - canMergeWithRegion(reg, mreg) && - canMergeWithRegion(mreg, reg)) - { - smallest = mreg.spanCount; - mergeId = mreg.id; - } - } - // Found new id. - if (mergeId != reg.id) - { - unsigned short oldId = reg.id; - rcRegion& target = regions[mergeId]; - - // Merge neighbours. - if (mergeRegions(target, reg)) - { - // Fixup regions pointing to current region. 
- for (int j = 0; j < nreg; ++j) - { - if (regions[j].id == 0 || (regions[j].id & RC_BORDER_REG)) continue; - // If another region was already merged into current region - // change the nid of the previous region too. - if (regions[j].id == oldId) - regions[j].id = mergeId; - // Replace the current region with the new one if the - // current regions is neighbour. - replaceNeighbour(regions[j], oldId, mergeId); - } - mergeCount++; - } - } - } - } - while (mergeCount > 0); - - // Compress region Ids. - for (int i = 0; i < nreg; ++i) - { - regions[i].remap = false; - if (regions[i].id == 0) continue; // Skip nil regions. - if (regions[i].id & RC_BORDER_REG) continue; // Skip external regions. - regions[i].remap = true; - } - - unsigned short regIdGen = 0; - for (int i = 0; i < nreg; ++i) - { - if (!regions[i].remap) - continue; - unsigned short oldId = regions[i].id; - unsigned short newId = ++regIdGen; - for (int j = i; j < nreg; ++j) - { - if (regions[j].id == oldId) - { - regions[j].id = newId; - regions[j].remap = false; - } - } - } - maxRegionId = regIdGen; - - // Remap regions. - for (int i = 0; i < chf.spanCount; ++i) - { - if ((srcReg[i] & RC_BORDER_REG) == 0) - srcReg[i] = regions[srcReg[i]].id; - } - - for (int i = 0; i < nreg; ++i) - regions[i].~rcRegion(); - rcFree(regions); - - return true; -} - - -bool rcBuildDistanceField(rcContext* ctx, rcCompactHeightfield& chf) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_BUILD_DISTANCEFIELD); - - if (chf.dist) - { - rcFree(chf.dist); - chf.dist = 0; - } - - unsigned short* src = (unsigned short*)rcAlloc(sizeof(unsigned short)*chf.spanCount, RC_ALLOC_TEMP); - if (!src) - { - ctx->log(RC_LOG_ERROR, "rcBuildDistanceField: Out of memory 'src' (%d).", chf.spanCount); - return false; - } - unsigned short* dst = (unsigned short*)rcAlloc(sizeof(unsigned short)*chf.spanCount, RC_ALLOC_TEMP); - if (!dst) - { - ctx->log(RC_LOG_ERROR, "rcBuildDistanceField: Out of memory 'dst' (%d).", chf.spanCount); - rcFree(src); - return false; - } - - unsigned short maxDist = 0; - - ctx->startTimer(RC_TIMER_BUILD_DISTANCEFIELD_DIST); - - calculateDistanceField(chf, src, maxDist); - chf.maxDistance = maxDist; - - ctx->stopTimer(RC_TIMER_BUILD_DISTANCEFIELD_DIST); - - ctx->startTimer(RC_TIMER_BUILD_DISTANCEFIELD_BLUR); - - // Blur - if (boxBlur(chf, 1, src, dst) != src) - rcSwap(src, dst); - - // Store distance. 
- chf.dist = src; - - ctx->stopTimer(RC_TIMER_BUILD_DISTANCEFIELD_BLUR); - - ctx->stopTimer(RC_TIMER_BUILD_DISTANCEFIELD); - - rcFree(dst); - - return true; -} - -static void paintRectRegion(int minx, int maxx, int miny, int maxy, unsigned short regId, - rcCompactHeightfield& chf, unsigned short* srcReg) -{ - const int w = chf.width; - for (int y = miny; y < maxy; ++y) - { - for (int x = minx; x < maxx; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - if (chf.areas[i] != RC_NULL_AREA) - srcReg[i] = regId; - } - } - } -} - - -static const unsigned short RC_NULL_NEI = 0xffff; - -struct rcSweepSpan -{ - unsigned short rid; // row id - unsigned short id; // region id - unsigned short ns; // number samples - unsigned short nei; // neighbour id -}; - -bool rcBuildRegionsMonotone(rcContext* ctx, rcCompactHeightfield& chf, - const int borderSize, const int minRegionArea, const int mergeRegionArea) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_BUILD_REGIONS); - - const int w = chf.width; - const int h = chf.height; - unsigned short id = 1; - - rcScopedDelete<unsigned short> srcReg = (unsigned short*)rcAlloc(sizeof(unsigned short)*chf.spanCount, RC_ALLOC_TEMP); - if (!srcReg) - { - ctx->log(RC_LOG_ERROR, "rcBuildRegionsMonotone: Out of memory 'src' (%d).", chf.spanCount); - return false; - } - memset(srcReg,0,sizeof(unsigned short)*chf.spanCount); - - const int nsweeps = rcMax(chf.width,chf.height); - rcScopedDelete<rcSweepSpan> sweeps = (rcSweepSpan*)rcAlloc(sizeof(rcSweepSpan)*nsweeps, RC_ALLOC_TEMP); - if (!sweeps) - { - ctx->log(RC_LOG_ERROR, "rcBuildRegionsMonotone: Out of memory 'sweeps' (%d).", nsweeps); - return false; - } - - - // Mark border regions. - if (borderSize > 0) - { - // Make sure border will not overflow. - const int bw = rcMin(w, borderSize); - const int bh = rcMin(h, borderSize); - // Paint regions - paintRectRegion(0, bw, 0, h, id|RC_BORDER_REG, chf, srcReg); id++; - paintRectRegion(w-bw, w, 0, h, id|RC_BORDER_REG, chf, srcReg); id++; - paintRectRegion(0, w, 0, bh, id|RC_BORDER_REG, chf, srcReg); id++; - paintRectRegion(0, w, h-bh, h, id|RC_BORDER_REG, chf, srcReg); id++; - } - - rcIntArray prev(256); - - // Sweep one line at a time. - for (int y = borderSize; y < h-borderSize; ++y) - { - // Collect spans from this row. 
- prev.resize(id+1); - memset(&prev[0],0,sizeof(int)*id); - unsigned short rid = 1; - - for (int x = borderSize; x < w-borderSize; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - const rcCompactSpan& s = chf.spans[i]; - if (chf.areas[i] == RC_NULL_AREA) continue; - - // -x - unsigned short previd = 0; - if (rcGetCon(s, 0) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(0); - const int ay = y + rcGetDirOffsetY(0); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 0); - if ((srcReg[ai] & RC_BORDER_REG) == 0 && chf.areas[i] == chf.areas[ai]) - previd = srcReg[ai]; - } - - if (!previd) - { - previd = rid++; - sweeps[previd].rid = previd; - sweeps[previd].ns = 0; - sweeps[previd].nei = 0; - } - - // -y - if (rcGetCon(s,3) != RC_NOT_CONNECTED) - { - const int ax = x + rcGetDirOffsetX(3); - const int ay = y + rcGetDirOffsetY(3); - const int ai = (int)chf.cells[ax+ay*w].index + rcGetCon(s, 3); - if (srcReg[ai] && (srcReg[ai] & RC_BORDER_REG) == 0 && chf.areas[i] == chf.areas[ai]) - { - unsigned short nr = srcReg[ai]; - if (!sweeps[previd].nei || sweeps[previd].nei == nr) - { - sweeps[previd].nei = nr; - sweeps[previd].ns++; - prev[nr]++; - } - else - { - sweeps[previd].nei = RC_NULL_NEI; - } - } - } - - srcReg[i] = previd; - } - } - - // Create unique ID. - for (int i = 1; i < rid; ++i) - { - if (sweeps[i].nei != RC_NULL_NEI && sweeps[i].nei != 0 && - prev[sweeps[i].nei] == (int)sweeps[i].ns) - { - sweeps[i].id = sweeps[i].nei; - } - else - { - sweeps[i].id = id++; - } - } - - // Remap IDs - for (int x = borderSize; x < w-borderSize; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - if (srcReg[i] > 0 && srcReg[i] < rid) - srcReg[i] = sweeps[srcReg[i]].id; - } - } - } - - ctx->startTimer(RC_TIMER_BUILD_REGIONS_FILTER); - - // Filter out small regions. - chf.maxRegions = id; - if (!filterSmallRegions(ctx, minRegionArea, mergeRegionArea, chf.maxRegions, chf, srcReg)) - return false; - - ctx->stopTimer(RC_TIMER_BUILD_REGIONS_FILTER); - - // Store the result out. - for (int i = 0; i < chf.spanCount; ++i) - chf.spans[i].reg = srcReg[i]; - - ctx->stopTimer(RC_TIMER_BUILD_REGIONS); - - return true; -} - -bool rcBuildRegions(rcContext* ctx, rcCompactHeightfield& chf, - const int borderSize, const int minRegionArea, const int mergeRegionArea) -{ - rcAssert(ctx); - - ctx->startTimer(RC_TIMER_BUILD_REGIONS); - - const int w = chf.width; - const int h = chf.height; - - rcScopedDelete<unsigned short> buf = (unsigned short*)rcAlloc(sizeof(unsigned short)*chf.spanCount*4, RC_ALLOC_TEMP); - if (!buf) - { - ctx->log(RC_LOG_ERROR, "rcBuildRegions: Out of memory 'tmp' (%d).", chf.spanCount*4); - return false; - } - - ctx->startTimer(RC_TIMER_BUILD_REGIONS_WATERSHED); - - rcIntArray stack(1024); - rcIntArray visited(1024); - - unsigned short* srcReg = buf; - unsigned short* srcDist = buf+chf.spanCount; - unsigned short* dstReg = buf+chf.spanCount*2; - unsigned short* dstDist = buf+chf.spanCount*3; - - memset(srcReg, 0, sizeof(unsigned short)*chf.spanCount); - memset(srcDist, 0, sizeof(unsigned short)*chf.spanCount); - - unsigned short regionId = 1; - unsigned short level = (chf.maxDistance+1) & ~1; - - // TODO: Figure better formula, expandIters defines how much the - // watershed "overflows" and simplifies the regions. Tying it to - // agent radius was usually good indication how greedy it could be. 
-// const int expandIters = 4 + walkableRadius * 2; - const int expandIters = 8; - - // Mark border regions. - paintRectRegion(0, borderSize, 0, h, regionId|RC_BORDER_REG, chf, srcReg); regionId++; - paintRectRegion(w-borderSize, w, 0, h, regionId|RC_BORDER_REG, chf, srcReg); regionId++; - paintRectRegion(0, w, 0, borderSize, regionId|RC_BORDER_REG, chf, srcReg); regionId++; - paintRectRegion(0, w, h-borderSize, h, regionId|RC_BORDER_REG, chf, srcReg); regionId++; - - while (level > 0) - { - level = level >= 2 ? level-2 : 0; - - ctx->startTimer(RC_TIMER_BUILD_REGIONS_EXPAND); - - // Expand current regions until no empty connected cells found. - if (expandRegions(expandIters, level, chf, srcReg, srcDist, dstReg, dstDist, stack) != srcReg) - { - rcSwap(srcReg, dstReg); - rcSwap(srcDist, dstDist); - } - - ctx->stopTimer(RC_TIMER_BUILD_REGIONS_EXPAND); - - ctx->startTimer(RC_TIMER_BUILD_REGIONS_FLOOD); - - // Mark new regions with IDs. - for (int y = 0; y < h; ++y) - { - for (int x = 0; x < w; ++x) - { - const rcCompactCell& c = chf.cells[x+y*w]; - for (int i = (int)c.index, ni = (int)(c.index+c.count); i < ni; ++i) - { - if (chf.dist[i] < level || srcReg[i] != 0 || chf.areas[i] == RC_NULL_AREA) - continue; - - if (floodRegion(x, y, i, level, regionId, chf, srcReg, srcDist, stack)) - regionId++; - } - } - } - - ctx->stopTimer(RC_TIMER_BUILD_REGIONS_FLOOD); - - } - - // Expand current regions until no empty connected cells found. - if (expandRegions(expandIters*8, 0, chf, srcReg, srcDist, dstReg, dstDist, stack) != srcReg) - { - rcSwap(srcReg, dstReg); - rcSwap(srcDist, dstDist); - } - - ctx->stopTimer(RC_TIMER_BUILD_REGIONS_WATERSHED); - - ctx->startTimer(RC_TIMER_BUILD_REGIONS_FILTER); - - // Filter out small regions. - chf.maxRegions = regionId; - if (!filterSmallRegions(ctx, minRegionArea, mergeRegionArea, chf.maxRegions, chf, srcReg)) - return false; - - ctx->stopTimer(RC_TIMER_BUILD_REGIONS_FILTER); - - // Write the result out. - for (int i = 0; i < chf.spanCount; ++i) - chf.spans[i].reg = srcReg[i]; - - ctx->stopTimer(RC_TIMER_BUILD_REGIONS); - - return true; -} - - diff --git a/deps/recastnavigation/TODO.txt b/deps/recastnavigation/TODO.txt deleted file mode 100644 index b911c0e472..0000000000 --- a/deps/recastnavigation/TODO.txt +++ /dev/null @@ -1,20 +0,0 @@ -TODO/Roadmap - -Summer/Autumn 2009 - -- Off mesh links (jump links) -- Area annotations -- Embed extra data per polygon -- Height conforming navmesh - - -Autumn/Winter 2009/2010 - -- Detour path following -- More dynamic example with tile navmesh -- Faster small tile process - - -More info at http://digestingduck.blogspot.com/2009/07/recast-and-detour-roadmap.html - -- |
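The three source files deleted above implement the rasterization, region-partitioning and detail-mesh stages of Recast's navmesh pipeline. As a rough orientation only -- this sketch is not part of the diff -- the block below shows how the removed entry points (rcRasterizeTriangles from RecastRasterization.cpp, rcBuildDistanceField/rcBuildRegions from RecastRegion.cpp, rcBuildPolyMeshDetail from RecastMeshDetail.cpp) were commonly chained by a caller. The compact-heightfield/contour/poly-mesh helpers and every numeric parameter are assumptions drawn from the general Recast API of this era, not from this patch, and error cleanup is omitted for brevity.

// Illustrative sketch only; helpers and parameter values not shown in this
// diff are assumptions from the wider Recast API of the same era.
#include "Recast.h"

static bool buildDetailMeshSketch(rcContext* ctx,
                                  const float* verts, const int nverts,
                                  const int* tris, const unsigned char* areas, const int ntris,
                                  rcPolyMeshDetail& dmesh)
{
    const float cs = 0.3f, ch = 0.2f;               // placeholder cell size / cell height
    const int walkableHeight = 10, walkableClimb = 4;

    // Grid dimensions derived from the input geometry.
    float bmin[3], bmax[3];
    rcCalcBounds(verts, nverts, bmin, bmax);
    int width = 0, height = 0;
    rcCalcGridSize(bmin, bmax, cs, &width, &height);

    // 1) Voxelize the triangles into a span heightfield (RecastRasterization.cpp).
    rcHeightfield* hf = rcAllocHeightfield();
    if (!hf || !rcCreateHeightfield(ctx, *hf, width, height, bmin, bmax, cs, ch))
        return false;
    rcRasterizeTriangles(ctx, verts, nverts, tris, areas, ntris, *hf, walkableClimb);

    // 2) Compact representation consumed by the region and detail passes (not in this diff).
    rcCompactHeightfield* chf = rcAllocCompactHeightfield();
    if (!chf || !rcBuildCompactHeightfield(ctx, walkableHeight, walkableClimb, *hf, *chf))
        return false;

    // 3) Distance field and watershed region partitioning (RecastRegion.cpp).
    if (!rcBuildDistanceField(ctx, *chf))
        return false;
    if (!rcBuildRegions(ctx, *chf, /*borderSize*/ 0, /*minRegionArea*/ 64, /*mergeRegionArea*/ 400))
        return false;

    // 4) Contours and polygon mesh (not in this diff), then the detail mesh
    //    produced by rcBuildPolyMeshDetail (RecastMeshDetail.cpp).
    rcContourSet* cset = rcAllocContourSet();
    rcPolyMesh* pmesh = rcAllocPolyMesh();
    if (!cset || !rcBuildContours(ctx, *chf, /*maxError*/ 1.3f, /*maxEdgeLen*/ 12, *cset))
        return false;
    if (!pmesh || !rcBuildPolyMesh(ctx, *cset, /*nvp*/ 6, *pmesh))
        return false;
    return rcBuildPolyMeshDetail(ctx, *pmesh, *chf,
                                 /*sampleDist*/ cs * 6.0f, /*sampleMaxError*/ ch * 1.0f, dmesh);
}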