427 files changed, 34899 insertions, 16004 deletions
diff --git a/.travis.yml b/.travis.yml index d1fdbbc6d2e..2489bb209d4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -32,7 +32,7 @@ install: - mysql -uroot -e 'create database test_mysql;' - mkdir bin - cd bin - - cmake ../ -DWITH_WARNINGS=1 -DWITH_COREDEBUG=0 -DUSE_COREPCH=1 -DUSE_SCRIPTPCH=1 -DTOOLS=1 -DSCRIPTS=dynamic -DSERVERS=1 -DNOJEM=1 -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS="-Werror" -DCMAKE_CXX_FLAGS="-Werror" -DCMAKE_C_FLAGS_DEBUG="-DNDEBUG" -DCMAKE_CXX_FLAGS_DEBUG="-DNDEBUG" -DCMAKE_INSTALL_PREFIX=check_install + - cmake ../ -DWITH_WARNINGS=1 -DWITH_COREDEBUG=0 -DUSE_COREPCH=1 -DUSE_SCRIPTPCH=1 -DTOOLS=1 -DSCRIPTS=dynamic -DSERVERS=1 -DNOJEM=0 -DCMAKE_BUILD_TYPE=Debug -DCMAKE_C_FLAGS="-Werror" -DCMAKE_CXX_FLAGS="-Werror" -DCMAKE_C_FLAGS_DEBUG="-DNDEBUG" -DCMAKE_CXX_FLAGS_DEBUG="-DNDEBUG" -DCMAKE_INSTALL_PREFIX=check_install - cd .. - chmod +x contrib/check_updates.sh diff --git a/cmake/showoptions.cmake b/cmake/showoptions.cmake index ebd9523876d..0809f5a0cf6 100644 --- a/cmake/showoptions.cmake +++ b/cmake/showoptions.cmake @@ -95,13 +95,6 @@ if ( NOJEM ) message(" *** jemalloc linking has been disabled!") message(" *** Please note that this is for DEBUGGING WITH VALGRIND only!") message(" *** DO NOT DISABLE IT UNLESS YOU KNOW WHAT YOU'RE DOING!") -elseif ( VALGRIND ) - message("") - message(" *** VALGRIND - WARNING!") - message(" *** jemalloc will be configured to support Valgrind") - message(" *** Please specify the valgrind include directory in VALGRIND_INCLUDE_DIR option if you get build errors") - message(" *** Please note that this is for DEBUGGING WITH VALGRIND only!") - add_definitions(-DJEMALLOC_VALGRIND) endif() if ( HELGRIND ) diff --git a/dep/PackageList.txt b/dep/PackageList.txt index 9ffc95d89e2..eff711cf974 100644 --- a/dep/PackageList.txt +++ b/dep/PackageList.txt @@ -26,7 +26,7 @@ G3D (a commercial-grade C++ 3D engine available as Open Source (BSD License) jemalloc (a general-purpose scalable concurrent malloc-implementation) http://www.canonware.com/jemalloc/ - Version: 3.6.0 + Version: 5.0.1 libMPQ (a library for reading MPQ files) https://github.com/mbroemme/libmpq/ @@ -57,5 +57,5 @@ gSOAP (a portable development toolkit for C and C++ XML Web services and XML dat Version: 2.8.49 recastnavigation (Recast is state of the art navigation mesh construction toolset for games) - https://github.com/memononen/recastnavigation - Version: 64385e9ed0822427bca5814d03a3f4c4d7a6db9f + https://github.com/recastnavigation/recastnavigation + Version: 2c85309280dbc9c82029e7ab16dfb01b9235c74e diff --git a/dep/jemalloc/CMakeLists.txt b/dep/jemalloc/CMakeLists.txt index 7c2e494c52a..6362714f2ee 100644 --- a/dep/jemalloc/CMakeLists.txt +++ b/dep/jemalloc/CMakeLists.txt @@ -10,45 +10,62 @@ if(CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT NOJEM) # We need to generate the jemalloc_def.h header based on platform-specific settings + CHECK_SYMBOL_EXISTS(MADV_FREE "sys/mman.h" HAVE_MADV_FREE) + if (PLATFORM EQUAL 32) set(JEM_SIZEDEF 2) set(JEM_TLSMODEL) + set(JEM_VADDRBITS 32) else() set(JEM_SIZEDEF 3) set(JEM_TLSMODEL "__attribute__\(\(tls_model\(\"initial-exec\"\)\)\)") + set(JEM_VADDRBITS 48) + endif() + + if (HAVE_MADV_FREE) + set(JEM_MADFREE_DEF "#define") + else() + set(JEM_MADFREE_DEF "#undef") endif() # Create the header, so we can use it configure_file( - "${CMAKE_SOURCE_DIR}/dep/jemalloc/jemalloc_defs.h.in.cmake" - "${BUILDDIR}/jemalloc_defs.h" + "${CMAKE_SOURCE_DIR}/dep/jemalloc/jemalloc_internal_defs.h.in.cmake" + "${BUILDDIR}/jemalloc_internal_defs.h" @ONLY ) # Done, let's 
continue set(jemalloc_STAT_SRC ${CMAKE_CURRENT_SOURCE_DIR}/src/arena.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/atomic.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/background_thread.c ${CMAKE_CURRENT_SOURCE_DIR}/src/base.c ${CMAKE_CURRENT_SOURCE_DIR}/src/bitmap.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/chunk.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/chunk_dss.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/chunk_mmap.c ${CMAKE_CURRENT_SOURCE_DIR}/src/ckh.c ${CMAKE_CURRENT_SOURCE_DIR}/src/ctl.c ${CMAKE_CURRENT_SOURCE_DIR}/src/extent.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/extent_dss.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/extent_mmap.c ${CMAKE_CURRENT_SOURCE_DIR}/src/hash.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/huge.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/hooks.c ${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/mb.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/jemalloc_cpp.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/large.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/malloc_io.c ${CMAKE_CURRENT_SOURCE_DIR}/src/mutex.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/mutex_pool.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/nstime.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/pages.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/prng.c ${CMAKE_CURRENT_SOURCE_DIR}/src/prof.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/quarantine.c ${CMAKE_CURRENT_SOURCE_DIR}/src/rtree.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/spin.c ${CMAKE_CURRENT_SOURCE_DIR}/src/stats.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/sz.c ${CMAKE_CURRENT_SOURCE_DIR}/src/tcache.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/ticker.c ${CMAKE_CURRENT_SOURCE_DIR}/src/tsd.c - ${CMAKE_CURRENT_SOURCE_DIR}/src/util.c + ${CMAKE_CURRENT_SOURCE_DIR}/src/witness.c ) add_library(jemalloc STATIC ${jemalloc_STAT_SRC}) @@ -68,7 +85,8 @@ if(CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT NOJEM) target_link_libraries(jemalloc PUBLIC threads - valgrind) + valgrind + ${CMAKE_DL_LIBS}) set_target_properties(jemalloc PROPERTIES diff --git a/dep/jemalloc/COPYING b/dep/jemalloc/COPYING index bdda0feb9e5..e308632a813 100644 --- a/dep/jemalloc/COPYING +++ b/dep/jemalloc/COPYING @@ -1,10 +1,10 @@ Unless otherwise specified, files in the jemalloc source distribution are subject to the following license: -------------------------------------------------------------------------------- -Copyright (C) 2002-2014 Jason Evans <jasone@canonware.com>. +Copyright (C) 2002-2017 Jason Evans <jasone@canonware.com>. All rights reserved. Copyright (C) 2007-2012 Mozilla Foundation. All rights reserved. -Copyright (C) 2009-2014 Facebook, Inc. All rights reserved. +Copyright (C) 2009-2017 Facebook, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/dep/jemalloc/ChangeLog b/dep/jemalloc/ChangeLog index d56ee999e69..ee1b7ead399 100644 --- a/dep/jemalloc/ChangeLog +++ b/dep/jemalloc/ChangeLog @@ -1,10 +1,727 @@ Following are change highlights associated with official releases. Important -bug fixes are all mentioned, but internal enhancements are omitted here for -brevity (even though they are more fun to write about). Much more detail can be -found in the git revision history: +bug fixes are all mentioned, but some internal enhancements are omitted here for +brevity. Much more detail can be found in the git revision history: https://github.com/jemalloc/jemalloc +* 5.0.1 (July 1, 2017) + + This bugfix release fixes several issues, most of which are obscure enough + that typical applications are not impacted. 
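The build hunk above begins probing for MADV_FREE via CHECK_SYMBOL_EXISTS because jemalloc 5's two-phase decay (see the 5.0.0 notes below) moves pages dirty -> muzzy -> clean, with the first transition relying on madvise(... MADV_FREE) semantics. A rough standalone sketch of what HAVE_MADV_FREE gates, assuming Linux and independent of this patch:

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void) {
        size_t len = 1 << 20;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        memset(p, 0xa5, len); /* dirty the pages */
    #ifdef MADV_FREE
        /* Lazy purge ("muzzy"): pages stay mapped and are reclaimed only
         * under memory pressure; a later write cancels the reclaim. */
        if (madvise(p, len, MADV_FREE) == 0)
            puts("MADV_FREE supported: lazy purging available");
        else
    #endif
        /* Eager purge ("clean"): the next access is demand-zeroed. */
        madvise(p, len, MADV_DONTNEED);
        munmap(p, len);
        return 0;
    }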
+ + Bug fixes: + - Update decay->nunpurged before purging, in order to avoid potential update + races and subsequent incorrect purging volume. (@interwq) + - Only abort on dlsym(3) error if the failure impacts an enabled feature (lazy + locking and/or background threads). This mitigates an initialization + failure bug for which we still do not have a clear reproduction test case. + (@interwq) + - Modify tsd management so that it neither crashes nor leaks if a thread's + only allocation activity is to call free() after TLS destructors have been + executed. This behavior was observed when operating with GNU libc, and is + unlikely to be an issue with other libc implementations. (@interwq) + - Mask signals during background thread creation. This prevents signals from + being inadvertently delivered to background threads. (@jasone, + @davidgoldblatt, @interwq) + - Avoid inactivity checks within background threads, in order to prevent + recursive mutex acquisition. (@interwq) + - Fix extent_grow_retained() to use the specified hooks when the + arena.<i>.extent_hooks mallctl is used to override the default hooks. + (@interwq) + - Add missing reentrancy support for custom extent hooks which allocate. + (@interwq) + - Post-fork(2), re-initialize the list of tcaches associated with each arena + to contain no tcaches except the forking thread's. (@interwq) + - Add missing post-fork(2) mutex reinitialization for extent_grow_mtx. This + fixes potential deadlocks after fork(2). (@interwq) + - Enforce minimum autoconf version (currently 2.68), since 2.63 is known to + generate corrupt configure scripts. (@jasone) + - Ensure that the configured page size (--with-lg-page) is no larger than the + configured huge page size (--with-lg-hugepage). (@jasone) + +* 5.0.0 (June 13, 2017) + + Unlike all previous jemalloc releases, this release does not use naturally + aligned "chunks" for virtual memory management, and instead uses page-aligned + "extents". This change has few externally visible effects, but the internal + impacts are... extensive. Many other internal changes combine to make this + the most cohesively designed version of jemalloc so far, with ample + opportunity for further enhancements. + + Continuous integration is now an integral aspect of development thanks to the + efforts of @davidtgoldblatt, and the dev branch tends to remain reasonably + stable on the tested platforms (Linux, FreeBSD, macOS, and Windows). As a + side effect the official release frequency may decrease over time. + + New features: + - Implement optional per-CPU arena support; threads choose which arena to use + based on current CPU rather than on fixed thread-->arena associations. + (@interwq) + - Implement two-phase decay of unused dirty pages. Pages transition from + dirty-->muzzy-->clean, where the first phase transition relies on + madvise(... MADV_FREE) semantics, and the second phase transition discards + pages such that they are replaced with demand-zeroed pages on next access. + (@jasone) + - Increase decay time resolution from seconds to milliseconds. (@jasone) + - Implement opt-in per CPU background threads, and use them for asynchronous + decay-driven unused dirty page purging. (@interwq) + - Add mutex profiling, which collects a variety of statistics useful for + diagnosing overhead/contention issues. (@interwq) + - Add C++ new/delete operator bindings. (@djwatson) + - Support manually created arena destruction, such that all data and metadata + are discarded. 
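Most of the knobs listed in these notes are reached through mallctl(). A minimal usage sketch, assuming a jemalloc 5 build without a symbol prefix (otherwise the calls are je_mallctl and so on), using the "background_thread", "arenas.dirty_decay_ms", and "stats.resident" names from these release notes:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* Opt in to the new background purging threads. */
        bool enable = true;
        if (mallctl("background_thread", NULL, NULL,
                    &enable, sizeof(enable)) != 0)
            fprintf(stderr, "background_thread not available in this build\n");

        /* Decay is now expressed in milliseconds; stretch the dirty-page
         * decay window to 10 seconds for arenas created from here on. */
        ssize_t decay_ms = 10000;
        mallctl("arenas.dirty_decay_ms", NULL, NULL,
                &decay_ms, sizeof(decay_ms));

        /* Read a merged statistic back out. */
        size_t resident, sz = sizeof(resident);
        if (mallctl("stats.resident", &resident, &sz, NULL, 0) == 0)
            printf("resident: %zu bytes\n", resident);
        return 0;
    }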
Add MALLCTL_ARENAS_DESTROYED for accessing merged stats + associated with destroyed arenas. (@jasone) + - Add MALLCTL_ARENAS_ALL as a fixed index for use in accessing + merged/destroyed arena statistics via mallctl. (@jasone) + - Add opt.abort_conf to optionally abort if invalid configuration options are + detected during initialization. (@interwq) + - Add opt.stats_print_opts, so that e.g. JSON output can be selected for the + stats dumped during exit if opt.stats_print is true. (@jasone) + - Add --with-version=VERSION for use when embedding jemalloc into another + project's git repository. (@jasone) + - Add --disable-thp to support cross compiling. (@jasone) + - Add --with-lg-hugepage to support cross compiling. (@jasone) + - Add mallctl interfaces (various authors): + + background_thread + + opt.abort_conf + + opt.retain + + opt.percpu_arena + + opt.background_thread + + opt.{dirty,muzzy}_decay_ms + + opt.stats_print_opts + + arena.<i>.initialized + + arena.<i>.destroy + + arena.<i>.{dirty,muzzy}_decay_ms + + arena.<i>.extent_hooks + + arenas.{dirty,muzzy}_decay_ms + + arenas.bin.<i>.slab_size + + arenas.nlextents + + arenas.lextent.<i>.size + + arenas.create + + stats.background_thread.{num_threads,num_runs,run_interval} + + stats.mutexes.{ctl,background_thread,prof,reset}. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + stats.arenas.<i>.{dirty,muzzy}_decay_ms + + stats.arenas.<i>.uptime + + stats.arenas.<i>.{pmuzzy,base,internal,resident} + + stats.arenas.<i>.{dirty,muzzy}_{npurge,nmadvise,purged} + + stats.arenas.<i>.bins.<j>.{nslabs,reslabs,curslabs} + + stats.arenas.<i>.bins.<j>.mutex. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + stats.arenas.<i>.lextents.<j>.{nmalloc,ndalloc,nrequests,curlextents} + + stats.arenas.i.mutexes.{large,extent_avail,extents_dirty,extents_muzzy, + extents_retained,decay_dirty,decay_muzzy,base,tcache_list}. + {num_ops,num_spin_acq,num_wait,max_wait_time,total_wait_time,max_num_thds, + num_owner_switch} + + Portability improvements: + - Improve reentrant allocation support, such that deadlock is less likely if + e.g. a system library call in turn allocates memory. (@davidtgoldblatt, + @interwq) + - Support static linking of jemalloc with glibc. (@djwatson) + + Optimizations and refactors: + - Organize virtual memory as "extents" of virtual memory pages, rather than as + naturally aligned "chunks", and store all metadata in arbitrarily distant + locations. This reduces virtual memory external fragmentation, and will + interact better with huge pages (not yet explicitly supported). (@jasone) + - Fold large and huge size classes together; only small and large size classes + remain. (@jasone) + - Unify the allocation paths, and merge most fast-path branching decisions. + (@davidtgoldblatt, @interwq) + - Embed per thread automatic tcache into thread-specific data, which reduces + conditional branches and dereferences. Also reorganize tcache to increase + fast-path data locality. (@interwq) + - Rewrite atomics to closely model the C11 API, convert various + synchronization from mutex-based to atomic, and use the explicit memory + ordering control to resolve various hypothetical races without increasing + synchronization overhead. (@davidtgoldblatt) + - Extensively optimize rtree via various methods: + + Add multiple layers of rtree lookup caching, since rtree lookups are now + part of fast-path deallocation. 
(@interwq) + + Determine rtree layout at compile time. (@jasone) + + Make the tree shallower for common configurations. (@jasone) + + Embed the root node in the top-level rtree data structure, thus avoiding + one level of indirection. (@jasone) + + Further specialize leaf elements as compared to internal node elements, + and directly embed extent metadata needed for fast-path deallocation. + (@jasone) + + Ignore leading always-zero address bits (architecture-specific). + (@jasone) + - Reorganize headers (ongoing work) to make them hermetic, and disentangle + various module dependencies. (@davidtgoldblatt) + - Convert various internal data structures such as size class metadata from + boot-time-initialized to compile-time-initialized. Propagate resulting data + structure simplifications, such as making arena metadata fixed-size. + (@jasone) + - Simplify size class lookups when constrained to size classes that are + multiples of the page size. This speeds lookups, but the primary benefit is + complexity reduction in code that was the source of numerous regressions. + (@jasone) + - Lock individual extents when possible for localized extent operations, + rather than relying on a top-level arena lock. (@davidtgoldblatt, @jasone) + - Use first fit layout policy instead of best fit, in order to improve + packing. (@jasone) + - If munmap(2) is not in use, use an exponential series to grow each arena's + virtual memory, so that the number of disjoint virtual memory mappings + remains low. (@jasone) + - Implement per arena base allocators, so that arenas never share any virtual + memory pages. (@jasone) + - Automatically generate private symbol name mangling macros. (@jasone) + + Incompatible changes: + - Replace chunk hooks with an expanded/normalized set of extent hooks. + (@jasone) + - Remove ratio-based purging. (@jasone) + - Remove --disable-tcache. (@jasone) + - Remove --disable-tls. (@jasone) + - Remove --enable-ivsalloc. (@jasone) + - Remove --with-lg-size-class-group. (@jasone) + - Remove --with-lg-tiny-min. (@jasone) + - Remove --disable-cc-silence. (@jasone) + - Remove --enable-code-coverage. (@jasone) + - Remove --disable-munmap (replaced by opt.retain). (@jasone) + - Remove Valgrind support. (@jasone) + - Remove quarantine support. (@jasone) + - Remove redzone support. (@jasone) + - Remove mallctl interfaces (various authors): + + config.munmap + + config.tcache + + config.tls + + config.valgrind + + opt.lg_chunk + + opt.purge + + opt.lg_dirty_mult + + opt.decay_time + + opt.quarantine + + opt.redzone + + opt.thp + + arena.<i>.lg_dirty_mult + + arena.<i>.decay_time + + arena.<i>.chunk_hooks + + arenas.initialized + + arenas.lg_dirty_mult + + arenas.decay_time + + arenas.bin.<i>.run_size + + arenas.nlruns + + arenas.lrun.<i>.size + + arenas.nhchunks + + arenas.hchunk.<i>.size + + arenas.extend + + stats.cactive + + stats.arenas.<i>.lg_dirty_mult + + stats.arenas.<i>.decay_time + + stats.arenas.<i>.metadata.{mapped,allocated} + + stats.arenas.<i>.{npurge,nmadvise,purged} + + stats.arenas.<i>.huge.{allocated,nmalloc,ndalloc,nrequests} + + stats.arenas.<i>.bins.<j>.{nruns,reruns,curruns} + + stats.arenas.<i>.lruns.<j>.{nmalloc,ndalloc,nrequests,curruns} + + stats.arenas.<i>.hchunks.<j>.{nmalloc,ndalloc,nrequests,curhchunks} + + Bug fixes: + - Improve interval-based profile dump triggering to dump only one profile when + a single allocation's size exceeds the interval. 
(@jasone) + - Use prefixed function names (as controlled by --with-jemalloc-prefix) when + pruning backtrace frames in jeprof. (@jasone) + +* 4.5.0 (February 28, 2017) + + This is the first release to benefit from much broader continuous integration + testing, thanks to @davidtgoldblatt. Had we had this testing infrastructure + in place for prior releases, it would have caught all of the most serious + regressions fixed by this release. + + New features: + - Add --disable-thp and the opt.thp mallctl to provide opt-out mechanisms for + transparent huge page integration. (@jasone) + - Update zone allocator integration to work with macOS 10.12. (@glandium) + - Restructure *CFLAGS configuration, so that CFLAGS behaves typically, and + EXTRA_CFLAGS provides a way to specify e.g. -Werror during building, but not + during configuration. (@jasone, @ronawho) + + Bug fixes: + - Fix DSS (sbrk(2)-based) allocation. This regression was first released in + 4.3.0. (@jasone) + - Handle race in per size class utilization computation. This functionality + was first released in 4.0.0. (@interwq) + - Fix lock order reversal during gdump. (@jasone) + - Fix/refactor tcache synchronization. This regression was first released in + 4.0.0. (@jasone) + - Fix various JSON-formatted malloc_stats_print() bugs. This functionality + was first released in 4.3.0. (@jasone) + - Fix huge-aligned allocation. This regression was first released in 4.4.0. + (@jasone) + - When transparent huge page integration is enabled, detect what state pages + start in according to the kernel's current operating mode, and only convert + arena chunks to non-huge during purging if that is not their initial state. + This functionality was first released in 4.4.0. (@jasone) + - Fix lg_chunk clamping for the --enable-cache-oblivious --disable-fill case. + This regression was first released in 4.0.0. (@jasone, @428desmo) + - Properly detect sparc64 when building for Linux. (@glaubitz) + +* 4.4.0 (December 3, 2016) + + New features: + - Add configure support for *-*-linux-android. (@cferris1000, @jasone) + - Add the --disable-syscall configure option, for use on systems that place + security-motivated limitations on syscall(2). (@jasone) + - Add support for Debian GNU/kFreeBSD. (@thesam) + + Optimizations: + - Add extent serial numbers and use them where appropriate as a sort key that + is higher priority than address, so that the allocation policy prefers older + extents. This tends to improve locality (decrease fragmentation) when + memory grows downward. (@jasone) + - Refactor madvise(2) configuration so that MADV_FREE is detected and utilized + on Linux 4.5 and newer. (@jasone) + - Mark partially purged arena chunks as non-huge-page. This improves + interaction with Linux's transparent huge page functionality. (@jasone) + + Bug fixes: + - Fix size class computations for edge conditions involving extremely large + allocations. This regression was first released in 4.0.0. (@jasone, + @ingvarha) + - Remove overly restrictive assertions related to the cactive statistic. This + regression was first released in 4.1.0. (@jasone) + - Implement a more reliable detection scheme for os_unfair_lock on macOS. + (@jszakmeister) + +* 4.3.1 (November 7, 2016) + + Bug fixes: + - Fix a severe virtual memory leak. This regression was first released in + 4.3.0. (@interwq, @jasone) + - Refactor atomic and prng APIs to restore support for 32-bit platforms that + use pre-C11 toolchains, e.g. FreeBSD's mips. 
(@jasone) + +* 4.3.0 (November 4, 2016) + + This is the first release that passes the test suite for multiple Windows + configurations, thanks in large part to @glandium setting up continuous + integration via AppVeyor (and Travis CI for Linux and OS X). + + New features: + - Add "J" (JSON) support to malloc_stats_print(). (@jasone) + - Add Cray compiler support. (@ronawho) + + Optimizations: + - Add/use adaptive spinning for bootstrapping and radix tree node + initialization. (@jasone) + + Bug fixes: + - Fix large allocation to search starting in the optimal size class heap, + which can substantially reduce virtual memory churn and fragmentation. This + regression was first released in 4.0.0. (@mjp41, @jasone) + - Fix stats.arenas.<i>.nthreads accounting. (@interwq) + - Fix and simplify decay-based purging. (@jasone) + - Make DSS (sbrk(2)-related) operations lockless, which resolves potential + deadlocks during thread exit. (@jasone) + - Fix over-sized allocation of radix tree leaf nodes. (@mjp41, @ogaun, + @jasone) + - Fix over-sized allocation of arena_t (plus associated stats) data + structures. (@jasone, @interwq) + - Fix EXTRA_CFLAGS to not affect configuration. (@jasone) + - Fix a Valgrind integration bug. (@ronawho) + - Disallow 0x5a junk filling when running in Valgrind. (@jasone) + - Fix a file descriptor leak on Linux. This regression was first released in + 4.2.0. (@vsarunas, @jasone) + - Fix static linking of jemalloc with glibc. (@djwatson) + - Use syscall(2) rather than {open,read,close}(2) during boot on Linux. This + works around other libraries' system call wrappers performing reentrant + allocation. (@kspinka, @Whissi, @jasone) + - Fix OS X default zone replacement to work with OS X 10.12. (@glandium, + @jasone) + - Fix cached memory management to avoid needless commit/decommit operations + during purging, which resolves permanent virtual memory map fragmentation + issues on Windows. (@mjp41, @jasone) + - Fix TSD fetches to avoid (recursive) allocation. This is relevant to + non-TLS and Windows configurations. (@jasone) + - Fix malloc_conf overriding to work on Windows. (@jasone) + - Forcibly disable lazy-lock on Windows (was forcibly *enabled*). (@jasone) + +* 4.2.1 (June 8, 2016) + + Bug fixes: + - Fix bootstrapping issues for configurations that require allocation during + tsd initialization (e.g. --disable-tls). (@cferris1000, @jasone) + - Fix gettimeofday() version of nstime_update(). (@ronawho) + - Fix Valgrind regressions in calloc() and chunk_alloc_wrapper(). (@ronawho) + - Fix potential VM map fragmentation regression. (@jasone) + - Fix opt_zero-triggered in-place huge reallocation zeroing. (@jasone) + - Fix heap profiling context leaks in reallocation edge cases. (@jasone) + +* 4.2.0 (May 12, 2016) + + New features: + - Add the arena.<i>.reset mallctl, which makes it possible to discard all of + an arena's allocations in a single operation. (@jasone) + - Add the stats.retained and stats.arenas.<i>.retained statistics. (@jasone) + - Add the --with-version configure option. (@jasone) + - Support --with-lg-page values larger than actual page size. (@jasone) + + Optimizations: + - Use pairing heaps rather than red-black trees for various hot data + structures. (@djwatson, @jasone) + - Streamline fast paths of rtree operations. (@jasone) + - Optimize the fast paths of calloc() and [m,d,sd]allocx(). (@jasone) + - Decommit unused virtual memory if the OS does not overcommit. 
(@jasone) + - Specify MAP_NORESERVE on Linux if [heuristic] overcommit is active, in order + to avoid unfortunate interactions during fork(2). (@jasone) + + Bug fixes: + - Fix chunk accounting related to triggering gdump profiles. (@jasone) + - Link against librt for clock_gettime(2) if glibc < 2.17. (@jasone) + - Scale leak report summary according to sampling probability. (@jasone) + +* 4.1.1 (May 3, 2016) + + This bugfix release resolves a variety of mostly minor issues, though the + bitmap fix is critical for 64-bit Windows. + + Bug fixes: + - Fix the linear scan version of bitmap_sfu() to shift by the proper amount + even when sizeof(long) is not the same as sizeof(void *), as on 64-bit + Windows. (@jasone) + - Fix hashing functions to avoid unaligned memory accesses (and resulting + crashes). This is relevant at least to some ARM-based platforms. + (@rkmisra) + - Fix fork()-related lock rank ordering reversals. These reversals were + unlikely to cause deadlocks in practice except when heap profiling was + enabled and active. (@jasone) + - Fix various chunk leaks in OOM code paths. (@jasone) + - Fix malloc_stats_print() to print opt.narenas correctly. (@jasone) + - Fix MSVC-specific build/test issues. (@rustyx, @yuslepukhin) + - Fix a variety of test failures that were due to test fragility rather than + core bugs. (@jasone) + +* 4.1.0 (February 28, 2016) + + This release is primarily about optimizations, but it also incorporates a lot + of portability-motivated refactoring and enhancements. Many people worked on + this release, to an extent that even with the omission here of minor changes + (see git revision history), and of the people who reported and diagnosed + issues, so much of the work was contributed that starting with this release, + changes are annotated with author credits to help reflect the collaborative + effort involved. + + New features: + - Implement decay-based unused dirty page purging, a major optimization with + mallctl API impact. This is an alternative to the existing ratio-based + unused dirty page purging, and is intended to eventually become the sole + purging mechanism. New mallctls: + + opt.purge + + opt.decay_time + + arena.<i>.decay + + arena.<i>.decay_time + + arenas.decay_time + + stats.arenas.<i>.decay_time + (@jasone, @cevans87) + - Add --with-malloc-conf, which makes it possible to embed a default + options string during configuration. This was motivated by the desire to + specify --with-malloc-conf=purge:decay , since the default must remain + purge:ratio until the 5.0.0 release. (@jasone) + - Add MS Visual Studio 2015 support. (@rustyx, @yuslepukhin) + - Make *allocx() size class overflow behavior defined. The maximum + size class is now less than PTRDIFF_MAX to protect applications against + numerical overflow, and all allocation functions are guaranteed to indicate + errors rather than potentially crashing if the request size exceeds the + maximum size class. (@jasone) + - jeprof: + + Add raw heap profile support. (@jasone) + + Add --retain and --exclude for backtrace symbol filtering. (@jasone) + + Optimizations: + - Optimize the fast path to combine various bootstrapping and configuration + checks and execute more streamlined code in the common case. (@interwq) + - Use linear scan for small bitmaps (used for small object tracking). In + addition to speeding up bitmap operations on 64-bit systems, this reduces + allocator metadata overhead by approximately 0.2%. 
(@djwatson) + - Separate arena_avail trees, which substantially speeds up run tree + operations. (@djwatson) + - Use memoization (boot-time-computed table) for run quantization. Separate + arena_avail trees reduced the importance of this optimization. (@jasone) + - Attempt mmap-based in-place huge reallocation. This can dramatically speed + up incremental huge reallocation. (@jasone) + + Incompatible changes: + - Make opt.narenas unsigned rather than size_t. (@jasone) + + Bug fixes: + - Fix stats.cactive accounting regression. (@rustyx, @jasone) + - Handle unaligned keys in hash(). This caused problems for some ARM systems. + (@jasone, @cferris1000) + - Refactor arenas array. In addition to fixing a fork-related deadlock, this + makes arena lookups faster and simpler. (@jasone) + - Move retained memory allocation out of the default chunk allocation + function, to a location that gets executed even if the application installs + a custom chunk allocation function. This resolves a virtual memory leak. + (@buchgr) + - Fix a potential tsd cleanup leak. (@cferris1000, @jasone) + - Fix run quantization. In practice this bug had no impact unless + applications requested memory with alignment exceeding one page. + (@jasone, @djwatson) + - Fix LinuxThreads-specific bootstrapping deadlock. (Cosmin Paraschiv) + - jeprof: + + Don't discard curl options if timeout is not defined. (@djwatson) + + Detect failed profile fetches. (@djwatson) + - Fix stats.arenas.<i>.{dss,lg_dirty_mult,decay_time,pactive,pdirty} for + --disable-stats case. (@jasone) + +* 4.0.4 (October 24, 2015) + + This bugfix release fixes another xallocx() regression. No other regressions + have come to light in over a month, so this is likely a good starting point + for people who prefer to wait for "dot one" releases with all the major issues + shaken out. + + Bug fixes: + - Fix xallocx(..., MALLOCX_ZERO to zero the last full trailing page of large + allocations that have been randomly assigned an offset of 0 when + --enable-cache-oblivious configure option is enabled. + +* 4.0.3 (September 24, 2015) + + This bugfix release continues the trend of xallocx() and heap profiling fixes. + + Bug fixes: + - Fix xallocx(..., MALLOCX_ZERO) to zero all trailing bytes of large + allocations when --enable-cache-oblivious configure option is enabled. + - Fix xallocx(..., MALLOCX_ZERO) to zero trailing bytes of huge allocations + when resizing from/to a size class that is not a multiple of the chunk size. + - Fix prof_tctx_dump_iter() to filter out nodes that were created after heap + profile dumping started. + - Work around a potentially bad thread-specific data initialization + interaction with NPTL (glibc's pthreads implementation). + +* 4.0.2 (September 21, 2015) + + This bugfix release addresses a few bugs specific to heap profiling. + + Bug fixes: + - Fix ixallocx_prof_sample() to never modify nor create sampled small + allocations. xallocx() is in general incapable of moving small allocations, + so this fix removes buggy code without loss of generality. + - Fix irallocx_prof_sample() to always allocate large regions, even when + alignment is non-zero. + - Fix prof_alloc_rollback() to read tdata from thread-specific data rather + than dereferencing a potentially invalid tctx. + +* 4.0.1 (September 15, 2015) + + This is a bugfix release that is somewhat high risk due to the amount of + refactoring required to address deep xallocx() problems. 
As a side effect of
+  these fixes, xallocx() now tries harder to partially fulfill requests for
+  optional extra space. Note that a couple of minor heap profiling
+  optimizations are included, but these are better thought of as performance
+  fixes that were integral to discovering most of the other bugs.
+
+  Optimizations:
+  - Avoid a chunk metadata read in arena_prof_tctx_set(), since it is in the
+    fast path when heap profiling is enabled. Additionally, split a special
+    case out into arena_prof_tctx_reset(), which also avoids chunk metadata
+    reads.
+  - Optimize irallocx_prof() to optimistically update the sampler state. The
+    prior implementation appears to have been a holdover from when
+    rallocx()/xallocx() functionality was combined as rallocm().
+
+  Bug fixes:
+  - Fix TLS configuration such that it is enabled by default for platforms on
+    which it works correctly.
+  - Fix arenas_cache_cleanup() and arena_get_hard() to handle
+    allocation/deallocation within the application's thread-specific data
+    cleanup functions even after arenas_cache is torn down.
+  - Fix xallocx() bugs related to size+extra exceeding HUGE_MAXCLASS.
+  - Fix chunk purge hook calls for in-place huge shrinking reallocation to
+    specify the old chunk size rather than the new chunk size. This bug caused
+    no correctness issues for the default chunk purge function, but was
+    visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
+  - Fix heap profiling bugs:
+    + Fix heap profiling to distinguish among otherwise identical sample sites
+      with interposed resets (triggered via the "prof.reset" mallctl). This
+      bug could cause data structure corruption that would most likely result
+      in a segfault.
+    + Fix irealloc_prof() to prof_alloc_rollback() on OOM.
+    + Make one call to prof_active_get_unlocked() per allocation event, and
+      use the result throughout the relevant functions that handle an
+      allocation event. Also add a missing check in prof_realloc(). These
+      fixes protect allocation events against concurrent prof_active changes.
+    + Fix ixallocx_prof() to pass usize_max and zero to ixallocx_prof_sample()
+      in the correct order.
+    + Fix prof_realloc() to call prof_free_sampled_object() after calling
+      prof_malloc_sample_object(). Prior to this fix, if tctx and old_tctx
+      were the same, the tctx could have been prematurely destroyed.
+  - Fix portability bugs:
+    + Don't bitshift by negative amounts when encoding/decoding run sizes in
+      chunk header maps. This affected systems with page sizes greater than 8
+      KiB.
+    + Rename index_t to szind_t to avoid an existing type on Solaris.
+    + Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
+      match glibc and avoid compilation errors when including both
+      jemalloc/jemalloc.h and malloc.h in C++ code.
+    + Don't assume that /bin/sh is appropriate when running size_classes.sh
+      during configuration.
+    + Consider __sparcv9 a synonym for __sparc64__ when defining LG_QUANTUM.
+    + Link tests to librt if it contains clock_gettime(2).
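Several of the 4.0.x fixes above center on xallocx(), which tries to resize an allocation in place and returns the resulting usable size rather than a new pointer. A minimal sketch of the call shape those fixes exercise, assuming an unprefixed jemalloc 4.x or later:

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        void *p = mallocx(4096, 0);
        if (p == NULL)
            return 1;
        /* Grow in place to at least 4 KiB and at most 8 KiB; MALLOCX_ZERO
         * requests that any newly usable trailing bytes come back zeroed,
         * the behavior patched in 4.0.3/4.0.4 above. */
        size_t usable = xallocx(p, 4096, 4096, MALLOCX_ZERO);
        printf("usable size after xallocx: %zu\n", usable);
        dallocx(p, 0);
        return 0;
    }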
+
+* 4.0.0 (August 17, 2015)
+
+  This version contains many speed and space optimizations, both minor and
+  major. The major themes are generalization, unification, and simplification.
+  Although many of these optimizations cause no visible behavior change, their
+  cumulative effect is substantial.
+
+  New features:
+  - Normalize size class spacing to be consistent across the complete size
+    range. By default there are four size classes per size doubling, but this
+    is now configurable via the --with-lg-size-class-group option. Also add
+    the --with-lg-page, --with-lg-page-sizes, --with-lg-quantum, and
+    --with-lg-tiny-min options, which can be used to tweak page and size class
+    settings. Impacts:
+    + Worst case performance for incrementally growing/shrinking reallocation
+      is improved because there are far fewer size classes, and therefore
+      copying happens less often.
+    + Internal fragmentation is limited to 20% for all but the smallest size
+      classes (those less than four times the quantum). (1B + 4 KiB)
+      and (1B + 4 MiB) previously suffered nearly 50% internal fragmentation.
+    + Chunk fragmentation tends to be lower because there are fewer distinct
+      run sizes to pack.
+  - Add support for explicit tcaches. The "tcache.create", "tcache.flush", and
+    "tcache.destroy" mallctls control tcache lifetime and flushing, and the
+    MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to the *allocx() API
+    control which tcache is used for each operation.
+  - Implement per thread heap profiling, as well as the ability to
+    enable/disable heap profiling on a per thread basis. Add the "prof.reset",
+    "prof.lg_sample", "thread.prof.name", "thread.prof.active",
+    "opt.prof_thread_active_init", "prof.thread_active_init", and
+    "thread.prof.active" mallctls.
+  - Add support for per arena application-specified chunk allocators,
+    configured via the "arena.<i>.chunk_hooks" mallctl.
+  - Refactor huge allocation to be managed by arenas, so that arenas now
+    function as general purpose independent allocators. This is important in
+    the context of user-specified chunk allocators, aside from the scalability
+    benefits. Related new statistics:
+    + The "stats.arenas.<i>.huge.allocated", "stats.arenas.<i>.huge.nmalloc",
+      "stats.arenas.<i>.huge.ndalloc", and "stats.arenas.<i>.huge.nrequests"
+      mallctls provide high level per arena huge allocation statistics.
+    + The "arenas.nhchunks", "arenas.hchunk.<i>.size",
+      "stats.arenas.<i>.hchunks.<j>.nmalloc",
+      "stats.arenas.<i>.hchunks.<j>.ndalloc",
+      "stats.arenas.<i>.hchunks.<j>.nrequests", and
+      "stats.arenas.<i>.hchunks.<j>.curhchunks" mallctls provide per size
+      class statistics.
+  - Add the 'util' column to malloc_stats_print() output, which reports the
+    proportion of available regions that are currently in use for each small
+    size class.
+  - Add "alloc" and "free" modes for junk filling (see the "opt.junk"
+    mallctl), so that it is possible to separately enable junk filling for
+    allocation versus deallocation.
+  - Add the jemalloc-config script, which provides information about how
+    jemalloc was configured, and how to integrate it into application builds.
+  - Add metadata statistics, which are accessible via the "stats.metadata",
+    "stats.arenas.<i>.metadata.mapped", and
+    "stats.arenas.<i>.metadata.allocated" mallctls.
+  - Add the "stats.resident" mallctl, which reports the upper limit of
+    physically resident memory mapped by the allocator.
+  - Add per arena control over unused dirty page purging, via the
+    "arenas.lg_dirty_mult", "arena.<i>.lg_dirty_mult", and
+    "stats.arenas.<i>.lg_dirty_mult" mallctls.
+  - Add the "prof.gdump" mallctl, which makes it possible to toggle the gdump
+    feature on/off during program execution.
+  - Add sdallocx(), which implements sized deallocation. The primary
+    optimization over dallocx() is the removal of a metadata read, which often
+    suffers an L1 cache miss.
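The sized-deallocation entry above is easy to picture: the caller hands back the size it already tracks, letting jemalloc skip the metadata read. A minimal sketch, again assuming an unprefixed build:

    #include <jemalloc/jemalloc.h>

    int main(void) {
        size_t size = 512;
        void *p = mallocx(size, 0);
        if (p == NULL)
            return 1;
        /* Free with the known size; dallocx() would have to look the
         * size up in allocator metadata instead. */
        sdallocx(p, size, 0);
        return 0;
    }

The MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags from the explicit-tcache entry above slot into the same flags argument.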
+ - Add missing header includes in jemalloc/jemalloc.h, so that applications + only have to #include <jemalloc/jemalloc.h>. + - Add support for additional platforms: + + Bitrig + + Cygwin + + DragonFlyBSD + + iOS + + OpenBSD + + OpenRISC/or1k + + Optimizations: + - Maintain dirty runs in per arena LRUs rather than in per arena trees of + dirty-run-containing chunks. In practice this change significantly reduces + dirty page purging volume. + - Integrate whole chunks into the unused dirty page purging machinery. This + reduces the cost of repeated huge allocation/deallocation, because it + effectively introduces a cache of chunks. + - Split the arena chunk map into two separate arrays, in order to increase + cache locality for the frequently accessed bits. + - Move small run metadata out of runs, into arena chunk headers. This reduces + run fragmentation, smaller runs reduce external fragmentation for small size + classes, and packed (less uniformly aligned) metadata layout improves CPU + cache set distribution. + - Randomly distribute large allocation base pointer alignment relative to page + boundaries in order to more uniformly utilize CPU cache sets. This can be + disabled via the --disable-cache-oblivious configure option, and queried via + the "config.cache_oblivious" mallctl. + - Micro-optimize the fast paths for the public API functions. + - Refactor thread-specific data to reside in a single structure. This assures + that only a single TLS read is necessary per call into the public API. + - Implement in-place huge allocation growing and shrinking. + - Refactor rtree (radix tree for chunk lookups) to be lock-free, and make + additional optimizations that reduce maximum lookup depth to one or two + levels. This resolves what was a concurrency bottleneck for per arena huge + allocation, because a global data structure is critical for determining + which arenas own which huge allocations. + + Incompatible changes: + - Replace --enable-cc-silence with --disable-cc-silence to suppress spurious + warnings by default. + - Assure that the constness of malloc_usable_size()'s return type matches that + of the system implementation. + - Change the heap profile dump format to support per thread heap profiling, + rename pprof to jeprof, and enhance it with the --thread=<n> option. As a + result, the bundled jeprof must now be used rather than the upstream + (gperftools) pprof. + - Disable "opt.prof_final" by default, in order to avoid atexit(3), which can + internally deadlock on some platforms. + - Change the "arenas.nlruns" mallctl type from size_t to unsigned. + - Replace the "stats.arenas.<i>.bins.<j>.allocated" mallctl with + "stats.arenas.<i>.bins.<j>.curregs". + - Ignore MALLOC_CONF in set{uid,gid,cap} binaries. + - Ignore MALLOCX_ARENA(a) in dallocx(), in favor of using the + MALLOCX_TCACHE(tc) and MALLOCX_TCACHE_NONE flags to control tcache usage. + + Removed features: + - Remove the *allocm() API, which is superseded by the *allocx() API. + - Remove the --enable-dss options, and make dss non-optional on all platforms + which support sbrk(2). + - Remove the "arenas.purge" mallctl, which was obsoleted by the + "arena.<i>.purge" mallctl in 3.1.0. + - Remove the unnecessary "opt.valgrind" mallctl; jemalloc automatically + detects whether it is running inside Valgrind. + - Remove the "stats.huge.allocated", "stats.huge.nmalloc", and + "stats.huge.ndalloc" mallctls. + - Remove the --enable-mremap option. 
+ - Remove the "stats.chunks.current", "stats.chunks.total", and + "stats.chunks.high" mallctls. + + Bug fixes: + - Fix the cactive statistic to decrease (rather than increase) when active + memory decreases. This regression was first released in 3.5.0. + - Fix OOM handling in memalign() and valloc(). A variant of this bug existed + in all releases since 2.0.0, which introduced these functions. + - Fix an OOM-related regression in arena_tcache_fill_small(), which could + cause cache corruption on OOM. This regression was present in all releases + from 2.2.0 through 3.6.0. + - Fix size class overflow handling for malloc(), posix_memalign(), memalign(), + calloc(), and realloc() when profiling is enabled. + - Fix the "arena.<i>.dss" mallctl to return an error if "primary" or + "secondary" precedence is specified, but sbrk(2) is not supported. + - Fix fallback lg_floor() implementations to handle extremely large inputs. + - Ensure the default purgeable zone is after the default zone on OS X. + - Fix latent bugs in atomic_*(). + - Fix the "arena.<i>.dss" mallctl to handle read-only calls. + - Fix tls_model configuration to enable the initial-exec model when possible. + - Mark malloc_conf as a weak symbol so that the application can override it. + - Correctly detect glibc's adaptive pthread mutexes. + - Fix the --without-export configure option. + * 3.6.0 (March 31, 2014) This version contains a critical bug fix for a regression present in 3.5.0 and @@ -21,7 +738,7 @@ found in the git revision history: backtracing to be reliable. - Use dss allocation precedence for huge allocations as well as small/large allocations. - - Fix test assertion failure message formatting. This bug did not manifect on + - Fix test assertion failure message formatting. This bug did not manifest on x86_64 systems because of implementation subtleties in va_list. - Fix inconsequential test failures for hash and SFMT code. @@ -516,7 +1233,7 @@ found in the git revision history: - Make it possible for the application to manually flush a thread's cache, via the "tcache.flush" mallctl. - Base maximum dirty page count on proportion of active memory. - - Compute various addtional run-time statistics, including per size class + - Compute various additional run-time statistics, including per size class statistics for large objects. - Expose malloc_stats_print(), which can be called repeatedly by the application. diff --git a/dep/jemalloc/README b/dep/jemalloc/README index 9b268f42288..3a6e0d27250 100644 --- a/dep/jemalloc/README +++ b/dep/jemalloc/README @@ -3,12 +3,12 @@ fragmentation avoidance and scalable concurrency support. jemalloc first came into use as the FreeBSD libc allocator in 2005, and since then it has found its way into numerous applications that rely on its predictable behavior. In 2010 jemalloc development efforts broadened to include developer support features -such as heap profiling, Valgrind integration, and extensive monitoring/tuning -hooks. Modern jemalloc releases continue to be integrated back into FreeBSD, -and therefore versatility remains critical. Ongoing development efforts trend -toward making jemalloc among the best allocators for a broad range of demanding -applications, and eliminating/mitigating weaknesses that have practical -repercussions for real world applications. +such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc +releases continue to be integrated back into FreeBSD, and therefore versatility +remains critical. 
Ongoing development efforts trend toward making jemalloc +among the best allocators for a broad range of demanding applications, and +eliminating/mitigating weaknesses that have practical repercussions for real +world applications. The COPYING file contains copyright and licensing information. @@ -17,4 +17,4 @@ jemalloc. The ChangeLog file contains a brief summary of changes for each release. -URL: http://www.canonware.com/jemalloc/ +URL: http://jemalloc.net/ diff --git a/dep/jemalloc/VERSION b/dep/jemalloc/VERSION deleted file mode 100644 index dace31ba7b6..00000000000 --- a/dep/jemalloc/VERSION +++ /dev/null @@ -1 +0,0 @@ -3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340 diff --git a/dep/jemalloc/include/jemalloc/internal/arena.h b/dep/jemalloc/include/jemalloc/internal/arena.h deleted file mode 100644 index 9d000c03dec..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/arena.h +++ /dev/null @@ -1,1063 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * RUN_MAX_OVRHD indicates maximum desired run header overhead. Runs are sized - * as small as possible such that this setting is still honored, without - * violating other constraints. The goal is to make runs as small as possible - * without exceeding a per run external fragmentation threshold. - * - * We use binary fixed point math for overhead computations, where the binary - * point is implicitly RUN_BFP bits to the left. - * - * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be - * honored for some/all object sizes, since when heap profiling is enabled - * there is one pointer of header overhead per object (plus a constant). This - * constraint is relaxed (ignored) for runs that are so small that the - * per-region overhead is greater than: - * - * (RUN_MAX_OVRHD / (reg_interval << (3+RUN_BFP)) - */ -#define RUN_BFP 12 -/* \/ Implicit binary fixed point. */ -#define RUN_MAX_OVRHD 0x0000003dU -#define RUN_MAX_OVRHD_RELAX 0x00001800U - -/* Maximum number of regions in one run. */ -#define LG_RUN_MAXREGS 11 -#define RUN_MAXREGS (1U << LG_RUN_MAXREGS) - -/* - * Minimum redzone size. Redzones may be larger than this if necessary to - * preserve region alignment. - */ -#define REDZONE_MINSIZE 16 - -/* - * The minimum ratio of active:dirty pages per arena is computed as: - * - * (nactive >> opt_lg_dirty_mult) >= ndirty - * - * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times - * as many active pages as dirty pages. - */ -#define LG_DIRTY_MULT_DEFAULT 3 - -typedef struct arena_chunk_map_s arena_chunk_map_t; -typedef struct arena_chunk_s arena_chunk_t; -typedef struct arena_run_s arena_run_t; -typedef struct arena_bin_info_s arena_bin_info_t; -typedef struct arena_bin_s arena_bin_t; -typedef struct arena_s arena_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Each element of the chunk map corresponds to one page within the chunk. */ -struct arena_chunk_map_s { -#ifndef JEMALLOC_PROF - /* - * Overlay prof_ctx in order to allow it to be referenced by dead code. - * Such antics aren't warranted for per arena data structures, but - * chunk map overhead accounts for a percentage of memory, rather than - * being just a fixed cost. - */ - union { -#endif - union { - /* - * Linkage for run trees. There are two disjoint uses: - * - * 1) arena_t's runs_avail tree. 
- * 2) arena_run_t conceptually uses this linkage for in-use - * non-full runs, rather than directly embedding linkage. - */ - rb_node(arena_chunk_map_t) rb_link; - /* - * List of runs currently in purgatory. arena_chunk_purge() - * temporarily allocates runs that contain dirty pages while - * purging, so that other threads cannot use the runs while the - * purging thread is operating without the arena lock held. - */ - ql_elm(arena_chunk_map_t) ql_link; - } u; - - /* Profile counters, used for large object runs. */ - prof_ctx_t *prof_ctx; -#ifndef JEMALLOC_PROF - }; /* union { ... }; */ -#endif - - /* - * Run address (or size) and various flags are stored together. The bit - * layout looks like (assuming 32-bit system): - * - * ???????? ???????? ????nnnn nnnndula - * - * ? : Unallocated: Run address for first/last pages, unset for internal - * pages. - * Small: Run page offset. - * Large: Run size for first page, unset for trailing pages. - * n : binind for small size class, BININD_INVALID for large size class. - * d : dirty? - * u : unzeroed? - * l : large? - * a : allocated? - * - * Following are example bit patterns for the three types of runs. - * - * p : run page offset - * s : run size - * n : binind for size class; large objects set these to BININD_INVALID - * except for promoted allocations (see prof_promote) - * x : don't care - * - : 0 - * + : 1 - * [DULA] : bit set - * [dula] : bit unset - * - * Unallocated (clean): - * ssssssss ssssssss ssss++++ ++++du-a - * xxxxxxxx xxxxxxxx xxxxxxxx xxxx-Uxx - * ssssssss ssssssss ssss++++ ++++dU-a - * - * Unallocated (dirty): - * ssssssss ssssssss ssss++++ ++++D--a - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * ssssssss ssssssss ssss++++ ++++D--a - * - * Small: - * pppppppp pppppppp ppppnnnn nnnnd--A - * pppppppp pppppppp ppppnnnn nnnn---A - * pppppppp pppppppp ppppnnnn nnnnd--A - * - * Large: - * ssssssss ssssssss ssss++++ ++++D-LA - * xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx - * -------- -------- ----++++ ++++D-LA - * - * Large (sampled, size <= PAGE): - * ssssssss ssssssss ssssnnnn nnnnD-LA - * - * Large (not sampled, size == PAGE): - * ssssssss ssssssss ssss++++ ++++D-LA - */ - size_t bits; -#define CHUNK_MAP_BININD_SHIFT 4 -#define BININD_INVALID ((size_t)0xffU) -/* CHUNK_MAP_BININD_MASK == (BININD_INVALID << CHUNK_MAP_BININD_SHIFT) */ -#define CHUNK_MAP_BININD_MASK ((size_t)0xff0U) -#define CHUNK_MAP_BININD_INVALID CHUNK_MAP_BININD_MASK -#define CHUNK_MAP_FLAGS_MASK ((size_t)0xcU) -#define CHUNK_MAP_DIRTY ((size_t)0x8U) -#define CHUNK_MAP_UNZEROED ((size_t)0x4U) -#define CHUNK_MAP_LARGE ((size_t)0x2U) -#define CHUNK_MAP_ALLOCATED ((size_t)0x1U) -#define CHUNK_MAP_KEY CHUNK_MAP_ALLOCATED -}; -typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t; -typedef rb_tree(arena_chunk_map_t) arena_run_tree_t; -typedef ql_head(arena_chunk_map_t) arena_chunk_mapelms_t; - -/* Arena chunk header. */ -struct arena_chunk_s { - /* Arena that owns the chunk. */ - arena_t *arena; - - /* Linkage for tree of arena chunks that contain dirty runs. */ - rb_node(arena_chunk_t) dirty_link; - - /* Number of dirty pages. */ - size_t ndirty; - - /* Number of available runs. */ - size_t nruns_avail; - - /* - * Number of available run adjacencies that purging could coalesce. - * Clean and dirty available runs are not coalesced, which causes - * virtual memory fragmentation. The ratio of - * (nruns_avail-nruns_adjac):nruns_adjac is used for tracking this - * fragmentation. - */ - size_t nruns_adjac; - - /* - * Map of pages within chunk that keeps track of free/large/small. 
The - * first map_bias entries are omitted, since the chunk header does not - * need to be tracked in the map. This omission saves a header page - * for common chunk sizes (e.g. 4 MiB). - */ - arena_chunk_map_t map[1]; /* Dynamically sized. */ -}; -typedef rb_tree(arena_chunk_t) arena_chunk_tree_t; - -struct arena_run_s { - /* Bin this run is associated with. */ - arena_bin_t *bin; - - /* Index of next region that has never been allocated, or nregs. */ - uint32_t nextind; - - /* Number of free regions in run. */ - unsigned nfree; -}; - -/* - * Read-only information associated with each element of arena_t's bins array - * is stored separately, partly to reduce memory usage (only one copy, rather - * than one per arena), but mainly to avoid false cacheline sharing. - * - * Each run has the following layout: - * - * /--------------------\ - * | arena_run_t header | - * | ... | - * bitmap_offset | bitmap | - * | ... | - * ctx0_offset | ctx map | - * | ... | - * |--------------------| - * | redzone | - * reg0_offset | region 0 | - * | redzone | - * |--------------------| \ - * | redzone | | - * | region 1 | > reg_interval - * | redzone | / - * |--------------------| - * | ... | - * | ... | - * | ... | - * |--------------------| - * | redzone | - * | region nregs-1 | - * | redzone | - * |--------------------| - * | alignment pad? | - * \--------------------/ - * - * reg_interval has at least the same minimum alignment as reg_size; this - * preserves the alignment constraint that sa2u() depends on. Alignment pad is - * either 0 or redzone_size; it is present only if needed to align reg0_offset. - */ -struct arena_bin_info_s { - /* Size of regions in a run for this bin's size class. */ - size_t reg_size; - - /* Redzone size. */ - size_t redzone_size; - - /* Interval between regions (reg_size + (redzone_size << 1)). */ - size_t reg_interval; - - /* Total size of a run for this bin's size class. */ - size_t run_size; - - /* Total number of regions in a run for this bin's size class. */ - uint32_t nregs; - - /* - * Offset of first bitmap_t element in a run header for this bin's size - * class. - */ - uint32_t bitmap_offset; - - /* - * Metadata used to manipulate bitmaps for runs associated with this - * bin. - */ - bitmap_info_t bitmap_info; - - /* - * Offset of first (prof_ctx_t *) in a run header for this bin's size - * class, or 0 if (config_prof == false || opt_prof == false). - */ - uint32_t ctx0_offset; - - /* Offset of first region in a run for this bin's size class. */ - uint32_t reg0_offset; -}; - -struct arena_bin_s { - /* - * All operations on runcur, runs, and stats require that lock be - * locked. Run allocation/deallocation are protected by the arena lock, - * which may be acquired while holding one or more bin locks, but not - * vise versa. - */ - malloc_mutex_t lock; - - /* - * Current run being used to service allocations of this bin's size - * class. - */ - arena_run_t *runcur; - - /* - * Tree of non-full runs. This tree is used when looking for an - * existing run when runcur is no longer usable. We choose the - * non-full run that is lowest in memory; this policy tends to keep - * objects packed well, and it can also help reduce the number of - * almost-empty chunks. - */ - arena_run_tree_t runs; - - /* Bin statistics. */ - malloc_bin_stats_t stats; -}; - -struct arena_s { - /* This arena's index within the arenas array. */ - unsigned ind; - - /* - * Number of threads currently assigned to this arena. This field is - * protected by arenas_lock. 
- */ - unsigned nthreads; - - /* - * There are three classes of arena operations from a locking - * perspective: - * 1) Thread asssignment (modifies nthreads) is protected by - * arenas_lock. - * 2) Bin-related operations are protected by bin locks. - * 3) Chunk- and run-related operations are protected by this mutex. - */ - malloc_mutex_t lock; - - arena_stats_t stats; - /* - * List of tcaches for extant threads associated with this arena. - * Stats from these are merged incrementally, and at exit. - */ - ql_head(tcache_t) tcache_ql; - - uint64_t prof_accumbytes; - - dss_prec_t dss_prec; - - /* Tree of dirty-page-containing chunks this arena manages. */ - arena_chunk_tree_t chunks_dirty; - - /* - * In order to avoid rapid chunk allocation/deallocation when an arena - * oscillates right on the cusp of needing a new chunk, cache the most - * recently freed chunk. The spare is left in the arena's chunk trees - * until it is deleted. - * - * There is one spare chunk per arena, rather than one spare total, in - * order to avoid interactions between multiple threads that could make - * a single spare inadequate. - */ - arena_chunk_t *spare; - - /* Number of pages in active runs. */ - size_t nactive; - - /* - * Current count of pages within unused runs that are potentially - * dirty, and for which madvise(... MADV_DONTNEED) has not been called. - * By tracking this, we can institute a limit on how much dirty unused - * memory is mapped for each arena. - */ - size_t ndirty; - - /* - * Approximate number of pages being purged. It is possible for - * multiple threads to purge dirty pages concurrently, and they use - * npurgatory to indicate the total number of pages all threads are - * attempting to purge. - */ - size_t npurgatory; - - /* - * Size/address-ordered trees of this arena's available runs. The trees - * are used for first-best-fit run allocation. - */ - arena_avail_tree_t runs_avail; - - /* bins is used to store trees of free regions. */ - arena_bin_t bins[NBINS]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern ssize_t opt_lg_dirty_mult; -/* - * small_size2bin is a compact lookup table that rounds request sizes up to - * size classes. In order to reduce cache footprint, the table is compressed, - * and all accesses are via the SMALL_SIZE2BIN macro. - */ -extern uint8_t const small_size2bin[]; -#define SMALL_SIZE2BIN(s) (small_size2bin[(s-1) >> LG_TINY_MIN]) - -extern arena_bin_info_t arena_bin_info[NBINS]; - -/* Number of large size classes. 
- */
-#define nlclasses (chunk_npages - map_bias)
-
-void arena_purge_all(arena_t *arena);
-void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
-    size_t binind, uint64_t prof_accumbytes);
-void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
-    bool zero);
-#ifdef JEMALLOC_JET
-typedef void (arena_redzone_corruption_t)(void *, size_t, bool, size_t,
-    uint8_t);
-extern arena_redzone_corruption_t *arena_redzone_corruption;
-typedef void (arena_dalloc_junk_small_t)(void *, arena_bin_info_t *);
-extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
-#else
-void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
-#endif
-void arena_quarantine_junk_small(void *ptr, size_t usize);
-void *arena_malloc_small(arena_t *arena, size_t size, bool zero);
-void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
-void *arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero);
-void arena_prof_promoted(const void *ptr, size_t size);
-void arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    arena_chunk_map_t *mapelm);
-void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind, arena_chunk_map_t *mapelm);
-void arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t pageind);
-#ifdef JEMALLOC_JET
-typedef void (arena_dalloc_junk_large_t)(void *, size_t);
-extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
-#endif
-void arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk,
-    void *ptr);
-void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr);
-#ifdef JEMALLOC_JET
-typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
-extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
-#endif
-bool arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size,
-    size_t extra, bool zero);
-void *arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
-    bool try_tcache_dalloc);
-dss_prec_t arena_dss_prec_get(arena_t *arena);
-void arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
-void arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
-    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
-    malloc_large_stats_t *lstats);
-bool arena_new(arena_t *arena, unsigned ind);
-void arena_boot(void);
-void arena_prefork(arena_t *arena);
-void arena_postfork_parent(arena_t *arena);
-void arena_postfork_child(arena_t *arena);
-
-#endif /* JEMALLOC_H_EXTERNS */
-/******************************************************************************/
-#ifdef JEMALLOC_H_INLINES
-
-#ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_t *arena_mapp_get(arena_chunk_t *chunk, size_t pageind);
-size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbitsp_read(size_t *mapbitsp);
-size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
-    size_t pageind);
-size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t
pageind); -void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits); -void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size); -void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, - size_t size, size_t flags); -void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - size_t binind); -void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, - size_t runind, size_t binind, size_t flags); -void arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, - size_t unzeroed); -bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes); -bool arena_prof_accum(arena_t *arena, uint64_t accumbytes); -size_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits); -size_t arena_bin_index(arena_t *arena, arena_bin_t *bin); -unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, - const void *ptr); -prof_ctx_t *arena_prof_ctx_get(const void *ptr); -void arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx); -void *arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache); -size_t arena_salloc(const void *ptr, bool demote); -void arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, - bool try_tcache); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_)) -# ifdef JEMALLOC_ARENA_INLINE_A -JEMALLOC_ALWAYS_INLINE arena_chunk_map_t * -arena_mapp_get(arena_chunk_t *chunk, size_t pageind) -{ - - assert(pageind >= map_bias); - assert(pageind < chunk_npages); - - return (&chunk->map[pageind-map_bias]); -} - -JEMALLOC_ALWAYS_INLINE size_t * -arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind) -{ - - return (&arena_mapp_get(chunk, pageind)->bits); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbitsp_read(size_t *mapbitsp) -{ - - return (*mapbitsp); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_get(arena_chunk_t *chunk, size_t pageind) -{ - - return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind))); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - return (mapbits & ~PAGE_MASK); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)); - return (mapbits & ~PAGE_MASK); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == - CHUNK_MAP_ALLOCATED); - return (mapbits >> LG_PAGE); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - size_t binind; - - mapbits = arena_mapbits_get(chunk, pageind); - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - assert(binind < NBINS || binind == BININD_INVALID); - return (binind); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - 
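-    /* Mask out just the dirty bit from the packed per-page map word. */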
return (mapbits & CHUNK_MAP_DIRTY); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_UNZEROED); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_LARGE); -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind) -{ - size_t mapbits; - - mapbits = arena_mapbits_get(chunk, pageind); - return (mapbits & CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits) -{ - - *mapbitsp = mapbits; -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - - assert((size & PAGE_MASK) == 0); - assert((flags & ~CHUNK_MAP_FLAGS_MASK) == 0); - assert((flags & (CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == flags); - arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind, - size_t size) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert((size & PAGE_MASK) == 0); - assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0); - arena_mapbitsp_write(mapbitsp, size | (mapbits & PAGE_MASK)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size, - size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - size_t unzeroed; - - assert((size & PAGE_MASK) == 0); - assert((flags & CHUNK_MAP_DIRTY) == flags); - unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. */ - arena_mapbitsp_write(mapbitsp, size | CHUNK_MAP_BININD_INVALID | flags - | unzeroed | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind, - size_t binind) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - assert(binind <= BININD_INVALID); - assert(arena_mapbits_large_size_get(chunk, pageind) == PAGE); - arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_BININD_MASK) | - (binind << CHUNK_MAP_BININD_SHIFT)); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind, - size_t binind, size_t flags) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - size_t unzeroed; - - assert(binind < BININD_INVALID); - assert(pageind - runind >= map_bias); - assert((flags & CHUNK_MAP_DIRTY) == flags); - unzeroed = mapbits & CHUNK_MAP_UNZEROED; /* Preserve unzeroed. 
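- * (the flag records whether a page may still hold nonzero data; run
- * allocation consults it when it must return zeroed memory)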
*/ - arena_mapbitsp_write(mapbitsp, (runind << LG_PAGE) | (binind << - CHUNK_MAP_BININD_SHIFT) | flags | unzeroed | CHUNK_MAP_ALLOCATED); -} - -JEMALLOC_ALWAYS_INLINE void -arena_mapbits_unzeroed_set(arena_chunk_t *chunk, size_t pageind, - size_t unzeroed) -{ - size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind); - size_t mapbits = arena_mapbitsp_read(mapbitsp); - - arena_mapbitsp_write(mapbitsp, (mapbits & ~CHUNK_MAP_UNZEROED) | - unzeroed); -} - -JEMALLOC_INLINE bool -arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - assert(prof_interval != 0); - - arena->prof_accumbytes += accumbytes; - if (arena->prof_accumbytes >= prof_interval) { - arena->prof_accumbytes -= prof_interval; - return (true); - } - return (false); -} - -JEMALLOC_INLINE bool -arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (prof_interval == 0) - return (false); - return (arena_prof_accum_impl(arena, accumbytes)); -} - -JEMALLOC_INLINE bool -arena_prof_accum(arena_t *arena, uint64_t accumbytes) -{ - - cassert(config_prof); - - if (prof_interval == 0) - return (false); - - { - bool ret; - - malloc_mutex_lock(&arena->lock); - ret = arena_prof_accum_impl(arena, accumbytes); - malloc_mutex_unlock(&arena->lock); - return (ret); - } -} - -JEMALLOC_ALWAYS_INLINE size_t -arena_ptr_small_binind_get(const void *ptr, size_t mapbits) -{ - size_t binind; - - binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT; - - if (config_debug) { - arena_chunk_t *chunk; - arena_t *arena; - size_t pageind; - size_t actual_mapbits; - arena_run_t *run; - arena_bin_t *bin; - size_t actual_binind; - arena_bin_info_t *bin_info; - - assert(binind != BININD_INVALID); - assert(binind < NBINS); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - actual_mapbits = arena_mapbits_get(chunk, pageind); - assert(mapbits == actual_mapbits); - assert(arena_mapbits_large_get(chunk, pageind) == 0); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - (actual_mapbits >> LG_PAGE)) << LG_PAGE)); - bin = run->bin; - actual_binind = bin - arena->bins; - assert(binind == actual_binind); - bin_info = &arena_bin_info[actual_binind]; - assert(((uintptr_t)ptr - ((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset)) % bin_info->reg_interval - == 0); - } - - return (binind); -} -# endif /* JEMALLOC_ARENA_INLINE_A */ - -# ifdef JEMALLOC_ARENA_INLINE_B -JEMALLOC_INLINE size_t -arena_bin_index(arena_t *arena, arena_bin_t *bin) -{ - size_t binind = bin - arena->bins; - assert(binind < NBINS); - return (binind); -} - -JEMALLOC_INLINE unsigned -arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr) -{ - unsigned shift, diff, regind; - size_t interval; - - /* - * Freeing a pointer lower than region zero can cause assertion - * failure. - */ - assert((uintptr_t)ptr >= (uintptr_t)run + - (uintptr_t)bin_info->reg0_offset); - - /* - * Avoid doing division with a variable divisor if possible. Using - * actual division here can reduce allocator throughput by over 20%! - */ - diff = (unsigned)((uintptr_t)ptr - (uintptr_t)run - - bin_info->reg0_offset); - - /* Rescale (factor powers of 2 out of the numerator and denominator). */ - interval = bin_info->reg_interval; - shift = ffs(interval) - 1; - diff >>= shift; - interval >>= shift; - - if (interval == 1) { - /* The divisor was a power of 2. 
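- * (the rescale above already shifted the whole divisor out of diff,
- * so diff now equals the region index)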
*/ - regind = diff; - } else { - /* - * To divide by a number D that is not a power of two we - * multiply by (2^21 / D) and then right shift by 21 positions. - * - * X / D - * - * becomes - * - * (X * interval_invs[D - 3]) >> SIZE_INV_SHIFT - * - * We can omit the first three elements, because we never - * divide by 0, and 1 and 2 are both powers of two, which are - * handled above. - */ -#define SIZE_INV_SHIFT ((sizeof(unsigned) << 3) - LG_RUN_MAXREGS) -#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s)) + 1) - static const unsigned interval_invs[] = { - SIZE_INV(3), - SIZE_INV(4), SIZE_INV(5), SIZE_INV(6), SIZE_INV(7), - SIZE_INV(8), SIZE_INV(9), SIZE_INV(10), SIZE_INV(11), - SIZE_INV(12), SIZE_INV(13), SIZE_INV(14), SIZE_INV(15), - SIZE_INV(16), SIZE_INV(17), SIZE_INV(18), SIZE_INV(19), - SIZE_INV(20), SIZE_INV(21), SIZE_INV(22), SIZE_INV(23), - SIZE_INV(24), SIZE_INV(25), SIZE_INV(26), SIZE_INV(27), - SIZE_INV(28), SIZE_INV(29), SIZE_INV(30), SIZE_INV(31) - }; - - if (interval <= ((sizeof(interval_invs) / sizeof(unsigned)) + - 2)) { - regind = (diff * interval_invs[interval - 3]) >> - SIZE_INV_SHIFT; - } else - regind = diff / interval; -#undef SIZE_INV -#undef SIZE_INV_SHIFT - } - assert(diff == regind * interval); - assert(regind < bin_info->nregs); - - return (regind); -} - -JEMALLOC_INLINE prof_ctx_t * -arena_prof_ctx_get(const void *ptr) -{ - prof_ctx_t *ret; - arena_chunk_t *chunk; - size_t pageind, mapbits; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert((mapbits & CHUNK_MAP_ALLOCATED) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { - if (prof_promote) - ret = (prof_ctx_t *)(uintptr_t)1U; - else { - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << - LG_PAGE)); - size_t binind = arena_ptr_small_binind_get(ptr, - mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind; - - regind = arena_run_regind(run, bin_info, ptr); - ret = *(prof_ctx_t **)((uintptr_t)run + - bin_info->ctx0_offset + (regind * - sizeof(prof_ctx_t *))); - } - } else - ret = arena_mapp_get(chunk, pageind)->prof_ctx; - - return (ret); -} - -JEMALLOC_INLINE void -arena_prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx) -{ - arena_chunk_t *chunk; - size_t pageind; - - cassert(config_prof); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - - if (usize > SMALL_MAXCLASS || (prof_promote && - ((uintptr_t)ctx != (uintptr_t)1U || arena_mapbits_large_get(chunk, - pageind) != 0))) { - assert(arena_mapbits_large_get(chunk, pageind) != 0); - arena_mapp_get(chunk, pageind)->prof_ctx = ctx; - } else { - assert(arena_mapbits_large_get(chunk, pageind) == 0); - if (prof_promote == false) { - size_t mapbits = arena_mapbits_get(chunk, pageind); - arena_run_t *run = (arena_run_t *)((uintptr_t)chunk + - (uintptr_t)((pageind - (mapbits >> LG_PAGE)) << - LG_PAGE)); - size_t binind; - arena_bin_info_t *bin_info; - unsigned regind; - - binind = arena_ptr_small_binind_get(ptr, mapbits); - bin_info = &arena_bin_info[binind]; - regind = arena_run_regind(run, bin_info, ptr); - - *((prof_ctx_t **)((uintptr_t)run + - bin_info->ctx0_offset + (regind * sizeof(prof_ctx_t 
- *)))) = ctx; - } - } -} - -JEMALLOC_ALWAYS_INLINE void * -arena_malloc(arena_t *arena, size_t size, bool zero, bool try_tcache) -{ - tcache_t *tcache; - - assert(size != 0); - assert(size <= arena_maxclass); - - if (size <= SMALL_MAXCLASS) { - if (try_tcache && (tcache = tcache_get(true)) != NULL) - return (tcache_alloc_small(tcache, size, zero)); - else { - return (arena_malloc_small(choose_arena(arena), size, - zero)); - } - } else { - /* - * Initialize tcache after checking size in order to avoid - * infinite recursion during tcache initialization. - */ - if (try_tcache && size <= tcache_maxclass && (tcache = - tcache_get(true)) != NULL) - return (tcache_alloc_large(tcache, size, zero)); - else { - return (arena_malloc_large(choose_arena(arena), size, - zero)); - } - } -} - -/* Return the size of the allocation pointed to by ptr. */ -JEMALLOC_ALWAYS_INLINE size_t -arena_salloc(const void *ptr, bool demote) -{ - size_t ret; - arena_chunk_t *chunk; - size_t pageind, binind; - - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - binind = arena_mapbits_binind_get(chunk, pageind); - if (binind == BININD_INVALID || (config_prof && demote == false && - prof_promote && arena_mapbits_large_get(chunk, pageind) != 0)) { - /* - * Large allocation. In the common case (demote == true), and - * as this is an inline function, most callers will only end up - * looking at binind to determine that ptr is a small - * allocation. - */ - assert(((uintptr_t)ptr & PAGE_MASK) == 0); - ret = arena_mapbits_large_size_get(chunk, pageind); - assert(ret != 0); - assert(pageind + (ret>>LG_PAGE) <= chunk_npages); - assert(ret == PAGE || arena_mapbits_large_size_get(chunk, - pageind+(ret>>LG_PAGE)-1) == 0); - assert(binind == arena_mapbits_binind_get(chunk, - pageind+(ret>>LG_PAGE)-1)); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+(ret>>LG_PAGE)-1)); - } else { - /* - * Small allocation (possibly promoted to a large object due to - * prof_promote). - */ - assert(arena_mapbits_large_get(chunk, pageind) != 0 || - arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) == binind); - ret = arena_bin_info[binind].reg_size; - } - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr, bool try_tcache) -{ - size_t pageind, mapbits; - tcache_t *tcache; - - assert(arena != NULL); - assert(chunk->arena == arena); - assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - mapbits = arena_mapbits_get(chunk, pageind); - assert(arena_mapbits_allocated_get(chunk, pageind) != 0); - if ((mapbits & CHUNK_MAP_LARGE) == 0) { - /* Small allocation. 
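- * (freed into the thread cache when one is available, else released
- * directly back to the owning bin)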
*/ - if (try_tcache && (tcache = tcache_get(false)) != NULL) { - size_t binind; - - binind = arena_ptr_small_binind_get(ptr, mapbits); - tcache_dalloc_small(tcache, ptr, binind); - } else - arena_dalloc_small(arena, chunk, ptr, pageind); - } else { - size_t size = arena_mapbits_large_size_get(chunk, pageind); - - assert(((uintptr_t)ptr & PAGE_MASK) == 0); - - if (try_tcache && size <= tcache_maxclass && (tcache = - tcache_get(false)) != NULL) { - tcache_dalloc_large(tcache, ptr, size); - } else - arena_dalloc_large(arena, chunk, ptr); - } -} -# endif /* JEMALLOC_ARENA_INLINE_B */ -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/dep/jemalloc/include/jemalloc/internal/arena_externs.h b/dep/jemalloc/include/jemalloc/internal/arena_externs.h new file mode 100644 index 00000000000..af16d158852 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/arena_externs.h @@ -0,0 +1,97 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H +#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H + +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" + +extern ssize_t opt_dirty_decay_ms; +extern ssize_t opt_muzzy_decay_ms; + +extern const arena_bin_info_t arena_bin_info[NBINS]; + +extern percpu_arena_mode_t opt_percpu_arena; +extern const char *percpu_arena_mode_names[]; + +extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS]; +extern malloc_mutex_t arenas_lock; + +void arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, + szind_t szind, uint64_t nrequests); +void arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, + size_t size); +void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, + unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms, + ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy); +void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats); +void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent); +#ifdef JEMALLOC_JET +size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr); +#endif +extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, + size_t usize, size_t alignment, bool *zero); +void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, + extent_t *extent); +void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, + extent_t *extent, size_t oldsize); +void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, + extent_t *extent, size_t oldsize); +ssize_t arena_dirty_decay_ms_get(arena_t *arena); +bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); +ssize_t arena_muzzy_decay_ms_get(arena_t *arena); +bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms); +void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all); +void arena_reset(tsd_t *tsd, arena_t *arena); +void arena_destroy(tsd_t *tsd, arena_t *arena); +void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes); +void arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, + bool zero); + 
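For orientation, the decay knobs declared above surface to embedders through jemalloc's public mallctl interface. A minimal sketch, assuming jemalloc 5's documented "arena.<i>.dirty_decay_ms" control and an already-valid arena index (neither is part of this header):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    /* Hypothetical helper: set one arena's dirty-page decay time to 5 s. */
    static int set_dirty_decay(unsigned arena_ind) {
        ssize_t decay_ms = 5 * 1000;
        char ctl[64];
        snprintf(ctl, sizeof(ctl), "arena.%u.dirty_decay_ms", arena_ind);
        return mallctl(ctl, NULL, NULL, &decay_ms, sizeof(decay_ms));
    }

A negative value disables decay-based purging, which is why opt_dirty_decay_ms and opt_muzzy_decay_ms above are ssize_t rather than size_t.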
+typedef void (arena_dalloc_junk_small_t)(void *, const arena_bin_info_t *); +extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small; + +void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, + szind_t ind, bool zero); +void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache); +void arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize); +void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + bool slow_path); +void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, + extent_t *extent, void *ptr); +void arena_dalloc_small(tsdn_t *tsdn, void *ptr); +bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero); +void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache); +dss_prec_t arena_dss_prec_get(arena_t *arena); +bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec); +ssize_t arena_dirty_decay_ms_default_get(void); +bool arena_dirty_decay_ms_default_set(ssize_t decay_ms); +ssize_t arena_muzzy_decay_ms_default_get(void); +bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms); +unsigned arena_nthreads_get(arena_t *arena, bool internal); +void arena_nthreads_inc(arena_t *arena, bool internal); +void arena_nthreads_dec(arena_t *arena, bool internal); +size_t arena_extent_sn_next(arena_t *arena); +arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); +void arena_boot(void); +void arena_prefork0(tsdn_t *tsdn, arena_t *arena); +void arena_prefork1(tsdn_t *tsdn, arena_t *arena); +void arena_prefork2(tsdn_t *tsdn, arena_t *arena); +void arena_prefork3(tsdn_t *tsdn, arena_t *arena); +void arena_prefork4(tsdn_t *tsdn, arena_t *arena); +void arena_prefork5(tsdn_t *tsdn, arena_t *arena); +void arena_prefork6(tsdn_t *tsdn, arena_t *arena); +void arena_prefork7(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena); +void arena_postfork_child(tsdn_t *tsdn, arena_t *arena); + +#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/dep/jemalloc/include/jemalloc/internal/arena_inlines_a.h new file mode 100644 index 00000000000..da5877060a8 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/arena_inlines_a.h @@ -0,0 +1,57 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_A_H +#define JEMALLOC_INTERNAL_ARENA_INLINES_A_H + +static inline unsigned +arena_ind_get(const arena_t *arena) { + return base_ind_get(arena->base); +} + +static inline void +arena_internal_add(arena_t *arena, size_t size) { + atomic_fetch_add_zu(&arena->stats.internal, size, ATOMIC_RELAXED); +} + +static inline void +arena_internal_sub(arena_t *arena, size_t size) { + atomic_fetch_sub_zu(&arena->stats.internal, size, ATOMIC_RELAXED); +} + +static inline size_t +arena_internal_get(arena_t *arena) { + return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED); +} + +static inline bool +arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) { + cassert(config_prof); + + if (likely(prof_interval == 0)) { + return false; + } + + return prof_accum_add(tsdn, &arena->prof_accum, accumbytes); +} + +static inline void +percpu_arena_update(tsd_t *tsd, unsigned cpu) { + assert(have_percpu_arena); + arena_t *oldarena = tsd_arena_get(tsd); + assert(oldarena != NULL); + unsigned oldind = arena_ind_get(oldarena); + + if (oldind != cpu) { + unsigned newind = cpu; + 
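+        /* The thread now runs on a different CPU; move its arena binding. */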
arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true); + assert(newarena != NULL); + + /* Set new arena/tcache associations. */ + arena_migrate(tsd, oldind, newind); + tcache_t *tcache = tcache_get(tsd); + if (tcache != NULL) { + tcache_arena_reassociate(tsd_tsdn(tsd), tcache, + newarena); + } + } +} + +#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/dep/jemalloc/include/jemalloc/internal/arena_inlines_b.h new file mode 100644 index 00000000000..003abe116fb --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/arena_inlines_b.h @@ -0,0 +1,361 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H +#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" + +static inline szind_t +arena_bin_index(arena_t *arena, arena_bin_t *bin) { + szind_t binind = (szind_t)(bin - arena->bins); + assert(binind < NBINS); + return binind; +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { + cassert(config_prof); + assert(ptr != NULL); + + /* Static check. */ + if (alloc_ctx == NULL) { + const extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(!extent_slab_get(extent))) { + return large_prof_tctx_get(tsdn, extent); + } + } else { + if (unlikely(!alloc_ctx->slab)) { + return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr)); + } + } + return (prof_tctx_t *)(uintptr_t)1U; +} + +JEMALLOC_ALWAYS_INLINE void +arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + /* Static check. 
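+ * (when the caller passes a constant NULL or non-NULL alloc_ctx, the
+ * compiler can fold one of the two branches below away)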
*/ + if (alloc_ctx == NULL) { + extent_t *extent = iealloc(tsdn, ptr); + if (unlikely(!extent_slab_get(extent))) { + large_prof_tctx_set(tsdn, extent, tctx); + } + } else { + if (unlikely(!alloc_ctx->slab)) { + large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx); + } + } +} + +static inline void +arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + extent_t *extent = iealloc(tsdn, ptr); + assert(!extent_slab_get(extent)); + + large_prof_tctx_reset(tsdn, extent); +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) { + tsd_t *tsd; + ticker_t *decay_ticker; + + if (unlikely(tsdn_null(tsdn))) { + return; + } + tsd = tsdn_tsd(tsdn); + decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena)); + if (unlikely(decay_ticker == NULL)) { + return; + } + if (unlikely(ticker_ticks(decay_ticker, nticks))) { + arena_decay(tsdn, arena, false, false); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_decay_tick(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx); + + arena_decay_ticks(tsdn, arena, 1); +} + +JEMALLOC_ALWAYS_INLINE void * +arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero, + tcache_t *tcache, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(size != 0); + + if (likely(tcache != NULL)) { + if (likely(size <= SMALL_MAXCLASS)) { + return tcache_alloc_small(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path); + } + if (likely(size <= tcache_maxclass)) { + return tcache_alloc_large(tsdn_tsd(tsdn), arena, + tcache, size, ind, zero, slow_path); + } + /* (size > tcache_maxclass) case falls through. */ + assert(size > tcache_maxclass); + } + + return arena_malloc_hard(tsdn, arena, size, ind, zero); +} + +JEMALLOC_ALWAYS_INLINE arena_t * +arena_aalloc(tsdn_t *tsdn, const void *ptr) { + return extent_arena_get(iealloc(tsdn, ptr)); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_salloc(tsdn_t *tsdn, const void *ptr) { + assert(ptr != NULL); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); + assert(szind != NSIZES); + + return sz_index2size(szind); +} + +JEMALLOC_ALWAYS_INLINE size_t +arena_vsalloc(tsdn_t *tsdn, const void *ptr) { + /* + * Return 0 if ptr is not within an extent managed by jemalloc. This + * function has two extra costs relative to isalloc(): + * - The rtree calls cannot claim to be dependent lookups, which induces + * rtree lookup load dependencies. + * - The lookup may fail, so there is an extra branch to check for + * failure. + */ + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + extent_t *extent; + szind_t szind; + if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, false, &extent, &szind)) { + return 0; + } + + if (extent == NULL) { + return 0; + } + assert(extent_state_get(extent) == extent_state_active); + /* Only slab members should be looked up via interior pointers. 
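+ * (a non-slab extent may only be queried by its base address)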
*/ + assert(extent_addr_get(extent) == ptr || extent_slab_get(extent)); + + assert(szind != NSIZES); + + return sz_index2size(szind); +} + +static inline void +arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) { + assert(ptr != NULL); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + szind_t szind; + bool slab; + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + true, &szind, &slab); + + if (config_debug) { + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(szind < NSIZES); + assert(slab == extent_slab_get(extent)); + } + + if (likely(slab)) { + /* Small allocation. */ + arena_dalloc_small(tsdn, ptr); + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + alloc_ctx_t *alloc_ctx, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(ptr != NULL); + + if (unlikely(tcache == NULL)) { + arena_dalloc_no_tcache(tsdn, ptr); + return; + } + + szind_t szind; + bool slab; + rtree_ctx_t *rtree_ctx; + if (alloc_ctx != NULL) { + szind = alloc_ctx->szind; + slab = alloc_ctx->slab; + assert(szind != NSIZES); + } else { + rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &szind, &slab); + } + + if (config_debug) { + rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(szind < NSIZES); + assert(slab == extent_slab_get(extent)); + } + + if (likely(slab)) { + /* Small allocation. */ + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, + slow_path); + } else { + if (szind < nhbins) { + if (config_prof && unlikely(szind < NBINS)) { + arena_dalloc_promoted(tsdn, ptr, tcache, + slow_path); + } else { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + szind, slow_path); + } + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } + } +} + +static inline void +arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) { + assert(ptr != NULL); + assert(size <= LARGE_MAXCLASS); + + szind_t szind; + bool slab; + if (!config_prof || !opt_prof) { + /* + * There is no risk of being confused by a promoted sampled + * object, so base szind and slab on the given size. + */ + szind = sz_size2index(size); + slab = (szind < NBINS); + } + + if ((config_prof && opt_prof) || config_debug) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &szind, &slab); + + assert(szind == sz_size2index(size)); + assert((config_prof && opt_prof) || slab == (szind < NBINS)); + + if (config_debug) { + extent_t *extent = rtree_extent_read(tsdn, + &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(slab == extent_slab_get(extent)); + } + } + + if (likely(slab)) { + /* Small allocation. 
*/ + arena_dalloc_small(tsdn, ptr); + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } +} + +JEMALLOC_ALWAYS_INLINE void +arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + alloc_ctx_t *alloc_ctx, bool slow_path) { + assert(!tsdn_null(tsdn) || tcache == NULL); + assert(ptr != NULL); + assert(size <= LARGE_MAXCLASS); + + if (unlikely(tcache == NULL)) { + arena_sdalloc_no_tcache(tsdn, ptr, size); + return; + } + + szind_t szind; + bool slab; + UNUSED alloc_ctx_t local_ctx; + if (config_prof && opt_prof) { + if (alloc_ctx == NULL) { + /* Uncommon case and should be a static check. */ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &local_ctx.szind, + &local_ctx.slab); + assert(local_ctx.szind == sz_size2index(size)); + alloc_ctx = &local_ctx; + } + slab = alloc_ctx->slab; + szind = alloc_ctx->szind; + } else { + /* + * There is no risk of being confused by a promoted sampled + * object, so base szind and slab on the given size. + */ + szind = sz_size2index(size); + slab = (szind < NBINS); + } + + if (config_debug) { + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn)); + rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &szind, &slab); + extent_t *extent = rtree_extent_read(tsdn, + &extents_rtree, rtree_ctx, (uintptr_t)ptr, true); + assert(szind == extent_szind_get(extent)); + assert(slab == extent_slab_get(extent)); + } + + if (likely(slab)) { + /* Small allocation. */ + tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind, + slow_path); + } else { + if (szind < nhbins) { + if (config_prof && unlikely(szind < NBINS)) { + arena_dalloc_promoted(tsdn, ptr, tcache, + slow_path); + } else { + tcache_dalloc_large(tsdn_tsd(tsdn), + tcache, ptr, szind, slow_path); + } + } else { + extent_t *extent = iealloc(tsdn, ptr); + large_dalloc(tsdn, extent); + } + } +} + +#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/arena_structs_a.h b/dep/jemalloc/include/jemalloc/internal/arena_structs_a.h new file mode 100644 index 00000000000..46aa77c884b --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/arena_structs_a.h @@ -0,0 +1,11 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H +#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H + +#include "jemalloc/internal/bitmap.h" + +struct arena_slab_data_s { + /* Per region allocated/deallocated bitmap. 
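+ * (bit i tracks region i; the array is sized for SLAB_MAXREGS, the
+ * most regions any slab can hold)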
*/ + bitmap_t bitmap[BITMAP_GROUPS_MAX]; +}; + +#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/arena_structs_b.h b/dep/jemalloc/include/jemalloc/internal/arena_structs_b.h new file mode 100644 index 00000000000..d1fffec1936 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/arena_structs_b.h @@ -0,0 +1,284 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H +#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/smoothstep.h" +#include "jemalloc/internal/stats.h" +#include "jemalloc/internal/ticker.h" + +/* + * Read-only information associated with each element of arena_t's bins array + * is stored separately, partly to reduce memory usage (only one copy, rather + * than one per arena), but mainly to avoid false cacheline sharing. + * + * Each slab has the following layout: + * + * /--------------------\ + * | region 0 | + * |--------------------| + * | region 1 | + * |--------------------| + * | ... | + * | ... | + * | ... | + * |--------------------| + * | region nregs-1 | + * \--------------------/ + */ +struct arena_bin_info_s { + /* Size of regions in a slab for this bin's size class. */ + size_t reg_size; + + /* Total size of a slab for this bin's size class. */ + size_t slab_size; + + /* Total number of regions in a slab for this bin's size class. */ + uint32_t nregs; + + /* + * Metadata used to manipulate bitmaps for slabs associated with this + * bin. + */ + bitmap_info_t bitmap_info; +}; + +struct arena_decay_s { + /* Synchronizes all non-atomic fields. */ + malloc_mutex_t mtx; + /* + * True if a thread is currently purging the extents associated with + * this decay structure. + */ + bool purging; + /* + * Approximate time in milliseconds from the creation of a set of unused + * dirty pages until an equivalent set of unused dirty pages is purged + * and/or reused. + */ + atomic_zd_t time_ms; + /* time / SMOOTHSTEP_NSTEPS. */ + nstime_t interval; + /* + * Time at which the current decay interval logically started. We do + * not actually advance to a new epoch until sometime after it starts + * because of scheduling and computation delays, and it is even possible + * to completely skip epochs. In all cases, during epoch advancement we + * merge all relevant activity into the most recently recorded epoch. + */ + nstime_t epoch; + /* Deadline randomness generator. */ + uint64_t jitter_state; + /* + * Deadline for current epoch. This is the sum of interval and per + * epoch jitter which is a uniform random variable in [0..interval). + * Epochs always advance by precise multiples of interval, but we + * randomize the deadline to reduce the likelihood of arenas purging in + * lockstep. + */ + nstime_t deadline; + /* + * Number of unpurged pages at beginning of current epoch. During epoch + * advancement we use the delta between arena->decay_*.nunpurged and + * extents_npages_get(&arena->extents_*) to determine how many dirty + * pages, if any, were generated. + */ + size_t nunpurged; + /* + * Trailing log of how many unused dirty pages were generated during + * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last + * element is the most recent epoch. 
Corresponding epoch times are + * relative to epoch. + */ + size_t backlog[SMOOTHSTEP_NSTEPS]; + + /* + * Pointer to associated stats. These stats are embedded directly in + * the arena's stats due to how stats structures are shared between the + * arena and ctl code. + * + * Synchronization: Same as associated arena's stats field. */ + decay_stats_t *stats; + /* Peak number of pages in associated extents. Used for debug only. */ + uint64_t ceil_npages; +}; + +struct arena_bin_s { + /* All operations on arena_bin_t fields require lock ownership. */ + malloc_mutex_t lock; + + /* + * Current slab being used to service allocations of this bin's size + * class. slabcur is independent of slabs_{nonfull,full}; whenever + * slabcur is reassigned, the previous slab must be deallocated or + * inserted into slabs_{nonfull,full}. + */ + extent_t *slabcur; + + /* + * Heap of non-full slabs. This heap is used to assure that new + * allocations come from the non-full slab that is oldest/lowest in + * memory. + */ + extent_heap_t slabs_nonfull; + + /* List used to track full slabs. */ + extent_list_t slabs_full; + + /* Bin statistics. */ + malloc_bin_stats_t stats; +}; + +struct arena_s { + /* + * Number of threads currently assigned to this arena. Each thread has + * two distinct assignments, one for application-serving allocation, and + * the other for internal metadata allocation. Internal metadata must + * not be allocated from arenas explicitly created via the arenas.create + * mallctl, because the arena.<i>.reset mallctl indiscriminately + * discards all allocations for the affected arena. + * + * 0: Application allocation. + * 1: Internal metadata allocation. + * + * Synchronization: atomic. + */ + atomic_u_t nthreads[2]; + + /* + * When percpu_arena is enabled, to amortize the cost of reading / + * updating the current CPU id, track the most recent thread accessing + * this arena, and only read CPU if there is a mismatch. + */ + tsdn_t *last_thd; + + /* Synchronization: internal. */ + arena_stats_t stats; + + /* + * List of tcaches for extant threads associated with this arena. + * Stats from these are merged incrementally, and at exit if + * opt_stats_print is enabled. + * + * Synchronization: tcache_ql_mtx. + */ + ql_head(tcache_t) tcache_ql; + malloc_mutex_t tcache_ql_mtx; + + /* Synchronization: internal. */ + prof_accum_t prof_accum; + uint64_t prof_accumbytes; + + /* + * PRNG state for cache index randomization of large allocation base + * pointers. + * + * Synchronization: atomic. + */ + atomic_zu_t offset_state; + + /* + * Extent serial number generator state. + * + * Synchronization: atomic. + */ + atomic_zu_t extent_sn_next; + + /* + * Represents a dss_prec_t, but atomically. + * + * Synchronization: atomic. + */ + atomic_u_t dss_prec; + + /* + * Number of pages in active extents. + * + * Synchronization: atomic. + */ + atomic_zu_t nactive; + + /* + * Extant large allocations. + * + * Synchronization: large_mtx. + */ + extent_list_t large; + /* Synchronizes all large allocation/update/deallocation. */ + malloc_mutex_t large_mtx; + + /* + * Collections of extents that were previously allocated. These are + * used when allocating extents, in an attempt to re-use address space. + * + * Synchronization: internal. + */ + extents_t extents_dirty; + extents_t extents_muzzy; + extents_t extents_retained; + + /* + * Decay-based purging state, responsible for scheduling extent state + * transitions. + * + * Synchronization: internal. 
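+ * Unused pages decay from dirty to muzzy and from muzzy to retained
+ * as the two schedules below expire.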
+ */ + arena_decay_t decay_dirty; /* dirty --> muzzy */ + arena_decay_t decay_muzzy; /* muzzy --> retained */ + + /* + * Next extent size class in a growing series to use when satisfying a + * request via the extent hooks (only if opt_retain). This limits the + * number of disjoint virtual memory ranges so that extent merging can + * be effective even if multiple arenas' extent allocation requests are + * highly interleaved. + * + * Synchronization: extent_grow_mtx + */ + pszind_t extent_grow_next; + malloc_mutex_t extent_grow_mtx; + + /* + * Available extent structures that were allocated via + * base_alloc_extent(). + * + * Synchronization: extent_avail_mtx. + */ + extent_tree_t extent_avail; + malloc_mutex_t extent_avail_mtx; + + /* + * bins is used to store heaps of free regions. + * + * Synchronization: internal. + */ + arena_bin_t bins[NBINS]; + + /* + * Base allocator, from which arena metadata are allocated. + * + * Synchronization: internal. + */ + base_t *base; + /* Used to determine uptime. Read-only after initialization. */ + nstime_t create_time; +}; + +/* Used in conjunction with tsd for fast arena-related context lookup. */ +struct arena_tdata_s { + ticker_t decay_ticker; +}; + +/* Used to pass rtree lookup context down the path. */ +struct alloc_ctx_s { + szind_t szind; + bool slab; +}; + +#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/arena_types.h b/dep/jemalloc/include/jemalloc/internal/arena_types.h new file mode 100644 index 00000000000..a691bd811e0 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/arena_types.h @@ -0,0 +1,45 @@ +#ifndef JEMALLOC_INTERNAL_ARENA_TYPES_H +#define JEMALLOC_INTERNAL_ARENA_TYPES_H + +/* Maximum number of regions in one slab. */ +#define LG_SLAB_MAXREGS (LG_PAGE - LG_TINY_MIN) +#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS) + +/* Default decay times in milliseconds. */ +#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000) +#define MUZZY_DECAY_MS_DEFAULT ZD(10 * 1000) +/* Number of event ticks between time checks. */ +#define DECAY_NTICKS_PER_UPDATE 1000 + +typedef struct arena_slab_data_s arena_slab_data_t; +typedef struct arena_bin_info_s arena_bin_info_t; +typedef struct arena_decay_s arena_decay_t; +typedef struct arena_bin_s arena_bin_t; +typedef struct arena_s arena_t; +typedef struct arena_tdata_s arena_tdata_t; +typedef struct alloc_ctx_s alloc_ctx_t; + +typedef enum { + percpu_arena_mode_names_base = 0, /* Used for options processing. */ + + /* + * *_uninit are used only during bootstrapping, and must correspond + * to initialized variant plus percpu_arena_mode_enabled_base. + */ + percpu_arena_uninit = 0, + per_phycpu_arena_uninit = 1, + + /* All non-disabled modes must come after percpu_arena_disabled. */ + percpu_arena_disabled = 2, + + percpu_arena_mode_names_limit = 3, /* Used for options processing. */ + percpu_arena_mode_enabled_base = 3, + + percpu_arena = 3, + per_phycpu_arena = 4 /* Hyper threads share arena. 
*/ +} percpu_arena_mode_t; + +#define PERCPU_ARENA_ENABLED(m) ((m) >= percpu_arena_mode_enabled_base) +#define PERCPU_ARENA_DEFAULT percpu_arena_disabled + +#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/assert.h b/dep/jemalloc/include/jemalloc/internal/assert.h new file mode 100644 index 00000000000..be4d45b3213 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/assert.h @@ -0,0 +1,56 @@ +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/util.h" + +/* + * Define a custom assert() in order to reduce the chances of deadlock during + * assertion failure. + */ +#ifndef assert +#define assert(e) do { \ + if (unlikely(config_debug && !(e))) { \ + malloc_printf( \ + "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ + __FILE__, __LINE__, #e); \ + abort(); \ + } \ +} while (0) +#endif + +#ifndef not_reached +#define not_reached() do { \ + if (config_debug) { \ + malloc_printf( \ + "<jemalloc>: %s:%d: Unreachable code reached\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ + unreachable(); \ +} while (0) +#endif + +#ifndef not_implemented +#define not_implemented() do { \ + if (config_debug) { \ + malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ + __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) +#endif + +#ifndef assert_not_implemented +#define assert_not_implemented(e) do { \ + if (unlikely(config_debug && !(e))) { \ + not_implemented(); \ + } \ +} while (0) +#endif + +/* Use to assert a particular configuration, e.g., cassert(config_debug). */ +#ifndef cassert +#define cassert(c) do { \ + if (unlikely(!(c))) { \ + not_reached(); \ + } \ +} while (0) +#endif diff --git a/dep/jemalloc/include/jemalloc/internal/atomic.h b/dep/jemalloc/include/jemalloc/internal/atomic.h index 11a7b47fe0f..adadb1a3acb 100644 --- a/dep/jemalloc/include/jemalloc/internal/atomic.h +++ b/dep/jemalloc/include/jemalloc/internal/atomic.h @@ -1,304 +1,77 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#define atomic_read_uint64(p) atomic_add_uint64(p, 0) -#define atomic_read_uint32(p) atomic_add_uint32(p, 0) -#define atomic_read_z(p) atomic_add_z(p, 0) -#define atomic_read_u(p) atomic_add_u(p, 0) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); -uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); -uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); -uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); -size_t atomic_add_z(size_t *p, size_t x); -size_t atomic_sub_z(size_t *p, size_t x); -unsigned atomic_add_u(unsigned *p, unsigned x); -unsigned atomic_sub_u(unsigned *p, unsigned x); +#ifndef JEMALLOC_INTERNAL_ATOMIC_H +#define JEMALLOC_INTERNAL_ATOMIC_H + +#define ATOMIC_INLINE static inline + +#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) +# include "jemalloc/internal/atomic_gcc_atomic.h" +#elif defined(JEMALLOC_GCC_SYNC_ATOMICS) +# include "jemalloc/internal/atomic_gcc_sync.h" +#elif defined(_MSC_VER) +# include "jemalloc/internal/atomic_msvc.h" +#elif defined(JEMALLOC_C11_ATOMICS) +# include 
"jemalloc/internal/atomic_c11.h" +#else +# error "Don't have atomics implemented on this platform." #endif -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) -/******************************************************************************/ -/* 64-bit operations. */ +/* + * This header gives more or less a backport of C11 atomics. The user can write + * JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_sizeof_type); to generate + * counterparts of the C11 atomic functions for type, as so: + * JEMALLOC_GENERATE_ATOMICS(int *, pi, 3); + * and then write things like: + * int *some_ptr; + * atomic_pi_t atomic_ptr_to_int; + * atomic_store_pi(&atomic_ptr_to_int, some_ptr, ATOMIC_RELAXED); + * int *prev_value = atomic_exchange_pi(&ptr_to_int, NULL, ATOMIC_ACQ_REL); + * assert(some_ptr == prev_value); + * and expect things to work in the obvious way. + * + * Also included (with naming differences to avoid conflicts with the standard + * library): + * atomic_fence(atomic_memory_order_t) (mimics C11's atomic_thread_fence). + * ATOMIC_INIT (mimics C11's ATOMIC_VAR_INIT). + */ + +/* + * Pure convenience, so that we don't have to type "atomic_memory_order_" + * quite so often. + */ +#define ATOMIC_RELAXED atomic_memory_order_relaxed +#define ATOMIC_ACQUIRE atomic_memory_order_acquire +#define ATOMIC_RELEASE atomic_memory_order_release +#define ATOMIC_ACQ_REL atomic_memory_order_acq_rel +#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst + +/* + * Not all platforms have 64-bit atomics. If we do, this #define exposes that + * fact. + */ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) -# ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (InterlockedExchangeAdd64(p, -((int64_t)x))); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); -} -# elif (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - x = (uint64_t)(-(int64_t)x); - asm volatile ( - "lock; xaddq %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} -# elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - /* - * atomic_fetchadd_64() doesn't exist, but we only ever use this - * function on LP64 systems, so atomic_fetchadd_long() will do. 
- */ - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)x) + x); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - assert(sizeof(uint64_t) == sizeof(unsigned long)); - - return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); -} -# elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) -JEMALLOC_INLINE uint64_t -atomic_add_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint64_t -atomic_sub_uint64(uint64_t *p, uint64_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -# else -# error "Missing implementation for 64-bit atomic operations" -# endif +# define JEMALLOC_ATOMIC_U64 #endif -/******************************************************************************/ -/* 32-bit operations. */ -#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ +JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR) - return (__sync_add_and_fetch(p, x)); -} +/* + * There's no actual guarantee that sizeof(bool) == 1, but it's true on the only + * platform that actually needs to know the size, MSVC. + */ +JEMALLOC_GENERATE_ATOMICS(bool, b, 0) -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ +JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT) - return (__sync_sub_and_fetch(p, x)); -} -#elif (defined(_MSC_VER)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ +JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR) - return (InterlockedExchangeAdd(p, x)); -} +JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR) -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ +JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2) - return (InterlockedExchangeAdd(p, -((int32_t)x))); -} -#elif (defined(JEMALLOC_OSATOMIC)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); -} -#elif (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - x = (uint32_t)(-(int32_t)x); - asm volatile ( - "lock; xaddl %0, %1;" - : "+r" (x), "=m" (*p) /* Outputs. */ - : "m" (*p) /* Inputs. */ - ); - - return (x); -} -#elif (defined(JEMALLOC_ATOMIC9)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, x) + x); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); -} -#elif (defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) -JEMALLOC_INLINE uint32_t -atomic_add_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_add_and_fetch(p, x)); -} - -JEMALLOC_INLINE uint32_t -atomic_sub_uint32(uint32_t *p, uint32_t x) -{ - - return (__sync_sub_and_fetch(p, x)); -} -#else -# error "Missing implementation for 32-bit atomic operations" -#endif - -/******************************************************************************/ -/* size_t operations. 
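- * (these delegate to the 32- or 64-bit operations above, selected by
- * LG_SIZEOF_PTR)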
*/ -JEMALLOC_INLINE size_t -atomic_add_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE size_t -atomic_sub_z(size_t *p, size_t x) -{ - -#if (LG_SIZEOF_PTR == 3) - return ((size_t)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_PTR == 2) - return ((size_t)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); +#ifdef JEMALLOC_ATOMIC_U64 +JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3) #endif -} -/******************************************************************************/ -/* unsigned operations. */ -JEMALLOC_INLINE unsigned -atomic_add_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); -#endif -} - -JEMALLOC_INLINE unsigned -atomic_sub_u(unsigned *p, unsigned x) -{ - -#if (LG_SIZEOF_INT == 3) - return ((unsigned)atomic_add_uint64((uint64_t *)p, - (uint64_t)-((int64_t)x))); -#elif (LG_SIZEOF_INT == 2) - return ((unsigned)atomic_add_uint32((uint32_t *)p, - (uint32_t)-((int32_t)x))); -#endif -} -/******************************************************************************/ -#endif +#undef ATOMIC_INLINE -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_ATOMIC_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/atomic_c11.h b/dep/jemalloc/include/jemalloc/internal/atomic_c11.h new file mode 100644 index 00000000000..a5f9313a619 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/atomic_c11.h @@ -0,0 +1,97 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_C11_H +#define JEMALLOC_INTERNAL_ATOMIC_C11_H + +#include <stdatomic.h> + +#define ATOMIC_INIT(...) ATOMIC_VAR_INIT(__VA_ARGS__) + +#define atomic_memory_order_t memory_order +#define atomic_memory_order_relaxed memory_order_relaxed +#define atomic_memory_order_acquire memory_order_acquire +#define atomic_memory_order_release memory_order_release +#define atomic_memory_order_acq_rel memory_order_acq_rel +#define atomic_memory_order_seq_cst memory_order_seq_cst + +#define atomic_fence atomic_thread_fence + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +typedef _Atomic(type) atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + /* \ + * A strict interpretation of the C standard prevents \ + * atomic_load from taking a const argument, but it's \ + * convenient for our purposes. This cast is a workaround. 
\ + */ \ + atomic_##short_type##_t* a_nonconst = \ + (atomic_##short_type##_t*)a; \ + return atomic_load_explicit(a_nonconst, mo); \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + atomic_store_explicit(a, val, mo); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return atomic_exchange_explicit(a, val, mo); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return atomic_compare_exchange_weak_explicit(a, expected, \ + desired, success_mo, failure_mo); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return atomic_compare_exchange_strong_explicit(a, expected, \ + desired, success_mo, failure_mo); \ +} + +/* + * Integral types have some special operations available that non-integral ones + * lack. + */ +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_add_explicit(a, val, mo); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_sub_explicit(a, val, mo); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_and_explicit(a, val, mo); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_or_explicit(a, val, mo); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return atomic_fetch_xor_explicit(a, val, mo); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_C11_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h b/dep/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h new file mode 100644 index 00000000000..6b73a14f81d --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/atomic_gcc_atomic.h @@ -0,0 +1,127 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H +#define JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H + +#include "jemalloc/internal/assert.h" + +#define ATOMIC_INIT(...) {__VA_ARGS__} + +typedef enum { + atomic_memory_order_relaxed, + atomic_memory_order_acquire, + atomic_memory_order_release, + atomic_memory_order_acq_rel, + atomic_memory_order_seq_cst +} atomic_memory_order_t; + +ATOMIC_INLINE int +atomic_enum_to_builtin(atomic_memory_order_t mo) { + switch (mo) { + case atomic_memory_order_relaxed: + return __ATOMIC_RELAXED; + case atomic_memory_order_acquire: + return __ATOMIC_ACQUIRE; + case atomic_memory_order_release: + return __ATOMIC_RELEASE; + case atomic_memory_order_acq_rel: + return __ATOMIC_ACQ_REL; + case atomic_memory_order_seq_cst: + return __ATOMIC_SEQ_CST; + } + /* Can't happen; the switch is exhaustive. 
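atomic_enum_to_builtin() above is the whole translation layer: the generated bodies hand the resulting __ATOMIC_* constant straight to the compiler's __atomic builtins. A minimal sketch of those builtins used directly (illustrative only; assumes GCC or Clang):

#include <assert.h>
#include <stdbool.h>

int main(void) {
    unsigned long v = 0;
    /* What the generated functions expand to, modulo the enum mapping. */
    __atomic_store_n(&v, 10UL, __ATOMIC_RELEASE);
    unsigned long got = __atomic_load_n(&v, __ATOMIC_ACQUIRE);
    assert(got == 10);

    unsigned long expected = 10;
    bool ok = __atomic_compare_exchange_n(&v, &expected, 11UL,
        /* weak */ false, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
    assert(ok && v == 11);
    return 0;
}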
*/ + not_reached(); +} + +ATOMIC_INLINE void +atomic_fence(atomic_memory_order_t mo) { + __atomic_thread_fence(atomic_enum_to_builtin(mo)); +} + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +typedef struct { \ + type repr; \ +} atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + type result; \ + __atomic_load(&a->repr, &result, atomic_enum_to_builtin(mo)); \ + return result; \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + __atomic_store(&a->repr, &val, atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + type result; \ + __atomic_exchange(&a->repr, &val, &result, \ + atomic_enum_to_builtin(mo)); \ + return result; \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return __atomic_compare_exchange(&a->repr, expected, &desired, \ + true, atomic_enum_to_builtin(success_mo), \ + atomic_enum_to_builtin(failure_mo)); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + return __atomic_compare_exchange(&a->repr, expected, &desired, \ + false, \ + atomic_enum_to_builtin(success_mo), \ + atomic_enum_to_builtin(failure_mo)); \ +} + + +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_add(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_sub(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_and(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_or(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __atomic_fetch_xor(&a->repr, val, \ + atomic_enum_to_builtin(mo)); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_ATOMIC_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h b/dep/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h new file mode 100644 index 00000000000..30846e4d27b --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/atomic_gcc_sync.h @@ -0,0 +1,191 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H +#define JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H + +#define ATOMIC_INIT(...) 
{__VA_ARGS__} + +typedef enum { + atomic_memory_order_relaxed, + atomic_memory_order_acquire, + atomic_memory_order_release, + atomic_memory_order_acq_rel, + atomic_memory_order_seq_cst +} atomic_memory_order_t; + +ATOMIC_INLINE void +atomic_fence(atomic_memory_order_t mo) { + /* Easy cases first: no barrier, and full barrier. */ + if (mo == atomic_memory_order_relaxed) { + asm volatile("" ::: "memory"); + return; + } + if (mo == atomic_memory_order_seq_cst) { + asm volatile("" ::: "memory"); + __sync_synchronize(); + asm volatile("" ::: "memory"); + return; + } + asm volatile("" ::: "memory"); +# if defined(__i386__) || defined(__x86_64__) + /* This is implicit on x86. */ +# elif defined(__ppc__) + asm volatile("lwsync"); +# elif defined(__sparc__) && defined(__arch64__) + if (mo == atomic_memory_order_acquire) { + asm volatile("membar #LoadLoad | #LoadStore"); + } else if (mo == atomic_memory_order_release) { + asm volatile("membar #LoadStore | #StoreStore"); + } else { + asm volatile("membar #LoadLoad | #LoadStore | #StoreStore"); + } +# else + __sync_synchronize(); +# endif + asm volatile("" ::: "memory"); +} + +/* + * A correct implementation of seq_cst loads and stores on weakly ordered + * architectures could do either of the following: + * 1. store() is weak-fence -> store -> strong fence, load() is load -> + * strong-fence. + * 2. store() is strong-fence -> store, load() is strong-fence -> load -> + * weak-fence. + * The tricky thing is, load() and store() above can be the load or store + * portions of a gcc __sync builtin, so we have to follow GCC's lead, which + * means going with strategy 2. + * On strongly ordered architectures, the natural strategy is to stick a strong + * fence after seq_cst stores, and have naked loads. So we want the strong + * fences in different places on different architectures. + * atomic_pre_sc_load_fence and atomic_post_sc_store_fence allow us to + * accomplish this. + */ + +ATOMIC_INLINE void +atomic_pre_sc_load_fence() { +# if defined(__i386__) || defined(__x86_64__) || \ + (defined(__sparc__) && defined(__arch64__)) + atomic_fence(atomic_memory_order_relaxed); +# else + atomic_fence(atomic_memory_order_seq_cst); +# endif +} + +ATOMIC_INLINE void +atomic_post_sc_store_fence() { +# if defined(__i386__) || defined(__x86_64__) || \ + (defined(__sparc__) && defined(__arch64__)) + atomic_fence(atomic_memory_order_seq_cst); +# else + atomic_fence(atomic_memory_order_relaxed); +# endif + +} + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +typedef struct { \ + type volatile repr; \ +} atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_pre_sc_load_fence(); \ + } \ + type result = a->repr; \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_acquire); \ + } \ + return result; \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_release); \ + } \ + a->repr = val; \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_post_sc_store_fence(); \ + } \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + /* \ + * Because of FreeBSD, we care about gcc 4.2, which doesn't have\ + * an atomic exchange builtin. 
We fake it with a CAS loop. \ + */ \ + while (true) { \ + type old = a->repr; \ + if (__sync_bool_compare_and_swap(&a->repr, old, val)) { \ + return old; \ + } \ + } \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ + desired); \ + if (prev == *expected) { \ + return true; \ + } else { \ + *expected = prev; \ + return false; \ + } \ +} \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + type prev = __sync_val_compare_and_swap(&a->repr, *expected, \ + desired); \ + if (prev == *expected) { \ + return true; \ + } else { \ + *expected = prev; \ + return false; \ + } \ +} + +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, \ + /* unused */ lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, /* unused */ lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_add(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_sub(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_and(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_or(&a->repr, val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return __sync_fetch_and_xor(&a->repr, val); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_GCC_SYNC_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/atomic_msvc.h b/dep/jemalloc/include/jemalloc/internal/atomic_msvc.h new file mode 100644 index 00000000000..67057ce5089 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/atomic_msvc.h @@ -0,0 +1,158 @@ +#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H +#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H + +#define ATOMIC_INIT(...) {__VA_ARGS__} + +typedef enum { + atomic_memory_order_relaxed, + atomic_memory_order_acquire, + atomic_memory_order_release, + atomic_memory_order_acq_rel, + atomic_memory_order_seq_cst +} atomic_memory_order_t; + +typedef char atomic_repr_0_t; +typedef short atomic_repr_1_t; +typedef long atomic_repr_2_t; +typedef __int64 atomic_repr_3_t; + +ATOMIC_INLINE void +atomic_fence(atomic_memory_order_t mo) { + _ReadWriteBarrier(); +# if defined(_M_ARM) || defined(_M_ARM64) + /* ARM needs a barrier for everything but relaxed. */ + if (mo != atomic_memory_order_relaxed) { + MemoryBarrier(); + } +# elif defined(_M_IX86) || defined (_M_X64) + /* x86 needs a barrier only for seq_cst. */ + if (mo == atomic_memory_order_seq_cst) { + MemoryBarrier(); + } +# else +# error "Don't know how to create atomics for this platform for MSVC." 
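The exchange-via-CAS fallback in atomic_gcc_sync.h above is self-contained enough to demonstrate: __sync_bool_compare_and_swap publishes the new value only if the slot still holds the value just observed, so retrying until it succeeds yields exchange semantics. A sketch, assuming GCC-compatible __sync builtins:

#include <assert.h>

static long slot = 5;

/* Exchange faked with a CAS loop, as in the atomic_exchange_* macro above. */
static long exchange(long *p, long val) {
    while (1) {
        long old = *p;
        if (__sync_bool_compare_and_swap(p, old, val)) {
            return old;
        }
    }
}

int main(void) {
    long prev = exchange(&slot, 9);
    assert(prev == 5 && slot == 9);
    return 0;
}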
+# endif + _ReadWriteBarrier(); +} + +#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t + +#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b) +#define ATOMIC_RAW_CONCAT(a, b) a ## b + +#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \ + base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size)) + +#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \ + ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size) + +#define ATOMIC_INTERLOCKED_SUFFIX_0 8 +#define ATOMIC_INTERLOCKED_SUFFIX_1 16 +#define ATOMIC_INTERLOCKED_SUFFIX_2 +#define ATOMIC_INTERLOCKED_SUFFIX_3 64 + +#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ +typedef struct { \ + ATOMIC_INTERLOCKED_REPR(lg_size) repr; \ +} atomic_##short_type##_t; \ + \ +ATOMIC_INLINE type \ +atomic_load_##short_type(const atomic_##short_type##_t *a, \ + atomic_memory_order_t mo) { \ + ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_acquire); \ + } \ + return (type) ret; \ +} \ + \ +ATOMIC_INLINE void \ +atomic_store_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + if (mo != atomic_memory_order_relaxed) { \ + atomic_fence(atomic_memory_order_release); \ + } \ + a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \ + if (mo == atomic_memory_order_seq_cst) { \ + atomic_fence(atomic_memory_order_seq_cst); \ + } \ +} \ + \ +ATOMIC_INLINE type \ +atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \ + atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \ + lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + ATOMIC_INTERLOCKED_REPR(lg_size) e = \ + (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \ + ATOMIC_INTERLOCKED_REPR(lg_size) d = \ + (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \ + ATOMIC_INTERLOCKED_REPR(lg_size) old = \ + ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \ + lg_size)(&a->repr, d, e); \ + if (old == e) { \ + return true; \ + } else { \ + *expected = (type)old; \ + return false; \ + } \ +} \ + \ +ATOMIC_INLINE bool \ +atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \ + type *expected, type desired, atomic_memory_order_t success_mo, \ + atomic_memory_order_t failure_mo) { \ + /* We implement the weak version with strong semantics. */ \ + return atomic_compare_exchange_weak_##short_type(a, expected, \ + desired, success_mo, failure_mo); \ +} + + +#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \ +JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \ + \ +ATOMIC_INLINE type \ +atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \ + lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ + \ +ATOMIC_INLINE type \ +atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + /* \ + * MSVC warns on negation of unsigned operands, but for us it \ + * gives exactly the right semantics (MAX_TYPE + 1 - operand). 
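ATOMIC_INTERLOCKED_NAME above relies on the classic two-level concatenation trick: pasting through a second macro forces the lg_size argument to expand first, so base name plus suffix 3 becomes _InterlockedExchangeAdd64 rather than a token ending in a literal SUFFIX_3. A standalone demonstration (hypothetical SUFFIX_3 name, same shape as the diff's macros):

#include <stdio.h>

#define RAW_CONCAT(a, b) a##b
#define CONCAT(a, b) RAW_CONCAT(a, b)   /* indirection forces expansion */

#define SUFFIX_3 64

#define STR_RAW(x) #x
#define STR(x) STR_RAW(x)

int main(void) {
    /* Pastes after expanding SUFFIX_3: prints "_InterlockedExchangeAdd64". */
    puts(STR(CONCAT(_InterlockedExchangeAdd, SUFFIX_3)));
    /* Pastes the unexpanded token: prints "_InterlockedExchangeAddSUFFIX_3". */
    puts(STR(RAW_CONCAT(_InterlockedExchangeAdd, SUFFIX_3)));
    return 0;
}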
\ + */ \ + __pragma(warning(push)) \ + __pragma(warning(disable: 4146)) \ + return atomic_fetch_add_##short_type(a, -val, mo); \ + __pragma(warning(pop)) \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \ + &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \ + &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} \ +ATOMIC_INLINE type \ +atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \ + type val, atomic_memory_order_t mo) { \ + return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \ + &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \ +} + +#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/background_thread_externs.h b/dep/jemalloc/include/jemalloc/internal/background_thread_externs.h new file mode 100644 index 00000000000..8b4b8471a95 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/background_thread_externs.h @@ -0,0 +1,31 @@ +#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H +#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H + +extern bool opt_background_thread; +extern malloc_mutex_t background_thread_lock; +extern atomic_b_t background_thread_enabled_state; +extern size_t n_background_threads; +extern background_thread_info_t *background_thread_info; +extern bool can_enable_background_thread; + +bool background_thread_create(tsd_t *tsd, unsigned arena_ind); +bool background_threads_enable(tsd_t *tsd); +bool background_threads_disable(tsd_t *tsd); +void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new); +void background_thread_prefork0(tsdn_t *tsdn); +void background_thread_prefork1(tsdn_t *tsdn); +void background_thread_postfork_parent(tsdn_t *tsdn); +void background_thread_postfork_child(tsdn_t *tsdn); +bool background_thread_stats_read(tsdn_t *tsdn, + background_thread_stats_t *stats); +void background_thread_ctl_init(tsdn_t *tsdn); + +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER +extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); +#endif +bool background_thread_boot0(void); +bool background_thread_boot1(tsdn_t *tsdn); + +#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/background_thread_inlines.h b/dep/jemalloc/include/jemalloc/internal/background_thread_inlines.h new file mode 100644 index 00000000000..ef50231e8d7 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/background_thread_inlines.h @@ -0,0 +1,57 @@ +#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H +#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H + +JEMALLOC_ALWAYS_INLINE bool +background_thread_enabled(void) { + return atomic_load_b(&background_thread_enabled_state, ATOMIC_RELAXED); +} + +JEMALLOC_ALWAYS_INLINE void +background_thread_enabled_set(tsdn_t *tsdn, bool state) { + malloc_mutex_assert_owner(tsdn, &background_thread_lock); + atomic_store_b(&background_thread_enabled_state, state, ATOMIC_RELAXED); +} + +JEMALLOC_ALWAYS_INLINE background_thread_info_t * +arena_background_thread_info_get(arena_t *arena) { + unsigned arena_ind = arena_ind_get(arena); + return &background_thread_info[arena_ind 
% ncpus]; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +background_thread_wakeup_time_get(background_thread_info_t *info) { + uint64_t next_wakeup = nstime_ns(&info->next_wakeup); + assert(atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE) == + (next_wakeup == BACKGROUND_THREAD_INDEFINITE_SLEEP)); + return next_wakeup; +} + +JEMALLOC_ALWAYS_INLINE void +background_thread_wakeup_time_set(tsdn_t *tsdn, background_thread_info_t *info, + uint64_t wakeup_time) { + malloc_mutex_assert_owner(tsdn, &info->mtx); + atomic_store_b(&info->indefinite_sleep, + wakeup_time == BACKGROUND_THREAD_INDEFINITE_SLEEP, ATOMIC_RELEASE); + nstime_init(&info->next_wakeup, wakeup_time); +} + +JEMALLOC_ALWAYS_INLINE bool +background_thread_indefinite_sleep(background_thread_info_t *info) { + return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE); +} + +JEMALLOC_ALWAYS_INLINE void +arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread) { + if (!background_thread_enabled() || is_background_thread) { + return; + } + background_thread_info_t *info = + arena_background_thread_info_get(arena); + if (background_thread_indefinite_sleep(info)) { + background_thread_interval_check(tsdn, arena, + &arena->decay_dirty, 0); + } +} + +#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/background_thread_structs.h b/dep/jemalloc/include/jemalloc/internal/background_thread_structs.h new file mode 100644 index 00000000000..e69a7d022b4 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/background_thread_structs.h @@ -0,0 +1,52 @@ +#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H +#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H + +/* This file really combines "structs" and "types", but only transitionally. */ + +#if defined(JEMALLOC_BACKGROUND_THREAD) || defined(JEMALLOC_LAZY_LOCK) +# define JEMALLOC_PTHREAD_CREATE_WRAPPER +#endif + +#define BACKGROUND_THREAD_INDEFINITE_SLEEP UINT64_MAX + +typedef enum { + background_thread_stopped, + background_thread_started, + /* Thread waits on the global lock when paused (for arena_reset). */ + background_thread_paused, +} background_thread_state_t; + +struct background_thread_info_s { +#ifdef JEMALLOC_BACKGROUND_THREAD + /* Background thread is pthread specific. */ + pthread_t thread; + pthread_cond_t cond; +#endif + malloc_mutex_t mtx; + background_thread_state_t state; + /* When true, it means no wakeup scheduled. */ + atomic_b_t indefinite_sleep; + /* Next scheduled wakeup time (absolute time in ns). */ + nstime_t next_wakeup; + /* + * Since the last background thread run, newly added number of pages + * that need to be purged by the next wakeup. This is adjusted on + * epoch advance, and is used to determine whether we should signal the + * background thread to wake up earlier. + */ + size_t npages_to_purge_new; + /* Stats: total number of runs since started. */ + uint64_t tot_n_runs; + /* Stats: total sleep time since started. 
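The indefinite_sleep flag above is read lock-free with ATOMIC_ACQUIRE and written with ATOMIC_RELEASE; that pairing is the standard publication idiom. A generic standalone sketch of the guarantee in C11 (two threads calling writer() and reader() are assumed; the main here is a trivial single-threaded check):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool ready;          /* plays the role of atomic_b_t */
static unsigned long payload;

void writer(void) {
    payload = 42;                  /* plain write, made visible by... */
    atomic_store_explicit(&ready, true, memory_order_release);
}

bool reader(unsigned long *out) {
    /* ...this acquire load: observing true implies seeing payload == 42. */
    if (atomic_load_explicit(&ready, memory_order_acquire)) {
        *out = payload;
        return true;
    }
    return false;
}

int main(void) {
    unsigned long v = 0;
    writer();
    return (reader(&v) && v == 42) ? 0 : 1;
}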
*/ + nstime_t tot_sleep_time; +}; +typedef struct background_thread_info_s background_thread_info_t; + +struct background_thread_stats_s { + size_t num_threads; + uint64_t num_runs; + nstime_t run_interval; +}; +typedef struct background_thread_stats_s background_thread_stats_t; + +#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_STRUCTS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/base.h b/dep/jemalloc/include/jemalloc/internal/base.h deleted file mode 100644 index 9cf75ffb0b3..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/base.h +++ /dev/null @@ -1,26 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *base_alloc(size_t size); -void *base_calloc(size_t number, size_t size); -extent_node_t *base_node_alloc(void); -void base_node_dealloc(extent_node_t *node); -bool base_boot(void); -void base_prefork(void); -void base_postfork_parent(void); -void base_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/dep/jemalloc/include/jemalloc/internal/base_externs.h b/dep/jemalloc/include/jemalloc/internal/base_externs.h new file mode 100644 index 00000000000..a4fd5ac7d9a --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/base_externs.h @@ -0,0 +1,19 @@ +#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H +#define JEMALLOC_INTERNAL_BASE_EXTERNS_H + +base_t *b0get(void); +base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); +void base_delete(tsdn_t *tsdn, base_t *base); +extent_hooks_t *base_extent_hooks_get(base_t *base); +extent_hooks_t *base_extent_hooks_set(base_t *base, + extent_hooks_t *extent_hooks); +void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment); +extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base); +void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, + size_t *resident, size_t *mapped); +void base_prefork(tsdn_t *tsdn, base_t *base); +void base_postfork_parent(tsdn_t *tsdn, base_t *base); +void base_postfork_child(tsdn_t *tsdn, base_t *base); +bool base_boot(tsdn_t *tsdn); + +#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/base_inlines.h b/dep/jemalloc/include/jemalloc/internal/base_inlines.h new file mode 100644 index 00000000000..931560bfaea --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/base_inlines.h @@ -0,0 +1,9 @@ +#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H +#define JEMALLOC_INTERNAL_BASE_INLINES_H + +static inline unsigned +base_ind_get(const base_t *base) { + return base->ind; +} + +#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/base_structs.h b/dep/jemalloc/include/jemalloc/internal/base_structs.h new file mode 100644 index 00000000000..18e227bd5a6 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/base_structs.h @@ -0,0 +1,55 @@ +#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H +#define JEMALLOC_INTERNAL_BASE_STRUCTS_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include 
"jemalloc/internal/mutex.h" +#include "jemalloc/internal/size_classes.h" + +/* Embedded at the beginning of every block of base-managed virtual memory. */ +struct base_block_s { + /* Total size of block's virtual memory mapping. */ + size_t size; + + /* Next block in list of base's blocks. */ + base_block_t *next; + + /* Tracks unused trailing space. */ + extent_t extent; +}; + +struct base_s { + /* Associated arena's index within the arenas array. */ + unsigned ind; + + /* + * User-configurable extent hook functions. Points to an + * extent_hooks_t. + */ + atomic_p_t extent_hooks; + + /* Protects base_alloc() and base_stats_get() operations. */ + malloc_mutex_t mtx; + + /* + * Most recent size class in the series of increasingly large base + * extents. Logarithmic spacing between subsequent allocations ensures + * that the total number of distinct mappings remains small. + */ + pszind_t pind_last; + + /* Serial number generation state. */ + size_t extent_sn_next; + + /* Chain of all blocks associated with base. */ + base_block_t *blocks; + + /* Heap of extents that track unused trailing space within blocks. */ + extent_heap_t avail[NSIZES]; + + /* Stats, only maintained if config_stats. */ + size_t allocated; + size_t resident; + size_t mapped; +}; + +#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/base_types.h b/dep/jemalloc/include/jemalloc/internal/base_types.h new file mode 100644 index 00000000000..be7ee82589f --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/base_types.h @@ -0,0 +1,7 @@ +#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H +#define JEMALLOC_INTERNAL_BASE_TYPES_H + +typedef struct base_block_s base_block_t; +typedef struct base_s base_t; + +#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/bit_util.h b/dep/jemalloc/include/jemalloc/internal/bit_util.h new file mode 100644 index 00000000000..8d078a8a35e --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/bit_util.h @@ -0,0 +1,165 @@ +#ifndef JEMALLOC_INTERNAL_BIT_UTIL_H +#define JEMALLOC_INTERNAL_BIT_UTIL_H + +#include "jemalloc/internal/assert.h" + +#define BIT_UTIL_INLINE static inline + +/* Sanity check. 
*/ +#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ + || !defined(JEMALLOC_INTERNAL_FFS) +# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure +#endif + + +BIT_UTIL_INLINE unsigned +ffs_llu(unsigned long long bitmap) { + return JEMALLOC_INTERNAL_FFSLL(bitmap); +} + +BIT_UTIL_INLINE unsigned +ffs_lu(unsigned long bitmap) { + return JEMALLOC_INTERNAL_FFSL(bitmap); +} + +BIT_UTIL_INLINE unsigned +ffs_u(unsigned bitmap) { + return JEMALLOC_INTERNAL_FFS(bitmap); +} + +BIT_UTIL_INLINE unsigned +ffs_zu(size_t bitmap) { +#if LG_SIZEOF_PTR == LG_SIZEOF_INT + return ffs_u(bitmap); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG + return ffs_lu(bitmap); +#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG + return ffs_llu(bitmap); +#else +#error No implementation for size_t ffs() +#endif +} + +BIT_UTIL_INLINE unsigned +ffs_u64(uint64_t bitmap) { +#if LG_SIZEOF_LONG == 3 + return ffs_lu(bitmap); +#elif LG_SIZEOF_LONG_LONG == 3 + return ffs_llu(bitmap); +#else +#error No implementation for 64-bit ffs() +#endif +} + +BIT_UTIL_INLINE unsigned +ffs_u32(uint32_t bitmap) { +#if LG_SIZEOF_INT == 2 + return ffs_u(bitmap); +#else +#error No implementation for 32-bit ffs() +#endif + return ffs_u(bitmap); +} + +BIT_UTIL_INLINE uint64_t +pow2_ceil_u64(uint64_t x) { + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x |= x >> 32; + x++; + return x; +} + +BIT_UTIL_INLINE uint32_t +pow2_ceil_u32(uint32_t x) { + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + x++; + return x; +} + +/* Compute the smallest power of 2 that is >= x. */ +BIT_UTIL_INLINE size_t +pow2_ceil_zu(size_t x) { +#if (LG_SIZEOF_PTR == 3) + return pow2_ceil_u64(x); +#else + return pow2_ceil_u32(x); +#endif +} + +#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + size_t ret; + assert(x != 0); + + asm ("bsr %1, %0" + : "=r"(ret) // Outputs. + : "r"(x) // Inputs. 
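pow2_ceil_u64/u32 above use the or-shift cascade: after the initial decrement, each step smears the highest set bit across twice as many lower positions, leaving 2^k - 1, and the final increment lands on the smallest power of two >= x. A worked restatement of the same routine, trimmed to 32 bits:

#include <assert.h>
#include <stdint.h>

/* Walkthrough for x = 37 (0b100101):
 *   x - 1       = 0b100100
 *   |= x >> 1  -> 0b110110
 *   |= x >> 2  -> 0b111111   (further shifts change nothing)
 *   x + 1       = 0b1000000 = 64
 */
static uint32_t pow2_ceil_u32_demo(uint32_t x) {
    x--;
    x |= x >> 1;  x |= x >> 2;  x |= x >> 4;
    x |= x >> 8;  x |= x >> 16;
    return x + 1;
}

int main(void) {
    assert(pow2_ceil_u32_demo(37) == 64);
    assert(pow2_ceil_u32_demo(64) == 64);
    assert(pow2_ceil_u32_demo(1) == 1);
    return 0;
}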
+ ); + assert(ret < UINT_MAX); + return (unsigned)ret; +} +#elif (defined(_MSC_VER)) +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + unsigned long ret; + + assert(x != 0); + +#if (LG_SIZEOF_PTR == 3) + _BitScanReverse64(&ret, x); +#elif (LG_SIZEOF_PTR == 2) + _BitScanReverse(&ret, x); +#else +# error "Unsupported type size for lg_floor()" +#endif + assert(ret < UINT_MAX); + return (unsigned)ret; +} +#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + assert(x != 0); + +#if (LG_SIZEOF_PTR == LG_SIZEOF_INT) + return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x); +#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) + return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x); +#else +# error "Unsupported type size for lg_floor()" +#endif +} +#else +BIT_UTIL_INLINE unsigned +lg_floor(size_t x) { + assert(x != 0); + + x |= (x >> 1); + x |= (x >> 2); + x |= (x >> 4); + x |= (x >> 8); + x |= (x >> 16); +#if (LG_SIZEOF_PTR == 3) + x |= (x >> 32); +#endif + if (x == SIZE_T_MAX) { + return (8 << LG_SIZEOF_PTR) - 1; + } + x++; + return ffs_zu(x) - 2; +} +#endif + +#undef BIT_UTIL_INLINE + +#endif /* JEMALLOC_INTERNAL_BIT_UTIL_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/bitmap.h b/dep/jemalloc/include/jemalloc/internal/bitmap.h index 605ebac58c1..ac990290a5b 100644 --- a/dep/jemalloc/include/jemalloc/internal/bitmap.h +++ b/dep/jemalloc/include/jemalloc/internal/bitmap.h @@ -1,37 +1,159 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_BITMAP_H +#define JEMALLOC_INTERNAL_BITMAP_H -/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ -#define LG_BITMAP_MAXBITS LG_RUN_MAXREGS +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/size_classes.h" -typedef struct bitmap_level_s bitmap_level_t; -typedef struct bitmap_info_s bitmap_info_t; typedef unsigned long bitmap_t; -#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG +#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG + +/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ +#if LG_SLAB_MAXREGS > LG_CEIL_NSIZES +/* Maximum bitmap bit count is determined by maximum regions per slab. */ +# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS +#else +/* Maximum bitmap bit count is determined by number of extent size classes. */ +# define LG_BITMAP_MAXBITS LG_CEIL_NSIZES +#endif +#define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) /* Number of bits per group. */ -#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) -#define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) -#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) +#define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) +#define BITMAP_GROUP_NBITS (1U << LG_BITMAP_GROUP_NBITS) +#define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) + +/* + * Do some analysis on how big the bitmap is before we use a tree. For a brute + * force linear search, if we would have to call ffs_lu() more than 2^3 times, + * use a tree instead. + */ +#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 +# define BITMAP_USE_TREE +#endif + +/* Number of groups required to store a given number of bits. */ +#define BITMAP_BITS2GROUPS(nbits) \ + (((nbits) + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) + +/* + * Number of groups required at a particular level for a given number of bits. 
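BITMAP_BITS2GROUPS just above is ceiling division by the group width, and the per-level macros that follow simply iterate it: each tree level summarizes one group's worth of the level below. Worked numbers as a sketch, assuming 64-bit groups (LG_BITMAP_GROUP_NBITS == 6 when sizeof(long) == 8):

#include <assert.h>
#include <stddef.h>

#define LG_GROUP_NBITS 6
#define GROUP_NBITS    (1U << LG_GROUP_NBITS)

/* BITMAP_BITS2GROUPS restated as a function. */
static size_t bits2groups(size_t nbits) {
    return (nbits + GROUP_NBITS - 1) >> LG_GROUP_NBITS;
}

int main(void) {
    /* A 2048-bit bitmap: 32 leaf groups, summarized by one level-1 group,
     * i.e. a 2-level tree and 33 groups in total. */
    assert(bits2groups(2048) == 32);
    assert(bits2groups(bits2groups(2048)) == 1);
    /* The tree only pays off when a linear scan could need more than 2^3
     * ffs_lu() calls: with 64-bit groups, any bitmap larger than
     * 2^(6+3) = 512 bits, matching the BITMAP_USE_TREE test above. */
    assert(bits2groups(512) == 8);
    return 0;
}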
+ */ +#define BITMAP_GROUPS_L0(nbits) \ + BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_L1(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) +#define BITMAP_GROUPS_L2(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) +#define BITMAP_GROUPS_L3(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS((nbits))))) +#define BITMAP_GROUPS_L4(nbits) \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ + BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))))) + +/* + * Assuming the number of levels, number of groups required for a given number + * of bits. + */ +#define BITMAP_GROUPS_1_LEVEL(nbits) \ + BITMAP_GROUPS_L0(nbits) +#define BITMAP_GROUPS_2_LEVEL(nbits) \ + (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) +#define BITMAP_GROUPS_3_LEVEL(nbits) \ + (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) +#define BITMAP_GROUPS_4_LEVEL(nbits) \ + (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) +#define BITMAP_GROUPS_5_LEVEL(nbits) \ + (BITMAP_GROUPS_4_LEVEL(nbits) + BITMAP_GROUPS_L4(nbits)) + +/* + * Maximum number of groups required to support LG_BITMAP_MAXBITS. + */ +#ifdef BITMAP_USE_TREE -/* Maximum number of levels possible. */ -#define BITMAP_MAX_LEVELS \ - (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ - + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) +#if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_1_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_2_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_3_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_4_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) +#elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 5 +# define BITMAP_GROUPS(nbits) BITMAP_GROUPS_5_LEVEL(nbits) +# define BITMAP_GROUPS_MAX BITMAP_GROUPS_5_LEVEL(BITMAP_MAXBITS) +#else +# error "Unsupported bitmap size" +#endif + +/* + * Maximum number of levels possible. This could be statically computed based + * on LG_BITMAP_MAXBITS: + * + * #define BITMAP_MAX_LEVELS \ + * (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + * + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) + * + * However, that would not allow the generic BITMAP_INFO_INITIALIZER() macro, so + * instead hardcode BITMAP_MAX_LEVELS to the largest number supported by the + * various cascading macros. The only additional cost this incurs is some + * unused trailing entries in bitmap_info_t structures; the bitmaps themselves + * are not impacted. + */ +#define BITMAP_MAX_LEVELS 5 + +#define BITMAP_INFO_INITIALIZER(nbits) { \ + /* nbits. */ \ + nbits, \ + /* nlevels. */ \ + (BITMAP_GROUPS_L0(nbits) > BITMAP_GROUPS_L1(nbits)) + \ + (BITMAP_GROUPS_L1(nbits) > BITMAP_GROUPS_L2(nbits)) + \ + (BITMAP_GROUPS_L2(nbits) > BITMAP_GROUPS_L3(nbits)) + \ + (BITMAP_GROUPS_L3(nbits) > BITMAP_GROUPS_L4(nbits)) + 1, \ + /* levels. 
*/ \ + { \ + {0}, \ + {BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) + \ + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L3(nbits) + BITMAP_GROUPS_L2(nbits) + \ + BITMAP_GROUPS_L1(nbits) + BITMAP_GROUPS_L0(nbits)}, \ + {BITMAP_GROUPS_L4(nbits) + BITMAP_GROUPS_L3(nbits) + \ + BITMAP_GROUPS_L2(nbits) + BITMAP_GROUPS_L1(nbits) \ + + BITMAP_GROUPS_L0(nbits)} \ + } \ +} + +#else /* BITMAP_USE_TREE */ -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +#define BITMAP_GROUPS(nbits) BITMAP_BITS2GROUPS(nbits) +#define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) -struct bitmap_level_s { +#define BITMAP_INFO_INITIALIZER(nbits) { \ + /* nbits. */ \ + nbits, \ + /* ngroups. */ \ + BITMAP_BITS2GROUPS(nbits) \ +} + +#endif /* BITMAP_USE_TREE */ + +typedef struct bitmap_level_s { /* Offset of this level's groups within the array of groups. */ size_t group_offset; -}; +} bitmap_level_t; -struct bitmap_info_s { +typedef struct bitmap_info_s { /* Logical number of bits in bitmap (stored at bottom level). */ size_t nbits; +#ifdef BITMAP_USE_TREE /* Number of levels necessary for nbits. */ unsigned nlevels; @@ -40,67 +162,62 @@ struct bitmap_info_s { * bottom to top (e.g. the bottom level is stored in levels[0]). */ bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); -size_t bitmap_info_ngroups(const bitmap_info_t *binfo); -size_t bitmap_size(size_t nbits); -void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); -bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); -void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); -#endif +#else /* BITMAP_USE_TREE */ + /* Number of groups necessary for nbits. */ + size_t ngroups; +#endif /* BITMAP_USE_TREE */ +} bitmap_info_t; + +void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); +void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill); +size_t bitmap_size(const bitmap_info_t *binfo); -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) -JEMALLOC_INLINE bool -bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ - unsigned rgoff = binfo->levels[binfo->nlevels].group_offset - 1; +static inline bool +bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { +#ifdef BITMAP_USE_TREE + size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; bitmap_t rg = bitmap[rgoff]; /* The bitmap is full iff the root group is 0. 
*/ return (rg == 0); +#else + size_t i; + + for (i = 0; i < binfo->ngroups; i++) { + if (bitmap[i] != 0) { + return false; + } + } + return true; +#endif } -JEMALLOC_INLINE bool -bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ +static inline bool +bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t g; assert(bit < binfo->nbits); goff = bit >> LG_BITMAP_GROUP_NBITS; g = bitmap[goff]; - return (!(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK)))); + return !(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); } -JEMALLOC_INLINE void -bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ +static inline void +bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; assert(bit < binfo->nbits); - assert(bitmap_get(bitmap, binfo, bit) == false); + assert(!bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(bitmap_get(bitmap, binfo, bit)); +#ifdef BITMAP_USE_TREE /* Propagate group state transitions up the tree. */ if (g == 0) { unsigned i; @@ -109,45 +226,113 @@ bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; - assert(g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; - if (g != 0) + if (g != 0) { break; + } } } +#endif +} + +/* ffu: find first unset >= bit. */ +static inline size_t +bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) { + assert(min_bit < binfo->nbits); + +#ifdef BITMAP_USE_TREE + size_t bit = 0; + for (unsigned level = binfo->nlevels; level--;) { + size_t lg_bits_per_group = (LG_BITMAP_GROUP_NBITS * (level + + 1)); + bitmap_t group = bitmap[binfo->levels[level].group_offset + (bit + >> lg_bits_per_group)]; + unsigned group_nmask = (unsigned)(((min_bit > bit) ? (min_bit - + bit) : 0) >> (lg_bits_per_group - LG_BITMAP_GROUP_NBITS)); + assert(group_nmask <= BITMAP_GROUP_NBITS); + bitmap_t group_mask = ~((1LU << group_nmask) - 1); + bitmap_t group_masked = group & group_mask; + if (group_masked == 0LU) { + if (group == 0LU) { + return binfo->nbits; + } + /* + * min_bit was preceded by one or more unset bits in + * this group, but there are no other unset bits in this + * group. Try again starting at the first bit of the + * next sibling. This will recurse at most once per + * non-root level. 
+ */ + size_t sib_base = bit + (ZU(1) << lg_bits_per_group); + assert(sib_base > min_bit); + assert(sib_base > bit); + if (sib_base >= binfo->nbits) { + return binfo->nbits; + } + return bitmap_ffu(bitmap, binfo, sib_base); + } + bit += ((size_t)(ffs_lu(group_masked) - 1)) << + (lg_bits_per_group - LG_BITMAP_GROUP_NBITS); + } + assert(bit >= min_bit); + assert(bit < binfo->nbits); + return bit; +#else + size_t i = min_bit >> LG_BITMAP_GROUP_NBITS; + bitmap_t g = bitmap[i] & ~((1LU << (min_bit & BITMAP_GROUP_NBITS_MASK)) + - 1); + size_t bit; + do { + bit = ffs_lu(g); + if (bit != 0) { + return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); + } + i++; + g = bitmap[i]; + } while (i < binfo->ngroups); + return binfo->nbits; +#endif } /* sfu: set first unset. */ -JEMALLOC_INLINE size_t -bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ +static inline size_t +bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t bit; bitmap_t g; unsigned i; - assert(bitmap_full(bitmap, binfo) == false); + assert(!bitmap_full(bitmap, binfo)); +#ifdef BITMAP_USE_TREE i = binfo->nlevels - 1; g = bitmap[binfo->levels[i].group_offset]; - bit = ffsl(g) - 1; + bit = ffs_lu(g) - 1; while (i > 0) { i--; g = bitmap[binfo->levels[i].group_offset + bit]; - bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffsl(g) - 1); + bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); } - +#else + i = 0; + g = bitmap[0]; + while ((bit = ffs_lu(g)) == 0) { + i++; + g = bitmap[i]; + } + bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); +#endif bitmap_set(bitmap, binfo, bit); - return (bit); + return bit; } -JEMALLOC_INLINE void -bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) -{ +static inline void +bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; - bool propagate; + UNUSED bool propagate; assert(bit < binfo->nbits); assert(bitmap_get(bitmap, binfo, bit)); @@ -155,10 +340,11 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) gp = &bitmap[goff]; g = *gp; propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; - assert(bitmap_get(bitmap, binfo, bit) == false); + assert(!bitmap_get(bitmap, binfo, bit)); +#ifdef BITMAP_USE_TREE /* Propagate group state transitions up the tree. 
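bitmap_sfu above works because the bitmap stores bits inverted relative to the API: a stored 1 means the logical bit is still unset, so find-first-set over the raw words locates the first free bit directly, and bitmap_set then clears it. A flat, single-group sketch of that representation (POSIX ffsl assumed):

#include <assert.h>
#include <strings.h>   /* ffsl(); GCC's __builtin_ffsl works equally well */

int main(void) {
    unsigned long g = ~0UL;           /* bitmap_init: everything free */

    int bit = ffsl((long)g) - 1;      /* first free bit: 0 */
    assert(bit == 0);
    g ^= 1UL << bit;                  /* bitmap_set: mark it used */

    bit = ffsl((long)g) - 1;          /* next free bit: 1 */
    assert(bit == 1);

    g = 0UL;                          /* all stored bits clear... */
    assert(ffsl((long)g) == 0);       /* ...means the bitmap is full */
    return 0;
}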
*/ if (propagate) { unsigned i; @@ -168,17 +354,16 @@ bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; propagate = (g == 0); - assert((g & (1LU << (bit & BITMAP_GROUP_NBITS_MASK))) + assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); - g ^= 1LU << (bit & BITMAP_GROUP_NBITS_MASK); + g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; - if (propagate == false) + if (!propagate) { break; + } } } +#endif /* BITMAP_USE_TREE */ } -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_BITMAP_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/chunk.h b/dep/jemalloc/include/jemalloc/internal/chunk.h deleted file mode 100644 index 87d8700dac8..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/chunk.h +++ /dev/null @@ -1,63 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -/* - * Size and alignment of memory chunks that are allocated by the OS's virtual - * memory system. - */ -#define LG_CHUNK_DEFAULT 22 - -/* Return the chunk address for allocation address a. */ -#define CHUNK_ADDR2BASE(a) \ - ((void *)((uintptr_t)(a) & ~chunksize_mask)) - -/* Return the chunk offset of address a. */ -#define CHUNK_ADDR2OFFSET(a) \ - ((size_t)((uintptr_t)(a) & chunksize_mask)) - -/* Return the smallest chunk multiple that is >= s. */ -#define CHUNK_CEILING(s) \ - (((s) + chunksize_mask) & ~chunksize_mask) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern size_t opt_lg_chunk; -extern const char *opt_dss; - -/* Protects stats_chunks; currently not used for any other purpose. */ -extern malloc_mutex_t chunks_mtx; -/* Chunk statistics. */ -extern chunk_stats_t stats_chunks; - -extern rtree_t *chunks_rtree; - -extern size_t chunksize; -extern size_t chunksize_mask; /* (chunksize - 1). */ -extern size_t chunk_npages; -extern size_t map_bias; /* Number of arena chunk header pages. */ -extern size_t arena_maxclass; /* Max size class for arenas. 
*/ - -void *chunk_alloc(size_t size, size_t alignment, bool base, bool *zero, - dss_prec_t dss_prec); -void chunk_unmap(void *chunk, size_t size); -void chunk_dealloc(void *chunk, size_t size, bool unmap); -bool chunk_boot(void); -void chunk_prefork(void); -void chunk_postfork_parent(void); -void chunk_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - -#include "jemalloc/internal/chunk_dss.h" -#include "jemalloc/internal/chunk_mmap.h" diff --git a/dep/jemalloc/include/jemalloc/internal/chunk_dss.h b/dep/jemalloc/include/jemalloc/internal/chunk_dss.h deleted file mode 100644 index 4535ce09c09..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/chunk_dss.h +++ /dev/null @@ -1,38 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef enum { - dss_prec_disabled = 0, - dss_prec_primary = 1, - dss_prec_secondary = 2, - - dss_prec_limit = 3 -} dss_prec_t; -#define DSS_PREC_DEFAULT dss_prec_secondary -#define DSS_DEFAULT "secondary" - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -extern const char *dss_prec_names[]; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -dss_prec_t chunk_dss_prec_get(void); -bool chunk_dss_prec_set(dss_prec_t dss_prec); -void *chunk_alloc_dss(size_t size, size_t alignment, bool *zero); -bool chunk_in_dss(void *chunk); -bool chunk_dss_boot(void); -void chunk_dss_prefork(void); -void chunk_dss_postfork_parent(void); -void chunk_dss_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/dep/jemalloc/include/jemalloc/internal/chunk_mmap.h b/dep/jemalloc/include/jemalloc/internal/chunk_mmap.h deleted file mode 100644 index f24abac7538..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/chunk_mmap.h +++ /dev/null @@ -1,22 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -bool pages_purge(void *addr, size_t length); - -void *chunk_alloc_mmap(size_t size, size_t alignment, bool *zero); -bool chunk_dealloc_mmap(void *chunk, size_t size); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/dep/jemalloc/include/jemalloc/internal/ckh.h b/dep/jemalloc/include/jemalloc/internal/ckh.h index 58712a6a763..7b3850bc168 100644 --- a/dep/jemalloc/include/jemalloc/internal/ckh.h +++ b/dep/jemalloc/include/jemalloc/internal/ckh.h @@ -1,88 +1,101 @@ 
-/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_CKH_H +#define JEMALLOC_INTERNAL_CKH_H -typedef struct ckh_s ckh_t; -typedef struct ckhc_s ckhc_t; +#include "jemalloc/internal/tsd.h" -/* Typedefs to allow easy function pointer passing. */ -typedef void ckh_hash_t (const void *, size_t[2]); -typedef bool ckh_keycomp_t (const void *, const void *); +/* Cuckoo hashing implementation. Skip to the end for the interface. */ + +/******************************************************************************/ +/* INTERNAL DEFINITIONS -- IGNORE */ +/******************************************************************************/ /* Maintain counters used to get an idea of performance. */ -/* #define CKH_COUNT */ +/* #define CKH_COUNT */ /* Print counter values in ckh_delete() (requires CKH_COUNT). */ -/* #define CKH_VERBOSE */ +/* #define CKH_VERBOSE */ /* * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit * one bucket per L1 cache line. */ -#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) +#define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +/* Typedefs to allow easy function pointer passing. */ +typedef void ckh_hash_t (const void *, size_t[2]); +typedef bool ckh_keycomp_t (const void *, const void *); /* Hash table cell. */ -struct ckhc_s { - const void *key; - const void *data; -}; +typedef struct { + const void *key; + const void *data; +} ckhc_t; -struct ckh_s { +/* The hash table itself. */ +typedef struct { #ifdef CKH_COUNT /* Counters used to get an idea of performance. */ - uint64_t ngrows; - uint64_t nshrinks; - uint64_t nshrinkfails; - uint64_t ninserts; - uint64_t nrelocs; + uint64_t ngrows; + uint64_t nshrinks; + uint64_t nshrinkfails; + uint64_t ninserts; + uint64_t nrelocs; #endif /* Used for pseudo-random number generation. */ -#define CKH_A 1103515241 -#define CKH_C 12347 - uint32_t prng_state; + uint64_t prng_state; /* Total number of items. */ - size_t count; + size_t count; /* * Minimum and current number of hash table buckets. There are * 2^LG_CKH_BUCKET_CELLS cells per bucket. */ - unsigned lg_minbuckets; - unsigned lg_curbuckets; + unsigned lg_minbuckets; + unsigned lg_curbuckets; /* Hash and comparison functions. */ - ckh_hash_t *hash; - ckh_keycomp_t *keycomp; + ckh_hash_t *hash; + ckh_keycomp_t *keycomp; /* Hash table with 2^lg_curbuckets buckets. */ - ckhc_t *tab; -}; + ckhc_t *tab; +} ckh_t; -#endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS +/* BEGIN PUBLIC API */ +/******************************************************************************/ -bool ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, +/* Lifetime management. Minitems is the initial capacity. */ +bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp); -void ckh_delete(ckh_t *ckh); -size_t ckh_count(ckh_t *ckh); -bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); -bool ckh_insert(ckh_t *ckh, const void *key, const void *data); -bool ckh_remove(ckh_t *ckh, const void *searchkey, void **key, +void ckh_delete(tsd_t *tsd, ckh_t *ckh); + +/* Get the number of elements in the set. 
*/ +size_t ckh_count(ckh_t *ckh); + +/* + * To iterate over the elements in the table, initialize *tabind to 0 and call + * this function until it returns true. Each call that returns false will + * update *key and *data to the next element in the table, assuming the pointers + * are non-NULL. + */ +bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); + +/* + * Basic hash table operations -- insert, removal, lookup. For ckh_remove and + * ckh_search, key or data can be NULL. The hash-table only stores pointers to + * the key and value, and doesn't do any lifetime management. + */ +bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); +bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data); -bool ckh_search(ckh_t *ckh, const void *seachkey, void **key, void **data); -void ckh_string_hash(const void *key, size_t r_hash[2]); -bool ckh_string_keycomp(const void *k1, const void *k2); -void ckh_pointer_hash(const void *key, size_t r_hash[2]); -bool ckh_pointer_keycomp(const void *k1, const void *k2); +bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +/* Some useful hash and comparison functions for strings and pointers. */ +void ckh_string_hash(const void *key, size_t r_hash[2]); +bool ckh_string_keycomp(const void *k1, const void *k2); +void ckh_pointer_hash(const void *key, size_t r_hash[2]); +bool ckh_pointer_keycomp(const void *k1, const void *k2); -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_CKH_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/ctl.h b/dep/jemalloc/include/jemalloc/internal/ctl.h index 0ffecc5f2a2..a91c4cf556b 100644 --- a/dep/jemalloc/include/jemalloc/internal/ctl.h +++ b/dep/jemalloc/include/jemalloc/internal/ctl.h @@ -1,87 +1,106 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct ctl_node_s ctl_node_t; -typedef struct ctl_named_node_s ctl_named_node_t; -typedef struct ctl_indexed_node_s ctl_indexed_node_t; -typedef struct ctl_arena_stats_s ctl_arena_stats_t; -typedef struct ctl_stats_s ctl_stats_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct ctl_node_s { - bool named; -}; - -struct ctl_named_node_s { - struct ctl_node_s node; - const char *name; +#ifndef JEMALLOC_INTERNAL_CTL_H +#define JEMALLOC_INTERNAL_CTL_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats.h" + +/* Maximum ctl tree depth. */ +#define CTL_MAX_DEPTH 7 + +typedef struct ctl_node_s { + bool named; +} ctl_node_t; + +typedef struct ctl_named_node_s { + ctl_node_t node; + const char *name; /* If (nchildren == 0), this is a terminal node. 
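The iteration protocol for the reworked ckh interface above is a plain cursor loop; note that, unlike ckh_new/ckh_insert/ckh_remove, lookup and iteration do not take the new tsd_t parameter. A minimal usage sketch (illustrative only: ckh is a jemalloc-internal API, so this fragment builds only inside the jemalloc tree, and dump_table is a hypothetical caller):

#include <stdio.h>
#include "jemalloc/internal/ckh.h"

static void
dump_table(ckh_t *tab) {
	size_t tabind = 0;
	void *key, *data;

	/* ckh_iter() returns false for each element, true when done. */
	while (!ckh_iter(tab, &tabind, &key, &data)) {
		/* Only the stored pointers come back; no copies are made. */
		printf("%p -> %p\n", key, data);
	}
}
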
*/ - unsigned nchildren; - const ctl_node_t *children; - int (*ctl)(const size_t *, size_t, void *, size_t *, - void *, size_t); -}; + size_t nchildren; + const ctl_node_t *children; + int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, + size_t); +} ctl_named_node_t; -struct ctl_indexed_node_s { - struct ctl_node_s node; - const ctl_named_node_t *(*index)(const size_t *, size_t, size_t); -}; +typedef struct ctl_indexed_node_s { + struct ctl_node_s node; + const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, + size_t); +} ctl_indexed_node_t; -struct ctl_arena_stats_s { - bool initialized; - unsigned nthreads; - const char *dss; - size_t pactive; - size_t pdirty; - arena_stats_t astats; +typedef struct ctl_arena_stats_s { + arena_stats_t astats; /* Aggregate stats for small size classes, based on bin stats. */ - size_t allocated_small; - uint64_t nmalloc_small; - uint64_t ndalloc_small; - uint64_t nrequests_small; - - malloc_bin_stats_t bstats[NBINS]; - malloc_large_stats_t *lstats; /* nlclasses elements. */ + size_t allocated_small; + uint64_t nmalloc_small; + uint64_t ndalloc_small; + uint64_t nrequests_small; + + malloc_bin_stats_t bstats[NBINS]; + malloc_large_stats_t lstats[NSIZES - NBINS]; +} ctl_arena_stats_t; + +typedef struct ctl_stats_s { + size_t allocated; + size_t active; + size_t metadata; + size_t resident; + size_t mapped; + size_t retained; + + background_thread_stats_t background_thread; + mutex_prof_data_t mutex_prof_data[mutex_prof_num_global_mutexes]; +} ctl_stats_t; + +typedef struct ctl_arena_s ctl_arena_t; +struct ctl_arena_s { + unsigned arena_ind; + bool initialized; + ql_elm(ctl_arena_t) destroyed_link; + + /* Basic stats, supported even if !config_stats. */ + unsigned nthreads; + const char *dss; + ssize_t dirty_decay_ms; + ssize_t muzzy_decay_ms; + size_t pactive; + size_t pdirty; + size_t pmuzzy; + + /* NULL if !config_stats. */ + ctl_arena_stats_t *astats; }; -struct ctl_stats_s { - size_t allocated; - size_t active; - size_t mapped; - struct { - size_t current; /* stats_chunks.curchunks */ - uint64_t total; /* stats_chunks.nchunks */ - size_t high; /* stats_chunks.highchunks */ - } chunks; - struct { - size_t allocated; /* huge_allocated */ - uint64_t nmalloc; /* huge_nmalloc */ - uint64_t ndalloc; /* huge_ndalloc */ - } huge; - unsigned narenas; - ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen); -int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp); - -int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, +typedef struct ctl_arenas_s { + uint64_t epoch; + unsigned narenas; + ql_head(ctl_arena_t) destroyed; + + /* + * Element 0 corresponds to merged stats for extant arenas (accessed via + * MALLCTL_ARENAS_ALL), element 1 corresponds to merged stats for + * destroyed arenas (accessed via MALLCTL_ARENAS_DESTROYED), and the + * remaining MALLOCX_ARENA_LIMIT elements correspond to arenas. 
+ */ + ctl_arena_t *arenas[2 + MALLOCX_ARENA_LIMIT]; +} ctl_arenas_t; + +int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -bool ctl_boot(void); -void ctl_prefork(void); -void ctl_postfork_parent(void); -void ctl_postfork_child(void); +int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp); -#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ +int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen); +bool ctl_boot(void); +void ctl_prefork(tsdn_t *tsdn); +void ctl_postfork_parent(tsdn_t *tsdn); +void ctl_postfork_child(tsdn_t *tsdn); + +#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ != 0) { \ malloc_printf( \ @@ -91,7 +110,7 @@ void ctl_postfork_child(void); } \ } while (0) -#define xmallctlnametomib(name, mibp, miblenp) do { \ +#define xmallctlnametomib(name, mibp, miblenp) do { \ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ malloc_printf("<jemalloc>: Failure in " \ "xmallctlnametomib(\"%s\", ...)\n", name); \ @@ -99,7 +118,7 @@ void ctl_postfork_child(void); } \ } while (0) -#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ +#define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ newlen) != 0) { \ malloc_write( \ @@ -108,10 +127,4 @@ void ctl_postfork_child(void); } \ } while (0) -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - +#endif /* JEMALLOC_INTERNAL_CTL_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/extent.h b/dep/jemalloc/include/jemalloc/internal/extent.h deleted file mode 100644 index ba95ca816bd..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/extent.h +++ /dev/null @@ -1,46 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct extent_node_s extent_node_t; - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -/* Tree of extents. */ -struct extent_node_s { - /* Linkage for the size/address-ordered tree. */ - rb_node(extent_node_t) link_szad; - - /* Linkage for the address-ordered tree. */ - rb_node(extent_node_t) link_ad; - - /* Profile counters, used for huge objects. */ - prof_ctx_t *prof_ctx; - - /* Pointer to the extent that this tree node is responsible for. */ - void *addr; - - /* Total region size. */ - size_t size; - - /* True if zero-filled; used by chunk recycling code. 
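The xmallctl() wrappers above are thin error-checking shims over je_mallctl(), the prefixed public entry point used throughout this vendored copy. A standalone sketch of the same call pattern, assuming a stats-enabled build and the usual install header (the include path may differ per build):

#include <stdint.h>
#include <stdio.h>
#include "jemalloc/jemalloc.h"

int
main(void) {
	uint64_t epoch = 1;
	size_t allocated, sz = sizeof(allocated);

	/* Flush cached stats, then read a counter by name. */
	je_mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
		printf("allocated: %zu bytes\n", allocated);
	}
	return 0;
}
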
*/ - bool zeroed; -}; -typedef rb_tree(extent_node_t) extent_tree_t; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -rb_proto(, extent_tree_szad_, extent_tree_t, extent_node_t) - -rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/dep/jemalloc/include/jemalloc/internal/extent_dss.h b/dep/jemalloc/include/jemalloc/internal/extent_dss.h new file mode 100644 index 00000000000..e8f02ce2ad2 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/extent_dss.h @@ -0,0 +1,26 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_DSS_H +#define JEMALLOC_INTERNAL_EXTENT_DSS_H + +typedef enum { + dss_prec_disabled = 0, + dss_prec_primary = 1, + dss_prec_secondary = 2, + + dss_prec_limit = 3 +} dss_prec_t; +#define DSS_PREC_DEFAULT dss_prec_secondary +#define DSS_DEFAULT "secondary" + +extern const char *dss_prec_names[]; + +extern const char *opt_dss; + +dss_prec_t extent_dss_prec_get(void); +bool extent_dss_prec_set(dss_prec_t dss_prec); +void *extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit); +bool extent_in_dss(void *addr); +bool extent_dss_mergeable(void *addr_a, void *addr_b); +void extent_dss_boot(void); + +#endif /* JEMALLOC_INTERNAL_EXTENT_DSS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/extent_externs.h b/dep/jemalloc/include/jemalloc/internal/extent_externs.h new file mode 100644 index 00000000000..489a813c80d --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/extent_externs.h @@ -0,0 +1,72 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H +#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/rb.h" +#include "jemalloc/internal/rtree.h" + +extern rtree_t extents_rtree; +extern const extent_hooks_t extent_hooks_default; +extern mutex_pool_t extent_mutex_pool; + +extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena); +void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent); + +extent_hooks_t *extent_hooks_get(arena_t *arena); +extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena, + extent_hooks_t *extent_hooks); + +#ifdef JEMALLOC_JET +size_t extent_size_quantize_floor(size_t size); +size_t extent_size_quantize_ceil(size_t size); +#endif + +rb_proto(, extent_avail_, extent_tree_t, extent_t) +ph_proto(, extent_heap_, extent_heap_t, extent_t) + +bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, + bool delay_coalesce); +extent_state_t extents_state_get(const extents_t *extents); +size_t extents_npages_get(extents_t *extents); +extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, + size_t size, size_t pad, size_t alignment, bool slab, szind_t szind, + bool *zero, bool *commit); +void extents_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent); +extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min); +void extents_prefork(tsdn_t *tsdn, extents_t *extents); +void 
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents); +void extents_postfork_child(tsdn_t *tsdn, extents_t *extents); +extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit); +void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent); +void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent); +void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent); +bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length); +extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b); +bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b); + +bool extent_boot(void); + +#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/extent_inlines.h b/dep/jemalloc/include/jemalloc/internal/extent_inlines.h new file mode 100644 index 00000000000..bb2bd699ed2 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/extent_inlines.h @@ -0,0 +1,407 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H +#define JEMALLOC_INTERNAL_EXTENT_INLINES_H + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/sz.h" + +static inline void +extent_lock(tsdn_t *tsdn, extent_t *extent) { + assert(extent != NULL); + mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent); +} + +static inline void +extent_unlock(tsdn_t *tsdn, extent_t *extent) { + assert(extent != NULL); + mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent); +} + +static inline void +extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { + assert(extent1 != NULL && extent2 != NULL); + mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, + (uintptr_t)extent2); +} + +static inline void +extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) { + assert(extent1 != NULL && extent2 != NULL); + mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1, + (uintptr_t)extent2); +} + +static inline arena_t * +extent_arena_get(const extent_t *extent) { + unsigned arena_ind = (unsigned)((extent->e_bits & + EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT); + /* + * The following check is omitted because we should never actually read + * a NULL arena pointer. 
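extent_lock()/extent_lock2() above rely on a pool of mutexes keyed by object address rather than a mutex embedded in every extent. A minimal sketch of that lock-striping idea, with an illustrative pool size and pointer hash (the real mutex_pool uses jemalloc's own mutex type and a stronger hash):

#include <pthread.h>
#include <stdint.h>

#define POOL_SIZE	256
static pthread_mutex_t pool[POOL_SIZE];

static void
pool_init(void) {
	for (int i = 0; i < POOL_SIZE; i++) {
		pthread_mutex_init(&pool[i], NULL);
	}
}

static pthread_mutex_t *
mutex_for(const void *obj) {
	/* Cheap pointer hash; low bits dropped for alignment. */
	return &pool[((uintptr_t)obj >> 4) % POOL_SIZE];
}

/*
 * Lock two objects without deadlocking: acquire in a fixed address
 * order, and only once if both hash to the same mutex -- the same
 * discipline mutex_pool_lock2() follows.
 */
static void
lock2(const void *a, const void *b) {
	pthread_mutex_t *ma = mutex_for(a), *mb = mutex_for(b);

	if (ma == mb) {
		pthread_mutex_lock(ma);
	} else if (ma < mb) {
		pthread_mutex_lock(ma);
		pthread_mutex_lock(mb);
	} else {
		pthread_mutex_lock(mb);
		pthread_mutex_lock(ma);
	}
}
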
+ */ + if (false && arena_ind >= MALLOCX_ARENA_LIMIT) { + return NULL; + } + assert(arena_ind < MALLOCX_ARENA_LIMIT); + return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE); +} + +static inline szind_t +extent_szind_get_maybe_invalid(const extent_t *extent) { + szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >> + EXTENT_BITS_SZIND_SHIFT); + assert(szind <= NSIZES); + return szind; +} + +static inline szind_t +extent_szind_get(const extent_t *extent) { + szind_t szind = extent_szind_get_maybe_invalid(extent); + assert(szind < NSIZES); /* Never call when "invalid". */ + return szind; +} + +static inline size_t +extent_usize_get(const extent_t *extent) { + return sz_index2size(extent_szind_get(extent)); +} + +static inline size_t +extent_sn_get(const extent_t *extent) { + return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >> + EXTENT_BITS_SN_SHIFT); +} + +static inline extent_state_t +extent_state_get(const extent_t *extent) { + return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >> + EXTENT_BITS_STATE_SHIFT); +} + +static inline bool +extent_zeroed_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >> + EXTENT_BITS_ZEROED_SHIFT); +} + +static inline bool +extent_committed_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >> + EXTENT_BITS_COMMITTED_SHIFT); +} + +static inline bool +extent_slab_get(const extent_t *extent) { + return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >> + EXTENT_BITS_SLAB_SHIFT); +} + +static inline unsigned +extent_nfree_get(const extent_t *extent) { + assert(extent_slab_get(extent)); + return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >> + EXTENT_BITS_NFREE_SHIFT); +} + +static inline void * +extent_base_get(const extent_t *extent) { + assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || + !extent_slab_get(extent)); + return PAGE_ADDR2BASE(extent->e_addr); +} + +static inline void * +extent_addr_get(const extent_t *extent) { + assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) || + !extent_slab_get(extent)); + return extent->e_addr; +} + +static inline size_t +extent_size_get(const extent_t *extent) { + return (extent->e_size_esn & EXTENT_SIZE_MASK); +} + +static inline size_t +extent_esn_get(const extent_t *extent) { + return (extent->e_size_esn & EXTENT_ESN_MASK); +} + +static inline size_t +extent_bsize_get(const extent_t *extent) { + return extent->e_bsize; +} + +static inline void * +extent_before_get(const extent_t *extent) { + return (void *)((uintptr_t)extent_base_get(extent) - PAGE); +} + +static inline void * +extent_last_get(const extent_t *extent) { + return (void *)((uintptr_t)extent_base_get(extent) + + extent_size_get(extent) - PAGE); +} + +static inline void * +extent_past_get(const extent_t *extent) { + return (void *)((uintptr_t)extent_base_get(extent) + + extent_size_get(extent)); +} + +static inline arena_slab_data_t * +extent_slab_data_get(extent_t *extent) { + assert(extent_slab_get(extent)); + return &extent->e_slab_data; +} + +static inline const arena_slab_data_t * +extent_slab_data_get_const(const extent_t *extent) { + assert(extent_slab_get(extent)); + return &extent->e_slab_data; +} + +static inline prof_tctx_t * +extent_prof_tctx_get(const extent_t *extent) { + return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx, + ATOMIC_ACQUIRE); +} + +static inline void +extent_arena_set(extent_t *extent, arena_t *arena) { + unsigned arena_ind = (arena != NULL) ? 
arena_ind_get(arena) : ((1U << + MALLOCX_ARENA_BITS) - 1); + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) | + ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT); +} + +static inline void +extent_addr_set(extent_t *extent, void *addr) { + extent->e_addr = addr; +} + +static inline void +extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) { + assert(extent_base_get(extent) == extent_addr_get(extent)); + + if (alignment < PAGE) { + unsigned lg_range = LG_PAGE - + lg_floor(CACHELINE_CEILING(alignment)); + size_t r = + prng_lg_range_zu(&extent_arena_get(extent)->offset_state, + lg_range, true); + uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE - + lg_range); + extent->e_addr = (void *)((uintptr_t)extent->e_addr + + random_offset); + assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) == + extent->e_addr); + } +} + +static inline void +extent_size_set(extent_t *extent, size_t size) { + assert((size & ~EXTENT_SIZE_MASK) == 0); + extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK); +} + +static inline void +extent_esn_set(extent_t *extent, size_t esn) { + extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn & + EXTENT_ESN_MASK); +} + +static inline void +extent_bsize_set(extent_t *extent, size_t bsize) { + extent->e_bsize = bsize; +} + +static inline void +extent_szind_set(extent_t *extent, szind_t szind) { + assert(szind <= NSIZES); /* NSIZES means "invalid". */ + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) | + ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT); +} + +static inline void +extent_nfree_set(extent_t *extent, unsigned nfree) { + assert(extent_slab_get(extent)); + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) | + ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_nfree_inc(extent_t *extent) { + assert(extent_slab_get(extent)); + extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_nfree_dec(extent_t *extent) { + assert(extent_slab_get(extent)); + extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT); +} + +static inline void +extent_sn_set(extent_t *extent, size_t sn) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) | + ((uint64_t)sn << EXTENT_BITS_SN_SHIFT); +} + +static inline void +extent_state_set(extent_t *extent, extent_state_t state) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) | + ((uint64_t)state << EXTENT_BITS_STATE_SHIFT); +} + +static inline void +extent_zeroed_set(extent_t *extent, bool zeroed) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) | + ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT); +} + +static inline void +extent_committed_set(extent_t *extent, bool committed) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) | + ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT); +} + +static inline void +extent_slab_set(extent_t *extent, bool slab) { + extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) | + ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT); +} + +static inline void +extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) { + atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE); +} + +static inline void +extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size, + bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed, + bool committed) { + assert(addr == PAGE_ADDR2BASE(addr) || !slab); + + extent_arena_set(extent, arena); + extent_addr_set(extent, addr); + extent_size_set(extent, size); + 
extent_slab_set(extent, slab); + extent_szind_set(extent, szind); + extent_sn_set(extent, sn); + extent_state_set(extent, state); + extent_zeroed_set(extent, zeroed); + extent_committed_set(extent, committed); + ql_elm_new(extent, ql_link); + if (config_prof) { + extent_prof_tctx_set(extent, NULL); + } +} + +static inline void +extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) { + extent_arena_set(extent, NULL); + extent_addr_set(extent, addr); + extent_bsize_set(extent, bsize); + extent_slab_set(extent, false); + extent_szind_set(extent, NSIZES); + extent_sn_set(extent, sn); + extent_state_set(extent, extent_state_active); + extent_zeroed_set(extent, true); + extent_committed_set(extent, true); +} + +static inline void +extent_list_init(extent_list_t *list) { + ql_new(list); +} + +static inline extent_t * +extent_list_first(const extent_list_t *list) { + return ql_first(list); +} + +static inline extent_t * +extent_list_last(const extent_list_t *list) { + return ql_last(list, ql_link); +} + +static inline void +extent_list_append(extent_list_t *list, extent_t *extent) { + ql_tail_insert(list, extent, ql_link); +} + +static inline void +extent_list_replace(extent_list_t *list, extent_t *to_remove, + extent_t *to_insert) { + ql_after_insert(to_remove, to_insert, ql_link); + ql_remove(list, to_remove, ql_link); +} + +static inline void +extent_list_remove(extent_list_t *list, extent_t *extent) { + ql_remove(list, extent, ql_link); +} + +static inline int +extent_sn_comp(const extent_t *a, const extent_t *b) { + size_t a_sn = extent_sn_get(a); + size_t b_sn = extent_sn_get(b); + + return (a_sn > b_sn) - (a_sn < b_sn); +} + +static inline int +extent_esn_comp(const extent_t *a, const extent_t *b) { + size_t a_esn = extent_esn_get(a); + size_t b_esn = extent_esn_get(b); + + return (a_esn > b_esn) - (a_esn < b_esn); +} + +static inline int +extent_ad_comp(const extent_t *a, const extent_t *b) { + uintptr_t a_addr = (uintptr_t)extent_addr_get(a); + uintptr_t b_addr = (uintptr_t)extent_addr_get(b); + + return (a_addr > b_addr) - (a_addr < b_addr); +} + +static inline int +extent_ead_comp(const extent_t *a, const extent_t *b) { + uintptr_t a_eaddr = (uintptr_t)a; + uintptr_t b_eaddr = (uintptr_t)b; + + return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr); +} + +static inline int +extent_snad_comp(const extent_t *a, const extent_t *b) { + int ret; + + ret = extent_sn_comp(a, b); + if (ret != 0) { + return ret; + } + + ret = extent_ad_comp(a, b); + return ret; +} + +static inline int +extent_esnead_comp(const extent_t *a, const extent_t *b) { + int ret; + + ret = extent_esn_comp(a, b); + if (ret != 0) { + return ret; + } + + ret = extent_ead_comp(a, b); + return ret; +} + +#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/extent_mmap.h b/dep/jemalloc/include/jemalloc/internal/extent_mmap.h new file mode 100644 index 00000000000..55f17ee4876 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/extent_mmap.h @@ -0,0 +1,10 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H +#define JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H + +extern bool opt_retain; + +void *extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, + bool *zero, bool *commit); +bool extent_dalloc_mmap(void *addr, size_t size); + +#endif /* JEMALLOC_INTERNAL_EXTENT_MMAP_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/extent_structs.h b/dep/jemalloc/include/jemalloc/internal/extent_structs.h new file mode 100644 index 00000000000..d2979503458 --- 
/dev/null +++ b/dep/jemalloc/include/jemalloc/internal/extent_structs.h @@ -0,0 +1,199 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H +#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bitmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/rb.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/size_classes.h" + +typedef enum { + extent_state_active = 0, + extent_state_dirty = 1, + extent_state_muzzy = 2, + extent_state_retained = 3 +} extent_state_t; + +/* Extent (span of pages). Use accessor functions for e_* fields. */ +struct extent_s { + /* + * Bitfield containing several fields: + * + * a: arena_ind + * b: slab + * c: committed + * z: zeroed + * t: state + * i: szind + * f: nfree + * n: sn + * + * nnnnnnnn ... nnnnnfff fffffffi iiiiiiit tzcbaaaa aaaaaaaa + * + * arena_ind: Arena from which this extent came, or all 1 bits if + * unassociated. + * + * slab: The slab flag indicates whether the extent is used for a slab + * of small regions. This helps differentiate small size classes, + * and it indicates whether interior pointers can be looked up via + * iealloc(). + * + * committed: The committed flag indicates whether physical memory is + * committed to the extent, whether explicitly or implicitly + * as on a system that overcommits and satisfies physical + * memory needs on demand via soft page faults. + * + * zeroed: The zeroed flag is used by extent recycling code to track + * whether memory is zero-filled. + * + * state: The state flag is an extent_state_t. + * + * szind: The szind flag indicates usable size class index for + * allocations residing in this extent, regardless of whether the + * extent is a slab. Extent size and usable size often differ + * even for non-slabs, either due to sz_large_pad or promotion of + * sampled small regions. + * + * nfree: Number of free regions in slab. + * + * sn: Serial number (potentially non-unique). + * + * Serial numbers may wrap around if !opt_retain, but as long as + * comparison functions fall back on address comparison for equal + * serial numbers, stable (if imperfect) ordering is maintained. + * + * Serial numbers may not be unique even in the absence of + * wrap-around, e.g. when splitting an extent and assigning the same + * serial number to both resulting adjacent extents. 
+ */ + uint64_t e_bits; +#define EXTENT_BITS_ARENA_SHIFT 0 +#define EXTENT_BITS_ARENA_MASK \ + (((uint64_t)(1U << MALLOCX_ARENA_BITS) - 1) << EXTENT_BITS_ARENA_SHIFT) + +#define EXTENT_BITS_SLAB_SHIFT MALLOCX_ARENA_BITS +#define EXTENT_BITS_SLAB_MASK \ + ((uint64_t)0x1U << EXTENT_BITS_SLAB_SHIFT) + +#define EXTENT_BITS_COMMITTED_SHIFT (MALLOCX_ARENA_BITS + 1) +#define EXTENT_BITS_COMMITTED_MASK \ + ((uint64_t)0x1U << EXTENT_BITS_COMMITTED_SHIFT) + +#define EXTENT_BITS_ZEROED_SHIFT (MALLOCX_ARENA_BITS + 2) +#define EXTENT_BITS_ZEROED_MASK \ + ((uint64_t)0x1U << EXTENT_BITS_ZEROED_SHIFT) + +#define EXTENT_BITS_STATE_SHIFT (MALLOCX_ARENA_BITS + 3) +#define EXTENT_BITS_STATE_MASK \ + ((uint64_t)0x3U << EXTENT_BITS_STATE_SHIFT) + +#define EXTENT_BITS_SZIND_SHIFT (MALLOCX_ARENA_BITS + 5) +#define EXTENT_BITS_SZIND_MASK \ + (((uint64_t)(1U << LG_CEIL_NSIZES) - 1) << EXTENT_BITS_SZIND_SHIFT) + +#define EXTENT_BITS_NFREE_SHIFT \ + (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES) +#define EXTENT_BITS_NFREE_MASK \ + ((uint64_t)((1U << (LG_SLAB_MAXREGS + 1)) - 1) << EXTENT_BITS_NFREE_SHIFT) + +#define EXTENT_BITS_SN_SHIFT \ + (MALLOCX_ARENA_BITS + 5 + LG_CEIL_NSIZES + (LG_SLAB_MAXREGS + 1)) +#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT) + + /* Pointer to the extent that this structure is responsible for. */ + void *e_addr; + + union { + /* + * Extent size and serial number associated with the extent + * structure (different than the serial number for the extent at + * e_addr). + * + * ssssssss [...] ssssssss ssssnnnn nnnnnnnn + */ + size_t e_size_esn; + #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1)) + #define EXTENT_ESN_MASK ((size_t)PAGE-1) + /* Base extent size, which may not be a multiple of PAGE. */ + size_t e_bsize; + }; + + union { + /* + * List linkage, used by a variety of lists: + * - arena_bin_t's slabs_full + * - extents_t's LRU + * - stashed dirty extents + * - arena's large allocations + */ + ql_elm(extent_t) ql_link; + /* Red-black tree linkage, used by arena's extent_avail. */ + rb_node(extent_t) rb_link; + }; + + /* Linkage for per size class sn/address-ordered heaps. */ + phn(extent_t) ph_link; + + union { + /* Small region slab metadata. */ + arena_slab_data_t e_slab_data; + + /* + * Profile counters, used for large objects. Points to a + * prof_tctx_t. + */ + atomic_p_t e_prof_tctx; + }; +}; +typedef ql_head(extent_t) extent_list_t; +typedef rb_tree(extent_t) extent_tree_t; +typedef ph(extent_t) extent_heap_t; + +/* Quantized collection of extents, with built-in LRU queue. */ +struct extents_s { + malloc_mutex_t mtx; + + /* + * Quantized per size class heaps of extents. + * + * Synchronization: mtx. + */ + extent_heap_t heaps[NPSIZES+1]; + + /* + * Bitmap for which set bits correspond to non-empty heaps. + * + * Synchronization: mtx. + */ + bitmap_t bitmap[BITMAP_GROUPS(NPSIZES+1)]; + + /* + * LRU of all extents in heaps. + * + * Synchronization: mtx. + */ + extent_list_t lru; + + /* + * Page sum for all extents in heaps. + * + * The synchronization here is a little tricky. Modifications to npages + * must hold mtx, but reads need not (though, a reader who sees npages + * without holding the mutex can't assume anything about the rest of the + * state of the extents_t). + */ + atomic_zu_t npages; + + /* All stored extents must be in the same state. */ + extent_state_t state; + + /* + * If true, delay coalescing until eviction; otherwise coalesce during + * deallocation. 
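All of the EXTENT_BITS_*_SHIFT/MASK accessors seen earlier follow one shift-and-mask pattern over e_bits. A self-contained toy version with an invented 2-bit field, just to make the packing explicit:

#include <assert.h>
#include <stdint.h>

/* Example widths only; not the real MALLOCX_ARENA_BITS layout. */
#define FIELD_SHIFT	12
#define FIELD_MASK	(((uint64_t)0x3U) << FIELD_SHIFT)	/* 2 bits */

static inline uint64_t
field_set(uint64_t bits, uint64_t v) {
	/* Clear the field, then OR in the new value at its shift. */
	return (bits & ~FIELD_MASK) | ((v << FIELD_SHIFT) & FIELD_MASK);
}

static inline uint64_t
field_get(uint64_t bits) {
	return (bits & FIELD_MASK) >> FIELD_SHIFT;
}

int
main(void) {
	uint64_t bits = 0;
	bits = field_set(bits, 2);	/* e.g. a 2-bit state field */
	assert(field_get(bits) == 2);
	return 0;
}
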
+ */ + bool delay_coalesce; +}; + +#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/extent_types.h b/dep/jemalloc/include/jemalloc/internal/extent_types.h new file mode 100644 index 00000000000..b6905ce1055 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/extent_types.h @@ -0,0 +1,9 @@ +#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H +#define JEMALLOC_INTERNAL_EXTENT_TYPES_H + +typedef struct extent_s extent_t; +typedef struct extents_s extents_t; + +#define EXTENT_HOOKS_INITIALIZER NULL + +#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/hash.h b/dep/jemalloc/include/jemalloc/internal/hash.h index c7183ede82d..188296cf0e2 100644 --- a/dep/jemalloc/include/jemalloc/internal/hash.h +++ b/dep/jemalloc/include/jemalloc/internal/hash.h @@ -1,92 +1,76 @@ +#ifndef JEMALLOC_INTERNAL_HASH_H +#define JEMALLOC_INTERNAL_HASH_H + +#include "jemalloc/internal/assert.h" + /* * The following hash function is based on MurmurHash3, placed into the public - * domain by Austin Appleby. See http://code.google.com/p/smhasher/ for + * domain by Austin Appleby. See https://github.com/aappleby/smhasher for * details. */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -uint32_t hash_x86_32(const void *key, int len, uint32_t seed); -void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]); -void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]); -void hash(const void *key, size_t len, const uint32_t seed, - size_t r_hash[2]); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) /******************************************************************************/ /* Internal implementation. */ -JEMALLOC_INLINE uint32_t -hash_rotl_32(uint32_t x, int8_t r) -{ - - return (x << r) | (x >> (32 - r)); +static inline uint32_t +hash_rotl_32(uint32_t x, int8_t r) { + return ((x << r) | (x >> (32 - r))); } -JEMALLOC_INLINE uint64_t -hash_rotl_64(uint64_t x, int8_t r) -{ - return (x << r) | (x >> (64 - r)); +static inline uint64_t +hash_rotl_64(uint64_t x, int8_t r) { + return ((x << r) | (x >> (64 - r))); } -JEMALLOC_INLINE uint32_t -hash_get_block_32(const uint32_t *p, int i) -{ +static inline uint32_t +hash_get_block_32(const uint32_t *p, int i) { + /* Handle unaligned read. */ + if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { + uint32_t ret; - return (p[i]); + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); + return ret; + } + + return p[i]; } -JEMALLOC_INLINE uint64_t -hash_get_block_64(const uint64_t *p, int i) -{ +static inline uint64_t +hash_get_block_64(const uint64_t *p, int i) { + /* Handle unaligned read. 
*/ + if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { + uint64_t ret; - return (p[i]); -} + memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); + return ret; + } -JEMALLOC_INLINE uint32_t -hash_fmix_32(uint32_t h) -{ + return p[i]; +} +static inline uint32_t +hash_fmix_32(uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; - return (h); + return h; } -JEMALLOC_INLINE uint64_t -hash_fmix_64(uint64_t k) -{ - +static inline uint64_t +hash_fmix_64(uint64_t k) { k ^= k >> 33; - k *= QU(0xff51afd7ed558ccdLLU); + k *= KQU(0xff51afd7ed558ccd); k ^= k >> 33; - k *= QU(0xc4ceb9fe1a85ec53LLU); + k *= KQU(0xc4ceb9fe1a85ec53); k ^= k >> 33; - return (k); + return k; } -JEMALLOC_INLINE uint32_t -hash_x86_32(const void *key, int len, uint32_t seed) -{ +static inline uint32_t +hash_x86_32(const void *key, int len, uint32_t seed) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 4; @@ -132,13 +116,12 @@ hash_x86_32(const void *key, int len, uint32_t seed) h1 = hash_fmix_32(h1); - return (h1); + return h1; } -UNUSED JEMALLOC_INLINE void +UNUSED static inline void hash_x86_128(const void *key, const int len, uint32_t seed, - uint64_t r_out[2]) -{ + uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; @@ -237,18 +220,17 @@ hash_x86_128(const void *key, const int len, uint32_t seed, r_out[1] = (((uint64_t) h4) << 32) | h3; } -UNUSED JEMALLOC_INLINE void +UNUSED static inline void hash_x64_128(const void *key, const int len, const uint32_t seed, - uint64_t r_out[2]) -{ + uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; - const uint64_t c1 = QU(0x87c37b91114253d5LLU); - const uint64_t c2 = QU(0x4cf5ad432745937fLLU); + const uint64_t c1 = KQU(0x87c37b91114253d5); + const uint64_t c2 = KQU(0x4cf5ad432745937f); /* body */ { @@ -317,19 +299,20 @@ hash_x64_128(const void *key, const int len, const uint32_t seed, /******************************************************************************/ /* API. */ -JEMALLOC_INLINE void -hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) -{ +static inline void +hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { + assert(len <= INT_MAX); /* Unfortunate implementation limitation. 
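The new hash_get_block_32()/hash_get_block_64() handle misaligned keys with the portable memcpy idiom shown above; compilers lower it to a single load on architectures that permit unaligned access. A standalone illustration of why the idiom is safe:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t
load32(const void *p) {
	uint32_t v;

	/* memcpy is defined for any alignment; a direct *(uint32_t *)p
	 * cast would be undefined behavior on a misaligned pointer. */
	memcpy(&v, p, sizeof(v));
	return v;
}

int
main(void) {
	unsigned char buf[5];
	uint32_t v = 0xdeadbeefU;

	memcpy(buf + 1, &v, sizeof(v));	/* deliberately misaligned */
	assert(load32(buf + 1) == v);	/* safe regardless of alignment */
	return 0;
}
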
*/ + #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) - hash_x64_128(key, len, seed, (uint64_t *)r_hash); + hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); #else - uint64_t hashes[2]; - hash_x86_128(key, len, seed, hashes); - r_hash[0] = (size_t)hashes[0]; - r_hash[1] = (size_t)hashes[1]; + { + uint64_t hashes[2]; + hash_x86_128(key, (int)len, seed, hashes); + r_hash[0] = (size_t)hashes[0]; + r_hash[1] = (size_t)hashes[1]; + } #endif } -#endif -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_HASH_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/hooks.h b/dep/jemalloc/include/jemalloc/internal/hooks.h new file mode 100644 index 00000000000..cd49afcb094 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/hooks.h @@ -0,0 +1,19 @@ +#ifndef JEMALLOC_INTERNAL_HOOKS_H +#define JEMALLOC_INTERNAL_HOOKS_H + +extern JEMALLOC_EXPORT void (*hooks_arena_new_hook)(); +extern JEMALLOC_EXPORT void (*hooks_libc_hook)(); + +#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn) + +#define open JEMALLOC_HOOK(open, hooks_libc_hook) +#define read JEMALLOC_HOOK(read, hooks_libc_hook) +#define write JEMALLOC_HOOK(write, hooks_libc_hook) +#define readlink JEMALLOC_HOOK(readlink, hooks_libc_hook) +#define close JEMALLOC_HOOK(close, hooks_libc_hook) +#define creat JEMALLOC_HOOK(creat, hooks_libc_hook) +#define secure_getenv JEMALLOC_HOOK(secure_getenv, hooks_libc_hook) +/* Note that this is undef'd and re-define'd in src/prof.c. */ +#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook) + +#endif /* JEMALLOC_INTERNAL_HOOKS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/huge.h b/dep/jemalloc/include/jemalloc/internal/huge.h deleted file mode 100644 index a2b9c779191..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/huge.h +++ /dev/null @@ -1,46 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -/* Huge allocation statistics. */ -extern uint64_t huge_nmalloc; -extern uint64_t huge_ndalloc; -extern size_t huge_allocated; - -/* Protects chunk-related data structures. 
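The JEMALLOC_HOOK macro in the new hooks.h uses the comma operator to run an optional hook for its side effect and then evaluate to the wrapped function itself, so the call site's arguments still bind to the real function. A small standalone imitation (HOOKED, libc_hook, and trace are illustrative names, not part of the diff):

#include <fcntl.h>
#include <stdio.h>

static void (*libc_hook)(void);

/* Run the hook if set, then yield fn unchanged: HOOKED(open)(...)
 * calls the hook first and then open() with the original arguments. */
#define HOOKED(fn) ((void)(libc_hook != NULL && (libc_hook(), 0)), fn)

static void trace(void) { fputs("libc call intercepted\n", stderr); }

int
main(void) {
	libc_hook = trace;
	int fd = HOOKED(open)("/dev/null", O_RDONLY);
	return fd < 0;
}
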
*/ -extern malloc_mutex_t huge_mtx; - -void *huge_malloc(size_t size, bool zero, dss_prec_t dss_prec); -void *huge_palloc(size_t size, size_t alignment, bool zero, - dss_prec_t dss_prec); -bool huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, - size_t extra); -void *huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec); -#ifdef JEMALLOC_JET -typedef void (huge_dalloc_junk_t)(void *, size_t); -extern huge_dalloc_junk_t *huge_dalloc_junk; -#endif -void huge_dalloc(void *ptr, bool unmap); -size_t huge_salloc(const void *ptr); -dss_prec_t huge_dss_prec_get(arena_t *arena); -prof_ctx_t *huge_prof_ctx_get(const void *ptr); -void huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx); -bool huge_boot(void); -void huge_prefork(void); -void huge_postfork_parent(void); -void huge_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h deleted file mode 100644 index cf171326c29..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal.h +++ /dev/null @@ -1,1059 +0,0 @@ -#ifndef JEMALLOC_INTERNAL_H -#define JEMALLOC_INTERNAL_H -#include <math.h> -#ifdef _WIN32 -# include <windows.h> -# define ENOENT ERROR_PATH_NOT_FOUND -# define EINVAL ERROR_BAD_ARGUMENTS -# define EAGAIN ERROR_OUTOFMEMORY -# define EPERM ERROR_WRITE_FAULT -# define EFAULT ERROR_INVALID_ADDRESS -# define ENOMEM ERROR_NOT_ENOUGH_MEMORY -# undef ERANGE -# define ERANGE ERROR_INVALID_DATA -#else -# include <sys/param.h> -# include <sys/mman.h> -# include <sys/syscall.h> -# if !defined(SYS_write) && defined(__NR_write) -# define SYS_write __NR_write -# endif -# include <sys/uio.h> -# include <pthread.h> -# include <errno.h> -#endif -#include <sys/types.h> - -#include <limits.h> -#ifndef SIZE_T_MAX -# define SIZE_T_MAX SIZE_MAX -#endif -#include <stdarg.h> -#include <stdbool.h> -#include <stdio.h> -#include <stdlib.h> -#include <stdint.h> -#include <stddef.h> -#ifndef offsetof -# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) -#endif -#include <inttypes.h> -#include <string.h> -#include <strings.h> -#include <ctype.h> -#ifdef _MSC_VER -# include <io.h> -typedef intptr_t ssize_t; -# define PATH_MAX 1024 -# define STDERR_FILENO 2 -# define __func__ __FUNCTION__ -/* Disable warnings about deprecated system functions */ -# pragma warning(disable: 4996) -#else -# include <unistd.h> -#endif -#include <fcntl.h> - -#include "jemalloc_defs.h" - -#ifdef JEMALLOC_UTRACE -#include <sys/ktrace.h> -#endif - -#ifdef JEMALLOC_VALGRIND -#include <valgrind/valgrind.h> -#include <valgrind/memcheck.h> -#endif - -#define JEMALLOC_NO_DEMANGLE -#include "../jemalloc.h" -#include "jemalloc/internal/private_namespace.h" - -#ifdef JEMALLOC_CC_SILENCE -#define UNUSED JEMALLOC_ATTR(unused) -#else -#define UNUSED -#endif - -static const bool config_debug = -#ifdef JEMALLOC_DEBUG - true -#else - false -#endif - ; -static const bool config_dss = -#ifdef JEMALLOC_DSS - true -#else - false -#endif - ; -static const bool config_fill = -#ifdef JEMALLOC_FILL - true -#else - false -#endif - ; -static const bool config_lazy_lock = -#ifdef JEMALLOC_LAZY_LOCK - true -#else - false -#endif - ; 
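The config_* pattern in this deleted header (and carried forward into jemalloc 5.x) encodes each compile-time option as a static const bool, so feature checks stay ordinary if statements that the optimizer folds away when the option is off. A minimal sketch with a hypothetical counter:

#include <stdbool.h>

static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;

static inline void
account_alloc(unsigned long *counter, unsigned long n) {
	/* Dead code when JEMALLOC_STATS is unset, but still parsed and
	 * type-checked, unlike an #ifdef'd-out block. */
	if (config_stats) {
		*counter += n;
	}
}
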
-static const bool config_prof = -#ifdef JEMALLOC_PROF - true -#else - false -#endif - ; -static const bool config_prof_libgcc = -#ifdef JEMALLOC_PROF_LIBGCC - true -#else - false -#endif - ; -static const bool config_prof_libunwind = -#ifdef JEMALLOC_PROF_LIBUNWIND - true -#else - false -#endif - ; -static const bool config_mremap = -#ifdef JEMALLOC_MREMAP - true -#else - false -#endif - ; -static const bool config_munmap = -#ifdef JEMALLOC_MUNMAP - true -#else - false -#endif - ; -static const bool config_stats = -#ifdef JEMALLOC_STATS - true -#else - false -#endif - ; -static const bool config_tcache = -#ifdef JEMALLOC_TCACHE - true -#else - false -#endif - ; -static const bool config_tls = -#ifdef JEMALLOC_TLS - true -#else - false -#endif - ; -static const bool config_utrace = -#ifdef JEMALLOC_UTRACE - true -#else - false -#endif - ; -static const bool config_valgrind = -#ifdef JEMALLOC_VALGRIND - true -#else - false -#endif - ; -static const bool config_xmalloc = -#ifdef JEMALLOC_XMALLOC - true -#else - false -#endif - ; -static const bool config_ivsalloc = -#ifdef JEMALLOC_IVSALLOC - true -#else - false -#endif - ; - -#ifdef JEMALLOC_ATOMIC9 -#include <machine/atomic.h> -#endif - -#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) -#include <libkern/OSAtomic.h> -#endif - -#ifdef JEMALLOC_ZONE -#include <mach/mach_error.h> -#include <mach/mach_init.h> -#include <mach/vm_map.h> -#include <malloc/malloc.h> -#endif - -#define RB_COMPACT -#include "jemalloc/internal/rb.h" -#include "jemalloc/internal/qr.h" -#include "jemalloc/internal/ql.h" - -/* - * jemalloc can conceptually be broken into components (arena, tcache, etc.), - * but there are circular dependencies that cannot be broken without - * substantial performance degradation. In order to reduce the effect on - * visual code flow, read the header files in multiple passes, with one of the - * following cpp variables defined during each pass: - * - * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data - * types. - * JEMALLOC_H_STRUCTS : Data structures. - * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. - * JEMALLOC_H_INLINES : Inline functions. - */ -/******************************************************************************/ -#define JEMALLOC_H_TYPES - -#ifndef JEMALLOC_HAS_RESTRICT -# define restrict -#endif - -#define MALLOCX_LG_ALIGN_MASK ((int)0x3f) -#define ALLOCM_LG_ALIGN_MASK ((int)0x3f) - -#define ZU(z) ((size_t)z) -#define QU(q) ((uint64_t)q) - -#ifndef __DECONST -# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) -#endif - -#if defined(JEMALLOC_DEBUG) - /* Disable inlining to make debugging easier. */ -# define JEMALLOC_ALWAYS_INLINE -# define JEMALLOC_ALWAYS_INLINE_C static -# define JEMALLOC_INLINE -# define JEMALLOC_INLINE_C static -# define inline -#else -# define JEMALLOC_ENABLE_INLINE -# ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ALWAYS_INLINE \ - static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline) -# define JEMALLOC_ALWAYS_INLINE_C \ - static inline JEMALLOC_ATTR(always_inline) -# else -# define JEMALLOC_ALWAYS_INLINE static inline -# define JEMALLOC_ALWAYS_INLINE_C static inline -# endif -# define JEMALLOC_INLINE static inline -# define JEMALLOC_INLINE_C static inline -# ifdef _MSC_VER -# define inline _inline -# endif -#endif - -/* Smallest size class to support. */ -#define LG_TINY_MIN 3 -#define TINY_MIN (1U << LG_TINY_MIN) - -/* - * Minimum alignment of allocations is 2^LG_QUANTUM bytes (ignoring tiny size - * classes). 
- */ -#ifndef LG_QUANTUM -# if (defined(__i386__) || defined(_M_IX86)) -# define LG_QUANTUM 4 -# endif -# ifdef __ia64__ -# define LG_QUANTUM 4 -# endif -# ifdef __alpha__ -# define LG_QUANTUM 4 -# endif -# ifdef __sparc64__ -# define LG_QUANTUM 4 -# endif -# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) -# define LG_QUANTUM 4 -# endif -# ifdef __arm__ -# define LG_QUANTUM 3 -# endif -# ifdef __aarch64__ -# define LG_QUANTUM 4 -# endif -# ifdef __hppa__ -# define LG_QUANTUM 4 -# endif -# ifdef __mips__ -# define LG_QUANTUM 3 -# endif -# ifdef __powerpc__ -# define LG_QUANTUM 4 -# endif -# ifdef __s390__ -# define LG_QUANTUM 4 -# endif -# ifdef __SH4__ -# define LG_QUANTUM 4 -# endif -# ifdef __tile__ -# define LG_QUANTUM 4 -# endif -# ifndef LG_QUANTUM -# error "No LG_QUANTUM definition for architecture; specify via CPPFLAGS" -# endif -#endif - -#define QUANTUM ((size_t)(1U << LG_QUANTUM)) -#define QUANTUM_MASK (QUANTUM - 1) - -/* Return the smallest quantum multiple that is >= a. */ -#define QUANTUM_CEILING(a) \ - (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) - -#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) -#define LONG_MASK (LONG - 1) - -/* Return the smallest long multiple that is >= a. */ -#define LONG_CEILING(a) \ - (((a) + LONG_MASK) & ~LONG_MASK) - -#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) -#define PTR_MASK (SIZEOF_PTR - 1) - -/* Return the smallest (void *) multiple that is >= a. */ -#define PTR_CEILING(a) \ - (((a) + PTR_MASK) & ~PTR_MASK) - -/* - * Maximum size of L1 cache line. This is used to avoid cache line aliasing. - * In addition, this controls the spacing of cacheline-spaced size classes. - * - * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can - * only handle raw constants. - */ -#define LG_CACHELINE 6 -#define CACHELINE 64 -#define CACHELINE_MASK (CACHELINE - 1) - -/* Return the smallest cacheline multiple that is >= s. */ -#define CACHELINE_CEILING(s) \ - (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) - -/* Page size. STATIC_PAGE_SHIFT is determined by the configure script. */ -#ifdef PAGE_MASK -# undef PAGE_MASK -#endif -#define LG_PAGE STATIC_PAGE_SHIFT -#define PAGE ((size_t)(1U << STATIC_PAGE_SHIFT)) -#define PAGE_MASK ((size_t)(PAGE - 1)) - -/* Return the smallest pagesize multiple that is >= s. */ -#define PAGE_CEILING(s) \ - (((s) + PAGE_MASK) & ~PAGE_MASK) - -/* Return the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2BASE(a, alignment) \ - ((void *)((uintptr_t)(a) & (-(alignment)))) - -/* Return the offset between a and the nearest aligned address at or below a. */ -#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ - ((size_t)((uintptr_t)(a) & (alignment - 1))) - -/* Return the smallest alignment multiple that is >= s. */ -#define ALIGNMENT_CEILING(s, alignment) \ - (((s) + (alignment - 1)) & (-(alignment))) - -/* Declare a variable length array */ -#if __STDC_VERSION__ < 199901L -# ifdef _MSC_VER -# include <malloc.h> -# define alloca _alloca -# else -# ifdef JEMALLOC_HAS_ALLOCA_H -# include <alloca.h> -# else -# include <stdlib.h> -# endif -# endif -# define VARIABLE_ARRAY(type, name, count) \ - type *name = alloca(sizeof(type) * count) -#else -# define VARIABLE_ARRAY(type, name, count) type name[count] -#endif - -#ifdef JEMALLOC_VALGRIND -/* - * The JEMALLOC_VALGRIND_*() macros must be macros rather than functions - * so that when Valgrind reports errors, there are no extra stack frames - * in the backtraces. 
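QUANTUM_CEILING, PAGE_CEILING, and CACHELINE_CEILING above are all the same power-of-two round-up idiom: add the mask, then clear it. A standalone check, assuming LG_QUANTUM == 4 (the x86-64 value from the table above):

#include <assert.h>
#include <stddef.h>

#define QUANTUM		((size_t)16)	/* 1U << LG_QUANTUM */
#define QUANTUM_MASK	(QUANTUM - 1)
#define QUANTUM_CEILING(a)	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)

int
main(void) {
	assert(QUANTUM_CEILING((size_t)25) == 32);	/* rounded up */
	assert(QUANTUM_CEILING((size_t)32) == 32);	/* already aligned */
	return 0;
}
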
- * - * The size that is reported to valgrind must be consistent through a chain of - * malloc..realloc..realloc calls. Request size isn't recorded anywhere in - * jemalloc, so it is critical that all callers of these macros provide usize - * rather than request size. As a result, buffer overflow detection is - * technically weakened for the standard API, though it is generally accepted - * practice to consider any extra bytes reported by malloc_usable_size() as - * usable space. - */ -#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \ - if (config_valgrind && opt_valgrind && cond) \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \ -} while (0) -#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ - old_rzsize, zero) do { \ - if (config_valgrind && opt_valgrind) { \ - size_t rzsize = p2rz(ptr); \ - \ - if (ptr == old_ptr) { \ - VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ - usize, rzsize); \ - if (zero && old_usize < usize) { \ - VALGRIND_MAKE_MEM_DEFINED( \ - (void *)((uintptr_t)ptr + \ - old_usize), usize - old_usize); \ - } \ - } else { \ - if (old_ptr != NULL) { \ - VALGRIND_FREELIKE_BLOCK(old_ptr, \ - old_rzsize); \ - } \ - if (ptr != NULL) { \ - size_t copy_size = (old_usize < usize) \ - ? old_usize : usize; \ - size_t tail_size = usize - copy_size; \ - VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ - rzsize, false); \ - if (copy_size > 0) { \ - VALGRIND_MAKE_MEM_DEFINED(ptr, \ - copy_size); \ - } \ - if (zero && tail_size > 0) { \ - VALGRIND_MAKE_MEM_DEFINED( \ - (void *)((uintptr_t)ptr + \ - copy_size), tail_size); \ - } \ - } \ - } \ - } \ -} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ - if (config_valgrind && opt_valgrind) \ - VALGRIND_FREELIKE_BLOCK(ptr, rzsize); \ -} while (0) -#else -#define RUNNING_ON_VALGRIND ((unsigned)0) -#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \ - do {} while (0) -#define VALGRIND_RESIZEINPLACE_BLOCK(addr, oldSizeB, newSizeB, rzB) \ - do {} while (0) -#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0) -#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr, _qzz_len) do {} while (0) -#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr, _qzz_len) do {} while (0) -#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr, _qzz_len) do {} while (0) -#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_REALLOC(ptr, usize, old_ptr, old_usize, \ - old_rzsize, zero) do {} while (0) -#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) -#endif - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_TYPES -/******************************************************************************/ -#define JEMALLOC_H_STRUCTS - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include 
"jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -typedef struct { - uint64_t allocated; - uint64_t deallocated; -} thread_allocated_t; -/* - * The JEMALLOC_ARG_CONCAT() wrapper is necessary to pass {0, 0} via a cpp macro - * argument. - */ -#define THREAD_ALLOCATED_INITIALIZER JEMALLOC_ARG_CONCAT({0, 0}) - -#undef JEMALLOC_H_STRUCTS -/******************************************************************************/ -#define JEMALLOC_H_EXTERNS - -extern bool opt_abort; -extern bool opt_junk; -extern size_t opt_quarantine; -extern bool opt_redzone; -extern bool opt_utrace; -extern bool opt_valgrind; -extern bool opt_xmalloc; -extern bool opt_zero; -extern size_t opt_narenas; - -/* Number of CPUs. */ -extern unsigned ncpus; - -/* Protects arenas initialization (arenas, arenas_total). */ -extern malloc_mutex_t arenas_lock; -/* - * Arenas that are used to service external requests. Not all elements of the - * arenas array are necessarily used; arenas are created lazily as needed. - * - * arenas[0..narenas_auto) are used for automatic multiplexing of threads and - * arenas. arenas[narenas_auto..narenas_total) are only used if the application - * takes some action to create them and allocate from them. - */ -extern arena_t **arenas; -extern unsigned narenas_total; -extern unsigned narenas_auto; /* Read-only after initialization. 
*/ - -arena_t *arenas_extend(unsigned ind); -void arenas_cleanup(void *arg); -arena_t *choose_arena_hard(void); -void jemalloc_prefork(void); -void jemalloc_postfork_parent(void); -void jemalloc_postfork_child(void); - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/arena.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" -#include "jemalloc/internal/rtree.h" -#include "jemalloc/internal/tcache.h" -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_EXTERNS -/******************************************************************************/ -#define JEMALLOC_H_INLINES - -#include "jemalloc/internal/util.h" -#include "jemalloc/internal/atomic.h" -#include "jemalloc/internal/prng.h" -#include "jemalloc/internal/ckh.h" -#include "jemalloc/internal/size_classes.h" -#include "jemalloc/internal/stats.h" -#include "jemalloc/internal/ctl.h" -#include "jemalloc/internal/mutex.h" -#include "jemalloc/internal/tsd.h" -#include "jemalloc/internal/mb.h" -#include "jemalloc/internal/extent.h" -#include "jemalloc/internal/base.h" -#include "jemalloc/internal/chunk.h" -#include "jemalloc/internal/huge.h" - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), arenas, arena_t *) - -size_t s2u(size_t size); -size_t sa2u(size_t size, size_t alignment); -unsigned narenas_total_get(void); -arena_t *choose_arena(arena_t *arena); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -/* - * Map of pthread_self() --> arenas[???], used for selecting an arena to use - * for allocations. - */ -malloc_tsd_externs(arenas, arena_t *) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, arenas, arena_t *, NULL, - arenas_cleanup) - -/* - * Compute usable size that would result from allocating an object with the - * specified size. - */ -JEMALLOC_ALWAYS_INLINE size_t -s2u(size_t size) -{ - - if (size <= SMALL_MAXCLASS) - return (arena_bin_info[SMALL_SIZE2BIN(size)].reg_size); - if (size <= arena_maxclass) - return (PAGE_CEILING(size)); - return (CHUNK_CEILING(size)); -} - -/* - * Compute usable size that would result from allocating an object with the - * specified size and alignment. - */ -JEMALLOC_ALWAYS_INLINE size_t -sa2u(size_t size, size_t alignment) -{ - size_t usize; - - assert(alignment != 0 && ((alignment - 1) & alignment) == 0); - - /* - * Round size up to the nearest multiple of alignment. - * - * This done, we can take advantage of the fact that for each small - * size class, every object is aligned at the smallest power of two - * that is non-zero in the base two representation of the size. For - * example: - * - * Size | Base 2 | Minimum alignment - * -----+----------+------------------ - * 96 | 1100000 | 32 - * 144 | 10100000 | 32 - * 192 | 11000000 | 64 - */ - usize = ALIGNMENT_CEILING(size, alignment); - /* - * (usize < size) protects against the combination of maximal - * alignment and size greater than maximal alignment. - */ - if (usize < size) { - /* size_t overflow. 
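The overflow commentary in the deleted sa2u() above boils down to one guard: after rounding size up to the alignment, a result smaller than the input means size_t wrapped around. The same guard in isolation (align_up_checked is an illustrative name):

#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT_CEILING(s, alignment) \
	(((s) + ((alignment) - 1)) & ~((alignment) - 1))

/* Round size up to a power-of-two alignment, returning 0 on size_t
 * wraparound, mirroring the (usize < size) check in sa2u(). */
static size_t
align_up_checked(size_t size, size_t alignment) {
	size_t usize = ALIGNMENT_CEILING(size, alignment);

	if (usize < size) {
		return 0;	/* size + alignment - 1 overflowed */
	}
	return usize;
}

int
main(void) {
	printf("%zu\n", align_up_checked(100, 64));		/* 128 */
	printf("%zu\n", align_up_checked(SIZE_MAX - 3, 64));	/* 0 */
	return 0;
}
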
*/ - return (0); - } - - if (usize <= arena_maxclass && alignment <= PAGE) { - if (usize <= SMALL_MAXCLASS) - return (arena_bin_info[SMALL_SIZE2BIN(usize)].reg_size); - return (PAGE_CEILING(usize)); - } else { - size_t run_size; - - /* - * We can't achieve subpage alignment, so round up alignment - * permanently; it makes later calculations simpler. - */ - alignment = PAGE_CEILING(alignment); - usize = PAGE_CEILING(size); - /* - * (usize < size) protects against very large sizes within - * PAGE of SIZE_T_MAX. - * - * (usize + alignment < usize) protects against the - * combination of maximal alignment and usize large enough - * to cause overflow. This is similar to the first overflow - * check above, but it needs to be repeated due to the new - * usize value, which may now be *equal* to maximal - * alignment, whereas before we only detected overflow if the - * original size was *greater* than maximal alignment. - */ - if (usize < size || usize + alignment < usize) { - /* size_t overflow. */ - return (0); - } - - /* - * Calculate the size of the over-size run that arena_palloc() - * would need to allocate in order to guarantee the alignment. - * If the run wouldn't fit within a chunk, round up to a huge - * allocation size. - */ - run_size = usize + alignment - PAGE; - if (run_size <= arena_maxclass) - return (PAGE_CEILING(usize)); - return (CHUNK_CEILING(usize)); - } -} - -JEMALLOC_INLINE unsigned -narenas_total_get(void) -{ - unsigned narenas; - - malloc_mutex_lock(&arenas_lock); - narenas = narenas_total; - malloc_mutex_unlock(&arenas_lock); - - return (narenas); -} - -/* Choose an arena based on a per-thread value. */ -JEMALLOC_INLINE arena_t * -choose_arena(arena_t *arena) -{ - arena_t *ret; - - if (arena != NULL) - return (arena); - - if ((ret = *arenas_tsd_get()) == NULL) { - ret = choose_arena_hard(); - assert(ret != NULL); - } - - return (ret); -} -#endif - -#include "jemalloc/internal/bitmap.h" -#include "jemalloc/internal/rtree.h" -/* - * Include arena.h twice in order to resolve circular dependencies with - * tcache.h. 
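
The run_size = usize + alignment - PAGE formula rewards a worked check: wherever a page-aligned run of that size starts, it contains an alignment-aligned region of usize bytes. A sketch with an assumed 4 KiB page:

#include <assert.h>
#include <stdint.h>

#define PAGE ((uintptr_t)4096)  /* assumed page size for the example */

int main(void) {
    uintptr_t usize = 8192, alignment = 16384;
    uintptr_t run_size = usize + alignment - PAGE;  /* 20480 */
    /* Try every page-aligned placement within one alignment period. */
    for (uintptr_t base = 0; base < alignment; base += PAGE) {
        uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
        assert(aligned + usize <= base + run_size); /* region fits in run */
    }
    return 0;
}
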
- */ -#define JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_A -#include "jemalloc/internal/tcache.h" -#define JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/arena.h" -#undef JEMALLOC_ARENA_INLINE_B -#include "jemalloc/internal/hash.h" -#include "jemalloc/internal/quarantine.h" - -#ifndef JEMALLOC_ENABLE_INLINE -void *imalloct(size_t size, bool try_tcache, arena_t *arena); -void *imalloc(size_t size); -void *icalloct(size_t size, bool try_tcache, arena_t *arena); -void *icalloc(size_t size); -void *ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena); -void *ipalloc(size_t usize, size_t alignment, bool zero); -size_t isalloc(const void *ptr, bool demote); -size_t ivsalloc(const void *ptr, bool demote); -size_t u2rz(size_t usize); -size_t p2rz(const void *ptr); -void idalloct(void *ptr, bool try_tcache); -void idalloc(void *ptr); -void iqalloct(void *ptr, bool try_tcache); -void iqalloc(void *ptr); -void *iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, - arena_t *arena); -void *iralloct(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena); -void *iralloc(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero); -bool ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, - bool zero); -malloc_tsd_protos(JEMALLOC_ATTR(unused), thread_allocated, thread_allocated_t) -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_)) -JEMALLOC_ALWAYS_INLINE void * -imalloct(size_t size, bool try_tcache, arena_t *arena) -{ - - assert(size != 0); - - if (size <= arena_maxclass) - return (arena_malloc(arena, size, false, try_tcache)); - else - return (huge_malloc(size, false, huge_dss_prec_get(arena))); -} - -JEMALLOC_ALWAYS_INLINE void * -imalloc(size_t size) -{ - - return (imalloct(size, true, NULL)); -} - -JEMALLOC_ALWAYS_INLINE void * -icalloct(size_t size, bool try_tcache, arena_t *arena) -{ - - if (size <= arena_maxclass) - return (arena_malloc(arena, size, true, try_tcache)); - else - return (huge_malloc(size, true, huge_dss_prec_get(arena))); -} - -JEMALLOC_ALWAYS_INLINE void * -icalloc(size_t size) -{ - - return (icalloct(size, true, NULL)); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloct(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena) -{ - void *ret; - - assert(usize != 0); - assert(usize == sa2u(usize, alignment)); - - if (usize <= arena_maxclass && alignment <= PAGE) - ret = arena_malloc(arena, usize, zero, try_tcache); - else { - if (usize <= arena_maxclass) { - ret = arena_palloc(choose_arena(arena), usize, - alignment, zero); - } else if (alignment <= chunksize) - ret = huge_malloc(usize, zero, huge_dss_prec_get(arena)); - else - ret = huge_palloc(usize, alignment, zero, huge_dss_prec_get(arena)); - } - - assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -ipalloc(size_t usize, size_t alignment, bool zero) -{ - - return (ipalloct(usize, alignment, zero, true, NULL)); -} - -/* - * Typical usage: - * void *ptr = [...] - * size_t sz = isalloc(ptr, config_prof); - */ -JEMALLOC_ALWAYS_INLINE size_t -isalloc(const void *ptr, bool demote) -{ - size_t ret; - arena_chunk_t *chunk; - - assert(ptr != NULL); - /* Demotion only makes sense if config_prof is true. 
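
The chunk != ptr test in isalloc() leans on an invariant of this allocator generation: huge objects are returned chunk-aligned, while any pointer into an arena chunk sits past the chunk header and so has a nonzero offset from the chunk base. A sketch of the address test, assuming 4 MiB chunks:

#include <stdbool.h>
#include <stdint.h>

#define CHUNKSIZE ((uintptr_t)1 << 22)  /* assumed: 4 MiB chunks */
/* Equivalent form of CHUNK_ADDR2BASE: mask off the low bits. */
#define CHUNK_ADDR2BASE(p) ((void *)((uintptr_t)(p) & ~(CHUNKSIZE - 1)))

/* Dispatch the way isalloc() does: base == ptr only for huge objects. */
static bool ptr_is_huge(const void *ptr) {
    return CHUNK_ADDR2BASE(ptr) == ptr;
}
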
*/ - assert(config_prof || demote == false); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - ret = arena_salloc(ptr, demote); - else - ret = huge_salloc(ptr); - - return (ret); -} - -JEMALLOC_ALWAYS_INLINE size_t -ivsalloc(const void *ptr, bool demote) -{ - - /* Return 0 if ptr is not within a chunk managed by jemalloc. */ - if (rtree_get(chunks_rtree, (uintptr_t)CHUNK_ADDR2BASE(ptr)) == 0) - return (0); - - return (isalloc(ptr, demote)); -} - -JEMALLOC_INLINE size_t -u2rz(size_t usize) -{ - size_t ret; - - if (usize <= SMALL_MAXCLASS) { - size_t binind = SMALL_SIZE2BIN(usize); - ret = arena_bin_info[binind].redzone_size; - } else - ret = 0; - - return (ret); -} - -JEMALLOC_INLINE size_t -p2rz(const void *ptr) -{ - size_t usize = isalloc(ptr, false); - - return (u2rz(usize)); -} - -JEMALLOC_ALWAYS_INLINE void -idalloct(void *ptr, bool try_tcache) -{ - arena_chunk_t *chunk; - - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - arena_dalloc(chunk->arena, chunk, ptr, try_tcache); - else - huge_dalloc(ptr, true); -} - -JEMALLOC_ALWAYS_INLINE void -idalloc(void *ptr) -{ - - idalloct(ptr, true); -} - -JEMALLOC_ALWAYS_INLINE void -iqalloct(void *ptr, bool try_tcache) -{ - - if (config_fill && opt_quarantine) - quarantine(ptr); - else - idalloct(ptr, try_tcache); -} - -JEMALLOC_ALWAYS_INLINE void -iqalloc(void *ptr) -{ - - iqalloct(ptr, true); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct_realign(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, - arena_t *arena) -{ - void *p; - size_t usize, copysize; - - usize = sa2u(size + extra, alignment); - if (usize == 0) - return (NULL); - p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); - if (p == NULL) { - if (extra == 0) - return (NULL); - /* Try again, without extra this time. */ - usize = sa2u(size, alignment); - if (usize == 0) - return (NULL); - p = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); - if (p == NULL) - return (NULL); - } - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - memcpy(p, ptr, copysize); - iqalloct(ptr, try_tcache_dalloc); - return (p); -} - -JEMALLOC_ALWAYS_INLINE void * -iralloct(void *ptr, size_t size, size_t extra, size_t alignment, bool zero, - bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena) -{ - size_t oldsize; - - assert(ptr != NULL); - assert(size != 0); - - oldsize = isalloc(ptr, config_prof); - - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* - * Existing object alignment is inadequate; allocate new space - * and copy. 
- */ - return (iralloct_realign(ptr, oldsize, size, extra, alignment, - zero, try_tcache_alloc, try_tcache_dalloc, arena)); - } - - if (size + extra <= arena_maxclass) { - return (arena_ralloc(arena, ptr, oldsize, size, extra, - alignment, zero, try_tcache_alloc, - try_tcache_dalloc)); - } else { - return (huge_ralloc(ptr, oldsize, size, extra, - alignment, zero, try_tcache_dalloc, huge_dss_prec_get(arena))); - } -} - -JEMALLOC_ALWAYS_INLINE void * -iralloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) -{ - - return (iralloct(ptr, size, extra, alignment, zero, true, true, NULL)); -} - -JEMALLOC_ALWAYS_INLINE bool -ixalloc(void *ptr, size_t size, size_t extra, size_t alignment, bool zero) -{ - size_t oldsize; - - assert(ptr != NULL); - assert(size != 0); - - oldsize = isalloc(ptr, config_prof); - if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) - != 0) { - /* Existing object alignment is inadequate. */ - return (true); - } - - if (size <= arena_maxclass) - return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero)); - else - return (huge_ralloc_no_move(ptr, oldsize, size, extra)); -} - -malloc_tsd_externs(thread_allocated, thread_allocated_t) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, thread_allocated, thread_allocated_t, - THREAD_ALLOCATED_INITIALIZER, malloc_tsd_no_cleanup) -#endif - -#include "jemalloc/internal/prof.h" - -#undef JEMALLOC_H_INLINES -/******************************************************************************/ -#endif /* JEMALLOC_INTERNAL_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h new file mode 100644 index 00000000000..8ae5ef48cd8 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h @@ -0,0 +1,82 @@ +#ifndef JEMALLOC_INTERNAL_DECLS_H +#define JEMALLOC_INTERNAL_DECLS_H + +#include <math.h> +#ifdef _WIN32 +# include <windows.h> +# include "msvc_compat/windows_extra.h" + +#else +# include <sys/param.h> +# include <sys/mman.h> +# if !defined(__pnacl__) && !defined(__native_client__) +# include <sys/syscall.h> +# if !defined(SYS_write) && defined(__NR_write) +# define SYS_write __NR_write +# endif +# if defined(SYS_open) && defined(__aarch64__) + /* Android headers may define SYS_open to __NR_open even though + * __NR_open may not exist on AArch64 (superseded by __NR_openat). */ +# undef SYS_open +# endif +# include <sys/uio.h> +# endif +# include <pthread.h> +# include <signal.h> +# ifdef JEMALLOC_OS_UNFAIR_LOCK +# include <os/lock.h> +# endif +# ifdef JEMALLOC_GLIBC_MALLOC_HOOK +# include <sched.h> +# endif +# include <errno.h> +# include <sys/time.h> +# include <time.h> +# ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME +# include <mach/mach_time.h> +# endif +#endif +#include <sys/types.h> + +#include <limits.h> +#ifndef SIZE_T_MAX +# define SIZE_T_MAX SIZE_MAX +#endif +#ifndef SSIZE_MAX +# define SSIZE_MAX ((ssize_t)(SIZE_T_MAX >> 1)) +#endif +#include <stdarg.h> +#include <stdbool.h> +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <stddef.h> +#ifndef offsetof +# define offsetof(type, member) ((size_t)&(((type *)NULL)->member)) +#endif +#include <string.h> +#include <strings.h> +#include <ctype.h> +#ifdef _MSC_VER +# include <io.h> +typedef intptr_t ssize_t; +# define PATH_MAX 1024 +# define STDERR_FILENO 2 +# define __func__ __FUNCTION__ +# ifdef JEMALLOC_HAS_RESTRICT +# define restrict __restrict +# endif +/* Disable warnings about deprecated system functions. 
*/ +# pragma warning(disable: 4996) +#if _MSC_VER < 1800 +static int +isblank(int c) { + return (c == '\t' || c == ' '); +} +#endif +#else +# include <unistd.h> +#endif +#include <fcntl.h> + +#endif /* JEMALLOC_INTERNAL_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h new file mode 100644 index 00000000000..e10fb275d40 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h @@ -0,0 +1,53 @@ +#ifndef JEMALLOC_INTERNAL_EXTERNS_H +#define JEMALLOC_INTERNAL_EXTERNS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/tsd_types.h" + +/* TSD checks this to set thread local slow state accordingly. */ +extern bool malloc_slow; + +/* Run-time options. */ +extern bool opt_abort; +extern bool opt_abort_conf; +extern const char *opt_junk; +extern bool opt_junk_alloc; +extern bool opt_junk_free; +extern bool opt_utrace; +extern bool opt_xmalloc; +extern bool opt_zero; +extern unsigned opt_narenas; + +/* Number of CPUs. */ +extern unsigned ncpus; + +/* Number of arenas used for automatic multiplexing of threads and arenas. */ +extern unsigned narenas_auto; + +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + */ +extern atomic_p_t arenas[]; + +void *a0malloc(size_t size); +void a0dalloc(void *ptr); +void *bootstrap_malloc(size_t size); +void *bootstrap_calloc(size_t num, size_t size); +void bootstrap_free(void *ptr); +void arena_set(unsigned ind, arena_t *arena); +unsigned narenas_total_get(void); +arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks); +arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind); +arena_t *arena_choose_hard(tsd_t *tsd, bool internal); +void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind); +void iarena_cleanup(tsd_t *tsd); +void arena_cleanup(tsd_t *tsd); +void arenas_tdata_cleanup(tsd_t *tsd); +void jemalloc_prefork(void); +void jemalloc_postfork_parent(void); +void jemalloc_postfork_child(void); +bool malloc_initialized(void); + +#endif /* JEMALLOC_INTERNAL_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h new file mode 100644 index 00000000000..437eaa40793 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h @@ -0,0 +1,94 @@ +#ifndef JEMALLOC_INTERNAL_INCLUDES_H +#define JEMALLOC_INTERNAL_INCLUDES_H + +/* + * jemalloc can conceptually be broken into components (arena, tcache, etc.), + * but there are circular dependencies that cannot be broken without + * substantial performance degradation. + * + * Historically, we dealt with this by splitting each header into four sections + * (types, structs, externs, and inlines), and included each header file multiple times + * in this file, picking out the portion we want on each pass using the + * following #defines: + * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data + * types. + * JEMALLOC_H_STRUCTS : Data structures. + * JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes. + * JEMALLOC_H_INLINES : Inline functions.
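
Those four #defines drive a multi-pass inclusion scheme: the umbrella header defines one selector, includes every component header, then undefines it, once per section. A hypothetical component header under that scheme, mirroring the bracketing visible in the deleted jemalloc_internal.h above:

/* foo.h -- one component header, sliced into the four sections */
#ifdef JEMALLOC_H_TYPES
typedef struct foo_s foo_t;
#endif
#ifdef JEMALLOC_H_STRUCTS
struct foo_s {
    int x;
};
#endif
#ifdef JEMALLOC_H_EXTERNS
extern foo_t *foo_new(void);
#endif
#ifdef JEMALLOC_H_INLINES
static inline int
foo_get(foo_t *f) {
    return f->x;
}
#endif

The umbrella then runs #define JEMALLOC_H_TYPES, includes foo.h and every other header, and #undefs it before moving on to the STRUCTS pass, which is exactly the #define/#undef bracketing in the removed hunk above.
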
+ * + * We're moving toward a world in which the dependencies are explicit; each file + * will #include the headers it depends on (rather than relying on them being + * implicitly available via this file including every header file in the + * project). + * + * We're now in an intermediate state: we've broken up the header files to avoid + * having to include each one multiple times, but have not yet moved the + * dependency information into the header files (i.e. we still rely on the + * ordering in this file to ensure all a header's dependencies are available in + * its translation unit). Each component is now broken up into multiple header + * files, corresponding to the sections above (e.g. instead of "foo.h", we now + * have "foo_types.h", "foo_structs.h", "foo_externs.h", "foo_inlines.h"). + * + * Those files which have been converted to explicitly include their + * inter-component dependencies are now in the initial HERMETIC HEADERS + * section. All headers may still rely on jemalloc_preamble.h (which, by fiat, + * must be included first in every translation unit) for system headers and + * global jemalloc definitions, however. + */ + +/******************************************************************************/ +/* TYPES */ +/******************************************************************************/ + +#include "jemalloc/internal/extent_types.h" +#include "jemalloc/internal/base_types.h" +#include "jemalloc/internal/arena_types.h" +#include "jemalloc/internal/tcache_types.h" +#include "jemalloc/internal/prof_types.h" + +/******************************************************************************/ +/* STRUCTS */ +/******************************************************************************/ + +#include "jemalloc/internal/arena_structs_a.h" +#include "jemalloc/internal/extent_structs.h" +#include "jemalloc/internal/base_structs.h" +#include "jemalloc/internal/prof_structs.h" +#include "jemalloc/internal/arena_structs_b.h" +#include "jemalloc/internal/tcache_structs.h" +#include "jemalloc/internal/background_thread_structs.h" + +/******************************************************************************/ +/* EXTERNS */ +/******************************************************************************/ + +#include "jemalloc/internal/jemalloc_internal_externs.h" +#include "jemalloc/internal/extent_externs.h" +#include "jemalloc/internal/base_externs.h" +#include "jemalloc/internal/arena_externs.h" +#include "jemalloc/internal/large_externs.h" +#include "jemalloc/internal/tcache_externs.h" +#include "jemalloc/internal/prof_externs.h" +#include "jemalloc/internal/background_thread_externs.h" + +/******************************************************************************/ +/* INLINES */ +/******************************************************************************/ + +#include "jemalloc/internal/jemalloc_internal_inlines_a.h" +#include "jemalloc/internal/base_inlines.h" +/* + * Include portions of arena code interleaved with tcache code in order to + * resolve circular dependencies. 
+ */ +#include "jemalloc/internal/prof_inlines_a.h" +#include "jemalloc/internal/arena_inlines_a.h" +#include "jemalloc/internal/extent_inlines.h" +#include "jemalloc/internal/jemalloc_internal_inlines_b.h" +#include "jemalloc/internal/tcache_inlines.h" +#include "jemalloc/internal/arena_inlines_b.h" +#include "jemalloc/internal/jemalloc_internal_inlines_c.h" +#include "jemalloc/internal/prof_inlines_b.h" +#include "jemalloc/internal/background_thread_inlines.h" + +#endif /* JEMALLOC_INTERNAL_INCLUDES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h new file mode 100644 index 00000000000..24ea416297f --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h @@ -0,0 +1,171 @@ +#ifndef JEMALLOC_INTERNAL_INLINES_A_H +#define JEMALLOC_INTERNAL_INLINES_A_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/ticker.h" + +JEMALLOC_ALWAYS_INLINE malloc_cpuid_t +malloc_getcpu(void) { + assert(have_percpu_arena); +#if defined(JEMALLOC_HAVE_SCHED_GETCPU) + return (malloc_cpuid_t)sched_getcpu(); +#else + not_reached(); + return -1; +#endif +} + +/* Return the chosen arena index based on current cpu. */ +JEMALLOC_ALWAYS_INLINE unsigned +percpu_arena_choose(void) { + assert(have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)); + + malloc_cpuid_t cpuid = malloc_getcpu(); + assert(cpuid >= 0); + + unsigned arena_ind; + if ((opt_percpu_arena == percpu_arena) || ((unsigned)cpuid < ncpus / + 2)) { + arena_ind = cpuid; + } else { + assert(opt_percpu_arena == per_phycpu_arena); + /* Hyper threads on the same physical CPU share arena. */ + arena_ind = cpuid - ncpus / 2; + } + + return arena_ind; +} + +/* Return the limit of percpu auto arena range, i.e. arenas[0...ind_limit). */ +JEMALLOC_ALWAYS_INLINE unsigned +percpu_arena_ind_limit(percpu_arena_mode_t mode) { + assert(have_percpu_arena && PERCPU_ARENA_ENABLED(mode)); + if (mode == per_phycpu_arena && ncpus > 1) { + if (ncpus % 2) { + /* This likely means a misconfig. */ + return ncpus / 2 + 1; + } + return ncpus / 2; + } else { + return ncpus; + } +} + +static inline arena_tdata_t * +arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) { + arena_tdata_t *tdata; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + + if (unlikely(arenas_tdata == NULL)) { + /* arenas_tdata hasn't been initialized yet. */ + return arena_tdata_get_hard(tsd, ind); + } + if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) { + /* + * ind is invalid, cache is old (too small), or tdata to be + * initialized. + */ + return (refresh_if_missing ? 
arena_tdata_get_hard(tsd, ind) : + NULL); + } + + tdata = &arenas_tdata[ind]; + if (likely(tdata != NULL) || !refresh_if_missing) { + return tdata; + } + return arena_tdata_get_hard(tsd, ind); +} + +static inline arena_t * +arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) { + arena_t *ret; + + assert(ind < MALLOCX_ARENA_LIMIT); + + ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE); + if (unlikely(ret == NULL)) { + if (init_if_missing) { + ret = arena_init(tsdn, ind, + (extent_hooks_t *)&extent_hooks_default); + } + } + return ret; +} + +static inline ticker_t * +decay_ticker_get(tsd_t *tsd, unsigned ind) { + arena_tdata_t *tdata; + + tdata = arena_tdata_get(tsd, ind, true); + if (unlikely(tdata == NULL)) { + return NULL; + } + return &tdata->decay_ticker; +} + +JEMALLOC_ALWAYS_INLINE tcache_bin_t * +tcache_small_bin_get(tcache_t *tcache, szind_t binind) { + assert(binind < NBINS); + return &tcache->tbins_small[binind]; +} + +JEMALLOC_ALWAYS_INLINE tcache_bin_t * +tcache_large_bin_get(tcache_t *tcache, szind_t binind) { + assert(binind >= NBINS &&binind < nhbins); + return &tcache->tbins_large[binind - NBINS]; +} + +JEMALLOC_ALWAYS_INLINE bool +tcache_available(tsd_t *tsd) { + /* + * Thread specific auto tcache might be unavailable if: 1) during tcache + * initialization, or 2) disabled through thread.tcache.enabled mallctl + * or config options. This check covers all cases. + */ + if (likely(tsd_tcache_enabled_get(tsd))) { + /* Associated arena == NULL implies tcache init in progress. */ + assert(tsd_tcachep_get(tsd)->arena == NULL || + tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail != + NULL); + return true; + } + + return false; +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcache_get(tsd_t *tsd) { + if (!tcache_available(tsd)) { + return NULL; + } + + return tsd_tcachep_get(tsd); +} + +static inline void +pre_reentrancy(tsd_t *tsd, arena_t *arena) { + /* arena is the current context. Reentry from a0 is not allowed. */ + assert(arena != arena_get(tsd_tsdn(tsd), 0, false)); + + bool fast = tsd_fast(tsd); + ++*tsd_reentrancy_levelp_get(tsd); + if (fast) { + /* Prepare slow path for reentrancy. */ + tsd_slow_update(tsd); + assert(tsd->state == tsd_state_nominal_slow); + } +} + +static inline void +post_reentrancy(tsd_t *tsd) { + int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd); + assert(*reentrancy_level > 0); + if (--*reentrancy_level == 0) { + tsd_slow_update(tsd); + } +} + +#endif /* JEMALLOC_INTERNAL_INLINES_A_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h new file mode 100644 index 00000000000..2e76e5d8f7c --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h @@ -0,0 +1,86 @@ +#ifndef JEMALLOC_INTERNAL_INLINES_B_H +#define JEMALLOC_INTERNAL_INLINES_B_H + +#include "jemalloc/internal/rtree.h" + +/* Choose an arena based on a per-thread value. */ +static inline arena_t * +arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) { + arena_t *ret; + + if (arena != NULL) { + return arena; + } + + /* During reentrancy, arena 0 is the safest bet. */ + if (unlikely(tsd_reentrancy_level_get(tsd) > 0)) { + return arena_get(tsd_tsdn(tsd), 0, true); + } + + ret = internal ? 
tsd_iarena_get(tsd) : tsd_arena_get(tsd); + if (unlikely(ret == NULL)) { + ret = arena_choose_hard(tsd, internal); + assert(ret); + if (tcache_available(tsd)) { + tcache_t *tcache = tcache_get(tsd); + if (tcache->arena != NULL) { + /* See comments in tcache_data_init().*/ + assert(tcache->arena == + arena_get(tsd_tsdn(tsd), 0, false)); + if (tcache->arena != ret) { + tcache_arena_reassociate(tsd_tsdn(tsd), + tcache, ret); + } + } else { + tcache_arena_associate(tsd_tsdn(tsd), tcache, + ret); + } + } + } + + /* + * Note that for percpu arena, if the current arena is outside of the + * auto percpu arena range, (i.e. thread is assigned to a manually + * managed arena), then percpu arena is skipped. + */ + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena) && + !internal && (arena_ind_get(ret) < + percpu_arena_ind_limit(opt_percpu_arena)) && (ret->last_thd != + tsd_tsdn(tsd))) { + unsigned ind = percpu_arena_choose(); + if (arena_ind_get(ret) != ind) { + percpu_arena_update(tsd, ind); + ret = tsd_arena_get(tsd); + } + ret->last_thd = tsd_tsdn(tsd); + } + + return ret; +} + +static inline arena_t * +arena_choose(tsd_t *tsd, arena_t *arena) { + return arena_choose_impl(tsd, arena, false); +} + +static inline arena_t * +arena_ichoose(tsd_t *tsd, arena_t *arena) { + return arena_choose_impl(tsd, arena, true); +} + +static inline bool +arena_is_auto(arena_t *arena) { + assert(narenas_auto > 0); + return (arena_ind_get(arena) < narenas_auto); +} + +JEMALLOC_ALWAYS_INLINE extent_t * +iealloc(tsdn_t *tsdn, const void *ptr) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); +} + +#endif /* JEMALLOC_INTERNAL_INLINES_B_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h new file mode 100644 index 00000000000..7ffce6fb035 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h @@ -0,0 +1,197 @@ +#ifndef JEMALLOC_INTERNAL_INLINES_C_H +#define JEMALLOC_INTERNAL_INLINES_C_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/witness.h" + +JEMALLOC_ALWAYS_INLINE arena_t * +iaalloc(tsdn_t *tsdn, const void *ptr) { + assert(ptr != NULL); + + return arena_aalloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE size_t +isalloc(tsdn_t *tsdn, const void *ptr) { + assert(ptr != NULL); + + return arena_salloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE void * +iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache, + bool is_internal, arena_t *arena, bool slow_path) { + void *ret; + + assert(size != 0); + assert(!is_internal || tcache == NULL); + assert(!is_internal || arena == NULL || arena_is_auto(arena)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path); + if (config_stats && is_internal && likely(ret != NULL)) { + arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); + } + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path) { + return iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd), false, + NULL, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void * +ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, bool is_internal, 
arena_t *arena) { + void *ret; + + assert(usize != 0); + assert(usize == sz_sa2u(usize, alignment)); + assert(!is_internal || tcache == NULL); + assert(!is_internal || arena == NULL || arena_is_auto(arena)); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache); + assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret); + if (config_stats && is_internal && likely(ret != NULL)) { + arena_internal_add(iaalloc(tsdn, ret), isalloc(tsdn, ret)); + } + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero, + tcache_t *tcache, arena_t *arena) { + return ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena); +} + +JEMALLOC_ALWAYS_INLINE void * +ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero) { + return ipallocztm(tsd_tsdn(tsd), usize, alignment, zero, + tcache_get(tsd), false, NULL); +} + +JEMALLOC_ALWAYS_INLINE size_t +ivsalloc(tsdn_t *tsdn, const void *ptr) { + return arena_vsalloc(tsdn, ptr); +} + +JEMALLOC_ALWAYS_INLINE void +idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx, + bool is_internal, bool slow_path) { + assert(ptr != NULL); + assert(!is_internal || tcache == NULL); + assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr))); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + if (config_stats && is_internal) { + arena_internal_sub(iaalloc(tsdn, ptr), isalloc(tsdn, ptr)); + } + if (!is_internal && tsd_reentrancy_level_get(tsdn_tsd(tsdn)) != 0) { + assert(tcache == NULL); + } + arena_dalloc(tsdn, ptr, tcache, alloc_ctx, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void +idalloc(tsd_t *tsd, void *ptr) { + idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd), NULL, false, true); +} + +JEMALLOC_ALWAYS_INLINE void +isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache, + alloc_ctx_t *alloc_ctx, bool slow_path) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct_realign(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, size_t alignment, bool zero, tcache_t *tcache, + arena_t *arena) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + void *p; + size_t usize, copysize; + + usize = sz_sa2u(size + extra, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + return NULL; + } + p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); + if (p == NULL) { + if (extra == 0) { + return NULL; + } + /* Try again, without extra this time. */ + usize = sz_sa2u(size, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + return NULL; + } + p = ipalloct(tsdn, usize, alignment, zero, tcache, arena); + if (p == NULL) { + return NULL; + } + } + /* + * Copy at most size bytes (not size+extra), since the caller has no + * expectation that the extra bytes will be reliably preserved. + */ + copysize = (size < oldsize) ? 
size : oldsize; + memcpy(p, ptr, copysize); + isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); + return p; +} + +JEMALLOC_ALWAYS_INLINE void * +iralloct(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero, tcache_t *tcache, arena_t *arena) { + assert(ptr != NULL); + assert(size != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* + * Existing object alignment is inadequate; allocate new space + * and copy. + */ + return iralloct_realign(tsdn, ptr, oldsize, size, 0, alignment, + zero, tcache, arena); + } + + return arena_ralloc(tsdn, arena, ptr, oldsize, size, alignment, zero, + tcache); +} + +JEMALLOC_ALWAYS_INLINE void * +iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment, + bool zero) { + return iralloct(tsd_tsdn(tsd), ptr, oldsize, size, alignment, zero, + tcache_get(tsd), NULL); +} + +JEMALLOC_ALWAYS_INLINE bool +ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra, + size_t alignment, bool zero) { + assert(ptr != NULL); + assert(size != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1)) + != 0) { + /* Existing object alignment is inadequate. */ + return true; + } + + return arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero); +} + +#endif /* JEMALLOC_INTERNAL_INLINES_C_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h new file mode 100644 index 00000000000..4571895ec37 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h @@ -0,0 +1,40 @@ +#ifndef JEMALLOC_INTERNAL_MACROS_H +#define JEMALLOC_INTERNAL_MACROS_H + +#ifdef JEMALLOC_DEBUG +# define JEMALLOC_ALWAYS_INLINE static inline +#else +# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline +#endif +#ifdef _MSC_VER +# define inline _inline +#endif + +#define UNUSED JEMALLOC_ATTR(unused) + +#define ZU(z) ((size_t)z) +#define ZD(z) ((ssize_t)z) +#define QU(q) ((uint64_t)q) +#define QD(q) ((int64_t)q) + +#define KZU(z) ZU(z##ULL) +#define KZD(z) ZD(z##LL) +#define KQU(q) QU(q##ULL) +#define KQD(q) QD(q##LL) + +#ifndef __DECONST +# define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var)) +#endif + +#if !defined(JEMALLOC_HAS_RESTRICT) || defined(__cplusplus) +# define restrict +#endif + +/* Various function pointers are static and immutable except during testing. */ +#ifdef JEMALLOC_JET +# define JET_MUTABLE +#else +# define JET_MUTABLE const +#endif + +#endif /* JEMALLOC_INTERNAL_MACROS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h new file mode 100644 index 00000000000..50f9d001d54 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h @@ -0,0 +1,178 @@ +#ifndef JEMALLOC_INTERNAL_TYPES_H +#define JEMALLOC_INTERNAL_TYPES_H + +/* Page size index type. */ +typedef unsigned pszind_t; + +/* Size class index type. */ +typedef unsigned szind_t; + +/* Processor / core id type.
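
JET_MUTABLE, defined in jemalloc_internal_macros.h above, is a compact testing idiom: hooks are function pointers so tests can substitute them, yet in production builds the pointer is const and calls can be folded to the real implementation. A standalone sketch of the same pattern (names hypothetical); large_externs.h later in this patch declares large_dalloc_junk with exactly this shape:

#include <stddef.h>

#ifdef JEMALLOC_JET
# define JET_MUTABLE            /* test builds may reassign the hook */
#else
# define JET_MUTABLE const      /* production: pointer is immutable */
#endif

typedef void (junk_hook_t)(void *ptr, size_t size);

static void junk_fill(void *ptr, size_t size) {
    unsigned char *p = ptr;
    for (size_t i = 0; i < size; i++) {
        p[i] = 0x5a;            /* poison freed memory */
    }
}

/* Tests compiled with JEMALLOC_JET can point this at a spy function. */
junk_hook_t *JET_MUTABLE junk_hook = junk_fill;
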
*/ +typedef int malloc_cpuid_t; + +/* + * Flags bits: + * + * a: arena + * t: tcache + * 0: unused + * z: zero + * n: alignment + * + * aaaaaaaa aaaatttt tttttttt 0znnnnnn + */ +#define MALLOCX_ARENA_BITS 12 +#define MALLOCX_TCACHE_BITS 12 +#define MALLOCX_LG_ALIGN_BITS 6 +#define MALLOCX_ARENA_SHIFT 20 +#define MALLOCX_TCACHE_SHIFT 8 +#define MALLOCX_ARENA_MASK \ + (((1 << MALLOCX_ARENA_BITS) - 1) << MALLOCX_ARENA_SHIFT) +/* NB: Arena index bias decreases the maximum number of arenas by 1. */ +#define MALLOCX_ARENA_LIMIT ((1 << MALLOCX_ARENA_BITS) - 1) +#define MALLOCX_TCACHE_MASK \ + (((1 << MALLOCX_TCACHE_BITS) - 1) << MALLOCX_TCACHE_SHIFT) +#define MALLOCX_TCACHE_MAX ((1 << MALLOCX_TCACHE_BITS) - 3) +#define MALLOCX_LG_ALIGN_MASK ((1 << MALLOCX_LG_ALIGN_BITS) - 1) +/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */ +#define MALLOCX_ALIGN_GET_SPECIFIED(flags) \ + (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)) +#define MALLOCX_ALIGN_GET(flags) \ + (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1)) +#define MALLOCX_ZERO_GET(flags) \ + ((bool)(flags & MALLOCX_ZERO)) + +#define MALLOCX_TCACHE_GET(flags) \ + (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> MALLOCX_TCACHE_SHIFT)) - 2) +#define MALLOCX_ARENA_GET(flags) \ + (((unsigned)(((unsigned)flags) >> MALLOCX_ARENA_SHIFT)) - 1) + +/* Smallest size class to support. */ +#define TINY_MIN (1U << LG_TINY_MIN) + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +#ifndef LG_QUANTUM +# if (defined(__i386__) || defined(_M_IX86)) +# define LG_QUANTUM 4 +# endif +# ifdef __ia64__ +# define LG_QUANTUM 4 +# endif +# ifdef __alpha__ +# define LG_QUANTUM 4 +# endif +# if (defined(__sparc64__) || defined(__sparcv9) || defined(__sparc_v9__)) +# define LG_QUANTUM 4 +# endif +# if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64)) +# define LG_QUANTUM 4 +# endif +# ifdef __arm__ +# define LG_QUANTUM 3 +# endif +# ifdef __aarch64__ +# define LG_QUANTUM 4 +# endif +# ifdef __hppa__ +# define LG_QUANTUM 4 +# endif +# ifdef __mips__ +# define LG_QUANTUM 3 +# endif +# ifdef __or1k__ +# define LG_QUANTUM 3 +# endif +# ifdef __powerpc__ +# define LG_QUANTUM 4 +# endif +# ifdef __riscv__ +# define LG_QUANTUM 4 +# endif +# ifdef __s390__ +# define LG_QUANTUM 4 +# endif +# ifdef __SH4__ +# define LG_QUANTUM 4 +# endif +# ifdef __tile__ +# define LG_QUANTUM 4 +# endif +# ifdef __le32__ +# define LG_QUANTUM 4 +# endif +# ifndef LG_QUANTUM +# error "Unknown minimum alignment for architecture; specify via " + "--with-lg-quantum" +# endif +#endif + +#define QUANTUM ((size_t)(1U << LG_QUANTUM)) +#define QUANTUM_MASK (QUANTUM - 1) + +/* Return the smallest quantum multiple that is >= a. */ +#define QUANTUM_CEILING(a) \ + (((a) + QUANTUM_MASK) & ~QUANTUM_MASK) + +#define LONG ((size_t)(1U << LG_SIZEOF_LONG)) +#define LONG_MASK (LONG - 1) + +/* Return the smallest long multiple that is >= a. */ +#define LONG_CEILING(a) \ + (((a) + LONG_MASK) & ~LONG_MASK) + +#define SIZEOF_PTR (1U << LG_SIZEOF_PTR) +#define PTR_MASK (SIZEOF_PTR - 1) + +/* Return the smallest (void *) multiple that is >= a. */ +#define PTR_CEILING(a) \ + (((a) + PTR_MASK) & ~PTR_MASK) + +/* + * Maximum size of L1 cache line. This is used to avoid cache line aliasing. + * In addition, this controls the spacing of cacheline-spaced size classes. + * + * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can + * only handle raw constants. 
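
The MALLOCX flag layout is easiest to trust with a round-trip. Note the biases: arena indices are stored +1 and tcache indices +2, which the GET macros undo. A worked example using the shifts and masks defined above:

#include <assert.h>
#include <stddef.h>

#define MALLOCX_ARENA_SHIFT   20
#define MALLOCX_TCACHE_SHIFT  8
#define MALLOCX_TCACHE_MASK   (((1 << 12) - 1) << MALLOCX_TCACHE_SHIFT)
#define MALLOCX_LG_ALIGN_MASK ((1 << 6) - 1)

int main(void) {
    /* Encode arena 3 (stored +1), tcache 5 (stored +2), lg_align 4. */
    int flags = ((3 + 1) << MALLOCX_ARENA_SHIFT) |
        ((5 + 2) << MALLOCX_TCACHE_SHIFT) | 4;
    /* ...and decode with the same arithmetic as the GET macros. */
    assert(((unsigned)flags >> MALLOCX_ARENA_SHIFT) - 1 == 3);
    assert((unsigned)((flags & MALLOCX_TCACHE_MASK) >>
        MALLOCX_TCACHE_SHIFT) - 2 == 5);
    assert(((size_t)1 << (flags & MALLOCX_LG_ALIGN_MASK)) == 16);
    return 0;
}
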
+ */ +#define LG_CACHELINE 6 +#define CACHELINE 64 +#define CACHELINE_MASK (CACHELINE - 1) + +/* Return the smallest cacheline multiple that is >= s. */ +#define CACHELINE_CEILING(s) \ + (((s) + CACHELINE_MASK) & ~CACHELINE_MASK) + +/* Return the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2BASE(a, alignment) \ + ((void *)((uintptr_t)(a) & ((~(alignment)) + 1))) + +/* Return the offset between a and the nearest aligned address at or below a. */ +#define ALIGNMENT_ADDR2OFFSET(a, alignment) \ + ((size_t)((uintptr_t)(a) & (alignment - 1))) + +/* Return the smallest alignment multiple that is >= s. */ +#define ALIGNMENT_CEILING(s, alignment) \ + (((s) + (alignment - 1)) & ((~(alignment)) + 1)) + +/* Declare a variable-length array. */ +#if __STDC_VERSION__ < 199901L +# ifdef _MSC_VER +# include <malloc.h> +# define alloca _alloca +# else +# ifdef JEMALLOC_HAS_ALLOCA_H +# include <alloca.h> +# else +# include <stdlib.h> +# endif +# endif +# define VARIABLE_ARRAY(type, name, count) \ + type *name = alloca(sizeof(type) * (count)) +#else +# define VARIABLE_ARRAY(type, name, count) type name[(count)] +#endif + +#endif /* JEMALLOC_INTERNAL_TYPES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_preamble.h b/dep/jemalloc/include/jemalloc/internal/jemalloc_preamble.h new file mode 100644 index 00000000000..39045c857f3 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_preamble.h @@ -0,0 +1,179 @@ +#ifndef JEMALLOC_PREAMBLE_H +#define JEMALLOC_PREAMBLE_H + +#include "jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +#ifdef JEMALLOC_UTRACE +#include <sys/ktrace.h> +#endif + +#define JEMALLOC_NO_DEMANGLE +#ifdef JEMALLOC_JET +# undef JEMALLOC_IS_MALLOC +# define JEMALLOC_N(n) jet_##n +# include "jemalloc/internal/public_namespace.h" +# define JEMALLOC_NO_RENAME +# include "../jemalloc.h" +# undef JEMALLOC_NO_RENAME +#else +# define JEMALLOC_N(n) je_##n +# include "../jemalloc.h" +#endif + +#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) +#include <libkern/OSAtomic.h> +#endif + +#ifdef JEMALLOC_ZONE +#include <mach/mach_error.h> +#include <mach/mach_init.h> +#include <mach/vm_map.h> +#endif + +#include "jemalloc/internal/jemalloc_internal_macros.h" + +/* + * Note that the ordering matters here; the hook itself is name-mangled. We + * want the inclusion of hooks to happen early, so that we hook as much as + * possible. 
+ */ +#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE +# ifndef JEMALLOC_JET +# include "jemalloc/internal/private_namespace.h" +# else +# include "jemalloc/internal/private_namespace_jet.h" +# endif +#endif +#include "jemalloc/internal/hooks.h" + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +static const bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; +static const bool config_fill = +#ifdef JEMALLOC_FILL + true +#else + false +#endif + ; +static const bool config_lazy_lock = +#ifdef JEMALLOC_LAZY_LOCK + true +#else + false +#endif + ; +static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; +static const bool config_prof = +#ifdef JEMALLOC_PROF + true +#else + false +#endif + ; +static const bool config_prof_libgcc = +#ifdef JEMALLOC_PROF_LIBGCC + true +#else + false +#endif + ; +static const bool config_prof_libunwind = +#ifdef JEMALLOC_PROF_LIBUNWIND + true +#else + false +#endif + ; +static const bool maps_coalesce = +#ifdef JEMALLOC_MAPS_COALESCE + true +#else + false +#endif + ; +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; +static const bool config_thp = +#ifdef JEMALLOC_THP + true +#else + false +#endif + ; +static const bool config_tls = +#ifdef JEMALLOC_TLS + true +#else + false +#endif + ; +static const bool config_utrace = +#ifdef JEMALLOC_UTRACE + true +#else + false +#endif + ; +static const bool config_xmalloc = +#ifdef JEMALLOC_XMALLOC + true +#else + false +#endif + ; +static const bool config_cache_oblivious = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + true +#else + false +#endif + ; +#ifdef JEMALLOC_HAVE_SCHED_GETCPU +/* Currently percpu_arena depends on sched_getcpu. */ +#define JEMALLOC_PERCPU_ARENA +#endif +static const bool have_percpu_arena = +#ifdef JEMALLOC_PERCPU_ARENA + true +#else + false +#endif + ; +/* + * Undocumented, and not recommended; the application should take full + * responsibility for tracking provenance. + */ +static const bool force_ivsalloc = +#ifdef JEMALLOC_FORCE_IVSALLOC + true +#else + false +#endif + ; +static const bool have_background_thread = +#ifdef JEMALLOC_BACKGROUND_THREAD + true +#else + false +#endif + ; + +#endif /* JEMALLOC_PREAMBLE_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in b/dep/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in new file mode 100644 index 00000000000..18539a09887 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in @@ -0,0 +1,179 @@ +#ifndef JEMALLOC_PREAMBLE_H +#define JEMALLOC_PREAMBLE_H + +#include "jemalloc_internal_defs.h" +#include "jemalloc/internal/jemalloc_internal_decls.h" + +#ifdef JEMALLOC_UTRACE +#include <sys/ktrace.h> +#endif + +#define JEMALLOC_NO_DEMANGLE +#ifdef JEMALLOC_JET +# undef JEMALLOC_IS_MALLOC +# define JEMALLOC_N(n) jet_##n +# include "jemalloc/internal/public_namespace.h" +# define JEMALLOC_NO_RENAME +# include "../jemalloc@install_suffix@.h" +# undef JEMALLOC_NO_RENAME +#else +# define JEMALLOC_N(n) @private_namespace@##n +# include "../jemalloc@install_suffix@.h" +#endif + +#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN)) +#include <libkern/OSAtomic.h> +#endif + +#ifdef JEMALLOC_ZONE +#include <mach/mach_error.h> +#include <mach/mach_init.h> +#include <mach/vm_map.h> +#endif + +#include "jemalloc/internal/jemalloc_internal_macros.h" + +/* + * Note that the ordering matters here; the hook itself is name-mangled. 
We + * want the inclusion of hooks to happen early, so that we hook as much as + * possible. + */ +#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE +# ifndef JEMALLOC_JET +# include "jemalloc/internal/private_namespace.h" +# else +# include "jemalloc/internal/private_namespace_jet.h" +# endif +#endif +#include "jemalloc/internal/hooks.h" + +static const bool config_debug = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +static const bool have_dss = +#ifdef JEMALLOC_DSS + true +#else + false +#endif + ; +static const bool config_fill = +#ifdef JEMALLOC_FILL + true +#else + false +#endif + ; +static const bool config_lazy_lock = +#ifdef JEMALLOC_LAZY_LOCK + true +#else + false +#endif + ; +static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF; +static const bool config_prof = +#ifdef JEMALLOC_PROF + true +#else + false +#endif + ; +static const bool config_prof_libgcc = +#ifdef JEMALLOC_PROF_LIBGCC + true +#else + false +#endif + ; +static const bool config_prof_libunwind = +#ifdef JEMALLOC_PROF_LIBUNWIND + true +#else + false +#endif + ; +static const bool maps_coalesce = +#ifdef JEMALLOC_MAPS_COALESCE + true +#else + false +#endif + ; +static const bool config_stats = +#ifdef JEMALLOC_STATS + true +#else + false +#endif + ; +static const bool config_thp = +#ifdef JEMALLOC_THP + true +#else + false +#endif + ; +static const bool config_tls = +#ifdef JEMALLOC_TLS + true +#else + false +#endif + ; +static const bool config_utrace = +#ifdef JEMALLOC_UTRACE + true +#else + false +#endif + ; +static const bool config_xmalloc = +#ifdef JEMALLOC_XMALLOC + true +#else + false +#endif + ; +static const bool config_cache_oblivious = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + true +#else + false +#endif + ; +#ifdef JEMALLOC_HAVE_SCHED_GETCPU +/* Currently percpu_arena depends on sched_getcpu. */ +#define JEMALLOC_PERCPU_ARENA +#endif +static const bool have_percpu_arena = +#ifdef JEMALLOC_PERCPU_ARENA + true +#else + false +#endif + ; +/* + * Undocumented, and not recommended; the application should take full + * responsibility for tracking provenance. 
+ */ +static const bool force_ivsalloc = +#ifdef JEMALLOC_FORCE_IVSALLOC + true +#else + false +#endif + ; +static const bool have_background_thread = +#ifdef JEMALLOC_BACKGROUND_THREAD + true +#else + false +#endif + ; + +#endif /* JEMALLOC_PREAMBLE_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/large_externs.h b/dep/jemalloc/include/jemalloc/internal/large_externs.h new file mode 100644 index 00000000000..3f36282cd40 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/large_externs.h @@ -0,0 +1,26 @@ +#ifndef JEMALLOC_INTERNAL_LARGE_EXTERNS_H +#define JEMALLOC_INTERNAL_LARGE_EXTERNS_H + +void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero); +void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero); +bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, + size_t usize_max, bool zero); +void *large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, + size_t alignment, bool zero, tcache_t *tcache); + +typedef void (large_dalloc_junk_t)(void *, size_t); +extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk; + +typedef void (large_dalloc_maybe_junk_t)(void *, size_t); +extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk; + +void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent); +void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent); +void large_dalloc(tsdn_t *tsdn, extent_t *extent); +size_t large_salloc(tsdn_t *tsdn, const extent_t *extent); +prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent); +void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx); +void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent); + +#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/malloc_io.h b/dep/jemalloc/include/jemalloc/internal/malloc_io.h new file mode 100644 index 00000000000..47ae58ec352 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/malloc_io.h @@ -0,0 +1,62 @@ +#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H +#define JEMALLOC_INTERNAL_MALLOC_IO_H + +#ifdef _WIN32 +# ifdef _WIN64 +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "ll" +# else +# define FMT64_PREFIX "ll" +# define FMTPTR_PREFIX "" +# endif +# define FMTd32 "d" +# define FMTu32 "u" +# define FMTx32 "x" +# define FMTd64 FMT64_PREFIX "d" +# define FMTu64 FMT64_PREFIX "u" +# define FMTx64 FMT64_PREFIX "x" +# define FMTdPTR FMTPTR_PREFIX "d" +# define FMTuPTR FMTPTR_PREFIX "u" +# define FMTxPTR FMTPTR_PREFIX "x" +#else +# include <inttypes.h> +# define FMTd32 PRId32 +# define FMTu32 PRIu32 +# define FMTx32 PRIx32 +# define FMTd64 PRId64 +# define FMTu64 PRIu64 +# define FMTx64 PRIx64 +# define FMTdPTR PRIdPTR +# define FMTuPTR PRIuPTR +# define FMTxPTR PRIxPTR +#endif + +/* Size of stack-allocated buffer passed to buferror(). */ +#define BUFERROR_BUF 64 + +/* + * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be + * large enough for all possible uses within jemalloc. + */ +#define MALLOC_PRINTF_BUFSIZE 4096 + +int buferror(int err, char *buf, size_t buflen); +uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, + int base); +void malloc_write(const char *s); + +/* + * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating + * point math. + */ +size_t malloc_vsnprintf(char *str, size_t size, const char *format, + va_list ap); +size_t malloc_snprintf(char *str, size_t size, const char *format, ...) 
+ JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, va_list ap); +void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, + const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); +void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); + +#endif /* JEMALLOC_INTERNAL_MALLOC_IO_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/mb.h b/dep/jemalloc/include/jemalloc/internal/mb.h deleted file mode 100644 index 3cfa7872942..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/mb.h +++ /dev/null @@ -1,115 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -void mb_write(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) -#ifdef __i386__ -/* - * According to the Intel Architecture Software Developer's Manual, current - * processors execute instructions in order from the perspective of other - * processors in a multiprocessor system, but 1) Intel reserves the right to - * change that, and 2) the compiler's optimizer could re-order instructions if - * there weren't some form of barrier. Therefore, even if running on an - * architecture that does not need memory barriers (everything through at least - * i686), an "optimizer barrier" is necessary. - */ -JEMALLOC_INLINE void -mb_write(void) -{ - -# if 0 - /* This is a true memory barrier. */ - asm volatile ("pusha;" - "xor %%eax,%%eax;" - "cpuid;" - "popa;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -#else - /* - * This is hopefully enough to keep the compiler from reordering - * instructions around this one. - */ - asm volatile ("nop;" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -#endif -} -#elif (defined(__amd64__) || defined(__x86_64__)) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("sfence" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__powerpc__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("eieio" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__sparc64__) -JEMALLOC_INLINE void -mb_write(void) -{ - - asm volatile ("membar #StoreStore" - : /* Outputs. */ - : /* Inputs. */ - : "memory" /* Clobbers. */ - ); -} -#elif defined(__tile__) -JEMALLOC_INLINE void -mb_write(void) -{ - - __sync_synchronize(); -} -#else -/* - * This is much slower than a simple memory barrier, but the semantics of mutex - * unlock make this work. 
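
jemalloc 5 drops mb.h outright; the hand-rolled sfence/eieio/membar variants above are subsumed by the C11-style primitives in the new atomic.h (the ATOMIC_ACQUIRE load of arenas[] earlier in this patch is one user). For reference, the portable equivalent of mb_write()'s store ordering is a release fence:

#include <stdatomic.h>

static int payload;         /* ordinary data */
static atomic_int ready;    /* publication flag */

static void publish(void) {
    payload = 42;
    /* Orders the payload store before the flag store, as the
     * per-architecture mb_write() implementations did by hand. */
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&ready, 1, memory_order_relaxed);
}
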
- */ -JEMALLOC_INLINE void -mb_write(void) -{ - malloc_mutex_t mtx; - - malloc_mutex_init(&mtx); - malloc_mutex_lock(&mtx); - malloc_mutex_unlock(&mtx); -} -#endif -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/dep/jemalloc/include/jemalloc/internal/mutex.h b/dep/jemalloc/include/jemalloc/internal/mutex.h index de44e1435ad..6520c2512d3 100644 --- a/dep/jemalloc/include/jemalloc/internal/mutex.h +++ b/dep/jemalloc/include/jemalloc/internal/mutex.h @@ -1,45 +1,123 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_MUTEX_H +#define JEMALLOC_INTERNAL_MUTEX_H -typedef struct malloc_mutex_s malloc_mutex_t; +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/tsd.h" +#include "jemalloc/internal/witness.h" + +typedef enum { + /* Can only acquire one mutex of a given witness rank at a time. */ + malloc_mutex_rank_exclusive, + /* + * Can acquire multiple mutexes of the same witness rank, but in + * address-ascending order only. + */ + malloc_mutex_address_ordered +} malloc_mutex_lock_order_t; +typedef struct malloc_mutex_s malloc_mutex_t; +struct malloc_mutex_s { + union { + struct { + /* + * prof_data is defined first to reduce cacheline + * bouncing: the data is not touched by the mutex holder + * during unlocking, while might be modified by + * contenders. Having it before the mutex itself could + * avoid prefetching a modified cacheline (for the + * unlocking thread). + */ + mutex_prof_data_t prof_data; #ifdef _WIN32 -# define MALLOC_MUTEX_INITIALIZER +# if _WIN32_WINNT >= 0x0600 + SRWLOCK lock; +# else + CRITICAL_SECTION lock; +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + os_unfair_lock lock; #elif (defined(JEMALLOC_OSSPIN)) -# define MALLOC_MUTEX_INITIALIZER {0} + OSSpinLock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL} + pthread_mutex_t lock; + malloc_mutex_t *postponed_next; #else -# if (defined(PTHREAD_MUTEX_ADAPTIVE_NP) && \ - defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP} + pthread_mutex_t lock; +#endif + }; + /* + * We only touch witness when configured w/ debug. However we + * keep the field in a union when !debug so that we don't have + * to pollute the code base with #ifdefs, while avoid paying the + * memory cost. + */ +#if !defined(JEMALLOC_DEBUG) + witness_t witness; + malloc_mutex_lock_order_t lock_order; +#endif + }; + +#if defined(JEMALLOC_DEBUG) + witness_t witness; + malloc_mutex_lock_order_t lock_order; +#endif +}; + +/* + * Based on benchmark results, a fixed spin with this amount of retries works + * well for our critical sections. 
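
The fixed-spin comment is implemented by malloc_mutex_lock_slow() in src/mutex.c, which is not part of this hunk. A hypothetical sketch of the usual shape of such a bounded spin, in plain pthreads rather than the internal types:

#include <pthread.h>
#include <sched.h>

#define MAX_SPIN 250    /* cf. MALLOC_MUTEX_MAX_SPIN below */

/* Hypothetical sketch: spin a bounded number of times on the cheap
 * trylock, then fall back to a blocking lock. */
static void lock_slow(pthread_mutex_t *m) {
    for (int i = 0; i < MAX_SPIN; i++) {
        if (pthread_mutex_trylock(m) == 0) {
            return;         /* acquired while spinning */
        }
        sched_yield();      /* give the owner a chance to run */
    }
    pthread_mutex_lock(m);  /* contended: block in the kernel */
}
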
+ */ +#define MALLOC_MUTEX_MAX_SPIN 250 + +#ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 +# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) ReleaseSRWLockExclusive(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!TryAcquireSRWLockExclusive(&(m)->lock)) # else -# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT -# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER} +# define MALLOC_MUTEX_LOCK(m) EnterCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) LeaveCriticalSection(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!TryEnterCriticalSection(&(m)->lock)) # endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_LOCK(m) os_unfair_lock_lock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) os_unfair_lock_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!os_unfair_lock_trylock(&(m)->lock)) +#elif (defined(JEMALLOC_OSSPIN)) +# define MALLOC_MUTEX_LOCK(m) OSSpinLockLock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) OSSpinLockUnlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (!OSSpinLockTry(&(m)->lock)) +#else +# define MALLOC_MUTEX_LOCK(m) pthread_mutex_lock(&(m)->lock) +# define MALLOC_MUTEX_UNLOCK(m) pthread_mutex_unlock(&(m)->lock) +# define MALLOC_MUTEX_TRYLOCK(m) (pthread_mutex_trylock(&(m)->lock) != 0) #endif -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +#define LOCK_PROF_DATA_INITIALIZER \ + {NSTIME_ZERO_INITIALIZER, NSTIME_ZERO_INITIALIZER, 0, 0, 0, \ + ATOMIC_INIT(0), 0, NULL, 0} -struct malloc_mutex_s { #ifdef _WIN32 - CRITICAL_SECTION lock; +# define MALLOC_MUTEX_INITIALIZER +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, OS_UNFAIR_LOCK_INIT}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_OSSPIN)) - OSSpinLock lock; +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, 0}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_MUTEX_INIT_CB)) - pthread_mutex_t lock; - malloc_mutex_t *postponed_next; +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, NULL}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #else - pthread_mutex_t lock; +# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT +# define MALLOC_MUTEX_INITIALIZER \ + {{{LOCK_PROF_DATA_INITIALIZER, PTHREAD_MUTEX_INITIALIZER}}, \ + WITNESS_INITIALIZER("mutex", WITNESS_RANK_OMIT)} #endif -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; @@ -48,52 +126,123 @@ extern bool isthreaded; # define isthreaded true #endif -bool malloc_mutex_init(malloc_mutex_t *mutex); -void malloc_mutex_prefork(malloc_mutex_t *mutex); -void malloc_mutex_postfork_parent(malloc_mutex_t *mutex); -void malloc_mutex_postfork_child(malloc_mutex_t *mutex); -bool mutex_boot(void); +bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank, malloc_mutex_lock_order_t lock_order); +void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); +void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); +bool malloc_mutex_boot(void); +void malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex); -#endif /* JEMALLOC_H_EXTERNS */ 
-/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +void malloc_mutex_lock_slow(malloc_mutex_t *mutex); -#ifndef JEMALLOC_ENABLE_INLINE -void malloc_mutex_lock(malloc_mutex_t *mutex); -void malloc_mutex_unlock(malloc_mutex_t *mutex); -#endif +static inline void +malloc_mutex_lock_final(malloc_mutex_t *mutex) { + MALLOC_MUTEX_LOCK(mutex); +} -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) -JEMALLOC_INLINE void -malloc_mutex_lock(malloc_mutex_t *mutex) -{ +static inline bool +malloc_mutex_trylock_final(malloc_mutex_t *mutex) { + return MALLOC_MUTEX_TRYLOCK(mutex); +} +static inline void +mutex_owner_stats_update(tsdn_t *tsdn, malloc_mutex_t *mutex) { + if (config_stats) { + mutex_prof_data_t *data = &mutex->prof_data; + data->n_lock_ops++; + if (data->prev_owner != tsdn) { + data->prev_owner = tsdn; + data->n_owner_switches++; + } + } +} + +/* Trylock: return false if the lock is successfully acquired. */ +static inline bool +malloc_mutex_trylock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { -#ifdef _WIN32 - EnterCriticalSection(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockLock(&mutex->lock); -#else - pthread_mutex_lock(&mutex->lock); -#endif + if (malloc_mutex_trylock_final(mutex)) { + return true; + } + mutex_owner_stats_update(tsdn, mutex); } + witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + + return false; } -JEMALLOC_INLINE void -malloc_mutex_unlock(malloc_mutex_t *mutex) -{ +/* Aggregate lock prof data. */ +static inline void +malloc_mutex_prof_merge(mutex_prof_data_t *sum, mutex_prof_data_t *data) { + nstime_add(&sum->tot_wait_time, &data->tot_wait_time); + if (nstime_compare(&sum->max_wait_time, &data->max_wait_time) < 0) { + nstime_copy(&sum->max_wait_time, &data->max_wait_time); + } + + sum->n_wait_times += data->n_wait_times; + sum->n_spin_acquired += data->n_spin_acquired; + + if (sum->max_n_thds < data->max_n_thds) { + sum->max_n_thds = data->max_n_thds; + } + uint32_t cur_n_waiting_thds = atomic_load_u32(&sum->n_waiting_thds, + ATOMIC_RELAXED); + uint32_t new_n_waiting_thds = cur_n_waiting_thds + atomic_load_u32( + &data->n_waiting_thds, ATOMIC_RELAXED); + atomic_store_u32(&sum->n_waiting_thds, new_n_waiting_thds, + ATOMIC_RELAXED); + sum->n_owner_switches += data->n_owner_switches; + sum->n_lock_ops += data->n_lock_ops; +} +static inline void +malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); if (isthreaded) { -#ifdef _WIN32 - LeaveCriticalSection(&mutex->lock); -#elif (defined(JEMALLOC_OSSPIN)) - OSSpinLockUnlock(&mutex->lock); -#else - pthread_mutex_unlock(&mutex->lock); -#endif + if (malloc_mutex_trylock_final(mutex)) { + malloc_mutex_lock_slow(mutex); + } + mutex_owner_stats_update(tsdn, mutex); } + witness_lock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +static inline void +malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_unlock(tsdn_witness_tsdp_get(tsdn), &mutex->witness); + if (isthreaded) { + MALLOC_MUTEX_UNLOCK(mutex); + } +} + +static inline void +malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +static inline void +malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { + witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness); +} + +/* Copy the prof 
data from mutex for processing. */ +static inline void +malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data, + malloc_mutex_t *mutex) { + mutex_prof_data_t *source = &mutex->prof_data; + /* Can only read holding the mutex. */ + malloc_mutex_assert_owner(tsdn, mutex); + + /* + * Not *really* allowed (we shouldn't be doing non-atomic loads of + * atomic data), but the mutex protection makes this safe, and writing + * a member-for-member copy is tedious for this situation. + */ + *data = *source; + /* n_waiting_thds is not reported (modified w/o locking). */ + atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED); } -#endif -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_MUTEX_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/mutex_pool.h b/dep/jemalloc/include/jemalloc/internal/mutex_pool.h new file mode 100644 index 00000000000..726cece90bc --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/mutex_pool.h @@ -0,0 +1,94 @@ +#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H +#define JEMALLOC_INTERNAL_MUTEX_POOL_H + +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/witness.h" + +/* We do mod reductions by this value, so it should be kept a power of 2. */ +#define MUTEX_POOL_SIZE 256 + +typedef struct mutex_pool_s mutex_pool_t; +struct mutex_pool_s { + malloc_mutex_t mutexes[MUTEX_POOL_SIZE]; +}; + +bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank); + +/* Internal helper - not meant to be called outside this module. */ +static inline malloc_mutex_t * +mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) { + size_t hash_result[2]; + hash(&key, sizeof(key), 0xd50dcc1b, hash_result); + return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE]; +} + +static inline void +mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) { + for (int i = 0; i < MUTEX_POOL_SIZE; i++) { + malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]); + } +} + +/* + * Note that a mutex pool doesn't work exactly the way an embedded mutex would. + * You're not allowed to acquire mutexes in the pool one at a time. You have to + * acquire all the mutexes you'll need in a single function call, and then + * release them all in a single function call. 
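+ *
+ * A hedged usage sketch (hypothetical objects a and b, keyed by address):
+ *
+ *   mutex_pool_lock2(tsdn, &pool, (uintptr_t)a, (uintptr_t)b);
+ *   ... mutate a and b ...
+ *   mutex_pool_unlock2(tsdn, &pool, (uintptr_t)a, (uintptr_t)b);
+ *
+ * mutex_pool_lock2() acquires in address order (see below), so two threads
+ * locking the same pair of keys in opposite order cannot deadlock, and a
+ * nested mutex_pool_lock() on the same pool trips
+ * mutex_pool_assert_not_held() in debug builds.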
+ */ + +static inline void +mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { + mutex_pool_assert_not_held(tsdn, pool); + + malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); + malloc_mutex_lock(tsdn, mutex); +} + +static inline void +mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { + malloc_mutex_t *mutex = mutex_pool_mutex(pool, key); + malloc_mutex_unlock(tsdn, mutex); + + mutex_pool_assert_not_held(tsdn, pool); +} + +static inline void +mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, + uintptr_t key2) { + mutex_pool_assert_not_held(tsdn, pool); + + malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); + malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); + if ((uintptr_t)mutex1 < (uintptr_t)mutex2) { + malloc_mutex_lock(tsdn, mutex1); + malloc_mutex_lock(tsdn, mutex2); + } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) { + malloc_mutex_lock(tsdn, mutex1); + } else { + malloc_mutex_lock(tsdn, mutex2); + malloc_mutex_lock(tsdn, mutex1); + } +} + +static inline void +mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1, + uintptr_t key2) { + malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1); + malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2); + if (mutex1 == mutex2) { + malloc_mutex_unlock(tsdn, mutex1); + } else { + malloc_mutex_unlock(tsdn, mutex1); + malloc_mutex_unlock(tsdn, mutex2); + } + + mutex_pool_assert_not_held(tsdn, pool); +} + +static inline void +mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) { + malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key)); } + +#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/mutex_prof.h b/dep/jemalloc/include/jemalloc/internal/mutex_prof.h new file mode 100644 index 00000000000..3358bcf5351 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/mutex_prof.h @@ -0,0 +1,86 @@ +#ifndef JEMALLOC_INTERNAL_MUTEX_PROF_H +#define JEMALLOC_INTERNAL_MUTEX_PROF_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/tsd_types.h" + +#define MUTEX_PROF_GLOBAL_MUTEXES \ + OP(background_thread) \ + OP(ctl) \ + OP(prof) + +typedef enum { +#define OP(mtx) global_prof_mutex_##mtx, + MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + mutex_prof_num_global_mutexes +} mutex_prof_global_ind_t; + +#define MUTEX_PROF_ARENA_MUTEXES \ + OP(large) \ + OP(extent_avail) \ + OP(extents_dirty) \ + OP(extents_muzzy) \ + OP(extents_retained) \ + OP(decay_dirty) \ + OP(decay_muzzy) \ + OP(base) \ + OP(tcache_list) + +typedef enum { +#define OP(mtx) arena_prof_mutex_##mtx, + MUTEX_PROF_ARENA_MUTEXES +#undef OP + mutex_prof_num_arena_mutexes +} mutex_prof_arena_ind_t; + +#define MUTEX_PROF_COUNTERS \ + OP(num_ops, uint64_t) \ + OP(num_wait, uint64_t) \ + OP(num_spin_acq, uint64_t) \ + OP(num_owner_switch, uint64_t) \ + OP(total_wait_time, uint64_t) \ + OP(max_wait_time, uint64_t) \ + OP(max_num_thds, uint32_t) + +typedef enum { +#define OP(counter, type) mutex_counter_##counter, + MUTEX_PROF_COUNTERS +#undef OP + mutex_prof_num_counters +} mutex_prof_counter_ind_t; + +typedef struct { + /* + * Counters touched on the slow path, i.e. when there is lock + * contention. We update them once we have the lock. + */ + /* Total time (in nanoseconds) spent waiting on this mutex. */ + nstime_t tot_wait_time; + /* Max time (in nanoseconds) spent on a single lock operation. */ + nstime_t max_wait_time; + /* # of times we had to wait for this mutex (after spinning). 
*/ + uint64_t n_wait_times; + /* # of times acquired the mutex through local spinning. */ + uint64_t n_spin_acquired; + /* Max # of threads waiting for the mutex at the same time. */ + uint32_t max_n_thds; + /* Current # of threads waiting on the lock. Atomic synced. */ + atomic_u32_t n_waiting_thds; + + /* + * Data touched on the fast path. These are modified right after we + * grab the lock, so it's placed closest to the end (i.e. right before + * the lock) so that we have a higher chance of them being on the same + * cacheline. + */ + /* # of times the mutex holder is different than the previous one. */ + uint64_t n_owner_switches; + /* Previous mutex holder, to facilitate n_owner_switches. */ + tsdn_t *prev_owner; + /* # of lock() operations in total. */ + uint64_t n_lock_ops; +} mutex_prof_data_t; + +#endif /* JEMALLOC_INTERNAL_MUTEX_PROF_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/nstime.h b/dep/jemalloc/include/jemalloc/internal/nstime.h new file mode 100644 index 00000000000..17c177c7f4b --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/nstime.h @@ -0,0 +1,34 @@ +#ifndef JEMALLOC_INTERNAL_NSTIME_H +#define JEMALLOC_INTERNAL_NSTIME_H + +/* Maximum supported number of seconds (~584 years). */ +#define NSTIME_SEC_MAX KQU(18446744072) +#define NSTIME_ZERO_INITIALIZER {0} + +typedef struct { + uint64_t ns; +} nstime_t; + +void nstime_init(nstime_t *time, uint64_t ns); +void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); +uint64_t nstime_ns(const nstime_t *time); +uint64_t nstime_sec(const nstime_t *time); +uint64_t nstime_msec(const nstime_t *time); +uint64_t nstime_nsec(const nstime_t *time); +void nstime_copy(nstime_t *time, const nstime_t *source); +int nstime_compare(const nstime_t *a, const nstime_t *b); +void nstime_add(nstime_t *time, const nstime_t *addend); +void nstime_iadd(nstime_t *time, uint64_t addend); +void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); +void nstime_isubtract(nstime_t *time, uint64_t subtrahend); +void nstime_imultiply(nstime_t *time, uint64_t multiplier); +void nstime_idivide(nstime_t *time, uint64_t divisor); +uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); + +typedef bool (nstime_monotonic_t)(void); +extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic; + +typedef bool (nstime_update_t)(nstime_t *); +extern nstime_update_t *JET_MUTABLE nstime_update; + +#endif /* JEMALLOC_INTERNAL_NSTIME_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/pages.h b/dep/jemalloc/include/jemalloc/internal/pages.h new file mode 100644 index 00000000000..28383b7f973 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/pages.h @@ -0,0 +1,71 @@ +#ifndef JEMALLOC_INTERNAL_PAGES_EXTERNS_H +#define JEMALLOC_INTERNAL_PAGES_EXTERNS_H + +/* Page size. LG_PAGE is determined by the configure script. */ +#ifdef PAGE_MASK +# undef PAGE_MASK +#endif +#define PAGE ((size_t)(1U << LG_PAGE)) +#define PAGE_MASK ((size_t)(PAGE - 1)) +/* Return the page base address for the page containing address a. */ +#define PAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~PAGE_MASK)) +/* Return the smallest pagesize multiple that is >= s. */ +#define PAGE_CEILING(s) \ + (((s) + PAGE_MASK) & ~PAGE_MASK) + +/* Huge page size. LG_HUGEPAGE is determined by the configure script. */ +#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE)) +#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1)) +/* Return the huge page base address for the huge page containing address a. 
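+ * (Editorial worked example, assuming LG_HUGEPAGE == 21, i.e. 2 MiB huge
+ * pages: HUGEPAGE_MASK == 0x1fffff, so HUGEPAGE_ADDR2BASE((void *)0x340f123)
+ * is (void *)0x3400000 and HUGEPAGE_CEILING(0x340f123) is 0x3600000.)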
*/ +#define HUGEPAGE_ADDR2BASE(a) \ + ((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK)) +/* Return the smallest huge page size multiple that is >= s. */ +#define HUGEPAGE_CEILING(s) \ + (((s) + HUGEPAGE_MASK) & ~HUGEPAGE_MASK) + +/* PAGES_CAN_PURGE_LAZY is defined if lazy purging is supported. */ +#if defined(_WIN32) || defined(JEMALLOC_PURGE_MADVISE_FREE) +# define PAGES_CAN_PURGE_LAZY +#endif +/* + * PAGES_CAN_PURGE_FORCED is defined if forced purging is supported. + * + * The only supported way to hard-purge on Windows is to decommit and then + * re-commit, but doing so is racy, and if re-commit fails it's a pain to + * propagate the "poisoned" memory state. Since we typically decommit as the + * next step after purging on Windows anyway, there's no point in adding such + * complexity. + */ +#if !defined(_WIN32) && ((defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)) || \ + defined(JEMALLOC_MAPS_COALESCE)) +# define PAGES_CAN_PURGE_FORCED +#endif + +static const bool pages_can_purge_lazy = +#ifdef PAGES_CAN_PURGE_LAZY + true +#else + false +#endif + ; +static const bool pages_can_purge_forced = +#ifdef PAGES_CAN_PURGE_FORCED + true +#else + false +#endif + ; + +void *pages_map(void *addr, size_t size, size_t alignment, bool *commit); +void pages_unmap(void *addr, size_t size); +bool pages_commit(void *addr, size_t size); +bool pages_decommit(void *addr, size_t size); +bool pages_purge_lazy(void *addr, size_t size); +bool pages_purge_forced(void *addr, size_t size); +bool pages_huge(void *addr, size_t size); +bool pages_nohuge(void *addr, size_t size); +bool pages_boot(void); + +#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/ph.h b/dep/jemalloc/include/jemalloc/internal/ph.h new file mode 100644 index 00000000000..84d6778a906 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/ph.h @@ -0,0 +1,391 @@ +/* + * A Pairing Heap implementation. + * + * "The Pairing Heap: A New Form of Self-Adjusting Heap" + * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf + * + * With an auxiliary two-pass list, described in a follow-on paper. + * + * "Pairing Heaps: Experiments and Analysis" + * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf + * + ******************************************************************************* + */ + +#ifndef PH_H_ +#define PH_H_ + +/* Node structure. */ +#define phn(a_type) \ +struct { \ + a_type *phn_prev; \ + a_type *phn_next; \ + a_type *phn_lchild; \ +} + +/* Root structure. */ +#define ph(a_type) \ +struct { \ + a_type *ph_root; \ +} + +/* Internal utility macros. 
*/ +#define phn_lchild_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_lchild) +#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ + a_phn->a_field.phn_lchild = a_lchild; \ +} while (0) + +#define phn_next_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_next) +#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ + a_phn->a_field.phn_prev = a_prev; \ +} while (0) + +#define phn_prev_get(a_type, a_field, a_phn) \ + (a_phn->a_field.phn_prev) +#define phn_next_set(a_type, a_field, a_phn, a_next) do { \ + a_phn->a_field.phn_next = a_next; \ +} while (0) + +#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ + a_type *phn0child; \ + \ + assert(a_phn0 != NULL); \ + assert(a_phn1 != NULL); \ + assert(a_cmp(a_phn0, a_phn1) <= 0); \ + \ + phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ + phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ + phn_next_set(a_type, a_field, a_phn1, phn0child); \ + if (phn0child != NULL) { \ + phn_prev_set(a_type, a_field, phn0child, a_phn1); \ + } \ + phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ +} while (0) + +#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ + if (a_phn0 == NULL) { \ + r_phn = a_phn1; \ + } else if (a_phn1 == NULL) { \ + r_phn = a_phn0; \ + } else if (a_cmp(a_phn0, a_phn1) < 0) { \ + phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ + a_cmp); \ + r_phn = a_phn0; \ + } else { \ + phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ + a_cmp); \ + r_phn = a_phn1; \ + } \ +} while (0) + +#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *head = NULL; \ + a_type *tail = NULL; \ + a_type *phn0 = a_phn; \ + a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ + \ + /* \ + * Multipass merge, wherein the first two elements of a FIFO \ + * are repeatedly merged, and each result is appended to the \ + * singly linked FIFO, until the FIFO contains only a single \ + * element. We start with a sibling list but no reference to \ + * its tail, so we do a single pass over the sibling list to \ + * populate the FIFO. 
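+	 * (Editorial worked example: from the FIFO [a b c d e], the \
+	 * merge of the first two elements gives [c d e m(a,b)], then \
+	 * [e m(a,b) m(c,d)], then [m(c,d) m(e,m(a,b))], and finally \
+	 * the single root m(m(c,d), m(e,m(a,b))).) \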
\ + */ \ + if (phn1 != NULL) { \ + a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ + if (phnrest != NULL) { \ + phn_prev_set(a_type, a_field, phnrest, NULL); \ + } \ + phn_prev_set(a_type, a_field, phn0, NULL); \ + phn_next_set(a_type, a_field, phn0, NULL); \ + phn_prev_set(a_type, a_field, phn1, NULL); \ + phn_next_set(a_type, a_field, phn1, NULL); \ + phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ + head = tail = phn0; \ + phn0 = phnrest; \ + while (phn0 != NULL) { \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + phnrest = phn_next_get(a_type, a_field, \ + phn1); \ + if (phnrest != NULL) { \ + phn_prev_set(a_type, a_field, \ + phnrest, NULL); \ + } \ + phn_prev_set(a_type, a_field, phn0, \ + NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + phn_prev_set(a_type, a_field, phn1, \ + NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = phnrest; \ + } else { \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = NULL; \ + } \ + } \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, phn0); \ + if (phn1 != NULL) { \ + while (true) { \ + head = phn_next_get(a_type, a_field, \ + phn1); \ + assert(phn_prev_get(a_type, a_field, \ + phn0) == NULL); \ + phn_next_set(a_type, a_field, phn0, \ + NULL); \ + assert(phn_prev_get(a_type, a_field, \ + phn1) == NULL); \ + phn_next_set(a_type, a_field, phn1, \ + NULL); \ + phn_merge(a_type, a_field, phn0, phn1, \ + a_cmp, phn0); \ + if (head == NULL) { \ + break; \ + } \ + phn_next_set(a_type, a_field, tail, \ + phn0); \ + tail = phn0; \ + phn0 = head; \ + phn1 = phn_next_get(a_type, a_field, \ + phn0); \ + } \ + } \ + } \ + r_phn = phn0; \ +} while (0) + +#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ + a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ + if (phn != NULL) { \ + phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ + phn_prev_set(a_type, a_field, phn, NULL); \ + ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ + assert(phn_next_get(a_type, a_field, phn) == NULL); \ + phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ + a_ph->ph_root); \ + } \ +} while (0) + +#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ + a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ + if (lchild == NULL) { \ + r_phn = NULL; \ + } else { \ + ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ + r_phn); \ + } \ +} while (0) + +/* + * The ph_proto() macro generates function prototypes that correspond to the + * functions generated by an equivalently parameterized call to ph_gen(). + */ +#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ +a_attr void a_prefix##new(a_ph_type *ph); \ +a_attr bool a_prefix##empty(a_ph_type *ph); \ +a_attr a_type *a_prefix##first(a_ph_type *ph); \ +a_attr a_type *a_prefix##any(a_ph_type *ph); \ +a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ +a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ +a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \ +a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); + +/* + * The ph_gen() macro generates a type-specific pairing heap implementation, + * based on the above cpp macros. 
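+ *
+ * A hedged instantiation sketch (node_t, node_cmp, and the names below are
+ * illustrative, not part of this header):
+ *
+ *   typedef struct node_s node_t;
+ *   struct node_s { uint64_t key; phn(node_t) link; };
+ *   typedef ph(node_t) node_heap_t;
+ *   static int node_cmp(const node_t *a, const node_t *b) {
+ *       return (a->key > b->key) - (a->key < b->key);
+ *   }
+ *   ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)
+ *
+ * after which node_heap_new(), node_heap_insert(), node_heap_first() and
+ * node_heap_remove_first() maintain a min-heap of node_t ordered by key.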
+ */ +#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ +a_attr void \ +a_prefix##new(a_ph_type *ph) { \ + memset(ph, 0, sizeof(ph(a_type))); \ +} \ +a_attr bool \ +a_prefix##empty(a_ph_type *ph) { \ + return (ph->ph_root == NULL); \ +} \ +a_attr a_type * \ +a_prefix##first(a_ph_type *ph) { \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + return ph->ph_root; \ +} \ +a_attr a_type * \ +a_prefix##any(a_ph_type *ph) { \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \ + if (aux != NULL) { \ + return aux; \ + } \ + return ph->ph_root; \ +} \ +a_attr void \ +a_prefix##insert(a_ph_type *ph, a_type *phn) { \ + memset(&phn->a_field, 0, sizeof(phn(a_type))); \ + \ + /* \ + * Treat the root as an aux list during insertion, and lazily \ + * merge during a_prefix##remove_first(). For elements that \ + * are inserted, then removed via a_prefix##remove() before the \ + * aux list is ever processed, this makes insert/remove \ + * constant-time, whereas eager merging would make insert \ + * O(log n). \ + */ \ + if (ph->ph_root == NULL) { \ + ph->ph_root = phn; \ + } else { \ + phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ + a_field, ph->ph_root)); \ + if (phn_next_get(a_type, a_field, ph->ph_root) != \ + NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, ph->ph_root), \ + phn); \ + } \ + phn_prev_set(a_type, a_field, phn, ph->ph_root); \ + phn_next_set(a_type, a_field, ph->ph_root, phn); \ + } \ +} \ +a_attr a_type * \ +a_prefix##remove_first(a_ph_type *ph) { \ + a_type *ret; \ + \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + \ + ret = ph->ph_root; \ + \ + ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ + ph->ph_root); \ + \ + return ret; \ +} \ +a_attr a_type * \ +a_prefix##remove_any(a_ph_type *ph) { \ + /* \ + * Remove the most recently inserted aux list element, or the \ + * root if the aux list is empty. This has the effect of \ + * behaving as a LIFO (and insertion/removal is therefore \ + * constant-time) if a_prefix##[remove_]first() are never \ + * called. \ + */ \ + if (ph->ph_root == NULL) { \ + return NULL; \ + } \ + a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \ + if (ret != NULL) { \ + a_type *aux = phn_next_get(a_type, a_field, ret); \ + phn_next_set(a_type, a_field, ph->ph_root, aux); \ + if (aux != NULL) { \ + phn_prev_set(a_type, a_field, aux, \ + ph->ph_root); \ + } \ + return ret; \ + } \ + ret = ph->ph_root; \ + ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ + ph->ph_root); \ + return ret; \ +} \ +a_attr void \ +a_prefix##remove(a_ph_type *ph, a_type *phn) { \ + a_type *replace, *parent; \ + \ + if (ph->ph_root == phn) { \ + /* \ + * We can delete from aux list without merging it, but \ + * we need to merge if we are dealing with the root \ + * node and it has children. \ + */ \ + if (phn_lchild_get(a_type, a_field, phn) == NULL) { \ + ph->ph_root = phn_next_get(a_type, a_field, \ + phn); \ + if (ph->ph_root != NULL) { \ + phn_prev_set(a_type, a_field, \ + ph->ph_root, NULL); \ + } \ + return; \ + } \ + ph_merge_aux(a_type, a_field, ph, a_cmp); \ + if (ph->ph_root == phn) { \ + ph_merge_children(a_type, a_field, ph->ph_root, \ + a_cmp, ph->ph_root); \ + return; \ + } \ + } \ + \ + /* Get parent (if phn is leftmost child) before mutating. 
*/ \ + if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ + if (phn_lchild_get(a_type, a_field, parent) != phn) { \ + parent = NULL; \ + } \ + } \ + /* Find a possible replacement node, and link to parent. */ \ + ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ + /* Set next/prev for sibling linked list. */ \ + if (replace != NULL) { \ + if (parent != NULL) { \ + phn_prev_set(a_type, a_field, replace, parent); \ + phn_lchild_set(a_type, a_field, parent, \ + replace); \ + } else { \ + phn_prev_set(a_type, a_field, replace, \ + phn_prev_get(a_type, a_field, phn)); \ + if (phn_prev_get(a_type, a_field, phn) != \ + NULL) { \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + replace); \ + } \ + } \ + phn_next_set(a_type, a_field, replace, \ + phn_next_get(a_type, a_field, phn)); \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + replace); \ + } \ + } else { \ + if (parent != NULL) { \ + a_type *next = phn_next_get(a_type, a_field, \ + phn); \ + phn_lchild_set(a_type, a_field, parent, next); \ + if (next != NULL) { \ + phn_prev_set(a_type, a_field, next, \ + parent); \ + } \ + } else { \ + assert(phn_prev_get(a_type, a_field, phn) != \ + NULL); \ + phn_next_set(a_type, a_field, \ + phn_prev_get(a_type, a_field, phn), \ + phn_next_get(a_type, a_field, phn)); \ + } \ + if (phn_next_get(a_type, a_field, phn) != NULL) { \ + phn_prev_set(a_type, a_field, \ + phn_next_get(a_type, a_field, phn), \ + phn_prev_get(a_type, a_field, phn)); \ + } \ + } \ +} + +#endif /* PH_H_ */ diff --git a/dep/jemalloc/include/jemalloc/internal/private_namespace.h b/dep/jemalloc/include/jemalloc/internal/private_namespace.h index 35c3b0c6c74..7ebeeba8269 100644 --- a/dep/jemalloc/include/jemalloc/internal/private_namespace.h +++ b/dep/jemalloc/include/jemalloc/internal/private_namespace.h @@ -1,147 +1,199 @@ -#define a0calloc JEMALLOC_N(a0calloc) -#define a0free JEMALLOC_N(a0free) +#define a0dalloc JEMALLOC_N(a0dalloc) +#define a0get JEMALLOC_N(a0get) #define a0malloc JEMALLOC_N(a0malloc) +#define arena_aalloc JEMALLOC_N(arena_aalloc) #define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small) +#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge) #define arena_bin_index JEMALLOC_N(arena_bin_index) #define arena_bin_info JEMALLOC_N(arena_bin_info) +#define arena_bitselm_get_const JEMALLOC_N(arena_bitselm_get_const) +#define arena_bitselm_get_mutable JEMALLOC_N(arena_bitselm_get_mutable) #define arena_boot JEMALLOC_N(arena_boot) +#define arena_choose JEMALLOC_N(arena_choose) +#define arena_choose_hard JEMALLOC_N(arena_choose_hard) +#define arena_choose_impl JEMALLOC_N(arena_choose_impl) +#define arena_chunk_alloc_huge JEMALLOC_N(arena_chunk_alloc_huge) +#define arena_chunk_cache_maybe_insert JEMALLOC_N(arena_chunk_cache_maybe_insert) +#define arena_chunk_cache_maybe_remove JEMALLOC_N(arena_chunk_cache_maybe_remove) +#define arena_chunk_dalloc_huge JEMALLOC_N(arena_chunk_dalloc_huge) +#define arena_chunk_ralloc_huge_expand JEMALLOC_N(arena_chunk_ralloc_huge_expand) +#define arena_chunk_ralloc_huge_shrink JEMALLOC_N(arena_chunk_ralloc_huge_shrink) +#define arena_chunk_ralloc_huge_similar JEMALLOC_N(arena_chunk_ralloc_huge_similar) +#define arena_cleanup JEMALLOC_N(arena_cleanup) #define arena_dalloc JEMALLOC_N(arena_dalloc) #define arena_dalloc_bin JEMALLOC_N(arena_dalloc_bin) -#define arena_dalloc_bin_locked JEMALLOC_N(arena_dalloc_bin_locked) +#define 
arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked) #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) #define arena_dalloc_large JEMALLOC_N(arena_dalloc_large) -#define arena_dalloc_large_locked JEMALLOC_N(arena_dalloc_large_locked) +#define arena_dalloc_large_junked_locked JEMALLOC_N(arena_dalloc_large_junked_locked) #define arena_dalloc_small JEMALLOC_N(arena_dalloc_small) +#define arena_decay_tick JEMALLOC_N(arena_decay_tick) +#define arena_decay_ticks JEMALLOC_N(arena_decay_ticks) +#define arena_decay_time_default_get JEMALLOC_N(arena_decay_time_default_get) +#define arena_decay_time_default_set JEMALLOC_N(arena_decay_time_default_set) +#define arena_decay_time_get JEMALLOC_N(arena_decay_time_get) +#define arena_decay_time_set JEMALLOC_N(arena_decay_time_set) #define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get) #define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set) +#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next) +#define arena_get JEMALLOC_N(arena_get) +#define arena_ichoose JEMALLOC_N(arena_ichoose) +#define arena_init JEMALLOC_N(arena_init) +#define arena_lg_dirty_mult_default_get JEMALLOC_N(arena_lg_dirty_mult_default_get) +#define arena_lg_dirty_mult_default_set JEMALLOC_N(arena_lg_dirty_mult_default_set) +#define arena_lg_dirty_mult_get JEMALLOC_N(arena_lg_dirty_mult_get) +#define arena_lg_dirty_mult_set JEMALLOC_N(arena_lg_dirty_mult_set) #define arena_malloc JEMALLOC_N(arena_malloc) +#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard) #define arena_malloc_large JEMALLOC_N(arena_malloc_large) -#define arena_malloc_small JEMALLOC_N(arena_malloc_small) #define arena_mapbits_allocated_get JEMALLOC_N(arena_mapbits_allocated_get) #define arena_mapbits_binind_get JEMALLOC_N(arena_mapbits_binind_get) +#define arena_mapbits_decommitted_get JEMALLOC_N(arena_mapbits_decommitted_get) #define arena_mapbits_dirty_get JEMALLOC_N(arena_mapbits_dirty_get) #define arena_mapbits_get JEMALLOC_N(arena_mapbits_get) +#define arena_mapbits_internal_set JEMALLOC_N(arena_mapbits_internal_set) #define arena_mapbits_large_binind_set JEMALLOC_N(arena_mapbits_large_binind_set) #define arena_mapbits_large_get JEMALLOC_N(arena_mapbits_large_get) #define arena_mapbits_large_set JEMALLOC_N(arena_mapbits_large_set) #define arena_mapbits_large_size_get JEMALLOC_N(arena_mapbits_large_size_get) +#define arena_mapbits_size_decode JEMALLOC_N(arena_mapbits_size_decode) +#define arena_mapbits_size_encode JEMALLOC_N(arena_mapbits_size_encode) #define arena_mapbits_small_runind_get JEMALLOC_N(arena_mapbits_small_runind_get) #define arena_mapbits_small_set JEMALLOC_N(arena_mapbits_small_set) #define arena_mapbits_unallocated_set JEMALLOC_N(arena_mapbits_unallocated_set) #define arena_mapbits_unallocated_size_get JEMALLOC_N(arena_mapbits_unallocated_size_get) #define arena_mapbits_unallocated_size_set JEMALLOC_N(arena_mapbits_unallocated_size_set) #define arena_mapbits_unzeroed_get JEMALLOC_N(arena_mapbits_unzeroed_get) -#define arena_mapbits_unzeroed_set JEMALLOC_N(arena_mapbits_unzeroed_set) -#define arena_mapbitsp_get JEMALLOC_N(arena_mapbitsp_get) +#define arena_mapbitsp_get_const JEMALLOC_N(arena_mapbitsp_get_const) +#define arena_mapbitsp_get_mutable JEMALLOC_N(arena_mapbitsp_get_mutable) #define arena_mapbitsp_read JEMALLOC_N(arena_mapbitsp_read) #define arena_mapbitsp_write JEMALLOC_N(arena_mapbitsp_write) -#define arena_mapp_get JEMALLOC_N(arena_mapp_get) -#define arena_maxclass 
JEMALLOC_N(arena_maxclass) +#define arena_maxrun JEMALLOC_N(arena_maxrun) +#define arena_maybe_purge JEMALLOC_N(arena_maybe_purge) +#define arena_metadata_allocated_add JEMALLOC_N(arena_metadata_allocated_add) +#define arena_metadata_allocated_get JEMALLOC_N(arena_metadata_allocated_get) +#define arena_metadata_allocated_sub JEMALLOC_N(arena_metadata_allocated_sub) +#define arena_migrate JEMALLOC_N(arena_migrate) +#define arena_miscelm_get_const JEMALLOC_N(arena_miscelm_get_const) +#define arena_miscelm_get_mutable JEMALLOC_N(arena_miscelm_get_mutable) +#define arena_miscelm_to_pageind JEMALLOC_N(arena_miscelm_to_pageind) +#define arena_miscelm_to_rpages JEMALLOC_N(arena_miscelm_to_rpages) #define arena_new JEMALLOC_N(arena_new) +#define arena_node_alloc JEMALLOC_N(arena_node_alloc) +#define arena_node_dalloc JEMALLOC_N(arena_node_dalloc) +#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec) +#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get) +#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc) #define arena_palloc JEMALLOC_N(arena_palloc) #define arena_postfork_child JEMALLOC_N(arena_postfork_child) #define arena_postfork_parent JEMALLOC_N(arena_postfork_parent) -#define arena_prefork JEMALLOC_N(arena_prefork) +#define arena_prefork0 JEMALLOC_N(arena_prefork0) +#define arena_prefork1 JEMALLOC_N(arena_prefork1) +#define arena_prefork2 JEMALLOC_N(arena_prefork2) +#define arena_prefork3 JEMALLOC_N(arena_prefork3) #define arena_prof_accum JEMALLOC_N(arena_prof_accum) #define arena_prof_accum_impl JEMALLOC_N(arena_prof_accum_impl) #define arena_prof_accum_locked JEMALLOC_N(arena_prof_accum_locked) -#define arena_prof_ctx_get JEMALLOC_N(arena_prof_ctx_get) -#define arena_prof_ctx_set JEMALLOC_N(arena_prof_ctx_set) #define arena_prof_promoted JEMALLOC_N(arena_prof_promoted) +#define arena_prof_tctx_get JEMALLOC_N(arena_prof_tctx_get) +#define arena_prof_tctx_reset JEMALLOC_N(arena_prof_tctx_reset) +#define arena_prof_tctx_set JEMALLOC_N(arena_prof_tctx_set) #define arena_ptr_small_binind_get JEMALLOC_N(arena_ptr_small_binind_get) -#define arena_purge_all JEMALLOC_N(arena_purge_all) +#define arena_purge JEMALLOC_N(arena_purge) #define arena_quarantine_junk_small JEMALLOC_N(arena_quarantine_junk_small) #define arena_ralloc JEMALLOC_N(arena_ralloc) #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) #define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move) +#define arena_rd_to_miscelm JEMALLOC_N(arena_rd_to_miscelm) #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) +#define arena_reset JEMALLOC_N(arena_reset) #define arena_run_regind JEMALLOC_N(arena_run_regind) +#define arena_run_to_miscelm JEMALLOC_N(arena_run_to_miscelm) #define arena_salloc JEMALLOC_N(arena_salloc) +#define arena_sdalloc JEMALLOC_N(arena_sdalloc) #define arena_stats_merge JEMALLOC_N(arena_stats_merge) #define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small) +#define arena_tdata_get JEMALLOC_N(arena_tdata_get) +#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard) #define arenas JEMALLOC_N(arenas) -#define arenas_booted JEMALLOC_N(arenas_booted) -#define arenas_cleanup JEMALLOC_N(arenas_cleanup) -#define arenas_extend JEMALLOC_N(arenas_extend) -#define arenas_initialized JEMALLOC_N(arenas_initialized) -#define arenas_lock JEMALLOC_N(arenas_lock) -#define arenas_tls JEMALLOC_N(arenas_tls) -#define arenas_tsd JEMALLOC_N(arenas_tsd) -#define arenas_tsd_boot JEMALLOC_N(arenas_tsd_boot) -#define arenas_tsd_cleanup_wrapper 
JEMALLOC_N(arenas_tsd_cleanup_wrapper) -#define arenas_tsd_get JEMALLOC_N(arenas_tsd_get) -#define arenas_tsd_get_wrapper JEMALLOC_N(arenas_tsd_get_wrapper) -#define arenas_tsd_init_head JEMALLOC_N(arenas_tsd_init_head) -#define arenas_tsd_set JEMALLOC_N(arenas_tsd_set) +#define arenas_tdata_bypass_cleanup JEMALLOC_N(arenas_tdata_bypass_cleanup) +#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup) +#define atomic_add_p JEMALLOC_N(atomic_add_p) #define atomic_add_u JEMALLOC_N(atomic_add_u) #define atomic_add_uint32 JEMALLOC_N(atomic_add_uint32) #define atomic_add_uint64 JEMALLOC_N(atomic_add_uint64) #define atomic_add_z JEMALLOC_N(atomic_add_z) +#define atomic_cas_p JEMALLOC_N(atomic_cas_p) +#define atomic_cas_u JEMALLOC_N(atomic_cas_u) +#define atomic_cas_uint32 JEMALLOC_N(atomic_cas_uint32) +#define atomic_cas_uint64 JEMALLOC_N(atomic_cas_uint64) +#define atomic_cas_z JEMALLOC_N(atomic_cas_z) +#define atomic_sub_p JEMALLOC_N(atomic_sub_p) #define atomic_sub_u JEMALLOC_N(atomic_sub_u) #define atomic_sub_uint32 JEMALLOC_N(atomic_sub_uint32) #define atomic_sub_uint64 JEMALLOC_N(atomic_sub_uint64) #define atomic_sub_z JEMALLOC_N(atomic_sub_z) +#define atomic_write_p JEMALLOC_N(atomic_write_p) +#define atomic_write_u JEMALLOC_N(atomic_write_u) +#define atomic_write_uint32 JEMALLOC_N(atomic_write_uint32) +#define atomic_write_uint64 JEMALLOC_N(atomic_write_uint64) +#define atomic_write_z JEMALLOC_N(atomic_write_z) #define base_alloc JEMALLOC_N(base_alloc) #define base_boot JEMALLOC_N(base_boot) -#define base_calloc JEMALLOC_N(base_calloc) -#define base_node_alloc JEMALLOC_N(base_node_alloc) -#define base_node_dealloc JEMALLOC_N(base_node_dealloc) #define base_postfork_child JEMALLOC_N(base_postfork_child) #define base_postfork_parent JEMALLOC_N(base_postfork_parent) #define base_prefork JEMALLOC_N(base_prefork) +#define base_stats_get JEMALLOC_N(base_stats_get) #define bitmap_full JEMALLOC_N(bitmap_full) #define bitmap_get JEMALLOC_N(bitmap_get) #define bitmap_info_init JEMALLOC_N(bitmap_info_init) -#define bitmap_info_ngroups JEMALLOC_N(bitmap_info_ngroups) #define bitmap_init JEMALLOC_N(bitmap_init) #define bitmap_set JEMALLOC_N(bitmap_set) #define bitmap_sfu JEMALLOC_N(bitmap_sfu) #define bitmap_size JEMALLOC_N(bitmap_size) #define bitmap_unset JEMALLOC_N(bitmap_unset) +#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc) +#define bootstrap_free JEMALLOC_N(bootstrap_free) +#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc) #define bt_init JEMALLOC_N(bt_init) #define buferror JEMALLOC_N(buferror) -#define choose_arena JEMALLOC_N(choose_arena) -#define choose_arena_hard JEMALLOC_N(choose_arena_hard) -#define chunk_alloc JEMALLOC_N(chunk_alloc) +#define chunk_alloc_base JEMALLOC_N(chunk_alloc_base) +#define chunk_alloc_cache JEMALLOC_N(chunk_alloc_cache) #define chunk_alloc_dss JEMALLOC_N(chunk_alloc_dss) #define chunk_alloc_mmap JEMALLOC_N(chunk_alloc_mmap) +#define chunk_alloc_wrapper JEMALLOC_N(chunk_alloc_wrapper) #define chunk_boot JEMALLOC_N(chunk_boot) -#define chunk_dealloc JEMALLOC_N(chunk_dealloc) -#define chunk_dealloc_mmap JEMALLOC_N(chunk_dealloc_mmap) +#define chunk_dalloc_cache JEMALLOC_N(chunk_dalloc_cache) +#define chunk_dalloc_mmap JEMALLOC_N(chunk_dalloc_mmap) +#define chunk_dalloc_wrapper JEMALLOC_N(chunk_dalloc_wrapper) +#define chunk_deregister JEMALLOC_N(chunk_deregister) #define chunk_dss_boot JEMALLOC_N(chunk_dss_boot) -#define chunk_dss_postfork_child JEMALLOC_N(chunk_dss_postfork_child) -#define chunk_dss_postfork_parent 
JEMALLOC_N(chunk_dss_postfork_parent) +#define chunk_dss_mergeable JEMALLOC_N(chunk_dss_mergeable) #define chunk_dss_prec_get JEMALLOC_N(chunk_dss_prec_get) #define chunk_dss_prec_set JEMALLOC_N(chunk_dss_prec_set) -#define chunk_dss_prefork JEMALLOC_N(chunk_dss_prefork) +#define chunk_hooks_default JEMALLOC_N(chunk_hooks_default) +#define chunk_hooks_get JEMALLOC_N(chunk_hooks_get) +#define chunk_hooks_set JEMALLOC_N(chunk_hooks_set) #define chunk_in_dss JEMALLOC_N(chunk_in_dss) +#define chunk_lookup JEMALLOC_N(chunk_lookup) #define chunk_npages JEMALLOC_N(chunk_npages) -#define chunk_postfork_child JEMALLOC_N(chunk_postfork_child) -#define chunk_postfork_parent JEMALLOC_N(chunk_postfork_parent) -#define chunk_prefork JEMALLOC_N(chunk_prefork) -#define chunk_unmap JEMALLOC_N(chunk_unmap) -#define chunks_mtx JEMALLOC_N(chunks_mtx) +#define chunk_purge_wrapper JEMALLOC_N(chunk_purge_wrapper) +#define chunk_register JEMALLOC_N(chunk_register) #define chunks_rtree JEMALLOC_N(chunks_rtree) #define chunksize JEMALLOC_N(chunksize) #define chunksize_mask JEMALLOC_N(chunksize_mask) -#define ckh_bucket_search JEMALLOC_N(ckh_bucket_search) #define ckh_count JEMALLOC_N(ckh_count) #define ckh_delete JEMALLOC_N(ckh_delete) -#define ckh_evict_reloc_insert JEMALLOC_N(ckh_evict_reloc_insert) #define ckh_insert JEMALLOC_N(ckh_insert) -#define ckh_isearch JEMALLOC_N(ckh_isearch) #define ckh_iter JEMALLOC_N(ckh_iter) #define ckh_new JEMALLOC_N(ckh_new) #define ckh_pointer_hash JEMALLOC_N(ckh_pointer_hash) #define ckh_pointer_keycomp JEMALLOC_N(ckh_pointer_keycomp) -#define ckh_rebuild JEMALLOC_N(ckh_rebuild) #define ckh_remove JEMALLOC_N(ckh_remove) #define ckh_search JEMALLOC_N(ckh_search) #define ckh_string_hash JEMALLOC_N(ckh_string_hash) #define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp) -#define ckh_try_bucket_insert JEMALLOC_N(ckh_try_bucket_insert) -#define ckh_try_insert JEMALLOC_N(ckh_try_insert) #define ctl_boot JEMALLOC_N(ctl_boot) #define ctl_bymib JEMALLOC_N(ctl_bymib) #define ctl_byname JEMALLOC_N(ctl_byname) @@ -149,7 +201,33 @@ #define ctl_postfork_child JEMALLOC_N(ctl_postfork_child) #define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent) #define ctl_prefork JEMALLOC_N(ctl_prefork) +#define decay_ticker_get JEMALLOC_N(decay_ticker_get) #define dss_prec_names JEMALLOC_N(dss_prec_names) +#define extent_node_achunk_get JEMALLOC_N(extent_node_achunk_get) +#define extent_node_achunk_set JEMALLOC_N(extent_node_achunk_set) +#define extent_node_addr_get JEMALLOC_N(extent_node_addr_get) +#define extent_node_addr_set JEMALLOC_N(extent_node_addr_set) +#define extent_node_arena_get JEMALLOC_N(extent_node_arena_get) +#define extent_node_arena_set JEMALLOC_N(extent_node_arena_set) +#define extent_node_committed_get JEMALLOC_N(extent_node_committed_get) +#define extent_node_committed_set JEMALLOC_N(extent_node_committed_set) +#define extent_node_dirty_insert JEMALLOC_N(extent_node_dirty_insert) +#define extent_node_dirty_linkage_init JEMALLOC_N(extent_node_dirty_linkage_init) +#define extent_node_dirty_remove JEMALLOC_N(extent_node_dirty_remove) +#define extent_node_init JEMALLOC_N(extent_node_init) +#define extent_node_prof_tctx_get JEMALLOC_N(extent_node_prof_tctx_get) +#define extent_node_prof_tctx_set JEMALLOC_N(extent_node_prof_tctx_set) +#define extent_node_size_get JEMALLOC_N(extent_node_size_get) +#define extent_node_size_set JEMALLOC_N(extent_node_size_set) +#define extent_node_sn_get JEMALLOC_N(extent_node_sn_get) +#define extent_node_sn_set JEMALLOC_N(extent_node_sn_set) +#define 
extent_node_zeroed_get JEMALLOC_N(extent_node_zeroed_get) +#define extent_node_zeroed_set JEMALLOC_N(extent_node_zeroed_set) +#define extent_size_quantize_ceil JEMALLOC_N(extent_size_quantize_ceil) +#define extent_size_quantize_floor JEMALLOC_N(extent_size_quantize_floor) +#define extent_tree_ad_destroy JEMALLOC_N(extent_tree_ad_destroy) +#define extent_tree_ad_destroy_recurse JEMALLOC_N(extent_tree_ad_destroy_recurse) +#define extent_tree_ad_empty JEMALLOC_N(extent_tree_ad_empty) #define extent_tree_ad_first JEMALLOC_N(extent_tree_ad_first) #define extent_tree_ad_insert JEMALLOC_N(extent_tree_ad_insert) #define extent_tree_ad_iter JEMALLOC_N(extent_tree_ad_iter) @@ -166,22 +244,31 @@ #define extent_tree_ad_reverse_iter_recurse JEMALLOC_N(extent_tree_ad_reverse_iter_recurse) #define extent_tree_ad_reverse_iter_start JEMALLOC_N(extent_tree_ad_reverse_iter_start) #define extent_tree_ad_search JEMALLOC_N(extent_tree_ad_search) -#define extent_tree_szad_first JEMALLOC_N(extent_tree_szad_first) -#define extent_tree_szad_insert JEMALLOC_N(extent_tree_szad_insert) -#define extent_tree_szad_iter JEMALLOC_N(extent_tree_szad_iter) -#define extent_tree_szad_iter_recurse JEMALLOC_N(extent_tree_szad_iter_recurse) -#define extent_tree_szad_iter_start JEMALLOC_N(extent_tree_szad_iter_start) -#define extent_tree_szad_last JEMALLOC_N(extent_tree_szad_last) -#define extent_tree_szad_new JEMALLOC_N(extent_tree_szad_new) -#define extent_tree_szad_next JEMALLOC_N(extent_tree_szad_next) -#define extent_tree_szad_nsearch JEMALLOC_N(extent_tree_szad_nsearch) -#define extent_tree_szad_prev JEMALLOC_N(extent_tree_szad_prev) -#define extent_tree_szad_psearch JEMALLOC_N(extent_tree_szad_psearch) -#define extent_tree_szad_remove JEMALLOC_N(extent_tree_szad_remove) -#define extent_tree_szad_reverse_iter JEMALLOC_N(extent_tree_szad_reverse_iter) -#define extent_tree_szad_reverse_iter_recurse JEMALLOC_N(extent_tree_szad_reverse_iter_recurse) -#define extent_tree_szad_reverse_iter_start JEMALLOC_N(extent_tree_szad_reverse_iter_start) -#define extent_tree_szad_search JEMALLOC_N(extent_tree_szad_search) +#define extent_tree_szsnad_destroy JEMALLOC_N(extent_tree_szsnad_destroy) +#define extent_tree_szsnad_destroy_recurse JEMALLOC_N(extent_tree_szsnad_destroy_recurse) +#define extent_tree_szsnad_empty JEMALLOC_N(extent_tree_szsnad_empty) +#define extent_tree_szsnad_first JEMALLOC_N(extent_tree_szsnad_first) +#define extent_tree_szsnad_insert JEMALLOC_N(extent_tree_szsnad_insert) +#define extent_tree_szsnad_iter JEMALLOC_N(extent_tree_szsnad_iter) +#define extent_tree_szsnad_iter_recurse JEMALLOC_N(extent_tree_szsnad_iter_recurse) +#define extent_tree_szsnad_iter_start JEMALLOC_N(extent_tree_szsnad_iter_start) +#define extent_tree_szsnad_last JEMALLOC_N(extent_tree_szsnad_last) +#define extent_tree_szsnad_new JEMALLOC_N(extent_tree_szsnad_new) +#define extent_tree_szsnad_next JEMALLOC_N(extent_tree_szsnad_next) +#define extent_tree_szsnad_nsearch JEMALLOC_N(extent_tree_szsnad_nsearch) +#define extent_tree_szsnad_prev JEMALLOC_N(extent_tree_szsnad_prev) +#define extent_tree_szsnad_psearch JEMALLOC_N(extent_tree_szsnad_psearch) +#define extent_tree_szsnad_remove JEMALLOC_N(extent_tree_szsnad_remove) +#define extent_tree_szsnad_reverse_iter JEMALLOC_N(extent_tree_szsnad_reverse_iter) +#define extent_tree_szsnad_reverse_iter_recurse JEMALLOC_N(extent_tree_szsnad_reverse_iter_recurse) +#define extent_tree_szsnad_reverse_iter_start JEMALLOC_N(extent_tree_szsnad_reverse_iter_start) +#define extent_tree_szsnad_search 
JEMALLOC_N(extent_tree_szsnad_search) +#define ffs_llu JEMALLOC_N(ffs_llu) +#define ffs_lu JEMALLOC_N(ffs_lu) +#define ffs_u JEMALLOC_N(ffs_u) +#define ffs_u32 JEMALLOC_N(ffs_u32) +#define ffs_u64 JEMALLOC_N(ffs_u64) +#define ffs_zu JEMALLOC_N(ffs_zu) #define get_errno JEMALLOC_N(get_errno) #define hash JEMALLOC_N(hash) #define hash_fmix_32 JEMALLOC_N(hash_fmix_32) @@ -193,46 +280,51 @@ #define hash_x64_128 JEMALLOC_N(hash_x64_128) #define hash_x86_128 JEMALLOC_N(hash_x86_128) #define hash_x86_32 JEMALLOC_N(hash_x86_32) -#define huge_allocated JEMALLOC_N(huge_allocated) -#define huge_boot JEMALLOC_N(huge_boot) +#define huge_aalloc JEMALLOC_N(huge_aalloc) #define huge_dalloc JEMALLOC_N(huge_dalloc) #define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) -#define huge_dss_prec_get JEMALLOC_N(huge_dss_prec_get) #define huge_malloc JEMALLOC_N(huge_malloc) -#define huge_mtx JEMALLOC_N(huge_mtx) -#define huge_ndalloc JEMALLOC_N(huge_ndalloc) -#define huge_nmalloc JEMALLOC_N(huge_nmalloc) #define huge_palloc JEMALLOC_N(huge_palloc) -#define huge_postfork_child JEMALLOC_N(huge_postfork_child) -#define huge_postfork_parent JEMALLOC_N(huge_postfork_parent) -#define huge_prefork JEMALLOC_N(huge_prefork) -#define huge_prof_ctx_get JEMALLOC_N(huge_prof_ctx_get) -#define huge_prof_ctx_set JEMALLOC_N(huge_prof_ctx_set) +#define huge_prof_tctx_get JEMALLOC_N(huge_prof_tctx_get) +#define huge_prof_tctx_reset JEMALLOC_N(huge_prof_tctx_reset) +#define huge_prof_tctx_set JEMALLOC_N(huge_prof_tctx_set) #define huge_ralloc JEMALLOC_N(huge_ralloc) #define huge_ralloc_no_move JEMALLOC_N(huge_ralloc_no_move) #define huge_salloc JEMALLOC_N(huge_salloc) -#define iallocm JEMALLOC_N(iallocm) -#define icalloc JEMALLOC_N(icalloc) -#define icalloct JEMALLOC_N(icalloct) +#define iaalloc JEMALLOC_N(iaalloc) +#define ialloc JEMALLOC_N(ialloc) +#define iallocztm JEMALLOC_N(iallocztm) +#define iarena_cleanup JEMALLOC_N(iarena_cleanup) #define idalloc JEMALLOC_N(idalloc) -#define idalloct JEMALLOC_N(idalloct) -#define imalloc JEMALLOC_N(imalloc) -#define imalloct JEMALLOC_N(imalloct) +#define idalloctm JEMALLOC_N(idalloctm) +#define in_valgrind JEMALLOC_N(in_valgrind) +#define index2size JEMALLOC_N(index2size) +#define index2size_compute JEMALLOC_N(index2size_compute) +#define index2size_lookup JEMALLOC_N(index2size_lookup) +#define index2size_tab JEMALLOC_N(index2size_tab) #define ipalloc JEMALLOC_N(ipalloc) #define ipalloct JEMALLOC_N(ipalloct) +#define ipallocztm JEMALLOC_N(ipallocztm) #define iqalloc JEMALLOC_N(iqalloc) -#define iqalloct JEMALLOC_N(iqalloct) #define iralloc JEMALLOC_N(iralloc) #define iralloct JEMALLOC_N(iralloct) #define iralloct_realign JEMALLOC_N(iralloct_realign) #define isalloc JEMALLOC_N(isalloc) +#define isdalloct JEMALLOC_N(isdalloct) +#define isqalloc JEMALLOC_N(isqalloc) #define isthreaded JEMALLOC_N(isthreaded) #define ivsalloc JEMALLOC_N(ivsalloc) #define ixalloc JEMALLOC_N(ixalloc) #define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child) #define jemalloc_postfork_parent JEMALLOC_N(jemalloc_postfork_parent) #define jemalloc_prefork JEMALLOC_N(jemalloc_prefork) +#define large_maxclass JEMALLOC_N(large_maxclass) +#define lg_floor JEMALLOC_N(lg_floor) +#define lg_prof_sample JEMALLOC_N(lg_prof_sample) #define malloc_cprintf JEMALLOC_N(malloc_cprintf) +#define malloc_mutex_assert_not_owner JEMALLOC_N(malloc_mutex_assert_not_owner) +#define malloc_mutex_assert_owner JEMALLOC_N(malloc_mutex_assert_owner) +#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot) #define malloc_mutex_init 
JEMALLOC_N(malloc_mutex_init) #define malloc_mutex_lock JEMALLOC_N(malloc_mutex_lock) #define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child) @@ -242,7 +334,8 @@ #define malloc_printf JEMALLOC_N(malloc_printf) #define malloc_snprintf JEMALLOC_N(malloc_snprintf) #define malloc_strtoumax JEMALLOC_N(malloc_strtoumax) -#define malloc_tsd_boot JEMALLOC_N(malloc_tsd_boot) +#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0) +#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1) #define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register) #define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc) #define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc) @@ -251,16 +344,35 @@ #define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf) #define malloc_write JEMALLOC_N(malloc_write) #define map_bias JEMALLOC_N(map_bias) +#define map_misc_offset JEMALLOC_N(map_misc_offset) #define mb_write JEMALLOC_N(mb_write) -#define mutex_boot JEMALLOC_N(mutex_boot) #define narenas_auto JEMALLOC_N(narenas_auto) -#define narenas_total JEMALLOC_N(narenas_total) +#define narenas_tdata_cleanup JEMALLOC_N(narenas_tdata_cleanup) #define narenas_total_get JEMALLOC_N(narenas_total_get) #define ncpus JEMALLOC_N(ncpus) #define nhbins JEMALLOC_N(nhbins) +#define nhclasses JEMALLOC_N(nhclasses) +#define nlclasses JEMALLOC_N(nlclasses) +#define nstime_add JEMALLOC_N(nstime_add) +#define nstime_compare JEMALLOC_N(nstime_compare) +#define nstime_copy JEMALLOC_N(nstime_copy) +#define nstime_divide JEMALLOC_N(nstime_divide) +#define nstime_idivide JEMALLOC_N(nstime_idivide) +#define nstime_imultiply JEMALLOC_N(nstime_imultiply) +#define nstime_init JEMALLOC_N(nstime_init) +#define nstime_init2 JEMALLOC_N(nstime_init2) +#define nstime_monotonic JEMALLOC_N(nstime_monotonic) +#define nstime_ns JEMALLOC_N(nstime_ns) +#define nstime_nsec JEMALLOC_N(nstime_nsec) +#define nstime_sec JEMALLOC_N(nstime_sec) +#define nstime_subtract JEMALLOC_N(nstime_subtract) +#define nstime_update JEMALLOC_N(nstime_update) #define opt_abort JEMALLOC_N(opt_abort) +#define opt_decay_time JEMALLOC_N(opt_decay_time) #define opt_dss JEMALLOC_N(opt_dss) #define opt_junk JEMALLOC_N(opt_junk) +#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc) +#define opt_junk_free JEMALLOC_N(opt_junk_free) #define opt_lg_chunk JEMALLOC_N(opt_lg_chunk) #define opt_lg_dirty_mult JEMALLOC_N(opt_lg_dirty_mult) #define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval) @@ -274,140 +386,254 @@ #define opt_prof_gdump JEMALLOC_N(opt_prof_gdump) #define opt_prof_leak JEMALLOC_N(opt_prof_leak) #define opt_prof_prefix JEMALLOC_N(opt_prof_prefix) +#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init) +#define opt_purge JEMALLOC_N(opt_purge) #define opt_quarantine JEMALLOC_N(opt_quarantine) #define opt_redzone JEMALLOC_N(opt_redzone) #define opt_stats_print JEMALLOC_N(opt_stats_print) #define opt_tcache JEMALLOC_N(opt_tcache) +#define opt_thp JEMALLOC_N(opt_thp) #define opt_utrace JEMALLOC_N(opt_utrace) -#define opt_valgrind JEMALLOC_N(opt_valgrind) #define opt_xmalloc JEMALLOC_N(opt_xmalloc) #define opt_zero JEMALLOC_N(opt_zero) #define p2rz JEMALLOC_N(p2rz) +#define pages_boot JEMALLOC_N(pages_boot) +#define pages_commit JEMALLOC_N(pages_commit) +#define pages_decommit JEMALLOC_N(pages_decommit) +#define pages_huge JEMALLOC_N(pages_huge) +#define pages_map JEMALLOC_N(pages_map) +#define pages_nohuge JEMALLOC_N(pages_nohuge) #define pages_purge JEMALLOC_N(pages_purge) -#define pow2_ceil JEMALLOC_N(pow2_ceil) +#define pages_trim 
JEMALLOC_N(pages_trim) +#define pages_unmap JEMALLOC_N(pages_unmap) +#define pind2sz JEMALLOC_N(pind2sz) +#define pind2sz_compute JEMALLOC_N(pind2sz_compute) +#define pind2sz_lookup JEMALLOC_N(pind2sz_lookup) +#define pind2sz_tab JEMALLOC_N(pind2sz_tab) +#define pow2_ceil_u32 JEMALLOC_N(pow2_ceil_u32) +#define pow2_ceil_u64 JEMALLOC_N(pow2_ceil_u64) +#define pow2_ceil_zu JEMALLOC_N(pow2_ceil_zu) +#define prng_lg_range_u32 JEMALLOC_N(prng_lg_range_u32) +#define prng_lg_range_u64 JEMALLOC_N(prng_lg_range_u64) +#define prng_lg_range_zu JEMALLOC_N(prng_lg_range_zu) +#define prng_range_u32 JEMALLOC_N(prng_range_u32) +#define prng_range_u64 JEMALLOC_N(prng_range_u64) +#define prng_range_zu JEMALLOC_N(prng_range_zu) +#define prng_state_next_u32 JEMALLOC_N(prng_state_next_u32) +#define prng_state_next_u64 JEMALLOC_N(prng_state_next_u64) +#define prng_state_next_zu JEMALLOC_N(prng_state_next_zu) +#define prof_active JEMALLOC_N(prof_active) +#define prof_active_get JEMALLOC_N(prof_active_get) +#define prof_active_get_unlocked JEMALLOC_N(prof_active_get_unlocked) +#define prof_active_set JEMALLOC_N(prof_active_set) +#define prof_alloc_prep JEMALLOC_N(prof_alloc_prep) +#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback) #define prof_backtrace JEMALLOC_N(prof_backtrace) #define prof_boot0 JEMALLOC_N(prof_boot0) #define prof_boot1 JEMALLOC_N(prof_boot1) #define prof_boot2 JEMALLOC_N(prof_boot2) #define prof_bt_count JEMALLOC_N(prof_bt_count) -#define prof_ctx_get JEMALLOC_N(prof_ctx_get) -#define prof_ctx_set JEMALLOC_N(prof_ctx_set) +#define prof_dump_header JEMALLOC_N(prof_dump_header) #define prof_dump_open JEMALLOC_N(prof_dump_open) #define prof_free JEMALLOC_N(prof_free) +#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object) #define prof_gdump JEMALLOC_N(prof_gdump) +#define prof_gdump_get JEMALLOC_N(prof_gdump_get) +#define prof_gdump_get_unlocked JEMALLOC_N(prof_gdump_get_unlocked) +#define prof_gdump_set JEMALLOC_N(prof_gdump_set) +#define prof_gdump_val JEMALLOC_N(prof_gdump_val) #define prof_idump JEMALLOC_N(prof_idump) #define prof_interval JEMALLOC_N(prof_interval) #define prof_lookup JEMALLOC_N(prof_lookup) #define prof_malloc JEMALLOC_N(prof_malloc) +#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object) #define prof_mdump JEMALLOC_N(prof_mdump) #define prof_postfork_child JEMALLOC_N(prof_postfork_child) #define prof_postfork_parent JEMALLOC_N(prof_postfork_parent) -#define prof_prefork JEMALLOC_N(prof_prefork) -#define prof_promote JEMALLOC_N(prof_promote) +#define prof_prefork0 JEMALLOC_N(prof_prefork0) +#define prof_prefork1 JEMALLOC_N(prof_prefork1) #define prof_realloc JEMALLOC_N(prof_realloc) +#define prof_reset JEMALLOC_N(prof_reset) #define prof_sample_accum_update JEMALLOC_N(prof_sample_accum_update) #define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update) -#define prof_tdata_booted JEMALLOC_N(prof_tdata_booted) +#define prof_tctx_get JEMALLOC_N(prof_tctx_get) +#define prof_tctx_reset JEMALLOC_N(prof_tctx_reset) +#define prof_tctx_set JEMALLOC_N(prof_tctx_set) #define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup) +#define prof_tdata_count JEMALLOC_N(prof_tdata_count) #define prof_tdata_get JEMALLOC_N(prof_tdata_get) #define prof_tdata_init JEMALLOC_N(prof_tdata_init) -#define prof_tdata_initialized JEMALLOC_N(prof_tdata_initialized) -#define prof_tdata_tls JEMALLOC_N(prof_tdata_tls) -#define prof_tdata_tsd JEMALLOC_N(prof_tdata_tsd) -#define prof_tdata_tsd_boot JEMALLOC_N(prof_tdata_tsd_boot) -#define 
prof_tdata_tsd_cleanup_wrapper JEMALLOC_N(prof_tdata_tsd_cleanup_wrapper) -#define prof_tdata_tsd_get JEMALLOC_N(prof_tdata_tsd_get) -#define prof_tdata_tsd_get_wrapper JEMALLOC_N(prof_tdata_tsd_get_wrapper) -#define prof_tdata_tsd_init_head JEMALLOC_N(prof_tdata_tsd_init_head) -#define prof_tdata_tsd_set JEMALLOC_N(prof_tdata_tsd_set) +#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit) +#define prof_thread_active_get JEMALLOC_N(prof_thread_active_get) +#define prof_thread_active_init_get JEMALLOC_N(prof_thread_active_init_get) +#define prof_thread_active_init_set JEMALLOC_N(prof_thread_active_init_set) +#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set) +#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get) +#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set) +#define psz2ind JEMALLOC_N(psz2ind) +#define psz2u JEMALLOC_N(psz2u) +#define purge_mode_names JEMALLOC_N(purge_mode_names) #define quarantine JEMALLOC_N(quarantine) #define quarantine_alloc_hook JEMALLOC_N(quarantine_alloc_hook) -#define quarantine_boot JEMALLOC_N(quarantine_boot) -#define quarantine_booted JEMALLOC_N(quarantine_booted) +#define quarantine_alloc_hook_work JEMALLOC_N(quarantine_alloc_hook_work) #define quarantine_cleanup JEMALLOC_N(quarantine_cleanup) -#define quarantine_init JEMALLOC_N(quarantine_init) -#define quarantine_tls JEMALLOC_N(quarantine_tls) -#define quarantine_tsd JEMALLOC_N(quarantine_tsd) -#define quarantine_tsd_boot JEMALLOC_N(quarantine_tsd_boot) -#define quarantine_tsd_cleanup_wrapper JEMALLOC_N(quarantine_tsd_cleanup_wrapper) -#define quarantine_tsd_get JEMALLOC_N(quarantine_tsd_get) -#define quarantine_tsd_get_wrapper JEMALLOC_N(quarantine_tsd_get_wrapper) -#define quarantine_tsd_init_head JEMALLOC_N(quarantine_tsd_init_head) -#define quarantine_tsd_set JEMALLOC_N(quarantine_tsd_set) -#define register_zone JEMALLOC_N(register_zone) +#define rtree_child_read JEMALLOC_N(rtree_child_read) +#define rtree_child_read_hard JEMALLOC_N(rtree_child_read_hard) +#define rtree_child_tryread JEMALLOC_N(rtree_child_tryread) #define rtree_delete JEMALLOC_N(rtree_delete) #define rtree_get JEMALLOC_N(rtree_get) -#define rtree_get_locked JEMALLOC_N(rtree_get_locked) #define rtree_new JEMALLOC_N(rtree_new) -#define rtree_postfork_child JEMALLOC_N(rtree_postfork_child) -#define rtree_postfork_parent JEMALLOC_N(rtree_postfork_parent) -#define rtree_prefork JEMALLOC_N(rtree_prefork) +#define rtree_node_valid JEMALLOC_N(rtree_node_valid) #define rtree_set JEMALLOC_N(rtree_set) +#define rtree_start_level JEMALLOC_N(rtree_start_level) +#define rtree_subkey JEMALLOC_N(rtree_subkey) +#define rtree_subtree_read JEMALLOC_N(rtree_subtree_read) +#define rtree_subtree_read_hard JEMALLOC_N(rtree_subtree_read_hard) +#define rtree_subtree_tryread JEMALLOC_N(rtree_subtree_tryread) +#define rtree_val_read JEMALLOC_N(rtree_val_read) +#define rtree_val_write JEMALLOC_N(rtree_val_write) +#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil) +#define run_quantize_floor JEMALLOC_N(run_quantize_floor) #define s2u JEMALLOC_N(s2u) +#define s2u_compute JEMALLOC_N(s2u_compute) +#define s2u_lookup JEMALLOC_N(s2u_lookup) #define sa2u JEMALLOC_N(sa2u) #define set_errno JEMALLOC_N(set_errno) -#define small_size2bin JEMALLOC_N(small_size2bin) +#define size2index JEMALLOC_N(size2index) +#define size2index_compute JEMALLOC_N(size2index_compute) +#define size2index_lookup JEMALLOC_N(size2index_lookup) +#define size2index_tab JEMALLOC_N(size2index_tab) +#define spin_adaptive JEMALLOC_N(spin_adaptive) +#define 
spin_init JEMALLOC_N(spin_init) #define stats_cactive JEMALLOC_N(stats_cactive) #define stats_cactive_add JEMALLOC_N(stats_cactive_add) #define stats_cactive_get JEMALLOC_N(stats_cactive_get) #define stats_cactive_sub JEMALLOC_N(stats_cactive_sub) -#define stats_chunks JEMALLOC_N(stats_chunks) #define stats_print JEMALLOC_N(stats_print) #define tcache_alloc_easy JEMALLOC_N(tcache_alloc_easy) #define tcache_alloc_large JEMALLOC_N(tcache_alloc_large) #define tcache_alloc_small JEMALLOC_N(tcache_alloc_small) #define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard) -#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate) -#define tcache_arena_dissociate JEMALLOC_N(tcache_arena_dissociate) +#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate) #define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large) #define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small) #define tcache_bin_info JEMALLOC_N(tcache_bin_info) -#define tcache_boot0 JEMALLOC_N(tcache_boot0) -#define tcache_boot1 JEMALLOC_N(tcache_boot1) -#define tcache_booted JEMALLOC_N(tcache_booted) +#define tcache_boot JEMALLOC_N(tcache_boot) +#define tcache_cleanup JEMALLOC_N(tcache_cleanup) #define tcache_create JEMALLOC_N(tcache_create) #define tcache_dalloc_large JEMALLOC_N(tcache_dalloc_large) #define tcache_dalloc_small JEMALLOC_N(tcache_dalloc_small) -#define tcache_destroy JEMALLOC_N(tcache_destroy) -#define tcache_enabled_booted JEMALLOC_N(tcache_enabled_booted) +#define tcache_enabled_cleanup JEMALLOC_N(tcache_enabled_cleanup) #define tcache_enabled_get JEMALLOC_N(tcache_enabled_get) -#define tcache_enabled_initialized JEMALLOC_N(tcache_enabled_initialized) #define tcache_enabled_set JEMALLOC_N(tcache_enabled_set) -#define tcache_enabled_tls JEMALLOC_N(tcache_enabled_tls) -#define tcache_enabled_tsd JEMALLOC_N(tcache_enabled_tsd) -#define tcache_enabled_tsd_boot JEMALLOC_N(tcache_enabled_tsd_boot) -#define tcache_enabled_tsd_cleanup_wrapper JEMALLOC_N(tcache_enabled_tsd_cleanup_wrapper) -#define tcache_enabled_tsd_get JEMALLOC_N(tcache_enabled_tsd_get) -#define tcache_enabled_tsd_get_wrapper JEMALLOC_N(tcache_enabled_tsd_get_wrapper) -#define tcache_enabled_tsd_init_head JEMALLOC_N(tcache_enabled_tsd_init_head) -#define tcache_enabled_tsd_set JEMALLOC_N(tcache_enabled_tsd_set) #define tcache_event JEMALLOC_N(tcache_event) #define tcache_event_hard JEMALLOC_N(tcache_event_hard) #define tcache_flush JEMALLOC_N(tcache_flush) #define tcache_get JEMALLOC_N(tcache_get) -#define tcache_initialized JEMALLOC_N(tcache_initialized) +#define tcache_get_hard JEMALLOC_N(tcache_get_hard) #define tcache_maxclass JEMALLOC_N(tcache_maxclass) +#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child) +#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent) +#define tcache_prefork JEMALLOC_N(tcache_prefork) #define tcache_salloc JEMALLOC_N(tcache_salloc) #define tcache_stats_merge JEMALLOC_N(tcache_stats_merge) -#define tcache_thread_cleanup JEMALLOC_N(tcache_thread_cleanup) -#define tcache_tls JEMALLOC_N(tcache_tls) -#define tcache_tsd JEMALLOC_N(tcache_tsd) -#define tcache_tsd_boot JEMALLOC_N(tcache_tsd_boot) -#define tcache_tsd_cleanup_wrapper JEMALLOC_N(tcache_tsd_cleanup_wrapper) -#define tcache_tsd_get JEMALLOC_N(tcache_tsd_get) -#define tcache_tsd_get_wrapper JEMALLOC_N(tcache_tsd_get_wrapper) -#define tcache_tsd_init_head JEMALLOC_N(tcache_tsd_init_head) -#define tcache_tsd_set JEMALLOC_N(tcache_tsd_set) -#define thread_allocated_booted JEMALLOC_N(thread_allocated_booted) 
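Editor's sketch, for readers skimming this very long hunk: every entry in the list is the same one-line pattern, mapping an internal symbol onto a prefix-mangled form so a statically linked or prefixed jemalloc cannot collide with another allocator in the same process. A minimal standalone illustration of the mechanism, assuming a "je_" prefix; JEMALLOC_PRIVATE_CONCAT and the je_ prefix here are illustrative stand-ins for jemalloc's generated private_namespace machinery, not code from this diff:

#include <stdio.h>

#define JEMALLOC_PRIVATE_CONCAT(a, b) a##b
#define JEMALLOC_N(n) JEMALLOC_PRIVATE_CONCAT(je_, n)

/* One line in the style of the list above: every later mention of
 * tcache_flush now expands to je_tcache_flush, keeping the unprefixed
 * symbol out of the global namespace. */
#define tcache_flush JEMALLOC_N(tcache_flush)

static void tcache_flush(void) {    /* actually defines je_tcache_flush */
    puts("je_tcache_flush called");
}

int main(void) {
    tcache_flush();    /* resolves to je_tcache_flush at link time */
    return 0;
}

The churn in this hunk is then just jemalloc 5.x's symbol inventory replacing 3.6's; the mangling technique itself is unchanged.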
-#define thread_allocated_initialized JEMALLOC_N(thread_allocated_initialized) -#define thread_allocated_tls JEMALLOC_N(thread_allocated_tls) -#define thread_allocated_tsd JEMALLOC_N(thread_allocated_tsd) -#define thread_allocated_tsd_boot JEMALLOC_N(thread_allocated_tsd_boot) -#define thread_allocated_tsd_cleanup_wrapper JEMALLOC_N(thread_allocated_tsd_cleanup_wrapper) -#define thread_allocated_tsd_get JEMALLOC_N(thread_allocated_tsd_get) -#define thread_allocated_tsd_get_wrapper JEMALLOC_N(thread_allocated_tsd_get_wrapper) -#define thread_allocated_tsd_init_head JEMALLOC_N(thread_allocated_tsd_init_head) -#define thread_allocated_tsd_set JEMALLOC_N(thread_allocated_tsd_set) +#define tcaches JEMALLOC_N(tcaches) +#define tcaches_create JEMALLOC_N(tcaches_create) +#define tcaches_destroy JEMALLOC_N(tcaches_destroy) +#define tcaches_flush JEMALLOC_N(tcaches_flush) +#define tcaches_get JEMALLOC_N(tcaches_get) +#define thread_allocated_cleanup JEMALLOC_N(thread_allocated_cleanup) +#define thread_deallocated_cleanup JEMALLOC_N(thread_deallocated_cleanup) +#define ticker_copy JEMALLOC_N(ticker_copy) +#define ticker_init JEMALLOC_N(ticker_init) +#define ticker_read JEMALLOC_N(ticker_read) +#define ticker_tick JEMALLOC_N(ticker_tick) +#define ticker_ticks JEMALLOC_N(ticker_ticks) +#define tsd_arena_get JEMALLOC_N(tsd_arena_get) +#define tsd_arena_set JEMALLOC_N(tsd_arena_set) +#define tsd_arenap_get JEMALLOC_N(tsd_arenap_get) +#define tsd_arenas_tdata_bypass_get JEMALLOC_N(tsd_arenas_tdata_bypass_get) +#define tsd_arenas_tdata_bypass_set JEMALLOC_N(tsd_arenas_tdata_bypass_set) +#define tsd_arenas_tdata_bypassp_get JEMALLOC_N(tsd_arenas_tdata_bypassp_get) +#define tsd_arenas_tdata_get JEMALLOC_N(tsd_arenas_tdata_get) +#define tsd_arenas_tdata_set JEMALLOC_N(tsd_arenas_tdata_set) +#define tsd_arenas_tdatap_get JEMALLOC_N(tsd_arenas_tdatap_get) +#define tsd_boot JEMALLOC_N(tsd_boot) +#define tsd_boot0 JEMALLOC_N(tsd_boot0) +#define tsd_boot1 JEMALLOC_N(tsd_boot1) +#define tsd_booted JEMALLOC_N(tsd_booted) +#define tsd_booted_get JEMALLOC_N(tsd_booted_get) +#define tsd_cleanup JEMALLOC_N(tsd_cleanup) +#define tsd_cleanup_wrapper JEMALLOC_N(tsd_cleanup_wrapper) +#define tsd_fetch JEMALLOC_N(tsd_fetch) +#define tsd_fetch_impl JEMALLOC_N(tsd_fetch_impl) +#define tsd_get JEMALLOC_N(tsd_get) +#define tsd_get_allocates JEMALLOC_N(tsd_get_allocates) +#define tsd_iarena_get JEMALLOC_N(tsd_iarena_get) +#define tsd_iarena_set JEMALLOC_N(tsd_iarena_set) +#define tsd_iarenap_get JEMALLOC_N(tsd_iarenap_get) +#define tsd_initialized JEMALLOC_N(tsd_initialized) #define tsd_init_check_recursion JEMALLOC_N(tsd_init_check_recursion) #define tsd_init_finish JEMALLOC_N(tsd_init_finish) +#define tsd_init_head JEMALLOC_N(tsd_init_head) +#define tsd_narenas_tdata_get JEMALLOC_N(tsd_narenas_tdata_get) +#define tsd_narenas_tdata_set JEMALLOC_N(tsd_narenas_tdata_set) +#define tsd_narenas_tdatap_get JEMALLOC_N(tsd_narenas_tdatap_get) +#define tsd_wrapper_get JEMALLOC_N(tsd_wrapper_get) +#define tsd_wrapper_set JEMALLOC_N(tsd_wrapper_set) +#define tsd_nominal JEMALLOC_N(tsd_nominal) +#define tsd_prof_tdata_get JEMALLOC_N(tsd_prof_tdata_get) +#define tsd_prof_tdata_set JEMALLOC_N(tsd_prof_tdata_set) +#define tsd_prof_tdatap_get JEMALLOC_N(tsd_prof_tdatap_get) +#define tsd_quarantine_get JEMALLOC_N(tsd_quarantine_get) +#define tsd_quarantine_set JEMALLOC_N(tsd_quarantine_set) +#define tsd_quarantinep_get JEMALLOC_N(tsd_quarantinep_get) +#define tsd_set JEMALLOC_N(tsd_set) +#define tsd_tcache_enabled_get 
JEMALLOC_N(tsd_tcache_enabled_get) +#define tsd_tcache_enabled_set JEMALLOC_N(tsd_tcache_enabled_set) +#define tsd_tcache_enabledp_get JEMALLOC_N(tsd_tcache_enabledp_get) +#define tsd_tcache_get JEMALLOC_N(tsd_tcache_get) +#define tsd_tcache_set JEMALLOC_N(tsd_tcache_set) +#define tsd_tcachep_get JEMALLOC_N(tsd_tcachep_get) +#define tsd_thread_allocated_get JEMALLOC_N(tsd_thread_allocated_get) +#define tsd_thread_allocated_set JEMALLOC_N(tsd_thread_allocated_set) +#define tsd_thread_allocatedp_get JEMALLOC_N(tsd_thread_allocatedp_get) +#define tsd_thread_deallocated_get JEMALLOC_N(tsd_thread_deallocated_get) +#define tsd_thread_deallocated_set JEMALLOC_N(tsd_thread_deallocated_set) +#define tsd_thread_deallocatedp_get JEMALLOC_N(tsd_thread_deallocatedp_get) +#define tsd_tls JEMALLOC_N(tsd_tls) +#define tsd_tsd JEMALLOC_N(tsd_tsd) +#define tsd_tsdn JEMALLOC_N(tsd_tsdn) +#define tsd_witness_fork_get JEMALLOC_N(tsd_witness_fork_get) +#define tsd_witness_fork_set JEMALLOC_N(tsd_witness_fork_set) +#define tsd_witness_forkp_get JEMALLOC_N(tsd_witness_forkp_get) +#define tsd_witnesses_get JEMALLOC_N(tsd_witnesses_get) +#define tsd_witnesses_set JEMALLOC_N(tsd_witnesses_set) +#define tsd_witnessesp_get JEMALLOC_N(tsd_witnessesp_get) +#define tsdn_fetch JEMALLOC_N(tsdn_fetch) +#define tsdn_null JEMALLOC_N(tsdn_null) +#define tsdn_tsd JEMALLOC_N(tsdn_tsd) #define u2rz JEMALLOC_N(u2rz) +#define valgrind_freelike_block JEMALLOC_N(valgrind_freelike_block) +#define valgrind_make_mem_defined JEMALLOC_N(valgrind_make_mem_defined) +#define valgrind_make_mem_noaccess JEMALLOC_N(valgrind_make_mem_noaccess) +#define valgrind_make_mem_undefined JEMALLOC_N(valgrind_make_mem_undefined) +#define witness_assert_depth JEMALLOC_N(witness_assert_depth) +#define witness_assert_depth_to_rank JEMALLOC_N(witness_assert_depth_to_rank) +#define witness_assert_lockless JEMALLOC_N(witness_assert_lockless) +#define witness_assert_not_owner JEMALLOC_N(witness_assert_not_owner) +#define witness_assert_owner JEMALLOC_N(witness_assert_owner) +#define witness_depth_error JEMALLOC_N(witness_depth_error) +#define witness_fork_cleanup JEMALLOC_N(witness_fork_cleanup) +#define witness_init JEMALLOC_N(witness_init) +#define witness_lock JEMALLOC_N(witness_lock) +#define witness_lock_error JEMALLOC_N(witness_lock_error) +#define witness_not_owner_error JEMALLOC_N(witness_not_owner_error) +#define witness_owner JEMALLOC_N(witness_owner) +#define witness_owner_error JEMALLOC_N(witness_owner_error) +#define witness_postfork_child JEMALLOC_N(witness_postfork_child) +#define witness_postfork_parent JEMALLOC_N(witness_postfork_parent) +#define witness_prefork JEMALLOC_N(witness_prefork) +#define witness_unlock JEMALLOC_N(witness_unlock) +#define witnesses_cleanup JEMALLOC_N(witnesses_cleanup) +#define zone_register JEMALLOC_N(zone_register) diff --git a/dep/jemalloc/include/jemalloc/internal/prng.h b/dep/jemalloc/include/jemalloc/internal/prng.h index 7b2b06512ff..15cc2d18fa4 100644 --- a/dep/jemalloc/include/jemalloc/internal/prng.h +++ b/dep/jemalloc/include/jemalloc/internal/prng.h @@ -1,5 +1,8 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_PRNG_H +#define JEMALLOC_INTERNAL_PRNG_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/bit_util.h" /* * Simple linear congruential pseudo-random number generator: @@ -15,46 +18,168 @@ * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. 
* * This choice of m has the disadvantage that the quality of the bits is - * proportional to bit position. For example. the lowest bit has a cycle of 2, + * proportional to bit position. For example, the lowest bit has a cycle of 2, * the next has a cycle of 4, etc. For this reason, we prefer to use the upper * bits. - * - * Macro parameters: - * uint32_t r : Result. - * unsigned lg_range : (0..32], number of least significant bits to return. - * uint32_t state : Seed value. - * const uint32_t a, c : See above discussion. */ -#define prng32(r, lg_range, state, a, c) do { \ - assert(lg_range > 0); \ - assert(lg_range <= 32); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (32 - lg_range); \ -} while (false) - -/* Same as prng32(), but 64 bits of pseudo-randomness, using uint64_t. */ -#define prng64(r, lg_range, state, a, c) do { \ - assert(lg_range > 0); \ - assert(lg_range <= 64); \ - \ - r = (state * (a)) + (c); \ - state = r; \ - r >>= (64 - lg_range); \ -} while (false) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS -#endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -#endif /* JEMALLOC_H_EXTERNS */ +/* INTERNAL DEFINITIONS -- IGNORE */ /******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +#define PRNG_A_32 UINT32_C(1103515241) +#define PRNG_C_32 UINT32_C(12347) + +#define PRNG_A_64 UINT64_C(6364136223846793005) +#define PRNG_C_64 UINT64_C(1442695040888963407) + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_state_next_u32(uint32_t state) { + return (state * PRNG_A_32) + PRNG_C_32; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_state_next_u64(uint64_t state) { + return (state * PRNG_A_64) + PRNG_C_64; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_state_next_zu(size_t state) { +#if LG_SIZEOF_PTR == 2 + return (state * PRNG_A_32) + PRNG_C_32; +#elif LG_SIZEOF_PTR == 3 + return (state * PRNG_A_64) + PRNG_C_64; +#else +#error Unsupported pointer size +#endif +} -#endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ +/* BEGIN PUBLIC API */ +/******************************************************************************/ + +/* + * The prng_lg_range functions give a uniform int in the half-open range [0, + * 2**lg_range). If atomic is true, they do so safely from multiple threads. + * Multithreaded 64-bit prngs aren't supported. 
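The constants added above are Knuth's classic 64-bit LCG multiplier and increment (and a 32-bit analogue). A quick standalone check of the property the comment describes, with prng_state_next_u64 re-declared locally from the same constants: the low bit of the state has a cycle of 2, so consumers must take bits from the top of the word.

#include <stdint.h>
#include <stdio.h>

#define PRNG_A_64 UINT64_C(6364136223846793005)
#define PRNG_C_64 UINT64_C(1442695040888963407)

static uint64_t prng_state_next_u64(uint64_t state) {
    return (state * PRNG_A_64) + PRNG_C_64;
}

int main(void) {
    uint64_t s = 42;
    for (int i = 0; i < 6; i++) {
        s = prng_state_next_u64(s);
        /* The low bit alternates 0,1,0,1,...; the top byte shows no
         * such short cycle. */
        printf("low bit %d, top byte 0x%02x\n",
            (int)(s & 1), (unsigned)(s >> 56));
    }
    return 0;
}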
+ */ + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) { + uint32_t ret, state0, state1; + + assert(lg_range > 0); + assert(lg_range <= 32); + + state0 = atomic_load_u32(state, ATOMIC_RELAXED); + + if (atomic) { + do { + state1 = prng_state_next_u32(state0); + } while (!atomic_compare_exchange_weak_u32(state, &state0, + state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); + } else { + state1 = prng_state_next_u32(state0); + atomic_store_u32(state, state1, ATOMIC_RELAXED); + } + ret = state1 >> (32 - lg_range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_lg_range_u64(uint64_t *state, unsigned lg_range) { + uint64_t ret, state1; + + assert(lg_range > 0); + assert(lg_range <= 64); + + state1 = prng_state_next_u64(*state); + *state = state1; + ret = state1 >> (64 - lg_range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) { + size_t ret, state0, state1; + + assert(lg_range > 0); + assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); + + state0 = atomic_load_zu(state, ATOMIC_RELAXED); + + if (atomic) { + do { + state1 = prng_state_next_zu(state0); + } while (!atomic_compare_exchange_weak_zu(state, &state0, + state1, ATOMIC_RELAXED, ATOMIC_RELAXED)); + } else { + state1 = prng_state_next_zu(state0); + atomic_store_zu(state, state1, ATOMIC_RELAXED); + } + ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); + + return ret; +} + +/* + * The prng_range functions behave like the prng_lg_range, but return a result + * in [0, range) instead of [0, 2**lg_range). + */ + +JEMALLOC_ALWAYS_INLINE uint32_t +prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) { + uint32_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u32(state, lg_range, atomic); + } while (ret >= range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE uint64_t +prng_range_u64(uint64_t *state, uint64_t range) { + uint64_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_u64(state, lg_range); + } while (ret >= range); + + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) { + size_t ret; + unsigned lg_range; + + assert(range > 1); + + /* Compute the ceiling of lg(range). */ + lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; + + /* Generate a result in [0..range) via repeated trial. */ + do { + ret = prng_lg_range_zu(state, lg_range, atomic); + } while (ret >= range); + + return ret; +} + +#endif /* JEMALLOC_INTERNAL_PRNG_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/prof.h b/dep/jemalloc/include/jemalloc/internal/prof.h deleted file mode 100644 index 6f162d21e84..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/prof.h +++ /dev/null @@ -1,613 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct prof_bt_s prof_bt_t; -typedef struct prof_cnt_s prof_cnt_t; -typedef struct prof_thr_cnt_s prof_thr_cnt_t; -typedef struct prof_ctx_s prof_ctx_t; -typedef struct prof_tdata_s prof_tdata_t; - -/* Option defaults. 
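The prng_range_* functions above avoid modulo bias: they draw the top ceil(lg(range)) bits and retry until the draw lands in [0, range), so every accepted value is exactly uniform. The same rejection loop, self-contained; lcg and range_draw are local stand-ins for the prng_* helpers, non-atomic for brevity:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t lcg(uint64_t *s) {
    *s = *s * UINT64_C(6364136223846793005) +
        UINT64_C(1442695040888963407);
    return *s;
}

/* Uniform draw from [0, range): take the top lg_range bits, retry on
 * overshoot.  With range == 5 and lg_range == 3 the raw draw is in [0, 8)
 * and 5..7 are rejected, which is what keeps the result unbiased. */
static uint64_t range_draw(uint64_t *s, uint64_t range, unsigned lg_range) {
    uint64_t r;
    assert(range > 1 && range <= (UINT64_C(1) << lg_range));
    do {
        r = lcg(s) >> (64 - lg_range);
    } while (r >= range);
    return r;
}

int main(void) {
    uint64_t s = 1, hist[5] = {0};
    for (int i = 0; i < 100000; i++) {
        hist[range_draw(&s, 5, 3)]++;
    }
    for (int i = 0; i < 5; i++) {    /* each bucket should be ~20000 */
        printf("%d: %llu\n", i, (unsigned long long)hist[i]);
    }
    return 0;
}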
*/ -#ifdef JEMALLOC_PROF -# define PROF_PREFIX_DEFAULT "jeprof" -#else -# define PROF_PREFIX_DEFAULT "" -#endif -#define LG_PROF_SAMPLE_DEFAULT 19 -#define LG_PROF_INTERVAL_DEFAULT -1 - -/* - * Hard limit on stack backtrace depth. The version of prof_backtrace() that - * is based on __builtin_return_address() necessarily has a hard-coded number - * of backtrace frame handlers, and should be kept in sync with this setting. - */ -#define PROF_BT_MAX 128 - -/* Maximum number of backtraces to store in each per thread LRU cache. */ -#define PROF_TCMAX 1024 - -/* Initial hash table size. */ -#define PROF_CKH_MINITEMS 64 - -/* Size of memory buffer to use when writing dump files. */ -#define PROF_DUMP_BUFSIZE 65536 - -/* Size of stack-allocated buffer used by prof_printf(). */ -#define PROF_PRINTF_BUFSIZE 128 - -/* - * Number of mutexes shared among all ctx's. No space is allocated for these - * unless profiling is enabled, so it's okay to over-provision. - */ -#define PROF_NCTX_LOCKS 1024 - -/* - * prof_tdata pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. - */ -#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) -#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) -#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct prof_bt_s { - /* Backtrace, stored as len program counters. */ - void **vec; - unsigned len; -}; - -#ifdef JEMALLOC_PROF_LIBGCC -/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ -typedef struct { - prof_bt_t *bt; - unsigned nignore; - unsigned max; -} prof_unwind_data_t; -#endif - -struct prof_cnt_s { - /* - * Profiling counters. An allocation/deallocation pair can operate on - * different prof_thr_cnt_t objects that are linked into the same - * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go - * negative. In principle it is possible for the *bytes counters to - * overflow/underflow, but a general solution would require something - * like 128-bit counters; this implementation doesn't bother to solve - * that problem. - */ - int64_t curobjs; - int64_t curbytes; - uint64_t accumobjs; - uint64_t accumbytes; -}; - -struct prof_thr_cnt_s { - /* Linkage into prof_ctx_t's cnts_ql. */ - ql_elm(prof_thr_cnt_t) cnts_link; - - /* Linkage into thread's LRU. */ - ql_elm(prof_thr_cnt_t) lru_link; - - /* - * Associated context. If a thread frees an object that it did not - * allocate, it is possible that the context is not cached in the - * thread's hash table, in which case it must be able to look up the - * context, insert a new prof_thr_cnt_t into the thread's hash table, - * and link it into the prof_ctx_t's cnts_ql. - */ - prof_ctx_t *ctx; - - /* - * Threads use memory barriers to update the counters. Since there is - * only ever one writer, the only challenge is for the reader to get a - * consistent read of the counters. - * - * The writer uses this series of operations: - * - * 1) Increment epoch to an odd number. - * 2) Update counters. - * 3) Increment epoch to an even number. - * - * The reader must assure 1) that the epoch is even while it reads the - * counters, and 2) that the epoch doesn't change between the time it - * starts and finishes reading the counters. - */ - unsigned epoch; - - /* Profiling counters. */ - prof_cnt_t cnts; -}; - -struct prof_ctx_s { - /* Associated backtrace. 
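The epoch discipline documented in the removed prof_thr_cnt_s is a single-writer seqlock: the writer brackets each update between two epoch increments (odd while in flight), and readers retry until they see the same even epoch on both sides of the copy. A compact sketch with C11 atomics; the removed code used mb_write() barriers instead, and the counters are made atomic here so the sketch is race-free as written:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    atomic_uint epoch;          /* odd while an update is in flight */
    _Atomic int64_t curobjs;
    _Atomic int64_t curbytes;
} cnt_t;

static void cnt_update(cnt_t *c, int64_t objs, int64_t bytes) {
    atomic_fetch_add(&c->epoch, 1);          /* 1) epoch becomes odd */
    atomic_fetch_add(&c->curobjs, objs);     /* 2) update counters */
    atomic_fetch_add(&c->curbytes, bytes);
    atomic_fetch_add(&c->epoch, 1);          /* 3) epoch becomes even */
}

static void cnt_read(cnt_t *c, int64_t *objs, int64_t *bytes) {
    unsigned e0, e1;
    do {
        e0 = atomic_load(&c->epoch);
        *objs = atomic_load(&c->curobjs);
        *bytes = atomic_load(&c->curbytes);
        e1 = atomic_load(&c->epoch);
    } while ((e0 & 1) != 0 || e0 != e1);    /* odd or moved: retry */
}

int main(void) {
    cnt_t c = {0, 0, 0};
    int64_t o, b;
    cnt_update(&c, 1, 4096);
    cnt_read(&c, &o, &b);
    printf("%lld objs, %lld bytes\n", (long long)o, (long long)b);
    return 0;
}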
*/ - prof_bt_t *bt; - - /* Protects nlimbo, cnt_merged, and cnts_ql. */ - malloc_mutex_t *lock; - - /* - * Number of threads that currently cause this ctx to be in a state of - * limbo due to one of: - * - Initializing per thread counters associated with this ctx. - * - Preparing to destroy this ctx. - * - Dumping a heap profile that includes this ctx. - * nlimbo must be 1 (single destroyer) in order to safely destroy the - * ctx. - */ - unsigned nlimbo; - - /* Temporary storage for summation during dump. */ - prof_cnt_t cnt_summed; - - /* When threads exit, they merge their stats into cnt_merged. */ - prof_cnt_t cnt_merged; - - /* - * List of profile counters, one for each thread that has allocated in - * this context. - */ - ql_head(prof_thr_cnt_t) cnts_ql; - - /* Linkage for list of contexts to be dumped. */ - ql_elm(prof_ctx_t) dump_link; -}; -typedef ql_head(prof_ctx_t) prof_ctx_list_t; - -struct prof_tdata_s { - /* - * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *). Each thread keeps a - * cache of backtraces, with associated thread-specific prof_thr_cnt_t - * objects. Other threads may read the prof_thr_cnt_t contents, but no - * others will ever write them. - * - * Upon thread exit, the thread must merge all the prof_thr_cnt_t - * counter data into the associated prof_ctx_t objects, and unlink/free - * the prof_thr_cnt_t objects. - */ - ckh_t bt2cnt; - - /* LRU for contents of bt2cnt. */ - ql_head(prof_thr_cnt_t) lru_ql; - - /* Backtrace vector, used for calls to prof_backtrace(). */ - void **vec; - - /* Sampling state. */ - uint64_t prng_state; - uint64_t threshold; - uint64_t accum; - - /* State used to avoid dumping while operating on prof internals. */ - bool enq; - bool enq_idump; - bool enq_gdump; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_prof; -/* - * Even if opt_prof is true, sampling can be temporarily disabled by setting - * opt_prof_active to false. No locking is used when updating opt_prof_active, - * so there are no guarantees regarding how long it will take for all threads - * to notice state changes. - */ -extern bool opt_prof_active; -extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ -extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ -extern bool opt_prof_gdump; /* High-water memory dumping. */ -extern bool opt_prof_final; /* Final profile dumping. */ -extern bool opt_prof_leak; /* Dump leak summary at exit. */ -extern bool opt_prof_accum; /* Report cumulative bytes. */ -extern char opt_prof_prefix[ - /* Minimize memory bloat for non-prof builds. */ -#ifdef JEMALLOC_PROF - PATH_MAX + -#endif - 1]; - -/* - * Profile dump interval, measured in bytes allocated. Each arena triggers a - * profile dump when it reaches this threshold. The effect is that the - * interval between profile dumps averages prof_interval, though the actual - * interval between dumps will tend to be sporadic, and the interval will be a - * maximum of approximately (prof_interval * narenas). - */ -extern uint64_t prof_interval; - -/* - * If true, promote small sampled objects to large objects, since small run - * headers do not have embedded profile context pointers. 
- */ -extern bool prof_promote; - -void bt_init(prof_bt_t *bt, void **vec); -void prof_backtrace(prof_bt_t *bt, unsigned nignore); -prof_thr_cnt_t *prof_lookup(prof_bt_t *bt); -#ifdef JEMALLOC_JET -size_t prof_bt_count(void); -typedef int (prof_dump_open_t)(bool, const char *); -extern prof_dump_open_t *prof_dump_open; -#endif -void prof_idump(void); -bool prof_mdump(const char *filename); -void prof_gdump(void); -prof_tdata_t *prof_tdata_init(void); -void prof_tdata_cleanup(void *arg); -void prof_boot0(void); -void prof_boot1(void); -bool prof_boot2(void); -void prof_prefork(void); -void prof_postfork_parent(void); -void prof_postfork_child(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#define PROF_ALLOC_PREP(nignore, size, ret) do { \ - prof_tdata_t *prof_tdata; \ - prof_bt_t bt; \ - \ - assert(size == s2u(size)); \ - \ - prof_tdata = prof_tdata_get(true); \ - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \ - if (prof_tdata != NULL) \ - ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ - else \ - ret = NULL; \ - break; \ - } \ - \ - if (opt_prof_active == false) { \ - /* Sampling is currently inactive, so avoid sampling. */\ - ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ - } else if (opt_lg_prof_sample == 0) { \ - /* Don't bother with sampling logic, since sampling */\ - /* interval is 1. */\ - bt_init(&bt, prof_tdata->vec); \ - prof_backtrace(&bt, nignore); \ - ret = prof_lookup(&bt); \ - } else { \ - if (prof_tdata->threshold == 0) { \ - /* Initialize. Seed the prng differently for */\ - /* each thread. */\ - prof_tdata->prng_state = \ - (uint64_t)(uintptr_t)&size; \ - prof_sample_threshold_update(prof_tdata); \ - } \ - \ - /* Determine whether to capture a backtrace based on */\ - /* whether size is enough for prof_accum to reach */\ - /* prof_tdata->threshold. However, delay updating */\ - /* these variables until prof_{m,re}alloc(), because */\ - /* we don't know for sure that the allocation will */\ - /* succeed. */\ - /* */\ - /* Use subtraction rather than addition to avoid */\ - /* potential integer overflow. */\ - if (size >= prof_tdata->threshold - \ - prof_tdata->accum) { \ - bt_init(&bt, prof_tdata->vec); \ - prof_backtrace(&bt, nignore); \ - ret = prof_lookup(&bt); \ - } else \ - ret = (prof_thr_cnt_t *)(uintptr_t)1U; \ - } \ -} while (0) - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *) - -prof_tdata_t *prof_tdata_get(bool create); -void prof_sample_threshold_update(prof_tdata_t *prof_tdata); -prof_ctx_t *prof_ctx_get(const void *ptr); -void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx); -bool prof_sample_accum_update(size_t size); -void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt); -void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, - size_t old_usize, prof_ctx_t *old_ctx); -void prof_free(const void *ptr, size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) -/* Thread-specific backtrace cache, used to reduce bt2ctx contention. 
*/ -malloc_tsd_externs(prof_tdata, prof_tdata_t *) -malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL, - prof_tdata_cleanup) - -JEMALLOC_INLINE prof_tdata_t * -prof_tdata_get(bool create) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - - prof_tdata = *prof_tdata_tsd_get(); - if (create && prof_tdata == NULL) - prof_tdata = prof_tdata_init(); - - return (prof_tdata); -} - -JEMALLOC_INLINE void -prof_sample_threshold_update(prof_tdata_t *prof_tdata) -{ - /* - * The body of this function is compiled out unless heap profiling is - * enabled, so that it is possible to compile jemalloc with floating - * point support completely disabled. Avoiding floating point code is - * important on memory-constrained systems, but it also enables a - * workaround for versions of glibc that don't properly save/restore - * floating point registers during dynamic lazy symbol loading (which - * internally calls into whatever malloc implementation happens to be - * integrated into the application). Note that some compilers (e.g. - * gcc 4.8) may use floating point registers for fast memory moves, so - * jemalloc must be compiled with such optimizations disabled (e.g. - * -mno-sse) in order for the workaround to be complete. - */ -#ifdef JEMALLOC_PROF - uint64_t r; - double u; - - cassert(config_prof); - - /* - * Compute sample threshold as a geometrically distributed random - * variable with mean (2^opt_lg_prof_sample). - * - * __ __ - * | log(u) | 1 - * prof_tdata->threshold = | -------- |, where p = ------------------- - * | log(1-p) | opt_lg_prof_sample - * 2 - * - * For more information on the math, see: - * - * Non-Uniform Random Variate Generation - * Luc Devroye - * Springer-Verlag, New York, 1986 - * pp 500 - * (http://luc.devroye.org/rnbookindex.html) - */ - prng64(r, 53, prof_tdata->prng_state, - UINT64_C(6364136223846793005), UINT64_C(1442695040888963407)); - u = (double)r * (1.0/9007199254740992.0L); - prof_tdata->threshold = (uint64_t)(log(u) / - log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample)))) - + (uint64_t)1U; -#endif -} - -JEMALLOC_INLINE prof_ctx_t * -prof_ctx_get(const void *ptr) -{ - prof_ctx_t *ret; - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) { - /* Region. */ - ret = arena_prof_ctx_get(ptr); - } else - ret = huge_prof_ctx_get(ptr); - - return (ret); -} - -JEMALLOC_INLINE void -prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx) -{ - arena_chunk_t *chunk; - - cassert(config_prof); - assert(ptr != NULL); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) { - /* Region. */ - arena_prof_ctx_set(ptr, usize, ctx); - } else - huge_prof_ctx_set(ptr, ctx); -} - -JEMALLOC_INLINE bool -prof_sample_accum_update(size_t size) -{ - prof_tdata_t *prof_tdata; - - cassert(config_prof); - /* Sampling logic is unnecessary if the interval is 1. */ - assert(opt_lg_prof_sample != 0); - - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (true); - - /* Take care to avoid integer overflow. */ - if (size >= prof_tdata->threshold - prof_tdata->accum) { - prof_tdata->accum -= (prof_tdata->threshold - size); - /* Compute new sample threshold. 
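The boxed formula above is inverse-transform sampling of a geometric distribution: with per-byte sampling probability p = 2^-lg_sample, the next threshold is ceil(log(u)/log(1-p)), so sampled allocations are on average 2^lg_sample bytes apart. The same computation standalone (compile with -lm; 9007199254740992.0 is 2^53, matching the 53 random bits the removed code requested via prng64(r, 53, ...)):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t prng_state = 42;

static uint64_t prng53(void) {
    /* 53 pseudo-random bits from the same LCG constants as above. */
    prng_state = prng_state * UINT64_C(6364136223846793005) +
        UINT64_C(1442695040888963407);
    return prng_state >> 11;
}

static uint64_t sample_threshold(unsigned lg_sample) {
    double u = (double)prng53() * (1.0 / 9007199254740992.0);
    double p = 1.0 / (double)((uint64_t)1 << lg_sample);
    return (uint64_t)(log(u) / log(1.0 - p)) + 1;
}

int main(void) {
    for (int i = 0; i < 4; i++) {
        /* With the default lg_sample of 19, values cluster around
         * 2^19 = 524288 bytes between samples. */
        printf("%llu\n", (unsigned long long)sample_threshold(19));
    }
    return 0;
}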
*/ - prof_sample_threshold_update(prof_tdata); - while (prof_tdata->accum >= prof_tdata->threshold) { - prof_tdata->accum -= prof_tdata->threshold; - prof_sample_threshold_update(prof_tdata); - } - return (false); - } else { - prof_tdata->accum += size; - return (true); - } -} - -JEMALLOC_INLINE void -prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt) -{ - - cassert(config_prof); - assert(ptr != NULL); - assert(usize == isalloc(ptr, true)); - - if (opt_lg_prof_sample != 0) { - if (prof_sample_accum_update(usize)) { - /* - * Don't sample. For malloc()-like allocation, it is - * always possible to tell in advance how large an - * object's usable size will be, so there should never - * be a difference between the usize passed to - * PROF_ALLOC_PREP() and prof_malloc(). - */ - assert((uintptr_t)cnt == (uintptr_t)1U); - } - } - - if ((uintptr_t)cnt > (uintptr_t)1U) { - prof_ctx_set(ptr, usize, cnt->ctx); - - cnt->epoch++; - /*********/ - mb_write(); - /*********/ - cnt->cnts.curobjs++; - cnt->cnts.curbytes += usize; - if (opt_prof_accum) { - cnt->cnts.accumobjs++; - cnt->cnts.accumbytes += usize; - } - /*********/ - mb_write(); - /*********/ - cnt->epoch++; - /*********/ - mb_write(); - /*********/ - } else - prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U); -} - -JEMALLOC_INLINE void -prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt, - size_t old_usize, prof_ctx_t *old_ctx) -{ - prof_thr_cnt_t *told_cnt; - - cassert(config_prof); - assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U); - - if (ptr != NULL) { - assert(usize == isalloc(ptr, true)); - if (opt_lg_prof_sample != 0) { - if (prof_sample_accum_update(usize)) { - /* - * Don't sample. The usize passed to - * PROF_ALLOC_PREP() was larger than what - * actually got allocated, so a backtrace was - * captured for this allocation, even though - * its actual usize was insufficient to cross - * the sample threshold. - */ - cnt = (prof_thr_cnt_t *)(uintptr_t)1U; - } - } - } - - if ((uintptr_t)old_ctx > (uintptr_t)1U) { - told_cnt = prof_lookup(old_ctx->bt); - if (told_cnt == NULL) { - /* - * It's too late to propagate OOM for this realloc(), - * so operate directly on old_cnt->ctx->cnt_merged. - */ - malloc_mutex_lock(old_ctx->lock); - old_ctx->cnt_merged.curobjs--; - old_ctx->cnt_merged.curbytes -= old_usize; - malloc_mutex_unlock(old_ctx->lock); - told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; - } - } else - told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U; - - if ((uintptr_t)told_cnt > (uintptr_t)1U) - told_cnt->epoch++; - if ((uintptr_t)cnt > (uintptr_t)1U) { - prof_ctx_set(ptr, usize, cnt->ctx); - cnt->epoch++; - } else if (ptr != NULL) - prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U); - /*********/ - mb_write(); - /*********/ - if ((uintptr_t)told_cnt > (uintptr_t)1U) { - told_cnt->cnts.curobjs--; - told_cnt->cnts.curbytes -= old_usize; - } - if ((uintptr_t)cnt > (uintptr_t)1U) { - cnt->cnts.curobjs++; - cnt->cnts.curbytes += usize; - if (opt_prof_accum) { - cnt->cnts.accumobjs++; - cnt->cnts.accumbytes += usize; - } - } - /*********/ - mb_write(); - /*********/ - if ((uintptr_t)told_cnt > (uintptr_t)1U) - told_cnt->epoch++; - if ((uintptr_t)cnt > (uintptr_t)1U) - cnt->epoch++; - /*********/ - mb_write(); /* Not strictly necessary. 
*/ -} - -JEMALLOC_INLINE void -prof_free(const void *ptr, size_t size) -{ - prof_ctx_t *ctx = prof_ctx_get(ptr); - - cassert(config_prof); - - if ((uintptr_t)ctx > (uintptr_t)1) { - prof_thr_cnt_t *tcnt; - assert(size == isalloc(ptr, true)); - tcnt = prof_lookup(ctx->bt); - - if (tcnt != NULL) { - tcnt->epoch++; - /*********/ - mb_write(); - /*********/ - tcnt->cnts.curobjs--; - tcnt->cnts.curbytes -= size; - /*********/ - mb_write(); - /*********/ - tcnt->epoch++; - /*********/ - mb_write(); - /*********/ - } else { - /* - * OOM during free() cannot be propagated, so operate - * directly on cnt->ctx->cnt_merged. - */ - malloc_mutex_lock(ctx->lock); - ctx->cnt_merged.curobjs--; - ctx->cnt_merged.curbytes -= size; - malloc_mutex_unlock(ctx->lock); - } - } -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/dep/jemalloc/include/jemalloc/internal/prof_externs.h b/dep/jemalloc/include/jemalloc/internal/prof_externs.h new file mode 100644 index 00000000000..04348696f58 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/prof_externs.h @@ -0,0 +1,92 @@ +#ifndef JEMALLOC_INTERNAL_PROF_EXTERNS_H +#define JEMALLOC_INTERNAL_PROF_EXTERNS_H + +#include "jemalloc/internal/mutex.h" + +extern malloc_mutex_t bt2gctx_mtx; + +extern bool opt_prof; +extern bool opt_prof_active; +extern bool opt_prof_thread_active_init; +extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ +extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ +extern bool opt_prof_gdump; /* High-water memory dumping. */ +extern bool opt_prof_final; /* Final profile dumping. */ +extern bool opt_prof_leak; /* Dump leak summary at exit. */ +extern bool opt_prof_accum; /* Report cumulative bytes. */ +extern char opt_prof_prefix[ + /* Minimize memory bloat for non-prof builds. */ +#ifdef JEMALLOC_PROF + PATH_MAX + +#endif + 1]; + +/* Accessed via prof_active_[gs]et{_unlocked,}(). */ +extern bool prof_active; + +/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ +extern bool prof_gdump_val; + +/* + * Profile dump interval, measured in bytes allocated. Each arena triggers a + * profile dump when it reaches this threshold. The effect is that the + * interval between profile dumps averages prof_interval, though the actual + * interval between dumps will tend to be sporadic, and the interval will be a + * maximum of approximately (prof_interval * narenas). + */ +extern uint64_t prof_interval; + +/* + * Initialized as opt_lg_prof_sample, and potentially modified during profiling + * resets. 
+ */ +extern size_t lg_prof_sample; + +void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); +void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx); +void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); +void bt_init(prof_bt_t *bt, void **vec); +void prof_backtrace(prof_bt_t *bt); +prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); +#ifdef JEMALLOC_JET +size_t prof_tdata_count(void); +size_t prof_bt_count(void); +#endif +typedef int (prof_dump_open_t)(bool, const char *); +extern prof_dump_open_t *JET_MUTABLE prof_dump_open; + +typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); +extern prof_dump_header_t *JET_MUTABLE prof_dump_header; +#ifdef JEMALLOC_JET +void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes); +#endif +bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum); +void prof_idump(tsdn_t *tsdn); +bool prof_mdump(tsd_t *tsd, const char *filename); +void prof_gdump(tsdn_t *tsdn); +prof_tdata_t *prof_tdata_init(tsd_t *tsd); +prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); +void prof_reset(tsd_t *tsd, size_t lg_sample); +void prof_tdata_cleanup(tsd_t *tsd); +bool prof_active_get(tsdn_t *tsdn); +bool prof_active_set(tsdn_t *tsdn, bool active); +const char *prof_thread_name_get(tsd_t *tsd); +int prof_thread_name_set(tsd_t *tsd, const char *thread_name); +bool prof_thread_active_get(tsd_t *tsd); +bool prof_thread_active_set(tsd_t *tsd, bool active); +bool prof_thread_active_init_get(tsdn_t *tsdn); +bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); +bool prof_gdump_get(tsdn_t *tsdn); +bool prof_gdump_set(tsdn_t *tsdn, bool active); +void prof_boot0(void); +void prof_boot1(void); +bool prof_boot2(tsd_t *tsd); +void prof_prefork0(tsdn_t *tsdn); +void prof_prefork1(tsdn_t *tsdn); +void prof_postfork_parent(tsdn_t *tsdn); +void prof_postfork_child(tsdn_t *tsdn); +void prof_sample_threshold_update(prof_tdata_t *tdata); + +#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/prof_inlines_a.h b/dep/jemalloc/include/jemalloc/internal/prof_inlines_a.h new file mode 100644 index 00000000000..eda6839ade4 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/prof_inlines_a.h @@ -0,0 +1,72 @@ +#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H +#define JEMALLOC_INTERNAL_PROF_INLINES_A_H + +#include "jemalloc/internal/mutex.h" + +static inline bool +prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum, uint64_t accumbytes) { + cassert(config_prof); + + bool overflow; + uint64_t a0, a1; + + /* + * If the application allocates fast enough (and/or if idump is slow + * enough), extreme overflow here (a1 >= prof_interval * 2) can cause + * idump trigger coalescing. This is an intentional mechanism that + * avoids rate-limiting allocation. 
+ */ +#ifdef JEMALLOC_ATOMIC_U64 + a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); + do { + a1 = a0 + accumbytes; + assert(a1 >= a0); + overflow = (a1 >= prof_interval); + if (overflow) { + a1 %= prof_interval; + } + } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, + a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); +#else + malloc_mutex_lock(tsdn, &prof_accum->mtx); + a0 = prof_accum->accumbytes; + a1 = a0 + accumbytes; + overflow = (a1 >= prof_interval); + if (overflow) { + a1 %= prof_interval; + } + prof_accum->accumbytes = a1; + malloc_mutex_unlock(tsdn, &prof_accum->mtx); +#endif + return overflow; +} + +static inline void +prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum, size_t usize) { + cassert(config_prof); + + /* + * Cancel out as much of the excessive prof_accumbytes increase as + * possible without underflowing. Interval-triggered dumps occur + * slightly more often than intended as a result of incomplete + * canceling. + */ + uint64_t a0, a1; +#ifdef JEMALLOC_ATOMIC_U64 + a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED); + do { + a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - + usize) : 0; + } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0, + a1, ATOMIC_RELAXED, ATOMIC_RELAXED)); +#else + malloc_mutex_lock(tsdn, &prof_accum->mtx); + a0 = prof_accum->accumbytes; + a1 = (a0 >= LARGE_MINCLASS - usize) ? a0 - (LARGE_MINCLASS - usize) : + 0; + prof_accum->accumbytes = a1; + malloc_mutex_unlock(tsdn, &prof_accum->mtx); +#endif +} + +#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/prof_inlines_b.h b/dep/jemalloc/include/jemalloc/internal/prof_inlines_b.h new file mode 100644 index 00000000000..d670cb7b8f8 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/prof_inlines_b.h @@ -0,0 +1,217 @@ +#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H +#define JEMALLOC_INTERNAL_PROF_INLINES_B_H + +#include "jemalloc/internal/sz.h" + +JEMALLOC_ALWAYS_INLINE bool +prof_active_get_unlocked(void) { + /* + * Even if opt_prof is true, sampling can be temporarily disabled by + * setting prof_active to false. No locking is used when reading + * prof_active in the fast path, so there are no guarantees regarding + * how long it will take for all threads to notice state changes. + */ + return prof_active; +} + +JEMALLOC_ALWAYS_INLINE bool +prof_gdump_get_unlocked(void) { + /* + * No locking is used when reading prof_gdump_val in the fast path, so + * there are no guarantees regarding how long it will take for all + * threads to notice state changes. 
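prof_accum_add above, in both its CAS branch and its mutex fallback, is a lock-free "add, wrap, and elect a dumper": the thread whose addition pushes the running total past prof_interval sees overflow == true and owns the interval dump, and the modulo coalesces several crossed intervals into a single dump. Reduced to one file; interval is a hardcoded stand-in for prof_interval, and plain C11 atomics replace jemalloc's wrappers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t accumbytes;
static const uint64_t interval = UINT64_C(1) << 20;    /* 1 MiB demo interval */

/* Returns true iff this addition crossed the interval boundary. */
static bool accum_add(uint64_t bytes) {
    uint64_t a0 = atomic_load_explicit(&accumbytes, memory_order_relaxed);
    uint64_t a1;
    bool overflow;
    do {
        a1 = a0 + bytes;
        overflow = (a1 >= interval);
        if (overflow) {
            a1 %= interval;    /* coalesce: at most one dump is owed */
        }
    } while (!atomic_compare_exchange_weak_explicit(&accumbytes, &a0, a1,
        memory_order_relaxed, memory_order_relaxed));
    return overflow;
}

int main(void) {
    for (int i = 0; i < 5; i++) {    /* the dump fires on the 4th call */
        printf("add 300 KiB -> dump? %d\n", accum_add(300 << 10));
    }
    return 0;
}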
+ */ + return prof_gdump_val; +} + +JEMALLOC_ALWAYS_INLINE prof_tdata_t * +prof_tdata_get(tsd_t *tsd, bool create) { + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = tsd_prof_tdata_get(tsd); + if (create) { + if (unlikely(tdata == NULL)) { + if (tsd_nominal(tsd)) { + tdata = prof_tdata_init(tsd); + tsd_prof_tdata_set(tsd, tdata); + } + } else if (unlikely(tdata->expired)) { + tdata = prof_tdata_reinit(tsd, tdata); + tsd_prof_tdata_set(tsd, tdata); + } + assert(tdata == NULL || tdata->attached); + } + + return tdata; +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) { + cassert(config_prof); + assert(ptr != NULL); + + return arena_prof_tctx_get(tsdn, ptr, alloc_ctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, + alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx); +} + +JEMALLOC_ALWAYS_INLINE void +prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + + arena_prof_tctx_reset(tsdn, ptr, tctx); +} + +JEMALLOC_ALWAYS_INLINE bool +prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, + prof_tdata_t **tdata_out) { + prof_tdata_t *tdata; + + cassert(config_prof); + + tdata = prof_tdata_get(tsd, true); + if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) { + tdata = NULL; + } + + if (tdata_out != NULL) { + *tdata_out = tdata; + } + + if (unlikely(tdata == NULL)) { + return true; + } + + if (likely(tdata->bytes_until_sample >= usize)) { + if (update) { + tdata->bytes_until_sample -= usize; + } + return true; + } else { + if (tsd_reentrancy_level_get(tsd) > 0) { + return true; + } + /* Compute new sample threshold. */ + if (update) { + prof_sample_threshold_update(tdata); + } + return !tdata->active; + } +} + +JEMALLOC_ALWAYS_INLINE prof_tctx_t * +prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) { + prof_tctx_t *ret; + prof_tdata_t *tdata; + prof_bt_t bt; + + assert(usize == sz_s2u(usize)); + + if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, + &tdata))) { + ret = (prof_tctx_t *)(uintptr_t)1U; + } else { + bt_init(&bt, tdata->vec); + prof_backtrace(&bt); + ret = prof_lookup(tsd, &bt); + } + + return ret; +} + +JEMALLOC_ALWAYS_INLINE void +prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx, + prof_tctx_t *tctx) { + cassert(config_prof); + assert(ptr != NULL); + assert(usize == isalloc(tsdn, ptr)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { + prof_malloc_sample_object(tsdn, ptr, usize, tctx); + } else { + prof_tctx_set(tsdn, ptr, usize, alloc_ctx, + (prof_tctx_t *)(uintptr_t)1U); + } +} + +JEMALLOC_ALWAYS_INLINE void +prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, + bool prof_active, bool updated, const void *old_ptr, size_t old_usize, + prof_tctx_t *old_tctx) { + bool sampled, old_sampled, moved; + + cassert(config_prof); + assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); + + if (prof_active && !updated && ptr != NULL) { + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + if (prof_sample_accum_update(tsd, usize, true, NULL)) { + /* + * Don't sample. The usize passed to prof_alloc_prep() + * was larger than what actually got allocated, so a + * backtrace was captured for this allocation, even + * though its actual usize was insufficient to cross the + * sample threshold. 
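prof_sample_accum_update above keeps the hot path to one comparison and one subtraction on the thread-local bytes_until_sample; only exhausting the budget falls through to a threshold re-draw and a backtrace. Note its return sense: true means "do not sample". The sketch below inverts that for readability and stubs the geometric re-draw with a constant:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t bytes_until_sample = UINT64_C(512) * 1024;

static void threshold_update(void) {
    /* Stub: the real code re-draws a geometric threshold here. */
    bytes_until_sample = UINT64_C(512) * 1024;
}

/* true == this allocation should be backtraced and recorded. */
static bool should_sample(uint64_t usize) {
    if (bytes_until_sample >= usize) {
        bytes_until_sample -= usize;    /* fast path: just count down */
        return false;
    }
    threshold_update();                 /* slow path */
    return true;
}

int main(void) {
    unsigned sampled = 0;
    for (int i = 0; i < 10000; i++) {
        sampled += should_sample(4096);    /* 10000 x 4 KiB allocations */
    }
    printf("sampled %u of 10000\n", sampled);    /* ~77: one per 129 */
    return 0;
}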
+ */ + prof_alloc_rollback(tsd, tctx, true); + tctx = (prof_tctx_t *)(uintptr_t)1U; + } + } + + sampled = ((uintptr_t)tctx > (uintptr_t)1U); + old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); + moved = (ptr != old_ptr); + + if (unlikely(sampled)) { + prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); + } else if (moved) { + prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL, + (prof_tctx_t *)(uintptr_t)1U); + } else if (unlikely(old_sampled)) { + /* + * prof_tctx_set() would work for the !moved case as well, but + * prof_tctx_reset() is slightly cheaper, and the proper thing + * to do here in the presence of explicit knowledge re: moved + * state. + */ + prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx); + } else { + assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) == + (uintptr_t)1U); + } + + /* + * The prof_free_sampled_object() call must come after the + * prof_malloc_sample_object() call, because tctx and old_tctx may be + * the same, in which case reversing the call order could cause the tctx + * to be prematurely destroyed as a side effect of momentarily zeroed + * counters. + */ + if (unlikely(old_sampled)) { + prof_free_sampled_object(tsd, old_usize, old_tctx); + } +} + +JEMALLOC_ALWAYS_INLINE void +prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) { + prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); + + cassert(config_prof); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + + if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) { + prof_free_sampled_object(tsd, usize, tctx); + } +} + +#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/prof_structs.h b/dep/jemalloc/include/jemalloc/internal/prof_structs.h new file mode 100644 index 00000000000..0d58ae1005b --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/prof_structs.h @@ -0,0 +1,201 @@ +#ifndef JEMALLOC_INTERNAL_PROF_STRUCTS_H +#define JEMALLOC_INTERNAL_PROF_STRUCTS_H + +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/rb.h" + +struct prof_bt_s { + /* Backtrace, stored as len program counters. */ + void **vec; + unsigned len; +}; + +#ifdef JEMALLOC_PROF_LIBGCC +/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ +typedef struct { + prof_bt_t *bt; + unsigned max; +} prof_unwind_data_t; +#endif + +struct prof_accum_s { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_t mtx; + uint64_t accumbytes; +#else + atomic_u64_t accumbytes; +#endif +}; + +struct prof_cnt_s { + /* Profiling counters. */ + uint64_t curobjs; + uint64_t curbytes; + uint64_t accumobjs; + uint64_t accumbytes; +}; + +typedef enum { + prof_tctx_state_initializing, + prof_tctx_state_nominal, + prof_tctx_state_dumping, + prof_tctx_state_purgatory /* Dumper must finish destroying. */ +} prof_tctx_state_t; + +struct prof_tctx_s { + /* Thread data for thread that performed the allocation. */ + prof_tdata_t *tdata; + + /* + * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be + * defunct during teardown. + */ + uint64_t thr_uid; + uint64_t thr_discrim; + + /* Profiling counters, protected by tdata->lock. */ + prof_cnt_t cnts; + + /* Associated global context. */ + prof_gctx_t *gctx; + + /* + * UID that distinguishes multiple tctx's created by the same thread, + * but coexisting in gctx->tctxs. There are two ways that such + * coexistence can occur: + * - A dumper thread can cause a tctx to be retained in the purgatory + * state. 
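A convention running through these inlines: a prof_tctx_t * is really a three-way value. NULL and (prof_tctx_t *)(uintptr_t)1U are sentinels ("no context" and "allocated but not sampled"), and only values greater than 1 are real heap pointers, which is why the code compares (uintptr_t)tctx > (uintptr_t)1U rather than testing against NULL. The trick in isolation; tctx_t and the two macro names are illustrative:

#include <stdint.h>
#include <stdio.h>

typedef struct tctx_s { int unused; } tctx_t;

#define TCTX_NONE      ((tctx_t *)(uintptr_t)0U)
#define TCTX_UNSAMPLED ((tctx_t *)(uintptr_t)1U)

static int is_sampled(const tctx_t *tctx) {
    /* 0 and 1 encode states, not objects; real pointers compare greater. */
    return (uintptr_t)tctx > (uintptr_t)1U;
}

int main(void) {
    tctx_t real;
    printf("none=%d unsampled=%d real=%d\n", is_sampled(TCTX_NONE),
        is_sampled(TCTX_UNSAMPLED), is_sampled(&real));
    return 0;
}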
+ * - Although a single "producer" thread must create all tctx's which + * share the same thr_uid, multiple "consumers" can each concurrently + * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only + * gets called once each time cnts.cur{objs,bytes} drop to 0, but this + * threshold can be hit again before the first consumer finishes + * executing prof_tctx_destroy(). + */ + uint64_t tctx_uid; + + /* Linkage into gctx's tctxs. */ + rb_node(prof_tctx_t) tctx_link; + + /* + * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents + * sample vs destroy race. + */ + bool prepared; + + /* Current dump-related state, protected by gctx->lock. */ + prof_tctx_state_t state; + + /* + * Copy of cnts snapshotted during early dump phase, protected by + * dump_mtx. + */ + prof_cnt_t dump_cnts; +}; +typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; + +struct prof_gctx_s { + /* Protects nlimbo, cnt_summed, and tctxs. */ + malloc_mutex_t *lock; + + /* + * Number of threads that currently cause this gctx to be in a state of + * limbo due to one of: + * - Initializing this gctx. + * - Initializing per thread counters associated with this gctx. + * - Preparing to destroy this gctx. + * - Dumping a heap profile that includes this gctx. + * nlimbo must be 1 (single destroyer) in order to safely destroy the + * gctx. + */ + unsigned nlimbo; + + /* + * Tree of profile counters, one for each thread that has allocated in + * this context. + */ + prof_tctx_tree_t tctxs; + + /* Linkage for tree of contexts to be dumped. */ + rb_node(prof_gctx_t) dump_link; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Associated backtrace. */ + prof_bt_t bt; + + /* Backtrace vector, variable size, referred to by bt. */ + void *vec[1]; +}; +typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; + +struct prof_tdata_s { + malloc_mutex_t *lock; + + /* Monotonically increasing unique thread identifier. */ + uint64_t thr_uid; + + /* + * Monotonically increasing discriminator among tdata structures + * associated with the same thr_uid. + */ + uint64_t thr_discrim; + + /* Included in heap profile dumps if non-NULL. */ + char *thread_name; + + bool attached; + bool expired; + + rb_node(prof_tdata_t) tdata_link; + + /* + * Counter used to initialize prof_tctx_t's tctx_uid. No locking is + * necessary when incrementing this field, because only one thread ever + * does so. + */ + uint64_t tctx_uid_next; + + /* + * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks + * backtraces for which it has non-zero allocation/deallocation counters + * associated with thread-specific prof_tctx_t objects. Other threads + * may write to prof_tctx_t contents when freeing associated objects. + */ + ckh_t bt2tctx; + + /* Sampling state. */ + uint64_t prng_state; + uint64_t bytes_until_sample; + + /* State used to avoid dumping while operating on prof internals. */ + bool enq; + bool enq_idump; + bool enq_gdump; + + /* + * Set to true during an early dump phase for tdata's which are + * currently being dumped. New threads' tdata's have this initialized + * to false so that they aren't accidentally included in later dump + * phases. + */ + bool dumping; + + /* + * True if profiling is active for this tdata's thread + * (thread.prof.active mallctl). + */ + bool active; + + /* Temporary storage for summation during dump. */ + prof_cnt_t cnt_summed; + + /* Backtrace vector, used for calls to prof_backtrace(). 
*/ + void *vec[PROF_BT_MAX]; +}; +typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; + +#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/prof_types.h b/dep/jemalloc/include/jemalloc/internal/prof_types.h new file mode 100644 index 00000000000..1eff995ecf0 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/prof_types.h @@ -0,0 +1,56 @@ +#ifndef JEMALLOC_INTERNAL_PROF_TYPES_H +#define JEMALLOC_INTERNAL_PROF_TYPES_H + +typedef struct prof_bt_s prof_bt_t; +typedef struct prof_accum_s prof_accum_t; +typedef struct prof_cnt_s prof_cnt_t; +typedef struct prof_tctx_s prof_tctx_t; +typedef struct prof_gctx_s prof_gctx_t; +typedef struct prof_tdata_s prof_tdata_t; + +/* Option defaults. */ +#ifdef JEMALLOC_PROF +# define PROF_PREFIX_DEFAULT "jeprof" +#else +# define PROF_PREFIX_DEFAULT "" +#endif +#define LG_PROF_SAMPLE_DEFAULT 19 +#define LG_PROF_INTERVAL_DEFAULT -1 + +/* + * Hard limit on stack backtrace depth. The version of prof_backtrace() that + * is based on __builtin_return_address() necessarily has a hard-coded number + * of backtrace frame handlers, and should be kept in sync with this setting. + */ +#define PROF_BT_MAX 128 + +/* Initial hash table size. */ +#define PROF_CKH_MINITEMS 64 + +/* Size of memory buffer to use when writing dump files. */ +#define PROF_DUMP_BUFSIZE 65536 + +/* Size of stack-allocated buffer used by prof_printf(). */ +#define PROF_PRINTF_BUFSIZE 128 + +/* + * Number of mutexes shared among all gctx's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NCTX_LOCKS 1024 + +/* + * Number of mutexes shared among all tdata's. No space is allocated for these + * unless profiling is enabled, so it's okay to over-provision. + */ +#define PROF_NTDATA_LOCKS 256 + +/* + * prof_tdata pointers close to NULL are used to encode state information that + * is used for cleaning up during thread shutdown. + */ +#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) +#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) +#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY + +#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/ql.h b/dep/jemalloc/include/jemalloc/internal/ql.h index f70c5f6f391..80290407716 100644 --- a/dep/jemalloc/include/jemalloc/internal/ql.h +++ b/dep/jemalloc/include/jemalloc/internal/ql.h @@ -1,61 +1,64 @@ -/* - * List definitions. - */ -#define ql_head(a_type) \ +#ifndef JEMALLOC_INTERNAL_QL_H +#define JEMALLOC_INTERNAL_QL_H + +#include "jemalloc/internal/qr.h" + +/* List definitions. */ +#define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } -#define ql_head_initializer(a_head) {NULL} +#define ql_head_initializer(a_head) {NULL} -#define ql_elm(a_type) qr(a_type) +#define ql_elm(a_type) qr(a_type) /* List functions. */ -#define ql_new(a_head) do { \ +#define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) -#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) +#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) -#define ql_first(a_head) ((a_head)->qlh_first) +#define ql_first(a_head) ((a_head)->qlh_first) -#define ql_last(a_head, a_field) \ +#define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) -#define ql_next(a_head, a_elm, a_field) \ +#define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? 
qr_next((a_elm), a_field) : NULL) -#define ql_prev(a_head, a_elm, a_field) \ +#define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ : NULL) -#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ +#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) -#define ql_after_insert(a_qlelm, a_elm, a_field) \ +#define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) -#define ql_head_insert(a_head, a_elm, a_field) do { \ +#define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) -#define ql_tail_insert(a_head, a_elm, a_field) do { \ +#define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) -#define ql_remove(a_head, a_elm, a_field) do { \ +#define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ @@ -66,18 +69,20 @@ struct { \ } \ } while (0) -#define ql_head_remove(a_head, a_type, a_field) do { \ +#define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) -#define ql_tail_remove(a_head, a_type, a_field) do { \ +#define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) -#define ql_foreach(a_var, a_head, a_field) \ +#define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) -#define ql_reverse_foreach(a_var, a_head, a_field) \ +#define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field) + +#endif /* JEMALLOC_INTERNAL_QL_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/qr.h b/dep/jemalloc/include/jemalloc/internal/qr.h index 602944b9b4f..1e1056b3868 100644 --- a/dep/jemalloc/include/jemalloc/internal/qr.h +++ b/dep/jemalloc/include/jemalloc/internal/qr.h @@ -1,38 +1,39 @@ +#ifndef JEMALLOC_INTERNAL_QR_H +#define JEMALLOC_INTERNAL_QR_H + /* Ring definitions. */ -#define qr(a_type) \ +#define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. 
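The ql_* layer above stores only a pointer to the first element; because the underlying qr ring is circular, the tail is always first->prev, so both head and tail insertion are O(1) without a tail pointer. A self-contained miniature with shortened field names, restating only the two qr macros the demo needs:

#include <stdio.h>

#define qr(type)    struct { type *next; type *prev; }
#define qr_new(e, f) do {                        \
    (e)->f.next = (e);                           \
    (e)->f.prev = (e);                           \
} while (0)
#define qr_before_insert(at, e, f) do {          \
    (e)->f.prev = (at)->f.prev;                  \
    (e)->f.next = (at);                          \
    (e)->f.prev->f.next = (e);                   \
    (at)->f.prev = (e);                          \
} while (0)

typedef struct node_s node_t;
struct node_s {
    int val;
    qr(node_t) link;    /* intrusive: the links live inside the node */
};

int main(void) {
    node_t a = {1, {0, 0}}, b = {2, {0, 0}}, c = {3, {0, 0}};
    node_t *head = &a;
    qr_new(&a, link);
    /* ql_tail_insert boils down to inserting before the head in ring
     * order, with no head update: */
    qr_before_insert(head, &b, link);
    qr_before_insert(head, &c, link);
    for (node_t *n = head;;) {    /* prints 1 2 3 */
        printf("%d\n", n->val);
        n = n->link.next;
        if (n == head) {
            break;
        }
    }
    return 0;
}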
*/ -#define qr_new(a_qr, a_field) do { \ +#define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) +#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) -#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) +#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) -#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ +#define qr_before_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qrelm); \ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ (a_qrelm)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_after_insert(a_qrelm, a_qr, a_field) \ - do \ - { \ +#define qr_after_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ (a_qr)->a_field.qre_prev = (a_qrelm); \ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ (a_qrelm)->a_field.qre_next = (a_qr); \ - } while (0) +} while (0) -#define qr_meld(a_qr_a, a_qr_b, a_field) do { \ - void *t; \ +#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \ + a_type *t; \ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ t = (a_qr_a)->a_field.qre_prev; \ @@ -40,12 +41,14 @@ struct { \ (a_qr_b)->a_field.qre_prev = t; \ } while (0) -/* qr_meld() and qr_split() are functionally equivalent, so there's no need to - * have two copies of the code. */ -#define qr_split(a_qr_a, a_qr_b, a_field) \ - qr_meld((a_qr_a), (a_qr_b), a_field) +/* + * qr_meld() and qr_split() are functionally equivalent, so there's no need to + * have two copies of the code. + */ +#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \ + qr_meld((a_qr_a), (a_qr_b), a_type, a_field) -#define qr_remove(a_qr, a_field) do { \ +#define qr_remove(a_qr, a_field) do { \ (a_qr)->a_field.qre_prev->a_field.qre_next \ = (a_qr)->a_field.qre_next; \ (a_qr)->a_field.qre_next->a_field.qre_prev \ @@ -54,14 +57,16 @@ struct { \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) -#define qr_foreach(var, a_qr, a_field) \ +#define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? (var)->a_field.qre_next : NULL)) -#define qr_reverse_foreach(var, a_qr, a_field) \ +#define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ (var) = (((var) != (a_qr)) \ ? (var)->a_field.qre_prev : NULL)) + +#endif /* JEMALLOC_INTERNAL_QR_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/quarantine.h b/dep/jemalloc/include/jemalloc/internal/quarantine.h deleted file mode 100644 index 16f677f73da..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/quarantine.h +++ /dev/null @@ -1,67 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct quarantine_obj_s quarantine_obj_t; -typedef struct quarantine_s quarantine_t; - -/* Default per thread quarantine size if valgrind is enabled. 
*/ -#define JEMALLOC_VALGRIND_QUARANTINE_DEFAULT (ZU(1) << 24) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -struct quarantine_obj_s { - void *ptr; - size_t usize; -}; - -struct quarantine_s { - size_t curbytes; - size_t curobjs; - size_t first; -#define LG_MAXOBJS_INIT 10 - size_t lg_maxobjs; - quarantine_obj_t objs[1]; /* Dynamically sized ring buffer. */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -quarantine_t *quarantine_init(size_t lg_maxobjs); -void quarantine(void *ptr); -void quarantine_cleanup(void *arg); -bool quarantine_boot(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), quarantine, quarantine_t *) - -void quarantine_alloc_hook(void); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_)) -malloc_tsd_externs(quarantine, quarantine_t *) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, quarantine, quarantine_t *, NULL, - quarantine_cleanup) - -JEMALLOC_ALWAYS_INLINE void -quarantine_alloc_hook(void) -{ - quarantine_t *quarantine; - - assert(config_fill && opt_quarantine); - - quarantine = *quarantine_tsd_get(); - if (quarantine == NULL) - quarantine_init(LG_MAXOBJS_INIT); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ - diff --git a/dep/jemalloc/include/jemalloc/internal/rb.h b/dep/jemalloc/include/jemalloc/internal/rb.h index 423802eb2dc..47fa5ca99bb 100644 --- a/dep/jemalloc/include/jemalloc/internal/rb.h +++ b/dep/jemalloc/include/jemalloc/internal/rb.h @@ -20,17 +20,21 @@ */ #ifndef RB_H_ -#define RB_H_ +#define RB_H_ + +#ifndef __PGI +#define RB_COMPACT +#endif #ifdef RB_COMPACT /* Node structure. */ -#define rb_node(a_type) \ +#define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right_red; \ } #else -#define rb_node(a_type) \ +#define rb_node(a_type) \ struct { \ a_type *rbn_left; \ a_type *rbn_right; \ @@ -39,111 +43,116 @@ struct { \ #endif /* Root structure. */ -#define rb_tree(a_type) \ +#define rb_tree(a_type) \ struct { \ a_type *rbt_root; \ - a_type rbt_nil; \ } /* Left accessors. */ -#define rbtn_left_get(a_type, a_field, a_node) \ +#define rbtn_left_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_left) -#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ +#define rbtn_left_set(a_type, a_field, a_node, a_left) do { \ (a_node)->a_field.rbn_left = a_left; \ } while (0) #ifdef RB_COMPACT /* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ +#define rbtn_right_get(a_type, a_field, a_node) \ ((a_type *) (((intptr_t) (a_node)->a_field.rbn_right_red) \ & ((ssize_t)-2))) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) a_right) \ | (((uintptr_t) (a_node)->a_field.rbn_right_red) & ((size_t)1))); \ } while (0) /* Color accessors. 
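 *
 * In RB_COMPACT mode the node color is packed into bit 0 of
 * rbn_right_red: masking with (ssize_t)-2 (i.e. ~0x1) recovers the
 * right-child pointer, and bit 0 alone encodes red (1) vs. black (0).
 * This relies on nodes being at least 2-byte aligned, which the new
 * rbt_node_new() below makes explicit by asserting
 * ((uintptr_t)(a_node) & 0x1) == 0.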
*/ -#define rbtn_red_get(a_type, a_field, a_node) \ +#define rbtn_red_get(a_type, a_field, a_node) \ ((bool) (((uintptr_t) (a_node)->a_field.rbn_right_red) \ & ((size_t)1))) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_right_red = (a_type *) ((((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)) \ | ((ssize_t)a_red)); \ } while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ +#define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((uintptr_t) \ (a_node)->a_field.rbn_right_red) | ((size_t)1)); \ } while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ +#define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_right_red = (a_type *) (((intptr_t) \ (a_node)->a_field.rbn_right_red) & ((ssize_t)-2)); \ } while (0) + +/* Node initializer. */ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + /* Bookkeeping bit cannot be used by node pointer. */ \ + assert(((uintptr_t)(a_node) & 0x1) == 0); \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ + rbtn_red_set(a_type, a_field, (a_node)); \ +} while (0) #else /* Right accessors. */ -#define rbtn_right_get(a_type, a_field, a_node) \ +#define rbtn_right_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_right) -#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ +#define rbtn_right_set(a_type, a_field, a_node, a_right) do { \ (a_node)->a_field.rbn_right = a_right; \ } while (0) /* Color accessors. */ -#define rbtn_red_get(a_type, a_field, a_node) \ +#define rbtn_red_get(a_type, a_field, a_node) \ ((a_node)->a_field.rbn_red) -#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ +#define rbtn_color_set(a_type, a_field, a_node, a_red) do { \ (a_node)->a_field.rbn_red = (a_red); \ } while (0) -#define rbtn_red_set(a_type, a_field, a_node) do { \ +#define rbtn_red_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = true; \ } while (0) -#define rbtn_black_set(a_type, a_field, a_node) do { \ +#define rbtn_black_set(a_type, a_field, a_node) do { \ (a_node)->a_field.rbn_red = false; \ } while (0) -#endif /* Node initializer. */ -#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ - rbtn_left_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ - rbtn_right_set(a_type, a_field, (a_node), &(a_rbt)->rbt_nil); \ +#define rbt_node_new(a_type, a_field, a_rbt, a_node) do { \ + rbtn_left_set(a_type, a_field, (a_node), NULL); \ + rbtn_right_set(a_type, a_field, (a_node), NULL); \ rbtn_red_set(a_type, a_field, (a_node)); \ } while (0) +#endif /* Tree initializer. */ -#define rb_new(a_type, a_field, a_rbt) do { \ - (a_rbt)->rbt_root = &(a_rbt)->rbt_nil; \ - rbt_node_new(a_type, a_field, a_rbt, &(a_rbt)->rbt_nil); \ - rbtn_black_set(a_type, a_field, &(a_rbt)->rbt_nil); \ +#define rb_new(a_type, a_field, a_rbt) do { \ + (a_rbt)->rbt_root = NULL; \ } while (0) /* Internal utility macros. 
*/ -#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ +#define rbtn_first(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ + if ((r_node) != NULL) { \ for (; \ - rbtn_left_get(a_type, a_field, (r_node)) != &(a_rbt)->rbt_nil;\ + rbtn_left_get(a_type, a_field, (r_node)) != NULL; \ (r_node) = rbtn_left_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) -#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ +#define rbtn_last(a_type, a_field, a_rbt, a_root, r_node) do { \ (r_node) = (a_root); \ - if ((r_node) != &(a_rbt)->rbt_nil) { \ - for (; rbtn_right_get(a_type, a_field, (r_node)) != \ - &(a_rbt)->rbt_nil; (r_node) = rbtn_right_get(a_type, a_field, \ - (r_node))) { \ + if ((r_node) != NULL) { \ + for (; rbtn_right_get(a_type, a_field, (r_node)) != NULL; \ + (r_node) = rbtn_right_get(a_type, a_field, (r_node))) { \ } \ } \ } while (0) -#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ +#define rbtn_rotate_left(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_right_get(a_type, a_field, (a_node)); \ rbtn_right_set(a_type, a_field, (a_node), \ rbtn_left_get(a_type, a_field, (r_node))); \ rbtn_left_set(a_type, a_field, (r_node), (a_node)); \ } while (0) -#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ +#define rbtn_rotate_right(a_type, a_field, a_node, r_node) do { \ (r_node) = rbtn_left_get(a_type, a_field, (a_node)); \ rbtn_left_set(a_type, a_field, (a_node), \ rbtn_right_get(a_type, a_field, (r_node))); \ @@ -155,9 +164,11 @@ struct { \ * functions generated by an equivalently parameterized call to rb_gen(). */ -#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ +#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree); \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree); \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree); \ a_attr a_type * \ @@ -167,11 +178,11 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node); \ a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key); \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key); \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key); \ a_attr a_type * \ -a_prefix##psearch(a_rbt_type *rbtree, a_type *key); \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key); \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node); \ a_attr void \ @@ -181,7 +192,10 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ a_rbt_type *, a_type *, void *), void *arg); \ a_attr a_type * \ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ - a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); + a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg); /* * The rb_gen() macro generates a type-specific red-black tree implementation, @@ -198,7 +212,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * int (a_cmp *)(a_type *a_node, a_type *a_other); * ^^^^^^ * or a_key - * Interpretation of comparision function return values: + * Interpretation of comparison function return values: * -1 : a_node < a_other * 0 : a_node == a_other * 1 : a_node > a_other @@ -224,6 +238,13 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * Args: * tree: Pointer to 
an uninitialized red-black tree object. * + * static bool + * ex_empty(ex_t *tree); + * Description: Determine whether tree is empty. + * Args: + * tree: Pointer to an initialized red-black tree object. + * Ret: True if tree is empty, false otherwise. + * * static ex_node_t * * ex_first(ex_t *tree); * static ex_node_t * @@ -245,7 +266,7 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * last/first. * * static ex_node_t * - * ex_search(ex_t *tree, ex_node_t *key); + * ex_search(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. * Args: * tree: Pointer to an initialized red-black tree object. @@ -253,9 +274,9 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * Ret: Node in tree that matches key, or NULL if no match. * * static ex_node_t * - * ex_nsearch(ex_t *tree, ex_node_t *key); + * ex_nsearch(ex_t *tree, const ex_node_t *key); * static ex_node_t * - * ex_psearch(ex_t *tree, ex_node_t *key); + * ex_psearch(ex_t *tree, const ex_node_t *key); * Description: Search for node that matches key. If no match is found, * return what would be key's successor/predecessor, were * key in tree. @@ -303,40 +324,52 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ * arg : Opaque pointer passed to cb(). * Ret: NULL if iteration completed, or the non-NULL callback return value * that caused termination of the iteration. + * + * static void + * ex_destroy(ex_t *tree, void (*cb)(ex_node_t *, void *), void *arg); + * Description: Iterate over the tree with post-order traversal, remove + * each node, and run the callback if non-null. This is + * used for destroying a tree without paying the cost to + * rebalance it. The tree must not be otherwise altered + * during traversal. + * Args: + * tree: Pointer to an initialized red-black tree object. + * cb : Callback function, which, if non-null, is called for each node + * during iteration. There is no way to stop iteration once it + * has begun. + * arg : Opaque pointer passed to cb(). 
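 *
 * A minimal usage sketch of the generated API (illustrative only; it
 * fleshes out the ex_/ex_node_t running example used by this comment):
 *
 *   typedef struct ex_node_s ex_node_t;
 *   struct ex_node_s {
 *           rb_node(ex_node_t) ex_link;
 *           int key;
 *   };
 *   typedef rb_tree(ex_node_t) ex_t;
 *   static int
 *   ex_cmp(ex_node_t *a, ex_node_t *b) {
 *           return (a->key > b->key) - (a->key < b->key);
 *   }
 *   rb_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp)
 *
 * and then, inside some function:
 *
 *   ex_t tree;
 *   ex_node_t node;
 *   node.key = 42;
 *   ex_new(&tree);
 *   ex_insert(&tree, &node);
 *   assert(!ex_empty(&tree));
 *   ex_remove(&tree, &node);
 *
 * ex_destroy() with a free()-style callback then tears a whole tree down
 * without rebalancing, as described above.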
*/ -#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ +#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_rbt_type *rbtree) { \ rb_new(a_type, a_field, rbtree); \ } \ +a_attr bool \ +a_prefix##empty(a_rbt_type *rbtree) { \ + return (rbtree->rbt_root == NULL); \ +} \ a_attr a_type * \ a_prefix##first(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_first(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##last(a_rbt_type *rbtree) { \ a_type *ret; \ rbtn_last(a_type, a_field, rbtree, rbtree->rbt_root, ret); \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ - if (rbtn_right_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + if (rbtn_right_get(a_type, a_field, node) != NULL) { \ rbtn_first(a_type, a_field, rbtree, rbtn_right_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ + assert(tnode != NULL); \ + ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ @@ -347,24 +380,21 @@ a_prefix##next(a_rbt_type *rbtree, a_type *node) { \ } else { \ break; \ } \ - assert(tnode != &rbtree->rbt_nil); \ + assert(tnode != NULL); \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ a_type *ret; \ - if (rbtn_left_get(a_type, a_field, node) != &rbtree->rbt_nil) { \ + if (rbtn_left_get(a_type, a_field, node) != NULL) { \ rbtn_last(a_type, a_field, rbtree, rbtn_left_get(a_type, \ a_field, node), ret); \ } else { \ a_type *tnode = rbtree->rbt_root; \ - assert(tnode != &rbtree->rbt_nil); \ - ret = &rbtree->rbt_nil; \ + assert(tnode != NULL); \ + ret = NULL; \ while (true) { \ int cmp = (a_cmp)(node, tnode); \ if (cmp < 0) { \ @@ -375,20 +405,17 @@ a_prefix##prev(a_rbt_type *rbtree, a_type *node) { \ } else { \ break; \ } \ - assert(tnode != &rbtree->rbt_nil); \ + assert(tnode != NULL); \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ -a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ +a_prefix##search(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ int cmp; \ ret = rbtree->rbt_root; \ - while (ret != &rbtree->rbt_nil \ + while (ret != NULL \ && (cmp = (a_cmp)(key, ret)) != 0) { \ if (cmp < 0) { \ ret = rbtn_left_get(a_type, a_field, ret); \ @@ -396,17 +423,14 @@ a_prefix##search(a_rbt_type *rbtree, a_type *key) { \ ret = rbtn_right_get(a_type, a_field, ret); \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ -a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ +a_prefix##nsearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ + ret = NULL; \ + while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ ret = tnode; \ @@ -418,17 +442,14 @@ a_prefix##nsearch(a_rbt_type *rbtree, a_type *key) { \ break; \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ -a_prefix##psearch(a_rbt_type 
*rbtree, a_type *key) { \ +a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \ a_type *ret; \ a_type *tnode = rbtree->rbt_root; \ - ret = &rbtree->rbt_nil; \ - while (tnode != &rbtree->rbt_nil) { \ + ret = NULL; \ + while (tnode != NULL) { \ int cmp = (a_cmp)(key, tnode); \ if (cmp < 0) { \ tnode = rbtn_left_get(a_type, a_field, tnode); \ @@ -440,10 +461,7 @@ a_prefix##psearch(a_rbt_type *rbtree, a_type *key) { \ break; \ } \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = (NULL); \ - } \ - return (ret); \ + return ret; \ } \ a_attr void \ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ @@ -454,7 +472,7 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbt_node_new(a_type, a_field, rbtree, node); \ /* Wind. */ \ path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ assert(cmp != 0); \ if (cmp < 0) { \ @@ -474,7 +492,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbtn_left_set(a_type, a_field, cnode, left); \ if (rbtn_red_get(a_type, a_field, left)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ /* Fix up 4-node. */ \ a_type *tnode; \ rbtn_black_set(a_type, a_field, leftleft); \ @@ -489,7 +508,8 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \ rbtn_right_set(a_type, a_field, cnode, right); \ if (rbtn_red_get(a_type, a_field, right)) { \ a_type *left = rbtn_left_get(a_type, a_field, cnode); \ - if (rbtn_red_get(a_type, a_field, left)) { \ + if (left != NULL && rbtn_red_get(a_type, a_field, \ + left)) { \ /* Split 4-node. */ \ rbtn_black_set(a_type, a_field, left); \ rbtn_black_set(a_type, a_field, right); \ @@ -522,7 +542,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* Wind. */ \ nodep = NULL; /* Silence compiler warning. */ \ path->node = rbtree->rbt_root; \ - for (pathp = path; pathp->node != &rbtree->rbt_nil; pathp++) { \ + for (pathp = path; pathp->node != NULL; pathp++) { \ int cmp = pathp->cmp = a_cmp(node, pathp->node); \ if (cmp < 0) { \ pathp[1].node = rbtn_left_get(a_type, a_field, \ @@ -534,8 +554,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* Find node's successor, in preparation for swap. */ \ pathp->cmp = 1; \ nodep = pathp; \ - for (pathp++; pathp->node != &rbtree->rbt_nil; \ - pathp++) { \ + for (pathp++; pathp->node != NULL; pathp++) { \ pathp->cmp = -1; \ pathp[1].node = rbtn_left_get(a_type, a_field, \ pathp->node); \ @@ -577,10 +596,10 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ } \ } else { \ a_type *left = rbtn_left_get(a_type, a_field, node); \ - if (left != &rbtree->rbt_nil) { \ + if (left != NULL) { \ /* node has no successor, but it has a left child. */\ /* Splice node out, without losing the left child. */\ - assert(rbtn_red_get(a_type, a_field, node) == false); \ + assert(!rbtn_red_get(a_type, a_field, node)); \ assert(rbtn_red_get(a_type, a_field, left)); \ rbtn_black_set(a_type, a_field, left); \ if (pathp == path) { \ @@ -597,34 +616,32 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ return; \ } else if (pathp == path) { \ /* The tree only contained one node. */ \ - rbtree->rbt_root = &rbtree->rbt_nil; \ + rbtree->rbt_root = NULL; \ return; \ } \ } \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ /* Prune red node, which requires no fixup. 
*/ \ assert(pathp[-1].cmp < 0); \ - rbtn_left_set(a_type, a_field, pathp[-1].node, \ - &rbtree->rbt_nil); \ + rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \ return; \ } \ /* The node to be pruned is black, so unwind until balance is */\ /* restored. */\ - pathp->node = &rbtree->rbt_nil; \ + pathp->node = NULL; \ for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \ assert(pathp->cmp != 0); \ if (pathp->cmp < 0) { \ rbtn_left_set(a_type, a_field, pathp->node, \ pathp[1].node); \ - assert(rbtn_red_get(a_type, a_field, pathp[1].node) \ - == false); \ if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *right = rbtn_right_get(a_type, a_field, \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ a_type *tnode; \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ /* In the following diagrams, ||, //, and \\ */\ /* indicate the path to the removed node. */\ /* */\ @@ -667,7 +684,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ pathp->node); \ a_type *rightleft = rbtn_left_get(a_type, a_field, \ right); \ - if (rbtn_red_get(a_type, a_field, rightleft)) { \ + if (rightleft != NULL && rbtn_red_get(a_type, a_field, \ + rightleft)) { \ /* || */\ /* pathp(b) */\ /* // \ */\ @@ -681,7 +699,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ rbtn_rotate_left(a_type, a_field, pathp->node, \ tnode); \ /* Balance restored, but rotation modified */\ - /* subree root, which may actually be the tree */\ + /* subtree root, which may actually be the tree */\ /* root. */\ if (pathp == path) { \ /* Set root. */ \ @@ -721,7 +739,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ left); \ a_type *leftrightleft = rbtn_left_get(a_type, a_field, \ leftright); \ - if (rbtn_red_get(a_type, a_field, leftrightleft)) { \ + if (leftrightleft != NULL && rbtn_red_get(a_type, \ + a_field, leftrightleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ @@ -747,7 +766,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ /* (b) */\ /* / */\ /* (b) */\ - assert(leftright != &rbtree->rbt_nil); \ + assert(leftright != NULL); \ rbtn_red_set(a_type, a_field, leftright); \ rbtn_rotate_right(a_type, a_field, pathp->node, \ tnode); \ @@ -770,7 +789,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ return; \ } else if (rbtn_red_get(a_type, a_field, pathp->node)) { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ /* || */\ /* pathp(r) */\ /* / \\ */\ @@ -808,7 +828,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ } \ } else { \ a_type *leftleft = rbtn_left_get(a_type, a_field, left);\ - if (rbtn_red_get(a_type, a_field, leftleft)) { \ + if (leftleft != NULL && rbtn_red_get(a_type, a_field, \ + leftleft)) { \ /* || */\ /* pathp(b) */\ /* / \\ */\ @@ -849,22 +870,22 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \ } \ /* Set root. 
*/ \ rbtree->rbt_root = path->node; \ - assert(rbtn_red_get(a_type, a_field, rbtree->rbt_root) == false); \ + assert(!rbtn_red_get(a_type, a_field, rbtree->rbt_root)); \ } \ a_attr a_type * \ a_prefix##iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ + if (node == NULL) { \ + return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##iter_recurse(rbtree, rbtn_left_get(a_type, \ - a_field, node), cb, arg)) != &rbtree->rbt_nil \ - || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + a_field, node), cb, arg)) != NULL || (ret = cb(rbtree, node, \ + arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -874,22 +895,22 @@ a_prefix##iter_start(a_rbt_type *rbtree, a_type *start, a_type *node, \ if (cmp < 0) { \ a_type *ret; \ if ((ret = a_prefix##iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + rbtn_left_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ } else if (cmp > 0) { \ - return (a_prefix##iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##iter_start(rbtree, start, \ + rbtn_right_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + return ret; \ } \ - return (a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ - a_field, node), cb, arg)); \ + return a_prefix##iter_recurse(rbtree, rbtn_right_get(a_type, \ + a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -902,25 +923,22 @@ a_prefix##iter(a_rbt_type *rbtree, a_type *start, a_type *(*cb)( \ } else { \ ret = a_prefix##iter_recurse(rbtree, rbtree->rbt_root, cb, arg);\ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ - } \ - return (ret); \ + return ret; \ } \ a_attr a_type * \ a_prefix##reverse_iter_recurse(a_rbt_type *rbtree, a_type *node, \ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg) { \ - if (node == &rbtree->rbt_nil) { \ - return (&rbtree->rbt_nil); \ + if (node == NULL) { \ + return NULL; \ } else { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -931,22 +949,22 @@ a_prefix##reverse_iter_start(a_rbt_type *rbtree, a_type *start, \ if (cmp > 0) { \ a_type *ret; \ if ((ret = a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_right_get(a_type, a_field, node), cb, arg)) != \ - &rbtree->rbt_nil || (ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ 
+ rbtn_right_get(a_type, a_field, node), cb, arg)) != NULL || \ + (ret = cb(rbtree, node, arg)) != NULL) { \ + return ret; \ } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } else if (cmp < 0) { \ - return (a_prefix##reverse_iter_start(rbtree, start, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_start(rbtree, start, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } else { \ a_type *ret; \ if ((ret = cb(rbtree, node, arg)) != NULL) { \ - return (ret); \ + return ret; \ } \ - return (a_prefix##reverse_iter_recurse(rbtree, \ - rbtn_left_get(a_type, a_field, node), cb, arg)); \ + return a_prefix##reverse_iter_recurse(rbtree, \ + rbtn_left_get(a_type, a_field, node), cb, arg); \ } \ } \ a_attr a_type * \ @@ -960,10 +978,29 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \ ret = a_prefix##reverse_iter_recurse(rbtree, rbtree->rbt_root, \ cb, arg); \ } \ - if (ret == &rbtree->rbt_nil) { \ - ret = NULL; \ + return ret; \ +} \ +a_attr void \ +a_prefix##destroy_recurse(a_rbt_type *rbtree, a_type *node, void (*cb)( \ + a_type *, void *), void *arg) { \ + if (node == NULL) { \ + return; \ } \ - return (ret); \ + a_prefix##destroy_recurse(rbtree, rbtn_left_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_left_set(a_type, a_field, (node), NULL); \ + a_prefix##destroy_recurse(rbtree, rbtn_right_get(a_type, a_field, \ + node), cb, arg); \ + rbtn_right_set(a_type, a_field, (node), NULL); \ + if (cb) { \ + cb(node, arg); \ + } \ +} \ +a_attr void \ +a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \ + void *arg) { \ + a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \ + rbtree->rbt_root = NULL; \ } #endif /* RB_H_ */ diff --git a/dep/jemalloc/include/jemalloc/internal/rtree.h b/dep/jemalloc/include/jemalloc/internal/rtree.h index bc74769f50e..b5d4db3988f 100644 --- a/dep/jemalloc/include/jemalloc/internal/rtree.h +++ b/dep/jemalloc/include/jemalloc/internal/rtree.h @@ -1,172 +1,474 @@ +#ifndef JEMALLOC_INTERNAL_RTREE_H +#define JEMALLOC_INTERNAL_RTREE_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree_tsd.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/tsd.h" + /* * This radix tree implementation is tailored to the singular purpose of - * tracking which chunks are currently owned by jemalloc. This functionality - * is mandatory for OS X, where jemalloc must be able to respond to object - * ownership queries. + * associating metadata with extents that are currently owned by jemalloc. * ******************************************************************************* */ -#ifdef JEMALLOC_H_TYPES + +/* Number of high insignificant bits. */ +#define RTREE_NHIB ((1U << (LG_SIZEOF_PTR+3)) - LG_VADDR) +/* Number of low insigificant bits. */ +#define RTREE_NLIB LG_PAGE +/* Number of significant bits. */ +#define RTREE_NSB (LG_VADDR - RTREE_NLIB) +/* Number of levels in radix tree. */ +#if RTREE_NSB <= 10 +# define RTREE_HEIGHT 1 +#elif RTREE_NSB <= 36 +# define RTREE_HEIGHT 2 +#elif RTREE_NSB <= 52 +# define RTREE_HEIGHT 3 +#else +# error Unsupported number of significant virtual address bits +#endif +/* Use compact leaf representation if virtual address encoding allows. 
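 *
 * Worked example: on a typical 64-bit x86-64 build, LG_SIZEOF_PTR == 3
 * gives 64 pointer bits and LG_VADDR == 48, so RTREE_NHIB == 16; with
 * LG_PAGE == 12, RTREE_NSB == 48 - 12 == 36, which selects RTREE_HEIGHT
 * == 2, i.e. a two-level radix tree with 18 significant key bits per
 * level. Those same 16 insignificant high bits are what the compact leaf
 * encoding below reuses to store the size-class index alongside the
 * extent pointer.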
*/ +#if RTREE_NHIB >= LG_CEIL_NSIZES +# define RTREE_LEAF_COMPACT +#endif + +/* Needed for initialization only. */ +#define RTREE_LEAFKEY_INVALID ((uintptr_t)1) + +typedef struct rtree_node_elm_s rtree_node_elm_t; +struct rtree_node_elm_s { + atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */ +}; + +struct rtree_leaf_elm_s { +#ifdef RTREE_LEAF_COMPACT + /* + * Single pointer-width field containing all three leaf element fields. + * For example, on a 64-bit x64 system with 48 significant virtual + * memory address bits, the index, extent, and slab fields are packed as + * such: + * + * x: index + * e: extent + * b: slab + * + * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b + */ + atomic_p_t le_bits; +#else + atomic_p_t le_extent; /* (extent_t *) */ + atomic_u_t le_szind; /* (szind_t) */ + atomic_b_t le_slab; /* (bool) */ +#endif +}; + +typedef struct rtree_level_s rtree_level_t; +struct rtree_level_s { + /* Number of key bits distinguished by this level. */ + unsigned bits; + /* + * Cumulative number of key bits distinguished by traversing to + * corresponding tree level. + */ + unsigned cumbits; +}; typedef struct rtree_s rtree_t; +struct rtree_s { + malloc_mutex_t init_lock; + /* Number of elements based on rtree_levels[0].bits. */ +#if RTREE_HEIGHT > 1 + rtree_node_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; +#else + rtree_leaf_elm_t root[1U << (RTREE_NSB/RTREE_HEIGHT)]; +#endif +}; /* - * Size of each radix tree node (must be a power of 2). This impacts tree - * depth. + * Split the bits into one to three partitions depending on number of + * significant bits. It the number of bits does not divide evenly into the + * number of levels, place one remainder bit per level starting at the leaf + * level. */ -#define RTREE_NODESIZE (1U << 16) +static const rtree_level_t rtree_levels[] = { +#if RTREE_HEIGHT == 1 + {RTREE_NSB, RTREE_NHIB + RTREE_NSB} +#elif RTREE_HEIGHT == 2 + {RTREE_NSB/2, RTREE_NHIB + RTREE_NSB/2}, + {RTREE_NSB/2 + RTREE_NSB%2, RTREE_NHIB + RTREE_NSB} +#elif RTREE_HEIGHT == 3 + {RTREE_NSB/3, RTREE_NHIB + RTREE_NSB/3}, + {RTREE_NSB/3 + RTREE_NSB%3/2, + RTREE_NHIB + RTREE_NSB/3*2 + RTREE_NSB%3/2}, + {RTREE_NSB/3 + RTREE_NSB%3 - RTREE_NSB%3/2, RTREE_NHIB + RTREE_NSB} +#else +# error Unsupported rtree height +#endif +}; -typedef void *(rtree_alloc_t)(size_t); -typedef void (rtree_dalloc_t)(void *); +bool rtree_new(rtree_t *rtree, bool zeroed); -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t); +extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc; -struct rtree_s { - rtree_alloc_t *alloc; - rtree_dalloc_t *dalloc; - malloc_mutex_t mutex; - void **root; - unsigned height; - unsigned level2bits[1]; /* Dynamically sized. 
*/ -}; +typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t); +extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc; + +typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *); +extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc; + +typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *); +extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc; +#ifdef JEMALLOC_JET +void rtree_delete(tsdn_t *tsdn, rtree_t *rtree); +#endif +rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, + rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing); + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leafkey(uintptr_t key) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - + rtree_levels[RTREE_HEIGHT-1].bits); + unsigned maskbits = ptrbits - cumbits; + uintptr_t mask = ~((ZU(1) << maskbits) - 1); + return (key & mask); +} + +JEMALLOC_ALWAYS_INLINE size_t +rtree_cache_direct_map(uintptr_t key) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits - + rtree_levels[RTREE_HEIGHT-1].bits); + unsigned maskbits = ptrbits - cumbits; + return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1)); +} + +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_subkey(uintptr_t key, unsigned level) { + unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3); + unsigned cumbits = rtree_levels[level].cumbits; + unsigned shiftbits = ptrbits - cumbits; + unsigned maskbits = rtree_levels[level].bits; + uintptr_t mask = (ZU(1) << maskbits) - 1; + return ((key >> shiftbits) & mask); +} + +/* + * Atomic getters. + * + * dependent: Reading a value on behalf of a pointer to a valid allocation + * is guaranteed to be a clean read even without synchronization, + * because the rtree update became visible in memory before the + * pointer came into existence. + * !dependent: An arbitrary read, e.g. on behalf of ivsalloc(), may not be + * dependent on a previous rtree write, which means a stale read + * could result if synchronization were omitted here. + */ +# ifdef RTREE_LEAF_COMPACT +JEMALLOC_ALWAYS_INLINE uintptr_t +rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { + return (uintptr_t)atomic_load_p(&elm->le_bits, dependent + ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); +} + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_leaf_elm_bits_extent_get(uintptr_t bits) { + /* Restore sign-extended high bits, mask slab bit. 
*/ + return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >> + RTREE_NHIB) & ~((uintptr_t)0x1)); +} -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS +JEMALLOC_ALWAYS_INLINE szind_t +rtree_leaf_elm_bits_szind_get(uintptr_t bits) { + return (szind_t)(bits >> LG_VADDR); +} -rtree_t *rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc); -void rtree_delete(rtree_t *rtree); -void rtree_prefork(rtree_t *rtree); -void rtree_postfork_parent(rtree_t *rtree); -void rtree_postfork_child(rtree_t *rtree); +JEMALLOC_ALWAYS_INLINE bool +rtree_leaf_elm_bits_slab_get(uintptr_t bits) { + return (bool)(bits & (uintptr_t)0x1); +} -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +# endif -#ifndef JEMALLOC_ENABLE_INLINE -#ifdef JEMALLOC_DEBUG -uint8_t rtree_get_locked(rtree_t *rtree, uintptr_t key); +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_extent_get(bits); +#else + extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent + ? ATOMIC_RELAXED : ATOMIC_ACQUIRE); + return extent; #endif -uint8_t rtree_get(rtree_t *rtree, uintptr_t key); -bool rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val); +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_szind_get(bits); +#else + return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED + : ATOMIC_ACQUIRE); #endif +} -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) -#define RTREE_GET_GENERATE(f) \ -/* The least significant bits of the key are ignored. */ \ -JEMALLOC_INLINE uint8_t \ -f(rtree_t *rtree, uintptr_t key) \ -{ \ - uint8_t ret; \ - uintptr_t subkey; \ - unsigned i, lshift, height, bits; \ - void **node, **child; \ - \ - RTREE_LOCK(&rtree->mutex); \ - for (i = lshift = 0, height = rtree->height, node = rtree->root;\ - i < height - 1; \ - i++, lshift += bits, node = child) { \ - bits = rtree->level2bits[i]; \ - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR + \ - 3)) - bits); \ - child = (void**)node[subkey]; \ - if (child == NULL) { \ - RTREE_UNLOCK(&rtree->mutex); \ - return (0); \ - } \ - } \ - \ - /* \ - * node is a leaf, so it contains values rather than node \ - * pointers. 
\ - */ \ - bits = rtree->level2bits[i]; \ - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \ - bits); \ - { \ - uint8_t *leaf = (uint8_t *)node; \ - ret = leaf[subkey]; \ - } \ - RTREE_UNLOCK(&rtree->mutex); \ - \ - RTREE_GET_VALIDATE \ - return (ret); \ -} - -#ifdef JEMALLOC_DEBUG -# define RTREE_LOCK(l) malloc_mutex_lock(l) -# define RTREE_UNLOCK(l) malloc_mutex_unlock(l) -# define RTREE_GET_VALIDATE -RTREE_GET_GENERATE(rtree_get_locked) -# undef RTREE_LOCK -# undef RTREE_UNLOCK -# undef RTREE_GET_VALIDATE +JEMALLOC_ALWAYS_INLINE bool +rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool dependent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent); + return rtree_leaf_elm_bits_slab_get(bits); +#else + return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED : + ATOMIC_ACQUIRE); #endif +} -#define RTREE_LOCK(l) -#define RTREE_UNLOCK(l) -#ifdef JEMALLOC_DEBUG - /* - * Suppose that it were possible for a jemalloc-allocated chunk to be - * munmap()ped, followed by a different allocator in another thread re-using - * overlapping virtual memory, all without invalidating the cached rtree - * value. The result would be a false positive (the rtree would claim that - * jemalloc owns memory that it had actually discarded). This scenario - * seems impossible, but the following assertion is a prudent sanity check. - */ -# define RTREE_GET_VALIDATE \ - assert(rtree_get_locked(rtree, key) == ret); +static inline void +rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + extent_t *extent) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true); + uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << + LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) + | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); #else -# define RTREE_GET_VALIDATE + atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE); #endif -RTREE_GET_GENERATE(rtree_get) -#undef RTREE_LOCK -#undef RTREE_UNLOCK -#undef RTREE_GET_VALIDATE - -JEMALLOC_INLINE bool -rtree_set(rtree_t *rtree, uintptr_t key, uint8_t val) -{ - uintptr_t subkey; - unsigned i, lshift, height, bits; - void **node, **child; - - malloc_mutex_lock(&rtree->mutex); - for (i = lshift = 0, height = rtree->height, node = rtree->root; - i < height - 1; - i++, lshift += bits, node = child) { - bits = rtree->level2bits[i]; - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - - bits); - child = (void**)node[subkey]; - if (child == NULL) { - size_t size = ((i + 1 < height - 1) ? 
sizeof(void *) - : (sizeof(uint8_t))) << rtree->level2bits[i+1]; - child = (void**)rtree->alloc(size); - if (child == NULL) { - malloc_mutex_unlock(&rtree->mutex); - return (true); - } - memset(child, 0, size); - node[subkey] = child; - } +} + +static inline void +rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + szind_t szind) { + assert(szind <= NSIZES); + +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, + true); + uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | + ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & + (((uintptr_t)0x1 << LG_VADDR) - 1)) | + ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits)); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + bool slab) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, + true); + uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) << + LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) & + (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE); +#endif +} + +static inline void +rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm, + extent_t *extent, szind_t szind, bool slab) { +#ifdef RTREE_LEAF_COMPACT + uintptr_t bits = ((uintptr_t)szind << LG_VADDR) | + ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) | + ((uintptr_t)slab); + atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE); +#else + rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); + rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); + /* + * Write extent last, since the element is atomically considered valid + * as soon as the extent field is non-NULL. + */ + rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent); +#endif +} + +static inline void +rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, + rtree_leaf_elm_t *elm, szind_t szind, bool slab) { + assert(!slab || szind < NBINS); + + /* + * The caller implicitly assures that it is the only writer to the szind + * and slab fields, and that the extent field cannot currently change. + */ + rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab); + rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind); +} + +JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * +rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, bool init_missing) { + assert(key != 0); + assert(!dependent || !init_missing); + + size_t slot = rtree_cache_direct_map(key); + uintptr_t leafkey = rtree_leafkey(key); + assert(leafkey != RTREE_LEAFKEY_INVALID); + + /* Fast path: L1 direct mapped cache. */ + if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) { + rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf; + assert(leaf != NULL); + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); + return &leaf[subkey]; } + /* + * Search the L2 LRU cache. On hit, swap the matching element into the + * slot in L1 cache, and move the position in L2 up by 1. + */ +#define RTREE_CACHE_CHECK_L2(i) do { \ + if (likely(rtree_ctx->l2_cache[i].leafkey == leafkey)) { \ + rtree_leaf_elm_t *leaf = rtree_ctx->l2_cache[i].leaf; \ + assert(leaf != NULL); \ + if (i > 0) { \ + /* Bubble up by one. 
*/ \ + rtree_ctx->l2_cache[i].leafkey = \ + rtree_ctx->l2_cache[i - 1].leafkey; \ + rtree_ctx->l2_cache[i].leaf = \ + rtree_ctx->l2_cache[i - 1].leaf; \ + rtree_ctx->l2_cache[i - 1].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[i - 1].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } else { \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + } \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1); \ + return &leaf[subkey]; \ + } \ +} while (0) + /* Check the first cache entry. */ + RTREE_CACHE_CHECK_L2(0); + /* Search the remaining cache elements. */ + for (unsigned i = 1; i < RTREE_CTX_NCACHE_L2; i++) { + RTREE_CACHE_CHECK_L2(i); + } +#undef RTREE_CACHE_CHECK_L2 + + return rtree_leaf_elm_lookup_hard(tsdn, rtree, rtree_ctx, key, + dependent, init_missing); +} + +static inline bool +rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, + extent_t *extent, szind_t szind, bool slab) { + /* Use rtree_clear() to set the extent to NULL. */ + assert(extent != NULL); - /* node is a leaf, so it contains values rather than node pointers. */ - bits = rtree->level2bits[i]; - subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits); - { - uint8_t *leaf = (uint8_t *)node; - leaf[subkey] = val; + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, false, true); + if (elm == NULL) { + return true; } - malloc_mutex_unlock(&rtree->mutex); - return (false); + assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL); + rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab); + + return false; +} + +JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t * +rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key, + bool dependent) { + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, + key, dependent, false); + if (!dependent && elm == NULL) { + return NULL; + } + assert(elm != NULL); + return elm; +} + +JEMALLOC_ALWAYS_INLINE extent_t * +rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return NULL; + } + return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); +} + +JEMALLOC_ALWAYS_INLINE szind_t +rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return NSIZES; + } + return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); +} + +/* + * rtree_slab_read() is intentionally omitted because slab is always read in + * conjunction with szind, which makes rtree_szind_slab_read() a better choice. 
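 *
 * An illustrative sketch of the preferred call, rtree_szind_slab_read()
 * (defined below); the tsdn/rtree/rtree_ctx arguments are assumed to be
 * supplied by the caller, and ptr to point into a live allocation so
 * that dependent == true is safe:
 *
 *   szind_t szind;
 *   bool slab;
 *   if (!rtree_szind_slab_read(tsdn, rtree, rtree_ctx, (uintptr_t)ptr,
 *       true, &szind, &slab)) {
 *           (szind and slab now describe the extent containing ptr)
 *   }
 *
 * A true return means the lookup failed, which is only possible when
 * dependent == false.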
+ */ + +JEMALLOC_ALWAYS_INLINE bool +rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return true; + } + *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent); + *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, + dependent); + if (!dependent && elm == NULL) { + return true; + } + *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent); + *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent); + return false; +} + +static inline void +rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, szind_t szind, bool slab) { + assert(!slab || szind < NBINS); + + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); + rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab); +} + +static inline void +rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key) { + rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true); + assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) != + NULL); + rtree_leaf_elm_write(tsdn, rtree, elm, NULL, NSIZES, false); } -#endif -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_RTREE_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/rtree_tsd.h b/dep/jemalloc/include/jemalloc/internal/rtree_tsd.h new file mode 100644 index 00000000000..3cdc8625487 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/rtree_tsd.h @@ -0,0 +1,50 @@ +#ifndef JEMALLOC_INTERNAL_RTREE_CTX_H +#define JEMALLOC_INTERNAL_RTREE_CTX_H + +/* + * Number of leafkey/leaf pairs to cache in L1 and L2 level respectively. Each + * entry supports an entire leaf, so the cache hit rate is typically high even + * with a small number of entries. In rare cases extent activity will straddle + * the boundary between two leaf nodes. Furthermore, an arena may use a + * combination of dss and mmap. Note that as memory usage grows past the amount + * that this cache can directly cover, the cache will become less effective if + * locality of reference is low, but the consequence is merely cache misses + * while traversing the tree nodes. + * + * The L1 direct mapped cache offers consistent and low cost on cache hit. + * However collision could affect hit rate negatively. This is resolved by + * combining with a L2 LRU cache, which requires linear search and re-ordering + * on access but suffers no collision. Note that, the cache will itself suffer + * cache misses if made overly large, plus the cost of linear search in the LRU + * cache. + */ +#define RTREE_CTX_LG_NCACHE 4 +#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE) +#define RTREE_CTX_NCACHE_L2 8 + +/* + * Zero initializer required for tsd initialization only. Proper initialization + * done via rtree_ctx_data_init(). 
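 *
 * For scale: with RTREE_CTX_LG_NCACHE == 4 the L1 cache above holds 16
 * direct-mapped leafkey/leaf pairs and the L2 LRU holds
 * RTREE_CTX_NCACHE_L2 == 8; rtree_cache_direct_map() (rtree.h) selects
 * the L1 slot as (key >> maskbits) & 15, so adjacent leaves map to
 * adjacent slots and a collision falls back to the linear L2 search
 * described above.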
+ */ +#define RTREE_CTX_ZERO_INITIALIZER {{{0}}} + + +typedef struct rtree_leaf_elm_s rtree_leaf_elm_t; + +typedef struct rtree_ctx_cache_elm_s rtree_ctx_cache_elm_t; +struct rtree_ctx_cache_elm_s { + uintptr_t leafkey; + rtree_leaf_elm_t *leaf; +}; + +typedef struct rtree_ctx_s rtree_ctx_t; +struct rtree_ctx_s { + /* Direct mapped cache. */ + rtree_ctx_cache_elm_t cache[RTREE_CTX_NCACHE]; + /* L2 LRU cache. */ + rtree_ctx_cache_elm_t l2_cache[RTREE_CTX_NCACHE_L2]; +}; + +void rtree_ctx_data_init(rtree_ctx_t *ctx); + +#endif /* JEMALLOC_INTERNAL_RTREE_CTX_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/size_classes.h b/dep/jemalloc/include/jemalloc/internal/size_classes.h index 821102e5c1c..0b7d3cd369b 100644 --- a/dep/jemalloc/include/jemalloc/internal/size_classes.h +++ b/dep/jemalloc/include/jemalloc/internal/size_classes.h @@ -1,690 +1,1416 @@ -/* This file was automatically generated by size_classes.sh. */ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - -#define NBINS 31 -#define SMALL_MAXCLASS 3584 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 13) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - SIZE_CLASS(31, 512, 4096) \ - SIZE_CLASS(32, 1024, 5120) \ - SIZE_CLASS(33, 1024, 6144) \ - SIZE_CLASS(34, 1024, 7168) \ - -#define NBINS 35 -#define SMALL_MAXCLASS 7168 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 14) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - 
SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - SIZE_CLASS(31, 512, 4096) \ - SIZE_CLASS(32, 1024, 5120) \ - SIZE_CLASS(33, 1024, 6144) \ - SIZE_CLASS(34, 1024, 7168) \ - SIZE_CLASS(35, 1024, 8192) \ - SIZE_CLASS(36, 2048, 10240) \ - SIZE_CLASS(37, 2048, 12288) \ - SIZE_CLASS(38, 2048, 14336) \ - -#define NBINS 39 -#define SMALL_MAXCLASS 14336 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 15) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - SIZE_CLASS(31, 512, 4096) \ - SIZE_CLASS(32, 1024, 5120) \ - SIZE_CLASS(33, 1024, 6144) \ - SIZE_CLASS(34, 1024, 7168) \ - SIZE_CLASS(35, 1024, 8192) \ - SIZE_CLASS(36, 2048, 10240) \ - SIZE_CLASS(37, 2048, 12288) \ - SIZE_CLASS(38, 2048, 14336) \ - SIZE_CLASS(39, 2048, 16384) \ - SIZE_CLASS(40, 4096, 20480) \ - SIZE_CLASS(41, 4096, 24576) \ - SIZE_CLASS(42, 4096, 28672) \ - -#define NBINS 43 -#define SMALL_MAXCLASS 28672 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 16) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 8, 24) \ - SIZE_CLASS(3, 8, 32) \ - SIZE_CLASS(4, 8, 40) \ - SIZE_CLASS(5, 8, 48) \ - SIZE_CLASS(6, 8, 56) \ - SIZE_CLASS(7, 8, 64) \ - SIZE_CLASS(8, 16, 80) \ - SIZE_CLASS(9, 16, 96) \ - SIZE_CLASS(10, 16, 112) \ - SIZE_CLASS(11, 16, 128) \ - SIZE_CLASS(12, 32, 160) \ - SIZE_CLASS(13, 32, 192) \ - SIZE_CLASS(14, 32, 224) \ - SIZE_CLASS(15, 32, 256) \ - SIZE_CLASS(16, 64, 320) \ - SIZE_CLASS(17, 64, 384) \ - SIZE_CLASS(18, 64, 448) \ - SIZE_CLASS(19, 64, 512) \ - SIZE_CLASS(20, 128, 640) \ - SIZE_CLASS(21, 128, 768) \ - SIZE_CLASS(22, 128, 896) \ - SIZE_CLASS(23, 128, 1024) \ - SIZE_CLASS(24, 256, 1280) \ - SIZE_CLASS(25, 256, 1536) \ - SIZE_CLASS(26, 256, 1792) \ - SIZE_CLASS(27, 256, 2048) \ - SIZE_CLASS(28, 512, 2560) \ - SIZE_CLASS(29, 512, 3072) \ - SIZE_CLASS(30, 512, 3584) \ - SIZE_CLASS(31, 512, 4096) \ - 
SIZE_CLASS(32, 1024, 5120) \ - SIZE_CLASS(33, 1024, 6144) \ - SIZE_CLASS(34, 1024, 7168) \ - SIZE_CLASS(35, 1024, 8192) \ - SIZE_CLASS(36, 2048, 10240) \ - SIZE_CLASS(37, 2048, 12288) \ - SIZE_CLASS(38, 2048, 14336) \ - SIZE_CLASS(39, 2048, 16384) \ - SIZE_CLASS(40, 4096, 20480) \ - SIZE_CLASS(41, 4096, 24576) \ - SIZE_CLASS(42, 4096, 28672) \ - SIZE_CLASS(43, 4096, 32768) \ - SIZE_CLASS(44, 8192, 40960) \ - SIZE_CLASS(45, 8192, 49152) \ - SIZE_CLASS(46, 8192, 57344) \ - -#define NBINS 47 -#define SMALL_MAXCLASS 57344 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ +#ifndef JEMALLOC_INTERNAL_SIZE_CLASSES_H +#define JEMALLOC_INTERNAL_SIZE_CLASSES_H -#define NBINS 28 -#define SMALL_MAXCLASS 3584 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 13) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ - SIZE_CLASS(28, 512, 4096) \ - SIZE_CLASS(29, 1024, 5120) \ - SIZE_CLASS(30, 1024, 6144) \ - SIZE_CLASS(31, 1024, 7168) \ - -#define NBINS 32 -#define SMALL_MAXCLASS 7168 -#endif - -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 14) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 
2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ - SIZE_CLASS(28, 512, 4096) \ - SIZE_CLASS(29, 1024, 5120) \ - SIZE_CLASS(30, 1024, 6144) \ - SIZE_CLASS(31, 1024, 7168) \ - SIZE_CLASS(32, 1024, 8192) \ - SIZE_CLASS(33, 2048, 10240) \ - SIZE_CLASS(34, 2048, 12288) \ - SIZE_CLASS(35, 2048, 14336) \ +/* This file was automatically generated by size_classes.sh. */ -#define NBINS 36 -#define SMALL_MAXCLASS 14336 -#endif +#include "jemalloc/internal/jemalloc_internal_types.h" -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 15) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ - SIZE_CLASS(28, 512, 4096) \ - SIZE_CLASS(29, 1024, 5120) \ - SIZE_CLASS(30, 1024, 6144) \ - SIZE_CLASS(31, 1024, 7168) \ - SIZE_CLASS(32, 1024, 8192) \ - SIZE_CLASS(33, 2048, 10240) \ - SIZE_CLASS(34, 2048, 12288) \ - SIZE_CLASS(35, 2048, 14336) \ - SIZE_CLASS(36, 2048, 16384) \ - SIZE_CLASS(37, 4096, 20480) \ - SIZE_CLASS(38, 4096, 24576) \ - SIZE_CLASS(39, 4096, 28672) \ +/* + * This header file defines: + * + * LG_SIZE_CLASS_GROUP: Lg of size class count for each size doubling. + * LG_TINY_MIN: Lg of minimum size class to support. + * SIZE_CLASSES: Complete table of SC(index, lg_grp, lg_delta, ndelta, psz, + * bin, pgs, lg_delta_lookup) tuples. + * index: Size class index. + * lg_grp: Lg group base size (no deltas added). + * lg_delta: Lg delta to previous size class. + * ndelta: Delta multiplier. size == 1<<lg_grp + ndelta<<lg_delta + * psz: 'yes' if a multiple of the page size, 'no' otherwise. + * bin: 'yes' if a small bin size class, 'no' otherwise. + * pgs: Slab page count if a small bin size class, 0 otherwise. + * lg_delta_lookup: Same as lg_delta if a lookup table size class, 'no' + * otherwise. + * NTBINS: Number of tiny bins. + * NLBINS: Number of bins supported by the lookup table. + * NBINS: Number of small size class bins. + * NSIZES: Number of size classes. + * LG_CEIL_NSIZES: Number of bits required to store NSIZES. + * NPSIZES: Number of size classes that are a multiple of (1U << LG_PAGE). + * LG_TINY_MAXCLASS: Lg of maximum tiny size class. + * LOOKUP_MAXCLASS: Maximum size class included in lookup table. + * SMALL_MAXCLASS: Maximum small size class. + * LG_LARGE_MINCLASS: Lg of minimum large size class. + * LARGE_MAXCLASS: Maximum (large) size class. 
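+ *
+ * For example, under this encoding a size is computed as
+ *   sz = (ZU(1) << lg_grp) + ((size_t)ndelta << lg_delta);
+ * so SC( 8, 6, 4, 1, no, yes, 5, 4) in the first table below denotes a
+ * size class of (1 << 6) + 1 * (1 << 4) == 80 bytes -- the class the old
+ * format expressed as SIZE_CLASS(8, 16, 80).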
+ */ -#define NBINS 40 -#define SMALL_MAXCLASS 28672 -#endif +#define LG_SIZE_CLASS_GROUP 2 +#define LG_TINY_MIN 3 -#if (LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 16) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 8, 8) \ - SIZE_CLASS(1, 8, 16) \ - SIZE_CLASS(2, 16, 32) \ - SIZE_CLASS(3, 16, 48) \ - SIZE_CLASS(4, 16, 64) \ - SIZE_CLASS(5, 16, 80) \ - SIZE_CLASS(6, 16, 96) \ - SIZE_CLASS(7, 16, 112) \ - SIZE_CLASS(8, 16, 128) \ - SIZE_CLASS(9, 32, 160) \ - SIZE_CLASS(10, 32, 192) \ - SIZE_CLASS(11, 32, 224) \ - SIZE_CLASS(12, 32, 256) \ - SIZE_CLASS(13, 64, 320) \ - SIZE_CLASS(14, 64, 384) \ - SIZE_CLASS(15, 64, 448) \ - SIZE_CLASS(16, 64, 512) \ - SIZE_CLASS(17, 128, 640) \ - SIZE_CLASS(18, 128, 768) \ - SIZE_CLASS(19, 128, 896) \ - SIZE_CLASS(20, 128, 1024) \ - SIZE_CLASS(21, 256, 1280) \ - SIZE_CLASS(22, 256, 1536) \ - SIZE_CLASS(23, 256, 1792) \ - SIZE_CLASS(24, 256, 2048) \ - SIZE_CLASS(25, 512, 2560) \ - SIZE_CLASS(26, 512, 3072) \ - SIZE_CLASS(27, 512, 3584) \ - SIZE_CLASS(28, 512, 4096) \ - SIZE_CLASS(29, 1024, 5120) \ - SIZE_CLASS(30, 1024, 6144) \ - SIZE_CLASS(31, 1024, 7168) \ - SIZE_CLASS(32, 1024, 8192) \ - SIZE_CLASS(33, 2048, 10240) \ - SIZE_CLASS(34, 2048, 12288) \ - SIZE_CLASS(35, 2048, 14336) \ - SIZE_CLASS(36, 2048, 16384) \ - SIZE_CLASS(37, 4096, 20480) \ - SIZE_CLASS(38, 4096, 24576) \ - SIZE_CLASS(39, 4096, 28672) \ - SIZE_CLASS(40, 4096, 32768) \ - SIZE_CLASS(41, 8192, 40960) \ - SIZE_CLASS(42, 8192, 49152) \ - SIZE_CLASS(43, 8192, 57344) \ +#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 3, 3, 0, no, yes, 1, 3) \ + SC( 1, 3, 3, 1, no, yes, 1, 3) \ + SC( 2, 3, 3, 2, no, yes, 3, 3) \ + SC( 3, 3, 3, 3, no, yes, 1, 3) \ + \ + SC( 4, 5, 3, 1, no, yes, 5, 3) \ + SC( 5, 5, 3, 2, no, yes, 3, 3) \ + SC( 6, 5, 3, 3, no, yes, 7, 3) \ + SC( 7, 5, 3, 4, no, yes, 1, 3) \ + \ + SC( 8, 6, 4, 1, no, yes, 5, 4) \ + SC( 9, 6, 4, 2, no, yes, 3, 4) \ + SC( 10, 6, 4, 3, no, yes, 7, 4) \ + SC( 11, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 12, 7, 5, 1, no, yes, 5, 5) \ + SC( 13, 7, 5, 2, no, yes, 3, 5) \ + SC( 14, 7, 5, 3, no, yes, 7, 5) \ + SC( 15, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 16, 8, 6, 1, no, yes, 5, 6) \ + SC( 17, 8, 6, 2, no, yes, 3, 6) \ + SC( 18, 8, 6, 3, no, yes, 7, 6) \ + SC( 19, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 20, 9, 7, 1, no, yes, 5, 7) \ + SC( 21, 9, 7, 2, no, yes, 3, 7) \ + SC( 22, 9, 7, 3, no, yes, 7, 7) \ + SC( 23, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 24, 10, 8, 1, no, yes, 5, 8) \ + SC( 25, 10, 8, 2, no, yes, 3, 8) \ + SC( 26, 10, 8, 3, no, yes, 7, 8) \ + SC( 27, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 28, 11, 9, 1, no, yes, 5, 9) \ + SC( 29, 11, 9, 2, no, yes, 3, 9) \ + SC( 30, 11, 9, 3, no, yes, 7, 9) \ + SC( 31, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 32, 12, 10, 1, no, yes, 5, no) \ + SC( 33, 12, 10, 2, no, yes, 3, no) \ + SC( 34, 12, 10, 3, no, yes, 7, no) \ + SC( 35, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 36, 13, 11, 1, no, yes, 5, no) \ + SC( 37, 13, 11, 2, yes, yes, 3, no) \ + SC( 38, 13, 11, 3, no, yes, 7, no) \ + SC( 39, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 40, 14, 12, 1, yes, no, 0, no) \ + SC( 41, 14, 12, 2, yes, no, 0, no) \ + SC( 42, 14, 12, 3, yes, no, 0, no) \ + SC( 43, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 44, 15, 13, 1, yes, no, 0, no) \ + SC( 45, 15, 13, 2, yes, no, 0, no) \ + SC( 46, 15, 13, 3, yes, no, 0, no) \ + SC( 47, 15, 13, 4, yes, no, 0, no) \ + 
\ + SC( 48, 16, 14, 1, yes, no, 0, no) \ + SC( 49, 16, 14, 2, yes, no, 0, no) \ + SC( 50, 16, 14, 3, yes, no, 0, no) \ + SC( 51, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 52, 17, 15, 1, yes, no, 0, no) \ + SC( 53, 17, 15, 2, yes, no, 0, no) \ + SC( 54, 17, 15, 3, yes, no, 0, no) \ + SC( 55, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 56, 18, 16, 1, yes, no, 0, no) \ + SC( 57, 18, 16, 2, yes, no, 0, no) \ + SC( 58, 18, 16, 3, yes, no, 0, no) \ + SC( 59, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 60, 19, 17, 1, yes, no, 0, no) \ + SC( 61, 19, 17, 2, yes, no, 0, no) \ + SC( 62, 19, 17, 3, yes, no, 0, no) \ + SC( 63, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 64, 20, 18, 1, yes, no, 0, no) \ + SC( 65, 20, 18, 2, yes, no, 0, no) \ + SC( 66, 20, 18, 3, yes, no, 0, no) \ + SC( 67, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 68, 21, 19, 1, yes, no, 0, no) \ + SC( 69, 21, 19, 2, yes, no, 0, no) \ + SC( 70, 21, 19, 3, yes, no, 0, no) \ + SC( 71, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 72, 22, 20, 1, yes, no, 0, no) \ + SC( 73, 22, 20, 2, yes, no, 0, no) \ + SC( 74, 22, 20, 3, yes, no, 0, no) \ + SC( 75, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 76, 23, 21, 1, yes, no, 0, no) \ + SC( 77, 23, 21, 2, yes, no, 0, no) \ + SC( 78, 23, 21, 3, yes, no, 0, no) \ + SC( 79, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 80, 24, 22, 1, yes, no, 0, no) \ + SC( 81, 24, 22, 2, yes, no, 0, no) \ + SC( 82, 24, 22, 3, yes, no, 0, no) \ + SC( 83, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 84, 25, 23, 1, yes, no, 0, no) \ + SC( 85, 25, 23, 2, yes, no, 0, no) \ + SC( 86, 25, 23, 3, yes, no, 0, no) \ + SC( 87, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 88, 26, 24, 1, yes, no, 0, no) \ + SC( 89, 26, 24, 2, yes, no, 0, no) \ + SC( 90, 26, 24, 3, yes, no, 0, no) \ + SC( 91, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 92, 27, 25, 1, yes, no, 0, no) \ + SC( 93, 27, 25, 2, yes, no, 0, no) \ + SC( 94, 27, 25, 3, yes, no, 0, no) \ + SC( 95, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 96, 28, 26, 1, yes, no, 0, no) \ + SC( 97, 28, 26, 2, yes, no, 0, no) \ + SC( 98, 28, 26, 3, yes, no, 0, no) \ + SC( 99, 28, 26, 4, yes, no, 0, no) \ + \ + SC(100, 29, 27, 1, yes, no, 0, no) \ + SC(101, 29, 27, 2, yes, no, 0, no) \ + SC(102, 29, 27, 3, yes, no, 0, no) \ + SC(103, 29, 27, 4, yes, no, 0, no) \ + \ + SC(104, 30, 28, 1, yes, no, 0, no) \ + SC(105, 30, 28, 2, yes, no, 0, no) \ + SC(106, 30, 28, 3, yes, no, 0, no) \ -#define NBINS 44 -#define SMALL_MAXCLASS 57344 +#define SIZE_CLASSES_DEFINED +#define NTBINS 0 +#define NLBINS 32 +#define NBINS 39 +#define NSIZES 107 +#define LG_CEIL_NSIZES 7 +#define NPSIZES 71 +#define LG_TINY_MAXCLASS "NA" +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - 
SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ +#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 3, 3, 0, no, yes, 1, 3) \ + \ + SC( 1, 3, 3, 1, no, yes, 1, 3) \ + SC( 2, 4, 4, 1, no, yes, 1, 4) \ + SC( 3, 4, 4, 2, no, yes, 3, 4) \ + SC( 4, 4, 4, 3, no, yes, 1, 4) \ + \ + SC( 5, 6, 4, 1, no, yes, 5, 4) \ + SC( 6, 6, 4, 2, no, yes, 3, 4) \ + SC( 7, 6, 4, 3, no, yes, 7, 4) \ + SC( 8, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 9, 7, 5, 1, no, yes, 5, 5) \ + SC( 10, 7, 5, 2, no, yes, 3, 5) \ + SC( 11, 7, 5, 3, no, yes, 7, 5) \ + SC( 12, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 13, 8, 6, 1, no, yes, 5, 6) \ + SC( 14, 8, 6, 2, no, yes, 3, 6) \ + SC( 15, 8, 6, 3, no, yes, 7, 6) \ + SC( 16, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 17, 9, 7, 1, no, yes, 5, 7) \ + SC( 18, 9, 7, 2, no, yes, 3, 7) \ + SC( 19, 9, 7, 3, no, yes, 7, 7) \ + SC( 20, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 21, 10, 8, 1, no, yes, 5, 8) \ + SC( 22, 10, 8, 2, no, yes, 3, 8) \ + SC( 23, 10, 8, 3, no, yes, 7, 8) \ + SC( 24, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 25, 11, 9, 1, no, yes, 5, 9) \ + SC( 26, 11, 9, 2, no, yes, 3, 9) \ + SC( 27, 11, 9, 3, no, yes, 7, 9) \ + SC( 28, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 29, 12, 10, 1, no, yes, 5, no) \ + SC( 30, 12, 10, 2, no, yes, 3, no) \ + SC( 31, 12, 10, 3, no, yes, 7, no) \ + SC( 32, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 33, 13, 11, 1, no, yes, 5, no) \ + SC( 34, 13, 11, 2, yes, yes, 3, no) \ + SC( 35, 13, 11, 3, no, yes, 7, no) \ + SC( 36, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 37, 14, 12, 1, yes, no, 0, no) \ + SC( 38, 14, 12, 2, yes, no, 0, no) \ + SC( 39, 14, 12, 3, yes, no, 0, no) \ + SC( 40, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 41, 15, 13, 1, yes, no, 0, no) \ + SC( 42, 15, 13, 2, yes, no, 0, no) \ + SC( 43, 15, 13, 3, yes, no, 0, no) \ + SC( 44, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 45, 16, 14, 1, yes, no, 0, no) \ + SC( 46, 16, 14, 2, yes, no, 0, no) \ + SC( 47, 16, 14, 3, yes, no, 0, no) \ + SC( 48, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 49, 17, 15, 1, yes, no, 0, no) \ + SC( 50, 17, 15, 2, yes, no, 0, no) \ + SC( 51, 17, 15, 3, yes, no, 0, no) \ + SC( 52, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 53, 18, 16, 1, yes, no, 0, no) \ + SC( 54, 18, 16, 2, yes, no, 0, no) \ + SC( 55, 18, 16, 3, yes, no, 0, no) \ + SC( 56, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 57, 19, 17, 1, yes, no, 0, no) \ + SC( 58, 19, 17, 2, yes, no, 0, no) \ + SC( 59, 19, 17, 3, yes, no, 0, no) \ + SC( 60, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 61, 20, 18, 1, yes, no, 0, no) \ + SC( 62, 20, 18, 2, yes, no, 0, no) \ + SC( 63, 20, 18, 3, yes, no, 0, no) \ + SC( 64, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 65, 21, 19, 1, yes, no, 0, no) \ + SC( 66, 21, 19, 2, yes, no, 0, no) \ + SC( 67, 21, 19, 3, yes, no, 0, no) \ + SC( 68, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 69, 22, 20, 1, yes, no, 0, no) \ + SC( 70, 22, 20, 2, yes, no, 0, no) \ + SC( 71, 22, 20, 3, yes, no, 0, no) \ + SC( 72, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 73, 23, 21, 1, yes, no, 0, no) \ + SC( 74, 23, 21, 2, yes, no, 0, no) \ + SC( 75, 23, 21, 3, yes, no, 0, no) \ + SC( 76, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 77, 24, 22, 1, yes, no, 0, no) \ + SC( 78, 24, 22, 2, yes, no, 0, no) \ + SC( 79, 24, 22, 3, yes, no, 0, no) \ + SC( 80, 24, 22, 4, 
yes, no, 0, no) \ + \ + SC( 81, 25, 23, 1, yes, no, 0, no) \ + SC( 82, 25, 23, 2, yes, no, 0, no) \ + SC( 83, 25, 23, 3, yes, no, 0, no) \ + SC( 84, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 85, 26, 24, 1, yes, no, 0, no) \ + SC( 86, 26, 24, 2, yes, no, 0, no) \ + SC( 87, 26, 24, 3, yes, no, 0, no) \ + SC( 88, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 89, 27, 25, 1, yes, no, 0, no) \ + SC( 90, 27, 25, 2, yes, no, 0, no) \ + SC( 91, 27, 25, 3, yes, no, 0, no) \ + SC( 92, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 93, 28, 26, 1, yes, no, 0, no) \ + SC( 94, 28, 26, 2, yes, no, 0, no) \ + SC( 95, 28, 26, 3, yes, no, 0, no) \ + SC( 96, 28, 26, 4, yes, no, 0, no) \ + \ + SC( 97, 29, 27, 1, yes, no, 0, no) \ + SC( 98, 29, 27, 2, yes, no, 0, no) \ + SC( 99, 29, 27, 3, yes, no, 0, no) \ + SC(100, 29, 27, 4, yes, no, 0, no) \ + \ + SC(101, 30, 28, 1, yes, no, 0, no) \ + SC(102, 30, 28, 2, yes, no, 0, no) \ + SC(103, 30, 28, 3, yes, no, 0, no) \ -#define NBINS 27 -#define SMALL_MAXCLASS 3584 +#define SIZE_CLASSES_DEFINED +#define NTBINS 1 +#define NLBINS 29 +#define NBINS 36 +#define NSIZES 104 +#define LG_CEIL_NSIZES 7 +#define NPSIZES 71 +#define LG_TINY_MAXCLASS 3 +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 13) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ - SIZE_CLASS(27, 512, 4096) \ - SIZE_CLASS(28, 1024, 5120) \ - SIZE_CLASS(29, 1024, 6144) \ - SIZE_CLASS(30, 1024, 7168) \ +#if (LG_SIZEOF_PTR == 2 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 4, 4, 0, no, yes, 1, 4) \ + SC( 1, 4, 4, 1, no, yes, 1, 4) \ + SC( 2, 4, 4, 2, no, yes, 3, 4) \ + SC( 3, 4, 4, 3, no, yes, 1, 4) \ + \ + SC( 4, 6, 4, 1, no, yes, 5, 4) \ + SC( 5, 6, 4, 2, no, yes, 3, 4) \ + SC( 6, 6, 4, 3, no, yes, 7, 4) \ + SC( 7, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 8, 7, 5, 1, no, yes, 5, 5) \ + SC( 9, 7, 5, 2, no, yes, 3, 5) \ + SC( 10, 7, 5, 3, no, yes, 7, 5) \ + SC( 11, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 12, 8, 6, 1, no, yes, 5, 6) \ + SC( 13, 8, 6, 2, no, yes, 3, 6) \ + SC( 14, 8, 6, 3, no, yes, 7, 6) \ + SC( 15, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 16, 9, 7, 1, no, yes, 5, 7) \ + SC( 17, 9, 7, 2, no, yes, 3, 7) \ + SC( 18, 9, 7, 3, no, yes, 7, 7) \ + SC( 19, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 20, 10, 8, 1, no, yes, 5, 8) \ + SC( 21, 10, 8, 2, no, yes, 3, 8) \ + SC( 22, 10, 8, 3, no, yes, 7, 8) \ + SC( 23, 10, 8, 4, no, yes, 1, 8) \ + \ + 
SC( 24, 11, 9, 1, no, yes, 5, 9) \ + SC( 25, 11, 9, 2, no, yes, 3, 9) \ + SC( 26, 11, 9, 3, no, yes, 7, 9) \ + SC( 27, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 28, 12, 10, 1, no, yes, 5, no) \ + SC( 29, 12, 10, 2, no, yes, 3, no) \ + SC( 30, 12, 10, 3, no, yes, 7, no) \ + SC( 31, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 32, 13, 11, 1, no, yes, 5, no) \ + SC( 33, 13, 11, 2, yes, yes, 3, no) \ + SC( 34, 13, 11, 3, no, yes, 7, no) \ + SC( 35, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 36, 14, 12, 1, yes, no, 0, no) \ + SC( 37, 14, 12, 2, yes, no, 0, no) \ + SC( 38, 14, 12, 3, yes, no, 0, no) \ + SC( 39, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 40, 15, 13, 1, yes, no, 0, no) \ + SC( 41, 15, 13, 2, yes, no, 0, no) \ + SC( 42, 15, 13, 3, yes, no, 0, no) \ + SC( 43, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 44, 16, 14, 1, yes, no, 0, no) \ + SC( 45, 16, 14, 2, yes, no, 0, no) \ + SC( 46, 16, 14, 3, yes, no, 0, no) \ + SC( 47, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 48, 17, 15, 1, yes, no, 0, no) \ + SC( 49, 17, 15, 2, yes, no, 0, no) \ + SC( 50, 17, 15, 3, yes, no, 0, no) \ + SC( 51, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 52, 18, 16, 1, yes, no, 0, no) \ + SC( 53, 18, 16, 2, yes, no, 0, no) \ + SC( 54, 18, 16, 3, yes, no, 0, no) \ + SC( 55, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 56, 19, 17, 1, yes, no, 0, no) \ + SC( 57, 19, 17, 2, yes, no, 0, no) \ + SC( 58, 19, 17, 3, yes, no, 0, no) \ + SC( 59, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 60, 20, 18, 1, yes, no, 0, no) \ + SC( 61, 20, 18, 2, yes, no, 0, no) \ + SC( 62, 20, 18, 3, yes, no, 0, no) \ + SC( 63, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 64, 21, 19, 1, yes, no, 0, no) \ + SC( 65, 21, 19, 2, yes, no, 0, no) \ + SC( 66, 21, 19, 3, yes, no, 0, no) \ + SC( 67, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 68, 22, 20, 1, yes, no, 0, no) \ + SC( 69, 22, 20, 2, yes, no, 0, no) \ + SC( 70, 22, 20, 3, yes, no, 0, no) \ + SC( 71, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 72, 23, 21, 1, yes, no, 0, no) \ + SC( 73, 23, 21, 2, yes, no, 0, no) \ + SC( 74, 23, 21, 3, yes, no, 0, no) \ + SC( 75, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 76, 24, 22, 1, yes, no, 0, no) \ + SC( 77, 24, 22, 2, yes, no, 0, no) \ + SC( 78, 24, 22, 3, yes, no, 0, no) \ + SC( 79, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 80, 25, 23, 1, yes, no, 0, no) \ + SC( 81, 25, 23, 2, yes, no, 0, no) \ + SC( 82, 25, 23, 3, yes, no, 0, no) \ + SC( 83, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 84, 26, 24, 1, yes, no, 0, no) \ + SC( 85, 26, 24, 2, yes, no, 0, no) \ + SC( 86, 26, 24, 3, yes, no, 0, no) \ + SC( 87, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 88, 27, 25, 1, yes, no, 0, no) \ + SC( 89, 27, 25, 2, yes, no, 0, no) \ + SC( 90, 27, 25, 3, yes, no, 0, no) \ + SC( 91, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 92, 28, 26, 1, yes, no, 0, no) \ + SC( 93, 28, 26, 2, yes, no, 0, no) \ + SC( 94, 28, 26, 3, yes, no, 0, no) \ + SC( 95, 28, 26, 4, yes, no, 0, no) \ + \ + SC( 96, 29, 27, 1, yes, no, 0, no) \ + SC( 97, 29, 27, 2, yes, no, 0, no) \ + SC( 98, 29, 27, 3, yes, no, 0, no) \ + SC( 99, 29, 27, 4, yes, no, 0, no) \ + \ + SC(100, 30, 28, 1, yes, no, 0, no) \ + SC(101, 30, 28, 2, yes, no, 0, no) \ + SC(102, 30, 28, 3, yes, no, 0, no) \ -#define NBINS 31 -#define SMALL_MAXCLASS 7168 +#define SIZE_CLASSES_DEFINED +#define NTBINS 0 +#define NLBINS 28 +#define NBINS 35 +#define NSIZES 103 +#define LG_CEIL_NSIZES 7 +#define NPSIZES 71 +#define LG_TINY_MAXCLASS "NA" +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define 
LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 30) + (((size_t)3) << 28)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 14) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ - SIZE_CLASS(27, 512, 4096) \ - SIZE_CLASS(28, 1024, 5120) \ - SIZE_CLASS(29, 1024, 6144) \ - SIZE_CLASS(30, 1024, 7168) \ - SIZE_CLASS(31, 1024, 8192) \ - SIZE_CLASS(32, 2048, 10240) \ - SIZE_CLASS(33, 2048, 12288) \ - SIZE_CLASS(34, 2048, 14336) \ +#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 3 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 3, 3, 0, no, yes, 1, 3) \ + SC( 1, 3, 3, 1, no, yes, 1, 3) \ + SC( 2, 3, 3, 2, no, yes, 3, 3) \ + SC( 3, 3, 3, 3, no, yes, 1, 3) \ + \ + SC( 4, 5, 3, 1, no, yes, 5, 3) \ + SC( 5, 5, 3, 2, no, yes, 3, 3) \ + SC( 6, 5, 3, 3, no, yes, 7, 3) \ + SC( 7, 5, 3, 4, no, yes, 1, 3) \ + \ + SC( 8, 6, 4, 1, no, yes, 5, 4) \ + SC( 9, 6, 4, 2, no, yes, 3, 4) \ + SC( 10, 6, 4, 3, no, yes, 7, 4) \ + SC( 11, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 12, 7, 5, 1, no, yes, 5, 5) \ + SC( 13, 7, 5, 2, no, yes, 3, 5) \ + SC( 14, 7, 5, 3, no, yes, 7, 5) \ + SC( 15, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 16, 8, 6, 1, no, yes, 5, 6) \ + SC( 17, 8, 6, 2, no, yes, 3, 6) \ + SC( 18, 8, 6, 3, no, yes, 7, 6) \ + SC( 19, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 20, 9, 7, 1, no, yes, 5, 7) \ + SC( 21, 9, 7, 2, no, yes, 3, 7) \ + SC( 22, 9, 7, 3, no, yes, 7, 7) \ + SC( 23, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 24, 10, 8, 1, no, yes, 5, 8) \ + SC( 25, 10, 8, 2, no, yes, 3, 8) \ + SC( 26, 10, 8, 3, no, yes, 7, 8) \ + SC( 27, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 28, 11, 9, 1, no, yes, 5, 9) \ + SC( 29, 11, 9, 2, no, yes, 3, 9) \ + SC( 30, 11, 9, 3, no, yes, 7, 9) \ + SC( 31, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 32, 12, 10, 1, no, yes, 5, no) \ + SC( 33, 12, 10, 2, no, yes, 3, no) \ + SC( 34, 12, 10, 3, no, yes, 7, no) \ + SC( 35, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 36, 13, 11, 1, no, yes, 5, no) \ + SC( 37, 13, 11, 2, yes, yes, 3, no) \ + SC( 38, 13, 11, 3, no, yes, 7, no) \ + SC( 39, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 40, 14, 12, 1, yes, no, 0, no) \ + SC( 41, 14, 12, 2, yes, no, 0, no) \ + SC( 42, 14, 12, 3, yes, no, 0, no) \ + SC( 43, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 44, 15, 13, 1, yes, no, 0, no) \ + SC( 45, 15, 13, 2, yes, no, 0, no) \ + SC( 46, 15, 13, 3, yes, no, 0, no) \ + SC( 47, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 48, 16, 14, 1, yes, no, 0, no) \ + SC( 49, 16, 14, 2, yes, no, 0, no) \ + SC( 50, 16, 14, 3, yes, no, 0, no) \ + SC( 51, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 52, 17, 15, 1, yes, no, 0, no) \ + SC( 53, 17, 15, 2, yes, no, 0, no) \ + SC( 54, 17, 15, 3, yes, no, 0, 
no) \ + SC( 55, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 56, 18, 16, 1, yes, no, 0, no) \ + SC( 57, 18, 16, 2, yes, no, 0, no) \ + SC( 58, 18, 16, 3, yes, no, 0, no) \ + SC( 59, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 60, 19, 17, 1, yes, no, 0, no) \ + SC( 61, 19, 17, 2, yes, no, 0, no) \ + SC( 62, 19, 17, 3, yes, no, 0, no) \ + SC( 63, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 64, 20, 18, 1, yes, no, 0, no) \ + SC( 65, 20, 18, 2, yes, no, 0, no) \ + SC( 66, 20, 18, 3, yes, no, 0, no) \ + SC( 67, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 68, 21, 19, 1, yes, no, 0, no) \ + SC( 69, 21, 19, 2, yes, no, 0, no) \ + SC( 70, 21, 19, 3, yes, no, 0, no) \ + SC( 71, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 72, 22, 20, 1, yes, no, 0, no) \ + SC( 73, 22, 20, 2, yes, no, 0, no) \ + SC( 74, 22, 20, 3, yes, no, 0, no) \ + SC( 75, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 76, 23, 21, 1, yes, no, 0, no) \ + SC( 77, 23, 21, 2, yes, no, 0, no) \ + SC( 78, 23, 21, 3, yes, no, 0, no) \ + SC( 79, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 80, 24, 22, 1, yes, no, 0, no) \ + SC( 81, 24, 22, 2, yes, no, 0, no) \ + SC( 82, 24, 22, 3, yes, no, 0, no) \ + SC( 83, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 84, 25, 23, 1, yes, no, 0, no) \ + SC( 85, 25, 23, 2, yes, no, 0, no) \ + SC( 86, 25, 23, 3, yes, no, 0, no) \ + SC( 87, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 88, 26, 24, 1, yes, no, 0, no) \ + SC( 89, 26, 24, 2, yes, no, 0, no) \ + SC( 90, 26, 24, 3, yes, no, 0, no) \ + SC( 91, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 92, 27, 25, 1, yes, no, 0, no) \ + SC( 93, 27, 25, 2, yes, no, 0, no) \ + SC( 94, 27, 25, 3, yes, no, 0, no) \ + SC( 95, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 96, 28, 26, 1, yes, no, 0, no) \ + SC( 97, 28, 26, 2, yes, no, 0, no) \ + SC( 98, 28, 26, 3, yes, no, 0, no) \ + SC( 99, 28, 26, 4, yes, no, 0, no) \ + \ + SC(100, 29, 27, 1, yes, no, 0, no) \ + SC(101, 29, 27, 2, yes, no, 0, no) \ + SC(102, 29, 27, 3, yes, no, 0, no) \ + SC(103, 29, 27, 4, yes, no, 0, no) \ + \ + SC(104, 30, 28, 1, yes, no, 0, no) \ + SC(105, 30, 28, 2, yes, no, 0, no) \ + SC(106, 30, 28, 3, yes, no, 0, no) \ + SC(107, 30, 28, 4, yes, no, 0, no) \ + \ + SC(108, 31, 29, 1, yes, no, 0, no) \ + SC(109, 31, 29, 2, yes, no, 0, no) \ + SC(110, 31, 29, 3, yes, no, 0, no) \ + SC(111, 31, 29, 4, yes, no, 0, no) \ + \ + SC(112, 32, 30, 1, yes, no, 0, no) \ + SC(113, 32, 30, 2, yes, no, 0, no) \ + SC(114, 32, 30, 3, yes, no, 0, no) \ + SC(115, 32, 30, 4, yes, no, 0, no) \ + \ + SC(116, 33, 31, 1, yes, no, 0, no) \ + SC(117, 33, 31, 2, yes, no, 0, no) \ + SC(118, 33, 31, 3, yes, no, 0, no) \ + SC(119, 33, 31, 4, yes, no, 0, no) \ + \ + SC(120, 34, 32, 1, yes, no, 0, no) \ + SC(121, 34, 32, 2, yes, no, 0, no) \ + SC(122, 34, 32, 3, yes, no, 0, no) \ + SC(123, 34, 32, 4, yes, no, 0, no) \ + \ + SC(124, 35, 33, 1, yes, no, 0, no) \ + SC(125, 35, 33, 2, yes, no, 0, no) \ + SC(126, 35, 33, 3, yes, no, 0, no) \ + SC(127, 35, 33, 4, yes, no, 0, no) \ + \ + SC(128, 36, 34, 1, yes, no, 0, no) \ + SC(129, 36, 34, 2, yes, no, 0, no) \ + SC(130, 36, 34, 3, yes, no, 0, no) \ + SC(131, 36, 34, 4, yes, no, 0, no) \ + \ + SC(132, 37, 35, 1, yes, no, 0, no) \ + SC(133, 37, 35, 2, yes, no, 0, no) \ + SC(134, 37, 35, 3, yes, no, 0, no) \ + SC(135, 37, 35, 4, yes, no, 0, no) \ + \ + SC(136, 38, 36, 1, yes, no, 0, no) \ + SC(137, 38, 36, 2, yes, no, 0, no) \ + SC(138, 38, 36, 3, yes, no, 0, no) \ + SC(139, 38, 36, 4, yes, no, 0, no) \ + \ + SC(140, 39, 37, 1, yes, no, 0, no) \ + SC(141, 39, 37, 2, yes, no, 0, no) \ + SC(142, 39, 37, 3, yes, no, 0, no) \ + SC(143, 39, 37, 4, yes, 
no, 0, no) \ + \ + SC(144, 40, 38, 1, yes, no, 0, no) \ + SC(145, 40, 38, 2, yes, no, 0, no) \ + SC(146, 40, 38, 3, yes, no, 0, no) \ + SC(147, 40, 38, 4, yes, no, 0, no) \ + \ + SC(148, 41, 39, 1, yes, no, 0, no) \ + SC(149, 41, 39, 2, yes, no, 0, no) \ + SC(150, 41, 39, 3, yes, no, 0, no) \ + SC(151, 41, 39, 4, yes, no, 0, no) \ + \ + SC(152, 42, 40, 1, yes, no, 0, no) \ + SC(153, 42, 40, 2, yes, no, 0, no) \ + SC(154, 42, 40, 3, yes, no, 0, no) \ + SC(155, 42, 40, 4, yes, no, 0, no) \ + \ + SC(156, 43, 41, 1, yes, no, 0, no) \ + SC(157, 43, 41, 2, yes, no, 0, no) \ + SC(158, 43, 41, 3, yes, no, 0, no) \ + SC(159, 43, 41, 4, yes, no, 0, no) \ + \ + SC(160, 44, 42, 1, yes, no, 0, no) \ + SC(161, 44, 42, 2, yes, no, 0, no) \ + SC(162, 44, 42, 3, yes, no, 0, no) \ + SC(163, 44, 42, 4, yes, no, 0, no) \ + \ + SC(164, 45, 43, 1, yes, no, 0, no) \ + SC(165, 45, 43, 2, yes, no, 0, no) \ + SC(166, 45, 43, 3, yes, no, 0, no) \ + SC(167, 45, 43, 4, yes, no, 0, no) \ + \ + SC(168, 46, 44, 1, yes, no, 0, no) \ + SC(169, 46, 44, 2, yes, no, 0, no) \ + SC(170, 46, 44, 3, yes, no, 0, no) \ + SC(171, 46, 44, 4, yes, no, 0, no) \ + \ + SC(172, 47, 45, 1, yes, no, 0, no) \ + SC(173, 47, 45, 2, yes, no, 0, no) \ + SC(174, 47, 45, 3, yes, no, 0, no) \ + SC(175, 47, 45, 4, yes, no, 0, no) \ + \ + SC(176, 48, 46, 1, yes, no, 0, no) \ + SC(177, 48, 46, 2, yes, no, 0, no) \ + SC(178, 48, 46, 3, yes, no, 0, no) \ + SC(179, 48, 46, 4, yes, no, 0, no) \ + \ + SC(180, 49, 47, 1, yes, no, 0, no) \ + SC(181, 49, 47, 2, yes, no, 0, no) \ + SC(182, 49, 47, 3, yes, no, 0, no) \ + SC(183, 49, 47, 4, yes, no, 0, no) \ + \ + SC(184, 50, 48, 1, yes, no, 0, no) \ + SC(185, 50, 48, 2, yes, no, 0, no) \ + SC(186, 50, 48, 3, yes, no, 0, no) \ + SC(187, 50, 48, 4, yes, no, 0, no) \ + \ + SC(188, 51, 49, 1, yes, no, 0, no) \ + SC(189, 51, 49, 2, yes, no, 0, no) \ + SC(190, 51, 49, 3, yes, no, 0, no) \ + SC(191, 51, 49, 4, yes, no, 0, no) \ + \ + SC(192, 52, 50, 1, yes, no, 0, no) \ + SC(193, 52, 50, 2, yes, no, 0, no) \ + SC(194, 52, 50, 3, yes, no, 0, no) \ + SC(195, 52, 50, 4, yes, no, 0, no) \ + \ + SC(196, 53, 51, 1, yes, no, 0, no) \ + SC(197, 53, 51, 2, yes, no, 0, no) \ + SC(198, 53, 51, 3, yes, no, 0, no) \ + SC(199, 53, 51, 4, yes, no, 0, no) \ + \ + SC(200, 54, 52, 1, yes, no, 0, no) \ + SC(201, 54, 52, 2, yes, no, 0, no) \ + SC(202, 54, 52, 3, yes, no, 0, no) \ + SC(203, 54, 52, 4, yes, no, 0, no) \ + \ + SC(204, 55, 53, 1, yes, no, 0, no) \ + SC(205, 55, 53, 2, yes, no, 0, no) \ + SC(206, 55, 53, 3, yes, no, 0, no) \ + SC(207, 55, 53, 4, yes, no, 0, no) \ + \ + SC(208, 56, 54, 1, yes, no, 0, no) \ + SC(209, 56, 54, 2, yes, no, 0, no) \ + SC(210, 56, 54, 3, yes, no, 0, no) \ + SC(211, 56, 54, 4, yes, no, 0, no) \ + \ + SC(212, 57, 55, 1, yes, no, 0, no) \ + SC(213, 57, 55, 2, yes, no, 0, no) \ + SC(214, 57, 55, 3, yes, no, 0, no) \ + SC(215, 57, 55, 4, yes, no, 0, no) \ + \ + SC(216, 58, 56, 1, yes, no, 0, no) \ + SC(217, 58, 56, 2, yes, no, 0, no) \ + SC(218, 58, 56, 3, yes, no, 0, no) \ + SC(219, 58, 56, 4, yes, no, 0, no) \ + \ + SC(220, 59, 57, 1, yes, no, 0, no) \ + SC(221, 59, 57, 2, yes, no, 0, no) \ + SC(222, 59, 57, 3, yes, no, 0, no) \ + SC(223, 59, 57, 4, yes, no, 0, no) \ + \ + SC(224, 60, 58, 1, yes, no, 0, no) \ + SC(225, 60, 58, 2, yes, no, 0, no) \ + SC(226, 60, 58, 3, yes, no, 0, no) \ + SC(227, 60, 58, 4, yes, no, 0, no) \ + \ + SC(228, 61, 59, 1, yes, no, 0, no) \ + SC(229, 61, 59, 2, yes, no, 0, no) \ + SC(230, 61, 59, 3, yes, no, 0, no) \ + SC(231, 61, 59, 4, yes, no, 0, no) \ + \ + SC(232, 62, 60, 
1, yes, no, 0, no) \ + SC(233, 62, 60, 2, yes, no, 0, no) \ + SC(234, 62, 60, 3, yes, no, 0, no) \ -#define NBINS 35 -#define SMALL_MAXCLASS 14336 +#define SIZE_CLASSES_DEFINED +#define NTBINS 0 +#define NLBINS 32 +#define NBINS 39 +#define NSIZES 235 +#define LG_CEIL_NSIZES 8 +#define NPSIZES 199 +#define LG_TINY_MAXCLASS "NA" +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 15) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ - SIZE_CLASS(27, 512, 4096) \ - SIZE_CLASS(28, 1024, 5120) \ - SIZE_CLASS(29, 1024, 6144) \ - SIZE_CLASS(30, 1024, 7168) \ - SIZE_CLASS(31, 1024, 8192) \ - SIZE_CLASS(32, 2048, 10240) \ - SIZE_CLASS(33, 2048, 12288) \ - SIZE_CLASS(34, 2048, 14336) \ - SIZE_CLASS(35, 2048, 16384) \ - SIZE_CLASS(36, 4096, 20480) \ - SIZE_CLASS(37, 4096, 24576) \ - SIZE_CLASS(38, 4096, 28672) \ +#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 3 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 3, 3, 0, no, yes, 1, 3) \ + \ + SC( 1, 3, 3, 1, no, yes, 1, 3) \ + SC( 2, 4, 4, 1, no, yes, 1, 4) \ + SC( 3, 4, 4, 2, no, yes, 3, 4) \ + SC( 4, 4, 4, 3, no, yes, 1, 4) \ + \ + SC( 5, 6, 4, 1, no, yes, 5, 4) \ + SC( 6, 6, 4, 2, no, yes, 3, 4) \ + SC( 7, 6, 4, 3, no, yes, 7, 4) \ + SC( 8, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 9, 7, 5, 1, no, yes, 5, 5) \ + SC( 10, 7, 5, 2, no, yes, 3, 5) \ + SC( 11, 7, 5, 3, no, yes, 7, 5) \ + SC( 12, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 13, 8, 6, 1, no, yes, 5, 6) \ + SC( 14, 8, 6, 2, no, yes, 3, 6) \ + SC( 15, 8, 6, 3, no, yes, 7, 6) \ + SC( 16, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 17, 9, 7, 1, no, yes, 5, 7) \ + SC( 18, 9, 7, 2, no, yes, 3, 7) \ + SC( 19, 9, 7, 3, no, yes, 7, 7) \ + SC( 20, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 21, 10, 8, 1, no, yes, 5, 8) \ + SC( 22, 10, 8, 2, no, yes, 3, 8) \ + SC( 23, 10, 8, 3, no, yes, 7, 8) \ + SC( 24, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 25, 11, 9, 1, no, yes, 5, 9) \ + SC( 26, 11, 9, 2, no, yes, 3, 9) \ + SC( 27, 11, 9, 3, no, yes, 7, 9) \ + SC( 28, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 29, 12, 10, 1, no, yes, 5, no) \ + SC( 30, 12, 10, 2, no, yes, 3, no) \ + SC( 31, 12, 10, 3, no, yes, 7, no) \ + SC( 32, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 33, 13, 11, 1, no, yes, 5, no) \ + SC( 34, 13, 11, 2, yes, yes, 3, no) \ + SC( 35, 13, 11, 3, no, yes, 7, no) \ + SC( 36, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 37, 14, 12, 1, yes, no, 0, no) \ + SC( 38, 14, 12, 2, yes, no, 
0, no) \ + SC( 39, 14, 12, 3, yes, no, 0, no) \ + SC( 40, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 41, 15, 13, 1, yes, no, 0, no) \ + SC( 42, 15, 13, 2, yes, no, 0, no) \ + SC( 43, 15, 13, 3, yes, no, 0, no) \ + SC( 44, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 45, 16, 14, 1, yes, no, 0, no) \ + SC( 46, 16, 14, 2, yes, no, 0, no) \ + SC( 47, 16, 14, 3, yes, no, 0, no) \ + SC( 48, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 49, 17, 15, 1, yes, no, 0, no) \ + SC( 50, 17, 15, 2, yes, no, 0, no) \ + SC( 51, 17, 15, 3, yes, no, 0, no) \ + SC( 52, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 53, 18, 16, 1, yes, no, 0, no) \ + SC( 54, 18, 16, 2, yes, no, 0, no) \ + SC( 55, 18, 16, 3, yes, no, 0, no) \ + SC( 56, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 57, 19, 17, 1, yes, no, 0, no) \ + SC( 58, 19, 17, 2, yes, no, 0, no) \ + SC( 59, 19, 17, 3, yes, no, 0, no) \ + SC( 60, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 61, 20, 18, 1, yes, no, 0, no) \ + SC( 62, 20, 18, 2, yes, no, 0, no) \ + SC( 63, 20, 18, 3, yes, no, 0, no) \ + SC( 64, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 65, 21, 19, 1, yes, no, 0, no) \ + SC( 66, 21, 19, 2, yes, no, 0, no) \ + SC( 67, 21, 19, 3, yes, no, 0, no) \ + SC( 68, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 69, 22, 20, 1, yes, no, 0, no) \ + SC( 70, 22, 20, 2, yes, no, 0, no) \ + SC( 71, 22, 20, 3, yes, no, 0, no) \ + SC( 72, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 73, 23, 21, 1, yes, no, 0, no) \ + SC( 74, 23, 21, 2, yes, no, 0, no) \ + SC( 75, 23, 21, 3, yes, no, 0, no) \ + SC( 76, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 77, 24, 22, 1, yes, no, 0, no) \ + SC( 78, 24, 22, 2, yes, no, 0, no) \ + SC( 79, 24, 22, 3, yes, no, 0, no) \ + SC( 80, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 81, 25, 23, 1, yes, no, 0, no) \ + SC( 82, 25, 23, 2, yes, no, 0, no) \ + SC( 83, 25, 23, 3, yes, no, 0, no) \ + SC( 84, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 85, 26, 24, 1, yes, no, 0, no) \ + SC( 86, 26, 24, 2, yes, no, 0, no) \ + SC( 87, 26, 24, 3, yes, no, 0, no) \ + SC( 88, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 89, 27, 25, 1, yes, no, 0, no) \ + SC( 90, 27, 25, 2, yes, no, 0, no) \ + SC( 91, 27, 25, 3, yes, no, 0, no) \ + SC( 92, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 93, 28, 26, 1, yes, no, 0, no) \ + SC( 94, 28, 26, 2, yes, no, 0, no) \ + SC( 95, 28, 26, 3, yes, no, 0, no) \ + SC( 96, 28, 26, 4, yes, no, 0, no) \ + \ + SC( 97, 29, 27, 1, yes, no, 0, no) \ + SC( 98, 29, 27, 2, yes, no, 0, no) \ + SC( 99, 29, 27, 3, yes, no, 0, no) \ + SC(100, 29, 27, 4, yes, no, 0, no) \ + \ + SC(101, 30, 28, 1, yes, no, 0, no) \ + SC(102, 30, 28, 2, yes, no, 0, no) \ + SC(103, 30, 28, 3, yes, no, 0, no) \ + SC(104, 30, 28, 4, yes, no, 0, no) \ + \ + SC(105, 31, 29, 1, yes, no, 0, no) \ + SC(106, 31, 29, 2, yes, no, 0, no) \ + SC(107, 31, 29, 3, yes, no, 0, no) \ + SC(108, 31, 29, 4, yes, no, 0, no) \ + \ + SC(109, 32, 30, 1, yes, no, 0, no) \ + SC(110, 32, 30, 2, yes, no, 0, no) \ + SC(111, 32, 30, 3, yes, no, 0, no) \ + SC(112, 32, 30, 4, yes, no, 0, no) \ + \ + SC(113, 33, 31, 1, yes, no, 0, no) \ + SC(114, 33, 31, 2, yes, no, 0, no) \ + SC(115, 33, 31, 3, yes, no, 0, no) \ + SC(116, 33, 31, 4, yes, no, 0, no) \ + \ + SC(117, 34, 32, 1, yes, no, 0, no) \ + SC(118, 34, 32, 2, yes, no, 0, no) \ + SC(119, 34, 32, 3, yes, no, 0, no) \ + SC(120, 34, 32, 4, yes, no, 0, no) \ + \ + SC(121, 35, 33, 1, yes, no, 0, no) \ + SC(122, 35, 33, 2, yes, no, 0, no) \ + SC(123, 35, 33, 3, yes, no, 0, no) \ + SC(124, 35, 33, 4, yes, no, 0, no) \ + \ + SC(125, 36, 34, 1, yes, no, 0, no) \ + SC(126, 36, 34, 2, yes, no, 0, no) \ + SC(127, 36, 34, 3, yes, 
no, 0, no) \ + SC(128, 36, 34, 4, yes, no, 0, no) \ + \ + SC(129, 37, 35, 1, yes, no, 0, no) \ + SC(130, 37, 35, 2, yes, no, 0, no) \ + SC(131, 37, 35, 3, yes, no, 0, no) \ + SC(132, 37, 35, 4, yes, no, 0, no) \ + \ + SC(133, 38, 36, 1, yes, no, 0, no) \ + SC(134, 38, 36, 2, yes, no, 0, no) \ + SC(135, 38, 36, 3, yes, no, 0, no) \ + SC(136, 38, 36, 4, yes, no, 0, no) \ + \ + SC(137, 39, 37, 1, yes, no, 0, no) \ + SC(138, 39, 37, 2, yes, no, 0, no) \ + SC(139, 39, 37, 3, yes, no, 0, no) \ + SC(140, 39, 37, 4, yes, no, 0, no) \ + \ + SC(141, 40, 38, 1, yes, no, 0, no) \ + SC(142, 40, 38, 2, yes, no, 0, no) \ + SC(143, 40, 38, 3, yes, no, 0, no) \ + SC(144, 40, 38, 4, yes, no, 0, no) \ + \ + SC(145, 41, 39, 1, yes, no, 0, no) \ + SC(146, 41, 39, 2, yes, no, 0, no) \ + SC(147, 41, 39, 3, yes, no, 0, no) \ + SC(148, 41, 39, 4, yes, no, 0, no) \ + \ + SC(149, 42, 40, 1, yes, no, 0, no) \ + SC(150, 42, 40, 2, yes, no, 0, no) \ + SC(151, 42, 40, 3, yes, no, 0, no) \ + SC(152, 42, 40, 4, yes, no, 0, no) \ + \ + SC(153, 43, 41, 1, yes, no, 0, no) \ + SC(154, 43, 41, 2, yes, no, 0, no) \ + SC(155, 43, 41, 3, yes, no, 0, no) \ + SC(156, 43, 41, 4, yes, no, 0, no) \ + \ + SC(157, 44, 42, 1, yes, no, 0, no) \ + SC(158, 44, 42, 2, yes, no, 0, no) \ + SC(159, 44, 42, 3, yes, no, 0, no) \ + SC(160, 44, 42, 4, yes, no, 0, no) \ + \ + SC(161, 45, 43, 1, yes, no, 0, no) \ + SC(162, 45, 43, 2, yes, no, 0, no) \ + SC(163, 45, 43, 3, yes, no, 0, no) \ + SC(164, 45, 43, 4, yes, no, 0, no) \ + \ + SC(165, 46, 44, 1, yes, no, 0, no) \ + SC(166, 46, 44, 2, yes, no, 0, no) \ + SC(167, 46, 44, 3, yes, no, 0, no) \ + SC(168, 46, 44, 4, yes, no, 0, no) \ + \ + SC(169, 47, 45, 1, yes, no, 0, no) \ + SC(170, 47, 45, 2, yes, no, 0, no) \ + SC(171, 47, 45, 3, yes, no, 0, no) \ + SC(172, 47, 45, 4, yes, no, 0, no) \ + \ + SC(173, 48, 46, 1, yes, no, 0, no) \ + SC(174, 48, 46, 2, yes, no, 0, no) \ + SC(175, 48, 46, 3, yes, no, 0, no) \ + SC(176, 48, 46, 4, yes, no, 0, no) \ + \ + SC(177, 49, 47, 1, yes, no, 0, no) \ + SC(178, 49, 47, 2, yes, no, 0, no) \ + SC(179, 49, 47, 3, yes, no, 0, no) \ + SC(180, 49, 47, 4, yes, no, 0, no) \ + \ + SC(181, 50, 48, 1, yes, no, 0, no) \ + SC(182, 50, 48, 2, yes, no, 0, no) \ + SC(183, 50, 48, 3, yes, no, 0, no) \ + SC(184, 50, 48, 4, yes, no, 0, no) \ + \ + SC(185, 51, 49, 1, yes, no, 0, no) \ + SC(186, 51, 49, 2, yes, no, 0, no) \ + SC(187, 51, 49, 3, yes, no, 0, no) \ + SC(188, 51, 49, 4, yes, no, 0, no) \ + \ + SC(189, 52, 50, 1, yes, no, 0, no) \ + SC(190, 52, 50, 2, yes, no, 0, no) \ + SC(191, 52, 50, 3, yes, no, 0, no) \ + SC(192, 52, 50, 4, yes, no, 0, no) \ + \ + SC(193, 53, 51, 1, yes, no, 0, no) \ + SC(194, 53, 51, 2, yes, no, 0, no) \ + SC(195, 53, 51, 3, yes, no, 0, no) \ + SC(196, 53, 51, 4, yes, no, 0, no) \ + \ + SC(197, 54, 52, 1, yes, no, 0, no) \ + SC(198, 54, 52, 2, yes, no, 0, no) \ + SC(199, 54, 52, 3, yes, no, 0, no) \ + SC(200, 54, 52, 4, yes, no, 0, no) \ + \ + SC(201, 55, 53, 1, yes, no, 0, no) \ + SC(202, 55, 53, 2, yes, no, 0, no) \ + SC(203, 55, 53, 3, yes, no, 0, no) \ + SC(204, 55, 53, 4, yes, no, 0, no) \ + \ + SC(205, 56, 54, 1, yes, no, 0, no) \ + SC(206, 56, 54, 2, yes, no, 0, no) \ + SC(207, 56, 54, 3, yes, no, 0, no) \ + SC(208, 56, 54, 4, yes, no, 0, no) \ + \ + SC(209, 57, 55, 1, yes, no, 0, no) \ + SC(210, 57, 55, 2, yes, no, 0, no) \ + SC(211, 57, 55, 3, yes, no, 0, no) \ + SC(212, 57, 55, 4, yes, no, 0, no) \ + \ + SC(213, 58, 56, 1, yes, no, 0, no) \ + SC(214, 58, 56, 2, yes, no, 0, no) \ + SC(215, 58, 56, 3, yes, no, 0, no) \ + SC(216, 58, 56, 4, 
yes, no, 0, no) \ + \ + SC(217, 59, 57, 1, yes, no, 0, no) \ + SC(218, 59, 57, 2, yes, no, 0, no) \ + SC(219, 59, 57, 3, yes, no, 0, no) \ + SC(220, 59, 57, 4, yes, no, 0, no) \ + \ + SC(221, 60, 58, 1, yes, no, 0, no) \ + SC(222, 60, 58, 2, yes, no, 0, no) \ + SC(223, 60, 58, 3, yes, no, 0, no) \ + SC(224, 60, 58, 4, yes, no, 0, no) \ + \ + SC(225, 61, 59, 1, yes, no, 0, no) \ + SC(226, 61, 59, 2, yes, no, 0, no) \ + SC(227, 61, 59, 3, yes, no, 0, no) \ + SC(228, 61, 59, 4, yes, no, 0, no) \ + \ + SC(229, 62, 60, 1, yes, no, 0, no) \ + SC(230, 62, 60, 2, yes, no, 0, no) \ + SC(231, 62, 60, 3, yes, no, 0, no) \ -#define NBINS 39 -#define SMALL_MAXCLASS 28672 +#define SIZE_CLASSES_DEFINED +#define NTBINS 1 +#define NLBINS 29 +#define NBINS 36 +#define NSIZES 232 +#define LG_CEIL_NSIZES 8 +#define NPSIZES 199 +#define LG_TINY_MAXCLASS 3 +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60)) #endif -#if (LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 16) -#define SIZE_CLASSES_DEFINED -/* SIZE_CLASS(bin, delta, sz) */ -#define SIZE_CLASSES \ - SIZE_CLASS(0, 16, 16) \ - SIZE_CLASS(1, 16, 32) \ - SIZE_CLASS(2, 16, 48) \ - SIZE_CLASS(3, 16, 64) \ - SIZE_CLASS(4, 16, 80) \ - SIZE_CLASS(5, 16, 96) \ - SIZE_CLASS(6, 16, 112) \ - SIZE_CLASS(7, 16, 128) \ - SIZE_CLASS(8, 32, 160) \ - SIZE_CLASS(9, 32, 192) \ - SIZE_CLASS(10, 32, 224) \ - SIZE_CLASS(11, 32, 256) \ - SIZE_CLASS(12, 64, 320) \ - SIZE_CLASS(13, 64, 384) \ - SIZE_CLASS(14, 64, 448) \ - SIZE_CLASS(15, 64, 512) \ - SIZE_CLASS(16, 128, 640) \ - SIZE_CLASS(17, 128, 768) \ - SIZE_CLASS(18, 128, 896) \ - SIZE_CLASS(19, 128, 1024) \ - SIZE_CLASS(20, 256, 1280) \ - SIZE_CLASS(21, 256, 1536) \ - SIZE_CLASS(22, 256, 1792) \ - SIZE_CLASS(23, 256, 2048) \ - SIZE_CLASS(24, 512, 2560) \ - SIZE_CLASS(25, 512, 3072) \ - SIZE_CLASS(26, 512, 3584) \ - SIZE_CLASS(27, 512, 4096) \ - SIZE_CLASS(28, 1024, 5120) \ - SIZE_CLASS(29, 1024, 6144) \ - SIZE_CLASS(30, 1024, 7168) \ - SIZE_CLASS(31, 1024, 8192) \ - SIZE_CLASS(32, 2048, 10240) \ - SIZE_CLASS(33, 2048, 12288) \ - SIZE_CLASS(34, 2048, 14336) \ - SIZE_CLASS(35, 2048, 16384) \ - SIZE_CLASS(36, 4096, 20480) \ - SIZE_CLASS(37, 4096, 24576) \ - SIZE_CLASS(38, 4096, 28672) \ - SIZE_CLASS(39, 4096, 32768) \ - SIZE_CLASS(40, 8192, 40960) \ - SIZE_CLASS(41, 8192, 49152) \ - SIZE_CLASS(42, 8192, 57344) \ +#if (LG_SIZEOF_PTR == 3 && LG_TINY_MIN == 4 && LG_QUANTUM == 4 && LG_PAGE == 12) +#define SIZE_CLASSES \ + /* index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup */ \ + SC( 0, 4, 4, 0, no, yes, 1, 4) \ + SC( 1, 4, 4, 1, no, yes, 1, 4) \ + SC( 2, 4, 4, 2, no, yes, 3, 4) \ + SC( 3, 4, 4, 3, no, yes, 1, 4) \ + \ + SC( 4, 6, 4, 1, no, yes, 5, 4) \ + SC( 5, 6, 4, 2, no, yes, 3, 4) \ + SC( 6, 6, 4, 3, no, yes, 7, 4) \ + SC( 7, 6, 4, 4, no, yes, 1, 4) \ + \ + SC( 8, 7, 5, 1, no, yes, 5, 5) \ + SC( 9, 7, 5, 2, no, yes, 3, 5) \ + SC( 10, 7, 5, 3, no, yes, 7, 5) \ + SC( 11, 7, 5, 4, no, yes, 1, 5) \ + \ + SC( 12, 8, 6, 1, no, yes, 5, 6) \ + SC( 13, 8, 6, 2, no, yes, 3, 6) \ + SC( 14, 8, 6, 3, no, yes, 7, 6) \ + SC( 15, 8, 6, 4, no, yes, 1, 6) \ + \ + SC( 16, 9, 7, 1, no, yes, 5, 7) \ + SC( 17, 9, 7, 2, no, yes, 3, 7) \ + SC( 18, 9, 7, 3, no, yes, 7, 7) \ + SC( 19, 9, 7, 4, no, yes, 1, 7) \ + \ + SC( 20, 10, 8, 1, no, yes, 5, 8) \ + SC( 21, 10, 8, 2, no, yes, 3, 8) \ + SC( 22, 10, 8, 
3, no, yes, 7, 8) \ + SC( 23, 10, 8, 4, no, yes, 1, 8) \ + \ + SC( 24, 11, 9, 1, no, yes, 5, 9) \ + SC( 25, 11, 9, 2, no, yes, 3, 9) \ + SC( 26, 11, 9, 3, no, yes, 7, 9) \ + SC( 27, 11, 9, 4, yes, yes, 1, 9) \ + \ + SC( 28, 12, 10, 1, no, yes, 5, no) \ + SC( 29, 12, 10, 2, no, yes, 3, no) \ + SC( 30, 12, 10, 3, no, yes, 7, no) \ + SC( 31, 12, 10, 4, yes, yes, 2, no) \ + \ + SC( 32, 13, 11, 1, no, yes, 5, no) \ + SC( 33, 13, 11, 2, yes, yes, 3, no) \ + SC( 34, 13, 11, 3, no, yes, 7, no) \ + SC( 35, 13, 11, 4, yes, no, 0, no) \ + \ + SC( 36, 14, 12, 1, yes, no, 0, no) \ + SC( 37, 14, 12, 2, yes, no, 0, no) \ + SC( 38, 14, 12, 3, yes, no, 0, no) \ + SC( 39, 14, 12, 4, yes, no, 0, no) \ + \ + SC( 40, 15, 13, 1, yes, no, 0, no) \ + SC( 41, 15, 13, 2, yes, no, 0, no) \ + SC( 42, 15, 13, 3, yes, no, 0, no) \ + SC( 43, 15, 13, 4, yes, no, 0, no) \ + \ + SC( 44, 16, 14, 1, yes, no, 0, no) \ + SC( 45, 16, 14, 2, yes, no, 0, no) \ + SC( 46, 16, 14, 3, yes, no, 0, no) \ + SC( 47, 16, 14, 4, yes, no, 0, no) \ + \ + SC( 48, 17, 15, 1, yes, no, 0, no) \ + SC( 49, 17, 15, 2, yes, no, 0, no) \ + SC( 50, 17, 15, 3, yes, no, 0, no) \ + SC( 51, 17, 15, 4, yes, no, 0, no) \ + \ + SC( 52, 18, 16, 1, yes, no, 0, no) \ + SC( 53, 18, 16, 2, yes, no, 0, no) \ + SC( 54, 18, 16, 3, yes, no, 0, no) \ + SC( 55, 18, 16, 4, yes, no, 0, no) \ + \ + SC( 56, 19, 17, 1, yes, no, 0, no) \ + SC( 57, 19, 17, 2, yes, no, 0, no) \ + SC( 58, 19, 17, 3, yes, no, 0, no) \ + SC( 59, 19, 17, 4, yes, no, 0, no) \ + \ + SC( 60, 20, 18, 1, yes, no, 0, no) \ + SC( 61, 20, 18, 2, yes, no, 0, no) \ + SC( 62, 20, 18, 3, yes, no, 0, no) \ + SC( 63, 20, 18, 4, yes, no, 0, no) \ + \ + SC( 64, 21, 19, 1, yes, no, 0, no) \ + SC( 65, 21, 19, 2, yes, no, 0, no) \ + SC( 66, 21, 19, 3, yes, no, 0, no) \ + SC( 67, 21, 19, 4, yes, no, 0, no) \ + \ + SC( 68, 22, 20, 1, yes, no, 0, no) \ + SC( 69, 22, 20, 2, yes, no, 0, no) \ + SC( 70, 22, 20, 3, yes, no, 0, no) \ + SC( 71, 22, 20, 4, yes, no, 0, no) \ + \ + SC( 72, 23, 21, 1, yes, no, 0, no) \ + SC( 73, 23, 21, 2, yes, no, 0, no) \ + SC( 74, 23, 21, 3, yes, no, 0, no) \ + SC( 75, 23, 21, 4, yes, no, 0, no) \ + \ + SC( 76, 24, 22, 1, yes, no, 0, no) \ + SC( 77, 24, 22, 2, yes, no, 0, no) \ + SC( 78, 24, 22, 3, yes, no, 0, no) \ + SC( 79, 24, 22, 4, yes, no, 0, no) \ + \ + SC( 80, 25, 23, 1, yes, no, 0, no) \ + SC( 81, 25, 23, 2, yes, no, 0, no) \ + SC( 82, 25, 23, 3, yes, no, 0, no) \ + SC( 83, 25, 23, 4, yes, no, 0, no) \ + \ + SC( 84, 26, 24, 1, yes, no, 0, no) \ + SC( 85, 26, 24, 2, yes, no, 0, no) \ + SC( 86, 26, 24, 3, yes, no, 0, no) \ + SC( 87, 26, 24, 4, yes, no, 0, no) \ + \ + SC( 88, 27, 25, 1, yes, no, 0, no) \ + SC( 89, 27, 25, 2, yes, no, 0, no) \ + SC( 90, 27, 25, 3, yes, no, 0, no) \ + SC( 91, 27, 25, 4, yes, no, 0, no) \ + \ + SC( 92, 28, 26, 1, yes, no, 0, no) \ + SC( 93, 28, 26, 2, yes, no, 0, no) \ + SC( 94, 28, 26, 3, yes, no, 0, no) \ + SC( 95, 28, 26, 4, yes, no, 0, no) \ + \ + SC( 96, 29, 27, 1, yes, no, 0, no) \ + SC( 97, 29, 27, 2, yes, no, 0, no) \ + SC( 98, 29, 27, 3, yes, no, 0, no) \ + SC( 99, 29, 27, 4, yes, no, 0, no) \ + \ + SC(100, 30, 28, 1, yes, no, 0, no) \ + SC(101, 30, 28, 2, yes, no, 0, no) \ + SC(102, 30, 28, 3, yes, no, 0, no) \ + SC(103, 30, 28, 4, yes, no, 0, no) \ + \ + SC(104, 31, 29, 1, yes, no, 0, no) \ + SC(105, 31, 29, 2, yes, no, 0, no) \ + SC(106, 31, 29, 3, yes, no, 0, no) \ + SC(107, 31, 29, 4, yes, no, 0, no) \ + \ + SC(108, 32, 30, 1, yes, no, 0, no) \ + SC(109, 32, 30, 2, yes, no, 0, no) \ + SC(110, 32, 30, 3, yes, no, 0, no) \ + SC(111, 32, 30, 4, 
yes, no, 0, no) \ + \ + SC(112, 33, 31, 1, yes, no, 0, no) \ + SC(113, 33, 31, 2, yes, no, 0, no) \ + SC(114, 33, 31, 3, yes, no, 0, no) \ + SC(115, 33, 31, 4, yes, no, 0, no) \ + \ + SC(116, 34, 32, 1, yes, no, 0, no) \ + SC(117, 34, 32, 2, yes, no, 0, no) \ + SC(118, 34, 32, 3, yes, no, 0, no) \ + SC(119, 34, 32, 4, yes, no, 0, no) \ + \ + SC(120, 35, 33, 1, yes, no, 0, no) \ + SC(121, 35, 33, 2, yes, no, 0, no) \ + SC(122, 35, 33, 3, yes, no, 0, no) \ + SC(123, 35, 33, 4, yes, no, 0, no) \ + \ + SC(124, 36, 34, 1, yes, no, 0, no) \ + SC(125, 36, 34, 2, yes, no, 0, no) \ + SC(126, 36, 34, 3, yes, no, 0, no) \ + SC(127, 36, 34, 4, yes, no, 0, no) \ + \ + SC(128, 37, 35, 1, yes, no, 0, no) \ + SC(129, 37, 35, 2, yes, no, 0, no) \ + SC(130, 37, 35, 3, yes, no, 0, no) \ + SC(131, 37, 35, 4, yes, no, 0, no) \ + \ + SC(132, 38, 36, 1, yes, no, 0, no) \ + SC(133, 38, 36, 2, yes, no, 0, no) \ + SC(134, 38, 36, 3, yes, no, 0, no) \ + SC(135, 38, 36, 4, yes, no, 0, no) \ + \ + SC(136, 39, 37, 1, yes, no, 0, no) \ + SC(137, 39, 37, 2, yes, no, 0, no) \ + SC(138, 39, 37, 3, yes, no, 0, no) \ + SC(139, 39, 37, 4, yes, no, 0, no) \ + \ + SC(140, 40, 38, 1, yes, no, 0, no) \ + SC(141, 40, 38, 2, yes, no, 0, no) \ + SC(142, 40, 38, 3, yes, no, 0, no) \ + SC(143, 40, 38, 4, yes, no, 0, no) \ + \ + SC(144, 41, 39, 1, yes, no, 0, no) \ + SC(145, 41, 39, 2, yes, no, 0, no) \ + SC(146, 41, 39, 3, yes, no, 0, no) \ + SC(147, 41, 39, 4, yes, no, 0, no) \ + \ + SC(148, 42, 40, 1, yes, no, 0, no) \ + SC(149, 42, 40, 2, yes, no, 0, no) \ + SC(150, 42, 40, 3, yes, no, 0, no) \ + SC(151, 42, 40, 4, yes, no, 0, no) \ + \ + SC(152, 43, 41, 1, yes, no, 0, no) \ + SC(153, 43, 41, 2, yes, no, 0, no) \ + SC(154, 43, 41, 3, yes, no, 0, no) \ + SC(155, 43, 41, 4, yes, no, 0, no) \ + \ + SC(156, 44, 42, 1, yes, no, 0, no) \ + SC(157, 44, 42, 2, yes, no, 0, no) \ + SC(158, 44, 42, 3, yes, no, 0, no) \ + SC(159, 44, 42, 4, yes, no, 0, no) \ + \ + SC(160, 45, 43, 1, yes, no, 0, no) \ + SC(161, 45, 43, 2, yes, no, 0, no) \ + SC(162, 45, 43, 3, yes, no, 0, no) \ + SC(163, 45, 43, 4, yes, no, 0, no) \ + \ + SC(164, 46, 44, 1, yes, no, 0, no) \ + SC(165, 46, 44, 2, yes, no, 0, no) \ + SC(166, 46, 44, 3, yes, no, 0, no) \ + SC(167, 46, 44, 4, yes, no, 0, no) \ + \ + SC(168, 47, 45, 1, yes, no, 0, no) \ + SC(169, 47, 45, 2, yes, no, 0, no) \ + SC(170, 47, 45, 3, yes, no, 0, no) \ + SC(171, 47, 45, 4, yes, no, 0, no) \ + \ + SC(172, 48, 46, 1, yes, no, 0, no) \ + SC(173, 48, 46, 2, yes, no, 0, no) \ + SC(174, 48, 46, 3, yes, no, 0, no) \ + SC(175, 48, 46, 4, yes, no, 0, no) \ + \ + SC(176, 49, 47, 1, yes, no, 0, no) \ + SC(177, 49, 47, 2, yes, no, 0, no) \ + SC(178, 49, 47, 3, yes, no, 0, no) \ + SC(179, 49, 47, 4, yes, no, 0, no) \ + \ + SC(180, 50, 48, 1, yes, no, 0, no) \ + SC(181, 50, 48, 2, yes, no, 0, no) \ + SC(182, 50, 48, 3, yes, no, 0, no) \ + SC(183, 50, 48, 4, yes, no, 0, no) \ + \ + SC(184, 51, 49, 1, yes, no, 0, no) \ + SC(185, 51, 49, 2, yes, no, 0, no) \ + SC(186, 51, 49, 3, yes, no, 0, no) \ + SC(187, 51, 49, 4, yes, no, 0, no) \ + \ + SC(188, 52, 50, 1, yes, no, 0, no) \ + SC(189, 52, 50, 2, yes, no, 0, no) \ + SC(190, 52, 50, 3, yes, no, 0, no) \ + SC(191, 52, 50, 4, yes, no, 0, no) \ + \ + SC(192, 53, 51, 1, yes, no, 0, no) \ + SC(193, 53, 51, 2, yes, no, 0, no) \ + SC(194, 53, 51, 3, yes, no, 0, no) \ + SC(195, 53, 51, 4, yes, no, 0, no) \ + \ + SC(196, 54, 52, 1, yes, no, 0, no) \ + SC(197, 54, 52, 2, yes, no, 0, no) \ + SC(198, 54, 52, 3, yes, no, 0, no) \ + SC(199, 54, 52, 4, yes, no, 0, no) \ + \ + SC(200, 
55, 53, 1, yes, no, 0, no) \ + SC(201, 55, 53, 2, yes, no, 0, no) \ + SC(202, 55, 53, 3, yes, no, 0, no) \ + SC(203, 55, 53, 4, yes, no, 0, no) \ + \ + SC(204, 56, 54, 1, yes, no, 0, no) \ + SC(205, 56, 54, 2, yes, no, 0, no) \ + SC(206, 56, 54, 3, yes, no, 0, no) \ + SC(207, 56, 54, 4, yes, no, 0, no) \ + \ + SC(208, 57, 55, 1, yes, no, 0, no) \ + SC(209, 57, 55, 2, yes, no, 0, no) \ + SC(210, 57, 55, 3, yes, no, 0, no) \ + SC(211, 57, 55, 4, yes, no, 0, no) \ + \ + SC(212, 58, 56, 1, yes, no, 0, no) \ + SC(213, 58, 56, 2, yes, no, 0, no) \ + SC(214, 58, 56, 3, yes, no, 0, no) \ + SC(215, 58, 56, 4, yes, no, 0, no) \ + \ + SC(216, 59, 57, 1, yes, no, 0, no) \ + SC(217, 59, 57, 2, yes, no, 0, no) \ + SC(218, 59, 57, 3, yes, no, 0, no) \ + SC(219, 59, 57, 4, yes, no, 0, no) \ + \ + SC(220, 60, 58, 1, yes, no, 0, no) \ + SC(221, 60, 58, 2, yes, no, 0, no) \ + SC(222, 60, 58, 3, yes, no, 0, no) \ + SC(223, 60, 58, 4, yes, no, 0, no) \ + \ + SC(224, 61, 59, 1, yes, no, 0, no) \ + SC(225, 61, 59, 2, yes, no, 0, no) \ + SC(226, 61, 59, 3, yes, no, 0, no) \ + SC(227, 61, 59, 4, yes, no, 0, no) \ + \ + SC(228, 62, 60, 1, yes, no, 0, no) \ + SC(229, 62, 60, 2, yes, no, 0, no) \ + SC(230, 62, 60, 3, yes, no, 0, no) \ -#define NBINS 43 -#define SMALL_MAXCLASS 57344 +#define SIZE_CLASSES_DEFINED +#define NTBINS 0 +#define NLBINS 28 +#define NBINS 35 +#define NSIZES 231 +#define LG_CEIL_NSIZES 8 +#define NPSIZES 199 +#define LG_TINY_MAXCLASS "NA" +#define LOOKUP_MAXCLASS ((((size_t)1) << 11) + (((size_t)4) << 9)) +#define SMALL_MAXCLASS ((((size_t)1) << 13) + (((size_t)3) << 11)) +#define LG_LARGE_MINCLASS 14 +#define LARGE_MINCLASS (ZU(1) << LG_LARGE_MINCLASS) +#define LARGE_MAXCLASS ((((size_t)1) << 62) + (((size_t)3) << 60)) #endif #ifndef SIZE_CLASSES_DEFINED @@ -692,30 +1418,11 @@ #endif #undef SIZE_CLASSES_DEFINED /* - * The small_size2bin lookup table uses uint8_t to encode each bin index, so we - * cannot support more than 256 small size classes. Further constrain NBINS to - * 255 to support prof_promote, since all small size classes, plus a "not - * small" size class must be stored in 8 bits of arena_chunk_map_t's bits - * field. + * The size2index_tab lookup table uses uint8_t to encode each bin index, so we + * cannot support more than 256 small size classes. 
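+ * (For reference, the configurations above define at most NBINS 39, well
+ * within this limit.)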
*/ -#if (NBINS > 255) +#if (NBINS > 256) # error "Too many small size classes" #endif -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_SIZE_CLASSES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/smoothstep.h b/dep/jemalloc/include/jemalloc/internal/smoothstep.h new file mode 100644 index 00000000000..2e14430f5f1 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/smoothstep.h @@ -0,0 +1,232 @@ +#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H +#define JEMALLOC_INTERNAL_SMOOTHSTEP_H + +/* + * This file was generated by the following command: + * sh smoothstep.sh smoother 200 24 3 15 + */ +/******************************************************************************/ + +/* + * This header defines a precomputed table based on the smoothstep family of + * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 + * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so + * that floating point math can be avoided. + * + * 3 2 + * smoothstep(x) = -2x + 3x + * + * 5 4 3 + * smootherstep(x) = 6x - 15x + 10x + * + * 7 6 5 4 + * smootheststep(x) = -20x + 70x - 84x + 35x + */ + +#define SMOOTHSTEP_VARIANT "smoother" +#define SMOOTHSTEP_NSTEPS 200 +#define SMOOTHSTEP_BFP 24 +#define SMOOTHSTEP \ + /* STEP(step, h, x, y) */ \ + STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ + STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ + STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ + STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ + STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ + STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ + STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ + STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ + STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ + STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ + STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ + STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ + STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ + STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ + STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ + STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ + STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ + STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ + STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ + STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ + STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ + STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ + STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ + STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ + STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ + 
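Each h value in this table is simply y rounded to SMOOTHSTEP_BFP = 24 fractional bits; the table resumes after this note. The rounding is easy to verify at the midpoint entry, STEP(100) at x = 0.5 further down, where the "smoother" polynomial happens to evaluate exactly in binary floating point. A standalone sketch of that check:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        double x = 0.5;                    /* STEP(100) */
        double x3 = x * x * x;
        /* smootherstep(x) = 6x^5 - 15x^4 + 10x^3, in Horner form */
        double y = ((6 * x - 15) * x + 10) * x3;
        assert(y == 0.5);                  /* every term is dyadic, so exact */
        assert((uint64_t)(y * (1 << 24)) == UINT64_C(0x0000000000800000));
        return 0;
    }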
STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ + STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ + STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ + STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ + STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ + STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ + STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ + STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ + STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ + STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ + STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ + STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ + STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ + STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ + STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ + STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ + STEP( 42, UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ + STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ + STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ + STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ + STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ + STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ + STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ + STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ + STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ + STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ + STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ + STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ + STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ + STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ + STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ + STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ + STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ + STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ + STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ + STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ + STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ + STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ + STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ + STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ + STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ + STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ + STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ + STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ + STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ + STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ + STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ + STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ + STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ + STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ + STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 
0.283490700800000) \ + STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ + STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ + STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ + STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ + STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ + STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ + STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ + STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ + STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ + STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ + STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ + STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ + STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ + STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ + STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ + STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ + STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ + STEP( 94, UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ + STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ + STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ + STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ + STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ + STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ + STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ + STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ + STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ + STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ + STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ + STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ + STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ + STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ + STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ + STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ + STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ + STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ + STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ + STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ + STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ + STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ + STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ + STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ + STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ + STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ + STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ + STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ + STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ + STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ + STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ + STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ + STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ + 
STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ + STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ + STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ + STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ + STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ + STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ + STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ + STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ + STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ + STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ + STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ + STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ + STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ + STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ + STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ + STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ + STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ + STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ + STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 0.868381777343750) \ + STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ + STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ + STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ + STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ + STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ + STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ + STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ + STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ + STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ + STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ + STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ + STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ + STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ + STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ + STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ + STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ + STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ + STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ + STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ + STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ + STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ + STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ + STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ + STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ + STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ + STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ + STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ + STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ + STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ + STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ + STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ + 
STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ + STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ + STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ + STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ + STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ + STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ + STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ + STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ + STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ + STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ + STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ + STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ + STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ + STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ + STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ + STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ + STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ + STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ + STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ + STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ + STEP( 197, UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ + STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ + STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ + STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ + +#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/spin.h b/dep/jemalloc/include/jemalloc/internal/spin.h new file mode 100644 index 00000000000..e2afc98cfda --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/spin.h @@ -0,0 +1,36 @@ +#ifndef JEMALLOC_INTERNAL_SPIN_H +#define JEMALLOC_INTERNAL_SPIN_H + +#ifdef JEMALLOC_SPIN_C_ +# define SPIN_INLINE extern inline +#else +# define SPIN_INLINE inline +#endif + +#define SPIN_INITIALIZER {0U} + +typedef struct { + unsigned iteration; +} spin_t; + +SPIN_INLINE void +spin_adaptive(spin_t *spin) { + volatile uint32_t i; + + if (spin->iteration < 5) { + for (i = 0; i < (1U << spin->iteration); i++) { + CPU_SPINWAIT; + } + spin->iteration++; + } else { +#ifdef _WIN32 + SwitchToThread(); +#else + sched_yield(); +#endif + } +} + +#undef SPIN_INLINE + +#endif /* JEMALLOC_INTERNAL_SPIN_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/stats.h b/dep/jemalloc/include/jemalloc/internal/stats.h index 27f68e3681c..1198779ab9c 100644 --- a/dep/jemalloc/include/jemalloc/internal/stats.h +++ b/dep/jemalloc/include/jemalloc/internal/stats.h @@ -1,31 +1,51 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_stats_s tcache_bin_stats_t; -typedef struct malloc_bin_stats_s malloc_bin_stats_t; -typedef struct malloc_large_stats_s malloc_large_stats_t; -typedef struct arena_stats_s arena_stats_t; -typedef struct chunk_stats_s chunk_stats_t; +#ifndef JEMALLOC_INTERNAL_STATS_H +#define JEMALLOC_INTERNAL_STATS_H + +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/mutex_prof.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats_tsd.h" + +/* OPTION(opt, var_name, default, 
set_value_to) */ +#define STATS_PRINT_OPTIONS \ + OPTION('J', json, false, true) \ + OPTION('g', general, true, false) \ + OPTION('m', merged, config_stats, false) \ + OPTION('d', destroyed, config_stats, false) \ + OPTION('a', unmerged, config_stats, false) \ + OPTION('b', bins, true, false) \ + OPTION('l', large, true, false) \ + OPTION('x', mutex, true, false) + +enum { +#define OPTION(o, v, d, s) stats_print_option_num_##v, + STATS_PRINT_OPTIONS +#undef OPTION + stats_print_tot_num_options +}; -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +/* Options for stats_print. */ +extern bool opt_stats_print; +extern char opt_stats_print_opts[stats_print_tot_num_options+1]; -struct tcache_bin_stats_s { - /* - * Number of allocation requests that corresponded to the size of this - * bin. - */ - uint64_t nrequests; -}; +/* Implements je_malloc_stats_print. */ +void stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts); -struct malloc_bin_stats_s { - /* - * Current number of bytes allocated, including objects currently - * cached by tcache. - */ - size_t allocated; +/* + * In those architectures that support 64-bit atomics, we use atomic updates for + * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize + * externally. + */ +#ifdef JEMALLOC_ATOMIC_U64 +typedef atomic_u64_t arena_stats_u64_t; +#else +/* Must hold the arena stats mutex while reading atomically. */ +typedef uint64_t arena_stats_u64_t; +#endif +typedef struct malloc_bin_stats_s { /* * Total number of allocation/deallocation requests served directly by * the bin. Note that tcache may allocate an object, then recycle it @@ -42,132 +62,103 @@ struct malloc_bin_stats_s { */ uint64_t nrequests; + /* + * Current number of regions of this size class, including regions + * currently cached by tcache. + */ + size_t curregs; + /* Number of tcache fills from this bin. */ uint64_t nfills; /* Number of tcache flushes to this bin. */ uint64_t nflushes; - /* Total number of runs created for this bin's size class. */ - uint64_t nruns; + /* Total number of slabs created for this bin's size class. */ + uint64_t nslabs; /* - * Total number of runs reused by extracting them from the runs tree for - * this bin's size class. + * Total number of slabs reused by extracting them from the slabs heap + * for this bin's size class. */ - uint64_t reruns; + uint64_t reslabs; - /* Current number of runs in this bin. */ - size_t curruns; -}; + /* Current number of slabs in this bin. */ + size_t curslabs; -struct malloc_large_stats_s { + mutex_prof_data_t mutex_data; +} malloc_bin_stats_t; + +typedef struct malloc_large_stats_s { /* * Total number of allocation/deallocation requests served directly by - * the arena. Note that tcache may allocate an object, then recycle it - * many times, resulting many increments to nrequests, but only one - * each to nmalloc and ndalloc. + * the arena. */ - uint64_t nmalloc; - uint64_t ndalloc; + arena_stats_u64_t nmalloc; + arena_stats_u64_t ndalloc; /* * Number of allocation requests that correspond to this size class. * This includes requests served by tcache, though tcache only * periodically merges into this counter. */ - uint64_t nrequests; - - /* Current number of runs of this size class. */ - size_t curruns; -}; - -struct arena_stats_s { - /* Number of bytes currently mapped. 
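For reference, the option characters in the OPTION table above are consumed by the public malloc_stats_print() entry point; a minimal sketch of a caller (public jemalloc API, assuming a build without the je_ symbol prefix):

    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* 'J' switches the report to JSON; a string such as "gbl" would
         * instead omit the general, per-bin and per-large-class sections
         * from the plain-text report, per the OPTION defaults above. */
        malloc_stats_print(NULL, NULL, "J"); /* NULL write_cb: use stderr */
        return 0;
    }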
*/ - size_t mapped; - - /* - * Total number of purge sweeps, total number of madvise calls made, - * and total pages purged in order to keep dirty unused memory under - * control. - */ - uint64_t npurge; - uint64_t nmadvise; - uint64_t purged; - - /* Per-size-category statistics. */ - size_t allocated_large; - uint64_t nmalloc_large; - uint64_t ndalloc_large; - uint64_t nrequests_large; - - /* - * One element for each possible size class, including sizes that - * overlap with bin size classes. This is necessary because ipalloc() - * sometimes has to use such large objects in order to assure proper - * alignment. - */ - malloc_large_stats_t *lstats; -}; - -struct chunk_stats_s { - /* Number of chunks that were allocated. */ - uint64_t nchunks; + arena_stats_u64_t nrequests; /* Partially derived. */ + + /* Current number of allocations of this size class. */ + size_t curlextents; /* Derived. */ +} malloc_large_stats_t; + +typedef struct decay_stats_s { + /* Total number of purge sweeps. */ + arena_stats_u64_t npurge; + /* Total number of madvise calls made. */ + arena_stats_u64_t nmadvise; + /* Total number of pages purged. */ + arena_stats_u64_t purged; +} decay_stats_t; + +/* + * Arena stats. Note that fields marked "derived" are not directly maintained + * within the arena code; rather their values are derived during stats merge + * requests. + */ +typedef struct arena_stats_s { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_t mtx; +#endif - /* High-water mark for number of chunks allocated. */ - size_t highchunks; + /* Number of bytes currently mapped, excluding retained memory. */ + atomic_zu_t mapped; /* Partially derived. */ /* - * Current number of chunks allocated. This value isn't maintained for - * any other purpose, so keep track of it in order to be able to set - * highchunks. + * Number of unused virtual memory bytes currently retained. Retained + * bytes are technically mapped (though always decommitted or purged), + * but they are excluded from the mapped statistic (above). */ - size_t curchunks; -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_stats_print; - -extern size_t stats_cactive; + atomic_zu_t retained; /* Derived. */ -void stats_print(void (*write)(void *, const char *), void *cbopaque, - const char *opts); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES + decay_stats_t decay_dirty; + decay_stats_t decay_muzzy; -#ifndef JEMALLOC_ENABLE_INLINE -size_t stats_cactive_get(void); -void stats_cactive_add(size_t size); -void stats_cactive_sub(size_t size); -#endif + atomic_zu_t base; /* Derived. */ + atomic_zu_t internal; + atomic_zu_t resident; /* Derived. */ -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) -JEMALLOC_INLINE size_t -stats_cactive_get(void) -{ + atomic_zu_t allocated_large; /* Derived. */ + arena_stats_u64_t nmalloc_large; /* Derived. */ + arena_stats_u64_t ndalloc_large; /* Derived. */ + arena_stats_u64_t nrequests_large; /* Derived. */ - return (atomic_read_z(&stats_cactive)); -} + /* Number of bytes cached in tcache associated with this arena. */ + atomic_zu_t tcache_bytes; /* Derived. */ -JEMALLOC_INLINE void -stats_cactive_add(size_t size) -{ + mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]; - atomic_add_z(&stats_cactive, size); -} + /* One element for each large size class. 
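With the constants generated earlier in this patch (NSIZES = 231, NBINS = 35) that is 231 - 35 = 196 entries, one per large class from LARGE_MINCLASS (16 KiB) up to LARGE_MAXCLASS; the slot for a given large allocation is presumably located as lstats[sz_size2index(usize) - NBINS], which puts the first large class at index 0.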
*/ + malloc_large_stats_t lstats[NSIZES - NBINS]; -JEMALLOC_INLINE void -stats_cactive_sub(size_t size) -{ - - atomic_sub_z(&stats_cactive, size); -} -#endif + /* Arena uptime. */ + nstime_t uptime; +} arena_stats_t; -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_STATS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/stats_tsd.h b/dep/jemalloc/include/jemalloc/internal/stats_tsd.h new file mode 100644 index 00000000000..d0c3bbe4945 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/stats_tsd.h @@ -0,0 +1,12 @@ +#ifndef JEMALLOC_INTERNAL_STATS_TSD_H +#define JEMALLOC_INTERNAL_STATS_TSD_H + +typedef struct tcache_bin_stats_s { + /* + * Number of allocation requests that corresponded to the size of this + * bin. + */ + uint64_t nrequests; +} tcache_bin_stats_t; + +#endif /* JEMALLOC_INTERNAL_STATS_TSD_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/sz.h b/dep/jemalloc/include/jemalloc/internal/sz.h new file mode 100644 index 00000000000..7f640d55ad7 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/sz.h @@ -0,0 +1,317 @@ +#ifndef JEMALLOC_INTERNAL_SIZE_H +#define JEMALLOC_INTERNAL_SIZE_H + +#include "jemalloc/internal/bit_util.h" +#include "jemalloc/internal/pages.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/util.h" + +/* + * sz module: Size computations. + * + * Some abbreviations used here: + * p: Page + * ind: Index + * s, sz: Size + * u: Usable size + * a: Aligned + * + * These are not always used completely consistently, but should be enough to + * interpret function names. E.g. sz_psz2ind converts page size to page size + * index; sz_sa2u converts a (size, alignment) allocation request to the usable + * size that would result from such an allocation. + */ + +/* + * sz_pind2sz_tab encodes the same information as could be computed by + * sz_pind2sz_compute(). + */ +extern size_t const sz_pind2sz_tab[NPSIZES+1]; +/* + * sz_index2size_tab encodes the same information as could be computed (at + * unacceptable cost in some code paths) by sz_index2size_compute(). + */ +extern size_t const sz_index2size_tab[NSIZES]; +/* + * sz_size2index_tab is a compact lookup table that rounds request sizes up to + * size classes. In order to reduce cache footprint, the table is compressed, + * and all accesses are via sz_size2index(). + */ +extern uint8_t const sz_size2index_tab[]; + +static const size_t sz_large_pad = +#ifdef JEMALLOC_CACHE_OBLIVIOUS + PAGE +#else + 0 +#endif + ; + +JEMALLOC_ALWAYS_INLINE pszind_t +sz_psz2ind(size_t psz) { + if (unlikely(psz > LARGE_MAXCLASS)) { + return NPSIZES; + } + { + pszind_t x = lg_floor((psz<<1)-1); + pszind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_PAGE) ? 0 : x - + (LG_SIZE_CLASS_GROUP + LG_PAGE); + pszind_t grp = shift << LG_SIZE_CLASS_GROUP; + + pszind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? 
+ LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZD(-1) << lg_delta; + pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + pszind_t ind = grp + mod; + return ind; + } +} + +static inline size_t +sz_pind2sz_compute(pszind_t pind) { + if (unlikely(pind == NPSIZES)) { + return LARGE_MAXCLASS + PAGE; + } + { + size_t grp = pind >> LG_SIZE_CLASS_GROUP; + size_t mod = pind & ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_PAGE + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 1 : grp; + size_t lg_delta = shift + (LG_PAGE-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t sz = grp_size + mod_size; + return sz; + } +} + +static inline size_t +sz_pind2sz_lookup(pszind_t pind) { + size_t ret = (size_t)sz_pind2sz_tab[pind]; + assert(ret == sz_pind2sz_compute(pind)); + return ret; +} + +static inline size_t +sz_pind2sz(pszind_t pind) { + assert(pind < NPSIZES+1); + return sz_pind2sz_lookup(pind); +} + +static inline size_t +sz_psz2u(size_t psz) { + if (unlikely(psz > LARGE_MAXCLASS)) { + return LARGE_MAXCLASS + PAGE; + } + { + size_t x = lg_floor((psz<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_PAGE + 1) ? + LG_PAGE : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (psz + delta_mask) & ~delta_mask; + return usize; + } +} + +static inline szind_t +sz_size2index_compute(size_t size) { + if (unlikely(size > LARGE_MAXCLASS)) { + return NSIZES; + } +#if (NTBINS != 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + szind_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin); + } +#endif + { + szind_t x = lg_floor((size<<1)-1); + szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 : + x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM); + szind_t grp = shift << LG_SIZE_CLASS_GROUP; + + szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + + size_t delta_inverse_mask = ZD(-1) << lg_delta; + szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) & + ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1); + + szind_t index = NTBINS + grp + mod; + return index; + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +sz_size2index_lookup(size_t size) { + assert(size <= LOOKUP_MAXCLASS); + { + szind_t ret = (sz_size2index_tab[(size-1) >> LG_TINY_MIN]); + assert(ret == sz_size2index_compute(size)); + return ret; + } +} + +JEMALLOC_ALWAYS_INLINE szind_t +sz_size2index(size_t size) { + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) { + return sz_size2index_lookup(size); + } + return sz_size2index_compute(size); +} + +static inline size_t +sz_index2size_compute(szind_t index) { +#if (NTBINS > 0) + if (index < NTBINS) { + return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index)); + } +#endif + { + size_t reduced_index = index - NTBINS; + size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP; + size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) - + 1); + + size_t grp_size_mask = ~((!!grp)-1); + size_t grp_size = ((ZU(1) << (LG_QUANTUM + + (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask; + + size_t shift = (grp == 0) ? 
1 : grp; + size_t lg_delta = shift + (LG_QUANTUM-1); + size_t mod_size = (mod+1) << lg_delta; + + size_t usize = grp_size + mod_size; + return usize; + } +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_index2size_lookup(szind_t index) { + size_t ret = (size_t)sz_index2size_tab[index]; + assert(ret == sz_index2size_compute(index)); + return ret; +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_index2size(szind_t index) { + assert(index < NSIZES); + return sz_index2size_lookup(index); +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_s2u_compute(size_t size) { + if (unlikely(size > LARGE_MAXCLASS)) { + return 0; + } +#if (NTBINS > 0) + if (size <= (ZU(1) << LG_TINY_MAXCLASS)) { + size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1; + size_t lg_ceil = lg_floor(pow2_ceil_zu(size)); + return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) : + (ZU(1) << lg_ceil)); + } +#endif + { + size_t x = lg_floor((size<<1)-1); + size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1) + ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1; + size_t delta = ZU(1) << lg_delta; + size_t delta_mask = delta - 1; + size_t usize = (size + delta_mask) & ~delta_mask; + return usize; + } +} + +JEMALLOC_ALWAYS_INLINE size_t +sz_s2u_lookup(size_t size) { + size_t ret = sz_index2size_lookup(sz_size2index_lookup(size)); + + assert(ret == sz_s2u_compute(size)); + return ret; +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size. + */ +JEMALLOC_ALWAYS_INLINE size_t +sz_s2u(size_t size) { + assert(size > 0); + if (likely(size <= LOOKUP_MAXCLASS)) { + return sz_s2u_lookup(size); + } + return sz_s2u_compute(size); +} + +/* + * Compute usable size that would result from allocating an object with the + * specified size and alignment. + */ +JEMALLOC_ALWAYS_INLINE size_t +sz_sa2u(size_t size, size_t alignment) { + size_t usize; + + assert(alignment != 0 && ((alignment - 1) & alignment) == 0); + + /* Try for a small size class. */ + if (size <= SMALL_MAXCLASS && alignment < PAGE) { + /* + * Round size up to the nearest multiple of alignment. + * + * This done, we can take advantage of the fact that for each + * small size class, every object is aligned at the smallest + * power of two that is non-zero in the base two representation + * of the size. For example: + * + * Size | Base 2 | Minimum alignment + * -----+----------+------------------ + * 96 | 1100000 | 32 + * 144 | 10100000 | 32 + * 192 | 11000000 | 64 + */ + usize = sz_s2u(ALIGNMENT_CEILING(size, alignment)); + if (usize < LARGE_MINCLASS) { + return usize; + } + } + + /* Large size class. Beware of overflow. */ + + if (unlikely(alignment > LARGE_MAXCLASS)) { + return 0; + } + + /* Make sure result is a large size class. */ + if (size <= LARGE_MINCLASS) { + usize = LARGE_MINCLASS; + } else { + usize = sz_s2u(size); + if (usize < size) { + /* size_t overflow. */ + return 0; + } + } + + /* + * Calculate the multi-page mapping that large_palloc() would need in + * order to guarantee the alignment. + */ + if (usize + sz_large_pad + PAGE_CEILING(alignment) - PAGE < usize) { + /* size_t overflow. 
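As a worked example of the small-class path above: sz_sa2u(97, 32) rounds 97 up to the next multiple of the alignment, ALIGNMENT_CEILING(97, 32) = 128, and sz_s2u(128) = 128 is itself a small class whose minimum alignment (the lowest set bit of 10000000b) is 128 >= 32, so 128 is returned directly. The wraparound comparison here is the usual trick for catching size_t overflow without wider integers: if adding the padding and alignment slack yields a sum smaller than usize, the addition must have wrapped, so the request cannot be satisfied.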
*/ + return 0; + } + return usize; +} + +#endif /* JEMALLOC_INTERNAL_SIZE_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/tcache.h b/dep/jemalloc/include/jemalloc/internal/tcache.h deleted file mode 100644 index c3d4b58d4dc..00000000000 --- a/dep/jemalloc/include/jemalloc/internal/tcache.h +++ /dev/null @@ -1,443 +0,0 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES - -typedef struct tcache_bin_info_s tcache_bin_info_t; -typedef struct tcache_bin_s tcache_bin_t; -typedef struct tcache_s tcache_t; - -/* - * tcache pointers close to NULL are used to encode state information that is - * used for two purposes: preventing thread caching on a per thread basis and - * cleaning up during thread shutdown. - */ -#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) -#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) -#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) -#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY - -/* - * Absolute maximum number of cache slots for each small bin in the thread - * cache. This is an additional constraint beyond that imposed as: twice the - * number of regions per run for this size class. - * - * This constant must be an even number. - */ -#define TCACHE_NSLOTS_SMALL_MAX 200 - -/* Number of cache slots for large size classes. */ -#define TCACHE_NSLOTS_LARGE 20 - -/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ -#define LG_TCACHE_MAXCLASS_DEFAULT 15 - -/* - * TCACHE_GC_SWEEP is the approximate number of allocation events between - * full GC sweeps. Integer rounding may cause the actual number to be - * slightly higher, since GC is performed incrementally. - */ -#define TCACHE_GC_SWEEP 8192 - -/* Number of tcache allocation/deallocation events between incremental GCs. */ -#define TCACHE_GC_INCR \ - ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -typedef enum { - tcache_enabled_false = 0, /* Enable cast to/from bool. */ - tcache_enabled_true = 1, - tcache_enabled_default = 2 -} tcache_enabled_t; - -/* - * Read-only information associated with each element of tcache_t's tbins array - * is stored separately, mainly to reduce memory usage. - */ -struct tcache_bin_info_s { - unsigned ncached_max; /* Upper limit on ncached. */ -}; - -struct tcache_bin_s { - tcache_bin_stats_t tstats; - int low_water; /* Min # cached since last GC. */ - unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ - unsigned ncached; /* # of cached objects. */ - void **avail; /* Stack of available objects. */ -}; - -struct tcache_s { - ql_elm(tcache_t) link; /* Used for aggregating stats. */ - uint64_t prof_accumbytes;/* Cleared after arena_prof_accum() */ - arena_t *arena; /* This thread's arena. */ - unsigned ev_cnt; /* Event count since incremental GC. */ - unsigned next_gc_bin; /* Next bin to GC. */ - tcache_bin_t tbins[1]; /* Dynamically sized. */ - /* - * The pointer stacks associated with tbins follow as a contiguous - * array. During tcache initialization, the avail pointer in each - * element of tbins is initialized to point to the proper offset within - * this array. 
- */ -}; - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -extern bool opt_tcache; -extern ssize_t opt_lg_tcache_max; - -extern tcache_bin_info_t *tcache_bin_info; - -/* - * Number of tcache bins. There are NBINS small-object bins, plus 0 or more - * large-object bins. - */ -extern size_t nhbins; - -/* Maximum cached size class. */ -extern size_t tcache_maxclass; - -size_t tcache_salloc(const void *ptr); -void tcache_event_hard(tcache_t *tcache); -void *tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, - size_t binind); -void tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache); -void tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache); -void tcache_arena_associate(tcache_t *tcache, arena_t *arena); -void tcache_arena_dissociate(tcache_t *tcache); -tcache_t *tcache_create(arena_t *arena); -void tcache_destroy(tcache_t *tcache); -void tcache_thread_cleanup(void *arg); -void tcache_stats_merge(tcache_t *tcache, arena_t *arena); -bool tcache_boot0(void); -bool tcache_boot1(void); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache, tcache_t *) -malloc_tsd_protos(JEMALLOC_ATTR(unused), tcache_enabled, tcache_enabled_t) - -void tcache_event(tcache_t *tcache); -void tcache_flush(void); -bool tcache_enabled_get(void); -tcache_t *tcache_get(bool create); -void tcache_enabled_set(bool enabled); -void *tcache_alloc_easy(tcache_bin_t *tbin); -void *tcache_alloc_small(tcache_t *tcache, size_t size, bool zero); -void *tcache_alloc_large(tcache_t *tcache, size_t size, bool zero); -void tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind); -void tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size); -#endif - -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) -/* Map of thread-specific caches. */ -malloc_tsd_externs(tcache, tcache_t *) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache, tcache_t *, NULL, - tcache_thread_cleanup) -/* Per thread flag that allows thread caches to be disabled. 
*/ -malloc_tsd_externs(tcache_enabled, tcache_enabled_t) -malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, tcache_enabled, tcache_enabled_t, - tcache_enabled_default, malloc_tsd_no_cleanup) - -JEMALLOC_INLINE void -tcache_flush(void) -{ - tcache_t *tcache; - - cassert(config_tcache); - - tcache = *tcache_tsd_get(); - if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) - return; - tcache_destroy(tcache); - tcache = NULL; - tcache_tsd_set(&tcache); -} - -JEMALLOC_INLINE bool -tcache_enabled_get(void) -{ - tcache_enabled_t tcache_enabled; - - cassert(config_tcache); - - tcache_enabled = *tcache_enabled_tsd_get(); - if (tcache_enabled == tcache_enabled_default) { - tcache_enabled = (tcache_enabled_t)opt_tcache; - tcache_enabled_tsd_set(&tcache_enabled); - } - - return ((bool)tcache_enabled); -} - -JEMALLOC_INLINE void -tcache_enabled_set(bool enabled) -{ - tcache_enabled_t tcache_enabled; - tcache_t *tcache; - - cassert(config_tcache); - - tcache_enabled = (tcache_enabled_t)enabled; - tcache_enabled_tsd_set(&tcache_enabled); - tcache = *tcache_tsd_get(); - if (enabled) { - if (tcache == TCACHE_STATE_DISABLED) { - tcache = NULL; - tcache_tsd_set(&tcache); - } - } else /* disabled */ { - if (tcache > TCACHE_STATE_MAX) { - tcache_destroy(tcache); - tcache = NULL; - } - if (tcache == NULL) { - tcache = TCACHE_STATE_DISABLED; - tcache_tsd_set(&tcache); - } - } -} - -JEMALLOC_ALWAYS_INLINE tcache_t * -tcache_get(bool create) -{ - tcache_t *tcache; - - if (config_tcache == false) - return (NULL); - if (config_lazy_lock && isthreaded == false) - return (NULL); - - tcache = *tcache_tsd_get(); - if ((uintptr_t)tcache <= (uintptr_t)TCACHE_STATE_MAX) { - if (tcache == TCACHE_STATE_DISABLED) - return (NULL); - if (tcache == NULL) { - if (create == false) { - /* - * Creating a tcache here would cause - * allocation as a side effect of free(). - * Ordinarily that would be okay since - * tcache_create() failure is a soft failure - * that doesn't propagate. However, if TLS - * data are freed via free() as in glibc, - * subtle corruption could result from setting - * a TLS variable after its backing memory is - * freed. - */ - return (NULL); - } - if (tcache_enabled_get() == false) { - tcache_enabled_set(false); /* Memoize. */ - return (NULL); - } - return (tcache_create(choose_arena(NULL))); - } - if (tcache == TCACHE_STATE_PURGATORY) { - /* - * Make a note that an allocator function was called - * after tcache_thread_cleanup() was called. 
- */ - tcache = TCACHE_STATE_REINCARNATED; - tcache_tsd_set(&tcache); - return (NULL); - } - if (tcache == TCACHE_STATE_REINCARNATED) - return (NULL); - not_reached(); - } - - return (tcache); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_event(tcache_t *tcache) -{ - - if (TCACHE_GC_INCR == 0) - return; - - tcache->ev_cnt++; - assert(tcache->ev_cnt <= TCACHE_GC_INCR); - if (tcache->ev_cnt == TCACHE_GC_INCR) - tcache_event_hard(tcache); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_easy(tcache_bin_t *tbin) -{ - void *ret; - - if (tbin->ncached == 0) { - tbin->low_water = -1; - return (NULL); - } - tbin->ncached--; - if ((int)tbin->ncached < tbin->low_water) - tbin->low_water = tbin->ncached; - ret = tbin->avail[tbin->ncached]; - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_small(tcache_t *tcache, size_t size, bool zero) -{ - void *ret; - size_t binind; - tcache_bin_t *tbin; - - binind = SMALL_SIZE2BIN(size); - assert(binind < NBINS); - tbin = &tcache->tbins[binind]; - size = arena_bin_info[binind].reg_size; - ret = tcache_alloc_easy(tbin); - if (ret == NULL) { - ret = tcache_alloc_small_hard(tcache, tbin, binind); - if (ret == NULL) - return (NULL); - } - assert(tcache_salloc(ret) == arena_bin_info[binind].reg_size); - - if (zero == false) { - if (config_fill) { - if (opt_junk) { - arena_alloc_junk_small(ret, - &arena_bin_info[binind], false); - } else if (opt_zero) - memset(ret, 0, size); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } else { - if (config_fill && opt_junk) { - arena_alloc_junk_small(ret, &arena_bin_info[binind], - true); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += arena_bin_info[binind].reg_size; - tcache_event(tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void * -tcache_alloc_large(tcache_t *tcache, size_t size, bool zero) -{ - void *ret; - size_t binind; - tcache_bin_t *tbin; - - size = PAGE_CEILING(size); - assert(size <= tcache_maxclass); - binind = NBINS + (size >> LG_PAGE) - 1; - assert(binind < nhbins); - tbin = &tcache->tbins[binind]; - ret = tcache_alloc_easy(tbin); - if (ret == NULL) { - /* - * Only allocate one large object at a time, because it's quite - * expensive to create one and not use it. 
- */ - ret = arena_malloc_large(tcache->arena, size, zero); - if (ret == NULL) - return (NULL); - } else { - if (config_prof && prof_promote && size == PAGE) { - arena_chunk_t *chunk = - (arena_chunk_t *)CHUNK_ADDR2BASE(ret); - size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> - LG_PAGE); - arena_mapbits_large_binind_set(chunk, pageind, - BININD_INVALID); - } - if (zero == false) { - if (config_fill) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } else { - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - - if (config_stats) - tbin->tstats.nrequests++; - if (config_prof) - tcache->prof_accumbytes += size; - } - - tcache_event(tcache); - return (ret); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_small(tcache_t *tcache, void *ptr, size_t binind) -{ - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert(tcache_salloc(ptr) <= SMALL_MAXCLASS); - - if (config_fill && opt_junk) - arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (tbin->ncached == tbin_info->ncached_max) { - tcache_bin_flush_small(tbin, binind, (tbin_info->ncached_max >> - 1), tcache); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->avail[tbin->ncached] = ptr; - tbin->ncached++; - - tcache_event(tcache); -} - -JEMALLOC_ALWAYS_INLINE void -tcache_dalloc_large(tcache_t *tcache, void *ptr, size_t size) -{ - size_t binind; - tcache_bin_t *tbin; - tcache_bin_info_t *tbin_info; - - assert((size & PAGE_MASK) == 0); - assert(tcache_salloc(ptr) > SMALL_MAXCLASS); - assert(tcache_salloc(ptr) <= tcache_maxclass); - - binind = NBINS + (size >> LG_PAGE) - 1; - - if (config_fill && opt_junk) - memset(ptr, 0x5a, size); - - tbin = &tcache->tbins[binind]; - tbin_info = &tcache_bin_info[binind]; - if (tbin->ncached == tbin_info->ncached_max) { - tcache_bin_flush_large(tbin, binind, (tbin_info->ncached_max >> - 1), tcache); - } - assert(tbin->ncached < tbin_info->ncached_max); - tbin->avail[tbin->ncached] = ptr; - tbin->ncached++; - - tcache_event(tcache); -} -#endif - -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ diff --git a/dep/jemalloc/include/jemalloc/internal/tcache_externs.h b/dep/jemalloc/include/jemalloc/internal/tcache_externs.h new file mode 100644 index 00000000000..db3e9c7d5d1 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/tcache_externs.h @@ -0,0 +1,55 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H +#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H + +#include "jemalloc/internal/size_classes.h" + +extern bool opt_tcache; +extern ssize_t opt_lg_tcache_max; + +extern tcache_bin_info_t *tcache_bin_info; + +/* + * Number of tcache bins. There are NBINS small-object bins, plus 0 or more + * large-object bins. + */ +extern unsigned nhbins; + +/* Maximum cached size class. */ +extern size_t tcache_maxclass; + +/* + * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and + * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are + * completely disjoint from this data structure. tcaches starts off as a sparse + * array, so it has no physical memory footprint until individual pages are + * touched. This allows the entire array to be allocated the first time an + * explicit tcache is created without a disproportionate impact on memory usage. 
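The mallctls and flag named in this comment are driven from application code roughly as follows; a sketch against the public API, again assuming an unprefixed build:

    #include <jemalloc/jemalloc.h>

    int main(void) {
        unsigned ind;
        size_t sz = sizeof(ind);

        /* Reserve an explicit tcache; its id comes back in ind. */
        if (mallctl("tcache.create", &ind, &sz, NULL, 0) != 0) {
            return 1;
        }
        /* Route allocation traffic through it via the MALLOCX_TCACHE flag. */
        void *p = mallocx(4096, MALLOCX_TCACHE(ind));
        if (p != NULL) {
            dallocx(p, MALLOCX_TCACHE(ind));
        }
        /* Drop whatever it caches, then retire the id for reuse. */
        mallctl("tcache.flush", NULL, NULL, &ind, sizeof(ind));
        mallctl("tcache.destroy", NULL, NULL, &ind, sizeof(ind));
        return 0;
    }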
+ */ +extern tcaches_t *tcaches; + +size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); +void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); +void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, bool *tcache_success); +void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, + szind_t binind, unsigned rem); +void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache); +void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, + arena_t *arena); +tcache_t *tcache_create_explicit(tsd_t *tsd); +void tcache_cleanup(tsd_t *tsd); +void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +bool tcaches_create(tsd_t *tsd, unsigned *r_ind); +void tcaches_flush(tsd_t *tsd, unsigned ind); +void tcaches_destroy(tsd_t *tsd, unsigned ind); +bool tcache_boot(tsdn_t *tsdn); +void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); +void tcache_prefork(tsdn_t *tsdn); +void tcache_postfork_parent(tsdn_t *tsdn); +void tcache_postfork_child(tsdn_t *tsdn); +void tcache_flush(tsd_t *tsd); +bool tsd_tcache_data_init(tsd_t *tsd); +bool tsd_tcache_enabled_data_init(tsd_t *tsd); + +#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/tcache_inlines.h b/dep/jemalloc/include/jemalloc/internal/tcache_inlines.h new file mode 100644 index 00000000000..c55bcd2723d --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/tcache_inlines.h @@ -0,0 +1,250 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_INLINES_H +#define JEMALLOC_INTERNAL_TCACHE_INLINES_H + +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/util.h" + +static inline bool +tcache_enabled_get(tsd_t *tsd) { + return tsd_tcache_enabled_get(tsd); +} + +static inline void +tcache_enabled_set(tsd_t *tsd, bool enabled) { + bool was_enabled = tsd_tcache_enabled_get(tsd); + + if (!was_enabled && enabled) { + tsd_tcache_data_init(tsd); + } else if (was_enabled && !enabled) { + tcache_cleanup(tsd); + } + /* Commit the state last. Above calls check current state. */ + tsd_tcache_enabled_set(tsd, enabled); + tsd_slow_update(tsd); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_event(tsd_t *tsd, tcache_t *tcache) { + if (TCACHE_GC_INCR == 0) { + return; + } + + if (unlikely(ticker_tick(&tcache->gc_ticker))) { + tcache_event_hard(tsd, tcache); + } +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) { + void *ret; + + if (unlikely(tbin->ncached == 0)) { + tbin->low_water = -1; + *tcache_success = false; + return NULL; + } + /* + * tcache_success (instead of ret) should be checked upon the return of + * this function. We avoid checking (ret == NULL) because there is + * never a null stored on the avail stack (which is unknown to the + * compiler), and eagerly checking ret would cause pipeline stall + * (waiting for the cacheline). 
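The avail layout being relied on here (and spelled out again in tcache_structs.h further down) can be modelled in isolation; a toy sketch of the same pointer discipline, not the real allocator:

    #include <assert.h>

    #define NCACHED_MAX 4

    int main(void) {
        void *storage[NCACHED_MAX];
        /* avail points just past the storage; the live items occupy
         * avail[-ncached .. -1], exactly as in tcache_bin_s. */
        void **avail = storage + NCACHED_MAX;
        unsigned ncached = 0;
        int a, b;

        /* Deallocation pushes downward from avail... */
        ncached++; *(avail - ncached) = &a;   /* lands in avail[-1] */
        ncached++; *(avail - ncached) = &b;   /* lands in avail[-2] */

        /* ...and allocation pops the lowest-addressed (most recently
         * freed) item first, matching tcache_alloc_easy() above. */
        void *ret = *(avail - ncached);
        ncached--;
        assert(ret == (void *)&b);
        return 0;
    }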
+ */ + *tcache_success = true; + ret = *(tbin->avail - tbin->ncached); + tbin->ncached--; + + if (unlikely((low_water_t)tbin->ncached < tbin->low_water)) { + tbin->low_water = tbin->ncached; + } + + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + szind_t binind, bool zero, bool slow_path) { + void *ret; + tcache_bin_t *tbin; + bool tcache_success; + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + assert(binind < NBINS); + tbin = tcache_small_bin_get(tcache, binind); + ret = tcache_alloc_easy(tbin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + bool tcache_hard_success; + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) { + return NULL; + } + + ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, + tbin, binind, &tcache_hard_success); + if (tcache_hard_success == false) { + return NULL; + } + } + + assert(ret); + /* + * Only compute usize if required. The checks in the following if + * statement are all static. + */ + if (config_prof || (slow_path && config_fill) || unlikely(zero)) { + usize = sz_index2size(binind); + assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); + } + + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, + &arena_bin_info[binind], false); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } + } + } else { + if (slow_path && config_fill && unlikely(opt_junk_alloc)) { + arena_alloc_junk_small(ret, &arena_bin_info[binind], + true); + } + memset(ret, 0, usize); + } + + if (config_stats) { + tbin->tstats.nrequests++; + } + if (config_prof) { + tcache->prof_accumbytes += usize; + } + tcache_event(tsd, tcache); + return ret; +} + +JEMALLOC_ALWAYS_INLINE void * +tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, + szind_t binind, bool zero, bool slow_path) { + void *ret; + tcache_bin_t *tbin; + bool tcache_success; + + assert(binind >= NBINS &&binind < nhbins); + tbin = tcache_large_bin_get(tcache, binind); + ret = tcache_alloc_easy(tbin, &tcache_success); + assert(tcache_success == (ret != NULL)); + if (unlikely(!tcache_success)) { + /* + * Only allocate one large object at a time, because it's quite + * expensive to create one and not use it. 
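The fallback below rounds the request with sz_s2u() before calling large_malloc(); for a size in the large range that reduces to pure bit arithmetic. A standalone sketch of one such rounding, assuming LG_SIZE_CLASS_GROUP = 2 (four classes per group, as the SC table above shows), so that the 16 KiB..32 KiB group is carved into 4 KiB steps, and noting that the tiny/small guards in sz_s2u_compute() do not fire at this size:

    #include <assert.h>
    #include <stddef.h>

    static size_t lg_floor_(size_t x) {   /* stand-in for lg_floor() */
        size_t r = 0;
        while (x >>= 1) {
            r++;
        }
        return r;
    }

    int main(void) {
        size_t size = 20000;                    /* 16 KiB < size < 32 KiB */
        size_t x = lg_floor_((size << 1) - 1);  /* = 15 */
        size_t lg_delta = x - 2 - 1;            /* = 12: 4 KiB class spacing */
        size_t delta_mask = ((size_t)1 << lg_delta) - 1;
        size_t usize = (size + delta_mask) & ~delta_mask;
        assert(usize == 20480);                 /* the 20 KiB large class */
        return 0;
    }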
+ */ + arena = arena_choose(tsd, arena); + if (unlikely(arena == NULL)) { + return NULL; + } + + ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero); + if (ret == NULL) { + return NULL; + } + } else { + size_t usize JEMALLOC_CC_SILENCE_INIT(0); + + /* Only compute usize on demand */ + if (config_prof || (slow_path && config_fill) || + unlikely(zero)) { + usize = sz_index2size(binind); + assert(usize <= tcache_maxclass); + } + + if (likely(!zero)) { + if (slow_path && config_fill) { + if (unlikely(opt_junk_alloc)) { + memset(ret, JEMALLOC_ALLOC_JUNK, + usize); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } + } + } else { + memset(ret, 0, usize); + } + + if (config_stats) { + tbin->tstats.nrequests++; + } + if (config_prof) { + tcache->prof_accumbytes += usize; + } + } + + tcache_event(tsd, tcache); + return ret; +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, + bool slow_path) { + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); + + if (slow_path && config_fill && unlikely(opt_junk_free)) { + arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); + } + + tbin = tcache_small_bin_get(tcache, binind); + tbin_info = &tcache_bin_info[binind]; + if (unlikely(tbin->ncached == tbin_info->ncached_max)) { + tcache_bin_flush_small(tsd, tcache, tbin, binind, + (tbin_info->ncached_max >> 1)); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->ncached++; + *(tbin->avail - tbin->ncached) = ptr; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE void +tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, + bool slow_path) { + tcache_bin_t *tbin; + tcache_bin_info_t *tbin_info; + + assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); + assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); + + if (slow_path && config_fill && unlikely(opt_junk_free)) { + large_dalloc_junk(ptr, sz_index2size(binind)); + } + + tbin = tcache_large_bin_get(tcache, binind); + tbin_info = &tcache_bin_info[binind]; + if (unlikely(tbin->ncached == tbin_info->ncached_max)) { + tcache_bin_flush_large(tsd, tbin, binind, + (tbin_info->ncached_max >> 1), tcache); + } + assert(tbin->ncached < tbin_info->ncached_max); + tbin->ncached++; + *(tbin->avail - tbin->ncached) = ptr; + + tcache_event(tsd, tcache); +} + +JEMALLOC_ALWAYS_INLINE tcache_t * +tcaches_get(tsd_t *tsd, unsigned ind) { + tcaches_t *elm = &tcaches[ind]; + if (unlikely(elm->tcache == NULL)) { + elm->tcache = tcache_create_explicit(tsd); + } + return elm->tcache; +} + +#endif /* JEMALLOC_INTERNAL_TCACHE_INLINES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/tcache_structs.h b/dep/jemalloc/include/jemalloc/internal/tcache_structs.h new file mode 100644 index 00000000000..7eb516fb6b1 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/tcache_structs.h @@ -0,0 +1,64 @@ +#ifndef JEMALLOC_INTERNAL_TCACHE_STRUCTS_H +#define JEMALLOC_INTERNAL_TCACHE_STRUCTS_H + +#include "jemalloc/internal/ql.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/stats_tsd.h" +#include "jemalloc/internal/ticker.h" + +/* + * Read-only information associated with each element of tcache_t's tbins array + * is stored separately, mainly to reduce memory usage. + */ +struct tcache_bin_info_s { + unsigned ncached_max; /* Upper limit on ncached. */ +}; + +struct tcache_bin_s { + low_water_t low_water; /* Min # cached since last GC. 
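The incremental GC pass in tcache_event_hard() (in tcache.c, not shown in this hunk) presumably uses this watermark to trim entries that sat unused for a whole sweep, and the refill size is tied to the lg_fill_div array in tcache_s below: a bin refills with ncached_max >> lg_fill_div objects, so at the TCACHE_NSLOTS_SMALL_MAX of 200 slots, raising lg_fill_div from 1 to 2 cuts the refill batch from 100 to 50.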
 */
+ uint32_t ncached; /* # of cached objects. */
+ /*
+ * ncached and stats are both modified frequently. Let's keep them
+ * close so that they have a higher chance of being on the same
+ * cacheline, and thus fewer write-backs.
+ */
+ tcache_bin_stats_t tstats;
+ /*
+ * To make use of adjacent cacheline prefetch, the items in the avail
+ * stack go to higher addresses for newer allocations. avail points
+ * just above the available space, which means that
+ * avail[-ncached, ... -1] are available items and the lowest item will
+ * be allocated first.
+ */
+ void **avail; /* Stack of available objects. */
+};
+
+struct tcache_s {
+ /* Data accessed frequently first: prof, ticker and small bins. */
+ uint64_t prof_accumbytes; /* Cleared after arena_prof_accum(). */
+ ticker_t gc_ticker; /* Drives incremental GC. */
+ /*
+ * The pointer stacks associated with tbins follow as a contiguous
+ * array. During tcache initialization, the avail pointer in each
+ * element of tbins is initialized to point to the proper offset within
+ * this array.
+ */
+ tcache_bin_t tbins_small[NBINS];
+ /* Data accessed less often below. */
+ ql_elm(tcache_t) link; /* Used for aggregating stats. */
+ arena_t *arena; /* Associated arena. */
+ szind_t next_gc_bin; /* Next bin to GC. */
+ /* For small bins, fill (ncached_max >> lg_fill_div). */
+ uint8_t lg_fill_div[NBINS];
+ tcache_bin_t tbins_large[NSIZES-NBINS];
+};
+
+/* Linkage for list of available (previously used) explicit tcache IDs. */
+struct tcaches_s {
+ union {
+ tcache_t *tcache;
+ tcaches_t *next;
+ };
+};
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_STRUCTS_H */
diff --git a/dep/jemalloc/include/jemalloc/internal/tcache_types.h b/dep/jemalloc/include/jemalloc/internal/tcache_types.h
new file mode 100644
index 00000000000..1155d62cb44
--- /dev/null
+++ b/dep/jemalloc/include/jemalloc/internal/tcache_types.h
@@ -0,0 +1,61 @@
+#ifndef JEMALLOC_INTERNAL_TCACHE_TYPES_H
+#define JEMALLOC_INTERNAL_TCACHE_TYPES_H
+
+#include "jemalloc/internal/size_classes.h"
+
+typedef struct tcache_bin_info_s tcache_bin_info_t;
+typedef struct tcache_bin_s tcache_bin_t;
+typedef struct tcache_s tcache_t;
+typedef struct tcaches_s tcaches_t;
+
+/* ncached is cast to this type for comparison. */
+typedef int32_t low_water_t;
+
+/*
+ * tcache pointers close to NULL are used to encode state information that is
+ * used for two purposes: preventing thread caching on a per thread basis and
+ * cleaning up during thread shutdown.
+ */
+#define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1)
+#define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2)
+#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
+#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
+
+/*
+ * Absolute minimum number of cache slots for each small bin.
+ */
+#define TCACHE_NSLOTS_SMALL_MIN 20
+
+/*
+ * Absolute maximum number of cache slots for each small bin in the thread
+ * cache. This is an additional constraint beyond that imposed as: twice the
+ * number of regions per slab for this size class.
+ *
+ * This constant must be an even number.
+ */
+#define TCACHE_NSLOTS_SMALL_MAX 200
+
+/* Number of cache slots for large size classes. */
+#define TCACHE_NSLOTS_LARGE 20
+
+/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
+#define LG_TCACHE_MAXCLASS_DEFAULT 15
+
+/*
+ * TCACHE_GC_SWEEP is the approximate number of allocation events between
+ * full GC sweeps. Integer rounding may cause the actual number to be
+ * slightly higher, since GC is performed incrementally.
+ */
+#define TCACHE_GC_SWEEP 8192
+
+/* Number of tcache allocation/deallocation events between incremental GCs. */
+#define TCACHE_GC_INCR \
+ ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1))
+
+/* Used in TSD static initializer only. Real init in tcache_data_init(). */
+#define TCACHE_ZERO_INITIALIZER {0}
+
+/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
+#define TCACHE_ENABLED_ZERO_INITIALIZER false
+
+#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
diff --git a/dep/jemalloc/include/jemalloc/internal/ticker.h b/dep/jemalloc/include/jemalloc/internal/ticker.h
new file mode 100644
index 00000000000..572b96459cc
--- /dev/null
+++ b/dep/jemalloc/include/jemalloc/internal/ticker.h
@@ -0,0 +1,50 @@
+#ifndef JEMALLOC_INTERNAL_TICKER_H
+#define JEMALLOC_INTERNAL_TICKER_H
+
+#include "jemalloc/internal/util.h"
+
+/**
+ * A ticker makes it easy to count down events until some limit. You
+ * ticker_init the ticker to trigger every nticks events. You then notify it
+ * that an event has occurred with calls to ticker_tick (or that nticks events
+ * have occurred with a call to ticker_ticks), which will return true (and reset
+ * the counter) if the countdown hit zero.
+ */
+
+typedef struct {
+ int32_t tick;
+ int32_t nticks;
+} ticker_t;
+
+static inline void
+ticker_init(ticker_t *ticker, int32_t nticks) {
+ ticker->tick = nticks;
+ ticker->nticks = nticks;
+}
+
+static inline void
+ticker_copy(ticker_t *ticker, const ticker_t *other) {
+ *ticker = *other;
+}
+
+static inline int32_t
+ticker_read(const ticker_t *ticker) {
+ return ticker->tick;
+}
+
+static inline bool
+ticker_ticks(ticker_t *ticker, int32_t nticks) {
+ if (unlikely(ticker->tick < nticks)) {
+ ticker->tick = ticker->nticks;
+ return true;
+ }
+ ticker->tick -= nticks;
+ return false;
+}
+
+static inline bool
+ticker_tick(ticker_t *ticker) {
+ return ticker_ticks(ticker, 1);
+}
+
+#endif /* JEMALLOC_INTERNAL_TICKER_H */
diff --git a/dep/jemalloc/include/jemalloc/internal/tsd.h b/dep/jemalloc/include/jemalloc/internal/tsd.h
index 9fb4a23ec6b..155a2ec6c44 100644
--- a/dep/jemalloc/include/jemalloc/internal/tsd.h
+++ b/dep/jemalloc/include/jemalloc/internal/tsd.h
@@ -1,434 +1,324 @@
-/******************************************************************************/
-#ifdef JEMALLOC_H_TYPES
+#ifndef JEMALLOC_INTERNAL_TSD_H
+#define JEMALLOC_INTERNAL_TSD_H

-/* Maximum number of malloc_tsd users with cleanup functions.
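The ticker above is what paces incremental tcache GC: each allocation or deallocation ticks `gc_ticker`, which is initialized with TCACHE_GC_INCR, so for a hypothetical NBINS of 36 a GC pass fires roughly every 8192/36 + 1 = 228 events. A self-contained sketch of that countdown, restating the ticker logic without the jemalloc-internal includes:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Re-statement of the ticker logic above, minus jemalloc's unlikely(). */
typedef struct {
	int32_t tick;
	int32_t nticks;
} ticker_t;

static void
ticker_init(ticker_t *ticker, int32_t nticks) {
	ticker->tick = nticks;
	ticker->nticks = nticks;
}

static bool
ticker_tick(ticker_t *ticker) {
	if (ticker->tick < 1) {
		ticker->tick = ticker->nticks; /* trigger and rewind */
		return true;
	}
	ticker->tick -= 1;
	return false;
}

int
main(void) {
	/* TCACHE_GC_INCR for a hypothetical NBINS of 36. */
	int32_t gc_incr = 8192 / 36 + 1; /* == 228 */
	ticker_t gc_ticker;
	ticker_init(&gc_ticker, gc_incr);

	/* Each tcache alloc/dalloc would tick once; count until it fires. */
	long events = 0;
	for (;;) {
		events++;
		if (ticker_tick(&gc_ticker)) {
			break;
		}
	}
	/* Underflow semantics: fires on event nticks + 1, here 229. */
	printf("incremental GC fired on event %ld\n", events);
	return 0;
}
```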
 */
-#define MALLOC_TSD_CLEANUPS_MAX 8
+#include "jemalloc/internal/arena_types.h"
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/jemalloc_internal_externs.h"
+#include "jemalloc/internal/prof_types.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/rtree_tsd.h"
+#include "jemalloc/internal/tcache_types.h"
+#include "jemalloc/internal/tcache_structs.h"
+#include "jemalloc/internal/util.h"
+#include "jemalloc/internal/witness.h"

-typedef bool (*malloc_tsd_cleanup_t)(void);
-
-#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
- !defined(_WIN32))
-typedef struct tsd_init_block_s tsd_init_block_t;
-typedef struct tsd_init_head_s tsd_init_head_t;
+/*
+ * Thread-Specific-Data layout
+ * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
+ * s: state
+ * e: tcache_enabled
+ * m: thread_allocated (config_stats)
+ * f: thread_deallocated (config_stats)
+ * p: prof_tdata (config_prof)
+ * c: rtree_ctx (rtree cache accessed on deallocation)
+ * t: tcache
+ * --- data not accessed on tcache fast path: arena-related fields ---
+ * d: arenas_tdata_bypass
+ * r: reentrancy_level
+ * x: narenas_tdata
+ * i: iarena
+ * a: arena
+ * o: arenas_tdata
+ * Loading TSD data is on the critical path of basically all malloc operations.
+ * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
+ * Use a compact layout to reduce cache footprint.
+ * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
+ * |---------------------------- 1st cacheline ----------------------------|
+ * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
+ * |---------------------------- 2nd cacheline ----------------------------|
+ * | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
+ * |---------------------------- 3rd cacheline ----------------------------|
+ * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
+ * +-------------------------------------------------------------------------+
+ * Note: the entire tcache is embedded into TSD and spans multiple cachelines.
+ *
+ * The last 3 members (i, a and o) before tcache aren't really needed on tcache
+ * fast path. However, we have a number of unused tcache bins and witnesses
+ * (never touched unless config_debug) at the end of tcache, so we place them
+ * there to avoid breaking the cachelines and possibly paging in an extra page.
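The first-cacheline budget described above (four 1-byte flags plus a 4-byte counter, two 8-byte stats counters, one pointer, then the rtree cache) can be checked mechanically. Below is a hypothetical stand-in struct with a `static_assert`, assuming an LP64 ABI; the real `tsd_s` members are generated by the MALLOC_TSD X-macro that follows:

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Hand-written stand-in for the hot prefix of tsd_s (the real members are
 * generated by the MALLOC_TSD X-macro). Offsets assume an LP64 ABI.
 */
typedef struct {
	uint8_t  state;               /* s */
	bool     tcache_enabled;      /* e */
	bool     arenas_tdata_bypass; /* d */
	int8_t   reentrancy_level;    /* r */
	uint32_t narenas_tdata;       /* xxxx */
	uint64_t thread_allocated;    /* mmmmmmmm */
	uint64_t thread_deallocated;  /* ffffffff */
	void    *prof_tdata;          /* pppppppp */
	/* the 128-byte rtree_ctx cache (c) would start here, at offset 32 */
} toy_tsd_hot_t;

/* Everything before rtree_ctx must stay inside cacheline 0. */
static_assert(offsetof(toy_tsd_hot_t, prof_tdata) + sizeof(void *) <= 64,
    "hot TSD members spilled out of the first cacheline");

int
main(void) {
	return 0;
}
```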
+ */ +#ifdef JEMALLOC_JET +typedef void (*test_callback_t)(int *); +# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10 +# define MALLOC_TEST_TSD \ + O(test_data, int, int) \ + O(test_callback, test_callback_t, int) +# define MALLOC_TEST_TSD_INITIALIZER , MALLOC_TSD_TEST_DATA_INIT, NULL +#else +# define MALLOC_TEST_TSD +# define MALLOC_TEST_TSD_INITIALIZER #endif +/* O(name, type, nullable type */ +#define MALLOC_TSD \ + O(tcache_enabled, bool, bool) \ + O(arenas_tdata_bypass, bool, bool) \ + O(reentrancy_level, int8_t, int8_t) \ + O(narenas_tdata, uint32_t, uint32_t) \ + O(thread_allocated, uint64_t, uint64_t) \ + O(thread_deallocated, uint64_t, uint64_t) \ + O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \ + O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \ + O(iarena, arena_t *, arena_t *) \ + O(arena, arena_t *, arena_t *) \ + O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\ + O(tcache, tcache_t, tcache_t) \ + O(witness_tsd, witness_tsd_t, witness_tsdn_t) \ + MALLOC_TEST_TSD + +#define TSD_INITIALIZER { \ + tsd_state_uninitialized, \ + TCACHE_ENABLED_ZERO_INITIALIZER, \ + false, \ + 0, \ + 0, \ + 0, \ + 0, \ + NULL, \ + RTREE_CTX_ZERO_INITIALIZER, \ + NULL, \ + NULL, \ + NULL, \ + TCACHE_ZERO_INITIALIZER, \ + WITNESS_TSD_INITIALIZER \ + MALLOC_TEST_TSD_INITIALIZER \ +} + +enum { + tsd_state_nominal = 0, /* Common case --> jnz. */ + tsd_state_nominal_slow = 1, /* Initialized but on slow path. */ + /* the above 2 nominal states should be lower values. */ + tsd_state_nominal_max = 1, /* used for comparison only. */ + tsd_state_minimal_initialized = 2, + tsd_state_purgatory = 3, + tsd_state_reincarnated = 4, + tsd_state_uninitialized = 5 +}; + +/* Manually limit tsd_state_t to a single byte. */ +typedef uint8_t tsd_state_t; + +/* The actual tsd. */ +struct tsd_s { + /* + * The contents should be treated as totally opaque outside the tsd + * module. Access any thread-local state through the getters and + * setters below. + */ + tsd_state_t state; +#define O(n, t, nt) \ + t use_a_getter_or_setter_instead_##n; +MALLOC_TSD +#undef O +}; + /* - * TLS/TSD-agnostic macro-based implementation of thread-specific data. There - * are four macros that support (at least) three use cases: file-private, - * library-private, and library-private inlined. Following is an example - * library-private tsd variable: - * - * In example.h: - * typedef struct { - * int x; - * int y; - * } example_t; - * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) - * malloc_tsd_protos(, example, example_t *) - * malloc_tsd_externs(example, example_t *) - * In example.c: - * malloc_tsd_data(, example, example_t *, EX_INITIALIZER) - * malloc_tsd_funcs(, example, example_t *, EX_INITIALIZER, - * example_tsd_cleanup) - * - * The result is a set of generated functions, e.g.: - * - * bool example_tsd_boot(void) {...} - * example_t **example_tsd_get() {...} - * void example_tsd_set(example_t **val) {...} - * - * Note that all of the functions deal in terms of (a_type *) rather than - * (a_type) so that it is possible to support non-pointer types (unlike - * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is - * cast to (void *). This means that the cleanup function needs to cast *and* - * dereference the function argument, e.g.: - * - * void - * example_tsd_cleanup(void *arg) - * { - * example_t *example = *(example_t **)arg; - * - * [...] 
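MALLOC_TSD is a classic X-macro: one table row per field, expanded once under a local `#define O(...)` to emit the deliberately awkward struct member names, then re-expanded to emit the typed accessors seen further down. A minimal sketch of the same technique with invented field names (the real O() takes a third "nullable type" parameter used by the tsdn getters):

```c
#include <stdint.h>
#include <stdio.h>

/* One table of (name, type) rows; each expansion of O() reinterprets it. */
#define TOY_TSD \
	O(thread_allocated, uint64_t) \
	O(thread_deallocated, uint64_t)

typedef struct {
#define O(n, t) t n##_field;
TOY_TSD
#undef O
} toy_tsd_t;

/* Generate toy_tsd_<name>_get() and toy_tsd_<name>_set() for each row. */
#define O(n, t) \
static inline t toy_tsd_##n##_get(toy_tsd_t *tsd) { \
	return tsd->n##_field; \
} \
static inline void toy_tsd_##n##_set(toy_tsd_t *tsd, t val) { \
	tsd->n##_field = val; \
}
TOY_TSD
#undef O

int
main(void) {
	toy_tsd_t tsd = {0};
	toy_tsd_thread_allocated_set(&tsd, 4096);
	printf("allocated=%llu\n",
	    (unsigned long long)toy_tsd_thread_allocated_get(&tsd));
	return 0;
}
```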
- * if ([want the cleanup function to be called again]) { - * example_tsd_set(&example); - * } - * } - * - * If example_tsd_set() is called within example_tsd_cleanup(), it will be - * called again. This is similar to how pthreads TSD destruction works, except - * that pthreads only calls the cleanup function again if the value was set to - * non-NULL. + * Wrapper around tsd_t that makes it possible to avoid implicit conversion + * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be + * explicitly converted to tsd_t, which is non-nullable. */ +struct tsdn_s { + tsd_t tsd; +}; +#define TSDN_NULL ((tsdn_t *)0) +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsd_tsdn(tsd_t *tsd) { + return (tsdn_t *)tsd; +} -/* malloc_tsd_protos(). */ -#define malloc_tsd_protos(a_attr, a_name, a_type) \ -a_attr bool \ -a_name##_tsd_boot(void); \ -a_attr a_type * \ -a_name##_tsd_get(void); \ -a_attr void \ -a_name##_tsd_set(a_type *val); +JEMALLOC_ALWAYS_INLINE bool +tsdn_null(const tsdn_t *tsdn) { + return tsdn == NULL; +} -/* malloc_tsd_externs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##_tls; \ -extern __thread bool a_name##_initialized; \ -extern bool a_name##_booted; -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_externs(a_name, a_type) \ -extern __thread a_type a_name##_tls; \ -extern pthread_key_t a_name##_tsd; \ -extern bool a_name##_booted; -#elif (defined(_WIN32)) -#define malloc_tsd_externs(a_name, a_type) \ -extern DWORD a_name##_tsd; \ -extern bool a_name##_booted; -#else -#define malloc_tsd_externs(a_name, a_type) \ -extern pthread_key_t a_name##_tsd; \ -extern tsd_init_head_t a_name##_tsd_init_head; \ -extern bool a_name##_booted; -#endif +JEMALLOC_ALWAYS_INLINE tsd_t * +tsdn_tsd(tsdn_t *tsdn) { + assert(!tsdn_null(tsdn)); + + return &tsdn->tsd; +} + +void *malloc_tsd_malloc(size_t size); +void malloc_tsd_dalloc(void *wrapper); +void malloc_tsd_cleanup_register(bool (*f)(void)); +tsd_t *malloc_tsd_boot0(void); +void malloc_tsd_boot1(void); +void tsd_cleanup(void *arg); +tsd_t *tsd_fetch_slow(tsd_t *tsd, bool internal); +void tsd_slow_update(tsd_t *tsd); -/* malloc_tsd_data(). */ +/* + * We put the platform-specific data declarations and inlines into their own + * header files to avoid cluttering this file. They define tsd_boot0, + * tsd_boot1, tsd_boot, tsd_booted_get, tsd_get_allocates, tsd_get, and tsd_set. 
+ */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##_tls = a_initializer; \ -a_attr __thread bool JEMALLOC_TLS_MODEL \ - a_name##_initialized = false; \ -a_attr bool a_name##_booted = false; +#include "jemalloc/internal/tsd_malloc_thread_cleanup.h" #elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr __thread a_type JEMALLOC_TLS_MODEL \ - a_name##_tls = a_initializer; \ -a_attr pthread_key_t a_name##_tsd; \ -a_attr bool a_name##_booted = false; +#include "jemalloc/internal/tsd_tls.h" #elif (defined(_WIN32)) -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr DWORD a_name##_tsd; \ -a_attr bool a_name##_booted = false; +#include "jemalloc/internal/tsd_win.h" #else -#define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ -a_attr pthread_key_t a_name##_tsd; \ -a_attr tsd_init_head_t a_name##_tsd_init_head = { \ - ql_head_initializer(blocks), \ - MALLOC_MUTEX_INITIALIZER \ -}; \ -a_attr bool a_name##_booted = false; +#include "jemalloc/internal/tsd_generic.h" #endif -/* malloc_tsd_funcs(). */ -#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_cleanup_wrapper(void) \ -{ \ - \ - if (a_name##_initialized) { \ - a_name##_initialized = false; \ - a_cleanup(&a_name##_tls); \ - } \ - return (a_name##_initialized); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##_tsd_cleanup_wrapper); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - \ - assert(a_name##_booted); \ - return (&a_name##_tls); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##_booted); \ - a_name##_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - a_name##_initialized = true; \ +/* + * tsd_foop_get_unsafe(tsd) returns a pointer to the thread-local instance of + * foo. This omits some safety checks, and so can be used during tsd + * initialization and cleanup. + */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t * \ +tsd_##n##p_get_unsafe(tsd_t *tsd) { \ + return &tsd->use_a_getter_or_setter_instead_##n; \ } -#elif (defined(JEMALLOC_TLS)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_key_create(&a_name##_tsd, a_cleanup) != 0) \ - return (true); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - \ - assert(a_name##_booted); \ - return (&a_name##_tls); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - \ - assert(a_name##_booted); \ - a_name##_tls = (*val); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)(&a_name##_tls))) { \ - malloc_write("<jemalloc>: Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - } \ +MALLOC_TSD +#undef O + +/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. 
*/ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t * \ +tsd_##n##p_get(tsd_t *tsd) { \ + assert(tsd->state == tsd_state_nominal || \ + tsd->state == tsd_state_nominal_slow || \ + tsd->state == tsd_state_reincarnated || \ + tsd->state == tsd_state_minimal_initialized); \ + return tsd_##n##p_get_unsafe(tsd); \ } -#elif (defined(_WIN32)) -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Data structure. */ \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##_tsd_wrapper_t; \ -/* Initialization/cleanup. */ \ -a_attr bool \ -a_name##_tsd_cleanup_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - wrapper = (a_name##_tsd_wrapper_t *) TlsGetValue(a_name##_tsd); \ - if (wrapper == NULL) \ - return (false); \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - a_type val = wrapper->val; \ - a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - a_cleanup(&val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - return (true); \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ - return (false); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - a_name##_tsd = TlsAlloc(); \ - if (a_name##_tsd == TLS_OUT_OF_INDEXES) \ - return (true); \ - if (a_cleanup != malloc_tsd_no_cleanup) { \ - malloc_tsd_cleanup_register( \ - &a_name##_tsd_cleanup_wrapper); \ - } \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_name##_tsd_wrapper_t * \ -a_name##_tsd_get_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ - TlsGetValue(a_name##_tsd); \ - \ - if (wrapper == NULL) { \ - wrapper = (a_name##_tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ - if (wrapper == NULL) { \ - malloc_write("<jemalloc>: Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - static a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - } \ - if (!TlsSetValue(a_name##_tsd, (void *)wrapper)) { \ - malloc_write("<jemalloc>: Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ +MALLOC_TSD +#undef O + +/* + * tsdn_foop_get(tsdn) returns either the thread-local instance of foo (if tsdn + * isn't NULL), or NULL (if tsdn is NULL), cast to the nullable pointer type. + */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE nt * \ +tsdn_##n##p_get(tsdn_t *tsdn) { \ + if (tsdn_null(tsdn)) { \ + return NULL; \ } \ - return (wrapper); \ -} \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ + tsd_t *tsd = tsdn_tsd(tsdn); \ + return (nt *)tsd_##n##p_get(tsd); \ } -#else -#define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ - a_cleanup) \ -/* Data structure. */ \ -typedef struct { \ - bool initialized; \ - a_type val; \ -} a_name##_tsd_wrapper_t; \ -/* Initialization/cleanup. 
*/ \ -a_attr void \ -a_name##_tsd_cleanup_wrapper(void *arg) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *)arg;\ - \ - if (a_cleanup != malloc_tsd_no_cleanup && \ - wrapper->initialized) { \ - wrapper->initialized = false; \ - a_cleanup(&wrapper->val); \ - if (wrapper->initialized) { \ - /* Trigger another cleanup round. */ \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)wrapper)) { \ - malloc_write("<jemalloc>: Error" \ - " setting TSD for "#a_name"\n"); \ - if (opt_abort) \ - abort(); \ - } \ - return; \ - } \ - } \ - malloc_tsd_dalloc(wrapper); \ -} \ -a_attr bool \ -a_name##_tsd_boot(void) \ -{ \ - \ - if (pthread_key_create(&a_name##_tsd, \ - a_name##_tsd_cleanup_wrapper) != 0) \ - return (true); \ - a_name##_booted = true; \ - return (false); \ -} \ -/* Get/set. */ \ -a_attr a_name##_tsd_wrapper_t * \ -a_name##_tsd_get_wrapper(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper = (a_name##_tsd_wrapper_t *) \ - pthread_getspecific(a_name##_tsd); \ - \ - if (wrapper == NULL) { \ - tsd_init_block_t block; \ - wrapper = tsd_init_check_recursion( \ - &a_name##_tsd_init_head, &block); \ - if (wrapper) \ - return (wrapper); \ - wrapper = (a_name##_tsd_wrapper_t *) \ - malloc_tsd_malloc(sizeof(a_name##_tsd_wrapper_t)); \ - block.data = wrapper; \ - if (wrapper == NULL) { \ - malloc_write("<jemalloc>: Error allocating" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } else { \ - static a_type tsd_static_data = a_initializer; \ - wrapper->initialized = false; \ - wrapper->val = tsd_static_data; \ - } \ - if (pthread_setspecific(a_name##_tsd, \ - (void *)wrapper)) { \ - malloc_write("<jemalloc>: Error setting" \ - " TSD for "#a_name"\n"); \ - abort(); \ - } \ - tsd_init_finish(&a_name##_tsd_init_head, &block); \ - } \ - return (wrapper); \ -} \ -a_attr a_type * \ -a_name##_tsd_get(void) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - return (&wrapper->val); \ -} \ -a_attr void \ -a_name##_tsd_set(a_type *val) \ -{ \ - a_name##_tsd_wrapper_t *wrapper; \ - \ - assert(a_name##_booted); \ - wrapper = a_name##_tsd_get_wrapper(); \ - wrapper->val = *(val); \ - if (a_cleanup != malloc_tsd_no_cleanup) \ - wrapper->initialized = true; \ +MALLOC_TSD +#undef O + +/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE t \ +tsd_##n##_get(tsd_t *tsd) { \ + return *tsd_##n##p_get(tsd); \ } -#endif +MALLOC_TSD +#undef O -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS +/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. 
*/ +#define O(n, t, nt) \ +JEMALLOC_ALWAYS_INLINE void \ +tsd_##n##_set(tsd_t *tsd, t val) { \ + assert(tsd->state != tsd_state_reincarnated && \ + tsd->state != tsd_state_minimal_initialized); \ + *tsd_##n##p_get(tsd) = val; \ +} +MALLOC_TSD +#undef O -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -struct tsd_init_block_s { - ql_elm(tsd_init_block_t) link; - pthread_t thread; - void *data; -}; -struct tsd_init_head_s { - ql_head(tsd_init_block_t) blocks; - malloc_mutex_t lock; -}; -#endif +JEMALLOC_ALWAYS_INLINE void +tsd_assert_fast(tsd_t *tsd) { + assert(!malloc_slow && tsd_tcache_enabled_get(tsd) && + tsd_reentrancy_level_get(tsd) == 0); +} -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -void *malloc_tsd_malloc(size_t size); -void malloc_tsd_dalloc(void *wrapper); -void malloc_tsd_no_cleanup(void *); -void malloc_tsd_cleanup_register(bool (*f)(void)); -void malloc_tsd_boot(void); -#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ - !defined(_WIN32)) -void *tsd_init_check_recursion(tsd_init_head_t *head, - tsd_init_block_t *block); -void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); -#endif +JEMALLOC_ALWAYS_INLINE bool +tsd_fast(tsd_t *tsd) { + bool fast = (tsd->state == tsd_state_nominal); + if (fast) { + tsd_assert_fast(tsd); + } + + return fast; +} -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_impl(bool init, bool minimal) { + tsd_t *tsd = tsd_get(init); + + if (!init && tsd_get_allocates() && tsd == NULL) { + return NULL; + } + assert(tsd != NULL); + + if (unlikely(tsd->state != tsd_state_nominal)) { + return tsd_fetch_slow(tsd, minimal); + } + assert(tsd_fast(tsd)); + tsd_assert_fast(tsd); + + return tsd; +} + +/* Get a minimal TSD that requires no cleanup. See comments in free(). */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch_min(void) { + return tsd_fetch_impl(true, true); +} + +/* For internal background threads use only. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_internal_fetch(void) { + tsd_t *tsd = tsd_fetch_min(); + /* Use reincarnated state to prevent full initialization. */ + tsd->state = tsd_state_reincarnated; + + return tsd; +} + +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_fetch(void) { + return tsd_fetch_impl(true, false); +} + +static inline bool +tsd_nominal(tsd_t *tsd) { + return (tsd->state <= tsd_state_nominal_max); +} + +JEMALLOC_ALWAYS_INLINE tsdn_t * +tsdn_fetch(void) { + if (!tsd_booted_get()) { + return NULL; + } + + return tsd_tsdn(tsd_fetch_impl(false, false)); +} + +JEMALLOC_ALWAYS_INLINE rtree_ctx_t * +tsd_rtree_ctx(tsd_t *tsd) { + return tsd_rtree_ctxp_get(tsd); +} + +JEMALLOC_ALWAYS_INLINE rtree_ctx_t * +tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) { + /* + * If tsd cannot be accessed, initialize the fallback rtree_ctx and + * return a pointer to it. 
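tsdn_t exists so that "tsd or NULL" can be passed around without an implicit conversion hiding a NULL; tsdn_rtree_ctx() here is the canonical consumer, falling back to a caller-provided context when TSD is unavailable (e.g. before boot). A sketch of that pattern with hypothetical stand-in types:

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for tsd_t / tsdn_t and the embedded rtree cache. */
typedef struct { int rtree_cache[4]; } toy_tsd_t;
typedef struct { toy_tsd_t tsd; } toy_tsdn_t; /* "maybe-NULL tsd" wrapper */

/* tsd -> tsdn is always safe, so it is a plain cast... */
static toy_tsdn_t *
toy_tsd_tsdn(toy_tsd_t *tsd) {
	return (toy_tsdn_t *)tsd;
}

/* ...while tsdn -> tsd must assert non-NULL, exactly like tsdn_tsd(). */
static toy_tsd_t *
toy_tsdn_tsd(toy_tsdn_t *tsdn) {
	assert(tsdn != NULL);
	return &tsdn->tsd;
}

/* The tsdn_rtree_ctx() fallback pattern: callers without TSD still get a
 * usable, caller-provided context. */
static int *
toy_rtree_ctx(toy_tsdn_t *tsdn, int *fallback) {
	if (tsdn == NULL) {
		fallback[0] = 0; /* stands in for rtree_ctx_data_init() */
		return fallback;
	}
	return toy_tsdn_tsd(tsdn)->rtree_cache;
}

int
main(void) {
	toy_tsd_t tsd = {{0}};
	int fallback[4];

	/* With TSD available, the embedded per-thread cache is used. */
	assert(toy_rtree_ctx(toy_tsd_tsdn(&tsd), fallback) == tsd.rtree_cache);
	/* Without TSD (e.g. very early in process start-up), fall back. */
	assert(toy_rtree_ctx(NULL, fallback) == fallback);
	puts("ok");
	return 0;
}
```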
+ */ + if (unlikely(tsdn_null(tsdn))) { + rtree_ctx_data_init(fallback); + return fallback; + } + return tsd_rtree_ctx(tsdn_tsd(tsdn)); +} -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#endif /* JEMALLOC_INTERNAL_TSD_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/tsd_generic.h b/dep/jemalloc/include/jemalloc/internal/tsd_generic.h new file mode 100644 index 00000000000..1e52ef767f1 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/tsd_generic.h @@ -0,0 +1,157 @@ +#ifdef JEMALLOC_INTERNAL_TSD_GENERIC_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_GENERIC_H + +typedef struct tsd_init_block_s tsd_init_block_t; +struct tsd_init_block_s { + ql_elm(tsd_init_block_t) link; + pthread_t thread; + void *data; +}; + +/* Defined in tsd.c, to allow the mutex headers to have tsd dependencies. */ +typedef struct tsd_init_head_s tsd_init_head_t; + +typedef struct { + bool initialized; + tsd_t val; +} tsd_wrapper_t; + +void *tsd_init_check_recursion(tsd_init_head_t *head, + tsd_init_block_t *block); +void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); + +extern pthread_key_t tsd_tsd; +extern tsd_init_head_t tsd_init_head; +extern tsd_wrapper_t tsd_boot_wrapper; +extern bool tsd_booted; + +/* Initialization/cleanup. */ +JEMALLOC_ALWAYS_INLINE void +tsd_cleanup_wrapper(void *arg) { + tsd_wrapper_t *wrapper = (tsd_wrapper_t *)arg; + + if (wrapper->initialized) { + wrapper->initialized = false; + tsd_cleanup(&wrapper->val); + if (wrapper->initialized) { + /* Trigger another cleanup round. */ + if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) + { + malloc_write("<jemalloc>: Error setting TSD\n"); + if (opt_abort) { + abort(); + } + } + return; + } + } + malloc_tsd_dalloc(wrapper); +} + +JEMALLOC_ALWAYS_INLINE void +tsd_wrapper_set(tsd_wrapper_t *wrapper) { + if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) { + malloc_write("<jemalloc>: Error setting TSD\n"); + abort(); + } +} + +JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * +tsd_wrapper_get(bool init) { + tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd); + + if (init && unlikely(wrapper == NULL)) { + tsd_init_block_t block; + wrapper = (tsd_wrapper_t *) + tsd_init_check_recursion(&tsd_init_head, &block); + if (wrapper) { + return wrapper; + } + wrapper = (tsd_wrapper_t *) + malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + block.data = (void *)wrapper; + if (wrapper == NULL) { + malloc_write("<jemalloc>: Error allocating TSD\n"); + abort(); + } else { + wrapper->initialized = false; + tsd_t initializer = TSD_INITIALIZER; + wrapper->val = initializer; + } + tsd_wrapper_set(wrapper); + tsd_init_finish(&tsd_init_head, &block); + } + return wrapper; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) { + return true; + } + tsd_wrapper_set(&tsd_boot_wrapper); + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + tsd_wrapper_t *wrapper; + wrapper = (tsd_wrapper_t *)malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + if (wrapper == NULL) { + malloc_write("<jemalloc>: Error allocating TSD\n"); + abort(); + } + tsd_boot_wrapper.initialized = false; + tsd_cleanup(&tsd_boot_wrapper.val); + wrapper->initialized = false; + tsd_t initializer = TSD_INITIALIZER; + wrapper->val = initializer; + tsd_wrapper_set(wrapper); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + if (tsd_boot0()) { + return true; + 
} + tsd_boot1(); + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return true; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(init); + if (tsd_get_allocates() && !init && wrapper == NULL) { + return NULL; + } + return &wrapper->val; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(true); + if (likely(&wrapper->val != val)) { + wrapper->val = *(val); + } + wrapper->initialized = true; +} diff --git a/dep/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/dep/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h new file mode 100644 index 00000000000..beb467a67e9 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h @@ -0,0 +1,60 @@ +#ifdef JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_MALLOC_THREAD_CLEANUP_H + +extern __thread tsd_t tsd_tls; +extern __thread bool tsd_initialized; +extern bool tsd_booted; + +/* Initialization/cleanup. */ +JEMALLOC_ALWAYS_INLINE bool +tsd_cleanup_wrapper(void) { + if (tsd_initialized) { + tsd_initialized = false; + tsd_cleanup(&tsd_tls); + } + return tsd_initialized; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + /* Do nothing. */ +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + return tsd_boot0(); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return false; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + assert(tsd_booted); + return &tsd_tls; +} +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + assert(tsd_booted); + if (likely(&tsd_tls != val)) { + tsd_tls = (*val); + } + tsd_initialized = true; +} diff --git a/dep/jemalloc/include/jemalloc/internal/tsd_tls.h b/dep/jemalloc/include/jemalloc/internal/tsd_tls.h new file mode 100644 index 00000000000..757aaa0eeff --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/tsd_tls.h @@ -0,0 +1,59 @@ +#ifdef JEMALLOC_INTERNAL_TSD_TLS_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_TLS_H + +extern __thread tsd_t tsd_tls; +extern pthread_key_t tsd_tsd; +extern bool tsd_booted; + +/* Initialization/cleanup. */ +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + if (pthread_key_create(&tsd_tsd, &tsd_cleanup) != 0) { + return true; + } + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + /* Do nothing. */ +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + return tsd_boot0(); +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return false; +} + +/* Get/set. 
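tsd_generic.h builds everything on one POSIX facility: a `pthread_key_t` whose destructor receives the thread's last-stored value at thread exit, which is what drives tsd_cleanup_wrapper(). A minimal standalone demonstration of that mechanism:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* One process-wide key; its destructor runs at each thread's exit with the
 * thread's last-stored value, mirroring how tsd_cleanup_wrapper() is driven. */
static pthread_key_t key;

static void
dtor(void *arg) {
	printf("thread exiting, cleaning up %p\n", arg);
	free(arg);
}

static void *
worker(void *unused) {
	(void)unused;
	void *wrapper = malloc(32); /* per-thread state, cf. tsd_wrapper_t */
	pthread_setspecific(key, wrapper);
	return NULL; /* dtor(wrapper) fires after this returns */
}

int
main(void) {
	pthread_t thread;

	pthread_key_create(&key, dtor);
	pthread_create(&thread, NULL, worker, NULL);
	pthread_join(thread, NULL);
	pthread_key_delete(&key);
	return 0;
}
```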
*/ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + assert(tsd_booted); + return &tsd_tls; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + assert(tsd_booted); + if (likely(&tsd_tls != val)) { + tsd_tls = (*val); + } + if (pthread_setspecific(tsd_tsd, (void *)(&tsd_tls)) != 0) { + malloc_write("<jemalloc>: Error setting tsd.\n"); + if (opt_abort) { + abort(); + } + } +} diff --git a/dep/jemalloc/include/jemalloc/internal/tsd_types.h b/dep/jemalloc/include/jemalloc/internal/tsd_types.h new file mode 100644 index 00000000000..6200af61f3d --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/tsd_types.h @@ -0,0 +1,10 @@ +#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H +#define JEMALLOC_INTERNAL_TSD_TYPES_H + +#define MALLOC_TSD_CLEANUPS_MAX 2 + +typedef struct tsd_s tsd_t; +typedef struct tsdn_s tsdn_t; +typedef bool (*malloc_tsd_cleanup_t)(void); + +#endif /* JEMALLOC_INTERNAL_TSD_TYPES_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/tsd_win.h b/dep/jemalloc/include/jemalloc/internal/tsd_win.h new file mode 100644 index 00000000000..cf30d18e3c6 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/tsd_win.h @@ -0,0 +1,139 @@ +#ifdef JEMALLOC_INTERNAL_TSD_WIN_H +#error This file should be included only once, by tsd.h. +#endif +#define JEMALLOC_INTERNAL_TSD_WIN_H + +typedef struct { + bool initialized; + tsd_t val; +} tsd_wrapper_t; + +extern DWORD tsd_tsd; +extern tsd_wrapper_t tsd_boot_wrapper; +extern bool tsd_booted; + +/* Initialization/cleanup. */ +JEMALLOC_ALWAYS_INLINE bool +tsd_cleanup_wrapper(void) { + DWORD error = GetLastError(); + tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd); + SetLastError(error); + + if (wrapper == NULL) { + return false; + } + + if (wrapper->initialized) { + wrapper->initialized = false; + tsd_cleanup(&wrapper->val); + if (wrapper->initialized) { + /* Trigger another cleanup round. */ + return true; + } + } + malloc_tsd_dalloc(wrapper); + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_wrapper_set(tsd_wrapper_t *wrapper) { + if (!TlsSetValue(tsd_tsd, (void *)wrapper)) { + malloc_write("<jemalloc>: Error setting TSD\n"); + abort(); + } +} + +JEMALLOC_ALWAYS_INLINE tsd_wrapper_t * +tsd_wrapper_get(bool init) { + DWORD error = GetLastError(); + tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd); + SetLastError(error); + + if (init && unlikely(wrapper == NULL)) { + wrapper = (tsd_wrapper_t *) + malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + if (wrapper == NULL) { + malloc_write("<jemalloc>: Error allocating TSD\n"); + abort(); + } else { + wrapper->initialized = false; + /* MSVC is finicky about aggregate initialization. 
*/ + tsd_t tsd_initializer = TSD_INITIALIZER; + wrapper->val = tsd_initializer; + } + tsd_wrapper_set(wrapper); + } + return wrapper; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_boot0(void) { + tsd_tsd = TlsAlloc(); + if (tsd_tsd == TLS_OUT_OF_INDEXES) { + return true; + } + malloc_tsd_cleanup_register(&tsd_cleanup_wrapper); + tsd_wrapper_set(&tsd_boot_wrapper); + tsd_booted = true; + return false; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_boot1(void) { + tsd_wrapper_t *wrapper; + wrapper = (tsd_wrapper_t *) + malloc_tsd_malloc(sizeof(tsd_wrapper_t)); + if (wrapper == NULL) { + malloc_write("<jemalloc>: Error allocating TSD\n"); + abort(); + } + tsd_boot_wrapper.initialized = false; + tsd_cleanup(&tsd_boot_wrapper.val); + wrapper->initialized = false; + tsd_t initializer = TSD_INITIALIZER; + wrapper->val = initializer; + tsd_wrapper_set(wrapper); +} +JEMALLOC_ALWAYS_INLINE bool +tsd_boot(void) { + if (tsd_boot0()) { + return true; + } + tsd_boot1(); + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_booted_get(void) { + return tsd_booted; +} + +JEMALLOC_ALWAYS_INLINE bool +tsd_get_allocates(void) { + return true; +} + +/* Get/set. */ +JEMALLOC_ALWAYS_INLINE tsd_t * +tsd_get(bool init) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(init); + if (tsd_get_allocates() && !init && wrapper == NULL) { + return NULL; + } + return &wrapper->val; +} + +JEMALLOC_ALWAYS_INLINE void +tsd_set(tsd_t *val) { + tsd_wrapper_t *wrapper; + + assert(tsd_booted); + wrapper = tsd_wrapper_get(true); + if (likely(&wrapper->val != val)) { + wrapper->val = *(val); + } + wrapper->initialized = true; +} diff --git a/dep/jemalloc/include/jemalloc/internal/util.h b/dep/jemalloc/include/jemalloc/internal/util.h index 6b938f74688..304cb545afc 100644 --- a/dep/jemalloc/include/jemalloc/internal/util.h +++ b/dep/jemalloc/include/jemalloc/internal/util.h @@ -1,143 +1,50 @@ -/******************************************************************************/ -#ifdef JEMALLOC_H_TYPES +#ifndef JEMALLOC_INTERNAL_UTIL_H +#define JEMALLOC_INTERNAL_UTIL_H -/* Size of stack-allocated buffer passed to buferror(). */ -#define BUFERROR_BUF 64 +#define UTIL_INLINE static inline -/* - * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be - * large enough for all possible uses within jemalloc. - */ -#define MALLOC_PRINTF_BUFSIZE 4096 +/* Junk fill patterns. */ +#ifndef JEMALLOC_ALLOC_JUNK +# define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) +#endif +#ifndef JEMALLOC_FREE_JUNK +# define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) +#endif /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ -#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ +#define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ + +/* cpp macro definition stringification. */ +#define STRINGIFY_HELPER(x) #x +#define STRINGIFY(x) STRINGIFY_HELPER(x) /* * Silence compiler warnings due to uninitialized values. This is used * wherever the compiler fails to recognize that the variable is never used * uninitialized. */ -#ifdef JEMALLOC_CC_SILENCE -# define JEMALLOC_CC_SILENCE_INIT(v) = v -#else -# define JEMALLOC_CC_SILENCE_INIT(v) -#endif - -/* - * Define a custom assert() in order to reduce the chances of deadlock during - * assertion failure. 
- */ -#ifndef assert -#define assert(e) do { \ - if (config_debug && !(e)) { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n", \ - __FILE__, __LINE__, #e); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef not_reached -#define not_reached() do { \ - if (config_debug) { \ - malloc_printf( \ - "<jemalloc>: %s:%d: Unreachable code reached\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif - -#ifndef not_implemented -#define not_implemented() do { \ - if (config_debug) { \ - malloc_printf("<jemalloc>: %s:%d: Not implemented\n", \ - __FILE__, __LINE__); \ - abort(); \ - } \ -} while (0) -#endif +#define JEMALLOC_CC_SILENCE_INIT(v) = v -#ifndef assert_not_implemented -#define assert_not_implemented(e) do { \ - if (config_debug && !(e)) \ - not_implemented(); \ -} while (0) -#endif - -/* Use to assert a particular configuration, e.g., cassert(config_debug). */ -#define cassert(c) do { \ - if ((c) == false) \ - not_reached(); \ -} while (0) - -#endif /* JEMALLOC_H_TYPES */ -/******************************************************************************/ -#ifdef JEMALLOC_H_STRUCTS - -#endif /* JEMALLOC_H_STRUCTS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_EXTERNS - -int buferror(int err, char *buf, size_t buflen); -uintmax_t malloc_strtoumax(const char *restrict nptr, - char **restrict endptr, int base); -void malloc_write(const char *s); - -/* - * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating - * point math. - */ -int malloc_vsnprintf(char *str, size_t size, const char *format, - va_list ap); -int malloc_snprintf(char *str, size_t size, const char *format, ...) - JEMALLOC_ATTR(format(printf, 3, 4)); -void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap); -void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, - const char *format, ...) JEMALLOC_ATTR(format(printf, 3, 4)); -void malloc_printf(const char *format, ...) - JEMALLOC_ATTR(format(printf, 1, 2)); - -#endif /* JEMALLOC_H_EXTERNS */ -/******************************************************************************/ -#ifdef JEMALLOC_H_INLINES - -#ifndef JEMALLOC_ENABLE_INLINE -size_t pow2_ceil(size_t x); -void set_errno(int errnum); -int get_errno(void); +#ifdef __GNUC__ +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#else +# define likely(x) !!(x) +# define unlikely(x) !!(x) #endif -#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) -/* Compute the smallest power of 2 that is >= x. */ -JEMALLOC_INLINE size_t -pow2_ceil(size_t x) -{ - - x--; - x |= x >> 1; - x |= x >> 2; - x |= x >> 4; - x |= x >> 8; - x |= x >> 16; -#if (LG_SIZEOF_PTR == 3) - x |= x >> 32; +#if !defined(JEMALLOC_INTERNAL_UNREACHABLE) +# error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure #endif - x++; - return (x); -} -/* Sets error code */ -JEMALLOC_INLINE void -set_errno(int errnum) -{ +#define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() +/* Set error code. */ +UTIL_INLINE void +set_errno(int errnum) { #ifdef _WIN32 SetLastError(errnum); #else @@ -145,18 +52,16 @@ set_errno(int errnum) #endif } -/* Get last error code */ -JEMALLOC_INLINE int -get_errno(void) -{ - +/* Get last error code. 
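The likely()/unlikely() hints defined above compile to `__builtin_expect` on GCC and Clang and appear on nearly every error check in the new headers, keeping miss and failure paths out of the straight-line code. A small usage sketch:

```c
#include <stdio.h>
#include <stdlib.h>

/* Same definitions as util.h above. */
#ifdef __GNUC__
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
#endif

/* The hint lets the compiler move the failure branch out of line, the same
 * way tcache_alloc_small() treats a cache miss as the cold path. */
static void *
checked_alloc(size_t size) {
	void *p = malloc(size);
	if (unlikely(p == NULL)) {
		fputs("out of memory\n", stderr);
		abort();
	}
	return p;
}

int
main(void) {
	free(checked_alloc(64));
	return 0;
}
```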
*/ +UTIL_INLINE int +get_errno(void) { #ifdef _WIN32 - return (GetLastError()); + return GetLastError(); #else - return (errno); + return errno; #endif } -#endif -#endif /* JEMALLOC_H_INLINES */ -/******************************************************************************/ +#undef UTIL_INLINE + +#endif /* JEMALLOC_INTERNAL_UTIL_H */ diff --git a/dep/jemalloc/include/jemalloc/internal/witness.h b/dep/jemalloc/include/jemalloc/internal/witness.h new file mode 100644 index 00000000000..33be6661071 --- /dev/null +++ b/dep/jemalloc/include/jemalloc/internal/witness.h @@ -0,0 +1,346 @@ +#ifndef JEMALLOC_INTERNAL_WITNESS_H +#define JEMALLOC_INTERNAL_WITNESS_H + +#include "jemalloc/internal/ql.h" + +/******************************************************************************/ +/* LOCK RANKS */ +/******************************************************************************/ + +/* + * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness + * machinery. + */ + +#define WITNESS_RANK_OMIT 0U + +#define WITNESS_RANK_MIN 1U + +#define WITNESS_RANK_INIT 1U +#define WITNESS_RANK_CTL 1U +#define WITNESS_RANK_TCACHES 2U +#define WITNESS_RANK_ARENAS 3U + +#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U + +#define WITNESS_RANK_PROF_DUMP 5U +#define WITNESS_RANK_PROF_BT2GCTX 6U +#define WITNESS_RANK_PROF_TDATAS 7U +#define WITNESS_RANK_PROF_TDATA 8U +#define WITNESS_RANK_PROF_GCTX 9U + +#define WITNESS_RANK_BACKGROUND_THREAD 10U + +/* + * Used as an argument to witness_assert_depth_to_rank() in order to validate + * depth excluding non-core locks with lower ranks. Since the rank argument to + * witness_assert_depth_to_rank() is inclusive rather than exclusive, this + * definition can have the same value as the minimally ranked core lock. + */ +#define WITNESS_RANK_CORE 11U + +#define WITNESS_RANK_DECAY 11U +#define WITNESS_RANK_TCACHE_QL 12U +#define WITNESS_RANK_EXTENT_GROW 13U +#define WITNESS_RANK_EXTENTS 14U +#define WITNESS_RANK_EXTENT_AVAIL 15U + +#define WITNESS_RANK_EXTENT_POOL 16U +#define WITNESS_RANK_RTREE 17U +#define WITNESS_RANK_BASE 18U +#define WITNESS_RANK_ARENA_LARGE 19U + +#define WITNESS_RANK_LEAF 0xffffffffU +#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF +#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF +#define WITNESS_RANK_DSS WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF +#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF + +/******************************************************************************/ +/* PER-WITNESS DATA */ +/******************************************************************************/ +#if defined(JEMALLOC_DEBUG) +# define WITNESS_INITIALIZER(name, rank) {name, rank, NULL, NULL, {NULL, NULL}} +#else +# define WITNESS_INITIALIZER(name, rank) +#endif + +typedef struct witness_s witness_t; +typedef unsigned witness_rank_t; +typedef ql_head(witness_t) witness_list_t; +typedef int witness_comp_t (const witness_t *, void *, const witness_t *, + void *); + +struct witness_s { + /* Name, used for printing lock order reversal messages. */ + const char *name; + + /* + * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses + * must be acquired in order of increasing rank. 
+ */
+ witness_rank_t rank;
+
+ /*
+ * If two witnesses are of equal rank and they have the same comp
+ * function pointer, it is called as a last attempt to differentiate
+ * between witnesses of equal rank.
+ */
+ witness_comp_t *comp;
+
+ /* Opaque data, passed to comp(). */
+ void *opaque;
+
+ /* Linkage for thread's currently owned locks. */
+ ql_elm(witness_t) link;
+};
+
+/******************************************************************************/
+/* PER-THREAD DATA */
+/******************************************************************************/
+typedef struct witness_tsd_s witness_tsd_t;
+struct witness_tsd_s {
+ witness_list_t witnesses;
+ bool forking;
+};
+
+#define WITNESS_TSD_INITIALIZER { ql_head_initializer(witnesses), false }
+#define WITNESS_TSDN_NULL ((witness_tsdn_t *)0)
+
+/******************************************************************************/
+/* (PER-THREAD) NULLABILITY HELPERS */
+/******************************************************************************/
+typedef struct witness_tsdn_s witness_tsdn_t;
+struct witness_tsdn_s {
+ witness_tsd_t witness_tsd;
+};
+
+JEMALLOC_ALWAYS_INLINE witness_tsdn_t *
+witness_tsd_tsdn(witness_tsd_t *witness_tsd) {
+ return (witness_tsdn_t *)witness_tsd;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+witness_tsdn_null(witness_tsdn_t *witness_tsdn) {
+ return witness_tsdn == NULL;
+}
+
+JEMALLOC_ALWAYS_INLINE witness_tsd_t *
+witness_tsdn_tsd(witness_tsdn_t *witness_tsdn) {
+ assert(!witness_tsdn_null(witness_tsdn));
+ return &witness_tsdn->witness_tsd;
+}
+
+/******************************************************************************/
+/* API */
+/******************************************************************************/
+void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp, void *opaque);
+
+typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
+extern witness_lock_error_t *JET_MUTABLE witness_lock_error;
+
+typedef void (witness_owner_error_t)(const witness_t *);
+extern witness_owner_error_t *JET_MUTABLE witness_owner_error;
+
+typedef void (witness_not_owner_error_t)(const witness_t *);
+extern witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error;
+
+typedef void (witness_depth_error_t)(const witness_list_t *,
+ witness_rank_t rank_inclusive, unsigned depth);
+extern witness_depth_error_t *JET_MUTABLE witness_depth_error;
+
+void witnesses_cleanup(witness_tsd_t *witness_tsd);
+void witness_prefork(witness_tsd_t *witness_tsd);
+void witness_postfork_parent(witness_tsd_t *witness_tsd);
+void witness_postfork_child(witness_tsd_t *witness_tsd);
+
+/* Helper, not intended for direct use.
*/ +static inline bool +witness_owner(witness_tsd_t *witness_tsd, const witness_t *witness) { + witness_list_t *witnesses; + witness_t *w; + + cassert(config_debug); + + witnesses = &witness_tsd->witnesses; + ql_foreach(w, witnesses, link) { + if (w == witness) { + return true; + } + } + + return false; +} + +static inline void +witness_assert_owner(witness_tsdn_t *witness_tsdn, const witness_t *witness) { + witness_tsd_t *witness_tsd; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + if (witness_owner(witness_tsd, witness)) { + return; + } + witness_owner_error(witness); +} + +static inline void +witness_assert_not_owner(witness_tsdn_t *witness_tsdn, + const witness_t *witness) { + witness_tsd_t *witness_tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + witnesses = &witness_tsd->witnesses; + ql_foreach(w, witnesses, link) { + if (w == witness) { + witness_not_owner_error(witness); + } + } +} + +static inline void +witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn, + witness_rank_t rank_inclusive, unsigned depth) { + witness_tsd_t *witness_tsd; + unsigned d; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + + d = 0; + witnesses = &witness_tsd->witnesses; + w = ql_last(witnesses, link); + if (w != NULL) { + ql_reverse_foreach(w, witnesses, link) { + if (w->rank < rank_inclusive) { + break; + } + d++; + } + } + if (d != depth) { + witness_depth_error(witnesses, rank_inclusive, depth); + } +} + +static inline void +witness_assert_depth(witness_tsdn_t *witness_tsdn, unsigned depth) { + witness_assert_depth_to_rank(witness_tsdn, WITNESS_RANK_MIN, depth); +} + +static inline void +witness_assert_lockless(witness_tsdn_t *witness_tsdn) { + witness_assert_depth(witness_tsdn, 0); +} + +static inline void +witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) { + witness_tsd_t *witness_tsd; + witness_list_t *witnesses; + witness_t *w; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + witness_assert_not_owner(witness_tsdn, witness); + + witnesses = &witness_tsd->witnesses; + w = ql_last(witnesses, link); + if (w == NULL) { + /* No other locks; do nothing. */ + } else if (witness_tsd->forking && w->rank <= witness->rank) { + /* Forking, and relaxed ranking satisfied. */ + } else if (w->rank > witness->rank) { + /* Not forking, rank order reversal. */ + witness_lock_error(witnesses, witness); + } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != + witness->comp || w->comp(w, w->opaque, witness, witness->opaque) > + 0)) { + /* + * Missing/incompatible comparison function, or comparison + * function indicates rank order reversal. 
+ */ + witness_lock_error(witnesses, witness); + } + + ql_elm_new(witness, link); + ql_tail_insert(witnesses, witness, link); +} + +static inline void +witness_unlock(witness_tsdn_t *witness_tsdn, witness_t *witness) { + witness_tsd_t *witness_tsd; + witness_list_t *witnesses; + + if (!config_debug) { + return; + } + + if (witness_tsdn_null(witness_tsdn)) { + return; + } + witness_tsd = witness_tsdn_tsd(witness_tsdn); + if (witness->rank == WITNESS_RANK_OMIT) { + return; + } + + /* + * Check whether owner before removal, rather than relying on + * witness_assert_owner() to abort, so that unit tests can test this + * function's failure mode without causing undefined behavior. + */ + if (witness_owner(witness_tsd, witness)) { + witnesses = &witness_tsd->witnesses; + ql_remove(witnesses, witness, link); + } else { + witness_assert_owner(witness_tsdn, witness); + } +} + +#endif /* JEMALLOC_INTERNAL_WITNESS_H */ diff --git a/dep/jemalloc/include/jemalloc/jemalloc.h b/dep/jemalloc/include/jemalloc/jemalloc.h index b8ea851e525..6ffe5c71b38 100644 --- a/dep/jemalloc/include/jemalloc/jemalloc.h +++ b/dep/jemalloc/include/jemalloc/jemalloc.h @@ -1,45 +1,203 @@ #ifndef JEMALLOC_H_ -#define JEMALLOC_H_ +#define JEMALLOC_H_ #ifdef __cplusplus extern "C" { #endif +/* Defined if __attribute__((...)) syntax is supported. */ +#define JEMALLOC_HAVE_ATTR + +/* Defined if alloc_size attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE + +/* Defined if format(gnu_printf, ...) attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF + +/* Defined if format(printf, ...) attribute is supported. */ +#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE_MEMALIGN +#define JEMALLOC_OVERRIDE_VALLOC + +/* + * At least Linux omits the "const" in: + * + * size_t malloc_usable_size(const void *ptr); + * + * Match the operating system's prototype. + */ +#define JEMALLOC_USABLE_SIZE_CONST + +/* + * If defined, specify throw() for the public function prototypes when compiling + * with C++. The only justification for this is to match the prototypes that + * glibc defines. + */ +#define JEMALLOC_USE_CXX_THROW + +#ifdef _MSC_VER +# ifdef _WIN64 +# define LG_SIZEOF_PTR_WIN 3 +# else +# define LG_SIZEOF_PTR_WIN 2 +# endif +#endif + +/* + * Name mangling for public symbols is controlled by --with-mangling and + * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by + * these macro definitions. 
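In debug builds, each thread records the witnesses it holds, and witness_lock() flags any acquisition whose rank is lower than the most recently acquired one, since such an inversion could deadlock against a thread locking in the documented order. A toy model of just the rank check (the rank values mirror WITNESS_RANK_TCACHES and WITNESS_RANK_ARENAS above):

```c
#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model of the witness discipline: each lock carries a rank, the thread
 * tracks what it holds, and acquiring a lower rank than the last-held one
 * is reported, mirroring the w->rank > witness->rank check in witness_lock().
 */
#define MAX_HELD 16

typedef struct {
	const char *name;
	unsigned rank;
} toy_witness_t;

static const toy_witness_t *held[MAX_HELD];
static unsigned nheld;

static void
toy_witness_lock(const toy_witness_t *w) {
	if (nheld > 0 && held[nheld - 1]->rank > w->rank) {
		fprintf(stderr, "reversal: %s (rank %u) after %s (rank %u)\n",
		    w->name, w->rank, held[nheld - 1]->name,
		    held[nheld - 1]->rank);
		abort();
	}
	held[nheld++] = w;
}

static void
toy_witness_unlock(const toy_witness_t *w) {
	if (nheld > 0 && held[nheld - 1] == w) { /* assume LIFO unlocks */
		nheld--;
	}
}

int
main(void) {
	toy_witness_t tcaches = { "tcaches", 2 }; /* WITNESS_RANK_TCACHES */
	toy_witness_t arenas = { "arenas", 3 };   /* WITNESS_RANK_ARENAS */

	toy_witness_lock(&tcaches); /* rank 2 then rank 3: legal order */
	toy_witness_lock(&arenas);
	toy_witness_unlock(&arenas);
	toy_witness_unlock(&tcaches);
	puts("ordered acquisition passed");
	/* Swapping the two lock calls above would abort with a report. */
	return 0;
}
```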
+ */ +#ifndef JEMALLOC_NO_RENAME +# define je_aligned_alloc aligned_alloc +# define je_calloc calloc +# define je_dallocx dallocx +# define je_free free +# define je_mallctl mallctl +# define je_mallctlbymib mallctlbymib +# define je_mallctlnametomib mallctlnametomib +# define je_malloc malloc +# define je_malloc_conf malloc_conf +# define je_malloc_message malloc_message +# define je_malloc_stats_print malloc_stats_print +# define je_malloc_usable_size malloc_usable_size +# define je_mallocx mallocx +# define je_nallocx nallocx +# define je_posix_memalign posix_memalign +# define je_rallocx rallocx +# define je_realloc realloc +# define je_sallocx sallocx +# define je_sdallocx sdallocx +# define je_xallocx xallocx +# define je_memalign memalign +# define je_valloc valloc +#endif + +#include <stdlib.h> +#include <stdbool.h> +#include <stdint.h> #include <limits.h> #include <strings.h> -#define JEMALLOC_VERSION "3.6.0-0-g46c0af68bd248b04df75e4f92d5fb804c3d75340" -#define JEMALLOC_VERSION_MAJOR 3 -#define JEMALLOC_VERSION_MINOR 6 -#define JEMALLOC_VERSION_BUGFIX 0 -#define JEMALLOC_VERSION_NREV 0 -#define JEMALLOC_VERSION_GID "46c0af68bd248b04df75e4f92d5fb804c3d75340" +#define JEMALLOC_VERSION "5.0.1-0-g896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb" +#define JEMALLOC_VERSION_MAJOR 5 +#define JEMALLOC_VERSION_MINOR 0 +#define JEMALLOC_VERSION_BUGFIX 1 +#define JEMALLOC_VERSION_NREV 0 +#define JEMALLOC_VERSION_GID "896ed3a8b3f41998d4fb4d625d30ac63ef2d51fb" + +#define MALLOCX_LG_ALIGN(la) ((int)(la)) +#if LG_SIZEOF_PTR == 2 +# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1)) +#else +# define MALLOCX_ALIGN(a) \ + ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \ + ffs((int)(((size_t)(a))>>32))+31)) +#endif +#define MALLOCX_ZERO ((int)0x40) +/* + * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1 + * encodes MALLOCX_TCACHE_NONE. + */ +#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8)) +#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1) +/* + * Bias arena index bits so that 0 encodes "use an automatically chosen arena". + */ +#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20) + +/* + * Use as arena index in "arena.<i>.{purge,decay,dss}" and + * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This + * definition is intentionally specified in raw decimal format to support + * cpp-based string concatenation, e.g. + * + * #define STRINGIFY_HELPER(x) #x + * #define STRINGIFY(x) STRINGIFY_HELPER(x) + * + * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL, + * 0); + */ +#define MALLCTL_ARENAS_ALL 4096 +/* + * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select + * destroyed arenas. 
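The MALLOCX_* encoders above pack everything into the single `int flags` argument of mallocx(): bits 0-5 carry lg(alignment), bit 6 MALLOCX_ZERO, bits 8-19 a biased tcache index, and bits 20 and up a biased arena index. A sketch that composes and manually decodes such a flags word (the decode masks are illustrative; jemalloc uses internal macros for this):

```c
#include <stdio.h>

/* Flag encoders copied from the header above (LG_SIZEOF_PTR == 3 case). */
#define MALLOCX_LG_ALIGN(la) ((int)(la))
#define MALLOCX_ZERO         ((int)0x40)
#define MALLOCX_TCACHE(tc)   ((int)(((tc)+2) << 8))
#define MALLOCX_TCACHE_NONE  MALLOCX_TCACHE(-1)
#define MALLOCX_ARENA(a)     ((((int)(a))+1) << 20)

int
main(void) {
	/*
	 * Request 64-byte alignment (lg 6), zeroed memory, no tcache, and
	 * arena 3 -- the same word a caller would pass to mallocx(size, flags).
	 */
	int flags = MALLOCX_LG_ALIGN(6) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE
	    | MALLOCX_ARENA(3);

	printf("flags    = 0x%x\n", flags);
	printf("lg_align = %d\n", flags & 0x3f);          /* -> 6 */
	printf("zero     = %d\n", (flags & MALLOCX_ZERO) != 0);
	printf("tcache   = %d\n", ((flags >> 8) & 0xfff) - 2); /* -> -1 */
	printf("arena    = %d\n", (flags >> 20) - 1);     /* -> 3 */
	return 0;
}
```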
+ */ +#define MALLCTL_ARENAS_DESTROYED 4097 -# define MALLOCX_LG_ALIGN(la) (la) -# if LG_SIZEOF_PTR == 2 -# define MALLOCX_ALIGN(a) (ffs(a)-1) +#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW) +# define JEMALLOC_CXX_THROW throw() +#else +# define JEMALLOC_CXX_THROW +#endif + +#if defined(_MSC_VER) +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) __declspec(align(s)) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# ifndef JEMALLOC_EXPORT +# ifdef DLLEXPORT +# define JEMALLOC_EXPORT __declspec(dllexport) +# else +# define JEMALLOC_EXPORT __declspec(dllimport) +# endif +# endif +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE __declspec(noinline) +# ifdef __cplusplus +# define JEMALLOC_NOTHROW __declspec(nothrow) +# else +# define JEMALLOC_NOTHROW +# endif +# define JEMALLOC_SECTION(s) __declspec(allocate(s)) +# define JEMALLOC_RESTRICT_RETURN __declspec(restrict) +# if _MSC_VER >= 1900 && !defined(__EDG__) +# define JEMALLOC_ALLOCATOR __declspec(allocator) +# else +# define JEMALLOC_ALLOCATOR +# endif +#elif defined(JEMALLOC_HAVE_ATTR) +# define JEMALLOC_ATTR(s) __attribute__((s)) +# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) +# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE +# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s)) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2)) # else -# define MALLOCX_ALIGN(a) \ - ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# endif +# ifndef JEMALLOC_EXPORT +# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) # endif -# define MALLOCX_ZERO ((int)0x40) -/* Bias arena index bits so that 0 encodes "MALLOCX_ARENA() unspecified". */ -# define MALLOCX_ARENA(a) ((int)(((a)+1) << 8)) - -#ifdef JEMALLOC_EXPERIMENTAL -# define ALLOCM_LG_ALIGN(la) (la) -# if LG_SIZEOF_PTR == 2 -# define ALLOCM_ALIGN(a) (ffs(a)-1) +# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i)) +# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF) +# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i)) # else -# define ALLOCM_ALIGN(a) \ - ((a < (size_t)INT_MAX) ? ffs(a)-1 : ffs(a>>32)+31) +# define JEMALLOC_FORMAT_PRINTF(s, i) # endif -# define ALLOCM_ZERO ((int)0x40) -# define ALLOCM_NO_MOVE ((int)0x80) -/* Bias arena index bits so that 0 encodes "ALLOCM_ARENA() unspecified". 
*/ -# define ALLOCM_ARENA(a) ((int)(((a)+1) << 8)) -# define ALLOCM_SUCCESS 0 -# define ALLOCM_ERR_OOM 1 -# define ALLOCM_ERR_NOT_MOVED 2 +# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) +# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow) +# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR +#else +# define JEMALLOC_ATTR(s) +# define JEMALLOC_ALIGNED(s) +# define JEMALLOC_ALLOC_SIZE(s) +# define JEMALLOC_ALLOC_SIZE2(s1, s2) +# define JEMALLOC_EXPORT +# define JEMALLOC_FORMAT_PRINTF(s, i) +# define JEMALLOC_NOINLINE +# define JEMALLOC_NOTHROW +# define JEMALLOC_SECTION(s) +# define JEMALLOC_RESTRICT_RETURN +# define JEMALLOC_ALLOCATOR #endif /* @@ -51,55 +209,141 @@ extern JEMALLOC_EXPORT const char *je_malloc_conf; extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque, const char *s); -JEMALLOC_EXPORT void *je_malloc(size_t size) JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT void *je_calloc(size_t num, size_t size) - JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT int je_posix_memalign(void **memptr, size_t alignment, - size_t size) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT void *je_aligned_alloc(size_t alignment, size_t size) - JEMALLOC_ATTR(malloc); -JEMALLOC_EXPORT void *je_realloc(void *ptr, size_t size); -JEMALLOC_EXPORT void je_free(void *ptr); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_malloc(size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr, + size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1)); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment, + size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) + JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr) + JEMALLOC_CXX_THROW; -JEMALLOC_EXPORT void *je_mallocx(size_t size, int flags); -JEMALLOC_EXPORT void *je_rallocx(void *ptr, size_t size, int flags); -JEMALLOC_EXPORT size_t je_xallocx(void *ptr, size_t size, size_t extra, +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_mallocx(size_t size, int flags) + JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_rallocx(void *ptr, size_t size, + int flags) JEMALLOC_ALLOC_SIZE(2); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_xallocx(void *ptr, size_t size, + size_t extra, int flags); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_sallocx(const void *ptr, + int flags) JEMALLOC_ATTR(pure); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_dallocx(void *ptr, int flags); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_sdallocx(void *ptr, size_t size, int flags); -JEMALLOC_EXPORT size_t je_sallocx(const void *ptr, int flags); -JEMALLOC_EXPORT void je_dallocx(void *ptr, int flags); -JEMALLOC_EXPORT size_t je_nallocx(size_t size, int flags); - -JEMALLOC_EXPORT int je_mallctl(const char *name, void *oldp, - size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT int je_mallctlnametomib(const char *name, 
size_t *mibp, - size_t *miblenp); -JEMALLOC_EXPORT int je_mallctlbymib(const size_t *mib, size_t miblen, +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_nallocx(size_t size, int flags) + JEMALLOC_ATTR(pure); + +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); -JEMALLOC_EXPORT void je_malloc_stats_print(void (*write_cb)(void *, - const char *), void *je_cbopaque, const char *opts); -JEMALLOC_EXPORT size_t je_malloc_usable_size( - JEMALLOC_USABLE_SIZE_CONST void *ptr); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlnametomib(const char *name, + size_t *mibp, size_t *miblenp); +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, + size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print( + void (*write_cb)(void *, const char *), void *je_cbopaque, + const char *opts); +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size( + JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW; #ifdef JEMALLOC_OVERRIDE_MEMALIGN -JEMALLOC_EXPORT void * je_memalign(size_t alignment, size_t size) - JEMALLOC_ATTR(malloc); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size) + JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc); #endif #ifdef JEMALLOC_OVERRIDE_VALLOC -JEMALLOC_EXPORT void * je_valloc(size_t size) JEMALLOC_ATTR(malloc); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN + void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW + JEMALLOC_ATTR(malloc); #endif -#ifdef JEMALLOC_EXPERIMENTAL -JEMALLOC_EXPORT int je_allocm(void **ptr, size_t *rsize, size_t size, - int flags) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_rallocm(void **ptr, size_t *rsize, size_t size, - size_t extra, int flags) JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_sallocm(const void *ptr, size_t *rsize, int flags) - JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_dallocm(void *ptr, int flags) - JEMALLOC_ATTR(nonnull(1)); -JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); -#endif +typedef struct extent_hooks_s extent_hooks_t; + +/* + * void * + * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + * size_t alignment, bool *zero, bool *commit, unsigned arena_ind); + */ +typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *, + bool *, unsigned); + +/* + * bool + * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * void + * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size, + * bool committed, unsigned arena_ind); + */ +typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool, + unsigned); + +/* + * bool + * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned arena_ind); + */ +typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t, + size_t, unsigned); + +/* + * bool + * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t offset, size_t length, unsigned 
arena_ind); + */ +typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + unsigned); + +/* + * bool + * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size, + * size_t size_a, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t, + bool, unsigned); + +/* + * bool + * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + * void *addr_b, size_t size_b, bool committed, unsigned arena_ind); + */ +typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t, + bool, unsigned); + +struct extent_hooks_s { + extent_alloc_t *alloc; + extent_dalloc_t *dalloc; + extent_destroy_t *destroy; + extent_commit_t *commit; + extent_decommit_t *decommit; + extent_purge_t *purge_lazy; + extent_purge_t *purge_forced; + extent_split_t *split; + extent_merge_t *merge; +}; /* * By default application code must explicitly refer to mangled symbol names, @@ -112,32 +356,28 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); # ifndef JEMALLOC_NO_DEMANGLE # define JEMALLOC_NO_DEMANGLE # endif -# define malloc_conf je_malloc_conf -# define malloc_message je_malloc_message -# define malloc je_malloc -# define calloc je_calloc -# define posix_memalign je_posix_memalign # define aligned_alloc je_aligned_alloc -# define realloc je_realloc -# define free je_free -# define mallocx je_mallocx -# define rallocx je_rallocx -# define xallocx je_xallocx -# define sallocx je_sallocx +# define calloc je_calloc # define dallocx je_dallocx -# define nallocx je_nallocx +# define free je_free # define mallctl je_mallctl -# define mallctlnametomib je_mallctlnametomib # define mallctlbymib je_mallctlbymib +# define mallctlnametomib je_mallctlnametomib +# define malloc je_malloc +# define malloc_conf je_malloc_conf +# define malloc_message je_malloc_message # define malloc_stats_print je_malloc_stats_print # define malloc_usable_size je_malloc_usable_size +# define mallocx je_mallocx +# define nallocx je_nallocx +# define posix_memalign je_posix_memalign +# define rallocx je_rallocx +# define realloc je_realloc +# define sallocx je_sallocx +# define sdallocx je_sdallocx +# define xallocx je_xallocx # define memalign je_memalign # define valloc je_valloc -# define allocm je_allocm -# define dallocm je_dallocm -# define nallocm je_nallocm -# define rallocm je_rallocm -# define sallocm je_sallocm #endif /* @@ -148,35 +388,31 @@ JEMALLOC_EXPORT int je_nallocm(size_t *rsize, size_t size, int flags); * and/or --with-jemalloc-prefix. 
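+ *
+ * Illustrative note (not upstream wording): defining JEMALLOC_NO_DEMANGLE
+ * before including this header keeps the je_* macros defined, so the je_
+ * spellings stay usable as stable aliases, e.g.
+ *
+ *   je_malloc_stats_print(NULL, NULL, NULL);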
*/ #ifndef JEMALLOC_NO_DEMANGLE -# undef je_malloc_conf -# undef je_malloc_message -# undef je_malloc -# undef je_calloc -# undef je_posix_memalign # undef je_aligned_alloc -# undef je_realloc -# undef je_free -# undef je_mallocx -# undef je_rallocx -# undef je_xallocx -# undef je_sallocx +# undef je_calloc # undef je_dallocx -# undef je_nallocx +# undef je_free # undef je_mallctl -# undef je_mallctlnametomib # undef je_mallctlbymib +# undef je_mallctlnametomib +# undef je_malloc +# undef je_malloc_conf +# undef je_malloc_message # undef je_malloc_stats_print # undef je_malloc_usable_size +# undef je_mallocx +# undef je_nallocx +# undef je_posix_memalign +# undef je_rallocx +# undef je_realloc +# undef je_sallocx +# undef je_sdallocx +# undef je_xallocx # undef je_memalign # undef je_valloc -# undef je_allocm -# undef je_dallocm -# undef je_nallocm -# undef je_rallocm -# undef je_sallocm #endif #ifdef __cplusplus -}; +} #endif #endif /* JEMALLOC_H_ */ diff --git a/dep/jemalloc/include/msvc_compat/C99/stdbool.h b/dep/jemalloc/include/msvc_compat/C99/stdbool.h new file mode 100644 index 00000000000..d92160ebc75 --- /dev/null +++ b/dep/jemalloc/include/msvc_compat/C99/stdbool.h @@ -0,0 +1,20 @@ +#ifndef stdbool_h +#define stdbool_h + +#include <wtypes.h> + +/* MSVC doesn't define _Bool or bool in C, but does have BOOL */ +/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */ +/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as + * a built-in type. */ +#ifndef __clang__ +typedef BOOL _Bool; +#endif + +#define bool _Bool +#define true 1 +#define false 0 + +#define __bool_true_false_are_defined 1 + +#endif /* stdbool_h */ diff --git a/dep/jemalloc/include/msvc_compat/C99/stdint.h b/dep/jemalloc/include/msvc_compat/C99/stdint.h new file mode 100644 index 00000000000..d02608a5972 --- /dev/null +++ b/dep/jemalloc/include/msvc_compat/C99/stdint.h @@ -0,0 +1,247 @@ +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +/////////////////////////////////////////////////////////////////////////////// + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" +#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#include <limits.h> + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' +// or compiler give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#ifdef __cplusplus +extern "C" { +#endif +# include <wchar.h> +#ifdef __cplusplus +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t. +#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. +#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define 
INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +#define INTMAX_C INT64_C +#define UINTMAX_C UINT64_C + +#endif // __STDC_CONSTANT_MACROS ] + + +#endif // _MSC_STDINT_H_ ] diff --git a/dep/jemalloc/include/msvc_compat/strings.h b/dep/jemalloc/include/msvc_compat/strings.h index c84975b6b8e..996f256ce84 100644 --- a/dep/jemalloc/include/msvc_compat/strings.h +++ b/dep/jemalloc/include/msvc_compat/strings.h @@ -3,21 +3,56 @@ /* MSVC doesn't define ffs/ffsl. 
This dummy strings.h header is provided * for both */ -#include <intrin.h> -#pragma intrinsic(_BitScanForward) -static __forceinline int ffsl(long x) -{ +#ifdef _MSC_VER +# include <intrin.h> +# pragma intrinsic(_BitScanForward) +static __forceinline int ffsl(long x) { unsigned long i; - if (_BitScanForward(&i, x)) - return (i + 1); - return (0); + if (_BitScanForward(&i, x)) { + return i + 1; + } + return 0; } -static __forceinline int ffs(int x) -{ +static __forceinline int ffs(int x) { + return ffsl(x); +} + +# ifdef _M_X64 +# pragma intrinsic(_BitScanForward64) +# endif + +static __forceinline int ffsll(unsigned __int64 x) { + unsigned long i; +#ifdef _M_X64 + if (_BitScanForward64(&i, x)) { + return i + 1; + } + return 0; +#else +// Fallback for 32-bit build where 64-bit version not available +// assuming little endian + union { + unsigned __int64 ll; + unsigned long l[2]; + } s; - return (ffsl(x)); + s.ll = x; + + if (_BitScanForward(&i, s.l[0])) { + return i + 1; + } else if(_BitScanForward(&i, s.l[1])) { + return i + 33; + } + return 0; +#endif } +#else +# define ffsll(x) __builtin_ffsll(x) +# define ffsl(x) __builtin_ffsl(x) +# define ffs(x) __builtin_ffs(x) #endif + +#endif /* strings_h */ diff --git a/dep/jemalloc/include/msvc_compat/windows_extra.h b/dep/jemalloc/include/msvc_compat/windows_extra.h new file mode 100644 index 00000000000..a6ebb9306f2 --- /dev/null +++ b/dep/jemalloc/include/msvc_compat/windows_extra.h @@ -0,0 +1,6 @@ +#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H +#define MSVC_COMPAT_WINDOWS_EXTRA_H + +#include <errno.h> + +#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */ diff --git a/dep/jemalloc/jemalloc_defs.h.in.cmake b/dep/jemalloc/jemalloc_defs.h.in.cmake deleted file mode 100644 index 89e496f4acb..00000000000 --- a/dep/jemalloc/jemalloc_defs.h.in.cmake +++ /dev/null @@ -1,274 +0,0 @@ -/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */ -/* - * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all - * public APIs to be prefixed. This makes it possible, with some care, to use - * multiple allocators simultaneously. - */ -/* #undef JEMALLOC_PREFIX */ -/* #undef JEMALLOC_CPREFIX */ - -/* - * Name mangling for public symbols is controlled by --with-mangling and - * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by - * these macro definitions. - */ -#define je_malloc_conf malloc_conf -#define je_malloc_message malloc_message -#define je_malloc malloc -#define je_calloc calloc -#define je_posix_memalign posix_memalign -#define je_aligned_alloc aligned_alloc -#define je_realloc realloc -#define je_free free -#define je_malloc_usable_size malloc_usable_size -#define je_malloc_stats_print malloc_stats_print -#define je_mallctl mallctl -#define je_mallctlnametomib mallctlnametomib -#define je_mallctlbymib mallctlbymib -#define je_memalign memalign -#define je_valloc valloc -#define je_allocm allocm -#define je_rallocm rallocm -#define je_sallocm sallocm -#define je_dallocm dallocm -#define je_nallocm nallocm - -/* - * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. - * For shared libraries, symbol visibility mechanisms prevent these symbols - * from being exported, but for static libraries, naming collisions are a real - * possibility. 
- */ -#define JEMALLOC_PRIVATE_NAMESPACE "" -#define JEMALLOC_N(string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix) string_that_no_one_should_want_to_use_as_a_jemalloc_private_namespace_prefix - -/* - * Hyper-threaded CPUs may need a special instruction inside spin loops in - * order to yield to another virtual CPU. - */ -#define CPU_SPINWAIT __asm__ volatile("pause") - -/* Defined if the equivalent of FreeBSD's atomic(9) functions are available. */ -/* #undef JEMALLOC_ATOMIC9 */ - -/* - * Defined if OSAtomic*() functions are available, as provided by Darwin, and - * documented in the atomic(3) manual page. - */ -/* #undef JEMALLOC_OSATOMIC */ - -/* - * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and - * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the - * functions are defined in libgcc instead of being inlines) - */ -/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ - -/* - * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and - * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite - * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the - * functions are defined in libgcc instead of being inlines) - */ -/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ - -/* - * Defined if OSSpin*() functions are available, as provided by Darwin, and - * documented in the spinlock(3) manual page. - */ -/* #undef JEMALLOC_OSSPIN */ - -/* - * Defined if _malloc_thread_cleanup() exists. At least in the case of - * FreeBSD, pthread_key_create() allocates, which if used during malloc - * bootstrapping will cause recursion into the pthreads library. Therefore, if - * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in - * malloc_tsd. - */ -/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ - -/* - * Defined if threaded initialization is known to be safe on this platform. - * Among other things, it must be possible to initialize a mutex without - * triggering allocation in order for threaded allocation to be safe. - */ -#define JEMALLOC_THREADED_INIT - -/* - * Defined if the pthreads implementation defines - * _pthread_mutex_init_calloc_cb(), in which case the function is used in order - * to avoid recursive allocation during mutex initialization. - */ -/* #undef JEMALLOC_MUTEX_INIT_CB */ - -/* Defined if __attribute__((...)) syntax is supported. */ -#define JEMALLOC_HAVE_ATTR -#ifdef JEMALLOC_HAVE_ATTR -# define JEMALLOC_ATTR(s) __attribute__((s)) -# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default")) -# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s)) -# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s)) -# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline) -#elif _MSC_VER -# define JEMALLOC_ATTR(s) -# ifdef DLLEXPORT -# define JEMALLOC_EXPORT __declspec(dllexport) -# else -# define JEMALLOC_EXPORT __declspec(dllimport) -# endif -# define JEMALLOC_ALIGNED(s) __declspec(align(s)) -# define JEMALLOC_SECTION(s) __declspec(allocate(s)) -# define JEMALLOC_NOINLINE __declspec(noinline) -#else -# define JEMALLOC_ATTR(s) -# define JEMALLOC_EXPORT -# define JEMALLOC_ALIGNED(s) -# define JEMALLOC_SECTION(s) -# define JEMALLOC_NOINLINE -#endif - -/* Defined if sbrk() is supported. */ -#define JEMALLOC_HAVE_SBRK - -/* Non-empty if the tls_model attribute is supported. */ -#define JEMALLOC_TLS_MODEL @JEM_TLSMODEL@ - -/* JEMALLOC_CC_SILENCE enables code that silences unuseful compiler warnings. 
*/ -/* #undef JEMALLOC_CC_SILENCE */ - -/* - * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables - * inline functions. - */ -/* #undef JEMALLOC_DEBUG */ - -/* JEMALLOC_STATS enables statistics calculation. */ -#define JEMALLOC_STATS - -/* JEMALLOC_PROF enables allocation profiling. */ -/* #undef JEMALLOC_PROF */ - -/* Use libunwind for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_LIBUNWIND */ - -/* Use libgcc for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_LIBGCC */ - -/* Use gcc intrinsics for profile backtracing if defined. */ -/* #undef JEMALLOC_PROF_GCC */ - -/* - * JEMALLOC_TCACHE enables a thread-specific caching layer for small objects. - * This makes it possible to allocate/deallocate objects without any locking - * when the cache is in the steady state. - */ -#define JEMALLOC_TCACHE - -/* - * JEMALLOC_DSS enables use of sbrk(2) to allocate chunks from the data storage - * segment (DSS). - */ -/* #undef JEMALLOC_DSS */ - -/* Support memory filling (junk/zero/quarantine/redzone). */ -#define JEMALLOC_FILL - -/* Support the experimental API. */ -#define JEMALLOC_EXPERIMENTAL - -/* Support utrace(2)-based tracing. */ -/* #undef JEMALLOC_UTRACE */ - -/* Support Valgrind. */ -/* #undef JEMALLOC_VALGRIND */ - -/* Support optional abort() on OOM. */ -/* #undef JEMALLOC_XMALLOC */ - -/* Support lazy locking (avoid locking unless a second thread is launched). */ -/* #undef JEMALLOC_LAZY_LOCK */ - -/* One page is 2^STATIC_PAGE_SHIFT bytes. */ -#define STATIC_PAGE_SHIFT 12 - -/* - * If defined, use munmap() to unmap freed chunks, rather than storing them for - * later reuse. This is disabled by default on Linux because common sequences - * of mmap()/munmap() calls will cause virtual memory map holes. - */ -/* #undef JEMALLOC_MUNMAP */ - -/* - * If defined, use mremap(...MREMAP_FIXED...) for huge realloc(). This is - * disabled by default because it is Linux-specific and it will cause virtual - * memory map holes, much like munmap(2) does. - */ -/* #undef JEMALLOC_MREMAP */ - -/* TLS is used to map arenas and magazine caches to threads. */ -#define JEMALLOC_TLS - -/* - * JEMALLOC_IVSALLOC enables ivsalloc(), which verifies that pointers reside - * within jemalloc-owned chunks before dereferencing them. - */ -/* #undef JEMALLOC_IVSALLOC */ - -/* - * Define overrides for non-standard allocator-related functions if they - * are present on the system. - */ -#define JEMALLOC_OVERRIDE_MEMALIGN -#define JEMALLOC_OVERRIDE_VALLOC - -/* - * At least Linux omits the "const" in: - * - * size_t malloc_usable_size(const void *ptr); - * - * Match the operating system's prototype. - */ -#define JEMALLOC_USABLE_SIZE_CONST - -/* - * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. - */ -/* #undef JEMALLOC_ZONE */ -/* #undef JEMALLOC_ZONE_VERSION */ - -/* - * Methods for purging unused pages differ between operating systems. - * - * madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages, - * such that new pages will be demand-zeroed if - * the address region is later touched. - * madvise(..., MADV_FREE) : On FreeBSD and Darwin, this marks pages as being - * unused, such that they will be discarded rather - * than swapped out. - */ -#define JEMALLOC_PURGE_MADVISE_DONTNEED -/* #undef JEMALLOC_PURGE_MADVISE_FREE */ - -/* - * Define if operating system has alloca.h header. - */ -#define JEMALLOC_HAS_ALLOCA_H - -/* sizeof(void *) == 2^LG_SIZEOF_PTR. 
*/ -#define LG_SIZEOF_PTR @JEM_SIZEDEF@ - -/* sizeof(int) == 2^LG_SIZEOF_INT. */ -#define LG_SIZEOF_INT 2 - -/* sizeof(long) == 2^LG_SIZEOF_LONG. */ -#define LG_SIZEOF_LONG @JEM_SIZEDEF@ - -/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ -#define LG_SIZEOF_INTMAX_T 3 - -/* C99 restrict keyword supported. */ -#define JEMALLOC_HAS_RESTRICT - -/* JEMALLOC_CODE_COVERAGE enables test code coverage analysis. */ -#undef JEMALLOC_CODE_COVERAGE diff --git a/dep/jemalloc/jemalloc_internal_defs.h.in.cmake b/dep/jemalloc/jemalloc_internal_defs.h.in.cmake new file mode 100644 index 00000000000..1527fa2c2fe --- /dev/null +++ b/dep/jemalloc/jemalloc_internal_defs.h.in.cmake @@ -0,0 +1,343 @@ +/* include/jemalloc/internal/jemalloc_internal_defs.h. Generated from jemalloc_internal_defs.h.in by configure. */ +#ifndef JEMALLOC_INTERNAL_DEFS_H_ +#define JEMALLOC_INTERNAL_DEFS_H_ +/* + * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all + * public APIs to be prefixed. This makes it possible, with some care, to use + * multiple allocators simultaneously. + */ +/* #undef JEMALLOC_PREFIX */ +/* #undef JEMALLOC_CPREFIX */ + +/* + * Define overrides for non-standard allocator-related functions if they are + * present on the system. + */ +#define JEMALLOC_OVERRIDE___LIBC_CALLOC +#define JEMALLOC_OVERRIDE___LIBC_FREE +#define JEMALLOC_OVERRIDE___LIBC_MALLOC +#define JEMALLOC_OVERRIDE___LIBC_MEMALIGN +#define JEMALLOC_OVERRIDE___LIBC_REALLOC +#define JEMALLOC_OVERRIDE___LIBC_VALLOC +/* #undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN */ + +/* + * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs. + * For shared libraries, symbol visibility mechanisms prevent these symbols + * from being exported, but for static libraries, naming collisions are a real + * possibility. + */ +#define JEMALLOC_PRIVATE_NAMESPACE je_ + +/* + * Hyper-threaded CPUs may need a special instruction inside spin loops in + * order to yield to another virtual CPU. + */ +#define CPU_SPINWAIT __asm__ volatile("pause") + +/* + * Number of significant bits in virtual addresses. This may be less than the + * total number of bits in a pointer, e.g. on x64, for which the uppermost 16 + * bits are the same as bit 47. + */ +#define LG_VADDR @JEM_VADDRBITS@ + +/* Defined if C11 atomics are available. */ +#define JEMALLOC_C11_ATOMICS 1 + +/* Defined if GCC __atomic atomics are available. */ +#define JEMALLOC_GCC_ATOMIC_ATOMICS 1 + +/* Defined if GCC __sync atomics are available. */ +#define JEMALLOC_GCC_SYNC_ATOMICS 1 + +/* + * Defined if __sync_add_and_fetch(uint32_t *, uint32_t) and + * __sync_sub_and_fetch(uint32_t *, uint32_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_4 */ + +/* + * Defined if __sync_add_and_fetch(uint64_t *, uint64_t) and + * __sync_sub_and_fetch(uint64_t *, uint64_t) are available, despite + * __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 not being defined (which means the + * functions are defined in libgcc instead of being inlines). + */ +/* #undef JE_FORCE_SYNC_COMPARE_AND_SWAP_8 */ + +/* + * Defined if __builtin_clz() and __builtin_clzl() are available. + */ +#define JEMALLOC_HAVE_BUILTIN_CLZ + +/* + * Defined if os_unfair_lock_*() functions are available, as provided by Darwin. 
+ */ +/* #undef JEMALLOC_OS_UNFAIR_LOCK */ + +/* + * Defined if OSSpin*() functions are available, as provided by Darwin, and + * documented in the spinlock(3) manual page. + */ +/* #undef JEMALLOC_OSSPIN */ + +/* Defined if syscall(2) is usable. */ +#define JEMALLOC_USE_SYSCALL + +/* + * Defined if secure_getenv(3) is available. + */ +#define JEMALLOC_HAVE_SECURE_GETENV + +/* + * Defined if issetugid(2) is available. + */ +/* #undef JEMALLOC_HAVE_ISSETUGID */ + +/* Defined if pthread_atfork(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_ATFORK + +/* Defined if pthread_setname_np(3) is available. */ +#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE 1 + +/* + * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available. + */ +#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1 + +/* + * Defined if mach_absolute_time() is available. + */ +/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */ + +/* + * Defined if _malloc_thread_cleanup() exists. At least in the case of + * FreeBSD, pthread_key_create() allocates, which if used during malloc + * bootstrapping will cause recursion into the pthreads library. Therefore, if + * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in + * malloc_tsd. + */ +/* #undef JEMALLOC_MALLOC_THREAD_CLEANUP */ + +/* + * Defined if threaded initialization is known to be safe on this platform. + * Among other things, it must be possible to initialize a mutex without + * triggering allocation in order for threaded allocation to be safe. + */ +#define JEMALLOC_THREADED_INIT + +/* + * Defined if the pthreads implementation defines + * _pthread_mutex_init_calloc_cb(), in which case the function is used in order + * to avoid recursive allocation during mutex initialization. + */ +/* #undef JEMALLOC_MUTEX_INIT_CB */ + +/* Non-empty if the tls_model attribute is supported. */ +#define JEMALLOC_TLS_MODEL @JEM_TLSMODEL@ + +/* + * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables + * inline functions. + */ +/* #undef JEMALLOC_DEBUG */ + +/* JEMALLOC_STATS enables statistics calculation. */ +/* #undef JEMALLOC_STATS */ + +/* JEMALLOC_PROF enables allocation profiling. */ +/* #undef JEMALLOC_PROF */ + +/* Use libunwind for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBUNWIND */ + +/* Use libgcc for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_LIBGCC */ + +/* Use gcc intrinsics for profile backtracing if defined. */ +/* #undef JEMALLOC_PROF_GCC */ + +/* + * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage + * segment (DSS). + */ +#define JEMALLOC_DSS + +/* Support memory filling (junk/zero). */ +#define JEMALLOC_FILL + +/* Support utrace(2)-based tracing. */ +/* #undef JEMALLOC_UTRACE */ + +/* Support optional abort() on OOM. */ +/* #undef JEMALLOC_XMALLOC */ + +/* Support lazy locking (avoid locking unless a second thread is launched). */ +/* #undef JEMALLOC_LAZY_LOCK */ + +/* + * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size + * classes). + */ +/* #undef LG_QUANTUM */ + +/* One page is 2^LG_PAGE bytes. */ +#define LG_PAGE 12 + +/* + * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the + * system does not explicitly support huge pages; system calls that require + * explicit huge page support are separately configured. 
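+ *
+ * For example, with LG_HUGEPAGE set to 21 below, one huge page is
+ * 2^21 bytes = 2 MiB, the typical x86_64 transparent huge page size.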
+ */ +#define LG_HUGEPAGE 21 + +/* + * If defined, adjacent virtual memory mappings with identical attributes + * automatically coalesce, and they fragment when changes are made to subranges. + * This is the normal order of things for mmap()/munmap(), but on Windows + * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e. + * mappings do *not* coalesce/fragment. + */ +#define JEMALLOC_MAPS_COALESCE + +/* + * If defined, retain memory for later reuse by default rather than using e.g. + * munmap() to unmap freed extents. This is enabled on 64-bit Linux because + * common sequences of mmap()/munmap() calls will cause virtual memory map + * holes. + */ +#define JEMALLOC_RETAIN + +/* TLS is used to map arenas and magazine caches to threads. */ +#define JEMALLOC_TLS + +/* + * Used to mark unreachable code to quiet "end of non-void" compiler warnings. + * Don't use this directly; instead use unreachable() from util.h + */ +#define JEMALLOC_INTERNAL_UNREACHABLE __builtin_unreachable + +/* + * ffs*() functions to use for bitmapping. Don't use these directly; instead, + * use ffs_*() from util.h. + */ +#define JEMALLOC_INTERNAL_FFSLL __builtin_ffsll +#define JEMALLOC_INTERNAL_FFSL __builtin_ffsl +#define JEMALLOC_INTERNAL_FFS __builtin_ffs + +/* + * If defined, explicitly attempt to more uniformly distribute large allocation + * pointer alignments across all cache indices. + */ +#define JEMALLOC_CACHE_OBLIVIOUS + +/* + * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings. + */ +/* #undef JEMALLOC_ZONE */ + +/* + * Methods for determining whether the OS overcommits. + * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's + * /proc/sys/vm.overcommit_memory file. + * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl. + */ +/* #undef JEMALLOC_SYSCTL_VM_OVERCOMMIT */ +#define JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY + +/* Defined if madvise(2) is available. */ +#define JEMALLOC_HAVE_MADVISE + +/* + * Methods for purging unused pages differ between operating systems. + * + * madvise(..., MADV_FREE) : This marks pages as being unused, such that they + * will be discarded rather than swapped out. + * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is + * defined, this immediately discards pages, + * such that new pages will be demand-zeroed if + * the address region is later touched; + * otherwise this behaves similarly to + * MADV_FREE, though typically with higher + * system overhead. + */ +@JEM_MADFREE_DEF@ JEMALLOC_PURGE_MADVISE_FREE +#define JEMALLOC_PURGE_MADVISE_DONTNEED +#define JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS 1 + +/* + * Defined if transparent huge pages (THPs) are supported via the + * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled. + */ +#define JEMALLOC_THP + +/* Define if operating system has alloca.h header. */ +#define JEMALLOC_HAS_ALLOCA_H 1 + +/* C99 restrict keyword supported. */ +#define JEMALLOC_HAS_RESTRICT 1 + +/* For use by hash code. */ +/* #undef JEMALLOC_BIG_ENDIAN */ + +/* sizeof(int) == 2^LG_SIZEOF_INT. */ +#define LG_SIZEOF_INT 2 + +/* sizeof(long) == 2^LG_SIZEOF_LONG. */ +#define LG_SIZEOF_LONG @JEM_SIZEDEF@ + +/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */ +#define LG_SIZEOF_LONG_LONG 3 + +/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */ +#define LG_SIZEOF_INTMAX_T 3 + +/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */ +#define JEMALLOC_GLIBC_MALLOC_HOOK + +/* glibc memalign hook. 
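+ * (presumably glibc's __memalign_hook, by analogy with the hook names
+ * spelled out in the comment above)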
*/
+#define JEMALLOC_GLIBC_MEMALIGN_HOOK
+
+/* pthread support */
+#define JEMALLOC_HAVE_PTHREAD
+
+/* dlsym() support */
+#define JEMALLOC_HAVE_DLSYM
+
+/* Adaptive mutex support in pthreads. */
+#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
+
+/* GNU specific sched_getcpu support */
+#define JEMALLOC_HAVE_SCHED_GETCPU
+
+/* GNU specific sched_setaffinity support */
+#define JEMALLOC_HAVE_SCHED_SETAFFINITY
+
+/*
+ * If defined, all the features necessary for background threads are present.
+ */
+#define JEMALLOC_BACKGROUND_THREAD 1
+
+/*
+ * If defined, jemalloc symbols are not exported (doesn't work when
+ * JEMALLOC_PREFIX is not defined).
+ */
+/* #undef JEMALLOC_EXPORT */
+
+/* config.malloc_conf options string. */
+#define JEMALLOC_CONFIG_MALLOC_CONF ""
+
+/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
+#define JEMALLOC_IS_MALLOC 1
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#define LG_SIZEOF_PTR @JEM_SIZEDEF@
+
+#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
diff --git a/dep/jemalloc/src/arena.c b/dep/jemalloc/src/arena.c
index dad707b63d0..632fce5233e 100644
--- a/dep/jemalloc/src/arena.c
+++ b/dep/jemalloc/src/arena.c
@@ -1,40 +1,57 @@
-#define JEMALLOC_ARENA_C_
-#include "jemalloc/internal/jemalloc_internal.h"
+#define JEMALLOC_ARENA_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/size_classes.h"
+#include "jemalloc/internal/util.h"
 
 /******************************************************************************/
 /* Data. */
 
-ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
-arena_bin_info_t arena_bin_info[NBINS];
-
-JEMALLOC_ALIGNED(CACHELINE)
-const uint8_t small_size2bin[] = {
-#define S2B_8(i) i,
-#define S2B_16(i) S2B_8(i) S2B_8(i)
-#define S2B_32(i) S2B_16(i) S2B_16(i)
-#define S2B_64(i) S2B_32(i) S2B_32(i)
-#define S2B_128(i) S2B_64(i) S2B_64(i)
-#define S2B_256(i) S2B_128(i) S2B_128(i)
-#define S2B_512(i) S2B_256(i) S2B_256(i)
-#define S2B_1024(i) S2B_512(i) S2B_512(i)
-#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
-#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
-#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
-#define SIZE_CLASS(bin, delta, size) \
- S2B_##delta(bin)
+/*
+ * Define names for both uninitialized and initialized phases, so that
+ * options and mallctl processing are straightforward.
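+ *
+ * Illustrative example (the enum names are assumed from the matching
+ * internal header): an uninitialized-phase mode and its initialized
+ * counterpart index the same string, so
+ * percpu_arena_mode_names[percpu_arena_uninit] and
+ * percpu_arena_mode_names[percpu_arena] both yield "percpu".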
+ */ +const char *percpu_arena_mode_names[] = { + "percpu", + "phycpu", + "disabled", + "percpu", + "phycpu" +}; +percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; + +ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; +ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; + +static atomic_zd_t dirty_decay_ms_default; +static atomic_zd_t muzzy_decay_ms_default; + +const arena_bin_info_t arena_bin_info[NBINS] = { +#define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \ + {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)}, +#define BIN_INFO_bin_no(reg_size, slab_size, nregs) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ + lg_delta_lookup) \ + BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \ + (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \ + (ndelta<<lg_delta))) SIZE_CLASSES -#undef S2B_8 -#undef S2B_16 -#undef S2B_32 -#undef S2B_64 -#undef S2B_128 -#undef S2B_256 -#undef S2B_512 -#undef S2B_1024 -#undef S2B_2048 -#undef S2B_4096 -#undef S2B_8192 -#undef SIZE_CLASS +#undef BIN_INFO_bin_yes +#undef BIN_INFO_bin_no +#undef SC +}; + +const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { +#define STEP(step, h, x, y) \ + h, + SMOOTHSTEP +#undef STEP }; /******************************************************************************/ @@ -43,2535 +60,2120 @@ const uint8_t small_size2bin[] = { * definition. */ -static void arena_purge(arena_t *arena, bool all); -static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, - bool cleaned); -static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); -static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, - arena_run_t *run, arena_bin_t *bin); +static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit, + bool is_background_thread); +static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, + bool is_background_thread, bool all); +static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + arena_bin_t *bin); +static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + arena_bin_t *bin); /******************************************************************************/ -static inline int -arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - uintptr_t a_mapelm = (uintptr_t)a; - uintptr_t b_mapelm = (uintptr_t)b; - - assert(a != NULL); - assert(b != NULL); - - return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); -} - -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, - u.rb_link, arena_run_comp) - -static inline int -arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) -{ - int ret; - size_t a_size = a->bits & ~PAGE_MASK; - size_t b_size = b->bits & ~PAGE_MASK; - - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_mapelm, b_mapelm; - - if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY) - a_mapelm = (uintptr_t)a; - else { - /* - * Treat keys as though they are lower than anything - * else. 
- */ - a_mapelm = 0; +static bool +arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) { + if (config_debug) { + for (size_t i = 0; i < sizeof(arena_stats_t); i++) { + assert(((char *)arena_stats)[i] == 0); } - b_mapelm = (uintptr_t)b; - - ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); } - - return (ret); +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&arena_stats->mtx, "arena_stats", + WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { + return true; + } +#endif + /* Memory is zeroed, so there is no need to clear stats. */ + return false; } -/* Generate red-black tree functions. */ -rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, - u.rb_link, arena_avail_comp) - -static inline int -arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b) -{ - - assert(a != NULL); - assert(b != NULL); - - /* - * Short-circuit for self comparison. The following comparison code - * would come to the same result, but at the cost of executing the slow - * path. - */ - if (a == b) - return (0); - - /* - * Order such that chunks with higher fragmentation are "less than" - * those with lower fragmentation -- purging order is from "least" to - * "greatest". Fragmentation is measured as: - * - * mean current avail run size - * -------------------------------- - * mean defragmented avail run size - * - * navail - * ----------- - * nruns_avail nruns_avail-nruns_adjac - * = ========================= = ----------------------- - * navail nruns_avail - * ----------------------- - * nruns_avail-nruns_adjac - * - * The following code multiplies away the denominator prior to - * comparison, in order to avoid division. - * - */ - { - size_t a_val = (a->nruns_avail - a->nruns_adjac) * - b->nruns_avail; - size_t b_val = (b->nruns_avail - b->nruns_adjac) * - a->nruns_avail; +static void +arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_lock(tsdn, &arena_stats->mtx); +#endif +} - if (a_val < b_val) - return (1); - if (a_val > b_val) - return (-1); - } - /* - * Break ties by chunk address. For fragmented chunks, report lower - * addresses as "lower", so that fragmentation reduction happens first - * at lower addresses. However, use the opposite ordering for - * unfragmented chunks, in order to increase the chances of - * re-allocating dirty runs. - */ - { - uintptr_t a_chunk = (uintptr_t)a; - uintptr_t b_chunk = (uintptr_t)b; - int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk)); - if (a->nruns_adjac == 0) { - assert(b->nruns_adjac == 0); - ret = -ret; - } - return (ret); - } +static void +arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) { +#ifndef JEMALLOC_ATOMIC_U64 + malloc_mutex_unlock(tsdn, &arena_stats->mtx); +#endif } -/* Generate red-black tree functions. 
*/
-rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
- dirty_link, arena_chunk_dirty_comp)
+static uint64_t
+arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(p, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ return *p;
+#endif
+}
 
-static inline bool
-arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
-{
- bool ret;
+static void
+arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ *p += x;
+#endif
+}
 
- if (pageind-1 < map_bias)
- ret = false;
- else {
- ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk,
- pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
- }
- return (ret);
+UNUSED static void
+arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
+ arena_stats_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ *p -= x;
+ assert(*p + x >= *p);
+#endif
 }
 
-static inline bool
-arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
-{
- bool ret;
+/*
+ * Non-atomically sets *dst += src. *dst needs external synchronization.
+ * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
+ * the types here are atomic).
+ */
+static void
+arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
+ atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
+#else
+ *dst += src;
+#endif
+}
 
- if (pageind+npages == chunk_npages)
- ret = false;
- else {
- assert(pageind+npages < chunk_npages);
- ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
- assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
- != arena_mapbits_dirty_get(chunk, pageind+npages));
- }
- return (ret);
+static size_t
+arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_zu(p, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ return atomic_load_zu(p, ATOMIC_RELAXED);
+#endif
 }
 
-static inline bool
-arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
-{
+static void
+arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+ size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+ atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
+#endif
+}
 
- return (arena_avail_adjac_pred(chunk, pageind) ||
- arena_avail_adjac_succ(chunk, pageind, npages));
+static void
+arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
+ size_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
+ size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
+ atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
+#endif
 }
 
+/* Like the _u64 variant, needs an externally synchronized *dst.
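+ * (Illustrative note: in arena_stats_merge() below, dst points into the
+ * caller's private output buffer, so a plain load/store pair suffices.)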
*/ static void -arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) -{ +arena_stats_accum_zu(atomic_zu_t *dst, size_t src) { + size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); + atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED); +} - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); +void +arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, + szind_t szind, uint64_t nrequests) { + arena_stats_lock(tsdn, arena_stats); + arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind - + NBINS].nrequests, nrequests); + arena_stats_unlock(tsdn, arena_stats); +} - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be inserted is clean. - */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); +void +arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) { + arena_stats_lock(tsdn, arena_stats); + arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size); + arena_stats_unlock(tsdn, arena_stats); +} - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac++; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac++; - chunk->nruns_avail++; - assert(chunk->nruns_avail > chunk->nruns_adjac); +void +arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy) { + *nthreads += arena_nthreads_get(arena, false); + *dss = dss_prec_names[arena_dss_prec_get(arena)]; + *dirty_decay_ms = arena_dirty_decay_ms_get(arena); + *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); + *ndirty += extents_npages_get(&arena->extents_dirty); + *nmuzzy += extents_npages_get(&arena->extents_muzzy); +} - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty += npages; - chunk->ndirty += npages; +void +arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, + const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, + size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, + malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) { + cassert(config_stats); + + arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, + muzzy_decay_ms, nactive, ndirty, nmuzzy); + + size_t base_allocated, base_resident, base_mapped; + base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, + &base_mapped); + + arena_stats_lock(tsdn, &arena->stats); + + arena_stats_accum_zu(&astats->mapped, base_mapped + + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); + arena_stats_accum_zu(&astats->retained, + extents_npages_get(&arena->extents_retained) << LG_PAGE); + + arena_stats_accum_u64(&astats->decay_dirty.npurge, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.npurge)); + arena_stats_accum_u64(&astats->decay_dirty.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.nmadvise)); + arena_stats_accum_u64(&astats->decay_dirty.purged, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_dirty.purged)); + + arena_stats_accum_u64(&astats->decay_muzzy.npurge, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.npurge)); + 
arena_stats_accum_u64(&astats->decay_muzzy.nmadvise, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.nmadvise)); + arena_stats_accum_u64(&astats->decay_muzzy.purged, + arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.decay_muzzy.purged)); + + arena_stats_accum_zu(&astats->base, base_allocated); + arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); + arena_stats_accum_zu(&astats->resident, base_resident + + (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + + extents_npages_get(&arena->extents_dirty) + + extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); + + for (szind_t i = 0; i < NSIZES - NBINS; i++) { + uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nmalloc); + arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); + arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); + + uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].ndalloc); + arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); + arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); + + uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, + &arena->stats.lstats[i].nrequests); + arena_stats_accum_u64(&lstats[i].nrequests, + nmalloc + nrequests); + arena_stats_accum_u64(&astats->nrequests_large, + nmalloc + nrequests); + + assert(nmalloc >= ndalloc); + assert(nmalloc - ndalloc <= SIZE_T_MAX); + size_t curlextents = (size_t)(nmalloc - ndalloc); + lstats[i].curlextents += curlextents; + arena_stats_accum_zu(&astats->allocated_large, + curlextents * sz_index2size(NBINS + i)); + } + + arena_stats_unlock(tsdn, &arena->stats); + + /* tcache_bytes counts currently cached bytes. */ + atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + tcache_t *tcache; + ql_foreach(tcache, &arena->tcache_ql, link) { + szind_t i = 0; + for (; i < NBINS; i++) { + tcache_bin_t *tbin = tcache_small_bin_get(tcache, i); + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } + for (; i < nhbins; i++) { + tcache_bin_t *tbin = tcache_large_bin_get(tcache, i); + arena_stats_accum_zu(&astats->tcache_bytes, + tbin->ncached * sz_index2size(i)); + } } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); + malloc_mutex_prof_read(tsdn, + &astats->mutex_prof_data[arena_prof_mutex_tcache_list], + &arena->tcache_ql_mtx); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); + +#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ + malloc_mutex_lock(tsdn, &arena->mtx); \ + malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ + &arena->mtx); \ + malloc_mutex_unlock(tsdn, &arena->mtx); + + /* Gather per arena mutex profiling data. 
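+ *
+ * Each READ_ARENA_MUTEX_PROF_DATA(mtx, ind) use below expands to a
+ * lock/read/unlock triple; e.g. the large_mtx line is equivalent to:
+ *
+ *   malloc_mutex_lock(tsdn, &arena->large_mtx);
+ *   malloc_mutex_prof_read(tsdn,
+ *       &astats->mutex_prof_data[arena_prof_mutex_large],
+ *       &arena->large_mtx);
+ *   malloc_mutex_unlock(tsdn, &arena->large_mtx);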
*/ + READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); + READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, + arena_prof_mutex_extent_avail) + READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx, + arena_prof_mutex_extents_dirty) + READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx, + arena_prof_mutex_extents_muzzy) + READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx, + arena_prof_mutex_extents_retained) + READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx, + arena_prof_mutex_decay_dirty) + READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx, + arena_prof_mutex_decay_muzzy) + READ_ARENA_MUTEX_PROF_DATA(base->mtx, + arena_prof_mutex_base) +#undef READ_ARENA_MUTEX_PROF_DATA + + nstime_copy(&astats->uptime, &arena->create_time); + nstime_update(&astats->uptime); + nstime_subtract(&astats->uptime, &arena->create_time); + + for (szind_t i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; - arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); + malloc_mutex_lock(tsdn, &bin->lock); + malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock); + bstats[i].nmalloc += bin->stats.nmalloc; + bstats[i].ndalloc += bin->stats.ndalloc; + bstats[i].nrequests += bin->stats.nrequests; + bstats[i].curregs += bin->stats.curregs; + bstats[i].nfills += bin->stats.nfills; + bstats[i].nflushes += bin->stats.nflushes; + bstats[i].nslabs += bin->stats.nslabs; + bstats[i].reslabs += bin->stats.reslabs; + bstats[i].curslabs += bin->stats.curslabs; + malloc_mutex_unlock(tsdn, &bin->lock); + } } -static void -arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, - size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) -{ - - assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> - LG_PAGE)); - - /* - * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be - * removed and reinserted even if the run to be removed is clean. 
- */ - if (chunk->ndirty != 0) - arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); - - if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) - chunk->nruns_adjac--; - if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) - chunk->nruns_adjac--; - chunk->nruns_avail--; - assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail - == 0 && chunk->nruns_adjac == 0)); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0) { - arena->ndirty -= npages; - chunk->ndirty -= npages; +void +arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, + extent); + if (arena_dirty_decay_ms_get(arena) == 0) { + arena_decay_dirty(tsdn, arena, false, true); + } else { + arena_background_thread_inactivity_check(tsdn, arena, false); } - if (chunk->ndirty != 0) - arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); - - arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk, - pageind)); } -static inline void * -arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) -{ +static void * +arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab, + const arena_bin_info_t *bin_info) { void *ret; - unsigned regind; - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - assert(run->nfree > 0); - assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); - - regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); - ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + - (uintptr_t)(bin_info->reg_interval * regind)); - run->nfree--; - if (regind == run->nextind) - run->nextind++; - assert(regind < run->nextind); - return (ret); -} - -static inline void -arena_run_reg_dalloc(arena_run_t *run, void *ptr) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t mapbits = arena_mapbits_get(chunk, pageind); - size_t binind = arena_ptr_small_binind_get(ptr, mapbits); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - unsigned regind = arena_run_regind(run, bin_info, ptr); - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - assert(run->nfree < bin_info->nregs); - /* Freeing an interior pointer can cause assertion failure. */ - assert(((uintptr_t)ptr - ((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset)) % - (uintptr_t)bin_info->reg_interval == 0); - assert((uintptr_t)ptr >= (uintptr_t)run + - (uintptr_t)bin_info->reg0_offset); - /* Freeing an unallocated pointer can cause assertion failure. 
*/ - assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + size_t regind; + + assert(extent_nfree_get(slab) > 0); + assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); - bitmap_unset(bitmap, &bin_info->bitmap_info, regind); - run->nfree++; + regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); + ret = (void *)((uintptr_t)extent_addr_get(slab) + + (uintptr_t)(bin_info->reg_size * regind)); + extent_nfree_dec(slab); + return ret; } -static inline void -arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) -{ +#ifndef JEMALLOC_JET +static +#endif +size_t +arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { + size_t diff, regind; - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), (npages << LG_PAGE)); - memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, - (npages << LG_PAGE)); -} + /* Freeing a pointer outside the slab can cause assertion failure. */ + assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); + assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); + /* Freeing an interior pointer can cause assertion failure. */ + assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % + (uintptr_t)arena_bin_info[binind].reg_size == 0); + + /* Avoid doing division with a variable divisor. */ + diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); + switch (binind) { +#define REGIND_bin_yes(index, reg_size) \ + case index: \ + regind = diff / (reg_size); \ + assert(diff == regind * (reg_size)); \ + break; +#define REGIND_bin_no(index, reg_size) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ + lg_delta_lookup) \ + REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta<<lg_delta)) + SIZE_CLASSES +#undef REGIND_bin_yes +#undef REGIND_bin_no +#undef SC + default: not_reached(); + } -static inline void -arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ + assert(regind < arena_bin_info[binind].nregs); - VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << - LG_PAGE)), PAGE); + return regind; } -static inline void -arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) -{ - size_t i; - UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); +static void +arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab, + arena_slab_data_t *slab_data, void *ptr) { + szind_t binind = extent_szind_get(slab); + const arena_bin_info_t *bin_info = &arena_bin_info[binind]; + size_t regind = arena_slab_regind(slab, binind, ptr); + + assert(extent_nfree_get(slab) < bin_info->nregs); + /* Freeing an unallocated pointer can cause assertion failure. 
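+ *
+ * E.g. a double free (hypothetical misuse) clears the region's bitmap
+ * bit on the first call, so the second call trips this assertion in
+ * debug builds:
+ *
+ *   void *p = mallocx(8, 0);
+ *   dallocx(p, 0);
+ *   dallocx(p, 0);   // asserts here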
*/ + assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); - arena_run_page_mark_zeroed(chunk, run_ind); - for (i = 0; i < PAGE / sizeof(size_t); i++) - assert(p[i] == 0); + bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); + extent_nfree_inc(slab); } static void -arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) -{ +arena_nactive_add(arena_t *arena, size_t add_pages) { + atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); +} - if (config_stats) { - ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + - add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive - - sub_pages) << LG_PAGE); - if (cactive_diff != 0) - stats_cactive_add(cactive_diff); - } +static void +arena_nactive_sub(arena_t *arena, size_t sub_pages) { + assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); + atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); } static void -arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, - size_t flag_dirty, size_t need_pages) -{ - size_t total_pages, rem_pages; - - total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> - LG_PAGE; - assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == - flag_dirty); - assert(need_pages <= total_pages); - rem_pages = total_pages - need_pages; - - arena_avail_remove(arena, chunk, run_ind, total_pages, true, true); - arena_cactive_update(arena, need_pages, 0); - arena->nactive += need_pages; - - /* Keep track of trailing unused pages for later use. */ - if (rem_pages > 0) { - if (flag_dirty != 0) { - arena_mapbits_unallocated_set(chunk, - run_ind+need_pages, (rem_pages << LG_PAGE), - flag_dirty); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - flag_dirty); - } else { - arena_mapbits_unallocated_set(chunk, run_ind+need_pages, - (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages)); - arena_mapbits_unallocated_set(chunk, - run_ind+total_pages-1, (rem_pages << LG_PAGE), - arena_mapbits_unzeroed_get(chunk, - run_ind+total_pages-1)); - } - arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages, - false, true); +arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; + + cassert(config_stats); + + if (usize < LARGE_MINCLASS) { + usize = LARGE_MINCLASS; } + index = sz_size2index(usize); + hindex = (index >= NBINS) ? index - NBINS : 0; + + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].nmalloc, 1); } static void -arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, - bool remove, bool zero) -{ - arena_chunk_t *chunk; - size_t flag_dirty, run_ind, need_pages, i; +arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { + szind_t index, hindex; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); + cassert(config_stats); - if (remove) { - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, - need_pages); + if (usize < LARGE_MINCLASS) { + usize = LARGE_MINCLASS; } + index = sz_size2index(usize); + hindex = (index >= NBINS) ? 
index - NBINS : 0; - if (zero) { - if (flag_dirty == 0) { + arena_stats_add_u64(tsdn, &arena->stats, + &arena->stats.lstats[hindex].ndalloc, 1); +} + +static void +arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, + size_t usize) { + arena_large_dalloc_stats_update(tsdn, arena, oldusize); + arena_large_malloc_stats_update(tsdn, arena, usize); +} + +extent_t * +arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool *zero) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + szind_t szind = sz_size2index(usize); + size_t mapped_add; + bool commit = true; + extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false, + szind, zero, &commit); + if (extent == NULL) { + extent = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, + false, szind, zero, &commit); + } + size_t size = usize + sz_large_pad; + if (extent == NULL) { + extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, + usize, sz_large_pad, alignment, false, szind, zero, + &commit); + if (config_stats) { /* - * The run is clean, so some pages may be zeroed (i.e. - * never before touched). + * extent may be NULL on OOM, but in that case + * mapped_add isn't used below, so there's no need to + * conditionally set it to 0 here. */ - for (i = 0; i < need_pages; i++) { - if (arena_mapbits_unzeroed_get(chunk, run_ind+i) - != 0) - arena_run_zero(chunk, run_ind+i, 1); - else if (config_debug) { - arena_run_page_validate_zeroed(chunk, - run_ind+i); - } else { - arena_run_page_mark_zeroed(chunk, - run_ind+i); - } + mapped_add = size; + } + } else if (config_stats) { + mapped_add = 0; + } + + if (extent != NULL) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_malloc_stats_update(tsdn, arena, usize); + if (mapped_add != 0) { + arena_stats_add_zu(tsdn, &arena->stats, + &arena->stats.mapped, mapped_add); } - } else { - /* The run is dirty, so all pages must be zeroed. */ - arena_run_zero(chunk, run_ind, need_pages); + arena_stats_unlock(tsdn, &arena->stats); } - } else { - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); + arena_nactive_add(arena, size >> LG_PAGE); } - /* - * Set the last element first, in case the run only contains one page - * (i.e. both statements set the same element).
- */ - arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); + return extent; } -static void -arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ - - arena_run_split_large_helper(arena, run, size, true, zero); +void +arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_dalloc_stats_update(tsdn, arena, + extent_usize_get(extent)); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); } -static void -arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) -{ +void +arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = oldusize - usize; - arena_run_split_large_helper(arena, run, size, false, zero); + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_sub(arena, udiff >> LG_PAGE); } -static void -arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, - size_t binind) -{ - arena_chunk_t *chunk; - size_t flag_dirty, run_ind, need_pages, i; +void +arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + size_t oldusize) { + size_t usize = extent_usize_get(extent); + size_t udiff = usize - oldusize; - assert(binind != BININD_INVALID); + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); + arena_stats_unlock(tsdn, &arena->stats); + } + arena_nactive_add(arena, udiff >> LG_PAGE); +} - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); - need_pages = (size >> LG_PAGE); - assert(need_pages > 0); +static ssize_t +arena_decay_ms_read(arena_decay_t *decay) { + return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); +} - arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); +static void +arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { + atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); +} +static void +arena_decay_deadline_init(arena_decay_t *decay) { /* - * Propagate the dirty and unzeroed flags to the allocated small run, - * so that arena_dalloc_bin_run() has the ability to conditionally trim - * clean pages. - */ - arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); - /* - * The first page will always be dirtied during small run - * initialization, so a validation failure here would not actually - * cause an observable failure. + * Generate a new deadline that is uniformly random within the next + * epoch after the current one. 
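+ *
+ * Worked example, assuming SMOOTHSTEP_NSTEPS == 200: with
+ * decay_ms == 10000 the interval is 10s / 200 = 50ms, so
+ *
+ *   deadline = epoch + interval + jitter,  jitter in [0ms, 50ms)
+ *
+ * The random jitter helps keep arenas that were configured at the same
+ * time from purging in lockstep.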
*/ - if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, - run_ind) == 0) - arena_run_page_validate_zeroed(chunk, run_ind); - for (i = 1; i < need_pages - 1; i++) { - arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); - if (config_debug && flag_dirty == 0 && - arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) - arena_run_page_validate_zeroed(chunk, run_ind+i); - } - arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1, - binind, flag_dirty); - if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, - run_ind+need_pages-1) == 0) - arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1); - VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + - (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); -} - -static arena_chunk_t * -arena_chunk_init_spare(arena_t *arena) -{ - arena_chunk_t *chunk; - - assert(arena->spare != NULL); - - chunk = arena->spare; - arena->spare = NULL; - - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxclass); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); - - return (chunk); -} - -static arena_chunk_t * -arena_chunk_init_hard(arena_t *arena) -{ - arena_chunk_t *chunk; - bool zero; - size_t unzeroed, i; - - assert(arena->spare == NULL); - - zero = false; - malloc_mutex_unlock(&arena->lock); - chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false, - &zero, arena->dss_prec); - malloc_mutex_lock(&arena->lock); - if (chunk == NULL) - return (NULL); - if (config_stats) - arena->stats.mapped += chunksize; + nstime_copy(&decay->deadline, &decay->epoch); + nstime_add(&decay->deadline, &decay->interval); + if (arena_decay_ms_read(decay) > 0) { + nstime_t jitter; - chunk->arena = arena; + nstime_init(&jitter, prng_range_u64(&decay->jitter_state, + nstime_ns(&decay->interval))); + nstime_add(&decay->deadline, &jitter); + } +} - /* - * Claim that no pages are in use, since the header is merely overhead. - */ - chunk->ndirty = 0; +static bool +arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { + return (nstime_compare(&decay->deadline, time) <= 0); +} - chunk->nruns_avail = 0; - chunk->nruns_adjac = 0; +static size_t +arena_decay_backlog_npages_limit(const arena_decay_t *decay) { + uint64_t sum; + size_t npages_limit_backlog; + unsigned i; /* - * Initialize the map to contain one maximal free untouched run. Mark - * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. - */ - unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; - arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, - unzeroed); - /* - * There is no need to initialize the internal page map entries unless - * the chunk is not zeroed. + * For each element of decay_backlog, multiply by the corresponding + * fixed-point smoothstep decay factor. Sum the products, then divide + * to round down to the nearest whole number of pages. 
*/ - if (zero == false) { - VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk, - map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - for (i = map_bias+1; i < chunk_npages-1; i++) - arena_mapbits_unzeroed_set(chunk, i, unzeroed); - } else { - VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk, - map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, - chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, - map_bias+1))); - if (config_debug) { - for (i = map_bias+1; i < chunk_npages-1; i++) { - assert(arena_mapbits_unzeroed_get(chunk, i) == - unzeroed); - } - } + sum = 0; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * h_steps[i]; } - arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass, - unzeroed); + npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); - return (chunk); + return npages_limit_backlog; } -static arena_chunk_t * -arena_chunk_alloc(arena_t *arena) -{ - arena_chunk_t *chunk; +static void +arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { + size_t npages_delta = (current_npages > decay->nunpurged) ? + current_npages - decay->nunpurged : 0; + decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; - if (arena->spare != NULL) - chunk = arena_chunk_init_spare(arena); - else { - chunk = arena_chunk_init_hard(arena); - if (chunk == NULL) - return (NULL); + if (config_debug) { + if (current_npages > decay->ceil_npages) { + decay->ceil_npages = current_npages; + } + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + assert(decay->ceil_npages >= npages_limit); + if (decay->ceil_npages > npages_limit) { + decay->ceil_npages = npages_limit; + } } - - /* Insert the run into the runs_avail tree. */ - arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); - - return (chunk); } static void -arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) -{ - assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); - assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); - assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == - arena_maxclass); - assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == - arena_maxclass); - assert(arena_mapbits_dirty_get(chunk, map_bias) == - arena_mapbits_dirty_get(chunk, chunk_npages-1)); +arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, + size_t current_npages) { + if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { + memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * + sizeof(size_t)); + } else { + size_t nadvance_z = (size_t)nadvance_u64; - /* - * Remove run from the runs_avail tree, so that the arena does not use - * it. 
- */ - arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias, - false, false); + assert((uint64_t)nadvance_z == nadvance_u64); - if (arena->spare != NULL) { - arena_chunk_t *spare = arena->spare; + memmove(decay->backlog, &decay->backlog[nadvance_z], + (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); + if (nadvance_z > 1) { + memset(&decay->backlog[SMOOTHSTEP_NSTEPS - + nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); + } + } - arena->spare = chunk; - malloc_mutex_unlock(&arena->lock); - chunk_dealloc((void *)spare, chunksize, true); - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena->stats.mapped -= chunksize; - } else - arena->spare = chunk; + arena_decay_backlog_update_last(decay, current_npages); } -static arena_run_t * -arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) -{ - arena_run_t *run; - arena_chunk_map_t *mapelm, key; - - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = (((uintptr_t)mapelm - - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) - + map_bias; - - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << - LG_PAGE)); - arena_run_split_large(arena, run, size, zero); - return (run); +static void +arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, size_t current_npages, size_t npages_limit, + bool is_background_thread) { + if (current_npages > npages_limit) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + npages_limit, is_background_thread); } - - return (NULL); } -static arena_run_t * -arena_run_alloc_large(arena_t *arena, size_t size, bool zero) -{ - arena_chunk_t *chunk; - arena_run_t *run; +static void +arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, + size_t current_npages) { + assert(arena_decay_deadline_reached(decay, time)); - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); + nstime_t delta; + nstime_copy(&delta, time); + nstime_subtract(&delta, &decay->epoch); - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_large_helper(arena, size, zero); - if (run != NULL) - return (run); + uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); + assert(nadvance_u64 > 0); - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split_large(arena, run, size, zero); - return (run); - } + /* Add nadvance_u64 decay intervals to epoch. */ + nstime_copy(&delta, &decay->interval); + nstime_imultiply(&delta, nadvance_u64); + nstime_add(&decay->epoch, &delta); - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. - */ - return (arena_run_alloc_large_helper(arena, size, zero)); + /* Set a new deadline. */ + arena_decay_deadline_init(decay); + + /* Update the backlog. 
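+ *
+ * The backlog is a sliding window of per-epoch dirty-page deltas:
+ * advancing by k epochs shifts the window left by k slots, zero-fills
+ * the vacated slots, and records the newest delta in the last slot,
+ * e.g. advancing by two turns
+ *
+ *   [b0, b1, ..., b199]  into  [b2, ..., b199, 0, delta]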
*/ + arena_decay_backlog_update(decay, nadvance_u64, current_npages); } -static arena_run_t * -arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind) -{ - arena_run_t *run; - arena_chunk_map_t *mapelm, key; +static void +arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, const nstime_t *time, bool is_background_thread) { + size_t current_npages = extents_npages_get(extents); + arena_decay_epoch_advance_helper(decay, time, current_npages); - key.bits = size | CHUNK_MAP_KEY; - mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); - if (mapelm != NULL) { - arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); - size_t pageind = (((uintptr_t)mapelm - - (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) - + map_bias; + size_t npages_limit = arena_decay_backlog_npages_limit(decay); + /* We may unlock decay->mtx when try_purge(). Finish logging first. */ + decay->nunpurged = (npages_limit > current_npages) ? npages_limit : + current_npages; - run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << - LG_PAGE)); - arena_run_split_small(arena, run, size, binind); - return (run); + if (!background_thread_enabled() || is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + current_npages, npages_limit, is_background_thread); } - - return (NULL); } -static arena_run_t * -arena_run_alloc_small(arena_t *arena, size_t size, size_t binind) -{ - arena_chunk_t *chunk; - arena_run_t *run; - - assert(size <= arena_maxclass); - assert((size & PAGE_MASK) == 0); - assert(binind != BININD_INVALID); +static void +arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) { + arena_decay_ms_write(decay, decay_ms); + if (decay_ms > 0) { + nstime_init(&decay->interval, (uint64_t)decay_ms * + KQU(1000000)); + nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); + } - /* Search the arena's chunks for the lowest best fit. */ - run = arena_run_alloc_small_helper(arena, size, binind); - if (run != NULL) - return (run); + nstime_init(&decay->epoch, 0); + nstime_update(&decay->epoch); + decay->jitter_state = (uint64_t)(uintptr_t)decay; + arena_decay_deadline_init(decay); + decay->nunpurged = 0; + memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); +} - /* - * No usable runs. Create a new chunk from which to allocate the run. - */ - chunk = arena_chunk_alloc(arena); - if (chunk != NULL) { - run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); - arena_run_split_small(arena, run, size, binind); - return (run); +static bool +arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms, + decay_stats_t *stats) { + if (config_debug) { + for (size_t i = 0; i < sizeof(arena_decay_t); i++) { + assert(((char *)decay)[i] == 0); + } + decay->ceil_npages = 0; + } + if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, + malloc_mutex_rank_exclusive)) { + return true; + } + decay->purging = false; + arena_decay_reinit(decay, extents, decay_ms); + /* Memory is zeroed, so there is no need to clear stats. */ + if (config_stats) { + decay->stats = stats; } + return false; +} - /* - * arena_chunk_alloc() failed, but another thread may have made - * sufficient memory available while this one dropped arena->lock in - * arena_chunk_alloc(), so search one more time. 
- */ - return (arena_run_alloc_small_helper(arena, size, binind)); +static bool +arena_decay_ms_valid(ssize_t decay_ms) { + if (decay_ms < -1) { + return false; + } + if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * + KQU(1000)) { + return true; + } + return false; } -static inline void -arena_maybe_purge(arena_t *arena) -{ - size_t npurgeable, threshold; +static bool +arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread) { + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + /* Purge all or nothing if the option is disabled. */ + ssize_t decay_ms = arena_decay_ms_read(decay); + if (decay_ms <= 0) { + if (decay_ms == 0) { + arena_decay_to_limit(tsdn, arena, decay, extents, false, + 0, is_background_thread); + } + return false; + } + + nstime_t time; + nstime_init(&time, 0); + nstime_update(&time); + if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time) + > 0)) { + /* + * Time went backwards. Move the epoch back in time and + * generate a new deadline, with the expectation that time + * typically flows forward for long enough periods of time that + * epochs complete. Unfortunately, this strategy is susceptible + * to clock jitter triggering premature epoch advances, but + * clock jitter estimation and compensation isn't feasible here + * because calls into this code are event-driven. + */ + nstime_copy(&decay->epoch, &time); + arena_decay_deadline_init(decay); + } else { + /* Verify that time does not go backwards. */ + assert(nstime_compare(&decay->epoch, &time) <= 0); + } - /* Don't purge if the option is disabled. */ - if (opt_lg_dirty_mult < 0) - return; - /* Don't purge if all dirty pages are already being purged. */ - if (arena->ndirty <= arena->npurgatory) - return; - npurgeable = arena->ndirty - arena->npurgatory; - threshold = (arena->nactive >> opt_lg_dirty_mult); /* - * Don't purge unless the number of purgeable pages exceeds the - * threshold. + * If the deadline has been reached, advance to the current epoch and + * purge to the new limit if necessary. Note that dirty pages created + * during the current epoch are not subject to purge until a future + * epoch, so as a result purging only happens during epoch advances, or + * being triggered by background threads (scheduled event). 
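+ *
+ * Concretely: a page dirtied now is counted in the current epoch's
+ * delta, enters the backlog at the next epoch advance, and only then
+ * starts aging toward purgeability as its smoothstep weight decays
+ * over subsequent epochs.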
*/ - if (npurgeable <= threshold) - return; + bool advance_epoch = arena_decay_deadline_reached(decay, &time); + if (advance_epoch) { + arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, + is_background_thread); + } else if (is_background_thread) { + arena_decay_try_purge(tsdn, arena, decay, extents, + extents_npages_get(extents), + arena_decay_backlog_npages_limit(decay), + is_background_thread); + } - arena_purge(arena, false); + return advance_epoch; } -static arena_chunk_t * -chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) -{ - size_t *ndirty = (size_t *)arg; +static ssize_t +arena_decay_ms_get(arena_decay_t *decay) { + return arena_decay_ms_read(decay); +} - assert(chunk->ndirty != 0); - *ndirty += chunk->ndirty; - return (NULL); +ssize_t +arena_dirty_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_dirty); } -static size_t -arena_compute_npurgatory(arena_t *arena, bool all) -{ - size_t npurgatory, npurgeable; +ssize_t +arena_muzzy_decay_ms_get(arena_t *arena) { + return arena_decay_ms_get(&arena->decay_muzzy); +} +static bool +arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + + malloc_mutex_lock(tsdn, &decay->mtx); /* - * Compute the minimum number of pages that this thread should try to - * purge. + * Restart decay backlog from scratch, which may cause many dirty pages + * to be immediately purged. It would conceptually be possible to map + * the old backlog onto the new backlog, but there is no justification + * for such complexity since decay_ms changes are intended to be + * infrequent, either between the {-1, 0, >0} states, or a one-time + * arbitrary change during initial arena configuration. */ - npurgeable = arena->ndirty - arena->npurgatory; + arena_decay_reinit(decay, extents, decay_ms); + arena_maybe_decay(tsdn, arena, decay, extents, false); + malloc_mutex_unlock(tsdn, &decay->mtx); - if (all == false) { - size_t threshold = (arena->nactive >> opt_lg_dirty_mult); + return false; +} - npurgatory = npurgeable - threshold; - } else - npurgatory = npurgeable; +bool +arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, decay_ms); +} - return (npurgatory); +bool +arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, + ssize_t decay_ms) { + return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, decay_ms); } -static void -arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all, - arena_chunk_mapelms_t *mapelms) -{ - size_t pageind, npages; +static size_t +arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, + extent_list_t *decay_extents) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); - /* - * Temporarily allocate free dirty runs within chunk. If all is false, - * only operate on dirty runs that are fragments; otherwise operate on - * all dirty runs. 
- */ - for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - if (arena_mapbits_allocated_get(chunk, pageind) == 0) { - size_t run_size = - arena_mapbits_unallocated_size_get(chunk, pageind); - - npages = run_size >> LG_PAGE; - assert(pageind + npages <= chunk_npages); - assert(arena_mapbits_dirty_get(chunk, pageind) == - arena_mapbits_dirty_get(chunk, pageind+npages-1)); - - if (arena_mapbits_dirty_get(chunk, pageind) != 0 && - (all || arena_avail_adjac(chunk, pageind, - npages))) { - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - arena_run_split_large(arena, run, run_size, - false); - /* Append to list for later processing. */ - ql_elm_new(mapelm, u.ql_link); - ql_tail_insert(mapelms, mapelm, u.ql_link); - } - } else { - /* Skip run. */ - if (arena_mapbits_large_get(chunk, pageind) != 0) { - npages = arena_mapbits_large_size_get(chunk, - pageind) >> LG_PAGE; - } else { - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run = (arena_run_t *)((uintptr_t) - chunk + (uintptr_t)(pageind << LG_PAGE)); - - assert(arena_mapbits_small_runind_get(chunk, - pageind) == 0); - binind = arena_bin_index(arena, run->bin); - bin_info = &arena_bin_info[binind]; - npages = bin_info->run_size >> LG_PAGE; - } - } + /* Stash extents according to npages_limit. */ + size_t nstashed = 0; + extent_t *extent; + while ((extent = extents_evict(tsdn, arena, r_extent_hooks, extents, + npages_limit)) != NULL) { + extent_list_append(decay_extents, extent); + nstashed += extent_size_get(extent) >> LG_PAGE; } - assert(pageind == chunk_npages); - assert(chunk->ndirty == 0 || all == false); - assert(chunk->nruns_adjac == 0); + return nstashed; } static size_t -arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk, - arena_chunk_mapelms_t *mapelms) -{ - size_t npurged, pageind, npages, nmadvise; - arena_chunk_map_t *mapelm; - - malloc_mutex_unlock(&arena->lock); - if (config_stats) +arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, + bool all, extent_list_t *decay_extents, bool is_background_thread) { + UNUSED size_t nmadvise, nunmapped; + size_t npurged; + + if (config_stats) { nmadvise = 0; + nunmapped = 0; + } npurged = 0; - ql_foreach(mapelm, mapelms, u.ql_link) { - bool unzeroed; - size_t flag_unzeroed, i; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - npages = arena_mapbits_large_size_get(chunk, pageind) >> - LG_PAGE; - assert(pageind + npages <= chunk_npages); - unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << - LG_PAGE)), (npages << LG_PAGE)); - flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; - /* - * Set the unzeroed flag for all pages, now that pages_purge() - * has returned whether the pages were zeroed as a side effect - * of purging. This chunk map modification is safe even though - * the arena mutex isn't currently owned by this thread, - * because the run is marked as allocated, thus protecting it - * from being modified by any other thread. As long as these - * writes don't perturb the first and last elements' - * CHUNK_MAP_ALLOCATED bits, behavior is well defined. 
- */ - for (i = 0; i < npages; i++) { - arena_mapbits_unzeroed_set(chunk, pageind+i, - flag_unzeroed); + + ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); + for (extent_t *extent = extent_list_first(decay_extents); extent != + NULL; extent = extent_list_first(decay_extents)) { + if (config_stats) { + nmadvise++; } + size_t npages = extent_size_get(extent) >> LG_PAGE; npurged += npages; - if (config_stats) - nmadvise++; + extent_list_remove(decay_extents, extent); + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + if (!all && muzzy_decay_ms != 0 && + !extent_purge_lazy_wrapper(tsdn, arena, + r_extent_hooks, extent, 0, + extent_size_get(extent))) { + extents_dalloc(tsdn, arena, r_extent_hooks, + &arena->extents_muzzy, extent); + arena_background_thread_inactivity_check(tsdn, + arena, is_background_thread); + break; + } + /* Fall through. */ + case extent_state_muzzy: + extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, + extent); + if (config_stats) { + nunmapped += npages; + } + break; + case extent_state_retained: + default: + not_reached(); + } } - malloc_mutex_lock(&arena->lock); - if (config_stats) - arena->stats.nmadvise += nmadvise; - return (npurged); + if (config_stats) { + arena_stats_lock(tsdn, &arena->stats); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, + 1); + arena_stats_add_u64(tsdn, &arena->stats, + &decay->stats->nmadvise, nmadvise); + arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged, + npurged); + arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, + nunmapped << LG_PAGE); + arena_stats_unlock(tsdn, &arena->stats); + } + + return npurged; } +/* + * npages_limit: Decay as many dirty extents as possible without violating the + * invariant: (extents_npages_get(extents) >= npages_limit) + */ static void -arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk, - arena_chunk_mapelms_t *mapelms) -{ - arena_chunk_map_t *mapelm; - size_t pageind; - - /* Deallocate runs. */ - for (mapelm = ql_first(mapelms); mapelm != NULL; - mapelm = ql_first(mapelms)) { - arena_run_t *run; - - pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t)) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << - LG_PAGE)); - ql_remove(mapelms, mapelm, u.ql_link); - arena_run_dalloc(arena, run, false, true); +arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool all, size_t npages_limit, + bool is_background_thread) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 1); + malloc_mutex_assert_owner(tsdn, &decay->mtx); + + if (decay->purging) { + return; } -} + decay->purging = true; + malloc_mutex_unlock(tsdn, &decay->mtx); -static inline size_t -arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) -{ - size_t npurged; - arena_chunk_mapelms_t mapelms; + extent_hooks_t *extent_hooks = extent_hooks_get(arena); - ql_new(&mapelms); + extent_list_t decay_extents; + extent_list_init(&decay_extents); - /* - * If chunk is the spare, temporarily re-allocate it, 1) so that its - * run is reinserted into runs_avail, and 2) so that it cannot be - * completely discarded by another thread while arena->lock is dropped - * by this thread. Note that the arena_run_dalloc() call will - * implicitly deallocate the chunk, so no explicit action is required - * in this function to deallocate the chunk. 
- * - * Note that once a chunk contains dirty pages, it cannot again contain - * a single run unless 1) it is a dirty run, or 2) this function purges - * dirty pages and causes the transition to a single clean run. Thus - * (chunk == arena->spare) is possible, but it is not possible for - * this function to be called on the spare unless it contains a dirty - * run. - */ - if (chunk == arena->spare) { - assert(arena_mapbits_dirty_get(chunk, map_bias) != 0); - assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0); - - arena_chunk_alloc(arena); + size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, + npages_limit, &decay_extents); + if (npurge != 0) { + UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, + &extent_hooks, decay, extents, all, &decay_extents, + is_background_thread); + assert(npurged == npurge); } - if (config_stats) - arena->stats.purged += chunk->ndirty; - - /* - * Operate on all dirty runs if there is no clean/dirty run - * fragmentation. - */ - if (chunk->nruns_adjac == 0) - all = true; - - arena_chunk_stash_dirty(arena, chunk, all, &mapelms); - npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms); - arena_chunk_unstash_purged(arena, chunk, &mapelms); - - return (npurged); + malloc_mutex_lock(tsdn, &decay->mtx); + decay->purging = false; } -static void -arena_purge(arena_t *arena, bool all) -{ - arena_chunk_t *chunk; - size_t npurgatory; - if (config_debug) { - size_t ndirty = 0; +static bool +arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, + extents_t *extents, bool is_background_thread, bool all) { + if (all) { + malloc_mutex_lock(tsdn, &decay->mtx); + arena_decay_to_limit(tsdn, arena, decay, extents, all, 0, + is_background_thread); + malloc_mutex_unlock(tsdn, &decay->mtx); - arena_chunk_dirty_iter(&arena->chunks_dirty, NULL, - chunks_dirty_iter_cb, (void *)&ndirty); - assert(ndirty == arena->ndirty); + return false; } - assert(arena->ndirty > arena->npurgatory || all); - assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - - arena->npurgatory) || all); - if (config_stats) - arena->stats.npurge++; + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* No need to wait if another thread is in progress. */ + return true; + } - /* - * Add the minimum number of pages this thread should try to purge to - * arena->npurgatory. This will keep multiple threads from racing to - * reduce ndirty below the threshold. - */ - npurgatory = arena_compute_npurgatory(arena, all); - arena->npurgatory += npurgatory; + bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, + is_background_thread); + size_t npages_new; + if (epoch_advanced) { + /* Backlog is updated on epoch advance. */ + npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; + } + malloc_mutex_unlock(tsdn, &decay->mtx); - while (npurgatory > 0) { - size_t npurgeable, npurged, nunpurged; + if (have_background_thread && background_thread_enabled() && + epoch_advanced && !is_background_thread) { + background_thread_interval_check(tsdn, arena, decay, npages_new); + } - /* Get next chunk with dirty pages. */ - chunk = arena_chunk_dirty_first(&arena->chunks_dirty); - if (chunk == NULL) { - /* - * This thread was unable to purge as many pages as - * originally intended, due to races with other threads - * that either did some of the purging work, or re-used - * dirty pages. 
- */ - arena->npurgatory -= npurgatory; - return; - } - npurgeable = chunk->ndirty; - assert(npurgeable != 0); + return false; +} - if (npurgeable > npurgatory && chunk->nruns_adjac == 0) { - /* - * This thread will purge all the dirty pages in chunk, - * so set npurgatory to reflect this thread's intent to - * purge the pages. This tends to reduce the chances - * of the following scenario: - * - * 1) This thread sets arena->npurgatory such that - * (arena->ndirty - arena->npurgatory) is at the - * threshold. - * 2) This thread drops arena->lock. - * 3) Another thread causes one or more pages to be - * dirtied, and immediately determines that it must - * purge dirty pages. - * - * If this scenario *does* play out, that's okay, - * because all of the purging work being done really - * needs to happen. - */ - arena->npurgatory += npurgeable - npurgatory; - npurgatory = npurgeable; - } +static bool +arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_dirty, + &arena->extents_dirty, is_background_thread, all); +} - /* - * Keep track of how many pages are purgeable, versus how many - * actually get purged, and adjust counters accordingly. - */ - arena->npurgatory -= npurgeable; - npurgatory -= npurgeable; - npurged = arena_chunk_purge(arena, chunk, all); - nunpurged = npurgeable - npurged; - arena->npurgatory += nunpurged; - npurgatory += nunpurged; - } +static bool +arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, + bool all) { + return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, + &arena->extents_muzzy, is_background_thread, all); } void -arena_purge_all(arena_t *arena) -{ - - malloc_mutex_lock(&arena->lock); - arena_purge(arena, true); - malloc_mutex_unlock(&arena->lock); +arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { + if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { + return; + } + arena_decay_muzzy(tsdn, arena, is_background_thread, all); } static void -arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, - size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) -{ - size_t size = *p_size; - size_t run_ind = *p_run_ind; - size_t run_pages = *p_run_pages; - - /* Try to coalesce forward. */ - if (run_ind + run_pages < chunk_npages && - arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && - arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { - size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages); - size_t nrun_pages = nrun_size >> LG_PAGE; - - /* - * Remove successor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, - run_ind+run_pages+nrun_pages-1) == nrun_size); - assert(arena_mapbits_dirty_get(chunk, - run_ind+run_pages+nrun_pages-1) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages, - false, true); +arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { + arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); - size += nrun_size; - run_pages += nrun_pages; - - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); - } - - /* Try to coalesce backward. 
*/ - if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, - run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == - flag_dirty) { - size_t prun_size = arena_mapbits_unallocated_size_get(chunk, - run_ind-1); - size_t prun_pages = prun_size >> LG_PAGE; - - run_ind -= prun_pages; + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); +} - /* - * Remove predecessor from runs_avail; the coalesced run is - * inserted later. - */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - prun_size); - assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); - arena_avail_remove(arena, chunk, run_ind, prun_pages, true, - false); +static void +arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) { + assert(extent_nfree_get(slab) > 0); + extent_heap_insert(&bin->slabs_nonfull, slab); +} - size += prun_size; - run_pages += prun_pages; +static void +arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) { + extent_heap_remove(&bin->slabs_nonfull, slab); +} - arena_mapbits_unallocated_size_set(chunk, run_ind, size); - arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, - size); +static extent_t * +arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) { + extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); + if (slab == NULL) { + return NULL; } - - *p_size = size; - *p_run_ind = run_ind; - *p_run_pages = run_pages; + if (config_stats) { + bin->stats.reslabs++; + } + return slab; } static void -arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) -{ - arena_chunk_t *chunk; - size_t size, run_ind, run_pages, flag_dirty; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - assert(run_ind >= map_bias); - assert(run_ind < chunk_npages); - if (arena_mapbits_large_get(chunk, run_ind) != 0) { - size = arena_mapbits_large_size_get(chunk, run_ind); - assert(size == PAGE || - arena_mapbits_large_size_get(chunk, - run_ind+(size>>LG_PAGE)-1) == 0); - } else { - size_t binind = arena_bin_index(arena, run->bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - size = bin_info->run_size; - } - run_pages = (size >> LG_PAGE); - arena_cactive_update(arena, 0, run_pages); - arena->nactive -= run_pages; - +arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) { + assert(extent_nfree_get(slab) == 0); /* - * The run is dirty if the caller claims to have dirtied it, as well as - * if it was already dirty before being allocated and the caller - * doesn't claim to have cleaned it. + * Tracking extents is required by arena_reset, which is not allowed + * for auto arenas. Bypass this step to avoid touching the extent + * linkage (often results in cache misses) for auto arenas. */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0) - dirty = true; - flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; - - /* Mark pages as unallocated in the chunk map. 
*/ - if (dirty) { - arena_mapbits_unallocated_set(chunk, run_ind, size, - CHUNK_MAP_DIRTY); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - CHUNK_MAP_DIRTY); - } else { - arena_mapbits_unallocated_set(chunk, run_ind, size, - arena_mapbits_unzeroed_get(chunk, run_ind)); - arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, - arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); + if (arena_is_auto(arena)) { + return; } + extent_list_append(&bin->slabs_full, slab); +} - arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, - flag_dirty); - - /* Insert into runs_avail, now that coalescing is complete. */ - assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == - arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); - arena_avail_insert(arena, chunk, run_ind, run_pages, true, true); - - /* Deallocate chunk if it is now completely unused. */ - if (size == arena_maxclass) { - assert(run_ind == map_bias); - assert(run_pages == (arena_maxclass >> LG_PAGE)); - arena_chunk_dealloc(arena, chunk); +static void +arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) { + if (arena_is_auto(arena)) { + return; } + extent_list_remove(&bin->slabs_full, slab); +} +void +arena_reset(tsd_t *tsd, arena_t *arena) { /* - * It is okay to do dirty page processing here even if the chunk was - * deallocated above, since in that case it is the spare. Waiting - * until after possible chunk deallocation to do dirty processing - * allows for an old spare to be fully deallocated, thus decreasing the - * chances of spuriously crossing the dirty page purging threshold. + * Locking in this function is unintuitive. The caller guarantees that + * no concurrent operations are happening in this arena, but there are + * still reasons that some locking is necessary: + * + * - Some of the functions in the transitive closure of calls assume + * appropriate locks are held, and in some cases these locks are + * temporarily dropped to avoid lock order reversal or deadlock due to + * reentry. + * - mallctl("epoch", ...) may concurrently refresh stats. While + * strictly speaking this is a "concurrent operation", disallowing + * stats refreshes would impose an inconvenient burden. */ - if (dirty) - arena_maybe_purge(arena); -} -static void -arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - size_t head_npages = (oldsize - newsize) >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); + /* Large allocations. */ + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); - assert(oldsize > newsize); + for (extent_t *extent = extent_list_first(&arena->large); extent != + NULL; extent = extent_list_first(&arena->large)) { + void *ptr = extent_base_get(extent); + size_t usize; - /* - * Update the chunk map so that arena_run_dalloc() can treat the - * leading run as separately allocated. Set the last element of each - * run first, in case of single-page runs. 
- */ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); - if (config_debug) { - UNUSED size_t tail_npages = newsize >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); + if (config_stats || (config_prof && opt_prof)) { + usize = sz_index2size(alloc_ctx.szind); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + } + /* Remove large allocation from prof sample set. */ + if (config_prof && opt_prof) { + prof_free(tsd, ptr, usize, &alloc_ctx); + } + large_dalloc(tsd_tsdn(tsd), extent); + malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); + + /* Bins. */ + for (unsigned i = 0; i < NBINS; i++) { + extent_t *slab; + arena_bin_t *bin = &arena->bins[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (bin->slabcur != NULL) { + slab = bin->slabcur; + bin->slabcur = NULL; + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != + NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + for (slab = extent_list_first(&bin->slabs_full); slab != NULL; + slab = extent_list_first(&bin->slabs_full)) { + arena_bin_slabs_full_remove(arena, bin, slab); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + } + if (config_stats) { + bin->stats.curregs = 0; + bin->stats.curslabs = 0; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } - arena_mapbits_large_set(chunk, pageind+head_npages, newsize, - flag_dirty); - arena_run_dalloc(arena, run, false, false); + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); } static void -arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - size_t oldsize, size_t newsize, bool dirty) -{ - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - size_t head_npages = newsize >> LG_PAGE; - size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); - - assert(oldsize > newsize); - +arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { /* - * Update the chunk map so that arena_run_dalloc() can treat the - * trailing run as separately allocated. Set the last element of each - * run first, in case of single-page runs. + * Iterate over the retained extents and destroy them. This gives the + * extent allocator underlying the extent hooks an opportunity to unmap + * all retained memory without having to keep its own metadata + * structures. In practice, virtual memory for dss-allocated extents is + * leaked here, so best practice is to avoid dss for arenas to be + * destroyed, or provide custom extent hooks that track retained + * dss-based extents for later reuse. 
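+ *
+ * A hypothetical hook installation for such an arena (illustrative
+ * names, installed before the arena is first used):
+ *
+ *   extent_hooks_t hooks = ...;  // copy of the current hooks
+ *   hooks.destroy = my_dss_tracking_destroy;
+ *   extent_hooks_t *hooksp = &hooks;
+ *   mallctl("arena.0.extent_hooks", NULL, NULL, &hooksp, sizeof(hooksp));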
*/ - assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); - arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); - arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); - - if (config_debug) { - UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; - assert(arena_mapbits_large_size_get(chunk, - pageind+head_npages+tail_npages-1) == 0); - assert(arena_mapbits_dirty_get(chunk, - pageind+head_npages+tail_npages-1) == flag_dirty); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + extent_t *extent; + while ((extent = extents_evict(tsdn, arena, &extent_hooks, + &arena->extents_retained, 0)) != NULL) { + extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); } - arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, - flag_dirty); - - arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), - dirty, false); } -static arena_run_t * -arena_bin_runs_first(arena_bin_t *bin) -{ - arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs); - if (mapelm != NULL) { - arena_chunk_t *chunk; - size_t pageind; - arena_run_t *run; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); - pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / - sizeof(arena_chunk_map_t))) + map_bias; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << - LG_PAGE)); - return (run); - } +void +arena_destroy(tsd_t *tsd, arena_t *arena) { + assert(base_ind_get(arena->base) >= narenas_auto); + assert(arena_nthreads_get(arena, false) == 0); + assert(arena_nthreads_get(arena, true) == 0); - return (NULL); -} + /* + * No allocations have occurred since arena_reset() was called. + * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached + * extents, so only retained extents may remain. + */ + assert(extents_npages_get(&arena->extents_dirty) == 0); + assert(extents_npages_get(&arena->extents_muzzy) == 0); -static void -arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_t *chunk = CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); + /* Deallocate retained memory. */ + arena_destroy_retained(tsd_tsdn(tsd), arena); - assert(arena_run_tree_search(&bin->runs, mapelm) == NULL); + /* + * Remove the arena pointer from the arenas array. We rely on the fact + * that there is no way for the application to get a dirty read from the + * arenas array unless there is an inherent race in the application + * involving access of an arena being concurrently destroyed. The + * application must synchronize knowledge of the arena's validity, so as + * long as we use an atomic write to update the arenas array, the + * application will get a clean read any time after it synchronizes + * knowledge that the arena is no longer valid. + */ + arena_set(base_ind_get(arena->base), NULL); - arena_run_tree_insert(&bin->runs, mapelm); + /* + * Destroy the base allocator, which manages all metadata ever mapped by + * this arena. 
+ */ + base_delete(tsd_tsdn(tsd), arena->base); } -static void -arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) -{ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); +static extent_t * +arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info, + szind_t szind) { + extent_t *slab; + bool zero, commit; - assert(arena_run_tree_search(&bin->runs, mapelm) != NULL); - - arena_run_tree_remove(&bin->runs, mapelm); -} + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); -static arena_run_t * -arena_bin_nonfull_run_tryget(arena_bin_t *bin) -{ - arena_run_t *run = arena_bin_runs_first(bin); - if (run != NULL) { - arena_bin_runs_remove(bin, run); - if (config_stats) - bin->stats.reruns++; + zero = false; + commit = true; + slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, + bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); + + if (config_stats && slab != NULL) { + arena_stats_mapped_add(tsdn, &arena->stats, + bin_info->slab_size); + } + + return slab; +} + +static extent_t * +arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, + const arena_bin_info_t *bin_info) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + szind_t szind = sz_size2index(bin_info->reg_size); + bool zero = false; + bool commit = true; + extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, + binind, &zero, &commit); + if (slab == NULL) { + slab = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, + true, binind, &zero, &commit); + } + if (slab == NULL) { + slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, + bin_info, szind); + if (slab == NULL) { + return NULL; + } } - return (run); + assert(extent_slab_get(slab)); + + /* Initialize slab internals. */ + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + extent_nfree_set(slab, bin_info->nregs); + bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); + + arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); + + return slab; } -static arena_run_t * -arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) -{ - arena_run_t *run; - size_t binind; - arena_bin_info_t *bin_info; +static extent_t * +arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, + szind_t binind) { + extent_t *slab; + const arena_bin_info_t *bin_info; - /* Look for a usable run. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); - /* No existing runs have any space available. */ + /* Look for a usable slab. */ + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } + /* No existing slabs have any space available. */ - binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; - /* Allocate a new run. */ - malloc_mutex_unlock(&bin->lock); + /* Allocate a new slab. */ + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_small(arena, bin_info->run_size, binind); - if (run != NULL) { - bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + - (uintptr_t)bin_info->bitmap_offset); - - /* Initialize run internals. 
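The new arena_slab_alloc() above encodes a three-tier fallback: reuse a dirty extent if one fits, then a muzzy (decayed) one, and only then map fresh pages through the extent hooks. A toy version of that ordering, with hypothetical one-slot tiers standing in for the extents_dirty/extents_muzzy heaps (size matching and commit state are ignored):

#include <stddef.h>
#include <sys/mman.h>

typedef struct { void *slot; } tier_t;
static tier_t dirty_tier, muzzy_tier;

static void *
tier_take(tier_t *t) {
	void *p = t->slot;
	t->slot = NULL;
	return p;
}

static void *
tiered_alloc(size_t size) {
	void *p = tier_take(&dirty_tier);        /* Recently freed, cheapest. */
	if (p == NULL) {
		p = tier_take(&muzzy_tier);      /* Decayed, needs re-touching. */
	}
	if (p == NULL) {                         /* Slow path: fresh mapping. */
		p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			return NULL;
		}
	}
	return p;
}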
*/ - run->bin = bin; - run->nextind = 0; - run->nfree = bin_info->nregs; - bitmap_init(bitmap, &bin_info->bitmap_info); - } - malloc_mutex_unlock(&arena->lock); + slab = arena_slab_alloc(tsdn, arena, binind, bin_info); /********************************/ - malloc_mutex_lock(&bin->lock); - if (run != NULL) { + malloc_mutex_lock(tsdn, &bin->lock); + if (slab != NULL) { if (config_stats) { - bin->stats.nruns++; - bin->stats.curruns++; + bin->stats.nslabs++; + bin->stats.curslabs++; } - return (run); + return slab; } /* - * arena_run_alloc_small() failed, but another thread may have made + * arena_slab_alloc() failed, but another thread may have made * sufficient memory available while this one dropped bin->lock above, * so search one more time. */ - run = arena_bin_nonfull_run_tryget(bin); - if (run != NULL) - return (run); + slab = arena_bin_slabs_nonfull_tryget(bin); + if (slab != NULL) { + return slab; + } - return (NULL); + return NULL; } -/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ +/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ static void * -arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) -{ - void *ret; - size_t binind; - arena_bin_info_t *bin_info; - arena_run_t *run; +arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, + szind_t binind) { + const arena_bin_info_t *bin_info; + extent_t *slab; - binind = arena_bin_index(arena, bin); bin_info = &arena_bin_info[binind]; - bin->runcur = NULL; - run = arena_bin_nonfull_run_get(arena, bin); - if (bin->runcur != NULL && bin->runcur->nfree > 0) { + if (!arena_is_auto(arena) && bin->slabcur != NULL) { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } + slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind); + if (bin->slabcur != NULL) { /* - * Another thread updated runcur while this one ran without the - * bin lock in arena_bin_nonfull_run_get(). + * Another thread updated slabcur while this one ran without the + * bin lock in arena_bin_nonfull_slab_get(). */ - assert(bin->runcur->nfree > 0); - ret = arena_run_reg_alloc(bin->runcur, bin_info); - if (run != NULL) { - arena_chunk_t *chunk; - - /* - * arena_run_alloc_small() may have allocated run, or - * it may have pulled run from the bin's run tree. - * Therefore it is unsafe to make any assumptions about - * how run has previously been used, and - * arena_bin_lower_run() must be called, as if a region - * were just deallocated from the run. - */ - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); - if (run->nfree == bin_info->nregs) - arena_dalloc_bin_run(arena, chunk, run, bin); - else - arena_bin_lower_run(arena, chunk, run, bin); + if (extent_nfree_get(bin->slabcur) > 0) { + void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur, + bin_info); + if (slab != NULL) { + /* + * arena_slab_alloc() may have allocated slab, + * or it may have been pulled from + * slabs_nonfull. Therefore it is unsafe to + * make any assumptions about how slab has + * previously been used, and + * arena_bin_lower_slab() must be called, as if + * a region were just deallocated from the slab. 
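The "search one more time" retry above exists because bin->lock is dropped while the new slab is allocated, so a concurrent free may have repopulated slabs_nonfull in the meantime. The same pattern in isolation, with a hypothetical shared_resource/slow_create() pair:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_resource;   /* May be refilled by other threads. */

static void *
slow_create(void) {
	return malloc(64);      /* Expensive; must not run under `lock`. */
}

/* Called with `lock` held. */
static void *
get_resource(void) {
	pthread_mutex_unlock(&lock);
	void *res = slow_create();      /* Others may refill while unlocked. */
	pthread_mutex_lock(&lock);
	if (res != NULL) {
		return res;
	}
	/* Creation failed, but the lock was dropped: check the cache again. */
	res = shared_resource;
	shared_resource = NULL;
	return res;
}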
+ */ + if (extent_nfree_get(slab) == bin_info->nregs) { + arena_dalloc_bin_slab(tsdn, arena, slab, + bin); + } else { + arena_bin_lower_slab(tsdn, arena, slab, + bin); + } + } + return ret; } - return (ret); - } - if (run == NULL) - return (NULL); + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + bin->slabcur = NULL; + } - bin->runcur = run; + if (slab == NULL) { + return NULL; + } + bin->slabcur = slab; - assert(bin->runcur->nfree > 0); + assert(extent_nfree_get(bin->slabcur) > 0); - return (arena_run_reg_alloc(bin->runcur, bin_info)); + return arena_slab_reg_alloc(tsdn, slab, bin_info); } void -arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, - uint64_t prof_accumbytes) -{ +arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { unsigned i, nfill; arena_bin_t *bin; - arena_run_t *run; - void *ptr; assert(tbin->ncached == 0); - if (config_prof && arena_prof_accum(arena, prof_accumbytes)) - prof_idump(); + if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { + prof_idump(tsdn); + } bin = &arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + malloc_mutex_lock(tsdn, &bin->lock); for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> - tbin->lg_fill_div); i < nfill; i++) { - if ((run = bin->runcur) != NULL && run->nfree > 0) - ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ptr = arena_bin_malloc_hard(arena, bin); - if (ptr == NULL) + tcache->lg_fill_div[binind]); i < nfill; i++) { + extent_t *slab; + void *ptr; + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > + 0) { + ptr = arena_slab_reg_alloc(tsdn, slab, + &arena_bin_info[binind]); + } else { + ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } + if (ptr == NULL) { + /* + * OOM. tbin->avail isn't yet filled down to its first + * element, so the successful allocations (if any) must + * be moved just before tbin->avail before bailing out. + */ + if (i > 0) { + memmove(tbin->avail - i, tbin->avail - nfill, + i * sizeof(void *)); + } break; - if (config_fill && opt_junk) { + } + if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ptr, &arena_bin_info[binind], true); } /* Insert such that low regions get used first. 
*/ - tbin->avail[nfill - 1 - i] = ptr; + *(tbin->avail - nfill + i) = ptr; } if (config_stats) { - bin->stats.allocated += i * arena_bin_info[binind].reg_size; bin->stats.nmalloc += i; bin->stats.nrequests += tbin->tstats.nrequests; + bin->stats.curregs += i; bin->stats.nfills++; tbin->tstats.nrequests = 0; } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->ncached = i; + arena_decay_tick(tsdn, arena); } void -arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) -{ - - if (zero) { - size_t redzone_size = bin_info->redzone_size; - memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, - redzone_size); - memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, - redzone_size); - } else { - memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, - bin_info->reg_interval); +arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) { + if (!zero) { + memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); } } -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) -#endif -static void -arena_redzone_corruption(void *ptr, size_t usize, bool after, - size_t offset, uint8_t byte) -{ - - malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " - "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", - after ? "after" : "before", ptr, usize, byte); -} -#ifdef JEMALLOC_JET -#undef arena_redzone_corruption -#define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) -arena_redzone_corruption_t *arena_redzone_corruption = - JEMALLOC_N(arena_redzone_corruption_impl); -#endif - static void -arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) -{ - size_t size = bin_info->reg_size; - size_t redzone_size = bin_info->redzone_size; - size_t i; - bool error = false; - - for (i = 1; i <= redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, false, i, *byte); - if (reset) - *byte = 0xa5; - } - } - for (i = 0; i < redzone_size; i++) { - uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); - if (*byte != 0xa5) { - error = true; - arena_redzone_corruption(ptr, size, true, i, *byte); - if (reset) - *byte = 0xa5; - } - } - if (opt_abort && error) - abort(); +arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) { + memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); } +arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = + arena_dalloc_junk_small_impl; -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) -#endif -void -arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) -{ - size_t redzone_size = bin_info->redzone_size; - - arena_redzones_validate(ptr, bin_info, false); - memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, - bin_info->reg_interval); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_small -#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) -arena_dalloc_junk_small_t *arena_dalloc_junk_small = - JEMALLOC_N(arena_dalloc_junk_small_impl); -#endif - -void -arena_quarantine_junk_small(void *ptr, size_t usize) -{ - size_t binind; - arena_bin_info_t *bin_info; - cassert(config_fill); - assert(opt_junk); - assert(opt_quarantine); - assert(usize <= SMALL_MAXCLASS); - - binind = SMALL_SIZE2BIN(usize); - bin_info = &arena_bin_info[binind]; - arena_redzones_validate(ptr, bin_info, true); 
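The memmove in the OOM bail-out above is easy to misread: tbin->avail points one past the top of the pointer stack, the loop fills slots lowest-first at avail - nfill + i, and on early exit the i survivors must be slid up so they end flush against avail. A compilable toy (slots/backing are hypothetical stand-ins):

#include <stdio.h>
#include <string.h>

int
main(void) {
	void *slots[8];
	void **avail = &slots[8];       /* One past the top, like tbin->avail. */
	unsigned nfill = 8, i;
	int backing[3] = {10, 11, 12};

	/* Fill lowest-first; pretend allocation i == 3 failed. */
	for (i = 0; i < 3; i++) {
		*(avail - nfill + i) = &backing[i];
	}
	/* Slide the i survivors so the run ends just below avail. */
	memmove(avail - i, avail - nfill, i * sizeof(void *));

	int *top = *(avail - 1);
	printf("%d\n", *top);           /* Prints 12: ncached = i is consistent. */
	return 0;
}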
-} - -void * -arena_malloc_small(arena_t *arena, size_t size, bool zero) -{ +static void * +arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { void *ret; arena_bin_t *bin; - arena_run_t *run; - size_t binind; + size_t usize; + extent_t *slab; - binind = SMALL_SIZE2BIN(size); assert(binind < NBINS); bin = &arena->bins[binind]; - size = arena_bin_info[binind].reg_size; + usize = sz_index2size(binind); - malloc_mutex_lock(&bin->lock); - if ((run = bin->runcur) != NULL && run->nfree > 0) - ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); - else - ret = arena_bin_malloc_hard(arena, bin); + malloc_mutex_lock(tsdn, &bin->lock); + if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { + ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]); + } else { + ret = arena_bin_malloc_hard(tsdn, arena, bin, binind); + } if (ret == NULL) { - malloc_mutex_unlock(&bin->lock); - return (NULL); + malloc_mutex_unlock(tsdn, &bin->lock); + return NULL; } if (config_stats) { - bin->stats.allocated += size; bin->stats.nmalloc++; bin->stats.nrequests++; + bin->stats.curregs++; + } + malloc_mutex_unlock(tsdn, &bin->lock); + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { + prof_idump(tsdn); } - malloc_mutex_unlock(&bin->lock); - if (config_prof && isthreaded == false && arena_prof_accum(arena, size)) - prof_idump(); - if (zero == false) { + if (!zero) { if (config_fill) { - if (opt_junk) { + if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); - } else if (opt_zero) - memset(ret, 0, size); + } else if (unlikely(opt_zero)) { + memset(ret, 0, usize); + } } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); } else { - if (config_fill && opt_junk) { + if (config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); + memset(ret, 0, usize); } - return (ret); + arena_decay_tick(tsdn, arena); + return ret; } void * -arena_malloc_large(arena_t *arena, size_t size, bool zero) -{ - void *ret; - UNUSED bool idump; +arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, + bool zero) { + assert(!tsdn_null(tsdn) || arena != NULL); - /* Large allocation. */ - size = PAGE_CEILING(size); - malloc_mutex_lock(&arena->lock); - ret = (void *)arena_run_alloc_large(arena, size, zero); - if (ret == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); + if (likely(!tsdn_null(tsdn))) { + arena = arena_choose(tsdn_tsd(tsdn), arena); } - if (config_stats) { - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - if (config_prof) - idump = arena_prof_accum_locked(arena, size); - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); - - if (zero == false) { - if (config_fill) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } + if (unlikely(arena == NULL)) { + return NULL; } - return (ret); + if (likely(size <= SMALL_MAXCLASS)) { + return arena_malloc_small(tsdn, arena, ind, zero); + } + return large_malloc(tsdn, arena, sz_index2size(ind), zero); } -/* Only handles large allocations that require more than page alignment. 
*/ void * -arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) -{ +arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero, tcache_t *tcache) { void *ret; - size_t alloc_size, leadsize, trailsize; - arena_run_t *run; - arena_chunk_t *chunk; - assert((size & PAGE_MASK) == 0); + if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE + && (usize & PAGE_MASK) == 0))) { + /* Small; alignment doesn't require special slab placement. */ + ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); + } else { + if (likely(alignment <= CACHELINE)) { + ret = large_malloc(tsdn, arena, usize, zero); + } else { + ret = large_palloc(tsdn, arena, usize, alignment, zero); + } + } + return ret; +} - alignment = PAGE_CEILING(alignment); - alloc_size = size + alignment - PAGE; +void +arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { + cassert(config_prof); + assert(ptr != NULL); + assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); + assert(usize <= SMALL_MAXCLASS); - malloc_mutex_lock(&arena->lock); - run = arena_run_alloc_large(arena, alloc_size, false); - if (run == NULL) { - malloc_mutex_unlock(&arena->lock); - return (NULL); - } - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); - leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) - - (uintptr_t)run; - assert(alloc_size >= leadsize + size); - trailsize = alloc_size - leadsize - size; - ret = (void *)((uintptr_t)run + leadsize); - if (leadsize != 0) { - arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size - - leadsize); - } - if (trailsize != 0) { - arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, - false); - } - arena_run_init_large(arena, (arena_run_t *)ret, size, zero); + extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true); + arena_t *arena = extent_arena_get(extent); - if (config_stats) { - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; - } - malloc_mutex_unlock(&arena->lock); + szind_t szind = sz_size2index(usize); + extent_szind_set(extent, szind); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + szind, false); - if (config_fill && zero == false) { - if (opt_junk) - memset(ret, 0xa5, size); - else if (opt_zero) - memset(ret, 0, size); - } - return (ret); -} + prof_accum_cancel(tsdn, &arena->prof_accum, usize); -void -arena_prof_promoted(const void *ptr, size_t size) -{ - arena_chunk_t *chunk; - size_t pageind, binind; + assert(isalloc(tsdn, ptr) == usize); +} +static size_t +arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { cassert(config_prof); assert(ptr != NULL); - assert(CHUNK_ADDR2BASE(ptr) != ptr); - assert(isalloc(ptr, false) == PAGE); - assert(isalloc(ptr, true) == PAGE); - assert(size <= SMALL_MAXCLASS); - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - binind = SMALL_SIZE2BIN(size); - assert(binind < NBINS); - arena_mapbits_large_binind_set(chunk, pageind, binind); - assert(isalloc(ptr, false) == PAGE); - assert(isalloc(ptr, true) == size); + extent_szind_set(extent, NBINS); + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = 
tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, + NBINS, false); + + assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); + + return LARGE_MINCLASS; +} + +void +arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, + bool slow_path) { + cassert(config_prof); + assert(opt_prof); + + extent_t *extent = iealloc(tsdn, ptr); + size_t usize = arena_prof_demote(tsdn, extent, ptr); + if (usize <= tcache_maxclass) { + tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, + sz_size2index(usize), slow_path); + } else { + large_dalloc(tsdn, extent); + } } static void -arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - - /* Dissociate run from bin. */ - if (run == bin->runcur) - bin->runcur = NULL; - else { - size_t binind = arena_bin_index(chunk->arena, bin); - arena_bin_info_t *bin_info = &arena_bin_info[binind]; - - if (bin_info->nregs != 1) { - /* - * This block's conditional is necessary because if the - * run only contains one region, then it never gets - * inserted into the non-full runs tree. - */ - arena_bin_runs_remove(bin, run); +arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) { + /* Dissociate slab from bin. */ + if (slab == bin->slabcur) { + bin->slabcur = NULL; + } else { + szind_t binind = extent_szind_get(slab); + const arena_bin_info_t *bin_info = &arena_bin_info[binind]; + + /* + * The following block's conditional is necessary because if the + * slab only contains one region, then it never gets inserted + * into the non-full slabs heap. + */ + if (bin_info->nregs == 1) { + arena_bin_slabs_full_remove(arena, bin, slab); + } else { + arena_bin_slabs_nonfull_remove(bin, slab); } } } static void -arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ - size_t binind; - arena_bin_info_t *bin_info; - size_t npages, run_ind, past; - - assert(run != bin->runcur); - assert(arena_run_tree_search(&bin->runs, - arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) - == NULL); - - binind = arena_bin_index(chunk->arena, run->bin); - bin_info = &arena_bin_info[binind]; +arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + arena_bin_t *bin) { + assert(slab != bin->slabcur); - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); /******************************/ - npages = bin_info->run_size >> LG_PAGE; - run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); - past = (size_t)(PAGE_CEILING((uintptr_t)run + - (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * - bin_info->reg_interval - bin_info->redzone_size) - - (uintptr_t)chunk) >> LG_PAGE); - malloc_mutex_lock(&arena->lock); - - /* - * If the run was originally clean, and some pages were never touched, - * trim the clean pages before deallocating the dirty portion of the - * run. - */ - assert(arena_mapbits_dirty_get(chunk, run_ind) == - arena_mapbits_dirty_get(chunk, run_ind+npages-1)); - if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind < - npages) { - /* Trim clean pages. Convert to large run beforehand. 
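Promotion and demotion above maintain one invariant: a sampled allocation is physically backed by a LARGE_MINCLASS extent, while the size reported by isalloc() is switched between the sampled small size (after promote) and the backing size (before deallocation). A toy with per-extent metadata in place of jemalloc's rtree (toy_extent_t and TOY_LARGE_MINCLASS are hypothetical):

#include <assert.h>
#include <stddef.h>

#define TOY_LARGE_MINCLASS 16384

typedef struct { size_t reported_size; } toy_extent_t;

static void
toy_prof_promote(toy_extent_t *e, size_t usize) {
	assert(e->reported_size == TOY_LARGE_MINCLASS);
	e->reported_size = usize;       /* Size queries now see the small size. */
}

static size_t
toy_prof_demote(toy_extent_t *e) {
	e->reported_size = TOY_LARGE_MINCLASS;  /* Restore before freeing. */
	return TOY_LARGE_MINCLASS;
}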
*/ - assert(npages > 0); - arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0); - arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0); - arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE), - ((past - run_ind) << LG_PAGE), false); - /* npages = past - run_ind; */ - } - arena_run_dalloc(arena, run, true, false); - malloc_mutex_unlock(&arena->lock); + arena_slab_dalloc(tsdn, arena, slab); /****************************/ - malloc_mutex_lock(&bin->lock); - if (config_stats) - bin->stats.curruns--; + malloc_mutex_lock(tsdn, &bin->lock); + if (config_stats) { + bin->stats.curslabs--; + } } static void -arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, - arena_bin_t *bin) -{ +arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + arena_bin_t *bin) { + assert(extent_nfree_get(slab) > 0); /* - * Make sure that if bin->runcur is non-NULL, it refers to the lowest - * non-full run. It is okay to NULL runcur out rather than proactively - * keeping it pointing at the lowest non-full run. + * Make sure that if bin->slabcur is non-NULL, it refers to the + * oldest/lowest non-full slab. It is okay to NULL slabcur out rather + * than proactively keeping it pointing at the oldest/lowest non-full + * slab. */ - if ((uintptr_t)run < (uintptr_t)bin->runcur) { - /* Switch runcur. */ - if (bin->runcur->nfree > 0) - arena_bin_runs_insert(bin, bin->runcur); - bin->runcur = run; - if (config_stats) - bin->stats.reruns++; - } else - arena_bin_runs_insert(bin, run); + if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { + /* Switch slabcur. */ + if (extent_nfree_get(bin->slabcur) > 0) { + arena_bin_slabs_nonfull_insert(bin, bin->slabcur); + } else { + arena_bin_slabs_full_insert(arena, bin, bin->slabcur); + } + bin->slabcur = slab; + if (config_stats) { + bin->stats.reslabs++; + } + } else { + arena_bin_slabs_nonfull_insert(bin, slab); + } } -void -arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, - arena_chunk_map_t *mapelm) -{ - size_t pageind; - arena_run_t *run; - arena_bin_t *bin; - arena_bin_info_t *bin_info; - size_t size, binind; - - pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; - binind = arena_ptr_small_binind_get(ptr, mapelm->bits); - bin_info = &arena_bin_info[binind]; - if (config_fill || config_stats) - size = bin_info->reg_size; - - if (config_fill && opt_junk) +static void +arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab, + void *ptr, bool junked) { + arena_slab_data_t *slab_data = extent_slab_data_get(slab); + szind_t binind = extent_szind_get(slab); + arena_bin_t *bin = &arena->bins[binind]; + const arena_bin_info_t *bin_info = &arena_bin_info[binind]; + + if (!junked && config_fill && unlikely(opt_junk_free)) { arena_dalloc_junk_small(ptr, bin_info); + } - arena_run_reg_dalloc(run, ptr); - if (run->nfree == bin_info->nregs) { - arena_dissociate_bin_run(chunk, run, bin); - arena_dalloc_bin_run(arena, chunk, run, bin); - } else if (run->nfree == 1 && run != bin->runcur) - arena_bin_lower_run(arena, chunk, run, bin); + arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr); + unsigned nfree = extent_nfree_get(slab); + if (nfree == bin_info->nregs) { + arena_dissociate_bin_slab(arena, slab, bin); + arena_dalloc_bin_slab(tsdn, arena, slab, bin); + } else if (nfree == 1 && slab != bin->slabcur) { + 
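arena_bin_lower_slab() above biases slabcur toward the oldest/lowest slab (extent_snad_comp() orders by serial number, then address) so that newer, higher slabs drain and can eventually be returned to the OS. Reduced to just the address half of that comparison (slab_t here is a hypothetical placeholder):

#include <stddef.h>
#include <stdint.h>

typedef struct { unsigned char pages[4096]; } slab_t;

static slab_t *
prefer_lower(slab_t *cur, slab_t *candidate) {
	if (cur == NULL || (uintptr_t)candidate < (uintptr_t)cur) {
		return candidate;       /* Keep allocating from low addresses. */
	}
	return cur;
}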
arena_bin_slabs_full_remove(arena, bin, slab); + arena_bin_lower_slab(tsdn, arena, slab, bin); + } if (config_stats) { - bin->stats.allocated -= size; bin->stats.ndalloc++; + bin->stats.curregs--; } } void -arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind, arena_chunk_map_t *mapelm) -{ - arena_run_t *run; - arena_bin_t *bin; - - run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - - arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); - bin = run->bin; - malloc_mutex_lock(&bin->lock); - arena_dalloc_bin_locked(arena, chunk, ptr, mapelm); - malloc_mutex_unlock(&bin->lock); -} - -void -arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t pageind) -{ - arena_chunk_map_t *mapelm; - - if (config_debug) { - /* arena_ptr_small_binind_get() does extra sanity checking. */ - assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, - pageind)) != BININD_INVALID); - } - mapelm = arena_mapp_get(chunk, pageind); - arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm); +arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + void *ptr) { + arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true); } -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) -#endif static void -arena_dalloc_junk_large(void *ptr, size_t usize) -{ - - if (config_fill && opt_junk) - memset(ptr, 0x5a, usize); -} -#ifdef JEMALLOC_JET -#undef arena_dalloc_junk_large -#define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) -arena_dalloc_junk_large_t *arena_dalloc_junk_large = - JEMALLOC_N(arena_dalloc_junk_large_impl); -#endif - -void -arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ +arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { + szind_t binind = extent_szind_get(extent); + arena_bin_t *bin = &arena->bins[binind]; - if (config_fill || config_stats) { - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t usize = arena_mapbits_large_size_get(chunk, pageind); - - arena_dalloc_junk_large(ptr, usize); - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= usize; - arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--; - } - } - - arena_run_dalloc(arena, (arena_run_t *)ptr, true, false); + malloc_mutex_lock(tsdn, &bin->lock); + arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false); + malloc_mutex_unlock(tsdn, &bin->lock); } void -arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) -{ +arena_dalloc_small(tsdn_t *tsdn, void *ptr) { + extent_t *extent = iealloc(tsdn, ptr); + arena_t *arena = extent_arena_get(extent); - malloc_mutex_lock(&arena->lock); - arena_dalloc_large_locked(arena, chunk, ptr); - malloc_mutex_unlock(&arena->lock); + arena_dalloc_bin(tsdn, arena, extent, ptr); + arena_decay_tick(tsdn, arena); } -static void -arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size) -{ - - assert(size < oldsize); - - /* - * Shrink the run, and make trailing pages available for other - * allocations. 
- */ - malloc_mutex_lock(&arena->lock); - arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, - true); - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; +bool +arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, + size_t extra, bool zero) { + /* Calls with non-zero extra had to clamp extra. */ + assert(extra == 0 || size + extra <= LARGE_MAXCLASS); - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + if (unlikely(size > LARGE_MAXCLASS)) { + return true; } - malloc_mutex_unlock(&arena->lock); -} -static bool -arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, - size_t oldsize, size_t size, size_t extra, bool zero) -{ - size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; - size_t npages = oldsize >> LG_PAGE; - size_t followsize; - - assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); - - /* Try to extend the run. */ - assert(size + extra > oldsize); - malloc_mutex_lock(&arena->lock); - if (pageind + npages < chunk_npages && - arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && - (followsize = arena_mapbits_unallocated_size_get(chunk, - pageind+npages)) >= size - oldsize) { + extent_t *extent = iealloc(tsdn, ptr); + size_t usize_min = sz_s2u(size); + size_t usize_max = sz_s2u(size + extra); + if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) { /* - * The next run is available and sufficiently large. Split the - * following run, then merge the first part with the existing - * allocation. + * Avoid moving the allocation if the size class can be left the + * same. */ - size_t flag_dirty; - size_t splitsize = (oldsize + followsize <= size + extra) - ? followsize : size + extra - oldsize; - arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk + - ((pageind+npages) << LG_PAGE)), splitsize, zero); - - size = oldsize + splitsize; - npages = size >> LG_PAGE; - - /* - * Mark the extended run as dirty if either portion of the run - * was dirty before allocation. This is rather pedantic, - * because there's not actually any sequence of events that - * could cause the resulting run to be passed to - * arena_run_dalloc() with the dirty argument set to false - * (which is when dirty flag consistency would really matter). 
- */ - flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | - arena_mapbits_dirty_get(chunk, pageind+npages-1); - arena_mapbits_large_set(chunk, pageind, size, flag_dirty); - arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); - - if (config_stats) { - arena->stats.ndalloc_large++; - arena->stats.allocated_large -= oldsize; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; - arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; - - arena->stats.nmalloc_large++; - arena->stats.nrequests_large++; - arena->stats.allocated_large += size; - arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; - arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; - arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; + assert(arena_bin_info[sz_size2index(oldsize)].reg_size == + oldsize); + if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) != + sz_size2index(oldsize)) && (size > oldsize || usize_max < + oldsize)) { + return true; } - malloc_mutex_unlock(&arena->lock); - return (false); + + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) { + return large_ralloc_no_move(tsdn, extent, usize_min, usize_max, + zero); } - malloc_mutex_unlock(&arena->lock); - return (true); + return true; } -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) -#endif -static void -arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) -{ - - if (config_fill && opt_junk) { - memset((void *)((uintptr_t)ptr + usize), 0x5a, - old_usize - usize); +static void * +arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) { + if (alignment == 0) { + return arena_malloc(tsdn, arena, usize, sz_size2index(usize), + zero, tcache, true); } -} -#ifdef JEMALLOC_JET -#undef arena_ralloc_junk_large -#define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) -arena_ralloc_junk_large_t *arena_ralloc_junk_large = - JEMALLOC_N(arena_ralloc_junk_large_impl); -#endif - -/* - * Try to resize a large allocation, in order to avoid copying. This will - * always fail if growing an object, and the following run is already in use. - */ -static bool -arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ - size_t psize; - - psize = PAGE_CEILING(size + extra); - if (psize == oldsize) { - /* Same size class. */ - return (false); - } else { - arena_chunk_t *chunk; - arena_t *arena; - - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - arena = chunk->arena; - - if (psize < oldsize) { - /* Fill before shrinking in order avoid a race. 
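The in-place test above boils down to: a small allocation can stay put iff the requested size maps to the same size-class index as the old size. With a toy power-of-two class table in place of jemalloc's sz_size2index()/sz_index2size() (the real table is much denser):

#include <assert.h>
#include <stddef.h>

static size_t
toy_index2size(unsigned ind) {
	return (size_t)8 << ind;        /* Classes: 8, 16, 32, ... */
}

static unsigned
toy_size2index(size_t size) {
	unsigned ind = 0;
	while (toy_index2size(ind) < size) {
		ind++;
	}
	return ind;
}

int
main(void) {
	/* 13 -> class 16, same as 16: resize in place. */
	assert(toy_size2index(13) == toy_size2index(16));
	/* 17 -> class 32: the allocation would have to move. */
	assert(toy_size2index(17) != toy_size2index(16));
	return 0;
}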
*/ - arena_ralloc_junk_large(ptr, oldsize, psize); - arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, - psize); - return (false); - } else { - bool ret = arena_ralloc_large_grow(arena, chunk, ptr, - oldsize, PAGE_CEILING(size), - psize - PAGE_CEILING(size), zero); - if (config_fill && ret == false && zero == false) { - if (opt_junk) { - memset((void *)((uintptr_t)ptr + - oldsize), 0xa5, isalloc(ptr, - config_prof) - oldsize); - } else if (opt_zero) { - memset((void *)((uintptr_t)ptr + - oldsize), 0, isalloc(ptr, - config_prof) - oldsize); - } - } - return (ret); - } + usize = sz_sa2u(usize, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + return NULL; } + return ipalloct(tsdn, usize, alignment, zero, tcache, arena); } -bool -arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, - bool zero) -{ +void * +arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, + size_t size, size_t alignment, bool zero, tcache_t *tcache) { + size_t usize = sz_s2u(size); + if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) { + return NULL; + } - /* - * Avoid moving the allocation if the size class can be left the same. - */ - if (oldsize <= arena_maxclass) { - if (oldsize <= SMALL_MAXCLASS) { - assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size - == oldsize); - if ((size + extra <= SMALL_MAXCLASS && - SMALL_SIZE2BIN(size + extra) == - SMALL_SIZE2BIN(oldsize)) || (size <= oldsize && - size + extra >= oldsize)) - return (false); - } else { - assert(size <= arena_maxclass); - if (size + extra > SMALL_MAXCLASS) { - if (arena_ralloc_large(ptr, oldsize, size, - extra, zero) == false) - return (false); - } + if (likely(usize <= SMALL_MAXCLASS)) { + /* Try to avoid moving the allocation. */ + if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) { + return ptr; } } - /* Reallocation would require a move. */ - return (true); -} - -void * -arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, - size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, - bool try_tcache_dalloc) -{ - void *ret; - size_t copysize; - - /* Try to avoid moving the allocation. */ - if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false) - return (ptr); + if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) { + return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize, + alignment, zero, tcache); + } /* * size and oldsize are different enough that we need to move the - * object. In that case, fall back to allocating new space and - * copying. + * object. In that case, fall back to allocating new space and copying. */ - if (alignment != 0) { - size_t usize = sa2u(size + extra, alignment); - if (usize == 0) - return (NULL); - ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); - } else - ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc); - + void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, + zero, tcache); if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, this time without extra. */ - if (alignment != 0) { - size_t usize = sa2u(size, alignment); - if (usize == 0) - return (NULL); - ret = ipalloct(usize, alignment, zero, try_tcache_alloc, - arena); - } else - ret = arena_malloc(arena, size, zero, try_tcache_alloc); - - if (ret == NULL) - return (NULL); + return NULL; } - /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). 
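The fallback above allocates new space; the lines that follow copy at most min(usize, oldsize) bytes and release the original only after a successful copy. In miniature (standard C, not jemalloc's internal entry points):

#include <stdlib.h>
#include <string.h>

static void *
realloc_by_copy(void *ptr, size_t oldsize, size_t newsize) {
	void *ret = malloc(newsize);
	if (ret == NULL) {
		return NULL;            /* Old allocation left untouched. */
	}
	size_t copysize = (newsize < oldsize) ? newsize : oldsize;
	memcpy(ret, ptr, copysize);
	free(ptr);
	return ret;
}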
*/ - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. + * Junk/zero-filling were already done by + * ipalloc()/arena_malloc(). */ - copysize = (size < oldsize) ? size : oldsize; - VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); + + size_t copysize = (usize < oldsize) ? usize : oldsize; memcpy(ret, ptr, copysize); - iqalloct(ptr, try_tcache_dalloc); - return (ret); + isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); + return ret; } dss_prec_t -arena_dss_prec_get(arena_t *arena) -{ - dss_prec_t ret; +arena_dss_prec_get(arena_t *arena) { + return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); +} - malloc_mutex_lock(&arena->lock); - ret = arena->dss_prec; - malloc_mutex_unlock(&arena->lock); - return (ret); +bool +arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { + if (!have_dss) { + return (dss_prec != dss_prec_disabled); + } + atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); + return false; } -void -arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) -{ +ssize_t +arena_dirty_decay_ms_default_get(void) { + return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); +} - malloc_mutex_lock(&arena->lock); - arena->dss_prec = dss_prec; - malloc_mutex_unlock(&arena->lock); +bool +arena_dirty_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; } -void -arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, - size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, - malloc_large_stats_t *lstats) -{ - unsigned i; +ssize_t +arena_muzzy_decay_ms_default_get(void) { + return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); +} - malloc_mutex_lock(&arena->lock); - *dss = dss_prec_names[arena->dss_prec]; - *nactive += arena->nactive; - *ndirty += arena->ndirty; +bool +arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { + if (!arena_decay_ms_valid(decay_ms)) { + return true; + } + atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); + return false; +} - astats->mapped += arena->stats.mapped; - astats->npurge += arena->stats.npurge; - astats->nmadvise += arena->stats.nmadvise; - astats->purged += arena->stats.purged; - astats->allocated_large += arena->stats.allocated_large; - astats->nmalloc_large += arena->stats.nmalloc_large; - astats->ndalloc_large += arena->stats.ndalloc_large; - astats->nrequests_large += arena->stats.nrequests_large; +unsigned +arena_nthreads_get(arena_t *arena, bool internal) { + return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); +} - for (i = 0; i < nlclasses; i++) { - lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; - lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; - lstats[i].nrequests += arena->stats.lstats[i].nrequests; - lstats[i].curruns += arena->stats.lstats[i].curruns; - } - malloc_mutex_unlock(&arena->lock); +void +arena_nthreads_inc(arena_t *arena, bool internal) { + atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} - for (i = 0; i < NBINS; i++) { - arena_bin_t *bin = &arena->bins[i]; +void +arena_nthreads_dec(arena_t *arena, bool internal) { + atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); +} - malloc_mutex_lock(&bin->lock); - bstats[i].allocated += bin->stats.allocated; - bstats[i].nmalloc += bin->stats.nmalloc; - bstats[i].ndalloc += bin->stats.ndalloc; - bstats[i].nrequests += 
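Note the shift from mutex-protected fields to C11-style atomics above: simple tunables such as the decay defaults are read and written lock-free with relaxed ordering, and setters keep jemalloc's convention of returning true on failure. A self-contained analog (toy_* names are hypothetical; the real validation in arena_decay_ms_valid() is stricter):

#include <stdatomic.h>
#include <stdbool.h>
#include <sys/types.h>

static _Atomic ssize_t toy_decay_ms_default;

static bool
toy_decay_ms_default_set(ssize_t decay_ms) {
	if (decay_ms < -1) {            /* -1 conventionally means "never". */
		return true;            /* Failure. */
	}
	atomic_store_explicit(&toy_decay_ms_default, decay_ms,
	    memory_order_relaxed);
	return false;
}

static ssize_t
toy_decay_ms_default_get(void) {
	return atomic_load_explicit(&toy_decay_ms_default,
	    memory_order_relaxed);
}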
bin->stats.nrequests; - if (config_tcache) { - bstats[i].nfills += bin->stats.nfills; - bstats[i].nflushes += bin->stats.nflushes; - } - bstats[i].nruns += bin->stats.nruns; - bstats[i].reruns += bin->stats.reruns; - bstats[i].curruns += bin->stats.curruns; - malloc_mutex_unlock(&bin->lock); - } +size_t +arena_extent_sn_next(arena_t *arena) { + return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); } -bool -arena_new(arena_t *arena, unsigned ind) -{ +arena_t * +arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + base_t *base; unsigned i; - arena_bin_t *bin; - arena->ind = ind; - arena->nthreads = 0; + if (ind == 0) { + base = b0get(); + } else { + base = base_new(tsdn, ind, extent_hooks); + if (base == NULL) { + return NULL; + } + } + + arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE); + if (arena == NULL) { + goto label_error; + } - if (malloc_mutex_init(&arena->lock)) - return (true); + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + arena->last_thd = NULL; if (config_stats) { - memset(&arena->stats, 0, sizeof(arena_stats_t)); - arena->stats.lstats = - (malloc_large_stats_t *)base_alloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (arena->stats.lstats == NULL) - return (true); - memset(arena->stats.lstats, 0, nlclasses * - sizeof(malloc_large_stats_t)); - if (config_tcache) - ql_new(&arena->tcache_ql); + if (arena_stats_init(tsdn, &arena->stats)) { + goto label_error; + } + + ql_new(&arena->tcache_ql); + if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", + WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { + goto label_error; + } } - if (config_prof) - arena->prof_accumbytes = 0; + if (config_prof) { + if (prof_accum_init(tsdn, &arena->prof_accum)) { + goto label_error; + } + } - arena->dss_prec = chunk_dss_prec_get(); + if (config_cache_oblivious) { + /* + * A nondeterministic seed based on the address of arena reduces + * the likelihood of lockstep non-uniform cache index + * utilization among identical concurrent processes, but at the + * cost of test repeatability. For debug builds, instead use a + * deterministic seed. + */ + atomic_store_zu(&arena->offset_state, config_debug ? ind : + (size_t)(uintptr_t)arena, ATOMIC_RELAXED); + } - /* Initialize chunks. */ - arena_chunk_dirty_new(&arena->chunks_dirty); - arena->spare = NULL; + atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); - arena->nactive = 0; - arena->ndirty = 0; - arena->npurgatory = 0; + atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), + ATOMIC_RELAXED); - arena_avail_tree_new(&arena->runs_avail); + atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); - /* Initialize bins. */ - for (i = 0; i < NBINS; i++) { - bin = &arena->bins[i]; - if (malloc_mutex_init(&bin->lock)) - return (true); - bin->runcur = NULL; - arena_run_tree_new(&bin->runs); - if (config_stats) - memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); + extent_list_init(&arena->large); + if (malloc_mutex_init(&arena->large_mtx, "arena_large", + WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { + goto label_error; } - return (false); -} - -/* - * Calculate bin_info->run_size such that it meets the following constraints: - * - * *) bin_info->run_size >= min_run_size - * *) bin_info->run_size <= arena_maxclass - * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). 
- * *) bin_info->nregs <= RUN_MAXREGS - * - * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also - * calculated here, since these settings are all interdependent. - */ -static size_t -bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size) -{ - size_t pad_size; - size_t try_run_size, good_run_size; - uint32_t try_nregs, good_nregs; - uint32_t try_hdr_size, good_hdr_size; - uint32_t try_bitmap_offset, good_bitmap_offset; - uint32_t try_ctx0_offset, good_ctx0_offset; - uint32_t try_redzone0_offset, good_redzone0_offset; - - assert(min_run_size >= PAGE); - assert(min_run_size <= arena_maxclass); - /* - * Determine redzone size based on minimum alignment and minimum - * redzone size. Add padding to the end of the run if it is needed to - * align the regions. The padding allows each redzone to be half the - * minimum alignment; without the padding, each redzone would have to - * be twice as large in order to maintain alignment. + * Delay coalescing for dirty extents despite the disruptive effect on + * memory layout for best-fit extent allocation, since cached extents + * are likely to be reused soon after deallocation, and the cost of + * merging/splitting extents is non-trivial. */ - if (config_fill && opt_redzone) { - size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1); - if (align_min <= REDZONE_MINSIZE) { - bin_info->redzone_size = REDZONE_MINSIZE; - pad_size = 0; - } else { - bin_info->redzone_size = align_min >> 1; - pad_size = bin_info->redzone_size; - } - } else { - bin_info->redzone_size = 0; - pad_size = 0; + if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, + true)) { + goto label_error; } - bin_info->reg_interval = bin_info->reg_size + - (bin_info->redzone_size << 1); - /* - * Calculate known-valid settings before entering the run_size - * expansion loop, so that the first part of the loop always copies - * valid settings. - * - * The do..while loop iteratively reduces the number of regions until - * the run header and the regions no longer overlap. A closed formula - * would be quite messy, since there is an interdependency between the - * header's mask length and the number of regions. + * Coalesce muzzy extents immediately, because operations on them are in + * the critical path much less often than for dirty extents. */ - try_run_size = min_run_size; - try_nregs = ((try_run_size - sizeof(arena_run_t)) / - bin_info->reg_interval) - + 1; /* Counter-act try_nregs-- in loop. */ - if (try_nregs > RUN_MAXREGS) { - try_nregs = RUN_MAXREGS - + 1; /* Counter-act try_nregs-- in loop. */ - } - do { - try_nregs--; - try_hdr_size = sizeof(arena_run_t); - /* Pad to a long boundary. */ - try_hdr_size = LONG_CEILING(try_hdr_size); - try_bitmap_offset = try_hdr_size; - /* Add space for bitmap. */ - try_hdr_size += bitmap_size(try_nregs); - if (config_prof && opt_prof && prof_promote == false) { - /* Pad to a quantum boundary. */ - try_hdr_size = QUANTUM_CEILING(try_hdr_size); - try_ctx0_offset = try_hdr_size; - /* Add space for one (prof_ctx_t *) per region. */ - try_hdr_size += try_nregs * sizeof(prof_ctx_t *); - } else - try_ctx0_offset = 0; - try_redzone0_offset = try_run_size - (try_nregs * - bin_info->reg_interval) - pad_size; - } while (try_hdr_size > try_redzone0_offset); - - /* run_size expansion loop. */ - do { - /* - * Copy valid settings before trying more aggressive settings. 
- */ - good_run_size = try_run_size; - good_nregs = try_nregs; - good_hdr_size = try_hdr_size; - good_bitmap_offset = try_bitmap_offset; - good_ctx0_offset = try_ctx0_offset; - good_redzone0_offset = try_redzone0_offset; - - /* Try more aggressive settings. */ - try_run_size += PAGE; - try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) / - bin_info->reg_interval) - + 1; /* Counter-act try_nregs-- in loop. */ - if (try_nregs > RUN_MAXREGS) { - try_nregs = RUN_MAXREGS - + 1; /* Counter-act try_nregs-- in loop. */ + if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, + false)) { + goto label_error; + } + /* + * Coalesce retained extents immediately, in part because they will + * never be evicted (and therefore there's no opportunity for delayed + * coalescing), but also because operations on retained extents are not + * in the critical path. + */ + if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, + false)) { + goto label_error; + } + + if (arena_decay_init(&arena->decay_dirty, &arena->extents_dirty, + arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { + goto label_error; + } + if (arena_decay_init(&arena->decay_muzzy, &arena->extents_muzzy, + arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { + goto label_error; + } + + arena->extent_grow_next = sz_psz2ind(HUGEPAGE); + if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", + WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + extent_avail_new(&arena->extent_avail); + if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", + WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { + goto label_error; + } + + /* Initialize bins. */ + for (i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + if (malloc_mutex_init(&bin->lock, "arena_bin", + WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) { + goto label_error; } - do { - try_nregs--; - try_hdr_size = sizeof(arena_run_t); - /* Pad to a long boundary. */ - try_hdr_size = LONG_CEILING(try_hdr_size); - try_bitmap_offset = try_hdr_size; - /* Add space for bitmap. */ - try_hdr_size += bitmap_size(try_nregs); - if (config_prof && opt_prof && prof_promote == false) { - /* Pad to a quantum boundary. */ - try_hdr_size = QUANTUM_CEILING(try_hdr_size); - try_ctx0_offset = try_hdr_size; - /* - * Add space for one (prof_ctx_t *) per region. - */ - try_hdr_size += try_nregs * - sizeof(prof_ctx_t *); - } - try_redzone0_offset = try_run_size - (try_nregs * - bin_info->reg_interval) - pad_size; - } while (try_hdr_size > try_redzone0_offset); - } while (try_run_size <= arena_maxclass - && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) > - RUN_MAX_OVRHD_RELAX - && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size - && try_nregs < RUN_MAXREGS); + bin->slabcur = NULL; + extent_heap_new(&bin->slabs_nonfull); + extent_list_init(&bin->slabs_full); + if (config_stats) { + memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); + } + } - assert(good_hdr_size <= good_redzone0_offset); + arena->base = base; + /* Set arena before creating background threads. */ + arena_set(ind, arena); - /* Copy final settings. 
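The three extents_init() calls above differ only in their coalescing policy, and the comments give the reasoning: dirty extents sit on the hot deallocation path and are likely to be reused at the same size, so merging is deferred; muzzy and retained extents are off the critical path, so they coalesce eagerly. The policy as a table (toy types, not jemalloc's):

#include <stdbool.h>

typedef enum { STATE_DIRTY, STATE_MUZZY, STATE_RETAINED } toy_state_t;

typedef struct {
	toy_state_t state;
	bool delay_coalesce;
} toy_extents_policy_t;

static const toy_extents_policy_t toy_tiers[] = {
	{ STATE_DIRTY,    true  },      /* Hot path: batch merges later. */
	{ STATE_MUZZY,    false },      /* Cold: merge immediately. */
	{ STATE_RETAINED, false },      /* Never evicted: merge immediately. */
};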
*/ - bin_info->run_size = good_run_size; - bin_info->nregs = good_nregs; - bin_info->bitmap_offset = good_bitmap_offset; - bin_info->ctx0_offset = good_ctx0_offset; - bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size; + nstime_init(&arena->create_time, 0); + nstime_update(&arena->create_time); - assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs - * bin_info->reg_interval) + pad_size == bin_info->run_size); + /* We don't support reentrancy for arena 0 bootstrapping. */ + if (ind != 0) { + /* + * If we're here, then arena 0 already exists, so bootstrapping + * is done enough that we should have tsd. + */ + assert(!tsdn_null(tsdn)); + pre_reentrancy(tsdn_tsd(tsdn), arena); + if (hooks_arena_new_hook) { + hooks_arena_new_hook(); + } + post_reentrancy(tsdn_tsd(tsdn)); + } - return (good_run_size); + return arena; +label_error: + if (ind != 0) { + base_delete(tsdn, base); + } + return NULL; } -static void -bin_info_init(void) -{ - arena_bin_info_t *bin_info; - size_t prev_run_size = PAGE; - -#define SIZE_CLASS(bin, delta, size) \ - bin_info = &arena_bin_info[bin]; \ - bin_info->reg_size = size; \ - prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\ - bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); - SIZE_CLASSES -#undef SIZE_CLASS +void +arena_boot(void) { + arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); + arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); } void -arena_boot(void) -{ - size_t header_size; - unsigned i; +arena_prefork0(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); +} - /* - * Compute the header size such that it is large enough to contain the - * page map. The page map is biased to omit entries for the header - * itself, so some iteration is necessary to compute the map bias. - * - * 1) Compute safe header_size and map_bias values that include enough - * space for an unbiased page map. - * 2) Refine map_bias based on (1) to omit the header pages in the page - * map. The resulting map_bias may be one too small. - * 3) Refine map_bias based on (2). The result will be >= the result - * from (2), and will always be correct. 
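arena_new() above unwinds failures through a single label_error that deletes the base allocator, which suffices because every arena structure is carved out of that base. The general goto-unwinding idiom, shown with per-stage labels for resources that need individual teardown (obj_t is a hypothetical example type):

#include <stdlib.h>

typedef struct { int *a; int *b; } obj_t;

static obj_t *
obj_new(void) {
	obj_t *obj = malloc(sizeof(*obj));
	if (obj == NULL) {
		return NULL;
	}
	obj->a = malloc(64);
	if (obj->a == NULL) {
		goto label_error;
	}
	obj->b = malloc(64);
	if (obj->b == NULL) {
		goto label_error_a;
	}
	return obj;
label_error_a:
	free(obj->a);           /* Tear down in reverse order of setup. */
label_error:
	free(obj);
	return NULL;
}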
- */ - map_bias = 0; - for (i = 0; i < 3; i++) { - header_size = offsetof(arena_chunk_t, map) + - (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias)); - map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK) - != 0); +void +arena_prefork1(tsdn_t *tsdn, arena_t *arena) { + if (config_stats) { + malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); } - assert(map_bias > 0); +} - arena_maxclass = chunksize - (map_bias << LG_PAGE); +void +arena_prefork2(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); +} - bin_info_init(); +void +arena_prefork3(tsdn_t *tsdn, arena_t *arena) { + extents_prefork(tsdn, &arena->extents_dirty); + extents_prefork(tsdn, &arena->extents_muzzy); + extents_prefork(tsdn, &arena->extents_retained); } void -arena_prefork(arena_t *arena) -{ - unsigned i; +arena_prefork4(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); +} - malloc_mutex_prefork(&arena->lock); - for (i = 0; i < NBINS; i++) - malloc_mutex_prefork(&arena->bins[i].lock); +void +arena_prefork5(tsdn_t *tsdn, arena_t *arena) { + base_prefork(tsdn, arena->base); +} + +void +arena_prefork6(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_prefork(tsdn, &arena->large_mtx); +} + +void +arena_prefork7(tsdn_t *tsdn, arena_t *arena) { + for (unsigned i = 0; i < NBINS; i++) { + malloc_mutex_prefork(tsdn, &arena->bins[i].lock); + } } void -arena_postfork_parent(arena_t *arena) -{ +arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { unsigned i; - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_parent(&arena->bins[i].lock); - malloc_mutex_postfork_parent(&arena->lock); + for (i = 0; i < NBINS; i++) { + malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); + } + malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); + base_postfork_parent(tsdn, arena->base); + malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx); + extents_postfork_parent(tsdn, &arena->extents_dirty); + extents_postfork_parent(tsdn, &arena->extents_muzzy); + extents_postfork_parent(tsdn, &arena->extents_retained); + malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); + } } void -arena_postfork_child(arena_t *arena) -{ +arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { unsigned i; - for (i = 0; i < NBINS; i++) - malloc_mutex_postfork_child(&arena->bins[i].lock); - malloc_mutex_postfork_child(&arena->lock); + atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); + atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); + if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, false); + } + if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) { + arena_nthreads_inc(arena, true); + } + if (config_stats) { + ql_new(&arena->tcache_ql); + tcache_t *tcache = tcache_get(tsdn_tsd(tsdn)); + if (tcache != NULL && tcache->arena == arena) { + ql_elm_new(tcache, link); + ql_tail_insert(&arena->tcache_ql, tcache, link); + } + } + + for (i = 0; i < NBINS; i++) { + malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); + } + malloc_mutex_postfork_child(tsdn, &arena->large_mtx); + base_postfork_child(tsdn, arena->base); + malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx); + extents_postfork_child(tsdn, &arena->extents_dirty); + extents_postfork_child(tsdn, &arena->extents_muzzy); + extents_postfork_child(tsdn, &arena->extents_retained); + 
malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx); + malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx); + if (config_stats) { + malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); + } } diff --git a/dep/jemalloc/src/atomic.c b/dep/jemalloc/src/atomic.c deleted file mode 100644 index 77ee313113b..00000000000 --- a/dep/jemalloc/src/atomic.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_ATOMIC_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/dep/jemalloc/src/background_thread.c b/dep/jemalloc/src/background_thread.c new file mode 100644 index 00000000000..eb30eb5b423 --- /dev/null +++ b/dep/jemalloc/src/background_thread.c @@ -0,0 +1,880 @@ +#define JEMALLOC_BACKGROUND_THREAD_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + +/******************************************************************************/ +/* Data. */ + +/* This option should be opt-in only. */ +#define BACKGROUND_THREAD_DEFAULT false +/* Read-only after initialization. */ +bool opt_background_thread = BACKGROUND_THREAD_DEFAULT; + +/* Used for thread creation, termination and stats. */ +malloc_mutex_t background_thread_lock; +/* Indicates global state. Atomic because decay reads this w/o locking. */ +atomic_b_t background_thread_enabled_state; +size_t n_background_threads; +/* Thread info per-index. */ +background_thread_info_t *background_thread_info; + +/* False if no necessary runtime support. */ +bool can_enable_background_thread; + +/******************************************************************************/ + +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER +#include <dlfcn.h> + +static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, + void *(*)(void *), void *__restrict); +static pthread_once_t once_control = PTHREAD_ONCE_INIT; + +static void +pthread_create_wrapper_once(void) { +#ifdef JEMALLOC_LAZY_LOCK + isthreaded = true; +#endif +} + +int +pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr, + void *(*start_routine)(void *), void *__restrict arg) { + pthread_once(&once_control, pthread_create_wrapper_once); + + return pthread_create_fptr(thread, attr, start_routine, arg); +} +#endif /* JEMALLOC_PTHREAD_CREATE_WRAPPER */ + +#ifndef JEMALLOC_BACKGROUND_THREAD +#define NOT_REACHED { not_reached(); } +bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED +bool background_threads_enable(tsd_t *tsd) NOT_REACHED +bool background_threads_disable(tsd_t *tsd) NOT_REACHED +void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new) NOT_REACHED +void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED +void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED +void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED +void background_thread_postfork_child(tsdn_t *tsdn) NOT_REACHED +bool background_thread_stats_read(tsdn_t *tsdn, + background_thread_stats_t *stats) NOT_REACHED +void background_thread_ctl_init(tsdn_t *tsdn) NOT_REACHED +#undef NOT_REACHED +#else + +static bool background_thread_enabled_at_fork; + +static void +background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) { + background_thread_wakeup_time_set(tsdn, info, 0); + info->npages_to_purge_new = 0; + if (config_stats) { + info->tot_n_runs = 0; + nstime_init(&info->tot_sleep_time, 0); + } +} + +static inline 
bool +set_current_thread_affinity(UNUSED int cpu) { +#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY) + cpu_set_t cpuset; + CPU_ZERO(&cpuset); + CPU_SET(cpu, &cpuset); + int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset); + + return (ret != 0); +#else + return false; +#endif +} + +/* Threshold for determining when to wake up the background thread. */ +#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024) +#define BILLION UINT64_C(1000000000) +/* Minimal sleep interval 100 ms. */ +#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10) + +static inline size_t +decay_npurge_after_interval(arena_decay_t *decay, size_t interval) { + size_t i; + uint64_t sum = 0; + for (i = 0; i < interval; i++) { + sum += decay->backlog[i] * h_steps[i]; + } + for (; i < SMOOTHSTEP_NSTEPS; i++) { + sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]); + } + + return (size_t)(sum >> SMOOTHSTEP_BFP); +} + +static uint64_t +arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay, + extents_t *extents) { + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + /* Use minimal interval if decay is contended. */ + return BACKGROUND_THREAD_MIN_INTERVAL_NS; + } + + uint64_t interval; + ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); + if (decay_time <= 0) { + /* Purging is eagerly done or disabled currently. */ + interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + goto label_done; + } + + uint64_t decay_interval_ns = nstime_ns(&decay->interval); + assert(decay_interval_ns > 0); + size_t npages = extents_npages_get(extents); + if (npages == 0) { + unsigned i; + for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { + if (decay->backlog[i] > 0) { + break; + } + } + if (i == SMOOTHSTEP_NSTEPS) { + /* No dirty pages recorded. Sleep indefinitely. */ + interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + goto label_done; + } + } + if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) { + /* Use max interval. */ + interval = decay_interval_ns * SMOOTHSTEP_NSTEPS; + goto label_done; + } + + size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns; + size_t ub = SMOOTHSTEP_NSTEPS; + /* Minimal 2 intervals to ensure reaching next epoch deadline. */ + lb = (lb < 2) ? 2 : lb; + if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) || + (lb + 2 > ub)) { + interval = BACKGROUND_THREAD_MIN_INTERVAL_NS; + goto label_done; + } + + assert(lb + 2 <= ub); + size_t npurge_lb, npurge_ub; + npurge_lb = decay_npurge_after_interval(decay, lb); + if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + interval = decay_interval_ns * lb; + goto label_done; + } + npurge_ub = decay_npurge_after_interval(decay, ub); + if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) { + interval = decay_interval_ns * ub; + goto label_done; + } + + unsigned n_search = 0; + size_t target, npurge; + while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub) + && (lb + 2 < ub)) { + target = (lb + ub) / 2; + npurge = decay_npurge_after_interval(decay, target); + if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + ub = target; + npurge_ub = npurge; + } else { + lb = target; + npurge_lb = npurge; + } + assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1); + } + interval = decay_interval_ns * (ub + lb) / 2; +label_done: + interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ? + BACKGROUND_THREAD_MIN_INTERVAL_NS : interval; + malloc_mutex_unlock(tsdn, &decay->mtx); + + return interval; +} + +/* Compute purge interval for background threads. 
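arena_decay_compute_purge_interval_impl() above narrows the candidate sleep interval by bisection, relying on the projected purge count only growing as the interval grows. The kernel of that search in isolation, with hypothetical names (f stands in for decay_npurge_after_interval()):

#include <stddef.h>

typedef size_t (*npurge_fn_t)(size_t interval);

/* Smallest i in (lb, ub] whose projection exceeds threshold, assuming f
 * is nondecreasing, f(lb) <= threshold, and f(ub) > threshold. */
static size_t
smallest_interval_above(npurge_fn_t f, size_t lb, size_t ub, size_t threshold) {
        while (lb + 1 < ub) {
                size_t mid = (lb + ub) / 2;
                if (f(mid) > threshold) {
                        ub = mid;       /* mid already purges enough pages. */
                } else {
                        lb = mid;       /* Need a longer interval. */
                }
        }
        return ub;
}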
*/ +static uint64_t +arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) { + uint64_t i1, i2; + i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty, + &arena->extents_dirty); + if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) { + return i1; + } + i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy, + &arena->extents_muzzy); + + return i1 < i2 ? i1 : i2; +} + +static void +background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info, + uint64_t interval) { + if (config_stats) { + info->tot_n_runs++; + } + info->npages_to_purge_new = 0; + + struct timeval tv; + /* Specific clock required by timedwait. */ + gettimeofday(&tv, NULL); + nstime_t before_sleep; + nstime_init2(&before_sleep, tv.tv_sec, tv.tv_usec * 1000); + + int ret; + if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) { + assert(background_thread_indefinite_sleep(info)); + ret = pthread_cond_wait(&info->cond, &info->mtx.lock); + assert(ret == 0); + } else { + assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS && + interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP); + /* We need malloc clock (can be different from tv). */ + nstime_t next_wakeup; + nstime_init(&next_wakeup, 0); + nstime_update(&next_wakeup); + nstime_iadd(&next_wakeup, interval); + assert(nstime_ns(&next_wakeup) < + BACKGROUND_THREAD_INDEFINITE_SLEEP); + background_thread_wakeup_time_set(tsdn, info, + nstime_ns(&next_wakeup)); + + nstime_t ts_wakeup; + nstime_copy(&ts_wakeup, &before_sleep); + nstime_iadd(&ts_wakeup, interval); + struct timespec ts; + ts.tv_sec = (size_t)nstime_sec(&ts_wakeup); + ts.tv_nsec = (size_t)nstime_nsec(&ts_wakeup); + + assert(!background_thread_indefinite_sleep(info)); + ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts); + assert(ret == ETIMEDOUT || ret == 0); + background_thread_wakeup_time_set(tsdn, info, + BACKGROUND_THREAD_INDEFINITE_SLEEP); + } + if (config_stats) { + gettimeofday(&tv, NULL); + nstime_t after_sleep; + nstime_init2(&after_sleep, tv.tv_sec, tv.tv_usec * 1000); + if (nstime_compare(&after_sleep, &before_sleep) > 0) { + nstime_subtract(&after_sleep, &before_sleep); + nstime_add(&info->tot_sleep_time, &after_sleep); + } + } +} + +static bool +background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) { + if (unlikely(info->state == background_thread_paused)) { + malloc_mutex_unlock(tsdn, &info->mtx); + /* Wait on global lock to update status. */ + malloc_mutex_lock(tsdn, &background_thread_lock); + malloc_mutex_unlock(tsdn, &background_thread_lock); + malloc_mutex_lock(tsdn, &info->mtx); + return true; + } + + return false; +} + +static inline void +background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) { + uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP; + unsigned narenas = narenas_total_get(); + + for (unsigned i = ind; i < narenas; i += ncpus) { + arena_t *arena = arena_get(tsdn, i, false); + if (!arena) { + continue; + } + arena_decay(tsdn, arena, true, false); + if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) { + /* Min interval will be used. 
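background_thread_sleep() above must translate a relative interval into the absolute deadline that pthread_cond_timedwait() expects, on the clock the condvar actually uses (CLOCK_REALTIME by default, which is why the code samples gettimeofday() rather than the internal malloc clock). A self-contained sketch of that conversion:

#include <sys/time.h>
#include <stdint.h>
#include <time.h>

static void
deadline_after_ns(struct timespec *ts, uint64_t interval_ns) {
        struct timeval tv;
        gettimeofday(&tv, NULL);
        /* Current wall-clock time in nanoseconds... */
        uint64_t now_ns = (uint64_t)tv.tv_sec * 1000000000u +
            (uint64_t)tv.tv_usec * 1000u;
        /* ...plus the relative interval gives the absolute deadline. */
        uint64_t end_ns = now_ns + interval_ns;
        ts->tv_sec = (time_t)(end_ns / 1000000000u);
        ts->tv_nsec = (long)(end_ns % 1000000000u);
}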
*/ + continue; + } + uint64_t interval = arena_decay_compute_purge_interval(tsdn, + arena); + assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS); + if (min_interval > interval) { + min_interval = interval; + } + } + background_thread_sleep(tsdn, info, min_interval); +} + +static bool +background_threads_disable_single(tsd_t *tsd, background_thread_info_t *info) { + if (info == &background_thread_info[0]) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), + &background_thread_lock); + } else { + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), + &background_thread_lock); + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + bool has_thread; + assert(info->state != background_thread_paused); + if (info->state == background_thread_started) { + has_thread = true; + info->state = background_thread_stopped; + pthread_cond_signal(&info->cond); + } else { + has_thread = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + + if (!has_thread) { + post_reentrancy(tsd); + return false; + } + void *ret; + if (pthread_join(info->thread, &ret)) { + post_reentrancy(tsd); + return true; + } + assert(ret == NULL); + n_background_threads--; + post_reentrancy(tsd); + + return false; +} + +static void *background_thread_entry(void *ind_arg); + +static int +background_thread_create_signals_masked(pthread_t *thread, + const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg) { + /* + * Mask signals during thread creation so that the thread inherits + * an empty signal set. + */ + sigset_t set; + sigfillset(&set); + sigset_t oldset; + int mask_err = pthread_sigmask(SIG_SETMASK, &set, &oldset); + if (mask_err != 0) { + return mask_err; + } + int create_err = pthread_create_wrapper(thread, attr, start_routine, + arg); + /* + * Restore the signal mask. Failure to restore the signal mask here + * changes program behavior. + */ + int restore_err = pthread_sigmask(SIG_SETMASK, &oldset, NULL); + if (restore_err != 0) { + malloc_printf("<jemalloc>: background thread creation " + "failed (%d), and signal mask restoration failed " + "(%d)\n", create_err, restore_err); + if (opt_abort) { + abort(); + } + } + return create_err; +} + +static void +check_background_thread_creation(tsd_t *tsd, unsigned *n_created, + bool *created_threads) { + if (likely(*n_created == n_background_threads)) { + return; + } + + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_info[0].mtx); +label_restart: + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + for (unsigned i = 1; i < ncpus; i++) { + if (created_threads[i]) { + continue; + } + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + assert(info->state != background_thread_paused); + bool create = (info->state == background_thread_started); + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + if (!create) { + continue; + } + + /* + * To avoid deadlock with prefork handlers (which waits for the + * mutex held here), unlock before calling pthread_create(). + */ + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + + pre_reentrancy(tsd, NULL); + int err = background_thread_create_signals_masked(&info->thread, + NULL, background_thread_entry, (void *)(uintptr_t)i); + post_reentrancy(tsd); + + if (err == 0) { + (*n_created)++; + created_threads[i] = true; + } else { + malloc_printf("<jemalloc>: background thread " + "creation failed (%d)\n", err); + if (opt_abort) { + abort(); + } + } + /* Restart since we unlocked. 
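background_thread_create_signals_masked() above exploits the POSIX rule that a new thread inherits its creator's signal mask: block everything, create, then restore. The bare pattern, independent of jemalloc's wrapper plumbing:

#include <pthread.h>
#include <signal.h>

static int
create_with_signals_blocked(pthread_t *thd, void *(*fn)(void *), void *arg) {
        sigset_t all, old;
        sigfillset(&all);
        int err = pthread_sigmask(SIG_SETMASK, &all, &old);
        if (err != 0) {
                return err;
        }
        /* The new thread starts with every signal blocked. */
        err = pthread_create(thd, NULL, fn, arg);
        /* Restore the creator's mask whether or not creation worked. */
        (void)pthread_sigmask(SIG_SETMASK, &old, NULL);
        return err;
}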
*/ + goto label_restart; + } + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_info[0].mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); +} + +static void +background_thread0_work(tsd_t *tsd) { + /* Thread0 is also responsible for launching / terminating threads. */ + VARIABLE_ARRAY(bool, created_threads, ncpus); + unsigned i; + for (i = 1; i < ncpus; i++) { + created_threads[i] = false; + } + /* Start working, and create more threads when asked. */ + unsigned n_created = 1; + while (background_thread_info[0].state != background_thread_stopped) { + if (background_thread_pause_check(tsd_tsdn(tsd), + &background_thread_info[0])) { + continue; + } + check_background_thread_creation(tsd, &n_created, + (bool *)&created_threads); + background_work_sleep_once(tsd_tsdn(tsd), + &background_thread_info[0], 0); + } + + /* + * Shut down other threads at exit. Note that the ctl thread is holding + * the global background_thread mutex (and is waiting) for us. + */ + assert(!background_thread_enabled()); + for (i = 1; i < ncpus; i++) { + background_thread_info_t *info = &background_thread_info[i]; + assert(info->state != background_thread_paused); + if (created_threads[i]) { + background_threads_disable_single(tsd, info); + } else { + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + /* Clear in case the thread wasn't created. */ + info->state = background_thread_stopped; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + } + background_thread_info[0].state = background_thread_stopped; + assert(n_background_threads == 1); +} + +static void +background_work(tsd_t *tsd, unsigned ind) { + background_thread_info_t *info = &background_thread_info[ind]; + + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + background_thread_wakeup_time_set(tsd_tsdn(tsd), info, + BACKGROUND_THREAD_INDEFINITE_SLEEP); + if (ind == 0) { + background_thread0_work(tsd); + } else { + while (info->state != background_thread_stopped) { + if (background_thread_pause_check(tsd_tsdn(tsd), + info)) { + continue; + } + background_work_sleep_once(tsd_tsdn(tsd), info, ind); + } + } + assert(info->state == background_thread_stopped); + background_thread_wakeup_time_set(tsd_tsdn(tsd), info, 0); + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); +} + +static void * +background_thread_entry(void *ind_arg) { + unsigned thread_ind = (unsigned)(uintptr_t)ind_arg; + assert(thread_ind < ncpus); +#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP + pthread_setname_np(pthread_self(), "jemalloc_bg_thd"); +#endif + if (opt_percpu_arena != percpu_arena_disabled) { + set_current_thread_affinity((int)thread_ind); + } + /* + * Start periodic background work. We use internal tsd which avoids + * side effects, for example triggering new arena creation (which in + * turn triggers another background thread creation). + */ + background_work(tsd_internal_fetch(), thread_ind); + assert(pthread_equal(pthread_self(), + background_thread_info[thread_ind].thread)); + + return NULL; +} + +static void +background_thread_init(tsd_t *tsd, background_thread_info_t *info) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + info->state = background_thread_started; + background_thread_info_init(tsd_tsdn(tsd), info); + n_background_threads++; +} + +/* Create a new background thread if needed. */ +bool +background_thread_create(tsd_t *tsd, unsigned arena_ind) { + assert(have_background_thread); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + /* We create at most NCPUs threads. 
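Arenas are folded onto at most ncpus background threads: arena_ind % ncpus picks the owning thread, and each thread walks its arenas with a stride of ncpus (the i += ncpus loop in background_work_sleep_once() above). A toy program showing the resulting partition for hypothetical counts:

#include <stdio.h>

int
main(void) {
        unsigned narenas = 8, ncpus = 3;
        for (unsigned t = 0; t < ncpus; t++) {
                printf("thread %u serves:", t);
                for (unsigned a = t; a < narenas; a += ncpus) {
                        printf(" arena %u", a); /* a % ncpus == t */
                }
                printf("\n");
        }
        return 0;
}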
*/ + size_t thread_ind = arena_ind % ncpus; + background_thread_info_t *info = &background_thread_info[thread_ind]; + + bool need_new_thread; + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + need_new_thread = background_thread_enabled() && + (info->state == background_thread_stopped); + if (need_new_thread) { + background_thread_init(tsd, info); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + if (!need_new_thread) { + return false; + } + if (arena_ind != 0) { + /* Threads are created asynchronously by Thread 0. */ + background_thread_info_t *t0 = &background_thread_info[0]; + malloc_mutex_lock(tsd_tsdn(tsd), &t0->mtx); + assert(t0->state == background_thread_started); + pthread_cond_signal(&t0->cond); + malloc_mutex_unlock(tsd_tsdn(tsd), &t0->mtx); + + return false; + } + + pre_reentrancy(tsd, NULL); + /* + * To avoid complications (besides reentrancy), create internal + * background threads with the underlying pthread_create. + */ + int err = background_thread_create_signals_masked(&info->thread, NULL, + background_thread_entry, (void *)thread_ind); + post_reentrancy(tsd); + + if (err != 0) { + malloc_printf("<jemalloc>: arena 0 background thread creation " + "failed (%d)\n", err); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_stopped; + n_background_threads--; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + + return true; + } + + return false; +} + +bool +background_threads_enable(tsd_t *tsd) { + assert(n_background_threads == 0); + assert(background_thread_enabled()); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + VARIABLE_ARRAY(bool, marked, ncpus); + unsigned i, nmarked; + for (i = 0; i < ncpus; i++) { + marked[i] = false; + } + nmarked = 0; + /* Mark the threads we need to create for thread 0. */ + unsigned n = narenas_total_get(); + for (i = 1; i < n; i++) { + if (marked[i % ncpus] || + arena_get(tsd_tsdn(tsd), i, false) == NULL) { + continue; + } + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + assert(info->state == background_thread_stopped); + background_thread_init(tsd, info); + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + marked[i % ncpus] = true; + if (++nmarked == ncpus) { + break; + } + } + + return background_thread_create(tsd, 0); +} + +bool +background_threads_disable(tsd_t *tsd) { + assert(!background_thread_enabled()); + malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock); + + /* Thread 0 will be responsible for terminating other threads. */ + if (background_threads_disable_single(tsd, + &background_thread_info[0])) { + return true; + } + assert(n_background_threads == 0); + + return false; +} + +/* Check if we need to signal the background thread early. */ +void +background_thread_interval_check(tsdn_t *tsdn, arena_t *arena, + arena_decay_t *decay, size_t npages_new) { + background_thread_info_t *info = arena_background_thread_info_get( + arena); + if (malloc_mutex_trylock(tsdn, &info->mtx)) { + /* + * Background thread may hold the mutex for a long period of + * time. We'd like to avoid the variance on application + * threads. So keep this non-blocking, and leave the work to a + * future epoch. + */ + return; + } + + if (info->state != background_thread_started) { + goto label_done; + } + if (malloc_mutex_trylock(tsdn, &decay->mtx)) { + goto label_done; + } + + ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); + if (decay_time <= 0) { + /* Purging is eagerly done or disabled currently. 
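background_thread_interval_check() above is deliberately non-blocking: it runs on application threads during deallocation, so it only ever trylocks and abandons the bookkeeping on contention instead of stalling the caller. The shape of that pattern in miniature:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Returns false when the lock was contended; the caller simply skips
 * the update and lets a later call redo it. */
static bool
try_account_pages(pthread_mutex_t *mtx, size_t *pending, size_t npages) {
        if (pthread_mutex_trylock(mtx) != 0) {
                return false;
        }
        *pending += npages;
        pthread_mutex_unlock(mtx);
        return true;
}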
*/ + goto label_done_unlock2; + } + uint64_t decay_interval_ns = nstime_ns(&decay->interval); + assert(decay_interval_ns > 0); + + nstime_t diff; + nstime_init(&diff, background_thread_wakeup_time_get(info)); + if (nstime_compare(&diff, &decay->epoch) <= 0) { + goto label_done_unlock2; + } + nstime_subtract(&diff, &decay->epoch); + if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) { + goto label_done_unlock2; + } + + if (npages_new > 0) { + size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns); + /* + * Compute how many new pages we would need to purge by the next + * wakeup, which is used to determine if we should signal the + * background thread. + */ + uint64_t npurge_new; + if (n_epoch >= SMOOTHSTEP_NSTEPS) { + npurge_new = npages_new; + } else { + uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1]; + assert(h_steps_max >= + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npurge_new = npages_new * (h_steps_max - + h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]); + npurge_new >>= SMOOTHSTEP_BFP; + } + info->npages_to_purge_new += npurge_new; + } + + bool should_signal; + if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) { + should_signal = true; + } else if (unlikely(background_thread_indefinite_sleep(info)) && + (extents_npages_get(&arena->extents_dirty) > 0 || + extents_npages_get(&arena->extents_muzzy) > 0 || + info->npages_to_purge_new > 0)) { + should_signal = true; + } else { + should_signal = false; + } + + if (should_signal) { + info->npages_to_purge_new = 0; + pthread_cond_signal(&info->cond); + } +label_done_unlock2: + malloc_mutex_unlock(tsdn, &decay->mtx); +label_done: + malloc_mutex_unlock(tsdn, &info->mtx); +} + +void +background_thread_prefork0(tsdn_t *tsdn) { + malloc_mutex_prefork(tsdn, &background_thread_lock); + background_thread_enabled_at_fork = background_thread_enabled(); +} + +void +background_thread_prefork1(tsdn_t *tsdn) { + for (unsigned i = 0; i < ncpus; i++) { + malloc_mutex_prefork(tsdn, &background_thread_info[i].mtx); + } +} + +void +background_thread_postfork_parent(tsdn_t *tsdn) { + for (unsigned i = 0; i < ncpus; i++) { + malloc_mutex_postfork_parent(tsdn, + &background_thread_info[i].mtx); + } + malloc_mutex_postfork_parent(tsdn, &background_thread_lock); +} + +void +background_thread_postfork_child(tsdn_t *tsdn) { + for (unsigned i = 0; i < ncpus; i++) { + malloc_mutex_postfork_child(tsdn, + &background_thread_info[i].mtx); + } + malloc_mutex_postfork_child(tsdn, &background_thread_lock); + if (!background_thread_enabled_at_fork) { + return; + } + + /* Clear background_thread state (reset to disabled for child). 
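The prefork/postfork trio above follows the standard fork-safety recipe: only the forking thread exists in the child, so every mutex is acquired before fork() and then released in the parent and released or reinitialized in the child. The same discipline in miniature, registered through pthread_atfork() for illustration; jemalloc wires equivalent handlers up through its own fork interposition rather than this call:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void prefork(void)         { pthread_mutex_lock(&lock); }
static void postfork_parent(void) { pthread_mutex_unlock(&lock); }
static void postfork_child(void)  { pthread_mutex_unlock(&lock); }

static void
install_fork_hooks(void) {
        pthread_atfork(prefork, postfork_parent, postfork_child);
}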
*/ + malloc_mutex_lock(tsdn, &background_thread_lock); + n_background_threads = 0; + background_thread_enabled_set(tsdn, false); + for (unsigned i = 0; i < ncpus; i++) { + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsdn, &info->mtx); + info->state = background_thread_stopped; + int ret = pthread_cond_init(&info->cond, NULL); + assert(ret == 0); + background_thread_info_init(tsdn, info); + malloc_mutex_unlock(tsdn, &info->mtx); + } + malloc_mutex_unlock(tsdn, &background_thread_lock); +} + +bool +background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) { + assert(config_stats); + malloc_mutex_lock(tsdn, &background_thread_lock); + if (!background_thread_enabled()) { + malloc_mutex_unlock(tsdn, &background_thread_lock); + return true; + } + + stats->num_threads = n_background_threads; + uint64_t num_runs = 0; + nstime_init(&stats->run_interval, 0); + for (unsigned i = 0; i < ncpus; i++) { + background_thread_info_t *info = &background_thread_info[i]; + malloc_mutex_lock(tsdn, &info->mtx); + if (info->state != background_thread_stopped) { + num_runs += info->tot_n_runs; + nstime_add(&stats->run_interval, &info->tot_sleep_time); + } + malloc_mutex_unlock(tsdn, &info->mtx); + } + stats->num_runs = num_runs; + if (num_runs > 0) { + nstime_idivide(&stats->run_interval, num_runs); + } + malloc_mutex_unlock(tsdn, &background_thread_lock); + + return false; +} + +#undef BACKGROUND_THREAD_NPAGES_THRESHOLD +#undef BILLION +#undef BACKGROUND_THREAD_MIN_INTERVAL_NS + +/* + * When lazy lock is enabled, we need to make sure setting isthreaded before + * taking any background_thread locks. This is called early in ctl (instead of + * wait for the pthread_create calls to trigger) because the mutex is required + * before creating background threads. + */ +void +background_thread_ctl_init(tsdn_t *tsdn) { + malloc_mutex_assert_not_owner(tsdn, &background_thread_lock); +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER + pthread_once(&once_control, pthread_create_wrapper_once); +#endif +} + +#endif /* defined(JEMALLOC_BACKGROUND_THREAD) */ + +bool +background_thread_boot0(void) { + if (!have_background_thread && opt_background_thread) { + malloc_printf("<jemalloc>: option background_thread currently " + "supports pthread only\n"); + return true; + } + +#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER + pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); + if (pthread_create_fptr == NULL) { + can_enable_background_thread = false; + if (config_lazy_lock || opt_background_thread) { + malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " + "\"pthread_create\")\n"); + abort(); + } + } else { + can_enable_background_thread = true; + } +#endif + return false; +} + +bool +background_thread_boot1(tsdn_t *tsdn) { +#ifdef JEMALLOC_BACKGROUND_THREAD + assert(have_background_thread); + assert(narenas_total_get() > 0); + + background_thread_enabled_set(tsdn, opt_background_thread); + if (malloc_mutex_init(&background_thread_lock, + "background_thread_global", + WITNESS_RANK_BACKGROUND_THREAD_GLOBAL, + malloc_mutex_rank_exclusive)) { + return true; + } + if (opt_background_thread) { + background_thread_ctl_init(tsdn); + } + + background_thread_info = (background_thread_info_t *)base_alloc(tsdn, + b0get(), ncpus * sizeof(background_thread_info_t), CACHELINE); + if (background_thread_info == NULL) { + return true; + } + + for (unsigned i = 0; i < ncpus; i++) { + background_thread_info_t *info = &background_thread_info[i]; + /* Thread mutex is rank_inclusive because of thread0. 
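background_thread_boot0() above resolves the real pthread_create with dlsym(RTLD_NEXT, ...), the usual idiom for a library that interposes a symbol but still needs the next definition in link order. A pared-down sketch (RTLD_NEXT requires _GNU_SOURCE on glibc):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>

typedef int (*pthread_create_t)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

static pthread_create_t real_pthread_create;

/* Returns nonzero on failure, matching the boot function's convention. */
static int
resolve_real_pthread_create(void) {
        real_pthread_create = (pthread_create_t)dlsym(RTLD_NEXT,
            "pthread_create");
        return real_pthread_create == NULL;
}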
*/ + if (malloc_mutex_init(&info->mtx, "background_thread", + WITNESS_RANK_BACKGROUND_THREAD, + malloc_mutex_address_ordered)) { + return true; + } + if (pthread_cond_init(&info->cond, NULL)) { + return true; + } + malloc_mutex_lock(tsdn, &info->mtx); + info->state = background_thread_stopped; + background_thread_info_init(tsdn, info); + malloc_mutex_unlock(tsdn, &info->mtx); + } +#endif + + return false; +} diff --git a/dep/jemalloc/src/base.c b/dep/jemalloc/src/base.c index 4e62e8fa918..97078b134d1 100644 --- a/dep/jemalloc/src/base.c +++ b/dep/jemalloc/src/base.c @@ -1,142 +1,402 @@ -#define JEMALLOC_BASE_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_BASE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/sz.h" /******************************************************************************/ /* Data. */ -static malloc_mutex_t base_mtx; - -/* - * Current pages that are being used for internal memory allocations. These - * pages are carved up in cacheline-size quanta, so that there is no chance of - * false cache line sharing. - */ -static void *base_pages; -static void *base_next_addr; -static void *base_past_addr; /* Addr immediately past base_pages. */ -static extent_node_t *base_nodes; +static base_t *b0; /******************************************************************************/ -/* Function prototypes for non-inline static functions. */ -static bool base_pages_alloc(size_t minsize); +static void * +base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) { + void *addr; + bool zero = true; + bool commit = true; -/******************************************************************************/ + assert(size == HUGEPAGE_CEILING(size)); + + if (extent_hooks == &extent_hooks_default) { + addr = extent_alloc_mmap(NULL, size, PAGE, &zero, &commit); + } else { + /* No arena context as we are creating new arenas. */ + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + addr = extent_hooks->alloc(extent_hooks, NULL, size, PAGE, + &zero, &commit, ind); + post_reentrancy(tsd); + } + + return addr; +} + +static void +base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr, + size_t size) { + /* + * Cascade through dalloc, decommit, purge_forced, and purge_lazy, + * stopping at first success. This cascade is performed for consistency + * with the cascade in extent_dalloc_wrapper() because an application's + * custom hooks may not support e.g. dalloc. This function is only ever + * called as a side effect of arena destruction, so although it might + * seem pointless to do anything besides dalloc here, the application + * may in fact want the end state of all associated virtual memory to be + * in some consistent-but-allocated state. + */ + if (extent_hooks == &extent_hooks_default) { + if (!extent_dalloc_mmap(addr, size)) { + return; + } + if (!pages_decommit(addr, size)) { + return; + } + if (!pages_purge_forced(addr, size)) { + return; + } + if (!pages_purge_lazy(addr, size)) { + return; + } + /* Nothing worked. This should never happen. */ + not_reached(); + } else { + tsd_t *tsd = tsdn_null(tsdn) ? 
tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, NULL); + if (extent_hooks->dalloc != NULL && + !extent_hooks->dalloc(extent_hooks, addr, size, true, + ind)) { + goto label_done; + } + if (extent_hooks->decommit != NULL && + !extent_hooks->decommit(extent_hooks, addr, size, 0, size, + ind)) { + goto label_done; + } + if (extent_hooks->purge_forced != NULL && + !extent_hooks->purge_forced(extent_hooks, addr, size, 0, + size, ind)) { + goto label_done; + } + if (extent_hooks->purge_lazy != NULL && + !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size, + ind)) { + goto label_done; + } + /* Nothing worked. That's the application's problem. */ + label_done: + post_reentrancy(tsd); + return; + } +} -static bool -base_pages_alloc(size_t minsize) -{ - size_t csize; - bool zero; +static void +base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr, + size_t size) { + size_t sn; - assert(minsize != 0); - csize = CHUNK_CEILING(minsize); - zero = false; - base_pages = chunk_alloc(csize, chunksize, true, &zero, - chunk_dss_prec_get()); - if (base_pages == NULL) - return (true); - base_next_addr = base_pages; - base_past_addr = (void *)((uintptr_t)base_pages + csize); + sn = *extent_sn_next; + (*extent_sn_next)++; - return (false); + extent_binit(extent, addr, size, sn); } -void * -base_alloc(size_t size) -{ +static void * +base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size, + size_t alignment) { void *ret; - size_t csize; - /* Round size up to nearest multiple of the cacheline size. */ - csize = CACHELINE_CEILING(size); + assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM)); + assert(size == ALIGNMENT_CEILING(size, alignment)); - malloc_mutex_lock(&base_mtx); - /* Make sure there's enough space for the allocation. */ - if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) { - if (base_pages_alloc(csize)) { - malloc_mutex_unlock(&base_mtx); - return (NULL); - } + *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent), + alignment) - (uintptr_t)extent_addr_get(extent); + ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size); + assert(extent_bsize_get(extent) >= *gap_size + size); + extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) + + *gap_size + size), extent_bsize_get(extent) - *gap_size - size, + extent_sn_get(extent)); + return ret; +} + +static void +base_extent_bump_alloc_post(tsdn_t *tsdn, base_t *base, extent_t *extent, + size_t gap_size, void *addr, size_t size) { + if (extent_bsize_get(extent) > 0) { + /* + * Compute the index for the largest size class that does not + * exceed extent's size. + */ + szind_t index_floor = + sz_size2index(extent_bsize_get(extent) + 1) - 1; + extent_heap_insert(&base->avail[index_floor], extent); } - /* Allocate. */ - ret = base_next_addr; - base_next_addr = (void *)((uintptr_t)base_next_addr + csize); - malloc_mutex_unlock(&base_mtx); - VALGRIND_MAKE_MEM_UNDEFINED(ret, csize); - return (ret); + if (config_stats) { + base->allocated += size; + /* + * Add one PAGE to base_resident for every page boundary that is + * crossed by the new allocation. 
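base_extent_bump_alloc_helper() above is a bump allocator with alignment: round the block cursor up, hand out the aligned pointer, and account the skipped gap against the block's remaining size. The same arithmetic in isolation, assuming a power-of-two alignment as jemalloc does:

#include <stdint.h>
#include <stddef.h>

static inline uintptr_t
align_up(uintptr_t p, uintptr_t align) {
        return (p + align - 1) & ~(align - 1);
}

static void *
bump_alloc(uintptr_t *cursor, size_t *remaining, size_t size, size_t align) {
        uintptr_t ret = align_up(*cursor, (uintptr_t)align);
        size_t gap = (size_t)(ret - *cursor);
        if (gap + size > *remaining) {
                return NULL;    /* Too small; the caller maps a new block. */
        }
        *cursor = ret + size;
        *remaining -= gap + size;
        return (void *)ret;
}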
+ */ + base->resident += PAGE_CEILING((uintptr_t)addr + size) - + PAGE_CEILING((uintptr_t)addr - gap_size); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + } } -void * -base_calloc(size_t number, size_t size) -{ - void *ret = base_alloc(number * size); +static void * +base_extent_bump_alloc(tsdn_t *tsdn, base_t *base, extent_t *extent, + size_t size, size_t alignment) { + void *ret; + size_t gap_size; + + ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment); + base_extent_bump_alloc_post(tsdn, base, extent, gap_size, ret, size); + return ret; +} + +/* + * Allocate a block of virtual memory that is large enough to start with a + * base_block_t header, followed by an object of specified size and alignment. + * On success a pointer to the initialized base_block_t header is returned. + */ +static base_block_t * +base_block_alloc(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, + pszind_t *pind_last, size_t *extent_sn_next, size_t size, + size_t alignment) { + alignment = ALIGNMENT_CEILING(alignment, QUANTUM); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t header_size = sizeof(base_block_t); + size_t gap_size = ALIGNMENT_CEILING(header_size, alignment) - + header_size; + /* + * Create increasingly larger blocks in order to limit the total number + * of disjoint virtual memory ranges. Choose the next size in the page + * size class series (skipping size classes that are not a multiple of + * HUGEPAGE), or a size large enough to satisfy the requested size and + * alignment, whichever is larger. + */ + size_t min_block_size = HUGEPAGE_CEILING(sz_psz2u(header_size + gap_size + + usize)); + pszind_t pind_next = (*pind_last + 1 < NPSIZES) ? *pind_last + 1 : + *pind_last; + size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next)); + size_t block_size = (min_block_size > next_block_size) ? min_block_size + : next_block_size; + base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind, + block_size); + if (block == NULL) { + return NULL; + } + *pind_last = sz_psz2ind(block_size); + block->size = block_size; + block->next = NULL; + assert(block_size >= header_size); + base_extent_init(extent_sn_next, &block->extent, + (void *)((uintptr_t)block + header_size), block_size - header_size); + return block; +} - if (ret != NULL) - memset(ret, 0, number * size); +/* + * Allocate an extent that is at least as large as specified size, with + * specified alignment. + */ +static extent_t * +base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + malloc_mutex_assert_owner(tsdn, &base->mtx); - return (ret); + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + /* + * Drop mutex during base_block_alloc(), because an extent hook will be + * called. 
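base_block_alloc() above sizes each new block as the larger of what the current request needs and the next step in a growing size series, which keeps the number of disjoint virtual memory ranges logarithmic in the total allocated. The policy reduced to a plain doubling series, standing in for jemalloc's page-size-class series:

#include <stddef.h>

static size_t
next_block_size(size_t prev_block_size, size_t request_size) {
        size_t next = prev_block_size * 2;      /* Growing series. */
        return request_size > next ? request_size : next;
}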
+ */ + malloc_mutex_unlock(tsdn, &base->mtx); + base_block_t *block = base_block_alloc(tsdn, extent_hooks, + base_ind_get(base), &base->pind_last, &base->extent_sn_next, size, + alignment); + malloc_mutex_lock(tsdn, &base->mtx); + if (block == NULL) { + return NULL; + } + block->next = base->blocks; + base->blocks = block; + if (config_stats) { + base->allocated += sizeof(base_block_t); + base->resident += PAGE_CEILING(sizeof(base_block_t)); + base->mapped += block->size; + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + } + return &block->extent; } -extent_node_t * -base_node_alloc(void) -{ - extent_node_t *ret; +base_t * +b0get(void) { + return b0; +} - malloc_mutex_lock(&base_mtx); - if (base_nodes != NULL) { - ret = base_nodes; - base_nodes = *(extent_node_t **)ret; - malloc_mutex_unlock(&base_mtx); - VALGRIND_MAKE_MEM_UNDEFINED(ret, sizeof(extent_node_t)); - } else { - malloc_mutex_unlock(&base_mtx); - ret = (extent_node_t *)base_alloc(sizeof(extent_node_t)); +base_t * +base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + pszind_t pind_last = 0; + size_t extent_sn_next = 0; + base_block_t *block = base_block_alloc(tsdn, extent_hooks, ind, + &pind_last, &extent_sn_next, sizeof(base_t), QUANTUM); + if (block == NULL) { + return NULL; } - return (ret); + size_t gap_size; + size_t base_alignment = CACHELINE; + size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment); + base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent, + &gap_size, base_size, base_alignment); + base->ind = ind; + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED); + if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE, + malloc_mutex_rank_exclusive)) { + base_unmap(tsdn, extent_hooks, ind, block, block->size); + return NULL; + } + base->pind_last = pind_last; + base->extent_sn_next = extent_sn_next; + base->blocks = block; + for (szind_t i = 0; i < NSIZES; i++) { + extent_heap_new(&base->avail[i]); + } + if (config_stats) { + base->allocated = sizeof(base_block_t); + base->resident = PAGE_CEILING(sizeof(base_block_t)); + base->mapped = block->size; + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + } + base_extent_bump_alloc_post(tsdn, base, &block->extent, gap_size, base, + base_size); + + return base; } void -base_node_dealloc(extent_node_t *node) -{ +base_delete(tsdn_t *tsdn, base_t *base) { + extent_hooks_t *extent_hooks = base_extent_hooks_get(base); + base_block_t *next = base->blocks; + do { + base_block_t *block = next; + next = block->next; + base_unmap(tsdn, extent_hooks, base_ind_get(base), block, + block->size); + } while (next != NULL); +} - VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t)); - malloc_mutex_lock(&base_mtx); - *(extent_node_t **)node = base_nodes; - base_nodes = node; - malloc_mutex_unlock(&base_mtx); +extent_hooks_t * +base_extent_hooks_get(base_t *base) { + return (extent_hooks_t *)atomic_load_p(&base->extent_hooks, + ATOMIC_ACQUIRE); } -bool -base_boot(void) -{ +extent_hooks_t * +base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) { + extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base); + atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE); + return old_extent_hooks; +} + +static void * +base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment, + size_t *esn) { + alignment = QUANTUM_CEILING(alignment); + size_t usize = ALIGNMENT_CEILING(size, alignment); + size_t asize = usize + 
alignment - QUANTUM; + + extent_t *extent = NULL; + malloc_mutex_lock(tsdn, &base->mtx); + for (szind_t i = sz_size2index(asize); i < NSIZES; i++) { + extent = extent_heap_remove_first(&base->avail[i]); + if (extent != NULL) { + /* Use existing space. */ + break; + } + } + if (extent == NULL) { + /* Try to allocate more space. */ + extent = base_extent_alloc(tsdn, base, usize, alignment); + } + void *ret; + if (extent == NULL) { + ret = NULL; + goto label_return; + } - base_nodes = NULL; - if (malloc_mutex_init(&base_mtx)) - return (true); + ret = base_extent_bump_alloc(tsdn, base, extent, usize, alignment); + if (esn != NULL) { + *esn = extent_sn_get(extent); + } +label_return: + malloc_mutex_unlock(tsdn, &base->mtx); + return ret; +} - return (false); +/* + * base_alloc() returns zeroed memory, which is always demand-zeroed for the + * auto arenas, in order to make multi-page sparse data structures such as radix + * tree nodes efficient with respect to physical memory usage. Upon success a + * pointer to at least size bytes with specified alignment is returned. Note + * that size is rounded up to the nearest multiple of alignment to avoid false + * sharing. + */ +void * +base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) { + return base_alloc_impl(tsdn, base, size, alignment, NULL); +} + +extent_t * +base_alloc_extent(tsdn_t *tsdn, base_t *base) { + size_t esn; + extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t), + CACHELINE, &esn); + if (extent == NULL) { + return NULL; + } + extent_esn_set(extent, esn); + return extent; } void -base_prefork(void) -{ +base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated, size_t *resident, + size_t *mapped) { + cassert(config_stats); - malloc_mutex_prefork(&base_mtx); + malloc_mutex_lock(tsdn, &base->mtx); + assert(base->allocated <= base->resident); + assert(base->resident <= base->mapped); + *allocated = base->allocated; + *resident = base->resident; + *mapped = base->mapped; + malloc_mutex_unlock(tsdn, &base->mtx); } void -base_postfork_parent(void) -{ +base_prefork(tsdn_t *tsdn, base_t *base) { + malloc_mutex_prefork(tsdn, &base->mtx); +} - malloc_mutex_postfork_parent(&base_mtx); +void +base_postfork_parent(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_parent(tsdn, &base->mtx); } void -base_postfork_child(void) -{ +base_postfork_child(tsdn_t *tsdn, base_t *base) { + malloc_mutex_postfork_child(tsdn, &base->mtx); +} - malloc_mutex_postfork_child(&base_mtx); +bool +base_boot(tsdn_t *tsdn) { + b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default); + return (b0 == NULL); } diff --git a/dep/jemalloc/src/bitmap.c b/dep/jemalloc/src/bitmap.c index e2bd907d558..468b3178ebf 100644 --- a/dep/jemalloc/src/bitmap.c +++ b/dep/jemalloc/src/bitmap.c @@ -1,24 +1,15 @@ -#define JEMALLOC_BITMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_BITMAP_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
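base_alloc_impl() in the hunk above pads the request by alignment - QUANTUM so that any extent drawn from a size class big enough for asize is guaranteed to fit after alignment, then scans upward for the first nonempty class. The scan in skeletal form, with take() standing in for extent_heap_remove_first():

#include <stddef.h>

static void *
first_fit(void *(*take)(unsigned class_ind), unsigned min_class,
    unsigned nclasses) {
        for (unsigned i = min_class; i < nclasses; i++) {
                void *e = take(i);
                if (e != NULL) {
                        return e;       /* Reuse cached space. */
                }
        }
        return NULL;    /* Every class empty; map a fresh block. */
}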
*/ - -static size_t bits2groups(size_t nbits); +#include "jemalloc/internal/assert.h" /******************************************************************************/ -static size_t -bits2groups(size_t nbits) -{ - - return ((nbits >> LG_BITMAP_GROUP_NBITS) + - !!(nbits & BITMAP_GROUP_NBITS_MASK)); -} +#ifdef BITMAP_USE_TREE void -bitmap_info_init(bitmap_info_t *binfo, size_t nbits) -{ +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { unsigned i; size_t group_count; @@ -31,60 +22,100 @@ bitmap_info_init(bitmap_info_t *binfo, size_t nbits) * that requires only one group. */ binfo->levels[0].group_offset = 0; - group_count = bits2groups(nbits); + group_count = BITMAP_BITS2GROUPS(nbits); for (i = 1; group_count > 1; i++) { assert(i < BITMAP_MAX_LEVELS); binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; - group_count = bits2groups(group_count); + group_count = BITMAP_BITS2GROUPS(group_count); } binfo->levels[i].group_offset = binfo->levels[i-1].group_offset + group_count; + assert(binfo->levels[i].group_offset <= BITMAP_GROUPS_MAX); binfo->nlevels = i; binfo->nbits = nbits; } -size_t -bitmap_info_ngroups(const bitmap_info_t *binfo) -{ - - return (binfo->levels[binfo->nlevels].group_offset << LG_SIZEOF_BITMAP); -} - -size_t -bitmap_size(size_t nbits) -{ - bitmap_info_t binfo; - - bitmap_info_init(&binfo, nbits); - return (bitmap_info_ngroups(&binfo)); +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->levels[binfo->nlevels].group_offset; } void -bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo) -{ +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { size_t extra; unsigned i; /* * Bits are actually inverted with regard to the external bitmap - * interface, so the bitmap starts out with all 1 bits, except for - * trailing unused bits (if any). Note that each group uses bit 0 to - * correspond to the first logical bit in the group, so extra bits - * are the most significant bits of the last group. + * interface. + */ + + if (fill) { + /* The "filled" bitmap starts out with all 0 bits. */ + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + /* + * The "empty" bitmap starts out with all 1 bits, except for trailing + * unused bits (if any). Note that each group uses bit 0 to correspond + * to the first logical bit in the group, so extra bits are the most + * significant bits of the last group. 
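The inverted convention this comment describes (1 = free) makes an "empty" bitmap all-ones, with the unused high-order bits of the last group shifted away so they can never look free. The flat (non-tree) variant of that initialization, in isolation for 64-bit groups:

#include <stdint.h>
#include <string.h>

static void
flat_bitmap_init_empty(uint64_t *bits, size_t nbits) {
        size_t ngroups = (nbits + 63) / 64;     /* BITMAP_BITS2GROUPS */
        memset(bits, 0xff, ngroups * sizeof(uint64_t));
        /* Clear the trailing bits that correspond to no logical bit. */
        size_t extra = (64 - (nbits & 63)) & 63;
        if (extra != 0) {
                bits[ngroups - 1] >>= extra;
        }
}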
*/ - memset(bitmap, 0xffU, binfo->levels[binfo->nlevels].group_offset << - LG_SIZEOF_BITMAP); + memset(bitmap, 0xffU, bitmap_size(binfo)); extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) + if (extra != 0) { bitmap[binfo->levels[1].group_offset - 1] >>= extra; + } for (i = 1; i < binfo->nlevels; i++) { size_t group_count = binfo->levels[i].group_offset - binfo->levels[i-1].group_offset; extra = (BITMAP_GROUP_NBITS - (group_count & BITMAP_GROUP_NBITS_MASK)) & BITMAP_GROUP_NBITS_MASK; - if (extra != 0) + if (extra != 0) { bitmap[binfo->levels[i+1].group_offset - 1] >>= extra; + } } } + +#else /* BITMAP_USE_TREE */ + +void +bitmap_info_init(bitmap_info_t *binfo, size_t nbits) { + assert(nbits > 0); + assert(nbits <= (ZU(1) << LG_BITMAP_MAXBITS)); + + binfo->ngroups = BITMAP_BITS2GROUPS(nbits); + binfo->nbits = nbits; +} + +static size_t +bitmap_info_ngroups(const bitmap_info_t *binfo) { + return binfo->ngroups; +} + +void +bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo, bool fill) { + size_t extra; + + if (fill) { + memset(bitmap, 0, bitmap_size(binfo)); + return; + } + + memset(bitmap, 0xffU, bitmap_size(binfo)); + extra = (BITMAP_GROUP_NBITS - (binfo->nbits & BITMAP_GROUP_NBITS_MASK)) + & BITMAP_GROUP_NBITS_MASK; + if (extra != 0) { + bitmap[binfo->ngroups - 1] >>= extra; + } +} + +#endif /* BITMAP_USE_TREE */ + +size_t +bitmap_size(const bitmap_info_t *binfo) { + return (bitmap_info_ngroups(binfo) << LG_SIZEOF_BITMAP); +} diff --git a/dep/jemalloc/src/chunk.c b/dep/jemalloc/src/chunk.c deleted file mode 100644 index 90ab116ae5f..00000000000 --- a/dep/jemalloc/src/chunk.c +++ /dev/null @@ -1,395 +0,0 @@ -#define JEMALLOC_CHUNK_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -const char *opt_dss = DSS_DEFAULT; -size_t opt_lg_chunk = LG_CHUNK_DEFAULT; - -malloc_mutex_t chunks_mtx; -chunk_stats_t stats_chunks; - -/* - * Trees of chunks that were previously allocated (trees differ only in node - * ordering). These are used when allocating chunks, in an attempt to re-use - * address space. Depending on function, different tree orderings are needed, - * which is why there are two trees with the same contents. - */ -static extent_tree_t chunks_szad_mmap; -static extent_tree_t chunks_ad_mmap; -static extent_tree_t chunks_szad_dss; -static extent_tree_t chunks_ad_dss; - -rtree_t *chunks_rtree; - -/* Various chunk-related settings. */ -size_t chunksize; -size_t chunksize_mask; /* (chunksize - 1). */ -size_t chunk_npages; -size_t map_bias; -size_t arena_maxclass; /* Max size class for arenas. */ - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -static void *chunk_recycle(extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base, - bool *zero); -static void chunk_record(extent_tree_t *chunks_szad, - extent_tree_t *chunks_ad, void *chunk, size_t size); - -/******************************************************************************/ - -static void * -chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size, - size_t alignment, bool base, bool *zero) -{ - void *ret; - extent_node_t *node; - extent_node_t key; - size_t alloc_size, leadsize, trailsize; - bool zeroed; - - if (base) { - /* - * This function may need to call base_node_{,de}alloc(), but - * the current chunk allocation request is on behalf of the - * base allocator. Avoid deadlock (and if that weren't an - * issue, potential for infinite recursion) by returning NULL. - */ - return (NULL); - } - - alloc_size = size + alignment - chunksize; - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - key.addr = NULL; - key.size = alloc_size; - malloc_mutex_lock(&chunks_mtx); - node = extent_tree_szad_nsearch(chunks_szad, &key); - if (node == NULL) { - malloc_mutex_unlock(&chunks_mtx); - return (NULL); - } - leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) - - (uintptr_t)node->addr; - assert(node->size >= leadsize + size); - trailsize = node->size - leadsize - size; - ret = (void *)((uintptr_t)node->addr + leadsize); - zeroed = node->zeroed; - if (zeroed) - *zero = true; - /* Remove node from the tree. */ - extent_tree_szad_remove(chunks_szad, node); - extent_tree_ad_remove(chunks_ad, node); - if (leadsize != 0) { - /* Insert the leading space as a smaller chunk. */ - node->size = leadsize; - extent_tree_szad_insert(chunks_szad, node); - extent_tree_ad_insert(chunks_ad, node); - node = NULL; - } - if (trailsize != 0) { - /* Insert the trailing space as a smaller chunk. */ - if (node == NULL) { - /* - * An additional node is required, but - * base_node_alloc() can cause a new base chunk to be - * allocated. Drop chunks_mtx in order to avoid - * deadlock, and if node allocation fails, deallocate - * the result before returning an error. - */ - malloc_mutex_unlock(&chunks_mtx); - node = base_node_alloc(); - if (node == NULL) { - chunk_dealloc(ret, size, true); - return (NULL); - } - malloc_mutex_lock(&chunks_mtx); - } - node->addr = (void *)((uintptr_t)(ret) + size); - node->size = trailsize; - node->zeroed = zeroed; - extent_tree_szad_insert(chunks_szad, node); - extent_tree_ad_insert(chunks_ad, node); - node = NULL; - } - malloc_mutex_unlock(&chunks_mtx); - - if (node != NULL) - base_node_dealloc(node); - if (*zero) { - if (zeroed == false) - memset(ret, 0, size); - else if (config_debug) { - size_t i; - size_t *p = (size_t *)(uintptr_t)ret; - - VALGRIND_MAKE_MEM_DEFINED(ret, size); - for (i = 0; i < size / sizeof(size_t); i++) - assert(p[i] == 0); - } - } - return (ret); -} - -/* - * If the caller specifies (*zero == false), it is still possible to receive - * zeroed memory, in which case *zero is toggled to true. arena_chunk_alloc() - * takes advantage of this to avoid demanding zeroed chunks, but taking - * advantage of them if they are returned. - */ -void * -chunk_alloc(size_t size, size_t alignment, bool base, bool *zero, - dss_prec_t dss_prec) -{ - void *ret; - - assert(size != 0); - assert((size & chunksize_mask) == 0); - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - /* "primary" dss. 
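The removed chunk_recycle() above services a request from a cached range by carving it into an aligned lead (when the cached address was misaligned), the returned middle, and a trail (when the range was oversized); lead and trail go back into the free trees as smaller chunks. The carving arithmetic alone, assuming the caller has already verified the range is large enough:

#include <stdint.h>
#include <stddef.h>

struct span { uintptr_t addr; size_t size; };

static uintptr_t
carve(const struct span *s, size_t size, uintptr_t alignment,
    struct span *lead, struct span *trail) {
        uintptr_t ret = (s->addr + alignment - 1) & ~(alignment - 1);
        lead->addr = s->addr;
        lead->size = (size_t)(ret - s->addr);           /* May be zero. */
        trail->addr = ret + size;
        trail->size = s->size - lead->size - size;      /* May be zero. */
        return ret;
}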
*/ - if (config_dss && dss_prec == dss_prec_primary) { - if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size, - alignment, base, zero)) != NULL) - goto label_return; - if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL) - goto label_return; - } - /* mmap. */ - if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size, - alignment, base, zero)) != NULL) - goto label_return; - if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL) - goto label_return; - /* "secondary" dss. */ - if (config_dss && dss_prec == dss_prec_secondary) { - if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size, - alignment, base, zero)) != NULL) - goto label_return; - if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL) - goto label_return; - } - - /* All strategies for allocation failed. */ - ret = NULL; -label_return: - if (ret != NULL) { - if (config_ivsalloc && base == false) { - if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) { - chunk_dealloc(ret, size, true); - return (NULL); - } - } - if (config_stats || config_prof) { - bool gdump; - malloc_mutex_lock(&chunks_mtx); - if (config_stats) - stats_chunks.nchunks += (size / chunksize); - stats_chunks.curchunks += (size / chunksize); - if (stats_chunks.curchunks > stats_chunks.highchunks) { - stats_chunks.highchunks = - stats_chunks.curchunks; - if (config_prof) - gdump = true; - } else if (config_prof) - gdump = false; - malloc_mutex_unlock(&chunks_mtx); - if (config_prof && opt_prof && opt_prof_gdump && gdump) - prof_gdump(); - } - if (config_valgrind) - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - } - assert(CHUNK_ADDR2BASE(ret) == ret); - return (ret); -} - -static void -chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk, - size_t size) -{ - bool unzeroed; - extent_node_t *xnode, *node, *prev, *xprev, key; - - unzeroed = pages_purge(chunk, size); - VALGRIND_MAKE_MEM_NOACCESS(chunk, size); - - /* - * Allocate a node before acquiring chunks_mtx even though it might not - * be needed, because base_node_alloc() may cause a new base chunk to - * be allocated, which could cause deadlock if chunks_mtx were already - * held. - */ - xnode = base_node_alloc(); - /* Use xprev to implement conditional deferred deallocation of prev. */ - xprev = NULL; - - malloc_mutex_lock(&chunks_mtx); - key.addr = (void *)((uintptr_t)chunk + size); - node = extent_tree_ad_nsearch(chunks_ad, &key); - /* Try to coalesce forward. */ - if (node != NULL && node->addr == key.addr) { - /* - * Coalesce chunk with the following address range. This does - * not change the position within chunks_ad, so only - * remove/insert from/into chunks_szad. - */ - extent_tree_szad_remove(chunks_szad, node); - node->addr = chunk; - node->size += size; - node->zeroed = (node->zeroed && (unzeroed == false)); - extent_tree_szad_insert(chunks_szad, node); - } else { - /* Coalescing forward failed, so insert a new node. */ - if (xnode == NULL) { - /* - * base_node_alloc() failed, which is an exceedingly - * unlikely failure. Leak chunk; its pages have - * already been purged, so this is only a virtual - * memory leak. - */ - goto label_return; - } - node = xnode; - xnode = NULL; /* Prevent deallocation below. */ - node->addr = chunk; - node->size = size; - node->zeroed = (unzeroed == false); - extent_tree_ad_insert(chunks_ad, node); - extent_tree_szad_insert(chunks_szad, node); - } - - /* Try to coalesce backward. 
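Both coalescing directions in the removed chunk_record() hinge on the same adjacency test: two free ranges merge only when one ends exactly where the other begins, and the merged range stays zeroed only if both halves were. That test in isolation:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct range { void *addr; size_t size; bool zeroed; };

static bool
try_merge_forward(struct range *r, const struct range *next) {
        if ((uintptr_t)r->addr + r->size != (uintptr_t)next->addr) {
                return false;   /* Not adjacent; leave both ranges alone. */
        }
        r->size += next->size;
        r->zeroed = r->zeroed && next->zeroed;
        return true;
}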
*/ - prev = extent_tree_ad_prev(chunks_ad, node); - if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == - chunk) { - /* - * Coalesce chunk with the previous address range. This does - * not change the position within chunks_ad, so only - * remove/insert node from/into chunks_szad. - */ - extent_tree_szad_remove(chunks_szad, prev); - extent_tree_ad_remove(chunks_ad, prev); - - extent_tree_szad_remove(chunks_szad, node); - node->addr = prev->addr; - node->size += prev->size; - node->zeroed = (node->zeroed && prev->zeroed); - extent_tree_szad_insert(chunks_szad, node); - - xprev = prev; - } - -label_return: - malloc_mutex_unlock(&chunks_mtx); - /* - * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to - * avoid potential deadlock. - */ - if (xnode != NULL) - base_node_dealloc(xnode); - if (xprev != NULL) - base_node_dealloc(xprev); -} - -void -chunk_unmap(void *chunk, size_t size) -{ - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - if (config_dss && chunk_in_dss(chunk)) - chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size); - else if (chunk_dealloc_mmap(chunk, size)) - chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size); -} - -void -chunk_dealloc(void *chunk, size_t size, bool unmap) -{ - - assert(chunk != NULL); - assert(CHUNK_ADDR2BASE(chunk) == chunk); - assert(size != 0); - assert((size & chunksize_mask) == 0); - - if (config_ivsalloc) - rtree_set(chunks_rtree, (uintptr_t)chunk, 0); - if (config_stats || config_prof) { - malloc_mutex_lock(&chunks_mtx); - assert(stats_chunks.curchunks >= (size / chunksize)); - stats_chunks.curchunks -= (size / chunksize); - malloc_mutex_unlock(&chunks_mtx); - } - - if (unmap) - chunk_unmap(chunk, size); -} - -bool -chunk_boot(void) -{ - - /* Set variables according to the value of opt_lg_chunk. */ - chunksize = (ZU(1) << opt_lg_chunk); - assert(chunksize >= PAGE); - chunksize_mask = chunksize - 1; - chunk_npages = (chunksize >> LG_PAGE); - - if (config_stats || config_prof) { - if (malloc_mutex_init(&chunks_mtx)) - return (true); - memset(&stats_chunks, 0, sizeof(chunk_stats_t)); - } - if (config_dss && chunk_dss_boot()) - return (true); - extent_tree_szad_new(&chunks_szad_mmap); - extent_tree_ad_new(&chunks_ad_mmap); - extent_tree_szad_new(&chunks_szad_dss); - extent_tree_ad_new(&chunks_ad_dss); - if (config_ivsalloc) { - chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) - - opt_lg_chunk, base_alloc, NULL); - if (chunks_rtree == NULL) - return (true); - } - - return (false); -} - -void -chunk_prefork(void) -{ - - malloc_mutex_prefork(&chunks_mtx); - if (config_ivsalloc) - rtree_prefork(chunks_rtree); - chunk_dss_prefork(); -} - -void -chunk_postfork_parent(void) -{ - - chunk_dss_postfork_parent(); - if (config_ivsalloc) - rtree_postfork_parent(chunks_rtree); - malloc_mutex_postfork_parent(&chunks_mtx); -} - -void -chunk_postfork_child(void) -{ - - chunk_dss_postfork_child(); - if (config_ivsalloc) - rtree_postfork_child(chunks_rtree); - malloc_mutex_postfork_child(&chunks_mtx); -} diff --git a/dep/jemalloc/src/chunk_dss.c b/dep/jemalloc/src/chunk_dss.c deleted file mode 100644 index 510bb8bee85..00000000000 --- a/dep/jemalloc/src/chunk_dss.c +++ /dev/null @@ -1,198 +0,0 @@ -#define JEMALLOC_CHUNK_DSS_C_ -#include "jemalloc/internal/jemalloc_internal.h" -/******************************************************************************/ -/* Data. 
*/ - -const char *dss_prec_names[] = { - "disabled", - "primary", - "secondary", - "N/A" -}; - -/* Current dss precedence default, used when creating new arenas. */ -static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT; - -/* - * Protects sbrk() calls. This avoids malloc races among threads, though it - * does not protect against races with threads that call sbrk() directly. - */ -static malloc_mutex_t dss_mtx; - -/* Base address of the DSS. */ -static void *dss_base; -/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */ -static void *dss_prev; -/* Current upper limit on DSS addresses. */ -static void *dss_max; - -/******************************************************************************/ - -static void * -chunk_dss_sbrk(intptr_t increment) -{ - -#ifdef JEMALLOC_HAVE_SBRK - return (sbrk(increment)); -#else - not_implemented(); - return (NULL); -#endif -} - -dss_prec_t -chunk_dss_prec_get(void) -{ - dss_prec_t ret; - - if (config_dss == false) - return (dss_prec_disabled); - malloc_mutex_lock(&dss_mtx); - ret = dss_prec_default; - malloc_mutex_unlock(&dss_mtx); - return (ret); -} - -bool -chunk_dss_prec_set(dss_prec_t dss_prec) -{ - - if (config_dss == false) - return (true); - malloc_mutex_lock(&dss_mtx); - dss_prec_default = dss_prec; - malloc_mutex_unlock(&dss_mtx); - return (false); -} - -void * -chunk_alloc_dss(size_t size, size_t alignment, bool *zero) -{ - void *ret; - - cassert(config_dss); - assert(size > 0 && (size & chunksize_mask) == 0); - assert(alignment > 0 && (alignment & chunksize_mask) == 0); - - /* - * sbrk() uses a signed increment argument, so take care not to - * interpret a huge allocation request as a negative increment. - */ - if ((intptr_t)size < 0) - return (NULL); - - malloc_mutex_lock(&dss_mtx); - if (dss_prev != (void *)-1) { - size_t gap_size, cpad_size; - void *cpad, *dss_next; - intptr_t incr; - - /* - * The loop is necessary to recover from races with other - * threads that are using the DSS for something other than - * malloc. - */ - do { - /* Get the current end of the DSS. */ - dss_max = chunk_dss_sbrk(0); - /* - * Calculate how much padding is necessary to - * chunk-align the end of the DSS. - */ - gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) & - chunksize_mask; - /* - * Compute how much chunk-aligned pad space (if any) is - * necessary to satisfy alignment. This space can be - * recycled for later use. - */ - cpad = (void *)((uintptr_t)dss_max + gap_size); - ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max, - alignment); - cpad_size = (uintptr_t)ret - (uintptr_t)cpad; - dss_next = (void *)((uintptr_t)ret + size); - if ((uintptr_t)ret < (uintptr_t)dss_max || - (uintptr_t)dss_next < (uintptr_t)dss_max) { - /* Wrap-around. */ - malloc_mutex_unlock(&dss_mtx); - return (NULL); - } - incr = gap_size + cpad_size + size; - dss_prev = chunk_dss_sbrk(incr); - if (dss_prev == dss_max) { - /* Success. 
*/ - dss_max = dss_next; - malloc_mutex_unlock(&dss_mtx); - if (cpad_size != 0) - chunk_unmap(cpad, cpad_size); - if (*zero) { - VALGRIND_MAKE_MEM_UNDEFINED(ret, size); - memset(ret, 0, size); - } - return (ret); - } - } while (dss_prev != (void *)-1); - } - malloc_mutex_unlock(&dss_mtx); - - return (NULL); -} - -bool -chunk_in_dss(void *chunk) -{ - bool ret; - - cassert(config_dss); - - malloc_mutex_lock(&dss_mtx); - if ((uintptr_t)chunk >= (uintptr_t)dss_base - && (uintptr_t)chunk < (uintptr_t)dss_max) - ret = true; - else - ret = false; - malloc_mutex_unlock(&dss_mtx); - - return (ret); -} - -bool -chunk_dss_boot(void) -{ - - cassert(config_dss); - - if (malloc_mutex_init(&dss_mtx)) - return (true); - dss_base = chunk_dss_sbrk(0); - dss_prev = dss_base; - dss_max = dss_base; - - return (false); -} - -void -chunk_dss_prefork(void) -{ - - if (config_dss) - malloc_mutex_prefork(&dss_mtx); -} - -void -chunk_dss_postfork_parent(void) -{ - - if (config_dss) - malloc_mutex_postfork_parent(&dss_mtx); -} - -void -chunk_dss_postfork_child(void) -{ - - if (config_dss) - malloc_mutex_postfork_child(&dss_mtx); -} - -/******************************************************************************/ diff --git a/dep/jemalloc/src/chunk_mmap.c b/dep/jemalloc/src/chunk_mmap.c deleted file mode 100644 index 2056d793f05..00000000000 --- a/dep/jemalloc/src/chunk_mmap.c +++ /dev/null @@ -1,210 +0,0 @@ -#define JEMALLOC_CHUNK_MMAP_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. */ - -static void *pages_map(void *addr, size_t size); -static void pages_unmap(void *addr, size_t size); -static void *chunk_alloc_mmap_slow(size_t size, size_t alignment, - bool *zero); - -/******************************************************************************/ - -static void * -pages_map(void *addr, size_t size) -{ - void *ret; - - assert(size != 0); - -#ifdef _WIN32 - /* - * If VirtualAlloc can't allocate at the given address when one is - * given, it fails and returns NULL. - */ - ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, - PAGE_READWRITE); -#else - /* - * We don't use MAP_FIXED here, because it can cause the *replacement* - * of existing mappings, and we only want to create new mappings. - */ - ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, - -1, 0); - assert(ret != NULL); - - if (ret == MAP_FAILED) - ret = NULL; - else if (addr != NULL && ret != addr) { - /* - * We succeeded in mapping memory, but not in the right place. 
- */ - if (munmap(ret, size) == -1) { - char buf[BUFERROR_BUF]; - - buferror(get_errno(), buf, sizeof(buf)); - malloc_printf("<jemalloc: Error in munmap(): %s\n", - buf); - if (opt_abort) - abort(); - } - ret = NULL; - } -#endif - assert(ret == NULL || (addr == NULL && ret != addr) - || (addr != NULL && ret == addr)); - return (ret); -} - -static void -pages_unmap(void *addr, size_t size) -{ - -#ifdef _WIN32 - if (VirtualFree(addr, 0, MEM_RELEASE) == 0) -#else - if (munmap(addr, size) == -1) -#endif - { - char buf[BUFERROR_BUF]; - - buferror(get_errno(), buf, sizeof(buf)); - malloc_printf("<jemalloc>: Error in " -#ifdef _WIN32 - "VirtualFree" -#else - "munmap" -#endif - "(): %s\n", buf); - if (opt_abort) - abort(); - } -} - -static void * -pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size) -{ - void *ret = (void *)((uintptr_t)addr + leadsize); - - assert(alloc_size >= leadsize + size); -#ifdef _WIN32 - { - void *new_addr; - - pages_unmap(addr, alloc_size); - new_addr = pages_map(ret, size); - if (new_addr == ret) - return (ret); - if (new_addr) - pages_unmap(new_addr, size); - return (NULL); - } -#else - { - size_t trailsize = alloc_size - leadsize - size; - - if (leadsize != 0) - pages_unmap(addr, leadsize); - if (trailsize != 0) - pages_unmap((void *)((uintptr_t)ret + size), trailsize); - return (ret); - } -#endif -} - -bool -pages_purge(void *addr, size_t length) -{ - bool unzeroed; - -#ifdef _WIN32 - VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE); - unzeroed = true; -#else -# ifdef JEMALLOC_PURGE_MADVISE_DONTNEED -# define JEMALLOC_MADV_PURGE MADV_DONTNEED -# define JEMALLOC_MADV_ZEROS true -# elif defined(JEMALLOC_PURGE_MADVISE_FREE) -# define JEMALLOC_MADV_PURGE MADV_FREE -# define JEMALLOC_MADV_ZEROS false -# else -# error "No method defined for purging unused dirty pages." -# endif - int err = madvise(addr, length, JEMALLOC_MADV_PURGE); - unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0); -# undef JEMALLOC_MADV_PURGE -# undef JEMALLOC_MADV_ZEROS -#endif - return (unzeroed); -} - -static void * -chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero) -{ - void *ret, *pages; - size_t alloc_size, leadsize; - - alloc_size = size + alignment - PAGE; - /* Beware size_t wrap-around. */ - if (alloc_size < size) - return (NULL); - do { - pages = pages_map(NULL, alloc_size); - if (pages == NULL) - return (NULL); - leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) - - (uintptr_t)pages; - ret = pages_trim(pages, alloc_size, leadsize, size); - } while (ret == NULL); - - assert(ret != NULL); - *zero = true; - return (ret); -} - -void * -chunk_alloc_mmap(size_t size, size_t alignment, bool *zero) -{ - void *ret; - size_t offset; - - /* - * Ideally, there would be a way to specify alignment to mmap() (like - * NetBSD has), but in the absence of such a feature, we have to work - * hard to efficiently create aligned mappings. The reliable, but - * slow method is to create a mapping that is over-sized, then trim the - * excess. However, that always results in one or two calls to - * pages_unmap(). - * - * Optimistically try mapping precisely the right amount before falling - * back to the slow method, with the expectation that the optimistic - * approach works most of the time. 
- */ - - assert(alignment != 0); - assert((alignment & chunksize_mask) == 0); - - ret = pages_map(NULL, size); - if (ret == NULL) - return (NULL); - offset = ALIGNMENT_ADDR2OFFSET(ret, alignment); - if (offset != 0) { - pages_unmap(ret, size); - return (chunk_alloc_mmap_slow(size, alignment, zero)); - } - - assert(ret != NULL); - *zero = true; - return (ret); -} - -bool -chunk_dealloc_mmap(void *chunk, size_t size) -{ - - if (config_munmap) - pages_unmap(chunk, size); - - return (config_munmap == false); -} diff --git a/dep/jemalloc/src/ckh.c b/dep/jemalloc/src/ckh.c index 04c52966193..e95e0a3ed59 100644 --- a/dep/jemalloc/src/ckh.c +++ b/dep/jemalloc/src/ckh.c @@ -34,14 +34,24 @@ * respectively. * ******************************************************************************/ -#define JEMALLOC_CKH_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_CKH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/ckh.h" + +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/prng.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Function prototypes for non-inline static functions. */ -static bool ckh_grow(ckh_t *ckh); -static void ckh_shrink(ckh_t *ckh); +static bool ckh_grow(tsd_t *tsd, ckh_t *ckh); +static void ckh_shrink(tsd_t *tsd, ckh_t *ckh); /******************************************************************************/ @@ -49,27 +59,26 @@ static void ckh_shrink(ckh_t *ckh); * Search bucket for key and return the cell number if found; SIZE_T_MAX * otherwise. */ -JEMALLOC_INLINE_C size_t -ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) -{ +static size_t +ckh_bucket_search(ckh_t *ckh, size_t bucket, const void *key) { ckhc_t *cell; unsigned i; for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; - if (cell->key != NULL && ckh->keycomp(key, cell->key)) - return ((bucket << LG_CKH_BUCKET_CELLS) + i); + if (cell->key != NULL && ckh->keycomp(key, cell->key)) { + return (bucket << LG_CKH_BUCKET_CELLS) + i; + } } - return (SIZE_T_MAX); + return SIZE_T_MAX; } /* * Search table for key and return cell number if found; SIZE_T_MAX otherwise. */ -JEMALLOC_INLINE_C size_t -ckh_isearch(ckh_t *ckh, const void *key) -{ +static size_t +ckh_isearch(ckh_t *ckh, const void *key) { size_t hashes[2], bucket, cell; assert(ckh != NULL); @@ -79,19 +88,19 @@ ckh_isearch(ckh_t *ckh, const void *key) /* Search primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - if (cell != SIZE_T_MAX) - return (cell); + if (cell != SIZE_T_MAX) { + return cell; + } /* Search secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); cell = ckh_bucket_search(ckh, bucket, key); - return (cell); + return cell; } -JEMALLOC_INLINE_C bool +static bool ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, - const void *data) -{ + const void *data) { ckhc_t *cell; unsigned offset, i; @@ -99,7 +108,8 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * Cycle through the cells in the bucket, starting at a random position. * The randomness avoids worst-case search overhead as buckets fill up. 
*/ - prng32(offset, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + offset = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); for (i = 0; i < (ZU(1) << LG_CKH_BUCKET_CELLS); i++) { cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + ((i + offset) & ((ZU(1) << LG_CKH_BUCKET_CELLS) - 1))]; @@ -107,11 +117,11 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, cell->key = key; cell->data = data; ckh->count++; - return (false); + return false; } } - return (true); + return true; } /* @@ -120,10 +130,9 @@ ckh_try_bucket_insert(ckh_t *ckh, size_t bucket, const void *key, * eviction/relocation procedure until either success or detection of an * eviction/relocation bucket cycle. */ -JEMALLOC_INLINE_C bool +static bool ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, - void const **argdata) -{ + void const **argdata) { const void *key, *data, *tkey, *tdata; ckhc_t *cell; size_t hashes[2], bucket, tbucket; @@ -141,7 +150,8 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, * were an item for which both hashes indicated the same * bucket. */ - prng32(i, LG_CKH_BUCKET_CELLS, ckh->prng_state, CKH_A, CKH_C); + i = (unsigned)prng_lg_range_u64(&ckh->prng_state, + LG_CKH_BUCKET_CELLS); cell = &ckh->tab[(bucket << LG_CKH_BUCKET_CELLS) + i]; assert(cell->key != NULL); @@ -181,18 +191,18 @@ ckh_evict_reloc_insert(ckh_t *ckh, size_t argbucket, void const **argkey, if (tbucket == argbucket) { *argkey = key; *argdata = data; - return (true); + return true; } bucket = tbucket; - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } } } -JEMALLOC_INLINE_C bool -ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) -{ +static bool +ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) { size_t hashes[2], bucket; const void *key = *argkey; const void *data = *argdata; @@ -201,27 +211,28 @@ ckh_try_insert(ckh_t *ckh, void const**argkey, void const**argdata) /* Try to insert in primary bucket. */ bucket = hashes[0] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } /* Try to insert in secondary bucket. */ bucket = hashes[1] & ((ZU(1) << ckh->lg_curbuckets) - 1); - if (ckh_try_bucket_insert(ckh, bucket, key, data) == false) - return (false); + if (!ckh_try_bucket_insert(ckh, bucket, key, data)) { + return false; + } /* * Try to find a place for this item via iterative eviction/relocation. */ - return (ckh_evict_reloc_insert(ckh, bucket, argkey, argdata)); + return ckh_evict_reloc_insert(ckh, bucket, argkey, argdata); } /* * Try to rebuild the hash table from scratch by inserting all items from the * old table into the new. 
*/ -JEMALLOC_INLINE_C bool -ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) -{ +static bool +ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) { size_t count, i, nins; const void *key, *data; @@ -233,22 +244,20 @@ ckh_rebuild(ckh_t *ckh, ckhc_t *aTab) data = aTab[i].data; if (ckh_try_insert(ckh, &key, &data)) { ckh->count = count; - return (true); + return true; } nins++; } } - return (false); + return false; } static bool -ckh_grow(ckh_t *ckh) -{ +ckh_grow(tsd_t *tsd, ckh_t *ckh) { bool ret; ckhc_t *tab, *ttab; - size_t lg_curcells; - unsigned lg_prevbuckets; + unsigned lg_prevbuckets, lg_curcells; #ifdef CKH_COUNT ckh->ngrows++; @@ -265,12 +274,13 @@ ckh_grow(ckh_t *ckh) size_t usize; lg_curcells++; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) { + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } - tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, + true, NULL, true, arena_ichoose(tsd, NULL)); if (tab == NULL) { ret = true; goto label_return; @@ -281,28 +291,27 @@ ckh_grow(ckh_t *ckh) tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - if (ckh_rebuild(ckh, tab) == false) { - idalloc(tab); + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); break; } /* Rebuilding failed, so back out partially rebuilt table. */ - idalloc(ckh->tab); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; } ret = false; label_return: - return (ret); + return ret; } static void -ckh_shrink(ckh_t *ckh) -{ +ckh_shrink(tsd_t *tsd, ckh_t *ckh) { ckhc_t *tab, *ttab; - size_t lg_curcells, usize; - unsigned lg_prevbuckets; + size_t usize; + unsigned lg_prevbuckets, lg_curcells; /* * It is possible (though unlikely, given well behaved hashes) that the @@ -310,10 +319,12 @@ ckh_shrink(ckh_t *ckh) */ lg_prevbuckets = ckh->lg_curbuckets; lg_curcells = ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS - 1; - usize = sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); - if (usize == 0) + usize = sz_sa2u(sizeof(ckhc_t) << lg_curcells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { return; - tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + } + tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, NULL, + true, arena_ichoose(tsd, NULL)); if (tab == NULL) { /* * An OOM error isn't worth propagating, since it doesn't @@ -327,8 +338,8 @@ ckh_shrink(ckh_t *ckh) tab = ttab; ckh->lg_curbuckets = lg_curcells - LG_CKH_BUCKET_CELLS; - if (ckh_rebuild(ckh, tab) == false) { - idalloc(tab); + if (!ckh_rebuild(ckh, tab)) { + idalloctm(tsd_tsdn(tsd), tab, NULL, NULL, true, true); #ifdef CKH_COUNT ckh->nshrinks++; #endif @@ -336,7 +347,7 @@ ckh_shrink(ckh_t *ckh) } /* Rebuilding failed, so back out partially rebuilt table. 
*/ - idalloc(ckh->tab); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); ckh->tab = tab; ckh->lg_curbuckets = lg_prevbuckets; #ifdef CKH_COUNT @@ -345,8 +356,8 @@ ckh_shrink(ckh_t *ckh) } bool -ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) -{ +ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, + ckh_keycomp_t *keycomp) { bool ret; size_t mincells, usize; unsigned lg_mincells; @@ -366,29 +377,31 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) ckh->count = 0; /* - * Find the minimum power of 2 that is large enough to fit aBaseCount + * Find the minimum power of 2 that is large enough to fit minitems * entries. We are using (2+,2) cuckoo hashing, which has an expected * maximum load factor of at least ~0.86, so 0.75 is a conservative load - * factor that will typically allow 2^aLgMinItems to fit without ever + * factor that will typically allow mincells items to fit without ever * growing the table. */ assert(LG_CKH_BUCKET_CELLS > 0); mincells = ((minitems + (3 - (minitems % 3))) / 3) << 2; for (lg_mincells = LG_CKH_BUCKET_CELLS; (ZU(1) << lg_mincells) < mincells; - lg_mincells++) - ; /* Do nothing. */ + lg_mincells++) { + /* Do nothing. */ + } ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS; ckh->hash = hash; ckh->keycomp = keycomp; - usize = sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); - if (usize == 0) { + usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { ret = true; goto label_return; } - ckh->tab = (ckhc_t *)ipalloc(usize, CACHELINE, true); + ckh->tab = (ckhc_t *)ipallocztm(tsd_tsdn(tsd), usize, CACHELINE, true, + NULL, true, arena_ichoose(tsd, NULL)); if (ckh->tab == NULL) { ret = true; goto label_return; @@ -396,20 +409,18 @@ ckh_new(ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp) ret = false; label_return: - return (ret); + return ret; } void -ckh_delete(ckh_t *ckh) -{ - +ckh_delete(tsd_t *tsd, ckh_t *ckh) { assert(ckh != NULL); #ifdef CKH_VERBOSE malloc_printf( - "%s(%p): ngrows: %"PRIu64", nshrinks: %"PRIu64"," - " nshrinkfails: %"PRIu64", ninserts: %"PRIu64"," - " nrelocs: %"PRIu64"\n", __func__, ckh, + "%s(%p): ngrows: %"FMTu64", nshrinks: %"FMTu64"," + " nshrinkfails: %"FMTu64", ninserts: %"FMTu64"," + " nrelocs: %"FMTu64"\n", __func__, ckh, (unsigned long long)ckh->ngrows, (unsigned long long)ckh->nshrinks, (unsigned long long)ckh->nshrinkfails, @@ -417,43 +428,42 @@ ckh_delete(ckh_t *ckh) (unsigned long long)ckh->nrelocs); #endif - idalloc(ckh->tab); - if (config_debug) - memset(ckh, 0x5a, sizeof(ckh_t)); + idalloctm(tsd_tsdn(tsd), ckh->tab, NULL, NULL, true, true); + if (config_debug) { + memset(ckh, JEMALLOC_FREE_JUNK, sizeof(ckh_t)); + } } size_t -ckh_count(ckh_t *ckh) -{ - +ckh_count(ckh_t *ckh) { assert(ckh != NULL); - return (ckh->count); + return ckh->count; } bool -ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) -{ +ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data) { size_t i, ncells; for (i = *tabind, ncells = (ZU(1) << (ckh->lg_curbuckets + LG_CKH_BUCKET_CELLS)); i < ncells; i++) { if (ckh->tab[i].key != NULL) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[i].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[i].data; + } *tabind = i + 1; - return (false); + return false; } } - return (true); + return true; } bool -ckh_insert(ckh_t *ckh, const void *key, 
const void *data) -{ +ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data) { bool ret; assert(ckh != NULL); @@ -464,7 +474,7 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data) #endif while (ckh_try_insert(ckh, &key, &data)) { - if (ckh_grow(ckh)) { + if (ckh_grow(tsd, ckh)) { ret = true; goto label_return; } @@ -472,22 +482,24 @@ ckh_insert(ckh_t *ckh, const void *key, const void *data) ret = false; label_return: - return (ret); + return ret; } bool -ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ +ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, + void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; + } ckh->tab[cell].key = NULL; ckh->tab[cell].data = NULL; /* Not necessary. */ @@ -497,54 +509,50 @@ ckh_remove(ckh_t *ckh, const void *searchkey, void **key, void **data) + LG_CKH_BUCKET_CELLS - 2)) && ckh->lg_curbuckets > ckh->lg_minbuckets) { /* Ignore error due to OOM. */ - ckh_shrink(ckh); + ckh_shrink(tsd, ckh); } - return (false); + return false; } - return (true); + return true; } bool -ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) -{ +ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data) { size_t cell; assert(ckh != NULL); cell = ckh_isearch(ckh, searchkey); if (cell != SIZE_T_MAX) { - if (key != NULL) + if (key != NULL) { *key = (void *)ckh->tab[cell].key; - if (data != NULL) + } + if (data != NULL) { *data = (void *)ckh->tab[cell].data; - return (false); + } + return false; } - return (true); + return true; } void -ckh_string_hash(const void *key, size_t r_hash[2]) -{ - +ckh_string_hash(const void *key, size_t r_hash[2]) { hash(key, strlen((const char *)key), 0x94122f33U, r_hash); } bool -ckh_string_keycomp(const void *k1, const void *k2) -{ - - assert(k1 != NULL); - assert(k2 != NULL); +ckh_string_keycomp(const void *k1, const void *k2) { + assert(k1 != NULL); + assert(k2 != NULL); - return (strcmp((char *)k1, (char *)k2) ? false : true); + return !strcmp((char *)k1, (char *)k2); } void -ckh_pointer_hash(const void *key, size_t r_hash[2]) -{ +ckh_pointer_hash(const void *key, size_t r_hash[2]) { union { const void *v; size_t i; @@ -556,8 +564,6 @@ ckh_pointer_hash(const void *key, size_t r_hash[2]) } bool -ckh_pointer_keycomp(const void *k1, const void *k2) -{ - - return ((k1 == k2) ? true : false); +ckh_pointer_keycomp(const void *k1, const void *k2) { + return (k1 == k2); } diff --git a/dep/jemalloc/src/ctl.c b/dep/jemalloc/src/ctl.c index cc2c5aef570..36bc8fb5b75 100644 --- a/dep/jemalloc/src/ctl.c +++ b/dep/jemalloc/src/ctl.c @@ -1,146 +1,146 @@ -#define JEMALLOC_CTL_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_CTL_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/nstime.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. 
*/ /* * ctl_mtx protects the following: - * - ctl_stats.* - * - opt_prof_active + * - ctl_stats->* */ static malloc_mutex_t ctl_mtx; static bool ctl_initialized; -static uint64_t ctl_epoch; -static ctl_stats_t ctl_stats; +static ctl_stats_t *ctl_stats; +static ctl_arenas_t *ctl_arenas; /******************************************************************************/ /* Helpers for named and indexed nodes. */ -static inline const ctl_named_node_t * -ctl_named_node(const ctl_node_t *node) -{ - +static const ctl_named_node_t * +ctl_named_node(const ctl_node_t *node) { return ((node->named) ? (const ctl_named_node_t *)node : NULL); } -static inline const ctl_named_node_t * -ctl_named_children(const ctl_named_node_t *node, int index) -{ +static const ctl_named_node_t * +ctl_named_children(const ctl_named_node_t *node, size_t index) { const ctl_named_node_t *children = ctl_named_node(node->children); return (children ? &children[index] : NULL); } -static inline const ctl_indexed_node_t * -ctl_indexed_node(const ctl_node_t *node) -{ - - return ((node->named == false) ? (const ctl_indexed_node_t *)node : - NULL); +static const ctl_indexed_node_t * +ctl_indexed_node(const ctl_node_t *node) { + return (!node->named ? (const ctl_indexed_node_t *)node : NULL); } /******************************************************************************/ /* Function prototypes for non-inline static functions. */ -#define CTL_PROTO(n) \ -static int n##_ctl(const size_t *mib, size_t miblen, void *oldp, \ - size_t *oldlenp, void *newp, size_t newlen); - -#define INDEX_PROTO(n) \ -static const ctl_named_node_t *n##_index(const size_t *mib, \ - size_t miblen, size_t i); - -static bool ctl_arena_init(ctl_arena_stats_t *astats); -static void ctl_arena_clear(ctl_arena_stats_t *astats); -static void ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, - arena_t *arena); -static void ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, - ctl_arena_stats_t *astats); -static void ctl_arena_refresh(arena_t *arena, unsigned i); -static bool ctl_grow(void); -static void ctl_refresh(void); -static bool ctl_init(void); -static int ctl_lookup(const char *name, ctl_node_t const **nodesp, - size_t *mibp, size_t *depthp); +#define CTL_PROTO(n) \ +static int n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \ + void *oldp, size_t *oldlenp, void *newp, size_t newlen); + +#define INDEX_PROTO(n) \ +static const ctl_named_node_t *n##_index(tsdn_t *tsdn, \ + const size_t *mib, size_t miblen, size_t i); CTL_PROTO(version) CTL_PROTO(epoch) +CTL_PROTO(background_thread) CTL_PROTO(thread_tcache_enabled) CTL_PROTO(thread_tcache_flush) +CTL_PROTO(thread_prof_name) +CTL_PROTO(thread_prof_active) CTL_PROTO(thread_arena) CTL_PROTO(thread_allocated) CTL_PROTO(thread_allocatedp) CTL_PROTO(thread_deallocated) CTL_PROTO(thread_deallocatedp) +CTL_PROTO(config_cache_oblivious) CTL_PROTO(config_debug) -CTL_PROTO(config_dss) CTL_PROTO(config_fill) CTL_PROTO(config_lazy_lock) -CTL_PROTO(config_mremap) -CTL_PROTO(config_munmap) +CTL_PROTO(config_malloc_conf) CTL_PROTO(config_prof) CTL_PROTO(config_prof_libgcc) CTL_PROTO(config_prof_libunwind) CTL_PROTO(config_stats) -CTL_PROTO(config_tcache) -CTL_PROTO(config_tls) +CTL_PROTO(config_thp) CTL_PROTO(config_utrace) -CTL_PROTO(config_valgrind) CTL_PROTO(config_xmalloc) CTL_PROTO(opt_abort) +CTL_PROTO(opt_abort_conf) +CTL_PROTO(opt_retain) CTL_PROTO(opt_dss) -CTL_PROTO(opt_lg_chunk) CTL_PROTO(opt_narenas) -CTL_PROTO(opt_lg_dirty_mult) +CTL_PROTO(opt_percpu_arena) +CTL_PROTO(opt_background_thread) 
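The new CTL_PROTO entries above (background_thread, opt_background_thread) register jemalloc 5's background purging thread in the mallctl namespace. A minimal usage sketch, assuming a program linked against this jemalloc and the standard <jemalloc/jemalloc.h> header; the error message is illustrative:

    #include <stdbool.h>
    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* Ask jemalloc to purge dirty pages from a background thread
         * instead of on the application's allocation path. */
        bool enable = true;
        if (mallctl("background_thread", NULL, NULL, &enable,
            sizeof(enable)) != 0) {
            fprintf(stderr, "background_thread unavailable in this build\n");
        }
        return 0;
    }
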
+CTL_PROTO(opt_dirty_decay_ms) +CTL_PROTO(opt_muzzy_decay_ms) CTL_PROTO(opt_stats_print) +CTL_PROTO(opt_stats_print_opts) CTL_PROTO(opt_junk) CTL_PROTO(opt_zero) -CTL_PROTO(opt_quarantine) -CTL_PROTO(opt_redzone) CTL_PROTO(opt_utrace) -CTL_PROTO(opt_valgrind) CTL_PROTO(opt_xmalloc) CTL_PROTO(opt_tcache) CTL_PROTO(opt_lg_tcache_max) CTL_PROTO(opt_prof) CTL_PROTO(opt_prof_prefix) CTL_PROTO(opt_prof_active) +CTL_PROTO(opt_prof_thread_active_init) CTL_PROTO(opt_lg_prof_sample) CTL_PROTO(opt_lg_prof_interval) CTL_PROTO(opt_prof_gdump) CTL_PROTO(opt_prof_final) CTL_PROTO(opt_prof_leak) CTL_PROTO(opt_prof_accum) +CTL_PROTO(tcache_create) +CTL_PROTO(tcache_flush) +CTL_PROTO(tcache_destroy) +CTL_PROTO(arena_i_initialized) +CTL_PROTO(arena_i_decay) CTL_PROTO(arena_i_purge) -static void arena_purge(unsigned arena_ind); +CTL_PROTO(arena_i_reset) +CTL_PROTO(arena_i_destroy) CTL_PROTO(arena_i_dss) +CTL_PROTO(arena_i_dirty_decay_ms) +CTL_PROTO(arena_i_muzzy_decay_ms) +CTL_PROTO(arena_i_extent_hooks) INDEX_PROTO(arena_i) CTL_PROTO(arenas_bin_i_size) CTL_PROTO(arenas_bin_i_nregs) -CTL_PROTO(arenas_bin_i_run_size) +CTL_PROTO(arenas_bin_i_slab_size) INDEX_PROTO(arenas_bin_i) -CTL_PROTO(arenas_lrun_i_size) -INDEX_PROTO(arenas_lrun_i) +CTL_PROTO(arenas_lextent_i_size) +INDEX_PROTO(arenas_lextent_i) CTL_PROTO(arenas_narenas) -CTL_PROTO(arenas_initialized) +CTL_PROTO(arenas_dirty_decay_ms) +CTL_PROTO(arenas_muzzy_decay_ms) CTL_PROTO(arenas_quantum) CTL_PROTO(arenas_page) CTL_PROTO(arenas_tcache_max) CTL_PROTO(arenas_nbins) CTL_PROTO(arenas_nhbins) -CTL_PROTO(arenas_nlruns) -CTL_PROTO(arenas_purge) -CTL_PROTO(arenas_extend) +CTL_PROTO(arenas_nlextents) +CTL_PROTO(arenas_create) +CTL_PROTO(prof_thread_active_init) CTL_PROTO(prof_active) CTL_PROTO(prof_dump) +CTL_PROTO(prof_gdump) +CTL_PROTO(prof_reset) CTL_PROTO(prof_interval) -CTL_PROTO(stats_chunks_current) -CTL_PROTO(stats_chunks_total) -CTL_PROTO(stats_chunks_high) -CTL_PROTO(stats_huge_allocated) -CTL_PROTO(stats_huge_nmalloc) -CTL_PROTO(stats_huge_ndalloc) +CTL_PROTO(lg_prof_sample) CTL_PROTO(stats_arenas_i_small_allocated) CTL_PROTO(stats_arenas_i_small_nmalloc) CTL_PROTO(stats_arenas_i_small_ndalloc) @@ -149,119 +149,177 @@ CTL_PROTO(stats_arenas_i_large_allocated) CTL_PROTO(stats_arenas_i_large_nmalloc) CTL_PROTO(stats_arenas_i_large_ndalloc) CTL_PROTO(stats_arenas_i_large_nrequests) -CTL_PROTO(stats_arenas_i_bins_j_allocated) CTL_PROTO(stats_arenas_i_bins_j_nmalloc) CTL_PROTO(stats_arenas_i_bins_j_ndalloc) CTL_PROTO(stats_arenas_i_bins_j_nrequests) +CTL_PROTO(stats_arenas_i_bins_j_curregs) CTL_PROTO(stats_arenas_i_bins_j_nfills) CTL_PROTO(stats_arenas_i_bins_j_nflushes) -CTL_PROTO(stats_arenas_i_bins_j_nruns) -CTL_PROTO(stats_arenas_i_bins_j_nreruns) -CTL_PROTO(stats_arenas_i_bins_j_curruns) +CTL_PROTO(stats_arenas_i_bins_j_nslabs) +CTL_PROTO(stats_arenas_i_bins_j_nreslabs) +CTL_PROTO(stats_arenas_i_bins_j_curslabs) INDEX_PROTO(stats_arenas_i_bins_j) -CTL_PROTO(stats_arenas_i_lruns_j_nmalloc) -CTL_PROTO(stats_arenas_i_lruns_j_ndalloc) -CTL_PROTO(stats_arenas_i_lruns_j_nrequests) -CTL_PROTO(stats_arenas_i_lruns_j_curruns) -INDEX_PROTO(stats_arenas_i_lruns_j) +CTL_PROTO(stats_arenas_i_lextents_j_nmalloc) +CTL_PROTO(stats_arenas_i_lextents_j_ndalloc) +CTL_PROTO(stats_arenas_i_lextents_j_nrequests) +CTL_PROTO(stats_arenas_i_lextents_j_curlextents) +INDEX_PROTO(stats_arenas_i_lextents_j) CTL_PROTO(stats_arenas_i_nthreads) +CTL_PROTO(stats_arenas_i_uptime) CTL_PROTO(stats_arenas_i_dss) +CTL_PROTO(stats_arenas_i_dirty_decay_ms) 
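Alongside the stats entries here, the opt_dirty_decay_ms/opt_muzzy_decay_ms and arenas_*_decay_ms protos above replace 3.6's lg_dirty_mult knob with time-based purging, split into dirty and muzzy stages. A sketch of reading and retuning the dirty decay through mallctl, under the same assumptions as the previous example (a value of -1 disables decay-based purging):

    #include <stdio.h>
    #include <jemalloc/jemalloc.h>

    int main(void) {
        /* Read the configured default decay time (in milliseconds)
         * for dirty pages. */
        ssize_t decay_ms;
        size_t len = sizeof(decay_ms);
        if (mallctl("opt.dirty_decay_ms", &decay_ms, &len, NULL, 0) == 0)
            printf("opt.dirty_decay_ms = %zd\n", decay_ms);

        /* Tighten the default used when new arenas are created;
         * existing arenas are retuned via the arena.<i> nodes. */
        ssize_t new_ms = 1000;
        mallctl("arenas.dirty_decay_ms", NULL, NULL, &new_ms,
            sizeof(new_ms));
        return 0;
    }
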
+CTL_PROTO(stats_arenas_i_muzzy_decay_ms) CTL_PROTO(stats_arenas_i_pactive) CTL_PROTO(stats_arenas_i_pdirty) +CTL_PROTO(stats_arenas_i_pmuzzy) CTL_PROTO(stats_arenas_i_mapped) -CTL_PROTO(stats_arenas_i_npurge) -CTL_PROTO(stats_arenas_i_nmadvise) -CTL_PROTO(stats_arenas_i_purged) +CTL_PROTO(stats_arenas_i_retained) +CTL_PROTO(stats_arenas_i_dirty_npurge) +CTL_PROTO(stats_arenas_i_dirty_nmadvise) +CTL_PROTO(stats_arenas_i_dirty_purged) +CTL_PROTO(stats_arenas_i_muzzy_npurge) +CTL_PROTO(stats_arenas_i_muzzy_nmadvise) +CTL_PROTO(stats_arenas_i_muzzy_purged) +CTL_PROTO(stats_arenas_i_base) +CTL_PROTO(stats_arenas_i_internal) +CTL_PROTO(stats_arenas_i_tcache_bytes) +CTL_PROTO(stats_arenas_i_resident) INDEX_PROTO(stats_arenas_i) -CTL_PROTO(stats_cactive) CTL_PROTO(stats_allocated) CTL_PROTO(stats_active) +CTL_PROTO(stats_background_thread_num_threads) +CTL_PROTO(stats_background_thread_num_runs) +CTL_PROTO(stats_background_thread_run_interval) +CTL_PROTO(stats_metadata) +CTL_PROTO(stats_resident) CTL_PROTO(stats_mapped) +CTL_PROTO(stats_retained) + +#define MUTEX_STATS_CTL_PROTO_GEN(n) \ +CTL_PROTO(stats_##n##_num_ops) \ +CTL_PROTO(stats_##n##_num_wait) \ +CTL_PROTO(stats_##n##_num_spin_acq) \ +CTL_PROTO(stats_##n##_num_owner_switch) \ +CTL_PROTO(stats_##n##_total_wait_time) \ +CTL_PROTO(stats_##n##_max_wait_time) \ +CTL_PROTO(stats_##n##_max_num_thds) + +/* Global mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes. */ +#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* Arena bin mutexes. */ +MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex) +#undef MUTEX_STATS_CTL_PROTO_GEN + +CTL_PROTO(stats_mutexes_reset) /******************************************************************************/ /* mallctl tree. */ -/* Maximum tree depth. */ -#define CTL_MAX_DEPTH 6 - -#define NAME(n) {true}, n -#define CHILD(t, c) \ +#define NAME(n) {true}, n +#define CHILD(t, c) \ sizeof(c##_node) / sizeof(ctl_##t##_node_t), \ (ctl_node_t *)c##_node, \ NULL -#define CTL(c) 0, NULL, c##_ctl +#define CTL(c) 0, NULL, c##_ctl /* * Only handles internal indexed nodes, since there are currently no external * ones. 
*/ -#define INDEX(i) {false}, i##_index +#define INDEX(i) {false}, i##_index -static const ctl_named_node_t tcache_node[] = { +static const ctl_named_node_t thread_tcache_node[] = { {NAME("enabled"), CTL(thread_tcache_enabled)}, {NAME("flush"), CTL(thread_tcache_flush)} }; +static const ctl_named_node_t thread_prof_node[] = { + {NAME("name"), CTL(thread_prof_name)}, + {NAME("active"), CTL(thread_prof_active)} +}; + static const ctl_named_node_t thread_node[] = { {NAME("arena"), CTL(thread_arena)}, {NAME("allocated"), CTL(thread_allocated)}, {NAME("allocatedp"), CTL(thread_allocatedp)}, {NAME("deallocated"), CTL(thread_deallocated)}, {NAME("deallocatedp"), CTL(thread_deallocatedp)}, - {NAME("tcache"), CHILD(named, tcache)} + {NAME("tcache"), CHILD(named, thread_tcache)}, + {NAME("prof"), CHILD(named, thread_prof)} }; static const ctl_named_node_t config_node[] = { - {NAME("debug"), CTL(config_debug)}, - {NAME("dss"), CTL(config_dss)}, - {NAME("fill"), CTL(config_fill)}, - {NAME("lazy_lock"), CTL(config_lazy_lock)}, - {NAME("mremap"), CTL(config_mremap)}, - {NAME("munmap"), CTL(config_munmap)}, - {NAME("prof"), CTL(config_prof)}, - {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, - {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, - {NAME("stats"), CTL(config_stats)}, - {NAME("tcache"), CTL(config_tcache)}, - {NAME("tls"), CTL(config_tls)}, - {NAME("utrace"), CTL(config_utrace)}, - {NAME("valgrind"), CTL(config_valgrind)}, - {NAME("xmalloc"), CTL(config_xmalloc)} + {NAME("cache_oblivious"), CTL(config_cache_oblivious)}, + {NAME("debug"), CTL(config_debug)}, + {NAME("fill"), CTL(config_fill)}, + {NAME("lazy_lock"), CTL(config_lazy_lock)}, + {NAME("malloc_conf"), CTL(config_malloc_conf)}, + {NAME("prof"), CTL(config_prof)}, + {NAME("prof_libgcc"), CTL(config_prof_libgcc)}, + {NAME("prof_libunwind"), CTL(config_prof_libunwind)}, + {NAME("stats"), CTL(config_stats)}, + {NAME("thp"), CTL(config_thp)}, + {NAME("utrace"), CTL(config_utrace)}, + {NAME("xmalloc"), CTL(config_xmalloc)} }; static const ctl_named_node_t opt_node[] = { - {NAME("abort"), CTL(opt_abort)}, - {NAME("dss"), CTL(opt_dss)}, - {NAME("lg_chunk"), CTL(opt_lg_chunk)}, - {NAME("narenas"), CTL(opt_narenas)}, - {NAME("lg_dirty_mult"), CTL(opt_lg_dirty_mult)}, - {NAME("stats_print"), CTL(opt_stats_print)}, - {NAME("junk"), CTL(opt_junk)}, - {NAME("zero"), CTL(opt_zero)}, - {NAME("quarantine"), CTL(opt_quarantine)}, - {NAME("redzone"), CTL(opt_redzone)}, - {NAME("utrace"), CTL(opt_utrace)}, - {NAME("valgrind"), CTL(opt_valgrind)}, - {NAME("xmalloc"), CTL(opt_xmalloc)}, - {NAME("tcache"), CTL(opt_tcache)}, - {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, - {NAME("prof"), CTL(opt_prof)}, - {NAME("prof_prefix"), CTL(opt_prof_prefix)}, - {NAME("prof_active"), CTL(opt_prof_active)}, - {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, - {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, - {NAME("prof_gdump"), CTL(opt_prof_gdump)}, - {NAME("prof_final"), CTL(opt_prof_final)}, - {NAME("prof_leak"), CTL(opt_prof_leak)}, - {NAME("prof_accum"), CTL(opt_prof_accum)} + {NAME("abort"), CTL(opt_abort)}, + {NAME("abort_conf"), CTL(opt_abort_conf)}, + {NAME("retain"), CTL(opt_retain)}, + {NAME("dss"), CTL(opt_dss)}, + {NAME("narenas"), CTL(opt_narenas)}, + {NAME("percpu_arena"), CTL(opt_percpu_arena)}, + {NAME("background_thread"), CTL(opt_background_thread)}, + {NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)}, + {NAME("stats_print"), CTL(opt_stats_print)}, + {NAME("stats_print_opts"), 
CTL(opt_stats_print_opts)}, + {NAME("junk"), CTL(opt_junk)}, + {NAME("zero"), CTL(opt_zero)}, + {NAME("utrace"), CTL(opt_utrace)}, + {NAME("xmalloc"), CTL(opt_xmalloc)}, + {NAME("tcache"), CTL(opt_tcache)}, + {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)}, + {NAME("prof"), CTL(opt_prof)}, + {NAME("prof_prefix"), CTL(opt_prof_prefix)}, + {NAME("prof_active"), CTL(opt_prof_active)}, + {NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)}, + {NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)}, + {NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)}, + {NAME("prof_gdump"), CTL(opt_prof_gdump)}, + {NAME("prof_final"), CTL(opt_prof_final)}, + {NAME("prof_leak"), CTL(opt_prof_leak)}, + {NAME("prof_accum"), CTL(opt_prof_accum)} +}; + +static const ctl_named_node_t tcache_node[] = { + {NAME("create"), CTL(tcache_create)}, + {NAME("flush"), CTL(tcache_flush)}, + {NAME("destroy"), CTL(tcache_destroy)} }; static const ctl_named_node_t arena_i_node[] = { - {NAME("purge"), CTL(arena_i_purge)}, - {NAME("dss"), CTL(arena_i_dss)} + {NAME("initialized"), CTL(arena_i_initialized)}, + {NAME("decay"), CTL(arena_i_decay)}, + {NAME("purge"), CTL(arena_i_purge)}, + {NAME("reset"), CTL(arena_i_reset)}, + {NAME("destroy"), CTL(arena_i_destroy)}, + {NAME("dss"), CTL(arena_i_dss)}, + {NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)}, + {NAME("extent_hooks"), CTL(arena_i_extent_hooks)} }; static const ctl_named_node_t super_arena_i_node[] = { - {NAME(""), CHILD(named, arena_i)} + {NAME(""), CHILD(named, arena_i)} }; static const ctl_indexed_node_t arena_node[] = { @@ -269,147 +327,208 @@ static const ctl_indexed_node_t arena_node[] = { }; static const ctl_named_node_t arenas_bin_i_node[] = { - {NAME("size"), CTL(arenas_bin_i_size)}, - {NAME("nregs"), CTL(arenas_bin_i_nregs)}, - {NAME("run_size"), CTL(arenas_bin_i_run_size)} + {NAME("size"), CTL(arenas_bin_i_size)}, + {NAME("nregs"), CTL(arenas_bin_i_nregs)}, + {NAME("slab_size"), CTL(arenas_bin_i_slab_size)} }; static const ctl_named_node_t super_arenas_bin_i_node[] = { - {NAME(""), CHILD(named, arenas_bin_i)} + {NAME(""), CHILD(named, arenas_bin_i)} }; static const ctl_indexed_node_t arenas_bin_node[] = { {INDEX(arenas_bin_i)} }; -static const ctl_named_node_t arenas_lrun_i_node[] = { - {NAME("size"), CTL(arenas_lrun_i_size)} +static const ctl_named_node_t arenas_lextent_i_node[] = { + {NAME("size"), CTL(arenas_lextent_i_size)} }; -static const ctl_named_node_t super_arenas_lrun_i_node[] = { - {NAME(""), CHILD(named, arenas_lrun_i)} +static const ctl_named_node_t super_arenas_lextent_i_node[] = { + {NAME(""), CHILD(named, arenas_lextent_i)} }; -static const ctl_indexed_node_t arenas_lrun_node[] = { - {INDEX(arenas_lrun_i)} +static const ctl_indexed_node_t arenas_lextent_node[] = { + {INDEX(arenas_lextent_i)} }; static const ctl_named_node_t arenas_node[] = { - {NAME("narenas"), CTL(arenas_narenas)}, - {NAME("initialized"), CTL(arenas_initialized)}, - {NAME("quantum"), CTL(arenas_quantum)}, - {NAME("page"), CTL(arenas_page)}, - {NAME("tcache_max"), CTL(arenas_tcache_max)}, - {NAME("nbins"), CTL(arenas_nbins)}, - {NAME("nhbins"), CTL(arenas_nhbins)}, - {NAME("bin"), CHILD(indexed, arenas_bin)}, - {NAME("nlruns"), CTL(arenas_nlruns)}, - {NAME("lrun"), CHILD(indexed, arenas_lrun)}, - {NAME("purge"), CTL(arenas_purge)}, - {NAME("extend"), CTL(arenas_extend)} + {NAME("narenas"), CTL(arenas_narenas)}, + {NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), 
CTL(arenas_muzzy_decay_ms)}, + {NAME("quantum"), CTL(arenas_quantum)}, + {NAME("page"), CTL(arenas_page)}, + {NAME("tcache_max"), CTL(arenas_tcache_max)}, + {NAME("nbins"), CTL(arenas_nbins)}, + {NAME("nhbins"), CTL(arenas_nhbins)}, + {NAME("bin"), CHILD(indexed, arenas_bin)}, + {NAME("nlextents"), CTL(arenas_nlextents)}, + {NAME("lextent"), CHILD(indexed, arenas_lextent)}, + {NAME("create"), CTL(arenas_create)} }; static const ctl_named_node_t prof_node[] = { + {NAME("thread_active_init"), CTL(prof_thread_active_init)}, {NAME("active"), CTL(prof_active)}, {NAME("dump"), CTL(prof_dump)}, - {NAME("interval"), CTL(prof_interval)} + {NAME("gdump"), CTL(prof_gdump)}, + {NAME("reset"), CTL(prof_reset)}, + {NAME("interval"), CTL(prof_interval)}, + {NAME("lg_sample"), CTL(lg_prof_sample)} }; -static const ctl_named_node_t stats_chunks_node[] = { - {NAME("current"), CTL(stats_chunks_current)}, - {NAME("total"), CTL(stats_chunks_total)}, - {NAME("high"), CTL(stats_chunks_high)} +static const ctl_named_node_t stats_arenas_i_small_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} }; -static const ctl_named_node_t stats_huge_node[] = { - {NAME("allocated"), CTL(stats_huge_allocated)}, - {NAME("nmalloc"), CTL(stats_huge_nmalloc)}, - {NAME("ndalloc"), CTL(stats_huge_ndalloc)} +static const ctl_named_node_t stats_arenas_i_large_node[] = { + {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, + {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} }; -static const ctl_named_node_t stats_arenas_i_small_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_small_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_small_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_small_nrequests)} +#define MUTEX_PROF_DATA_NODE(prefix) \ +static const ctl_named_node_t stats_##prefix##_node[] = { \ + {NAME("num_ops"), \ + CTL(stats_##prefix##_num_ops)}, \ + {NAME("num_wait"), \ + CTL(stats_##prefix##_num_wait)}, \ + {NAME("num_spin_acq"), \ + CTL(stats_##prefix##_num_spin_acq)}, \ + {NAME("num_owner_switch"), \ + CTL(stats_##prefix##_num_owner_switch)}, \ + {NAME("total_wait_time"), \ + CTL(stats_##prefix##_total_wait_time)}, \ + {NAME("max_wait_time"), \ + CTL(stats_##prefix##_max_wait_time)}, \ + {NAME("max_num_thds"), \ + CTL(stats_##prefix##_max_num_thds)} \ + /* Note that # of current waiting thread not provided. 
*/ \ }; -static const ctl_named_node_t stats_arenas_i_large_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_large_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_large_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_large_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_large_nrequests)} -}; +MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex) static const ctl_named_node_t stats_arenas_i_bins_j_node[] = { - {NAME("allocated"), CTL(stats_arenas_i_bins_j_allocated)}, - {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, - {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, - {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, - {NAME("nruns"), CTL(stats_arenas_i_bins_j_nruns)}, - {NAME("nreruns"), CTL(stats_arenas_i_bins_j_nreruns)}, - {NAME("curruns"), CTL(stats_arenas_i_bins_j_curruns)} + {NAME("nmalloc"), CTL(stats_arenas_i_bins_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_bins_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_bins_j_nrequests)}, + {NAME("curregs"), CTL(stats_arenas_i_bins_j_curregs)}, + {NAME("nfills"), CTL(stats_arenas_i_bins_j_nfills)}, + {NAME("nflushes"), CTL(stats_arenas_i_bins_j_nflushes)}, + {NAME("nslabs"), CTL(stats_arenas_i_bins_j_nslabs)}, + {NAME("nreslabs"), CTL(stats_arenas_i_bins_j_nreslabs)}, + {NAME("curslabs"), CTL(stats_arenas_i_bins_j_curslabs)}, + {NAME("mutex"), CHILD(named, stats_arenas_i_bins_j_mutex)} }; + static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_bins_j)} + {NAME(""), CHILD(named, stats_arenas_i_bins_j)} }; static const ctl_indexed_node_t stats_arenas_i_bins_node[] = { {INDEX(stats_arenas_i_bins_j)} }; -static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = { - {NAME("nmalloc"), CTL(stats_arenas_i_lruns_j_nmalloc)}, - {NAME("ndalloc"), CTL(stats_arenas_i_lruns_j_ndalloc)}, - {NAME("nrequests"), CTL(stats_arenas_i_lruns_j_nrequests)}, - {NAME("curruns"), CTL(stats_arenas_i_lruns_j_curruns)} +static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = { + {NAME("nmalloc"), CTL(stats_arenas_i_lextents_j_nmalloc)}, + {NAME("ndalloc"), CTL(stats_arenas_i_lextents_j_ndalloc)}, + {NAME("nrequests"), CTL(stats_arenas_i_lextents_j_nrequests)}, + {NAME("curlextents"), CTL(stats_arenas_i_lextents_j_curlextents)} }; -static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = { - {NAME(""), CHILD(named, stats_arenas_i_lruns_j)} +static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = { + {NAME(""), CHILD(named, stats_arenas_i_lextents_j)} }; -static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = { - {INDEX(stats_arenas_i_lruns_j)} +static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = { + {INDEX(stats_arenas_i_lextents_j)} +}; + +#define OP(mtx) MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +static const ctl_named_node_t stats_arenas_i_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)}, +MUTEX_PROF_ARENA_MUTEXES +#undef OP }; static const ctl_named_node_t stats_arenas_i_node[] = { - {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, - {NAME("dss"), CTL(stats_arenas_i_dss)}, - {NAME("pactive"), CTL(stats_arenas_i_pactive)}, - {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, - {NAME("mapped"), CTL(stats_arenas_i_mapped)}, - {NAME("npurge"), CTL(stats_arenas_i_npurge)}, - {NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, - 
{NAME("purged"), CTL(stats_arenas_i_purged)}, - {NAME("small"), CHILD(named, stats_arenas_i_small)}, - {NAME("large"), CHILD(named, stats_arenas_i_large)}, - {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, - {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)} + {NAME("nthreads"), CTL(stats_arenas_i_nthreads)}, + {NAME("uptime"), CTL(stats_arenas_i_uptime)}, + {NAME("dss"), CTL(stats_arenas_i_dss)}, + {NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)}, + {NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)}, + {NAME("pactive"), CTL(stats_arenas_i_pactive)}, + {NAME("pdirty"), CTL(stats_arenas_i_pdirty)}, + {NAME("pmuzzy"), CTL(stats_arenas_i_pmuzzy)}, + {NAME("mapped"), CTL(stats_arenas_i_mapped)}, + {NAME("retained"), CTL(stats_arenas_i_retained)}, + {NAME("dirty_npurge"), CTL(stats_arenas_i_dirty_npurge)}, + {NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)}, + {NAME("dirty_purged"), CTL(stats_arenas_i_dirty_purged)}, + {NAME("muzzy_npurge"), CTL(stats_arenas_i_muzzy_npurge)}, + {NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)}, + {NAME("muzzy_purged"), CTL(stats_arenas_i_muzzy_purged)}, + {NAME("base"), CTL(stats_arenas_i_base)}, + {NAME("internal"), CTL(stats_arenas_i_internal)}, + {NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)}, + {NAME("resident"), CTL(stats_arenas_i_resident)}, + {NAME("small"), CHILD(named, stats_arenas_i_small)}, + {NAME("large"), CHILD(named, stats_arenas_i_large)}, + {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, + {NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)}, + {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)} }; static const ctl_named_node_t super_stats_arenas_i_node[] = { - {NAME(""), CHILD(named, stats_arenas_i)} + {NAME(""), CHILD(named, stats_arenas_i)} }; static const ctl_indexed_node_t stats_arenas_node[] = { {INDEX(stats_arenas_i)} }; +static const ctl_named_node_t stats_background_thread_node[] = { + {NAME("num_threads"), CTL(stats_background_thread_num_threads)}, + {NAME("num_runs"), CTL(stats_background_thread_num_runs)}, + {NAME("run_interval"), CTL(stats_background_thread_run_interval)} +}; + +#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +static const ctl_named_node_t stats_mutexes_node[] = { +#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)}, +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + {NAME("reset"), CTL(stats_mutexes_reset)} +}; +#undef MUTEX_PROF_DATA_NODE + static const ctl_named_node_t stats_node[] = { - {NAME("cactive"), CTL(stats_cactive)}, - {NAME("allocated"), CTL(stats_allocated)}, - {NAME("active"), CTL(stats_active)}, - {NAME("mapped"), CTL(stats_mapped)}, - {NAME("chunks"), CHILD(named, stats_chunks)}, - {NAME("huge"), CHILD(named, stats_huge)}, - {NAME("arenas"), CHILD(indexed, stats_arenas)} + {NAME("allocated"), CTL(stats_allocated)}, + {NAME("active"), CTL(stats_active)}, + {NAME("metadata"), CTL(stats_metadata)}, + {NAME("resident"), CTL(stats_resident)}, + {NAME("mapped"), CTL(stats_mapped)}, + {NAME("retained"), CTL(stats_retained)}, + {NAME("background_thread"), + CHILD(named, stats_background_thread)}, + {NAME("mutexes"), CHILD(named, stats_mutexes)}, + {NAME("arenas"), CHILD(indexed, stats_arenas)} }; static const ctl_named_node_t root_node[] = { {NAME("version"), CTL(version)}, {NAME("epoch"), CTL(epoch)}, + {NAME("background_thread"), CTL(background_thread)}, {NAME("thread"), CHILD(named, thread)}, {NAME("config"), CHILD(named, config)}, {NAME("opt"), CHILD(named, opt)}, + 
{NAME("tcache"), CHILD(named, tcache)}, {NAME("arena"), CHILD(indexed, arena)}, {NAME("arenas"), CHILD(named, arenas)}, {NAME("prof"), CHILD(named, prof)}, @@ -426,303 +545,514 @@ static const ctl_named_node_t super_root_node[] = { /******************************************************************************/ -static bool -ctl_arena_init(ctl_arena_stats_t *astats) -{ +/* + * Sets *dst + *src non-atomically. This is safe, since everything is + * synchronized by the ctl mutex. + */ +static void +accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) { +#ifdef JEMALLOC_ATOMIC_U64 + uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); + uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED); + atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED); +#else + *dst += *src; +#endif +} - if (astats->lstats == NULL) { - astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses * - sizeof(malloc_large_stats_t)); - if (astats->lstats == NULL) - return (true); +/* Likewise: with ctl mutex synchronization, reading is simple. */ +static uint64_t +arena_stats_read_u64(arena_stats_u64_t *p) { +#ifdef JEMALLOC_ATOMIC_U64 + return atomic_load_u64(p, ATOMIC_RELAXED); +#else + return *p; +#endif +} + +static void accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) { + size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); + size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED); + atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED); +} + +/******************************************************************************/ + +static unsigned +arenas_i2a_impl(size_t i, bool compat, bool validate) { + unsigned a; + + switch (i) { + case MALLCTL_ARENAS_ALL: + a = 0; + break; + case MALLCTL_ARENAS_DESTROYED: + a = 1; + break; + default: + if (compat && i == ctl_arenas->narenas) { + /* + * Provide deprecated backward compatibility for + * accessing the merged stats at index narenas rather + * than via MALLCTL_ARENAS_ALL. This is scheduled for + * removal in 6.0.0. + */ + a = 0; + } else if (validate && i >= ctl_arenas->narenas) { + a = UINT_MAX; + } else { + /* + * This function should never be called for an index + * more than one past the range of indices that have + * initialized ctl data. 
+ */ + assert(i < ctl_arenas->narenas || (!validate && i == + ctl_arenas->narenas)); + a = (unsigned)i + 2; + } + break; } - return (false); + return a; } -static void -ctl_arena_clear(ctl_arena_stats_t *astats) -{ +static unsigned +arenas_i2a(size_t i) { + return arenas_i2a_impl(i, true, false); +} + +static ctl_arena_t * +arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) { + ctl_arena_t *ret; - astats->dss = dss_prec_names[dss_prec_limit]; - astats->pactive = 0; - astats->pdirty = 0; + assert(!compat || !init); + + ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)]; + if (init && ret == NULL) { + if (config_stats) { + struct container_s { + ctl_arena_t ctl_arena; + ctl_arena_stats_t astats; + }; + struct container_s *cont = + (struct container_s *)base_alloc(tsd_tsdn(tsd), + b0get(), sizeof(struct container_s), QUANTUM); + if (cont == NULL) { + return NULL; + } + ret = &cont->ctl_arena; + ret->astats = &cont->astats; + } else { + ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(), + sizeof(ctl_arena_t), QUANTUM); + if (ret == NULL) { + return NULL; + } + } + ret->arena_ind = (unsigned)i; + ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret; + } + + assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i)); + return ret; +} + +static ctl_arena_t * +arenas_i(size_t i) { + ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false); + assert(ret != NULL); + return ret; +} + +static void +ctl_arena_clear(ctl_arena_t *ctl_arena) { + ctl_arena->nthreads = 0; + ctl_arena->dss = dss_prec_names[dss_prec_limit]; + ctl_arena->dirty_decay_ms = -1; + ctl_arena->muzzy_decay_ms = -1; + ctl_arena->pactive = 0; + ctl_arena->pdirty = 0; + ctl_arena->pmuzzy = 0; if (config_stats) { - memset(&astats->astats, 0, sizeof(arena_stats_t)); - astats->allocated_small = 0; - astats->nmalloc_small = 0; - astats->ndalloc_small = 0; - astats->nrequests_small = 0; - memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); - memset(astats->lstats, 0, nlclasses * + memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t)); + ctl_arena->astats->allocated_small = 0; + ctl_arena->astats->nmalloc_small = 0; + ctl_arena->astats->ndalloc_small = 0; + ctl_arena->astats->nrequests_small = 0; + memset(ctl_arena->astats->bstats, 0, NBINS * + sizeof(malloc_bin_stats_t)); + memset(ctl_arena->astats->lstats, 0, (NSIZES - NBINS) * sizeof(malloc_large_stats_t)); } } static void -ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) -{ +ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) { unsigned i; - arena_stats_merge(arena, &cstats->dss, &cstats->pactive, - &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats); - - for (i = 0; i < NBINS; i++) { - cstats->allocated_small += cstats->bstats[i].allocated; - cstats->nmalloc_small += cstats->bstats[i].nmalloc; - cstats->ndalloc_small += cstats->bstats[i].ndalloc; - cstats->nrequests_small += cstats->bstats[i].nrequests; + if (config_stats) { + arena_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy, + &ctl_arena->astats->astats, ctl_arena->astats->bstats, + ctl_arena->astats->lstats); + + for (i = 0; i < NBINS; i++) { + ctl_arena->astats->allocated_small += + ctl_arena->astats->bstats[i].curregs * + sz_index2size(i); + ctl_arena->astats->nmalloc_small += + ctl_arena->astats->bstats[i].nmalloc; + ctl_arena->astats->ndalloc_small += + 
ctl_arena->astats->bstats[i].ndalloc; + ctl_arena->astats->nrequests_small += + ctl_arena->astats->bstats[i].nrequests; + } + } else { + arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads, + &ctl_arena->dss, &ctl_arena->dirty_decay_ms, + &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive, + &ctl_arena->pdirty, &ctl_arena->pmuzzy); } } static void -ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) -{ +ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena, + bool destroyed) { unsigned i; - sstats->pactive += astats->pactive; - sstats->pdirty += astats->pdirty; - - sstats->astats.mapped += astats->astats.mapped; - sstats->astats.npurge += astats->astats.npurge; - sstats->astats.nmadvise += astats->astats.nmadvise; - sstats->astats.purged += astats->astats.purged; - - sstats->allocated_small += astats->allocated_small; - sstats->nmalloc_small += astats->nmalloc_small; - sstats->ndalloc_small += astats->ndalloc_small; - sstats->nrequests_small += astats->nrequests_small; - - sstats->astats.allocated_large += astats->astats.allocated_large; - sstats->astats.nmalloc_large += astats->astats.nmalloc_large; - sstats->astats.ndalloc_large += astats->astats.ndalloc_large; - sstats->astats.nrequests_large += astats->astats.nrequests_large; - - for (i = 0; i < nlclasses; i++) { - sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc; - sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc; - sstats->lstats[i].nrequests += astats->lstats[i].nrequests; - sstats->lstats[i].curruns += astats->lstats[i].curruns; - } - - for (i = 0; i < NBINS; i++) { - sstats->bstats[i].allocated += astats->bstats[i].allocated; - sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; - sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; - sstats->bstats[i].nrequests += astats->bstats[i].nrequests; - if (config_tcache) { - sstats->bstats[i].nfills += astats->bstats[i].nfills; - sstats->bstats[i].nflushes += + if (!destroyed) { + ctl_sdarena->nthreads += ctl_arena->nthreads; + ctl_sdarena->pactive += ctl_arena->pactive; + ctl_sdarena->pdirty += ctl_arena->pdirty; + ctl_sdarena->pmuzzy += ctl_arena->pmuzzy; + } else { + assert(ctl_arena->nthreads == 0); + assert(ctl_arena->pactive == 0); + assert(ctl_arena->pdirty == 0); + assert(ctl_arena->pmuzzy == 0); + } + + if (config_stats) { + ctl_arena_stats_t *sdstats = ctl_sdarena->astats; + ctl_arena_stats_t *astats = ctl_arena->astats; + + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.mapped, + &astats->astats.mapped); + accum_atomic_zu(&sdstats->astats.retained, + &astats->astats.retained); + } + + accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge, + &astats->astats.decay_dirty.npurge); + accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise, + &astats->astats.decay_dirty.nmadvise); + accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged, + &astats->astats.decay_dirty.purged); + + accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge, + &astats->astats.decay_muzzy.npurge); + accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise, + &astats->astats.decay_muzzy.nmadvise); + accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged, + &astats->astats.decay_muzzy.purged); + +#define OP(mtx) malloc_mutex_prof_merge( \ + &(sdstats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx]), \ + &(astats->astats.mutex_prof_data[ \ + arena_prof_mutex_##mtx])); +MUTEX_PROF_ARENA_MUTEXES +#undef OP + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.base, + &astats->astats.base); + 
accum_atomic_zu(&sdstats->astats.internal, + &astats->astats.internal); + accum_atomic_zu(&sdstats->astats.resident, + &astats->astats.resident); + } else { + assert(atomic_load_zu( + &astats->astats.internal, ATOMIC_RELAXED) == 0); + } + + if (!destroyed) { + sdstats->allocated_small += astats->allocated_small; + } else { + assert(astats->allocated_small == 0); + } + sdstats->nmalloc_small += astats->nmalloc_small; + sdstats->ndalloc_small += astats->ndalloc_small; + sdstats->nrequests_small += astats->nrequests_small; + + if (!destroyed) { + accum_atomic_zu(&sdstats->astats.allocated_large, + &astats->astats.allocated_large); + } else { + assert(atomic_load_zu(&astats->astats.allocated_large, + ATOMIC_RELAXED) == 0); + } + accum_arena_stats_u64(&sdstats->astats.nmalloc_large, + &astats->astats.nmalloc_large); + accum_arena_stats_u64(&sdstats->astats.ndalloc_large, + &astats->astats.ndalloc_large); + accum_arena_stats_u64(&sdstats->astats.nrequests_large, + &astats->astats.nrequests_large); + + accum_atomic_zu(&sdstats->astats.tcache_bytes, + &astats->astats.tcache_bytes); + + if (ctl_arena->arena_ind == 0) { + sdstats->astats.uptime = astats->astats.uptime; + } + + for (i = 0; i < NBINS; i++) { + sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc; + sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc; + sdstats->bstats[i].nrequests += + astats->bstats[i].nrequests; + if (!destroyed) { + sdstats->bstats[i].curregs += + astats->bstats[i].curregs; + } else { + assert(astats->bstats[i].curregs == 0); + } + sdstats->bstats[i].nfills += astats->bstats[i].nfills; + sdstats->bstats[i].nflushes += astats->bstats[i].nflushes; + sdstats->bstats[i].nslabs += astats->bstats[i].nslabs; + sdstats->bstats[i].reslabs += astats->bstats[i].reslabs; + if (!destroyed) { + sdstats->bstats[i].curslabs += + astats->bstats[i].curslabs; + } else { + assert(astats->bstats[i].curslabs == 0); + } + malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data, + &astats->bstats[i].mutex_data); + } + + for (i = 0; i < NSIZES - NBINS; i++) { + accum_arena_stats_u64(&sdstats->lstats[i].nmalloc, + &astats->lstats[i].nmalloc); + accum_arena_stats_u64(&sdstats->lstats[i].ndalloc, + &astats->lstats[i].ndalloc); + accum_arena_stats_u64(&sdstats->lstats[i].nrequests, + &astats->lstats[i].nrequests); + if (!destroyed) { + sdstats->lstats[i].curlextents += + astats->lstats[i].curlextents; + } else { + assert(astats->lstats[i].curlextents == 0); + } } - sstats->bstats[i].nruns += astats->bstats[i].nruns; - sstats->bstats[i].reruns += astats->bstats[i].reruns; - sstats->bstats[i].curruns += astats->bstats[i].curruns; } } static void -ctl_arena_refresh(arena_t *arena, unsigned i) -{ - ctl_arena_stats_t *astats = &ctl_stats.arenas[i]; - ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas]; +ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena, + unsigned i, bool destroyed) { + ctl_arena_t *ctl_arena = arenas_i(i); + + ctl_arena_clear(ctl_arena); + ctl_arena_stats_amerge(tsdn, ctl_arena, arena); + /* Merge into sum stats as well. */ + ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed); +} - ctl_arena_clear(astats); +static unsigned +ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) { + unsigned arena_ind; + ctl_arena_t *ctl_arena; - sstats->nthreads += astats->nthreads; - if (config_stats) { - ctl_arena_stats_amerge(astats, arena); - /* Merge into sum stats as well. 
*/ - ctl_arena_stats_smerge(sstats, astats); + if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) != + NULL) { + ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_ind = ctl_arena->arena_ind; } else { - astats->pactive += arena->nactive; - astats->pdirty += arena->ndirty; - /* Merge into sum stats as well. */ - sstats->pactive += arena->nactive; - sstats->pdirty += arena->ndirty; + arena_ind = ctl_arenas->narenas; } -} -static bool -ctl_grow(void) -{ - ctl_arena_stats_t *astats; - arena_t **tarenas; - - /* Allocate extended arena stats and arenas arrays. */ - astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) * - sizeof(ctl_arena_stats_t)); - if (astats == NULL) - return (true); - tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) * - sizeof(arena_t *)); - if (tarenas == NULL) { - idalloc(astats); - return (true); - } - - /* Initialize the new astats element. */ - memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); - memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t)); - if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) { - idalloc(tarenas); - idalloc(astats); - return (true); - } - /* Swap merged stats to their new location. */ - { - ctl_arena_stats_t tstats; - memcpy(&tstats, &astats[ctl_stats.narenas], - sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas], - &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t)); - memcpy(&astats[ctl_stats.narenas + 1], &tstats, - sizeof(ctl_arena_stats_t)); - } - /* Initialize the new arenas element. */ - tarenas[ctl_stats.narenas] = NULL; - { - arena_t **arenas_old = arenas; - /* - * Swap extended arenas array into place. Although ctl_mtx - * protects this function from other threads extending the - * array, it does not protect from other threads mutating it - * (i.e. initializing arenas and setting array elements to - * point to them). Therefore, array copying must happen under - * the protection of arenas_lock. - */ - malloc_mutex_lock(&arenas_lock); - arenas = tarenas; - memcpy(arenas, arenas_old, ctl_stats.narenas * - sizeof(arena_t *)); - narenas_total++; - arenas_extend(narenas_total - 1); - malloc_mutex_unlock(&arenas_lock); - /* - * Deallocate arenas_old only if it came from imalloc() (not - * base_alloc()). - */ - if (ctl_stats.narenas != narenas_auto) - idalloc(arenas_old); + /* Trigger stats allocation. */ + if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) { + return UINT_MAX; } - ctl_stats.arenas = astats; - ctl_stats.narenas++; - return (false); -} + /* Initialize new arena. 
*/ + if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) { + return UINT_MAX; + } -static void -ctl_refresh(void) -{ - unsigned i; - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); + if (arena_ind == ctl_arenas->narenas) { + ctl_arenas->narenas++; + } - if (config_stats) { - malloc_mutex_lock(&chunks_mtx); - ctl_stats.chunks.current = stats_chunks.curchunks; - ctl_stats.chunks.total = stats_chunks.nchunks; - ctl_stats.chunks.high = stats_chunks.highchunks; - malloc_mutex_unlock(&chunks_mtx); + return arena_ind; +} - malloc_mutex_lock(&huge_mtx); - ctl_stats.huge.allocated = huge_allocated; - ctl_stats.huge.nmalloc = huge_nmalloc; - ctl_stats.huge.ndalloc = huge_ndalloc; - malloc_mutex_unlock(&huge_mtx); +static void +ctl_background_thread_stats_read(tsdn_t *tsdn) { + background_thread_stats_t *stats = &ctl_stats->background_thread; + if (!have_background_thread || + background_thread_stats_read(tsdn, stats)) { + memset(stats, 0, sizeof(background_thread_stats_t)); + nstime_init(&stats->run_interval, 0); } +} + +static void +ctl_refresh(tsdn_t *tsdn) { + unsigned i; + ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL); + VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas); /* * Clear sum stats, since they will be merged into by * ctl_arena_refresh(). */ - ctl_stats.arenas[ctl_stats.narenas].nthreads = 0; - ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]); - - malloc_mutex_lock(&arenas_lock); - memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas); - for (i = 0; i < ctl_stats.narenas; i++) { - if (arenas[i] != NULL) - ctl_stats.arenas[i].nthreads = arenas[i]->nthreads; - else - ctl_stats.arenas[i].nthreads = 0; - } - malloc_mutex_unlock(&arenas_lock); - for (i = 0; i < ctl_stats.narenas; i++) { + ctl_arena_clear(ctl_sarena); + + for (i = 0; i < ctl_arenas->narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); + } + + for (i = 0; i < ctl_arenas->narenas; i++) { + ctl_arena_t *ctl_arena = arenas_i(i); bool initialized = (tarenas[i] != NULL); - ctl_stats.arenas[i].initialized = initialized; - if (initialized) - ctl_arena_refresh(tarenas[i], i); + ctl_arena->initialized = initialized; + if (initialized) { + ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i, + false); + } } if (config_stats) { - ctl_stats.allocated = - ctl_stats.arenas[ctl_stats.narenas].allocated_small - + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large - + ctl_stats.huge.allocated; - ctl_stats.active = - (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE) - + ctl_stats.huge.allocated; - ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk); + ctl_stats->allocated = ctl_sarena->astats->allocated_small + + atomic_load_zu(&ctl_sarena->astats->astats.allocated_large, + ATOMIC_RELAXED); + ctl_stats->active = (ctl_sarena->pactive << LG_PAGE); + ctl_stats->metadata = atomic_load_zu( + &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) + + atomic_load_zu(&ctl_sarena->astats->astats.internal, + ATOMIC_RELAXED); + ctl_stats->resident = atomic_load_zu( + &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED); + ctl_stats->mapped = atomic_load_zu( + &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED); + ctl_stats->retained = atomic_load_zu( + &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED); + + ctl_background_thread_stats_read(tsdn); + +#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); + + if (config_prof && opt_prof) { + 
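ctl_background_thread_stats_read() feeds the three stats.background_thread.* leaves defined earlier; when background threads are unavailable the struct is zeroed rather than left stale. A sketch of reading them (write the epoch mallctl first for current values):

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_background_thread_stats(void) {
    size_t nthreads, sz = sizeof(nthreads);
    uint64_t nruns, interval_ns;
    size_t sz64 = sizeof(uint64_t);

    if (mallctl("stats.background_thread.num_threads", &nthreads, &sz,
        NULL, 0) == 0 &&
        mallctl("stats.background_thread.num_runs", &nruns, &sz64,
        NULL, 0) == 0 &&
        mallctl("stats.background_thread.run_interval", &interval_ns,
        &sz64, NULL, 0) == 0) {
        printf("bg: %zu threads, %llu runs, %llu ns interval\n",
            nthreads, (unsigned long long)nruns,
            (unsigned long long)interval_ns);
    }
}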
READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof, + bt2gctx_mtx); + } + if (have_background_thread) { + READ_GLOBAL_MUTEX_PROF_DATA( + global_prof_mutex_background_thread, + background_thread_lock); + } else { + memset(&ctl_stats->mutex_prof_data[ + global_prof_mutex_background_thread], 0, + sizeof(mutex_prof_data_t)); + } + /* We own ctl mutex already. */ + malloc_mutex_prof_read(tsdn, + &ctl_stats->mutex_prof_data[global_prof_mutex_ctl], + &ctl_mtx); +#undef READ_GLOBAL_MUTEX_PROF_DATA } - - ctl_epoch++; + ctl_arenas->epoch++; } static bool -ctl_init(void) -{ +ctl_init(tsd_t *tsd) { bool ret; + tsdn_t *tsdn = tsd_tsdn(tsd); + + malloc_mutex_lock(tsdn, &ctl_mtx); + if (!ctl_initialized) { + ctl_arena_t *ctl_sarena, *ctl_darena; + unsigned i; - malloc_mutex_lock(&ctl_mtx); - if (ctl_initialized == false) { /* - * Allocate space for one extra arena stats element, which - * contains summed stats across all arenas. + * Allocate demand-zeroed space for pointers to the full + * range of supported arena indices. */ - assert(narenas_auto == narenas_total_get()); - ctl_stats.narenas = narenas_auto; - ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc( - (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t)); - if (ctl_stats.arenas == NULL) { + if (ctl_arenas == NULL) { + ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn, + b0get(), sizeof(ctl_arenas_t), QUANTUM); + if (ctl_arenas == NULL) { + ret = true; + goto label_return; + } + } + + if (config_stats && ctl_stats == NULL) { + ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(), + sizeof(ctl_stats_t), QUANTUM); + if (ctl_stats == NULL) { + ret = true; + goto label_return; + } + } + + /* + * Allocate space for the current full range of arenas + * here rather than doing it lazily elsewhere, in order + * to limit when OOM-caused errors can occur. + */ + if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false, + true)) == NULL) { ret = true; goto label_return; } - memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) * - sizeof(ctl_arena_stats_t)); + ctl_sarena->initialized = true; + if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED, + false, true)) == NULL) { + ret = true; + goto label_return; + } + ctl_arena_clear(ctl_darena); /* - * Initialize all stats structures, regardless of whether they - * ever get used. Lazy initialization would allow errors to - * cause inconsistent state to be viewable by the application. + * Don't toggle ctl_darena to initialized until an arena is + * actually destroyed, so that arena.<i>.initialized can be used + * to query whether the stats are relevant. 
*/ - if (config_stats) { - unsigned i; - for (i = 0; i <= ctl_stats.narenas; i++) { - if (ctl_arena_init(&ctl_stats.arenas[i])) { - ret = true; - goto label_return; - } + + ctl_arenas->narenas = narenas_total_get(); + for (i = 0; i < ctl_arenas->narenas; i++) { + if (arenas_i_impl(tsd, i, false, true) == NULL) { + ret = true; + goto label_return; } } - ctl_stats.arenas[ctl_stats.narenas].initialized = true; - ctl_epoch = 0; - ctl_refresh(); + ql_new(&ctl_arenas->destroyed); + ctl_refresh(tsdn); + ctl_initialized = true; } ret = false; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; } static int -ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, - size_t *depthp) -{ +ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp, + size_t *mibp, size_t *depthp) { int ret; const char *elm, *tdot, *dot; size_t elen, i, j; @@ -750,9 +1080,10 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, if (strlen(child->name) == elen && strncmp(elm, child->name, elen) == 0) { node = child; - if (nodesp != NULL) + if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; + } mibp[i] = j; break; } @@ -773,14 +1104,15 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, } inode = ctl_indexed_node(node->children); - node = inode->index(mibp, *depthp, (size_t)index); + node = inode->index(tsdn, mibp, *depthp, (size_t)index); if (node == NULL) { ret = ENOENT; goto label_return; } - if (nodesp != NULL) + if (nodesp != NULL) { nodesp[i] = (const ctl_node_t *)node; + } mibp[i] = (size_t)index; } @@ -813,33 +1145,33 @@ ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp, ret = 0; label_return: - return (ret); + return ret; } int -ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ +ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, + void *newp, size_t newlen) { int ret; size_t depth; ctl_node_t const *nodes[CTL_MAX_DEPTH]; size_t mib[CTL_MAX_DEPTH]; const ctl_named_node_t *node; - if (ctl_initialized == false && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } depth = CTL_MAX_DEPTH; - ret = ctl_lookup(name, nodes, mib, &depth); - if (ret != 0) + ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth); + if (ret != 0) { goto label_return; + } node = ctl_named_node(nodes[depth-1]); - if (node != NULL && node->ctl) - ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen); - else { + if (node != NULL && node->ctl) { + ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen); + } else { /* The name refers to a partial path through the ctl tree. 
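ctl_byname() is the backend of the public mallctl(); the first call in a process triggers the lazy ctl_init() above and reports EAGAIN if that initialization cannot allocate. The simplest possible caller:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_jemalloc_version(void) {
    const char *v;
    size_t sz = sizeof(v);
    if (mallctl("version", &v, &sz, NULL, 0) == 0) {
        printf("jemalloc %s\n", v);
    }
}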
*/ ret = ENOENT; } @@ -849,29 +1181,27 @@ label_return: } int -ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp) -{ +ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) { int ret; - if (ctl_initialized == false && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } - ret = ctl_lookup(name, NULL, mibp, miblenp); + ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp); label_return: return(ret); } int -ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const ctl_named_node_t *node; size_t i; - if (ctl_initialized == false && ctl_init()) { + if (!ctl_initialized && ctl_init(tsd)) { ret = EAGAIN; goto label_return; } @@ -893,7 +1223,7 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, /* Indexed element. */ inode = ctl_indexed_node(node->children); - node = inode->index(mib, miblen, mib[i]); + node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]); if (node == NULL) { ret = ENOENT; goto label_return; @@ -902,9 +1232,9 @@ ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, } /* Call the ctl function. */ - if (node && node->ctl) - ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen); - else { + if (node && node->ctl) { + ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + } else { /* Partial MIB. */ ret = ENOENT; } @@ -914,56 +1244,58 @@ label_return: } bool -ctl_boot(void) -{ - - if (malloc_mutex_init(&ctl_mtx)) - return (true); +ctl_boot(void) { + if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL, + malloc_mutex_rank_exclusive)) { + return true; + } ctl_initialized = false; - return (false); + return false; } void -ctl_prefork(void) -{ - - malloc_mutex_prefork(&ctl_mtx); +ctl_prefork(tsdn_t *tsdn) { + malloc_mutex_prefork(tsdn, &ctl_mtx); } void -ctl_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&ctl_mtx); +ctl_postfork_parent(tsdn_t *tsdn) { + malloc_mutex_postfork_parent(tsdn, &ctl_mtx); } void -ctl_postfork_child(void) -{ - - malloc_mutex_postfork_child(&ctl_mtx); +ctl_postfork_child(tsdn_t *tsdn) { + malloc_mutex_postfork_child(tsdn, &ctl_mtx); } /******************************************************************************/ /* *_ctl() functions. 
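ctl_nametomib()/ctl_bymib() back the public mallctlnametomib()/mallctlbymib() pair, which lets hot paths skip the string walk after a one-time translation. A single-threaded sketch (guard the one-time init in real code):

#include <stdint.h>
#include <jemalloc/jemalloc.h>

static uint64_t
thread_allocated_fast(void) {
    static size_t mib[2]; /* "thread.allocated" has depth 2. */
    static size_t miblen = 0;
    uint64_t v;
    size_t sz = sizeof(v);

    if (miblen == 0) {
        miblen = sizeof(mib) / sizeof(mib[0]);
        if (mallctlnametomib("thread.allocated", mib, &miblen) != 0) {
            return 0;
        }
    }
    if (mallctlbymib(mib, miblen, &v, &sz, NULL, 0) != 0) {
        return 0;
    }
    return v;
}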
*/ -#define READONLY() do { \ +#define READONLY() do { \ if (newp != NULL || newlen != 0) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) -#define WRITEONLY() do { \ +#define WRITEONLY() do { \ if (oldp != NULL || oldlenp != NULL) { \ ret = EPERM; \ goto label_return; \ } \ } while (0) -#define READ(v, t) do { \ +#define READ_XOR_WRITE() do { \ + if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \ + newlen != 0)) { \ + ret = EPERM; \ + goto label_return; \ + } \ +} while (0) + +#define READ(v, t) do { \ if (oldp != NULL && oldlenp != NULL) { \ if (*oldlenp != sizeof(t)) { \ size_t copylen = (sizeof(t) <= *oldlenp) \ @@ -971,12 +1303,12 @@ ctl_postfork_child(void) memcpy(oldp, (void *)&(v), copylen); \ ret = EINVAL; \ goto label_return; \ - } else \ - *(t *)oldp = (v); \ + } \ + *(t *)oldp = (v); \ } \ } while (0) -#define WRITE(v, t) do { \ +#define WRITE(v, t) do { \ if (newp != NULL) { \ if (newlen != sizeof(t)) { \ ret = EINVAL; \ @@ -986,101 +1318,109 @@ ctl_postfork_child(void) } \ } while (0) +#define MIB_UNSIGNED(v, i) do { \ + if (mib[i] > UINT_MAX) { \ + ret = EFAULT; \ + goto label_return; \ + } \ + v = (unsigned)mib[i]; \ +} while (0) + /* * There's a lot of code duplication in the following macros due to limitations * in how nested cpp macros are expanded. */ -#define CTL_RO_CLGEN(c, l, n, v, t) \ +#define CTL_RO_CLGEN(c, l, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if ((c) == false) \ - return (ENOENT); \ - if (l) \ - malloc_mutex_lock(&ctl_mtx); \ + if (!(c)) { \ + return ENOENT; \ + } \ + if (l) { \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - if (l) \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ + if (l) { \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + } \ + return ret; \ } -#define CTL_RO_CGEN(c, n, v, t) \ +#define CTL_RO_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if ((c) == false) \ - return (ENOENT); \ - malloc_mutex_lock(&ctl_mtx); \ + if (!(c)) { \ + return ENOENT; \ + } \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ } -#define CTL_RO_GEN(n, v, t) \ +#define CTL_RO_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - malloc_mutex_lock(&ctl_mtx); \ + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - malloc_mutex_unlock(&ctl_mtx); \ - return (ret); \ + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); \ + return ret; \ } /* * ctl_mtx is not acquired, under the assumption that no pertinent data will * mutate during the call. 
*/ -#define CTL_RO_NL_CGEN(c, n, v, t) \ +#define CTL_RO_NL_CGEN(c, n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ - if ((c) == false) \ - return (ENOENT); \ + if (!(c)) { \ + return ENOENT; \ + } \ READONLY(); \ oldval = (v); \ READ(oldval, t); \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } -#define CTL_RO_NL_GEN(n, v, t) \ +#define CTL_RO_NL_GEN(n, v, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ t oldval; \ \ @@ -1090,24 +1430,42 @@ n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } -#define CTL_RO_BOOL_CONFIG_GEN(n) \ +#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \ static int \ -n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, \ - void *newp, size_t newlen) \ -{ \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ int ret; \ - bool oldval; \ + t oldval; \ + \ + if (!(c)) { \ + return ENOENT; \ + } \ + READONLY(); \ + oldval = (m(tsd)); \ + READ(oldval, t); \ + \ + ret = 0; \ +label_return: \ + return ret; \ +} + +#define CTL_RO_CONFIG_GEN(n, t) \ +static int \ +n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \ + size_t *oldlenp, void *newp, size_t newlen) { \ + int ret; \ + t oldval; \ \ READONLY(); \ oldval = n; \ - READ(oldval, bool); \ + READ(oldval, t); \ \ ret = 0; \ label_return: \ - return (ret); \ + return ret; \ } /******************************************************************************/ @@ -1115,62 +1473,122 @@ label_return: \ CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *) static int -epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; UNUSED uint64_t newval; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(newval, uint64_t); - if (newp != NULL) - ctl_refresh(); - READ(ctl_epoch, uint64_t); + if (newp != NULL) { + ctl_refresh(tsd_tsdn(tsd)); + } + READ(ctl_arenas->epoch, uint64_t); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +background_thread_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!have_background_thread) { + return ENOENT; + } + background_thread_ctl_init(tsd_tsdn(tsd)); + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (newp == NULL) { + oldval = background_thread_enabled(); + READ(oldval, bool); + } else { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = background_thread_enabled(); + READ(oldval, bool); + + bool newval = *(bool *)newp; + if (newval == oldval) { + ret = 0; + goto label_return; + } + + background_thread_enabled_set(tsd_tsdn(tsd), newval); + if (newval) { + if 
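Writing to epoch runs ctl_refresh() above and bumps ctl_arenas->epoch, so all stats.* reads that follow observe one consistent snapshot. The canonical pattern:

#include <stdint.h>
#include <jemalloc/jemalloc.h>

static size_t
current_allocated(void) {
    uint64_t epoch = 1;
    size_t sz = sizeof(epoch);
    size_t allocated;

    /* Refresh the stats snapshot, then read from it. */
    mallctl("epoch", &epoch, &sz, &epoch, sz);
    sz = sizeof(allocated);
    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) != 0) {
        return 0;
    }
    return allocated;
}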
(!can_enable_background_thread) { + malloc_printf("<jemalloc>: Error in dlsym(" + "RTLD_NEXT, \"pthread_create\"). Cannot " + "enable background_thread\n"); + ret = EFAULT; + goto label_return; + } + if (background_threads_enable(tsd)) { + ret = EFAULT; + goto label_return; + } + } else { + if (background_threads_disable(tsd)) { + ret = EFAULT; + goto label_return; + } + } + } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + + return ret; } /******************************************************************************/ -CTL_RO_BOOL_CONFIG_GEN(config_debug) -CTL_RO_BOOL_CONFIG_GEN(config_dss) -CTL_RO_BOOL_CONFIG_GEN(config_fill) -CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock) -CTL_RO_BOOL_CONFIG_GEN(config_mremap) -CTL_RO_BOOL_CONFIG_GEN(config_munmap) -CTL_RO_BOOL_CONFIG_GEN(config_prof) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc) -CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind) -CTL_RO_BOOL_CONFIG_GEN(config_stats) -CTL_RO_BOOL_CONFIG_GEN(config_tcache) -CTL_RO_BOOL_CONFIG_GEN(config_tls) -CTL_RO_BOOL_CONFIG_GEN(config_utrace) -CTL_RO_BOOL_CONFIG_GEN(config_valgrind) -CTL_RO_BOOL_CONFIG_GEN(config_xmalloc) +CTL_RO_CONFIG_GEN(config_cache_oblivious, bool) +CTL_RO_CONFIG_GEN(config_debug, bool) +CTL_RO_CONFIG_GEN(config_fill, bool) +CTL_RO_CONFIG_GEN(config_lazy_lock, bool) +CTL_RO_CONFIG_GEN(config_malloc_conf, const char *) +CTL_RO_CONFIG_GEN(config_prof, bool) +CTL_RO_CONFIG_GEN(config_prof_libgcc, bool) +CTL_RO_CONFIG_GEN(config_prof_libunwind, bool) +CTL_RO_CONFIG_GEN(config_stats, bool) +CTL_RO_CONFIG_GEN(config_thp, bool) +CTL_RO_CONFIG_GEN(config_utrace, bool) +CTL_RO_CONFIG_GEN(config_xmalloc, bool) /******************************************************************************/ CTL_RO_NL_GEN(opt_abort, opt_abort, bool) +CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool) +CTL_RO_NL_GEN(opt_retain, opt_retain, bool) CTL_RO_NL_GEN(opt_dss, opt_dss, const char *) -CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t) -CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t) -CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t) +CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned) +CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena], + const char *) +CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool) +CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t) +CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t) CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool) -CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool) -CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t) -CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool) +CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *) +CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *) CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool) CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool) -CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool) CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool) -CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool) -CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) +CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool) +CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t) CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool) CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *) -CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* 
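background_thread_ctl() takes both ctl_mtx and background_thread_lock, and rejects enablement with EFAULT when pthread_create could not be resolved at load time. From the caller's side it is a plain boolean mallctl:

#include <stdbool.h>
#include <jemalloc/jemalloc.h>

/* Returns 0 on success, ENOENT if built without background threads. */
static int
set_background_thread(bool enable) {
    return mallctl("background_thread", NULL, NULL, &enable,
        sizeof(enable));
}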
Mutable. */ +CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) +CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init, + opt_prof_thread_active_init, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t) CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool) CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t) @@ -1181,504 +1599,1100 @@ CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool) /******************************************************************************/ static int -thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; + arena_t *oldarena; unsigned newind, oldind; - malloc_mutex_lock(&ctl_mtx); - newind = oldind = choose_arena(NULL)->ind; + oldarena = arena_choose(tsd, NULL); + if (oldarena == NULL) { + return EAGAIN; + } + newind = oldind = arena_ind_get(oldarena); WRITE(newind, unsigned); READ(oldind, unsigned); + if (newind != oldind) { - arena_t *arena; + arena_t *newarena; - if (newind >= ctl_stats.narenas) { + if (newind >= narenas_total_get()) { /* New arena index is out of range. */ ret = EFAULT; goto label_return; } + if (have_percpu_arena && + PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + if (newind < percpu_arena_ind_limit(opt_percpu_arena)) { + /* + * If perCPU arena is enabled, thread_arena + * control is not allowed for the auto arena + * range. + */ + ret = EPERM; + goto label_return; + } + } + /* Initialize arena if necessary. */ - malloc_mutex_lock(&arenas_lock); - if ((arena = arenas[newind]) == NULL && (arena = - arenas_extend(newind)) == NULL) { - malloc_mutex_unlock(&arenas_lock); + newarena = arena_get(tsd_tsdn(tsd), newind, true); + if (newarena == NULL) { ret = EAGAIN; goto label_return; } - assert(arena == arenas[newind]); - arenas[oldind]->nthreads--; - arenas[newind]->nthreads++; - malloc_mutex_unlock(&arenas_lock); - - /* Set new arena association. */ - if (config_tcache) { - tcache_t *tcache; - if ((uintptr_t)(tcache = *tcache_tsd_get()) > - (uintptr_t)TCACHE_STATE_MAX) { - tcache_arena_dissociate(tcache); - tcache_arena_associate(tcache, arena); - } + /* Set new arena/tcache associations. 
*/ + arena_migrate(tsd, oldind, newind); + if (tcache_available(tsd)) { + tcache_arena_reassociate(tsd_tsdn(tsd), + tsd_tcachep_get(tsd), newarena); } - arenas_tsd_set(&arena); } ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } -CTL_RO_NL_CGEN(config_stats, thread_allocated, - thread_allocated_tsd_get()->allocated, uint64_t) -CTL_RO_NL_CGEN(config_stats, thread_allocatedp, - &thread_allocated_tsd_get()->allocated, uint64_t *) -CTL_RO_NL_CGEN(config_stats, thread_deallocated, - thread_allocated_tsd_get()->deallocated, uint64_t) -CTL_RO_NL_CGEN(config_stats, thread_deallocatedp, - &thread_allocated_tsd_get()->deallocated, uint64_t *) +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get, + uint64_t *) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get, + uint64_t) +CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp, + tsd_thread_deallocatedp_get, uint64_t *) static int -thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (config_tcache == false) - return (ENOENT); - - oldval = tcache_enabled_get(); + oldval = tcache_enabled_get(tsd); if (newp != NULL) { if (newlen != sizeof(bool)) { ret = EINVAL; goto label_return; } - tcache_enabled_set(*(bool *)newp); + tcache_enabled_set(tsd, *(bool *)newp); } READ(oldval, bool); ret = 0; label_return: - return (ret); + return ret; +} + +static int +thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (!tcache_available(tsd)) { + ret = EFAULT; + goto label_return; + } + + READONLY(); + WRITEONLY(); + + tcache_flush(tsd); + + ret = 0; +label_return: + return ret; +} + +static int +thread_prof_name_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + + if (!config_prof) { + return ENOENT; + } + + READ_XOR_WRITE(); + + if (newp != NULL) { + if (newlen != sizeof(const char *)) { + ret = EINVAL; + goto label_return; + } + + if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) != + 0) { + goto label_return; + } + } else { + const char *oldname = prof_thread_name_get(tsd); + READ(oldname, const char *); + } + + ret = 0; +label_return: + return ret; } static int -thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +thread_prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; + bool oldval; - if (config_tcache == false) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } + + oldval = prof_thread_active_get(tsd); + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + if (prof_thread_active_set(tsd, *(bool *)newp)) { + ret = EAGAIN; + goto label_return; + } + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +/******************************************************************************/ + +static int +tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; READONLY(); + if 
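The thread.* namespace above binds the calling thread: thread_arena_ctl() migrates it between arenas (returning EPERM for the automatic range when percpu arenas are active), and the tcache leaves toggle or flush its cache. A sketch combining the two:

#include <stdbool.h>
#include <jemalloc/jemalloc.h>

static int
pin_thread_to_arena(unsigned arena_ind) {
    bool tcache_on = false;
    int ret = mallctl("thread.arena", NULL, NULL, &arena_ind,
        sizeof(arena_ind));
    if (ret != 0) {
        return ret;
    }
    /* Optional: bypass the tcache so frees return to the arena at once. */
    return mallctl("thread.tcache.enabled", NULL, NULL, &tcache_on,
        sizeof(tcache_on));
}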
(tcaches_create(tsd, &tcache_ind)) { + ret = EFAULT; + goto label_return; + } + READ(tcache_ind, unsigned); + + ret = 0; +label_return: + return ret; +} + +static int +tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_flush(tsd, tcache_ind); + + ret = 0; +label_return: + return ret; +} + +static int +tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned tcache_ind; - tcache_flush(); + WRITEONLY(); + tcache_ind = UINT_MAX; + WRITE(tcache_ind, unsigned); + if (tcache_ind == UINT_MAX) { + ret = EFAULT; + goto label_return; + } + tcaches_destroy(tsd, tcache_ind); ret = 0; label_return: - return (ret); + return ret; } /******************************************************************************/ -/* ctl_mutex must be held during execution of this function. */ +static int +arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + tsdn_t *tsdn = tsd_tsdn(tsd); + unsigned arena_ind; + bool initialized; + + READONLY(); + MIB_UNSIGNED(arena_ind, 1); + + malloc_mutex_lock(tsdn, &ctl_mtx); + initialized = arenas_i(arena_ind)->initialized; + malloc_mutex_unlock(tsdn, &ctl_mtx); + + READ(initialized, bool); + + ret = 0; +label_return: + return ret; +} + static void -arena_purge(unsigned arena_ind) -{ - VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas); +arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) { + malloc_mutex_lock(tsdn, &ctl_mtx); + { + unsigned narenas = ctl_arenas->narenas; - malloc_mutex_lock(&arenas_lock); - memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas); - malloc_mutex_unlock(&arenas_lock); + /* + * Access via index narenas is deprecated, and scheduled for + * removal in 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) { + unsigned i; + VARIABLE_ARRAY(arena_t *, tarenas, narenas); - if (arena_ind == ctl_stats.narenas) { - unsigned i; - for (i = 0; i < ctl_stats.narenas; i++) { - if (tarenas[i] != NULL) - arena_purge_all(tarenas[i]); + for (i = 0; i < narenas; i++) { + tarenas[i] = arena_get(tsdn, i, false); + } + + /* + * No further need to hold ctl_mtx, since narenas and + * tarenas contain everything needed below. + */ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + for (i = 0; i < narenas; i++) { + if (tarenas[i] != NULL) { + arena_decay(tsdn, tarenas[i], false, + all); + } + } + } else { + arena_t *tarena; + + assert(arena_ind < narenas); + + tarena = arena_get(tsdn, arena_ind, false); + + /* No further need to hold ctl_mtx. 
*/ + malloc_mutex_unlock(tsdn, &ctl_mtx); + + if (tarena != NULL) { + arena_decay(tsdn, tarena, false, all); + } } - } else { - assert(arena_ind < ctl_stats.narenas); - if (tarenas[arena_ind] != NULL) - arena_purge_all(tarenas[arena_ind]); } } static int -arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; + unsigned arena_ind; READONLY(); WRITEONLY(); - malloc_mutex_lock(&ctl_mtx); - arena_purge(mib[1]); - malloc_mutex_unlock(&ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, false); ret = 0; label_return: - return (ret); + return ret; } static int -arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ - int ret, i; - bool match, err; - const char *dss; - unsigned arena_ind = mib[1]; +arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(arena_ind, 1); + arena_i_decay(tsd_tsdn(tsd), arena_ind, true); + + ret = 0; +label_return: + return ret; +} + +static int +arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind, + arena_t **arena) { + int ret; + + READONLY(); + WRITEONLY(); + MIB_UNSIGNED(*arena_ind, 1); + + *arena = arena_get(tsd_tsdn(tsd), *arena_ind, false); + if (*arena == NULL || arena_is_auto(*arena)) { + ret = EFAULT; + goto label_return; + } + + ret = 0; +label_return: + return ret; +} + +static void +arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) { + /* Temporarily disable the background thread during arena reset. 
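arena_i_decay() accepts MALLCTL_ARENAS_ALL (and, until 6.0.0, the deprecated narenas index) and fans out across tarenas; the decay leaf applies time-based decay, while purge passes all = true to discard every dirty and muzzy page immediately. For example:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static int
purge_all_arenas(void) {
    char name[64];
    snprintf(name, sizeof(name), "arena.%u.purge",
        (unsigned)MALLCTL_ARENAS_ALL);
    return mallctl(name, NULL, NULL, NULL, 0);
}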
*/ + if (have_background_thread) { + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + if (background_thread_enabled()) { + unsigned ind = arena_ind % ncpus; + background_thread_info_t *info = + &background_thread_info[ind]; + assert(info->state == background_thread_started); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_paused; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + } +} + +static void +arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) { + if (have_background_thread) { + if (background_thread_enabled()) { + unsigned ind = arena_ind % ncpus; + background_thread_info_t *info = + &background_thread_info[ind]; + assert(info->state == background_thread_paused); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + info->state = background_thread_started; + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + } +} + +static int +arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + return ret; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + arena_reset(tsd, arena); + arena_reset_finish_background_thread(tsd, arena_ind); + + return ret; +} + +static int +arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + ctl_arena_t *ctl_darena, *ctl_arena; + + ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp, + newp, newlen, &arena_ind, &arena); + if (ret != 0) { + goto label_return; + } + + if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena, + true) != 0) { + ret = EFAULT; + goto label_return; + } + + arena_reset_prepare_background_thread(tsd, arena_ind); + /* Merge stats after resetting and purging arena. */ + arena_reset(tsd, arena); + arena_decay(tsd_tsdn(tsd), arena, false, true); + ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED); + ctl_darena->initialized = true; + ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true); + /* Destroy arena. */ + arena_destroy(tsd, arena); + ctl_arena = arenas_i(arena_ind); + ctl_arena->initialized = false; + /* Record arena index for later recycling via arenas.create. 
*/ + ql_elm_new(ctl_arena, destroyed_link); + ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link); + arena_reset_finish_background_thread(tsd, arena_ind); + + assert(ret == 0); +label_return: + return ret; +} + +static int +arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + const char *dss = NULL; + unsigned arena_ind; dss_prec_t dss_prec_old = dss_prec_limit; dss_prec_t dss_prec = dss_prec_limit; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); WRITE(dss, const char *); - match = false; - for (i = 0; i < dss_prec_limit; i++) { - if (strcmp(dss_prec_names[i], dss) == 0) { - dss_prec = i; - match = true; - break; + MIB_UNSIGNED(arena_ind, 1); + if (dss != NULL) { + int i; + bool match = false; + + for (i = 0; i < dss_prec_limit; i++) { + if (strcmp(dss_prec_names[i], dss) == 0) { + dss_prec = i; + match = true; + break; + } + } + + if (!match) { + ret = EINVAL; + goto label_return; } - } - if (match == false) { - ret = EINVAL; - goto label_return; } - if (arena_ind < ctl_stats.narenas) { - arena_t *arena = arenas[arena_ind]; - if (arena != NULL) { - dss_prec_old = arena_dss_prec_get(arena); - arena_dss_prec_set(arena, dss_prec); - err = false; - } else - err = true; + /* + * Access via index narenas is deprecated, and scheduled for removal in + * 6.0.0. + */ + if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == + ctl_arenas->narenas) { + if (dss_prec != dss_prec_limit && + extent_dss_prec_set(dss_prec)) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = extent_dss_prec_get(); } else { - dss_prec_old = chunk_dss_prec_get(); - err = chunk_dss_prec_set(dss_prec); + arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL || (dss_prec != dss_prec_limit && + arena_dss_prec_set(arena, dss_prec))) { + ret = EFAULT; + goto label_return; + } + dss_prec_old = arena_dss_prec_get(arena); } + dss = dss_prec_names[dss_prec_old]; READ(dss, const char *); - if (err) { + + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static int +arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { + int ret; + unsigned arena_ind; + arena_t *arena; + + MIB_UNSIGNED(arena_ind, 1); + arena = arena_get(tsd_tsdn(tsd), arena_ind, false); + if (arena == NULL) { ret = EFAULT; goto label_return; } + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) : + arena_muzzy_decay_ms_get(arena); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (dirty ? 
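arena_i_destroy_ctl() refuses arenas that still have attached threads, merges the final stats into the MALLCTL_ARENAS_DESTROYED slot, and queues the index on ctl_arenas->destroyed so a later arenas.create can recycle it. Lifecycle sketch:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static int
make_and_destroy_arena(void) {
    unsigned ind;
    size_t sz = sizeof(ind);
    char name[64];

    if (mallctl("arenas.create", &ind, &sz, NULL, 0) != 0) {
        return -1;
    }
    /* ... allocate from it with mallocx(size, MALLOCX_ARENA(ind)) ... */
    snprintf(name, sizeof(name), "arena.%u.destroy", ind);
    return mallctl(name, NULL, NULL, NULL, 0);
}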
arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena, + *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd), + arena, *(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } + } + ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } -static const ctl_named_node_t * -arena_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; +static int +arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas) { - ret = NULL; +static int +arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); +} + +static int +arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + int ret; + unsigned arena_ind; + arena_t *arena; + + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + MIB_UNSIGNED(arena_ind, 1); + if (arena_ind < narenas_total_get() && (arena = + arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) { + if (newp != NULL) { + extent_hooks_t *old_extent_hooks; + extent_hooks_t *new_extent_hooks + JEMALLOC_CC_SILENCE_INIT(NULL); + WRITE(new_extent_hooks, extent_hooks_t *); + old_extent_hooks = extent_hooks_set(tsd, arena, + new_extent_hooks); + READ(old_extent_hooks, extent_hooks_t *); + } else { + extent_hooks_t *old_extent_hooks = + extent_hooks_get(arena); + READ(old_extent_hooks, extent_hooks_t *); + } + } else { + ret = EFAULT; goto label_return; } + ret = 0; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; +} + +static const ctl_named_node_t * +arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + const ctl_named_node_t *ret; + + malloc_mutex_lock(tsdn, &ctl_mtx); + switch (i) { + case MALLCTL_ARENAS_ALL: + case MALLCTL_ARENAS_DESTROYED: + break; + default: + if (i > ctl_arenas->narenas) { + ret = NULL; + goto label_return; + } + break; + } ret = super_arena_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; } /******************************************************************************/ static int -arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; unsigned narenas; - malloc_mutex_lock(&ctl_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); READONLY(); if (*oldlenp != sizeof(unsigned)) { ret = EINVAL; goto label_return; } - narenas = ctl_stats.narenas; + narenas = ctl_arenas->narenas; READ(narenas, unsigned); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } static int -arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp, - size_t *oldlenp, void *newp, size_t newlen) -{ +arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) { int ret; - unsigned nread, i; - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (*oldlenp != ctl_stats.narenas * sizeof(bool)) { - ret = 
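arena_i_decay_ms_ctl_impl() exposes both decay clocks through one helper; by jemalloc's decay convention, -1 disables decay, 0 purges immediately, and a positive value sets the decay window in milliseconds. Setter sketch:

#include <stdio.h>
#include <sys/types.h>
#include <jemalloc/jemalloc.h>

static int
set_dirty_decay_ms(unsigned arena_ind, ssize_t decay_ms) {
    char name[64];
    snprintf(name, sizeof(name), "arena.%u.dirty_decay_ms", arena_ind);
    return mallctl(name, NULL, NULL, &decay_ms, sizeof(decay_ms));
}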
EINVAL; - nread = (*oldlenp < ctl_stats.narenas * sizeof(bool)) - ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas; - } else { - ret = 0; - nread = ctl_stats.narenas; + if (oldp != NULL && oldlenp != NULL) { + size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() : + arena_muzzy_decay_ms_default_get()); + READ(oldval, ssize_t); + } + if (newp != NULL) { + if (newlen != sizeof(ssize_t)) { + ret = EINVAL; + goto label_return; + } + if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp) + : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) { + ret = EFAULT; + goto label_return; + } } - for (i = 0; i < nread; i++) - ((bool *)oldp)[i] = ctl_stats.arenas[i].initialized; - + ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; +} + +static int +arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, true); +} + +static int +arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp, + newlen, false); } CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t) CTL_RO_NL_GEN(arenas_page, PAGE, size_t) -CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t) +CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t) CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned) -CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned) +CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned) CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t) CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t) -CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t) +CTL_RO_NL_GEN(arenas_bin_i_slab_size, arena_bin_info[mib[2]].slab_size, size_t) static const ctl_named_node_t * -arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > NBINS) - return (NULL); - return (super_arenas_bin_i_node); +arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + if (i > NBINS) { + return NULL; + } + return super_arenas_bin_i_node; } -CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t) -CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t) +CTL_RO_NL_GEN(arenas_nlextents, NSIZES - NBINS, unsigned) +CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(NBINS+(szind_t)mib[2]), + size_t) static const ctl_named_node_t * -arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i) -{ - - if (i > nlclasses) - return (NULL); - return (super_arenas_lrun_i_node); +arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t i) { + if (i > NSIZES - NBINS) { + return NULL; + } + return super_arenas_lextent_i_node; } static int -arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; + extent_hooks_t *extent_hooks; unsigned arena_ind; - malloc_mutex_lock(&ctl_mtx); - WRITEONLY(); - arena_ind = UINT_MAX; - WRITE(arena_ind, unsigned); - if (newp != NULL && arena_ind >= ctl_stats.narenas) - ret = EFAULT; - else { - if (arena_ind == UINT_MAX) - arena_ind = ctl_stats.narenas; - arena_purge(arena_ind); - ret = 0; + malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx); + + extent_hooks = (extent_hooks_t 
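The arenas.bin.<i>.* leaves above now report slab_size in place of the 4.x run_size, and the large classes moved from lrun to lextent. Introspecting the small size classes:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
print_small_size_classes(void) {
    unsigned nbins, i;
    size_t sz = sizeof(nbins);

    if (mallctl("arenas.nbins", &nbins, &sz, NULL, 0) != 0) {
        return;
    }
    for (i = 0; i < nbins; i++) {
        char name[64];
        size_t reg_size, vsz = sizeof(reg_size);
        snprintf(name, sizeof(name), "arenas.bin.%u.size", i);
        if (mallctl(name, &reg_size, &vsz, NULL, 0) == 0) {
            printf("bin %u: %zu bytes\n", i, reg_size);
        }
    }
}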
*)&extent_hooks_default; + WRITE(extent_hooks, extent_hooks_t *); + if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) { + ret = EAGAIN; + goto label_return; } + READ(arena_ind, unsigned); + ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx); + return ret; } +/******************************************************************************/ + static int -arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { int ret; - unsigned narenas; + bool oldval; - malloc_mutex_lock(&ctl_mtx); - READONLY(); - if (ctl_grow()) { - ret = EAGAIN; - goto label_return; + if (!config_prof) { + return ENOENT; } - narenas = ctl_stats.narenas - 1; - READ(narenas, unsigned); + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_thread_active_init_set(tsd_tsdn(tsd), + *(bool *)newp); + } else { + oldval = prof_thread_active_init_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } -/******************************************************************************/ - static int -prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; bool oldval; - if (config_prof == false) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } - malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */ - oldval = opt_prof_active; if (newp != NULL) { - /* - * The memory barriers will tend to make opt_prof_active - * propagate faster on systems with weak memory ordering. 
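
arenas_create_ctl above replaces the old arenas.extend handler: it initializes a fresh arena (optionally with caller-supplied extent hooks written through newp) and reads the new index back. A minimal sketch (not part of the patch) pairing that index with mallocx():

    #include <jemalloc/jemalloc.h>
    #include <stddef.h>

    void *alloc_from_new_arena(size_t size) {
        unsigned arena_ind;
        size_t sz = sizeof(arena_ind);

        /* Returns EAGAIN if ctl_arena_init() fails, per the handler above. */
        if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0)
            return NULL;

        /* Allocate from the new arena, bypassing the thread cache. */
        return mallocx(size, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
    }
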
- */ - mb_write(); - WRITE(opt_prof_active, bool); - mb_write(); + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp); + } else { + oldval = prof_active_get(tsd_tsdn(tsd)); } READ(oldval, bool); ret = 0; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + return ret; } static int -prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ +prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { int ret; const char *filename = NULL; - if (config_prof == false) - return (ENOENT); + if (!config_prof) { + return ENOENT; + } WRITEONLY(); WRITE(filename, const char *); - if (prof_mdump(filename)) { + if (prof_mdump(tsd, filename)) { ret = EFAULT; goto label_return; } ret = 0; label_return: - return (ret); + return ret; +} + +static int +prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + bool oldval; + + if (!config_prof) { + return ENOENT; + } + + if (newp != NULL) { + if (newlen != sizeof(bool)) { + ret = EINVAL; + goto label_return; + } + oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp); + } else { + oldval = prof_gdump_get(tsd_tsdn(tsd)); + } + READ(oldval, bool); + + ret = 0; +label_return: + return ret; +} + +static int +prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, + size_t *oldlenp, void *newp, size_t newlen) { + int ret; + size_t lg_sample = lg_prof_sample; + + if (!config_prof) { + return ENOENT; + } + + WRITEONLY(); + WRITE(lg_sample, size_t); + if (lg_sample >= (sizeof(uint64_t) << 3)) { + lg_sample = (sizeof(uint64_t) << 3) - 1; + } + + prof_reset(tsd, lg_sample); + + ret = 0; +label_return: + return ret; } CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t) +CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t) /******************************************************************************/ -CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *) -CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t) -CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t) -CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t) +CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t) +CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t) +CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t) +CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t) +CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t) +CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t) + +CTL_RO_CGEN(config_stats, stats_background_thread_num_threads, + ctl_stats->background_thread.num_threads, size_t) +CTL_RO_CGEN(config_stats, stats_background_thread_num_runs, + ctl_stats->background_thread.num_runs, uint64_t) +CTL_RO_CGEN(config_stats, stats_background_thread_run_interval, + nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t) + +CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *) +CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms, + ssize_t) +CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms, + ssize_t) +CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned) +CTL_RO_GEN(stats_arenas_i_uptime, + nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), 
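
The prof.* handlers above all follow the same READ/WRITE shape and return ENOENT when the build lacks profiling. A sketch (not part of the patch) of toggling sampling around a region of interest and then dumping, assuming a build configured with profiling and run with MALLOC_CONF=prof:true; the dump path is hypothetical:

    #include <jemalloc/jemalloc.h>
    #include <stdbool.h>

    static void profile_region(void (*work)(void)) {
        bool active = true;
        const char *filename = "/tmp/region.heap"; /* hypothetical path */

        mallctl("prof.active", NULL, NULL, &active, sizeof(active));
        work();
        /* prof_dump_ctl() is write-only; the filename travels via newp. */
        mallctl("prof.dump", NULL, NULL, &filename, sizeof(filename));

        active = false;
        mallctl("prof.active", NULL, NULL, &active, sizeof(active));
    }
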
uint64_t) +CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t) +CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t) +CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_retained, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED), + size_t) -CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current, +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge, + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.npurge), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise, + arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged, + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_dirty.purged), + uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge, + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise, + arena_stats_read_u64( + &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged, + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.decay_muzzy.purged), + uint64_t) + +CTL_RO_CGEN(config_stats, stats_arenas_i_base, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_internal, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED), + size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes, + ATOMIC_RELAXED), size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_resident, + atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED), size_t) -CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t) -CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t) -CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t) -CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t) - -CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *) -CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned) -CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t) -CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_mapped, - ctl_stats.arenas[mib[2]].astats.mapped, size_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_npurge, - ctl_stats.arenas[mib[2]].astats.npurge, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise, - ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_purged, - ctl_stats.arenas[mib[2]].astats.purged, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated, - ctl_stats.arenas[mib[2]].allocated_small, size_t) + arenas_i(mib[2])->astats->allocated_small, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc, - ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t) + arenas_i(mib[2])->astats->nmalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc, - ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t) + 
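
The stats readers above (CTL_RO_CGEN expansions over ctl_stats and the per-arena relaxed atomics) serve snapshots, not live counters: totals refresh only when the caller advances the "epoch" mallctl. The usual reading idiom, as a sketch (not part of the patch):

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    static void print_mem_stats(void) {
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        /* Refresh the cached ctl_stats snapshot first. */
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        size_t allocated, active, resident, retained;
        sz = sizeof(size_t);
        mallctl("stats.allocated", &allocated, &sz, NULL, 0);
        mallctl("stats.active", &active, &sz, NULL, 0);
        mallctl("stats.resident", &resident, &sz, NULL, 0);
        mallctl("stats.retained", &retained, &sz, NULL, 0);
        printf("allocated=%zu active=%zu resident=%zu retained=%zu\n",
            allocated, active, resident, retained);
    }
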
arenas_i(mib[2])->astats->ndalloc_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests, - ctl_stats.arenas[mib[2]].nrequests_small, uint64_t) + arenas_i(mib[2])->astats->nrequests_small, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated, - ctl_stats.arenas[mib[2]].astats.allocated_large, size_t) + atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large, + ATOMIC_RELAXED), size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc, - ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t) + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc, - ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t) + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.ndalloc_large), + uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests, - ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t) + arena_stats_read_u64(&arenas_i(mib[2])->astats->astats.nmalloc_large), + uint64_t) /* Intentional. */ + +/* Lock profiling related APIs below. */ +#define RO_MUTEX_CTL_GEN(n, l) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_ops, \ + l.n_lock_ops, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_wait, \ + l.n_wait_times, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq, \ + l.n_spin_acquired, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch, \ + l.n_owner_switches, uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time, \ + nstime_ns(&l.tot_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time, \ + nstime_ns(&l.max_wait_time), uint64_t) \ +CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds, \ + l.max_n_thds, uint32_t) + +/* Global mutexes. */ +#define OP(mtx) \ + RO_MUTEX_CTL_GEN(mutexes_##mtx, \ + ctl_stats->mutex_prof_data[global_prof_mutex_##mtx]) +MUTEX_PROF_GLOBAL_MUTEXES +#undef OP + +/* Per arena mutexes */ +#define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx, \ + arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx]) +MUTEX_PROF_ARENA_MUTEXES +#undef OP + +/* tcache bin mutex */ +RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex, + arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data) +#undef RO_MUTEX_CTL_GEN + +/* Resets all mutex stats, including global, arena and bin mutexes. */ +static int +stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, + void *oldp, size_t *oldlenp, void *newp, size_t newlen) { + if (!config_stats) { + return ENOENT; + } + + tsdn_t *tsdn = tsd_tsdn(tsd); + +#define MUTEX_PROF_RESET(mtx) \ + malloc_mutex_lock(tsdn, &mtx); \ + malloc_mutex_prof_data_reset(tsdn, &mtx); \ + malloc_mutex_unlock(tsdn, &mtx); + + /* Global mutexes: ctl and prof. */ + MUTEX_PROF_RESET(ctl_mtx); + if (have_background_thread) { + MUTEX_PROF_RESET(background_thread_lock); + } + if (config_prof && opt_prof) { + MUTEX_PROF_RESET(bt2gctx_mtx); + } + + + /* Per arena mutexes. 
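
RO_MUTEX_CTL_GEN above stamps out seven read-only counters per profiled mutex, surfaced as stats.mutexes.<mutex>.* for the globals and stats.arenas.<i>.mutexes.<mutex>.* per arena. A sketch (not part of the patch; leaf names assumed from the macro's suffixes) of sampling contention on the global ctl mutex:

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    static void report_ctl_mutex(void) {
        uint64_t epoch = 1, num_ops, num_wait, total_wait;
        size_t sz = sizeof(uint64_t);

        /* Stats are snapshots; bump the epoch before reading. */
        mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
        mallctl("stats.mutexes.ctl.num_ops", &num_ops, &sz, NULL, 0);
        mallctl("stats.mutexes.ctl.num_wait", &num_wait, &sz, NULL, 0);
        mallctl("stats.mutexes.ctl.total_wait_time", &total_wait, &sz,
            NULL, 0);
        printf("ctl mutex: %llu ops, %llu waits, %llu ns waiting\n",
            (unsigned long long)num_ops, (unsigned long long)num_wait,
            (unsigned long long)total_wait);
    }
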
*/ + unsigned n = narenas_total_get(); + + for (unsigned i = 0; i < n; i++) { + arena_t *arena = arena_get(tsdn, i, false); + if (!arena) { + continue; + } + MUTEX_PROF_RESET(arena->large_mtx); + MUTEX_PROF_RESET(arena->extent_avail_mtx); + MUTEX_PROF_RESET(arena->extents_dirty.mtx); + MUTEX_PROF_RESET(arena->extents_muzzy.mtx); + MUTEX_PROF_RESET(arena->extents_retained.mtx); + MUTEX_PROF_RESET(arena->decay_dirty.mtx); + MUTEX_PROF_RESET(arena->decay_muzzy.mtx); + MUTEX_PROF_RESET(arena->tcache_ql_mtx); + MUTEX_PROF_RESET(arena->base->mtx); + + for (szind_t i = 0; i < NBINS; i++) { + arena_bin_t *bin = &arena->bins[i]; + MUTEX_PROF_RESET(bin->lock); + } + } +#undef MUTEX_PROF_RESET + return 0; +} -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated, - ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc, - ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t) + arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t) CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t) -CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns, - ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t) + arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs, + arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills, + arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes, + arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs, + arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t) static const ctl_named_node_t * -stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > NBINS) - return (NULL); - return (super_stats_arenas_i_bins_j_node); +stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { + if (j > NBINS) { + return NULL; + } + return super_stats_arenas_i_bins_j_node; } -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc, - ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests, - ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t) -CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns, - ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, 
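
stats_mutexes_reset_ctl above walks every profiled mutex (globals, per-arena, per-bin) and zeroes its data; it surfaces as the write-only "stats.mutexes.reset" name. The neighboring per-bin counters are cheapest to poll through a MIB, so the <i>/<j> path components can be patched without re-parsing the name. A sketch, not part of the patch:

    #include <jemalloc/jemalloc.h>
    #include <stddef.h>

    /* Zero all mutex profiling counters (write-only: no oldp, no newp). */
    static void reset_mutex_stats(void) {
        mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
    }

    static size_t bin_curslabs(unsigned arena, size_t bin) {
        size_t mib[8], miblen = sizeof(mib) / sizeof(mib[0]);
        size_t curslabs, sz = sizeof(curslabs);

        mallctlnametomib("stats.arenas.0.bins.0.curslabs", mib, &miblen);
        mib[2] = (size_t)arena; /* the <i> component */
        mib[4] = bin;           /* the <j> component */
        mallctlbymib(mib, miblen, &curslabs, &sz, NULL, 0);
        return curslabs;
    }
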
size_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc, + arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc, + arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests, + arena_stats_read_u64(&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), + uint64_t) +CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents, + arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t) static const ctl_named_node_t * -stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j) -{ - - if (j > nlclasses) - return (NULL); - return (super_stats_arenas_i_lruns_j_node); +stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, + size_t j) { + if (j > NSIZES - NBINS) { + return NULL; + } + return super_stats_arenas_i_lextents_j_node; } static const ctl_named_node_t * -stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i) -{ - const ctl_named_node_t * ret; +stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen, size_t i) { + const ctl_named_node_t *ret; + size_t a; - malloc_mutex_lock(&ctl_mtx); - if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) { + malloc_mutex_lock(tsdn, &ctl_mtx); + a = arenas_i2a_impl(i, true, true); + if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) { ret = NULL; goto label_return; } ret = super_stats_arenas_i_node; label_return: - malloc_mutex_unlock(&ctl_mtx); - return (ret); + malloc_mutex_unlock(tsdn, &ctl_mtx); + return ret; } diff --git a/dep/jemalloc/src/extent.c b/dep/jemalloc/src/extent.c index 8c09b486ed8..fa45c84d34f 100644 --- a/dep/jemalloc/src/extent.c +++ b/dep/jemalloc/src/extent.c @@ -1,39 +1,1987 @@ -#define JEMALLOC_EXTENT_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_EXTENT_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/ph.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" + +/******************************************************************************/ +/* Data. */ + +rtree_t extents_rtree; +/* Keyed by the address of the extent_t being protected. 
*/ +mutex_pool_t extent_mutex_pool; + +static const bitmap_info_t extents_bitmap_info = + BITMAP_INFO_INITIALIZER(NPSIZES+1); + +static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit, + unsigned arena_ind); +static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, bool committed, unsigned arena_ind); +static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +static bool extent_decommit_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#ifdef PAGES_CAN_PURGE_LAZY +static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +#ifdef PAGES_CAN_PURGE_FORCED +static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, + void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); +#endif +static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained); +#ifdef JEMALLOC_MAPS_COALESCE +static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t size_a, size_t size_b, bool committed, + unsigned arena_ind); +#endif +static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained); +#ifdef JEMALLOC_MAPS_COALESCE +static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, + size_t size_a, void *addr_b, size_t size_b, bool committed, + unsigned arena_ind); +#endif +static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained); + +const extent_hooks_t extent_hooks_default = { + extent_alloc_default, + extent_dalloc_default, + extent_destroy_default, + extent_commit_default, + extent_decommit_default +#ifdef PAGES_CAN_PURGE_LAZY + , + extent_purge_lazy_default +#else + , + NULL +#endif +#ifdef PAGES_CAN_PURGE_FORCED + , + extent_purge_forced_default +#else + , + NULL +#endif +#ifdef JEMALLOC_MAPS_COALESCE + , + extent_split_default, + extent_merge_default +#endif +}; + +/* Used exclusively for gdump triggering. */ +static atomic_zu_t curpages; +static atomic_zu_t highpages; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
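
extent_hooks_default above is the table installed when "arenas.create" is written with no hooks; callers can substitute their own through "arena.<i>.extent_hooks". A minimal sketch (not part of the patch) that wraps only the alloc hook and forwards to the previously installed table; the logging is illustrative, and note the patch wraps user hooks in pre/post reentrancy guards precisely because code like fprintf() may itself allocate:

    #include <jemalloc/jemalloc.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Copy of the hooks we replace; assumes the previous hooks don't rely
     * on their own table address. */
    static extent_hooks_t old_hooks;
    static extent_hooks_t logging_hooks;

    static void *
    log_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
        size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
        fprintf(stderr, "extent alloc: %zu bytes (arena %u)\n", size,
            arena_ind);
        return old_hooks.alloc(&old_hooks, new_addr, size, alignment, zero,
            commit, arena_ind);
    }

    static void install_logging_hooks(unsigned arena_ind) {
        char name[64];
        extent_hooks_t *cur;
        size_t sz = sizeof(cur);

        snprintf(name, sizeof(name), "arena.%u.extent_hooks", arena_ind);
        mallctl(name, &cur, &sz, NULL, 0); /* read the current table */
        old_hooks = *cur;
        logging_hooks = *cur;
        logging_hooks.alloc = log_alloc;

        extent_hooks_t *new_hooks = &logging_hooks;
        mallctl(name, NULL, NULL, &new_hooks, sizeof(new_hooks));
    }
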
+ */ + +static void extent_deregister(tsdn_t *tsdn, extent_t *extent); +static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, + size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind, + bool *zero, bool *commit, bool growing_retained); +static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained); +static void extent_record(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, + bool growing_retained); /******************************************************************************/ -static inline int -extent_szad_comp(extent_node_t *a, extent_node_t *b) -{ - int ret; - size_t a_size = a->size; - size_t b_size = b->size; +rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link, + extent_esnead_comp) + +typedef enum { + lock_result_success, + lock_result_failure, + lock_result_no_extent +} lock_result_t; + +static lock_result_t +extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, + extent_t **result) { + extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, + elm, true); + + if (extent1 == NULL) { + return lock_result_no_extent; + } + /* + * It's possible that the extent changed out from under us, and with it + * the leaf->extent mapping. We have to recheck while holding the lock. + */ + extent_lock(tsdn, extent1); + extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, + &extents_rtree, elm, true); + + if (extent1 == extent2) { + *result = extent1; + return lock_result_success; + } else { + extent_unlock(tsdn, extent1); + return lock_result_failure; + } +} + +/* + * Returns a pool-locked extent_t * if there's one associated with the given + * address, and NULL otherwise. 
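
extent_rtree_leaf_elm_try_lock above is a read/lock/recheck loop: the leaf-to-extent mapping is read without the lock, the extent's pool mutex is acquired, and the mapping re-read; only a stable answer leaves the loop. The same pattern in miniature, as a sketch against a hypothetical pointer slot whose referent carries its own lock (C11 atomics):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct obj_s { /* hypothetical object with its own lock */
        void (*lock)(struct obj_s *);
        void (*unlock)(struct obj_s *);
    } obj_t;

    /* Returns a locked *slot value that is guaranteed still current. */
    static obj_t *
    lock_from_slot(_Atomic(obj_t *) *slot) {
        for (;;) {
            obj_t *o1 = atomic_load(slot);
            if (o1 == NULL)
                return NULL;    /* nothing to lock */
            o1->lock(o1);
            /* Recheck: the slot may have been retargeted before we
             * acquired o1's lock. */
            if (atomic_load(slot) == o1)
                return o1;      /* stable: caller holds the lock */
            o1->unlock(o1);     /* raced; retry */
        }
    }
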
+ */ +static extent_t * +extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) { + extent_t *ret = NULL; + rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, + rtree_ctx, (uintptr_t)addr, false, false); + if (elm == NULL) { + return NULL; + } + lock_result_t lock_result; + do { + lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret); + } while (lock_result == lock_result_failure); + return ret; +} + +extent_t * +extent_alloc(tsdn_t *tsdn, arena_t *arena) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_t *extent = extent_avail_first(&arena->extent_avail); + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return base_alloc_extent(tsdn, arena->base); + } + extent_avail_remove(&arena->extent_avail, extent); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); + return extent; +} + +void +extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); + extent_avail_insert(&arena->extent_avail, extent); + malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); +} + +extent_hooks_t * +extent_hooks_get(arena_t *arena) { + return base_extent_hooks_get(arena->base); +} + +extent_hooks_t * +extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { + background_thread_info_t *info; + if (have_background_thread) { + info = arena_background_thread_info_get(arena); + malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); + } + extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); + if (have_background_thread) { + malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); + } + + return ret; +} + +static void +extent_hooks_assure_initialized(arena_t *arena, + extent_hooks_t **r_extent_hooks) { + if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { + *r_extent_hooks = extent_hooks_get(arena); + } +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_floor(size_t size) { + size_t ret; + pszind_t pind; + + assert(size > 0); + assert((size & PAGE_MASK) == 0); + + pind = sz_psz2ind(size - sz_large_pad + 1); + if (pind == 0) { + /* + * Avoid underflow. This short-circuit would also do the right + * thing for all sizes in the range for which there are + * PAGE-spaced size classes, but it's simplest to just handle + * the one case that would cause erroneous results. + */ + return size; + } + ret = sz_pind2sz(pind - 1) + sz_large_pad; + assert(ret <= size); + return ret; +} + +#ifndef JEMALLOC_JET +static +#endif +size_t +extent_size_quantize_ceil(size_t size) { + size_t ret; - ret = (a_size > b_size) - (a_size < b_size); - if (ret == 0) { - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; + assert(size > 0); + assert(size - sz_large_pad <= LARGE_MAXCLASS); + assert((size & PAGE_MASK) == 0); - ret = (a_addr > b_addr) - (a_addr < b_addr); + ret = extent_size_quantize_floor(size); + if (ret < size) { + /* + * Skip a quantization that may have an adequately large extent, + * because under-sized extents may be mixed in. This only + * happens when an unusual size is requested, i.e. for aligned + * allocation, and is just one of several places where linear + * search would potentially find sufficiently aligned available + * memory somewhere lower. + */ + ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + + sz_large_pad; } + return ret; +} + +/* Generate pairing heap functions. 
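
As a concrete check on the two quantization directions above (assuming 4 KiB pages and jemalloc's usual four-classes-per-doubling spacing, so the page-size classes around this range run 24K, 28K, 32K, 40K, 48K): a 36 KiB extent quantizes down via extent_size_quantize_floor() to the 32 KiB heap for placement, while a 36 KiB request quantizes up via extent_size_quantize_ceil() to 40 KiB before the fit search, so any extent found in a searched heap is guaranteed large enough.
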
*/ +ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) + +bool +extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, + bool delay_coalesce) { + if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, + malloc_mutex_rank_exclusive)) { + return true; + } + for (unsigned i = 0; i < NPSIZES+1; i++) { + extent_heap_new(&extents->heaps[i]); + } + bitmap_init(extents->bitmap, &extents_bitmap_info, true); + extent_list_init(&extents->lru); + atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); + extents->state = state; + extents->delay_coalesce = delay_coalesce; + return false; +} + +extent_state_t +extents_state_get(const extents_t *extents) { + return extents->state; +} - return (ret); +size_t +extents_npages_get(extents_t *extents) { + return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); } -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_szad_, extent_tree_t, extent_node_t, link_szad, - extent_szad_comp) +static void +extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent, + bool preserve_lru) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); -static inline int -extent_ad_comp(extent_node_t *a, extent_node_t *b) -{ - uintptr_t a_addr = (uintptr_t)a->addr; - uintptr_t b_addr = (uintptr_t)b->addr; + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_unset(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + extent_heap_insert(&extents->heaps[pind], extent); + if (!preserve_lru) { + extent_list_append(&extents->lru, extent); + } + size_t npages = size >> LG_PAGE; + /* + * All modifications to npages hold the mutex (as asserted above), so we + * don't need an atomic fetch-add; we can get by with a load followed by + * a store. + */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + atomic_store_zu(&extents->npages, cur_extents_npages + npages, + ATOMIC_RELAXED); +} - return ((a_addr > b_addr) - (a_addr < b_addr)); +static void +extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent, + bool preserve_lru) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + assert(extent_state_get(extent) == extents->state); + + size_t size = extent_size_get(extent); + size_t psz = extent_size_quantize_floor(size); + pszind_t pind = sz_psz2ind(psz); + extent_heap_remove(&extents->heaps[pind], extent); + if (extent_heap_empty(&extents->heaps[pind])) { + bitmap_set(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + } + if (!preserve_lru) { + extent_list_remove(&extents->lru, extent); + } + size_t npages = size >> LG_PAGE; + /* + * As in extents_insert_locked, we hold extents->mtx and so don't need + * atomic operations for updating extents->npages. + */ + size_t cur_extents_npages = + atomic_load_zu(&extents->npages, ATOMIC_RELAXED); + assert(cur_extents_npages >= npages); + atomic_store_zu(&extents->npages, + cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); } -/* Generate red-black tree functions. */ -rb_gen(, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, - extent_ad_comp) +/* Do any-best-fit extent selection, i.e. select any extent that best fits. 
*/ +static extent_t * +extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)pind); + if (i < NPSIZES+1) { + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_any(&extents->heaps[i]); + assert(extent_size_get(extent) >= size); + return extent; + } + + return NULL; +} + +/* + * Do first-fit extent selection, i.e. select the oldest/lowest extent that is + * large enough. + */ +static extent_t * +extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + extent_t *ret = NULL; + + pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); + for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, + &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i = + (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, + (size_t)i+1)) { + assert(!extent_heap_empty(&extents->heaps[i])); + extent_t *extent = extent_heap_first(&extents->heaps[i]); + assert(extent_size_get(extent) >= size); + if (ret == NULL || extent_snad_comp(extent, ret) < 0) { + ret = extent; + } + if (i == NPSIZES) { + break; + } + assert(i < NPSIZES); + } + + return ret; +} + +/* + * Do {best,first}-fit extent selection, where the selection policy choice is + * based on extents->delay_coalesce. Best-fit selection requires less + * searching, but its layout policy is less stable and may cause higher virtual + * memory fragmentation as a side effect. + */ +static extent_t * +extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + size_t size) { + malloc_mutex_assert_owner(tsdn, &extents->mtx); + + return extents->delay_coalesce ? 
extents_best_fit_locked(tsdn, arena, + extents, size) : extents_first_fit_locked(tsdn, arena, extents, + size); +} + +static bool +extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent) { + extent_state_set(extent, extent_state_active); + bool coalesced; + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, extent, &coalesced, false); + extent_state_set(extent, extents_state_get(extents)); + + if (!coalesced) { + return true; + } + extents_insert_locked(tsdn, extents, extent, true); + return false; +} + +extent_t * +extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size + pad != 0); + assert(alignment != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr, + size, pad, alignment, slab, szind, zero, commit, false); +} + +void +extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + extent_zeroed_set(extent, false); + + extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); +} + +extent_t * +extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, size_t npages_min) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + malloc_mutex_lock(tsdn, &extents->mtx); + + /* + * Get the LRU coalesced extent, if any. If coalescing was delayed, + * the loop will iterate until the LRU extent is fully coalesced. + */ + extent_t *extent; + while (true) { + /* Get the LRU extent, if any. */ + extent = extent_list_first(&extents->lru); + if (extent == NULL) { + goto label_return; + } + /* Check the eviction limit. */ + size_t npages = extent_size_get(extent) >> LG_PAGE; + size_t extents_npages = atomic_load_zu(&extents->npages, + ATOMIC_RELAXED); + if (extents_npages - npages < npages_min) { + extent = NULL; + goto label_return; + } + extents_remove_locked(tsdn, extents, extent, false); + if (!extents->delay_coalesce) { + break; + } + /* Try to coalesce. */ + if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent)) { + break; + } + /* + * The LRU extent was just coalesced and the result placed in + * the LRU at its neighbor's position. Start over. + */ + } + + /* + * Either mark the extent active or deregister it to protect against + * concurrent operations. 
+ */ + switch (extents_state_get(extents)) { + case extent_state_active: + not_reached(); + case extent_state_dirty: + case extent_state_muzzy: + extent_state_set(extent, extent_state_active); + break; + case extent_state_retained: + extent_deregister(tsdn, extent); + break; + default: + not_reached(); + } + +label_return: + malloc_mutex_unlock(tsdn, &extents->mtx); + return extent; +} + +static void +extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + /* + * Leak extent after making sure its pages have already been purged, so + * that this is only a virtual memory leak. + */ + if (extents_state_get(extents) == extent_state_dirty) { + if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, + extent, 0, extent_size_get(extent), growing_retained)) { + extent_purge_forced_impl(tsdn, arena, r_extent_hooks, + extent, 0, extent_size_get(extent), + growing_retained); + } + } + extent_dalloc(tsdn, arena, extent); +} + +void +extents_prefork(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_prefork(tsdn, &extents->mtx); +} + +void +extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_parent(tsdn, &extents->mtx); +} + +void +extents_postfork_child(tsdn_t *tsdn, extents_t *extents) { + malloc_mutex_postfork_child(tsdn, &extents->mtx); +} + +static void +extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent, bool preserve_lru) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extent_state_active); + + extent_state_set(extent, extents_state_get(extents)); + extents_insert_locked(tsdn, extents, extent, preserve_lru); +} + +static void +extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent, bool preserve_lru) { + malloc_mutex_lock(tsdn, &extents->mtx); + extent_deactivate_locked(tsdn, arena, extents, extent, preserve_lru); + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +static void +extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, + extent_t *extent, bool preserve_lru) { + assert(extent_arena_get(extent) == arena); + assert(extent_state_get(extent) == extents_state_get(extents)); + + extents_remove_locked(tsdn, extents, extent, preserve_lru); + extent_state_set(extent, extent_state_active); +} + +static bool +extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, + const extent_t *extent, bool dependent, bool init_missing, + rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { + *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), dependent, init_missing); + if (!dependent && *r_elm_a == NULL) { + return true; + } + assert(*r_elm_a != NULL); + + *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_last_get(extent), dependent, init_missing); + if (!dependent && *r_elm_b == NULL) { + return true; + } + assert(*r_elm_b != NULL); + + return false; +} + +static void +extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, + rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); + if (elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, + slab); + } +} + +static void +extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, + szind_t szind) { + assert(extent_slab_get(extent)); + + /* Register interior. 
*/ + for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { + rtree_write(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << + LG_PAGE), extent, szind, true); + } +} + +static void +extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { + cassert(config_prof); + /* prof_gdump() requirement. */ + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (opt_prof && extent_state_get(extent) == extent_state_active) { + size_t nadd = extent_size_get(extent) >> LG_PAGE; + size_t cur = atomic_fetch_add_zu(&curpages, nadd, + ATOMIC_RELAXED) + nadd; + size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); + while (cur > high && !atomic_compare_exchange_weak_zu( + &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) { + /* + * Don't refresh cur, because it may have decreased + * since this thread lost the highpages update race. + * Note that high is updated in case of CAS failure. + */ + } + if (cur > high && prof_gdump_get_unlocked()) { + prof_gdump(tsdn); + } + } +} + +static void +extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { + cassert(config_prof); + + if (opt_prof && extent_state_get(extent) == extent_state_active) { + size_t nsub = extent_size_get(extent) >> LG_PAGE; + assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); + atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); + } +} + +static bool +extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *elm_a, *elm_b; + + /* + * We need to hold the lock to protect against a concurrent coalesce + * operation that sees us in a partial state. + */ + extent_lock(tsdn, extent); + + if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, + &elm_a, &elm_b)) { + return true; + } + + szind_t szind = extent_szind_get_maybe_invalid(extent); + bool slab = extent_slab_get(extent); + extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); + if (slab) { + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + + extent_unlock(tsdn, extent); + + if (config_prof && gdump_add) { + extent_gdump_add(tsdn, extent); + } + + return false; +} + +static bool +extent_register(tsdn_t *tsdn, extent_t *extent) { + return extent_register_impl(tsdn, extent, true); +} + +static bool +extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { + return extent_register_impl(tsdn, extent, false); +} + +static void +extent_reregister(tsdn_t *tsdn, extent_t *extent) { + bool err = extent_register(tsdn, extent); + assert(!err); +} + +static void +extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, + extent_t *extent) { + size_t i; + + assert(extent_slab_get(extent)); + + for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { + rtree_clear(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << + LG_PAGE)); + } +} + +static void +extent_deregister(tsdn_t *tsdn, extent_t *extent) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *elm_a, *elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, + &elm_a, &elm_b); + + extent_lock(tsdn, extent); + + extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + 
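
The curpages/highpages bookkeeping in extent_gdump_add above maintains a high-water mark with a weak-CAS loop that deliberately tolerates losing races to larger values. The pattern isolated, as a sketch (C11 atomics; the counter is hypothetical):

    #include <stdatomic.h>
    #include <stddef.h>

    static atomic_size_t high; /* hypothetical high-water mark */

    static void
    update_high_watermark(size_t cur) {
        size_t h = atomic_load_explicit(&high, memory_order_relaxed);
        /* On failure the CAS reloads h, so we only keep retrying while
         * cur still exceeds the published maximum; another thread may
         * legitimately beat us with an even larger value. */
        while (cur > h && !atomic_compare_exchange_weak_explicit(&high, &h,
            cur, memory_order_relaxed, memory_order_relaxed)) {
            /* retry */
        }
    }
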
extent_unlock(tsdn, extent); + + if (config_prof) { + extent_gdump_sub(tsdn, extent); + } +} + +static extent_t * +extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + bool *zero, bool *commit, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + assert(alignment > 0); + if (config_debug && new_addr != NULL) { + /* + * Non-NULL new_addr has two use cases: + * + * 1) Recycle a known-extant extent, e.g. during purging. + * 2) Perform in-place expanding reallocation. + * + * Regardless of use case, new_addr must either refer to a + * non-existing extent, or to the base of an extant extent, + * since only active slabs support interior lookups (which of + * course cannot be recycled). + */ + assert(PAGE_ADDR2BASE(new_addr) == new_addr); + assert(pad == 0); + assert(alignment <= PAGE); + } + + size_t esize = size + pad; + size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (alloc_size < esize) { + return NULL; + } + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + extent_t *extent; + if (new_addr != NULL) { + extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr); + if (extent != NULL) { + /* + * We might null-out extent to report an error, but we + * still need to unlock the associated mutex after. + */ + extent_t *unlock_extent = extent; + assert(extent_base_get(extent) == new_addr); + if (extent_arena_get(extent) != arena || + extent_size_get(extent) < esize || + extent_state_get(extent) != + extents_state_get(extents)) { + extent = NULL; + } + extent_unlock(tsdn, unlock_extent); + } + } else { + extent = extents_fit_locked(tsdn, arena, extents, alloc_size); + } + if (extent == NULL) { + malloc_mutex_unlock(tsdn, &extents->mtx); + return NULL; + } + + extent_activate_locked(tsdn, arena, extents, extent, false); + malloc_mutex_unlock(tsdn, &extents->mtx); + + if (extent_zeroed_get(extent)) { + *zero = true; + } + if (extent_committed_get(extent)) { + *commit = true; + } + + return extent; +} + +static extent_t * +extent_recycle_split(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, + szind_t szind, extent_t *extent, bool growing_retained) { + size_t esize = size + pad; + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent), + PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent); + assert(new_addr == NULL || leadsize == 0); + assert(extent_size_get(extent) >= leadsize + esize); + size_t trailsize = extent_size_get(extent) - leadsize - esize; + + /* Split the lead. */ + if (leadsize != 0) { + extent_t *lead = extent; + extent = extent_split_impl(tsdn, arena, r_extent_hooks, + lead, leadsize, NSIZES, false, esize + trailsize, szind, + slab, growing_retained); + if (extent == NULL) { + extent_deregister(tsdn, lead); + extents_leak(tsdn, arena, r_extent_hooks, extents, + lead, growing_retained); + return NULL; + } + extent_deactivate(tsdn, arena, extents, lead, false); + } + + /* Split the trail. 
*/ + if (trailsize != 0) { + extent_t *trail = extent_split_impl(tsdn, arena, + r_extent_hooks, extent, esize, szind, slab, trailsize, + NSIZES, false, growing_retained); + if (trail == NULL) { + extent_deregister(tsdn, extent); + extents_leak(tsdn, arena, r_extent_hooks, extents, + extent, growing_retained); + return NULL; + } + extent_deactivate(tsdn, arena, extents, trail, false); + } else if (leadsize == 0) { + /* + * Splitting causes szind to be set as a side effect, but no + * splitting occurred. + */ + extent_szind_set(extent, szind); + if (szind != NSIZES) { + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(extent), szind, slab); + if (slab && extent_size_get(extent) > PAGE) { + rtree_szind_slab_update(tsdn, &extents_rtree, + rtree_ctx, + (uintptr_t)extent_past_get(extent) - + (uintptr_t)PAGE, szind, slab); + } + } + } + + return extent; +} + +static extent_t * +extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + assert(new_addr == NULL || !slab); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + bool committed = false; + extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, new_addr, size, pad, alignment, slab, zero, + &committed, growing_retained); + if (extent == NULL) { + return NULL; + } + if (committed) { + *commit = true; + } + + extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, + extents, new_addr, size, pad, alignment, slab, szind, extent, + growing_retained); + if (extent == NULL) { + return NULL; + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent), growing_retained)) { + extent_record(tsdn, arena, r_extent_hooks, extents, + extent, growing_retained); + return NULL; + } + extent_zeroed_set(extent, true); + } + + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + assert(extent_state_get(extent) == extent_state_active); + if (slab) { + extent_slab_set(extent, slab); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + + if (*zero) { + void *addr = extent_base_get(extent); + size_t size = extent_size_get(extent); + if (!extent_zeroed_get(extent)) { + if (pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } else if (config_debug) { + size_t *p = (size_t *)(uintptr_t)addr; + for (size_t i = 0; i < size / sizeof(size_t); i++) { + assert(p[i] == 0); + } + } + } + return extent; +} + +/* + * If the caller specifies (!*zero), it is still possible to receive zeroed + * memory, in which case *zero is toggled to true. arena_extent_alloc() takes + * advantage of this to avoid demanding zeroed extents, but taking advantage of + * them if they are returned. + */ +static void * +extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { + void *ret; + + assert(size != 0); + assert(alignment != 0); + + /* "primary" dss. 
*/ + if (have_dss && dss_prec == dss_prec_primary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + /* mmap. */ + if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) + != NULL) { + return ret; + } + /* "secondary" dss. */ + if (have_dss && dss_prec == dss_prec_secondary && (ret = + extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, + commit)) != NULL) { + return ret; + } + + /* All strategies for allocation failed. */ + return NULL; +} + +static void * +extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, + size_t size, size_t alignment, bool *zero, bool *commit) { + void *ret; + + ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, + commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, + ATOMIC_RELAXED)); + return ret; +} + +static void * +extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { + tsdn_t *tsdn; + arena_t *arena; + + tsdn = tsdn_fetch(); + arena = arena_get(tsdn, arena_ind, false); + /* + * The arena we're allocating on behalf of must have been initialized + * already. + */ + assert(arena != NULL); + + return extent_alloc_default_impl(tsdn, arena, new_addr, size, + alignment, zero, commit); +} + +static void +extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + pre_reentrancy(tsd, arena); +} + +static void +extent_hook_post_reentrancy(tsdn_t *tsdn) { + tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); + post_reentrancy(tsd); +} + +/* + * If virtual memory is retained, create increasingly larger extents from which + * to split requested extents in order to limit the total number of disjoint + * virtual memory ranges retained by each arena. + */ +static extent_t * +extent_grow_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, + bool slab, szind_t szind, bool *zero, bool *commit) { + malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); + assert(pad == 0 || !slab); + assert(!*zero || !slab); + + size_t esize = size + pad; + size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; + /* Beware size_t wrap-around. */ + if (alloc_size_min < esize) { + goto label_err; + } + /* + * Find the next extent size in the series that would be large enough to + * satisfy this request. + */ + pszind_t egn_skip = 0; + size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + while (alloc_size < alloc_size_min) { + egn_skip++; + if (arena->extent_grow_next + egn_skip == NPSIZES) { + /* Outside legal range. 
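
extent_alloc_core above encodes the fallback order: sbrk-backed (dss) memory first only under "primary" precedence, then mmap, then dss again under "secondary". The precedence is per arena and writable at runtime; a sketch (not part of the patch, with a hypothetical helper name):

    #include <jemalloc/jemalloc.h>

    /* Prefer sbrk()-backed extents for arena 0; valid values are
     * "disabled", "primary" and "secondary". */
    static int prefer_dss(void) {
        const char *dss = "primary";
        return mallctl("arena.0.dss", NULL, NULL, &dss, sizeof(dss));
    }
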
*/ + goto label_err; + } + assert(arena->extent_grow_next + egn_skip < NPSIZES); + alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); + } + + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + goto label_err; + } + bool zeroed = false; + bool committed = false; + + void *ptr; + if (*r_extent_hooks == &extent_hooks_default) { + ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE, + &zeroed, &committed, (dss_prec_t)atomic_load_u( + &arena->dss_prec, ATOMIC_RELAXED)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, + alloc_size, PAGE, &zeroed, &committed, + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_init(extent, arena, ptr, alloc_size, false, NSIZES, + arena_extent_sn_next(arena), extent_state_active, zeroed, + committed); + if (ptr == NULL) { + extent_dalloc(tsdn, arena, extent); + goto label_err; + } + if (extent_register_no_gdump_add(tsdn, extent)) { + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr, + PAGE_CEILING(alignment)) - (uintptr_t)ptr; + assert(alloc_size >= leadsize + esize); + size_t trailsize = alloc_size - leadsize - esize; + if (extent_zeroed_get(extent) && extent_committed_get(extent)) { + *zero = true; + } + if (extent_committed_get(extent)) { + *commit = true; + } + + /* Split the lead. */ + if (leadsize != 0) { + extent_t *lead = extent; + extent = extent_split_impl(tsdn, arena, r_extent_hooks, lead, + leadsize, NSIZES, false, esize + trailsize, szind, slab, + true); + if (extent == NULL) { + extent_deregister(tsdn, lead); + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, lead, true); + goto label_err; + } + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, lead, true); + } + + /* Split the trail. */ + if (trailsize != 0) { + extent_t *trail = extent_split_impl(tsdn, arena, r_extent_hooks, + extent, esize, szind, slab, trailsize, NSIZES, false, true); + if (trail == NULL) { + extent_deregister(tsdn, extent); + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, trail, true); + } else if (leadsize == 0) { + /* + * Splitting causes szind to be set as a side effect, but no + * splitting occurred. + */ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + extent_szind_set(extent, szind); + if (szind != NSIZES) { + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(extent), szind, slab); + if (slab && extent_size_get(extent) > PAGE) { + rtree_szind_slab_update(tsdn, &extents_rtree, + rtree_ctx, + (uintptr_t)extent_past_get(extent) - + (uintptr_t)PAGE, szind, slab); + } + } + } + + if (*commit && !extent_committed_get(extent)) { + if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, + extent_size_get(extent), true)) { + extent_record(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, true); + goto label_err; + } + extent_zeroed_set(extent, true); + } + + /* + * Increment extent_grow_next if doing so wouldn't exceed the legal + * range. + */ + if (arena->extent_grow_next + egn_skip + 1 < NPSIZES) { + arena->extent_grow_next += egn_skip + 1; + } else { + arena->extent_grow_next = NPSIZES - 1; + } + /* All opportunities for failure are past. 
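
Numerically, the growth policy above advances extent_grow_next at least one page-size class past the one just consumed, so successive reserve sizes climb the sz_pind2sz() series, roughly geometric at about 19% per class with four classes per doubling. That keeps the number of disjoint retained virtual memory ranges roughly logarithmic in peak usage, which is the stated goal of the retained-growth scheme.
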
*/ + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + + if (config_prof) { + /* Adjust gdump stats now that extent is final size. */ + extent_gdump_add(tsdn, extent); + } + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (slab) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, + &rtree_ctx_fallback); + + extent_slab_set(extent, true); + extent_interior_register(tsdn, rtree_ctx, extent, szind); + } + if (*zero && !extent_zeroed_get(extent)) { + void *addr = extent_base_get(extent); + size_t size = extent_size_get(extent); + if (pages_purge_forced(addr, size)) { + memset(addr, 0, size); + } + } + + return extent; +label_err: + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + return NULL; +} + +static extent_t * +extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + assert(size != 0); + assert(alignment != 0); + + malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); + + extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, + &arena->extents_retained, new_addr, size, pad, alignment, slab, + szind, zero, commit, true); + if (extent != NULL) { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + if (config_prof) { + extent_gdump_add(tsdn, extent); + } + } else if (opt_retain && new_addr == NULL) { + extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, + pad, alignment, slab, szind, zero, commit); + /* extent_grow_retained() always releases extent_grow_mtx. */ + } else { + malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); + } + malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); + + return extent; +} + +static extent_t * +extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + size_t esize = size + pad; + extent_t *extent = extent_alloc(tsdn, arena); + if (extent == NULL) { + return NULL; + } + void *addr; + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
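
extent_alloc_retained above only consults extent_grow_retained() when opt_retain is enabled and the caller did not pin new_addr; whether a given run retains virtual memory can be checked at runtime through the read-only "opt.retain" name (sketch, not part of the patch):

    #include <jemalloc/jemalloc.h>
    #include <stdbool.h>

    static bool vm_is_retained(void) {
        bool retain = false;
        size_t sz = sizeof(retain);
        mallctl("opt.retain", &retain, &sz, NULL, 0);
        return retain;
    }
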
*/ + addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, + alignment, zero, commit); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, + esize, alignment, zero, commit, arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + if (addr == NULL) { + extent_dalloc(tsdn, arena, extent); + return NULL; + } + extent_init(extent, arena, addr, esize, slab, szind, + arena_extent_sn_next(arena), extent_state_active, zero, commit); + if (pad != 0) { + extent_addr_randomize(tsdn, extent, alignment); + } + if (extent_register(tsdn, extent)) { + extents_leak(tsdn, arena, r_extent_hooks, + &arena->extents_retained, extent, false); + return NULL; + } + + return extent; +} + +extent_t * +extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, + size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + if (extent == NULL) { + extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, + new_addr, size, pad, alignment, slab, szind, zero, commit); + } + + return extent; +} + +static bool +extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, + const extent_t *outer) { + assert(extent_arena_get(inner) == arena); + if (extent_arena_get(outer) != arena) { + return false; + } + + assert(extent_state_get(inner) == extent_state_active); + if (extent_state_get(outer) != extents->state) { + return false; + } + + if (extent_committed_get(inner) != extent_committed_get(outer)) { + return false; + } + + return true; +} + +static bool +extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *inner, extent_t *outer, bool forward, + bool growing_retained) { + assert(extent_can_coalesce(arena, extents, inner, outer)); + + if (forward && extents->delay_coalesce) { + /* + * The extent that remains after coalescing must occupy the + * outer extent's position in the LRU. For forward coalescing, + * swap the inner extent into the LRU. + */ + extent_list_replace(&extents->lru, outer, inner); + } + extent_activate_locked(tsdn, arena, extents, outer, + extents->delay_coalesce); + + malloc_mutex_unlock(tsdn, &extents->mtx); + bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, + forward ? inner : outer, forward ? outer : inner, growing_retained); + malloc_mutex_lock(tsdn, &extents->mtx); + + if (err) { + if (forward && extents->delay_coalesce) { + extent_list_replace(&extents->lru, inner, outer); + } + extent_deactivate_locked(tsdn, arena, extents, outer, + extents->delay_coalesce); + } + + return err; +} + +static extent_t * +extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, + extent_t *extent, bool *coalesced, bool growing_retained) { + /* + * Continue attempting to coalesce until failure, to protect against + * races with other threads that are thwarted by this one. + */ + bool again; + do { + again = false; + + /* Try to coalesce forward. 
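
extent_can_coalesce() above deliberately checks only arena identity, container state, and commit status; adjacency is established separately by the rtree lookups at extent_past_get()/extent_before_get(). A self-contained sketch of the combined predicate over a simplified extent record (field names assumed):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified extent record, for illustration only. */
typedef struct {
	uintptr_t addr;
	size_t size;
	int arena_ind;
	int state;	/* e.g. active/dirty/muzzy/retained */
	bool committed;
} ext_t;

/*
 * Two extents may merge only if they are byte-adjacent and agree on
 * arena, container state, and commit status, mirroring the checks in
 * extent_can_coalesce() above (which leaves adjacency to its caller).
 */
static bool
can_coalesce(const ext_t *a, const ext_t *b) {
	return a->addr + a->size == b->addr &&
	    a->arena_ind == b->arena_ind &&
	    a->state == b->state &&
	    a->committed == b->committed;
}
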
*/ + extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, + extent_past_get(extent)); + if (next != NULL) { + /* + * extents->mtx only protects against races for + * like-state extents, so call extent_can_coalesce() + * before releasing next's pool lock. + */ + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, next); + + extent_unlock(tsdn, next); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, next, true, + growing_retained)) { + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + + /* Try to coalesce backward. */ + extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, + extent_before_get(extent)); + if (prev != NULL) { + bool can_coalesce = extent_can_coalesce(arena, extents, + extent, prev); + extent_unlock(tsdn, prev); + + if (can_coalesce && !extent_coalesce(tsdn, arena, + r_extent_hooks, extents, extent, prev, false, + growing_retained)) { + extent = prev; + if (extents->delay_coalesce) { + /* Do minimal coalescing. */ + *coalesced = true; + return extent; + } + again = true; + } + } + } while (again); + + if (extents->delay_coalesce) { + *coalesced = false; + } + return extent; +} + +static void +extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, + extents_t *extents, extent_t *extent, bool growing_retained) { + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + + assert((extents_state_get(extents) != extent_state_dirty && + extents_state_get(extents) != extent_state_muzzy) || + !extent_zeroed_get(extent)); + + malloc_mutex_lock(tsdn, &extents->mtx); + extent_hooks_assure_initialized(arena, r_extent_hooks); + + extent_szind_set(extent, NSIZES); + if (extent_slab_get(extent)) { + extent_interior_deregister(tsdn, rtree_ctx, extent); + extent_slab_set(extent, false); + } + + assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_base_get(extent), true) == extent); + + if (!extents->delay_coalesce) { + extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, + rtree_ctx, extents, extent, NULL, growing_retained); + } + + extent_deactivate_locked(tsdn, arena, extents, extent, false); + + malloc_mutex_unlock(tsdn, &extents->mtx); +} + +void +extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + if (extent_register(tsdn, extent)) { + extents_leak(tsdn, arena, &extent_hooks, + &arena->extents_retained, extent, false); + return; + } + extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); +} + +static bool +extent_dalloc_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + return extent_dalloc_mmap(addr, size); + } + return true; +} + +static bool +extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + return extent_dalloc_default_impl(addr, size); +} + +static bool +extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + bool err; + + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to deallocate. 
*/ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. */ + err = extent_dalloc_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = ((*r_extent_hooks)->dalloc == NULL || + (*r_extent_hooks)->dalloc(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena))); + extent_hook_post_reentrancy(tsdn); + } + + if (!err) { + extent_dalloc(tsdn, arena, extent); + } + + return err; +} + +void +extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* + * Deregister first to avoid a race with other allocating threads, and + * reregister if deallocation fails. + */ + extent_deregister(tsdn, extent); + if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { + return; + } + + extent_reregister(tsdn, extent); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + /* Try to decommit; purge if that fails. */ + bool zeroed; + if (!extent_committed_get(extent)) { + zeroed = true; + } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, + 0, extent_size_get(extent))) { + zeroed = true; + } else if ((*r_extent_hooks)->purge_forced != NULL && + !(*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena))) { + zeroed = true; + } else if (extent_state_get(extent) == extent_state_muzzy || + ((*r_extent_hooks)->purge_lazy != NULL && + !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), 0, + extent_size_get(extent), arena_ind_get(arena)))) { + zeroed = false; + } else { + zeroed = false; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_zeroed_set(extent, zeroed); + + if (config_prof) { + extent_gdump_sub(tsdn, extent); + } + + extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, + extent, false); +} + +static void +extent_destroy_default_impl(void *addr, size_t size) { + if (!have_dss || !extent_in_dss(addr)) { + pages_unmap(addr, size); + } +} + +static void +extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + bool committed, unsigned arena_ind) { + extent_destroy_default_impl(addr, size); +} + +void +extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent) { + assert(extent_base_get(extent) != NULL); + assert(extent_size_get(extent) != 0); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + /* Deregister first to avoid a race with other allocating threads. */ + extent_deregister(tsdn, extent); + + extent_addr_set(extent, extent_base_get(extent)); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + /* Try to destroy; silently fail otherwise. */ + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
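
The fallback ladder in extent_dalloc_wrapper() above tries decommit, then forced purge, then lazy purge, and records in zeroed whether the pages can still hold stale data. On Linux the two purge flavors correspond naturally to madvise() modes; a sketch assuming glibc and kernel 4.5+ for MADV_FREE:

#define _DEFAULT_SOURCE
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Lazy purge: pages may keep their contents until the kernel reclaims
 * them, so the extent must be marked not-zeroed (Linux MADV_FREE,
 * kernel >= 4.5 assumed). Returns true on failure, as the hooks do.
 */
static bool
purge_lazy(void *addr, size_t size) {
	return madvise(addr, size, MADV_FREE) != 0;
}

/*
 * Forced purge: the next touch is guaranteed to read zeroes, so the
 * extent can be marked zeroed (Linux MADV_DONTNEED semantics).
 */
static bool
purge_forced(void *addr, size_t size) {
	return madvise(addr, size, MADV_DONTNEED) != 0;
}
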
*/ + extent_destroy_default_impl(extent_base_get(extent), + extent_size_get(extent)); + } else if ((*r_extent_hooks)->destroy != NULL) { + extent_hook_pre_reentrancy(tsdn, arena); + (*r_extent_hooks)->destroy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), + extent_committed_get(extent), arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + extent_dalloc(tsdn, arena, extent); +} + +static bool +extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +static bool +extent_commit_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->commit == NULL || + (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), + extent_size_get(extent), offset, length, arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) || !err); + return err; +} + +bool +extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, + length, false); +} + +static bool +extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} + +bool +extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = ((*r_extent_hooks)->decommit == NULL || + (*r_extent_hooks)->decommit(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena))); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + extent_committed_set(extent, extent_committed_get(extent) && err); + return err; +} + +#ifdef PAGES_CAN_PURGE_LAZY +static bool +extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), + length); +} +#endif + +static bool +extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_lazy == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + + return err; +} + +bool +extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +#ifdef PAGES_CAN_PURGE_FORCED +static bool +extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, + size_t size, size_t offset, size_t length, unsigned arena_ind) { + assert(addr != NULL); + assert((offset & PAGE_MASK) == 0); + assert(length != 0); + assert((length & PAGE_MASK) == 0); + + return pages_purge_forced((void *)((uintptr_t)addr + + (uintptr_t)offset), length); +} +#endif + +static bool +extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length, bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->purge_forced == NULL) { + return true; + } + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, + extent_base_get(extent), extent_size_get(extent), offset, length, + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + return err; +} + +bool +extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, + size_t length) { + return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, + offset, length, false); +} + +#ifdef JEMALLOC_MAPS_COALESCE +static bool +extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, + size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { + return !maps_coalesce; +} +#endif + +static extent_t * +extent_split_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, + bool growing_retained) { + assert(extent_size_get(extent) == size_a + size_b); + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 
1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->split == NULL) { + return NULL; + } + + extent_t *trail = extent_alloc(tsdn, arena); + if (trail == NULL) { + goto label_error_a; + } + + extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + + size_a), size_b, slab_b, szind_b, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent)); + + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; + { + extent_t lead; + + extent_init(&lead, arena, extent_addr_get(extent), size_a, + slab_a, szind_a, extent_sn_get(extent), + extent_state_get(extent), extent_zeroed_get(extent), + extent_committed_get(extent)); + + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, + true, &lead_elm_a, &lead_elm_b); + } + rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, + &trail_elm_a, &trail_elm_b); + + if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL + || trail_elm_b == NULL) { + goto label_error_b; + } + + extent_lock2(tsdn, extent, trail); + + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_pre_reentrancy(tsdn, arena); + } + bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), + size_a + size_b, size_a, size_b, extent_committed_get(extent), + arena_ind_get(arena)); + if (*r_extent_hooks != &extent_hooks_default) { + extent_hook_post_reentrancy(tsdn); + } + if (err) { + goto label_error_c; + } + + extent_size_set(extent, size_a); + extent_szind_set(extent, szind_a); + + extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, + szind_a, slab_a); + extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, + szind_b, slab_b); + + extent_unlock2(tsdn, extent, trail); + + return trail; +label_error_c: + extent_unlock2(tsdn, extent, trail); +label_error_b: + extent_dalloc(tsdn, arena, trail); +label_error_a: + return NULL; +} + +extent_t * +extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, + szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { + return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, + szind_a, slab_a, size_b, szind_b, slab_b, false); +} + +static bool +extent_merge_default_impl(void *addr_a, void *addr_b) { + if (!maps_coalesce) { + return true; + } + if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { + return true; + } + + return false; +} + +#ifdef JEMALLOC_MAPS_COALESCE +static bool +extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, + void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { + return extent_merge_default_impl(addr_a, addr_b); +} +#endif + +static bool +extent_merge_impl(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, + bool growing_retained) { + witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), + WITNESS_RANK_CORE, growing_retained ? 1 : 0); + + extent_hooks_assure_initialized(arena, r_extent_hooks); + + if ((*r_extent_hooks)->merge == NULL) { + return true; + } + + bool err; + if (*r_extent_hooks == &extent_hooks_default) { + /* Call directly to propagate tsdn. 
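
extent_split_impl() above keeps the lead in place and gives the freshly allocated trail the address size_a bytes past the base, updating both rtree mappings while the pair is locked. The address arithmetic in isolation, over a simplified range type:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>

typedef struct {
	uintptr_t addr;
	size_t size;
} range_t;

/*
 * Split r into [addr, addr+size_a) and [addr+size_a, addr+size), in
 * the spirit of extent_split_impl(): the lead keeps the original base
 * and the trail starts size_a bytes in.
 */
static range_t
split_range(range_t *r, size_t size_a) {
	assert(size_a < r->size);
	range_t trail = { r->addr + size_a, r->size - size_a };
	r->size = size_a;
	return trail;
}
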
*/ + err = extent_merge_default_impl(extent_base_get(a), + extent_base_get(b)); + } else { + extent_hook_pre_reentrancy(tsdn, arena); + err = (*r_extent_hooks)->merge(*r_extent_hooks, + extent_base_get(a), extent_size_get(a), extent_base_get(b), + extent_size_get(b), extent_committed_get(a), + arena_ind_get(arena)); + extent_hook_post_reentrancy(tsdn); + } + + if (err) { + return true; + } + + /* + * The rtree writes must happen while all the relevant elements are + * owned, so the following code uses decomposed helper functions rather + * than extent_{,de}register() to do things in the right order. + */ + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, + &a_elm_b); + extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, + &b_elm_b); + + extent_lock2(tsdn, a, b); + + if (a_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, + NSIZES, false); + } + if (b_elm_b != NULL) { + rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, + NSIZES, false); + } else { + b_elm_b = b_elm_a; + } + + extent_size_set(a, extent_size_get(a) + extent_size_get(b)); + extent_szind_set(a, NSIZES); + extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? + extent_sn_get(a) : extent_sn_get(b)); + extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); + + extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false); + + extent_unlock2(tsdn, a, b); + + extent_dalloc(tsdn, extent_arena_get(b), b); + + return false; +} + +bool +extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, + extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { + return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); +} + +bool +extent_boot(void) { + if (rtree_new(&extents_rtree, true)) { + return true; + } + + if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", + WITNESS_RANK_EXTENT_POOL)) { + return true; + } + + if (have_dss) { + extent_dss_boot(); + } + + return false; +} diff --git a/dep/jemalloc/src/extent_dss.c b/dep/jemalloc/src/extent_dss.c new file mode 100644 index 00000000000..e72da95870d --- /dev/null +++ b/dep/jemalloc/src/extent_dss.c @@ -0,0 +1,269 @@ +#define JEMALLOC_EXTENT_DSS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/spin.h" + +/******************************************************************************/ +/* Data. */ + +const char *opt_dss = DSS_DEFAULT; + +const char *dss_prec_names[] = { + "disabled", + "primary", + "secondary", + "N/A" +}; + +/* + * Current dss precedence default, used when creating new arenas. NB: This is + * stored as unsigned rather than dss_prec_t because in principle there's no + * guarantee that sizeof(dss_prec_t) is the same as sizeof(unsigned), and we use + * atomic operations to synchronize the setting. + */ +static atomic_u_t dss_prec_default = ATOMIC_INIT( + (unsigned)DSS_PREC_DEFAULT); + +/* Base address of the DSS. */ +static void *dss_base; +/* Atomic boolean indicating whether a thread is currently extending DSS. */ +static atomic_b_t dss_extending; +/* Atomic boolean indicating whether the DSS is exhausted. */ +static atomic_b_t dss_exhausted; +/* Atomic current upper limit on DSS addresses. 
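
The dss_prec_default/opt_dss state above is driven by the documented "dss" option (e.g. MALLOC_CONF="dss:primary" in the environment). A small sketch that reads the effective setting back through mallctl, assuming an unprefixed build:

#include <stdio.h>
#include <stddef.h>
#include <jemalloc/jemalloc.h>

/*
 * Read back the dss precedence chosen at startup; the possible values
 * are the dss_prec_names strings above: "disabled", "primary",
 * "secondary".
 */
int
main(void) {
	const char *dss;
	size_t sz = sizeof(dss);
	if (mallctl("opt.dss", &dss, &sz, NULL, 0) == 0) {
		printf("dss precedence: %s\n", dss);
	}
	return 0;
}
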
*/ +static atomic_p_t dss_max; + +/******************************************************************************/ + +static void * +extent_dss_sbrk(intptr_t increment) { +#ifdef JEMALLOC_DSS + return sbrk(increment); +#else + not_implemented(); + return NULL; +#endif +} + +dss_prec_t +extent_dss_prec_get(void) { + dss_prec_t ret; + + if (!have_dss) { + return dss_prec_disabled; + } + ret = (dss_prec_t)atomic_load_u(&dss_prec_default, ATOMIC_ACQUIRE); + return ret; +} + +bool +extent_dss_prec_set(dss_prec_t dss_prec) { + if (!have_dss) { + return (dss_prec != dss_prec_disabled); + } + atomic_store_u(&dss_prec_default, (unsigned)dss_prec, ATOMIC_RELEASE); + return false; +} + +static void +extent_dss_extending_start(void) { + spin_t spinner = SPIN_INITIALIZER; + while (true) { + bool expected = false; + if (atomic_compare_exchange_weak_b(&dss_extending, &expected, + true, ATOMIC_ACQ_REL, ATOMIC_RELAXED)) { + break; + } + spin_adaptive(&spinner); + } +} + +static void +extent_dss_extending_finish(void) { + assert(atomic_load_b(&dss_extending, ATOMIC_RELAXED)); + + atomic_store_b(&dss_extending, false, ATOMIC_RELEASE); +} + +static void * +extent_dss_max_update(void *new_addr) { + /* + * Get the current end of the DSS as max_cur and assure that dss_max is + * up to date. + */ + void *max_cur = extent_dss_sbrk(0); + if (max_cur == (void *)-1) { + return NULL; + } + atomic_store_p(&dss_max, max_cur, ATOMIC_RELEASE); + /* Fixed new_addr can only be supported if it is at the edge of DSS. */ + if (new_addr != NULL && max_cur != new_addr) { + return NULL; + } + return max_cur; +} + +void * +extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, + size_t alignment, bool *zero, bool *commit) { + extent_t *gap; + + cassert(have_dss); + assert(size > 0); + assert(alignment > 0); + + /* + * sbrk() uses a signed increment argument, so take care not to + * interpret a large allocation request as a negative increment. + */ + if ((intptr_t)size < 0) { + return NULL; + } + + gap = extent_alloc(tsdn, arena); + if (gap == NULL) { + return NULL; + } + + extent_dss_extending_start(); + if (!atomic_load_b(&dss_exhausted, ATOMIC_ACQUIRE)) { + /* + * The loop is necessary to recover from races with other + * threads that are using the DSS for something other than + * malloc. + */ + while (true) { + void *max_cur = extent_dss_max_update(new_addr); + if (max_cur == NULL) { + goto label_oom; + } + + /* + * Compute how much page-aligned gap space (if any) is + * necessary to satisfy alignment. This space can be + * recycled for later use. + */ + void *gap_addr_page = (void *)(PAGE_CEILING( + (uintptr_t)max_cur)); + void *ret = (void *)ALIGNMENT_CEILING( + (uintptr_t)gap_addr_page, alignment); + size_t gap_size_page = (uintptr_t)ret - + (uintptr_t)gap_addr_page; + if (gap_size_page != 0) { + extent_init(gap, arena, gap_addr_page, + gap_size_page, false, NSIZES, + arena_extent_sn_next(arena), + extent_state_active, false, true); + } + /* + * Compute the address just past the end of the desired + * allocation space. + */ + void *dss_next = (void *)((uintptr_t)ret + size); + if ((uintptr_t)ret < (uintptr_t)max_cur || + (uintptr_t)dss_next < (uintptr_t)max_cur) { + goto label_oom; /* Wrap-around. */ + } + /* Compute the increment, including subpage bytes. */ + void *gap_addr_subpage = max_cur; + size_t gap_size_subpage = (uintptr_t)ret - + (uintptr_t)gap_addr_subpage; + intptr_t incr = gap_size_subpage + size; + + assert((uintptr_t)max_cur + incr == (uintptr_t)ret + + size); + + /* Try to allocate. 
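
The loop above first computes the page-aligned gap between the current break and the aligned return address, recycling the gap as a normal extent and folding the sub-page slack into the sbrk() increment. The two ceilings in isolation, with a 4 KiB page assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE ((uintptr_t)4096)		/* assumed page size */
#define CEIL(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int
main(void) {
	uintptr_t max_cur = 0x100001;	/* pretend sbrk(0) result */
	uintptr_t align = 65536;
	uintptr_t gap_page = CEIL(max_cur, PAGE);	/* PAGE_CEILING */
	uintptr_t ret = CEIL(gap_page, align);	/* ALIGNMENT_CEILING */
	/* Recyclable page-aligned gap, and sub-page slack in the increment. */
	printf("gap=%zu slack=%zu\n", (size_t)(ret - gap_page),
	    (size_t)(gap_page - max_cur));	/* -> gap=61440 slack=4095 */
	return 0;
}
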
*/ + void *dss_prev = extent_dss_sbrk(incr); + if (dss_prev == max_cur) { + /* Success. */ + atomic_store_p(&dss_max, dss_next, + ATOMIC_RELEASE); + extent_dss_extending_finish(); + + if (gap_size_page != 0) { + extent_dalloc_gap(tsdn, arena, gap); + } else { + extent_dalloc(tsdn, arena, gap); + } + if (!*commit) { + *commit = pages_decommit(ret, size); + } + if (*zero && *commit) { + extent_hooks_t *extent_hooks = + EXTENT_HOOKS_INITIALIZER; + extent_t extent; + + extent_init(&extent, arena, ret, size, + size, false, NSIZES, + extent_state_active, false, true); + if (extent_purge_forced_wrapper(tsdn, + arena, &extent_hooks, &extent, 0, + size)) { + memset(ret, 0, size); + } + } + return ret; + } + /* + * Failure, whether due to OOM or a race with a raw + * sbrk() call from outside the allocator. + */ + if (dss_prev == (void *)-1) { + /* OOM. */ + atomic_store_b(&dss_exhausted, true, + ATOMIC_RELEASE); + goto label_oom; + } + } + } +label_oom: + extent_dss_extending_finish(); + extent_dalloc(tsdn, arena, gap); + return NULL; +} + +static bool +extent_in_dss_helper(void *addr, void *max) { + return ((uintptr_t)addr >= (uintptr_t)dss_base && (uintptr_t)addr < + (uintptr_t)max); +} + +bool +extent_in_dss(void *addr) { + cassert(have_dss); + + return extent_in_dss_helper(addr, atomic_load_p(&dss_max, + ATOMIC_ACQUIRE)); +} + +bool +extent_dss_mergeable(void *addr_a, void *addr_b) { + void *max; + + cassert(have_dss); + + if ((uintptr_t)addr_a < (uintptr_t)dss_base && (uintptr_t)addr_b < + (uintptr_t)dss_base) { + return true; + } + + max = atomic_load_p(&dss_max, ATOMIC_ACQUIRE); + return (extent_in_dss_helper(addr_a, max) == + extent_in_dss_helper(addr_b, max)); +} + +void +extent_dss_boot(void) { + cassert(have_dss); + + dss_base = extent_dss_sbrk(0); + atomic_store_b(&dss_extending, false, ATOMIC_RELAXED); + atomic_store_b(&dss_exhausted, dss_base == (void *)-1, ATOMIC_RELAXED); + atomic_store_p(&dss_max, dss_base, ATOMIC_RELAXED); +} + +/******************************************************************************/ diff --git a/dep/jemalloc/src/extent_mmap.c b/dep/jemalloc/src/extent_mmap.c new file mode 100644 index 00000000000..8d607dc8039 --- /dev/null +++ b/dep/jemalloc/src/extent_mmap.c @@ -0,0 +1,42 @@ +#define JEMALLOC_EXTENT_MMAP_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" + +/******************************************************************************/ +/* Data. 
*/ + +bool opt_retain = +#ifdef JEMALLOC_RETAIN + true +#else + false +#endif + ; + +/******************************************************************************/ + +void * +extent_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, + bool *commit) { + void *ret = pages_map(new_addr, size, ALIGNMENT_CEILING(alignment, + PAGE), commit); + if (ret == NULL) { + return NULL; + } + assert(ret != NULL); + if (*commit) { + *zero = true; + } + return ret; +} + +bool +extent_dalloc_mmap(void *addr, size_t size) { + if (!opt_retain) { + pages_unmap(addr, size); + } + return opt_retain; +} diff --git a/dep/jemalloc/src/hash.c b/dep/jemalloc/src/hash.c index cfa4da0275c..7b2bdc2bd6f 100644 --- a/dep/jemalloc/src/hash.c +++ b/dep/jemalloc/src/hash.c @@ -1,2 +1,3 @@ -#define JEMALLOC_HASH_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_HASH_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/dep/jemalloc/src/hooks.c b/dep/jemalloc/src/hooks.c new file mode 100644 index 00000000000..6266ecd47fe --- /dev/null +++ b/dep/jemalloc/src/hooks.c @@ -0,0 +1,12 @@ +#include "jemalloc/internal/jemalloc_preamble.h" + +/* + * The hooks are a little bit screwy -- they're not genuinely exported in the + * sense that we want them available to end-users, but we do want them visible + * from outside the generated library, so that we can use them in test code. + */ +JEMALLOC_EXPORT +void (*hooks_arena_new_hook)() = NULL; + +JEMALLOC_EXPORT +void (*hooks_libc_hook)() = NULL; diff --git a/dep/jemalloc/src/huge.c b/dep/jemalloc/src/huge.c deleted file mode 100644 index d72f2135702..00000000000 --- a/dep/jemalloc/src/huge.c +++ /dev/null @@ -1,347 +0,0 @@ -#define JEMALLOC_HUGE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/******************************************************************************/ -/* Data. */ - -uint64_t huge_nmalloc; -uint64_t huge_ndalloc; -size_t huge_allocated; - -malloc_mutex_t huge_mtx; - -/******************************************************************************/ - -/* Tree of chunks that are stand-alone huge allocations. */ -static extent_tree_t huge; - -void * -huge_malloc(size_t size, bool zero, dss_prec_t dss_prec) -{ - - return (huge_palloc(size, chunksize, zero, dss_prec)); -} - -void * -huge_palloc(size_t size, size_t alignment, bool zero, dss_prec_t dss_prec) -{ - void *ret; - size_t csize; - extent_node_t *node; - bool is_zeroed; - - /* Allocate one or more contiguous chunks for this request. */ - - csize = CHUNK_CEILING(size); - if (csize == 0) { - /* size is large enough to cause size_t wrap-around. */ - return (NULL); - } - - /* Allocate an extent node with which to track the chunk. */ - node = base_node_alloc(); - if (node == NULL) - return (NULL); - - /* - * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that - * it is possible to make correct junk/zero fill decisions below. - */ - is_zeroed = zero; - ret = chunk_alloc(csize, alignment, false, &is_zeroed, dss_prec); - if (ret == NULL) { - base_node_dealloc(node); - return (NULL); - } - - /* Insert node into huge. 
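
The deleted huge_palloc() above leans on CHUNK_CEILING() evaluating to 0 when size sits within a chunk of SIZE_MAX, which is what its "size_t wrap-around" early return catches. A sketch of that guard, assuming the 4 MiB chunk size typical of the 3.x era:

#include <stdbool.h>
#include <stddef.h>

#define CHUNK ((size_t)1 << 22)	/* assumed 4 MiB chunk, jemalloc 3.x era */

/*
 * Rounding up to a chunk boundary wraps to 0 for any size within
 * CHUNK-1 of SIZE_MAX; the deleted huge_palloc() used that as its
 * overflow check.
 */
static size_t
chunk_ceiling(size_t s) {
	return (s + CHUNK - 1) & ~(CHUNK - 1);
}

static bool
huge_size_ok(size_t s) {
	return chunk_ceiling(s) != 0;
}
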
*/ - node->addr = ret; - node->size = csize; - - malloc_mutex_lock(&huge_mtx); - extent_tree_ad_insert(&huge, node); - if (config_stats) { - stats_cactive_add(csize); - huge_nmalloc++; - huge_allocated += csize; - } - malloc_mutex_unlock(&huge_mtx); - - if (config_fill && zero == false) { - if (opt_junk) - memset(ret, 0xa5, csize); - else if (opt_zero && is_zeroed == false) - memset(ret, 0, csize); - } - - return (ret); -} - -bool -huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra) -{ - - /* - * Avoid moving the allocation if the size class can be left the same. - */ - if (oldsize > arena_maxclass - && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size) - && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) { - assert(CHUNK_CEILING(oldsize) == oldsize); - return (false); - } - - /* Reallocation would require a move. */ - return (true); -} - -void * -huge_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra, - size_t alignment, bool zero, bool try_tcache_dalloc, dss_prec_t dss_prec) -{ - void *ret; - size_t copysize; - - /* Try to avoid moving the allocation. */ - if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false) - return (ptr); - - /* - * size and oldsize are different enough that we need to use a - * different size class. In that case, fall back to allocating new - * space and copying. - */ - if (alignment > chunksize) - ret = huge_palloc(size + extra, alignment, zero, dss_prec); - else - ret = huge_malloc(size + extra, zero, dss_prec); - - if (ret == NULL) { - if (extra == 0) - return (NULL); - /* Try again, this time without extra. */ - if (alignment > chunksize) - ret = huge_palloc(size, alignment, zero, dss_prec); - else - ret = huge_malloc(size, zero, dss_prec); - - if (ret == NULL) - return (NULL); - } - - /* - * Copy at most size bytes (not size+extra), since the caller has no - * expectation that the extra bytes will be reliably preserved. - */ - copysize = (size < oldsize) ? size : oldsize; - -#ifdef JEMALLOC_MREMAP - /* - * Use mremap(2) if this is a huge-->huge reallocation, and neither the - * source nor the destination are in dss. - */ - if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr) - == false && chunk_in_dss(ret) == false))) { - size_t newsize = huge_salloc(ret); - - /* - * Remove ptr from the tree of huge allocations before - * performing the remap operation, in order to avoid the - * possibility of another thread acquiring that mapping before - * this one removes it from the tree. - */ - huge_dalloc(ptr, false); - if (mremap(ptr, oldsize, newsize, MREMAP_MAYMOVE|MREMAP_FIXED, - ret) == MAP_FAILED) { - /* - * Assuming no chunk management bugs in the allocator, - * the only documented way an error can occur here is - * if the application changed the map type for a - * portion of the old allocation. This is firmly in - * undefined behavior territory, so write a diagnostic - * message, and optionally abort. - */ - char buf[BUFERROR_BUF]; - - buferror(get_errno(), buf, sizeof(buf)); - malloc_printf("<jemalloc>: Error in mremap(): %s\n", - buf); - if (opt_abort) - abort(); - memcpy(ret, ptr, copysize); - chunk_dealloc_mmap(ptr, oldsize); - } else if (config_fill && zero == false && opt_junk && oldsize - < newsize) { - /* - * mremap(2) clobbers the original mapping, so - * junk/zero filling is not preserved. There is no - * need to zero fill here, since any trailing - * uninititialized memory is demand-zeroed by the - * kernel, but junk filling must be redone. 
- */ - memset(ret + oldsize, 0xa5, newsize - oldsize); - } - } else -#endif - { - memcpy(ret, ptr, copysize); - iqalloct(ptr, try_tcache_dalloc); - } - return (ret); -} - -#ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl) -#endif -static void -huge_dalloc_junk(void *ptr, size_t usize) -{ - - if (config_fill && config_dss && opt_junk) { - /* - * Only bother junk filling if the chunk isn't about to be - * unmapped. - */ - if (config_munmap == false || (config_dss && chunk_in_dss(ptr))) - memset(ptr, 0x5a, usize); - } -} -#ifdef JEMALLOC_JET -#undef huge_dalloc_junk -#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk) -huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl); -#endif - -void -huge_dalloc(void *ptr, bool unmap) -{ - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = ptr; - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - assert(node->addr == ptr); - extent_tree_ad_remove(&huge, node); - - if (config_stats) { - stats_cactive_sub(node->size); - huge_ndalloc++; - huge_allocated -= node->size; - } - - malloc_mutex_unlock(&huge_mtx); - - if (unmap) - huge_dalloc_junk(node->addr, node->size); - - chunk_dealloc(node->addr, node->size, unmap); - - base_node_dealloc(node); -} - -size_t -huge_salloc(const void *ptr) -{ - size_t ret; - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - ret = node->size; - - malloc_mutex_unlock(&huge_mtx); - - return (ret); -} - -dss_prec_t -huge_dss_prec_get(arena_t *arena) -{ - - return (arena_dss_prec_get(choose_arena(arena))); -} - -prof_ctx_t * -huge_prof_ctx_get(const void *ptr) -{ - prof_ctx_t *ret; - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - ret = node->prof_ctx; - - malloc_mutex_unlock(&huge_mtx); - - return (ret); -} - -void -huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx) -{ - extent_node_t *node, key; - - malloc_mutex_lock(&huge_mtx); - - /* Extract from tree of huge allocations. */ - key.addr = __DECONST(void *, ptr); - node = extent_tree_ad_search(&huge, &key); - assert(node != NULL); - - node->prof_ctx = ctx; - - malloc_mutex_unlock(&huge_mtx); -} - -bool -huge_boot(void) -{ - - /* Initialize chunks data. 
*/ - if (malloc_mutex_init(&huge_mtx)) - return (true); - extent_tree_ad_new(&huge); - - if (config_stats) { - huge_nmalloc = 0; - huge_ndalloc = 0; - huge_allocated = 0; - } - - return (false); -} - -void -huge_prefork(void) -{ - - malloc_mutex_prefork(&huge_mtx); -} - -void -huge_postfork_parent(void) -{ - - malloc_mutex_postfork_parent(&huge_mtx); -} - -void -huge_postfork_child(void) -{ - - malloc_mutex_postfork_child(&huge_mtx); -} diff --git a/dep/jemalloc/src/jemalloc.c b/dep/jemalloc/src/jemalloc.c index 204778bc89d..0ee8ad48b98 100644 --- a/dep/jemalloc/src/jemalloc.c +++ b/dep/jemalloc/src/jemalloc.c @@ -1,15 +1,31 @@ -#define JEMALLOC_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/atomic.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/extent_dss.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/jemalloc_internal_types.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/size_classes.h" +#include "jemalloc/internal/spin.h" +#include "jemalloc/internal/sz.h" +#include "jemalloc/internal/ticker.h" +#include "jemalloc/internal/util.h" /******************************************************************************/ /* Data. */ -malloc_tsd_data(, arenas, arena_t *, NULL) -malloc_tsd_data(, thread_allocated, thread_allocated_t, - THREAD_ALLOCATED_INITIALIZER) - /* Runtime configuration options. */ -const char *je_malloc_conf; +const char *je_malloc_conf +#ifndef _WIN32 + JEMALLOC_ATTR(weak) +#endif + ; bool opt_abort = #ifdef JEMALLOC_DEBUG true @@ -17,30 +33,80 @@ bool opt_abort = false #endif ; -bool opt_junk = +bool opt_abort_conf = +#ifdef JEMALLOC_DEBUG + true +#else + false +#endif + ; +const char *opt_junk = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + "true" +#else + "false" +#endif + ; +bool opt_junk_alloc = +#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) + true +#else + false +#endif + ; +bool opt_junk_free = #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) true #else false #endif ; -size_t opt_quarantine = ZU(0); -bool opt_redzone = false; + bool opt_utrace = false; -bool opt_valgrind = false; bool opt_xmalloc = false; bool opt_zero = false; -size_t opt_narenas = 0; +unsigned opt_narenas = 0; unsigned ncpus; -malloc_mutex_t arenas_lock; -arena_t **arenas; -unsigned narenas_total; -unsigned narenas_auto; - -/* Set to true once the allocator has been initialized. */ -static bool malloc_initialized = false; +/* Protects arenas initialization. */ +malloc_mutex_t arenas_lock; +/* + * Arenas that are used to service external requests. Not all elements of the + * arenas array are necessarily used; arenas are created lazily as needed. + * + * arenas[0..narenas_auto) are used for automatic multiplexing of threads and + * arenas. arenas[narenas_auto..narenas_total) are only used if the application + * takes some action to create them and allocate from them. + * + * Points to an arena_t. + */ +JEMALLOC_ALIGNED(CACHELINE) +atomic_p_t arenas[MALLOCX_ARENA_LIMIT]; +static atomic_u_t narenas_total; /* Use narenas_total_*(). */ +static arena_t *a0; /* arenas[0]; read-only after initialization. */ +unsigned narenas_auto; /* Read-only after initialization. 
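
Declaring je_malloc_conf weak (non-Windows) lets an application bake option defaults in at link time rather than via the environment. A sketch, assuming an unprefixed build where the public symbol is plain malloc_conf (prefixed builds keep the je_ name):

/*
 * jemalloc's own definition is weak, so a strong definition in the
 * application wins at link time; the string is parsed during
 * initialization, before the first allocation. Both option names
 * appear elsewhere in this diff (opt_abort_conf, opt_narenas).
 */
const char *malloc_conf = "abort_conf:true,narenas:4";
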
*/ + +typedef enum { + malloc_init_uninitialized = 3, + malloc_init_a0_initialized = 2, + malloc_init_recursible = 1, + malloc_init_initialized = 0 /* Common case --> jnz. */ +} malloc_init_t; +static malloc_init_t malloc_init_state = malloc_init_uninitialized; + +/* False should be the common case. Set to true to trigger initialization. */ +bool malloc_slow = true; + +/* When malloc_slow is true, set the corresponding bits for sanity check. */ +enum { + flag_opt_junk_alloc = (1U), + flag_opt_junk_free = (1U << 1), + flag_opt_zero = (1U << 2), + flag_opt_utrace = (1U << 3), + flag_opt_xmalloc = (1U << 4) +}; +static uint8_t malloc_slow_flags; #ifdef JEMALLOC_THREADED_INIT /* Used to let the initializing thread recursively allocate. */ @@ -57,14 +123,30 @@ static bool malloc_initializer = NO_INITIALIZER; /* Used to avoid initialization races. */ #ifdef _WIN32 +#if _WIN32_WINNT >= 0x0600 +static malloc_mutex_t init_lock = SRWLOCK_INIT; +#else static malloc_mutex_t init_lock; +static bool init_lock_initialized = false; JEMALLOC_ATTR(constructor) static void WINAPI -_init_init_lock(void) -{ - - malloc_mutex_init(&init_lock); +_init_init_lock(void) { + /* + * If another constructor in the same binary is using mallctl to e.g. + * set up extent hooks, it may end up running before this one, and + * malloc_init_hard will crash trying to lock the uninitialized lock. So + * we force an initialization of the lock in malloc_init_hard as well. + * We don't try to care about atomicity of the accessed to the + * init_lock_initialized boolean, since it really only matters early in + * the process creation, before any separate thread normally starts + * doing anything. + */ + if (!init_lock_initialized) { + malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT, + malloc_mutex_rank_exclusive); + } + init_lock_initialized = true; } #ifdef _MSC_VER @@ -72,7 +154,7 @@ _init_init_lock(void) JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) static const void (WINAPI *init_init_lock)(void) = _init_init_lock; #endif - +#endif #else static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; #endif @@ -85,7 +167,7 @@ typedef struct { #ifdef JEMALLOC_UTRACE # define UTRACE(a, b, c) do { \ - if (opt_utrace) { \ + if (unlikely(opt_utrace)) { \ int utrace_serrno = errno; \ malloc_utrace_t ut; \ ut.p = (a); \ @@ -99,12 +181,16 @@ typedef struct { # define UTRACE(a, b, c) #endif +/* Whether encountered any invalid config options. */ +static bool had_conf_error = false; + /******************************************************************************/ /* * Function prototypes for static functions that are referenced prior to * definition. */ +static bool malloc_init_hard_a0(void); static bool malloc_init_hard(void); /******************************************************************************/ @@ -112,54 +198,337 @@ static bool malloc_init_hard(void); * Begin miscellaneous support functions. */ +bool +malloc_initialized(void) { + return (malloc_init_state == malloc_init_initialized); +} + +JEMALLOC_ALWAYS_INLINE bool +malloc_init_a0(void) { + if (unlikely(malloc_init_state == malloc_init_uninitialized)) { + return malloc_init_hard_a0(); + } + return false; +} + +JEMALLOC_ALWAYS_INLINE bool +malloc_init(void) { + if (unlikely(!malloc_initialized()) && malloc_init_hard()) { + return true; + } + return false; +} + +/* + * The a0*() functions are used instead of i{d,}alloc() in situations that + * cannot tolerate TLS variable access. 
+ */ + +static void * +a0ialloc(size_t size, bool zero, bool is_internal) { + if (unlikely(malloc_init_a0())) { + return NULL; + } + + return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, + is_internal, arena_get(TSDN_NULL, 0, true), true); +} + +static void +a0idalloc(void *ptr, bool is_internal) { + idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); +} + +void * +a0malloc(size_t size) { + return a0ialloc(size, false, true); +} + +void +a0dalloc(void *ptr) { + a0idalloc(ptr, true); +} + +/* + * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive + * situations that cannot tolerate TLS variable access (TLS allocation and very + * early internal data structure initialization). + */ + +void * +bootstrap_malloc(size_t size) { + if (unlikely(size == 0)) { + size = 1; + } + + return a0ialloc(size, false, false); +} + +void * +bootstrap_calloc(size_t num, size_t size) { + size_t num_size; + + num_size = num * size; + if (unlikely(num_size == 0)) { + assert(num == 0 || size == 0); + num_size = 1; + } + + return a0ialloc(num_size, true, false); +} + +void +bootstrap_free(void *ptr) { + if (unlikely(ptr == NULL)) { + return; + } + + a0idalloc(ptr, false); +} + +void +arena_set(unsigned ind, arena_t *arena) { + atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); +} + +static void +narenas_total_set(unsigned narenas) { + atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); +} + +static void +narenas_total_inc(void) { + atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); +} + +unsigned +narenas_total_get(void) { + return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); +} + /* Create a new arena and insert it into the arenas array at index ind. */ +static arena_t * +arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + + assert(ind <= narenas_total_get()); + if (ind >= MALLOCX_ARENA_LIMIT) { + return NULL; + } + if (ind == narenas_total_get()) { + narenas_total_inc(); + } + + /* + * Another thread may have already initialized arenas[ind] if it's an + * auto arena. + */ + arena = arena_get(tsdn, ind, false); + if (arena != NULL) { + assert(ind < narenas_auto); + return arena; + } + + /* Actually initialize the arena. */ + arena = arena_new(tsdn, ind, extent_hooks); + + return arena; +} + +static void +arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { + if (ind == 0) { + return; + } + if (have_background_thread) { + bool err; + malloc_mutex_lock(tsdn, &background_thread_lock); + err = background_thread_create(tsdn_tsd(tsdn), ind); + malloc_mutex_unlock(tsdn, &background_thread_lock); + if (err) { + malloc_printf("<jemalloc>: error in background thread " + "creation for arena %u. 
Abort.\n", ind); + abort(); + } + } +} + arena_t * -arenas_extend(unsigned ind) -{ - arena_t *ret; +arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { + arena_t *arena; + + malloc_mutex_lock(tsdn, &arenas_lock); + arena = arena_init_locked(tsdn, ind, extent_hooks); + malloc_mutex_unlock(tsdn, &arenas_lock); + + arena_new_create_background_thread(tsdn, ind); + + return arena; +} + +static void +arena_bind(tsd_t *tsd, unsigned ind, bool internal) { + arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_inc(arena, internal); + + if (internal) { + tsd_iarena_set(tsd, arena); + } else { + tsd_arena_set(tsd, arena); + } +} - ret = (arena_t *)base_alloc(sizeof(arena_t)); - if (ret != NULL && arena_new(ret, ind) == false) { - arenas[ind] = ret; - return (ret); +void +arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) { + arena_t *oldarena, *newarena; + + oldarena = arena_get(tsd_tsdn(tsd), oldind, false); + newarena = arena_get(tsd_tsdn(tsd), newind, false); + arena_nthreads_dec(oldarena, false); + arena_nthreads_inc(newarena, false); + tsd_arena_set(tsd, newarena); +} + +static void +arena_unbind(tsd_t *tsd, unsigned ind, bool internal) { + arena_t *arena; + + arena = arena_get(tsd_tsdn(tsd), ind, false); + arena_nthreads_dec(arena, internal); + + if (internal) { + tsd_iarena_set(tsd, NULL); + } else { + tsd_arena_set(tsd, NULL); } - /* Only reached if there is an OOM error. */ +} + +arena_tdata_t * +arena_tdata_get_hard(tsd_t *tsd, unsigned ind) { + arena_tdata_t *tdata, *arenas_tdata_old; + arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); + unsigned narenas_tdata_old, i; + unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); + unsigned narenas_actual = narenas_total_get(); /* - * OOM here is quite inconvenient to propagate, since dealing with it - * would require a check for failure in the fast path. Instead, punt - * by using arenas[0]. In practice, this is an extremely unlikely - * failure. + * Dissociate old tdata array (and set up for deallocation upon return) + * if it's too small. */ - malloc_write("<jemalloc>: Error initializing arena\n"); - if (opt_abort) - abort(); + if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { + arenas_tdata_old = arenas_tdata; + narenas_tdata_old = narenas_tdata; + arenas_tdata = NULL; + narenas_tdata = 0; + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } else { + arenas_tdata_old = NULL; + narenas_tdata_old = 0; + } + + /* Allocate tdata array if it's missing. */ + if (arenas_tdata == NULL) { + bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); + narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; - return (arenas[0]); + if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { + *arenas_tdata_bypassp = true; + arenas_tdata = (arena_tdata_t *)a0malloc( + sizeof(arena_tdata_t) * narenas_tdata); + *arenas_tdata_bypassp = false; + } + if (arenas_tdata == NULL) { + tdata = NULL; + goto label_return; + } + assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); + tsd_arenas_tdata_set(tsd, arenas_tdata); + tsd_narenas_tdata_set(tsd, narenas_tdata); + } + + /* + * Copy to tdata array. It's possible that the actual number of arenas + * has increased since narenas_total_get() was called above, but that + * causes no correctness issues unless two threads concurrently execute + * the arenas.create mallctl, which we trust mallctl synchronization to + * prevent. + */ + + /* Copy/initialize tickers. 
*/ + for (i = 0; i < narenas_actual; i++) { + if (i < narenas_tdata_old) { + ticker_copy(&arenas_tdata[i].decay_ticker, + &arenas_tdata_old[i].decay_ticker); + } else { + ticker_init(&arenas_tdata[i].decay_ticker, + DECAY_NTICKS_PER_UPDATE); + } + } + if (narenas_tdata > narenas_actual) { + memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) + * (narenas_tdata - narenas_actual)); + } + + /* Read the refreshed tdata array. */ + tdata = &arenas_tdata[ind]; +label_return: + if (arenas_tdata_old != NULL) { + a0dalloc(arenas_tdata_old); + } + return tdata; } -/* Slow path, called only by choose_arena(). */ +/* Slow path, called only by arena_choose(). */ arena_t * -choose_arena_hard(void) -{ - arena_t *ret; +arena_choose_hard(tsd_t *tsd, bool internal) { + arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); + + if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) { + unsigned choose = percpu_arena_choose(); + ret = arena_get(tsd_tsdn(tsd), choose, true); + assert(ret != NULL); + arena_bind(tsd, arena_ind_get(ret), false); + arena_bind(tsd, arena_ind_get(ret), true); + + return ret; + } if (narenas_auto > 1) { - unsigned i, choose, first_null; + unsigned i, j, choose[2], first_null; + bool is_new_arena[2]; + + /* + * Determine binding for both non-internal and internal + * allocation. + * + * choose[0]: For application allocation. + * choose[1]: For internal metadata allocation. + */ + + for (j = 0; j < 2; j++) { + choose[j] = 0; + is_new_arena[j] = false; + } - choose = 0; first_null = narenas_auto; - malloc_mutex_lock(&arenas_lock); - assert(arenas[0] != NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); + assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); for (i = 1; i < narenas_auto; i++) { - if (arenas[i] != NULL) { + if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { /* * Choose the first arena that has the lowest * number of threads assigned to it. */ - if (arenas[i]->nthreads < - arenas[choose]->nthreads) - choose = i; + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get( + tsd_tsdn(tsd), i, false), !!j) < + arena_nthreads_get(arena_get( + tsd_tsdn(tsd), choose[j], false), + !!j)) { + choose[j] = i; + } + } } else if (first_null == narenas_auto) { /* * Record the index of the first uninitialized @@ -174,38 +543,99 @@ choose_arena_hard(void) } } - if (arenas[choose]->nthreads == 0 - || first_null == narenas_auto) { - /* - * Use an unloaded arena, or the least loaded arena if - * all arenas are already initialized. - */ - ret = arenas[choose]; - } else { - /* Initialize a new arena. */ - ret = arenas_extend(first_null); + for (j = 0; j < 2; j++) { + if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), + choose[j], false), !!j) == 0 || first_null == + narenas_auto) { + /* + * Use an unloaded arena, or the least loaded + * arena if all arenas are already initialized. + */ + if (!!j == internal) { + ret = arena_get(tsd_tsdn(tsd), + choose[j], false); + } + } else { + arena_t *arena; + + /* Initialize a new arena. 
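
The decay tickers copied above are per-thread, per-arena countdowns: every DECAY_NTICKS_PER_UPDATE allocation events a tick fires and the thread performs decay work for that arena. A simplified version of the ticker itself (jemalloc's lives in ticker.h; names mirrored, logic reduced):

#include <stdbool.h>
#include <stdint.h>

typedef struct {
	int32_t tick;
	int32_t nticks;
} ticker_t;

static void
ticker_init(ticker_t *t, int32_t nticks) {
	t->nticks = nticks;
	t->tick = nticks;
}

/*
 * Returns true once every nticks calls; jemalloc uses this shape to
 * trigger periodic work from allocation fast paths without a clock.
 */
static bool
ticker_tick(ticker_t *t) {
	if (--t->tick == 0) {
		t->tick = t->nticks;
		return true;
	}
	return false;
}
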
*/ + choose[j] = first_null; + arena = arena_init_locked(tsd_tsdn(tsd), + choose[j], + (extent_hooks_t *)&extent_hooks_default); + if (arena == NULL) { + malloc_mutex_unlock(tsd_tsdn(tsd), + &arenas_lock); + return NULL; + } + is_new_arena[j] = true; + if (!!j == internal) { + ret = arena; + } + } + arena_bind(tsd, choose[j], !!j); + } + malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); + + for (j = 0; j < 2; j++) { + if (is_new_arena[j]) { + assert(choose[j] > 0); + arena_new_create_background_thread( + tsd_tsdn(tsd), choose[j]); + } } - ret->nthreads++; - malloc_mutex_unlock(&arenas_lock); + } else { - ret = arenas[0]; - malloc_mutex_lock(&arenas_lock); - ret->nthreads++; - malloc_mutex_unlock(&arenas_lock); + ret = arena_get(tsd_tsdn(tsd), 0, false); + arena_bind(tsd, 0, false); + arena_bind(tsd, 0, true); + } + + return ret; +} + +void +iarena_cleanup(tsd_t *tsd) { + arena_t *iarena; + + iarena = tsd_iarena_get(tsd); + if (iarena != NULL) { + arena_unbind(tsd, arena_ind_get(iarena), true); } +} - arenas_tsd_set(&ret); +void +arena_cleanup(tsd_t *tsd) { + arena_t *arena; - return (ret); + arena = tsd_arena_get(tsd); + if (arena != NULL) { + arena_unbind(tsd, arena_ind_get(arena), false); + } } -static void -stats_print_atexit(void) -{ +void +arenas_tdata_cleanup(tsd_t *tsd) { + arena_tdata_t *arenas_tdata; - if (config_tcache && config_stats) { + /* Prevent tsd->arenas_tdata from being (re)created. */ + *tsd_arenas_tdata_bypassp_get(tsd) = true; + + arenas_tdata = tsd_arenas_tdata_get(tsd); + if (arenas_tdata != NULL) { + tsd_arenas_tdata_set(tsd, NULL); + a0dalloc(arenas_tdata); + } +} + +static void +stats_print_atexit(void) { + if (config_stats) { + tsdn_t *tsdn; unsigned narenas, i; + tsdn = tsdn_fetch(); + /* * Merge stats from extant threads. This is racy, since * individual threads do not lock when recording tcache stats @@ -214,25 +644,45 @@ stats_print_atexit(void) * continue to allocate. */ for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { - arena_t *arena = arenas[i]; + arena_t *arena = arena_get(tsdn, i, false); if (arena != NULL) { tcache_t *tcache; - /* - * tcache_stats_merge() locks bins, so if any - * code is introduced that acquires both arena - * and bin locks in the opposite order, - * deadlocks may result. - */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); ql_foreach(tcache, &arena->tcache_ql, link) { - tcache_stats_merge(tcache, arena); + tcache_stats_merge(tsdn, tcache, arena); } - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, + &arena->tcache_ql_mtx); } } } - je_malloc_stats_print(NULL, NULL, NULL); + je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); +} + +/* + * Ensure that we don't hold any locks upon entry to or exit from allocator + * code (in a "broad" sense that doesn't count a reentrant allocation as an + * entrance or exit). + */ +JEMALLOC_ALWAYS_INLINE void +check_entry_exit_locking(tsdn_t *tsdn) { + if (!config_debug) { + return; + } + if (tsdn_null(tsdn)) { + return; + } + tsd_t *tsd = tsdn_tsd(tsdn); + /* + * It's possible we hold locks at entry/exit if we're in a nested + * allocation. + */ + int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); + if (reentrancy_level != 0) { + return; + } + witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); } /* @@ -243,69 +693,82 @@ stats_print_atexit(void) * Begin initialization functions. 
*/ +static char * +jemalloc_secure_getenv(const char *name) { +#ifdef JEMALLOC_HAVE_SECURE_GETENV + return secure_getenv(name); +#else +# ifdef JEMALLOC_HAVE_ISSETUGID + if (issetugid() != 0) { + return NULL; + } +# endif + return getenv(name); +#endif +} + static unsigned -malloc_ncpus(void) -{ +malloc_ncpus(void) { long result; #ifdef _WIN32 SYSTEM_INFO si; GetSystemInfo(&si); result = si.dwNumberOfProcessors; +#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) + /* + * glibc >= 2.6 has the CPU_COUNT macro. + * + * glibc's sysconf() uses isspace(). glibc allocates for the first time + * *before* setting up the isspace tables. Therefore we need a + * different method to get the number of CPUs. + */ + { + cpu_set_t set; + + pthread_getaffinity_np(pthread_self(), sizeof(set), &set); + result = CPU_COUNT(&set); + } #else result = sysconf(_SC_NPROCESSORS_ONLN); #endif return ((result == -1) ? 1 : (unsigned)result); } -void -arenas_cleanup(void *arg) -{ - arena_t *arena = *(arena_t **)arg; - - malloc_mutex_lock(&arenas_lock); - arena->nthreads--; - malloc_mutex_unlock(&arenas_lock); -} - -JEMALLOC_ALWAYS_INLINE_C void -malloc_thread_init(void) -{ - - /* - * TSD initialization can't be safely done as a side effect of - * deallocation, because it is possible for a thread to do nothing but - * deallocate its TLS data via free(), in which case writing to TLS - * would cause write-after-free memory corruption. The quarantine - * facility *only* gets used as a side effect of deallocation, so make - * a best effort attempt at initializing its TSD by hooking all - * allocation events. - */ - if (config_fill && opt_quarantine) - quarantine_alloc_hook(); -} - -JEMALLOC_ALWAYS_INLINE_C bool -malloc_init(void) -{ +static void +init_opt_stats_print_opts(const char *v, size_t vlen) { + size_t opts_len = strlen(opt_stats_print_opts); + assert(opts_len <= stats_print_tot_num_options); + + for (size_t i = 0; i < vlen; i++) { + switch (v[i]) { +#define OPTION(o, v, d, s) case o: break; + STATS_PRINT_OPTIONS +#undef OPTION + default: continue; + } - if (malloc_initialized == false && malloc_init_hard()) - return (true); - malloc_thread_init(); + if (strchr(opt_stats_print_opts, v[i]) != NULL) { + /* Ignore repeated. 
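*/

/*
 * The malloc_ncpus() change above sidesteps glibc's allocating sysconf()
 * path by reading the thread's affinity mask. A standalone sketch using
 * only the real pthread_getaffinity_np()/CPU_COUNT() APIs; the fallback
 * to sysconf() on failure is this sketch's addition, not the patch's.
 */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static unsigned
ncpus_from_affinity(void) {
	cpu_set_t set;

	if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) != 0) {
		long n = sysconf(_SC_NPROCESSORS_ONLN);
		return (n == -1) ? 1 : (unsigned)n;
	}
	return (unsigned)CPU_COUNT(&set);
}

int
main(void) {
	printf("usable cpus: %u\n", ncpus_from_affinity());
	return 0;
}

/*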
*/ + continue; + } - return (false); + opt_stats_print_opts[opts_len++] = v[i]; + opt_stats_print_opts[opts_len] = '\0'; + assert(opts_len <= stats_print_tot_num_options); + } + assert(opts_len == strlen(opt_stats_print_opts)); } static bool malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, - char const **v_p, size_t *vlen_p) -{ + char const **v_p, size_t *vlen_p) { bool accept; const char *opts = *opts_p; *k_p = opts; - for (accept = false; accept == false;) { + for (accept = false; !accept;) { switch (*opts) { case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': @@ -333,14 +796,14 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, malloc_write("<jemalloc>: Conf string ends " "with key\n"); } - return (true); + return true; default: malloc_write("<jemalloc>: Malformed conf string\n"); - return (true); + return true; } } - for (accept = false; accept == false;) { + for (accept = false; !accept;) { switch (*opts) { case ',': opts++; @@ -369,46 +832,57 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, } *opts_p = opts; - return (false); + return false; } static void -malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, - size_t vlen) -{ +malloc_abort_invalid_conf(void) { + assert(opt_abort_conf); + malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf " + "value (see above).\n"); + abort(); +} +static void +malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, + size_t vlen) { malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, (int)vlen, v); + had_conf_error = true; + if (opt_abort_conf) { + malloc_abort_invalid_conf(); + } } static void -malloc_conf_init(void) -{ +malloc_slow_flag_init(void) { + /* + * Combine the runtime options into malloc_slow for fast path. Called + * after processing all the options. + */ + malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) + | (opt_junk_free ? flag_opt_junk_free : 0) + | (opt_zero ? flag_opt_zero : 0) + | (opt_utrace ? flag_opt_utrace : 0) + | (opt_xmalloc ? flag_opt_xmalloc : 0); + + malloc_slow = (malloc_slow_flags != 0); +} + +static void +malloc_conf_init(void) { unsigned i; char buf[PATH_MAX + 1]; const char *opts, *k, *v; size_t klen, vlen; - /* - * Automatically configure valgrind before processing options. The - * valgrind option remains in jemalloc 3.x for compatibility reasons. - */ - if (config_valgrind) { - opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; - if (config_fill && opt_valgrind) { - opt_junk = false; - assert(opt_zero == false); - opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; - opt_redzone = true; - } - if (config_tcache && opt_valgrind) - opt_tcache = false; - } - - for (i = 0; i < 3; i++) { + for (i = 0; i < 4; i++) { /* Get runtime configuration. */ switch (i) { case 0: + opts = config_malloc_conf; + break; + case 1: if (je_malloc_conf != NULL) { /* * Use options that were compiled into the @@ -421,8 +895,8 @@ malloc_conf_init(void) opts = buf; } break; - case 1: { - int linklen = 0; + case 2: { + ssize_t linklen = 0; #ifndef _WIN32 int saved_errno = errno; const char *linkname = @@ -441,14 +915,14 @@ malloc_conf_init(void) if (linklen == -1) { /* No configuration specified. */ linklen = 0; - /* restore errno */ + /* Restore errno. 
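*/

/*
 * A compact model of the cursor that malloc_conf_next() implements above:
 * each call carves one key:value pair out of a comma-separated option
 * string without copying it. Unlike the original (which returns true on
 * end/error), this sketch returns true while a pair was produced.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
conf_next(const char **opts, const char **k, size_t *klen,
    const char **v, size_t *vlen) {
	const char *p = *opts, *colon, *comma;

	if (*p == '\0') {
		return false;		/* Exhausted. */
	}
	colon = strchr(p, ':');
	if (colon == NULL) {
		return false;		/* Malformed: string ends with a key. */
	}
	*k = p;
	*klen = (size_t)(colon - p);
	*v = colon + 1;
	comma = strchr(*v, ',');
	if (comma == NULL) {
		*vlen = strlen(*v);
		*opts = *v + *vlen;
	} else {
		*vlen = (size_t)(comma - *v);
		*opts = comma + 1;
	}
	return true;
}

int
main(void) {
	const char *opts = "narenas:4,tcache:false";
	const char *k, *v;
	size_t klen, vlen;

	while (conf_next(&opts, &k, &klen, &v, &vlen)) {
		printf("%.*s = %.*s\n", (int)klen, k, (int)vlen, v);
	}
	return 0;
}

/*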
*/ set_errno(saved_errno); } #endif buf[linklen] = '\0'; opts = buf; break; - } case 2: { + } case 3: { const char *envname = #ifdef JEMALLOC_PREFIX JEMALLOC_CPREFIX"MALLOC_CONF" @@ -457,7 +931,7 @@ malloc_conf_init(void) #endif ; - if ((opts = getenv(envname)) != NULL) { + if ((opts = jemalloc_secure_getenv(envname)) != NULL) { /* * Do nothing; opts is already initialized to * the value of the MALLOC_CONF environment @@ -475,27 +949,31 @@ malloc_conf_init(void) opts = buf; } - while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, - &vlen) == false) { -#define CONF_HANDLE_BOOL(o, n) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ - if (strncmp("true", v, vlen) == 0 && \ - vlen == sizeof("true")-1) \ + while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, + &vlen)) { +#define CONF_MATCH(n) \ + (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) +#define CONF_MATCH_VALUE(n) \ + (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) +#define CONF_HANDLE_BOOL(o, n) \ + if (CONF_MATCH(n)) { \ + if (CONF_MATCH_VALUE("true")) { \ o = true; \ - else if (strncmp("false", v, vlen) == \ - 0 && vlen == sizeof("false")-1) \ + } else if (CONF_MATCH_VALUE("false")) { \ o = false; \ - else { \ + } else { \ malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ } \ continue; \ } -#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ +#define CONF_MIN_no(um, min) false +#define CONF_MIN_yes(um, min) ((um) < (min)) +#define CONF_MAX_no(um, max) false +#define CONF_MAX_yes(um, max) ((um) > (max)) +#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ + if (CONF_MATCH(n)) { \ uintmax_t um; \ char *end; \ \ @@ -507,27 +985,40 @@ malloc_conf_init(void) "Invalid conf value", \ k, klen, v, vlen); \ } else if (clip) { \ - if (min != 0 && um < min) \ - o = min; \ - else if (um > max) \ - o = max; \ - else \ - o = um; \ + if (CONF_MIN_##check_min(um, \ + (t)(min))) { \ + o = (t)(min); \ + } else if ( \ + CONF_MAX_##check_max(um, \ + (t)(max))) { \ + o = (t)(max); \ + } else { \ + o = (t)um; \ + } \ } else { \ - if ((min != 0 && um < min) || \ - um > max) { \ + if (CONF_MIN_##check_min(um, \ + (t)(min)) || \ + CONF_MAX_##check_max(um, \ + (t)(max))) { \ malloc_conf_error( \ "Out-of-range " \ "conf value", \ k, klen, v, vlen); \ - } else \ - o = um; \ + } else { \ + o = (t)um; \ + } \ } \ continue; \ } -#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ +#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ + clip) \ + CONF_HANDLE_T_U(unsigned, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ + CONF_HANDLE_T_U(size_t, o, n, min, max, \ + check_min, check_max, clip) +#define CONF_HANDLE_SSIZE_T(o, n, min, max) \ + if (CONF_MATCH(n)) { \ long l; \ char *end; \ \ @@ -538,18 +1029,18 @@ malloc_conf_init(void) malloc_conf_error( \ "Invalid conf value", \ k, klen, v, vlen); \ - } else if (l < (ssize_t)min || l > \ - (ssize_t)max) { \ + } else if (l < (ssize_t)(min) || l > \ + (ssize_t)(max)) { \ malloc_conf_error( \ "Out-of-range conf value", \ k, klen, v, vlen); \ - } else \ + } else { \ o = l; \ + } \ continue; \ } -#define CONF_HANDLE_CHAR_P(o, n, d) \ - if (sizeof(n)-1 == klen && strncmp(n, k, \ - klen) == 0) { \ +#define CONF_HANDLE_CHAR_P(o, n, d) \ + if (CONF_MATCH(n)) { \ size_t cpylen = (vlen <= \ sizeof(o)-1) ? 
vlen : \ sizeof(o)-1; \ @@ -559,23 +1050,18 @@ malloc_conf_init(void) } CONF_HANDLE_BOOL(opt_abort, "abort") - /* - * Chunks always require at least one header page, plus - * one data page in the absence of redzones, or three - * pages in the presence of redzones. In order to - * simplify options processing, fix the limit based on - * config_fill. - */ - CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + - (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1, - true) + CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") + if (opt_abort_conf && had_conf_error) { + malloc_abort_invalid_conf(); + } + CONF_HANDLE_BOOL(opt_retain, "retain") if (strncmp("dss", k, klen) == 0) { int i; bool match = false; for (i = 0; i < dss_prec_limit; i++) { if (strncmp(dss_prec_names[i], v, vlen) == 0) { - if (chunk_dss_prec_set(i)) { + if (extent_dss_prec_set(i)) { malloc_conf_error( "Error setting dss", k, klen, v, vlen); @@ -587,47 +1073,98 @@ malloc_conf_init(void) } } } - if (match == false) { + if (!match) { malloc_conf_error("Invalid conf value", k, klen, v, vlen); } continue; } - CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, - SIZE_T_MAX, false) - CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", - -1, (sizeof(size_t) << 3) - 1) + CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, + UINT_MAX, yes, no, false) + CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, + "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); + CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, + "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < + QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : + SSIZE_MAX); CONF_HANDLE_BOOL(opt_stats_print, "stats_print") + if (CONF_MATCH("stats_print_opts")) { + init_opt_stats_print_opts(v, vlen); + continue; + } if (config_fill) { - CONF_HANDLE_BOOL(opt_junk, "junk") - CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", - 0, SIZE_T_MAX, false) - CONF_HANDLE_BOOL(opt_redzone, "redzone") + if (CONF_MATCH("junk")) { + if (CONF_MATCH_VALUE("true")) { + opt_junk = "true"; + opt_junk_alloc = opt_junk_free = + true; + } else if (CONF_MATCH_VALUE("false")) { + opt_junk = "false"; + opt_junk_alloc = opt_junk_free = + false; + } else if (CONF_MATCH_VALUE("alloc")) { + opt_junk = "alloc"; + opt_junk_alloc = true; + opt_junk_free = false; + } else if (CONF_MATCH_VALUE("free")) { + opt_junk = "free"; + opt_junk_alloc = false; + opt_junk_free = true; + } else { + malloc_conf_error( + "Invalid conf value", k, + klen, v, vlen); + } + continue; + } CONF_HANDLE_BOOL(opt_zero, "zero") } if (config_utrace) { CONF_HANDLE_BOOL(opt_utrace, "utrace") } - if (config_valgrind) { - CONF_HANDLE_BOOL(opt_valgrind, "valgrind") - } if (config_xmalloc) { CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") } - if (config_tcache) { - CONF_HANDLE_BOOL(opt_tcache, "tcache") - CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, - "lg_tcache_max", -1, - (sizeof(size_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_tcache, "tcache") + CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", + -1, (sizeof(size_t) << 3) - 1) + if (strncmp("percpu_arena", k, klen) == 0) { + int i; + bool match = false; + for (i = percpu_arena_mode_names_base; i < + percpu_arena_mode_names_limit; i++) { + if (strncmp(percpu_arena_mode_names[i], + v, vlen) == 0) { + if (!have_percpu_arena) { + malloc_conf_error( + "No getcpu support", + k, klen, v, vlen); + } + opt_percpu_arena = i; + match = true; + break; + } + } + if (!match) { + malloc_conf_error("Invalid conf value", + k, klen, v, vlen); + } + continue; } + CONF_HANDLE_BOOL(opt_background_thread, + 
"background_thread"); if (config_prof) { CONF_HANDLE_BOOL(opt_prof, "prof") CONF_HANDLE_CHAR_P(opt_prof_prefix, "prof_prefix", "jeprof") CONF_HANDLE_BOOL(opt_prof_active, "prof_active") - CONF_HANDLE_SSIZE_T(opt_lg_prof_sample, - "lg_prof_sample", 0, - (sizeof(uint64_t) << 3) - 1) + CONF_HANDLE_BOOL(opt_prof_thread_active_init, + "prof_thread_active_init") + CONF_HANDLE_SIZE_T(opt_lg_prof_sample, + "lg_prof_sample", 0, (sizeof(uint64_t) << 3) + - 1, no, yes, true) CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, "lg_prof_interval", -1, @@ -638,7 +1175,15 @@ malloc_conf_init(void) } malloc_conf_error("Invalid conf pair", k, klen, v, vlen); +#undef CONF_MATCH +#undef CONF_MATCH_VALUE #undef CONF_HANDLE_BOOL +#undef CONF_MIN_no +#undef CONF_MIN_yes +#undef CONF_MAX_no +#undef CONF_MAX_yes +#undef CONF_HANDLE_T_U +#undef CONF_HANDLE_UNSIGNED #undef CONF_HANDLE_SIZE_T #undef CONF_HANDLE_SSIZE_T #undef CONF_HANDLE_CHAR_P @@ -647,195 +1192,325 @@ malloc_conf_init(void) } static bool -malloc_init_hard(void) -{ - arena_t *init_arenas[1]; - - malloc_mutex_lock(&init_lock); - if (malloc_initialized || IS_INITIALIZER) { +malloc_init_hard_needed(void) { + if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == + malloc_init_recursible)) { /* * Another thread initialized the allocator before this one * acquired init_lock, or this thread is the initializing * thread, and it is recursively allocating. */ - malloc_mutex_unlock(&init_lock); - return (false); + return false; } #ifdef JEMALLOC_THREADED_INIT - if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) { + if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { /* Busy-wait until the initializing thread completes. */ + spin_t spinner = SPIN_INITIALIZER; do { - malloc_mutex_unlock(&init_lock); - CPU_SPINWAIT; - malloc_mutex_lock(&init_lock); - } while (malloc_initialized == false); - malloc_mutex_unlock(&init_lock); - return (false); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + spin_adaptive(&spinner); + malloc_mutex_lock(TSDN_NULL, &init_lock); + } while (!malloc_initialized()); + return false; } #endif + return true; +} + +static bool +malloc_init_hard_a0_locked() { malloc_initializer = INITIALIZER; - malloc_tsd_boot(); - if (config_prof) + if (config_prof) { prof_boot0(); - + } malloc_conf_init(); - if (opt_stats_print) { /* Print statistics at exit. */ if (atexit(stats_print_atexit) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } } - - if (base_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + if (pages_boot()) { + return true; } - - if (chunk_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + if (base_boot(TSDN_NULL)) { + return true; + } + if (extent_boot()) { + return true; } - if (ctl_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + return true; } - - if (config_prof) + if (config_prof) { prof_boot1(); - - arena_boot(); - - if (config_tcache && tcache_boot0()) { - malloc_mutex_unlock(&init_lock); - return (true); } - - if (huge_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + arena_boot(); + if (tcache_boot(TSDN_NULL)) { + return true; } - - if (malloc_mutex_init(&arenas_lock)) { - malloc_mutex_unlock(&init_lock); - return (true); + if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, + malloc_mutex_rank_exclusive)) { + return true; } - /* * Create enough scaffolding to allow recursive allocation in * malloc_ncpus(). 
*/ - narenas_total = narenas_auto = 1; - arenas = init_arenas; + narenas_auto = 1; memset(arenas, 0, sizeof(arena_t *) * narenas_auto); - /* * Initialize one arena here. The rest are lazily created in - * choose_arena_hard(). + * arena_choose_hard(). */ - arenas_extend(0); - if (arenas[0] == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - /* Initialize allocation counters before any allocations can occur. */ - if (config_stats && thread_allocated_tsd_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) + == NULL) { + return true; } + a0 = arena_get(TSDN_NULL, 0, false); + malloc_init_state = malloc_init_a0_initialized; - if (arenas_tsd_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } - - if (config_tcache && tcache_boot1()) { - malloc_mutex_unlock(&init_lock); - return (true); - } + return false; +} - if (config_fill && quarantine_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); - } +static bool +malloc_init_hard_a0(void) { + bool ret; - if (config_prof && prof_boot2()) { - malloc_mutex_unlock(&init_lock); - return (true); - } + malloc_mutex_lock(TSDN_NULL, &init_lock); + ret = malloc_init_hard_a0_locked(); + malloc_mutex_unlock(TSDN_NULL, &init_lock); + return ret; +} - malloc_mutex_unlock(&init_lock); - /**********************************************************************/ - /* Recursive allocation may follow. */ +/* Initialize data structures which may trigger recursive allocation. */ +static bool +malloc_init_hard_recursible(void) { + malloc_init_state = malloc_init_recursible; ncpus = malloc_ncpus(); -#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ - && !defined(_WIN32)) - /* LinuxThreads's pthread_atfork() allocates. */ +#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ + && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ + !defined(__native_client__)) + /* LinuxThreads' pthread_atfork() allocates. */ if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, jemalloc_postfork_child) != 0) { malloc_write("<jemalloc>: Error in pthread_atfork()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } + return true; } #endif - /* Done recursively allocating. */ - /**********************************************************************/ - malloc_mutex_lock(&init_lock); + if (background_thread_boot0()) { + return true; + } + + return false; +} + +static unsigned +malloc_narenas_default(void) { + assert(ncpus > 0); + /* + * For SMP systems, create more than one arena per CPU by + * default. + */ + if (ncpus > 1) { + return ncpus << 2; + } else { + return 1; + } +} + +static percpu_arena_mode_t +percpu_arena_as_initialized(percpu_arena_mode_t mode) { + assert(!malloc_initialized()); + assert(mode <= percpu_arena_disabled); - if (mutex_boot()) { - malloc_mutex_unlock(&init_lock); - return (true); + if (mode != percpu_arena_disabled) { + mode += percpu_arena_mode_enabled_base; } + return mode; +} + +static bool +malloc_init_narenas(void) { + assert(ncpus > 0); + + if (opt_percpu_arena != percpu_arena_disabled) { + if (!have_percpu_arena || malloc_getcpu() < 0) { + opt_percpu_arena = percpu_arena_disabled; + malloc_printf("<jemalloc>: perCPU arena getcpu() not " + "available. Setting narenas to %u.\n", opt_narenas ? 
+ opt_narenas : malloc_narenas_default()); + if (opt_abort) { + abort(); + } + } else { + if (ncpus >= MALLOCX_ARENA_LIMIT) { + malloc_printf("<jemalloc>: narenas w/ percpu" + "arena beyond limit (%d)\n", ncpus); + if (opt_abort) { + abort(); + } + return true; + } + /* NB: opt_percpu_arena isn't fully initialized yet. */ + if (percpu_arena_as_initialized(opt_percpu_arena) == + per_phycpu_arena && ncpus % 2 != 0) { + malloc_printf("<jemalloc>: invalid " + "configuration -- per physical CPU arena " + "with odd number (%u) of CPUs (no hyper " + "threading?).\n", ncpus); + if (opt_abort) + abort(); + } + unsigned n = percpu_arena_ind_limit( + percpu_arena_as_initialized(opt_percpu_arena)); + if (opt_narenas < n) { + /* + * If narenas is specified with percpu_arena + * enabled, actual narenas is set as the greater + * of the two. percpu_arena_choose will be free + * to use any of the arenas based on CPU + * id. This is conservative (at a small cost) + * but ensures correctness. + * + * If for some reason the ncpus determined at + * boot is not the actual number (e.g. because + * of affinity setting from numactl), reserving + * narenas this way provides a workaround for + * percpu_arena. + */ + opt_narenas = n; + } + } + } if (opt_narenas == 0) { - /* - * For SMP systems, create more than one arena per CPU by - * default. - */ - if (ncpus > 1) - opt_narenas = ncpus << 2; - else - opt_narenas = 1; + opt_narenas = malloc_narenas_default(); } + assert(opt_narenas > 0); + narenas_auto = opt_narenas; /* - * Make sure that the arenas array can be allocated. In practice, this - * limit is enough to allow the allocator to function, but the ctl - * machinery will fail to allocate memory at far lower limits. + * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). */ - if (narenas_auto > chunksize / sizeof(arena_t *)) { - narenas_auto = chunksize / sizeof(arena_t *); + if (narenas_auto >= MALLOCX_ARENA_LIMIT) { + narenas_auto = MALLOCX_ARENA_LIMIT - 1; malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", narenas_auto); } - narenas_total = narenas_auto; + narenas_total_set(narenas_auto); - /* Allocate and initialize arenas. 
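*/

/*
 * The arena-count policy above in isolation: default to four arenas per
 * CPU on SMP, then clamp to the MALLOCX_ARENA() indexing range.
 * ARENA_LIMIT is an illustrative constant, not jemalloc's actual
 * MALLOCX_ARENA_LIMIT value.
 */
#include <stdio.h>

#define ARENA_LIMIT 4096

static unsigned
narenas_for(unsigned ncpus, unsigned opt_narenas) {
	unsigned n = opt_narenas;

	if (n == 0) {
		n = (ncpus > 1) ? ncpus << 2 : 1;
	}
	if (n >= ARENA_LIMIT) {
		n = ARENA_LIMIT - 1;	/* Reducing narenas to limit. */
	}
	return n;
}

int
main(void) {
	printf("narenas: %u\n", narenas_for(8, 0));	/* 32 */
	return 0;
}

/*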
*/ - arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); - if (arenas == NULL) { - malloc_mutex_unlock(&init_lock); - return (true); + return false; +} + +static void +malloc_init_percpu(void) { + opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); +} + +static bool +malloc_init_hard_finish(void) { + if (malloc_mutex_boot()) { + return true; + } + + malloc_init_state = malloc_init_initialized; + malloc_slow_flag_init(); + + return false; +} + +static void +malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { + malloc_mutex_assert_owner(tsdn, &init_lock); + malloc_mutex_unlock(tsdn, &init_lock); + if (reentrancy_set) { + assert(!tsdn_null(tsdn)); + tsd_t *tsd = tsdn_tsd(tsdn); + assert(tsd_reentrancy_level_get(tsd) > 0); + post_reentrancy(tsd); + } +} + +static bool +malloc_init_hard(void) { + tsd_t *tsd; + +#if defined(_WIN32) && _WIN32_WINNT < 0x0600 + _init_init_lock(); +#endif + malloc_mutex_lock(TSDN_NULL, &init_lock); + +#define UNLOCK_RETURN(tsdn, ret, reentrancy) \ + malloc_init_hard_cleanup(tsdn, reentrancy); \ + return ret; + + if (!malloc_init_hard_needed()) { + UNLOCK_RETURN(TSDN_NULL, false, false) + } + + if (malloc_init_state != malloc_init_a0_initialized && + malloc_init_hard_a0_locked()) { + UNLOCK_RETURN(TSDN_NULL, true, false) + } + + malloc_mutex_unlock(TSDN_NULL, &init_lock); + /* Recursive allocation relies on functional tsd. */ + tsd = malloc_tsd_boot0(); + if (tsd == NULL) { + return true; + } + if (malloc_init_hard_recursible()) { + return true; + } + + malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); + /* Set reentrancy level to 1 during init. */ + pre_reentrancy(tsd, NULL); + /* Initialize narenas before prof_boot2 (for allocation). */ + if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + if (config_prof && prof_boot2(tsd)) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) } - /* - * Zero the array. In practice, this should always be pre-zeroed, - * since it was just mmap()ed, but let's be sure. - */ - memset(arenas, 0, sizeof(arena_t *) * narenas_total); - /* Copy the pointer to the one arena that was already initialized. */ - arenas[0] = init_arenas[0]; - malloc_initialized = true; - malloc_mutex_unlock(&init_lock); + malloc_init_percpu(); - return (false); + if (malloc_init_hard_finish()) { + UNLOCK_RETURN(tsd_tsdn(tsd), true, true) + } + post_reentrancy(tsd); + malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); + + malloc_tsd_boot1(); + /* Update TSD after tsd_boot1. */ + tsd = tsd_fetch(); + if (opt_background_thread) { + assert(have_background_thread); + /* + * Need to finish init & unlock first before creating background + * threads (pthread_create depends on malloc). + */ + malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); + bool err = background_thread_create(tsd, 0); + malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); + if (err) { + return true; + } + } +#undef UNLOCK_RETURN + return false; } /* @@ -843,469 +1518,779 @@ malloc_init_hard(void) */ /******************************************************************************/ /* - * Begin malloc(3)-compatible functions. + * Begin allocation-path internal functions and data structures. 
*/ -static void * -imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = imalloc(SMALL_MAXCLASS+1); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imalloc(usize); +/* + * Settings determined by the documented behavior of the allocation functions. + */ +typedef struct static_opts_s static_opts_t; +struct static_opts_s { + /* Whether or not allocation size may overflow. */ + bool may_overflow; + /* Whether or not allocations of size 0 should be treated as size 1. */ + bool bump_empty_alloc; + /* + * Whether to assert that allocations are not of size 0 (after any + * bumping). + */ + bool assert_nonempty_alloc; - return (p); -} + /* + * Whether or not to modify the 'result' argument to malloc in case of + * error. + */ + bool null_out_result_on_error; + /* Whether to set errno when we encounter an error condition. */ + bool set_errno_on_error; -JEMALLOC_ALWAYS_INLINE_C void * -imalloc_prof(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; + /* + * The minimum valid alignment for functions requesting aligned storage. + */ + size_t min_alignment; - if ((uintptr_t)cnt != (uintptr_t)1U) - p = imalloc_prof_sample(usize, cnt); - else - p = imalloc(usize); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); + /* The error string to use if we oom. */ + const char *oom_string; + /* The error string to use if the passed-in alignment is invalid. */ + const char *invalid_alignment_string; - return (p); + /* + * False if we're configured to skip some time-consuming operations. + * + * This isn't really a malloc "behavior", but it acts as a useful + * summary of several other static (or at least, static after program + * initialization) options. + */ + bool slow; +}; + +JEMALLOC_ALWAYS_INLINE void +static_opts_init(static_opts_t *static_opts) { + static_opts->may_overflow = false; + static_opts->bump_empty_alloc = false; + static_opts->assert_nonempty_alloc = false; + static_opts->null_out_result_on_error = false; + static_opts->set_errno_on_error = false; + static_opts->min_alignment = 0; + static_opts->oom_string = ""; + static_opts->invalid_alignment_string = ""; + static_opts->slow = false; } /* - * MALLOC_BODY() is a macro rather than a function because its contents are in - * the fast path, but inlining would cause reliability issues when determining - * how many frames to discard from heap profiling backtraces. + * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we + * should have one constant here per magic value there. Note however that the + * representations need not be related. */ -#define MALLOC_BODY(ret, size, usize) do { \ - if (malloc_init()) \ - ret = NULL; \ - else { \ - if (config_prof && opt_prof) { \ - prof_thr_cnt_t *cnt; \ - \ - usize = s2u(size); \ - /* \ - * Call PROF_ALLOC_PREP() here rather than in \ - * imalloc_prof() so that imalloc_prof() can be \ - * inlined without introducing uncertainty \ - * about the number of backtrace frames to \ - * ignore. imalloc_prof() is in the fast path \ - * when heap profiling is enabled, so inlining \ - * is critical to performance. (For \ - * consistency all callers of PROF_ALLOC_PREP() \ - * are structured similarly, even though e.g. \ - * realloc() isn't called enough for inlining \ - * to be critical.) 
\ - */ \ - PROF_ALLOC_PREP(1, usize, cnt); \ - ret = imalloc_prof(usize, cnt); \ - } else { \ - if (config_stats || (config_valgrind && \ - opt_valgrind)) \ - usize = s2u(size); \ - ret = imalloc(size); \ - } \ - } \ -} while (0) - -void * -je_malloc(size_t size) -{ - void *ret; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); - - if (size == 0) - size = 1; +#define TCACHE_IND_NONE ((unsigned)-1) +#define TCACHE_IND_AUTOMATIC ((unsigned)-2) +#define ARENA_IND_AUTOMATIC ((unsigned)-1) + +typedef struct dynamic_opts_s dynamic_opts_t; +struct dynamic_opts_s { + void **result; + size_t num_items; + size_t item_size; + size_t alignment; + bool zero; + unsigned tcache_ind; + unsigned arena_ind; +}; + +JEMALLOC_ALWAYS_INLINE void +dynamic_opts_init(dynamic_opts_t *dynamic_opts) { + dynamic_opts->result = NULL; + dynamic_opts->num_items = 0; + dynamic_opts->item_size = 0; + dynamic_opts->alignment = 0; + dynamic_opts->zero = false; + dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; + dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; +} - MALLOC_BODY(ret, size, usize); +/* ind is ignored if dopts->alignment > 0. */ +JEMALLOC_ALWAYS_INLINE void * +imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t size, size_t usize, szind_t ind) { + tcache_t *tcache; + arena_t *arena; - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in malloc(): " - "out of memory\n"); - abort(); + /* Fill in the tcache. */ + if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { + if (likely(!sopts->slow)) { + /* Getting tcache ptr unconditionally. */ + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + tcache = tcache_get(tsd); } - set_errno(ENOMEM); - } - if (config_stats && ret != NULL) { - assert(usize == isalloc(ret, config_prof)); - thread_allocated_tsd_get()->allocated += usize; + } else if (dopts->tcache_ind == TCACHE_IND_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, dopts->tcache_ind); } - UTRACE(0, size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); - return (ret); -} -static void * -imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; + /* Fill in the arena. */ + if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { + /* + * In case of automatic arena management, we defer arena + * computation until as late as we can, hoping to fill the + * allocation out of the tcache. 
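*/

/*
 * The deferral described above, reduced to its core: a NULL arena is a
 * sentinel for "decide later", so a tcache hit never pays for arena
 * selection at all. Every type and helper here is an illustrative
 * stand-in, not jemalloc's API.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { const char *name; } arena_t;

static arena_t arena0 = {"arena0"};

static void *
tcache_alloc(size_t sz) {
	(void)sz;
	return NULL;	/* Simulate a tcache miss. */
}

static arena_t *
resolve_arena(void) {
	return &arena0;	/* The deferred (possibly expensive) choice. */
}

static void *
alloc_deferred(size_t sz, arena_t *arena) {
	if (arena == NULL) {
		void *p = tcache_alloc(sz);
		if (p != NULL) {
			return p;	/* Fast path: arena never computed. */
		}
		arena = resolve_arena();
	}
	printf("slow path via %s\n", arena->name);
	return malloc(sz);
}

int
main(void) {
	free(alloc_deferred(32, NULL));
	return 0;
}

/*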
+ */ + arena = NULL; + } else { + arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); + } - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0); - p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment, - false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = ipalloc(usize, alignment, false); + if (unlikely(dopts->alignment != 0)) { + return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, + dopts->zero, tcache, arena); + } - return (p); + return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, + arena, sopts->slow); } -JEMALLOC_ALWAYS_INLINE_C void * -imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; +JEMALLOC_ALWAYS_INLINE void * +imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, + size_t usize, szind_t ind) { + void *ret; - if ((uintptr_t)cnt != (uintptr_t)1U) - p = imemalign_prof_sample(alignment, usize, cnt); - else - p = ipalloc(usize, alignment, false); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); + /* + * For small allocations, sampling bumps the usize. If so, we allocate + * from the ind_large bucket. + */ + szind_t ind_large; + size_t bumped_usize = usize; + + if (usize <= SMALL_MAXCLASS) { + assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : + sz_sa2u(LARGE_MINCLASS, dopts->alignment)) + == LARGE_MINCLASS); + ind_large = sz_size2index(LARGE_MINCLASS); + bumped_usize = sz_s2u(LARGE_MINCLASS); + ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, + bumped_usize, ind_large); + if (unlikely(ret == NULL)) { + return NULL; + } + arena_prof_promote(tsd_tsdn(tsd), ret, usize); + } else { + ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); + } - return (p); + return ret; } -JEMALLOC_ATTR(nonnull(1)) -#ifdef JEMALLOC_PROF /* - * Avoid any uncertainty as to how many backtrace frames to ignore in - * PROF_ALLOC_PREP(). + * Returns true if the allocation will overflow, and false otherwise. Sets + * *size to the product either way. */ -JEMALLOC_NOINLINE -#endif -static int -imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) -{ - int ret; - size_t usize; - void *result; +JEMALLOC_ALWAYS_INLINE bool +compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, + size_t *size) { + /* + * This function is just num_items * item_size, except that we may have + * to check for overflow. + */ - assert(min_alignment != 0); + if (!may_overflow) { + assert(dopts->num_items == 1); + *size = dopts->item_size; + return false; + } + + /* A size_t with its high-half bits all set to 1. */ + const static size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); + + *size = dopts->item_size * dopts->num_items; + + if (unlikely(*size == 0)) { + return (dopts->num_items != 0 && dopts->item_size != 0); + } + + /* + * We got a non-zero size, but we don't know if we overflowed to get + * there. To avoid having to do a divide, we'll be clever and note that + * if both A and B can be represented in N/2 bits, then their product + * can be represented in N bits (without the possibility of overflow). + */ + if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { + return false; + } + if (likely(*size / dopts->item_size == dopts->num_items)) { + return false; + } + return true; +} + +JEMALLOC_ALWAYS_INLINE int +imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { + /* Where the actual allocated memory will live. 
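*/

/*
 * The overflow test from compute_size_with_overflow() above as a
 * self-contained function: if both operands fit in the low half of
 * size_t, their product cannot overflow, so the division is only paid in
 * the rare ambiguous case. SIZE_MAX stands in for jemalloc's SIZE_T_MAX.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
mul_overflows(size_t num, size_t size, size_t *result) {
	const size_t high_bits = SIZE_MAX << (sizeof(size_t) * 8 / 2);

	*result = num * size;
	if (*result == 0) {
		/* A zero product overflowed only if both inputs are nonzero. */
		return num != 0 && size != 0;
	}
	if ((high_bits & (num | size)) == 0) {
		return false;	/* Both operands small: provably safe. */
	}
	return *result / size != num;	/* Exact (but slow) check. */
}

int
main(void) {
	size_t r;

	printf("%d\n", mul_overflows((size_t)1 << 33, (size_t)1 << 33, &r));
	printf("%d\n", mul_overflows(1000, 1000, &r));	/* 1 then 0 */
	return 0;
}

/*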
*/ + void *allocation = NULL; + /* Filled in by compute_size_with_overflow below. */ + size_t size = 0; + /* + * For unaligned allocations, we need only ind. For aligned + * allocations, or in case of stats or profiling we need usize. + * + * These are actually dead stores, in that their values are reset before + * any branch on their value is taken. Sometimes though, it's + * convenient to pass them as arguments before this point. To avoid + * undefined behavior then, we initialize them with dummy stores. + */ + szind_t ind = 0; + size_t usize = 0; - if (malloc_init()) { - result = NULL; + /* Reentrancy is only checked on slow path. */ + int8_t reentrancy_level; + + /* Compute the amount of memory the user wants. */ + if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, + &size))) { goto label_oom; - } else { - if (size == 0) - size = 1; + } - /* Make sure that alignment is a large enough power of 2. */ - if (((alignment - 1) & alignment) != 0 - || (alignment < min_alignment)) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error allocating " - "aligned memory: invalid alignment\n"); - abort(); - } - result = NULL; - ret = EINVAL; - goto label_return; + /* Validate the user input. */ + if (sopts->bump_empty_alloc) { + if (unlikely(size == 0)) { + size = 1; } + } + + if (sopts->assert_nonempty_alloc) { + assert (size != 0); + } + + if (unlikely(dopts->alignment < sopts->min_alignment + || (dopts->alignment & (dopts->alignment - 1)) != 0)) { + goto label_invalid_alignment; + } + + /* This is the beginning of the "core" algorithm. */ - usize = sa2u(size, alignment); - if (usize == 0) { - result = NULL; + if (dopts->alignment == 0) { + ind = sz_size2index(size); + if (unlikely(ind >= NSIZES)) { goto label_oom; } + if (config_stats || (config_prof && opt_prof)) { + usize = sz_index2size(ind); + assert(usize > 0 && usize <= LARGE_MAXCLASS); + } + } else { + usize = sz_sa2u(size, dopts->alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + goto label_oom; + } + } - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; + check_entry_exit_locking(tsd_tsdn(tsd)); + + /* + * If we need to handle reentrancy, we can do it out of a + * known-initialized arena (i.e. arena 0). + */ + reentrancy_level = tsd_reentrancy_level_get(tsd); + if (sopts->slow && unlikely(reentrancy_level > 0)) { + /* + * We should never specify particular arenas or tcaches from + * within our internal allocations. + */ + assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || + dopts->tcache_ind == TCACHE_IND_NONE); + assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); + dopts->tcache_ind = TCACHE_IND_NONE; + /* We know that arena 0 has already been initialized. */ + dopts->arena_ind = 0; + } + + /* If profiling is on, get our profiling context. */ + if (config_prof && opt_prof) { + /* + * Note that if we're going down this path, usize must have been + * initialized in the previous if statement. + */ + prof_tctx_t *tctx = prof_alloc_prep( + tsd, usize, prof_active_get_unlocked(), true); + + alloc_ctx_t alloc_ctx; + if (likely((uintptr_t)tctx == (uintptr_t)1U)) { + alloc_ctx.slab = (usize <= SMALL_MAXCLASS); + allocation = imalloc_no_sample( + sopts, dopts, tsd, usize, usize, ind); + } else if ((uintptr_t)tctx > (uintptr_t)1U) { + /* + * Note that ind might still be 0 here. This is fine; + * imalloc_sample ignores ind if dopts->alignment > 0. 
+ */ + allocation = imalloc_sample( + sopts, dopts, tsd, usize, ind); + alloc_ctx.slab = false; + } else { + allocation = NULL; + } - PROF_ALLOC_PREP(2, usize, cnt); - result = imemalign_prof(alignment, usize, cnt); - } else - result = ipalloc(usize, alignment, false); - if (result == NULL) + if (unlikely(allocation == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + goto label_oom; + } + prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); + } else { + /* + * If dopts->alignment > 0, then ind is still 0, but usize was + * computed in the previous if statement. Down the positive + * alignment path, imalloc_no_sample ignores ind and size + * (relying only on usize). + */ + allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, + ind); + if (unlikely(allocation == NULL)) { goto label_oom; + } } - *memptr = result; - ret = 0; -label_return: - if (config_stats && result != NULL) { - assert(usize == isalloc(result, config_prof)); - thread_allocated_tsd_get()->allocated += usize; + /* + * Allocation has been done at this point. We still have some + * post-allocation work to do though. + */ + assert(dopts->alignment == 0 + || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); + + if (config_stats) { + assert(usize == isalloc(tsd_tsdn(tsd), allocation)); + *tsd_thread_allocatedp_get(tsd) += usize; } - UTRACE(0, size, result); - return (ret); + + if (sopts->slow) { + UTRACE(0, size, allocation); + } + + /* Success! */ + check_entry_exit_locking(tsd_tsdn(tsd)); + *dopts->result = allocation; + return 0; + label_oom: - assert(result == NULL); - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error allocating aligned memory: " - "out of memory\n"); + if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); abort(); } - ret = ENOMEM; - goto label_return; -} -int -je_posix_memalign(void **memptr, size_t alignment, size_t size) -{ - int ret = imemalign(memptr, alignment, size, sizeof(void *)); - JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, - config_prof), false); - return (ret); + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->set_errno_on_error) { + set_errno(ENOMEM); + } + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return ENOMEM; + + /* + * This label is only jumped to by one goto; we move it out of line + * anyways to avoid obscuring the non-error paths, and for symmetry with + * the oom case. + */ +label_invalid_alignment: + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->invalid_alignment_string); + abort(); + } + + if (sopts->set_errno_on_error) { + set_errno(EINVAL); + } + + if (sopts->slow) { + UTRACE(NULL, size, NULL); + } + + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (sopts->null_out_result_on_error) { + *dopts->result = NULL; + } + + return EINVAL; } -void * -je_aligned_alloc(size_t alignment, size_t size) -{ - void *ret; - int err; +/* Returns the errno-style error code of the allocation. 
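*/

/*
 * The shape of the new allocation path in miniature: every public entry
 * point fills a static and a dynamic options struct and funnels into one
 * core function. The structs below are a reduced, illustrative subset of
 * static_opts_t/dynamic_opts_t (error handling condensed; the patch only
 * prints oom_string under opt_xmalloc).
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	bool bump_empty_alloc;
	bool set_errno_on_error;
	const char *oom_string;
} s_opts_t;

typedef struct {
	void **result;
	size_t num_items;
	size_t item_size;
	bool zero;
} d_opts_t;

static int
alloc_core(const s_opts_t *s, const d_opts_t *d) {
	size_t size = d->num_items * d->item_size;	/* Overflow check elided. */

	if (s->bump_empty_alloc && size == 0) {
		size = 1;
	}
	*d->result = d->zero ? calloc(1, size) : malloc(size);
	if (*d->result == NULL) {
		fputs(s->oom_string, stderr);
		if (s->set_errno_on_error) {
			errno = ENOMEM;
		}
		return ENOMEM;
	}
	return 0;
}

static void *
my_malloc(size_t size) {
	void *ret;
	s_opts_t s = {true, true, "my_malloc: out of memory\n"};
	d_opts_t d = {&ret, 1, size, false};

	alloc_core(&s, &d);
	return ret;
}

int
main(void) {
	free(my_malloc(32));
	return 0;
}

/*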
*/ +JEMALLOC_ALWAYS_INLINE int +imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { + if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { + if (config_xmalloc && unlikely(opt_xmalloc)) { + malloc_write(sopts->oom_string); + abort(); + } + UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); + set_errno(ENOMEM); + *dopts->result = NULL; + + return ENOMEM; + } - if ((err = imemalign(&ret, alignment, size, 1)) != 0) { - ret = NULL; - set_errno(err); + /* We always need the tsd. Let's grab it right away. */ + tsd_t *tsd = tsd_fetch(); + assert(tsd); + if (likely(tsd_fast(tsd))) { + /* Fast and common path. */ + tsd_assert_fast(tsd); + sopts->slow = false; + return imalloc_body(sopts, dopts, tsd); + } else { + sopts->slow = true; + return imalloc_body(sopts, dopts, tsd); } - JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), - false); - return (ret); } +/******************************************************************************/ +/* + * Begin malloc(3)-compatible functions. + */ -static void * -icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_malloc(size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = icalloc(SMALL_MAXCLASS+1); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = icalloc(usize); + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; - return (p); + imalloc(&sopts, &dopts); + + return ret; } -JEMALLOC_ALWAYS_INLINE_C void * -icalloc_prof(size_t usize, prof_thr_cnt_t *cnt) -{ - void *p; +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +JEMALLOC_ATTR(nonnull(1)) +je_posix_memalign(void **memptr, size_t alignment, size_t size) { + int ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.min_alignment = sizeof(void *); + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + + dopts.result = memptr; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + ret = imalloc(&sopts, &dopts); + return ret; +} - if ((uintptr_t)cnt != (uintptr_t)1U) - p = icalloc_prof_sample(usize, cnt); - else - p = icalloc(usize); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) +je_aligned_alloc(size_t alignment, size_t size) { + void *ret; - return (p); + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.min_alignment = 1; + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + + dopts.result = &ret; + dopts.num_items = 
1; + dopts.item_size = size; + dopts.alignment = alignment; + + imalloc(&sopts, &dopts); + return ret; } -void * -je_calloc(size_t num, size_t size) -{ +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) +je_calloc(size_t num, size_t size) { void *ret; - size_t num_size; - size_t usize JEMALLOC_CC_SILENCE_INIT(0); + static_opts_t sopts; + dynamic_opts_t dopts; - if (malloc_init()) { - num_size = 0; - ret = NULL; - goto label_return; - } + static_opts_init(&sopts); + dynamic_opts_init(&dopts); - num_size = num * size; - if (num_size == 0) { - if (num == 0 || size == 0) - num_size = 1; - else { - ret = NULL; - goto label_return; - } - /* - * Try to avoid division here. We know that it isn't possible to - * overflow during multiplication if neither operand uses any of the - * most significant half of the bits in a size_t. - */ - } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) - && (num_size / size != num)) { - /* size_t overflow. */ - ret = NULL; - goto label_return; - } + sopts.may_overflow = true; + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.set_errno_on_error = true; + sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; + dopts.result = &ret; + dopts.num_items = num; + dopts.item_size = size; + dopts.zero = true; - usize = s2u(num_size); - PROF_ALLOC_PREP(1, usize, cnt); - ret = icalloc_prof(usize, cnt); - } else { - if (config_stats || (config_valgrind && opt_valgrind)) - usize = s2u(num_size); - ret = icalloc(num_size); - } + imalloc(&sopts, &dopts); -label_return: - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in calloc(): out of " - "memory\n"); - abort(); - } - set_errno(ENOMEM); - } - if (config_stats && ret != NULL) { - assert(usize == isalloc(ret, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, num_size, ret); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); - return (ret); + return ret; } static void * -irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt) -{ +irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + prof_tctx_t *tctx) { void *p; - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = iralloc(oldptr, usize, 0, 0, false); + if (tctx == NULL) { + return NULL; + } + if (usize <= SMALL_MAXCLASS) { + p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); + if (p == NULL) { + return NULL; + } + arena_prof_promote(tsd_tsdn(tsd), p, usize); + } else { + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + } - return (p); + return p; } -JEMALLOC_ALWAYS_INLINE_C void * -irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt) -{ +JEMALLOC_ALWAYS_INLINE void * +irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, + alloc_ctx_t *alloc_ctx) { void *p; - prof_ctx_t *old_ctx; - - old_ctx = prof_ctx_get(oldptr); - if ((uintptr_t)cnt != (uintptr_t)1U) - p = irealloc_prof_sample(oldptr, usize, cnt); - else - p = iralloc(oldptr, usize, 0, 0, false); - if (p == NULL) - return (NULL); - prof_realloc(p, usize, cnt, old_usize, old_ctx); + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); 
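	/*
	 * Ordering note: the old allocation's profiling context is captured
	 * before any reallocation, prof_alloc_prep() then decides whether the
	 * new object is sampled, and a failed reallocation rolls that
	 * decision back so sample accounting stays balanced.
	 */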
+ old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); + tctx = prof_alloc_prep(tsd, usize, prof_active, true); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); + } else { + p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, true); + return NULL; + } + prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, + old_tctx); - return (p); + return p; } -JEMALLOC_INLINE_C void -ifree(void *ptr) -{ +JEMALLOC_ALWAYS_INLINE void +ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } + + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); + if (config_prof && opt_prof) { + usize = sz_index2size(alloc_ctx.szind); + prof_free(tsd, ptr, usize, &alloc_ctx); + } else if (config_stats) { + usize = sz_index2size(alloc_ctx.szind); + } + if (config_stats) { + *tsd_thread_deallocatedp_get(tsd) += usize; + } + + if (likely(!slow_path)) { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + false); + } else { + idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, + true); + } +} + +JEMALLOC_ALWAYS_INLINE void +isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { + if (!slow_path) { + tsd_assert_fast(tsd); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + if (tsd_reentrancy_level_get(tsd) != 0) { + assert(slow_path); + } assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); + assert(malloc_initialized() || IS_INITIALIZER); + alloc_ctx_t alloc_ctx, *ctx; if (config_prof && opt_prof) { - usize = isalloc(ptr, config_prof); - prof_free(ptr, usize); - } else if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_stats) - thread_allocated_tsd_get()->deallocated += usize; - if (config_valgrind && opt_valgrind) - rzsize = p2rz(ptr); - iqalloc(ptr); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind == sz_size2index(usize)); + ctx = &alloc_ctx; + prof_free(tsd, ptr, usize, ctx); + } else { + ctx = NULL; + } + + if (config_stats) { + *tsd_thread_deallocatedp_get(tsd) += usize; + } + + if (likely(!slow_path)) { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); + } else { + isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); + } } -void * -je_realloc(void *ptr, size_t size) -{ +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_realloc(void *ptr, size_t size) { void *ret; + tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); size_t usize JEMALLOC_CC_SILENCE_INIT(0); size_t old_usize = 0; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - if (size == 0) { + if (unlikely(size == 0)) { if (ptr != NULL) { /* realloc(ptr, 0) is equivalent to free(ptr). 
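*/

/*
 * The two edge cases handled above, as a short usage sketch against the
 * standard realloc(); je_realloc implements the same contract. Note that
 * the size-0 behavior (free and return NULL) is what jemalloc and glibc
 * do; the C standard leaves it implementation-defined.
 */
#include <stdio.h>
#include <stdlib.h>

int
main(void) {
	void *p = realloc(NULL, 16);	/* Equivalent to malloc(16). */

	if (p == NULL) {
		return 1;
	}
	p = realloc(p, 0);	/* Equivalent to free(); yields NULL here. */
	/* The old pointer is gone; only the NULL result may be inspected. */
	printf("p = %p\n", p);
	return 0;
}

/*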
*/ UTRACE(ptr, 0, 0); - ifree(ptr); - return (NULL); + tcache_t *tcache; + tsd_t *tsd = tsd_fetch(); + if (tsd_reentrancy_level_get(tsd) == 0) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + ifree(tsd, ptr, tcache, true); + return NULL; } size = 1; } - if (ptr != NULL) { - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); + if (likely(ptr != NULL)) { + assert(malloc_initialized() || IS_INITIALIZER); + tsd_t *tsd = tsd_fetch(); - if ((config_prof && opt_prof) || config_stats || - (config_valgrind && opt_valgrind)) - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && opt_valgrind) - old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize); + check_entry_exit_locking(tsd_tsdn(tsd)); + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - usize = s2u(size); - PROF_ALLOC_PREP(1, usize, cnt); - ret = irealloc_prof(ptr, old_usize, usize, cnt); + usize = sz_s2u(size); + ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? + NULL : irealloc_prof(tsd, ptr, old_usize, usize, + &alloc_ctx); } else { - if (config_stats || (config_valgrind && opt_valgrind)) - usize = s2u(size); - ret = iralloc(ptr, size, 0, 0, false); + if (config_stats) { + usize = sz_s2u(size); + } + ret = iralloc(tsd, ptr, old_usize, size, 0, false); } + tsdn = tsd_tsdn(tsd); } else { /* realloc(NULL, size) is equivalent to malloc(size). */ - MALLOC_BODY(ret, size, usize); + return je_malloc(size); } - if (ret == NULL) { - if (config_xmalloc && opt_xmalloc) { + if (unlikely(ret == NULL)) { + if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in realloc(): " "out of memory\n"); abort(); } set_errno(ENOMEM); } - if (config_stats && ret != NULL) { - thread_allocated_t *ta; - assert(usize == isalloc(ret, config_prof)); - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_usize; + if (config_stats && likely(ret != NULL)) { + tsd_t *tsd; + + assert(usize == isalloc(tsdn, ret)); + tsd = tsdn_tsd(tsdn); + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, ret); - JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize, - false); - return (ret); + check_entry_exit_locking(tsdn); + return ret; } -void -je_free(void *ptr) -{ - +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_free(void *ptr) { UTRACE(ptr, 0, 0); - if (ptr != NULL) - ifree(ptr); + if (likely(ptr != NULL)) { + /* + * We avoid setting up tsd fully (e.g. tcache, arena binding) + * based on only free() calls -- other activities trigger the + * minimal to full transition. This is because free() may + * happen during thread shutdown after tls deallocation: if a + * thread never had any malloc activities until then, a + * fully-setup tsd won't be destructed properly. + */ + tsd_t *tsd = tsd_fetch_min(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (likely(tsd_fast(tsd))) { + tsd_assert_fast(tsd); + /* Unconditionally get tcache ptr on fast path. 
*/ + tcache = tsd_tcachep_get(tsd); + ifree(tsd, ptr, tcache, false); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); + } } /* @@ -1317,36 +2302,68 @@ je_free(void *ptr) */ #ifdef JEMALLOC_OVERRIDE_MEMALIGN -void * -je_memalign(size_t alignment, size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - imemalign(&ret, alignment, size, 1); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_memalign(size_t alignment, size_t size) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.min_alignment = 1; + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + sopts.null_out_result_on_error = true; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = alignment; + + imalloc(&sopts, &dopts); + return ret; } #endif #ifdef JEMALLOC_OVERRIDE_VALLOC -void * -je_valloc(size_t size) -{ - void *ret JEMALLOC_CC_SILENCE_INIT(NULL); - imemalign(&ret, PAGE, size, 1); - JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); - return (ret); +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) +je_valloc(size_t size) { + void *ret; + + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.bump_empty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.min_alignment = PAGE; + sopts.oom_string = + "<jemalloc>: Error allocating aligned memory: out of memory\n"; + sopts.invalid_alignment_string = + "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + dopts.alignment = PAGE; + + imalloc(&sopts, &dopts); + + return ret; } #endif -/* - * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has - * #define je_malloc malloc - */ -#define malloc_is_malloc 1 -#define is_malloc_(a) malloc_is_ ## a -#define is_malloc(a) is_malloc_(a) - -#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__)) +#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) /* * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible * to inconsistently reference libc's malloc(3)-compatible functions @@ -1356,11 +2373,47 @@ je_valloc(size_t size) * passed an extra argument for the caller return address, which will be * ignored. 
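*/

/*
 * The interposition technique used below, in isolation: on GCC/Clang ELF
 * targets, an alias attribute binds an extra public symbol to an existing
 * definition, which is how __libc_malloc() and friends get routed to the
 * je_* implementations. The my_* names here are illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>

static void *
my_malloc_impl(size_t size) {
	return malloc(size);	/* Stand-in for the real allocator. */
}

void *my_malloc(size_t size) __attribute__((alias("my_malloc_impl"), used));

int
main(void) {
	void *p = my_malloc(8);	/* Resolves to my_malloc_impl(). */

	printf("%p\n", p);
	free(p);
	return 0;
}

/*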
*/ -JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free; -JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc; -JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc; -JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = +JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; +JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; +JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; +# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK +JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = je_memalign; +# endif + +# ifdef CPU_COUNT +/* + * To enable static linking with glibc, the libc specific malloc interface must + * be implemented also, so none of glibc's malloc.o functions are added to the + * link. + */ +# define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) +/* To force macro expansion of je_ prefix before stringification. */ +# define PREALIAS(je_fn) ALIAS(je_fn) +# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC +void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_FREE +void __libc_free(void* ptr) PREALIAS(je_free); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC +void *__libc_malloc(size_t size) PREALIAS(je_malloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN +void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC +void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); +# endif +# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC +void *__libc_valloc(size_t size) PREALIAS(je_valloc); +# endif +# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN +int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); +# endif +# undef PREALIAS +# undef ALIAS +# endif #endif /* @@ -1371,162 +2424,98 @@ JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = * Begin non-standard functions. */ -JEMALLOC_ALWAYS_INLINE_C void * -imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena) -{ - - assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, - alignment))); - - if (alignment != 0) - return (ipalloct(usize, alignment, zero, try_tcache, arena)); - else if (zero) - return (icalloct(usize, try_tcache, arena)); - else - return (imalloct(usize, try_tcache, arena)); -} - -static void * -imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena, prof_thr_cnt_t *cnt) -{ - void *p; - - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - size_t usize_promoted = (alignment == 0) ? 
- s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment); - assert(usize_promoted != 0); - p = imallocx(usize_promoted, alignment, zero, try_tcache, - arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); - } else - p = imallocx(usize, alignment, zero, try_tcache, arena); - - return (p); -} - -JEMALLOC_ALWAYS_INLINE_C void * -imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache, - arena_t *arena, prof_thr_cnt_t *cnt) -{ - void *p; - - if ((uintptr_t)cnt != (uintptr_t)1U) { - p = imallocx_prof_sample(usize, alignment, zero, try_tcache, - arena, cnt); - } else - p = imallocx(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - return (NULL); - prof_malloc(p, usize, cnt); - - return (p); -} - -void * -je_mallocx(size_t size, int flags) -{ - void *p; - size_t usize; - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); - bool zero = flags & MALLOCX_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - arena_t *arena; - bool try_tcache; +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) +je_mallocx(size_t size, int flags) { + void *ret; + static_opts_t sopts; + dynamic_opts_t dopts; + + static_opts_init(&sopts); + dynamic_opts_init(&dopts); + + sopts.assert_nonempty_alloc = true; + sopts.null_out_result_on_error = true; + sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; + + dopts.result = &ret; + dopts.num_items = 1; + dopts.item_size = size; + if (unlikely(flags != 0)) { + if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { + dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); + } - assert(size != 0); + dopts.zero = MALLOCX_ZERO_GET(flags); - if (malloc_init()) - goto label_oom; + if ((flags & MALLOCX_TCACHE_MASK) != 0) { + if ((flags & MALLOCX_TCACHE_MASK) + == MALLOCX_TCACHE_NONE) { + dopts.tcache_ind = TCACHE_IND_NONE; + } else { + dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); + } + } else { + dopts.tcache_ind = TCACHE_IND_AUTOMATIC; + } - if (arena_ind != UINT_MAX) { - arena = arenas[arena_ind]; - try_tcache = false; - } else { - arena = NULL; - try_tcache = true; + if ((flags & MALLOCX_ARENA_MASK) != 0) + dopts.arena_ind = MALLOCX_ARENA_GET(flags); } - usize = (alignment == 0) ? 
s2u(size) : sa2u(size, alignment); - assert(usize != 0); - - if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - PROF_ALLOC_PREP(1, usize, cnt); - p = imallocx_prof(usize, alignment, zero, try_tcache, arena, - cnt); - } else - p = imallocx(usize, alignment, zero, try_tcache, arena); - if (p == NULL) - goto label_oom; - - if (config_stats) { - assert(usize == isalloc(p, config_prof)); - thread_allocated_tsd_get()->allocated += usize; - } - UTRACE(0, size, p); - JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); - return (p); -label_oom: - if (config_xmalloc && opt_xmalloc) { - malloc_write("<jemalloc>: Error in mallocx(): out of memory\n"); - abort(); - } - UTRACE(0, size, 0); - return (NULL); + imalloc(&sopts, &dopts); + return ret; } static void * -irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize, - bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena, - prof_thr_cnt_t *cnt) -{ +irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, + size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, + prof_tctx_t *tctx) { void *p; - if (cnt == NULL) - return (NULL); - if (prof_promote && usize <= SMALL_MAXCLASS) { - p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= - size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero, - try_tcache_alloc, try_tcache_dalloc, arena); - if (p == NULL) - return (NULL); - arena_prof_promoted(p, usize); + if (tctx == NULL) { + return NULL; + } + if (usize <= SMALL_MAXCLASS) { + p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, + alignment, zero, tcache, arena); + if (p == NULL) { + return NULL; + } + arena_prof_promote(tsdn, p, usize); } else { - p = iralloct(oldptr, size, 0, alignment, zero, - try_tcache_alloc, try_tcache_dalloc, arena); + p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, + tcache, arena); } - return (p); + return p; } -JEMALLOC_ALWAYS_INLINE_C void * -irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment, - size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, - arena_t *arena, prof_thr_cnt_t *cnt) -{ +JEMALLOC_ALWAYS_INLINE void * +irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, + size_t alignment, size_t *usize, bool zero, tcache_t *tcache, + arena_t *arena, alloc_ctx_t *alloc_ctx) { void *p; - prof_ctx_t *old_ctx; - - old_ctx = prof_ctx_get(oldptr); - if ((uintptr_t)cnt != (uintptr_t)1U) - p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero, - try_tcache_alloc, try_tcache_dalloc, arena, cnt); - else { - p = iralloct(oldptr, size, 0, alignment, zero, - try_tcache_alloc, try_tcache_dalloc, arena); + bool prof_active; + prof_tctx_t *old_tctx, *tctx; + + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); + tctx = prof_alloc_prep(tsd, *usize, prof_active, false); + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, + *usize, alignment, zero, tcache, arena, tctx); + } else { + p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, + zero, tcache, arena); + } + if (unlikely(p == NULL)) { + prof_alloc_rollback(tsd, tctx, false); + return NULL; } - if (p == NULL) - return (NULL); - if (p == oldptr && alignment != 0) { + if (p == old_ptr && alignment != 0) { /* * The allocation did not move, so it is possible that the size * class is smaller than would guarantee the requested @@ -1535,421 +2524,467 @@ irallocx_prof(void *oldptr, size_t old_usize, 
size_t size, size_t alignment, * be the same as the current usize because of in-place large * reallocation. Therefore, query the actual value of usize. */ - *usize = isalloc(p, config_prof); + *usize = isalloc(tsd_tsdn(tsd), p); } - prof_realloc(p, *usize, cnt, old_usize, old_ctx); + prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, + old_usize, old_tctx); - return (p); + return p; } -void * -je_rallocx(void *ptr, size_t size, int flags) -{ +JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN +void JEMALLOC_NOTHROW * +JEMALLOC_ALLOC_SIZE(2) +je_rallocx(void *ptr, size_t size, int flags) { void *p; - size_t usize, old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); + tsd_t *tsd; + size_t usize; + size_t old_usize; + size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache_alloc, try_tcache_dalloc; arena_t *arena; + tcache_t *tcache; assert(ptr != NULL); assert(size != 0); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk; - try_tcache_alloc = false; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - try_tcache_dalloc = (chunk == ptr || chunk->arena != - arenas[arena_ind]); - arena = arenas[arena_ind]; + assert(malloc_initialized() || IS_INITIALIZER); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { + unsigned arena_ind = MALLOCX_ARENA_GET(flags); + arena = arena_get(tsd_tsdn(tsd), arena_ind, true); + if (unlikely(arena == NULL)) { + goto label_oom; + } } else { - try_tcache_alloc = true; - try_tcache_dalloc = true; arena = NULL; } - if ((config_prof && opt_prof) || config_stats || - (config_valgrind && opt_valgrind)) - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_usize); + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + tcache = tcache_get(tsd); + } + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - - usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); - assert(usize != 0); - PROF_ALLOC_PREP(1, usize, cnt); - p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero, - try_tcache_alloc, try_tcache_dalloc, arena, cnt); - if (p == NULL) + usize = (alignment == 0) ? 
+ sz_s2u(size) : sz_sa2u(size, alignment); + if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { + goto label_oom; + } + p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, + zero, tcache, arena, &alloc_ctx); + if (unlikely(p == NULL)) { goto label_oom; + } } else { - p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc, - try_tcache_dalloc, arena); - if (p == NULL) + p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, + zero, tcache, arena); + if (unlikely(p == NULL)) { goto label_oom; - if (config_stats || (config_valgrind && opt_valgrind)) - usize = isalloc(p, config_prof); + } + if (config_stats) { + usize = isalloc(tsd_tsdn(tsd), p); + } } + assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); if (config_stats) { - thread_allocated_t *ta; - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_usize; + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; } UTRACE(ptr, size, p); - JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero); - return (p); + check_entry_exit_locking(tsd_tsdn(tsd)); + return p; label_oom: - if (config_xmalloc && opt_xmalloc) { + if (config_xmalloc && unlikely(opt_xmalloc)) { malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); abort(); } UTRACE(ptr, size, 0); - return (NULL); + check_entry_exit_locking(tsd_tsdn(tsd)); + return NULL; } -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, bool zero, arena_t *arena) -{ +JEMALLOC_ALWAYS_INLINE size_t +ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero) { size_t usize; - if (ixalloc(ptr, size, extra, alignment, zero)) - return (old_usize); - usize = isalloc(ptr, config_prof); + if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) { + return old_usize; + } + usize = isalloc(tsdn, ptr); - return (usize); + return usize; } static size_t -ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, size_t max_usize, bool zero, arena_t *arena, - prof_thr_cnt_t *cnt) -{ +ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { size_t usize; - if (cnt == NULL) - return (old_usize); - /* Use minimum usize to determine whether promotion may happen. */ - if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size, - alignment)) <= SMALL_MAXCLASS) { - if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= - size+extra) ? 
0 : size+extra - (SMALL_MAXCLASS+1), - alignment, zero)) - return (old_usize); - usize = isalloc(ptr, config_prof); - if (max_usize < PAGE) - arena_prof_promoted(ptr, usize); - } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero, arena); + if (tctx == NULL) { + return old_usize; } + usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, + zero); - return (usize); + return usize; } -JEMALLOC_ALWAYS_INLINE_C size_t -ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra, - size_t alignment, size_t max_usize, bool zero, arena_t *arena, - prof_thr_cnt_t *cnt) -{ - size_t usize; - prof_ctx_t *old_ctx; +JEMALLOC_ALWAYS_INLINE size_t +ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, + size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { + size_t usize_max, usize; + bool prof_active; + prof_tctx_t *old_tctx, *tctx; - old_ctx = prof_ctx_get(ptr); - if ((uintptr_t)cnt != (uintptr_t)1U) { - usize = ixallocx_prof_sample(ptr, old_usize, size, extra, - alignment, zero, max_usize, arena, cnt); + prof_active = prof_active_get_unlocked(); + old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); + /* + * usize isn't knowable before ixalloc() returns when extra is non-zero. + * Therefore, compute its maximum possible value and use that in + * prof_alloc_prep() to decide whether to capture a backtrace. + * prof_realloc() will use the actual usize to decide whether to sample. + */ + if (alignment == 0) { + usize_max = sz_s2u(size+extra); + assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); + } else { + usize_max = sz_sa2u(size+extra, alignment); + if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { + /* + * usize_max is out of range, and chances are that + * allocation will fail, but use the maximum possible + * value and carry on with prof_alloc_prep(), just in + * case allocation succeeds. 
+ */ + usize_max = LARGE_MAXCLASS; + } + } + tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); + + if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { + usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, + size, extra, alignment, zero, tctx); } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero, arena); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } - if (usize == old_usize) - return (usize); - prof_realloc(ptr, usize, cnt, old_usize, old_ctx); + if (usize == old_usize) { + prof_alloc_rollback(tsd, tctx, false); + return usize; + } + prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, + old_tctx); - return (usize); + return usize; } -size_t -je_xallocx(void *ptr, size_t size, size_t extra, int flags) -{ +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_xallocx(void *ptr, size_t size, size_t extra, int flags) { + tsd_t *tsd; size_t usize, old_usize; - UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); + size_t alignment = MALLOCX_ALIGN_GET(flags); bool zero = flags & MALLOCX_ZERO; - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - arena_t *arena; assert(ptr != NULL); assert(size != 0); assert(SIZE_T_MAX - size >= extra); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (arena_ind != UINT_MAX) - arena = arenas[arena_ind]; - else - arena = NULL; - - old_usize = isalloc(ptr, config_prof); - if (config_valgrind && opt_valgrind) - old_rzsize = u2rz(old_usize); + assert(malloc_initialized() || IS_INITIALIZER); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + + alloc_ctx_t alloc_ctx; + rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); + rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, + (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); + assert(alloc_ctx.szind != NSIZES); + old_usize = sz_index2size(alloc_ctx.szind); + assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); + /* + * The API explicitly absolves itself of protecting against (size + + * extra) numerical overflow, but we may need to clamp extra to avoid + * exceeding LARGE_MAXCLASS. + * + * Ordinarily, size limit checking is handled deeper down, but here we + * have to check as part of (size + extra) clamping, since we need the + * clamped value in the above helper functions. + */ + if (unlikely(size > LARGE_MAXCLASS)) { + usize = old_usize; + goto label_not_resized; + } + if (unlikely(LARGE_MAXCLASS - size < extra)) { + extra = LARGE_MAXCLASS - size; + } if (config_prof && opt_prof) { - prof_thr_cnt_t *cnt; - /* - * usize isn't knowable before ixalloc() returns when extra is - * non-zero. Therefore, compute its maximum possible value and - * use that in PROF_ALLOC_PREP() to decide whether to capture a - * backtrace. prof_realloc() will use the actual usize to - * decide whether to sample. - */ - size_t max_usize = (alignment == 0) ? 
s2u(size+extra) : - sa2u(size+extra, alignment); - PROF_ALLOC_PREP(1, max_usize, cnt); - usize = ixallocx_prof(ptr, old_usize, size, extra, alignment, - max_usize, zero, arena, cnt); + usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, + alignment, zero, &alloc_ctx); } else { - usize = ixallocx_helper(ptr, old_usize, size, extra, alignment, - zero, arena); + usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, + extra, alignment, zero); } - if (usize == old_usize) + if (unlikely(usize == old_usize)) { goto label_not_resized; + } if (config_stats) { - thread_allocated_t *ta; - ta = thread_allocated_tsd_get(); - ta->allocated += usize; - ta->deallocated += old_usize; + *tsd_thread_allocatedp_get(tsd) += usize; + *tsd_thread_deallocatedp_get(tsd) += old_usize; } - JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero); label_not_resized: UTRACE(ptr, size, ptr); - return (usize); + check_entry_exit_locking(tsd_tsdn(tsd)); + return usize; } -size_t -je_sallocx(const void *ptr, int flags) -{ +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_sallocx(const void *ptr, int flags) { size_t usize; + tsdn_t *tsdn; + + assert(malloc_initialized() || IS_INITIALIZER); + assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); - if (config_ivsalloc) - usize = ivsalloc(ptr, config_prof); - else { - assert(ptr != NULL); - usize = isalloc(ptr, config_prof); + if (config_debug || force_ivsalloc) { + usize = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || usize != 0); + } else { + usize = isalloc(tsdn, ptr); } - return (usize); + check_entry_exit_locking(tsdn); + return usize; } -void -je_dallocx(void *ptr, int flags) -{ +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_dallocx(void *ptr, int flags) { + assert(ptr != NULL); + assert(malloc_initialized() || IS_INITIALIZER); + + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + /* Not allowed to be reentrant and specify a custom tcache. 
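+ * Reentrant calls always run with tcache == NULL (see the fallback
+ * below), so a caller-specified tcache cannot be honored; the assertion
+ * makes that contract explicit.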
*/ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } + + UTRACE(ptr, 0, 0); + if (likely(fast)) { + tsd_assert_fast(tsd); + ifree(tsd, ptr, tcache, false); + } else { + ifree(tsd, ptr, tcache, true); + } + check_entry_exit_locking(tsd_tsdn(tsd)); +} + +JEMALLOC_ALWAYS_INLINE size_t +inallocx(tsdn_t *tsdn, size_t size, int flags) { + check_entry_exit_locking(tsdn); + size_t usize; - UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); - unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; - bool try_tcache; + if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { + usize = sz_s2u(size); + } else { + usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); + } + check_entry_exit_locking(tsdn); + return usize; +} +JEMALLOC_EXPORT void JEMALLOC_NOTHROW +je_sdallocx(void *ptr, size_t size, int flags) { assert(ptr != NULL); - assert(malloc_initialized || IS_INITIALIZER); - - if (arena_ind != UINT_MAX) { - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - try_tcache = (chunk == ptr || chunk->arena != - arenas[arena_ind]); - } else - try_tcache = true; + assert(malloc_initialized() || IS_INITIALIZER); + + tsd_t *tsd = tsd_fetch(); + bool fast = tsd_fast(tsd); + size_t usize = inallocx(tsd_tsdn(tsd), size, flags); + assert(usize == isalloc(tsd_tsdn(tsd), ptr)); + check_entry_exit_locking(tsd_tsdn(tsd)); + + tcache_t *tcache; + if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { + /* Not allowed to be reentrant and specify a custom tcache. */ + assert(tsd_reentrancy_level_get(tsd) == 0); + if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { + tcache = NULL; + } else { + tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); + } + } else { + if (likely(fast)) { + tcache = tsd_tcachep_get(tsd); + assert(tcache == tcache_get(tsd)); + } else { + if (likely(tsd_reentrancy_level_get(tsd) == 0)) { + tcache = tcache_get(tsd); + } else { + tcache = NULL; + } + } + } UTRACE(ptr, 0, 0); - if (config_stats || config_valgrind) - usize = isalloc(ptr, config_prof); - if (config_prof && opt_prof) { - if (config_stats == false && config_valgrind == false) - usize = isalloc(ptr, config_prof); - prof_free(ptr, usize); + if (likely(fast)) { + tsd_assert_fast(tsd); + isfree(tsd, ptr, usize, tcache, false); + } else { + isfree(tsd, ptr, usize, tcache, true); } - if (config_stats) - thread_allocated_tsd_get()->deallocated += usize; - if (config_valgrind && opt_valgrind) - rzsize = p2rz(ptr); - iqalloct(ptr, try_tcache); - JEMALLOC_VALGRIND_FREE(ptr, rzsize); + check_entry_exit_locking(tsd_tsdn(tsd)); } -size_t -je_nallocx(size_t size, int flags) -{ +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +JEMALLOC_ATTR(pure) +je_nallocx(size_t size, int flags) { size_t usize; - size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK) - & (SIZE_T_MAX-1)); + tsdn_t *tsdn; assert(size != 0); - if (malloc_init()) - return (0); + if (unlikely(malloc_init())) { + return 0; + } + + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); - usize = (alignment == 0) ? 
s2u(size) : sa2u(size, alignment); - assert(usize != 0); - return (usize); + usize = inallocx(tsdn, size, flags); + if (unlikely(usize > LARGE_MAXCLASS)) { + return 0; + } + + check_entry_exit_locking(tsdn); + return usize; } -int +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, - size_t newlen) -{ + size_t newlen) { + int ret; + tsd_t *tsd; - if (malloc_init()) - return (EAGAIN); + if (unlikely(malloc_init())) { + return EAGAIN; + } - return (ctl_byname(name, oldp, oldlenp, newp, newlen)); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); + return ret; } -int -je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) -{ +JEMALLOC_EXPORT int JEMALLOC_NOTHROW +je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { + int ret; - if (malloc_init()) - return (EAGAIN); + if (unlikely(malloc_init())) { + return EAGAIN; + } - return (ctl_nametomib(name, mibp, miblenp)); + tsd_t *tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_nametomib(tsd, name, mibp, miblenp); + check_entry_exit_locking(tsd_tsdn(tsd)); + return ret; } -int +JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, - void *newp, size_t newlen) -{ + void *newp, size_t newlen) { + int ret; + tsd_t *tsd; - if (malloc_init()) - return (EAGAIN); + if (unlikely(malloc_init())) { + return EAGAIN; + } - return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); + tsd = tsd_fetch(); + check_entry_exit_locking(tsd_tsdn(tsd)); + ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); + check_entry_exit_locking(tsd_tsdn(tsd)); + return ret; } -void +JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ + const char *opts) { + tsdn_t *tsdn; + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); stats_print(write_cb, cbopaque, opts); + check_entry_exit_locking(tsdn); } -size_t -je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) -{ +JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW +je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { size_t ret; + tsdn_t *tsdn; - assert(malloc_initialized || IS_INITIALIZER); - malloc_thread_init(); - - if (config_ivsalloc) - ret = ivsalloc(ptr, config_prof); - else - ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; - - return (ret); -} - -/* - * End non-standard functions. - */ -/******************************************************************************/ -/* - * Begin experimental functions. - */ -#ifdef JEMALLOC_EXPERIMENTAL - -int -je_allocm(void **ptr, size_t *rsize, size_t size, int flags) -{ - void *p; - - assert(ptr != NULL); - - p = je_mallocx(size, flags); - if (p == NULL) - return (ALLOCM_ERR_OOM); - if (rsize != NULL) - *rsize = isalloc(p, config_prof); - *ptr = p; - return (ALLOCM_SUCCESS); -} - -int -je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) -{ - int ret; - bool no_move = flags & ALLOCM_NO_MOVE; + assert(malloc_initialized() || IS_INITIALIZER); - assert(ptr != NULL); - assert(*ptr != NULL); - assert(size != 0); - assert(SIZE_T_MAX - size >= extra); + tsdn = tsdn_fetch(); + check_entry_exit_locking(tsdn); - if (no_move) { - size_t usize = je_xallocx(*ptr, size, extra, flags); - ret = (usize >= size) ? 
ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; - if (rsize != NULL) - *rsize = usize; + if (unlikely(ptr == NULL)) { + ret = 0; } else { - void *p = je_rallocx(*ptr, size+extra, flags); - if (p != NULL) { - *ptr = p; - ret = ALLOCM_SUCCESS; - } else - ret = ALLOCM_ERR_OOM; - if (rsize != NULL) - *rsize = isalloc(*ptr, config_prof); + if (config_debug || force_ivsalloc) { + ret = ivsalloc(tsdn, ptr); + assert(force_ivsalloc || ret != 0); + } else { + ret = isalloc(tsdn, ptr); + } } - return (ret); -} - -int -je_sallocm(const void *ptr, size_t *rsize, int flags) -{ - - assert(rsize != NULL); - *rsize = je_sallocx(ptr, flags); - return (ALLOCM_SUCCESS); -} - -int -je_dallocm(void *ptr, int flags) -{ - je_dallocx(ptr, flags); - return (ALLOCM_SUCCESS); + check_entry_exit_locking(tsdn); + return ret; } -int -je_nallocm(size_t *rsize, size_t size, int flags) -{ - size_t usize; - - usize = je_nallocx(size, flags); - if (usize == 0) - return (ALLOCM_ERR_OOM); - if (rsize != NULL) - *rsize = usize; - return (ALLOCM_SUCCESS); -} - -#endif /* - * End experimental functions. + * End non-standard functions. */ /******************************************************************************/ /* @@ -1966,17 +3001,17 @@ je_nallocm(size_t *rsize, size_t size, int flags) * fork/malloc races via the following functions it registers during * initialization using pthread_atfork(), but of course that does no good if * the allocator isn't fully initialized at fork time. The following library - * constructor is a partial solution to this problem. It may still possible to - * trigger the deadlock described above, but doing so would involve forking via - * a library constructor that runs before jemalloc's runs. + * constructor is a partial solution to this problem. It may still be possible + * to trigger the deadlock described above, but doing so would involve forking + * via a library constructor that runs before jemalloc's runs. */ +#ifndef JEMALLOC_JET JEMALLOC_ATTR(constructor) static void -jemalloc_constructor(void) -{ - +jemalloc_constructor(void) { malloc_init(); } +#endif #ifndef JEMALLOC_MUTEX_INIT_CB void @@ -1986,25 +3021,69 @@ JEMALLOC_EXPORT void _malloc_prefork(void) #endif { - unsigned i; + tsd_t *tsd; + unsigned i, j, narenas; + arena_t *arena; #ifdef JEMALLOC_MUTEX_INIT_CB - if (malloc_initialized == false) + if (!malloc_initialized()) { return; + } #endif - assert(malloc_initialized); + assert(malloc_initialized()); + tsd = tsd_fetch(); + + narenas = narenas_total_get(); + + witness_prefork(tsd_witness_tsdp_get(tsd)); /* Acquire all mutexes in a safe order. */ - ctl_prefork(); - prof_prefork(); - malloc_mutex_prefork(&arenas_lock); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_prefork(arenas[i]); + ctl_prefork(tsd_tsdn(tsd)); + tcache_prefork(tsd_tsdn(tsd)); + malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); + if (have_background_thread) { + background_thread_prefork0(tsd_tsdn(tsd)); + } + prof_prefork0(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_prefork1(tsd_tsdn(tsd)); + } + /* Break arena prefork into stages to preserve lock order. 
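+ * Each stage acquires one class of per-arena mutexes across all arenas
+ * before the next stage begins, keeping the process-wide acquisition
+ * order consistent with the witness lock ranks.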
*/ + for (i = 0; i < 8; i++) { + for (j = 0; j < narenas; j++) { + if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != + NULL) { + switch (i) { + case 0: + arena_prefork0(tsd_tsdn(tsd), arena); + break; + case 1: + arena_prefork1(tsd_tsdn(tsd), arena); + break; + case 2: + arena_prefork2(tsd_tsdn(tsd), arena); + break; + case 3: + arena_prefork3(tsd_tsdn(tsd), arena); + break; + case 4: + arena_prefork4(tsd_tsdn(tsd), arena); + break; + case 5: + arena_prefork5(tsd_tsdn(tsd), arena); + break; + case 6: + arena_prefork6(tsd_tsdn(tsd), arena); + break; + case 7: + arena_prefork7(tsd_tsdn(tsd), arena); + break; + default: not_reached(); + } + } + } } - chunk_prefork(); - base_prefork(); - huge_prefork(); + prof_prefork1(tsd_tsdn(tsd)); } #ifndef JEMALLOC_MUTEX_INIT_CB @@ -2015,97 +3094,61 @@ JEMALLOC_EXPORT void _malloc_postfork(void) #endif { - unsigned i; + tsd_t *tsd; + unsigned i, narenas; #ifdef JEMALLOC_MUTEX_INIT_CB - if (malloc_initialized == false) + if (!malloc_initialized()) { return; -#endif - assert(malloc_initialized); - - /* Release all mutexes, now that fork() has completed. */ - huge_postfork_parent(); - base_postfork_parent(); - chunk_postfork_parent(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_parent(arenas[i]); } - malloc_mutex_postfork_parent(&arenas_lock); - prof_postfork_parent(); - ctl_postfork_parent(); -} - -void -jemalloc_postfork_child(void) -{ - unsigned i; +#endif + assert(malloc_initialized()); - assert(malloc_initialized); + tsd = tsd_fetch(); + witness_postfork_parent(tsd_witness_tsdp_get(tsd)); /* Release all mutexes, now that fork() has completed. */ - huge_postfork_child(); - base_postfork_child(); - chunk_postfork_child(); - for (i = 0; i < narenas_total; i++) { - if (arenas[i] != NULL) - arena_postfork_child(arenas[i]); - } - malloc_mutex_postfork_child(&arenas_lock); - prof_postfork_child(); - ctl_postfork_child(); -} + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; -/******************************************************************************/ -/* - * The following functions are used for TLS allocation/deallocation in static - * binaries on FreeBSD. The primary difference between these and i[mcd]alloc() - * is that these avoid accessing TLS variables. - */ - -static void * -a0alloc(size_t size, bool zero) -{ - - if (malloc_init()) - return (NULL); - - if (size == 0) - size = 1; - - if (size <= arena_maxclass) - return (arena_malloc(arenas[0], size, zero, false)); - else - return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0]))); -} - -void * -a0malloc(size_t size) -{ - - return (a0alloc(size, false)); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { + arena_postfork_parent(tsd_tsdn(tsd), arena); + } + } + prof_postfork_parent(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_postfork_parent(tsd_tsdn(tsd)); + } + malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); + tcache_postfork_parent(tsd_tsdn(tsd)); + ctl_postfork_parent(tsd_tsdn(tsd)); } -void * -a0calloc(size_t num, size_t size) -{ +void +jemalloc_postfork_child(void) { + tsd_t *tsd; + unsigned i, narenas; - return (a0alloc(num * size, true)); -} + assert(malloc_initialized()); -void -a0free(void *ptr) -{ - arena_chunk_t *chunk; + tsd = tsd_fetch(); - if (ptr == NULL) - return; + witness_postfork_child(tsd_witness_tsdp_get(tsd)); + /* Release all mutexes, now that fork() has completed. 
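+ * The child releases in the same order as the parent path above:
+ * per-arena mutexes first, then prof, the background thread,
+ * arenas_lock, tcache, and finally ctl.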
*/ + for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { + arena_t *arena; - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk != ptr) - arena_dalloc(chunk->arena, chunk, ptr, false); - else - huge_dalloc(ptr, true); + if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { + arena_postfork_child(tsd_tsdn(tsd), arena); + } + } + prof_postfork_child(tsd_tsdn(tsd)); + if (have_background_thread) { + background_thread_postfork_child(tsd_tsdn(tsd)); + } + malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); + tcache_postfork_child(tsd_tsdn(tsd)); + ctl_postfork_child(tsd_tsdn(tsd)); } /******************************************************************************/ diff --git a/dep/jemalloc/src/jemalloc_cpp.cpp b/dep/jemalloc/src/jemalloc_cpp.cpp new file mode 100644 index 00000000000..844ab398a71 --- /dev/null +++ b/dep/jemalloc/src/jemalloc_cpp.cpp @@ -0,0 +1,132 @@ +#include <mutex> +#include <new> + +#define JEMALLOC_CPP_CPP_ +#ifdef __cplusplus +extern "C" { +#endif + +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#ifdef __cplusplus +} +#endif + +// All operators in this file are exported. + +// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt +// thunk? +// +// extern __typeof (sdallocx) sdallocx_int +// __attribute ((alias ("sdallocx"), +// visibility ("hidden"))); +// +// ... but it needs to work with jemalloc namespaces. + +void *operator new(std::size_t size); +void *operator new[](std::size_t size); +void *operator new(std::size_t size, const std::nothrow_t &) noexcept; +void *operator new[](std::size_t size, const std::nothrow_t &) noexcept; +void operator delete(void *ptr) noexcept; +void operator delete[](void *ptr) noexcept; +void operator delete(void *ptr, const std::nothrow_t &) noexcept; +void operator delete[](void *ptr, const std::nothrow_t &) noexcept; + +#if __cpp_sized_deallocation >= 201309 +/* C++14's sized-delete operators. */ +void operator delete(void *ptr, std::size_t size) noexcept; +void operator delete[](void *ptr, std::size_t size) noexcept; +#endif + +template <bool IsNoExcept> +void * +newImpl(std::size_t size) noexcept(IsNoExcept) { + void *ptr = je_malloc(size); + if (likely(ptr != nullptr)) + return ptr; + + while (ptr == nullptr) { + std::new_handler handler; + // GCC-4.8 and clang 4.0 do not have std::get_new_handler. 
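+ // Emulate it: set_new_handler(nullptr) swaps out the installed handler
+ // and returns it, and the second call immediately restores it. The
+ // private mutex keeps concurrent calls from observing the momentarily
+ // cleared handler.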
+ { + static std::mutex mtx; + std::lock_guard<std::mutex> lock(mtx); + + handler = std::set_new_handler(nullptr); + std::set_new_handler(handler); + } + if (handler == nullptr) + break; + + try { + handler(); + } catch (const std::bad_alloc &) { + break; + } + + ptr = je_malloc(size); + } + + if (ptr == nullptr && !IsNoExcept) + std::__throw_bad_alloc(); + return ptr; +} + +void * +operator new(std::size_t size) { + return newImpl<false>(size); +} + +void * +operator new[](std::size_t size) { + return newImpl<false>(size); +} + +void * +operator new(std::size_t size, const std::nothrow_t &) noexcept { + return newImpl<true>(size); +} + +void * +operator new[](std::size_t size, const std::nothrow_t &) noexcept { + return newImpl<true>(size); +} + +void +operator delete(void *ptr) noexcept { + je_free(ptr); +} + +void +operator delete[](void *ptr) noexcept { + je_free(ptr); +} + +void +operator delete(void *ptr, const std::nothrow_t &) noexcept { + je_free(ptr); +} + +void operator delete[](void *ptr, const std::nothrow_t &) noexcept { + je_free(ptr); +} + +#if __cpp_sized_deallocation >= 201309 + +void +operator delete(void *ptr, std::size_t size) noexcept { + if (unlikely(ptr == nullptr)) { + return; + } + je_sdallocx(ptr, size, /*flags=*/0); +} + +void operator delete[](void *ptr, std::size_t size) noexcept { + if (unlikely(ptr == nullptr)) { + return; + } + je_sdallocx(ptr, size, /*flags=*/0); +} + +#endif // __cpp_sized_deallocation diff --git a/dep/jemalloc/src/large.c b/dep/jemalloc/src/large.c new file mode 100644 index 00000000000..27a2c679876 --- /dev/null +++ b/dep/jemalloc/src/large.c @@ -0,0 +1,371 @@ +#define JEMALLOC_LARGE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/extent_mmap.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" +#include "jemalloc/internal/util.h" + +/******************************************************************************/ + +void * +large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) { + assert(usize == sz_s2u(usize)); + + return large_palloc(tsdn, arena, usize, CACHELINE, zero); +} + +void * +large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, + bool zero) { + size_t ausize; + extent_t *extent; + bool is_zeroed; + UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); + + assert(!tsdn_null(tsdn) || arena != NULL); + + ausize = sz_sa2u(usize, alignment); + if (unlikely(ausize == 0 || ausize > LARGE_MAXCLASS)) { + return NULL; + } + + if (config_fill && unlikely(opt_zero)) { + zero = true; + } + /* + * Copy zero into is_zeroed and pass the copy when allocating the + * extent, so that it is possible to make correct junk/zero fill + * decisions below, even if is_zeroed ends up true when zero is false. + */ + is_zeroed = zero; + if (likely(!tsdn_null(tsdn))) { + arena = arena_choose(tsdn_tsd(tsdn), arena); + } + if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn, + arena, usize, alignment, &is_zeroed)) == NULL) { + return NULL; + } + + /* See comments in arena_bin_slabs_full_insert(). */ + if (!arena_is_auto(arena)) { + /* Insert extent into large. 
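+ * The list is what lets explicit reset/destroy of a manual arena find
+ * its live large extents; auto arenas never support those operations
+ * and skip the bookkeeping (and its cache misses).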
*/ + malloc_mutex_lock(tsdn, &arena->large_mtx); + extent_list_append(&arena->large, extent); + malloc_mutex_unlock(tsdn, &arena->large_mtx); + } + if (config_prof && arena_prof_accum(tsdn, arena, usize)) { + prof_idump(tsdn); + } + + if (zero) { + assert(is_zeroed); + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK, + extent_usize_get(extent)); + } + + arena_decay_tick(tsdn, arena); + return extent_addr_get(extent); +} + +static void +large_dalloc_junk_impl(void *ptr, size_t size) { + memset(ptr, JEMALLOC_FREE_JUNK, size); +} +large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl; + +static void +large_dalloc_maybe_junk_impl(void *ptr, size_t size) { + if (config_fill && have_dss && unlikely(opt_junk_free)) { + /* + * Only bother junk filling if the extent isn't about to be + * unmapped. + */ + if (opt_retain || (have_dss && extent_in_dss(ptr))) { + large_dalloc_junk(ptr, size); + } + } +} +large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk = + large_dalloc_maybe_junk_impl; + +static bool +large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) { + arena_t *arena = extent_arena_get(extent); + size_t oldusize = extent_usize_get(extent); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + size_t diff = extent_size_get(extent) - (usize + sz_large_pad); + + assert(oldusize > usize); + + if (extent_hooks->split == NULL) { + return true; + } + + /* Split excess pages. */ + if (diff != 0) { + extent_t *trail = extent_split_wrapper(tsdn, arena, + &extent_hooks, extent, usize + sz_large_pad, + sz_size2index(usize), false, diff, NSIZES, false); + if (trail == NULL) { + return true; + } + + if (config_fill && unlikely(opt_junk_free)) { + large_dalloc_maybe_junk(extent_addr_get(trail), + extent_size_get(trail)); + } + + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail); + } + + arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize); + + return false; +} + +static bool +large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize, + bool zero) { + arena_t *arena = extent_arena_get(extent); + size_t oldusize = extent_usize_get(extent); + extent_hooks_t *extent_hooks = extent_hooks_get(arena); + size_t trailsize = usize - oldusize; + + if (extent_hooks->merge == NULL) { + return true; + } + + if (config_fill && unlikely(opt_zero)) { + zero = true; + } + /* + * Copy zero into is_zeroed_trail and pass the copy when allocating the + * extent, so that it is possible to make correct junk/zero fill + * decisions below, even if is_zeroed_trail ends up true when zero is + * false. 
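+ * (extents_alloc() may report an already-zeroed trail; folding that
+ * back into zero itself would wrongly skip the junk fill below.)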
+ */ + bool is_zeroed_trail = zero; + bool commit = true; + extent_t *trail; + bool new_mapping; + if ((trail = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_dirty, extent_past_get(extent), trailsize, 0, + CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL + || (trail = extents_alloc(tsdn, arena, &extent_hooks, + &arena->extents_muzzy, extent_past_get(extent), trailsize, 0, + CACHELINE, false, NSIZES, &is_zeroed_trail, &commit)) != NULL) { + if (config_stats) { + new_mapping = false; + } + } else { + if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks, + extent_past_get(extent), trailsize, 0, CACHELINE, false, + NSIZES, &is_zeroed_trail, &commit)) == NULL) { + return true; + } + if (config_stats) { + new_mapping = true; + } + } + + if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) { + extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail); + return true; + } + rtree_ctx_t rtree_ctx_fallback; + rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); + szind_t szind = sz_size2index(usize); + extent_szind_set(extent, szind); + rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, + (uintptr_t)extent_addr_get(extent), szind, false); + + if (config_stats && new_mapping) { + arena_stats_mapped_add(tsdn, &arena->stats, trailsize); + } + + if (zero) { + if (config_cache_oblivious) { + /* + * Zero the trailing bytes of the original allocation's + * last page, since they are in an indeterminate state. + * There will always be trailing bytes, because ptr's + * offset from the beginning of the extent is a multiple + * of CACHELINE in [0 .. PAGE). + */ + void *zbase = (void *) + ((uintptr_t)extent_addr_get(extent) + oldusize); + void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + + PAGE)); + size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; + assert(nzero > 0); + memset(zbase, 0, nzero); + } + assert(is_zeroed_trail); + } else if (config_fill && unlikely(opt_junk_alloc)) { + memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize), + JEMALLOC_ALLOC_JUNK, usize - oldusize); + } + + arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize); + + return false; +} + +bool +large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min, + size_t usize_max, bool zero) { + size_t oldusize = extent_usize_get(extent); + + /* The following should have been caught by callers. */ + assert(usize_min > 0 && usize_max <= LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ + assert(oldusize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS); + + if (usize_max > oldusize) { + /* Attempt to expand the allocation in-place. */ + if (!large_ralloc_no_move_expand(tsdn, extent, usize_max, + zero)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + /* Try again, this time with usize_min. */ + if (usize_min < usize_max && usize_min > oldusize && + large_ralloc_no_move_expand(tsdn, extent, usize_min, + zero)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + } + + /* + * Avoid moving the allocation if the existing extent size accommodates + * the new size. + */ + if (oldusize >= usize_min && oldusize <= usize_max) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + + /* Attempt to shrink the allocation in-place. 
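+ * Shrinking splits the trailing pages off as a separate extent and
+ * returns them to the arena's dirty extents; see
+ * large_ralloc_no_move_shrink() above.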
*/ + if (oldusize > usize_max) { + if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) { + arena_decay_tick(tsdn, extent_arena_get(extent)); + return false; + } + } + return true; +} + +static void * +large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, + size_t alignment, bool zero) { + if (alignment <= CACHELINE) { + return large_malloc(tsdn, arena, usize, zero); + } + return large_palloc(tsdn, arena, usize, alignment, zero); +} + +void * +large_ralloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent, size_t usize, + size_t alignment, bool zero, tcache_t *tcache) { + size_t oldusize = extent_usize_get(extent); + + /* The following should have been caught by callers. */ + assert(usize > 0 && usize <= LARGE_MAXCLASS); + /* Both allocation sizes must be large to avoid a move. */ + assert(oldusize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS); + + /* Try to avoid moving the allocation. */ + if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) { + return extent_addr_get(extent); + } + + /* + * usize and old size are different enough that we need to use a + * different size class. In that case, fall back to allocating new + * space and copying. + */ + void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment, + zero); + if (ret == NULL) { + return NULL; + } + + size_t copysize = (usize < oldusize) ? usize : oldusize; + memcpy(ret, extent_addr_get(extent), copysize); + isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true); + return ret; +} + +/* + * junked_locked indicates whether the extent's data have been junk-filled, and + * whether the arena's large_mtx is currently held. + */ +static void +large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent, + bool junked_locked) { + if (!junked_locked) { + /* See comments in arena_bin_slabs_full_insert(). 
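+ * As on the allocation path, only manual (non-auto) arenas maintain the
+ * large extent list, so only they need the removal here.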
*/ + if (!arena_is_auto(arena)) { + malloc_mutex_lock(tsdn, &arena->large_mtx); + extent_list_remove(&arena->large, extent); + malloc_mutex_unlock(tsdn, &arena->large_mtx); + } + large_dalloc_maybe_junk(extent_addr_get(extent), + extent_usize_get(extent)); + } else { + malloc_mutex_assert_owner(tsdn, &arena->large_mtx); + if (!arena_is_auto(arena)) { + extent_list_remove(&arena->large, extent); + } + } + arena_extent_dalloc_large_prep(tsdn, arena, extent); +} + +static void +large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { + extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; + arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent); +} + +void +large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) { + large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true); +} + +void +large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) { + large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent); +} + +void +large_dalloc(tsdn_t *tsdn, extent_t *extent) { + arena_t *arena = extent_arena_get(extent); + large_dalloc_prep_impl(tsdn, arena, extent, false); + large_dalloc_finish_impl(tsdn, arena, extent); + arena_decay_tick(tsdn, arena); +} + +size_t +large_salloc(tsdn_t *tsdn, const extent_t *extent) { + return extent_usize_get(extent); +} + +prof_tctx_t * +large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) { + return extent_prof_tctx_get(extent); +} + +void +large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) { + extent_prof_tctx_set(extent, tctx); +} + +void +large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) { + large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U); +} diff --git a/dep/jemalloc/src/util.c b/dep/jemalloc/src/malloc_io.c index 93a19fd16f7..6b99afcd3fc 100644 --- a/dep/jemalloc/src/util.c +++ b/dep/jemalloc/src/malloc_io.c @@ -1,58 +1,88 @@ -#define assert(e) do { \ +#define JEMALLOC_MALLOC_IO_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/util.h" + +#ifdef assert +# undef assert +#endif +#ifdef not_reached +# undef not_reached +#endif +#ifdef not_implemented +# undef not_implemented +#endif +#ifdef assert_not_implemented +# undef assert_not_implemented +#endif + +/* + * Define simple versions of assertion macros that won't recurse in case + * of assertion failures in malloc_*printf(). + */ +#define assert(e) do { \ if (config_debug && !(e)) { \ malloc_write("<jemalloc>: Failed assertion\n"); \ abort(); \ } \ } while (0) -#define not_reached() do { \ +#define not_reached() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Unreachable code reached\n"); \ abort(); \ } \ + unreachable(); \ } while (0) -#define not_implemented() do { \ +#define not_implemented() do { \ if (config_debug) { \ malloc_write("<jemalloc>: Not implemented\n"); \ abort(); \ } \ } while (0) -#define JEMALLOC_UTIL_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define assert_not_implemented(e) do { \ + if (unlikely(config_debug && !(e))) { \ + not_implemented(); \ + } \ +} while (0) /******************************************************************************/ /* Function prototypes for non-inline static functions. 
*/ -static void wrtmessage(void *cbopaque, const char *s); -#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) -static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, +static void wrtmessage(void *cbopaque, const char *s); +#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1) +static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p); -#define D2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); -#define O2S_BUFSIZE (1 + U2S_BUFSIZE) -static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); -#define X2S_BUFSIZE (2 + U2S_BUFSIZE) -static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, +#define D2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *d2s(intmax_t x, char sign, char *s, size_t *slen_p); +#define O2S_BUFSIZE (1 + U2S_BUFSIZE) +static char *o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p); +#define X2S_BUFSIZE (2 + U2S_BUFSIZE) +static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p); /******************************************************************************/ /* malloc_message() setup. */ static void -wrtmessage(void *cbopaque, const char *s) -{ - -#ifdef SYS_write +wrtmessage(void *cbopaque, const char *s) { +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_write) /* * Use syscall(2) rather than write(2) when possible in order to avoid * the possibility of memory allocation within libc. This is necessary * on FreeBSD; most operating systems do not have this problem though. + * + * syscall() returns long or int, depending on platform, so capture the + * unused result in the widest plausible type to avoid compiler + * warnings. */ - UNUSED int result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); + UNUSED long result = syscall(SYS_write, STDERR_FILENO, s, strlen(s)); #else - UNUSED int result = write(STDERR_FILENO, s, strlen(s)); + UNUSED ssize_t result = write(STDERR_FILENO, s, strlen(s)); #endif } @@ -63,13 +93,12 @@ JEMALLOC_EXPORT void (*je_malloc_message)(void *, const char *s); * je_malloc_message(...) throughout the code. */ void -malloc_write(const char *s) -{ - - if (je_malloc_message != NULL) +malloc_write(const char *s) { + if (je_malloc_message != NULL) { je_malloc_message(NULL, s); - else + } else { wrtmessage(NULL, s); + } } /* @@ -77,30 +106,27 @@ malloc_write(const char *s) * provide a wrapper. 
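+ * (The GNU variant returns a char * that may point at a static string
+ * and may leave buf untouched, whereas the XSI variant returns int and
+ * always fills buf; the copy below papers over the difference.)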
*/ int -buferror(int err, char *buf, size_t buflen) -{ - +buferror(int err, char *buf, size_t buflen) { #ifdef _WIN32 - FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), 0, - (LPSTR)buf, buflen, NULL); - return (0); -#elif defined(_GNU_SOURCE) + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, + (LPSTR)buf, (DWORD)buflen, NULL); + return 0; +#elif defined(__GLIBC__) && defined(_GNU_SOURCE) char *b = strerror_r(err, buf, buflen); if (b != buf) { strncpy(buf, b, buflen); buf[buflen-1] = '\0'; } - return (0); + return 0; #else - return (strerror_r(err, buf, buflen)); + return strerror_r(err, buf, buflen); #endif } uintmax_t -malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) -{ +malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) { uintmax_t ret, digit; - int b; + unsigned b; bool neg; const char *p, *ns; @@ -143,10 +169,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) switch (p[1]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': - if (b == 0) + if (b == 0) { b = 8; - if (b == 8) + } + if (b == 8) { p++; + } break; case 'X': case 'x': switch (p[2]) { @@ -156,10 +184,12 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) case 'F': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': - if (b == 0) + if (b == 0) { b = 16; - if (b == 16) + } + if (b == 16) { p += 2; + } break; default: break; @@ -171,8 +201,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) goto label_return; } } - if (b == 0) + if (b == 0) { b = 10; + } /* Convert. */ ret = 0; @@ -190,8 +221,9 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) } p++; } - if (neg) - ret = -ret; + if (neg) { + ret = (uintmax_t)(-((intmax_t)ret)); + } if (p == ns) { /* No conversion performed. */ @@ -205,15 +237,15 @@ label_return: if (p == ns) { /* No characters were converted. */ *endptr = (char *)nptr; - } else + } else { *endptr = (char *)p; + } } - return (ret); + return ret; } static char * -u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) -{ +u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) { unsigned i; i = U2S_BUFSIZE - 1; @@ -251,23 +283,25 @@ u2s(uintmax_t x, unsigned base, bool uppercase, char *s, size_t *slen_p) }} *slen_p = U2S_BUFSIZE - 1 - i; - return (&s[i]); + return &s[i]; } static char * -d2s(intmax_t x, char sign, char *s, size_t *slen_p) -{ +d2s(intmax_t x, char sign, char *s, size_t *slen_p) { bool neg; - if ((neg = (x < 0))) + if ((neg = (x < 0))) { x = -x; + } s = u2s(x, 10, false, s, slen_p); - if (neg) + if (neg) { sign = '-'; + } switch (sign) { case '-': - if (neg == false) + if (!neg) { break; + } /* Fall through. */ case ' ': case '+': @@ -277,73 +311,70 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) break; default: not_reached(); } - return (s); + return s; } static char * -o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) -{ - +o2s(uintmax_t x, bool alt_form, char *s, size_t *slen_p) { s = u2s(x, 8, false, s, slen_p); if (alt_form && *s != '0') { s--; (*slen_p)++; *s = '0'; } - return (s); + return s; } static char * -x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) -{ - +x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) { s = u2s(x, 16, uppercase, s, slen_p); if (alt_form) { s -= 2; (*slen_p) += 2; memcpy(s, uppercase ? 
"0X" : "0x", 2); } - return (s); + return s; } -int -malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) -{ - int ret; +size_t +malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) { size_t i; const char *f; -#define APPEND_C(c) do { \ - if (i < size) \ +#define APPEND_C(c) do { \ + if (i < size) { \ str[i] = (c); \ + } \ i++; \ } while (0) -#define APPEND_S(s, slen) do { \ +#define APPEND_S(s, slen) do { \ if (i < size) { \ size_t cpylen = (slen <= size - i) ? slen : size - i; \ memcpy(&str[i], s, cpylen); \ } \ i += slen; \ } while (0) -#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ +#define APPEND_PADDED_S(s, slen, width, left_justify) do { \ /* Left padding. */ \ size_t pad_len = (width == -1) ? 0 : ((slen < (size_t)width) ? \ (size_t)width - slen : 0); \ - if (left_justify == false && pad_len != 0) { \ + if (!left_justify && pad_len != 0) { \ size_t j; \ - for (j = 0; j < pad_len; j++) \ + for (j = 0; j < pad_len; j++) { \ APPEND_C(' '); \ + } \ } \ /* Value. */ \ APPEND_S(s, slen); \ /* Right padding. */ \ if (left_justify && pad_len != 0) { \ size_t j; \ - for (j = 0; j < pad_len; j++) \ + for (j = 0; j < pad_len; j++) { \ APPEND_C(' '); \ + } \ } \ } while (0) -#define GET_ARG_NUMERIC(val, len) do { \ +#define GET_ARG_NUMERIC(val, len) do { \ switch (len) { \ case '?': \ val = va_arg(ap, int); \ @@ -381,7 +412,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) case 'p': /* Synthetic; used for %p. */ \ val = va_arg(ap, uintptr_t); \ break; \ - default: not_reached(); \ + default: \ + not_reached(); \ + val = 0; \ } \ } while (0) @@ -398,25 +431,27 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) int prec = -1; int width = -1; unsigned char len = '?'; + char *s; + size_t slen; f++; /* Flags. */ while (true) { switch (*f) { case '#': - assert(alt_form == false); + assert(!alt_form); alt_form = true; break; case '-': - assert(left_justify == false); + assert(!left_justify); left_justify = true; break; case ' ': - assert(plus_space == false); + assert(!plus_space); plus_space = true; break; case '+': - assert(plus_plus == false); + assert(!plus_plus); plus_plus = true; break; default: goto label_width; @@ -447,10 +482,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) break; } /* Width/precision separator. */ - if (*f == '.') + if (*f == '.') { f++; - else + } else { goto label_length; + } /* Precision. */ switch (*f) { case '*': @@ -477,8 +513,9 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) if (*f == 'l') { len = 'q'; f++; - } else + } else { len = 'l'; + } break; case 'q': case 'j': case 't': case 'z': len = *f; @@ -488,8 +525,6 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) } /* Conversion specifier. */ switch (*f) { - char *s; - size_t slen; case '%': /* %% */ APPEND_C(*f); @@ -548,7 +583,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) assert(len == '?' || len == 'l'); assert_not_implemented(len != 'l'); s = va_arg(ap, char *); - slen = (prec < 0) ? strlen(s) : prec; + slen = (prec < 0) ? 
strlen(s) : (size_t)prec; APPEND_PADDED_S(s, slen, width, left_justify); f++; break; @@ -571,37 +606,35 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) }} } label_out: - if (i < size) + if (i < size) { str[i] = '\0'; - else + } else { str[size - 1] = '\0'; - ret = i; + } #undef APPEND_C #undef APPEND_S #undef APPEND_PADDED_S #undef GET_ARG_NUMERIC - return (ret); + return i; } -JEMALLOC_ATTR(format(printf, 3, 4)) -int -malloc_snprintf(char *str, size_t size, const char *format, ...) -{ - int ret; +JEMALLOC_FORMAT_PRINTF(3, 4) +size_t +malloc_snprintf(char *str, size_t size, const char *format, ...) { + size_t ret; va_list ap; va_start(ap, format); ret = malloc_vsnprintf(str, size, format, ap); va_end(ap); - return (ret); + return ret; } void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, va_list ap) -{ + const char *format, va_list ap) { char buf[MALLOC_PRINTF_BUFSIZE]; if (write_cb == NULL) { @@ -623,11 +656,10 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, * Print to a callback function in such a way as to (hopefully) avoid memory * allocation. */ -JEMALLOC_ATTR(format(printf, 3, 4)) +JEMALLOC_FORMAT_PRINTF(3, 4) void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, - const char *format, ...) -{ + const char *format, ...) { va_list ap; va_start(ap, format); @@ -636,13 +668,22 @@ malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque, } /* Print to stderr in such a way as to avoid memory allocation. */ -JEMALLOC_ATTR(format(printf, 1, 2)) +JEMALLOC_FORMAT_PRINTF(1, 2) void -malloc_printf(const char *format, ...) -{ +malloc_printf(const char *format, ...) { va_list ap; va_start(ap, format); malloc_vcprintf(NULL, NULL, format, ap); va_end(ap); } + +/* + * Restore normal assertion macros, in order to make it possible to compile all + * C files as a single concatenation. 
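Earlier in this file the return type of malloc_vsnprintf()/malloc_snprintf() changed from int to size_t: like C99 snprintf(3) they report the length the output would have had, but there is no negative error channel, so callers detect truncation by comparing the result against the buffer size. A hedged caller sketch, assuming jemalloc's internal malloc_io declarations are in scope (report_usage itself is invented for illustration):

#include <stddef.h>

static void
report_usage(size_t nbytes) {
	char buf[64];
	size_t len = malloc_snprintf(buf, sizeof(buf),
	    "allocated: %zu bytes\n", nbytes);
	if (len >= sizeof(buf)) {
		/* Output was truncated; enlarge buf or split the message. */
	}
	malloc_write(buf);
}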
+ */ +#undef assert +#undef not_reached +#undef not_implemented +#undef assert_not_implemented +#include "jemalloc/internal/assert.h" diff --git a/dep/jemalloc/src/mb.c b/dep/jemalloc/src/mb.c deleted file mode 100644 index dc2c0a256fd..00000000000 --- a/dep/jemalloc/src/mb.c +++ /dev/null @@ -1,2 +0,0 @@ -#define JEMALLOC_MB_C_ -#include "jemalloc/internal/jemalloc_internal.h" diff --git a/dep/jemalloc/src/mutex.c b/dep/jemalloc/src/mutex.c index 788eca38703..a528ef0c243 100644 --- a/dep/jemalloc/src/mutex.c +++ b/dep/jemalloc/src/mutex.c @@ -1,12 +1,12 @@ -#define JEMALLOC_MUTEX_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_MUTEX_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -#include <dlfcn.h> -#endif +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" #ifndef _CRT_SPINCOUNT -#define _CRT_SPINCOUNT 4000 +#define _CRT_SPINCOUNT 4000 #endif /******************************************************************************/ @@ -20,10 +20,6 @@ static bool postpone_init = true; static malloc_mutex_t *postponed_mutexes = NULL; #endif -#if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static void pthread_create_once(void); -#endif - /******************************************************************************/ /* * We intercept pthread_create() calls in order to toggle isthreaded if the @@ -31,33 +27,11 @@ static void pthread_create_once(void); */ #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) -static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, - void *(*)(void *), void *__restrict); - -static void -pthread_create_once(void) -{ - - pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); - if (pthread_create_fptr == NULL) { - malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " - "\"pthread_create\")\n"); - abort(); - } - - isthreaded = true; -} - JEMALLOC_EXPORT int pthread_create(pthread_t *__restrict thread, const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), - void *__restrict arg) -{ - static pthread_once_t once_control = PTHREAD_ONCE_INIT; - - pthread_once(&once_control, pthread_create_once); - - return (pthread_create_fptr(thread, attr, start_routine, arg)); + void *__restrict arg) { + return pthread_create_wrapper(thread, attr, start_routine, arg); } #endif @@ -68,14 +42,108 @@ JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); #endif -bool -malloc_mutex_init(malloc_mutex_t *mutex) -{ +void +malloc_mutex_lock_slow(malloc_mutex_t *mutex) { + mutex_prof_data_t *data = &mutex->prof_data; + UNUSED nstime_t before = NSTIME_ZERO_INITIALIZER; + + if (ncpus == 1) { + goto label_spin_done; + } + + int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN; + do { + CPU_SPINWAIT; + if (!malloc_mutex_trylock_final(mutex)) { + data->n_spin_acquired++; + return; + } + } while (cnt++ < max_cnt); + + if (!config_stats) { + /* Only spin is useful when stats is off. */ + malloc_mutex_lock_final(mutex); + return; + } +label_spin_done: + nstime_update(&before); + /* Copy before to after to avoid clock skews. */ + nstime_t after; + nstime_copy(&after, &before); + uint32_t n_thds = atomic_fetch_add_u32(&data->n_waiting_thds, 1, + ATOMIC_RELAXED) + 1; + /* One last try as above two calls may take quite some cycles. 
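malloc_mutex_lock_slow() above is a classic spin-then-block slow path: spin briefly in case the holder releases within a few hundred cycles, then fall back to a blocking acquire (and skip spinning entirely on a single CPU, where it can never help). A self-contained sketch of the same shape in plain pthreads; MAX_SPIN is an arbitrary stand-in for MALLOC_MUTEX_MAX_SPIN:

#include <pthread.h>
#include <sched.h>

#define MAX_SPIN 250	/* arbitrary bound; jemalloc uses MALLOC_MUTEX_MAX_SPIN */

static void
lock_slow(pthread_mutex_t *m) {
	/* Spin briefly: cheap if the holder releases the lock soon. */
	for (int i = 0; i < MAX_SPIN; i++) {
		if (pthread_mutex_trylock(m) == 0) {
			return;		/* Acquired while spinning. */
		}
		sched_yield();		/* Stand-in for the CPU_SPINWAIT pause hint. */
	}
	/* True slow path: block in the kernel. */
	pthread_mutex_lock(m);
}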
*/ + if (!malloc_mutex_trylock_final(mutex)) { + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + data->n_spin_acquired++; + return; + } + + /* True slow path. */ + malloc_mutex_lock_final(mutex); + /* Update more slow-path only counters. */ + atomic_fetch_sub_u32(&data->n_waiting_thds, 1, ATOMIC_RELAXED); + nstime_update(&after); + + nstime_t delta; + nstime_copy(&delta, &after); + nstime_subtract(&delta, &before); + data->n_wait_times++; + nstime_add(&data->tot_wait_time, &delta); + if (nstime_compare(&data->max_wait_time, &delta) < 0) { + nstime_copy(&data->max_wait_time, &delta); + } + if (n_thds > data->max_n_thds) { + data->max_n_thds = n_thds; + } +} + +static void +mutex_prof_data_init(mutex_prof_data_t *data) { + memset(data, 0, sizeof(mutex_prof_data_t)); + nstime_init(&data->max_wait_time, 0); + nstime_init(&data->tot_wait_time, 0); + data->prev_owner = NULL; +} + +void +malloc_mutex_prof_data_reset(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_assert_owner(tsdn, mutex); + mutex_prof_data_init(&mutex->prof_data); +} + +static int +mutex_addr_comp(const witness_t *witness1, void *mutex1, + const witness_t *witness2, void *mutex2) { + assert(mutex1 != NULL); + assert(mutex2 != NULL); + uintptr_t mu1int = (uintptr_t)mutex1; + uintptr_t mu2int = (uintptr_t)mutex2; + if (mu1int < mu2int) { + return -1; + } else if (mu1int == mu2int) { + return 0; + } else { + return 1; + } +} + +bool +malloc_mutex_init(malloc_mutex_t *mutex, const char *name, + witness_rank_t rank, malloc_mutex_lock_order_t lock_order) { + mutex_prof_data_init(&mutex->prof_data); #ifdef _WIN32 +# if _WIN32_WINNT >= 0x0600 + InitializeSRWLock(&mutex->lock); +# else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, - _CRT_SPINCOUNT)) - return (true); + _CRT_SPINCOUNT)) { + return true; + } +# endif +#elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) + mutex->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mutex->lock = 0; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) @@ -83,67 +151,73 @@ malloc_mutex_init(malloc_mutex_t *mutex) mutex->postponed_next = postponed_mutexes; postponed_mutexes = mutex; } else { - if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) != - 0) - return (true); + if (_pthread_mutex_init_calloc_cb(&mutex->lock, + bootstrap_calloc) != 0) { + return true; + } } #else pthread_mutexattr_t attr; - if (pthread_mutexattr_init(&attr) != 0) - return (true); + if (pthread_mutexattr_init(&attr) != 0) { + return true; + } pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); - return (true); + return true; } pthread_mutexattr_destroy(&attr); #endif - return (false); + if (config_debug) { + mutex->lock_order = lock_order; + if (lock_order == malloc_mutex_address_ordered) { + witness_init(&mutex->witness, name, rank, + mutex_addr_comp, &mutex); + } else { + witness_init(&mutex->witness, name, rank, NULL, NULL); + } + } + return false; } void -malloc_mutex_prefork(malloc_mutex_t *mutex) -{ - - malloc_mutex_lock(mutex); +malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_lock(tsdn, mutex); } void -malloc_mutex_postfork_parent(malloc_mutex_t *mutex) -{ - - malloc_mutex_unlock(mutex); +malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { + malloc_mutex_unlock(tsdn, mutex); } void -malloc_mutex_postfork_child(malloc_mutex_t *mutex) -{ - +malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB - 
malloc_mutex_unlock(mutex); + malloc_mutex_unlock(tsdn, mutex); #else - if (malloc_mutex_init(mutex)) { + if (malloc_mutex_init(mutex, mutex->witness.name, + mutex->witness.rank, mutex->lock_order)) { malloc_printf("<jemalloc>: Error re-initializing mutex in " "child\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } #endif } bool -mutex_boot(void) -{ - +malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, - base_calloc) != 0) - return (true); + bootstrap_calloc) != 0) { + return true; + } postponed_mutexes = postponed_mutexes->postponed_next; } #endif - return (false); + return false; } diff --git a/dep/jemalloc/src/mutex_pool.c b/dep/jemalloc/src/mutex_pool.c new file mode 100644 index 00000000000..f24d10e44a8 --- /dev/null +++ b/dep/jemalloc/src/mutex_pool.c @@ -0,0 +1,18 @@ +#define JEMALLOC_MUTEX_POOL_C_ + +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_pool.h" + +bool +mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) { + for (int i = 0; i < MUTEX_POOL_SIZE; ++i) { + if (malloc_mutex_init(&pool->mutexes[i], name, rank, + malloc_mutex_address_ordered)) { + return true; + } + } + return false; +} diff --git a/dep/jemalloc/src/nstime.c b/dep/jemalloc/src/nstime.c new file mode 100644 index 00000000000..71db353965f --- /dev/null +++ b/dep/jemalloc/src/nstime.c @@ -0,0 +1,170 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/nstime.h" + +#include "jemalloc/internal/assert.h" + +#define BILLION UINT64_C(1000000000) +#define MILLION UINT64_C(1000000) + +void +nstime_init(nstime_t *time, uint64_t ns) { + time->ns = ns; +} + +void +nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) { + time->ns = sec * BILLION + nsec; +} + +uint64_t +nstime_ns(const nstime_t *time) { + return time->ns; +} + +uint64_t +nstime_msec(const nstime_t *time) { + return time->ns / MILLION; +} + +uint64_t +nstime_sec(const nstime_t *time) { + return time->ns / BILLION; +} + +uint64_t +nstime_nsec(const nstime_t *time) { + return time->ns % BILLION; +} + +void +nstime_copy(nstime_t *time, const nstime_t *source) { + *time = *source; +} + +int +nstime_compare(const nstime_t *a, const nstime_t *b) { + return (a->ns > b->ns) - (a->ns < b->ns); +} + +void +nstime_add(nstime_t *time, const nstime_t *addend) { + assert(UINT64_MAX - time->ns >= addend->ns); + + time->ns += addend->ns; +} + +void +nstime_iadd(nstime_t *time, uint64_t addend) { + assert(UINT64_MAX - time->ns >= addend); + + time->ns += addend; +} + +void +nstime_subtract(nstime_t *time, const nstime_t *subtrahend) { + assert(nstime_compare(time, subtrahend) >= 0); + + time->ns -= subtrahend->ns; +} + +void +nstime_isubtract(nstime_t *time, uint64_t subtrahend) { + assert(time->ns >= subtrahend); + + time->ns -= subtrahend; +} + +void +nstime_imultiply(nstime_t *time, uint64_t multiplier) { + assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) << + 2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns)); + + time->ns *= multiplier; +} + +void +nstime_idivide(nstime_t *time, uint64_t divisor) { + assert(divisor != 0); + + time->ns /= divisor; +} + +uint64_t +nstime_divide(const nstime_t *time, const nstime_t *divisor) { + assert(divisor->ns != 0); + + 
return time->ns / divisor->ns; +} + +#ifdef _WIN32 +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + FILETIME ft; + uint64_t ticks_100ns; + + GetSystemTimeAsFileTime(&ft); + ticks_100ns = (((uint64_t)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; + + nstime_init(time, ticks_100ns * 100); +} +#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC_COARSE, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif defined(JEMALLOC_HAVE_CLOCK_MONOTONIC) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + nstime_init2(time, ts.tv_sec, ts.tv_nsec); +} +#elif defined(JEMALLOC_HAVE_MACH_ABSOLUTE_TIME) +# define NSTIME_MONOTONIC true +static void +nstime_get(nstime_t *time) { + nstime_init(time, mach_absolute_time()); +} +#else +# define NSTIME_MONOTONIC false +static void +nstime_get(nstime_t *time) { + struct timeval tv; + + gettimeofday(&tv, NULL); + nstime_init2(time, tv.tv_sec, tv.tv_usec * 1000); +} +#endif + +static bool +nstime_monotonic_impl(void) { + return NSTIME_MONOTONIC; +#undef NSTIME_MONOTONIC +} +nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl; + +static bool +nstime_update_impl(nstime_t *time) { + nstime_t old_time; + + nstime_copy(&old_time, time); + nstime_get(time); + + /* Handle non-monotonic clocks. */ + if (unlikely(nstime_compare(&old_time, time) > 0)) { + nstime_copy(time, &old_time); + return true; + } + + return false; +} +nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl; diff --git a/dep/jemalloc/src/pages.c b/dep/jemalloc/src/pages.c new file mode 100644 index 00000000000..fec64dd01d7 --- /dev/null +++ b/dep/jemalloc/src/pages.c @@ -0,0 +1,422 @@ +#define JEMALLOC_PAGES_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/pages.h" + +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +#include <sys/sysctl.h> +#endif + +/******************************************************************************/ +/* Data. */ + +/* Actual operating system page size, detected during bootstrap, <= PAGE. */ +static size_t os_page; + +#ifndef _WIN32 +# define PAGES_PROT_COMMIT (PROT_READ | PROT_WRITE) +# define PAGES_PROT_DECOMMIT (PROT_NONE) +static int mmap_flags; +#endif +static bool os_overcommits; + +/******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. + */ + +static void os_pages_unmap(void *addr, size_t size); + +/******************************************************************************/ + +static void * +os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, os_page) == size); + assert(size != 0); + + if (os_overcommits) { + *commit = true; + } + + void *ret; +#ifdef _WIN32 + /* + * If VirtualAlloc can't allocate at the given address when one is + * given, it fails and returns NULL. + */ + ret = VirtualAlloc(addr, size, MEM_RESERVE | (*commit ? 
MEM_COMMIT : 0), + PAGE_READWRITE); +#else + /* + * We don't use MAP_FIXED here, because it can cause the *replacement* + * of existing mappings, and we only want to create new mappings. + */ + { + int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + + ret = mmap(addr, size, prot, mmap_flags, -1, 0); + } + assert(ret != NULL); + + if (ret == MAP_FAILED) { + ret = NULL; + } else if (addr != NULL && ret != addr) { + /* + * We succeeded in mapping memory, but not in the right place. + */ + os_pages_unmap(ret, size); + ret = NULL; + } +#endif + assert(ret == NULL || (addr == NULL && ret != addr) || (addr != NULL && + ret == addr)); + return ret; +} + +static void * +os_pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, + bool *commit) { + void *ret = (void *)((uintptr_t)addr + leadsize); + + assert(alloc_size >= leadsize + size); +#ifdef _WIN32 + os_pages_unmap(addr, alloc_size); + void *new_addr = os_pages_map(ret, size, PAGE, commit); + if (new_addr == ret) { + return ret; + } + if (new_addr != NULL) { + os_pages_unmap(new_addr, size); + } + return NULL; +#else + size_t trailsize = alloc_size - leadsize - size; + + if (leadsize != 0) { + os_pages_unmap(addr, leadsize); + } + if (trailsize != 0) { + os_pages_unmap((void *)((uintptr_t)ret + size), trailsize); + } + return ret; +#endif +} + +static void +os_pages_unmap(void *addr, size_t size) { + assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr); + assert(ALIGNMENT_CEILING(size, os_page) == size); + +#ifdef _WIN32 + if (VirtualFree(addr, 0, MEM_RELEASE) == 0) +#else + if (munmap(addr, size) == -1) +#endif + { + char buf[BUFERROR_BUF]; + + buferror(get_errno(), buf, sizeof(buf)); + malloc_printf("<jemalloc>: Error in " +#ifdef _WIN32 + "VirtualFree" +#else + "munmap" +#endif + "(): %s\n", buf); + if (opt_abort) { + abort(); + } + } +} + +static void * +pages_map_slow(size_t size, size_t alignment, bool *commit) { + size_t alloc_size = size + alignment - os_page; + /* Beware size_t wrap-around. */ + if (alloc_size < size) { + return NULL; + } + + void *ret; + do { + void *pages = os_pages_map(NULL, alloc_size, alignment, commit); + if (pages == NULL) { + return NULL; + } + size_t leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) + - (uintptr_t)pages; + ret = os_pages_trim(pages, alloc_size, leadsize, size, commit); + } while (ret == NULL); + + assert(ret != NULL); + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; +} + +void * +pages_map(void *addr, size_t size, size_t alignment, bool *commit) { + assert(alignment >= PAGE); + assert(ALIGNMENT_ADDR2BASE(addr, alignment) == addr); + + /* + * Ideally, there would be a way to specify alignment to mmap() (like + * NetBSD has), but in the absence of such a feature, we have to work + * hard to efficiently create aligned mappings. The reliable, but + * slow method is to create a mapping that is over-sized, then trim the + * excess. However, that always results in one or two calls to + * os_pages_unmap(), and it can leave holes in the process's virtual + * memory map if memory grows downward. + * + * Optimistically try mapping precisely the right amount before falling + * back to the slow method, with the expectation that the optimistic + * approach works most of the time. 
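The trim arithmetic that pages_map_slow()/os_pages_trim() above rely on, as a standalone sketch; a power-of-two alignment is assumed, mirroring what ALIGNMENT_CEILING does:

#include <stdint.h>
#include <stddef.h>

/* Round addr up to a power-of-two alignment. */
static uintptr_t
align_up(uintptr_t addr, size_t alignment) {
	return (addr + alignment - 1) & ~((uintptr_t)alignment - 1);
}

/*
 * For an over-sized mapping of alloc_size = size + alignment - page
 * bytes starting at base, compute the slop to unmap on each side of
 * the aligned window.
 */
static void
trim_bounds(uintptr_t base, size_t alloc_size, size_t size, size_t alignment,
    size_t *leadsize, size_t *trailsize) {
	uintptr_t aligned = align_up(base, alignment);
	*leadsize = (size_t)(aligned - base);		/* unmapped before the window */
	*trailsize = alloc_size - *leadsize - size;	/* unmapped after it */
}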
+ */ + + void *ret = os_pages_map(addr, size, os_page, commit); + if (ret == NULL || ret == addr) { + return ret; + } + assert(addr == NULL); + if (ALIGNMENT_ADDR2OFFSET(ret, alignment) != 0) { + os_pages_unmap(ret, size); + return pages_map_slow(size, alignment, commit); + } + + assert(PAGE_ADDR2BASE(ret) == ret); + return ret; +} + +void +pages_unmap(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + os_pages_unmap(addr, size); +} + +static bool +pages_commit_impl(void *addr, size_t size, bool commit) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (os_overcommits) { + return true; + } + +#ifdef _WIN32 + return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT, + PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT))); +#else + { + int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT; + void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED, + -1, 0); + if (result == MAP_FAILED) { + return true; + } + if (result != addr) { + /* + * We succeeded in mapping memory, but not in the right + * place. + */ + os_pages_unmap(result, size); + return true; + } + return false; + } +#endif +} + +bool +pages_commit(void *addr, size_t size) { + return pages_commit_impl(addr, size, true); +} + +bool +pages_decommit(void *addr, size_t size) { + return pages_commit_impl(addr, size, false); +} + +bool +pages_purge_lazy(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (!pages_can_purge_lazy) { + return true; + } + +#ifdef _WIN32 + VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE); + return false; +#elif defined(JEMALLOC_PURGE_MADVISE_FREE) + return (madvise(addr, size, MADV_FREE) != 0); +#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + !defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) + return (madvise(addr, size, MADV_DONTNEED) != 0); +#else + not_reached(); +#endif +} + +bool +pages_purge_forced(void *addr, size_t size) { + assert(PAGE_ADDR2BASE(addr) == addr); + assert(PAGE_CEILING(size) == size); + + if (!pages_can_purge_forced) { + return true; + } + +#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \ + defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS) + return (madvise(addr, size, MADV_DONTNEED) != 0); +#elif defined(JEMALLOC_MAPS_COALESCE) + /* Try to overlay a new demand-zeroed mapping. */ + return pages_commit(addr, size); +#else + not_reached(); +#endif +} + +bool +pages_huge(void *addr, size_t size) { + assert(HUGEPAGE_ADDR2BASE(addr) == addr); + assert(HUGEPAGE_CEILING(size) == size); + +#ifdef JEMALLOC_THP + return (madvise(addr, size, MADV_HUGEPAGE) != 0); +#else + return true; +#endif +} + +bool +pages_nohuge(void *addr, size_t size) { + assert(HUGEPAGE_ADDR2BASE(addr) == addr); + assert(HUGEPAGE_CEILING(size) == size); + +#ifdef JEMALLOC_THP + return (madvise(addr, size, MADV_NOHUGEPAGE) != 0); +#else + return false; +#endif +} + +static size_t +os_page_detect(void) { +#ifdef _WIN32 + SYSTEM_INFO si; + GetSystemInfo(&si); + return si.dwPageSize; +#else + long result = sysconf(_SC_PAGESIZE); + if (result == -1) { + return LG_PAGE; + } + return (size_t)result; +#endif +} + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT +static bool +os_overcommits_sysctl(void) { + int vm_overcommit; + size_t sz; + + sz = sizeof(vm_overcommit); + if (sysctlbyname("vm.overcommit", &vm_overcommit, &sz, NULL, 0) != 0) { + return false; /* Error. 
*/ + } + + return ((vm_overcommit & 0x3) == 0); +} +#endif + +#ifdef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY +/* + * Use syscall(2) rather than {open,read,close}(2) when possible to avoid + * reentry during bootstrapping if another library has interposed system call + * wrappers. + */ +static bool +os_overcommits_proc(void) { + int fd; + char buf[1]; + ssize_t nread; + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open) + fd = (int)syscall(SYS_open, "/proc/sys/vm/overcommit_memory", O_RDONLY | + O_CLOEXEC); +#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat) + fd = (int)syscall(SYS_openat, + AT_FDCWD, "/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); +#else + fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY | O_CLOEXEC); +#endif + if (fd == -1) { + return false; /* Error. */ + } + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read) + nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf)); +#else + nread = read(fd, &buf, sizeof(buf)); +#endif + +#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close) + syscall(SYS_close, fd); +#else + close(fd); +#endif + + if (nread < 1) { + return false; /* Error. */ + } + /* + * /proc/sys/vm/overcommit_memory meanings: + * 0: Heuristic overcommit. + * 1: Always overcommit. + * 2: Never overcommit. + */ + return (buf[0] == '0' || buf[0] == '1'); +} +#endif + +bool +pages_boot(void) { + os_page = os_page_detect(); + if (os_page > PAGE) { + malloc_write("<jemalloc>: Unsupported system page size\n"); + if (opt_abort) { + abort(); + } + return true; + } + +#ifndef _WIN32 + mmap_flags = MAP_PRIVATE | MAP_ANON; +#endif + +#ifdef JEMALLOC_SYSCTL_VM_OVERCOMMIT + os_overcommits = os_overcommits_sysctl(); +#elif defined(JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY) + os_overcommits = os_overcommits_proc(); +# ifdef MAP_NORESERVE + if (os_overcommits) { + mmap_flags |= MAP_NORESERVE; + } +# endif +#else + os_overcommits = false; +#endif + + return false; +} diff --git a/dep/jemalloc/src/prng.c b/dep/jemalloc/src/prng.c new file mode 100644 index 00000000000..83c04bf9b5d --- /dev/null +++ b/dep/jemalloc/src/prng.c @@ -0,0 +1,3 @@ +#define JEMALLOC_PRNG_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/dep/jemalloc/src/prof.c b/dep/jemalloc/src/prof.c index 7722b7b4373..975722c4c38 100644 --- a/dep/jemalloc/src/prof.c +++ b/dep/jemalloc/src/prof.c @@ -1,27 +1,41 @@ -#define JEMALLOC_PROF_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_PROF_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ckh.h" +#include "jemalloc/internal/hash.h" +#include "jemalloc/internal/malloc_io.h" +#include "jemalloc/internal/mutex.h" + /******************************************************************************/ #ifdef JEMALLOC_PROF_LIBUNWIND -#define UNW_LOCAL_ONLY +#define UNW_LOCAL_ONLY #include <libunwind.h> #endif #ifdef JEMALLOC_PROF_LIBGCC +/* + * We have a circular dependency -- jemalloc_internal.h tells us if we should + * use libgcc's unwinding functionality, but after we've included that, we've + * already hooked _Unwind_Backtrace. We'll temporarily disable hooking. + */ +#undef _Unwind_Backtrace #include <unwind.h> +#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, hooks_libc_hook) #endif /******************************************************************************/ /* Data. 
*/ -malloc_tsd_data(, prof_tdata, prof_tdata_t *, NULL) - bool opt_prof = false; bool opt_prof_active = true; +bool opt_prof_thread_active_init = true; size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT; ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT; bool opt_prof_gdump = false; -bool opt_prof_final = true; +bool opt_prof_final = false; bool opt_prof_leak = false; bool opt_prof_accum = false; char opt_prof_prefix[ @@ -31,25 +45,66 @@ char opt_prof_prefix[ #endif 1]; +/* + * Initialized as opt_prof_active, and accessed via + * prof_active_[gs]et{_unlocked,}(). + */ +bool prof_active; +static malloc_mutex_t prof_active_mtx; + +/* + * Initialized as opt_prof_thread_active_init, and accessed via + * prof_thread_active_init_[gs]et(). + */ +static bool prof_thread_active_init; +static malloc_mutex_t prof_thread_active_init_mtx; + +/* + * Initialized as opt_prof_gdump, and accessed via + * prof_gdump_[gs]et{_unlocked,}(). + */ +bool prof_gdump_val; +static malloc_mutex_t prof_gdump_mtx; + uint64_t prof_interval = 0; -bool prof_promote; + +size_t lg_prof_sample; /* - * Table of mutexes that are shared among ctx's. These are leaf locks, so - * there is no problem with using them for more than one ctx at the same time. - * The primary motivation for this sharing though is that ctx's are ephemeral, + * Table of mutexes that are shared among gctx's. These are leaf locks, so + * there is no problem with using them for more than one gctx at the same time. + * The primary motivation for this sharing though is that gctx's are ephemeral, * and destroying mutexes causes complications for systems that allocate when * creating/destroying mutexes. */ -static malloc_mutex_t *ctx_locks; -static unsigned cum_ctxs; /* Atomic counter. */ +static malloc_mutex_t *gctx_locks; +static atomic_u_t cum_gctxs; /* Atomic counter. */ /* - * Global hash of (prof_bt_t *)-->(prof_ctx_t *). This is the master data + * Table of mutexes that are shared among tdata's. No operations require + * holding multiple tdata locks, so there is no problem with using them for more + * than one tdata at the same time, even though a gctx lock may be acquired + * while holding a tdata lock. + */ +static malloc_mutex_t *tdata_locks; + +/* + * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data * structure that knows about all backtraces currently captured. */ -static ckh_t bt2ctx; -static malloc_mutex_t bt2ctx_mtx; +static ckh_t bt2gctx; +/* Non static to enable profiling. */ +malloc_mutex_t bt2gctx_mtx; + +/* + * Tree of all extant prof_tdata_t structures, regardless of state, + * {attached,detached,expired}. + */ +static prof_tdata_tree_t tdatas; +static malloc_mutex_t tdatas_mtx; + +static uint64_t next_thr_uid; +static malloc_mutex_t next_thr_uid_mtx; static malloc_mutex_t prof_dump_seq_mtx; static uint64_t prof_dump_seq; @@ -70,161 +125,242 @@ static char prof_dump_buf[ 1 #endif ]; -static unsigned prof_dump_buf_end; +static size_t prof_dump_buf_end; static int prof_dump_fd; /* Do not dump any profiles until bootstrapping is complete. */ static bool prof_booted = false; /******************************************************************************/ +/* + * Function prototypes for static functions that are referenced prior to + * definition. 
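The gctx_locks and tdata_locks tables declared above are lock striping: ephemeral objects borrow a leaf lock from a fixed table instead of owning a mutex apiece, so creating or destroying an object never creates or destroys a mutex. A minimal sketch of the idea; NLOCKS is arbitrary, where jemalloc sizes its tables with PROF_NCTX_LOCKS and PROF_NTDATA_LOCKS:

#include <pthread.h>
#include <stdint.h>

#define NLOCKS 64	/* arbitrary stripe count for illustration */

static pthread_mutex_t lock_table[NLOCKS];

/* Any stable key (a counter value, a thread uid) selects the stripe. */
static pthread_mutex_t *
lock_for(uint64_t key) {
	return &lock_table[key % NLOCKS];
}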
+ */ -void -bt_init(prof_bt_t *bt, void **vec) -{ +static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx); +static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx); +static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached); +static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached); +static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name); - cassert(config_prof); +/******************************************************************************/ +/* Red-black trees. */ - bt->vec = vec; - bt->len = 0; +static int +prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) { + uint64_t a_thr_uid = a->thr_uid; + uint64_t b_thr_uid = b->thr_uid; + int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid); + if (ret == 0) { + uint64_t a_thr_discrim = a->thr_discrim; + uint64_t b_thr_discrim = b->thr_discrim; + ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim < + b_thr_discrim); + if (ret == 0) { + uint64_t a_tctx_uid = a->tctx_uid; + uint64_t b_tctx_uid = b->tctx_uid; + ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < + b_tctx_uid); + } + } + return ret; } -static void -bt_destroy(prof_bt_t *bt) -{ +rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t, + tctx_link, prof_tctx_comp) - cassert(config_prof); +static int +prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) { + unsigned a_len = a->bt.len; + unsigned b_len = b->bt.len; + unsigned comp_len = (a_len < b_len) ? a_len : b_len; + int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *)); + if (ret == 0) { + ret = (a_len > b_len) - (a_len < b_len); + } + return ret; +} + +rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link, + prof_gctx_comp) + +static int +prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) { + int ret; + uint64_t a_uid = a->thr_uid; + uint64_t b_uid = b->thr_uid; + + ret = ((a_uid > b_uid) - (a_uid < b_uid)); + if (ret == 0) { + uint64_t a_discrim = a->thr_discrim; + uint64_t b_discrim = b->thr_discrim; - idalloc(bt); + ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim)); + } + return ret; } -static prof_bt_t * -bt_dup(prof_bt_t *bt) -{ - prof_bt_t *ret; +rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link, + prof_tdata_comp) + +/******************************************************************************/ + +void +prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) { + prof_tdata_t *tdata; cassert(config_prof); - /* - * Create a single allocation that has space for vec immediately - * following the prof_bt_t structure. The backtraces that get - * stored in the backtrace caches are copied from stack-allocated - * temporary variables, so size is known at creation time. Making this - * a contiguous object improves cache locality. - */ - ret = (prof_bt_t *)imalloc(QUANTUM_CEILING(sizeof(prof_bt_t)) + - (bt->len * sizeof(void *))); - if (ret == NULL) - return (NULL); - ret->vec = (void **)((uintptr_t)ret + - QUANTUM_CEILING(sizeof(prof_bt_t))); - memcpy(ret->vec, bt->vec, bt->len * sizeof(void *)); - ret->len = bt->len; + if (updated) { + /* + * Compute a new sample threshold. This isn't very important in + * practice, because this function is rarely executed, so the + * potential for sample bias is minimal except in contrived + * programs. 
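The red-black tree comparators above (prof_tctx_comp, prof_gctx_comp, prof_tdata_comp) are all built from one branchless idiom, shown here in isolation:

#include <stdint.h>

/*
 * Yields -1, 0, or 1 without the overflow that a naive (int)(a - b)
 * would risk on 64-bit keys.
 */
static int
cmp_u64(uint64_t a, uint64_t b) {
	return (a > b) - (a < b);
}

/* Ties chain to the next key, exactly as in prof_tctx_comp() above. */
static int
cmp_u64_pair(uint64_t a1, uint64_t a2, uint64_t b1, uint64_t b2) {
	int ret = cmp_u64(a1, b1);
	return (ret != 0) ? ret : cmp_u64(a2, b2);
}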
+ */ + tdata = prof_tdata_get(tsd, true); + if (tdata != NULL) { + prof_sample_threshold_update(tdata); + } + } - return (ret); + if ((uintptr_t)tctx > (uintptr_t)1U) { + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + tctx->prepared = false; + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { + prof_tctx_destroy(tsd, tctx); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } + } } -static inline void -prof_enter(prof_tdata_t *prof_tdata) -{ +void +prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, + prof_tctx_t *tctx) { + prof_tctx_set(tsdn, ptr, usize, NULL, tctx); - cassert(config_prof); + malloc_mutex_lock(tsdn, tctx->tdata->lock); + tctx->cnts.curobjs++; + tctx->cnts.curbytes += usize; + if (opt_prof_accum) { + tctx->cnts.accumobjs++; + tctx->cnts.accumbytes += usize; + } + tctx->prepared = false; + malloc_mutex_unlock(tsdn, tctx->tdata->lock); +} + +void +prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx) { + malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock); + assert(tctx->cnts.curobjs > 0); + assert(tctx->cnts.curbytes >= usize); + tctx->cnts.curobjs--; + tctx->cnts.curbytes -= usize; + + if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) { + prof_tctx_destroy(tsd, tctx); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock); + } +} - assert(prof_tdata->enq == false); - prof_tdata->enq = true; +void +bt_init(prof_bt_t *bt, void **vec) { + cassert(config_prof); - malloc_mutex_lock(&bt2ctx_mtx); + bt->vec = vec; + bt->len = 0; } -static inline void -prof_leave(prof_tdata_t *prof_tdata) -{ - bool idump, gdump; +static void +prof_enter(tsd_t *tsd, prof_tdata_t *tdata) { + cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); + + if (tdata != NULL) { + assert(!tdata->enq); + tdata->enq = true; + } + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); +} + +static void +prof_leave(tsd_t *tsd, prof_tdata_t *tdata) { cassert(config_prof); + assert(tdata == prof_tdata_get(tsd, false)); - malloc_mutex_unlock(&bt2ctx_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - assert(prof_tdata->enq); - prof_tdata->enq = false; - idump = prof_tdata->enq_idump; - prof_tdata->enq_idump = false; - gdump = prof_tdata->enq_gdump; - prof_tdata->enq_gdump = false; + if (tdata != NULL) { + bool idump, gdump; - if (idump) - prof_idump(); - if (gdump) - prof_gdump(); + assert(tdata->enq); + tdata->enq = false; + idump = tdata->enq_idump; + tdata->enq_idump = false; + gdump = tdata->enq_gdump; + tdata->enq_gdump = false; + + if (idump) { + prof_idump(tsd_tsdn(tsd)); + } + if (gdump) { + prof_gdump(tsd_tsdn(tsd)); + } + } } #ifdef JEMALLOC_PROF_LIBUNWIND void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - unw_context_t uc; - unw_cursor_t cursor; - unsigned i; - int err; +prof_backtrace(prof_bt_t *bt) { + int nframes; cassert(config_prof); assert(bt->len == 0); assert(bt->vec != NULL); - unw_getcontext(&uc); - unw_init_local(&cursor, &uc); - - /* Throw away (nignore+1) stack frames, if that many exist. */ - for (i = 0; i < nignore + 1; i++) { - err = unw_step(&cursor); - if (err <= 0) - return; - } - - /* - * Iterate over stack frames until there are no more, or until no space - * remains in bt. 
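The rewritten prof_backtrace() above collapses the removed unw_getcontext()/unw_step() cursor loop into a single libunwind call. A standalone sketch of the same capture, with MAX_FRAMES playing the role of PROF_BT_MAX:

#define UNW_LOCAL_ONLY
#include <libunwind.h>

#define MAX_FRAMES 128	/* stand-in for PROF_BT_MAX */

/* unw_backtrace() fills vec and returns the frame count (<= 0 on failure). */
static unsigned
capture_backtrace(void **vec) {
	int nframes = unw_backtrace(vec, MAX_FRAMES);
	return (nframes > 0) ? (unsigned)nframes : 0;
}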
- */ - for (i = 0; i < PROF_BT_MAX; i++) { - unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *)&bt->vec[i]); - bt->len++; - err = unw_step(&cursor); - if (err <= 0) - break; + nframes = unw_backtrace(bt->vec, PROF_BT_MAX); + if (nframes <= 0) { + return; } + bt->len = nframes; } #elif (defined(JEMALLOC_PROF_LIBGCC)) static _Unwind_Reason_Code -prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) -{ - +prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) { cassert(config_prof); - return (_URC_NO_REASON); + return _URC_NO_REASON; } static _Unwind_Reason_Code -prof_unwind_callback(struct _Unwind_Context *context, void *arg) -{ +prof_unwind_callback(struct _Unwind_Context *context, void *arg) { prof_unwind_data_t *data = (prof_unwind_data_t *)arg; + void *ip; cassert(config_prof); - if (data->nignore > 0) - data->nignore--; - else { - data->bt->vec[data->bt->len] = (void *)_Unwind_GetIP(context); - data->bt->len++; - if (data->bt->len == data->max) - return (_URC_END_OF_STACK); + ip = (void *)_Unwind_GetIP(context); + if (ip == NULL) { + return _URC_END_OF_STACK; + } + data->bt->vec[data->bt->len] = ip; + data->bt->len++; + if (data->bt->len == data->max) { + return _URC_END_OF_STACK; } - return (_URC_NO_REASON); + return _URC_NO_REASON; } void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - prof_unwind_data_t data = {bt, nignore, PROF_BT_MAX}; +prof_backtrace(prof_bt_t *bt) { + prof_unwind_data_t data = {bt, PROF_BT_MAX}; cassert(config_prof); @@ -232,25 +368,24 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore) } #elif (defined(JEMALLOC_PROF_GCC)) void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ -#define BT_FRAME(i) \ - if ((i) < nignore + PROF_BT_MAX) { \ +prof_backtrace(prof_bt_t *bt) { +#define BT_FRAME(i) \ + if ((i) < PROF_BT_MAX) { \ void *p; \ - if (__builtin_frame_address(i) == 0) \ + if (__builtin_frame_address(i) == 0) { \ return; \ + } \ p = __builtin_return_address(i); \ - if (p == NULL) \ + if (p == NULL) { \ return; \ - if (i >= nignore) { \ - bt->vec[(i) - nignore] = p; \ - bt->len = (i) - nignore + 1; \ } \ - } else \ - return; + bt->vec[(i)] = p; \ + bt->len = (i) + 1; \ + } else { \ + return; \ + } cassert(config_prof); - assert(nignore <= 3); BT_FRAME(0) BT_FRAME(1) @@ -392,307 +527,452 @@ prof_backtrace(prof_bt_t *bt, unsigned nignore) BT_FRAME(125) BT_FRAME(126) BT_FRAME(127) - - /* Extras to compensate for nignore. */ - BT_FRAME(128) - BT_FRAME(129) - BT_FRAME(130) #undef BT_FRAME } #else void -prof_backtrace(prof_bt_t *bt, unsigned nignore) -{ - +prof_backtrace(prof_bt_t *bt) { cassert(config_prof); not_reached(); } #endif static malloc_mutex_t * -prof_ctx_mutex_choose(void) -{ - unsigned nctxs = atomic_add_u(&cum_ctxs, 1); +prof_gctx_mutex_choose(void) { + unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED); - return (&ctx_locks[(nctxs - 1) % PROF_NCTX_LOCKS]); + return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]; } -static void -prof_ctx_init(prof_ctx_t *ctx, prof_bt_t *bt) -{ +static malloc_mutex_t * +prof_tdata_mutex_choose(uint64_t thr_uid) { + return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS]; +} - ctx->bt = bt; - ctx->lock = prof_ctx_mutex_choose(); +static prof_gctx_t * +prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) { + /* + * Create a single allocation that has space for vec of length bt->len. 
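The offsetof() sizing that follows replaces the old two-step bt_dup(): header and trailing vector now come from one allocation, which improves locality and halves the failure paths. The trick in isolation, with illustrative names:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
	unsigned len;
	void *vec[1];	/* trailing storage grows past the struct */
} bt_copy_t;

/* One malloc covers header plus vector; offsetof() counts padding exactly once. */
static bt_copy_t *
bt_copy_create(void **frames, unsigned len) {
	bt_copy_t *c = malloc(offsetof(bt_copy_t, vec) +
	    len * sizeof(void *));
	if (c == NULL) {
		return NULL;
	}
	c->len = len;
	memcpy(c->vec, frames, len * sizeof(void *));
	return c;
}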
+ */ + size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *)); + prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size, + sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true), + true); + if (gctx == NULL) { + return NULL; + } + gctx->lock = prof_gctx_mutex_choose(); /* * Set nlimbo to 1, in order to avoid a race condition with - * prof_ctx_merge()/prof_ctx_destroy(). + * prof_tctx_destroy()/prof_gctx_try_destroy(). */ - ctx->nlimbo = 1; - ql_elm_new(ctx, dump_link); - memset(&ctx->cnt_merged, 0, sizeof(prof_cnt_t)); - ql_new(&ctx->cnts_ql); + gctx->nlimbo = 1; + tctx_tree_new(&gctx->tctxs); + /* Duplicate bt. */ + memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *)); + gctx->bt.vec = gctx->vec; + gctx->bt.len = bt->len; + return gctx; } static void -prof_ctx_destroy(prof_ctx_t *ctx) -{ - prof_tdata_t *prof_tdata; - +prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx, + prof_tdata_t *tdata) { cassert(config_prof); /* - * Check that ctx is still unused by any thread cache before destroying - * it. prof_lookup() increments ctx->nlimbo in order to avoid a race - * condition with this function, as does prof_ctx_merge() in order to - * avoid a race between the main body of prof_ctx_merge() and entry + * Check that gctx is still unused by any thread cache before destroying + * it. prof_lookup() increments gctx->nlimbo in order to avoid a race + * condition with this function, as does prof_tctx_destroy() in order to + * avoid a race between the main body of prof_tctx_destroy() and entry * into this function. */ - prof_tdata = prof_tdata_get(false); - assert((uintptr_t)prof_tdata > (uintptr_t)PROF_TDATA_STATE_MAX); - prof_enter(prof_tdata); - malloc_mutex_lock(ctx->lock); - if (ql_first(&ctx->cnts_ql) == NULL && ctx->cnt_merged.curobjs == 0 && - ctx->nlimbo == 1) { - assert(ctx->cnt_merged.curbytes == 0); - assert(ctx->cnt_merged.accumobjs == 0); - assert(ctx->cnt_merged.accumbytes == 0); - /* Remove ctx from bt2ctx. */ - if (ckh_remove(&bt2ctx, ctx->bt, NULL, NULL)) + prof_enter(tsd, tdata_self); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + assert(gctx->nlimbo != 0); + if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) { + /* Remove gctx from bt2gctx. */ + if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) { not_reached(); - prof_leave(prof_tdata); - /* Destroy ctx. */ - malloc_mutex_unlock(ctx->lock); - bt_destroy(ctx->bt); - idalloc(ctx); + } + prof_leave(tsd, tdata_self); + /* Destroy gctx. */ + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true); } else { /* - * Compensate for increment in prof_ctx_merge() or + * Compensate for increment in prof_tctx_destroy() or * prof_lookup(). 
*/ - ctx->nlimbo--; - malloc_mutex_unlock(ctx->lock); - prof_leave(prof_tdata); + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_leave(tsd, tdata_self); } } -static void -prof_ctx_merge(prof_ctx_t *ctx, prof_thr_cnt_t *cnt) -{ - bool destroy; +static bool +prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); - cassert(config_prof); + if (opt_prof_accum) { + return false; + } + if (tctx->cnts.curobjs != 0) { + return false; + } + if (tctx->prepared) { + return false; + } + return true; +} + +static bool +prof_gctx_should_destroy(prof_gctx_t *gctx) { + if (opt_prof_accum) { + return false; + } + if (!tctx_tree_empty(&gctx->tctxs)) { + return false; + } + if (gctx->nlimbo != 0) { + return false; + } + return true; +} - /* Merge cnt stats and detach from ctx. */ - malloc_mutex_lock(ctx->lock); - ctx->cnt_merged.curobjs += cnt->cnts.curobjs; - ctx->cnt_merged.curbytes += cnt->cnts.curbytes; - ctx->cnt_merged.accumobjs += cnt->cnts.accumobjs; - ctx->cnt_merged.accumbytes += cnt->cnts.accumbytes; - ql_remove(&ctx->cnts_ql, cnt, cnts_link); - if (opt_prof_accum == false && ql_first(&ctx->cnts_ql) == NULL && - ctx->cnt_merged.curobjs == 0 && ctx->nlimbo == 0) { +static void +prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) { + prof_tdata_t *tdata = tctx->tdata; + prof_gctx_t *gctx = tctx->gctx; + bool destroy_tdata, destroy_tctx, destroy_gctx; + + malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + assert(tctx->cnts.curobjs == 0); + assert(tctx->cnts.curbytes == 0); + assert(!opt_prof_accum); + assert(tctx->cnts.accumobjs == 0); + assert(tctx->cnts.accumbytes == 0); + + ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL); + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + switch (tctx->state) { + case prof_tctx_state_nominal: + tctx_tree_remove(&gctx->tctxs, tctx); + destroy_tctx = true; + if (prof_gctx_should_destroy(gctx)) { + /* + * Increment gctx->nlimbo in order to keep another + * thread from winning the race to destroy gctx while + * this one has gctx->lock dropped. Without this, it + * would be possible for another thread to: + * + * 1) Sample an allocation associated with gctx. + * 2) Deallocate the sampled object. + * 3) Successfully prof_gctx_try_destroy(gctx). + * + * The result would be that gctx no longer exists by the + * time this thread accesses it in + * prof_gctx_try_destroy(). + */ + gctx->nlimbo++; + destroy_gctx = true; + } else { + destroy_gctx = false; + } + break; + case prof_tctx_state_dumping: /* - * Increment ctx->nlimbo in order to keep another thread from - * winning the race to destroy ctx while this one has ctx->lock - * dropped. Without this, it would be possible for another - * thread to: - * - * 1) Sample an allocation associated with ctx. - * 2) Deallocate the sampled object. - * 3) Successfully prof_ctx_destroy(ctx). - * - * The result would be that ctx no longer exists by the time - * this thread accesses it in prof_ctx_destroy(). + * A dumping thread needs tctx to remain valid until dumping + * has finished. Change state such that the dumping thread will + * complete destruction during a late dump iteration phase. 
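An informal summary of the tctx lifecycle this switch implements; the state constants are jemalloc's, but the transition notes are a reading of the surrounding code rather than upstream documentation:

/*
 *   initializing -> nominal     linked into gctx->tctxs by prof_lookup()
 *   nominal      -> dumping     a dumper snapshots the counters
 *   dumping      -> nominal     dump finished, tctx still referenced
 *   nominal      -> destroyed   last sampled object freed outside a dump
 *   dumping      -> purgatory   freed mid-dump; the dumper completes
 *                               destruction in its final iteration
 */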
*/ - ctx->nlimbo++; - destroy = true; - } else - destroy = false; - malloc_mutex_unlock(ctx->lock); - if (destroy) - prof_ctx_destroy(ctx); + tctx->state = prof_tctx_state_purgatory; + destroy_tctx = false; + destroy_gctx = false; + break; + default: + not_reached(); + destroy_tctx = false; + destroy_gctx = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + if (destroy_gctx) { + prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx, + tdata); + } + + malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock); + + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, false); + } + + if (destroy_tctx) { + idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true); + } } static bool -prof_lookup_global(prof_bt_t *bt, prof_tdata_t *prof_tdata, void **p_btkey, - prof_ctx_t **p_ctx, bool *p_new_ctx) -{ +prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata, + void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) { union { - prof_ctx_t *p; + prof_gctx_t *p; void *v; - } ctx; + } gctx, tgctx; union { prof_bt_t *p; void *v; } btkey; - bool new_ctx; + bool new_gctx; - prof_enter(prof_tdata); - if (ckh_search(&bt2ctx, bt, &btkey.v, &ctx.v)) { + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { /* bt has never been seen before. Insert it. */ - ctx.v = imalloc(sizeof(prof_ctx_t)); - if (ctx.v == NULL) { - prof_leave(prof_tdata); - return (true); - } - btkey.p = bt_dup(bt); - if (btkey.v == NULL) { - prof_leave(prof_tdata); - idalloc(ctx.v); - return (true); + prof_leave(tsd, tdata); + tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt); + if (tgctx.v == NULL) { + return true; } - prof_ctx_init(ctx.p, btkey.p); - if (ckh_insert(&bt2ctx, btkey.v, ctx.v)) { - /* OOM. */ - prof_leave(prof_tdata); - idalloc(btkey.v); - idalloc(ctx.v); - return (true); + prof_enter(tsd, tdata); + if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) { + gctx.p = tgctx.p; + btkey.p = &gctx.p->bt; + if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) { + /* OOM. */ + prof_leave(tsd, tdata); + idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL, + true, true); + return true; + } + new_gctx = true; + } else { + new_gctx = false; } - new_ctx = true; } else { + tgctx.v = NULL; + new_gctx = false; + } + + if (!new_gctx) { /* * Increment nlimbo, in order to avoid a race condition with - * prof_ctx_merge()/prof_ctx_destroy(). + * prof_tctx_destroy()/prof_gctx_try_destroy(). */ - malloc_mutex_lock(ctx.p->lock); - ctx.p->nlimbo++; - malloc_mutex_unlock(ctx.p->lock); - new_ctx = false; + malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock); + gctx.p->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock); + new_gctx = false; + + if (tgctx.v != NULL) { + /* Lost race to insert. 
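prof_lookup_global() above follows the allocate-outside-the-lock pattern: build the new node with the table lock dropped (allocation may itself take locks), re-check under the lock, and discard the fresh copy if a concurrent thread won the race. A self-contained sketch of the same shape over a trivial mutex-guarded list; all names here are illustrative:

#include <pthread.h>
#include <stdlib.h>

typedef struct node { int key; struct node *next; } node_t;
static node_t *head;
static pthread_mutex_t table_mtx = PTHREAD_MUTEX_INITIALIZER;

static node_t *
find_locked(int key) {
	for (node_t *n = head; n != NULL; n = n->next) {
		if (n->key == key) return n;
	}
	return NULL;
}

static node_t *
lookup_or_insert(int key) {
	pthread_mutex_lock(&table_mtx);
	node_t *n = find_locked(key);
	pthread_mutex_unlock(&table_mtx);
	if (n != NULL) return n;

	node_t *fresh = malloc(sizeof(*fresh));	/* no table lock held */
	if (fresh == NULL) return NULL;
	fresh->key = key;

	pthread_mutex_lock(&table_mtx);
	n = find_locked(key);			/* re-check under the lock */
	if (n == NULL) {
		fresh->next = head;		/* we won: publish fresh */
		head = fresh;
		n = fresh;
		fresh = NULL;
	}
	pthread_mutex_unlock(&table_mtx);
	free(fresh);				/* lost the race: discard duplicate */
	return n;
}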
*/ + idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true, + true); + } } - prof_leave(prof_tdata); + prof_leave(tsd, tdata); *p_btkey = btkey.v; - *p_ctx = ctx.p; - *p_new_ctx = new_ctx; - return (false); + *p_gctx = gctx.p; + *p_new_gctx = new_gctx; + return false; } -prof_thr_cnt_t * -prof_lookup(prof_bt_t *bt) -{ +prof_tctx_t * +prof_lookup(tsd_t *tsd, prof_bt_t *bt) { union { - prof_thr_cnt_t *p; + prof_tctx_t *p; void *v; } ret; - prof_tdata_t *prof_tdata; + prof_tdata_t *tdata; + bool not_found; cassert(config_prof); - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (NULL); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return NULL; + } - if (ckh_search(&prof_tdata->bt2cnt, bt, NULL, &ret.v)) { + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v); + if (!not_found) { /* Note double negative! */ + ret.p->prepared = true; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (not_found) { void *btkey; - prof_ctx_t *ctx; - bool new_ctx; + prof_gctx_t *gctx; + bool new_gctx, error; /* * This thread's cache lacks bt. Look for it in the global * cache. */ - if (prof_lookup_global(bt, prof_tdata, &btkey, &ctx, &new_ctx)) - return (NULL); + if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx, + &new_gctx)) { + return NULL; + } - /* Link a prof_thd_cnt_t into ctx for this thread. */ - if (ckh_count(&prof_tdata->bt2cnt) == PROF_TCMAX) { - assert(ckh_count(&prof_tdata->bt2cnt) > 0); - /* - * Flush the least recently used cnt in order to keep - * bt2cnt from becoming too large. - */ - ret.p = ql_last(&prof_tdata->lru_ql, lru_link); - assert(ret.v != NULL); - if (ckh_remove(&prof_tdata->bt2cnt, ret.p->ctx->bt, - NULL, NULL)) - not_reached(); - ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); - prof_ctx_merge(ret.p->ctx, ret.p); - /* ret can now be re-used. */ - } else { - assert(ckh_count(&prof_tdata->bt2cnt) < PROF_TCMAX); - /* Allocate and partially initialize a new cnt. */ - ret.v = imalloc(sizeof(prof_thr_cnt_t)); - if (ret.p == NULL) { - if (new_ctx) - prof_ctx_destroy(ctx); - return (NULL); + /* Link a prof_tctx_t into gctx for this thread. */ + ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t), + sz_size2index(sizeof(prof_tctx_t)), false, NULL, true, + arena_ichoose(tsd, NULL), true); + if (ret.p == NULL) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); } - ql_elm_new(ret.p, cnts_link); - ql_elm_new(ret.p, lru_link); + return NULL; } - /* Finish initializing ret. 
*/ - ret.p->ctx = ctx; - ret.p->epoch = 0; + ret.p->tdata = tdata; + ret.p->thr_uid = tdata->thr_uid; + ret.p->thr_discrim = tdata->thr_discrim; memset(&ret.p->cnts, 0, sizeof(prof_cnt_t)); - if (ckh_insert(&prof_tdata->bt2cnt, btkey, ret.v)) { - if (new_ctx) - prof_ctx_destroy(ctx); - idalloc(ret.v); - return (NULL); + ret.p->gctx = gctx; + ret.p->tctx_uid = tdata->tctx_uid_next++; + ret.p->prepared = true; + ret.p->state = prof_tctx_state_initializing; + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v); + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (error) { + if (new_gctx) { + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } + idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true); + return NULL; } - ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); - malloc_mutex_lock(ctx->lock); - ql_tail_insert(&ctx->cnts_ql, ret.p, cnts_link); - ctx->nlimbo--; - malloc_mutex_unlock(ctx->lock); - } else { - /* Move ret to the front of the LRU. */ - ql_remove(&prof_tdata->lru_ql, ret.p, lru_link); - ql_head_insert(&prof_tdata->lru_ql, ret.p, lru_link); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + ret.p->state = prof_tctx_state_nominal; + tctx_tree_insert(&gctx->tctxs, ret.p); + gctx->nlimbo--; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } - return (ret.p); + return ret.p; +} + +/* + * The bodies of this function and prof_leakcheck() are compiled out unless heap + * profiling is enabled, so that it is possible to compile jemalloc with + * floating point support completely disabled. Avoiding floating point code is + * important on memory-constrained systems, but it also enables a workaround for + * versions of glibc that don't properly save/restore floating point registers + * during dynamic lazy symbol loading (which internally calls into whatever + * malloc implementation happens to be integrated into the application). Note + * that some compilers (e.g. gcc 4.8) may use floating point registers for fast + * memory moves, so jemalloc must be compiled with such optimizations disabled + * (e.g. + * -mno-sse) in order for the workaround to be complete. + */ +void +prof_sample_threshold_update(prof_tdata_t *tdata) { +#ifdef JEMALLOC_PROF + uint64_t r; + double u; + + if (!config_prof) { + return; + } + + if (lg_prof_sample == 0) { + tdata->bytes_until_sample = 0; + return; + } + + /* + * Compute sample interval as a geometrically distributed random + * variable with mean (2^lg_prof_sample). 
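+	 * For instance, with the default lg_prof_sample = 19, p = 2^-19, so
+	 * thresholds average 2^19 = 524288 bytes; a draw of u = 0.5 yields
+	 * (uint64_t)(log(0.5) / log(1 - 2^-19)) + 1 = 363409 bytes until
+	 * the next sample.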
+ * + * __ __ + * | log(u) | 1 + * tdata->bytes_until_sample = | -------- |, where p = --------------- + * | log(1-p) | lg_prof_sample + * 2 + * + * For more information on the math, see: + * + * Non-Uniform Random Variate Generation + * Luc Devroye + * Springer-Verlag, New York, 1986 + * pp 500 + * (http://luc.devroye.org/rnbookindex.html) + */ + r = prng_lg_range_u64(&tdata->prng_state, 53); + u = (double)r * (1.0/9007199254740992.0L); + tdata->bytes_until_sample = (uint64_t)(log(u) / + log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample)))) + + (uint64_t)1U; +#endif } #ifdef JEMALLOC_JET +static prof_tdata_t * +prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + size_t *tdata_count = (size_t *)arg; + + (*tdata_count)++; + + return NULL; +} + size_t -prof_bt_count(void) -{ +prof_tdata_count(void) { + size_t tdata_count = 0; + tsdn_t *tsdn; + + tsdn = tsdn_fetch(); + malloc_mutex_lock(tsdn, &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter, + (void *)&tdata_count); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + + return tdata_count; +} + +size_t +prof_bt_count(void) { size_t bt_count; - prof_tdata_t *prof_tdata; + tsd_t *tsd; + prof_tdata_t *tdata; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (0); + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + return 0; + } - prof_enter(prof_tdata); - bt_count = ckh_count(&bt2ctx); - prof_leave(prof_tdata); + malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx); + bt_count = ckh_count(&bt2gctx); + malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx); - return (bt_count); + return bt_count; } #endif -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open_impl) -#endif static int -prof_dump_open(bool propagate_err, const char *filename) -{ +prof_dump_open_impl(bool propagate_err, const char *filename) { int fd; fd = creat(filename, 0644); - if (fd == -1 && propagate_err == false) { + if (fd == -1 && !propagate_err) { malloc_printf("<jemalloc>: creat(\"%s\"), 0644) failed\n", filename); - if (opt_abort) + if (opt_abort) { abort(); + } } - return (fd); + return fd; } -#ifdef JEMALLOC_JET -#undef prof_dump_open -#define prof_dump_open JEMALLOC_N(prof_dump_open) -prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl); -#endif +prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl; static bool -prof_dump_flush(bool propagate_err) -{ +prof_dump_flush(bool propagate_err) { bool ret = false; ssize_t err; @@ -700,22 +980,22 @@ prof_dump_flush(bool propagate_err) err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end); if (err == -1) { - if (propagate_err == false) { + if (!propagate_err) { malloc_write("<jemalloc>: write() failed during heap " "profile flush\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } ret = true; } prof_dump_buf_end = 0; - return (ret); + return ret; } static bool -prof_dump_close(bool propagate_err) -{ +prof_dump_close(bool propagate_err) { bool ret; assert(prof_dump_fd != -1); @@ -723,13 +1003,12 @@ prof_dump_close(bool propagate_err) close(prof_dump_fd); prof_dump_fd = -1; - return (ret); + return ret; } static bool -prof_dump_write(bool propagate_err, const char *s) -{ - unsigned i, slen, n; +prof_dump_write(bool propagate_err, const char *s) { + size_t i, slen, n; cassert(config_prof); @@ -737,9 +1016,11 @@ prof_dump_write(bool propagate_err, const char *s) slen = strlen(s); while (i < slen) { /* Flush the buffer if it 
is full. */ - if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) - if (prof_dump_flush(propagate_err) && propagate_err) - return (true); + if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) { + if (prof_dump_flush(propagate_err) && propagate_err) { + return true; + } + } if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) { /* Finish writing. */ @@ -753,13 +1034,12 @@ prof_dump_write(bool propagate_err, const char *s) i += n; } - return (false); + return false; } -JEMALLOC_ATTR(format(printf, 2, 3)) +JEMALLOC_FORMAT_PRINTF(2, 3) static bool -prof_dump_printf(bool propagate_err, const char *format, ...) -{ +prof_dump_printf(bool propagate_err, const char *format, ...) { bool ret; va_list ap; char buf[PROF_PRINTF_BUFSIZE]; @@ -769,179 +1049,401 @@ prof_dump_printf(bool propagate_err, const char *format, ...) va_end(ap); ret = prof_dump_write(propagate_err, buf); - return (ret); + return ret; +} + +static void +prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) { + malloc_mutex_assert_owner(tsdn, tctx->tdata->lock); + + malloc_mutex_lock(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + return; + case prof_tctx_state_nominal: + tctx->state = prof_tctx_state_dumping; + malloc_mutex_unlock(tsdn, tctx->gctx->lock); + + memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t)); + + tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + tdata->cnt_summed.accumobjs += + tctx->dump_cnts.accumobjs; + tdata->cnt_summed.accumbytes += + tctx->dump_cnts.accumbytes; + } + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + not_reached(); + } } static void -prof_dump_ctx_prep(prof_ctx_t *ctx, prof_cnt_t *cnt_all, size_t *leak_nctx, - prof_ctx_list_t *ctx_ql) -{ - prof_thr_cnt_t *thr_cnt; - prof_cnt_t tcnt; +prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) { + malloc_mutex_assert_owner(tsdn, gctx->lock); + + gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs; + gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes; + if (opt_prof_accum) { + gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs; + gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes; + } +} + +static prof_tctx_t * +prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx); + break; + default: + not_reached(); + } + + return NULL; +} + +struct prof_tctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_tctx_t * +prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) { + struct prof_tctx_dump_iter_arg_s *arg = + (struct prof_tctx_dump_iter_arg_s *)opaque; + + malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_initializing: + case prof_tctx_state_nominal: + /* Not captured by this dump. 
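The tctx was created after the merge pass took its snapshot, so it has no dump_cnts yet; a later dump will pick it up.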
*/ + break; + case prof_tctx_state_dumping: + case prof_tctx_state_purgatory: + if (prof_dump_printf(arg->propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": " + "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs, + tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs, + tctx->dump_cnts.accumbytes)) { + return tctx; + } + break; + default: + not_reached(); + } + return NULL; +} +static prof_tctx_t * +prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + prof_tctx_t *ret; + + malloc_mutex_assert_owner(tsdn, tctx->gctx->lock); + + switch (tctx->state) { + case prof_tctx_state_nominal: + /* New since dumping started; ignore. */ + break; + case prof_tctx_state_dumping: + tctx->state = prof_tctx_state_nominal; + break; + case prof_tctx_state_purgatory: + ret = tctx; + goto label_return; + default: + not_reached(); + } + + ret = NULL; +label_return: + return ret; +} + +static void +prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) { cassert(config_prof); - malloc_mutex_lock(ctx->lock); + malloc_mutex_lock(tsdn, gctx->lock); /* - * Increment nlimbo so that ctx won't go away before dump. - * Additionally, link ctx into the dump list so that it is included in + * Increment nlimbo so that gctx won't go away before dump. + * Additionally, link gctx into the dump list so that it is included in * prof_dump()'s second pass. */ - ctx->nlimbo++; - ql_tail_insert(ctx_ql, ctx, dump_link); + gctx->nlimbo++; + gctx_tree_insert(gctxs, gctx); - memcpy(&ctx->cnt_summed, &ctx->cnt_merged, sizeof(prof_cnt_t)); - ql_foreach(thr_cnt, &ctx->cnts_ql, cnts_link) { - volatile unsigned *epoch = &thr_cnt->epoch; + memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t)); - while (true) { - unsigned epoch0 = *epoch; + malloc_mutex_unlock(tsdn, gctx->lock); +} - /* Make sure epoch is even. */ - if (epoch0 & 1U) - continue; +struct prof_gctx_merge_iter_arg_s { + tsdn_t *tsdn; + size_t leak_ngctx; +}; + +static prof_gctx_t * +prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + struct prof_gctx_merge_iter_arg_s *arg = + (struct prof_gctx_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, + (void *)arg->tsdn); + if (gctx->cnt_summed.curobjs != 0) { + arg->leak_ngctx++; + } + malloc_mutex_unlock(arg->tsdn, gctx->lock); - memcpy(&tcnt, &thr_cnt->cnts, sizeof(prof_cnt_t)); + return NULL; +} - /* Terminate if epoch didn't change while reading. */ - if (*epoch == epoch0) - break; - } +static void +prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) { + prof_tdata_t *tdata = prof_tdata_get(tsd, false); + prof_gctx_t *gctx; - ctx->cnt_summed.curobjs += tcnt.curobjs; - ctx->cnt_summed.curbytes += tcnt.curbytes; - if (opt_prof_accum) { - ctx->cnt_summed.accumobjs += tcnt.accumobjs; - ctx->cnt_summed.accumbytes += tcnt.accumbytes; + /* + * Standard tree iteration won't work here, because as soon as we + * decrement gctx->nlimbo and unlock gctx, another thread can + * concurrently destroy it, which will corrupt the tree. Therefore, + * tear down the tree one node at a time during iteration. 
+ */ + while ((gctx = gctx_tree_first(gctxs)) != NULL) { + gctx_tree_remove(gctxs, gctx); + malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock); + { + prof_tctx_t *next; + + next = NULL; + do { + prof_tctx_t *to_destroy = + tctx_tree_iter(&gctx->tctxs, next, + prof_tctx_finish_iter, + (void *)tsd_tsdn(tsd)); + if (to_destroy != NULL) { + next = tctx_tree_next(&gctx->tctxs, + to_destroy); + tctx_tree_remove(&gctx->tctxs, + to_destroy); + idalloctm(tsd_tsdn(tsd), to_destroy, + NULL, NULL, true, true); + } else { + next = NULL; + } + } while (next != NULL); + } + gctx->nlimbo--; + if (prof_gctx_should_destroy(gctx)) { + gctx->nlimbo++; + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); + prof_gctx_try_destroy(tsd, tdata, gctx, tdata); + } else { + malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock); } } +} - if (ctx->cnt_summed.curobjs != 0) - (*leak_nctx)++; +struct prof_tdata_merge_iter_arg_s { + tsdn_t *tsdn; + prof_cnt_t cnt_all; +}; + +static prof_tdata_t * +prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *opaque) { + struct prof_tdata_merge_iter_arg_s *arg = + (struct prof_tdata_merge_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, tdata->lock); + if (!tdata->expired) { + size_t tabind; + union { + prof_tctx_t *p; + void *v; + } tctx; + + tdata->dumping = true; + memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t)); + for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL, + &tctx.v);) { + prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata); + } - /* Add to cnt_all. */ - cnt_all->curobjs += ctx->cnt_summed.curobjs; - cnt_all->curbytes += ctx->cnt_summed.curbytes; - if (opt_prof_accum) { - cnt_all->accumobjs += ctx->cnt_summed.accumobjs; - cnt_all->accumbytes += ctx->cnt_summed.accumbytes; + arg->cnt_all.curobjs += tdata->cnt_summed.curobjs; + arg->cnt_all.curbytes += tdata->cnt_summed.curbytes; + if (opt_prof_accum) { + arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs; + arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes; + } + } else { + tdata->dumping = false; } + malloc_mutex_unlock(arg->tsdn, tdata->lock); - malloc_mutex_unlock(ctx->lock); + return NULL; } -static bool -prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all) -{ - - if (opt_lg_prof_sample == 0) { - if (prof_dump_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heapprofile\n", - cnt_all->curobjs, cnt_all->curbytes, - cnt_all->accumobjs, cnt_all->accumbytes)) - return (true); - } else { - if (prof_dump_printf(propagate_err, - "heap profile: %"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @ heap_v2/%"PRIu64"\n", - cnt_all->curobjs, cnt_all->curbytes, - cnt_all->accumobjs, cnt_all->accumbytes, - ((uint64_t)1U << opt_lg_prof_sample))) - return (true); +static prof_tdata_t * +prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + bool propagate_err = *(bool *)arg; + + if (!tdata->dumping) { + return NULL; } - return (false); + if (prof_dump_printf(propagate_err, + " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n", + tdata->thr_uid, tdata->cnt_summed.curobjs, + tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs, + tdata->cnt_summed.accumbytes, + (tdata->thread_name != NULL) ? " " : "", + (tdata->thread_name != NULL) ? 
tdata->thread_name : "")) { + return tdata; + } + return NULL; } -static void -prof_dump_ctx_cleanup_locked(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql) -{ - - ctx->nlimbo--; - ql_remove(ctx_ql, ctx, dump_link); -} +static bool +prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err, + const prof_cnt_t *cnt_all) { + bool ret; -static void -prof_dump_ctx_cleanup(prof_ctx_t *ctx, prof_ctx_list_t *ctx_ql) -{ + if (prof_dump_printf(propagate_err, + "heap_v2/%"FMTu64"\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs, + cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) { + return true; + } - malloc_mutex_lock(ctx->lock); - prof_dump_ctx_cleanup_locked(ctx, ctx_ql); - malloc_mutex_unlock(ctx->lock); + malloc_mutex_lock(tsdn, &tdatas_mtx); + ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, + (void *)&propagate_err) != NULL); + malloc_mutex_unlock(tsdn, &tdatas_mtx); + return ret; } +prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl; static bool -prof_dump_ctx(bool propagate_err, prof_ctx_t *ctx, const prof_bt_t *bt, - prof_ctx_list_t *ctx_ql) -{ +prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx, + const prof_bt_t *bt, prof_gctx_tree_t *gctxs) { bool ret; unsigned i; + struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg; cassert(config_prof); - - /* - * Current statistics can sum to 0 as a result of unmerged per thread - * statistics. Additionally, interval- and growth-triggered dumps can - * occur between the time a ctx is created and when its statistics are - * filled in. Avoid dumping any ctx that is an artifact of either - * implementation detail. - */ - malloc_mutex_lock(ctx->lock); - if ((opt_prof_accum == false && ctx->cnt_summed.curobjs == 0) || - (opt_prof_accum && ctx->cnt_summed.accumobjs == 0)) { - assert(ctx->cnt_summed.curobjs == 0); - assert(ctx->cnt_summed.curbytes == 0); - assert(ctx->cnt_summed.accumobjs == 0); - assert(ctx->cnt_summed.accumbytes == 0); + malloc_mutex_assert_owner(tsdn, gctx->lock); + + /* Avoid dumping such gctx's that have no useful data. 
*/ + if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) || + (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) { + assert(gctx->cnt_summed.curobjs == 0); + assert(gctx->cnt_summed.curbytes == 0); + assert(gctx->cnt_summed.accumobjs == 0); + assert(gctx->cnt_summed.accumbytes == 0); ret = false; goto label_return; } - if (prof_dump_printf(propagate_err, "%"PRId64": %"PRId64 - " [%"PRIu64": %"PRIu64"] @", - ctx->cnt_summed.curobjs, ctx->cnt_summed.curbytes, - ctx->cnt_summed.accumobjs, ctx->cnt_summed.accumbytes)) { + if (prof_dump_printf(propagate_err, "@")) { ret = true; goto label_return; } - for (i = 0; i < bt->len; i++) { - if (prof_dump_printf(propagate_err, " %#"PRIxPTR, + if (prof_dump_printf(propagate_err, " %#"FMTxPTR, (uintptr_t)bt->vec[i])) { ret = true; goto label_return; } } - if (prof_dump_write(propagate_err, "\n")) { + if (prof_dump_printf(propagate_err, + "\n" + " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n", + gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes, + gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) { + ret = true; + goto label_return; + } + + prof_tctx_dump_iter_arg.tsdn = tsdn; + prof_tctx_dump_iter_arg.propagate_err = propagate_err; + if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, + (void *)&prof_tctx_dump_iter_arg) != NULL) { ret = true; goto label_return; } ret = false; label_return: - prof_dump_ctx_cleanup_locked(ctx, ctx_ql); - malloc_mutex_unlock(ctx->lock); - return (ret); + return ret; +} + +#ifndef _WIN32 +JEMALLOC_FORMAT_PRINTF(1, 2) +static int +prof_open_maps(const char *format, ...) { + int mfd; + va_list ap; + char filename[PATH_MAX + 1]; + + va_start(ap, format); + malloc_vsnprintf(filename, sizeof(filename), format, ap); + va_end(ap); + mfd = open(filename, O_RDONLY | O_CLOEXEC); + + return mfd; +} +#endif + +static int +prof_getpid(void) { +#ifdef _WIN32 + return GetCurrentProcessId(); +#else + return getpid(); +#endif } static bool -prof_dump_maps(bool propagate_err) -{ +prof_dump_maps(bool propagate_err) { bool ret; int mfd; - char filename[PATH_MAX + 1]; cassert(config_prof); #ifdef __FreeBSD__ - malloc_snprintf(filename, sizeof(filename), "/proc/curproc/map"); + mfd = prof_open_maps("/proc/curproc/map"); +#elif defined(_WIN32) + mfd = -1; // Not implemented #else - malloc_snprintf(filename, sizeof(filename), "/proc/%d/maps", - (int)getpid()); + { + int pid = prof_getpid(); + + mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid); + if (mfd == -1) { + mfd = prof_open_maps("/proc/%d/maps", pid); + } + } #endif - mfd = open(filename, O_RDONLY); if (mfd != -1) { ssize_t nread; @@ -971,214 +1473,391 @@ prof_dump_maps(bool propagate_err) ret = false; label_return: - if (mfd != -1) + if (mfd != -1) { close(mfd); - return (ret); + } + return ret; } +/* + * See prof_sample_threshold_update() comment for why the body of this function + * is conditionally compiled. + */ static void -prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_nctx, - const char *filename) -{ - +prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx, + const char *filename) { +#ifdef JEMALLOC_PROF + /* + * Scaling is equivalent AdjustSamples() in jeprof, but the result may + * differ slightly from what jeprof reports, because here we scale the + * summary values, whereas jeprof scales each context individually and + * reports the sums of the scaled values. 
+ */ if (cnt_all->curbytes != 0) { - malloc_printf("<jemalloc>: Leak summary: %"PRId64" byte%s, %" - PRId64" object%s, %zu context%s\n", - cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "", - cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "", - leak_nctx, (leak_nctx != 1) ? "s" : ""); + double sample_period = (double)((uint64_t)1 << lg_prof_sample); + double ratio = (((double)cnt_all->curbytes) / + (double)cnt_all->curobjs) / sample_period; + double scale_factor = 1.0 / (1.0 - exp(-ratio)); + uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes) + * scale_factor); + uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) * + scale_factor); + + malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64 + " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n", + curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs != + 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : ""); malloc_printf( - "<jemalloc>: Run pprof on \"%s\" for leak detail\n", + "<jemalloc>: Run jeprof on \"%s\" for leak detail\n", filename); } +#endif } -static bool -prof_dump(bool propagate_err, const char *filename, bool leakcheck) -{ - prof_tdata_t *prof_tdata; - prof_cnt_t cnt_all; +struct prof_gctx_dump_iter_arg_s { + tsdn_t *tsdn; + bool propagate_err; +}; + +static prof_gctx_t * +prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) { + prof_gctx_t *ret; + struct prof_gctx_dump_iter_arg_s *arg = + (struct prof_gctx_dump_iter_arg_s *)opaque; + + malloc_mutex_lock(arg->tsdn, gctx->lock); + + if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt, + gctxs)) { + ret = gctx; + goto label_return; + } + + ret = NULL; +label_return: + malloc_mutex_unlock(arg->tsdn, gctx->lock); + return ret; +} + +static void +prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + prof_gctx_tree_t *gctxs) { size_t tabind; union { - prof_ctx_t *p; + prof_gctx_t *p; void *v; - } ctx; - size_t leak_nctx; - prof_ctx_list_t ctx_ql; - - cassert(config_prof); + } gctx; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) - return (true); + prof_enter(tsd, tdata); - malloc_mutex_lock(&prof_dump_mtx); + /* + * Put gctx's in limbo and clear their counters in preparation for + * summing. + */ + gctx_tree_new(gctxs); + for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) { + prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs); + } - /* Merge per thread profile stats, and sum them in cnt_all. */ - memset(&cnt_all, 0, sizeof(prof_cnt_t)); - leak_nctx = 0; - ql_new(&ctx_ql); - prof_enter(prof_tdata); - for (tabind = 0; ckh_iter(&bt2ctx, &tabind, NULL, &ctx.v) == false;) - prof_dump_ctx_prep(ctx.p, &cnt_all, &leak_nctx, &ctx_ql); - prof_leave(prof_tdata); + /* + * Iterate over tdatas, and for the non-expired ones snapshot their tctx + * stats and merge them into the associated gctx's. + */ + prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd); + memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t)); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, + (void *)prof_tdata_merge_iter_arg); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + /* Merge tctx stats into gctx's. 
*/ + prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_merge_iter_arg->leak_ngctx = 0; + gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter, + (void *)prof_gctx_merge_iter_arg); + + prof_leave(tsd, tdata); +} +static bool +prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck, prof_tdata_t *tdata, + struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg, + struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg, + struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg, + prof_gctx_tree_t *gctxs) { /* Create dump file. */ - if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) - goto label_open_close_error; + if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) { + return true; + } /* Dump profile header. */ - if (prof_dump_header(propagate_err, &cnt_all)) + if (prof_dump_header(tsd_tsdn(tsd), propagate_err, + &prof_tdata_merge_iter_arg->cnt_all)) { goto label_write_error; + } - /* Dump per ctx profile stats. */ - while ((ctx.p = ql_first(&ctx_ql)) != NULL) { - if (prof_dump_ctx(propagate_err, ctx.p, ctx.p->bt, &ctx_ql)) - goto label_write_error; + /* Dump per gctx profile stats. */ + prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd); + prof_gctx_dump_iter_arg->propagate_err = propagate_err; + if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter, + (void *)prof_gctx_dump_iter_arg) != NULL) { + goto label_write_error; } /* Dump /proc/<pid>/maps if possible. */ - if (prof_dump_maps(propagate_err)) + if (prof_dump_maps(propagate_err)) { goto label_write_error; + } - if (prof_dump_close(propagate_err)) - goto label_open_close_error; - - malloc_mutex_unlock(&prof_dump_mtx); - - if (leakcheck) - prof_leakcheck(&cnt_all, leak_nctx, filename); + if (prof_dump_close(propagate_err)) { + return true; + } - return (false); + return false; label_write_error: prof_dump_close(propagate_err); -label_open_close_error: - while ((ctx.p = ql_first(&ctx_ql)) != NULL) - prof_dump_ctx_cleanup(ctx.p, &ctx_ql); - malloc_mutex_unlock(&prof_dump_mtx); - return (true); + return true; } -#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) -#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) -static void -prof_dump_filename(char *filename, char v, int64_t vseq) -{ +static bool +prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, + bool leakcheck) { + cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); + + prof_tdata_t * tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + + pre_reentrancy(tsd, NULL); + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + + prof_gctx_tree_t gctxs; + struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg; + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata, + &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg, + &prof_gctx_dump_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); + post_reentrancy(tsd); + + if (err) { + return true; + } + + if (leakcheck) { + prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all, + prof_gctx_merge_iter_arg.leak_ngctx, filename); + } + return false; +} + +#ifdef JEMALLOC_JET +void +prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs, + uint64_t *accumbytes) { + tsd_t *tsd; + prof_tdata_t *tdata; + struct 
prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg; + struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg; + prof_gctx_tree_t gctxs; + + tsd = tsd_fetch(); + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { + if (curobjs != NULL) { + *curobjs = 0; + } + if (curbytes != NULL) { + *curbytes = 0; + } + if (accumobjs != NULL) { + *accumobjs = 0; + } + if (accumbytes != NULL) { + *accumbytes = 0; + } + return; + } + + prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg, + &prof_gctx_merge_iter_arg, &gctxs); + prof_gctx_finish(tsd, &gctxs); + + if (curobjs != NULL) { + *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs; + } + if (curbytes != NULL) { + *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes; + } + if (accumobjs != NULL) { + *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs; + } + if (accumbytes != NULL) { + *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes; + } +} +#endif +#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1) +#define VSEQ_INVALID UINT64_C(0xffffffffffffffff) +static void +prof_dump_filename(char *filename, char v, uint64_t vseq) { cassert(config_prof); if (vseq != VSEQ_INVALID) { /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"PRIu64".%c%"PRId64".heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq); + "%s.%d.%"FMTu64".%c%"FMTu64".heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq); } else { /* "<prefix>.<pid>.<seq>.<v>.heap" */ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE, - "%s.%d.%"PRIu64".%c.heap", - opt_prof_prefix, (int)getpid(), prof_dump_seq, v); + "%s.%d.%"FMTu64".%c.heap", + opt_prof_prefix, prof_getpid(), prof_dump_seq, v); } prof_dump_seq++; } static void -prof_fdump(void) -{ +prof_fdump(void) { + tsd_t *tsd; char filename[DUMP_FILENAME_BUFSIZE]; cassert(config_prof); + assert(opt_prof_final); + assert(opt_prof_prefix[0] != '\0'); - if (prof_booted == false) + if (!prof_booted) { return; + } + tsd = tsd_fetch(); + assert(tsd_reentrancy_level_get(tsd) == 0); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump_filename(filename, 'f', VSEQ_INVALID); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, opt_prof_leak); +} - if (opt_prof_final && opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); - prof_dump_filename(filename, 'f', VSEQ_INVALID); - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, opt_prof_leak); +bool +prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) { + cassert(config_prof); + +#ifndef JEMALLOC_ATOMIC_U64 + if (malloc_mutex_init(&prof_accum->mtx, "prof_accum", + WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) { + return true; } + prof_accum->accumbytes = 0; +#else + atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED); +#endif + return false; } void -prof_idump(void) -{ - prof_tdata_t *prof_tdata; - char filename[PATH_MAX + 1]; +prof_idump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; cassert(config_prof); - if (prof_booted == false) + if (!prof_booted || tsdn_null(tsdn)) { + return; + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { return; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) + } + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { return; - if (prof_tdata->enq) { - prof_tdata->enq_idump = true; + } + if (tdata->enq) { + tdata->enq_idump = true; return; } if (opt_prof_prefix[0] != '\0') { - 
malloc_mutex_lock(&prof_dump_seq_mtx); + char filename[PATH_MAX + 1]; + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename, 'i', prof_dump_iseq); prof_dump_iseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, false); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); } } bool -prof_mdump(const char *filename) -{ - char filename_buf[DUMP_FILENAME_BUFSIZE]; - +prof_mdump(tsd_t *tsd, const char *filename) { cassert(config_prof); + assert(tsd_reentrancy_level_get(tsd) == 0); - if (opt_prof == false || prof_booted == false) - return (true); - + if (!opt_prof || !prof_booted) { + return true; + } + char filename_buf[DUMP_FILENAME_BUFSIZE]; if (filename == NULL) { /* No filename specified, so automatically generate one. */ - if (opt_prof_prefix[0] == '\0') - return (true); - malloc_mutex_lock(&prof_dump_seq_mtx); + if (opt_prof_prefix[0] == '\0') { + return true; + } + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx); prof_dump_filename(filename_buf, 'm', prof_dump_mseq); prof_dump_mseq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx); filename = filename_buf; } - return (prof_dump(true, filename, false)); + return prof_dump(tsd, true, filename, false); } void -prof_gdump(void) -{ - prof_tdata_t *prof_tdata; - char filename[DUMP_FILENAME_BUFSIZE]; +prof_gdump(tsdn_t *tsdn) { + tsd_t *tsd; + prof_tdata_t *tdata; cassert(config_prof); - if (prof_booted == false) + if (!prof_booted || tsdn_null(tsdn)) { + return; + } + tsd = tsdn_tsd(tsdn); + if (tsd_reentrancy_level_get(tsd) > 0) { return; - prof_tdata = prof_tdata_get(false); - if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) + } + + tdata = prof_tdata_get(tsd, false); + if (tdata == NULL) { return; - if (prof_tdata->enq) { - prof_tdata->enq_gdump = true; + } + if (tdata->enq) { + tdata->enq_gdump = true; return; } if (opt_prof_prefix[0] != '\0') { - malloc_mutex_lock(&prof_dump_seq_mtx); + char filename[DUMP_FILENAME_BUFSIZE]; + malloc_mutex_lock(tsdn, &prof_dump_seq_mtx); prof_dump_filename(filename, 'u', prof_dump_useq); prof_dump_useq++; - malloc_mutex_unlock(&prof_dump_seq_mtx); - prof_dump(false, filename, false); + malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx); + prof_dump(tsd, false, filename, false); } } static void -prof_bt_hash(const void *key, size_t r_hash[2]) -{ +prof_bt_hash(const void *key, size_t r_hash[2]) { prof_bt_t *bt = (prof_bt_t *)key; cassert(config_prof); @@ -1187,106 +1866,389 @@ prof_bt_hash(const void *key, size_t r_hash[2]) } static bool -prof_bt_keycomp(const void *k1, const void *k2) -{ +prof_bt_keycomp(const void *k1, const void *k2) { const prof_bt_t *bt1 = (prof_bt_t *)k1; const prof_bt_t *bt2 = (prof_bt_t *)k2; cassert(config_prof); - if (bt1->len != bt2->len) - return (false); + if (bt1->len != bt2->len) { + return false; + } return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0); } -prof_tdata_t * -prof_tdata_init(void) -{ - prof_tdata_t *prof_tdata; +static uint64_t +prof_thr_uid_alloc(tsdn_t *tsdn) { + uint64_t thr_uid; + + malloc_mutex_lock(tsdn, &next_thr_uid_mtx); + thr_uid = next_thr_uid; + next_thr_uid++; + malloc_mutex_unlock(tsdn, &next_thr_uid_mtx); + + return thr_uid; +} + +static prof_tdata_t * +prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim, + char *thread_name, bool active) { + prof_tdata_t *tdata; cassert(config_prof); /* Initialize an empty cache for this thread. 
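The bt2tctx hash created below maps this thread's backtraces to its per-thread tctx counter objects.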
*/ - prof_tdata = (prof_tdata_t *)imalloc(sizeof(prof_tdata_t)); - if (prof_tdata == NULL) - return (NULL); + tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t), + sz_size2index(sizeof(prof_tdata_t)), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (tdata == NULL) { + return NULL; + } - if (ckh_new(&prof_tdata->bt2cnt, PROF_CKH_MINITEMS, - prof_bt_hash, prof_bt_keycomp)) { - idalloc(prof_tdata); - return (NULL); + tdata->lock = prof_tdata_mutex_choose(thr_uid); + tdata->thr_uid = thr_uid; + tdata->thr_discrim = thr_discrim; + tdata->thread_name = thread_name; + tdata->attached = true; + tdata->expired = false; + tdata->tctx_uid_next = 0; + + if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); + return NULL; } - ql_new(&prof_tdata->lru_ql); - prof_tdata->vec = imalloc(sizeof(void *) * PROF_BT_MAX); - if (prof_tdata->vec == NULL) { - ckh_delete(&prof_tdata->bt2cnt); - idalloc(prof_tdata); - return (NULL); + tdata->prng_state = (uint64_t)(uintptr_t)tdata; + prof_sample_threshold_update(tdata); + + tdata->enq = false; + tdata->enq_idump = false; + tdata->enq_gdump = false; + + tdata->dumping = false; + tdata->active = active; + + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + tdata_tree_insert(&tdatas, tdata); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + + return tdata; +} + +prof_tdata_t * +prof_tdata_init(tsd_t *tsd) { + return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0, + NULL, prof_thread_active_init_get(tsd_tsdn(tsd))); +} + +static bool +prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) { + if (tdata->attached && !even_if_attached) { + return false; } + if (ckh_count(&tdata->bt2tctx) != 0) { + return false; + } + return true; +} + +static bool +prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsdn, tdata->lock); - prof_tdata->prng_state = 0; - prof_tdata->threshold = 0; - prof_tdata->accum = 0; + return prof_tdata_should_destroy_unlocked(tdata, even_if_attached); +} + +static void +prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata, + bool even_if_attached) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx); - prof_tdata->enq = false; - prof_tdata->enq_idump = false; - prof_tdata->enq_gdump = false; + tdata_tree_remove(&tdatas, tdata); - prof_tdata_tsd_set(&prof_tdata); + assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached)); - return (prof_tdata); + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + } + ckh_delete(tsd, &tdata->bt2tctx); + idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true); } -void -prof_tdata_cleanup(void *arg) -{ - prof_thr_cnt_t *cnt; - prof_tdata_t *prof_tdata = *(prof_tdata_t **)arg; +static void +prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) { + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + prof_tdata_destroy_locked(tsd, tdata, even_if_attached); + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); +} - cassert(config_prof); +static void +prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) { + bool destroy_tdata; - if (prof_tdata == PROF_TDATA_STATE_REINCARNATED) { - /* - * Another destructor deallocated memory after this destructor - * was called. Reset prof_tdata to PROF_TDATA_STATE_PURGATORY - * in order to receive another callback. 
- */ - prof_tdata = PROF_TDATA_STATE_PURGATORY; - prof_tdata_tsd_set(&prof_tdata); - } else if (prof_tdata == PROF_TDATA_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to PROF_TDATA_STATE_PURGATORY so that other destructors - * wouldn't cause re-creation of the prof_tdata. This time, do - * nothing, so that the destructor will not be called again. - */ - } else if (prof_tdata != NULL) { - /* - * Delete the hash table. All of its contents can still be - * iterated over via the LRU. - */ - ckh_delete(&prof_tdata->bt2cnt); + malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock); + if (tdata->attached) { + destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, + true); /* - * Iteratively merge cnt's into the global stats and delete - * them. + * Only detach if !destroy_tdata, because detaching would allow + * another thread to win the race to destroy tdata. */ - while ((cnt = ql_last(&prof_tdata->lru_ql, lru_link)) != NULL) { - ql_remove(&prof_tdata->lru_ql, cnt, lru_link); - prof_ctx_merge(cnt->ctx, cnt); - idalloc(cnt); + if (!destroy_tdata) { + tdata->attached = false; } - idalloc(prof_tdata->vec); - idalloc(prof_tdata); - prof_tdata = PROF_TDATA_STATE_PURGATORY; - prof_tdata_tsd_set(&prof_tdata); + tsd_prof_tdata_set(tsd, NULL); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock); + if (destroy_tdata) { + prof_tdata_destroy(tsd, tdata, true); } } +prof_tdata_t * +prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) { + uint64_t thr_uid = tdata->thr_uid; + uint64_t thr_discrim = tdata->thr_discrim + 1; + char *thread_name = (tdata->thread_name != NULL) ? + prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL; + bool active = tdata->active; + + prof_tdata_detach(tsd, tdata); + return prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name, + active); +} + +static bool +prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) { + bool destroy_tdata; + + malloc_mutex_lock(tsdn, tdata->lock); + if (!tdata->expired) { + tdata->expired = true; + destroy_tdata = tdata->attached ? false : + prof_tdata_should_destroy(tsdn, tdata, false); + } else { + destroy_tdata = false; + } + malloc_mutex_unlock(tsdn, tdata->lock); + + return destroy_tdata; +} + +static prof_tdata_t * +prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, + void *arg) { + tsdn_t *tsdn = (tsdn_t *)arg; + + return (prof_tdata_expire(tsdn, tdata) ? 
tdata : NULL); +} + void -prof_boot0(void) -{ +prof_reset(tsd_t *tsd, size_t lg_sample) { + prof_tdata_t *next; + + assert(lg_sample < (sizeof(uint64_t) << 3)); + + malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx); + malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx); + + lg_prof_sample = lg_sample; + + next = NULL; + do { + prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next, + prof_tdata_reset_iter, (void *)tsd); + if (to_destroy != NULL) { + next = tdata_tree_next(&tdatas, to_destroy); + prof_tdata_destroy_locked(tsd, to_destroy, false); + } else { + next = NULL; + } + } while (next != NULL); + + malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx); + malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx); +} + +void +prof_tdata_cleanup(tsd_t *tsd) { + prof_tdata_t *tdata; + + if (!config_prof) { + return; + } + + tdata = tsd_prof_tdata_get(tsd); + if (tdata != NULL) { + prof_tdata_detach(tsd, tdata); + } +} + +bool +prof_active_get(tsdn_t *tsdn) { + bool prof_active_current; + + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_current = prof_active; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_current; +} +bool +prof_active_set(tsdn_t *tsdn, bool active) { + bool prof_active_old; + + malloc_mutex_lock(tsdn, &prof_active_mtx); + prof_active_old = prof_active; + prof_active = active; + malloc_mutex_unlock(tsdn, &prof_active_mtx); + return prof_active_old; +} + +const char * +prof_thread_name_get(tsd_t *tsd) { + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return ""; + } + return (tdata->thread_name != NULL ? tdata->thread_name : ""); +} + +static char * +prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) { + char *ret; + size_t size; + + if (thread_name == NULL) { + return NULL; + } + + size = strlen(thread_name) + 1; + if (size == 1) { + return ""; + } + + ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true, + arena_get(TSDN_NULL, 0, true), true); + if (ret == NULL) { + return NULL; + } + memcpy(ret, thread_name, size); + return ret; +} + +int +prof_thread_name_set(tsd_t *tsd, const char *thread_name) { + prof_tdata_t *tdata; + unsigned i; + char *s; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return EAGAIN; + } + + /* Validate input. 
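Only printable and blank characters are accepted in a thread name.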
*/ + if (thread_name == NULL) { + return EFAULT; + } + for (i = 0; thread_name[i] != '\0'; i++) { + char c = thread_name[i]; + if (!isgraph(c) && !isblank(c)) { + return EFAULT; + } + } + + s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name); + if (s == NULL) { + return EAGAIN; + } + + if (tdata->thread_name != NULL) { + idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true, + true); + tdata->thread_name = NULL; + } + if (strlen(s) > 0) { + tdata->thread_name = s; + } + return 0; +} + +bool +prof_thread_active_get(tsd_t *tsd) { + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return false; + } + return tdata->active; +} + +bool +prof_thread_active_set(tsd_t *tsd, bool active) { + prof_tdata_t *tdata; + + tdata = prof_tdata_get(tsd, true); + if (tdata == NULL) { + return true; + } + tdata->active = active; + return false; +} + +bool +prof_thread_active_init_get(tsdn_t *tsdn) { + bool active_init; + + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); + active_init = prof_thread_active_init; + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init; +} + +bool +prof_thread_active_init_set(tsdn_t *tsdn, bool active_init) { + bool active_init_old; + + malloc_mutex_lock(tsdn, &prof_thread_active_init_mtx); + active_init_old = prof_thread_active_init; + prof_thread_active_init = active_init; + malloc_mutex_unlock(tsdn, &prof_thread_active_init_mtx); + return active_init_old; +} + +bool +prof_gdump_get(tsdn_t *tsdn) { + bool prof_gdump_current; + + malloc_mutex_lock(tsdn, &prof_gdump_mtx); + prof_gdump_current = prof_gdump_val; + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_current; +} + +bool +prof_gdump_set(tsdn_t *tsdn, bool gdump) { + bool prof_gdump_old; + + malloc_mutex_lock(tsdn, &prof_gdump_mtx); + prof_gdump_old = prof_gdump_val; + prof_gdump_val = gdump; + malloc_mutex_unlock(tsdn, &prof_gdump_mtx); + return prof_gdump_old; +} + +void +prof_boot0(void) { cassert(config_prof); memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT, @@ -1294,17 +2256,15 @@ prof_boot0(void) } void -prof_boot1(void) -{ - +prof_boot1(void) { cassert(config_prof); /* - * opt_prof and prof_promote must be in their final state before any - * arenas are initialized, so this function must be executed early. + * opt_prof must be in its final state before any arenas are + * initialized, so this function must be executed early. */ - if (opt_prof_leak && opt_prof == false) { + if (opt_prof_leak && !opt_prof) { /* * Enable opt_prof, but in such a way that profiles are never * automatically dumped. 
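For context (illustrative, not part of this patch): prof_mdump() above backs jemalloc's documented "prof.dump" mallctl, so once profiling is compiled in and enabled (e.g. MALLOC_CONF=prof:true) an application can request a manual dump roughly as sketched here. The filename is hypothetical, and the public symbol may carry a je_ prefix depending on how this bundled copy is configured:

    #include <jemalloc/jemalloc.h>

    static void
    dump_heap_profile(void) {
        const char *filename = "app.heap"; /* hypothetical output path */

        /* Passing newp=NULL/newlen=0 instead makes jemalloc generate a
         * "<prefix>.<pid>.<seq>.m<mseq>.heap" name via prof_dump_filename(). */
        if (mallctl("prof.dump", NULL, NULL, &filename,
            sizeof(filename)) != 0) {
            /* Profiling is inactive, or the dump failed. */
        }
    }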
@@ -1317,48 +2277,101 @@ prof_boot1(void) opt_lg_prof_interval); } } - - prof_promote = (opt_prof && opt_lg_prof_sample > LG_PAGE); } bool -prof_boot2(void) -{ - +prof_boot2(tsd_t *tsd) { cassert(config_prof); if (opt_prof) { unsigned i; - if (ckh_new(&bt2ctx, PROF_CKH_MINITEMS, prof_bt_hash, - prof_bt_keycomp)) - return (true); - if (malloc_mutex_init(&bt2ctx_mtx)) - return (true); - if (prof_tdata_tsd_boot()) { - malloc_write( - "<jemalloc>: Error in pthread_key_create()\n"); - abort(); + lg_prof_sample = opt_lg_prof_sample; + + prof_active = opt_prof_active; + if (malloc_mutex_init(&prof_active_mtx, "prof_active", + WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) { + return true; + } + + prof_gdump_val = opt_prof_gdump; + if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump", + WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) { + return true; + } + + prof_thread_active_init = opt_prof_thread_active_init; + if (malloc_mutex_init(&prof_thread_active_init_mtx, + "prof_thread_active_init", + WITNESS_RANK_PROF_THREAD_ACTIVE_INIT, + malloc_mutex_rank_exclusive)) { + return true; + } + + if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash, + prof_bt_keycomp)) { + return true; + } + if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx", + WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) { + return true; + } + + tdata_tree_new(&tdatas); + if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas", + WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) { + return true; } - if (malloc_mutex_init(&prof_dump_seq_mtx)) - return (true); - if (malloc_mutex_init(&prof_dump_mtx)) - return (true); + next_thr_uid = 0; + if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid", + WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) { + return true; + } + + if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq", + WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) { + return true; + } + if (malloc_mutex_init(&prof_dump_mtx, "prof_dump", + WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) { + return true; + } - if (atexit(prof_fdump) != 0) { + if (opt_prof_final && opt_prof_prefix[0] != '\0' && + atexit(prof_fdump) != 0) { malloc_write("<jemalloc>: Error in atexit()\n"); - if (opt_abort) + if (opt_abort) { abort(); + } } - ctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS * - sizeof(malloc_mutex_t)); - if (ctx_locks == NULL) - return (true); + gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (gctx_locks == NULL) { + return true; + } for (i = 0; i < PROF_NCTX_LOCKS; i++) { - if (malloc_mutex_init(&ctx_locks[i])) - return (true); + if (malloc_mutex_init(&gctx_locks[i], "prof_gctx", + WITNESS_RANK_PROF_GCTX, + malloc_mutex_rank_exclusive)) { + return true; + } + } + + tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), + b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), + CACHELINE); + if (tdata_locks == NULL) { + return true; + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + if (malloc_mutex_init(&tdata_locks[i], "prof_tdata", + WITNESS_RANK_PROF_TDATA, + malloc_mutex_rank_exclusive)) { + return true; + } } } @@ -1372,48 +2385,79 @@ prof_boot2(void) prof_booted = true; - return (false); + return false; } void -prof_prefork(void) -{ - - if (opt_prof) { +prof_prefork0(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - malloc_mutex_prefork(&bt2ctx_mtx); - malloc_mutex_prefork(&prof_dump_seq_mtx); - for (i = 0; i < PROF_NCTX_LOCKS; i++) - 
malloc_mutex_prefork(&ctx_locks[i]); + malloc_mutex_prefork(tsdn, &prof_dump_mtx); + malloc_mutex_prefork(tsdn, &bt2gctx_mtx); + malloc_mutex_prefork(tsdn, &tdatas_mtx); + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &tdata_locks[i]); + } + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_prefork(tsdn, &gctx_locks[i]); + } } } void -prof_postfork_parent(void) -{ +prof_prefork1(tsdn_t *tsdn) { + if (config_prof && opt_prof) { + malloc_mutex_prefork(tsdn, &prof_active_mtx); + malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx); + malloc_mutex_prefork(tsdn, &prof_gdump_mtx); + malloc_mutex_prefork(tsdn, &next_thr_uid_mtx); + malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx); + } +} - if (opt_prof) { +void +prof_postfork_parent(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_parent(&ctx_locks[i]); - malloc_mutex_postfork_parent(&prof_dump_seq_mtx); - malloc_mutex_postfork_parent(&bt2ctx_mtx); + malloc_mutex_postfork_parent(tsdn, + &prof_thread_active_init_mtx); + malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_parent(tsdn, &tdatas_mtx); + malloc_mutex_postfork_parent(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_parent(tsdn, &prof_dump_mtx); } } void -prof_postfork_child(void) -{ - - if (opt_prof) { +prof_postfork_child(tsdn_t *tsdn) { + if (config_prof && opt_prof) { unsigned i; - for (i = 0; i < PROF_NCTX_LOCKS; i++) - malloc_mutex_postfork_child(&ctx_locks[i]); - malloc_mutex_postfork_child(&prof_dump_seq_mtx); - malloc_mutex_postfork_child(&bt2ctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx); + malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx); + malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx); + malloc_mutex_postfork_child(tsdn, &prof_active_mtx); + for (i = 0; i < PROF_NCTX_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &gctx_locks[i]); + } + for (i = 0; i < PROF_NTDATA_LOCKS; i++) { + malloc_mutex_postfork_child(tsdn, &tdata_locks[i]); + } + malloc_mutex_postfork_child(tsdn, &tdatas_mtx); + malloc_mutex_postfork_child(tsdn, &bt2gctx_mtx); + malloc_mutex_postfork_child(tsdn, &prof_dump_mtx); } } diff --git a/dep/jemalloc/src/quarantine.c b/dep/jemalloc/src/quarantine.c deleted file mode 100644 index 5431511640a..00000000000 --- a/dep/jemalloc/src/quarantine.c +++ /dev/null @@ -1,199 +0,0 @@ -#define JEMALLOC_QUARANTINE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -/* - * quarantine pointers close to NULL are used to encode state information that - * is used for cleaning up during thread shutdown. - */ -#define QUARANTINE_STATE_REINCARNATED ((quarantine_t *)(uintptr_t)1) -#define QUARANTINE_STATE_PURGATORY ((quarantine_t *)(uintptr_t)2) -#define QUARANTINE_STATE_MAX QUARANTINE_STATE_PURGATORY - -/******************************************************************************/ -/* Data. */ - -malloc_tsd_data(, quarantine, quarantine_t *, NULL) - -/******************************************************************************/ -/* Function prototypes for non-inline static functions. 
*/ - -static quarantine_t *quarantine_grow(quarantine_t *quarantine); -static void quarantine_drain_one(quarantine_t *quarantine); -static void quarantine_drain(quarantine_t *quarantine, size_t upper_bound); - -/******************************************************************************/ - -quarantine_t * -quarantine_init(size_t lg_maxobjs) -{ - quarantine_t *quarantine; - - quarantine = (quarantine_t *)imalloc(offsetof(quarantine_t, objs) + - ((ZU(1) << lg_maxobjs) * sizeof(quarantine_obj_t))); - if (quarantine == NULL) - return (NULL); - quarantine->curbytes = 0; - quarantine->curobjs = 0; - quarantine->first = 0; - quarantine->lg_maxobjs = lg_maxobjs; - - quarantine_tsd_set(&quarantine); - - return (quarantine); -} - -static quarantine_t * -quarantine_grow(quarantine_t *quarantine) -{ - quarantine_t *ret; - - ret = quarantine_init(quarantine->lg_maxobjs + 1); - if (ret == NULL) { - quarantine_drain_one(quarantine); - return (quarantine); - } - - ret->curbytes = quarantine->curbytes; - ret->curobjs = quarantine->curobjs; - if (quarantine->first + quarantine->curobjs <= (ZU(1) << - quarantine->lg_maxobjs)) { - /* objs ring buffer data are contiguous. */ - memcpy(ret->objs, &quarantine->objs[quarantine->first], - quarantine->curobjs * sizeof(quarantine_obj_t)); - } else { - /* objs ring buffer data wrap around. */ - size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) - - quarantine->first; - size_t ncopy_b = quarantine->curobjs - ncopy_a; - - memcpy(ret->objs, &quarantine->objs[quarantine->first], ncopy_a - * sizeof(quarantine_obj_t)); - memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b * - sizeof(quarantine_obj_t)); - } - idalloc(quarantine); - - return (ret); -} - -static void -quarantine_drain_one(quarantine_t *quarantine) -{ - quarantine_obj_t *obj = &quarantine->objs[quarantine->first]; - assert(obj->usize == isalloc(obj->ptr, config_prof)); - idalloc(obj->ptr); - quarantine->curbytes -= obj->usize; - quarantine->curobjs--; - quarantine->first = (quarantine->first + 1) & ((ZU(1) << - quarantine->lg_maxobjs) - 1); -} - -static void -quarantine_drain(quarantine_t *quarantine, size_t upper_bound) -{ - - while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0) - quarantine_drain_one(quarantine); -} - -void -quarantine(void *ptr) -{ - quarantine_t *quarantine; - size_t usize = isalloc(ptr, config_prof); - - cassert(config_fill); - assert(opt_quarantine); - - quarantine = *quarantine_tsd_get(); - if ((uintptr_t)quarantine <= (uintptr_t)QUARANTINE_STATE_MAX) { - if (quarantine == QUARANTINE_STATE_PURGATORY) { - /* - * Make a note that quarantine() was called after - * quarantine_cleanup() was called. - */ - quarantine = QUARANTINE_STATE_REINCARNATED; - quarantine_tsd_set(&quarantine); - } - idalloc(ptr); - return; - } - /* - * Drain one or more objects if the quarantine size limit would be - * exceeded by appending ptr. - */ - if (quarantine->curbytes + usize > opt_quarantine) { - size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine - - usize : 0; - quarantine_drain(quarantine, upper_bound); - } - /* Grow the quarantine ring buffer if it's full. */ - if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs)) - quarantine = quarantine_grow(quarantine); - /* quarantine_grow() must free a slot if it fails to grow. */ - assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs)); - /* Append ptr if its size doesn't exceed the quarantine size. 
*/ - if (quarantine->curbytes + usize <= opt_quarantine) { - size_t offset = (quarantine->first + quarantine->curobjs) & - ((ZU(1) << quarantine->lg_maxobjs) - 1); - quarantine_obj_t *obj = &quarantine->objs[offset]; - obj->ptr = ptr; - obj->usize = usize; - quarantine->curbytes += usize; - quarantine->curobjs++; - if (config_fill && opt_junk) { - /* - * Only do redzone validation if Valgrind isn't in - * operation. - */ - if ((config_valgrind == false || opt_valgrind == false) - && usize <= SMALL_MAXCLASS) - arena_quarantine_junk_small(ptr, usize); - else - memset(ptr, 0x5a, usize); - } - } else { - assert(quarantine->curbytes == 0); - idalloc(ptr); - } -} - -void -quarantine_cleanup(void *arg) -{ - quarantine_t *quarantine = *(quarantine_t **)arg; - - if (quarantine == QUARANTINE_STATE_REINCARNATED) { - /* - * Another destructor deallocated memory after this destructor - * was called. Reset quarantine to QUARANTINE_STATE_PURGATORY - * in order to receive another callback. - */ - quarantine = QUARANTINE_STATE_PURGATORY; - quarantine_tsd_set(&quarantine); - } else if (quarantine == QUARANTINE_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to QUARANTINE_STATE_PURGATORY so that other destructors - * wouldn't cause re-creation of the quarantine. This time, do - * nothing, so that the destructor will not be called again. - */ - } else if (quarantine != NULL) { - quarantine_drain(quarantine, 0); - idalloc(quarantine); - quarantine = QUARANTINE_STATE_PURGATORY; - quarantine_tsd_set(&quarantine); - } -} - -bool -quarantine_boot(void) -{ - - cassert(config_fill); - - if (quarantine_tsd_boot()) - return (true); - - return (false); -} diff --git a/dep/jemalloc/src/rtree.c b/dep/jemalloc/src/rtree.c index 205957ac4e1..53702cf7236 100644 --- a/dep/jemalloc/src/rtree.c +++ b/dep/jemalloc/src/rtree.c @@ -1,105 +1,320 @@ -#define JEMALLOC_RTREE_C_ -#include "jemalloc/internal/jemalloc_internal.h" - -rtree_t * -rtree_new(unsigned bits, rtree_alloc_t *alloc, rtree_dalloc_t *dalloc) -{ - rtree_t *ret; - unsigned bits_per_level, bits_in_leaf, height, i; - - assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3)); - - bits_per_level = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(void *)))) - 1; - bits_in_leaf = ffs(pow2_ceil((RTREE_NODESIZE / sizeof(uint8_t)))) - 1; - if (bits > bits_in_leaf) { - height = 1 + (bits - bits_in_leaf) / bits_per_level; - if ((height-1) * bits_per_level + bits_in_leaf != bits) - height++; - } else { - height = 1; - } - assert((height-1) * bits_per_level + bits_in_leaf >= bits); - - ret = (rtree_t*)alloc(offsetof(rtree_t, level2bits) + - (sizeof(unsigned) * height)); - if (ret == NULL) - return (NULL); - memset(ret, 0, offsetof(rtree_t, level2bits) + (sizeof(unsigned) * - height)); - - ret->alloc = alloc; - ret->dalloc = dalloc; - if (malloc_mutex_init(&ret->mutex)) { - if (dalloc != NULL) - dalloc(ret); - return (NULL); - } - ret->height = height; - if (height > 1) { - if ((height-1) * bits_per_level + bits_in_leaf > bits) { - ret->level2bits[0] = (bits - bits_in_leaf) % - bits_per_level; - } else - ret->level2bits[0] = bits_per_level; - for (i = 1; i < height-1; i++) - ret->level2bits[i] = bits_per_level; - ret->level2bits[height-1] = bits_in_leaf; - } else - ret->level2bits[0] = bits; - - ret->root = (void**)alloc(sizeof(void *) << ret->level2bits[0]); - if (ret->root == NULL) { - if (dalloc != NULL) - dalloc(ret); - return (NULL); - } - memset(ret->root, 0, sizeof(void *) << ret->level2bits[0]); - - return (ret); +#define JEMALLOC_RTREE_C_ 
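+/*
+ * (Inferred from the code below, not upstream text: the rtree is
+ * jemalloc's global radix tree keyed by address; each thread caches
+ * recently used leaves in an rtree_ctx_t, with a small direct-mapped
+ * L1 cache backed by an L2 victim cache.)
+ */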
+#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" + +/* + * Only the most significant bits of keys passed to rtree_{read,write}() are + * used. + */ +bool +rtree_new(rtree_t *rtree, bool zeroed) { +#ifdef JEMALLOC_JET + if (!zeroed) { + memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */ + } +#else + assert(zeroed); +#endif + + if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE, + malloc_mutex_rank_exclusive)) { + return true; + } + + return false; +} + +static rtree_node_elm_t * +rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_node_elm_t), CACHELINE); } +rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl; static void -rtree_delete_subtree(rtree_t *rtree, void **node, unsigned level) -{ +rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) { + /* Nodes are never deleted during normal operation. */ + not_reached(); +} +UNUSED rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc = + rtree_node_dalloc_impl; + +static rtree_leaf_elm_t * +rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) { + return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms * + sizeof(rtree_leaf_elm_t), CACHELINE); +} +rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl; - if (level < rtree->height - 1) { - size_t nchildren, i; +static void +rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) { + /* Leaves are never deleted during normal operation. */ + not_reached(); +} +UNUSED rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc = + rtree_leaf_dalloc_impl; - nchildren = ZU(1) << rtree->level2bits[level]; - for (i = 0; i < nchildren; i++) { - void **child = (void **)node[i]; - if (child != NULL) - rtree_delete_subtree(rtree, child, level + 1); +#ifdef JEMALLOC_JET +# if RTREE_HEIGHT > 1 +static void +rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree, + unsigned level) { + size_t nchildren = ZU(1) << rtree_levels[level].bits; + if (level + 2 < RTREE_HEIGHT) { + for (size_t i = 0; i < nchildren; i++) { + rtree_node_elm_t *node = + (rtree_node_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (node != NULL) { + rtree_delete_subtree(tsdn, rtree, node, level + + 1); + } + } + } else { + for (size_t i = 0; i < nchildren; i++) { + rtree_leaf_elm_t *leaf = + (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child, + ATOMIC_RELAXED); + if (leaf != NULL) { + rtree_leaf_dalloc(tsdn, rtree, leaf); + } } } - rtree->dalloc(node); + + if (subtree != rtree->root) { + rtree_node_dalloc(tsdn, rtree, subtree); + } } +# endif void -rtree_delete(rtree_t *rtree) -{ +rtree_delete(tsdn_t *tsdn, rtree_t *rtree) { +# if RTREE_HEIGHT > 1 + rtree_delete_subtree(tsdn, rtree, rtree->root, 0); +# endif +} +#endif - rtree_delete_subtree(rtree, rtree->root, 0); - rtree->dalloc(rtree); +static rtree_node_elm_t * +rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level, + atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. 
+ */ + rtree_node_elm_t *node = atomic_load_p(elmp, ATOMIC_RELAXED); + if (node == NULL) { + node = rtree_node_alloc(tsdn, rtree, ZU(1) << + rtree_levels[level].bits); + if (node == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. + */ + atomic_store_p(elmp, node, ATOMIC_RELEASE); + } + malloc_mutex_unlock(tsdn, &rtree->init_lock); + + return node; } -void -rtree_prefork(rtree_t *rtree) -{ +static rtree_leaf_elm_t * +rtree_leaf_init(tsdn_t *tsdn, rtree_t *rtree, atomic_p_t *elmp) { + malloc_mutex_lock(tsdn, &rtree->init_lock); + /* + * If *elmp is non-null, then it was initialized with the init lock + * held, so we can get by with 'relaxed' here. + */ + rtree_leaf_elm_t *leaf = atomic_load_p(elmp, ATOMIC_RELAXED); + if (leaf == NULL) { + leaf = rtree_leaf_alloc(tsdn, rtree, ZU(1) << + rtree_levels[RTREE_HEIGHT-1].bits); + if (leaf == NULL) { + malloc_mutex_unlock(tsdn, &rtree->init_lock); + return NULL; + } + /* + * Even though we hold the lock, a later reader might not; we + * need release semantics. + */ + atomic_store_p(elmp, leaf, ATOMIC_RELEASE); + } + malloc_mutex_unlock(tsdn, &rtree->init_lock); - malloc_mutex_prefork(&rtree->mutex); + return leaf; } -void -rtree_postfork_parent(rtree_t *rtree) -{ +static bool +rtree_node_valid(rtree_node_elm_t *node) { + return ((uintptr_t)node != (uintptr_t)0); +} - malloc_mutex_postfork_parent(&rtree->mutex); +static bool +rtree_leaf_valid(rtree_leaf_elm_t *leaf) { + return ((uintptr_t)leaf != (uintptr_t)0); } -void -rtree_postfork_child(rtree_t *rtree) -{ +static rtree_node_elm_t * +rtree_child_node_tryread(rtree_node_elm_t *elm, bool dependent) { + rtree_node_elm_t *node; + + if (dependent) { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); + } else { + node = (rtree_node_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); + } - malloc_mutex_postfork_child(&rtree->mutex); + assert(!dependent || node != NULL); + return node; +} + +static rtree_node_elm_t * +rtree_child_node_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_node_elm_t *node; + + node = rtree_child_node_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_node_valid(node))) { + node = rtree_node_init(tsdn, rtree, level + 1, &elm->child); + } + assert(!dependent || node != NULL); + return node; +} + +static rtree_leaf_elm_t * +rtree_child_leaf_tryread(rtree_node_elm_t *elm, bool dependent) { + rtree_leaf_elm_t *leaf; + + if (dependent) { + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_RELAXED); + } else { + leaf = (rtree_leaf_elm_t *)atomic_load_p(&elm->child, + ATOMIC_ACQUIRE); + } + + assert(!dependent || leaf != NULL); + return leaf; +} + +static rtree_leaf_elm_t * +rtree_child_leaf_read(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *elm, + unsigned level, bool dependent) { + rtree_leaf_elm_t *leaf; + + leaf = rtree_child_leaf_tryread(elm, dependent); + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { + leaf = rtree_leaf_init(tsdn, rtree, &elm->child); + } + assert(!dependent || leaf != NULL); + return leaf; +} + +rtree_leaf_elm_t * +rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, + uintptr_t key, bool dependent, bool init_missing) { + rtree_node_elm_t *node; + rtree_leaf_elm_t *leaf; +#if RTREE_HEIGHT > 1 + node = rtree->root; +#else + leaf = rtree->root; +#endif + + if (config_debug) { + uintptr_t leafkey = 
rtree_leafkey(key); + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + assert(rtree_ctx->cache[i].leafkey != leafkey); + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + assert(rtree_ctx->l2_cache[i].leafkey != leafkey); + } + } + +#define RTREE_GET_CHILD(level) { \ + assert(level < RTREE_HEIGHT-1); \ + if (level != 0 && !dependent && \ + unlikely(!rtree_node_valid(node))) { \ + return NULL; \ + } \ + uintptr_t subkey = rtree_subkey(key, level); \ + if (level + 2 < RTREE_HEIGHT) { \ + node = init_missing ? \ + rtree_child_node_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_node_tryread(&node[subkey], \ + dependent); \ + } else { \ + leaf = init_missing ? \ + rtree_child_leaf_read(tsdn, rtree, \ + &node[subkey], level, dependent) : \ + rtree_child_leaf_tryread(&node[subkey], \ + dependent); \ + } \ + } + /* + * Cache replacement upon hard lookup (i.e. L1 & L2 rtree cache miss): + * (1) evict last entry in L2 cache; (2) move the collision slot from L1 + * cache down to L2; and 3) fill L1. + */ +#define RTREE_GET_LEAF(level) { \ + assert(level == RTREE_HEIGHT-1); \ + if (!dependent && unlikely(!rtree_leaf_valid(leaf))) { \ + return NULL; \ + } \ + if (RTREE_CTX_NCACHE_L2 > 1) { \ + memmove(&rtree_ctx->l2_cache[1], \ + &rtree_ctx->l2_cache[0], \ + sizeof(rtree_ctx_cache_elm_t) * \ + (RTREE_CTX_NCACHE_L2 - 1)); \ + } \ + size_t slot = rtree_cache_direct_map(key); \ + rtree_ctx->l2_cache[0].leafkey = \ + rtree_ctx->cache[slot].leafkey; \ + rtree_ctx->l2_cache[0].leaf = \ + rtree_ctx->cache[slot].leaf; \ + uintptr_t leafkey = rtree_leafkey(key); \ + rtree_ctx->cache[slot].leafkey = leafkey; \ + rtree_ctx->cache[slot].leaf = leaf; \ + uintptr_t subkey = rtree_subkey(key, level); \ + return &leaf[subkey]; \ + } + if (RTREE_HEIGHT > 1) { + RTREE_GET_CHILD(0) + } + if (RTREE_HEIGHT > 2) { + RTREE_GET_CHILD(1) + } + if (RTREE_HEIGHT > 3) { + for (unsigned i = 2; i < RTREE_HEIGHT-1; i++) { + RTREE_GET_CHILD(i) + } + } + RTREE_GET_LEAF(RTREE_HEIGHT-1) +#undef RTREE_GET_CHILD +#undef RTREE_GET_LEAF + not_reached(); +} + +void +rtree_ctx_data_init(rtree_ctx_t *ctx) { + for (unsigned i = 0; i < RTREE_CTX_NCACHE; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } + for (unsigned i = 0; i < RTREE_CTX_NCACHE_L2; i++) { + rtree_ctx_cache_elm_t *cache = &ctx->l2_cache[i]; + cache->leafkey = RTREE_LEAFKEY_INVALID; + cache->leaf = NULL; + } } diff --git a/dep/jemalloc/src/spin.c b/dep/jemalloc/src/spin.c new file mode 100644 index 00000000000..24372c26c94 --- /dev/null +++ b/dep/jemalloc/src/spin.c @@ -0,0 +1,4 @@ +#define JEMALLOC_SPIN_C_ +#include "jemalloc/internal/jemalloc_preamble.h" + +#include "jemalloc/internal/spin.h" diff --git a/dep/jemalloc/src/stats.c b/dep/jemalloc/src/stats.c index bef2ab33cd4..087df7676e9 100644 --- a/dep/jemalloc/src/stats.c +++ b/dep/jemalloc/src/stats.c @@ -1,549 +1,1285 @@ -#define JEMALLOC_STATS_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_STATS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" -#define CTL_GET(n, v, t) do { \ - size_t sz = sizeof(t); \ - xmallctl(n, v, &sz, NULL, 0); \ -} while (0) +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/ctl.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/mutex_prof.h" -#define CTL_I_GET(n, v, t) do { \ - size_t mib[6]; \ - size_t miblen = sizeof(mib) / sizeof(size_t); \ +const char 
*global_mutex_names[mutex_prof_num_global_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_GLOBAL_MUTEXES +#undef OP +}; + +const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = { +#define OP(mtx) #mtx, + MUTEX_PROF_ARENA_MUTEXES +#undef OP +}; + +#define CTL_GET(n, v, t) do { \ size_t sz = sizeof(t); \ - xmallctlnametomib(n, mib, &miblen); \ - mib[2] = i; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + xmallctl(n, (void *)v, &sz, NULL, 0); \ } while (0) -#define CTL_J_GET(n, v, t) do { \ - size_t mib[6]; \ +#define CTL_M2_GET(n, i, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ - mib[2] = j; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + mib[2] = (i); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) -#define CTL_IJ_GET(n, v, t) do { \ - size_t mib[6]; \ +#define CTL_M2_M4_GET(n, i, j, v, t) do { \ + size_t mib[CTL_MAX_DEPTH]; \ size_t miblen = sizeof(mib) / sizeof(size_t); \ size_t sz = sizeof(t); \ xmallctlnametomib(n, mib, &miblen); \ - mib[2] = i; \ - mib[4] = j; \ - xmallctlbymib(mib, miblen, v, &sz, NULL, 0); \ + mib[2] = (i); \ + mib[4] = (j); \ + xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \ } while (0) /******************************************************************************/ /* Data. */ -bool opt_stats_print = false; - -size_t stats_cactive = 0; +bool opt_stats_print = false; +char opt_stats_print_opts[stats_print_tot_num_options+1] = ""; /******************************************************************************/ -/* Function prototypes for non-inline static functions. */ -static void stats_arena_bins_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_lruns_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i); -static void stats_arena_print(void (*write_cb)(void *, const char *), - void *cbopaque, unsigned i, bool bins, bool large); +/* Calculate x.yyy and output a string (takes a fixed sized char array). */ +static bool +get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) { + if (divisor == 0 || dividend > divisor) { + /* The rate is not supposed to be greater than 1. 
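+		 * Returning true signals "no meaningful rate": either the
+		 * divisor is zero, or a racy read made the dividend exceed
+		 * the divisor. Callers print a placeholder of their own in
+		 * that case.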
*/ + return true; + } + if (dividend > 0) { + assert(UINT64_MAX / dividend >= 1000); + } -/******************************************************************************/ + unsigned n = (unsigned)((dividend * 1000) / divisor); + if (n < 10) { + malloc_snprintf(str, 6, "0.00%u", n); + } else if (n < 100) { + malloc_snprintf(str, 6, "0.0%u", n); + } else if (n < 1000) { + malloc_snprintf(str, 6, "0.%u", n); + } else { + malloc_snprintf(str, 6, "1"); + } + + return false; +} + +#define MUTEX_CTL_STR_MAX_LENGTH 128 +static void +gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix, + const char *mutex, const char *counter) { + malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter); +} + +static void +read_arena_bin_mutex_stats(unsigned arena_ind, unsigned bin_ind, + uint64_t results[mutex_prof_num_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; +#define OP(c, t) \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.bins.0","mutex", #c); \ + CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \ + (t *)&results[mutex_counter_##c], t); +MUTEX_PROF_COUNTERS +#undef OP +} + +static void +mutex_stats_output_json(void (*write_cb)(void *, const char *), void *cbopaque, + const char *name, uint64_t stats[mutex_prof_num_counters], + const char *json_indent, bool last) { + malloc_cprintf(write_cb, cbopaque, "%s\"%s\": {\n", json_indent, name); + + mutex_prof_counter_ind_t k = 0; + char *fmt_str[2] = {"%s\t\"%s\": %"FMTu32"%s\n", + "%s\t\"%s\": %"FMTu64"%s\n"}; +#define OP(c, t) \ + malloc_cprintf(write_cb, cbopaque, \ + fmt_str[sizeof(t) / sizeof(uint32_t) - 1], \ + json_indent, #c, (t)stats[mutex_counter_##c], \ + (++k == mutex_prof_num_counters) ? "" : ","); +MUTEX_PROF_COUNTERS +#undef OP + malloc_cprintf(write_cb, cbopaque, "%s}%s\n", json_indent, + last ? "" : ","); +} static void stats_arena_bins_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ + bool json, bool large, bool mutex, unsigned i) { size_t page; - bool config_tcache; - unsigned nbins, j, gap_start; + bool in_gap, in_gap_prev; + unsigned nbins, j; CTL_GET("arenas.page", &page, size_t); - CTL_GET("config.tcache", &config_tcache, bool); - if (config_tcache) { + CTL_GET("arenas.nbins", &nbins, unsigned); + if (json) { malloc_cprintf(write_cb, cbopaque, - "bins: bin size regs pgs allocated nmalloc" - " ndalloc nrequests nfills nflushes" - " newruns reruns curruns\n"); + "\t\t\t\t\"bins\": [\n"); } else { + char *mutex_counters = " n_lock_ops n_waiting" + " n_spin_acq total_wait_ns max_wait_ns\n"; malloc_cprintf(write_cb, cbopaque, - "bins: bin size regs pgs allocated nmalloc" - " ndalloc newruns reruns curruns\n"); + "bins: size ind allocated nmalloc" + " ndalloc nrequests curregs curslabs regs" + " pgs util nfills nflushes newslabs" + " reslabs%s", mutex ? 
mutex_counters : "\n"); } - CTL_GET("arenas.nbins", &nbins, unsigned); - for (j = 0, gap_start = UINT_MAX; j < nbins; j++) { - uint64_t nruns; + for (j = 0, in_gap = false; j < nbins; j++) { + uint64_t nslabs; + size_t reg_size, slab_size, curregs; + size_t curslabs; + uint32_t nregs; + uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; + uint64_t nreslabs; - CTL_IJ_GET("stats.arenas.0.bins.0.nruns", &nruns, uint64_t); - if (nruns == 0) { - if (gap_start == UINT_MAX) - gap_start = j; - } else { - size_t reg_size, run_size, allocated; - uint32_t nregs; - uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes; - uint64_t reruns; - size_t curruns; - - if (gap_start != UINT_MAX) { - if (j > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write_cb, cbopaque, - "[%u..%u]\n", gap_start, - j - 1); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs, + uint64_t); + in_gap_prev = in_gap; + in_gap = (nslabs == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.bin.0.size", j, ®_size, size_t); + CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t); + CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t); + + CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs, + size_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j, + &nrequests, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs, + uint64_t); + CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs, + size_t); + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"nmalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"ndalloc\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curregs\": %zu,\n" + "\t\t\t\t\t\t\"nrequests\": %"FMTu64",\n" + "\t\t\t\t\t\t\"nfills\": %"FMTu64",\n" + "\t\t\t\t\t\t\"nflushes\": %"FMTu64",\n" + "\t\t\t\t\t\t\"nreslabs\": %"FMTu64",\n" + "\t\t\t\t\t\t\"curslabs\": %zu%s\n", + nmalloc, ndalloc, curregs, nrequests, nfills, + nflushes, nreslabs, curslabs, mutex ? "," : ""); + if (mutex) { + uint64_t mutex_stats[mutex_prof_num_counters]; + read_arena_bin_mutex_stats(i, j, mutex_stats); + mutex_stats_output_json(write_cb, cbopaque, + "mutex", mutex_stats, "\t\t\t\t\t\t", true); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t}%s\n", + (j + 1 < nbins) ? "," : ""); + } else if (!in_gap) { + size_t availregs = nregs * curslabs; + char util[6]; + if (get_rate_str((uint64_t)curregs, (uint64_t)availregs, + util)) { + if (availregs == 0) { + malloc_snprintf(util, sizeof(util), + "1"); + } else if (curregs > availregs) { + /* + * Race detected: the counters were read + * in separate mallctl calls and + * concurrent operations happened in + * between. In this case no meaningful + * utilization can be computed. + */ + malloc_snprintf(util, sizeof(util), + " race"); } else { - /* Gap of one size class. 
*/ - malloc_cprintf(write_cb, cbopaque, - "[%u]\n", gap_start); + not_reached(); } - gap_start = UINT_MAX; } - CTL_J_GET("arenas.bin.0.size", ®_size, size_t); - CTL_J_GET("arenas.bin.0.nregs", &nregs, uint32_t); - CTL_J_GET("arenas.bin.0.run_size", &run_size, size_t); - CTL_IJ_GET("stats.arenas.0.bins.0.allocated", - &allocated, size_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nmalloc", - &nmalloc, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.ndalloc", - &ndalloc, uint64_t); - if (config_tcache) { - CTL_IJ_GET("stats.arenas.0.bins.0.nrequests", - &nrequests, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nfills", - &nfills, uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.nflushes", - &nflushes, uint64_t); + uint64_t mutex_stats[mutex_prof_num_counters]; + if (mutex) { + read_arena_bin_mutex_stats(i, j, mutex_stats); } - CTL_IJ_GET("stats.arenas.0.bins.0.nreruns", &reruns, - uint64_t); - CTL_IJ_GET("stats.arenas.0.bins.0.curruns", &curruns, - size_t); - if (config_tcache) { + + malloc_cprintf(write_cb, cbopaque, "%20zu %3u %12zu %12" + FMTu64" %12"FMTu64" %12"FMTu64" %12zu %12zu %4u" + " %3zu %-5s %12"FMTu64" %12"FMTu64" %12"FMTu64 + " %12"FMTu64, reg_size, j, curregs * reg_size, + nmalloc, ndalloc, nrequests, curregs, curslabs, + nregs, slab_size / page, util, nfills, nflushes, + nslabs, nreslabs); + + /* Output less info for bin mutexes to save space. */ + if (mutex) { malloc_cprintf(write_cb, cbopaque, - "%13u %5zu %4u %3zu %12zu %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - j, reg_size, nregs, run_size / page, - allocated, nmalloc, ndalloc, nrequests, - nfills, nflushes, nruns, reruns, curruns); + " %12"FMTu64" %12"FMTu64" %12"FMTu64 + " %14"FMTu64" %12"FMTu64"\n", + mutex_stats[mutex_counter_num_ops], + mutex_stats[mutex_counter_num_wait], + mutex_stats[mutex_counter_num_spin_acq], + mutex_stats[mutex_counter_total_wait_time], + mutex_stats[mutex_counter_max_wait_time]); } else { - malloc_cprintf(write_cb, cbopaque, - "%13u %5zu %4u %3zu %12zu %12"PRIu64 - " %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - j, reg_size, nregs, run_size / page, - allocated, nmalloc, ndalloc, nruns, reruns, - curruns); + malloc_cprintf(write_cb, cbopaque, "\n"); } } } - if (gap_start != UINT_MAX) { - if (j > gap_start + 1) { - /* Gap of more than one size class. */ - malloc_cprintf(write_cb, cbopaque, "[%u..%u]\n", - gap_start, j - 1); - } else { - /* Gap of one size class. */ - malloc_cprintf(write_cb, cbopaque, "[%u]\n", gap_start); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t]%s\n", large ? 
"," : ""); + } else { + if (in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); } } } static void -stats_arena_lruns_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i) -{ - size_t page, nlruns, j; - ssize_t gap_start; - - CTL_GET("arenas.page", &page, size_t); +stats_arena_lextents_print(void (*write_cb)(void *, const char *), + void *cbopaque, bool json, unsigned i) { + unsigned nbins, nlextents, j; + bool in_gap, in_gap_prev; - malloc_cprintf(write_cb, cbopaque, - "large: size pages nmalloc ndalloc nrequests" - " curruns\n"); - CTL_GET("arenas.nlruns", &nlruns, size_t); - for (j = 0, gap_start = -1; j < nlruns; j++) { + CTL_GET("arenas.nbins", &nbins, unsigned); + CTL_GET("arenas.nlextents", &nlextents, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"lextents\": [\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: size ind allocated nmalloc" + " ndalloc nrequests curlextents\n"); + } + for (j = 0, in_gap = false; j < nlextents; j++) { uint64_t nmalloc, ndalloc, nrequests; - size_t run_size, curruns; + size_t lextent_size, curlextents; - CTL_IJ_GET("stats.arenas.0.lruns.0.nmalloc", &nmalloc, - uint64_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.ndalloc", &ndalloc, - uint64_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.nrequests", &nrequests, - uint64_t); - if (nrequests == 0) { - if (gap_start == -1) - gap_start = j; - } else { - CTL_J_GET("arenas.lrun.0.size", &run_size, size_t); - CTL_IJ_GET("stats.arenas.0.lruns.0.curruns", &curruns, - size_t); - if (gap_start != -1) { - malloc_cprintf(write_cb, cbopaque, "[%zu]\n", - j - gap_start); - gap_start = -1; - } + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j, + &nmalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j, + &ndalloc, uint64_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j, + &nrequests, uint64_t); + in_gap_prev = in_gap; + in_gap = (nrequests == 0); + + if (!json && in_gap_prev && !in_gap) { + malloc_cprintf(write_cb, cbopaque, + " ---\n"); + } + + CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t); + CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j, + &curlextents, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t{\n" + "\t\t\t\t\t\t\"curlextents\": %zu\n" + "\t\t\t\t\t}%s\n", + curlextents, + (j + 1 < nlextents) ? 
"," : ""); + } else if (!in_gap) { + malloc_cprintf(write_cb, cbopaque, + "%20zu %3u %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64" %12zu\n", + lextent_size, nbins + j, + curlextents * lextent_size, nmalloc, ndalloc, + nrequests, curlextents); + } + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t]\n"); + } else { + if (in_gap) { malloc_cprintf(write_cb, cbopaque, - "%13zu %5zu %12"PRIu64" %12"PRIu64" %12"PRIu64 - " %12zu\n", - run_size, run_size / page, nmalloc, ndalloc, - nrequests, curruns); + " ---\n"); + } + } +} + +static void +read_arena_mutex_stats(unsigned arena_ind, + uint64_t results[mutex_prof_num_arena_mutexes][mutex_prof_num_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + mutex_prof_arena_ind_t i; + for (i = 0; i < mutex_prof_num_arena_mutexes; i++) { +#define OP(c, t) \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "arenas.0.mutexes", arena_mutex_names[i], #c); \ + CTL_M2_GET(cmd, arena_ind, \ + (t *)&results[i][mutex_counter_##c], t); +MUTEX_PROF_COUNTERS +#undef OP + } +} + +static void +mutex_stats_output(void (*write_cb)(void *, const char *), void *cbopaque, + const char *name, uint64_t stats[mutex_prof_num_counters], + bool first_mutex) { + if (first_mutex) { + /* Print title. */ + malloc_cprintf(write_cb, cbopaque, + " n_lock_ops n_waiting" + " n_spin_acq n_owner_switch total_wait_ns" + " max_wait_ns max_n_thds\n"); + } + + malloc_cprintf(write_cb, cbopaque, "%s", name); + malloc_cprintf(write_cb, cbopaque, ":%*c", + (int)(20 - strlen(name)), ' '); + + char *fmt_str[2] = {"%12"FMTu32, "%16"FMTu64}; +#define OP(c, t) \ + malloc_cprintf(write_cb, cbopaque, \ + fmt_str[sizeof(t) / sizeof(uint32_t) - 1], \ + (t)stats[mutex_counter_##c]); +MUTEX_PROF_COUNTERS +#undef OP + malloc_cprintf(write_cb, cbopaque, "\n"); +} + +static void +stats_arena_mutexes_print(void (*write_cb)(void *, const char *), + void *cbopaque, bool json, bool json_end, unsigned arena_ind) { + uint64_t mutex_stats[mutex_prof_num_arena_mutexes][mutex_prof_num_counters]; + read_arena_mutex_stats(arena_ind, mutex_stats); + + /* Output mutex stats. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, "\t\t\t\t\"mutexes\": {\n"); + mutex_prof_arena_ind_t i, last_mutex; + last_mutex = mutex_prof_num_arena_mutexes - 1; + for (i = 0; i < mutex_prof_num_arena_mutexes; i++) { + mutex_stats_output_json(write_cb, cbopaque, + arena_mutex_names[i], mutex_stats[i], + "\t\t\t\t\t", (i == last_mutex)); + } + malloc_cprintf(write_cb, cbopaque, "\t\t\t\t}%s\n", + json_end ? 
"" : ","); + } else { + mutex_prof_arena_ind_t i; + for (i = 0; i < mutex_prof_num_arena_mutexes; i++) { + mutex_stats_output(write_cb, cbopaque, + arena_mutex_names[i], mutex_stats[i], i == 0); } } - if (gap_start != -1) - malloc_cprintf(write_cb, cbopaque, "[%zu]\n", j - gap_start); } static void stats_arena_print(void (*write_cb)(void *, const char *), void *cbopaque, - unsigned i, bool bins, bool large) -{ + bool json, unsigned i, bool bins, bool large, bool mutex) { unsigned nthreads; const char *dss; - size_t page, pactive, pdirty, mapped; - uint64_t npurge, nmadvise, purged; + ssize_t dirty_decay_ms, muzzy_decay_ms; + size_t page, pactive, pdirty, pmuzzy, mapped, retained; + size_t base, internal, resident; + uint64_t dirty_npurge, dirty_nmadvise, dirty_purged; + uint64_t muzzy_npurge, muzzy_nmadvise, muzzy_purged; size_t small_allocated; uint64_t small_nmalloc, small_ndalloc, small_nrequests; size_t large_allocated; uint64_t large_nmalloc, large_ndalloc, large_nrequests; + size_t tcache_bytes; + uint64_t uptime; CTL_GET("arenas.page", &page, size_t); - CTL_I_GET("stats.arenas.0.nthreads", &nthreads, unsigned); - malloc_cprintf(write_cb, cbopaque, - "assigned threads: %u\n", nthreads); - CTL_I_GET("stats.arenas.0.dss", &dss, const char *); - malloc_cprintf(write_cb, cbopaque, "dss allocation precedence: %s\n", - dss); - CTL_I_GET("stats.arenas.0.pactive", &pactive, size_t); - CTL_I_GET("stats.arenas.0.pdirty", &pdirty, size_t); - CTL_I_GET("stats.arenas.0.npurge", &npurge, uint64_t); - CTL_I_GET("stats.arenas.0.nmadvise", &nmadvise, uint64_t); - CTL_I_GET("stats.arenas.0.purged", &purged, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "dirty pages: %zu:%zu active:dirty, %"PRIu64" sweep%s," - " %"PRIu64" madvise%s, %"PRIu64" purged\n", - pactive, pdirty, npurge, npurge == 1 ? "" : "s", - nmadvise, nmadvise == 1 ? 
"" : "s", purged); - - malloc_cprintf(write_cb, cbopaque, - " allocated nmalloc ndalloc nrequests\n"); - CTL_I_GET("stats.arenas.0.small.allocated", &small_allocated, size_t); - CTL_I_GET("stats.arenas.0.small.nmalloc", &small_nmalloc, uint64_t); - CTL_I_GET("stats.arenas.0.small.ndalloc", &small_ndalloc, uint64_t); - CTL_I_GET("stats.arenas.0.small.nrequests", &small_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "small: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - small_allocated, small_nmalloc, small_ndalloc, small_nrequests); - CTL_I_GET("stats.arenas.0.large.allocated", &large_allocated, size_t); - CTL_I_GET("stats.arenas.0.large.nmalloc", &large_nmalloc, uint64_t); - CTL_I_GET("stats.arenas.0.large.ndalloc", &large_ndalloc, uint64_t); - CTL_I_GET("stats.arenas.0.large.nrequests", &large_nrequests, uint64_t); - malloc_cprintf(write_cb, cbopaque, - "large: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - large_allocated, large_nmalloc, large_ndalloc, large_nrequests); - malloc_cprintf(write_cb, cbopaque, - "total: %12zu %12"PRIu64" %12"PRIu64" %12"PRIu64"\n", - small_allocated + large_allocated, - small_nmalloc + large_nmalloc, - small_ndalloc + large_ndalloc, - small_nrequests + large_nrequests); - malloc_cprintf(write_cb, cbopaque, "active: %12zu\n", pactive * page); - CTL_I_GET("stats.arenas.0.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, "mapped: %12zu\n", mapped); - - if (bins) - stats_arena_bins_print(write_cb, cbopaque, i); - if (large) - stats_arena_lruns_print(write_cb, cbopaque, i); -} + CTL_M2_GET("stats.arenas.0.nthreads", i, &nthreads, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"nthreads\": %u,\n", nthreads); + } else { + malloc_cprintf(write_cb, cbopaque, + "assigned threads: %u\n", nthreads); + } -void -stats_print(void (*write_cb)(void *, const char *), void *cbopaque, - const char *opts) -{ - int err; - uint64_t epoch; - size_t u64sz; - bool general = true; - bool merged = true; - bool unmerged = true; - bool bins = true; - bool large = true; + CTL_M2_GET("stats.arenas.0.uptime", i, &uptime, uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"uptime_ns\": %"FMTu64",\n", uptime); + } else { + malloc_cprintf(write_cb, cbopaque, + "uptime: %"FMTu64"\n", uptime); + } - /* - * Refresh stats, in case mallctl() was called by the application. - * - * Check for OOM here, since refreshing the ctl cache can trigger - * allocation. In practice, none of the subsequent mallctl()-related - * calls in this function will cause OOM if this one succeeds. 
- * */ - epoch = 1; - u64sz = sizeof(uint64_t); - err = je_mallctl("epoch", &epoch, &u64sz, &epoch, sizeof(uint64_t)); - if (err != 0) { - if (err == EAGAIN) { - malloc_write("<jemalloc>: Memory allocation failure in " - "mallctl(\"epoch\", ...)\n"); - return; + CTL_M2_GET("stats.arenas.0.dss", i, &dss, const char *); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dss\": \"%s\",\n", dss); + } else { + malloc_cprintf(write_cb, cbopaque, + "dss allocation precedence: %s\n", dss); + } + + CTL_M2_GET("stats.arenas.0.dirty_decay_ms", i, &dirty_decay_ms, + ssize_t); + CTL_M2_GET("stats.arenas.0.muzzy_decay_ms", i, &muzzy_decay_ms, + ssize_t); + CTL_M2_GET("stats.arenas.0.pactive", i, &pactive, size_t); + CTL_M2_GET("stats.arenas.0.pdirty", i, &pdirty, size_t); + CTL_M2_GET("stats.arenas.0.pmuzzy", i, &pmuzzy, size_t); + CTL_M2_GET("stats.arenas.0.dirty_npurge", i, &dirty_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.dirty_nmadvise", i, &dirty_nmadvise, + uint64_t); + CTL_M2_GET("stats.arenas.0.dirty_purged", i, &dirty_purged, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_npurge", i, &muzzy_npurge, uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_nmadvise", i, &muzzy_nmadvise, + uint64_t); + CTL_M2_GET("stats.arenas.0.muzzy_purged", i, &muzzy_purged, uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dirty_decay_ms\": %zd,\n", dirty_decay_ms); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"muzzy_decay_ms\": %zd,\n", muzzy_decay_ms); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pactive\": %zu,\n", pactive); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pdirty\": %zu,\n", pdirty); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"pmuzzy\": %zu,\n", pmuzzy); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dirty_npurge\": %"FMTu64",\n", dirty_npurge); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dirty_nmadvise\": %"FMTu64",\n", dirty_nmadvise); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"dirty_purged\": %"FMTu64",\n", dirty_purged); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"muzzy_npurge\": %"FMTu64",\n", muzzy_npurge); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"muzzy_nmadvise\": %"FMTu64",\n", muzzy_nmadvise); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"muzzy_purged\": %"FMTu64",\n", muzzy_purged); + } else { + malloc_cprintf(write_cb, cbopaque, + "decaying: time npages sweeps madvises" + " purged\n"); + if (dirty_decay_ms >= 0) { + malloc_cprintf(write_cb, cbopaque, + " dirty: %5zd %12zu %12"FMTu64" %12"FMTu64" %12" + FMTu64"\n", dirty_decay_ms, pdirty, dirty_npurge, + dirty_nmadvise, dirty_purged); + } else { + malloc_cprintf(write_cb, cbopaque, + " dirty: N/A %12zu %12"FMTu64" %12"FMTu64" %12" + FMTu64"\n", pdirty, dirty_npurge, dirty_nmadvise, + dirty_purged); + } + if (muzzy_decay_ms >= 0) { + malloc_cprintf(write_cb, cbopaque, + " muzzy: %5zd %12zu %12"FMTu64" %12"FMTu64" %12" + FMTu64"\n", muzzy_decay_ms, pmuzzy, muzzy_npurge, + muzzy_nmadvise, muzzy_purged); + } else { + malloc_cprintf(write_cb, cbopaque, + " muzzy: N/A %12zu %12"FMTu64" %12"FMTu64" %12" + FMTu64"\n", pmuzzy, muzzy_npurge, muzzy_nmadvise, + muzzy_purged); } - malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " - "...)\n"); - abort(); } - if (opts != NULL) { - unsigned i; + CTL_M2_GET("stats.arenas.0.small.allocated", i, &small_allocated, + size_t); + CTL_M2_GET("stats.arenas.0.small.nmalloc", i, &small_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.small.ndalloc", i, &small_ndalloc, uint64_t); + 
CTL_M2_GET("stats.arenas.0.small.nrequests", i, &small_nrequests, + uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"small\": {\n"); - for (i = 0; opts[i] != '\0'; i++) { - switch (opts[i]) { - case 'g': - general = false; - break; - case 'm': - merged = false; - break; - case 'a': - unmerged = false; - break; - case 'b': - bins = false; - break; - case 'l': - large = false; - break; - default:; - } - } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", small_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", small_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", small_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", small_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + " allocated nmalloc" + " ndalloc nrequests\n"); + malloc_cprintf(write_cb, cbopaque, + "small: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated, small_nmalloc, small_ndalloc, + small_nrequests); } - malloc_cprintf(write_cb, cbopaque, - "___ Begin jemalloc statistics ___\n"); - if (general) { - int err; - const char *cpv; - bool bv; - unsigned uv; - ssize_t ssv; - size_t sv, bsz, ssz, sssz, cpsz; - - bsz = sizeof(bool); - ssz = sizeof(size_t); - sssz = sizeof(ssize_t); - cpsz = sizeof(const char *); - - CTL_GET("version", &cpv, const char *); + CTL_M2_GET("stats.arenas.0.large.allocated", i, &large_allocated, + size_t); + CTL_M2_GET("stats.arenas.0.large.nmalloc", i, &large_nmalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.ndalloc", i, &large_ndalloc, uint64_t); + CTL_M2_GET("stats.arenas.0.large.nrequests", i, &large_nrequests, + uint64_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"large\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"allocated\": %zu,\n", large_allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nmalloc\": %"FMTu64",\n", large_nmalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"ndalloc\": %"FMTu64",\n", large_ndalloc); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nrequests\": %"FMTu64"\n", large_nrequests); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t},\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "large: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + large_allocated, large_nmalloc, large_ndalloc, + large_nrequests); + malloc_cprintf(write_cb, cbopaque, + "total: %12zu %12"FMTu64" %12"FMTu64 + " %12"FMTu64"\n", + small_allocated + large_allocated, small_nmalloc + + large_nmalloc, small_ndalloc + large_ndalloc, + small_nrequests + large_nrequests); + } + if (!json) { + malloc_cprintf(write_cb, cbopaque, + "active: %12zu\n", pactive * page); + } + + CTL_M2_GET("stats.arenas.0.mapped", i, &mapped, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"mapped\": %zu,\n", mapped); + } else { + malloc_cprintf(write_cb, cbopaque, + "mapped: %12zu\n", mapped); + } + + CTL_M2_GET("stats.arenas.0.retained", i, &retained, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"retained\": %zu,\n", retained); + } else { + malloc_cprintf(write_cb, cbopaque, + "retained: %12zu\n", retained); + } + + CTL_M2_GET("stats.arenas.0.base", i, &base, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"base\": %zu,\n", base); + } else { + malloc_cprintf(write_cb, cbopaque, + "base: %12zu\n", base); + } + + 
CTL_M2_GET("stats.arenas.0.internal", i, &internal, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"internal\": %zu,\n", internal); + } else { + malloc_cprintf(write_cb, cbopaque, + "internal: %12zu\n", internal); + } + + CTL_M2_GET("stats.arenas.0.tcache_bytes", i, &tcache_bytes, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"tcache\": %zu,\n", tcache_bytes); + } else { + malloc_cprintf(write_cb, cbopaque, + "tcache: %12zu\n", tcache_bytes); + } + + CTL_M2_GET("stats.arenas.0.resident", i, &resident, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"resident\": %zu%s\n", resident, + (bins || large || mutex) ? "," : ""); + } else { + malloc_cprintf(write_cb, cbopaque, + "resident: %12zu\n", resident); + } + + if (mutex) { + stats_arena_mutexes_print(write_cb, cbopaque, json, + !(bins || large), i); + } + if (bins) { + stats_arena_bins_print(write_cb, cbopaque, json, large, mutex, + i); + } + if (large) { + stats_arena_lextents_print(write_cb, cbopaque, json, i); + } +} + +static void +stats_general_print(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool more) { + const char *cpv; + bool bv; + unsigned uv; + uint32_t u32v; + uint64_t u64v; + ssize_t ssv; + size_t sv, bsz, usz, ssz, sssz, cpsz; + + bsz = sizeof(bool); + usz = sizeof(unsigned); + ssz = sizeof(size_t); + sssz = sizeof(ssize_t); + cpsz = sizeof(const char *); + + CTL_GET("version", &cpv, const char *); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"version\": \"%s\",\n", cpv); + } else { malloc_cprintf(write_cb, cbopaque, "Version: %s\n", cpv); - CTL_GET("config.debug", &bv, bool); + } + + /* config. */ +#define CONFIG_WRITE_BOOL_JSON(n, c) \ + if (json) { \ + CTL_GET("config."#n, &bv, bool); \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : "false", \ + (c)); \ + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"config\": {\n"); + } + + CONFIG_WRITE_BOOL_JSON(cache_oblivious, ",") + + CTL_GET("config.debug", &bv, bool); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"debug\": %s,\n", bv ? "true" : "false"); + } else { malloc_cprintf(write_cb, cbopaque, "Assertions %s\n", bv ? "enabled" : "disabled"); + } + + CONFIG_WRITE_BOOL_JSON(fill, ",") + CONFIG_WRITE_BOOL_JSON(lazy_lock, ",") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"malloc_conf\": \"%s\",\n", + config_malloc_conf); + } else { + malloc_cprintf(write_cb, cbopaque, + "config.malloc_conf: \"%s\"\n", config_malloc_conf); + } + + CONFIG_WRITE_BOOL_JSON(prof, ",") + CONFIG_WRITE_BOOL_JSON(prof_libgcc, ",") + CONFIG_WRITE_BOOL_JSON(prof_libunwind, ",") + CONFIG_WRITE_BOOL_JSON(stats, ",") + CONFIG_WRITE_BOOL_JSON(thp, ",") + CONFIG_WRITE_BOOL_JSON(utrace, ",") + CONFIG_WRITE_BOOL_JSON(xmalloc, "") + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } +#undef CONFIG_WRITE_BOOL_JSON -#define OPT_WRITE_BOOL(n) \ - if ((err = je_mallctl("opt."#n, &bv, &bsz, NULL, 0)) \ - == 0) { \ + /* opt. */ +#define OPT_WRITE_BOOL(n, c) \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ + "false", (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %s\n", bv ? 
"true" : "false"); \ - } -#define OPT_WRITE_SIZE_T(n) \ - if ((err = je_mallctl("opt."#n, &sv, &ssz, NULL, 0)) \ - == 0) { \ + } \ + } +#define OPT_WRITE_BOOL_MUTABLE(n, m, c) { \ + bool bv2; \ + if (je_mallctl("opt."#n, (void *)&bv, &bsz, NULL, 0) == 0 && \ + je_mallctl(#m, (void *)&bv2, &bsz, NULL, 0) == 0) { \ + if (json) { \ malloc_cprintf(write_cb, cbopaque, \ - " opt."#n": %zu\n", sv); \ - } -#define OPT_WRITE_SSIZE_T(n) \ - if ((err = je_mallctl("opt."#n, &ssv, &sssz, NULL, 0)) \ - == 0) { \ + "\t\t\t\""#n"\": %s%s\n", bv ? "true" : \ + "false", (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %s ("#m": %s)\n", bv ? "true" \ + : "false", bv2 ? "true" : "false"); \ + } \ + } \ +} +#define OPT_WRITE_UNSIGNED(n, c) \ + if (je_mallctl("opt."#n, (void *)&uv, &usz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %u%s\n", uv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %u\n", uv); \ + } \ + } +#define OPT_WRITE_SSIZE_T(n, c) \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": %zd\n", ssv); \ - } -#define OPT_WRITE_CHAR_P(n) \ - if ((err = je_mallctl("opt."#n, &cpv, &cpsz, NULL, 0)) \ - == 0) { \ + } \ + } +#define OPT_WRITE_SSIZE_T_MUTABLE(n, m, c) { \ + ssize_t ssv2; \ + if (je_mallctl("opt."#n, (void *)&ssv, &sssz, NULL, 0) == 0 && \ + je_mallctl(#m, (void *)&ssv2, &sssz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": %zd%s\n", ssv, (c)); \ + } else { \ + malloc_cprintf(write_cb, cbopaque, \ + " opt."#n": %zd ("#m": %zd)\n", \ + ssv, ssv2); \ + } \ + } \ +} +#define OPT_WRITE_CHAR_P(n, c) \ + if (je_mallctl("opt."#n, (void *)&cpv, &cpsz, NULL, 0) == 0) { \ + if (json) { \ + malloc_cprintf(write_cb, cbopaque, \ + "\t\t\t\""#n"\": \"%s\"%s\n", cpv, (c)); \ + } else { \ malloc_cprintf(write_cb, cbopaque, \ " opt."#n": \"%s\"\n", cpv); \ - } + } \ + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"opt\": {\n"); + } else { malloc_cprintf(write_cb, cbopaque, "Run-time option settings:\n"); - OPT_WRITE_BOOL(abort) - OPT_WRITE_SIZE_T(lg_chunk) - OPT_WRITE_CHAR_P(dss) - OPT_WRITE_SIZE_T(narenas) - OPT_WRITE_SSIZE_T(lg_dirty_mult) - OPT_WRITE_BOOL(stats_print) - OPT_WRITE_BOOL(junk) - OPT_WRITE_SIZE_T(quarantine) - OPT_WRITE_BOOL(redzone) - OPT_WRITE_BOOL(zero) - OPT_WRITE_BOOL(utrace) - OPT_WRITE_BOOL(valgrind) - OPT_WRITE_BOOL(xmalloc) - OPT_WRITE_BOOL(tcache) - OPT_WRITE_SSIZE_T(lg_tcache_max) - OPT_WRITE_BOOL(prof) - OPT_WRITE_CHAR_P(prof_prefix) - OPT_WRITE_BOOL(prof_active) - OPT_WRITE_SSIZE_T(lg_prof_sample) - OPT_WRITE_BOOL(prof_accum) - OPT_WRITE_SSIZE_T(lg_prof_interval) - OPT_WRITE_BOOL(prof_gdump) - OPT_WRITE_BOOL(prof_final) - OPT_WRITE_BOOL(prof_leak) + } + OPT_WRITE_BOOL(abort, ",") + OPT_WRITE_BOOL(abort_conf, ",") + OPT_WRITE_BOOL(retain, ",") + OPT_WRITE_CHAR_P(dss, ",") + OPT_WRITE_UNSIGNED(narenas, ",") + OPT_WRITE_CHAR_P(percpu_arena, ",") + OPT_WRITE_BOOL_MUTABLE(background_thread, background_thread, ",") + OPT_WRITE_SSIZE_T_MUTABLE(dirty_decay_ms, arenas.dirty_decay_ms, ",") + OPT_WRITE_SSIZE_T_MUTABLE(muzzy_decay_ms, arenas.muzzy_decay_ms, ",") + OPT_WRITE_CHAR_P(junk, ",") + OPT_WRITE_BOOL(zero, ",") + OPT_WRITE_BOOL(utrace, ",") + OPT_WRITE_BOOL(xmalloc, ",") + OPT_WRITE_BOOL(tcache, ",") + OPT_WRITE_SSIZE_T(lg_tcache_max, ",") + 
OPT_WRITE_BOOL(prof, ",") + OPT_WRITE_CHAR_P(prof_prefix, ",") + OPT_WRITE_BOOL_MUTABLE(prof_active, prof.active, ",") + OPT_WRITE_BOOL_MUTABLE(prof_thread_active_init, prof.thread_active_init, + ",") + OPT_WRITE_SSIZE_T_MUTABLE(lg_prof_sample, prof.lg_sample, ",") + OPT_WRITE_BOOL(prof_accum, ",") + OPT_WRITE_SSIZE_T(lg_prof_interval, ",") + OPT_WRITE_BOOL(prof_gdump, ",") + OPT_WRITE_BOOL(prof_final, ",") + OPT_WRITE_BOOL(prof_leak, ",") + OPT_WRITE_BOOL(stats_print, ",") + if (json || opt_stats_print) { + /* + * stats_print_opts is always emitted for JSON, so as long as it + * comes last it's safe to unconditionally omit the comma here + * (rather than having to conditionally omit it elsewhere + * depending on configuration). + */ + OPT_WRITE_CHAR_P(stats_print_opts, "") + } + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t},\n"); + } #undef OPT_WRITE_BOOL -#undef OPT_WRITE_SIZE_T +#undef OPT_WRITE_BOOL_MUTABLE #undef OPT_WRITE_SSIZE_T #undef OPT_WRITE_CHAR_P - malloc_cprintf(write_cb, cbopaque, "CPUs: %u\n", ncpus); + /* arenas. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"arenas\": {\n"); + } - CTL_GET("arenas.narenas", &uv, unsigned); + CTL_GET("arenas.narenas", &uv, unsigned); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"narenas\": %u,\n", uv); + } else { malloc_cprintf(write_cb, cbopaque, "Arenas: %u\n", uv); + } + + if (json) { + CTL_GET("arenas.dirty_decay_ms", &ssv, ssize_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"dirty_decay_ms\": %zd,\n", ssv); - malloc_cprintf(write_cb, cbopaque, "Pointer size: %zu\n", - sizeof(void *)); + CTL_GET("arenas.muzzy_decay_ms", &ssv, ssize_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"muzzy_decay_ms\": %zd,\n", ssv); + } - CTL_GET("arenas.quantum", &sv, size_t); + CTL_GET("arenas.quantum", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"quantum\": %zu,\n", sv); + } else { malloc_cprintf(write_cb, cbopaque, "Quantum size: %zu\n", sv); + } - CTL_GET("arenas.page", &sv, size_t); + CTL_GET("arenas.page", &sv, size_t); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"page\": %zu,\n", sv); + } else { malloc_cprintf(write_cb, cbopaque, "Page size: %zu\n", sv); + } - CTL_GET("opt.lg_dirty_mult", &ssv, ssize_t); - if (ssv >= 0) { + if (je_mallctl("arenas.tcache_max", (void *)&sv, &ssz, NULL, 0) == 0) { + if (json) { malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: %u:1\n", - (1U << ssv)); + "\t\t\t\"tcache_max\": %zu,\n", sv); } else { malloc_cprintf(write_cb, cbopaque, - "Min active:dirty page ratio per arena: N/A\n"); + "Maximum thread-cached size class: %zu\n", sv); } - if ((err = je_mallctl("arenas.tcache_max", &sv, &ssz, NULL, 0)) - == 0) { + } + + if (json) { + unsigned nbins, nlextents, i; + + CTL_GET("arenas.nbins", &nbins, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nbins\": %u,\n", nbins); + + CTL_GET("arenas.nhbins", &uv, unsigned); + malloc_cprintf(write_cb, cbopaque, "\t\t\t\"nhbins\": %u,\n", + uv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"bin\": [\n"); + for (i = 0; i < nbins; i++) { malloc_cprintf(write_cb, cbopaque, - "Maximum thread-cached size class: %zu\n", sv); + "\t\t\t\t{\n"); + + CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu,\n", sv); + + CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"nregs\": %"FMTu32",\n", u32v); + + CTL_M2_GET("arenas.bin.0.slab_size", i, 
&sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"slab_size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nbins) ? "," : ""); } - if ((err = je_mallctl("opt.prof", &bv, &bsz, NULL, 0)) == 0 && - bv) { - CTL_GET("opt.lg_prof_sample", &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t],\n"); + + CTL_GET("arenas.nlextents", &nlextents, unsigned); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"nlextents\": %u,\n", nlextents); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lextent\": [\n"); + for (i = 0; i < nlextents; i++) { malloc_cprintf(write_cb, cbopaque, - "Average profile sample interval: %"PRIu64 - " (2^%zu)\n", (((uint64_t)1U) << sv), sv); + "\t\t\t\t{\n"); - CTL_GET("opt.lg_prof_interval", &ssv, ssize_t); - if (ssv >= 0) { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: %"PRIu64 - " (2^%zd)\n", - (((uint64_t)1U) << ssv), ssv); - } else { - malloc_cprintf(write_cb, cbopaque, - "Average profile dump interval: N/A\n"); + CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\t\"size\": %zu\n", sv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t}%s\n", (i + 1 < nlextents) ? "," : ""); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t\t]\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (config_prof || more) ? "," : ""); + } + + /* prof. */ + if (config_prof && json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"prof\": {\n"); + + CTL_GET("prof.thread_active_init", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"thread_active_init\": %s,\n", bv ? "true" : + "false"); + + CTL_GET("prof.active", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %s,\n", bv ? "true" : "false"); + + CTL_GET("prof.gdump", &bv, bool); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"gdump\": %s,\n", bv ? "true" : "false"); + + CTL_GET("prof.interval", &u64v, uint64_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"interval\": %"FMTu64",\n", u64v); + + CTL_GET("prof.lg_sample", &ssv, ssize_t); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"lg_sample\": %zd\n", ssv); + + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", more ? 
"," : ""); + } +} + +static void +read_global_mutex_stats( + uint64_t results[mutex_prof_num_global_mutexes][mutex_prof_num_counters]) { + char cmd[MUTEX_CTL_STR_MAX_LENGTH]; + + mutex_prof_global_ind_t i; + for (i = 0; i < mutex_prof_num_global_mutexes; i++) { +#define OP(c, t) \ + gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \ + "mutexes", global_mutex_names[i], #c); \ + CTL_GET(cmd, (t *)&results[i][mutex_counter_##c], t); +MUTEX_PROF_COUNTERS +#undef OP + } +} + +static void +stats_print_helper(void (*write_cb)(void *, const char *), void *cbopaque, + bool json, bool merged, bool destroyed, bool unmerged, bool bins, + bool large, bool mutex) { + size_t allocated, active, metadata, resident, mapped, retained; + size_t num_background_threads; + uint64_t background_thread_num_runs, background_thread_run_interval; + + CTL_GET("stats.allocated", &allocated, size_t); + CTL_GET("stats.active", &active, size_t); + CTL_GET("stats.metadata", &metadata, size_t); + CTL_GET("stats.resident", &resident, size_t); + CTL_GET("stats.mapped", &mapped, size_t); + CTL_GET("stats.retained", &retained, size_t); + + uint64_t mutex_stats[mutex_prof_num_global_mutexes][mutex_prof_num_counters]; + if (mutex) { + read_global_mutex_stats(mutex_stats); + } + + if (have_background_thread) { + CTL_GET("stats.background_thread.num_threads", + &num_background_threads, size_t); + CTL_GET("stats.background_thread.num_runs", + &background_thread_num_runs, uint64_t); + CTL_GET("stats.background_thread.run_interval", + &background_thread_run_interval, uint64_t); + } else { + num_background_threads = 0; + background_thread_num_runs = 0; + background_thread_run_interval = 0; + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats\": {\n"); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"allocated\": %zu,\n", allocated); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"active\": %zu,\n", active); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"metadata\": %zu,\n", metadata); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"resident\": %zu,\n", resident); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"mapped\": %zu,\n", mapped); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"retained\": %zu,\n", retained); + + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"background_thread\": {\n"); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"num_threads\": %zu,\n", num_background_threads); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"num_runs\": %"FMTu64",\n", + background_thread_num_runs); + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\t\"run_interval\": %"FMTu64"\n", + background_thread_run_interval); + malloc_cprintf(write_cb, cbopaque, "\t\t\t}%s\n", + mutex ? "," : ""); + + if (mutex) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"mutexes\": {\n"); + mutex_prof_global_ind_t i; + for (i = 0; i < mutex_prof_num_global_mutexes; i++) { + mutex_stats_output_json(write_cb, cbopaque, + global_mutex_names[i], mutex_stats[i], + "\t\t\t\t", + i == mutex_prof_num_global_mutexes - 1); + } + malloc_cprintf(write_cb, cbopaque, "\t\t\t}\n"); + } + malloc_cprintf(write_cb, cbopaque, + "\t\t}%s\n", (merged || unmerged || destroyed) ? 
"," : ""); + } else { + malloc_cprintf(write_cb, cbopaque, + "Allocated: %zu, active: %zu, metadata: %zu," + " resident: %zu, mapped: %zu, retained: %zu\n", + allocated, active, metadata, resident, mapped, retained); + + if (have_background_thread && num_background_threads > 0) { + malloc_cprintf(write_cb, cbopaque, + "Background threads: %zu, num_runs: %"FMTu64", " + "run_interval: %"FMTu64" ns\n", + num_background_threads, + background_thread_num_runs, + background_thread_run_interval); + } + if (mutex) { + mutex_prof_global_ind_t i; + for (i = 0; i < mutex_prof_num_global_mutexes; i++) { + mutex_stats_output(write_cb, cbopaque, + global_mutex_names[i], mutex_stats[i], + i == 0); } } - CTL_GET("opt.lg_chunk", &sv, size_t); - malloc_cprintf(write_cb, cbopaque, "Chunk size: %zu (2^%zu)\n", - (ZU(1) << sv), sv); } - if (config_stats) { - size_t *cactive; - size_t allocated, active, mapped; - size_t chunks_current, chunks_high; - uint64_t chunks_total; - size_t huge_allocated; - uint64_t huge_nmalloc, huge_ndalloc; - - CTL_GET("stats.cactive", &cactive, size_t *); - CTL_GET("stats.allocated", &allocated, size_t); - CTL_GET("stats.active", &active, size_t); - CTL_GET("stats.mapped", &mapped, size_t); - malloc_cprintf(write_cb, cbopaque, - "Allocated: %zu, active: %zu, mapped: %zu\n", - allocated, active, mapped); - malloc_cprintf(write_cb, cbopaque, - "Current active ceiling: %zu\n", atomic_read_z(cactive)); - - /* Print chunk stats. */ - CTL_GET("stats.chunks.total", &chunks_total, uint64_t); - CTL_GET("stats.chunks.high", &chunks_high, size_t); - CTL_GET("stats.chunks.current", &chunks_current, size_t); - malloc_cprintf(write_cb, cbopaque, "chunks: nchunks " - "highchunks curchunks\n"); - malloc_cprintf(write_cb, cbopaque, - " %13"PRIu64" %12zu %12zu\n", - chunks_total, chunks_high, chunks_current); - - /* Print huge stats. */ - CTL_GET("stats.huge.nmalloc", &huge_nmalloc, uint64_t); - CTL_GET("stats.huge.ndalloc", &huge_ndalloc, uint64_t); - CTL_GET("stats.huge.allocated", &huge_allocated, size_t); - malloc_cprintf(write_cb, cbopaque, - "huge: nmalloc ndalloc allocated\n"); - malloc_cprintf(write_cb, cbopaque, - " %12"PRIu64" %12"PRIu64" %12zu\n", - huge_nmalloc, huge_ndalloc, huge_allocated); - - if (merged) { - unsigned narenas; - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i, ninitialized; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); - for (i = ninitialized = 0; i < narenas; i++) { - if (initialized[i]) - ninitialized++; + if (merged || destroyed || unmerged) { + unsigned narenas; + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\"stats.arenas\": {\n"); + } + + CTL_GET("arenas.narenas", &narenas, unsigned); + { + size_t mib[3]; + size_t miblen = sizeof(mib) / sizeof(size_t); + size_t sz; + VARIABLE_ARRAY(bool, initialized, narenas); + bool destroyed_initialized; + unsigned i, j, ninitialized; + + xmallctlnametomib("arena.0.initialized", mib, &miblen); + for (i = ninitialized = 0; i < narenas; i++) { + mib[1] = i; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &initialized[i], &sz, + NULL, 0); + if (initialized[i]) { + ninitialized++; } + } + mib[1] = MALLCTL_ARENAS_DESTROYED; + sz = sizeof(bool); + xmallctlbymib(mib, miblen, &destroyed_initialized, &sz, + NULL, 0); - if (ninitialized > 1 || unmerged == false) { - /* Print merged arena stats. */ + /* Merged stats. 
*/ + if (merged && (ninitialized > 1 || !unmerged)) { + /* Print merged arena stats. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"merged\": {\n"); + } else { malloc_cprintf(write_cb, cbopaque, "\nMerged arenas stats:\n"); - stats_arena_print(write_cb, cbopaque, - narenas, bins, large); + } + stats_arena_print(write_cb, cbopaque, json, + MALLCTL_ARENAS_ALL, bins, large, mutex); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t}%s\n", + ((destroyed_initialized && + destroyed) || unmerged) ? "," : + ""); } } - } - - if (unmerged) { - unsigned narenas; - /* Print stats for each arena. */ - - CTL_GET("arenas.narenas", &narenas, unsigned); - { - VARIABLE_ARRAY(bool, initialized, narenas); - size_t isz; - unsigned i; - - isz = sizeof(bool) * narenas; - xmallctl("arenas.initialized", initialized, - &isz, NULL, 0); + /* Destroyed stats. */ + if (destroyed_initialized && destroyed) { + /* Print destroyed arena stats. */ + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t\"destroyed\": {\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "\nDestroyed arenas stats:\n"); + } + stats_arena_print(write_cb, cbopaque, json, + MALLCTL_ARENAS_DESTROYED, bins, large, + mutex); + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t\t}%s\n", unmerged ? "," : + ""); + } + } - for (i = 0; i < narenas; i++) { + /* Unmerged stats. */ + if (unmerged) { + for (i = j = 0; i < narenas; i++) { if (initialized[i]) { - malloc_cprintf(write_cb, - cbopaque, - "\narenas[%u]:\n", i); + if (json) { + j++; + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t\"%u\": {\n", + i); + } else { + malloc_cprintf(write_cb, + cbopaque, + "\narenas[%u]:\n", + i); + } stats_arena_print(write_cb, - cbopaque, i, bins, large); + cbopaque, json, i, bins, + large, mutex); + if (json) { + malloc_cprintf(write_cb, + cbopaque, + "\t\t\t}%s\n", (j < + ninitialized) ? "," + : ""); + } } } } } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t\t}\n"); + } + } +} + +void +stats_print(void (*write_cb)(void *, const char *), void *cbopaque, + const char *opts) { + int err; + uint64_t epoch; + size_t u64sz; +#define OPTION(o, v, d, s) bool v = d; + STATS_PRINT_OPTIONS +#undef OPTION + + /* + * Refresh stats, in case mallctl() was called by the application. + * + * Check for OOM here, since refreshing the ctl cache can trigger + * allocation. In practice, none of the subsequent mallctl()-related + * calls in this function will cause OOM if this one succeeds. 
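+	 * Writing to "epoch" forces ctl to take a fresh snapshot of all
+	 * counters; without it, the reads below would return values cached
+	 * at the previous refresh.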
+ * */ + epoch = 1; + u64sz = sizeof(uint64_t); + err = je_mallctl("epoch", (void *)&epoch, &u64sz, (void *)&epoch, + sizeof(uint64_t)); + if (err != 0) { + if (err == EAGAIN) { + malloc_write("<jemalloc>: Memory allocation failure in " + "mallctl(\"epoch\", ...)\n"); + return; + } + malloc_write("<jemalloc>: Failure in mallctl(\"epoch\", " + "...)\n"); + abort(); + } + + if (opts != NULL) { + for (unsigned i = 0; opts[i] != '\0'; i++) { + switch (opts[i]) { +#define OPTION(o, v, d, s) case o: v = s; break; + STATS_PRINT_OPTIONS +#undef OPTION + default:; + } + } + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "{\n" + "\t\"jemalloc\": {\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "___ Begin jemalloc statistics ___\n"); + } + + if (general) { + stats_general_print(write_cb, cbopaque, json, config_stats); + } + if (config_stats) { + stats_print_helper(write_cb, cbopaque, json, merged, destroyed, + unmerged, bins, large, mutex); + } + + if (json) { + malloc_cprintf(write_cb, cbopaque, + "\t}\n" + "}\n"); + } else { + malloc_cprintf(write_cb, cbopaque, + "--- End jemalloc statistics ---\n"); } - malloc_cprintf(write_cb, cbopaque, "--- End jemalloc statistics ---\n"); } diff --git a/dep/jemalloc/src/sz.c b/dep/jemalloc/src/sz.c new file mode 100644 index 00000000000..0986615f711 --- /dev/null +++ b/dep/jemalloc/src/sz.c @@ -0,0 +1,106 @@ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/sz.h" + +JEMALLOC_ALIGNED(CACHELINE) +const size_t sz_pind2sz_tab[NPSIZES+1] = { +#define PSZ_yes(lg_grp, ndelta, lg_delta) \ + (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))), +#define PSZ_no(lg_grp, ndelta, lg_delta) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \ + PSZ_##psz(lg_grp, ndelta, lg_delta) + SIZE_CLASSES +#undef PSZ_yes +#undef PSZ_no +#undef SC + (LARGE_MAXCLASS + PAGE) +}; + +JEMALLOC_ALIGNED(CACHELINE) +const size_t sz_index2size_tab[NSIZES] = { +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \ + ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), + SIZE_CLASSES +#undef SC +}; + +JEMALLOC_ALIGNED(CACHELINE) +const uint8_t sz_size2index_tab[] = { +#if LG_TINY_MIN == 0 +#warning "Dangerous LG_TINY_MIN" +#define S2B_0(i) i, +#elif LG_TINY_MIN == 1 +#warning "Dangerous LG_TINY_MIN" +#define S2B_1(i) i, +#elif LG_TINY_MIN == 2 +#warning "Dangerous LG_TINY_MIN" +#define S2B_2(i) i, +#elif LG_TINY_MIN == 3 +#define S2B_3(i) i, +#elif LG_TINY_MIN == 4 +#define S2B_4(i) i, +#elif LG_TINY_MIN == 5 +#define S2B_5(i) i, +#elif LG_TINY_MIN == 6 +#define S2B_6(i) i, +#elif LG_TINY_MIN == 7 +#define S2B_7(i) i, +#elif LG_TINY_MIN == 8 +#define S2B_8(i) i, +#elif LG_TINY_MIN == 9 +#define S2B_9(i) i, +#elif LG_TINY_MIN == 10 +#define S2B_10(i) i, +#elif LG_TINY_MIN == 11 +#define S2B_11(i) i, +#else +#error "Unsupported LG_TINY_MIN" +#endif +#if LG_TINY_MIN < 1 +#define S2B_1(i) S2B_0(i) S2B_0(i) +#endif +#if LG_TINY_MIN < 2 +#define S2B_2(i) S2B_1(i) S2B_1(i) +#endif +#if LG_TINY_MIN < 3 +#define S2B_3(i) S2B_2(i) S2B_2(i) +#endif +#if LG_TINY_MIN < 4 +#define S2B_4(i) S2B_3(i) S2B_3(i) +#endif +#if LG_TINY_MIN < 5 +#define S2B_5(i) S2B_4(i) S2B_4(i) +#endif +#if LG_TINY_MIN < 6 +#define S2B_6(i) S2B_5(i) S2B_5(i) +#endif +#if LG_TINY_MIN < 7 +#define S2B_7(i) S2B_6(i) S2B_6(i) +#endif +#if LG_TINY_MIN < 8 +#define S2B_8(i) S2B_7(i) S2B_7(i) +#endif +#if LG_TINY_MIN < 9 +#define S2B_9(i) S2B_8(i) S2B_8(i) +#endif +#if LG_TINY_MIN < 10 +#define S2B_10(i) S2B_9(i) S2B_9(i) +#endif +#if LG_TINY_MIN < 11 +#define 
S2B_11(i) S2B_10(i) S2B_10(i) +#endif +#define S2B_no(i) +#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, lg_delta_lookup) \ + S2B_##lg_delta_lookup(index) + SIZE_CLASSES +#undef S2B_3 +#undef S2B_4 +#undef S2B_5 +#undef S2B_6 +#undef S2B_7 +#undef S2B_8 +#undef S2B_9 +#undef S2B_10 +#undef S2B_11 +#undef S2B_no +#undef SC +}; diff --git a/dep/jemalloc/src/tcache.c b/dep/jemalloc/src/tcache.c index 6de92960b2d..936ef3140d5 100644 --- a/dep/jemalloc/src/tcache.c +++ b/dep/jemalloc/src/tcache.c @@ -1,131 +1,153 @@ -#define JEMALLOC_TCACHE_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_TCACHE_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/size_classes.h" /******************************************************************************/ /* Data. */ -malloc_tsd_data(, tcache, tcache_t *, NULL) -malloc_tsd_data(, tcache_enabled, tcache_enabled_t, tcache_enabled_default) - bool opt_tcache = true; ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT; tcache_bin_info_t *tcache_bin_info; static unsigned stack_nelms; /* Total stack elms per tcache. */ -size_t nhbins; +unsigned nhbins; size_t tcache_maxclass; -/******************************************************************************/ +tcaches_t *tcaches; + +/* Index of first element within tcaches that has never been used. */ +static unsigned tcaches_past; + +/* Head of singly linked list tracking available tcaches elements. */ +static tcaches_t *tcaches_avail; -size_t tcache_salloc(const void *ptr) -{ +/* Protects tcaches{,_past,_avail}. */ +static malloc_mutex_t tcaches_mtx; - return (arena_salloc(ptr, false)); +/******************************************************************************/ + +size_t +tcache_salloc(tsdn_t *tsdn, const void *ptr) { + return arena_salloc(tsdn, ptr); } void -tcache_event_hard(tcache_t *tcache) -{ - size_t binind = tcache->next_gc_bin; - tcache_bin_t *tbin = &tcache->tbins[binind]; - tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; - +tcache_event_hard(tsd_t *tsd, tcache_t *tcache) { + szind_t binind = tcache->next_gc_bin; + + tcache_bin_t *tbin; + if (binind < NBINS) { + tbin = tcache_small_bin_get(tcache, binind); + } else { + tbin = tcache_large_bin_get(tcache, binind); + } if (tbin->low_water > 0) { /* * Flush (ceiling) 3/4 of the objects below the low water mark. */ if (binind < NBINS) { - tcache_bin_flush_small(tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); + tcache_bin_flush_small(tsd, tcache, tbin, binind, + tbin->ncached - tbin->low_water + (tbin->low_water + >> 2)); + /* + * Reduce fill count by 2X. Limit lg_fill_div such that + * the fill count is always at least 1. + */ + tcache_bin_info_t *tbin_info = &tcache_bin_info[binind]; + if ((tbin_info->ncached_max >> + (tcache->lg_fill_div[binind] + 1)) >= 1) { + tcache->lg_fill_div[binind]++; + } } else { - tcache_bin_flush_large(tbin, binind, tbin->ncached - - tbin->low_water + (tbin->low_water >> 2), tcache); + tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached + - tbin->low_water + (tbin->low_water >> 2), tcache); } - /* - * Reduce fill count by 2X. Limit lg_fill_div such that the - * fill count is always at least 1. - */ - if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1) - tbin->lg_fill_div++; } else if (tbin->low_water < 0) { /* - * Increase fill count by 2X. 
Make sure lg_fill_div stays - * greater than 0. + * Increase fill count by 2X for small bins. Make sure + * lg_fill_div stays greater than 0. */ - if (tbin->lg_fill_div > 1) - tbin->lg_fill_div--; + if (binind < NBINS && tcache->lg_fill_div[binind] > 1) { + tcache->lg_fill_div[binind]--; + } } tbin->low_water = tbin->ncached; tcache->next_gc_bin++; - if (tcache->next_gc_bin == nhbins) + if (tcache->next_gc_bin == nhbins) { tcache->next_gc_bin = 0; - tcache->ev_cnt = 0; + } } void * -tcache_alloc_small_hard(tcache_t *tcache, tcache_bin_t *tbin, size_t binind) -{ +tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, + tcache_bin_t *tbin, szind_t binind, bool *tcache_success) { void *ret; - arena_tcache_fill_small(tcache->arena, tbin, binind, + assert(tcache->arena != NULL); + arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind, config_prof ? tcache->prof_accumbytes : 0); - if (config_prof) + if (config_prof) { tcache->prof_accumbytes = 0; - ret = tcache_alloc_easy(tbin); + } + ret = tcache_alloc_easy(tbin, tcache_success); - return (ret); + return ret; } void -tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache) -{ - void *ptr; - unsigned i, nflush, ndeferred; +tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, + szind_t binind, unsigned rem) { bool merged_stats = false; assert(binind < NBINS); assert(rem <= tbin->ncached); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { - /* Lock the arena bin associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *arena = chunk->arena; - arena_bin_t *bin = &arena->bins[binind]; + arena_t *arena = tcache->arena; + assert(arena != NULL); + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + /* Look up extent once per item. */ + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + } - if (config_prof && arena == tcache->arena) { - if (arena_prof_accum(arena, tcache->prof_accumbytes)) - prof_idump(); + while (nflush > 0) { + /* Lock the arena bin associated with the first object. 
*/ + extent_t *extent = item_extent[0]; + arena_t *bin_arena = extent_arena_get(extent); + arena_bin_t *bin = &bin_arena->bins[binind]; + + if (config_prof && bin_arena == arena) { + if (arena_prof_accum(tsd_tsdn(tsd), arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } tcache->prof_accumbytes = 0; } - malloc_mutex_lock(&bin->lock); - if (config_stats && arena == tcache->arena) { - assert(merged_stats == false); + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); + if (config_stats && bin_arena == arena) { + assert(!merged_stats); merged_stats = true; bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk->arena == arena) { - size_t pageind = ((uintptr_t)ptr - - (uintptr_t)chunk) >> LG_PAGE; - arena_chunk_map_t *mapelm = - arena_mapp_get(chunk, pageind); - if (config_fill && opt_junk) { - arena_alloc_junk_small(ptr, - &arena_bin_info[binind], true); - } - arena_dalloc_bin_locked(arena, chunk, ptr, - mapelm); + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_get(extent) == bin_arena) { + arena_dalloc_bin_junked_locked(tsd_tsdn(tsd), + bin_arena, extent, ptr); } else { /* * This object was allocated via a different @@ -133,276 +155,369 @@ tcache_bin_flush_small(tcache_bin_t *tbin, size_t binind, unsigned rem, * locked. Stash the object, so that it can be * handled in a future pass. */ - tbin->avail[ndeferred] = ptr; + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; ndeferred++; } } - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); + arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred); + nflush = ndeferred; } - if (config_stats && merged_stats == false) { + if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. */ - arena_bin_t *bin = &tcache->arena->bins[binind]; - malloc_mutex_lock(&bin->lock); + arena_bin_t *bin = &arena->bins[binind]; + malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); bin->stats.nflushes++; bin->stats.nrequests += tbin->tstats.nrequests; tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); } - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) + if ((low_water_t)tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; + } } void -tcache_bin_flush_large(tcache_bin_t *tbin, size_t binind, unsigned rem, - tcache_t *tcache) -{ - void *ptr; - unsigned i, nflush, ndeferred; +tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, + unsigned rem, tcache_t *tcache) { bool merged_stats = false; assert(binind < nhbins); assert(rem <= tbin->ncached); - for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) { + arena_t *arena = tcache->arena; + assert(arena != NULL); + unsigned nflush = tbin->ncached - rem; + VARIABLE_ARRAY(extent_t *, item_extent, nflush); + /* Look up extent once per item. 
*/ + for (unsigned i = 0 ; i < nflush; i++) { + item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i)); + } + + while (nflush > 0) { /* Lock the arena associated with the first object. */ - arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE( - tbin->avail[0]); - arena_t *arena = chunk->arena; + extent_t *extent = item_extent[0]; + arena_t *locked_arena = extent_arena_get(extent); UNUSED bool idump; - if (config_prof) + if (config_prof) { idump = false; - malloc_mutex_lock(&arena->lock); - if ((config_prof || config_stats) && arena == tcache->arena) { + } + + malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx); + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + assert(ptr != NULL); + extent = item_extent[i]; + if (extent_arena_get(extent) == locked_arena) { + large_dalloc_prep_junked_locked(tsd_tsdn(tsd), + extent); + } + } + if ((config_prof || config_stats) && locked_arena == arena) { if (config_prof) { - idump = arena_prof_accum_locked(arena, + idump = arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes); tcache->prof_accumbytes = 0; } if (config_stats) { merged_stats = true; - arena->stats.nrequests_large += - tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; + arena_stats_large_nrequests_add(tsd_tsdn(tsd), + &arena->stats, binind, + tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } } - ndeferred = 0; - for (i = 0; i < nflush; i++) { - ptr = tbin->avail[i]; - assert(ptr != NULL); - chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); - if (chunk->arena == arena) - arena_dalloc_large_locked(arena, chunk, ptr); - else { + malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx); + + unsigned ndeferred = 0; + for (unsigned i = 0; i < nflush; i++) { + void *ptr = *(tbin->avail - 1 - i); + extent = item_extent[i]; + assert(ptr != NULL && extent != NULL); + + if (extent_arena_get(extent) == locked_arena) { + large_dalloc_finish(tsd_tsdn(tsd), extent); + } else { /* * This object was allocated via a different * arena than the one that is currently locked. * Stash the object, so that it can be handled * in a future pass. */ - tbin->avail[ndeferred] = ptr; + *(tbin->avail - 1 - ndeferred) = ptr; + item_extent[ndeferred] = extent; ndeferred++; } } - malloc_mutex_unlock(&arena->lock); - if (config_prof && idump) - prof_idump(); + if (config_prof && idump) { + prof_idump(tsd_tsdn(tsd)); + } + arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush - + ndeferred); + nflush = ndeferred; } - if (config_stats && merged_stats == false) { + if (config_stats && !merged_stats) { /* * The flush loop didn't happen to flush to this thread's * arena, so the stats didn't get merged. Manually do so now. 
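Stepping back, tcache_bin_flush_small() and tcache_bin_flush_large() share one shape: resolve each cached pointer's extent once up front, then repeatedly take the lock of the arena owning the first remaining item, free every item that arena owns, and stash the rest for a later pass until nothing is deferred. A self-contained miniature of that deferred-partition loop (stand-in types, no locking or stats, not jemalloc's code):

#include <stdio.h>

typedef struct { int owner; int freed; } item_t;

static void
flush_all(item_t **items, unsigned n) {
	while (n > 0) {
		int owner = items[0]->owner;	/* "lock" this owner's bin */
		unsigned ndeferred = 0;
		for (unsigned i = 0; i < n; i++) {
			if (items[i]->owner == owner) {
				items[i]->freed = 1;	/* dalloc under the lock */
			} else {
				/* Different owner: keep for a later pass. */
				items[ndeferred++] = items[i];
			}
		}
		n = ndeferred;	/* "unlock", then start the next pass */
	}
}

int
main(void) {
	item_t a = {0, 0}, b = {1, 0}, c = {0, 0};
	item_t *v[] = {&a, &b, &c};
	flush_all(v, 3);
	printf("%d %d %d\n", a.freed, b.freed, c.freed);	/* 1 1 1 */
	return 0;
}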
*/ - arena_t *arena = tcache->arena; - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[binind - NBINS].nrequests += - tbin->tstats.nrequests; + arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats, + binind, tbin->tstats.nrequests); tbin->tstats.nrequests = 0; - malloc_mutex_unlock(&arena->lock); } - memmove(tbin->avail, &tbin->avail[tbin->ncached - rem], - rem * sizeof(void *)); + memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem * + sizeof(void *)); tbin->ncached = rem; - if ((int)tbin->ncached < tbin->low_water) + if ((low_water_t)tbin->ncached < tbin->low_water) { tbin->low_water = tbin->ncached; + } } void -tcache_arena_associate(tcache_t *tcache, arena_t *arena) -{ +tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + assert(tcache->arena == NULL); + tcache->arena = arena; if (config_stats) { /* Link into list of extant tcaches. */ - malloc_mutex_lock(&arena->lock); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); ql_elm_new(tcache, link); ql_tail_insert(&arena->tcache_ql, tcache, link); - malloc_mutex_unlock(&arena->lock); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } - tcache->arena = arena; } -void -tcache_arena_dissociate(tcache_t *tcache) -{ - +static void +tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) { + arena_t *arena = tcache->arena; + assert(arena != NULL); if (config_stats) { /* Unlink from list of extant tcaches. */ - malloc_mutex_lock(&tcache->arena->lock); - ql_remove(&tcache->arena->tcache_ql, tcache, link); - tcache_stats_merge(tcache, tcache->arena); - malloc_mutex_unlock(&tcache->arena->lock); + malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); + if (config_debug) { + bool in_ql = false; + tcache_t *iter; + ql_foreach(iter, &arena->tcache_ql, link) { + if (iter == tcache) { + in_ql = true; + break; + } + } + assert(in_ql); + } + ql_remove(&arena->tcache_ql, tcache, link); + tcache_stats_merge(tsdn, tcache, arena); + malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); } + tcache->arena = NULL; } +void +tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { + tcache_arena_dissociate(tsdn, tcache); + tcache_arena_associate(tsdn, tcache, arena); +} + +bool +tsd_tcache_enabled_data_init(tsd_t *tsd) { + /* Called upon tsd initialization. */ + tsd_tcache_enabled_set(tsd, opt_tcache); + tsd_slow_update(tsd); + + if (opt_tcache) { + /* Trigger tcache init. */ + tsd_tcache_data_init(tsd); + } + + return false; +} + +/* Initialize auto tcache (embedded in TSD). */ +static void +tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) { + memset(&tcache->link, 0, sizeof(ql_elm(tcache_t))); + tcache->prof_accumbytes = 0; + tcache->next_gc_bin = 0; + tcache->arena = NULL; + + ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR); + + size_t stack_offset = 0; + assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); + memset(tcache->tbins_small, 0, sizeof(tcache_bin_t) * NBINS); + memset(tcache->tbins_large, 0, sizeof(tcache_bin_t) * (nhbins - NBINS)); + unsigned i = 0; + for (; i < NBINS; i++) { + tcache->lg_fill_div[i] = 1; + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + /* + * avail points past the available space. Allocations will + * access the slots toward higher addresses (for the benefit of + * prefetch). 
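That prefetch note is the key to the avail arithmetic used throughout this file: avail points one past the slot region, cached slot i lives at *(avail - 1 - i), and a pop reads *(avail - ncached). The convention reduced to a compilable miniature (stand-in struct, no capacity checks beyond the assert):

#include <assert.h>

typedef struct {
	void **avail;		/* one past the highest slot */
	unsigned ncached;	/* live entries below avail */
} bin_t;

static void
push(bin_t *b, void *p) {
	*(b->avail - 1 - b->ncached) = p;
	b->ncached++;
}

static void *
pop(bin_t *b) {
	void *p = *(b->avail - b->ncached);
	b->ncached--;
	return p;
}

int
main(void) {
	void *slots[8];
	bin_t b = { slots + 8, 0 };
	int x;

	push(&b, &x);
	assert(pop(&b) == &x);
	return 0;
}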
+ */ + tcache_small_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + } + for (; i < nhbins; i++) { + stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + tcache_large_bin_get(tcache, i)->avail = + (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset); + } + assert(stack_offset == stack_nelms * sizeof(void *)); +} + +/* Initialize auto tcache (embedded in TSD). */ +bool +tsd_tcache_data_init(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get_unsafe(tsd); + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + size_t size = stack_nelms * sizeof(void *); + /* Avoid false cacheline sharing. */ + size = sz_sa2u(size, CACHELINE); + + void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, + NULL, true, arena_get(TSDN_NULL, 0, true)); + if (avail_array == NULL) { + return true; + } + + tcache_init(tsd, tcache, avail_array); + /* + * Initialization is a bit tricky here. After malloc init is done, all + * threads can rely on arena_choose and associate tcache accordingly. + * However, the thread that does actual malloc bootstrapping relies on + * functional tsd, and it can only rely on a0. In that case, we + * associate its tcache to a0 temporarily, and later on + * arena_choose_hard() will re-associate properly. + */ + tcache->arena = NULL; + arena_t *arena; + if (!malloc_initialized()) { + /* If in initialization, assign to a0. */ + arena = arena_get(tsd_tsdn(tsd), 0, false); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } else { + arena = arena_choose(tsd, NULL); + /* This may happen if thread.tcache.enabled is used. */ + if (tcache->arena == NULL) { + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena); + } + } + assert(arena == tcache->arena); + + return false; +} + +/* Created manual tcache for tcache.create mallctl. */ tcache_t * -tcache_create(arena_t *arena) -{ +tcache_create_explicit(tsd_t *tsd) { tcache_t *tcache; size_t size, stack_offset; - unsigned i; - size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins); + size = sizeof(tcache_t); /* Naturally align the pointer stacks. */ size = PTR_CEILING(size); stack_offset = size; size += stack_nelms * sizeof(void *); - /* - * Round up to the nearest multiple of the cacheline size, in order to - * avoid the possibility of false cacheline sharing. - * - * That this works relies on the same logic as in ipalloc(), but we - * cannot directly call ipalloc() here due to tcache bootstrapping - * issues. - */ - size = (size + CACHELINE_MASK) & (-CACHELINE); - - if (size <= SMALL_MAXCLASS) - tcache = (tcache_t *)arena_malloc_small(arena, size, true); - else if (size <= tcache_maxclass) - tcache = (tcache_t *)arena_malloc_large(arena, size, true); - else - tcache = (tcache_t *)icalloct(size, false, arena); - - if (tcache == NULL) - return (NULL); - - tcache_arena_associate(tcache, arena); + /* Avoid false cacheline sharing. 
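"Avoid false cacheline sharing" here means rounding the request so that two tcaches never straddle one line; sz_sa2u() does that and additionally rounds to a real size class. The plain arithmetic underneath, in isolation (the constant is illustrative; jemalloc derives CACHELINE per platform):

#include <assert.h>
#include <stddef.h>

#define CACHELINE ((size_t)64)

static size_t
cacheline_ceiling(size_t size) {
	return (size + CACHELINE - 1) & ~(CACHELINE - 1);
}

int
main(void) {
	assert(cacheline_ceiling(1) == 64);
	assert(cacheline_ceiling(64) == 64);
	assert(cacheline_ceiling(65) == 128);
	return 0;
}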
*/ + size = sz_sa2u(size, CACHELINE); - assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0); - for (i = 0; i < nhbins; i++) { - tcache->tbins[i].lg_fill_div = 1; - tcache->tbins[i].avail = (void **)((uintptr_t)tcache + - (uintptr_t)stack_offset); - stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *); + tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true, + arena_get(TSDN_NULL, 0, true)); + if (tcache == NULL) { + return NULL; } - tcache_tsd_set(&tcache); + tcache_init(tsd, tcache, + (void *)((uintptr_t)tcache + (uintptr_t)stack_offset)); + tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL)); - return (tcache); + return tcache; } -void -tcache_destroy(tcache_t *tcache) -{ - unsigned i; - size_t tcache_size; +static void +tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) { + assert(tcache->arena != NULL); - tcache_arena_dissociate(tcache); + for (unsigned i = 0; i < NBINS; i++) { + tcache_bin_t *tbin = tcache_small_bin_get(tcache, i); + tcache_bin_flush_small(tsd, tcache, tbin, i, 0); - for (i = 0; i < NBINS; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_small(tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_t *arena = tcache->arena; - arena_bin_t *bin = &arena->bins[i]; - malloc_mutex_lock(&bin->lock); - bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + if (config_stats) { + assert(tbin->tstats.nrequests == 0); } } + for (unsigned i = NBINS; i < nhbins; i++) { + tcache_bin_t *tbin = tcache_large_bin_get(tcache, i); + tcache_bin_flush_large(tsd, tbin, i, 0, tcache); - for (; i < nhbins; i++) { - tcache_bin_t *tbin = &tcache->tbins[i]; - tcache_bin_flush_large(tbin, i, 0, tcache); - - if (config_stats && tbin->tstats.nrequests != 0) { - arena_t *arena = tcache->arena; - malloc_mutex_lock(&arena->lock); - arena->stats.nrequests_large += tbin->tstats.nrequests; - arena->stats.lstats[i - NBINS].nrequests += - tbin->tstats.nrequests; - malloc_mutex_unlock(&arena->lock); + if (config_stats) { + assert(tbin->tstats.nrequests == 0); } } if (config_prof && tcache->prof_accumbytes > 0 && - arena_prof_accum(tcache->arena, tcache->prof_accumbytes)) - prof_idump(); - - tcache_size = arena_salloc(tcache, false); - if (tcache_size <= SMALL_MAXCLASS) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); - arena_t *arena = chunk->arena; - size_t pageind = ((uintptr_t)tcache - (uintptr_t)chunk) >> - LG_PAGE; - arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); - - arena_dalloc_bin(arena, chunk, tcache, pageind, mapelm); - } else if (tcache_size <= tcache_maxclass) { - arena_chunk_t *chunk = CHUNK_ADDR2BASE(tcache); - arena_t *arena = chunk->arena; - - arena_dalloc_large(arena, chunk, tcache); - } else - idalloct(tcache, false); + arena_prof_accum(tsd_tsdn(tsd), tcache->arena, + tcache->prof_accumbytes)) { + prof_idump(tsd_tsdn(tsd)); + } } void -tcache_thread_cleanup(void *arg) -{ - tcache_t *tcache = *(tcache_t **)arg; +tcache_flush(tsd_t *tsd) { + assert(tcache_available(tsd)); + tcache_flush_cache(tsd, tsd_tcachep_get(tsd)); +} - if (tcache == TCACHE_STATE_DISABLED) { - /* Do nothing. */ - } else if (tcache == TCACHE_STATE_REINCARNATED) { - /* - * Another destructor called an allocator function after this - * destructor was called. Reset tcache to - * TCACHE_STATE_PURGATORY in order to receive another callback. 
- */ - tcache = TCACHE_STATE_PURGATORY; - tcache_tsd_set(&tcache); - } else if (tcache == TCACHE_STATE_PURGATORY) { - /* - * The previous time this destructor was called, we set the key - * to TCACHE_STATE_PURGATORY so that other destructors wouldn't - * cause re-creation of the tcache. This time, do nothing, so - * that the destructor will not be called again. - */ - } else if (tcache != NULL) { - assert(tcache != TCACHE_STATE_PURGATORY); - tcache_destroy(tcache); - tcache = TCACHE_STATE_PURGATORY; - tcache_tsd_set(&tcache); +static void +tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) { + tcache_flush_cache(tsd, tcache); + tcache_arena_dissociate(tsd_tsdn(tsd), tcache); + + if (tsd_tcache) { + /* Release the avail array for the TSD embedded auto tcache. */ + void *avail_array = + (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail - + (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *)); + idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true); + } else { + /* Release both the tcache struct and avail array. */ + idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true); } } -/* Caller must own arena->lock. */ +/* For auto tcache (embedded in TSD) only. */ void -tcache_stats_merge(tcache_t *tcache, arena_t *arena) -{ +tcache_cleanup(tsd_t *tsd) { + tcache_t *tcache = tsd_tcachep_get(tsd); + if (!tcache_available(tsd)) { + assert(tsd_tcache_enabled_get(tsd) == false); + if (config_debug) { + assert(tcache_small_bin_get(tcache, 0)->avail == NULL); + } + return; + } + assert(tsd_tcache_enabled_get(tsd)); + assert(tcache_small_bin_get(tcache, 0)->avail != NULL); + + tcache_destroy(tsd, tcache, true); + if (config_debug) { + tcache_small_bin_get(tcache, 0)->avail = NULL; + } +} + +void +tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) { unsigned i; cassert(config_stats); @@ -410,48 +525,151 @@ tcache_stats_merge(tcache_t *tcache, arena_t *arena) /* Merge and reset tcache stats. */ for (i = 0; i < NBINS; i++) { arena_bin_t *bin = &arena->bins[i]; - tcache_bin_t *tbin = &tcache->tbins[i]; - malloc_mutex_lock(&bin->lock); + tcache_bin_t *tbin = tcache_small_bin_get(tcache, i); + malloc_mutex_lock(tsdn, &bin->lock); bin->stats.nrequests += tbin->tstats.nrequests; - malloc_mutex_unlock(&bin->lock); + malloc_mutex_unlock(tsdn, &bin->lock); tbin->tstats.nrequests = 0; } for (; i < nhbins; i++) { - malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS]; - tcache_bin_t *tbin = &tcache->tbins[i]; - arena->stats.nrequests_large += tbin->tstats.nrequests; - lstats->nrequests += tbin->tstats.nrequests; + tcache_bin_t *tbin = tcache_large_bin_get(tcache, i); + arena_stats_large_nrequests_add(tsdn, &arena->stats, i, + tbin->tstats.nrequests); tbin->tstats.nrequests = 0; } } +static bool +tcaches_create_prep(tsd_t *tsd) { + bool err; + + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + + if (tcaches == NULL) { + tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *) + * (MALLOCX_TCACHE_MAX+1), CACHELINE); + if (tcaches == NULL) { + err = true; + goto label_return; + } + } + + if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) { + err = true; + goto label_return; + } + + err = false; +label_return: + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + return err; +} + bool -tcache_boot0(void) -{ - unsigned i; +tcaches_create(tsd_t *tsd, unsigned *r_ind) { + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); - /* - * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is - * known. 
- */ - if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS) + bool err; + + if (tcaches_create_prep(tsd)) { + err = true; + goto label_return; + } + + tcache_t *tcache = tcache_create_explicit(tsd); + if (tcache == NULL) { + err = true; + goto label_return; + } + + tcaches_t *elm; + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcaches_avail != NULL) { + elm = tcaches_avail; + tcaches_avail = tcaches_avail->next; + elm->tcache = tcache; + *r_ind = (unsigned)(elm - tcaches); + } else { + elm = &tcaches[tcaches_past]; + elm->tcache = tcache; + *r_ind = tcaches_past; + tcaches_past++; + } + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + + err = false; +label_return: + witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0); + return err; +} + +static tcache_t * +tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) { + malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx); + + if (elm->tcache == NULL) { + return NULL; + } + tcache_t *tcache = elm->tcache; + elm->tcache = NULL; + return tcache; +} + +void +tcaches_flush(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]); + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + tcache_destroy(tsd, tcache, false); + } +} + +void +tcaches_destroy(tsd_t *tsd, unsigned ind) { + malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx); + tcaches_t *elm = &tcaches[ind]; + tcache_t *tcache = tcaches_elm_remove(tsd, elm); + elm->next = tcaches_avail; + tcaches_avail = elm; + malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx); + if (tcache != NULL) { + tcache_destroy(tsd, tcache, false); + } +} + +bool +tcache_boot(tsdn_t *tsdn) { + /* If necessary, clamp opt_lg_tcache_max. */ + if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) < + SMALL_MAXCLASS) { tcache_maxclass = SMALL_MAXCLASS; - else if ((1U << opt_lg_tcache_max) > arena_maxclass) - tcache_maxclass = arena_maxclass; - else - tcache_maxclass = (1U << opt_lg_tcache_max); + } else { + tcache_maxclass = (ZU(1) << opt_lg_tcache_max); + } - nhbins = NBINS + (tcache_maxclass >> LG_PAGE); + if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES, + malloc_mutex_rank_exclusive)) { + return true; + } + + nhbins = sz_size2index(tcache_maxclass) + 1; /* Initialize tcache_bin_info. 
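tcaches_create(), tcaches_flush() and tcaches_destroy() above are the backends of the "tcache.create", "tcache.flush" and "tcache.destroy" mallctls, and the returned index is what MALLOCX_TCACHE() encodes into mallocx()/dallocx() flags. A usage sketch, assuming an unprefixed jemalloc build:

#include <stddef.h>
#include <jemalloc/jemalloc.h>

int
main(void) {
	unsigned ind;
	size_t sz = sizeof(ind);

	if (mallctl("tcache.create", &ind, &sz, NULL, 0) != 0) {
		return 1;
	}
	/* Allocate and free through the explicit cache tcaches[ind]. */
	void *p = mallocx(64, MALLOCX_TCACHE(ind));
	if (p != NULL) {
		dallocx(p, MALLOCX_TCACHE(ind));
	}
	mallctl("tcache.destroy", NULL, NULL, &ind, sizeof(ind));
	return 0;
}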
*/ - tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins * - sizeof(tcache_bin_info_t)); - if (tcache_bin_info == NULL) - return (true); + tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins + * sizeof(tcache_bin_info_t), CACHELINE); + if (tcache_bin_info == NULL) { + return true; + } stack_nelms = 0; + unsigned i; for (i = 0; i < NBINS; i++) { - if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MAX) { + if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) { + tcache_bin_info[i].ncached_max = + TCACHE_NSLOTS_SMALL_MIN; + } else if ((arena_bin_info[i].nregs << 1) <= + TCACHE_NSLOTS_SMALL_MAX) { tcache_bin_info[i].ncached_max = (arena_bin_info[i].nregs << 1); } else { @@ -465,15 +683,26 @@ tcache_boot0(void) stack_nelms += tcache_bin_info[i].ncached_max; } - return (false); + return false; } -bool -tcache_boot1(void) -{ +void +tcache_prefork(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_prefork(tsdn, &tcaches_mtx); + } +} - if (tcache_tsd_boot() || tcache_enabled_tsd_boot()) - return (true); +void +tcache_postfork_parent(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_parent(tsdn, &tcaches_mtx); + } +} - return (false); +void +tcache_postfork_child(tsdn_t *tsdn) { + if (!config_prof && opt_tcache) { + malloc_mutex_postfork_child(tsdn, &tcaches_mtx); + } } diff --git a/dep/jemalloc/src/ticker.c b/dep/jemalloc/src/ticker.c new file mode 100644 index 00000000000..d7b8cd26c06 --- /dev/null +++ b/dep/jemalloc/src/ticker.c @@ -0,0 +1,3 @@ +#define JEMALLOC_TICKER_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" diff --git a/dep/jemalloc/src/tsd.c b/dep/jemalloc/src/tsd.c index 700caabfe47..f968992f2b5 100644 --- a/dep/jemalloc/src/tsd.c +++ b/dep/jemalloc/src/tsd.c @@ -1,5 +1,10 @@ -#define JEMALLOC_TSD_C_ -#include "jemalloc/internal/jemalloc_internal.h" +#define JEMALLOC_TSD_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/mutex.h" +#include "jemalloc/internal/rtree.h" /******************************************************************************/ /* Data. */ @@ -7,28 +12,148 @@ static unsigned ncleanups; static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX]; +#ifdef JEMALLOC_MALLOC_THREAD_CLEANUP +__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; +__thread bool JEMALLOC_TLS_MODEL tsd_initialized = false; +bool tsd_booted = false; +#elif (defined(JEMALLOC_TLS)) +__thread tsd_t JEMALLOC_TLS_MODEL tsd_tls = TSD_INITIALIZER; +pthread_key_t tsd_tsd; +bool tsd_booted = false; +#elif (defined(_WIN32)) +DWORD tsd_tsd; +tsd_wrapper_t tsd_boot_wrapper = {false, TSD_INITIALIZER}; +bool tsd_booted = false; +#else + +/* + * This contains a mutex, but it's pretty convenient to allow the mutex code to + * have a dependency on tsd. So we define the struct here, and only refer to it + * by pointer in the header. 
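The comment above describes a standard cycle-breaker: the header exposes tsd_init_head_t only as an incomplete type and passes it by pointer, so the tsd header need not pull in the mutex definitions that themselves depend on tsd. In miniature (pthread_mutex_t standing in for malloc_mutex_t):

/* header (sketch): only an incomplete type crosses the boundary */
typedef struct tsd_init_head_s tsd_init_head_t;
int head_is_ready(const tsd_init_head_t *head);

/* source file: the full definition is free to use the mutex type */
#include <pthread.h>

struct tsd_init_head_s {
	pthread_mutex_t lock;
	int ready;
};

int
head_is_ready(const tsd_init_head_t *head) {
	return head->ready;
}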
+ */ +struct tsd_init_head_s { + ql_head(tsd_init_block_t) blocks; + malloc_mutex_t lock; +}; + +pthread_key_t tsd_tsd; +tsd_init_head_t tsd_init_head = { + ql_head_initializer(blocks), + MALLOC_MUTEX_INITIALIZER +}; +tsd_wrapper_t tsd_boot_wrapper = { + false, + TSD_INITIALIZER +}; +bool tsd_booted = false; +#endif + + /******************************************************************************/ -void * -malloc_tsd_malloc(size_t size) -{ +void +tsd_slow_update(tsd_t *tsd) { + if (tsd_nominal(tsd)) { + if (malloc_slow || !tsd_tcache_enabled_get(tsd) || + tsd_reentrancy_level_get(tsd) > 0) { + tsd->state = tsd_state_nominal_slow; + } else { + tsd->state = tsd_state_nominal; + } + } +} + +static bool +tsd_data_init(tsd_t *tsd) { + /* + * We initialize the rtree context first (before the tcache), since the + * tcache initialization depends on it. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); - /* Avoid choose_arena() in order to dodge bootstrapping issues. */ - return (arena_malloc(arenas[0], size, false, false)); + return tsd_tcache_enabled_data_init(tsd); } -void -malloc_tsd_dalloc(void *wrapper) -{ +static void +assert_tsd_data_cleanup_done(tsd_t *tsd) { + assert(!tsd_nominal(tsd)); + assert(*tsd_arenap_get_unsafe(tsd) == NULL); + assert(*tsd_iarenap_get_unsafe(tsd) == NULL); + assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true); + assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL); + assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false); + assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL); +} + +static bool +tsd_data_init_nocleanup(tsd_t *tsd) { + assert(tsd->state == tsd_state_reincarnated || + tsd->state == tsd_state_minimal_initialized); + /* + * During reincarnation, there is no guarantee that the cleanup function + * will be called (deallocation may happen after all tsd destructors). + * We set up tsd in a way that no cleanup is needed. + */ + rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd)); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + *tsd_tcache_enabledp_get_unsafe(tsd) = false; + *tsd_reentrancy_levelp_get(tsd) = 1; + assert_tsd_data_cleanup_done(tsd); - idalloct(wrapper, false); + return false; } -void -malloc_tsd_no_cleanup(void *arg) -{ +tsd_t * +tsd_fetch_slow(tsd_t *tsd, bool minimal) { + assert(!tsd_fast(tsd)); + + if (tsd->state == tsd_state_nominal_slow) { + /* On slow path but no work needed. */ + assert(malloc_slow || !tsd_tcache_enabled_get(tsd) || + tsd_reentrancy_level_get(tsd) > 0 || + *tsd_arenas_tdata_bypassp_get(tsd)); + } else if (tsd->state == tsd_state_uninitialized) { + if (!minimal) { + tsd->state = tsd_state_nominal; + tsd_slow_update(tsd); + /* Trigger cleanup handler registration. */ + tsd_set(tsd); + tsd_data_init(tsd); + } else { + tsd->state = tsd_state_minimal_initialized; + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } + } else if (tsd->state == tsd_state_minimal_initialized) { + if (!minimal) { + /* Switch to fully initialized. 
*/ + tsd->state = tsd_state_nominal; + assert(*tsd_reentrancy_levelp_get(tsd) >= 1); + (*tsd_reentrancy_levelp_get(tsd))--; + tsd_slow_update(tsd); + tsd_data_init(tsd); + } else { + assert_tsd_data_cleanup_done(tsd); + } + } else if (tsd->state == tsd_state_purgatory) { + tsd->state = tsd_state_reincarnated; + tsd_set(tsd); + tsd_data_init_nocleanup(tsd); + } else { + assert(tsd->state == tsd_state_reincarnated); + } + + return tsd; +} + +void * +malloc_tsd_malloc(size_t size) { + return a0malloc(CACHELINE_CEILING(size)); +} - not_reached(); +void +malloc_tsd_dalloc(void *wrapper) { + a0dalloc(wrapper); } #if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32) @@ -36,21 +161,22 @@ malloc_tsd_no_cleanup(void *arg) JEMALLOC_EXPORT #endif void -_malloc_thread_cleanup(void) -{ +_malloc_thread_cleanup(void) { bool pending[MALLOC_TSD_CLEANUPS_MAX], again; unsigned i; - for (i = 0; i < ncleanups; i++) + for (i = 0; i < ncleanups; i++) { pending[i] = true; + } do { again = false; for (i = 0; i < ncleanups; i++) { if (pending[i]) { pending[i] = cleanups[i](); - if (pending[i]) + if (pending[i]) { again = true; + } } } } while (again); @@ -58,26 +184,92 @@ _malloc_thread_cleanup(void) #endif void -malloc_tsd_cleanup_register(bool (*f)(void)) -{ - +malloc_tsd_cleanup_register(bool (*f)(void)) { assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX); cleanups[ncleanups] = f; ncleanups++; } +static void +tsd_do_data_cleanup(tsd_t *tsd) { + prof_tdata_cleanup(tsd); + iarena_cleanup(tsd); + arena_cleanup(tsd); + arenas_tdata_cleanup(tsd); + tcache_cleanup(tsd); + witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd)); +} + void -malloc_tsd_boot(void) -{ +tsd_cleanup(void *arg) { + tsd_t *tsd = (tsd_t *)arg; + + switch (tsd->state) { + case tsd_state_uninitialized: + /* Do nothing. */ + break; + case tsd_state_minimal_initialized: + /* This implies the thread only did free() in its life time. */ + /* Fall through. */ + case tsd_state_reincarnated: + /* + * Reincarnated means another destructor deallocated memory + * after the destructor was called. Cleanup isn't required but + * is still called for testing and completeness. + */ + assert_tsd_data_cleanup_done(tsd); + /* Fall through. */ + case tsd_state_nominal: + case tsd_state_nominal_slow: + tsd_do_data_cleanup(tsd); + tsd->state = tsd_state_purgatory; + tsd_set(tsd); + break; + case tsd_state_purgatory: + /* + * The previous time this destructor was called, we set the + * state to tsd_state_purgatory so that other destructors + * wouldn't cause re-creation of the tsd. This time, do + * nothing, and do not request another callback. + */ + break; + default: + not_reached(); + } +#ifdef JEMALLOC_JET + test_callback_t test_callback = *tsd_test_callbackp_get_unsafe(tsd); + int *data = tsd_test_datap_get_unsafe(tsd); + if (test_callback != NULL) { + test_callback(data); + } +#endif +} + +tsd_t * +malloc_tsd_boot0(void) { + tsd_t *tsd; ncleanups = 0; + if (tsd_boot0()) { + return NULL; + } + tsd = tsd_fetch(); + *tsd_arenas_tdata_bypassp_get(tsd) = true; + return tsd; +} + +void +malloc_tsd_boot1(void) { + tsd_boot1(); + tsd_t *tsd = tsd_fetch(); + /* malloc_slow has been set properly. Update tsd_slow. 
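The purgatory/reincarnated handling in tsd_cleanup() above leans on a POSIX guarantee: if a key's destructor leaves a non-NULL value behind, the destructor round is repeated (up to PTHREAD_DESTRUCTOR_ITERATIONS). The trick in isolation; compile with -lpthread:

#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;

static void
dtor(void *arg) {
	if (arg == (void *)1) {
		/* First pass: enter "purgatory" and re-arm the destructor so
		 * we run once more and can detect reincarnation. */
		pthread_setspecific(key, (void *)2);
	} else {
		printf("final cleanup\n");	/* second pass: stay dead */
	}
}

static void *
thread_main(void *unused) {
	(void)unused;
	pthread_setspecific(key, (void *)1);
	return NULL;
}

int
main(void) {
	pthread_t t;

	pthread_key_create(&key, dtor);
	pthread_create(&t, NULL, thread_main, NULL);
	pthread_join(t, NULL);
	return 0;
}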
*/ + tsd_slow_update(tsd); + *tsd_arenas_tdata_bypassp_get(tsd) = false; } #ifdef _WIN32 static BOOL WINAPI -_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) -{ - +_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { switch (fdwReason) { #ifdef JEMALLOC_LAZY_LOCK case DLL_THREAD_ATTACH: @@ -90,52 +282,60 @@ _tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) default: break; } - return (true); + return true; } +/* + * We need to be able to say "read" here (in the "pragma section"), but have + * hooked "read". We won't read for the rest of the file, so we can get away + * with unhooking. + */ +#ifdef read +# undef read +#endif + #ifdef _MSC_VER # ifdef _M_IX86 # pragma comment(linker, "/INCLUDE:__tls_used") +# pragma comment(linker, "/INCLUDE:_tls_callback") # else # pragma comment(linker, "/INCLUDE:_tls_used") +# pragma comment(linker, "/INCLUDE:tls_callback") # endif # pragma section(".CRT$XLY",long,read) #endif JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used) -static const BOOL (WINAPI *tls_callback)(HINSTANCE hinstDLL, +BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) = _tls_callback; #endif #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void * -tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) -{ +tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block) { pthread_t self = pthread_self(); tsd_init_block_t *iter; /* Check whether this thread has already inserted into the list. */ - malloc_mutex_lock(&head->lock); + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_foreach(iter, &head->blocks, link) { if (iter->thread == self) { - malloc_mutex_unlock(&head->lock); - return (iter->data); + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return iter->data; } } /* Insert block into list. 
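tsd_init_check_recursion() exists because TSD initialization can reenter itself (pthread_setspecific() may allocate, and the allocator needs TSD). Each thread publishes an in-progress block; a nested call recognizes its own thread id and returns the partially built data instead of recursing forever. The guard reduced to its essentials:

#include <pthread.h>
#include <stdio.h>

static pthread_t init_owner;
static int init_active = 0;	/* the real code keeps a mutex-guarded list */

static int
init_enter(void) {
	if (init_active && pthread_equal(init_owner, pthread_self())) {
		return 1;	/* reentered from the same thread: bail out */
	}
	init_owner = pthread_self();
	init_active = 1;
	return 0;
}

int
main(void) {
	printf("%d %d\n", init_enter(), init_enter());	/* prints: 0 1 */
	return 0;
}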
*/ ql_elm_new(block, link); block->thread = self; ql_tail_insert(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); - return (NULL); + malloc_mutex_unlock(TSDN_NULL, &head->lock); + return NULL; } void -tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) -{ - - malloc_mutex_lock(&head->lock); +tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block) { + malloc_mutex_lock(TSDN_NULL, &head->lock); ql_remove(&head->blocks, block, link); - malloc_mutex_unlock(&head->lock); + malloc_mutex_unlock(TSDN_NULL, &head->lock); } #endif diff --git a/dep/jemalloc/src/witness.c b/dep/jemalloc/src/witness.c new file mode 100644 index 00000000000..f42b72ad1a2 --- /dev/null +++ b/dep/jemalloc/src/witness.c @@ -0,0 +1,100 @@ +#define JEMALLOC_WITNESS_C_ +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" +#include "jemalloc/internal/malloc_io.h" + +void +witness_init(witness_t *witness, const char *name, witness_rank_t rank, + witness_comp_t *comp, void *opaque) { + witness->name = name; + witness->rank = rank; + witness->comp = comp; + witness->opaque = opaque; +} + +static void +witness_lock_error_impl(const witness_list_t *witnesses, + const witness_t *witness) { + witness_t *w; + + malloc_printf("<jemalloc>: Lock rank order reversal:"); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf(" %s(%u)\n", witness->name, witness->rank); + abort(); +} +witness_lock_error_t *JET_MUTABLE witness_lock_error = witness_lock_error_impl; + +static void +witness_owner_error_impl(const witness_t *witness) { + malloc_printf("<jemalloc>: Should own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +witness_owner_error_t *JET_MUTABLE witness_owner_error = + witness_owner_error_impl; + +static void +witness_not_owner_error_impl(const witness_t *witness) { + malloc_printf("<jemalloc>: Should not own %s(%u)\n", witness->name, + witness->rank); + abort(); +} +witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error = + witness_not_owner_error_impl; + +static void +witness_depth_error_impl(const witness_list_t *witnesses, + witness_rank_t rank_inclusive, unsigned depth) { + witness_t *w; + + malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth, + (depth != 1) ? "s" : "", rank_inclusive); + ql_foreach(w, witnesses, link) { + malloc_printf(" %s(%u)", w->name, w->rank); + } + malloc_printf("\n"); + abort(); +} +witness_depth_error_t *JET_MUTABLE witness_depth_error = + witness_depth_error_impl; + +void +witnesses_cleanup(witness_tsd_t *witness_tsd) { + witness_assert_lockless(witness_tsd_tsdn(witness_tsd)); + + /* Do nothing. 
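For context on what these hooks service: every jemalloc mutex carries a witness with a rank (see WITNESS_RANK_TCACHES earlier in this diff), and in debug builds acquiring a lock out of rank order aborts via witness_lock_error(). The invariant, simplified to strictly increasing ranks and stripped of jemalloc's plumbing (C11 for _Thread_local):

#include <assert.h>

static _Thread_local unsigned max_held_rank = 0;

static void
witness_style_acquire(unsigned rank) {
	/* A rank-order reversal is a potential deadlock; catch it now. */
	assert(rank > max_held_rank);
	max_held_rank = rank;
}

int
main(void) {
	witness_style_acquire(1);
	witness_style_acquire(5);	/* ok: ranks strictly increase */
	return 0;
}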
*/ +} + +void +witness_prefork(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } + witness_tsd->forking = true; +} + +void +witness_postfork_parent(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } + witness_tsd->forking = false; +} + +void +witness_postfork_child(witness_tsd_t *witness_tsd) { + if (!config_debug) { + return; + } +#ifndef JEMALLOC_MUTEX_INIT_CB + witness_list_t *witnesses; + + witnesses = &witness_tsd->witnesses; + ql_new(witnesses); +#endif + witness_tsd->forking = false; +} diff --git a/dep/jemalloc/src/zone.c b/dep/jemalloc/src/zone.c index e0302ef4edc..9d3b7b49522 100644 --- a/dep/jemalloc/src/zone.c +++ b/dep/jemalloc/src/zone.c @@ -1,10 +1,83 @@ -#include "jemalloc/internal/jemalloc_internal.h" +#include "jemalloc/internal/jemalloc_preamble.h" +#include "jemalloc/internal/jemalloc_internal_includes.h" + +#include "jemalloc/internal/assert.h" + #ifndef JEMALLOC_ZONE # error "This source file is for zones on Darwin (OS X)." #endif +/* Definitions of the following structs in malloc/malloc.h might be too old + * for the built binary to run on newer versions of OSX. So use the newest + * possible version of those structs. + */ +typedef struct _malloc_zone_t { + void *reserved1; + void *reserved2; + size_t (*size)(struct _malloc_zone_t *, const void *); + void *(*malloc)(struct _malloc_zone_t *, size_t); + void *(*calloc)(struct _malloc_zone_t *, size_t, size_t); + void *(*valloc)(struct _malloc_zone_t *, size_t); + void (*free)(struct _malloc_zone_t *, void *); + void *(*realloc)(struct _malloc_zone_t *, void *, size_t); + void (*destroy)(struct _malloc_zone_t *); + const char *zone_name; + unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned); + void (*batch_free)(struct _malloc_zone_t *, void **, unsigned); + struct malloc_introspection_t *introspect; + unsigned version; + void *(*memalign)(struct _malloc_zone_t *, size_t, size_t); + void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t); + size_t (*pressure_relief)(struct _malloc_zone_t *, size_t); +} malloc_zone_t; + +typedef struct { + vm_address_t address; + vm_size_t size; +} vm_range_t; + +typedef struct malloc_statistics_t { + unsigned blocks_in_use; + size_t size_in_use; + size_t max_size_in_use; + size_t size_allocated; +} malloc_statistics_t; + +typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **); + +typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned); + +typedef struct malloc_introspection_t { + kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t); + size_t (*good_size)(malloc_zone_t *, size_t); + boolean_t (*check)(malloc_zone_t *); + void (*print)(malloc_zone_t *, boolean_t); + void (*log)(malloc_zone_t *, void *); + void (*force_lock)(malloc_zone_t *); + void (*force_unlock)(malloc_zone_t *); + void (*statistics)(malloc_zone_t *, malloc_statistics_t *); + boolean_t (*zone_locked)(malloc_zone_t *); + boolean_t (*enable_discharge_checking)(malloc_zone_t *); + boolean_t (*disable_discharge_checking)(malloc_zone_t *); + void (*discharge)(malloc_zone_t *, void *); +#ifdef __BLOCKS__ + void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *)); +#else + void *enumerate_unavailable_without_blocks; +#endif + void (*reinit_lock)(malloc_zone_t *); +} malloc_introspection_t; + +extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *); + +extern malloc_zone_t 
*malloc_default_zone(void); + +extern void malloc_zone_register(malloc_zone_t *zone); + +extern void malloc_zone_unregister(malloc_zone_t *zone); + /* - * The malloc_default_purgeable_zone function is only available on >= 10.6. + * The malloc_default_purgeable_zone() function is only available on >= 10.6. * We need to check whether it is present at runtime, thus the weak_import. */ extern malloc_zone_t *malloc_default_purgeable_zone(void) @@ -13,30 +86,42 @@ JEMALLOC_ATTR(weak_import); /******************************************************************************/ /* Data. */ -static malloc_zone_t zone; -static struct malloc_introspection_t zone_introspect; +static malloc_zone_t *default_zone, *purgeable_zone; +static malloc_zone_t jemalloc_zone; +static struct malloc_introspection_t jemalloc_zone_introspect; /******************************************************************************/ /* Function prototypes for non-inline static functions. */ -static size_t zone_size(malloc_zone_t *zone, void *ptr); +static size_t zone_size(malloc_zone_t *zone, const void *ptr); static void *zone_malloc(malloc_zone_t *zone, size_t size); static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size); static void *zone_valloc(malloc_zone_t *zone, size_t size); static void zone_free(malloc_zone_t *zone, void *ptr); static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size); -#if (JEMALLOC_ZONE_VERSION >= 5) static void *zone_memalign(malloc_zone_t *zone, size_t alignment, -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) size_t size); static void zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size); -#endif -static void *zone_destroy(malloc_zone_t *zone); +static void zone_destroy(malloc_zone_t *zone); +static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, + void **results, unsigned num_requested); +static void zone_batch_free(struct _malloc_zone_t *zone, + void **to_be_freed, unsigned num_to_be_freed); +static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal); static size_t zone_good_size(malloc_zone_t *zone, size_t size); +static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder); +static boolean_t zone_check(malloc_zone_t *zone); +static void zone_print(malloc_zone_t *zone, boolean_t verbose); +static void zone_log(malloc_zone_t *zone, void *address); static void zone_force_lock(malloc_zone_t *zone); static void zone_force_unlock(malloc_zone_t *zone); +static void zone_statistics(malloc_zone_t *zone, + malloc_statistics_t *stats); +static boolean_t zone_locked(malloc_zone_t *zone); +static void zone_reinit_lock(malloc_zone_t *zone); /******************************************************************************/ /* @@ -44,9 +129,7 @@ static void zone_force_unlock(malloc_zone_t *zone); */ static size_t -zone_size(malloc_zone_t *zone, void *ptr) -{ - +zone_size(malloc_zone_t *zone, const void *ptr) { /* * There appear to be places within Darwin (such as setenv(3)) that * cause calls to this function with pointers that *no* zone owns. If @@ -54,40 +137,33 @@ zone_size(malloc_zone_t *zone, void *ptr) * our zone into two parts, and use one as the default allocator and * the other as the default deallocator/reallocator. Since that will * not work in practice, we must check all pointers to assure that they - * reside within a mapped chunk before determining size. + * reside within a mapped extent before determining size. 
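This sizing call doubles as the ownership test for every hook below: a nonzero ivsalloc() means the pointer lies in a jemalloc extent, so free/realloc route to je_*, while zero sends the call through to the system allocator. The dispatch shape with a stand-in ownership predicate (the bounds check is illustrative only, not how ivsalloc() works):

#include <stdio.h>
#include <stdlib.h>

static const char *ours_lo, *ours_hi;	/* pretend extent bounds */

/* Stand-in for ivsalloc(): nonzero iff our allocator owns ptr. */
static size_t
our_salloc(const void *ptr) {
	const char *p = (const char *)ptr;
	return (p >= ours_lo && p < ours_hi) ? 16 : 0;
}

static void
zone_style_free(void *ptr) {
	if (our_salloc(ptr) != 0) {
		printf("routed to je_free\n");	/* je_free(ptr) in the real hook */
	} else {
		free(ptr);	/* foreign pointer, e.g. one created by setenv(3) */
	}
}

int
main(void) {
	char arena[16];
	ours_lo = arena;
	ours_hi = arena + sizeof(arena);
	zone_style_free(arena);		/* ours */
	zone_style_free(malloc(8));	/* the system's */
	return 0;
}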
*/ - return (ivsalloc(ptr, config_prof)); + return ivsalloc(tsdn_fetch(), ptr); } static void * -zone_malloc(malloc_zone_t *zone, size_t size) -{ - - return (je_malloc(size)); +zone_malloc(malloc_zone_t *zone, size_t size) { + return je_malloc(size); } static void * -zone_calloc(malloc_zone_t *zone, size_t num, size_t size) -{ - - return (je_calloc(num, size)); +zone_calloc(malloc_zone_t *zone, size_t num, size_t size) { + return je_calloc(num, size); } static void * -zone_valloc(malloc_zone_t *zone, size_t size) -{ +zone_valloc(malloc_zone_t *zone, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, PAGE, size); - return (ret); + return ret; } static void -zone_free(malloc_zone_t *zone, void *ptr) -{ - - if (ivsalloc(ptr, config_prof) != 0) { +zone_free(malloc_zone_t *zone, void *ptr) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { je_free(ptr); return; } @@ -96,163 +172,280 @@ zone_free(malloc_zone_t *zone, void *ptr) } static void * -zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) -{ - - if (ivsalloc(ptr, config_prof) != 0) - return (je_realloc(ptr, size)); +zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) { + if (ivsalloc(tsdn_fetch(), ptr) != 0) { + return je_realloc(ptr, size); + } - return (realloc(ptr, size)); + return realloc(ptr, size); } -#if (JEMALLOC_ZONE_VERSION >= 5) static void * -zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) -{ +zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) { void *ret = NULL; /* Assignment avoids useless compiler warning. */ je_posix_memalign(&ret, alignment, size); - return (ret); + return ret; } -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) static void -zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) -{ +zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) { + size_t alloc_size; - if (ivsalloc(ptr, config_prof) != 0) { - assert(ivsalloc(ptr, config_prof) == size); + alloc_size = ivsalloc(tsdn_fetch(), ptr); + if (alloc_size != 0) { + assert(alloc_size == size); je_free(ptr); return; } free(ptr); } -#endif - -static void * -zone_destroy(malloc_zone_t *zone) -{ +static void +zone_destroy(malloc_zone_t *zone) { /* This function should never be called. 
*/ not_reached(); - return (NULL); +} + +static unsigned +zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results, + unsigned num_requested) { + unsigned i; + + for (i = 0; i < num_requested; i++) { + results[i] = je_malloc(size); + if (!results[i]) + break; + } + + return i; +} + +static void +zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed, + unsigned num_to_be_freed) { + unsigned i; + + for (i = 0; i < num_to_be_freed; i++) { + zone_free(zone, to_be_freed[i]); + to_be_freed[i] = NULL; + } } static size_t -zone_good_size(malloc_zone_t *zone, size_t size) -{ +zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) { + return 0; +} - if (size == 0) +static size_t +zone_good_size(malloc_zone_t *zone, size_t size) { + if (size == 0) { size = 1; - return (s2u(size)); + } + return sz_s2u(size); +} + +static kern_return_t +zone_enumerator(task_t task, void *data, unsigned type_mask, + vm_address_t zone_address, memory_reader_t reader, + vm_range_recorder_t recorder) { + return KERN_SUCCESS; +} + +static boolean_t +zone_check(malloc_zone_t *zone) { + return true; +} + +static void +zone_print(malloc_zone_t *zone, boolean_t verbose) { } static void -zone_force_lock(malloc_zone_t *zone) -{ +zone_log(malloc_zone_t *zone, void *address) { +} - if (isthreaded) +static void +zone_force_lock(malloc_zone_t *zone) { + if (isthreaded) { jemalloc_prefork(); + } +} + +static void +zone_force_unlock(malloc_zone_t *zone) { + /* + * Call jemalloc_postfork_child() rather than + * jemalloc_postfork_parent(), because this function is executed by both + * parent and child. The parent can tolerate having state + * reinitialized, but the child cannot unlock mutexes that were locked + * by the parent. + */ + if (isthreaded) { + jemalloc_postfork_child(); + } +} + +static void +zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { + /* We make no effort to actually fill the values */ + stats->blocks_in_use = 0; + stats->size_in_use = 0; + stats->max_size_in_use = 0; + stats->size_allocated = 0; +} + +static boolean_t +zone_locked(malloc_zone_t *zone) { + /* Pretend no lock is being held */ + return false; +} + +static void +zone_reinit_lock(malloc_zone_t *zone) { + /* As of OSX 10.12, this function is only used when force_unlock would + * be used if the zone version were < 9. So just use force_unlock. 
*/ + zone_force_unlock(zone); +} + +static void +zone_init(void) { + jemalloc_zone.size = zone_size; + jemalloc_zone.malloc = zone_malloc; + jemalloc_zone.calloc = zone_calloc; + jemalloc_zone.valloc = zone_valloc; + jemalloc_zone.free = zone_free; + jemalloc_zone.realloc = zone_realloc; + jemalloc_zone.destroy = zone_destroy; + jemalloc_zone.zone_name = "jemalloc_zone"; + jemalloc_zone.batch_malloc = zone_batch_malloc; + jemalloc_zone.batch_free = zone_batch_free; + jemalloc_zone.introspect = &jemalloc_zone_introspect; + jemalloc_zone.version = 9; + jemalloc_zone.memalign = zone_memalign; + jemalloc_zone.free_definite_size = zone_free_definite_size; + jemalloc_zone.pressure_relief = zone_pressure_relief; + + jemalloc_zone_introspect.enumerator = zone_enumerator; + jemalloc_zone_introspect.good_size = zone_good_size; + jemalloc_zone_introspect.check = zone_check; + jemalloc_zone_introspect.print = zone_print; + jemalloc_zone_introspect.log = zone_log; + jemalloc_zone_introspect.force_lock = zone_force_lock; + jemalloc_zone_introspect.force_unlock = zone_force_unlock; + jemalloc_zone_introspect.statistics = zone_statistics; + jemalloc_zone_introspect.zone_locked = zone_locked; + jemalloc_zone_introspect.enable_discharge_checking = NULL; + jemalloc_zone_introspect.disable_discharge_checking = NULL; + jemalloc_zone_introspect.discharge = NULL; +#ifdef __BLOCKS__ + jemalloc_zone_introspect.enumerate_discharged_pointers = NULL; +#else + jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL; +#endif + jemalloc_zone_introspect.reinit_lock = zone_reinit_lock; +} + +static malloc_zone_t * +zone_default_get(void) { + malloc_zone_t **zones = NULL; + unsigned int num_zones = 0; + + /* + * On OSX 10.12, malloc_default_zone returns a special zone that is not + * present in the list of registered zones. That zone uses a "lite zone" + * if one is present (apparently enabled when malloc stack logging is + * enabled), or the first registered zone otherwise. In practice this + * means unless malloc stack logging is enabled, the first registered + * zone is the default. So get the list of zones to get the first one, + * instead of relying on malloc_default_zone. + */ + if (KERN_SUCCESS != malloc_get_all_zones(0, NULL, + (vm_address_t**)&zones, &num_zones)) { + /* + * Reset the value in case the failure happened after it was + * set. + */ + num_zones = 0; + } + + if (num_zones) { + return zones[0]; + } + + return malloc_default_zone(); } +/* As written, this function can only promote jemalloc_zone. */ static void -zone_force_unlock(malloc_zone_t *zone) -{ +zone_promote(void) { + malloc_zone_t *zone; + + do { + /* + * Unregister and reregister the default zone. On OSX >= 10.6, + * unregistering takes the last registered zone and places it + * at the location of the specified zone. Unregistering the + * default zone thus makes the last registered one the default. + * On OSX < 10.6, unregistering shifts all registered zones. + * The first registered zone then becomes the default. + */ + malloc_zone_unregister(default_zone); + malloc_zone_register(default_zone); - if (isthreaded) - jemalloc_postfork_parent(); + /* + * On OSX 10.6, having the default purgeable zone appear before + * the default zone makes some things crash because it thinks it + * owns the default zone allocated pointers. We thus + * unregister/re-register it in order to ensure it's always + * after the default zone. On OSX < 10.6, there is no purgeable + * zone, so this does nothing. 
On OSX >= 10.6, unregistering + * replaces the purgeable zone with the last registered zone + * above, i.e. the default zone. Registering it again then puts + * it at the end, obviously after the default zone. + */ + if (purgeable_zone != NULL) { + malloc_zone_unregister(purgeable_zone); + malloc_zone_register(purgeable_zone); + } + + zone = zone_default_get(); + } while (zone != &jemalloc_zone); } JEMALLOC_ATTR(constructor) void -register_zone(void) -{ - +zone_register(void) { /* * If something else replaced the system default zone allocator, don't * register jemalloc's. */ - malloc_zone_t *default_zone = malloc_default_zone(); - if (!default_zone->zone_name || - strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) { + default_zone = zone_default_get(); + if (!default_zone->zone_name || strcmp(default_zone->zone_name, + "DefaultMallocZone") != 0) { return; } - zone.size = (void *)zone_size; - zone.malloc = (void *)zone_malloc; - zone.calloc = (void *)zone_calloc; - zone.valloc = (void *)zone_valloc; - zone.free = (void *)zone_free; - zone.realloc = (void *)zone_realloc; - zone.destroy = (void *)zone_destroy; - zone.zone_name = "jemalloc_zone"; - zone.batch_malloc = NULL; - zone.batch_free = NULL; - zone.introspect = &zone_introspect; - zone.version = JEMALLOC_ZONE_VERSION; -#if (JEMALLOC_ZONE_VERSION >= 5) - zone.memalign = zone_memalign; -#endif -#if (JEMALLOC_ZONE_VERSION >= 6) - zone.free_definite_size = zone_free_definite_size; -#endif -#if (JEMALLOC_ZONE_VERSION >= 8) - zone.pressure_relief = NULL; -#endif - - zone_introspect.enumerator = NULL; - zone_introspect.good_size = (void *)zone_good_size; - zone_introspect.check = NULL; - zone_introspect.print = NULL; - zone_introspect.log = NULL; - zone_introspect.force_lock = (void *)zone_force_lock; - zone_introspect.force_unlock = (void *)zone_force_unlock; - zone_introspect.statistics = NULL; -#if (JEMALLOC_ZONE_VERSION >= 6) - zone_introspect.zone_locked = NULL; -#endif -#if (JEMALLOC_ZONE_VERSION >= 7) - zone_introspect.enable_discharge_checking = NULL; - zone_introspect.disable_discharge_checking = NULL; - zone_introspect.discharge = NULL; -#ifdef __BLOCKS__ - zone_introspect.enumerate_discharged_pointers = NULL; -#else - zone_introspect.enumerate_unavailable_without_blocks = NULL; -#endif -#endif - /* * The default purgeable zone is created lazily by OSX's libc. It uses * the default zone when it is created for "small" allocations * (< 15 KiB), but assumes the default zone is a scalable_zone. This * obviously fails when the default zone is the jemalloc zone, so - * malloc_default_purgeable_zone is called beforehand so that the + * malloc_default_purgeable_zone() is called beforehand so that the * default purgeable zone is created when the default zone is still * a scalable_zone. As purgeable zones only exist on >= 10.6, we need * to check for the existence of malloc_default_purgeable_zone() at * run time. */ - if (malloc_default_purgeable_zone != NULL) - malloc_default_purgeable_zone(); + purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL : + malloc_default_purgeable_zone(); /* Register the custom zone. At this point it won't be the default. */ - malloc_zone_register(&zone); + zone_init(); + malloc_zone_register(&jemalloc_zone); - /* - * Unregister and reregister the default zone. On OSX >= 10.6, - * unregistering takes the last registered zone and places it at the - * location of the specified zone. Unregistering the default zone thus - * makes the last registered one the default. 
On OSX < 10.6, - * unregistering shifts all registered zones. The first registered zone - * then becomes the default. - */ - do { - default_zone = malloc_default_zone(); - malloc_zone_unregister(default_zone); - malloc_zone_register(default_zone); - } while (malloc_default_zone() != &zone); + /* Promote the custom zone to be default. */ + zone_promote(); } diff --git a/dep/recastnavigation/Detour/CMakeLists.txt b/dep/recastnavigation/Detour/CMakeLists.txt index 0fed4afc7ec..078e39f0a80 100644 --- a/dep/recastnavigation/Detour/CMakeLists.txt +++ b/dep/recastnavigation/Detour/CMakeLists.txt @@ -10,6 +10,7 @@ set(Detour_STAT_SRCS Source/DetourAlloc.cpp + Source/DetourAssert.cpp Source/DetourCommon.cpp Source/DetourNavMesh.cpp Source/DetourNavMeshBuilder.cpp diff --git a/dep/recastnavigation/Detour/Include/DetourAssert.h b/dep/recastnavigation/Detour/Include/DetourAssert.h index 3cf652288fa..e05fd66fa57 100644 --- a/dep/recastnavigation/Detour/Include/DetourAssert.h +++ b/dep/recastnavigation/Detour/Include/DetourAssert.h @@ -23,11 +23,34 @@ // Feel free to change the file and include your own implementation instead. #ifdef NDEBUG + // From http://cnicholson.net/2009/02/stupid-c-tricks-adventures-in-assert/ # define dtAssert(x) do { (void)sizeof(x); } while((void)(__LINE__==-1),false) + #else + +/// An assertion failure function. +// @param[in] expression asserted expression. +// @param[in] file Filename of the failed assertion. +// @param[in] line Line number of the failed assertion. +/// @see dtAssertFailSetCustom +typedef void (dtAssertFailFunc)(const char* expression, const char* file, int line); + +/// Sets the base custom assertion failure function to be used by Detour. +/// @param[in] assertFailFunc The function to be invoked in case of failure of #dtAssert +void dtAssertFailSetCustom(dtAssertFailFunc *assertFailFunc); + +/// Gets the base custom assertion failure function to be used by Detour. +dtAssertFailFunc* dtAssertFailGetCustom(); + # include <assert.h> -# define dtAssert assert +# define dtAssert(expression) \ + { \ + dtAssertFailFunc* failFunc = dtAssertFailGetCustom(); \ + if(failFunc == NULL) { assert(expression); } \ + else if(!(expression)) { (*failFunc)(#expression, __FILE__, __LINE__); } \ + } + #endif #endif // DETOURASSERT_H diff --git a/dep/recastnavigation/Detour/Include/DetourNavMesh.h b/dep/recastnavigation/Detour/Include/DetourNavMesh.h index f50f705a2c5..5f00804354f 100644 --- a/dep/recastnavigation/Detour/Include/DetourNavMesh.h +++ b/dep/recastnavigation/Detour/Include/DetourNavMesh.h @@ -647,7 +647,7 @@ private: dtPolyRef* polys, const int maxPolys) const; /// Find nearest polygon within a tile. dtPolyRef findNearestPolyInTile(const dtMeshTile* tile, const float* center, - const float* extents, float* nearestPt) const; + const float* halfExtents, float* nearestPt) const; /// Returns closest point on polygon. void closestPointOnPoly(dtPolyRef ref, const float* pos, float* closest, bool* posOverPoly) const; diff --git a/dep/recastnavigation/Detour/Include/DetourNavMeshQuery.h b/dep/recastnavigation/Detour/Include/DetourNavMeshQuery.h index 61541e83dfe..1c23e4857b3 100644 --- a/dep/recastnavigation/Detour/Include/DetourNavMeshQuery.h +++ b/dep/recastnavigation/Detour/Include/DetourNavMeshQuery.h @@ -316,33 +316,33 @@ public: /// Finds the polygon nearest to the specified center point. /// @param[in] center The center of the search box. [(x, y, z)] - /// @param[in] extents The search distance along each axis. 
[(x, y, z)] + /// @param[in] halfExtents The search distance along each axis. [(x, y, z)] /// @param[in] filter The polygon filter to apply to the query. /// @param[out] nearestRef The reference id of the nearest polygon. /// @param[out] nearestPt The nearest point on the polygon. [opt] [(x, y, z)] /// @returns The status flags for the query. - dtStatus findNearestPoly(const float* center, const float* extents, + dtStatus findNearestPoly(const float* center, const float* halfExtents, const dtQueryFilter* filter, dtPolyRef* nearestRef, float* nearestPt) const; /// Finds polygons that overlap the search box. /// @param[in] center The center of the search box. [(x, y, z)] - /// @param[in] extents The search distance along each axis. [(x, y, z)] + /// @param[in] halfExtents The search distance along each axis. [(x, y, z)] /// @param[in] filter The polygon filter to apply to the query. /// @param[out] polys The reference ids of the polygons that overlap the query box. /// @param[out] polyCount The number of polygons in the search result. /// @param[in] maxPolys The maximum number of polygons the search result can hold. /// @returns The status flags for the query. - dtStatus queryPolygons(const float* center, const float* extents, + dtStatus queryPolygons(const float* center, const float* halfExtents, const dtQueryFilter* filter, dtPolyRef* polys, int* polyCount, const int maxPolys) const; /// Finds polygons that overlap the search box. /// @param[in] center The center of the search box. [(x, y, z)] - /// @param[in] extents The search distance along each axis. [(x, y, z)] + /// @param[in] halfExtents The search distance along each axis. [(x, y, z)] /// @param[in] filter The polygon filter to apply to the query. /// @param[in] query The query. Polygons found will be batched together and passed to this query. - dtStatus queryPolygons(const float* center, const float* extents, + dtStatus queryPolygons(const float* center, const float* halfExtents, const dtQueryFilter* filter, dtPolyQuery* query) const; /// Finds the non-overlapping navigation polygons in the local neighbourhood around the center position. diff --git a/dep/recastnavigation/Detour/Source/DetourAssert.cpp b/dep/recastnavigation/Detour/Source/DetourAssert.cpp new file mode 100644 index 00000000000..5e019e0cfc5 --- /dev/null +++ b/dep/recastnavigation/Detour/Source/DetourAssert.cpp @@ -0,0 +1,35 @@ +// +// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not +// claim that you wrote the original software. If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. 
+// + +#include "DetourAssert.h" + +#ifndef NDEBUG + +static dtAssertFailFunc* sAssertFailFunc = 0; + +void dtAssertFailSetCustom(dtAssertFailFunc *assertFailFunc) +{ + sAssertFailFunc = assertFailFunc; +} + +dtAssertFailFunc* dtAssertFailGetCustom() +{ + return sAssertFailFunc; +} + +#endif diff --git a/dep/recastnavigation/Detour/Source/DetourCommon.cpp b/dep/recastnavigation/Detour/Source/DetourCommon.cpp index 26fe65c1781..41d0d7bd387 100644 --- a/dep/recastnavigation/Detour/Source/DetourCommon.cpp +++ b/dep/recastnavigation/Detour/Source/DetourCommon.cpp @@ -342,8 +342,8 @@ void dtRandomPointInConvexPoly(const float* pts, const int npts, float* areas, // Find sub triangle weighted by area. const float thr = s*areasum; float acc = 0.0f; - float u = 0.0f; - int tri = 0; + float u = 1.0f; + int tri = npts - 1; for (int i = 2; i < npts; i++) { const float dacc = areas[i]; if (thr >= acc && thr < (acc+dacc)) diff --git a/dep/recastnavigation/Detour/Source/DetourNavMesh.cpp b/dep/recastnavigation/Detour/Source/DetourNavMesh.cpp index f70fa04729a..b81a2567b2e 100644 --- a/dep/recastnavigation/Detour/Source/DetourNavMesh.cpp +++ b/dep/recastnavigation/Detour/Source/DetourNavMesh.cpp @@ -470,12 +470,12 @@ void dtNavMesh::connectExtOffMeshLinks(dtMeshTile* tile, dtMeshTile* target, int if (targetPoly->firstLink == DT_NULL_LINK) continue; - const float ext[3] = { targetCon->rad, target->header->walkableClimb, targetCon->rad }; + const float halfExtents[3] = { targetCon->rad, target->header->walkableClimb, targetCon->rad }; // Find polygon to connect to. const float* p = &targetCon->pos[3]; float nearestPt[3]; - dtPolyRef ref = findNearestPolyInTile(tile, p, ext, nearestPt); + dtPolyRef ref = findNearestPolyInTile(tile, p, halfExtents, nearestPt); if (!ref) continue; // findNearestPoly may return too optimistic results, further check to make sure. @@ -570,12 +570,12 @@ void dtNavMesh::baseOffMeshLinks(dtMeshTile* tile) dtOffMeshConnection* con = &tile->offMeshCons[i]; dtPoly* poly = &tile->polys[con->poly]; - const float ext[3] = { con->rad, tile->header->walkableClimb, con->rad }; + const float halfExtents[3] = { con->rad, tile->header->walkableClimb, con->rad }; // Find polygon to connect to. const float* p = &con->pos[0]; // First vertex float nearestPt[3]; - dtPolyRef ref = findNearestPolyInTile(tile, p, ext, nearestPt); + dtPolyRef ref = findNearestPolyInTile(tile, p, halfExtents, nearestPt); if (!ref) continue; // findNearestPoly may return too optimistic results, further check to make sure. if (dtSqr(nearestPt[0]-p[0])+dtSqr(nearestPt[2]-p[2]) > dtSqr(con->rad)) @@ -687,7 +687,7 @@ void dtNavMesh::closestPointOnPoly(dtPolyRef ref, const float* pos, float* close v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3]; } float h; - if (dtClosestHeightPointTriangle(pos, v[0], v[1], v[2], h)) + if (dtClosestHeightPointTriangle(closest, v[0], v[1], v[2], h)) { closest[1] = h; break; @@ -696,12 +696,12 @@ void dtNavMesh::closestPointOnPoly(dtPolyRef ref, const float* pos, float* close } dtPolyRef dtNavMesh::findNearestPolyInTile(const dtMeshTile* tile, - const float* center, const float* extents, + const float* center, const float* halfExtents, float* nearestPt) const { float bmin[3], bmax[3]; - dtVsub(bmin, center, extents); - dtVadd(bmax, center, extents); + dtVsub(bmin, center, halfExtents); + dtVadd(bmax, center, halfExtents); // Get nearby polygons from proximity grid. 
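// A minimal sketch of the halfExtents convention used to build this query
// box (the values below are illustrative assumptions, not part of the change):
//
//   const float center[3]      = { 0.0f, 0.0f, 0.0f };
//   const float halfExtents[3] = { 3.0f, 5.0f, 3.0f };
//   float bmin[3], bmax[3];
//   dtVsub(bmin, center, halfExtents); // bmin = { -3, -5, -3 }
//   dtVadd(bmax, center, halfExtents); // bmax = {  3,  5,  3 }
//
// The box therefore spans [center - halfExtents, center + halfExtents] on
// each axis (a 6 x 10 x 6 box here); the old parameter name "extents" could
// be misread as the box's full size, which the rename to halfExtents avoids.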
dtPolyRef polys[128]; diff --git a/dep/recastnavigation/Detour/Source/DetourNavMeshBuilder.cpp b/dep/recastnavigation/Detour/Source/DetourNavMeshBuilder.cpp index 965e6cdc5c5..e93a97629b8 100644 --- a/dep/recastnavigation/Detour/Source/DetourNavMeshBuilder.cpp +++ b/dep/recastnavigation/Detour/Source/DetourNavMeshBuilder.cpp @@ -168,45 +168,72 @@ static void subdivide(BVItem* items, int nitems, int imin, int imax, int& curNod } } -static int createBVTree(const unsigned short* verts, const int /*nverts*/, - const unsigned short* polys, const int npolys, const int nvp, - const float cs, const float ch, - const int /*nnodes*/, dtBVNode* nodes) +static int createBVTree(dtNavMeshCreateParams* params, dtBVNode* nodes, int /*nnodes*/) { // Build tree - BVItem* items = (BVItem*)dtAlloc(sizeof(BVItem)*npolys, DT_ALLOC_TEMP); - for (int i = 0; i < npolys; i++) + float quantFactor = 1 / params->cs; + BVItem* items = (BVItem*)dtAlloc(sizeof(BVItem)*params->polyCount, DT_ALLOC_TEMP); + for (int i = 0; i < params->polyCount; i++) { BVItem& it = items[i]; it.i = i; - // Calc polygon bounds. - const unsigned short* p = &polys[i*nvp*2]; - it.bmin[0] = it.bmax[0] = verts[p[0]*3+0]; - it.bmin[1] = it.bmax[1] = verts[p[0]*3+1]; - it.bmin[2] = it.bmax[2] = verts[p[0]*3+2]; - - for (int j = 1; j < nvp; ++j) + // Calc polygon bounds. Use detail meshes if available. + if (params->detailMeshes) { - if (p[j] == MESH_NULL_IDX) break; - unsigned short x = verts[p[j]*3+0]; - unsigned short y = verts[p[j]*3+1]; - unsigned short z = verts[p[j]*3+2]; - - if (x < it.bmin[0]) it.bmin[0] = x; - if (y < it.bmin[1]) it.bmin[1] = y; - if (z < it.bmin[2]) it.bmin[2] = z; - - if (x > it.bmax[0]) it.bmax[0] = x; - if (y > it.bmax[1]) it.bmax[1] = y; - if (z > it.bmax[2]) it.bmax[2] = z; + int vb = (int)params->detailMeshes[i*4+0]; + int ndv = (int)params->detailMeshes[i*4+1]; + float bmin[3]; + float bmax[3]; + + const float* dv = ¶ms->detailVerts[vb*3]; + dtVcopy(bmin, dv); + dtVcopy(bmax, dv); + + for (int j = 1; j < ndv; j++) + { + dtVmin(bmin, &dv[j * 3]); + dtVmax(bmax, &dv[j * 3]); + } + + // BV-tree uses cs for all dimensions + it.bmin[0] = (unsigned short)dtClamp((int)((bmin[0] - params->bmin[0])*quantFactor), 0, 0xffff); + it.bmin[1] = (unsigned short)dtClamp((int)((bmin[1] - params->bmin[1])*quantFactor), 0, 0xffff); + it.bmin[2] = (unsigned short)dtClamp((int)((bmin[2] - params->bmin[2])*quantFactor), 0, 0xffff); + + it.bmax[0] = (unsigned short)dtClamp((int)((bmax[0] - params->bmin[0])*quantFactor), 0, 0xffff); + it.bmax[1] = (unsigned short)dtClamp((int)((bmax[1] - params->bmin[1])*quantFactor), 0, 0xffff); + it.bmax[2] = (unsigned short)dtClamp((int)((bmax[2] - params->bmin[2])*quantFactor), 0, 0xffff); + } + else + { + const unsigned short* p = ¶ms->polys[i*params->nvp * 2]; + it.bmin[0] = it.bmax[0] = params->verts[p[0] * 3 + 0]; + it.bmin[1] = it.bmax[1] = params->verts[p[0] * 3 + 1]; + it.bmin[2] = it.bmax[2] = params->verts[p[0] * 3 + 2]; + + for (int j = 1; j < params->nvp; ++j) + { + if (p[j] == MESH_NULL_IDX) break; + unsigned short x = params->verts[p[j] * 3 + 0]; + unsigned short y = params->verts[p[j] * 3 + 1]; + unsigned short z = params->verts[p[j] * 3 + 2]; + + if (x < it.bmin[0]) it.bmin[0] = x; + if (y < it.bmin[1]) it.bmin[1] = y; + if (z < it.bmin[2]) it.bmin[2] = z; + + if (x > it.bmax[0]) it.bmax[0] = x; + if (y > it.bmax[1]) it.bmax[1] = y; + if (z > it.bmax[2]) it.bmax[2] = z; + } + // Remap y + it.bmin[1] = (unsigned short)dtMathFloorf((float)it.bmin[1] * params->ch / params->cs); + it.bmax[1] 
= (unsigned short)dtMathCeilf((float)it.bmax[1] * params->ch / params->cs); } - // Remap y - it.bmin[1] = (unsigned short)dtMathFloorf((float)it.bmin[1]*ch/cs); - it.bmax[1] = (unsigned short)dtMathCeilf((float)it.bmax[1]*ch/cs); } int curNode = 0; - subdivide(items, npolys, 0, npolys, curNode, nodes); + subdivide(items, params->polyCount, 0, params->polyCount, curNode, nodes); dtFree(items); @@ -595,11 +622,9 @@ bool dtCreateNavMeshData(dtNavMeshCreateParams* params, unsigned char** outData, } // Store and create BVtree. - // TODO: take detail mesh into account! use byte per bbox extent? if (params->buildBvTree) { - createBVTree(params->verts, params->vertCount, params->polys, params->polyCount, - nvp, params->cs, params->ch, params->polyCount*2, navBvtree); + createBVTree(params, navBvtree, 2*params->polyCount); } // Store Off-Mesh connections. diff --git a/dep/recastnavigation/Detour/Source/DetourNavMeshQuery.cpp b/dep/recastnavigation/Detour/Source/DetourNavMeshQuery.cpp index a263106dc1c..fcac11f0729 100644 --- a/dep/recastnavigation/Detour/Source/DetourNavMeshQuery.cpp +++ b/dep/recastnavigation/Detour/Source/DetourNavMeshQuery.cpp @@ -578,7 +578,7 @@ dtStatus dtNavMeshQuery::closestPointOnPoly(dtPolyRef ref, const float* pos, flo v[k] = &tile->detailVerts[(pd->vertBase+(t[k]-poly->vertCount))*3]; } float h; - if (dtClosestHeightPointTriangle(pos, v[0], v[1], v[2], h)) + if (dtClosestHeightPointTriangle(closest, v[0], v[1], v[2], h)) { closest[1] = h; break; @@ -759,7 +759,7 @@ public: /// return #DT_SUCCESS, but @p nearestRef will be zero. So if in doubt, check /// @p nearestRef before using @p nearestPt. /// -dtStatus dtNavMeshQuery::findNearestPoly(const float* center, const float* extents, +dtStatus dtNavMeshQuery::findNearestPoly(const float* center, const float* halfExtents, const dtQueryFilter* filter, dtPolyRef* nearestRef, float* nearestPt) const { @@ -770,7 +770,7 @@ dtStatus dtNavMeshQuery::findNearestPoly(const float* center, const float* exten dtFindNearestPolyQuery query(this, center); - dtStatus status = queryPolygons(center, extents, filter, &query); + dtStatus status = queryPolygons(center, halfExtents, filter, &query); if (dtStatusFailed(status)) return status; @@ -943,7 +943,7 @@ public: /// be filled to capacity. The method of choosing which polygons from the /// full set are included in the partial result set is undefined. /// -dtStatus dtNavMeshQuery::queryPolygons(const float* center, const float* extents, +dtStatus dtNavMeshQuery::queryPolygons(const float* center, const float* halfExtents, const dtQueryFilter* filter, dtPolyRef* polys, int* polyCount, const int maxPolys) const { @@ -952,7 +952,7 @@ dtStatus dtNavMeshQuery::queryPolygons(const float* center, const float* extents dtCollectPolysQuery collector(polys, maxPolys); - dtStatus status = queryPolygons(center, extents, filter, &collector); + dtStatus status = queryPolygons(center, halfExtents, filter, &collector); if (dtStatusFailed(status)) return status; @@ -963,21 +963,21 @@ dtStatus dtNavMeshQuery::queryPolygons(const float* center, const float* extents /// @par /// /// The query will be invoked with batches of polygons. Polygons passed -/// to the query have bounding boxes that overlap with the center and extents +/// to the query have bounding boxes that overlap with the center and halfExtents /// passed to this function. The dtPolyQuery::process function is invoked multiple /// times until all overlapping polygons have been processed. 
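/// A minimal sketch of a caller-supplied collector for this overload,
/// assuming only the dtPolyQuery interface it accepts (CountPolysQuery is
/// an illustrative name, not part of Detour):
///
///   struct CountPolysQuery : public dtPolyQuery
///   {
///       int count;
///       CountPolysQuery() : count(0) {}
///       void process(const dtMeshTile*, dtPoly**, dtPolyRef*, int n)
///       {
///           count += n; // tally one batch; process() may run many times per query
///       }
///   };
///
/// Passing an instance as the query argument counts every polygon whose
/// bounding box overlaps the search box, across however many batches the
/// mesh delivers.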
/// -dtStatus dtNavMeshQuery::queryPolygons(const float* center, const float* extents, +dtStatus dtNavMeshQuery::queryPolygons(const float* center, const float* halfExtents, const dtQueryFilter* filter, dtPolyQuery* query) const { dtAssert(m_nav); - if (!center || !extents || !filter || !query) + if (!center || !halfExtents || !filter || !query) return DT_FAILURE | DT_INVALID_PARAM; float bmin[3], bmax[3]; - dtVsub(bmin, center, extents); - dtVadd(bmax, center, extents); + dtVsub(bmin, center, halfExtents); + dtVadd(bmax, center, halfExtents); // Find tiles the query touches. int minx, miny, maxx, maxy; diff --git a/dep/recastnavigation/Recast/CMakeLists.txt b/dep/recastnavigation/Recast/CMakeLists.txt index e83636a76f2..b21c5e38a4f 100644 --- a/dep/recastnavigation/Recast/CMakeLists.txt +++ b/dep/recastnavigation/Recast/CMakeLists.txt @@ -11,6 +11,7 @@ set(Recast_STAT_SRCS Source/Recast.cpp Source/RecastAlloc.cpp + Source/RecastAssert.cpp Source/RecastArea.cpp Source/RecastContour.cpp Source/RecastFilter.cpp diff --git a/dep/recastnavigation/Recast/Include/RecastAssert.h b/dep/recastnavigation/Recast/Include/RecastAssert.h index 2aca0d9a14f..e7cc10e4961 100644 --- a/dep/recastnavigation/Recast/Include/RecastAssert.h +++ b/dep/recastnavigation/Recast/Include/RecastAssert.h @@ -23,11 +23,34 @@ // Feel free to change the file and include your own implementation instead. #ifdef NDEBUG + // From http://cnicholson.net/2009/02/stupid-c-tricks-adventures-in-assert/ -# define rcAssert(x) do { (void)sizeof(x); } while((void)(__LINE__==-1),false) +# define rcAssert(x) do { (void)sizeof(x); } while((void)(__LINE__==-1),false) + #else + +/// An assertion failure function. +// @param[in] expression asserted expression. +// @param[in] file Filename of the failed assertion. +// @param[in] line Line number of the failed assertion. +/// @see rcAssertFailSetCustom +typedef void (rcAssertFailFunc)(const char* expression, const char* file, int line); + +/// Sets the base custom assertion failure function to be used by Recast. +/// @param[in] assertFailFunc The function to be used in case of failure of #rcAssert +void rcAssertFailSetCustom(rcAssertFailFunc *assertFailFunc); + +/// Gets the base custom assertion failure function to be used by Recast. +rcAssertFailFunc* rcAssertFailGetCustom(); + # include <assert.h> -# define rcAssert assert +# define rcAssert(expression) \ + { \ + rcAssertFailFunc* failFunc = rcAssertFailGetCustom(); \ + if(failFunc == NULL) { assert(expression); } \ + else if(!(expression)) { (*failFunc)(#expression, __FILE__, __LINE__); } \ + } + #endif #endif // RECASTASSERT_H diff --git a/dep/recastnavigation/Recast/Source/RecastAssert.cpp b/dep/recastnavigation/Recast/Source/RecastAssert.cpp new file mode 100644 index 00000000000..6297d420239 --- /dev/null +++ b/dep/recastnavigation/Recast/Source/RecastAssert.cpp @@ -0,0 +1,35 @@ +// +// Copyright (c) 2009-2010 Mikko Mononen memon@inside.org +// +// This software is provided 'as-is', without any express or implied +// warranty. In no event will the authors be held liable for any damages +// arising from the use of this software. +// Permission is granted to anyone to use this software for any purpose, +// including commercial applications, and to alter it and redistribute it +// freely, subject to the following restrictions: +// 1. The origin of this software must not be misrepresented; you must not
If you use this software +// in a product, an acknowledgment in the product documentation would be +// appreciated but is not required. +// 2. Altered source versions must be plainly marked as such, and must not be +// misrepresented as being the original software. +// 3. This notice may not be removed or altered from any source distribution. +// + +#include "RecastAssert.h" + +#ifndef NDEBUG + +static rcAssertFailFunc* sRecastAssertFailFunc = 0; + +void rcAssertFailSetCustom(rcAssertFailFunc *assertFailFunc) +{ + sRecastAssertFailFunc = assertFailFunc; +} + +rcAssertFailFunc* rcAssertFailGetCustom() +{ + return sRecastAssertFailFunc; +} + +#endif diff --git a/dep/recastnavigation/Recast/Source/RecastMesh.cpp b/dep/recastnavigation/Recast/Source/RecastMesh.cpp index 9b6f04e3092..e99eaebb796 100644 --- a/dep/recastnavigation/Recast/Source/RecastMesh.cpp +++ b/dep/recastnavigation/Recast/Source/RecastMesh.cpp @@ -379,7 +379,7 @@ static int triangulate(int n, const int* verts, int* indices, int* tris) // We might get here because the contour has overlapping segments, like this: // // A o-o=====o---o B - // / |C D| \ + // / |C D| \. // o o o o // : : : : // We'll try to recover by loosing up the inCone test a bit so that a diagonal diff --git a/dep/recastnavigation/Recast/Source/RecastRegion.cpp b/dep/recastnavigation/Recast/Source/RecastRegion.cpp index 4a87133f2a8..38a2bd6bfa4 100644 --- a/dep/recastnavigation/Recast/Source/RecastRegion.cpp +++ b/dep/recastnavigation/Recast/Source/RecastRegion.cpp @@ -1684,7 +1684,7 @@ bool rcBuildLayerRegions(rcContext* ctx, rcCompactHeightfield& chf, rcScopedDelete<unsigned short> srcReg((unsigned short*)rcAlloc(sizeof(unsigned short)*chf.spanCount, RC_ALLOC_TEMP)); if (!srcReg) { - ctx->log(RC_LOG_ERROR, "rcBuildRegionsMonotone: Out of memory 'src' (%d).", chf.spanCount); + ctx->log(RC_LOG_ERROR, "rcBuildLayerRegions: Out of memory 'src' (%d).", chf.spanCount); return false; } memset(srcReg,0,sizeof(unsigned short)*chf.spanCount); @@ -1693,7 +1693,7 @@ bool rcBuildLayerRegions(rcContext* ctx, rcCompactHeightfield& chf, rcScopedDelete<rcSweepSpan> sweeps((rcSweepSpan*)rcAlloc(sizeof(rcSweepSpan)*nsweeps, RC_ALLOC_TEMP)); if (!sweeps) { - ctx->log(RC_LOG_ERROR, "rcBuildRegionsMonotone: Out of memory 'sweeps' (%d).", nsweeps); + ctx->log(RC_LOG_ERROR, "rcBuildLayerRegions: Out of memory 'sweeps' (%d).", nsweeps); return false; } diff --git a/sql/base/characters_database.sql b/sql/base/characters_database.sql index bbcd0876e41..3c2bd4d321f 100644 --- a/sql/base/characters_database.sql +++ b/sql/base/characters_database.sql @@ -182,6 +182,29 @@ LOCK TABLES `arena_team_member` WRITE; UNLOCK TABLES; -- +-- Table structure for table `auctionbidders` +-- + +DROP TABLE IF EXISTS `auctionbidders`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `auctionbidders` ( + `id` INT(10) unsigned NOT NULL DEFAULT '0', + `bidderguid` INT(10) unsigned NOT NULL DEFAULT '0', + PRIMARY KEY (`id`,`bidderguid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Dumping data for table `auctionbidders` +-- + +LOCK TABLES `auctionbidders` WRITE; +/*!40000 ALTER TABLE `auctionbidders` DISABLE KEYS */; +/*!40000 ALTER TABLE `auctionbidders` ENABLE KEYS */; +UNLOCK TABLES; + +-- -- Table structure for table `auctionhouse` -- @@ -2568,7 +2591,7 @@ CREATE TABLE `updates` ( LOCK TABLES `updates` WRITE; /*!40000 ALTER TABLE `updates` DISABLE KEYS */; -INSERT INTO 
`updates` VALUES ('2015_03_20_00_characters.sql','B761760804EA73BD297F296C5C1919687DF7191C','ARCHIVED','2015-03-21 21:44:15',0),('2015_03_20_01_characters.sql','894F08B70449A5481FFAF394EE5571D7FC4D8A3A','ARCHIVED','2015-03-21 21:44:15',0),('2015_03_20_02_characters.sql','97D7BE0CAADC79F3F11B9FD296B8C6CD40FE593B','ARCHIVED','2015-03-21 21:44:51',0),('2015_06_26_00_characters_335.sql','C2CC6E50AFA1ACCBEBF77CC519AAEB09F3BBAEBC','ARCHIVED','2015-07-13 23:49:22',0),('2015_09_28_00_characters_335.sql','F8682A431D50E54BDC4AC0E7DBED21AE8AAB6AD4','ARCHIVED','2015-09-28 21:00:00',0),('2015_08_26_00_characters_335.sql','C7D6A3A00FECA3EBFF1E71744CA40D3076582374','ARCHIVED','2015-08-26 21:00:00',0),('2015_10_06_00_characters.sql','16842FDD7E8547F2260D3312F53EFF8761EFAB35','ARCHIVED','2015-10-06 16:06:38',0),('2015_10_07_00_characters.sql','E15AB463CEBE321001D7BFDEA4B662FF618728FD','ARCHIVED','2015-10-07 23:32:00',0),('2015_10_12_00_characters.sql','D6F9927BDED72AD0A81D6EC2C6500CBC34A39FA2','ARCHIVED','2015-10-12 15:35:47',0),('2015_10_28_00_characters.sql','622A9CA8FCE690429EBE23BA071A37C7A007BF8B','ARCHIVED','2015-10-19 14:32:22',0),('2015_10_29_00_characters_335.sql','4555A7F35C107E54C13D74D20F141039ED42943E','ARCHIVED','2015-10-29 17:05:43',0),('2015_11_03_00_characters.sql','CC045717B8FDD9733351E52A5302560CD08AAD57','ARCHIVED','2015-10-12 15:23:33',0),('2015_11_07_00_characters.sql','0ACDD35EC9745231BCFA701B78056DEF94D0CC53','ARCHIVED','2016-04-11 00:42:36',94),('2016_02_10_00_characters.sql','F1B4DA202819CABC7319A4470A2D224A34609E97','ARCHIVED','2016-02-10 00:00:00',0),('2016_03_13_2016_01_05_00_characters.sql','0EAD24977F40DE2476B4567DA2B477867CC0DA1A','ARCHIVED','2016-03-13 20:03:56',0),('2016_04_11_00_characters.sql','0ACDD35EC9745231BCFA701B78056DEF94D0CC53','ARCHIVED','2016-04-11 03:18:17',0),('2016_09_13_00_characters.sql','27A04615B11B2CFC3A26778F52F74C071E4F9C54','ARCHIVED','2016-07-06 18:55:18',0),('2016_10_16_00_characters.sql','0ACDD35EC9745231BCFA701B78056DEF94D0CC53','ARCHIVED','2016-10-16 14:02:49',35),('2016_10_30_00_characters.sql','7E2D5B226907B5A9AF320797F46E86DC27B7EC90','ARCHIVED','2016-10-30 00:00:00',0),('2017_04_03_00_characters.sql','CB072C56692C9FBF170C4036F15773DD86D368B5','ARCHIVED','2017-04-03 00:00:00',0),('2017_04_12_00_characters.sql','4FE3C6866A6DCD4926D451F6009464D290C2EF1F','ARCHIVED','2017-04-12 00:00:00',0),('2017_04_12_01_characters.sql','5A8A1215E3A2356722F52CD7A64BBE03D21FBEA3','ARCHIVED','2017-04-12 00:00:00',0),('2017_04_19_00_characters.sql','CE06FA9005C8A8EE4BDD925520278A5D83E87485','RELEASED','2017-04-19 00:07:40',25),('2017_10_29_00_characters.sql','6209D716E22C391F1FB464221D9F25AF','RELEASED','2017-04-19 00:07:40',25); +INSERT INTO `updates` VALUES ('2015_03_20_00_characters.sql','B761760804EA73BD297F296C5C1919687DF7191C','ARCHIVED','2015-03-21 21:44:15',0),('2015_03_20_01_characters.sql','894F08B70449A5481FFAF394EE5571D7FC4D8A3A','ARCHIVED','2015-03-21 21:44:15',0),('2015_03_20_02_characters.sql','97D7BE0CAADC79F3F11B9FD296B8C6CD40FE593B','ARCHIVED','2015-03-21 21:44:51',0),('2015_06_26_00_characters_335.sql','C2CC6E50AFA1ACCBEBF77CC519AAEB09F3BBAEBC','ARCHIVED','2015-07-13 23:49:22',0),('2015_09_28_00_characters_335.sql','F8682A431D50E54BDC4AC0E7DBED21AE8AAB6AD4','ARCHIVED','2015-09-28 21:00:00',0),('2015_08_26_00_characters_335.sql','C7D6A3A00FECA3EBFF1E71744CA40D3076582374','ARCHIVED','2015-08-26 21:00:00',0),('2015_10_06_00_characters.sql','16842FDD7E8547F2260D3312F53EFF8761EFAB35','ARCHIVED','2015-10-06 
16:06:38',0),('2015_10_07_00_characters.sql','E15AB463CEBE321001D7BFDEA4B662FF618728FD','ARCHIVED','2015-10-07 23:32:00',0),('2015_10_12_00_characters.sql','D6F9927BDED72AD0A81D6EC2C6500CBC34A39FA2','ARCHIVED','2015-10-12 15:35:47',0),('2015_10_28_00_characters.sql','622A9CA8FCE690429EBE23BA071A37C7A007BF8B','ARCHIVED','2015-10-19 14:32:22',0),('2015_10_29_00_characters_335.sql','4555A7F35C107E54C13D74D20F141039ED42943E','ARCHIVED','2015-10-29 17:05:43',0),('2015_11_03_00_characters.sql','CC045717B8FDD9733351E52A5302560CD08AAD57','ARCHIVED','2015-10-12 15:23:33',0),('2015_11_07_00_characters.sql','0ACDD35EC9745231BCFA701B78056DEF94D0CC53','ARCHIVED','2016-04-11 00:42:36',94),('2016_02_10_00_characters.sql','F1B4DA202819CABC7319A4470A2D224A34609E97','ARCHIVED','2016-02-10 00:00:00',0),('2016_03_13_2016_01_05_00_characters.sql','0EAD24977F40DE2476B4567DA2B477867CC0DA1A','ARCHIVED','2016-03-13 20:03:56',0),('2016_04_11_00_characters.sql','0ACDD35EC9745231BCFA701B78056DEF94D0CC53','ARCHIVED','2016-04-11 03:18:17',0),('2016_09_13_00_characters.sql','27A04615B11B2CFC3A26778F52F74C071E4F9C54','ARCHIVED','2016-07-06 18:55:18',0),('2016_10_16_00_characters.sql','0ACDD35EC9745231BCFA701B78056DEF94D0CC53','ARCHIVED','2016-10-16 14:02:49',35),('2016_10_30_00_characters.sql','7E2D5B226907B5A9AF320797F46E86DC27B7EC90','ARCHIVED','2016-10-30 00:00:00',0),('2017_04_03_00_characters.sql','CB072C56692C9FBF170C4036F15773DD86D368B5','ARCHIVED','2017-04-03 00:00:00',0),('2017_04_12_00_characters.sql','4FE3C6866A6DCD4926D451F6009464D290C2EF1F','ARCHIVED','2017-04-12 00:00:00',0),('2017_04_12_01_characters.sql','5A8A1215E3A2356722F52CD7A64BBE03D21FBEA3','ARCHIVED','2017-04-12 00:00:00',0),('2017_04_19_00_characters.sql','CE06FA9005C8A8EE4BDD925520278A5D83E87485','RELEASED','2017-04-19 00:07:40',25),('2017_10_29_00_characters.sql','6209D716E22C391F1FB464221D9F25AF','RELEASED','2017-04-19 00:07:40',25),('2017_11_27_00_characters.sql','6FF1F84B8985ADFC7FF97F0BF8E53403CF13C320','RELEASED','2017-11-27 22:08:42',0); /*!40000 ALTER TABLE `updates` ENABLE KEYS */; UNLOCK TABLES; diff --git a/sql/updates/characters/3.3.5/2017_11_27_00_characters.sql b/sql/updates/characters/3.3.5/2017_11_27_00_characters.sql new file mode 100644 index 00000000000..21c7192f0b2 --- /dev/null +++ b/sql/updates/characters/3.3.5/2017_11_27_00_characters.sql @@ -0,0 +1,9 @@ +DROP TABLE IF EXISTS `auctionbidders`; +CREATE TABLE `auctionbidders` ( + `id` INT(10) UNSIGNED NOT NULL DEFAULT '0', + `bidderguid` INT(10) UNSIGNED NOT NULL DEFAULT '0', + PRIMARY KEY (`id`, `bidderguid`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +INSERT INTO `auctionbidders` (`id`, `bidderguid`) +SELECT DISTINCT `id`, `buyguid` FROM `auctionhouse` WHERE `buyguid` != 0; diff --git a/sql/updates/world/3.3.5/2017_11_13_00_world.sql b/sql/updates/world/3.3.5/2017_11_13_00_world.sql new file mode 100644 index 00000000000..dddf2321b83 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_13_00_world.sql @@ -0,0 +1,2 @@ +-- add Oathbound Warder ability spell1s to pet bar +UPDATE `creature_template` SET `spell1`=56491, `Spell2`=56425, `Spell3`=56451, `Spell4`=56506 WHERE `entry`=30270; diff --git a/sql/updates/world/3.3.5/2017_11_14_00_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_00_world_335.sql new file mode 100644 index 00000000000..ed2f39f0fbe --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_00_world_335.sql @@ -0,0 +1,2 @@ +-- +UPDATE `creature_formations` SET `groupAI`=515 WHERE `leaderGUID`=32617; diff --git a/sql/updates/world/3.3.5/2017_11_14_01_world.sql 
b/sql/updates/world/3.3.5/2017_11_14_01_world.sql new file mode 100644 index 00000000000..3bc0da4ee1c --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_01_world.sql @@ -0,0 +1,2 @@ +-- Privateer Groy --> Fix Text Type and Emote +UPDATE `creature_text` SET `Type`=14, `Emote`=5 WHERE `CreatureID`=2616; diff --git a/sql/updates/world/3.3.5/2017_11_14_02_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_02_world_335.sql new file mode 100644 index 00000000000..9d5bfc31845 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_02_world_335.sql @@ -0,0 +1,10 @@ +-- Sergra Darkthorn +DELETE FROM `creature_text` WHERE `CreatureID`=3338; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3338,0,0,"The spirit of Echeyakee flows through you, $n.",12,1,100,2,0,0,1295,0,"Sergra Darkthorn"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3338; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3338 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3338,0,0,1,20,0,100,0,881,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Sergra Darkthorn - On Quest 'Echeyakee' Finished - Say Line 0"), +(3338,0,1,0,61,0,100,0,0,0,0,0,85,5320,0,0,0,0,0,7,0,0,0,0,0,0,0,"Sergra Darkthorn - On Quest 'Echeyakee' Finished - Invoker Cast Echeyakee's Grace"); diff --git a/sql/updates/world/3.3.5/2017_11_14_03_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_03_world_335.sql new file mode 100644 index 00000000000..07ea455cc60 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_03_world_335.sql @@ -0,0 +1,18 @@ +-- Jorn Skyseer +DELETE FROM `creature_text` WHERE `CreatureID`=3387; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3387,0,0,"The rage of Ishamuhale runs through you, $n.",12,1,100,2,0,0,1297,0,"Jorn Skyseer"), +(3387,1,0,"Lakota'mani thunders within you, $n.",12,1,100,2,0,0,1298,0,"Jorn Skyseer"), +(3387,2,0,"The speed of Owatanka is with you, $n.",12,1,100,2,0,0,1299,0,"Jorn Skyseer"), +(3387,3,0,"Your path is steady, $n, for Washte Pawne's resolve is with you.",12,1,100,2,0,0,1300,0,"Jorn Skyseer"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3387; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3387 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3387,0,0,0,20,0,100,0,882,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Jorn Skyseer - On Quest 'Ishamuhale' Finished - Say Line 0"), +(3387,0,1,0,20,0,100,0,883,0,0,0,1,1,0,0,0,0,0,7,0,0,0,0,0,0,0,"Jorn Skyseer - On Quest 'Lakota'mani' 
Finished - Say Line 1"), +(3387,0,2,0,20,0,100,0,884,0,0,0,1,2,0,0,0,0,0,7,0,0,0,0,0,0,0,"Jorn Skyseer - On Quest 'Owatanka' Finished - Say Line 2"), +(3387,0,3,0,20,0,100,0,885,0,0,0,1,3,0,0,0,0,0,7,0,0,0,0,0,0,0,"Jorn Skyseer - On Quest 'Washte Pawne' Finished - Say Line 3"); + +DELETE FROM `smart_scripts` WHERE `entryorguid`=3338 AND `source_type`=0 AND `id`=1; +UPDATE `smart_scripts` SET `link`=0 WHERE `entryorguid`=3338 AND `source_type`=0 AND `id`=0; diff --git a/sql/updates/world/3.3.5/2017_11_14_04_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_04_world_335.sql new file mode 100644 index 00000000000..fbda06c1efc --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_04_world_335.sql @@ -0,0 +1,9 @@ +-- Mahren Skyseer +DELETE FROM `creature_text` WHERE `CreatureID`=3388; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3388,0,0,"May the spirit of Isha Awak give you strength, $n.",12,1,100,2,0,0,1301,0,"Mahren Skyseer"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3388; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3388 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3388,0,0,0,20,0,100,0,873,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Mahren Skyseer - On Quest 'Isha Awak' Finished - Say Line 0"); diff --git a/sql/updates/world/3.3.5/2017_11_14_05_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_05_world_335.sql new file mode 100644 index 00000000000..dc8bf069f43 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_05_world_335.sql @@ -0,0 +1,22 @@ +-- Quest "Samophlange (Part 2)" +-- Main Control Valve +UPDATE `gameobject_template` SET `AIName`="SmartGameObjectAI" WHERE `entry`=4072; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4072 AND `source_type`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4072,1,0,0,70,0,100,0,2,0,0,0,12,3285,1,180000,0,0,0,8,0,0,0,824.479,-2678.73,91.6667,6.04909,"Main Control Valve - On Gameobject State Changed - Summon Creature 'Venture Co. Peon'"), +(4072,1,1,0,70,0,100,0,2,0,0,0,12,3284,1,180000,0,0,0,8,0,0,0,826.776,-2674.53,91.6667,5.53466,"Main Control Valve - On Gameobject State Changed - Summon Creature 'Venture Co. Drudger'"), +(4072,1,2,0,70,0,100,0,2,0,0,0,1,0,0,0,0,0,0,19,3285,0,0,0,0,0,0,"Main Control Valve - On Gameobject State Changed - Say Line 0 (Venture Co. Peon)"), +(4072,1,3,0,70,0,100,0,2,0,0,0,1,0,0,0,0,0,0,19,3284,0,0,0,0,0,0,"Main Control Valve - On Gameobject State Changed - Say Line 0 (Venture Co.
Drudger)"); + +-- Regulator Valve +UPDATE `gameobject_template` SET `AIName`="SmartGameObjectAI" WHERE `entry`=61935; +DELETE FROM `smart_scripts` WHERE `entryorguid`=61935 AND `source_type`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(61935,1,0,0,70,0,100,0,2,0,0,0,12,3285,1,180000,0,0,0,8,0,0,0,843.523,-2669.85,91.6668,4.39584,"Regulator Valve - On Gameobject State Changed - Summon Creature 'Venture Co. Peon'"), +(61935,1,1,0,70,0,100,0,2,0,0,0,1,0,0,0,0,0,0,19,3285,0,0,0,0,0,0,"Regulator Valve - On Gameobject State Changed - Say Line 0 (Venture Co. Peon)"); + +-- Texts +DELETE FROM `creature_text` WHERE `CreatureID` IN (3284,3285); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3284,0,0,"Get away from there!",12,0,100,0,0,0,4704,0,"Venture Co. Drudger"), +(3285,0,0,"Get away from there!",12,0,100,0,0,0,4704,0,"Venture Co. Peon"); diff --git a/sql/updates/world/3.3.5/2017_11_14_06_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_06_world_335.sql new file mode 100644 index 00000000000..3ebad41d691 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_06_world_335.sql @@ -0,0 +1,16 @@ +-- Kin'weelay +DELETE FROM `creature_text` WHERE `CreatureID`=2519; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(2519,0,0,"%s presents the Mind's Eye and the Singing Crystals before him...",16,0,100,0,0,0,742,0,"Kin'weelay"), +(2519,1,0,"I have fashioned Yenniku's Soul Gem, $n...",12,1,100,1,0,0,743,0,"Kin'weelay"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=2519; +DELETE FROM `smart_scripts` WHERE `entryorguid`=2519 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=251900 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2519,0,0,0,20,0,100,0,591,0,0,0,80,251900,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Quest 'The Mind's Eye' finished - Run Script"), +(251900,9,0,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Remove Npc Flag Questgiver"), +(251900,9,1,0,0,0,100,0,1000,1000,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Say Line 0"), +(251900,9,2,0,0,0,100,0,2000,2000,0,0,11,2362,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Cast Create Spellstone"), +(251900,9,3,0,0,0,100,0,6000,6000,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Say Line 1"), +(251900,9,4,0,0,0,100,0,0,0,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Add Npc Flag Questgiver"); diff --git 
a/sql/updates/world/3.3.5/2017_11_14_07_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_07_world_335.sql new file mode 100644 index 00000000000..966523d991a --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_07_world_335.sql @@ -0,0 +1,18 @@ +-- Gavis Greyshield +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=23941; +DELETE FROM `smart_scripts` WHERE `entryorguid`=23941 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=2394100 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(23941,0,0,1,2,0,100,1,0,15,0,0,80,2394100,2,0,0,0,0,1,0,0,0,0,0,0,0,"Gavis Greyshield - Between 0-15% Health - Run Script"), +(23941,0,1,2,61,0,100,512,0,0,0,0,2,35,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gavis Greyshield - Between 0-15% Health - Set Faction 35"), +(23941,0,2,3,61,0,100,512,0,0,0,0,24,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gavis Greyshield - Between 0-15% Health - Evade"), +(23941,0,3,0,61,0,100,512,0,0,0,0,103,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gavis Greyshield - Between 0-15% Health - Set Root On"), +(2394100,9,0,0,0,0,100,0,0,0,0,0,11,42660,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gavis Greyshield - On Script - Cast Gavis Greyshield Credit"), +(2394100,9,1,0,0,0,100,0,1000,1000,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gavis Greyshield - On Script - Say Line 0"), +(2394100,9,2,0,0,0,100,0,4000,4000,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gavis Greyshield - On Script - Say Line 1"), +(2394100,9,3,0,0,0,100,0,10000,10000,0,0,41,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gavis Greyshield - On Script - Despawn"); + +DELETE FROM `creature_text` WHERE `CreatureID`=23941; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(23941,0,0,"I surrender! Stay your blade!",12,0,100,5,0,0,22469,0,"Gavis Greyshield"), +(23941,1,0,"I will go with you. Just spare my men. They're good lads who've only made a mistake...",12,0,100,1,0,0,22470,0,"Gavis Greyshield"); diff --git a/sql/updates/world/3.3.5/2017_11_14_08_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_08_world_335.sql new file mode 100644 index 00000000000..5afebe40ecf --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_08_world_335.sql @@ -0,0 +1,15 @@ +-- Magatha Grimtotem +DELETE FROM `creature_text` WHERE `CreatureID`=4046 AND `GroupID` IN (0,1); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4046,0,0,"%s listens to $n's plea.",16,0,100,0,0,0,1416,0,"Magatha Grimtotem"), +(4046,1,0,"You were right to come. 
My wisdom will be yours.",12,0,100,273,0,0,1417,0,"Magatha Grimtotem"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4046; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4046 AND `source_type`=0 AND `id`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=404600 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4046,0,0,0,20,0,100,0,1063,0,0,0,80,404600,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Quest 'The Elder Crone' finished - Run Script"), +(404600,9,0,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Script - Remove Npc Flag Questgiver"), +(404600,9,1,0,0,0,100,0,1000,1000,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Magatha Grimtotem - On Script - Say Line 0"), +(404600,9,2,0,0,0,100,0,4000,4000,0,0,1,1,0,0,0,0,0,7,0,0,0,0,0,0,0,"Magatha Grimtotem - On Script - Say Line 1"), +(404600,9,3,0,0,0,100,0,2000,2000,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Script - Add Npc Flag Questgiver"); diff --git a/sql/updates/world/3.3.5/2017_11_14_09_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_09_world_335.sql new file mode 100644 index 00000000000..e586a7b0175 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_09_world_335.sql @@ -0,0 +1,73 @@ +-- Quest "Forsaken Aid" +-- Texts +DELETE FROM `creature_text` WHERE `CreatureID`=4046 AND `GroupID` IN (2,3); +DELETE FROM `creature_text` WHERE `CreatureID` IN (4068,3419) AND `GroupID`=0; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4046,2,0,"%s summons a messenger...",16,0,100,0,0,0,1420,0,"Magatha Grimtotem"), +(4046,3,0,"Go, my servant. 
Send word of Stonetalon to Apothecary Zamah.",12,0,100,25,0,0,1419,0,"Magatha Grimtotem"), +(4068,0,0,"%s gives message to Apothecary Zamah.",16,0,100,0,0,0,1412,0,"Serpent Messenger"), +(3419,0,0,"%s smiles.",16,0,100,11,0,0,1418,0,"Apothecary Zamah"); + +-- Magatha Grimtotem +DELETE FROM `smart_scripts` WHERE `entryorguid`=4046 AND `source_type`=0 AND `id`=1; +DELETE FROM `smart_scripts` WHERE `entryorguid`=404601 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4046,0,1,0,19,0,100,0,1064,0,0,0,80,404601,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Quest 'Forsaken Aid' Taken - Run Script"), +(404601,9,0,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Script - Remove Npc Flag Questgiver"), +(404601,9,1,0,0,0,100,0,1000,1000,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Script - Say Line 2"), +(404601,9,2,0,0,0,100,0,0,0,0,0,11,6657,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Script - Cast Summon Serpent Messenger"), +(404601,9,3,0,0,0,100,0,0,0,0,0,12,4068,8,0,0,0,0,8,0,0,0,-1067.22,-213.737,160.473,5.25037,"Magatha Grimtotem - On Script - Summon Creature 'Serpent Messenger'"), +(404601,9,4,0,0,0,100,0,2000,2000,0,0,1,3,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Script - Say Line 3"), +(404601,9,5,0,0,0,100,0,5000,5000,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Script - Add Npc Flag Questgiver"); + +-- Serpent Messenger +UPDATE `creature_template` SET `AIName`="SmartAI", `InhabitType`=4 WHERE `entry`=4068; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4068 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=406800 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4068,0,0,0,54,0,100,0,0,0,0,0,48,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Serpent Messenger - On Just Summoned - Set Active On"), +(4068,0,1,0,1,0,100,1,7000,7000,0,0,53,1,4068,0,0,0,0,1,0,0,0,0,0,0,0,"Serpent Messenger - Out of Combat - Start Waypoint (No Repeat)"), +(4068,0,2,0,40,0,100,0,36,4068,0,0,80,406800,0,0,0,0,0,1,0,0,0,0,0,0,0,"Serpent Messenger - On Waypoint 36 Reached - Run Script"), +(406800,9,0,0,0,0,100,0,1000,1000,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Serpent Messenger - On Script - Say Line 0"), +(406800,9,1,0,0,0,100,0,4000,4000,0,0,1,0,0,0,0,0,0,19,3419,0,0,0,0,0,0,"Serpent Messenger - On Script - Say Line 0 (Apothecary Zamah)"), +(406800,9,2,0,0,0,100,0,8000,8000,0,0,41,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Serpent Messenger - On Script - Despawn"); + +-- Waypoint +DELETE FROM `waypoints` WHERE `entry`=4068; +INSERT INTO `waypoints` (`entry`, `pointid`, `position_x`, `position_y`, `position_z`, `point_comment`) VALUES +(4068,1,-1067.22,-213.737,160.473,""), +(4068,2,-1062.56,-221.981,160.294,""), 
+(4068,3,-1054.61,-221.248,161.894,""), +(4068,4,-1054.61,-221.248,161.894,""), +(4068,5,-1037.05,-191.12,158.949,""), +(4068,6,-1053.12,-173.969,163.214,""), +(4068,7,-1064.46,-140.953,161.368,""), +(4068,8,-1079.25,-97.1617,156.371,""), +(4068,9,-1085.39,-64.6974,164.416,""), +(4068,10,-1083.91,-26.7695,159.369,""), +(4068,11,-1084.44,-9.29896,158.034,""), +(4068,12,-1109.66,7.02863,154.732,""), +(4068,13,-1132.07,31.5715,158.157,""), +(4068,14,-1129.02,47.1316,154.19,""), +(4068,15,-1095.17,63.5461,150.752,""), +(4068,16,-1086.83,113.827,144.557,""), +(4068,17,-1113.2,129.364,140.268,""), +(4068,18,-1146.35,119.028,141.776,""), +(4068,19,-1169.85,118.638,141.101,""), +(4068,20,-1206.75,117.618,139.861,""), +(4068,21,-1211.53,129.623,135.559,""), +(4068,22,-1192.52,144.921,136.995,""), +(4068,23,-1175.37,157.22,146.484,""), +(4068,24,-1155.67,169.875,146.074,""), +(4068,25,-1127.05,179.696,131.79,""), +(4068,26,-1096.04,189.663,118.055,""), +(4068,27,-1061.46,191.502,109.917,""), +(4068,28,-1056.64,215.227,113.473,""), +(4068,29,-1046.91,223.356,114.831,""), +(4068,30,-1035.35,218.46,112.371,""), +(4068,31,-1027.93,217.483,114.236,""), +(4068,32,-1023.41,229.152,108.92,""), +(4068,33,-1016.34,236.136,109.243,""), +(4068,34,-1013.08,246.142,108.77,""), +(4068,35,-1006.87,255.833,113.284,""), +(4068,36,-997.047,275.161,112.343,""); diff --git a/sql/updates/world/3.3.5/2017_11_14_10_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_10_world_335.sql new file mode 100644 index 00000000000..abd7ca9f9a4 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_10_world_335.sql @@ -0,0 +1,17 @@ +-- Apothecary Lydon +DELETE FROM `creature_text` WHERE `CreatureID`=2216; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(2216,0,0,"%s creates his toxin...",16,0,100,0,0,0,1421,0,"Apothecary Lydon"), +(2216,1,0,"There we are, $n. 
The toxin is ready.",12,1,100,1,0,0,1422,0,"Apothecary Lydon"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=2216; +DELETE FROM `smart_scripts` WHERE `entryorguid`=2216 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=221600 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2216,0,0,0,20,0,100,0,1066,0,0,0,80,221600,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Lydon - On Quest 'Blood of Innocents' Finished - Run Script"), +(221600,9,0,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Lydon - On Script - Remove Npc Flag Questgiver"), +(221600,9,1,0,0,0,100,0,1000,1000,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Lydon - On Script - Say Line 0"), +(221600,9,2,0,0,0,100,0,0,0,0,0,17,133,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Lydon - On Script - Set Emote State 133"), +(221600,9,3,0,0,0,100,0,6000,6000,0,0,17,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Lydon - On Script - Set Emote State 0"), +(221600,9,4,0,0,0,100,0,500,500,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Lydon - On Script - Say Line 1"), +(221600,9,5,0,0,0,100,0,0,0,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Lydon - On Script - Add Npc Flag Questgiver"); diff --git a/sql/updates/world/3.3.5/2017_11_14_11_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_11_world_335.sql new file mode 100644 index 00000000000..7a916c244b4 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_11_world_335.sql @@ -0,0 +1,17 @@ +-- Apothecary Zamah +DELETE FROM `creature_text` WHERE `CreatureID`=3419 AND `GroupID` IN (1,2); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3419,1,0,"%s fills a container with the toxin.",16,0,100,0,0,0,1423,0,"Apothecary Zamah"), +(3419,2,0,"The toxin is ready, $n.",12,1,100,1,0,0,1424,0,"Apothecary Zamah"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3419; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3419 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=341900 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3419,0,0,0,20,0,100,0,1067,0,0,0,80,341900,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Zamah - On Quest 'Return to Thunder Bluff' Finished - Run Script"), +(341900,9,0,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Zamah - On Script - Remove Npc Flag Questgiver"), +(341900,9,1,0,0,0,100,0,1000,1000,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Zamah - On Script - Say Line 1"), +(341900,9,2,0,0,0,100,0,0,0,0,0,17,133,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Zamah - On Script - Set Emote State 133"), 
+(341900,9,3,0,0,0,100,0,6000,6000,0,0,17,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Zamah - On Script - Set Emote State 0"), +(341900,9,4,0,0,0,100,0,500,500,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Zamah - On Script - Say Line 2"), +(341900,9,5,0,0,0,100,0,0,0,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apothecary Zamah - On Script - Add Npc Flag Questgiver"); diff --git a/sql/updates/world/3.3.5/2017_11_14_12_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_12_world_335.sql new file mode 100644 index 00000000000..b5a1dde8d3d --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_12_world_335.sql @@ -0,0 +1,553 @@ +-- Quest "Conscript of the Horde" +UPDATE `quest_request_items` SET `EmoteOnComplete`=15 WHERE `ID`=840; +UPDATE `quest_offer_reward` SET `Emote1`=6 WHERE `ID`=840; + +-- Quest "Crossroads Conscription" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=842; + +-- Quest "Meats to Orgrimmar" +DELETE FROM `quest_details` WHERE `ID`=6365; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6365,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=6365; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6365; + +-- Quest "Ride to Orgrimmar" +DELETE FROM `quest_details` WHERE `ID`=6384; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6384,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=6384; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=6384; + +-- Quest "Doras the Wind Rider Master" +DELETE FROM `quest_details` WHERE `ID`=6385; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6385,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=6385; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6385; + +-- Quest "Return to the Crossroads" +DELETE FROM `quest_details` WHERE `ID`=6386; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6386,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=6386; +UPDATE `quest_offer_reward` SET `Emote1`=11, `Emote2`=1 WHERE `ID`=6386; + +-- Quest "Plainstrider Menace" +DELETE FROM `quest_details` WHERE `ID`=844; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(844,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=6, `EmoteOnIncomplete`=6 WHERE `ID`=844; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=844; + +-- Quest "The Zhevra" +DELETE FROM `quest_details` WHERE `ID`=845; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(845,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=845; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=845; + +-- Quest "Prowlers of the Barrens" +DELETE FROM `quest_details` WHERE `ID`=903; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) 
VALUES +(903,5,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=903; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=903; + +-- Quest "Echeyakee" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=881; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=881; + +-- Quest "The Angry Scytheclaws" +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=905; + +-- Quest "Jorn Skyseer" +DELETE FROM `quest_details` WHERE `ID`=3261; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(3261,1,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=3261; + +-- Quest "Ishamuhale" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=882; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=882; + +-- Quest "Enraged Thunder Lizards" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=907; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=907; + +-- Quest "Cry of the Thunderhawk" +DELETE FROM `quest_details` WHERE `ID`=913; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(913,5,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6, `EmoteOnComplete`=6 WHERE `ID`=913; +UPDATE `quest_offer_reward` SET `Emote1`=2 WHERE `ID`=913; + +-- Quest "Mahren Skyseer" +UPDATE `quest_offer_reward` SET `Emote1`=6 WHERE `ID`=874; + +-- Quest "Isha Awak" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=873; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=873; + +-- Quest "Washte Pawne" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=885; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=885; + +-- Quest "Lakota'mani" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=883; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=883; + +-- Quest "Owatanka" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=884; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=884; + +-- Quest "The Harvester" +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6, `EmoteOnComplete`=6 WHERE `ID`=897; +UPDATE `quest_offer_reward` SET `Emote1`=274, `Emote2`=1 WHERE `ID`=897; + +-- Quest "The Forgotten Pools" +UPDATE `quest_offer_reward` SET `Emote1`=6 WHERE `ID`=870; + +-- Quest "The Stagnant Oasis" +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6 WHERE `ID`=877; +UPDATE `quest_offer_reward` SET `Emote1`=5 WHERE `ID`=877; + +-- Quest "Altered Beings" +DELETE FROM `quest_details` WHERE `ID`=880; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(880,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6, `EmoteOnComplete`=6 WHERE `ID`=880; +UPDATE `quest_offer_reward` SET `Emote1`=2 WHERE `ID`=880; + +-- Quest "Mura Runetotem" +DELETE FROM `quest_details` WHERE `ID`=3301; +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=3301; +UPDATE `quest_offer_reward` SET `Emote1`=6, `Emote2`=1, `Emote3`=2 WHERE `ID`=3301; + +-- Quest "Hamuul Runetotem" +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=1489; + +-- Quest "Nara Wildmane" +DELETE FROM `quest_details` WHERE `ID`=1490; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, 
`EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1490,1,1,1,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=1, `Emote2`=5 WHERE `ID`=1490; + +-- Quest "Leaders of the Fang" +UPDATE `quest_request_items` SET `EmoteOnComplete`=6, `EmoteOnIncomplete`=6 WHERE `ID`=914; +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=1, `Emote3`=2 WHERE `ID`=914; + +-- Quest "Raptor Thieves" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=869; + +-- Quest "Stolen Silver" +DELETE FROM `quest_details` WHERE `ID`=3281; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(3281,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=6, `EmoteOnIncomplete`=6 WHERE `ID`=3281; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=3281; + +-- Quest "Wharfmaster Dizzywig" +DELETE FROM `quest_details` WHERE `ID`=1492; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1492,1,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1492; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=1492; + +-- Quest "Fungal Spores" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=848; + +-- Quest "Apothecary Zamah" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=853; + +-- Quest "Disrupt the Attacks" +DELETE FROM `quest_details` WHERE `ID`=871; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(871,1,1,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=2, `Emote2`=1 WHERE `ID`=871; + +-- Quest "The Disruption Ends" +DELETE FROM `quest_details` WHERE `ID`=872; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(872,1,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=872; +UPDATE `quest_offer_reward` SET `Emote1`=1, `Emote2`=1 WHERE `ID`=872; + +-- Quest "Supplies for the Crossroads" +DELETE FROM `quest_details` WHERE `ID`=5041; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(5041,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=6, `EmoteOnIncomplete`=6 WHERE `ID`=5041; +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=11 WHERE `ID`=5041; + +-- Quest "Harpy Raiders" +DELETE FROM `quest_details` WHERE `ID`=867; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(867,5,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=867; +UPDATE `quest_offer_reward` SET `Emote1`=5 WHERE `ID`=867; + +-- Quest "Harpy Lieutenants" +DELETE FROM `quest_details` WHERE `ID`=875; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(875,11,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=875; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=875; + +-- Quest "Serena Bloodfeather" +DELETE FROM `quest_details` WHERE `ID`=876; +INSERT 
INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(876,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=876; +UPDATE `quest_offer_reward` SET `Emote1`=11 WHERE `ID`=876; + +-- Quest "Letter to Jin'Zil" +DELETE FROM `quest_details` WHERE `ID`=1060; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1060,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1060; + +-- Quest "Southsea Freebooters" +DELETE FROM `quest_details` WHERE `ID`=887; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(887,1,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=887; + +-- Quest "The Guns of Northwatch" +DELETE FROM `quest_details` WHERE `ID`=891; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(891,1,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=5, `EmoteOnIncomplete`=5 WHERE `ID`=891; +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=1 WHERE `ID`=891; + +-- Quest "Trouble at the Docks" +DELETE FROM `quest_details` WHERE `ID`=959; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(959,5,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=959; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=959; + +-- Quest "The Missing Shipment" +DELETE FROM `quest_details` WHERE `ID`=890; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(890,5,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=890; + +-- Quest "The Missing Shipment (Part 2)" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=892; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=892; + +-- Quest "Stolen Booty" +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=888; + +-- Quest "WANTED: Baron Longshore" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=895; + +-- Quest "Miner's Fortune" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=896; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=896; + +-- Quest "Ziz Fizziks" +DELETE FROM `quest_details` WHERE `ID`=1483; + +-- Quest "Wenikee Boltbucket" +UPDATE `quest_details` SET `Emote1`=1 WHERE `ID`=3921; +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=3921; +UPDATE `quest_offer_reward` SET `Emote1`=6 WHERE `ID`=3921; + +-- Quest "Nugget Slugs" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=3922; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=3922; + +-- Quest "Rilli Greasygob" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=3923; +UPDATE `quest_offer_reward` SET `Emote1`=5 WHERE `ID`=3923; + +-- Quest "Samophlange Manual" +DELETE FROM `quest_details` WHERE `ID`=3924; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES 
+(3924,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=3924; +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=1 WHERE `ID`=3924; + +-- Quest "Raptor Horns" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=865; + +-- Quest "Deepmoss Spider Eggs" +DELETE FROM `quest_details` WHERE `ID`=1069; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1069,5,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=1069; +UPDATE `quest_offer_reward` SET `Emote1`=2 WHERE `ID`=1069; + +-- Quest "Blueleaf Tubers" +DELETE FROM `quest_details` WHERE `ID`=1221; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1221,22,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1221; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=1221; + +-- Quest "Smart Drinks" +DELETE FROM `quest_details` WHERE `ID`=1491; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1491,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1491; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=1491; + +-- Quest "Lost in Battle" +DELETE FROM `quest_details` WHERE `ID`=4921; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(4921,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6 WHERE `ID`=4921; +UPDATE `quest_offer_reward` SET `Emote1`=2 WHERE `ID`=4921; + +-- Quest "Consumed by Hatred" +DELETE FROM `quest_details` WHERE `ID`=899; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(899,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=899; +UPDATE `quest_offer_reward` SET `Emote1`=1, `Emote2`=1 WHERE `ID`=899; + +-- Quest "Centaur Bracers" +DELETE FROM `quest_details` WHERE `ID`=855; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(855,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=855; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=855; + +-- Quest "Kolkar Leaders" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=850; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=850; + +-- Quest "Verog the Dervish" +DELETE FROM `quest_details` WHERE `ID`=851; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(851,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=851; + +-- Quest "Hezrul Bloodmark" +DELETE FROM `quest_details` WHERE `ID`=852; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(852,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=852; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=852; + +-- Quest 
"Counterattack!" +DELETE FROM `quest_details` WHERE `ID`=4021; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(4021,5,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=4021; +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=1, `Emote3`=66 WHERE `ID`=4021; + +-- Quest "Avenge My Village" +DELETE FROM `quest_details` WHERE `ID`=6548; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6548,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6 WHERE `ID`=6548; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6548; + +-- Quest "Kill Grundig Darkcloud" +DELETE FROM `quest_details` WHERE `ID`=6629; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6629,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6 WHERE `ID`=6629; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=6629; + +-- Quest "Protect Kaya" +DELETE FROM `quest_details` WHERE `ID`=6523; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6523,2,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=2 WHERE `ID`=6523; + +-- Quest "Kaya's Alive" +DELETE FROM `quest_details` WHERE `ID`=6401; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6401,1,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=5, `Emote2`=1 WHERE `ID`=6401; + +-- Quest "The Spirits of Stonetalon" +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=1061; + +-- Quest "Goblin Invaders" +DELETE FROM `quest_details` WHERE `ID`=1062; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1062,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=1062; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=1062; + +-- Quest "The Elder Crone" +DELETE FROM `quest_details` WHERE `ID`=1063; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1063,1,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=2 WHERE `ID`=1063; + +-- Quest "Forsaken Aid" +DELETE FROM `quest_details` WHERE `ID`=1064; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1064,1,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=1064; + +-- Quest "Journey to Tarren Mill" +DELETE FROM `quest_details` WHERE `ID`=1065; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1065,1,1,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=6, `Emote2`=5 WHERE `ID`=1065; + +-- Quest "Blood of Innocents" +DELETE FROM `quest_details` WHERE `ID`=1066; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, 
`EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1066,1,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6, `EmoteOnComplete`=6 WHERE `ID`=1066; +UPDATE `quest_offer_reward` SET `Emote1`=273, `Emote2`=1 WHERE `ID`=1066; + +-- Quest "Return to Thunder Bluff" +DELETE FROM `quest_details` WHERE `ID`=1067; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1067,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=1067; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=1067; + +-- Quest "The Flying Machine Airport" +DELETE FROM `quest_details` WHERE `ID`=1086; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1086,1,1,5,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=21, `Emote2`=1, `Emote3`=1 WHERE `ID`=1086; + +-- Quest "Shredding Machines" +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=1068; +UPDATE `quest_offer_reward` SET `Emote1`=1, `Emote2`=2 WHERE `ID`=1068; + +-- Quest "Egg Hunt" +DELETE FROM `quest_details` WHERE `ID`=868; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(868,1,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=868; + +-- Quest "Report to Kadrak" +UPDATE `quest_details` SET `Emote1`=1, `Emote2`=1 WHERE `ID`=6541; +UPDATE `quest_offer_reward` SET `Emote1`=3 WHERE `ID`=6541; + +-- Quest "Weapons of Choice" +DELETE FROM `quest_details` WHERE `ID`=893; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(893,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=893; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=893; + +-- Quest "A New Ore Sample" +DELETE FROM `quest_details` WHERE `ID`=1153; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1153,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=1153; +UPDATE `quest_offer_reward` SET `Emote1`=2 WHERE `ID`=1153; + +-- Quest "Chen's Empty Keg" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=819; +UPDATE `quest_offer_reward` SET `Emote1`=1, `EmoteDelay1`=1000 WHERE `ID`=819; + +-- Quest "Chen's Empty Keg (Part 2)" +DELETE FROM `quest_details` WHERE `ID`=821; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(821,6,1,1,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=821; +UPDATE `quest_offer_reward` SET `Emote1`=11, `Emote2`=1 WHERE `ID`=821; + +-- Quest "Chen's Empty Keg (Part 3)" +DELETE FROM `quest_details` WHERE `ID`=822; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(822,1,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=6, `EmoteOnIncomplete`=6 WHERE `ID`=822; +UPDATE `quest_offer_reward` SET `Emote1`=11, `Emote2`=1 WHERE `ID`=822; + +-- Quest "Free From the Hold" +DELETE FROM 
`quest_details` WHERE `ID`=898; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(898,5,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=2 WHERE `ID`=898; + +-- Quest "Gann's Reclamation" +UPDATE `quest_request_items` SET `EmoteOnComplete`=5, `EmoteOnIncomplete`=5 WHERE `ID`=843; + +-- Quest "Revenge of Gann (Part 1)" +DELETE FROM `quest_details` WHERE `ID`=846; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(846,1,1,1,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=846; +UPDATE `quest_offer_reward` SET `Emote1`=21 WHERE `ID`=846; + +-- Quest "Revenge of Gann (Part 2)" +DELETE FROM `quest_details` WHERE `ID`=849; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(849,1,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=849; + +-- Quest "Betrayal from Within (Part 2)" +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=906; +UPDATE `quest_offer_reward` SET `Emote1`=5, `Emote2`=4, `Emote3`=1 WHERE `ID`=906; + +-- Quest "Melor Sends Word" +UPDATE `quest_offer_reward` SET `Emote1`=2 WHERE `ID`=1130; + +-- Quest "The Tear of the Moons" +DELETE FROM `quest_details` WHERE `ID`=857; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(857,5,5,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=5, `EmoteOnComplete`=5 WHERE `ID`=857; +UPDATE `quest_offer_reward` SET `Emote1`=15 WHERE `ID`=857; + +-- Quest "The Runed Scroll" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=3513; +UPDATE `quest_offer_reward` SET `Emote1`=6 WHERE `ID`=3513; + +-- Quest "Horde Presence" +DELETE FROM `quest_details` WHERE `ID`=3514; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(3514,1,1,25,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=5 WHERE `ID`=3514; +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=1, `Emote3`=2 WHERE `ID`=3514; + +-- Quest "The Warsong Reports" +UPDATE `quest_details` SET `Emote1`=1, `Emote2`=1 WHERE `ID`=6543; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6543; + +-- Quest "Regthar Deathgate" +DELETE FROM `quest_details` WHERE `ID`=1361; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1361,1,0,0,0,0,0,0,0,0); + +-- Quest "The Kolkar of Desolace" +DELETE FROM `quest_details` WHERE `ID`=1362; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1362,1,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=6, `Emote2`=1 WHERE `ID`=1362; + +-- Quest "The Swarm Grows" +DELETE FROM `quest_details` WHERE `ID`=1145; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1145,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=25 WHERE `ID`=1145; + 
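-- Sanity check (illustrative, not part of this update file): after applying
-- the statements above, a touched quest's emote columns can be read back and
-- compared against the intended values; e.g. quest 1145 ("The Swarm Grows")
-- should show `quest_details`.`Emote1`=1 and `quest_request_items`.`EmoteOnComplete`=25.
-- The LEFT JOIN shape here is an assumption for illustration only.
SELECT d.`ID`, d.`Emote1`, d.`Emote2`,
       r.`EmoteOnIncomplete`, r.`EmoteOnComplete`,
       o.`Emote1` AS `RewardEmote1`, o.`Emote2` AS `RewardEmote2`
FROM `quest_details` AS d
LEFT JOIN `quest_request_items` AS r ON r.`ID` = d.`ID`
LEFT JOIN `quest_offer_reward` AS o ON o.`ID` = d.`ID`
WHERE d.`ID` = 1145;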
+-- Quest "The Swarm Grows (Part 2)" +DELETE FROM `quest_details` WHERE `ID`=1146; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1146,1,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1146; + +-- Quest "The Swarm Grows (Part 3)" +DELETE FROM `quest_details` WHERE `ID`=1147; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1147,1,1,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=11, `Emote2`=1 WHERE `ID`=1147; + +-- Quest "Deviate Hides" +DELETE FROM `quest_details` WHERE `ID`=1486; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1486,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1486; +UPDATE `quest_offer_reward` SET `Emote1`=2 WHERE `ID`=1486; + +-- Quest "Deviate Eradication" +DELETE FROM `quest_details` WHERE `ID`=1487; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1487,1,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=1, `Emote2`=2 WHERE `ID`=1487; diff --git a/sql/updates/world/3.3.5/2017_11_14_13_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_13_world_335.sql new file mode 100644 index 00000000000..1193069df87 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_13_world_335.sql @@ -0,0 +1,631 @@ +-- Greater Plainstrider +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3244; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3244 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3244,0,0,0,0,0,100,0,7000,9000,17000,21000,11,7272,0,0,0,0,0,1,0,0,0,0,0,0,0,"Greater Plainstrider - In Combat - Cast Dust Cloud"), +(3244,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Greater Plainstrider - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Fleeting Plainstrider +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3246; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3246 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3246,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Fleeting Plainstrider - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Sunscale Lashtail +DELETE FROM `smart_scripts` WHERE `entryorguid`=3254 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, 
`event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3254,0,0,0,0,0,100,0,5000,9000,12000,20000,11,6607,0,0,0,0,0,2,0,0,0,0,0,0,0,"Sunscale Lashtail - In Combat - Cast Lash"); + +-- Sunscale Screecher +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3255; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3255 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3255,0,0,0,2,0,100,1,0,30,0,0,39,30,0,0,0,0,0,1,0,0,0,0,0,0,0,"Sunscale Screecher - Between 0-30% Health - Call For Help (No Repeat)"), +(3255,0,1,0,2,0,100,1,0,30,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Sunscale Screecher - Between 0-30% Health - Say Line 0 (No Repeat)"); + +DELETE FROM `creature_text` WHERE `CreatureID`=3255; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3255,0,0,"%s lets out a shriek, calling for help!",16,0,100,0,0,0,2081,0,"Sunscale Screecher"); + +-- Sunscale Scytheclaw +DELETE FROM `smart_scripts` WHERE `entryorguid`=3256 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3256,0,0,0,25,0,100,0,0,0,0,0,11,8876,0,0,0,0,0,1,0,0,0,0,0,0,0,"Sunscale Scytheclaw - On Reset - Cast Thrash"); + +-- Razormane Water Seeker +DELETE FROM `smart_scripts` WHERE `entryorguid`=3267 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3267,0,0,0,0,0,100,0,7000,13000,30000,40000,11,6278,0,0,0,0,0,2,0,0,0,0,0,0,0,"Razormane Water Seeker - In Combat - Cast Creeping Mold"), +(3267,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Water Seeker - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Razormane Thornweaver +DELETE FROM `smart_scripts` WHERE `entryorguid`=3268 AND `source_type`=0 AND `id` IN (0,2); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, 
`action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3268,0,0,0,0,0,100,0,4000,7000,15000,21000,11,6950,0,0,0,0,0,2,0,0,0,0,0,0,0,"Razormane Thornweaver - In Combat - Cast Faerie Fire"), +(3268,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Thornweaver - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Razormane Defender +DELETE FROM `smart_scripts` WHERE `entryorguid`=3266 AND `source_type`=0 AND `id` IN (1,2,3); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3266,0,1,0,0,0,100,0,8000,10000,30000,35000,11,13730,0,0,0,0,0,2,0,0,0,0,0,0,0,"Razormane Defender - In Combat - Cast Demoralizing Shout"), +(3266,0,2,0,0,0,100,0,5000,7000,11000,13000,11,25710,0,0,0,0,0,2,0,0,0,0,0,0,0,"Razormane Defender - In Combat - Cast Heroic Strike"), +(3266,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Defender - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Razormane Geomancer +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=3269 AND `source_type`=0 AND `id`=1; + +-- Razormane Mystic +DELETE FROM `smart_scripts` WHERE `entryorguid`=3271 AND `source_type`=0 AND `id` IN (1,2,3); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3271,0,1,0,16,0,100,0,324,1,15000,30000,11,324,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Mystic - On Friendly Unit Missing Buff 'Lightning Shield' - Cast Lightning Shield"), +(3271,0,2,0,2,0,100,0,0,40,12000,19000,11,547,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Mystic - Between 0-40% Health - Cast Healing Wave"), +(3271,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Mystic - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Razormane Stalker +DELETE FROM `smart_scripts` WHERE `entryorguid`=3457 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3457,0,0,0,25,0,100,0,0,0,0,0,11,22766,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Stalker - On Reset - Cast Sneak"), +(3457,0,1,0,0,0,100,0,5000,9000,4000,8000,11,1758,0,0,0,0,0,2,0,0,0,0,0,0,0,"Razormane Stalker - In Combat - Cast Sinister Strike"), +(3457,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Stalker - 
Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Razormane Seer +DELETE FROM `smart_scripts` WHERE `entryorguid`=3458 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3458,0,0,0,2,0,100,0,0,50,25000,35000,11,6274,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Seer - Between 0-50% Health - Cast Healing Ward"), +(3458,0,1,0,0,0,100,0,5000,11000,16000,24000,11,6363,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Seer - In Combat - Cast Searing Totem"), +(3458,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Seer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Razormane Warfrenzy +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3459; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3459 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3459,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razormane Warfrenzy - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Hagg Taurenbane +DELETE FROM `smart_scripts` WHERE `entryorguid`=5859 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5859,0,0,0,4,0,100,1,0,0,0,0,11,7165,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hagg Taurenbane - On Aggro - Cast Battle Stance"), +(5859,0,1,0,9,0,100,0,0,5,9000,12000,11,40505,0,0,0,0,0,2,0,0,0,0,0,0,0,"Hagg Taurenbane - Within 0-5 Range - Cast Cleave"), +(5859,0,2,0,9,0,100,0,0,8,25000,30000,11,13730,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hagg Taurenbane - Within 0-8 Range - Cast Demoralizing Shout"), +(5859,0,3,0,0,0,100,0,5000,7000,11000,18000,11,9080,0,0,0,0,0,2,0,0,0,0,0,0,0,"Hagg Taurenbane - In Combat - Cast Hamstring"); + +-- Lok Orcbane +DELETE FROM `smart_scripts` WHERE `entryorguid`=3435 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3435,0,0,0,0,0,100,0,5000,8000,11000,17000,11,9080,0,0,0,0,0,2,0,0,0,0,0,0,0,"Lok Orcbane - In Combat - Cast Hamstring"); + +-- Nak +UPDATE `smart_scripts` SET 
`event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=3434 AND `source_type`=0 AND `id`=0; +UPDATE `smart_scripts` SET `event_param2`=50, `comment`="Nak - Between 0-50% Health - Cast Lesser Healing Wave" WHERE `entryorguid`=3434 AND `source_type`=0 AND `id`=1; + +-- Southsea Brigand +DELETE FROM `smart_scripts` WHERE `entryorguid`=3381 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3381,0,0,0,0,0,100,0,7000,14000,10000,16000,11,6253,0,0,0,0,0,2,0,0,0,0,0,0,0,"Southsea Brigand - In Combat - Cast Backhand"), +(3381,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Southsea Brigand - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Southsea Cannoneer +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3382; +UPDATE `creature_template_addon` SET `bytes2`=2 WHERE `entry`=3382; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3382 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3382,0,0,0,0,0,100,0,0,0,2300,3900,11,6660,64,0,0,0,0,2,0,0,0,0,0,0,0,"Southsea Cannoneer - In Combat - Cast Shoot"), +(3382,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Southsea Cannoneer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Southsea Cutthroat +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3383; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3383 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3383,0,0,0,0,0,100,0,4000,9000,18000,25000,11,744,0,0,0,0,0,2,0,0,0,0,0,0,0,"Southsea Cutthroat - In Combat - Cast Poison"), +(3383,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Southsea Cutthroat - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Southsea Privateer +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3384; +UPDATE `creature_template_addon` SET `bytes2`=2 WHERE `entry`=3384; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3384 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, 
`target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3384,0,0,0,0,0,100,0,0,0,2300,3900,11,6660,64,0,0,0,0,2,0,0,0,0,0,0,0,"Southsea Privateer - In Combat - Cast Shoot"), +(3384,0,1,0,0,0,100,0,4000,6000,9000,13000,11,3011,2,0,0,0,0,2,0,0,0,0,0,0,0,"Southsea Privateer - In Combat - Cast Fire Shot"), +(3384,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Southsea Privateer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Tazan +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=6494; +DELETE FROM `smart_scripts` WHERE `entryorguid`=6494 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(6494,0,0,0,0,0,100,0,7000,14000,10000,16000,11,6253,0,0,0,0,0,2,0,0,0,0,0,0,0,"Tazan - In Combat - Cast Backhand"), +(6494,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Tazan - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Baron Longshore +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3467; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3467 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3467,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Baron Longshore - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Theramore Marine +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3385; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3385 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3385,0,0,0,4,0,100,0,0,0,0,0,11,7164,0,0,0,0,0,1,0,0,0,0,0,0,0,"Theramore Marine - On Aggro - Cast Defensive Stance"), +(3385,0,1,0,0,0,100,0,7000,12000,14000,21000,11,6713,0,0,0,0,0,2,0,0,0,0,0,0,0,"Theramore Marine - In Combat - Cast Disarm"), +(3385,0,2,0,13,0,100,0,15000,20000,0,0,11,72,0,0,0,0,0,2,0,0,0,0,0,0,0,"Theramore Marine - Target Casting - Cast Shield Bash"), +(3385,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Theramore Marine - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Theramore Preserver +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3386; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3386 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`,
`event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3386,0,0,0,0,0,100,0,0,0,3400,4800,11,9734,64,0,0,0,0,2,0,0,0,0,0,0,0,"Theramore Preserver - In Combat - Cast Holy Smite"), +(3386,0,1,0,14,0,100,0,150,40,12000,18000,11,11642,0,0,0,0,0,7,0,0,0,0,0,0,0,"Theramore Preserver - Friendly At 150 Health - Cast Heal"), +(3386,0,2,0,14,0,100,0,250,40,15000,21000,11,8362,0,0,0,0,0,7,0,0,0,0,0,0,0,"Theramore Preserver - Friendly At 250 Health - Cast Renew"), +(3386,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Theramore Preserver - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Captain Fairmount +UPDATE `smart_scripts` SET `event_param1`=12000, `event_param2`=15000 WHERE `entryorguid`=3393 AND `source_type`=0 AND `id`=2; + +-- Cannoneer Smythe +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3454; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3454 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3454,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Cannoneer Smythe - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Cannoneer Whessan +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3455; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3455 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3455,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Cannoneer Whessan - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Witchwing Harpy +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3276; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3276 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3276,0,0,0,4,0,20,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,"Witchwing Harpy - On Aggro - Say Line 0"), +(3276,0,1,0,0,0,100,0,4000,9000,18000,25000,11,7098,0,0,0,0,0,2,0,0,0,0,0,0,0,"Witchwing Harpy - In Combat - Cast Curse of Mending"), +(3276,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Witchwing Harpy - Between 0-15% Health - Flee For Assist (No Repeat)"); + +DELETE FROM `creature_text` WHERE 
`CreatureID`=3276; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3276,0,1,"A fine trophy your head will make, $r.",12,0,100,0,0,0,2229,0,"Witchwing Harpy"), +(3276,0,2,"My talons will shred your puny body, $r.",12,0,100,0,0,0,2230,0,"Witchwing Harpy"), +(3276,0,0,"You will be easy prey, $c.",12,0,100,0,0,0,2231,0,"Witchwing Harpy"); + +-- Witchwing Roguefeather +UPDATE `smart_scripts` SET `event_chance`=20 WHERE `entryorguid`=3277 AND `source_type`=0 AND `id`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3277 AND `source_type`=0 AND `id`=3; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3277,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Witchwing Roguefeather - Between 0-15% Health - Flee For Assist (No Repeat)"); + +DELETE FROM `creature_text` WHERE `CreatureID`=3277 AND `ID` IN (1,2); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3277,0,1,"A fine trophy your head will make, $r.",12,0,100,0,0,0,2229,0,"Witchwing Roguefeather"), +(3277,0,2,"You will be easy prey, $c.",12,0,100,0,0,0,2231,0,"Witchwing Roguefeather"); + +-- Witchwing Slayer +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3278; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3278 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3278,0,0,0,4,0,20,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,"Witchwing Slayer - On Aggro - Say Line 0"), +(3278,0,1,0,0,0,100,0,6000,8000,25000,28000,11,13730,0,0,0,0,0,1,0,0,0,0,0,0,0,"Witchwing Slayer - In Combat - Cast Demoralizing Shout"), +(3278,0,2,0,12,0,100,1,0,20,0,0,11,7160,0,0,0,0,0,2,0,0,0,0,0,0,0,"Witchwing Slayer - Target Between 0-20% Health - Cast 'Execute' (No Repeat)"), +(3278,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Witchwing Slayer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +DELETE FROM `creature_text` WHERE `CreatureID`=3278; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3278,0,1,"A fine trophy your head will make, $r.",12,0,100,0,0,0,2229,0,"Witchwing Slayer"), +(3278,0,2,"My talons will shred your puny body, $r.",12,0,100,0,0,0,2230,0,"Witchwing Slayer"), +(3278,0,0,"You will be easy prey, $c.",12,0,100,0,0,0,2231,0,"Witchwing Slayer"); + +-- Witchwing Ambusher +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3279; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3279 AND
`source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3279,0,0,0,4,0,20,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,"Witchwing Ambusher - On Aggro - Say Line 0"), +(3279,0,1,0,25,0,100,0,0,0,0,0,11,30831,0,0,0,0,0,1,0,0,0,0,0,0,0,"Witchwing Ambusher - On Reset - Cast Stealth"), +(3279,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Witchwing Ambusher - Between 0-15% Health - Flee For Assist (No Repeat)"); + +DELETE FROM `creature_text` WHERE `CreatureID`=3279; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3279,0,1,"A fine trophy your head will make, $r.",12,0,100,0,0,0,2229,0,"Witchwing Ambusher"), +(3279,0,2,"My talons will shred your puny body, $r.",12,0,100,0,0,0,2230,0,"Witchwing Ambusher"), +(3279,0,0,"You will be easy prey, $c.",12,0,100,0,0,0,2231,0,"Witchwing Ambusher"); + +-- Witchwing Windcaller +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3280; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3280 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3280,0,0,0,4,0,20,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,"Witchwing Windcaller - On Aggro - Say Line 0"), +(3280,0,1,0,0,0,100,0,4000,7000,12000,15000,11,6728,0,0,0,0,0,5,0,0,0,0,0,0,0,"Witchwing Windcaller - In Combat - Cast Enveloping Winds"), +(3280,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Witchwing Windcaller - Between 0-15% Health - Flee For Assist (No Repeat)"); + +DELETE FROM `creature_text` WHERE `CreatureID`=3280; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3280,0,1,"A fine trophy your head will make, $r.",12,0,100,0,0,0,2229,0,"Witchwing Windcaller"), +(3280,0,2,"My talons will shred your puny body, $r.",12,0,100,0,0,0,2230,0,"Witchwing Windcaller"), +(3280,0,0,"You will be easy prey, $c.",12,0,100,0,0,0,2231,0,"Witchwing Windcaller"); + +-- Serena Bloodfeather +UPDATE `smart_scripts` SET `event_param1`=6000, `event_param2`=9000, `event_param3`=12000, `event_param4`=18000 WHERE `entryorguid`=3452 AND `source_type`=0 AND `id`=0; + +-- Sister Rathtalon +DELETE FROM `smart_scripts` WHERE `entryorguid`=5830 AND `source_type`=0 AND `id` IN (0,1,2); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, 
`target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5830,0,0,0,0,0,100,0,2000,3000,14000,21000,11,6728,0,0,0,0,0,5,0,0,0,0,0,0,0,"Sister Rathtalon - In Combat - Cast Enveloping Winds"), +(5830,0,1,0,9,0,100,0,0,5,9000,15000,11,6982,0,0,0,0,0,2,0,0,0,0,0,0,0,"Sister Rathtalon - Within 0-5 Range - Cast Gust of Wind"), +(5830,0,2,0,0,0,100,0,11000,16000,25000,35000,11,6535,0,0,0,0,0,5,0,0,0,0,0,0,0,"Sister Rathtalon - In Combat - Cast Lightning Cloud"); + +-- Savannah Matriarch +DELETE FROM `smart_scripts` WHERE `entryorguid`=3416 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3416,0,0,0,4,0,100,0,0,0,0,0,11,6598,2,0,0,0,0,1,0,0,0,0,0,0,0,"Savannah Matriarch - On Aggro - Cast Savannah Cub"); + +-- Stormsnout +DELETE FROM `smart_scripts` WHERE `entryorguid`=3240 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3240,0,0,0,0,0,100,0,1000,2000,2000,6000,11,5401,0,0,0,0,0,2,0,0,0,0,0,0,0,"Stormsnout - In Combat - Cast Lizard Bolt"); + +-- Thunderhead +DELETE FROM `smart_scripts` WHERE `entryorguid`=3239 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3239,0,0,0,0,0,100,0,1000,2000,2000,6000,11,5401,0,0,0,0,0,2,0,0,0,0,0,0,0,"Thunderhead - In Combat - Cast Lizard Bolt"); + +-- Stormhide +DELETE FROM `smart_scripts` WHERE `entryorguid`=3238 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3238,0,0,0,0,0,100,0,1000,2000,2000,6000,11,5401,0,0,0,0,0,2,0,0,0,0,0,0,0,"Stormhide - In Combat - Cast Lizard Bolt"); + +-- Thunderhawk Hatchling +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=3247 AND `source_type`=0 AND `id`=1; + +-- Greater Thunderhawk +DELETE FROM `smart_scripts` WHERE `entryorguid`=3249 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, 
`event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3249,0,0,0,0,0,100,0,5000,11000,13000,19000,11,8078,0,0,0,0,0,1,0,0,0,0,0,0,0,"Greater Thunderhawk - In Combat - Cast Thunderclap"); + +-- Thunderhawk Cloudscraper +DELETE FROM `smart_scripts` WHERE `entryorguid`=3424 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3424,0,0,0,0,0,100,0,5000,11000,13000,19000,11,8078,0,0,0,0,0,1,0,0,0,0,0,0,0,"Thunderhawk Cloudscraper - In Combat - Cast Thunderclap"); + +-- Silithid Protector +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3503; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3503 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3503,0,0,0,54,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Protector - Just Summoned - Say Line 0"), +(3503,0,1,0,54,0,100,0,0,0,0,0,49,0,0,0,0,0,0,21,20,0,0,0,0,0,0,"Silithid Protector - Just Summoned - Start Attacking"); + +DELETE FROM `creature_text` WHERE `CreatureID`=3503; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3503,0,0,"%s digs its way out of the sand to protect its eggs.",16,0,100,0,0,0,1080,0,"Silithid Protector"); + +-- Silithid Creeper +DELETE FROM `smart_scripts` WHERE `entryorguid`=3250 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3250,0,0,1,0,0,100,0,7000,15000,15000,23000,11,6587,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Creeper - In Combat - Cast Silithid Creeper Egg"), +(3250,0,1,0,61,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Creeper - In Combat - Say Line 0"); + +DELETE FROM `creature_text` WHERE `CreatureID`=3250; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3250,0,0,"%s lays an egg!",16,0,100,0,0,0,1408,0,"Silithid Creeper"); + +-- Silithid Creeper Egg +DELETE 
FROM `smart_scripts` WHERE `entryorguid`=578100 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(578100,9,0,0,0,0,100,0,2000,2000,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Creeper Egg - On Script - Say Line 0"), +(578100,9,1,0,0,0,100,0,4000,4000,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Creeper Egg - On Script - Say Line 1"), +(578100,9,2,0,0,0,100,0,0,0,0,0,11,6588,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Creeper Egg - On Script - Cast 'Summon Silithid Grub'"), +(578100,9,3,0,0,0,100,0,0,0,0,0,11,7,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Creeper Egg - On Script - Cast 'Suicide'"); + +-- Silithid Grub +DELETE FROM `smart_scripts` WHERE `entryorguid`=3251 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3251,0,1,0,54,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Grub - Just Summoned - Say Line 0"); + +DELETE FROM `creature_text` WHERE `CreatureID`=3251; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3251,0,0,"%s hatches!",16,0,100,0,0,0,1413,0,"Silithid Grub"); + +-- Silithid Swarmer +DELETE FROM `smart_scripts` WHERE `entryorguid`=3252 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3252,0,0,0,0,0,100,0,8000,12000,8000,12000,11,6589,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Swarmer - In Combat - Cast Silithid Swarm"), +(3252,0,1,0,11,0,100,0,0,0,0,0,11,6589,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Swarmer - On Respawn - Cast Silithid Swarm"); + +-- Silithid Swarm +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4196; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4196 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4196,0,0,0,54,0,100,0,0,0,0,0,49,0,0,0,0,0,0,21,10,0,0,0,0,0,0,"Silithid Swarm - Just Summoned - Start Attacking"); + +-- Silithid Harvester 
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3253; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3253 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3253,0,0,0,0,0,100,0,1000,3000,8000,12000,11,7278,0,0,0,0,0,1,0,0,0,0,0,0,0,"Silithid Harvester - In Combat - Cast Summon Harvester Swarm"); + +-- Harvester Swarm +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5409; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5409 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5409,0,0,0,54,0,100,0,0,0,0,0,49,0,0,0,0,0,0,21,40,0,0,0,0,0,0,"Harvester Swarm - Just Summoned - Start Attacking"); + +-- Azzere the Skyblade +DELETE FROM `smart_scripts` WHERE `entryorguid`=5834 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5834,0,0,0,0,0,100,0,0,0,3400,4800,11,13375,64,0,0,0,0,2,0,0,0,0,0,0,0,"Azzere the Skyblade - In Combat - Cast Fireball"), +(5834,0,1,0,0,0,100,0,4000,9000,18000,22000,11,6725,0,0,0,0,0,2,0,0,0,0,0,0,0,"Azzere the Skyblade - In Combat - Cast Flame Spike"); + +-- Brontus +DELETE FROM `smart_scripts` WHERE `entryorguid`=5827 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5827,0,1,0,0,0,100,0,5000,9000,7000,15000,11,6016,0,0,0,0,0,2,0,0,0,0,0,0,0,"Brontus - In Combat - Cast Pierce Armor"); + +-- Venture Co. 
Peon +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3285; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3285 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3285,0,0,0,2,0,100,1,0,25,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Venture Co. Peon - Between 0-25% Health - Flee For Assist (No Repeat)"); + +-- Venture Co. Drudger +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3284; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3284 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3284,0,0,0,2,0,100,1,0,25,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Venture Co. Drudger - Between 0-25% Health - Flee For Assist (No Repeat)"); + +-- Tinkerer Sniggles +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3471; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3471 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3471,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Tinkerer Sniggles - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Venture Co. Mercenary +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3282; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3282 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3282,0,0,0,0,0,100,0,0,0,2300,3900,11,6660,64,0,0,0,0,2,0,0,0,0,0,0,0,"Venture Co. Mercenary - In Combat - Cast Shoot"), +(3282,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Venture Co. 
Mercenary - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Supervisor Lugwizzle +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3445; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3445 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3445,0,0,0,2,0,100,1,0,30,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Supervisor Lugwizzle - Between 0-30% Health - Flee For Assist (No Repeat)"); + +-- Overseer Glibby +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=6606; +DELETE FROM `smart_scripts` WHERE `entryorguid`=6606 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(6606,0,0,0,2,0,100,1,0,30,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Overseer Glibby - Between 0-30% Health - Flee For Assist (No Repeat)"); + +-- Venture Co. Enforcer +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3283; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3283 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3283,0,0,0,4,0,100,0,0,0,0,0,11,9128,0,0,0,0,0,1,0,0,0,0,0,0,0,"Venture Co. Enforcer - On Aggro - Cast Battle Shout"), +(3283,0,1,0,0,0,100,0,5000,9000,15000,19000,11,6713,0,0,0,0,0,2,0,0,0,0,0,0,0,"Venture Co. Enforcer - In Combat - Cast Disarm"), +(3283,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Venture Co. 
Enforcer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Boss Copperplug +DELETE FROM `smart_scripts` WHERE `entryorguid`=9336 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(9336,0,0,0,0,0,100,0,5000,9000,9000,16000,11,6533,0,0,0,0,0,2,0,0,0,0,0,0,0,"Boss Copperplug - In Combat - Cast Net"), +(9336,0,1,0,0,0,100,0,0,0,2300,3900,11,9143,64,0,0,0,0,2,0,0,0,0,0,0,0,"Boss Copperplug - In Combat - Cast Bomb"); + +-- Foreman Grills +DELETE FROM `smart_scripts` WHERE `entryorguid`=5835 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5835,0,0,0,0,0,100,0,5000,7000,8000,11000,11,9080,0,0,0,0,0,2,0,0,0,0,0,0,0,"Foreman Grills - In Combat - Cast Hamstring"), +(5835,0,1,0,2,0,100,1,0,20,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Foreman Grills - Between 0-20% Health - Say Line 0 (No Repeat)"), +(5835,0,2,0,2,0,100,0,0,20,4000,6000,11,6531,32,0,0,0,0,2,0,0,0,0,0,0,0,"Foreman Grills - Between 0-20% Health - Cast Overseer's Poison"); + +DELETE FROM `creature_text` WHERE `CreatureID`=5835; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(5835,0,0,"%s grabs for his poisoned dagger!",16,0,100,0,0,0,1385,0,"Foreman Grills"); + +-- Bristleback Thornweaver +DELETE FROM `smart_scripts` WHERE `entryorguid`=3261 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3261,0,0,0,1,0,100,0,1000,1000,600000,600000,11,782,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bristleback Thornweaver - Out of Combat - Cast Thorns"), +(3261,0,1,0,0,0,100,0,5000,9000,10000,16000,11,12747,0,0,0,0,0,5,0,0,0,0,0,0,0,"Bristleback Thornweaver - In Combat - Cast Entangling Roots"), +(3261,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bristleback Thornweaver - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Bristleback Geomancer +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=3263 AND `source_type`=0 AND `id`=0; +UPDATE `smart_scripts` SET `action_param2`=0 WHERE `entryorguid`=3263 AND `source_type`=0 AND `id` IN (1,2); + +-- Bristleback Water Seeker +DELETE FROM `smart_scripts` WHERE `entryorguid`=3260 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, 
`event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3260,0,0,0,0,0,100,0,5000,13000,10000,16000,11,12748,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bristleback Water Seeker - In Combat - Cast Frost Nova"), +(3260,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bristleback Water Seeker - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Barrens Kodo +DELETE FROM `smart_scripts` WHERE `entryorguid`=3236 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3236,0,0,0,0,0,100,0,6000,8000,10000,14000,11,12748,0,0,0,0,0,1,0,0,0,0,0,0,0,"Barrens Kodo - In Combat - Cast Kodo Stomp"); + +-- Bael'dun Excavator +DELETE FROM `smart_scripts` WHERE `entryorguid`=3374 AND `source_type`=0 AND `id` IN (1,2); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3374,0,1,0,0,0,100,0,5000,9000,13000,17000,11,7386,0,0,0,0,0,2,0,0,0,0,0,0,0,"Bael'dun Excavator - In Combat - Cast Sunder Armor"), +(3374,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bael'dun Excavator - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Bael'dun Foreman +DELETE FROM `smart_scripts` WHERE `entryorguid`=3375 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3375,0,0,0,0,0,100,0,0,0,3400,4700,11,6257,64,0,0,0,0,2,0,0,0,0,0,0,0,"Bael'dun Foreman - In Combat - Cast Torch Toss"), +(3375,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bael'dun Foreman - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Digger Flameforge +DELETE FROM `smart_scripts` WHERE `entryorguid`=5849 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES 
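+-- Key for reading rows like the ones below (a hedged sketch using SmartAI enum conventions):
+-- event_type 0 is the in-combat update timer, with event_param1/2 = initial delay range in ms
+-- and event_param3/4 = repeat delay range; action_type 11 casts the spell ID in action_param1
+-- (action_param2 = cast flags, e.g. 64 appears to select ranged-caster movement handling);
+-- target_type 2 targets the current victim, 1 the creature itself.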
+(5849,0,0,0,0,0,100,0,0,0,2000,3500,11,7978,64,0,0,0,0,2,0,0,0,0,0,0,0,"Digger Flameforge - In Combat - Cast Throw Dynamite"), +(5849,0,1,0,9,0,100,0,0,5,9000,14000,11,6253,0,0,0,0,0,2,0,0,0,0,0,0,0,"Digger Flameforge - Within 0-5 Range - Cast Backhand"); + +-- Prospector Khazgorm +DELETE FROM `smart_scripts` WHERE `entryorguid`=3392 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3392,0,0,0,0,0,100,0,7000,14000,10000,16000,11,6253,0,0,0,0,0,2,0,0,0,0,0,0,0,"Prospector Khazgorm - In Combat - Cast Backhand"); + +-- Bael'dun Soldier +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3376; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3376 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3376,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bael'dun Soldier - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Bael'dun Rifleman +UPDATE `creature_template_addon` SET `bytes2`=2 WHERE `entry`=3377; + +-- Bael'dun Officer +DELETE FROM `smart_scripts` WHERE `entryorguid`=3378 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3378,0,0,0,25,0,100,0,0,0,0,0,11,8876,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bael'dun Officer - On Reset - Cast Thrash"), +(3378,0,1,0,0,0,100,0,3000,7000,15000,23000,11,6264,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bael'dun Officer - In Combat - Cast Nimble Reflexes"), +(3378,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bael'dun Officer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Captain Gerogg Hammertoe +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5851; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5851 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5851,0,0,0,4,0,100,0,0,0,0,0,11,7164,0,0,0,0,0,1,0,0,0,0,0,0,0,"Captain Gerogg Hammertoe - On Aggro - Cast Defensive Stance"), 
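+-- The next row uses event_type 2, a health-percentage window (event_param1/2 = min/max % HP,
+-- hedged per SmartAI conventions), so it fires while the creature is between 0% and 50% health: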
+(5851,0,1,0,2,0,100,0,0,50,15000,25000,11,3419,0,0,0,0,0,1,0,0,0,0,0,0,0,"Captain Gerogg Hammertoe - Between 0-50% Health - Cast Improved Blocking"); + +-- Burning Blade Acolyte +DELETE FROM `smart_scripts` WHERE `entryorguid`=3380 AND `source_type`=0 AND `id` IN (1,2,3); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3380,0,1,0,0,0,100,0,6000,11000,17000,22000,11,707,0,0,0,0,0,2,0,0,0,0,0,0,0,"Burning Blade Acolyte - In Combat - Cast Immolate"), +(3380,0,2,0,0,0,100,0,2000,5000,26000,31000,11,980,0,0,0,0,0,2,0,0,0,0,0,0,0,"Burning Blade Acolyte - In Combat - Cast Curse of Agony"), +(3380,0,3,0,2,0,100,0,0,50,13000,18000,11,689,0,0,0,0,0,2,0,0,0,0,0,0,0,"Burning Blade Acolyte - Between 0-50% Health - Cast Drain Life"); + +-- Rathorian +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3470; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3470 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3470,0,0,0,0,0,100,0,5000,7000,11000,15000,11,13737,0,0,0,0,0,2,0,0,0,0,0,0,0,"Rathorian - In Combat - Cast Mortal Strike"); + +-- Kolkar Bloodcharger +DELETE FROM `smart_scripts` WHERE `entryorguid`=3397 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3397,0,0,0,0,0,100,0,3000,5000,32000,35000,11,6742,32,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Bloodcharger - In Combat - Cast Bloodlust"), +(3397,0,1,0,0,0,100,0,6000,8000,15000,19000,11,172,0,0,0,0,0,2,0,0,0,0,0,0,0,"Kolkar Bloodcharger - In Combat - Cast Corruption"), +(3397,0,2,3,6,0,5,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Bloodcharger - On Just Died - Say Line 0"), +(3397,0,3,0,61,0,100,0,0,0,0,0,12,3395,6,20000,0,0,0,8,0,0,0,-1209.65,-2738.38,102.646,4.99352,"Kolkar Bloodcharger - On Just Died - Summon Creature 'Verog the Dervish'"); + +-- Kolkar Marauder +DELETE FROM `smart_scripts` WHERE `entryorguid`=3275 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES 
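+-- As with the Bloodcharger above, the `link` column chains rows here: the death event
+-- (event_type 6) fires its Say at a 5% chance, and the linked row (event_type 61) then summons
+-- Verog the Dervish at the fixed coordinates in target_x/y/z/o (target_type 8, a position target;
+-- enum names hedged per SmartAI conventions).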
+(3275,0,0,0,4,0,100,1,0,0,0,0,11,6268,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Marauder - On Aggro - Cast Rushing Charge"), +(3275,0,1,0,0,0,100,0,5000,11000,7000,12000,11,11976,0,0,0,0,0,2,0,0,0,0,0,0,0,"Kolkar Marauder - In Combat - Cast Strike"), +(3275,0,2,0,0,0,100,0,7000,15000,9000,14000,11,8014,32,0,0,0,0,2,0,0,0,0,0,0,0,"Kolkar Marauder - In Combat - Cast Tetanus"), +(3275,0,3,4,6,0,5,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Marauder - On Just Died - Say Line 0"), +(3275,0,4,0,61,0,100,0,0,0,0,0,12,3395,6,20000,0,0,0,8,0,0,0,-1209.65,-2738.38,102.646,4.99352,"Kolkar Marauder - On Just Died - Summon Creature 'Verog the Dervish'"); + +-- Kolkar Pack Runner +UPDATE `smart_scripts` SET `event_chance`=5 WHERE `entryorguid`=3274 AND `source_type`=0 AND `id`=1; + +-- Kolkar Stormer +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=3273 AND `source_type`=0 AND `id`=1; + +-- Rocklance +DELETE FROM `smart_scripts` WHERE `entryorguid`=5841 AND `source_type`=0 AND `id` IN (1,2); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5841,0,1,0,0,0,100,0,5000,7000,12000,16000,11,15496,0,0,0,0,0,2,0,0,0,0,0,0,0,"Rocklance - In Combat - Cast Cleave"), +(5841,0,2,0,9,0,100,0,0,5,15000,19000,11,11971,0,0,0,0,0,2,0,0,0,0,0,0,0,"Rocklance - Within 0-5 Range - Cast Sunder Armor"); + +-- Geopriest Gukk'rok +UPDATE `smart_scripts` SET `event_chance`=100 WHERE `entryorguid`=5863 AND `source_type`=0; + +-- Snort the Heckler +DELETE FROM `smart_scripts` WHERE `entryorguid`=5829 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5829,0,0,0,0,0,100,0,3000,5000,7000,11000,11,7951,0,0,0,0,0,2,0,0,0,0,0,0,0,"Snort the Heckler - In Combat - Cast Toxic Spit"), +(5829,0,1,0,9,0,100,0,0,5,15000,21000,11,3604,0,0,0,0,0,2,0,0,0,0,0,0,0,"Snort the Heckler - Within 0-5 Range - Cast Tendon Rip"); + +-- Swinegart Spearhide +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5864; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5864 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5864,0,0,0,4,0,100,0,0,0,0,0,11,7165,0,0,0,0,0,1,0,0,0,0,0,0,0,"Swinegart Spearhide - On Aggro - Cast Battle Stance"), +(5864,0,1,0,0,0,100,0,5000,7000,15000,17000,11,6016,0,0,0,0,0,2,0,0,0,0,0,0,0,"Swinegart Spearhide - In Combat - Cast Pierce Armor"); + +-- Trigore 
the Lasher +UPDATE `creature_template` SET `AIName`="SmartAI", `unit_flags`=32768 WHERE `entry`=3652; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3652 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3652,0,0,0,25,0,100,0,0,0,0,0,11,3417,0,0,0,0,0,1,0,0,0,0,0,0,0,"Trigore the Lasher - On Reset - Cast Thrash"); + +-- Devouring Ectoplasm +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3638; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3638 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3638,0,0,0,2,0,100,1,0,50,0,0,11,7952,0,0,0,0,0,1,0,0,0,0,0,0,0,"Devouring Ectoplasm - Between 0-50% Health - Cast Clone (No Repeat)"); + +-- Deviate Stinglash +DELETE FROM `smart_scripts` WHERE `entryorguid`=3631 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3631,0,0,0,0,0,100,0,5000,7000,12000,18000,11,6607,0,0,0,0,0,2,0,0,0,0,0,0,0,"Deviate Stinglash - In Combat - Cast Lash"); + +-- Deviate Slayer +DELETE FROM `smart_scripts` WHERE `entryorguid`=3633 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3633,0,0,0,12,0,100,0,0,20,9000,14000,11,7938,0,0,0,0,0,2,0,0,0,0,0,0,0,"Deviate Slayer - Target Between 0-20% Health - Cast Fatal Bite"); + +-- Deviate Creeper +UPDATE `smart_scripts` SET `event_chance`=100 WHERE `entryorguid`=3632 AND `source_type`=0; + +-- Deviate Stalker +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3634; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3634 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, 
`target_y`, `target_z`, `target_o`, `comment`) VALUES +(3634,0,0,0,25,0,100,0,0,0,0,0,11,30831,0,0,0,0,0,1,0,0,0,0,0,0,0,"Deviate Stalker - On Reset - Cast Stealth"); + +-- Gesharahan +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3398; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3398 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3398,0,0,0,0,0,100,0,6000,9000,17000,23000,11,3583,2,0,0,0,0,5,0,0,0,0,0,0,0,"Gesharahan - In Combat - Cast Deadly Poison"); + +-- Death's Head Cultist +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=7872 AND `source_type`=0 AND `id`=0; + +-- Death's Head Necromancer +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=7337 AND `source_type`=0 AND `id`=2; + +-- Razorfen Servitor +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=6132; +DELETE FROM `smart_scripts` WHERE `entryorguid`=6132 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(6132,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razorfen Servitor - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Fix movement for some creatures +UPDATE `creature` SET `spawndist`=0, `MovementType`=0 WHERE `guid` IN (14096,14081,14166,14174,14256); +UPDATE `creature` SET `spawndist`=3, `MovementType`=1 WHERE `guid` IN (13605,13607); +UPDATE `creature` SET `spawndist`=5, `MovementType`=1 WHERE `guid` IN (13820,15066,14418,19791,13598,13594,13614,13611,13615,13627,13626,13600); +UPDATE `creature` SET `spawndist`=10, `MovementType`=1 WHERE `guid` IN (51814,51810,14033); +UPDATE `creature` SET `spawndist`=15, `MovementType`=1 WHERE `guid` IN (51813,14365); +UPDATE `creature` SET `spawndist`=25, `MovementType`=1 WHERE `guid` IN (51815,51816,15081); + +-- Fix position for some creatures +UPDATE `creature` SET `position_x`=-912.213, `position_y`=-2217.01, `position_z`=93.6255, `spawndist`=10, `MovementType`=1 WHERE `guid`=20567; +UPDATE `creature` SET `position_x`=-107.326, `position_y`=-1851.97, `position_z`=92.4291, `spawndist`=8, `MovementType`=1 WHERE `guid`=19473; +UPDATE `creature` SET `position_x`=-53.9019, `position_y`=-1633.82, `position_z`=91.6667, `orientation`=4.1112 WHERE `guid`=20635; +UPDATE `creature` SET `position_x`=-4062.05, `position_y`=-2173.13, `position_z`=51.4807, `orientation`=0.690689 WHERE `guid`=13610; + +-- Add emotes for some creatures +UPDATE `creature_addon` SET `emote`=173 WHERE `guid` IN (13610,13583,13617); +UPDATE `creature_addon` SET `bytes1`=3 WHERE `guid` IN (65607,14200,14253,14195,14215); + +-- Pooling for Baron Longshore +SET @GUID := 84198; +DELETE FROM `creature` WHERE `guid` IN (@GUID, @GUID+1); +INSERT INTO 
`creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,3467,1,0,0,1,1,0,1,-1751.48,-3720.43,14.0041,4.99665,300,5,0,356,0,1,0,0,0,"",0), +(@GUID+1,3467,1,0,0,1,1,0,1,-1723.34,-3811.04,11.8995,5.6289,300,5,0,356,0,1,0,0,0,"",0); + +DELETE FROM `pool_template` WHERE `entry`=1110; +INSERT INTO `pool_template` (`entry`, `max_limit`, `description`) VALUES +(1110,1,"Baron Longshore"); + +DELETE FROM `pool_creature` WHERE `pool_entry`=1110; +INSERT INTO `pool_creature` (`guid`, `pool_entry`, `chance`, `description`) VALUES +(@GUID,1110,0,"Baron Longshore (3467) - Spawn 1"), +(@GUID+1,1110,0,"Baron Longshore (3467) - Spawn 2"), +(15066,1110,0,"Baron Longshore (3467) - Spawn 3"); diff --git a/sql/updates/world/3.3.5/2017_11_14_14_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_14_world_335.sql new file mode 100644 index 00000000000..04f79fb0adf --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_14_world_335.sql @@ -0,0 +1,4 @@ +-- +DELETE FROM `creature` WHERE `guid` IN (40489, 40490, 40491, 40492, 40493, 40494, 40495, 40496) AND `id`=3254; +DELETE FROM `creature_addon` WHERE `guid` IN (40489, 40490, 40491, 40492, 40493, 40494, 40495, 40496); +DELETE FROM `spawn_group` WHERE `groupID`=2 AND `spawnType`=0 AND `spawnId` IN (40489, 40490, 40491, 40492, 40493, 40494, 40495, 40496); diff --git a/sql/updates/world/3.3.5/2017_11_14_15_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_15_world_335.sql new file mode 100644 index 00000000000..376f2bb5b93 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_15_world_335.sql @@ -0,0 +1,548 @@ +-- +-- Longtooth Runner +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5286; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5286 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5286,0,0,1,1,0,100,0,30000,600000,120000,600000,4,1018,0,1,0,0,0,1,0,0,0,0,0,0,0,"Longtooth Runner - Out of Combat - Play Sound 1018"), +(5286,0,1,0,61,0,100,0,0,0,0,0,5,393,0,0,0,0,0,1,0,0,0,0,0,0,0,"Longtooth Runner - Out of Combat - Play Emote 393"), +(5286,0,2,0,0,0,100,0,4000,7000,17000,20000,11,3149,2,0,0,0,0,1,0,0,0,0,0,0,0,"Longtooth Runner - In Combat - Cast Furious Howl"); + +-- Longtooth Howler +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5287; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5287 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES 
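+-- Same howl pattern as Longtooth Runner above (a hedged reading): event_type 1 is the
+-- out-of-combat update timer, action_type 4 plays a sound, and the linked action_type 5 plays
+-- an emote, so the howl sound and animation fire together.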
+(5287,0,0,1,1,0,100,0,30000,600000,120000,600000,4,1018,0,1,0,0,0,1,0,0,0,0,0,0,0,"Longtooth Howler - Out of Combat - Play Sound 1018"), +(5287,0,1,0,61,0,100,0,0,0,0,0,5,393,0,0,0,0,0,1,0,0,0,0,0,0,0,"Longtooth Howler - Out of Combat - Play Emote 393"), +(5287,0,2,1,2,0,100,1,0,20,0,0,39,30,1,0,0,0,0,1,0,0,0,0,0,0,0,"Longtooth Howler - Between 0-20% Health - Call For Help (No Repeat)"); + +-- Longtooth Runner +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5288; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5288 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5288,0,0,1,1,0,100,0,30000,600000,120000,600000,4,1018,0,1,0,0,0,1,0,0,0,0,0,0,0,"Longtooth Runner - Out of Combat - Play Sound 1018"), +(5288,0,1,0,61,0,100,0,0,0,0,0,5,393,0,0,0,0,0,1,0,0,0,0,0,0,0,"Longtooth Runner - Out of Combat - Play Emote 393"), +(5288,0,2,0,0,0,100,0,5000,9000,30000,35000,11,3150,0,0,0,0,0,2,0,0,0,0,0,0,0,"Longtooth Runner - In Combat - Cast Rabies"); + +-- Snarler +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5356; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5356 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5356,0,0,0,0,0,100,0,7000,11000,9000,15000,11,5543,0,0,0,0,0,1,0,0,0,0,0,0,0,"Snarler - In Combat - Cast Fade Out"); + +-- Sprite Dragon +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5276; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5276 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5276,0,0,0,0,0,100,0,3000,5000,9000,15000,11,17630,0,0,0,0,0,2,0,0,0,0,0,0,0,"Sprite Dragon - In Combat - Cast Mana Burn"); + +-- Sprite Darter +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5278; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5278 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES 
+(5278,0,0,0,0,0,100,0,3000,5000,9000,15000,11,17630,0,0,0,0,0,2,0,0,0,0,0,0,0,"Sprite Darter - In Combat - Cast Mana Burn"); + +-- Ironfur Patriarch +DELETE FROM `smart_scripts` WHERE `entryorguid`=5274 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5274,0,0,0,0,0,100,0,6000,9000,12000,21000,11,10968,0,0,0,0,0,1,0,0,0,0,0,0,0,"Ironfur Patriarch - In Combat - Cast Demoralizing Roar"); + +-- Northspring Harpy +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5362; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5362 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5362,0,0,0,14,0,100,0,1000,40,15000,20000,11,11014,0,0,0,0,0,7,0,0,0,0,0,0,0,"Northspring Harpy - Friendly At 1000 Health - Cast Flow of the Northspring"), +(5362,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Northspring Harpy - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Northspring Roguefeather +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5363; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5363 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5363,0,0,0,25,0,100,0,0,0,0,0,11,8876,0,0,0,0,0,1,0,0,0,0,0,0,0,"Northspring Roguefeather - On Reset - Cast Thrash"), +(5363,0,1,0,0,0,100,0,5000,7000,10000,13000,11,6595,0,0,0,0,0,2,0,0,0,0,0,0,0,"Northspring Roguefeather - In Combat - Cast Exploit Weakness"), +(5363,0,2,0,14,0,100,0,1000,40,15000,20000,11,11014,0,0,0,0,0,7,0,0,0,0,0,0,0,"Northspring Roguefeather - Friendly At 1000 Health - Cast Flow of the Northspring"), +(5363,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Northspring Roguefeather - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Northspring Slayer +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5364; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5364 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) 
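+-- The event_type 14 rows in these Northspring blocks appear to be SmartAI's friendly-health
+-- event: a nearby friendly unit missing event_param1 HP within event_param2 yards triggers the
+-- Flow of the Northspring heal on that unit (target_type 7, the event invoker; hedged).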
VALUES +(5364,0,0,0,12,0,100,1,0,20,0,0,11,7160,0,0,0,0,0,2,0,0,0,0,0,0,0,"Northspring Slayer - Target Between 0-20% Health - Cast 'Execute' (No Repeat)"), +(5364,0,1,0,14,0,100,0,1000,40,15000,20000,11,11014,0,0,0,0,0,7,0,0,0,0,0,0,0,"Northspring Slayer - Friendly At 1000 Health - Cast Flow of the Northspring"), +(5364,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Northspring Slayer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Northspring Windcaller +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5366; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5366 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5366,0,0,0,0,0,100,0,0,0,3400,4800,11,9532,64,0,0,0,0,2,0,0,0,0,0,0,0,"Northspring Windcaller - In Combat - Cast Lightning Bolt"), +(5366,0,1,0,0,0,100,0,8000,11000,17000,21000,11,6728,0,0,0,0,0,5,0,0,0,0,0,0,0,"Northspring Windcaller - In Combat - Cast Enveloping Winds"), +(5366,0,2,0,14,0,100,0,1000,40,15000,20000,11,11014,0,0,0,0,0,7,0,0,0,0,0,0,0,"Northspring Windcaller - Friendly At 1000 Health - Cast Flow of the Northspring"), +(5366,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Northspring Windcaller - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Lethlas +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5312; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5312 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5312,0,0,0,0,0,100,0,6000,9000,6000,9000,11,20667,0,0,0,0,0,5,0,0,0,0,0,0,0,"Lethlas - In Combat - Cast Corrosive Acid Breath"), +(5312,0,1,0,0,0,100,0,9000,12000,10000,14000,11,12882,0,0,0,0,0,1,0,0,0,0,0,0,0,"Lethlas - In Combat - Cast Wing Flap"); + +-- Rage Scar Yeti +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5296; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5296 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5296,0,0,0,2,0,100,1,0,30,0,0,11,8599,0,0,0,0,0,1,0,0,0,0,0,0,0,"Rage Scar Yeti - Between 0-30% Health - Cast Enrage (No Repeat)"), +(5296,0,1,0,2,0,100,1,0,30,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Rage Scar Yeti - Between 0-30% Health - Say Line 0 (No Repeat)"); + +DELETE FROM `creature_text` WHERE `CreatureID`=5296; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, 
+
+-- Ferocious Rage Scar
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5299 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5299,0,0,0,0,0,100,0,6000,11000,16000,21000,11,3147,0,0,0,0,0,5,0,0,0,0,0,0,0,"Ferocious Rage Scar - In Combat - Cast Rend Flesh");
+
+-- Wandering Forest Walker
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=7584;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=7584 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(7584,0,0,0,14,0,100,0,1200,40,25000,31000,11,16561,0,0,0,0,0,7,0,0,0,0,0,0,0,"Wandering Forest Walker - Friendly At 1200 Health - Cast Regrowth");
+
+-- Land Walker
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5357 AND `source_type`=0 AND `id`=1;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5357,0,1,0,0,0,100,0,0,0,2300,3900,11,23391,64,0,0,0,0,2,0,0,0,0,0,0,0,"Land Walker - In Combat - Cast Boulder");
+
+-- Cliff Giant
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5358 AND `source_type`=0 AND `id`=1;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5358,0,1,0,0,0,100,0,6000,8000,9000,13000,11,45,0,0,0,0,0,1,0,0,0,0,0,0,0,"Cliff Giant - In Combat - Cast War Stomp");
+
+-- Vale Screecher
+UPDATE `creature_template` SET `unit_class`=2 WHERE `entry`=5307;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5307 AND `source_type`=0 AND `id` IN (1,2);
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5307,0,1,0,0,0,100,0,3000,5000,7000,11000,11,59220,0,0,0,0,0,2,0,0,0,0,0,0,0,"Vale Screecher - In Combat - Cast Chain Lightning"),
+(5307,0,2,0,9,0,100,0,0,8,12000,18000,11,8281,0,0,0,0,0,1,0,0,0,0,0,0,0,"Vale Screecher - Within 0-8 Range - Cast Sonic Burst");
+
+-- Rogue Vale Screecher
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5308 AND `source_type`=0 AND `id`=1;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5308,0,1,0,9,0,100,0,0,8,12000,18000,11,8281,0,0,0,0,0,1,0,0,0,0,0,0,0,"Rogue Vale Screecher - Within 0-8 Range - Cast Sonic Burst");
+
+-- Sea Spray
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5462;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5462 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5462,0,0,0,0,0,100,0,0,0,3400,4800,11,11538,64,0,0,0,0,2,0,0,0,0,0,0,0,"Sea Spray - In Combat - Cast Frostbolt"),
+(5462,0,1,0,9,0,100,0,0,5,11000,15000,11,10987,0,0,0,0,0,1,0,0,0,0,0,0,0,"Sea Spray - Within 0-5 Range - Cast Geyser");
+
+-- Coast Crawl Deepseer
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5328 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5328,0,0,0,1,0,100,0,1000,1000,1800000,1800000,11,12544,0,0,0,0,0,1,0,0,0,0,0,0,0,"Coast Crawl Deepseer - Out of Combat - Cast Frost Armor"),
+(5328,0,1,0,0,0,100,0,0,0,3400,4800,11,9672,64,0,0,0,0,2,0,0,0,0,0,0,0,"Coast Crawl Deepseer - In Combat - Cast Frostbolt"),
+(5328,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Coast Crawl Deepseer - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Coast Crawl Snapclaw
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5327;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5327 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5327,0,0,0,0,0,100,0,6000,8000,12000,16000,11,49978,0,0,0,0,0,2,0,0,0,0,0,0,0,"Coast Crawl Snapclaw - In Combat - Cast Claw Grasp"),
+(5327,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Coast Crawl Snapclaw - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Hatecrest Screamer
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5335 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5335,0,0,0,0,0,100,0,5000,8000,11000,15000,11,8281,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hatecrest Screamer - In Combat - Cast Sonic Burst"),
+(5335,0,1,0,14,0,100,0,800,40,13000,16000,11,6078,0,0,0,0,0,7,0,0,0,0,0,0,0,"Hatecrest Screamer - Friendly At 800 Health - Cast Renew"),
+(5335,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hatecrest Screamer - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Hatecrest Warrior
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5331 AND `source_type`=0 AND `id` IN (1,2);
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5331,0,1,0,0,0,100,0,4000,8000,9000,13000,11,6713,0,0,0,0,0,2,0,0,0,0,0,0,0,"Hatecrest Warrior - In Combat - Cast Disarm"),
+(5331,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hatecrest Warrior - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Hatecrest Wave Rider
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5332 AND `source_type`=0 AND `id`=1;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5332,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hatecrest Wave Rider - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Hatecrest Siren
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5337 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5337,0,0,0,0,0,100,0,0,0,3400,4800,11,9532,64,0,0,0,0,2,0,0,0,0,0,0,0,"Hatecrest Siren - In Combat - Cast Lightning Bolt"),
+(5337,0,1,0,0,0,100,0,8000,14000,19000,24000,11,7645,0,0,0,0,0,6,0,0,0,0,0,0,0,"Hatecrest Siren - In Combat - Cast Dominate Mind"),
+(5337,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hatecrest Siren - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Hatecrest Serpent Guard
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5333 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5333,0,0,0,0,0,100,0,4000,7000,10000,13000,11,8058,0,0,0,0,0,2,0,0,0,0,0,0,0,"Hatecrest Serpent Guard - In Combat - Cast Frost Shock"),
+(5333,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hatecrest Serpent Guard - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Hatecrest Sorceress
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5336 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5336,0,0,0,0,0,100,0,0,0,3400,4800,11,20822,64,0,0,0,0,2,0,0,0,0,0,0,0,"Hatecrest Sorceress - In Combat - Cast Frostbolt"),
+(5336,0,1,0,0,0,100,0,7000,13000,18000,24000,11,10185,0,0,0,0,0,5,0,0,0,0,0,0,0,"Hatecrest Sorceress - In Combat - Cast Blizzard"),
+(5336,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hatecrest Sorceress - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Hatecrest Myrmidon
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5334 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5334,0,0,0,0,0,100,0,3000,6000,8000,11000,11,6533,0,0,0,0,0,2,0,0,0,0,0,0,0,"Hatecrest Myrmidon - In Combat - Cast Net"),
+(5334,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hatecrest Myrmidon - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Lord Shalzaru
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=8136;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=8136 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(8136,0,0,0,0,0,100,0,4000,7000,10000,13000,11,8058,0,0,0,0,0,2,0,0,0,0,0,0,0,"Lord Shalzaru - In Combat - Cast Frost Shock");
+
+-- Lord Lakmaeran
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=12803;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=12803 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(12803,0,0,0,1,0,100,0,500,1000,600000,600000,11,20545,0,0,0,0,0,1,0,0,0,0,0,0,0,"Lord Lakmaeran - Out of Combat - Cast Lightning Shield"),
+(12803,0,1,0,16,0,100,0,20545,1,15000,30000,11,20545,0,0,0,0,0,1,0,0,0,0,0,0,0,"Lord Lakmaeran - On Friendly Unit Missing Buff 'Lightning Shield' - Cast Lightning Shield"),
+(12803,0,2,0,0,0,100,0,3000,5000,12000,16000,11,20543,0,0,0,0,0,2,0,0,0,0,0,0,0,"Lord Lakmaeran - In Combat - Cast Lightning Breath"),
+(12803,0,3,0,0,0,100,0,6000,12000,19000,21000,11,20542,0,0,0,0,0,5,0,0,0,0,0,0,0,"Lord Lakmaeran - In Combat - Cast Static Conduit"),
+(12803,0,4,0,2,0,100,1,0,20,0,0,11,8269,0,0,0,0,0,1,0,0,0,0,0,0,0,"Lord Lakmaeran - Between 0-20% Health - Cast Frenzy (No Repeat)"),
+(12803,0,5,0,2,0,100,1,0,20,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Lord Lakmaeran - Between 0-20% Health - Say Line 0 (No Repeat)");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=12803;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(12803,0,0,"%s goes into a frenzy!",16,0,100,0,0,0,10645,0,"Lord Lakmaeran");
+
+-- Chimaerok Devourer
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=12802;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=12802 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(12802,0,0,0,0,0,100,0,7000,12000,11000,15000,11,20539,0,0,0,0,0,2,0,0,0,0,0,0,0,"Chimaerok Devourer - In Combat - Cast Fatal Bite"),
+(12802,0,1,0,2,0,100,1,0,20,0,0,11,8599,0,0,0,0,0,1,0,0,0,0,0,0,0,"Chimaerok Devourer - Between 0-20% Health - Cast Enrage (No Repeat)"),
+(12802,0,2,0,2,0,100,1,0,20,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Chimaerok Devourer - Between 0-20% Health - Say Line 0 (No Repeat)");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=12802;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(12802,0,0,"%s becomes enraged!",16,0,100,0,0,0,10677,0,"Chimaerok Devourer");
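+
+-- Reading aid (annotation, not part of the original update): in the creature_text rows used
+-- throughout this patch, `Type` 12 is assumed to be monster say, 14 monster yell, and 16 a
+-- text emote; in the `Text` column "%s" is replaced by the creature's name and "$r" by the
+-- target's race. `Language` 0 is universal, and `BroadcastTextId` links the line to the
+-- client's broadcast text entry (treat these value meanings as assumptions to verify).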
+
+-- Chimaerok
+DELETE FROM `smart_scripts` WHERE `entryorguid`=12800 AND `source_type`=0 AND `id`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(12800,0,0,0,0,0,100,0,3000,6000,4000,7000,11,20629,0,0,0,0,0,5,0,0,0,0,0,0,0,"Chimaerok - In Combat - Cast Corrosive Venom Spit");
+
+-- Arcane Chimaerok
+DELETE FROM `smart_scripts` WHERE `entryorguid`=12801 AND `source_type`=0 AND `id`=2;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(12801,0,2,0,13,0,100,0,30000,30000,0,0,11,20537,0,0,0,0,0,2,0,0,0,0,0,0,0,"Arcane Chimaerok - Target Casting - Cast Counterspell");
+
+-- Gordok Ogre-Mage
+DELETE FROM `smart_scripts` WHERE `entryorguid`=11443 AND `source_type`=0 AND `id` IN (0,1,2);
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(11443,0,0,0,4,0,20,1,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gordok Ogre-Mage - On Aggro - Say Line 0"),
+(11443,0,1,0,0,0,100,0,0,0,3400,4800,11,20823,64,0,0,0,0,2,0,0,0,0,0,0,0,"Gordok Ogre-Mage - In Combat - Cast Fireball"),
+(11443,0,2,0,0,0,100,0,5000,12000,35000,45000,11,6742,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gordok Ogre-Mage - In Combat - Cast Bloodlust");
+
+-- Gordunni Mauler
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5234 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5234,0,0,0,25,0,100,0,0,0,0,0,11,8876,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gordunni Mauler - On Reset - Cast Thrash"),
+(5234,0,1,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gordunni Mauler - On Aggro - Say Line 0");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5234;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5234,0,0,"Raaar!!! Me smash $r!",12,0,100,0,0,0,1925,0,"Gordunni Mauler"),
+(5234,0,1,"Me smash! You die!",12,0,100,0,0,0,1926,0,"Gordunni Mauler"),
+(5234,0,2,"I'll crush you!",12,0,100,0,0,0,1927,0,"Gordunni Mauler");
+
+-- Gordunni Brute
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5232 AND `source_type`=0 AND `id`=1;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5232,0,1,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gordunni Brute - On Aggro - Say Line 0");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5232;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5232,0,0,"Raaar!!! Me smash $r!",12,0,100,0,0,0,1925,0,"Gordunni Brute"),
+(5232,0,1,"Me smash! You die!",12,0,100,0,0,0,1926,0,"Gordunni Brute"),
+(5232,0,2,"I'll crush you!",12,0,100,0,0,0,1927,0,"Gordunni Brute");
+
+-- Gordunni Warlock
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5240 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5240,0,0,0,0,0,100,0,0,0,3400,4800,11,20298,64,0,0,0,0,2,0,0,0,0,0,0,0,"Gordunni Warlock - In Combat - Cast Shadow Bolt"),
+(5240,0,1,0,0,0,100,0,6000,11000,14000,21000,11,7289,32,0,0,0,0,5,0,0,0,0,0,0,0,"Gordunni Warlock - In Combat - Cast Shrink"),
+(5240,0,2,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gordunni Warlock - On Aggro - Say Line 0");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5240;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5240,0,0,"Raaar!!! Me smash $r!",12,0,100,0,0,0,1925,0,"Gordunni Warlock"),
+(5240,0,1,"Me smash! You die!",12,0,100,0,0,0,1926,0,"Gordunni Warlock"),
+(5240,0,2,"I'll crush you!",12,0,100,0,0,0,1927,0,"Gordunni Warlock");
+
+-- Gordunni Warlord
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5241 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5241,0,0,0,0,0,100,0,5000,7000,15000,20000,11,10967,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gordunni Warlord - In Combat - Cast Echoing Roar"),
+(5241,0,1,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gordunni Warlord - On Aggro - Say Line 0");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5241;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5241,0,0,"Raaar!!! Me smash $r!",12,0,100,0,0,0,1925,0,"Gordunni Warlord"),
+(5241,0,1,"Me smash! You die!",12,0,100,0,0,0,1926,0,"Gordunni Warlord"),
+(5241,0,2,"I'll crush you!",12,0,100,0,0,0,1927,0,"Gordunni Warlord");
+
+-- Gordunni Battlemaster
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5238 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5238,0,0,0,0,0,100,0,5000,7000,12000,17000,11,13730,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gordunni Battlemaster - In Combat - Cast Demoralizing Shout"),
+(5238,0,1,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gordunni Battlemaster - On Aggro - Say Line 0");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5238;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5238,0,0,"Raaar!!! Me smash $r!",12,0,100,0,0,0,1925,0,"Gordunni Battlemaster"),
+(5238,0,1,"Me smash! You die!",12,0,100,0,0,0,1926,0,"Gordunni Battlemaster"),
+(5238,0,2,"I'll crush you!",12,0,100,0,0,0,1927,0,"Gordunni Battlemaster");
+
+-- Gordunni Mage-Lord
+UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=5239 AND `source_type`=0 AND `id`=0;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5239 AND `source_type`=0 AND `id`=3;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5239,0,3,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gordunni Mage-Lord - On Aggro - Say Line 0");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5239;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5239,0,0,"Raaar!!! Me smash $r!",12,0,100,0,0,0,1925,0,"Gordunni Mage-Lord"),
+(5239,0,1,"Me smash! You die!",12,0,100,0,0,0,1926,0,"Gordunni Mage-Lord"),
+(5239,0,2,"I'll crush you!",12,0,100,0,0,0,1927,0,"Gordunni Mage-Lord");
+
+-- Gordunni Ogre
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5229;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5229 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5229,0,0,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gordunni Ogre - On Aggro - Say Line 0");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5229;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5229,0,0,"Raaar!!! Me smash $r!",12,0,100,0,0,0,1925,0,"Gordunni Ogre"),
+(5229,0,1,"Me smash! You die!",12,0,100,0,0,0,1926,0,"Gordunni Ogre"),
+(5229,0,2,"I'll crush you!",12,0,100,0,0,0,1927,0,"Gordunni Ogre");
+
+-- Gordunni Ogre Mage
+UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=5237 AND `source_type`=0 AND `id`=0;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5237 AND `source_type`=0 AND `id`=2;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5237,0,2,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gordunni Ogre Mage - On Aggro - Say Line 0");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5237;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5237,0,0,"Raaar!!! Me smash $r!",12,0,100,0,0,0,1925,0,"Gordunni Ogre Mage"),
+(5237,0,1,"Me smash! You die!",12,0,100,0,0,0,1926,0,"Gordunni Ogre Mage"),
+(5237,0,2,"I'll crush you!",12,0,100,0,0,0,1927,0,"Gordunni Ogre Mage");
+
+-- Enraged Feral Scar
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5295 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5295,0,0,0,2,0,100,1,0,30,0,0,11,8599,0,0,0,0,0,1,0,0,0,0,0,0,0,"Enraged Feral Scar - Between 0-30% Health - Cast Enrage (No Repeat)"),
+(5295,0,1,0,2,0,100,1,0,30,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Enraged Feral Scar - Between 0-30% Health - Say Line 0 (No Repeat)");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5295;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5295,0,0,"%s becomes enraged!",16,0,100,0,0,0,10677,0,"Enraged Feral Scar");
+
+-- Grimtotem Shaman
+DELETE FROM `smart_scripts` WHERE `entryorguid`=7727 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(7727,0,0,0,0,0,100,0,3000,5000,8000,12000,11,930,0,0,0,0,0,2,0,0,0,0,0,0,0,"Grimtotem Shaman - In Combat - Cast Chain Lightning"),
+(7727,0,1,0,2,0,100,1,0,50,0,0,11,11969,2,0,0,0,0,1,0,0,0,0,0,0,0,"Grimtotem Shaman - Between 0-50% Health - Cast Fire Nova (No Repeat)"),
+(7727,0,2,0,14,0,100,0,1200,40,14000,17000,11,8005,0,0,0,0,0,7,0,0,0,0,0,0,0,"Grimtotem Shaman - Friendly At 1200 Health - Cast Healing Wave"),
+(7727,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Grimtotem Shaman - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Grimtotem Naturalist
+DELETE FROM `smart_scripts` WHERE `entryorguid`=7726 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(7726,0,0,0,4,0,100,1,0,0,0,0,22,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Grimtotem Naturalist - On Aggro - Set Event Phase 1"),
+(7726,0,1,0,0,1,100,0,0,0,3400,4700,11,9739,64,0,0,0,0,2,0,0,0,0,0,0,0,"Grimtotem Naturalist - In Combat (Phase 1) - Cast Wrath"),
+(7726,0,2,0,2,0,100,1,0,50,0,0,22,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Grimtotem Naturalist - Between 0-50% Health - Set Event Phase 2"),
+(7726,0,3,0,0,2,100,1,0,0,0,0,11,19030,1,0,0,0,0,1,0,0,0,0,0,0,0,"Grimtotem Naturalist - In Combat (Phase 2) - Cast Bear Form"),
+(7726,0,4,0,0,2,100,0,5000,7000,11000,13000,11,12161,2,0,0,0,0,2,0,0,0,0,0,0,0,"Grimtotem Naturalist - In Combat (Phase 2) - Cast Maul"),
+(7726,0,5,0,0,2,100,0,8000,10000,24000,28000,11,15727,2,0,0,0,0,1,0,0,0,0,0,0,0,"Grimtotem Naturalist - In Combat (Phase 2) - Cast Demoralizing Roar"),
+(7726,0,6,0,0,2,100,1,0,0,0,0,21,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Grimtotem Naturalist - In Combat (Phase 2) - Enable Combat Movement"),
+(7726,0,7,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Grimtotem Naturalist - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Grimtotem Raider
+DELETE FROM `smart_scripts` WHERE `entryorguid`=7725 AND `source_type`=0 AND `id` IN (1,2,3);
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(7725,0,1,0,0,0,100,0,3000,5000,9000,11000,11,6533,0,0,0,0,0,2,0,0,0,0,0,0,0,"Grimtotem Raider - In Combat - Cast Net"),
+(7725,0,2,0,0,0,100,0,6000,9000,7000,10000,11,845,0,0,0,0,0,2,0,0,0,0,0,0,0,"Grimtotem Raider - In Combat - Cast Cleave"),
+(7725,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Grimtotem Raider - Between 0-15% Health - Flee For Assist (No Repeat)");
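+
+-- Reading aid (annotation, not part of the original update): the Grimtotem Naturalist block
+-- above is a two-phase script. On aggro it sets event phase 1 (ranged caster: Wrath with the
+-- assumed combat-move cast flag 64); at 50% health it switches to phase 2, shifts into Bear
+-- Form (cast flag 1, assumed SMARTCAST_INTERRUPT_PREVIOUS), uses Maul and Demoralizing Roar
+-- (cast flag 2, assumed triggered), and re-enables combat movement. `event_phase_mask` 1/2
+-- restricts each row to its phase; rows with mask 0 fire in any phase.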
+
+-- Woodpaw Mongrel
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5249;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5249 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5249,0,0,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Mongrel - On Aggro - Say Line 0"),
+(5249,0,1,0,0,0,100,0,5000,9000,14000,18000,11,7102,32,0,0,0,0,2,0,0,0,0,0,0,0,"Woodpaw Mongrel - In Combat - Cast Contagion of Rot"),
+(5249,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Mongrel - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5249;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5249,0,0,"Grrrrr!",12,0,100,0,0,0,1869,0,"Woodpaw Mongrel"),
+(5249,0,1,"Grrrr... fresh meat!",12,0,100,0,0,0,1870,0,"Woodpaw Mongrel"),
+(5249,0,2,"More bones to gnaw on...",12,0,100,0,0,0,1871,0,"Woodpaw Mongrel");
+
+-- Woodpaw Trapper
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5251;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5251 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5251,0,0,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Trapper - On Aggro - Say Line 0"),
+(5251,0,1,0,0,0,100,0,0,0,2300,3900,11,6660,64,0,0,0,0,2,0,0,0,0,0,0,0,"Woodpaw Trapper - In Combat - Cast Shoot"),
+(5251,0,2,0,9,0,100,0,0,20,9000,12000,11,6533,0,0,0,0,0,2,0,0,0,0,0,0,0,"Woodpaw Trapper - Within 0-20 Range - Cast Net"),
+(5251,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Trapper - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5251;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5251,0,0,"Grrrrr!",12,0,100,0,0,0,1869,0,"Woodpaw Trapper"),
+(5251,0,1,"Grrrr... fresh meat!",12,0,100,0,0,0,1870,0,"Woodpaw Trapper"),
+(5251,0,2,"More bones to gnaw on...",12,0,100,0,0,0,1871,0,"Woodpaw Trapper");
+
+-- Woodpaw Brute
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5253;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5253 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5253,0,0,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Brute - On Aggro - Say Line 0"),
+(5253,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Brute - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5253;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5253,0,0,"Grrrrr!",12,0,100,0,0,0,1869,0,"Woodpaw Brute"),
+(5253,0,1,"Grrrr... fresh meat!",12,0,100,0,0,0,1870,0,"Woodpaw Brute"),
+(5253,0,2,"More bones to gnaw on...",12,0,100,0,0,0,1871,0,"Woodpaw Brute");
+
+-- Woodpaw Mystic
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5254 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5254,0,0,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Mystic - On Aggro - Say Line 0"),
+(5254,0,1,0,0,0,100,0,0,0,3400,4800,11,9532,64,0,0,0,0,2,0,0,0,0,0,0,0,"Woodpaw Mystic - In Combat - Cast Lightning Bolt"),
+(5254,0,2,0,14,0,100,0,800,40,16000,21000,11,8005,0,0,0,0,0,7,0,0,0,0,0,0,0,"Woodpaw Mystic - Friendly At 800 Health - Cast Healing Wave"),
+(5254,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Mystic - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5254;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5254,0,0,"Grrrrr!",12,0,100,0,0,0,1869,0,"Woodpaw Mystic"),
+(5254,0,1,"Grrrr... fresh meat!",12,0,100,0,0,0,1870,0,"Woodpaw Mystic"),
+(5254,0,2,"More bones to gnaw on...",12,0,100,0,0,0,1871,0,"Woodpaw Mystic");
+
+-- Woodpaw Reaver
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5255;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5255 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5255,0,0,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Reaver - On Aggro - Say Line 0"),
+(5255,0,1,0,4,0,100,1,0,0,0,0,11,7366,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Reaver - On Aggro - Cast Berserker Stance"),
+(5255,0,2,0,0,0,100,0,5000,9000,7000,11000,11,7369,0,0,0,0,0,2,0,0,0,0,0,0,0,"Woodpaw Reaver - In Combat - Cast Cleave"),
+(5255,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Reaver - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5255;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5255,0,0,"Grrrrr!",12,0,100,0,0,0,1869,0,"Woodpaw Reaver"),
+(5255,0,1,"Grrrr... fresh meat!",12,0,100,0,0,0,1870,0,"Woodpaw Reaver"),
+(5255,0,2,"More bones to gnaw on...",12,0,100,0,0,0,1871,0,"Woodpaw Reaver");
+
+-- Woodpaw Alpha
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5258;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5258 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5258,0,0,0,4,0,15,1,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Alpha - On Aggro - Say Line 0"),
+(5258,0,1,0,2,0,100,1,0,40,0,0,11,8599,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Alpha - Between 0-40% Health - Cast Enrage (No Repeat)"),
+(5258,0,2,0,2,0,100,1,0,40,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Alpha - Between 0-40% Health - Say Line 1 (No Repeat)"),
+(5258,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Woodpaw Alpha - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+DELETE FROM `creature_text` WHERE `CreatureID`=5258;
+INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES
+(5258,0,0,"Grrrrr!",12,0,100,0,0,0,1869,0,"Woodpaw Alpha"),
+(5258,0,1,"Grrrr... fresh meat!",12,0,100,0,0,0,1870,0,"Woodpaw Alpha"),
+(5258,0,2,"More bones to gnaw on...",12,0,100,0,0,0,1871,0,"Woodpaw Alpha"),
+(5258,1,0,"%s becomes enraged!",16,0,100,0,0,0,10677,0,"Woodpaw Alpha");
+
+-- Gnarl Leafbrother
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5354 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5354,0,0,0,9,0,100,0,0,30,18000,21000,11,11922,0,0,0,0,0,2,0,0,0,0,0,0,0,"Gnarl Leafbrother - Within 0-30 Range - Cast Entangling Roots");
+
+-- Zukk'ash Stinger
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5244;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5244 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5244,0,0,0,0,0,100,0,4000,9000,14000,18000,11,5416,0,0,0,0,0,2,0,0,0,0,0,0,0,"Zukk'ash Stinger - In Combat - Cast Venom Sting");
+
+-- Zukk'ash Wasp
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5245;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5245 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5245,0,0,0,0,0,100,0,4000,12000,8000,16000,11,744,32,0,0,0,0,2,0,0,0,0,0,0,0,"Zukk'ash Wasp - In Combat - Cast Poison");
+
+-- Zukk'ash Worker
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5246;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5246 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5246,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Zukk'ash Worker - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Zukk'ash Tunneler
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5247;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5247 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5247,0,0,0,0,0,100,0,6000,8000,45000,50000,11,6016,0,0,0,0,0,2,0,0,0,0,0,0,0,"Zukk'ash Tunneler - In Combat - Cast Pierce Armor");
+
+-- Qirot
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5350;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5350 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5350,0,0,0,0,0,100,0,3000,6000,6000,11000,11,13298,32,0,0,0,0,2,0,0,0,0,0,0,0,"Qirot - In Combat - Cast Poison");
+
+-- Lady Szallah
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5343;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=5343 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(5343,0,0,0,0,0,100,0,0,0,3400,4800,11,9532,64,0,0,0,0,2,0,0,0,0,0,0,0,"Lady Szallah - In Combat - Cast Lightning Bolt"),
+(5343,0,1,0,0,0,100,0,8000,11000,12000,15000,11,8435,0,0,0,0,0,5,0,0,0,0,0,0,0,"Lady Szallah - In Combat - Cast Forked Lightning"),
+(5343,0,2,0,0,0,100,0,12000,15000,17000,21000,11,6728,0,0,0,0,0,6,0,0,0,0,0,0,0,"Lady Szallah - In Combat - Cast Enveloping Winds"),
+(5343,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Lady Szallah - Between 0-15% Health - Flee For Assist (No Repeat)");
+
+-- Skarr the Unbreakable
+UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=11498;
+DELETE FROM `smart_scripts` WHERE `entryorguid`=11498 AND `source_type`=0;
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES
+(11498,0,0,0,0,0,100,0,5000,7000,13000,16000,11,11428,0,0,0,0,0,2,0,0,0,0,0,0,0,"Skarr the Unbreakable - In Combat - Cast Knockdown"),
+(11498,0,1,0,9,0,100,0,0,5,7000,10000,11,15496,0,0,0,0,0,2,0,0,0,0,0,0,0,"Skarr the Unbreakable - Within 0-5 Range - Cast Cleave"),
+(11498,0,2,0,0,0,100,0,8000,12000,9000,15000,11,13737,0,0,0,0,0,2,0,0,0,0,0,0,0,"Skarr the Unbreakable - In Combat - Cast Mortal Strike");
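+
+-- Reading aid (annotation, not part of the original update): rows with event_type 9 (assumed
+-- SMART_EVENT_RANGE) fire while the current victim is between param1 and param2 yards away,
+-- repeating every param3-param4 ms, which is why the Cleave row above is labelled
+-- "Within 0-5 Range" rather than "In Combat".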
$r.",12,0,100,0,0,0,9500,0,"Mushgog"); + +-- Random movement for some creatures +UPDATE `creature` SET `spawndist`=5, `MovementType`=1 WHERE `guid` IN (51841, 51844); +UPDATE `creature` SET `spawndist`=10, `MovementType`=1 WHERE `guid` IN (51839, 51683, 51843); + +-- Fix spawn position for one creature +UPDATE `creature` SET `position_x`=-5704.74, `position_y`=3379.41, `position_z`=63.0866, `spawndist`=15, `MovementType`=1 WHERE `guid`=49970; + +-- Fix model for Grimtotem Naturalists +UPDATE `creature` SET `modelid`=0 WHERE `id`=7726 AND `modelid`=2289; diff --git a/sql/updates/world/3.3.5/2017_11_14_16_world.sql b/sql/updates/world/3.3.5/2017_11_14_16_world.sql new file mode 100644 index 00000000000..2a518937ad5 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_16_world.sql @@ -0,0 +1,139 @@ +-- Death's Head Adept +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=4516 AND `source_type`=0 AND `id`=0; + +-- Rotting Agam'ar +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4512; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4512 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4512,0,0,1,0,0,100,2,9000,15000,30000,45000,11,8267,0,0,0,0,0,1,0,0,0,0,0,0,0,"Rotting Agam'ar - In Combat - Cast Cursed Blood"), +(4512,0,1,0,61,0,100,2,0,0,0,0,11,8268,0,0,0,0,0,1,0,0,0,0,0,0,0,"Rotting Agam'ar - In Combat - Cast Cursed Blood"); + +-- Raging Agam'ar +DELETE FROM `smart_scripts` WHERE `entryorguid`=4514 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4514,0,1,0,2,0,100,3,0,50,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Raging Agam'ar - Between 0-50% Health - Say Line 0 (No Repeat)"); + +DELETE FROM `creature_text` WHERE `CreatureID`=4514; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4514,0,0,"%s goes into a frenzy!",16,0,100,0,0,0,38630,0,"Agathelos the Raging"); + +-- Agam'ar +UPDATE `smart_scripts` SET `event_chance`=100 WHERE `entryorguid`=4511 AND `source_type`=0 AND `id`=0; + +-- Death's Head Acolyte +DELETE FROM `smart_scripts` WHERE `entryorguid`=4515 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES 
+(4515,0,0,0,0,0,100,2,2000,3000,11000,15000,11,15785,0,0,0,0,0,5,0,0,0,0,0,0,0,"Death's Head Acolyte - In Combat - Cast Mana Burn"), +(4515,0,1,0,14,0,100,2,1000,40,14000,19000,11,8362,0,0,0,0,0,7,0,0,0,0,0,0,0,"Death's Head Acolyte - Friendly At 1000 Health - Cast Renew"); + +-- Razorfen Dustweaver +DELETE FROM `smart_scripts` WHERE `entryorguid`=4522 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4522,0,0,0,0,0,100,2,2000,3000,13000,16000,11,6728,0,0,0,0,0,5,0,0,0,0,0,0,0,"Razorfen Dustweaver - In Combat - Cast Enveloping Winds"), +(4522,0,1,0,11,0,100,2,0,0,0,0,11,8271,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razorfen Dustweaver - On Respawn - Cast Summon Wind Howler"); + +-- Razorfen Handler +DELETE FROM `smart_scripts` WHERE `entryorguid`=4530 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4530,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razorfen Handler - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Razorfen Beastmaster +DELETE FROM `smart_scripts` WHERE `entryorguid`=4532 AND `source_type`=0 AND `id` IN (2,3); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4532,0,2,0,0,0,100,2,5000,8000,12000,18000,11,8275,0,0,0,0,0,5,0,0,0,0,0,0,0,"Razorfen Beastmaster - In Combat - Cast Poisoned Shot"), +(4532,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razorfen Beastmaster - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Razorfen Groundshaker +UPDATE `smart_scripts` SET `target_type`=5 WHERE `entryorguid`=4525 AND `source_type`=0; +UPDATE `smart_scripts` SET `action_param2`=0 WHERE `entryorguid`=4525 AND `source_type`=0 AND `id`=1; + +-- Razorfen Warden +UPDATE `smart_scripts` SET `event_param3`=7000, `event_param4`=10000 WHERE `entryorguid`=4437 AND `source_type`=0 AND `id`=0; + +-- Death's Head Priest +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=4517 AND `source_type`=0 AND `id`=1; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4517 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, 
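+
+-- Reading aid (annotation, not part of the original update): many rows in this file carry
+-- event_flags 2 or 3. Assuming TrinityCore's SmartAI flag bits, 0x01 = NOT_REPEATABLE
+-- (matching the "(No Repeat)" comments) and 0x02 = DIFFICULTY_0, i.e. the event only fires
+-- in the dungeon's normal difficulty, the only mode Razorfen Kraul/Downs have on 3.3.5.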
`action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4517,0,2,0,14,0,100,2,1000,40,14000,20000,11,6063,0,0,0,0,0,7,0,0,0,0,0,0,0,"Death's Head Priest - Friendly At 1000 Health - Cast Heal"); + +-- Razorfen Spearhide +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4438; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4438 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4438,0,0,0,0,0,100,2,4000,7000,7000,11000,11,8259,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razorfen Spearhide - In Combat - Cast Whirling Barrage"), +(4438,0,1,0,0,0,100,2,1000,3000,60000,70000,11,8148,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razorfen Spearhide - In Combat - Cast Thorns Aura"); + +-- Razorfen Stalker +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=6035; +DELETE FROM `smart_scripts` WHERE `entryorguid`=6035 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(6035,0,0,0,25,0,100,3,0,0,0,0,11,22766,0,0,0,0,0,1,0,0,0,0,0,0,0,"Razorfen Stalker - On Reset - Cast Sneak"), +(6035,0,1,0,67,0,100,2,4000,7000,0,0,11,7159,0,0,0,0,0,2,0,0,0,0,0,0,0,"Razorfen Stalker - On Behind Target - Cast Backstab"); + +-- Greater Kraul Bat +DELETE FROM `smart_scripts` WHERE `entryorguid`=4539 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4539,0,0,0,0,0,100,2,4000,6000,8000,13000,11,8281,0,0,0,0,0,1,0,0,0,0,0,0,0,"Greater Kraul Bat - In Combat - Cast Sonic Burst"); + +-- Roogug +DELETE FROM `smart_scripts` WHERE `entryorguid`=6168 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(6168,0,0,0,0,0,100,2,0,0,3400,4700,11,9532,64,0,0,0,0,2,0,0,0,0,0,0,0,"Roogug - In Combat CMC - Cast Lightning Bolt"), +(6168,0,1,0,11,0,100,3,0,0,0,0,11,8270,0,0,0,0,0,1,0,0,0,0,0,0,0,"Roogug - On Respawn - Cast Summon Earth Rumbler"); + +-- Aggem Thorncurse +DELETE 
FROM `smart_scripts` WHERE `entryorguid`=4424 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4424,0,2,0,2,0,100,0,0,75,13000,18000,11,14900,0,0,0,0,0,1,0,0,0,0,0,0,0,"Aggem Thorncurse - Between 0-75% Health - Cast Chain Heal"); + +-- Death Speaker Jargba +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=4428 AND `source_type`=0 AND `id`=0; + +-- Overlord Ramtusk +DELETE FROM `smart_scripts` WHERE `entryorguid`=4420 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4420,0,0,0,0,0,100,2,1000,3000,30000,45000,11,9128,0,0,0,0,0,1,0,0,0,0,0,0,0,"Overlord Ramtusk - In Combat - Cast Battle Shout"), +(4420,0,1,0,9,0,100,2,0,8,8000,14000,11,15548,0,0,0,0,0,1,0,0,0,0,0,0,0,"Overlord Ramtusk - Within 0-8 Range - Cast Thunderclap"), +(4420,0,2,3,4,0,100,2,0,0,0,0,11,7165,0,0,0,0,0,1,0,0,0,0,0,0,0,"Overlord Ramtusk - On Aggro - Cast Battle Stance"), +(4420,0,3,4,61,0,100,2,0,0,0,0,39,15,1,0,0,0,0,1,0,0,0,0,0,0,0,"Overlord Ramtusk - On Aggro - Call For Help"), +(4420,0,4,0,61,0,100,2,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Overlord Ramtusk - On Aggro - Say Line 0"); + +DELETE FROM `creature_text` WHERE `CreatureID`=4420; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4420,0,0,"Victory! 
For Agamaggan!",14,0,100,0,0,5812,6178,0,"Overlord Ramtusk"); + +-- Charlga Razorflank +DELETE FROM `smart_scripts` WHERE `entryorguid`=4421 AND `source_type`=0 AND `id` IN (5,7); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4421,0,5,0,0,0,100,2,9000,13000,9000,13000,11,8361,0,0,0,0,0,1,0,0,0,0,0,0,0,"Charlga Razorflank - In Combat - Cast Purity"), +(4421,0,7,0,0,0,100,2,0,0,3400,4800,11,8292,64,0,0,0,0,2,0,0,0,0,0,0,0,"Charlga Razorflank - In Combat - Cast Chain Bolt"); + +UPDATE `creature_text` SET `Sound`=5813 WHERE `BroadcastTextId`=6179; +UPDATE `creature_text` SET `Sound`=5814 WHERE `BroadcastTextId`=6180; +UPDATE `creature_text` SET `Sound`=5815 WHERE `BroadcastTextId`=6181; +UPDATE `creature_text` SET `Sound`=5816 WHERE `BroadcastTextId`=6182; +UPDATE `creature_text` SET `Sound`=5818 WHERE `BroadcastTextId`=6183; + +-- Agathelos the Raging +DELETE FROM `smart_scripts` WHERE `entryorguid`=4422 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4422,0,0,1,4,0,100,3,0,0,0,0,11,8260,0,0,0,0,0,1,0,0,0,0,0,0,0,"Agathelos the Raging - On Aggro - Cast Rushing Charge"), +(4422,0,1,0,61,0,100,3,0,0,0,0,39,15,1,0,0,0,0,1,0,0,0,0,0,0,0,"Agathelos the Raging - On Aggro - Call For Help"), +(4422,0,2,0,2,0,100,3,0,40,0,0,11,8269,0,0,0,0,0,1,0,0,0,0,0,0,0,"Agathelos the Raging - Between 0-40% Health - Cast Frenzy (No Repeat)"), +(4422,0,3,0,2,0,100,3,0,40,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Agathelos the Raging - Between 0-40% Health - Say Line 0 (No Repeat)"), +(4422,0,4,0,2,0,100,3,0,60,0,0,11,8269,0,0,0,0,0,1,0,0,0,0,0,0,0,"Agathelos the Raging - Between 0-60% Health - Cast Frenzy (No Repeat)"), +(4422,0,5,0,2,0,100,3,0,60,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Agathelos the Raging - Between 0-60% Health - Say Line 0 (No Repeat)"), +(4422,0,6,0,9,0,100,2,0,5,21000,35000,11,8555,0,0,0,0,0,2,0,0,0,0,0,0,0,"Agathelos the Raging - Within 0-5 Range - Cast Left for Dead"), +(4422,0,7,0,0,0,100,2,8000,14000,25000,31000,11,8285,0,0,0,0,0,1,0,0,0,0,0,0,0,"Agathelos the Raging - In Combat - Cast Rampage"); + +DELETE FROM `creature_text` WHERE `CreatureID`=4422; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4422,0,0,"%s goes into a frenzy!",16,0,100,0,0,0,38630,0,"Agathelos the Raging"); + +-- Remove wrong spawns +DELETE FROM `creature` WHERE `guid` IN +(87339,87341,87366, -- Tamed Hyena (Already spawned as part of Razorfen Beastmaster's scripts) +87300,87309,87410, -- Stone Rumbler (Already spawned as part of Razorfen Geomancer's scripts) +87348, -- Stone Rumbler (Will be spawned as part of Roogug's scripts) +87361,87406,87409); -- Wind Howler (Will be 
spawned as part of Razorfen Dustweaver's scripts) +DELETE FROM `creature_addon` WHERE `guid` IN (87339,87341,87366,87300,87309,87410,87348,87361,87406,87409);
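+-- Reference note (editorial, hedged): column meanings in the rows above follow
+-- TrinityCore's SmartAI enums (SmartScriptMgr.h). For example, the Death's Head
+-- Acolyte row (4515,0,0,0,0,0,100,2,2000,3000,11000,15000,11,15785,...,5,...)
+-- reads: event_type 0 (SMART_EVENT_UPDATE_IC) fires 2000-3000 ms into combat and
+-- repeats every 11000-15000 ms; action_type 11 (SMART_ACTION_CAST) casts spell
+-- 15785 (Mana Burn); target_type 5 (SMART_TARGET_HOSTILE_RANDOM) picks a random
+-- unit on the threat list. Enum names are given for orientation only and are
+-- not part of the data change.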
\ No newline at end of file diff --git a/sql/updates/world/3.3.5/2017_11_14_17_world_335.sql b/sql/updates/world/3.3.5/2017_11_14_17_world_335.sql new file mode 100644 index 00000000000..86905b12dad --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_17_world_335.sql @@ -0,0 +1,21 @@ +-- Love is in the Air: "A Gift for the ..." quests +DELETE FROM `quest_request_items` WHERE `ID` IN (24597,24609,24610,24611,24612,24613,24614,24615); +INSERT INTO `quest_request_items` (`ID`,`CompletionText`,`VerifiedBuild`) VALUES +(24597,'Well met, $c. Did you have something for me?',-1), +(24609,'Well met, $c. Did you have something for me?',-1), +(24610,'Well met, $c. Did you have something for me?',-1), +(24611,'Well met, $c. Did you have something for me?',-1), +(24612,'Well met, $c. Did you have something for me?',-1), +(24613,'Well met, $c. Did you have something for me?',-1), +(24614,'Well met, $c. Did you have something for me?',-1), +(24615,'Well met, $c. Did you have something for me?',-1); +DELETE FROM `quest_offer_reward` WHERE `ID` IN (24597,24609,24610,24611,24612,24613,24614,24615); +INSERT INTO `quest_offer_reward` (`ID`,`RewardText`,`VerifiedBuild`) VALUES +(24597,'My thanks for this Lovely Charm Bracelet.',-1), +(24609,'My thanks for this Lovely Charm Bracelet.',-1), +(24610,'My thanks for this Lovely Charm Bracelet.',-1), +(24611,'My thanks for this Lovely Charm Bracelet.',-1), +(24612,'My thanks for this Lovely Charm Bracelet.',-1), +(24613,'My thanks for this Lovely Charm Bracelet.',-1), +(24614,'My thanks for this Lovely Charm Bracelet.',-1), +(24615,'My thanks for this Lovely Charm Bracelet.',-1); diff --git a/sql/updates/world/3.3.5/2017_11_14_18_world.sql b/sql/updates/world/3.3.5/2017_11_14_18_world.sql new file mode 100644 index 00000000000..3321826898a --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_14_18_world.sql @@ -0,0 +1,15 @@ +DELETE FROM `smart_scripts` WHERE `entryorguid`=15958 AND `source_type`=0 AND id=5; +DELETE FROM `smart_scripts` WHERE `entryorguid`=15656 AND `source_type`=0 AND id=7; +DELETE FROM `smart_scripts` WHERE `entryorguid`=15402 AND `source_type`=0 AND id>4; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(15958,0,5,0,6,2,100,0,0,0,0,0,63,1,1,0,0,0,0,19,15402,100,0,0,0,0,0,"Gharsul the Remorseless - On death - Set counter"), +(15656,0,7,0,6,2,100,0,0,0,0,0,63,1,1,0,0,0,0,19,15402,100,0,0,0,0,0,"Angershade - On death - Set counter"), +(15402,0,5,6,77,1,100,0,1,3,0,0,15,8488,0,0,0,0,0,12,1,0,0,0,0,0,0,"Apprentice Mirveda - On counter - Complete Quest Unexpected Results"), +(15402,0,6,7,61,1,100,0,0,0,0,0,63,1,0,1,0,0,0,1,0,0,0,0,0,0,0,"Apprentice Mirveda - On counter - Reset counter"), +(15402,0,7,0,61,1,100,0,0,0,0,0,80,1540202,2,0,0,0,0,1,0,0,0,0,0,0,0,"Apprentice Mirveda - Linked with Previous Event - Run Script"), +(15402,0,8,0,11,0,100,512,0,0,0,0,18,512,0,0,0,0,0,1,0,0,0,0,0,0,0,"Apprentice Mirveda - On Spawn - Set Immune to NPC"); + +DELETE FROM `waypoints` WHERE `entry` IN(15958,15656); +INSERT INTO `waypoints` (`entry`, `pointid`, `position_x`, `position_y`, `position_z`, `point_comment`) VALUES +(15958, 1, 8711.897, -7160.284, 42.592, 
'Gharsul the Remorseless'), +(15656, 1, 8712.909, -7159.269, 41.493, 'Angershade'); diff --git a/sql/updates/world/3.3.5/2017_11_16_00_world.sql b/sql/updates/world/3.3.5/2017_11_16_00_world.sql new file mode 100644 index 00000000000..b8238149400 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_00_world.sql @@ -0,0 +1,2 @@ +-- +UPDATE `creature_template` SET `spell3`=0, `spell7`=54788 WHERE `entry` IN (29602); diff --git a/sql/updates/world/3.3.5/2017_11_16_01_world_335.sql b/sql/updates/world/3.3.5/2017_11_16_01_world_335.sql new file mode 100644 index 00000000000..0278e921051 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_01_world_335.sql @@ -0,0 +1,5 @@ +-- +UPDATE `gameobject` SET `position_x`=-9202.341, `position_y`=-2442.0495, `position_z`=58.6061, `orientation`=0.431121 WHERE `guid`=5236; +UPDATE `gameobject` SET `position_x`=-9143.816, `position_y`=-2506.8930, `position_z`=118.587, `orientation`=4.210773 WHERE `guid`=1537; +UPDATE `gameobject` SET `position_x`=-9603.038, `position_y`=-1909.8031, `position_z`=61.5558, `orientation`=4.150383 WHERE `guid`=1740; +UPDATE `gameobject` SET `position_x`=-9752.696, `position_y`=-1765.9846, `position_z`=52.6211, `orientation`=0.900624 WHERE `guid`=1374; diff --git a/sql/updates/world/3.3.5/2017_11_16_02_world.sql b/sql/updates/world/3.3.5/2017_11_16_02_world.sql new file mode 100644 index 00000000000..a72655a466b --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_02_world.sql @@ -0,0 +1,18 @@ +-- +SET @ENTRY := 24972; +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=@ENTRY; +DELETE FROM `smart_scripts` WHERE `entryorguid`=@ENTRY AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES +(@ENTRY,0,0,0,8,0,100,1,44997,0,0,0,41,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Erratic Sentry - On Spellhit 'Converting Sentry' - Despawn Instant"), +(@ENTRY,0,1,0,0,0,100,0,5000,9000,13000,20000,86,35856,1,2,0,0,0,2,0,0,0,0,0,0,0,"Erratic Sentry - In Combat - Cross Cast 'Stun'"), +(@ENTRY,0,2,0,0,0,100,0,5500,6700,11200,16700,11,33688,0,0,0,0,0,2,0,0,0,0,0,0,0,"Erratic Sentry - In Combat - Cast 'Crystal Strike'"), +(@ENTRY,0,3,0,0,0,100,0,8000,12000,8000,12000,11,35892,0,0,0,0,0,1,0,0,0,0,0,0,0,"Erratic Sentry - In Combat - Cast 'Suppression'"), +(@ENTRY,0,4,5,2,0,100,1,0,50,0,0,11,45014,1,0,0,0,0,1,0,0,0,0,0,0,0,"Erratic Sentry - Between 0-50% Health - Cast 'Capacitor Overload' (No Repeat)"), +(@ENTRY,0,5,6,61,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Erratic Sentry - Between 0-50% Health - Say Line 0 (No Repeat)"), +(@ENTRY,0,6,0,61,0,100,0,0,0,0,0,75,44986,0,0,0,0,0,1,0,0,0,0,0,0,0,"Erratic Sentry - Between 0-50% Health - Add Aura 'Broken Capacitor'"), +(@ENTRY,0,7,8,2,0,100,0,95,99,2000,2000,28,44994,0,0,0,0,0,1,0,0,0,0,0,0,0,"Erratic Sentry - Between 95-99% Health - Remove Aura 'Self Repair'"), +(@ENTRY,0,8,0,61,0,100,0,0,0,0,0,28,44986,0,0,0,0,0,1,0,0,0,0,0,0,0,"Erratic Sentry - Between 95-99% Health - Remove Aura 'Broken Capacitor'"); + +DELETE FROM `creature_text` WHERE `CreatureID`=24972; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, 
`BroadcastTextId`, `TextRange`, `comment`) VALUES +(24972, 0, 0, "Core overload detected. System malfunction detected...", 12, 0, 100, 0, 0, 0, 24008, 0, "Erratic Sentry" ); diff --git a/sql/updates/world/3.3.5/2017_11_16_03_world.sql b/sql/updates/world/3.3.5/2017_11_16_03_world.sql new file mode 100644 index 00000000000..7dc084911cc --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_03_world.sql @@ -0,0 +1,13 @@ +-- +UPDATE `smart_scripts` SET `event_param1`=2000, `event_param2`=2000, `event_param3`=2000,`event_param4`=2000 WHERE `entryorguid` IN (3021900,3021901,3021902) AND `source_type`=9 AND `id`=5; +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=30222; +DELETE FROM `smart_scripts` WHERE `entryorguid`=30222 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(30222,0,0,0,54,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Stormforged Infiltrator - On Just Summoned - Say Line 0"), +(30222,0,1,0,6,0,100,0,0,0,0,0,41,0,0,0,0,0,0,23,0,0,0,0,0,0,0,"Stormforged Infiltrator - On Just Died - Despawn Ethereal Frostwarg"); + +DELETE FROM `creature_text` WHERE `CreatureID`=30222; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(30222,0,0,"Found me, did ya? Now ya gotta die!",12,0,100,0,0,0,30916,0,"Stormforged Infiltrator"), +(30222,0,1,"Die, friend of the frost!",12,0,100,0,0,0,30917,0,"Stormforged Infiltrator"), +(30222,0,2,"You won't live to speak of this!",12,0,100,0,0,0,30918,0,"Stormforged Infiltrator"); diff --git a/sql/updates/world/3.3.5/2017_11_16_04_world.sql b/sql/updates/world/3.3.5/2017_11_16_04_world.sql new file mode 100644 index 00000000000..7effb159276 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_04_world.sql @@ -0,0 +1,4 @@ +-- +DELETE FROM `smart_scripts` WHERE `entryorguid`=26878 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(26878,0,1,0,19,0,100,0,12440,0,0,0,11,49511,0,0,0,0,0,7,0,0,0,0,0,0,0,"Rodney Wells - On Quest 'To Stars' Rest!' 
Taken - Invoker Cast 'Gryphon Taxi to Stars' Rest'"); diff --git a/sql/updates/world/3.3.5/2017_11_16_05_world.sql b/sql/updates/world/3.3.5/2017_11_16_05_world.sql new file mode 100644 index 00000000000..7d301ecf1bb --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_05_world.sql @@ -0,0 +1,15 @@ +-- +DELETE FROM `smart_scripts` WHERE `source_type`=1 AND `entryorguid` IN (185937,185938,185936,185932); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(185932, 1, 0, 1, 62, 0, 100, 0, 8685, 0, 0, 0, 85, 41035, 2, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Obsidia\'s Egg - On Gossip Option 0 Selected - Invoker Cast 41035 Four Dragons: Force Cast - Obsidia'), +(185932, 1, 1, 2, 61, 0, 100, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Obsidia\'s Egg - On Gossip Option 0 Selected - Close Gossip'), +(185932, 1, 2, 0, 61, 0, 100, 0, 0, 0, 0, 0, 70, 300, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Obsidia\'s Egg - On Gossip Option 0 Selected - Despawn GO'), +(185936, 1, 0, 1, 62, 0, 100, 0, 8689, 0, 0, 0, 85, 41044, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Rivendark\'s Egg - On Gossip Option 0 Selected - Invoker cast Four Dragons: Force Cast - Rivendark'), +(185936, 1, 1, 2, 61, 0, 100, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Rivendark\'s Egg - On Gossip Option 0 Selected - Close Gossip'), +(185936, 1, 2, 0, 61, 0, 100, 0, 0, 0, 0, 0, 70, 300, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Rivendark\'s Egg - On Gossip Option 0 Selected - Despawn GO'), +(185937, 1, 0, 1, 62, 0, 100, 0, 8690, 0, 0, 0, 85, 41050, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Furywing\'s Egg - On Gossip Option 0 Selected - Invoker cast Four Dragons: Force Cast - Furywing'), +(185937, 1, 1, 2, 61, 0, 100, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Furywing\'s Egg - On Gossip Option 0 Selected - Close Gossip'), +(185937, 1, 2, 0, 61, 0, 100, 0, 0, 0, 0, 0, 70, 300, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Furywing\'s Egg - On Gossip Option 0 Selected - Despawn GO'), +(185938, 1, 0, 1, 62, 0, 100, 0, 8691, 0, 0, 0, 85, 41052, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Insidion\'s Egg - On Gossip Option 0 Selected - Invoker cast Four Dragons: Force Cast - Insidion'), +(185938, 1, 1, 2, 61, 0, 100, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Insidion\'s Egg - On Gossip Option 0 Selected - Close Gossip'), +(185938, 1, 2, 0, 61, 0, 100, 0, 0, 0, 0, 0, 70, 300, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Insidion\'s Egg - On Gossip Option 0 Selected - Despawn GO'); diff --git a/sql/updates/world/3.3.5/2017_11_16_06_world.sql b/sql/updates/world/3.3.5/2017_11_16_06_world.sql new file mode 100644 index 00000000000..9a6807a6f3d --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_06_world.sql @@ -0,0 +1,6 @@ +-- +DELETE FROM `conditions` WHERE `SourceTypeOrReferenceId`=17 AND `SourceEntry`=47542; +INSERT INTO `conditions` (`SourceTypeOrReferenceId`, `SourceGroup`, `SourceEntry`, `SourceId`, `ElseGroup`, `ConditionTypeOrReference`, `ConditionTarget`, `ConditionValue1`, `ConditionValue2`, `ConditionValue3`, `NegativeCondition`, `ErrorType`, `ErrorTextId`, `ScriptName`, `Comment`) VALUES 
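+-- Reference note (editorial, hedged): condition type 29 should correspond to
+-- CONDITION_NEAR_CREATURE (ConditionValue1 = creature entry, ConditionValue2 =
+-- range in yards). Rows sharing a SourceEntry but carrying different `ElseGroup`
+-- values are OR-combined, while rows inside one ElseGroup are AND-combined;
+-- that is what the trailing "OR" in the comments below encodes.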
+(17, 0, 47542, 0, 0, 29, 0, 26855, 10, 0, 0, 0, 0, "", "Spell 'Draw Power' can be used within 10 yards of 'East Stone' OR"), +(17, 0, 47542, 0, 1, 29, 0, 26856, 10, 0, 0, 0, 0, "", "Spell 'Draw Power' can be used within 10 yards of 'North Stone' OR"), +(17, 0, 47542, 0, 2, 29, 0, 26857, 10, 0, 0, 0, 0, "", "Spell 'Draw Power' can be used within 10 yards of 'South Stone'"); diff --git a/sql/updates/world/3.3.5/2017_11_16_07_world.sql b/sql/updates/world/3.3.5/2017_11_16_07_world.sql new file mode 100644 index 00000000000..6ba4c00b720 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_07_world.sql @@ -0,0 +1,33 @@ +UPDATE `creature_template` SET `AIName`= 'SmartAI',`ScriptName`='' WHERE `entry`=25201; +UPDATE `gameobject_template` SET `AIName`='SmartGameObjectAI', `ScriptName`='' WHERE `entry`=187373; + +UPDATE `gameobject` SET `spawntimesecs`=2 WHERE `id`=187373; +UPDATE `creature` SET `spawntimesecs`=120 WHERE `id`=25201; + +DELETE FROM `smart_scripts` WHERE `entryorguid` =187373 and `source_type`=1; +DELETE FROM `smart_scripts` WHERE `entryorguid` =25201 and `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid` =18737300 and `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(187373, 1, 0 ,1,70, 0, 100, 0, 2, 0, 0,0,64,1,0,0,0,0,0,7,0,0,0,0, 0, 0, 0, 'Cage - On State Changed - Store Targetlist'), +(187373, 1, 1 ,0,61, 0, 100, 0, 0, 0, 0,0,80,18737300,0,0,0,0,0,1,0,0,0,0, 0, 0, 0, 'Cage - On State Changed - Run Script'), +(25201, 0, 0 ,1,38, 0, 100, 0, 1, 1, 0,0,33,25201,0,0,0,0,0,12,1,0,0,0, 0, 0, 0, 'Winterfin Tadpole - On Data Set 1 1 - Kill Credit'), +(25201, 0, 1 ,2,61, 0, 100, 0, 0, 0, 0,0,29,0,0,0,0,0,0,12,1,0,0,0, 0, 0, 0, 'Winterfin Tadpole - Linked with Previous Event - Move Forward'), +(25201, 0, 2 ,3,61, 0, 100, 0, 0, 0, 0,0,48,1,0,0,0,0,0,1,0,0,0,0, 0, 0, 0, 'Winterfin Tadpole - Linked with Previous Event - Set Active'), +(25201, 0, 3 ,4,61, 0, 100, 0, 0, 0, 0,0,1,0,0,0,0,0,0,12,1,0,0,0, 0, 0, 0, 'Winterfin Tadpole - Linked with Previous Event - Say'), +(25201, 0, 4 ,0,61, 0, 100, 0, 0, 0, 0,0,41,60000,0,0,0,0,0,1,0,0,0,0, 0, 0, 0, 'Winterfin Tadpole - Linked with Previous Event - Despawn after 60 seconds'), +(25201, 0, 5 ,0,11, 0, 100, 0, 0, 0, 0,0,70,0,0,0,0,0,0,20,187373,0,0,0, 0, 0, 0, 'Winterfin Tadpole - On Respawn - Respawn Cage'), +(18737300, 9, 0 ,0, 0, 0, 100, 0, 0, 0, 0,0,100,1,0,0,0,0,0,9,25201,0,5,0, 0, 0, 0, 'Cage - Script - Send Targetlist'), +(18737300, 9, 1 ,0, 0, 0, 100, 0, 0, 0, 0,0,45,1,1,0,0,0,0,9,25201,0,5,0, 0, 0, 0, 'Cage - Script - Set Data'); + +DELETE FROM `creature_text` WHERE `CreatureID`=25201; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(25201, 0, 0, '$G Da-da : Ma-ma;?', 12, 0, 100, 0, 0, 0, 24446, 0, 'Winterfin Tadpole'), +(25201, 0, 1, 'Alurglgl.', 12, 0, 100, 0, 0, 0, 24447, 0, 'Winterfin Tadpole'), +(25201, 0, 2, 'Me go home?', 12, 0, 100, 0, 0, 0, 24448, 0, 'Winterfin Tadpole'), +(25201, 0, 3, 'Play!', 12, 0, 100, 0, 0, 0, 24449, 0, 'Winterfin Tadpole'), +(25201, 0, 
4, 'You not $g da-da : ma-ma;!', 12, 0, 100, 0, 0, 0, 24457, 0, 'Winterfin Tadpole'); + +DELETE FROM `conditions` WHERE `SourceTypeOrReferenceId`=22 AND `SourceEntry`IN(25201,187373); +INSERT INTO `conditions` (`SourceTypeOrReferenceId`, `SourceGroup`, `SourceEntry`, `SourceId`, `ElseGroup`, `ConditionTypeOrReference`, `ConditionTarget`, `ConditionValue1`, `ConditionValue2`, `ConditionValue3`, `NegativeCondition`, `ErrorTextId`, `ScriptName`, `Comment`) VALUES +(22, 1, 25201, 0, 0, 36, 1, 0, 0, 0, 0, 0, '','Only execute SAI if alive'), +(22, 1, 187373, 1, 0, 9, 0, 11560, 0, 0, 0, 0, '', 'Only run SAI if Player has Oh Noes, the Tadpoles taken but not complete'); diff --git a/sql/updates/world/3.3.5/2017_11_16_08_world.sql b/sql/updates/world/3.3.5/2017_11_16_08_world.sql new file mode 100644 index 00000000000..7c987d7f9bc --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_08_world.sql @@ -0,0 +1,7 @@ +-- +DELETE FROM `conditions` WHERE `SourceTypeOrReferenceId`=17 AND `SourceEntry`=42564; +INSERT INTO `conditions` (`SourceTypeOrReferenceId`, `SourceGroup`, `SourceEntry`, `SourceId`, `ElseGroup`, `ConditionTypeOrReference`, `ConditionTarget`, `ConditionValue1`, `ConditionValue2`, `ConditionValue3`, `NegativeCondition`, `ErrorType`, `ErrorTextId`, `ScriptName`, `Comment`) VALUES +(17, 0, 42564, 0, 0, 29, 0, 23921, 20, 0, 0, 0, 0, "", "Spell 'Ever-burning Torch' can be used within 20 yards of 'Halgrind Torch Bunny 01' OR"), +(17, 0, 42564, 0, 1, 29, 0, 23922, 20, 0, 0, 0, 0, "", "Spell 'Ever-burning Torch' can be used within 20 yards of 'Halgrind Torch Bunny 02' OR"), +(17, 0, 42564, 0, 2, 29, 0, 23923, 20, 0, 0, 0, 0, "", "Spell 'Ever-burning Torch' can be used within 20 yards of 'Halgrind Torch Bunny 03' OR"), +(17, 0, 42564, 0, 3, 29, 0, 23924, 20, 0, 0, 0, 0, "", "Spell 'Ever-burning Torch' can be used within 20 yards of 'Halgrind Torch Bunny 04'"); diff --git a/sql/updates/world/3.3.5/2017_11_16_09_world.sql b/sql/updates/world/3.3.5/2017_11_16_09_world.sql new file mode 100644 index 00000000000..6ddab6bbe24 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_09_world.sql @@ -0,0 +1,56 @@ +-- +-- Quest "Chieftain Oomooroo" +-- Stillpine the Younger +DELETE FROM `creature_text` WHERE `CreatureID`=17445; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(17445,0,0,"Stillpine raiders, the time to strike is at hand! 
Charge!",14,0,100,0,0,0,14038,0,"Stillpine the Younger"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=17445; +DELETE FROM `smart_scripts` WHERE `entryorguid`=17445 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=1744500 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(17445,0,0,0,20,0,100,0,9573,0,0,0,80,1744500,0,0,0,0,0,1,0,0,0,0,0,0,0,"Stillpine the Younger - On Quest 'Chieftain Oomooroo' Finished - Run Script"), +(1744500,9,0,0,0,0,100,0,1000,1000,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Stillpine the Younger - On Script - Say Line 0"), +(1744500,9,1,0,0,0,100,0,2000,2000,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3367.84,-12424.6,26.1425,1.85513,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"), +(1744500,9,2,0,0,0,100,0,0,0,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3363.8,-12428.6,27.3273,1.98472,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"), +(1744500,9,3,0,0,0,100,0,0,0,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3364.07,-12432.9,27.338,1.9965,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"), +(1744500,9,4,0,0,0,100,0,0,0,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3360.59,-12433.1,28.084,2.08289,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"), +(1744500,9,5,0,0,0,100,0,0,0,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3356.08,-12434.3,29.6136,2.49915,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"), +(1744500,9,6,0,0,0,100,0,0,0,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3357.16,-12430.5,28.9967,2.46774,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"), +(1744500,9,7,0,0,0,100,0,0,0,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3355.98,-12438.2,29.8503,2.46224,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"), +(1744500,9,8,0,0,0,100,0,0,0,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3350.3,-12437.5,32.3999,2.41119,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"), +(1744500,9,9,0,0,0,100,0,0,0,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3348.27,-12442.5,34.3653,2.58005,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"), +(1744500,9,10,0,0,0,100,0,0,0,0,0,12,17495,6,30000,0,0,0,8,0,0,0,-3344.08,-12441.7,36.8811,2.36014,"Stillpine the Younger - On Script - Summon Creature 'Stillpine Raider'"); + +-- Stillpine Raider +DELETE FROM `smart_scripts` WHERE `entryorguid`=17495 AND `source_type`=0 AND `id`>0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=1749500 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(17495,0,1,0,54,0,100,0,0,0,0,0,53,1,17495,0,0,0,2,1,0,0,0,0,0,0,0,"Stillpine Raider - On Just Summoned - Start Waypoint"), 
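+-- Reference note (editorial, hedged): event_type 40 in the rows below should be
+-- SMART_EVENT_WAYPOINT_REACHED, with event_param1 = point id and event_param2 =
+-- path entry; both such rows therefore fire on point 21 of path 17495, the
+-- final point of the waypoint list that follows.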
+(17495,0,2,0,54,0,100,0,0,0,0,0,48,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Stillpine Raider - On Just Summoned - Set Active On"), +(17495,0,3,0,40,0,100,0,21,17495,0,0,89,15,0,0,0,0,0,1,0,0,0,0,0,0,0,"Stillpine Raider - On Waypoint 21 Reached - Start Random Movement"), +(17495,0,4,0,40,0,100,0,21,17495,0,0,41,10000,0,0,0,0,0,1,0,0,0,0,0,0,0,"Stillpine Raider - On Waypoint 21 Reached - Despawn in 10000 ms"); + +DELETE FROM `waypoints` WHERE `entry`=17495; +INSERT INTO `waypoints` (`entry`, `pointid`, `position_x`, `position_y`, `position_z`, `point_comment`) VALUES +(17495,1,-3374.06,-12411.8,22.0504,""), +(17495,2,-3404.94,-12368.4,17.7868,""), +(17495,3,-3377.49,-12350.4,22.4495,""), +(17495,4,-3328.89,-12346,22.9951,""), +(17495,5,-3305.27,-12346.2,24.0911,""), +(17495,6,-3278.58,-12347.7,19.2297,""), +(17495,7,-3259.37,-12359.1,13.5547,""), +(17495,8,-3248.81,-12373.3,10.5433,""), +(17495,9,-3239.23,-12391.4,10.4531,""), +(17495,10,-3237.35,-12418.7,14.692,""), +(17495,11,-3234.66,-12427.5,16.3937,""), +(17495,12,-3225.87,-12435.1,19.1127,""), +(17495,13,-3202.85,-12463.4,14.6165,""), +(17495,14,-3190.47,-12461.7,13.2373,""), +(17495,15,-3182.54,-12456.9,13.0909,""), +(17495,16,-3177.36,-12452.2,13.0143,""), +(17495,17,-3172.53,-12443.2,12.1312,""), +(17495,18,-3158.44,-12436.1,12.0219,""), +(17495,19,-3143.65,-12438.1,10.1409,""), +(17495,20,-3097.4,-12464.2,3.13821,""), +(17495,21,-3079.57,-12477.1,0.0367675,""); diff --git a/sql/updates/world/3.3.5/2017_11_16_10_world.sql b/sql/updates/world/3.3.5/2017_11_16_10_world.sql new file mode 100644 index 00000000000..294697d4d6c --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_16_10_world.sql @@ -0,0 +1,2 @@ +-- +DELETE FROM `gameobject` WHERE `guid`=99909 AND `id`=181630; diff --git a/sql/updates/world/3.3.5/2017_11_17_00_world.sql b/sql/updates/world/3.3.5/2017_11_17_00_world.sql new file mode 100644 index 00000000000..a12ef0da64a --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_00_world.sql @@ -0,0 +1,6 @@ +-- Maggran Earthbinder --> Add Gossip Text +UPDATE `creature_template` SET `gossip_menu_id`=4271 WHERE `entry`=11860; + +DELETE FROM `gossip_menu` WHERE `MenuID`=4271; +INSERT INTO `gossip_menu` (`MenuID`, `TextID`, `VerifiedBuild`) VALUES +(4271,5443,0); diff --git a/sql/updates/world/3.3.5/2017_11_17_01_world.sql b/sql/updates/world/3.3.5/2017_11_17_01_world.sql new file mode 100644 index 00000000000..8816d4e9f38 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_01_world.sql @@ -0,0 +1,6 @@ +-- Mor'rogal --> Add Gossip Text +UPDATE `creature_template` SET `gossip_menu_id`=4721 WHERE `entry`=11861; + +DELETE FROM `gossip_menu` WHERE `MenuID`=4721; +INSERT INTO `gossip_menu` (`MenuID`, `TextID`, `VerifiedBuild`) VALUES +(4721,5773,0); diff --git a/sql/updates/world/3.3.5/2017_11_17_02_world.sql b/sql/updates/world/3.3.5/2017_11_17_02_world.sql new file mode 100644 index 00000000000..30f35c3e53f --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_02_world.sql @@ -0,0 +1,10 @@ +-- Hezrul Bloodmark +DELETE FROM `smart_scripts` WHERE `entryorguid`=3396 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES 
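+-- Reference note (editorial, hedged): event_type 1 should be
+-- SMART_EVENT_UPDATE_OOC, the out-of-combat twin of the in-combat timer event:
+-- the row below first fires 5000-10000 ms after spawn or reset and then repeats
+-- every 25000-35000 ms while Hezrul is out of combat.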
+(3396,0,1,0,1,0,100,0,5000,10000,25000,35000,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Hezrul Bloodmark - Out of Combat - Say Line 0"); + +DELETE FROM `creature_text` WHERE `CreatureID`=3396; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3396,0,0,"Stand straight! Your weakness sickens me!",12,0,100,0,0,0,4854,0,"Hezrul Bloodmark"), +(3396,0,1,"Be alert! The orcs and their allies are close!",12,0,100,0,0,0,4855,0,"Hezrul Bloodmark"), +(3396,0,2,"Look fierce! You are Kolkar, so act like one!",12,0,100,0,0,0,4856,0,"Hezrul Bloodmark"); diff --git a/sql/updates/world/3.3.5/2017_11_17_03_world_335.sql b/sql/updates/world/3.3.5/2017_11_17_03_world_335.sql new file mode 100644 index 00000000000..4a73d35e83a --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_03_world_335.sql @@ -0,0 +1,20 @@ +-- Cannoneer Smythe & Cannoneer Whessan +DELETE FROM `smart_scripts` WHERE `entryorguid` IN (3454,3455) AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3454,0,1,0,1,0,100,0,1000,15000,150000,180000,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Cannoneer Smythe - Out of Combat - Say Line 0"), +(3455,0,1,0,1,0,100,0,75000,90000,150000,180000,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Cannoneer Whessan - Out of Combat - Say Line 0"); + +DELETE FROM `creature_text` WHERE `CreatureID` IN (3454,3455); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3454,0,0,"Frigate Ho! 'Tis an Orc vessel no doubt! Fire!",14,0,100,5,0,0,1051,0,"Cannoneer Smythe"), +(3454,0,1,"What's that on the horizon? Enemy ship no doubt! Fire!",14,0,100,5,0,0,1052,0,"Cannoneer Smythe"), +(3454,0,2,"Sails on the eastern horizon. Invaders no doubt! Fire!",14,0,100,5,0,0,1053,0,"Cannoneer Smythe"), +(3454,0,3,"What was that? Sea giant? Better safe than sorry. Fire!",14,0,100,5,0,0,1055,0,"Cannoneer Smythe"), +(3454,0,4,"There's something out there. Let loose a warning round!",14,0,100,5,0,0,1054,0,"Cannoneer Smythe"), +(3454,0,5,"Enemy fleet yonder! Fire!",14,0,100,5,0,0,1057,0,"Cannoneer Smythe"), +(3455,0,0,"Enemy ho! Fire!",14,0,100,5,0,0,1056,0,"Cannoneer Whessan"), +(3455,0,1,"Marauders to the north! Fire!",14,0,100,5,0,0,1058,0,"Cannoneer Whessan"), +(3455,0,2,"Inland invaders! Fire!",14,0,100,5,0,0,1059,0,"Cannoneer Whessan"), +(3455,0,3,"Enemy spotted! Fire!",14,0,100,5,0,0,1060,0,"Cannoneer Whessan"), +(3455,0,4,"Raiders from the northwest! Fire!",14,0,100,5,0,0,1061,0,"Cannoneer Whessan"), +(3455,0,5,"Raiding party over yonder! 
Fire!",14,0,100,5,0,0,1062,0,"Cannoneer Whessan"); diff --git a/sql/updates/world/3.3.5/2017_11_17_04_world_335.sql b/sql/updates/world/3.3.5/2017_11_17_04_world_335.sql new file mode 100644 index 00000000000..44e1e592cd5 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_04_world_335.sql @@ -0,0 +1,46 @@ +-- Pathing for Rocklance +UPDATE `creature` SET `position_x`=-1201.63, `position_y`=-3099.24, `position_z`=94.8781, `spawndist`=0, `MovementType`=2 WHERE `guid`=20720; +UPDATE `creature` SET `position_x`=-1198.92, `position_y`=-3101.53, `position_z`=94.8262, `spawndist`=0, `MovementType`=0 WHERE `guid`=14007; +UPDATE `creature` SET `position_x`=-1198.8, `position_y`=-3096.75, `position_z`=94.5592, `spawndist`=0, `MovementType`=0 WHERE `guid`=20588; + +DELETE FROM `creature_addon` WHERE `guid`=20720; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(20720,207200,0,0,1,0,""); + +DELETE FROM `creature_formations` WHERE `leaderGUID`=20720; +INSERT INTO `creature_formations` (`leaderGUID`, `memberGUID`, `dist`, `angle`, `groupAI`, `point_1`, `point_2`) VALUES +(20720,20720,0,0,515,0,0), +(20720,14007,3,330,515,0,0), +(20720,20588,3,60,515,0,0); + +DELETE FROM `waypoint_data` WHERE `id`=207200; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +(207200,1,-1201.63,-3099.24,94.8781,0,0,0,0,100,0), +(207200,2,-1219.21,-3099.76,95.241,0,0,0,0,100,0), +(207200,3,-1272.76,-3089.53,93.8928,0,0,0,0,100,0), +(207200,4,-1315.08,-3106.5,91.7995,0,0,0,0,100,0), +(207200,5,-1330.48,-3095.55,92.4438,0,0,0,0,100,0), +(207200,6,-1341.49,-3060.61,92.6899,0,0,0,0,100,0), +(207200,7,-1354.4,-3030.3,93.3309,0,0,0,0,100,0), +(207200,8,-1386.31,-3005.86,93.1475,0,0,0,0,100,0), +(207200,9,-1430.19,-2974.08,93.1218,0,0,0,0,100,0), +(207200,10,-1436.49,-2943.46,91.668,0,0,0,0,100,0), +(207200,11,-1436.75,-2920.35,92.5429,0,0,0,0,100,0), +(207200,12,-1410.03,-2893.52,93.1282,0,0,0,0,100,0), +(207200,13,-1388.5,-2866.72,94.5754,0,0,0,0,100,0), +(207200,14,-1360.27,-2850.79,94.705,0,0,0,0,100,0), +(207200,15,-1332.8,-2857.77,93.5965,0,0,0,0,100,0), +(207200,16,-1288.33,-2870.37,93.0108,0,0,0,0,100,0), +(207200,17,-1265.32,-2850.79,94.069,0,0,0,0,100,0), +(207200,18,-1239.87,-2834.24,94.41,0,0,0,0,100,0), +(207200,19,-1219.09,-2834.03,93.9937,0,0,0,0,100,0), +(207200,20,-1194.71,-2865.09,93.5771,0,0,0,0,100,0), +(207200,21,-1170.2,-2886.21,94.3522,0,0,0,0,100,0), +(207200,22,-1146.43,-2925.68,93.1956,0,0,0,0,100,0), +(207200,23,-1123.47,-2957.35,92.8819,0,0,0,0,100,0), +(207200,24,-1114.24,-2966.51,92.4826,0,0,0,0,100,0), +(207200,25,-1111.46,-3018.12,94.8292,0,0,0,0,100,0), +(207200,26,-1101.72,-3047.27,93.4784,0,0,0,0,100,0), +(207200,27,-1123.65,-3070.33,91.8408,0,0,0,0,100,0), +(207200,28,-1147.38,-3076.85,92.3126,0,0,0,0,100,0), +(207200,29,-1177.95,-3082.89,92.8266,0,0,0,0,100,0); diff --git a/sql/updates/world/3.3.5/2017_11_17_05_world_335.sql b/sql/updates/world/3.3.5/2017_11_17_05_world_335.sql new file mode 100644 index 00000000000..cee10588764 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_05_world_335.sql @@ -0,0 +1,75 @@ +-- Pathing for Wooly Kodo +UPDATE `creature` SET `position_x`=-2891.01, `position_y`=-2150.02, `position_z`=94.2478, `spawndist`=0, `MovementType`=2 WHERE `guid`=15144; +UPDATE `creature` SET `position_x`=-2895.78, `position_y`=-2158.99, `position_z`=91.6667, `spawndist`=0, `MovementType`=0 WHERE 
`guid`=15142; +UPDATE `creature` SET `position_x`=-2876.98, `position_y`=-2149.49, `position_z`=92.9047, `spawndist`=0, `MovementType`=0 WHERE `guid`=15141; +UPDATE `creature` SET `position_x`=-2878.42, `position_y`=-2160.61, `position_z`=92.0229 WHERE `guid`=15135; + +DELETE FROM `creature_addon` WHERE `guid`=15144; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(15144,151440,0,0,0,0,""); + +DELETE FROM `creature_formations` WHERE `leaderGUID`=15144; +INSERT INTO `creature_formations` (`leaderGUID`, `memberGUID`, `dist`, `angle`, `groupAI`, `point_1`, `point_2`) VALUES +(15144,15144,0,0,512,0,0), +(15144,15135,15,0,512,0,0), +(15144,15142,12,270,512,0,0), +(15144,15141,9,60,512,0,0); + +DELETE FROM `waypoint_data` WHERE `id`=151440; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +(151440,1,-2891.01,-2150.02,94.2478,0,0,0,0,100,214004), +(151440,2,-2903.93,-2136.42,95.657,0,0,0,0,100,214006), +(151440,3,-2921.7,-2112.77,95.5313,0,0,0,0,100,214008), +(151440,4,-2928.01,-2071.33,96.0166,0,0,0,0,100,214012), +(151440,5,-2928.91,-2050.84,95.785,0,0,0,0,100,214014), +(151440,6,-2926.84,-2041.01,96.3781,0,0,0,0,100,214015), +(151440,7,-2923.56,-2034.24,96.0931,0,0,0,0,100,214016), +(151440,8,-2915.72,-2026.93,95.6855,0,0,0,0,100,214017), +(151440,9,-2905.62,-2021.31,93.4736,0,0,0,0,100,214018), +(151440,10,-2896.02,-2017.06,91.7219,0,0,0,0,100,214019), +(151440,11,-2881.85,-2010.83,91.8045,0,0,0,0,100,214021), +(151440,12,-2872.75,-2008.67,92.7372,0,0,0,0,100,214023), +(151440,13,-2860.24,-2009.61,94.2234,0,0,0,0,100,214025), +(151440,14,-2850.68,-2007.45,95.3112,0,0,0,0,100,214027), +(151440,15,-2837.95,-2001.67,96.0295,0,0,0,0,100,214029), +(151440,16,-2798.05,-1973.73,93.4632,0,0,0,0,100,214037), +(151440,17,-2775.55,-1955.8,94.3558,0,0,0,0,100,214042), +(151440,18,-2770.03,-1954.5,94.3486,0,0,0,0,100,214043), +(151440,19,-2737.42,-1955.19,94.2536,0,0,0,0,100,214048), +(151440,20,-2710.84,-1959.71,96.7887,0,0,0,0,100,214052), +(151440,21,-2667.95,-1973.89,97.7269,0,0,0,0,100,214060), +(151440,22,-2649.13,-1983.14,98.7822,0,0,0,0,100,214062), +(151440,23,-2640.2,-1988.65,97.8667,0,0,0,0,100,214064), +(151440,24,-2613.39,-2005.2,91.7337,0,0,0,0,100,214068), +(151440,25,-2554.57,-2041.31,92.0596,0,0,0,0,100,214076), +(151440,26,-2525.49,-2064.46,93.7675,0,0,0,0,100,214080), +(151440,27,-2520.19,-2075.43,93.7559,0,0,0,0,100,214082), +(151440,28,-2518.56,-2091.79,93.4852,0,0,0,0,100,214084), +(151440,29,-2521.19,-2119.21,92.1279,0,0,0,0,100,214087), +(151440,30,-2523.41,-2133.03,95.6538,0,0,0,0,100,214089), +(151440,31,-2523.99,-2157.74,96.1462,0,0,0,0,100,214092), +(151440,32,-2513.76,-2189.22,96.6454,0,0,0,0,100,214095), +(151440,33,-2511.82,-2202.38,95.8735,0,0,0,0,100,214097), +(151440,34,-2511.2,-2208.1,94.7895,0,0,0,0,100,214098), +(151440,35,-2512.97,-2220.95,92.1185,0,0,0,0,100,214099), +(151440,36,-2517,-2236.07,92.0664,0,0,0,0,100,214102), +(151440,37,-2526.55,-2255.15,94.2318,0,0,0,0,100,214105), +(151440,38,-2532.38,-2268.06,95.292,0,0,0,0,100,214107), +(151440,39,-2541.75,-2277.29,95.0165,0,0,0,0,100,214109), +(151440,40,-2564.64,-2288.88,92.3051,0,0,0,0,100,214111), +(151440,41,-2598.33,-2299.11,91.6696,0,0,0,0,100,214115), +(151440,42,-2623.49,-2294.38,93.5493,0,0,0,0,100,214119), +(151440,43,-2652.46,-2282.06,91.8607,0,0,0,0,100,214124), +(151440,44,-2707.78,-2258.55,91.6669,0,0,0,0,100,214133), 
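+-- Editorial note (hedged): the nonzero `wpguid` values in the trailing column
+-- look like leftovers from the in-game waypoint editor; to my knowledge the
+-- core only writes this column at runtime, so 0 would serve equally well in
+-- shipped data.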
+(151440,45,-2712.81,-2253.49,93.2486,0,0,0,0,100,214134), +(151440,46,-2719.92,-2249.24,94.5715,0,0,0,0,100,214135), +(151440,47,-2722.89,-2247.63,93.545,0,0,0,0,100,214136), +(151440,48,-2727.47,-2243.92,91.6671,0,0,0,0,100,214137), +(151440,49,-2742.73,-2228.33,92.8483,0,0,0,0,100,214140), +(151440,50,-2753.01,-2206.2,96.3717,0,0,0,0,100,214143), +(151440,51,-2786.31,-2184.29,95.7868,0,0,0,0,100,214148), +(151440,52,-2826.44,-2175.95,96.4113,0,0,0,0,100,214153), +(151440,53,-2833.23,-2174.24,95.3236,0,0,0,0,100,214154), +(151440,54,-2840.01,-2172.53,92.9766,0,0,0,0,100,214155), +(151440,55,-2850.23,-2170.11,91.6667,0,0,0,0,100,214156), +(151440,56,-2874.56,-2162.59,92.2099,0,0,0,0,100,214159); diff --git a/sql/updates/world/3.3.5/2017_11_17_06_world_335.sql b/sql/updates/world/3.3.5/2017_11_17_06_world_335.sql new file mode 100644 index 00000000000..943b5bec6f4 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_06_world_335.sql @@ -0,0 +1,17 @@ +-- Tsunaman +UPDATE `creature` SET `position_x`=824.854, `position_y`=933.05, `position_z`=155.485, `orientation`=2.16375 WHERE `guid`=29247; +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=11862; +DELETE FROM `smart_scripts` WHERE `entryorguid`=11862 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=1186200 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(11862,0,0,0,1,0,100,0,5000,5000,80000,100000,53,0,11862,0,0,0,0,1,0,0,0,0,0,0,0,"Tsunaman - Out of Combat - Start Waypoint"), +(11862,0,1,0,40,0,100,0,1,11862,0,0,80,1186200,0,0,0,0,0,1,0,0,0,0,0,0,0,"Tsunaman - On Waypoint 1 Reached - Run Script"), +(11862,0,2,0,40,0,100,0,2,11862,0,0,66,0,0,0,0,0,0,8,0,0,0,0,0,0,2.16375,"Tsunaman - On Waypoint 2 Reached - Set Orientation"), +(1186200,9,0,0,0,0,100,0,0,0,0,0,54,20000,0,0,0,0,0,1,0,0,0,0,0,0,0,"Tsunaman - On Script - Pause Waypoint"), +(1186200,9,1,0,0,0,100,0,2000,2000,0,0,17,35,0,0,0,0,0,1,0,0,0,0,0,0,0,"Tsunaman - On Script - Set Emote State 'Attack Unarmed'"), +(1186200,9,2,0,0,0,100,0,16000,16000,0,0,17,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Tsunaman - On Script - Set Emote State 0"); + +DELETE FROM `waypoints` WHERE `entry`=11862; +INSERT INTO `waypoints` (`entry`, `pointid`, `position_x`, `position_y`, `position_z`, `point_comment`) VALUES +(11862,1,818.627,941.855,154.26,""), +(11862,2,824.854,933.05,155.485,""); diff --git a/sql/updates/world/3.3.5/2017_11_17_07_world_335.sql b/sql/updates/world/3.3.5/2017_11_17_07_world_335.sql new file mode 100644 index 00000000000..d355c31f603 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_07_world_335.sql @@ -0,0 +1,5 @@ +-- Mor'rogal +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=11861; +DELETE FROM `smart_scripts` WHERE `entryorguid`=11861 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, 
`target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(11861,0,0,0,20,0,100,0,6421,0,0,0,11,759,0,0,0,0,0,1,0,0,0,0,0,0,0,"Mor'rogal - On Quest 'Boulderslide Ravine' Finished - Cast Conjure Mana Gem"); diff --git a/sql/updates/world/3.3.5/2017_11_17_08_world_335.sql b/sql/updates/world/3.3.5/2017_11_17_08_world_335.sql new file mode 100644 index 00000000000..938acadeced --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_08_world_335.sql @@ -0,0 +1,12 @@ +-- Tammra Windfield +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=11864; +DELETE FROM `smart_scripts` WHERE `entryorguid`=11864 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=1186400 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(11864,0,0,0,20,0,100,0,6301,0,0,0,80,1186400,0,0,0,0,0,1,0,0,0,0,0,0,0,"Tammra Windfield - On Quest 'Cycle of Rebirth' Finished - Run Script"), +(1186400,9,0,0,0,0,100,0,0,0,0,0,11,32618,0,0,0,0,0,1,0,0,0,0,0,0,0,"Tammra Windfield - On Script - Cast Cosmetic Nature Cast"), +(1186400,9,1,0,0,0,100,0,5000,5000,0,0,9,0,0,0,0,0,0,14,33531,177927,0,0,0,0,0,"Tammra Windfield - On Script - Activate Gameobject"), +(1186400,9,2,0,0,0,100,0,1000,1000,0,0,5,25,0,0,0,0,0,1,0,0,0,0,0,0,0,"Tammra Windfield - On Script - Play Emote 25"); + +UPDATE `gameobject_template_addon` SET `flags`=4 WHERE `entry`=177927; +UPDATE `gameobject` SET `spawntimesecs`=10 WHERE `guid`=33531; diff --git a/sql/updates/world/3.3.5/2017_11_17_09_world_335.sql b/sql/updates/world/3.3.5/2017_11_17_09_world_335.sql new file mode 100644 index 00000000000..30f3ecf90c7 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_09_world_335.sql @@ -0,0 +1,20 @@ +DELETE FROM `quest_greeting` WHERE `ID` IN (3390,3339,3995,4049,264,3519,392,900,2080,3337,3567,4791,5767,344,3847,11862); +INSERT INTO `quest_greeting` (`ID`, `Type`, `GreetEmoteType`, `GreetEmoteDelay`, `Greeting`, `VerifiedBuild`) VALUES +(3390,0,0,0,"The Barrens holds a variety of substances for which we, the apothecaries of Lordaeron may find use.",0), -- Apothecary Helbrim +(344,0,5,0,"Redridge is awash in chaos!",0), -- Magistrate Solomon +(3339,0,0,0,"This had better be good...",0), -- Captain Thalo'thas Brightsun +(3995,0,5,0,"The spirits are restless!",0), -- Witch Doctor Jin'Zil +(4049,0,0,0,"The spirit of Stonetalon weeps... It weeps from its mountain peaks, to its rivers, to its severed, dying trees.",0), -- Seereth Stonebreak +(264,0,0,0,"At ease, $c. If you are just passing through I suggest you stick to the roads and only travel by day. If your business is here in Darkshire, consider lending your abilities to the Night Watch. Our skill is unquestionable but our numbers are small.",0), -- Commander Althea Ebonlocke +(3519,0,0,0,"I, Arynia Cloudsbreak, have been tasked with protecting the sanctity of the Oracle Grove.",0), -- Sentinel Arynia Cloudsbreak +(392,0,0,0,"Do not be alarmed, $r. I have long since passed from this land but I intend no harm to your kind. I have witnessed too much death in my time. My only wish now is for peace. 
Perhaps you can help my cause.",0), -- Captain Grayson +(900,0,6,0,"What business brings you before the Court of Lakeshire and the Honorable Magistrate Solomon?",0), -- Bailiff Conacher +(2080,0,1,0,"The creation of Teldrassil was a grand achievement, but now the world must shift to regain its balance.",0), -- Denalan +(3337,0,0,0,"The heft of an axe, the battlecry of your allies, the spray of blood in your face. These are the things a warrior craves, $n. I will carve out The Barrens with my sword in the name of the Horde.",0), -- Kargal Battlescar +(3567,0,1,0,"Well met, $n. It is good to see that $cs like yourself are taking an active part in protecting the groves.",0), -- Tallonkai Swiftroot +(4791,0,1,0,"We may not be in open war with the Alliance, but blood is still shed between us.",0), -- Nazeer Bloodpike +(5767,0,1,0,"Our only hope is to create something good from an already bad situation.",0), -- Nalpak +(3847,0,0,0,"Ashenvale is a lush forest, brimming with life. It is a pleasure to walk down its secret paths in search of herbs, but one must take care. The forest is not without its dangers.",0), -- Orendil Broadleaf +(11862,0,0,0,"",0); -- Tsunaman + +UPDATE `creature_template` SET `npcflag`=2 WHERE `entry`=234; diff --git a/sql/updates/world/3.3.5/2017_11_17_10_world_335.sql b/sql/updates/world/3.3.5/2017_11_17_10_world_335.sql new file mode 100644 index 00000000000..8a4a0db6af9 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_10_world_335.sql @@ -0,0 +1,108 @@ +-- Besseleth +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=11921; +DELETE FROM `smart_scripts` WHERE `entryorguid`=11921 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(11921,0,0,0,9,0,100,0,0,30,9000,12000,11,745,0,0,0,0,0,2,0,0,0,0,0,0,0,"Besseleth - Within 0-30 Range - Cast Web"), +(11921,0,1,0,0,0,100,0,6000,9000,12000,17000,11,5416,0,0,0,0,0,2,0,0,0,0,0,0,0,"Besseleth - In Combat - Cast Venom Sting"); + +-- Deepmoss Hatchling +DELETE FROM `smart_scripts` WHERE `entryorguid`=4263 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4263,0,0,1,2,0,30,1,0,30,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Deepmoss Hatchling - Between 0-30% Health - Say Line 0 (No Repeat)"), +(4263,0,1,0,61,0,100,1,0,0,0,0,11,6536,0,0,0,0,0,1,0,0,0,0,0,0,0,"Deepmoss Hatchling - Between 0-30% Health - Summon Deepmoss Matriarch (No Repeat)"), +(4263,0,2,0,54,0,100,0,0,0,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Deepmoss Hatchling - Just Summoned - Say Line 1"), +(4263,0,3,0,54,0,100,0,0,0,0,0,49,0,0,0,0,0,0,21,20,0,0,0,0,0,0,"Deepmoss Hatchling - Just Summoned - Start Attacking"); + +DELETE FROM `creature_text` WHERE `CreatureID`=4263; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, 
`Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4263,0,0,"%s chitters for help...",16,0,100,0,0,0,1414,0,"Deepmoss Hatchling"), +(4263,1,0,"%s hatches!",16,0,100,0,0,0,1413,0,"Deepmoss Hatchling"); + +-- Deepmoss Matriarch +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4264; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4264 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4264,0,0,0,54,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Deepmoss Matriarch - Just Summoned - Say Line 0"), +(4264,0,1,0,54,0,100,0,0,0,0,0,49,0,0,0,0,0,0,21,20,0,0,0,0,0,0,"Deepmoss Matriarch - Just Summoned - Start Attacking"); + +DELETE FROM `creature_text` WHERE `CreatureID`=4264; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4264,0,0,"%s arrives!",16,0,100,0,0,0,1415,0,"Deepmoss Matriarch"); + +-- Blackened Basilisk +DELETE FROM `smart_scripts` WHERE `entryorguid`=4044 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4044,0,0,0,0,0,100,0,7000,9000,22000,26000,11,3636,0,0,0,0,0,5,0,0,0,0,0,0,0,"Blackened Basilisk - In Combat - Cast Crystalline Slumber"); + +-- Scorched Basilisk +DELETE FROM `smart_scripts` WHERE `entryorguid`=4041 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4041,0,0,0,0,0,100,0,7000,9000,22000,26000,11,3636,0,0,0,0,0,5,0,0,0,0,0,0,0,"Scorched Basilisk - In Combat - Cast Crystalline Slumber"); + +-- Singed Basilisk +DELETE FROM `smart_scripts` WHERE `entryorguid`=4042 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4042,0,0,0,0,0,100,0,7000,9000,22000,26000,11,3636,0,0,0,0,0,5,0,0,0,0,0,0,0,"Singed Basilisk - In Combat - Cast Crystalline Slumber"); + +-- Gerenzo Wrenchwhistle +UPDATE 
`creature_template` SET `AIName`="SmartAI" WHERE `entry`=4202; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4202 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4202,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gerenzo Wrenchwhistle - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Goggeroc +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=11920; +DELETE FROM `smart_scripts` WHERE `entryorguid`=11920 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(11920,0,0,0,0,0,100,0,5000,7000,9000,13000,11,8646,0,0,0,0,0,2,0,0,0,0,0,0,0,"Goggeroc - In Combat - Cast Snap Kick"), +(11920,0,1,0,0,0,100,0,6000,9000,12000,16000,11,10966,0,0,0,0,0,2,0,0,0,0,0,0,0,"Goggeroc - In Combat - Cast Uppercut"), +(11920,0,2,0,11,0,100,0,0,0,0,0,11,10387,0,0,0,0,0,1,0,0,0,0,0,0,0,"Goggeroc - On Respawn - Cast Lightning Surge"); + +-- Furious Stone Spirit +DELETE FROM `smart_scripts` WHERE `entryorguid`=4035 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4035,0,0,0,25,0,100,0,0,0,0,0,11,7095,0,0,0,0,0,1,0,0,0,0,0,0,0,"Furious Stone Spirit - On Reset - Cast Knockdown Proc"); + +-- Bloodfury Harpy +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4022; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4022 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4022,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bloodfury Harpy - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Bloodfury Roguefeather +DELETE FROM `smart_scripts` WHERE `entryorguid`=4023 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, 
`action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4023,0,0,0,25,0,100,0,0,0,0,0,11,8876,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bloodfury Roguefeather - On Reset - Cast Thrash"), +(4023,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bloodfury Roguefeather - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Bloodfury Slayer +DELETE FROM `smart_scripts` WHERE `entryorguid`=4024 AND `source_type`=0 AND `id` IN (1,2); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4024,0,1,0,0,0,100,0,7000,16000,120000,130000,11,16231,0,0,0,0,0,2,0,0,0,0,0,0,0,"Bloodfury Slayer - In Combat - Cast Curse of Recklessness"), +(4024,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bloodfury Slayer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Bloodfury Ambusher +DELETE FROM `smart_scripts` WHERE `entryorguid`=4025 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4025,0,0,0,0,0,100,0,3000,5000,8000,12000,11,2608,0,0,0,0,0,2,0,0,0,0,0,0,0,"Bloodfury Ambusher - In Combat - Cast Shock"), +(4025,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bloodfury Ambusher - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Bloodfury Windcaller +DELETE FROM `smart_scripts` WHERE `entryorguid`=4026 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4026,0,0,0,0,0,100,0,3000,5000,14000,20000,11,6728,0,0,0,0,0,5,0,0,0,0,0,0,0,"Bloodfury Windcaller - In Combat - Cast Enveloping Winds"), +(4026,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bloodfury Windcaller - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Bloodfury Storm Witch +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=4027 AND `source_type`=0 AND `id`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4027 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, 
`target_y`, `target_z`, `target_o`, `comment`) VALUES +(4027,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Bloodfury Storm Witch - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Bloodfury Ripper +DELETE FROM `smart_scripts` WHERE `entryorguid`=12579 AND `source_type`=0 AND `id`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(12579,0,0,0,9,0,100,0,0,5,21000,25000,11,13443,0,0,0,0,0,2,0,0,0,0,0,0,0,"Bloodfury Ripper - Within 0-5 Range - Cast Rend"); + +-- Fix faction for Brother Ravenoak +UPDATE `creature_template` SET `faction`=124 WHERE `entry`=5915; diff --git a/sql/updates/world/3.3.5/2017_11_17_11_world.sql b/sql/updates/world/3.3.5/2017_11_17_11_world.sql new file mode 100644 index 00000000000..bf7d8c3890d --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_11_world.sql @@ -0,0 +1,2 @@ +-- +DELETE FROM `gameobject_addon` WHERE `guid`=99909; diff --git a/sql/updates/world/3.3.5/2017_11_17_12_world.sql b/sql/updates/world/3.3.5/2017_11_17_12_world.sql new file mode 100644 index 00000000000..f38411e9886 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_12_world.sql @@ -0,0 +1,68 @@ +-- +-- duplicates removed +DELETE FROM `creature` WHERE `guid` IN (129439, 129433, 129438, 128479, 129826, 130415,128477,130416,128478,128476,129823, 130272, 130274, 130276,130252); +DELETE FROM `creature_addon` WHERE `guid` IN (129439, 129433, 129438, 128479, 129826, 130415,128477,130416,128478,128476,129823, 130272, 130274, 130276,130252); +-- Scourge Gryphon +UPDATE `creature` SET `phaseMask` = 229 WHERE `guid` IN (128509, 128510, 128500, 128501); +-- Master Siegesmith Corvus +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128577; +-- Enslaved Laborer +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128579; +-- Mindless Laborer +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128580; +-- Risen Drudge +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128465; +-- Alchemist Karloff +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128456; +-- Gangrenus +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128458; +-- Fester +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128459; +-- Corpulous +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128457; +-- Instructor Razuvious +UPDATE `creature` SET `phaseMask` = 69 WHERE `guid` = 129307; +-- Acherus Necromancer +UPDATE `creature` SET `phaseMask` = 69 WHERE `guid` IN (129388, 129389, 129390, 129391); +-- Death Knight Initiate +UPDATE `creature` SET `phaseMask` = 69 WHERE `id` IN (28390, 28391, 28392, 28393, 28394); +-- Amal'thazad +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128507; +-- Coldwraith +UPDATE `creature` SET `phaseMask` = 69 WHERE `guid` IN (128522, 128523, 128525, 128526); +-- Disciple of Frost +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` IN (128543, 128542, 128541, 128540, 128539, 128538, 128535, 128536, 128537); +-- Lord Thorval +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128506; +-- Disciple of Blood +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` IN (128527, 128528, 128529, 128530, 128531, 128532, 128533, 128534); +-- Lady 
Alistra +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` = 128505; +-- Disciple of the Unholy +UPDATE `creature` SET `phaseMask` = 197 WHERE `guid` IN (128545, 128546, 128547, 128548, 128549, 128550, 128551, 128552, 128553); +-- Teleport - Hall -> Heart +UPDATE `creature` SET `phaseMask` = 487 WHERE `guid` = 128753; +-- Teleport - Heart -> Hall +UPDATE `creature` SET `phaseMask` = 487 WHERE `guid` = 128754; +-- Initiate's Training Dummy +UPDATE `creature` SET `phaseMask` = 231 WHERE `guid` IN (130414,129825,129824,130417); +-- Scarlet Crusade Prisoner / Argent Dawn Prisoner +UPDATE `creature` SET `phaseMask` = 197 WHERE `id` IN (28386,28385); +-- Gameobjects +-- duplicates removed +DELETE FROM `gameobject` WHERE `guid` IN (66143,66119,66117,66255,66432,66118,66116,66254,66431,66145,66147,66316,66146,65973,66436,66441,66438,66439,66458,66442,66120,66448,66137,66437,66454,66452,66136,66449,66138,66139,66451,66457,66450,66444,66130,66447,66133,66129,66455,66127,66453,66124,66140,66141,66445,66446,66456, 65865, 65866, 65867, 65883, 65885, 65887, 65917, 66029, 66031, 66033, 66038, 66123, 66234, 66236, 66237, 66238, 66239, 66240, 66241, 66242, 66245, 66246, 66304, 66305, 66310, 66311, 66312, 66313, 66315, 66317, 66319, 66321, 66323, 66325, 66326, 66329, 66332, 66335, 66338, 66340, 66346, 66348, 66351, 66358, 66387, 66391, 66395, 66399, 66420, 66422, 66471, 66472, 66473, 66480, 66481, 66482, 66483, 66485, 66488, 66490, 66492, 66494, 66497, 66498, 66501, 66504, 66507, 66510, 66512, 66513, 66514, 66515, 66517, 66522, 66611); +DELETE FROM `gameobject_addon` WHERE `guid` IN (66143,66119,66117,66255,66432,66118,66116,66254,66431,66145,66147,66316,66146,65973,66436,66441,66438,66439,66458,66442,66120,66448,66137,66437,66454,66452,66136,66449,66138,66139,66451,66457,66450,66444,66130,66447,66133,66129,66455,66127,66453,66124,66140,66141,66445,66446,66456, 65865, 65866, 65867, 65883, 65885, 65887, 65917, 66029, 66031, 66033, 66038, 66123, 66234, 66236, 66237, 66238, 66239, 66240, 66241, 66242, 66245, 66246, 66304, 66305, 66310, 66311, 66312, 66313, 66315, 66317, 66319, 66321, 66323, 66325, 66326, 66329, 66332, 66335, 66338, 66340, 66346, 66348, 66351, 66358, 66387, 66391, 66395, 66399, 66420, 66422, 66471, 66472, 66473, 66480, 66481, 66482, 66483, 66485, 66488, 66490, 66492, 66494, 66497, 66498, 66501, 66504, 66507, 66510, 66512, 66513, 66514, 66515, 66517, 66522, 66611); +-- Phase: 1+2 +UPDATE `gameobject` SET `phaseMask` = 3 WHERE `guid` IN (65985,66004,65990,66017,65995,65982,65991,66007,66036,65994,66034,65992,65988,65974,65993,65976,65978,65977,65975,66022,66013,66030,66019,66014,65996,66000,65999,66003,66009,65998,66001); +-- Phase: 1+4 +UPDATE `gameobject` SET `phaseMask` = 5 WHERE `guid` = 66069; +-- Phase: 1+2+32 +UPDATE `gameobject` SET `phaseMask` = 35 WHERE `guid` IN (66060,66066,66064,66068,66062,66070,66076,65912,66067,65932,65905,66065); +-- Phase: 4+64+128 +UPDATE `gameobject` SET `phaseMask` = 196 WHERE `guid` IN (66290,66276,66285,66282,66277,66280,66288,66278,66271,66275,66286,66284,66272,66273,66287,66289,66279,66270,66281,66274,66283,66262,66263,66267,66264,66265,66268,66375,66376,66259,66261,66260,66354,66295,66296,66294,66298,66355,66297,66300,66299,66293,66302,66301,66307,66306,66345,66347,66357); +-- Phase: 1+4+32+64+128 +UPDATE `gameobject` SET `phaseMask` = 229 WHERE `guid` IN (65902,65901); +-- Phase: 1+2+4+32+64+128 +UPDATE `gameobject` SET `phaseMask` = 231 WHERE `guid` IN (66040,66024,65970,65968,65981,66005,65984,65987,65972,65979,65969,66037,65967,66002,66020,66343,66341,66342,66027,66519,66303,65919,66010,65918,66015); + +UPDATE `gameobject` SET `phaseMask` = 192 WHERE `guid` IN (66620);
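+
+-- phaseMask is a plain bitmask of phase bits, so the values above are exactly the sums the comments spell out:
+-- 3 = 1+2, 5 = 1+4, 35 = 1+2+32, 196 = 4+64+128, 229 = 1+4+32+64+128, 231 = 1+2+4+32+64+128 and 192 = 64+128.
+-- A spawn is visible to a player whenever the player's phase ANDs with it to a non-zero value, e.g.
+-- SELECT `guid` FROM `gameobject` WHERE `phaseMask` & 2; lists everything visible in phase 2.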
diff --git a/sql/updates/world/3.3.5/2017_11_17_13_world.sql b/sql/updates/world/3.3.5/2017_11_17_13_world.sql new file mode 100644 index 00000000000..fee524841b2 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_17_13_world.sql @@ -0,0 +1,34 @@ +DELETE FROM `spell_script_names` WHERE `ScriptName` IN +('spell_midsummer_torch_target_picker', +'spell_midsummer_torch_toss_land', +'spell_midsummer_test_ribbon_pole_channel', +'spell_gen_ribbon_pole_dancer_check', +'spell_midsummer_ribbon_pole_periodic_visual'); +INSERT INTO `spell_script_names` (`spell_id`,`ScriptName`) VALUES +(45907,'spell_midsummer_torch_target_picker'), +(46054,'spell_midsummer_torch_toss_land'), +(29705,'spell_midsummer_test_ribbon_pole_channel'), +(29726,'spell_midsummer_test_ribbon_pole_channel'), +(29727,'spell_midsummer_test_ribbon_pole_channel'), +(45406,'spell_midsummer_ribbon_pole_periodic_visual'); + +UPDATE `spell_dbc` SET `Effect1`=3, `EffectImplicitTargetA1`=1 WHERE `Id`=58934; +UPDATE `creature_template` SET `AIName`='' WHERE `entry`=25535; +DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=25535; + +DELETE FROM `spell_linked_spell` WHERE `spell_trigger`=29726; +DELETE FROM `spell_linked_spell` WHERE `spell_trigger`=45723 AND `spell_effect`=43313; +DELETE FROM `disables` WHERE `sourceType`=0 AND `entry` IN (45724); +INSERT INTO `disables` (`sourceType`, `entry`, `flags`, `params_0`, `params_1`, `comment`) VALUES +(0,45724,64,0,0,'Disable LoS for spell Braziers Hit'); + +DELETE FROM `conditions` WHERE `SourceTypeOrReferenceId`=13 AND `SourceEntry` IN (45907,46054,45732,29705,29727); +INSERT INTO `conditions` (`SourceTypeOrReferenceId`,`SourceGroup`,`SourceEntry`,`SourceId`,`ElseGroup`,`ConditionTypeOrReference`,`ConditionTarget`,`ConditionValue1`,`ConditionValue2`,`ConditionValue3`,`NegativeCondition`,`ErrorTextId`,`ScriptName`,`Comment`) VALUES +(13,1,45907,0,0,31,0,3,25535,0,0,0,'','Spell \'Torch Target Picker\' targets [DNT] Torch Tossing Target Bunny'), +(13,1,45907,0,0,1,0,45723,0,0,1,0,'','Spell \'Torch Target Picker\' can not hit targets with Aura \'Target Indicator (Rank 1)\''), +(13,1,45732,0,0,1,0,45723,0,0,0,0,'','Spell \'Torch Toss\' can only hit targets with Aura \'Target Indicator\''), +(13,1,46054,0,0,1,0,45723,0,0,0,0,'','Spell \'Torch Toss\' can only hit targets with Aura \'Target Indicator\''), +(13,1,46054,0,0,31,0,3,25535,0,0,0,'','Spell \'Torch Toss\' targets [DNT] Torch Tossing Target Bunny'), +(13,1,45732,0,0,31,0,3,25535,0,0,0,'','Spell \'Torch Toss\' targets [DNT] Torch Tossing Target Bunny'), +(13,1,29705,0,0,31,0,3,17066,0,0,0,'','Spell \'Test Ribbon Pole Channel\' targets Ribbon Pole Debug Target'), +(13,1,29727,0,0,31,0,3,17066,0,0,0,'','Spell \'Test Ribbon Pole Channel\' targets Ribbon Pole Debug Target'); diff --git a/sql/updates/world/3.3.5/2017_11_19_00_world_335.sql b/sql/updates/world/3.3.5/2017_11_19_00_world_335.sql new file mode 100644 index 00000000000..409f75a2f6b --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_19_00_world_335.sql @@ -0,0 +1,2 @@ +-- +UPDATE `smart_scripts` SET `action_param1`=7 WHERE `entryorguid`=344200 AND `source_type`=9 AND `id`=1; diff --git a/sql/updates/world/3.3.5/2017_11_19_01_world_335.sql b/sql/updates/world/3.3.5/2017_11_19_01_world_335.sql new file mode 100644 index 
00000000000..0021dae798f --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_19_01_world_335.sql @@ -0,0 +1,24 @@ +-- +-- J.D. Collie +DELETE FROM `smart_scripts` WHERE `entryorguid`=9117 AND `source_type`=0 AND `id`>0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=911701 AND `source_type`=9; +DELETE FROM `smart_scripts` WHERE `entryorguid`=911700 AND `source_type`=9 AND `id`=6; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(9117,0,1,0,25,0,100,0,0,0,0,0,22,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - On Reset - Set Event Phase 1"), +(9117,0,2,0,19,0,100,0,4321,0,0,0,22,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - On Quest Taken - Set Event Phase 0"), +(9117,0,3,0,1,1,100,0,5000,10000,90000,120000,80,911701,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - Out of Combat - Run Timed Action List"), +(911701,9,0,0,0,1,100,0,1000,1000,0,0,1,3,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - Action list - Say Line 3"), +(911701,9,1,0,0,1,100,0,5000,5000,0,0,1,4,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - Action list - Say Line 4"), +(911701,9,2,0,0,1,100,0,0,0,0,0,11,32990,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - Action list - Cast Spell"), +(911701,9,3,0,0,1,100,0,4000,4000,0,0,1,5,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - Action list - Say Line 5"), +(911701,9,4,0,0,1,100,0,4000,4000,0,0,1,6,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - Action list - Say Line 6"), +(911701,9,5,0,0,1,100,0,4000,4000,0,0,1,7,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - Action list - Say Line 7"), +(911700,9,6,0,0,0,100,0,0,0,0,0,22,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"J.D. Collie - Action list - Set Event Phase 1"); + +DELETE FROM `creature_text` WHERE `CreatureID`=9117 AND `GroupID`>2; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(9117,3,0,"Ahh... My experiments never seem to work out right. Maybe I should go back downstream.",12,0,100,0,0,0,4843,0,"J.D. Collie"), +(9117,4,0,"%s begins to combine different colored crystals together.",16,0,100,0,0,0,4846,0,"J.D. Collie"), +(9117,5,0,"I think...I think it's working!",12,0,100,0,0,0,4847,0,"J.D. Collie"), +(9117,6,0,"OW! That's not right!",12,0,100,0,0,0,4852,0,"J.D. Collie"), +(9117,7,0,"Oh well... Maybe next time.",12,0,100,0,0,0,4853,0,"J.D. Collie");
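+
+-- SmartAI note (standard semantics): action_type 80 starts timed action list 911701 (source_type 9); its rows run in
+-- sequence, each after the event_param1/event_param2 ms delay. Both the out-of-combat event and the list rows are bound
+-- to event phase 1, so setting phase 0 when quest 4321 is taken stops the crystal-mixing routine until the next reset.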
Collie"); diff --git a/sql/updates/world/3.3.5/2017_11_19_02_world_335.sql b/sql/updates/world/3.3.5/2017_11_19_02_world_335.sql new file mode 100644 index 00000000000..2c5ad0b8efc --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_19_02_world_335.sql @@ -0,0 +1,57 @@ +-- +SET @GUID := 40489; +DELETE FROM `creature` WHERE `guid` BETWEEN @GUID+0 AND @GUID+2; +INSERT INTO `creature` (`guid`, `id`, `map`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `curhealth`, `curmana`, `MovementType`) VALUES +(@GUID+0,23090,1,1,1,0,1,1708.89,-4407.32,40.1085,2.35614,300,0,104790,0,2), +(@GUID+1,23090,1,1,1,0,1,1685.87,-4437.94,36.0057,2.50928,300,0,104790,0,2), +(@GUID+2,23090,1,1,1,0,1,1651.35,-4448.69,38.0034,1.59431,300,0,104790,0,2); + +DELETE FROM `creature_addon` WHERE `guid` BETWEEN @GUID+0 AND @GUID+2; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(@GUID+0,(@GUID+0)*10,0,0,1,0,'18950 32199'), +(@GUID+1,(@GUID+1)*10,0,0,1,0,'18950 32199'), +(@GUID+2,(@GUID+2)*10,0,0,1,0,'18950 32199'); + +DELETE FROM `waypoint_data` WHERE `id` BETWEEN (@GUID+0)*10 AND (@GUID+2)*10; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `move_type`, `action_chance`) VALUES +((@GUID+0)*10,1,1708.89,-4407.32,40.1085,0,100), +((@GUID+0)*10,2,1699.21,-4398.37,40.5289,0,100), +((@GUID+0)*10,3,1694.41,-4397.78,40.6138,0,100), +((@GUID+0)*10,4,1682.94,-4404.49,40.6853,0,100), +((@GUID+0)*10,5,1680.72,-4409.19,40.8754,0,100), +((@GUID+0)*10,6,1682.71,-4422.11,40.4622,0,100), +((@GUID+0)*10,7,1686.17,-4425.78,40.6093,0,100), +((@GUID+0)*10,8,1698.87,-4427.43,40.7538,0,100), +((@GUID+0)*10,9,1686.17,-4425.78,40.6093,0,100), +((@GUID+0)*10,10,1682.71,-4422.11,40.4622,0,100), +((@GUID+0)*10,11,1680.72,-4409.19,40.8754,0,100), +((@GUID+0)*10,12,1682.94,-4404.49,40.6853,0,100), +((@GUID+0)*10,13,1694.41,-4397.78,40.6138,0,100), +((@GUID+0)*10,14,1699.21,-4398.37,40.5289,0,100), +((@GUID+0)*10,15,1708.89,-4407.32,40.1085,0,100), +((@GUID+1)*10,1,1685.87,-4437.94,36.0057,0,100), +((@GUID+1)*10,2,1681.13,-4434.01,35.3238,0,100), +((@GUID+1)*10,3,1673.04,-4431.93,34.8667,0,100), +((@GUID+1)*10,4,1665.67,-4435.66,34.8775,0,100), +((@GUID+1)*10,5,1661.73,-4441.26,34.9755,0,100), +((@GUID+1)*10,6,1661.59,-4447.54,35.0587,0,100), +((@GUID+1)*10,7,1661.73,-4441.26,34.9755,0,100), +((@GUID+1)*10,8,1665.67,-4435.66,34.8775,0,100), +((@GUID+1)*10,9,1673.04,-4431.93,34.8667,0,100), +((@GUID+1)*10,10,1681.13,-4434.01,35.3238,0,100), +((@GUID+1)*10,11,1685.87,-4437.94,36.0057,0,100), +((@GUID+2)*10,1,1651.35,-4448.69,38.0034,0,100), +((@GUID+2)*10,2,1651.75,-4437.17,38.0053,0,100), +((@GUID+2)*10,3,1648.66,-4432.08,37.8909,0,100), +((@GUID+2)*10,4,1633.78,-4428.19,38.146,0,100), +((@GUID+2)*10,5,1628.51,-4430.07,38.1318,0,100), +((@GUID+2)*10,6,1620.25,-4441.33,37.8515,0,100), +((@GUID+2)*10,7,1620.04,-4447.66,37.9152,0,100), +((@GUID+2)*10,8,1628.39,-4458.96,38.0537,0,100), +((@GUID+2)*10,9,1620.04,-4447.66,37.9152,0,100), +((@GUID+2)*10,10,1620.25,-4441.33,37.8515,0,100), +((@GUID+2)*10,11,1628.51,-4430.07,38.1318,0,100), +((@GUID+2)*10,12,1633.78,-4428.19,38.146,0,100), +((@GUID+2)*10,13,1648.66,-4432.08,37.8909,0,100), +((@GUID+2)*10,14,1651.75,-4437.17,38.0053,0,100), +((@GUID+2)*10,15,1651.35,-4448.69,38.0034,0,100); diff --git a/sql/updates/world/3.3.5/2017_11_20_00_world.sql b/sql/updates/world/3.3.5/2017_11_20_00_world.sql new file mode 100644 index 
diff --git a/sql/updates/world/3.3.5/2017_11_20_00_world.sql b/sql/updates/world/3.3.5/2017_11_20_00_world.sql new file mode 100644 index 00000000000..222d4eff314 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_20_00_world.sql @@ -0,0 +1,2 @@ +-- +UPDATE `creature_addon` SET `auras`="17467", `mount`=0 WHERE `guid`=130895; diff --git a/sql/updates/world/3.3.5/2017_11_20_01_world.sql b/sql/updates/world/3.3.5/2017_11_20_01_world.sql new file mode 100644 index 00000000000..77180a4b3f7 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_20_01_world.sql @@ -0,0 +1,51 @@ +-- +UPDATE `creature_template` SET `ScriptName` = '', `AIName` = 'SmartAI' WHERE `entry` IN (22095, 22307, 18588); +UPDATE `creature_template` SET `ScriptName` = '' WHERE `entry` = 19679; +UPDATE `gameobject_template` SET `ScriptName` = '', `AIName` = 'SmartGameObjectAI' WHERE `entry` = 185913; + +DELETE FROM `gossip_menu` WHERE `MenuID` = 8021; +INSERT INTO `gossip_menu` (`MenuID`, `TextID`, `VerifiedBuild`) VALUES +(8021, 9895, 0), +(8021, 9896, 0); + +UPDATE `gossip_menu_option` SET `ActionMenuID` = 7731 WHERE `MenuID` = 7732 AND `OptionID` = 0; + +DELETE FROM `smart_scripts` WHERE (`source_type` = 0 AND `entryorguid` IN (22095, 22307, 18588)) OR (`source_type` = 1 AND `entryorguid` = 185913); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(22095, 0, 0, 0, 6, 0, 75, 0, 0, 0, 0, 0, 11, 39130, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Infested Root-Walker - On Just Died - Cast \'Summon Wood Mites\''), +(22095, 0, 1, 0, 0, 0, 100, 0, 15000, 20000, 32000, 38000, 11, 39000, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Infested Root-Walker - In Combat - Cast \'Regrowth\''), +(22307, 0, 0, 0, 6, 0, 75, 0, 0, 0, 0, 0, 11, 39134, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Rotting Forest-Rager - On Just Died - Cast \'Summon Lots Of Wood Mites\''), +(22307, 0, 1, 0, 0, 0, 100, 0, 0, 5, 12000, 18000, 11, 15548, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Rotting Forest-Rager - In Combat - Cast \'Thunderclap\''), +(18588, 0, 0, 0, 0, 0, 100, 0, 2000, 2000, 30000, 30000, 11, 6726, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 'Floon - In Combat - Cast \'Silence\''), +(18588, 0, 1, 0, 0, 0, 100, 0, 4000, 4000, 5000, 5000, 11, 9672, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 'Floon - In Combat - Cast \'Frostbolt\''), +(18588, 0, 2, 0, 0, 0, 100, 0, 9000, 9000, 20000, 20000, 11, 11831, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 'Floon - In Combat - Cast \'Frost Nova\''), +(18588, 0, 3, 4, 62, 0, 100, 0, 7731, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Floon - On Gossip Option 0 Selected - Close Gossip'), +(18588, 0, 4, 5, 61, 0, 100, 0, 0, 0, 0, 0, 2, 1738, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Floon - On Gossip Option 0 Selected - Set Faction Arakkoa'), +(18588, 0, 5, 6, 61, 0, 100, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Floon - On Gossip Option 0 Selected - Say Line 0'), +(18588, 0, 6, 0, 61, 0, 100, 0, 0, 0, 0, 0, 49, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Floon - On Gossip Option 0 Selected - Start Attacking'), +(18588, 0, 7, 0, 25, 0, 100, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 'Floon - On Reset - Set Default Faction'), +(185913, 1, 0, 4, 62, 0, 100, 0, 8660, 0, 0, 0, 85, 40632, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 
0, 'Skull Pile - On Gossip Option 0 Selected - Invoker Cast \'Summon Gezzarak the Huntress\''), +(185913, 1, 1, 4, 62, 0, 100, 0, 8660, 1, 0, 0, 85, 40642, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Skull Pile - On Gossip Option 1 Selected - Invoker Cast \'Summon Darkscreecher Akkarai\''), +(185913, 1, 2, 4, 62, 0, 100, 0, 8660, 2, 0, 0, 85, 40640, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Skull Pile - On Gossip Option 2 Selected - Invoker Cast \'Summon Karrog\''), +(185913, 1, 3, 4, 62, 0, 100, 0, 8660, 3, 0, 0, 85, 40644, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Skull Pile - On Gossip Option 3 Selected - Invoker Cast \'Summon Vakkiz the Windrager\''), +(185913, 1, 4, 0, 61, 0, 100, 0, 0, 0, 0, 0, 72, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 'Skull Pile - On Gossip Option Selected - Close Gossip'); + +DELETE FROM `conditions` WHERE `SourceTypeOrReferenceId` = 15 AND `SourceGroup` IN (7732, 8660); +DELETE FROM `conditions` WHERE `SourceTypeOrReferenceId` IN (14, 15) AND `SourceGroup` = 8021; +DELETE FROM `conditions` WHERE `SourceTypeOrReferenceId` = 22 AND `SourceGroup` = 1 AND `SourceEntry` IN (22095, 22307) AND `SourceId` = 0; + +INSERT INTO `conditions` (`SourceTypeOrReferenceId`, `SourceGroup`, `SourceEntry`, `SourceId`, `ElseGroup`, `ConditionTypeOrReference`, `ConditionTarget`, `ConditionValue1`, `ConditionValue2`, `ConditionValue3`, `NegativeCondition`, `ErrorType`, `ErrorTextId`, `ScriptName`, `Comment`) VALUES +(15, 8660, 0, 0, 0, 9, 0, 11885, 0, 0, 0, 0, 0, '', 'Show gossip menu 8660 option id 0 if quest Adversarial Blood has been taken.'), +(15, 8660, 1, 0, 0, 9, 0, 11885, 0, 0, 0, 0, 0, '', 'Show gossip menu 8660 option id 1 if quest Adversarial Blood has been taken.'), +(15, 8660, 2, 0, 0, 9, 0, 11885, 0, 0, 0, 0, 0, '', 'Show gossip menu 8660 option id 2 if quest Adversarial Blood has been taken.'), +(15, 8660, 3, 0, 0, 9, 0, 11885, 0, 0, 0, 0, 0, '', 'Show gossip menu 8660 option id 3 if quest Adversarial Blood has been taken.'), +(15, 8660, 0, 0, 1, 8, 0, 11885, 0, 0, 0, 0, 0, '', 'Show gossip menu 8660 option id 0 if quest Adversarial Blood has been rewarded.'), +(15, 8660, 1, 0, 1, 8, 0, 11885, 0, 0, 0, 0, 0, '', 'Show gossip menu 8660 option id 1 if quest Adversarial Blood has been rewarded.'), +(15, 8660, 2, 0, 1, 8, 0, 11885, 0, 0, 0, 0, 0, '', 'Show gossip menu 8660 option id 2 if quest Adversarial Blood has been rewarded.'), +(15, 8660, 3, 0, 1, 8, 0, 11885, 0, 0, 0, 0, 0, '', 'Show gossip menu 8660 option id 3 if quest Adversarial Blood has been rewarded.'), +(14, 8021, 9896, 0, 0, 5, 0, 933, 240, 0, 0, 0, 0, '', 'Show gossip menu 8021 text id 9896 if player is at least Friendly with The Consortium.'), +(14, 8021, 9895, 0, 0, 5, 0, 933, 240, 0, 1, 0, 0, '', 'Show gossip menu 8021 text id 9895 if player is not at least Friendly with The Consortium.'), +(15, 8021, 0, 0, 0, 5, 0, 933, 240, 0, 0, 0, 0, '', 'Show gossip menu 8021 option id 0 if player is at least Friendly with The Consortium.'), +(15, 7732, 0, 0, 0, 9, 0, 10009, 0, 0, 0, 0, 0, '', 'Show gossip menu 7732 option id 0 if quest Crackin\' Some Skulls has been taken.'), +(22, 1, 22095, 0, 0, 32, 0, 16, 0, 0, 0, 0, 0, '', ' Id 0 of Creature SAI for Infested Root-Walker will execute if invoker is player.'), +(22, 1, 22307, 0, 0, 32, 0, 16, 0, 0, 0, 0, 0, '', ' Id 0 of Creature SAI for Rotting Forest-Rager will execute if invoker is player.');
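+
+-- Condition decode for the rows above (standard TrinityCore enums): SourceTypeOrReferenceId 15 gates gossip menu
+-- options, 14 gates gossip menu texts and 22 gates SmartAI events; ConditionTypeOrReference 8 = quest rewarded,
+-- 9 = quest taken, 5 = reputation rank (ConditionValue2 240 = Friendly or better) and 32 = invoker type mask
+-- (16 = player). Rows that share an ElseGroup are ANDed together; different ElseGroups are ORed.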
diff --git a/sql/updates/world/3.3.5/2017_11_21_00_world_335.sql b/sql/updates/world/3.3.5/2017_11_21_00_world_335.sql new file mode 100644 index 00000000000..0da487ab6c1 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_00_world_335.sql @@ -0,0 +1,17 @@ +-- +-- Valusha Gossip Text +DELETE FROM `gossip_menu` WHERE `MenuID`=7408 AND `TextID` IN (8887,8888); +INSERT INTO `gossip_menu` (`MenuID`, `TextID`, `VerifiedBuild`) VALUES +(7408,8887,0), +(7408,8888,0); + +DELETE FROM `npc_text` WHERE `ID`=8888; +INSERT INTO `npc_text` (`ID`, `text0_0`, `text0_1`, `BroadcastTextID0`, `lang0`, `Probability0`, `em0_1`) VALUES +(8888,"You honor us with your presence, $n.","",13803,0,1,1); + +DELETE FROM `conditions` WHERE `SourceTypeOrReferenceId`=14 AND `SourceGroup`=7408; +INSERT INTO `conditions` (`SourceTypeOrReferenceId`, `SourceGroup`, `SourceEntry`, `SourceId`, `ElseGroup`, `ConditionTypeOrReference`, `ConditionTarget`, `ConditionValue1`, `ConditionValue2`, `ConditionValue3`, `NegativeCondition`, `ErrorType`, `ErrorTextId`, `ScriptName`, `Comment`) VALUES +(14,7408,8887,0,0,8,0,9534,0,0,0,0,0,"","Show gossip text 8887 if quest 'Destroy the Legion' is rewarded"), +(14,7408,8887,0,0,8,0,9536,0,0,1,0,0,"","Show gossip text 8887 if quest 'Never Again!' is NOT rewarded"), +(14,7408,8888,0,0,8,0,9534,0,0,0,0,0,"","Show gossip text 8888 if quest 'Destroy the Legion' is rewarded"), +(14,7408,8888,0,0,8,0,9536,0,0,0,0,0,"","Show gossip text 8888 if quest 'Never Again!' is rewarded"); diff --git a/sql/updates/world/3.3.5/2017_11_21_01_world_335.sql b/sql/updates/world/3.3.5/2017_11_21_01_world_335.sql new file mode 100644 index 00000000000..8acc13a0a0a --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_01_world_335.sql @@ -0,0 +1,4 @@ +-- +DELETE FROM `quest_template_addon` WHERE `ID`=4770; +INSERT INTO `quest_template_addon` (`ID`, `MaxLevel`, `AllowableClasses`, `SourceSpellID`, `PrevQuestID`, `NextQuestID`, `ExclusiveGroup`, `RewardMailTemplateID`, `RewardMailDelay`, `RequiredSkillID`, `RequiredSkillPoints`, `RequiredMinRepFaction`, `RequiredMaxRepFaction`, `RequiredMinRepValue`, `RequiredMaxRepValue`, `ProvidedItemCount`, `SpecialFlags`) VALUES +(4770,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2); diff --git a/sql/updates/world/3.3.5/2017_11_21_02_world_335.sql b/sql/updates/world/3.3.5/2017_11_21_02_world_335.sql new file mode 100644 index 00000000000..6065aaaf7e7 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_02_world_335.sql @@ -0,0 +1,10 @@ +-- +-- Kirge Sternhorn +DELETE FROM `creature_text` WHERE `CreatureID`=3418; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3418,0,0,"To reach The Crossroads, $n, follow the road east then north.",12,1,100,1,0,0,1364,0,"Kirge Sternhorn"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3418; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3418 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3418,0,0,0,19,0,100,0,854,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Kirge Sternhorn - On Quest 'Journey to the Crossroads' Taken - Say Line 0"); diff --git a/sql/updates/world/3.3.5/2017_11_21_03_world_335.sql b/sql/updates/world/3.3.5/2017_11_21_03_world_335.sql new file 
mode 100644 index 00000000000..cff544f8310 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_03_world_335.sql @@ -0,0 +1,5 @@ +-- +-- Magatha Grimtotem +DELETE FROM `smart_scripts` WHERE `entryorguid`=4046 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4046,0,2,0,20,0,100,0,5062,0,0,0,11,60888,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magatha Grimtotem - On Quest 'Sacred Fire' Finished - Cast 'Cosmetic Enchant Cast'"); diff --git a/sql/updates/world/3.3.5/2017_11_21_04_world_335.sql b/sql/updates/world/3.3.5/2017_11_21_04_world_335.sql new file mode 100644 index 00000000000..5bd309edac8 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_04_world_335.sql @@ -0,0 +1,19 @@ +-- +-- Pathing for Motega Firemane +UPDATE `creature` SET `MovementType`=2 WHERE `guid`=21693; +DELETE FROM `creature_addon` WHERE `guid`=21693; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(21693,216930,0,0,1,0,""); + +DELETE FROM `waypoint_data` WHERE `id`=216930; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +(216930,1,-4915.57,-1381.18,-52.6008,5.14872,60000,0,216930,100,0), +(216930,2,-4913.27,-1379.59,-52.6119,0,0,0,0,100,0), +(216930,3,-4911.95,-1376.93,-52.6119,0,0,0,0,100,0), +(216930,4,-4913.82,-1376.04,-52.6119,0,15000,0,0,100,0), +(216930,5,-4915.75,-1378.4,-52.6119,0,0,0,0,100,0), +(216930,6,-4918.77,-1378.01,-52.6119,0,0,0,0,100,0); + +DELETE FROM `waypoint_scripts` WHERE `id`=216930; +INSERT INTO `waypoint_scripts` (`id`, `delay`, `command`, `datalong`, `datalong2`, `dataint`, `x`, `y`, `z`, `o`, `guid`) VALUES +(216930,1,1,23,0,0,0,0,0,0,953); diff --git a/sql/updates/world/3.3.5/2017_11_21_05_world_335.sql b/sql/updates/world/3.3.5/2017_11_21_05_world_335.sql new file mode 100644 index 00000000000..2c50d89f157 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_05_world_335.sql @@ -0,0 +1,12 @@ +-- +-- Arikara +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=10882; +DELETE FROM `smart_scripts` WHERE `entryorguid`=10882 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(10882,0,0,0,0,0,100,0,8000,16000,12000,24000,11,17213,32,0,0,0,0,5,0,0,0,0,0,0,0,"Arikara - In Combat - Cast Curse of Vengeance"), +(10882,0,1,0,2,0,100,1,0,30,0,0,11,8599,0,0,0,0,0,1,0,0,0,0,0,0,0,"Arikara - Between 0-30% Health - Cast Enrage (No Repeat)"), +(10882,0,2,0,2,0,100,1,0,30,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Arikara - Between 0-30% Health - Say Line 0 (No Repeat)"); + +DELETE FROM `creature_text` WHERE `CreatureID`=10882; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, 
`Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(10882,0,0,"%s becomes enraged!",16,0,100,0,0,0,10677,0,"Arikara"); diff --git a/sql/updates/world/3.3.5/2017_11_21_06_world_335.sql b/sql/updates/world/3.3.5/2017_11_21_06_world_335.sql new file mode 100644 index 00000000000..437fccf9cab --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_06_world_335.sql @@ -0,0 +1,34 @@ +-- +-- Feegly the Exiled +DELETE FROM `creature_text` WHERE `CreatureID`=3421; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(3421,0,0,"The power of the Tear of the Moons is mine! Mine I say!",12,0,100,5,0,0,1026,0,"Feegly the Exiled"), +(3421,1,0,"%s begins to rub the Tear of the Moons.",16,0,100,0,0,0,1027,0,"Feegly the Exiled"), +(3421,2,0,"Power! Glorious power!",12,0,100,0,0,0,1028,0,"Feegly the Exiled"), +(3421,3,0,"%s begins to make strange grunting noises. The Tear of the Moons drops to the ground and shatters.",16,0,100,0,0,0,1029,0,"Feegly the Exiled"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=3421; +DELETE FROM `smart_scripts` WHERE `entryorguid`=3421 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=342100 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(3421,0,0,0,20,0,100,0,857,0,0,0,80,342100,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feegly the Exiled - On Quest 'The Tear of the Moons' Finished - Run Script"), +(342100,9,0,0,0,0,100,0,1000,1000,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feegly the Exiled - On Script - Say Line 0"), +(342100,9,1,0,0,0,100,0,4000,4000,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feegly the Exiled - On Script - Say Line 1"), +(342100,9,2,0,0,0,100,0,2000,2000,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feegly the Exiled - On Script - Say Line 2"), +(342100,9,3,0,0,0,100,0,4000,4000,0,0,1,3,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feegly the Exiled - On Script - Say Line 3"), +(342100,9,4,0,0,0,100,0,2000,2000,0,0,5,15,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feegly the Exiled - On Script - Play Emote 15"), +(342100,9,5,0,0,0,100,0,2000,2000,0,0,11,5142,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feegly the Exiled - On Script - Cast Trogg Form"), +(342100,9,6,0,0,0,100,0,0,0,0,0,89,5,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feegly the Exiled - On Script - Start Random Movement"), +(342100,9,7,0,0,0,100,0,10000,10000,0,0,37,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feegly the Exiled - On Script - Kill Self"); + +-- Pathing for Feegly the Exiled +UPDATE `creature` SET `position_x`=-4217.83, `position_y`=-2341.47, `position_z`=91.7458, `spawndist`=0, `MovementType`=2 WHERE `guid`=14138; + +DELETE FROM `creature_addon` WHERE `guid`=14138; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(14138,141380,0,0,1,0,""); + +DELETE FROM `waypoint_data` WHERE `id`=141380; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +(141380,1,-4217.83,-2341.47,91.7458,2.02701,60000,0,0,100,0), +(141380,2,-4219.46,-2336.15,91.8028,2.02701,60000,0,0,100,0);
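+
+-- waypoint_data note (standard schema): `delay` is in milliseconds, so Feegly pauses 60 s at each of his two points,
+-- and `move_type` 0 means walk; at the end of the quest script, SmartAI action 37 (Kill Self) removes him.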
diff --git a/sql/updates/world/3.3.5/2017_11_21_07_world_335.sql b/sql/updates/world/3.3.5/2017_11_21_07_world_335.sql new file mode 100644 index 00000000000..86bce47eb5e --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_07_world_335.sql @@ -0,0 +1,252 @@ +-- +-- Quest "Blood Feeders" +DELETE FROM `quest_details` WHERE `ID`=6461; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6461,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6 WHERE `ID`=6461; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6461; + +-- Quest "Jin'Zil's Forest Magic" +DELETE FROM `quest_details` WHERE `ID`=1058; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1058,4,6,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=11, `EmoteOnComplete`=11 WHERE `ID`=1058; + +-- Quest "Report to Kadrak" +DELETE FROM `quest_details` WHERE `ID`=6542; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6542,1,1,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=3 WHERE `ID`=6542; + +-- Quest "Boulderslide Ravine" +DELETE FROM `quest_details` WHERE `ID`=6421; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6421,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=6421; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6421; + +-- Quest "Earthen Arise" +DELETE FROM `quest_details` WHERE `ID`=6481; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6481,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=6481; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6481; + +-- Quest "Cenarius' Legacy" +DELETE FROM `quest_details` WHERE `ID`=1087; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1087,1,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1087; + +-- Quest "Ordanus" +DELETE FROM `quest_details` WHERE `ID`=1088; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1088,5,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1088; + +-- Quest "The Den" +DELETE FROM `quest_details` WHERE `ID`=1089; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(1089,1,0,0,0,0,0,0,0,0); + +-- Quest "Cycle of Rebirth" +DELETE FROM `quest_details` WHERE `ID`=6301; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6301,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=6301;
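+
+-- Context for the emote columns in this file (standard 3.3.5 quest schema): quest_details Emote1-4 play on the quest
+-- giver while the offer page is open, quest_request_items EmoteOnIncomplete/EmoteOnComplete play at turn-in depending
+-- on quest state, and quest_offer_reward Emote1-4 play on the reward page. Ids come from Emotes.dbc, e.g.
+-- 1 = ONESHOT_TALK, 5 = ONESHOT_EXCLAMATION, 6 = ONESHOT_QUESTION.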
+ +-- Quest "New Life" +DELETE FROM `quest_details` WHERE `ID`=6381; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6381,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=6381; + +-- Quest "Harpies Threaten" +DELETE FROM `quest_details` WHERE `ID`=6282; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6282,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=6282; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6282; + +-- Quest "Bloodfury Bloodline" +DELETE FROM `quest_details` WHERE `ID`=6283; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6283,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=6283; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6283; + +-- Quest "Calling in the Reserves" +DELETE FROM `quest_details` WHERE `ID`=5881; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(5881,1,0,0,0,0,0,0,0,0); + +-- Quest "Arachnophobia" +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=6284; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6284; + +-- Quest "Super Reaper 6000" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1093; + +-- Quest "Further Instructions" +UPDATE `quest_request_items` SET `EmoteOnComplete`=3 WHERE `ID`=1094; + +-- Quest "Further Instructions (Part 2)" +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=1095; + +-- Quest "Gerenzo Wrenchwhistle" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1096; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=1096; + +-- Quest "Gerenzo's Orders" +UPDATE `quest_offer_reward` SET `Emote1`=2 WHERE `ID`=1090; + +-- Quest "Gerenzo's Orders (Part 2)" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=1092; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=1092; + +-- Quest "Trouble in the Deeps" +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6562; + +-- Quest "Warsong Scout Update" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=6547; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6547; + +-- Quest "Warsong Runner Update" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=6545; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6545; + +-- Quest "Warsong Outrider Update" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=6546; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6546; + +-- Quest "Ashenvale Outrunners" +DELETE FROM `quest_details` WHERE `ID`=6503; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6503,5,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6503; + +-- Quest "Satyr Horns" +UPDATE `quest_request_items` SET `EmoteOnComplete`=0 WHERE `ID`=6441; + +-- Quest "The Ashenvale Hunt" +DELETE FROM `quest_details` WHERE `ID`=6382; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES 
+(6382,1,0,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6382; + +-- Quest "The Ashenvale Hunt" +DELETE FROM `quest_details` WHERE `ID`=235; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(235,22,1,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=235; + +-- Quest "The Ashenvale Hunt" +DELETE FROM `quest_details` WHERE `ID`=742; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(742,22,1,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=742; + +-- Quest "The Ashenvale Hunt (Part 2)" +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6383; + +-- Quest "Shadumbra's Head" +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=24; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=24; + +-- Quest "Ursangous's Paw" +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=23; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=23; + +-- Quest "Stonetalon Standstill" +DELETE FROM `quest_details` WHERE `ID`=25; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(25,6,1,0,0,0,0,0,0,0); +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=25; + +-- Quest "The Befouled Element" +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=1918; +UPDATE `quest_offer_reward` SET `Emote1`=6, `Emote2`=1 WHERE `ID`=1918; + +-- Quest "Je'neu of the Earthen Ring" +DELETE FROM `quest_details` WHERE `ID`=824; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(824,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnComplete`=1 WHERE `ID`=824; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=824; + +-- Quest "Warsong Supplies" +DELETE FROM `quest_details` WHERE `ID`=6571; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6571,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=1 WHERE `ID`=6571; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=6571; + +-- Quest "Naga at the Zoram Strand" +UPDATE `quest_details` SET `Emote1`=1 WHERE `ID`=6442; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6442; + +-- Quest "Vorsha the Lasher" +UPDATE `quest_details` SET `Emote1`=1 WHERE `ID`=6641; +UPDATE `quest_offer_reward` SET `Emote1`=1, `Emote2`=2 WHERE `ID`=6641; + +-- Quest "Between a Rock and a Thistlefur" +UPDATE `quest_details` SET `Emote1`=5 WHERE `ID`=216; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=216; + +-- Quest "King of the Foulweald" +DELETE FROM `quest_details` WHERE `ID`=6621; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6621,1,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6, `EmoteOnComplete`=6 WHERE `ID`=6621; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=6621; + +-- Quest "Troll Charm" +UPDATE `quest_details` SET `Emote1`=1 WHERE `ID`=6462; +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=14 WHERE `ID`=6462; + +-- Quest "Amongst the Ruins" 
+UPDATE `quest_details` SET `Emote1`=1 WHERE `ID`=6921; +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6 WHERE `ID`=6921; +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=1 WHERE `ID`=6921; + +-- Quest "The Essence of Aku'Mai" +UPDATE `quest_details` SET `Emote1`=1 WHERE `ID`=6563; +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6 WHERE `ID`=6563; +UPDATE `quest_offer_reward` SET `Emote1`=1, `Emote2`=1 WHERE `ID`=6563; + +-- Quest "Allegiance to the Old Gods" +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=6564; +UPDATE `quest_offer_reward` SET `Emote1`=1 WHERE `ID`=6564; + +-- Quest "Allegiance to the Old Gods (Part 2)" +DELETE FROM `quest_details` WHERE `ID`=6565; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(6565,1,0,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=5 WHERE `ID`=6565; +UPDATE `quest_offer_reward` SET `Emote1`=4 WHERE `ID`=6565; + +-- Quest "Freedom to Ruul" +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=6 WHERE `ID`=6482; +UPDATE `quest_offer_reward` SET `Emote1`=5, `Emote2`=1, `Emote3`=2 WHERE `ID`=6482; + +-- Quest "Destroy the Legion" +UPDATE `quest_details` SET `Emote1`=5 WHERE `ID`=9534; +UPDATE `quest_offer_reward` SET `Emote1`=21 WHERE `ID`=9534; + +-- Quest "Diabolical Plans" +UPDATE `quest_request_items` SET `EmoteOnComplete`=6 WHERE `ID`=9535; +UPDATE `quest_offer_reward` SET `Emote1`=5 WHERE `ID`=9535; + +-- Quest "Never Again!" +DELETE FROM `quest_details` WHERE `ID`=9536; +INSERT INTO `quest_details` (`ID`, `Emote1`, `Emote2`, `Emote3`, `Emote4`, `EmoteDelay1`, `EmoteDelay2`, `EmoteDelay3`, `EmoteDelay4`, `VerifiedBuild`) VALUES +(9536,25,1,0,0,0,0,0,0,0); +UPDATE `quest_request_items` SET `EmoteOnIncomplete`=5, `EmoteOnComplete`=5 WHERE `ID`=9536; +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=1, `Emote3`=2 WHERE `ID`=9536; + +-- Quest "The Lost Pages" +UPDATE `quest_details` SET `Emote1`=1, `Emote2`=1 WHERE `ID`=6504; +UPDATE `quest_offer_reward` SET `Emote1`=1, `Emote2`=2 WHERE `ID`=6504; + +-- Quest "Torek's Assault" +UPDATE `quest_offer_reward` SET `Emote1`=4, `Emote2`=1 WHERE `ID`=6544; diff --git a/sql/updates/world/3.3.5/2017_11_21_08_world_335.sql b/sql/updates/world/3.3.5/2017_11_21_08_world_335.sql new file mode 100644 index 00000000000..8f9fbfca627 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_08_world_335.sql @@ -0,0 +1,2 @@ +-- Achellios the Banished - waypoint Move Type +UPDATE `waypoint_data` SET `move_type`=1 WHERE `id`=213880; diff --git a/sql/updates/world/3.3.5/2017_11_21_09_world.sql b/sql/updates/world/3.3.5/2017_11_21_09_world.sql new file mode 100644 index 00000000000..d17cadd55b9 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_21_09_world.sql @@ -0,0 +1,201 @@ +-- +UPDATE `creature` SET `spawndist` = 0, `MovementType` = 0, `phaseMask` =2, `spawntimesecs`=120 WHERE `id` = 28529; +UPDATE `creature` SET `spawndist` = 0, `MovementType` = 2, `phaseMask` =2, `spawntimesecs`=120 WHERE `id` = 28530; +UPDATE `creature` SET `spawndist` = 0, `MovementType` = 0 WHERE `id` = 28557; +UPDATE `creature` SET `spawndist` = 0, `MovementType` = 0 WHERE `id` = 28594; +UPDATE `creature_addon` SET `auras`= 29266 WHERE `guid`=130211; + +-- duplicates removed. 
+DELETE FROM creature WHERE guid IN (128498, 128499, 128763, 128776, 128784, 128786, 128794, 129151, 129152, 129214, 129215, 129235, 129236, 129245, 129246, 129247, 129248, 129249, 129251, 129255, 129259, 129263, 129270, 129271, 129272, 129280, 129283, 129293, 129294, 129295, 129298, 129305); +DELETE FROM creature_addon WHERE guid IN (128498, 128499, 128763, 128776, 128784, 128786, 128794, 129151, 129152, 129214, 129215, 129235, 129236, 129245, 129246, 129247, 129248, 129249, 129251, 129255, 129259, 129263, 129270, 129271, 129272, 129280, 129283, 129293, 129294, 129295, 129298, 129305); + +-- Acherus Geist +UPDATE creature SET position_x = 2374.8103, position_y = -5768.86914, position_z = 151.36789, orientation = 1.0397, spawndist = 10, MovementType = 1 WHERE guid = 128493; +UPDATE creature SET position_x = 2308.156738, position_y = -5692.175293, position_z = 154.035553, orientation = 5.493559, spawndist = 10, MovementType = 1 WHERE guid = 128492; +UPDATE creature SET position_x = 2294.674805, position_y = -5664.577148, position_z = 149.566483, orientation = 5.033237, spawndist = 10, MovementType = 1 WHERE guid = 128491; +UPDATE creature SET position_x = 2353.526855, position_y = -5650.782715, position_z = 155.107605, orientation = 5.975759, spawndist = 10, MovementType = 1 WHERE guid = 128490; +UPDATE creature SET position_x = 2388.947510, position_y = -5671.491211, position_z = 153.925858, orientation = 5.407634, spawndist = 15, MovementType = 1 WHERE guid = 128485; +UPDATE creature SET position_x = 2407.680664, position_y = -5656.884766, position_z = 156.329193, orientation = 6.108020, spawndist = 10, MovementType = 1 WHERE guid = 128489; +UPDATE creature SET position_x = 2396.615234, position_y = -5642.852051, position_z = 156.710114, orientation = 1.415266, spawndist = 10, MovementType = 1 WHERE guid = 128488; +UPDATE creature SET position_x = 2274.42, position_y = -5646.18, position_z = 142.258, orientation = 3.61903, spawndist = 0, MovementType = 2 WHERE guid = 128494; +UPDATE creature SET position_x = 2259.67, position_y = -5641.18, position_z = 138.909, orientation = 3.15041, spawndist = 0, MovementType = 2 WHERE guid = 128495; +UPDATE creature SET position_x = 2317.49, position_y = -5661.7, position_z = 153.201, orientation = 2.40253, spawndist = 0, MovementType = 2 WHERE guid = 128486; +UPDATE creature SET position_x = 2420.91, position_y = -5783.06, position_z = 144.912, orientation = 5.05147, spawndist = 0, MovementType = 2 WHERE guid = 128497; +UPDATE creature SET position_x = 2428.08, position_y = -5781.23, position_z = 143.936, orientation = 4.95301, spawndist = 0, MovementType = 2 WHERE guid = 128496; +UPDATE creature SET position_x = 2345.153809, position_y = -5720.771484, position_z = 153.922424, orientation = 0.743744, spawndist = 0, MovementType = 0 WHERE guid = 128578; +UPDATE `creature` SET `position_x` = -2866.342, `position_y` = 6403.193, `position_z`= 80.44, `orientation` = 0.044664 WHERE `guid`= 28647; + +-- Scarlet Farm Hound +UPDATE creature SET position_x = 1993.386230, position_y = -5786.060547, position_z = 100.834045, orientation = 4.610285, spawndist = 10, MovementType = 1 WHERE guid = 128584; +UPDATE creature SET position_x = 2032.527954, position_y = -5819.889648, position_z = 100.997482, orientation = 3.675666, spawndist = 10, MovementType = 1 WHERE guid = 128586; +UPDATE creature SET position_x = 1992.902222, position_y = -5866.563477, position_z = 100.788521, orientation = 4.040874, spawndist = 10, MovementType = 1 WHERE guid = 128583; +UPDATE 
creature SET position_x = 1951.767578, position_y = -5825.396484, position_z = 100.842171, orientation = 3.227984, spawndist = 10, MovementType = 1 WHERE guid = 128587; +UPDATE creature SET spawndist = 10, MovementType = 1 WHERE guid = 128589; + +-- Scarlet Medic +UPDATE creature SET position_x = 2172.88, position_y = -5729.58, position_z = 101.757, orientation = 1.30883 WHERE guid = 129257; + +-- Scarlet Peasant +UPDATE creature SET orientation = 5.90615 WHERE guid = 128758; + +-- Scarlet Captain +UPDATE `creature_addon` SET `path_id` = 1292990 WHERE `guid` = 128758; + +-- Havenshire Stallion - spawn time required for his event +UPDATE `creature` SET `spawntimesecs` = 20 WHERE `guid` IN (129217,129237,129216,129252); + +DELETE FROM `creature_formations` WHERE `leaderGUID`=129217; +INSERT INTO `creature_formations` (`leaderGUID`, `memberGUID`, `dist`, `angle`, `groupAI`, `point_1`, `point_2`) VALUES +(129217, 129217, 0, 0, 515, 0, 0), +(129217, 129237, 5, 45, 515, 0, 0), +(129217, 129216, 5, 315, 515, 0, 0), +(129217, 129252, 5, 0, 515, 0, 0); +UPDATE `creature_formations` SET `dist`=6, `angle`=0 WHERE `leaderGUID`=129212 AND `memberGUID`=129230; + +-- Scarlet Fleet Defender - quest requirement +UPDATE creature SET spawntimesecs = 60 WHERE id = 28834; + +-- Scarlet Infantryman - all should have the same aura addon (use creature_template_addon and remove the per-guid creature_addon rows) +DELETE FROM creature_addon WHERE guid IN (129264,129265,129266,129267,129268,129269,129270,129271,129272,129273,129274,129275,129276,129277,129278,129279,129281,129283,129285,129286,129287,129288,129289,129290,129291); +DELETE FROM creature_template_addon WHERE entry = 28609; +INSERT INTO `creature_template_addon` (`entry`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(28609,0,0,0,1,333,'48356'); +UPDATE `creature_addon` SET `auras` = "48356 52103", `bytes2`=1, `emote`=0 WHERE `guid` = 129282; +UPDATE `creature_addon` SET `auras` = "48356 52103", `bytes2`=1, `emote`=0 WHERE `guid` = 129284; + +UPDATE creature SET position_x = 2417.918945, position_y = -5859.758789, position_z = 105.522697, orientation = 0.9 WHERE guid = 129286; +UPDATE creature SET position_x = 2435.682373, position_y = -5867.880859, position_z = 106.577202, orientation = 1.6 WHERE guid = 129287; +UPDATE creature SET position_x = 2144.156006, position_y = -5674.584961, position_z = 110.115265, orientation = 0.2 WHERE guid = 129291; +UPDATE creature SET position_x = 2232.0961, position_y = -5832.9101, position_z = 101.341705, orientation = 4.845213 WHERE guid = 129210; + +-- Scarlet Peasant +UPDATE creature SET position_x = 2134.455811, position_y = -5743.186035, position_z = 99.443970, orientation = 2.490976 WHERE guid = 128792; +UPDATE creature SET position_x = 2085.78, position_y = -5754.1, position_z = 99.3376, orientation = 6.07106 WHERE guid = 128766; +UPDATE creature SET spawndist = 0, MovementType = 0 WHERE guid IN (128755,128761); +DELETE FROM creature_addon WHERE guid IN (128766,128782,128772,128774,128791,128792,128795); +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(128766,0,0,0,1,234,'48356'), +(128782,0,0,0,1,234,'48356'), +(128772,0,0,0,1,234,'48356'), +(128774,0,0,0,1,234,'48356'), +(128791,0,0,0,1,234,'48356'), +(128792,0,0,0,1,234,'48356'), +(128795,0,0,0,1,234,'48356'); + +-- WAYPOINTS -- for phase 1 +UPDATE creature SET MovementType = 2, spawndist = 0 WHERE guid IN (128486, 128484,128494,128495,128497,128496,129302,129299,129217); +DELETE FROM creature_addon WHERE guid IN (128486, 
128484,128494,128495,128497,128496,129302,129299,129217,128735,128736,128737); +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(128486, 1284860,0,0,0, 0,''), +(128484, 1284840,0,0,0, 0,''), +(128494, 1284940,0,0,0, 0,''), +(128495, 1284950,0,0,0, 0,''), +(128497, 1284970,0,0,0, 0,''), +(128496, 1284960,0,0,0, 0,''), +(129302, 1293020,2404,0,0, 0,'48356'), +(129299, 1292990,2404,0,0, 0,'48356'), +(129217, 1292170,0,0,0,0,''), +(128735, 1287350,2404,0,0, 0,''), +(128737, 1287370,2404,0,0, 0,''), +(128736, 1287360,2404,0,0, 0,''); + +DELETE FROM `waypoint_data` WHERE `id` IN (1284860,1284840,1284940,1284950,1284970,1284960,1293020,1292990,1292170,1287350,1287360,1287370); +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `delay`, `action`, `orientation`, `action_chance`, `move_type`) VALUES +(1284860,1,2317.49,-5661.7,153.201,2000,0,2.40253,100,1), +(1284860,2,2339.6,-5683.8,153.922,0,0,5.5874,100,1), +(1284860,3,2355.11,-5702.48,153.92,0,0,5.47194,100,1), +(1284860,4,2385.46,-5730.73,153.922,0,0,5.43574,100,1), +(1284860,5,2407.29,-5754.73,153.907,2000,0,5.50643,100,1), +(1284860,6,2363.77,-5709.2,153.923,0,0,2.27844,100,1), +(1284860,7,2317.49,-5661.7,153.201,2000,0,2.4,100,1), +(1284840,1,2322.74,-5624.89,152.099,1000,0,0.2,100,1), +(1284840,2,2312.95,-5645.39,149.817,0,0,0.21,100,1), +(1284940,1,2274.42,-5646.18,142.258,1000,0,3.62,100,1), +(1284940,2,2263.38,-5651.55,138.595,0,0,3.37974,100,1), +(1284940,3,2220.51,-5658.85,125.151,0,0,3.72,100,1), +(1284940,4,2206.73,-5668.97,118.319,0,0,3.72453,100,1), +(1284940,5,2184.66,-5685.67,110.593,0,0,1.62937,100,1), +(1284940,6,2183.91,-5677.17,112.45,0,0,1.07671,100,1), +(1284940,7,2202.52,-5652.79,121.832,0,0,0.505729,100,1), +(1284940,8,2234.02,-5638.04,133.237,0,0,0.255441,100,1), +(1284940,9,2262.76,-5630.8,142.946,0,0,5.64327,100,1), +(1284940,10,2279.18,-5643.24,143.819,0,0,3.68213,100,1), +(1284950,1,2259.67,-5641.18,138.909,1000,0,3.15,100,1), +(1284950,2,2220.92,-5644.93,127.963,0,0,3.7,100,1), +(1284950,3,2180.75,-5671.91,113.432,0,0,5.07701,100,1), +(1284950,4,2186.25,-5687.66,110.304,0,0,0.591784,100,1), +(1284950,5,2198.87,-5677.85,113.546,0,0,0.827452,100,1), +(1284950,6,2211.37,-5665.1,120.953,0,0,0.4346,100,1), +(1284950,7,2215.85,-5662.83,122.909,0,0,0.113371,100,1), +(1284950,8,2230.85,-5660.58,127.762,0,0,0.217311,100,1), +(1284950,9,2268.12,-5652.45,140.123,0,0,2.3,100,1), +(1284970,1,2420.91,-5783.06,144.912,1000,0,5.05147,100,1), +(1284970,2,2424.91,-5795.56,136.284,0,0,5.0,100,1), +(1284970,3,2430.05,-5813.4,123.698,0,0,4.76715,100,1), +(1284970,4,2431.55,-5839.9,112.193,0,0,5.14022,100,1), +(1284970,5,2435.88,-5848.18,110.821,0,0,1.28234,100,1), +(1284970,6,2439.07,-5830.71,117.19,0,0,1.7575,100,1), +(1284970,7,2437.07,-5817.21,121.852,0,0,1.7575,100,1), +(1284970,8,2433.13,-5798.02,133.529,0,0,1.72991,100,1), +(1284970,9,2428.88,-5780.52,144.159,0,0,3.64,100,1), +(1284960,1,2428.08,-5781.23,143.936,1000,0,4.95,100,1), +(1284960,2,2431.83,-5801.73,131.007,0,0,4.9,100,1), +(1284960,3,2434.52,-5815.74,122.281,0,0,4.70491,100,1), +(1284960,4,2435.68,-5850.09,110.343,0,0,2.27017,100,1), +(1284960,5,2426.39,-5839.45,112.787,0,0,1.48386,100,1), +(1284960,6,2426.89,-5832.45,115.05,0,0,1.48386,100,1), +(1284960,7,2428.61,-5806.57,127.862,0,0,1.87184,100,1), +(1284960,8,2424.36,-5792.57,138.63,0,0,1.87184,100,1), +(1284960,9,2420.36,-5779.07,146.508,0,0,6.21274,100,1), +(1284960,10,2426.77,-5779.23,145.036,0,0,5.28497,100,1), 
+(1293020,1,2142.69,-5854.15,101.352,0,0,5.64134,100,0), +(1293020,2,2160.03,-5863.74,101.343,0,0,5.96987,100,0), +(1293020,3,2163.18,-5866.82,101.325,5000,0,5.95,100,0), +(1293020,4,2181.06,-5870.53,101.288,0,0,6.1,100,0), +(1293020,5,2162.67,-5866.22,101.328,0,0,2.74714,100,0), +(1293020,6,2142.94,-5853.58,101.349,0,0,2.0285,100,0), +(1293020,7,2142.59,-5841.03,101.318,0,0,1.42374,100,0), +(1293020,8,2142.77,-5819.21,100.895,0,0,1.86334,100,0), +(1293020,9,2128.82,-5790.25,98.8795,0,0,1.38032,100,0), +(1293020,10,2128.17,-5794.43,99.0854,0,0,4.67271,100,0), +(1293020,11,2130.96,-5801.12,99.619,0,0,5.31045,100,0), +(1293020,12,2143.34,-5819.31,100.926,0,0,5.01514,100,0), +(1292990,1,2144.25,-5736.86,100.243,0,0,2.01409,100,0), +(1292990,2,2137.89,-5727.64,100.314,0,0,2.35,100,0), +(1292990,3,2128.15,-5716.13,100.621,0,0,2.2,100,0), +(1292990,4,2138.36,-5728.58,100.285,5000,0,5.34968,100,0), +(1292990,5,2144.69,-5738.47,100.186,0,0,4.96091,100,0), +(1292990,6,2147.98,-5759.81,99.8653,0,0,4.61769,100,0), +(1292990,7,2145.06,-5766.31,99.5844,0,0,4.25013,100,0), +(1292990,8,2148.29,-5759.08,99.9146,0,0,1.63554,100,0), +(1292170,1,2217.34,-5873.99,101.251,1000,0,0.593412,100,1), +(1292170,2,2214.53,-5869.6,101.534,0,0,2.83545,100,1), +(1292170,3,2181.07,-5865.87,101.331,0,0,3.36324,100,1), +(1292170,4,2139.21,-5878.27,101.469,0,0,3.93265,100,1), +(1292170,5,2118.11,-5903.57,104.343,0,0,3.3436,100,1), +(1292170,6,2072.44,-5914.58,106.378,0,0,3.39073,100,1), +(1292170,7,2028.89,-5920.68,105.233,0,0,3.28863,100,1), +(1292170,8,2003.16,-5924.91,105.324,0,0,3.41822,100,1), +(1292170,9,1968.43,-5935.75,102.545,0,0,3.21401,100,1), +(1292170,10,1924.76,-5931.14,103.405,0,0,2.93127,100,1), +(1292170,11,1894.96,-5921.15,103.129,0,0,2.94698,100,1), +(1292170,12,1849.19,-5916.93,105.57,0,0,3.19438,100,1), +(1292170,13,1818.86,-5920.18,112.552,0,0,3.19438,100,1), +(1292170,14,1780.82,-5925.05,116.113,0,0,2.85273,100,1), +(1292170,15,1741.46,-5907.59,116.121,1000,1188,2.73099,100,1), +-- Scarlet Commander +(1287350,1,1775.91,-5832.22,116.268,0,0,3.27545,100,0), +(1287350,2,1755.28,-5834.67,116.466,0,0,0.155067,100,0), +(1287370,1,1762.27,-5808.16,116.622,0,0,0.0435456,100,0), +(1287370,2,1784.43,-5806.17,114.238,0,0,3.29274,100,0), +(1287360,1,1576.5323,-5779.924,117.6273,0,0, 4.752857,100,0), +(1287360,2,1578.6364,-5790.505,119.1621,0,0, 4.963344,100,0); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=28605; +DELETE FROM `smart_scripts` WHERE `entryorguid`=-129217 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES +(-129217,0,0,0,34,0,100,0,2,14,0,0,41,0,0,0,0,0,0,11,28607,30,0,0,0,0,0,"Havenshire Stallion - On movement inform - despawn all horses"), +(-129217,0,1,0,34,0,100,0,2,14,0,0,41,0,0,0,0,0,0,11,28606,30,0,0,0,0,0,"Havenshire Stallion - On movement inform - despawn all horses"), +(-129217,0,2,0,34,0,100,0,2,14,0,0,41,0,0,0,0,0,0,11,28605,30,0,0,0,0,0,"Havenshire Stallion - On movement inform - despawn all horses"), +(-129217,0,3,0,25,0,100,0,0,0,0,0,48,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Havenshire Stallion - On reset - set active"), 
+(-129217,0,4,0,25,0,100,0,0,0,0,0,83,16777216,0,0,0,0,0,1,0,0,0,0,0,0,0,"Havenshire Stallion - On reset - Remove npc flag"); diff --git a/sql/updates/world/3.3.5/2017_11_22_00_world.sql b/sql/updates/world/3.3.5/2017_11_22_00_world.sql new file mode 100644 index 00000000000..56a5e6401c1 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_22_00_world.sql @@ -0,0 +1,7 @@ +-- +UPDATE `gameobject_template` SET `AIName`="" WHERE `entry` IN (113768, 113769, 113770, 113771, 113772); +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry` IN (32784); +DELETE FROM `smart_scripts` WHERE `entryorguid` IN (113768, 113769, 113770, 113771, 113772) AND `source_type`=1; +DELETE FROM `smart_scripts` WHERE `entryorguid` IN (32784) AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(32784, 0, 0, 0, 54, 0, 20, 0, 0, 0, 0, 0, 85, 61734, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, "Noblegarden Bunny Controller - On Just summoned - Invoker Cast 'Noblegarden Bunny'"); diff --git a/sql/updates/world/3.3.5/2017_11_22_01_world.sql b/sql/updates/world/3.3.5/2017_11_22_01_world.sql new file mode 100644 index 00000000000..288b3ad425b --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_22_01_world.sql @@ -0,0 +1,5 @@ +-- Gymer +DELETE FROM `creature_template_addon` WHERE `entry`=29647; +INSERT INTO `creature_template_addon` (`entry`,`bytes1`,`bytes2`) VALUES (29647,1,0); +-- Gerk +UPDATE `creature_template` SET `InhabitType`= 4 WHERE `entry`=29455; diff --git a/sql/updates/world/3.3.5/2017_11_22_02_world.sql b/sql/updates/world/3.3.5/2017_11_22_02_world.sql new file mode 100644 index 00000000000..eee98bc53fb --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_22_02_world.sql @@ -0,0 +1,4 @@ +-- +DELETE FROM `smart_scripts` WHERE `entryorguid` IN (28907) AND `source_type`=0 and `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES +(28907, 0, 1, 0, 25, 0, 100, 0, 0, 0, 0, 0, 59, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, "Prince Valanar - On reset - Set run off"); diff --git a/sql/updates/world/3.3.5/2017_11_22_03_world.sql b/sql/updates/world/3.3.5/2017_11_22_03_world.sql new file mode 100644 index 00000000000..12d213ae2f7 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_22_03_world.sql @@ -0,0 +1,2 @@ +UPDATE `creature_template` SET `ScriptName` = 'npc_guard_generic' WHERE `ScriptName` = 'guard_generic'; +UPDATE `creature_template` SET `ScriptName` = 'npc_guard_shattrath_faction' WHERE `ScriptName` IN ('guard_shattrath_scryer', 'guard_shattrath_aldor'); diff --git a/sql/updates/world/3.3.5/2017_11_22_03_world_335.sql b/sql/updates/world/3.3.5/2017_11_22_03_world_335.sql new file mode 100644 index 00000000000..2b59b31859c --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_22_03_world_335.sql @@ -0,0 +1,2 @@ +-- +DELETE FROM 
`creature` WHERE `guid`=43465 AND `id`=34675; diff --git a/sql/updates/world/3.3.5/2017_11_22_04_world.sql b/sql/updates/world/3.3.5/2017_11_22_04_world.sql new file mode 100644 index 00000000000..71619b3dc52 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_22_04_world.sql @@ -0,0 +1,3 @@ +-- +UPDATE `creature_template` SET `unit_flags`=33536 WHERE `entry` IN (35337,35338,35340,35341,35342,35343,34714, 34679, 34768, 34653); +DELETE FROM `game_event_creature` WHERE `guid`=43465; diff --git a/sql/updates/world/3.3.5/2017_11_24_00_world.sql b/sql/updates/world/3.3.5/2017_11_24_00_world.sql new file mode 100644 index 00000000000..86b7af76172 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_24_00_world.sql @@ -0,0 +1,18 @@ +-- +-- Phase: 128 +-- Duplicates Removed +DELETE FROM `creature` WHERE `guid` IN (130365,130370,130364,130374,130372,130377,130367,130375,130366,130361,130371); +DELETE FROM `creature_addon` WHERE `guid` IN (130365,130370,130364,130374,130372,130377,130367,130375,130366,130361,130371); + +UPDATE `creature` SET `phaseMask` = 128, `spawndist` = 0, `MovementType` = 0 WHERE `guid` = 130400; +UPDATE `creature` SET `spawndist` = 0, `MovementType` = 0 WHERE `guid` = 130401; +UPDATE `creature` SET `spawndist` = 20, `MovementType` = 1 WHERE `id` = 29189; +UPDATE `creature` SET `phaseMask` = 128 WHERE `guid` IN (130387,130419); +UPDATE `creature` SET `spawndist` = 10, `MovementType` = 1 WHERE `guid` IN (130368,130369); + +-- Howling Geist +UPDATE `creature` SET `position_x` = 2597.903076, `position_y` = -5549.420410, `position_z` = 159.772934, `spawndist` = 20, `MovementType` = 1 WHERE `guid` = 130387; +UPDATE `creature` SET `position_x` = 2622.867432, `position_y` = -5481.481934, `position_z` = 156.948212, `spawndist` = 20, `MovementType` = 1 WHERE `guid` = 130386; +UPDATE `creature` SET `position_x` = 2659.806885, `position_y` = -5537.375000, `position_z` = 163.303589, `spawndist` = 20, `MovementType` = 1 WHERE `guid` = 130385; +UPDATE `creature` SET `position_x` = 2697.650391, `position_y` = -5469.664063, `position_z` = 156.943970, `spawndist` = 20, `MovementType` = 1 WHERE `guid` = 130383; +UPDATE `creature` SET `position_x` = 2735.920410, `position_y` = -5363.948730, `position_z` = 157.715302, `spawndist` = 20, `MovementType` = 1 WHERE `guid` = 130381; diff --git a/sql/updates/world/3.3.5/2017_11_25_00_world.sql b/sql/updates/world/3.3.5/2017_11_25_00_world.sql new file mode 100644 index 00000000000..d91529b1e4e --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_25_00_world.sql @@ -0,0 +1,5 @@ +-- +DELETE FROM `smart_scripts` WHERE `entryorguid`=23671 AND `source_type`=0 AND `id` IN (17,18); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(23671, 0, 17, 18, 1, 0, 100, 1, 30000, 30000, 0, 0, 44, 2, 0, 0, 0, 0, 0, 9, 24118, 0, 90, 0, 0, 0, 0, "Halfdan the Ice-Hearted - Ooc - Change Phasemask of Val'kyr Observer"), +(23671, 0, 18, 0, 61, 0, 100, 0, 0, 0, 0, 0, 41, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, "Halfdan the Ice-Hearted - Ooc - despawn"); diff --git a/sql/updates/world/3.3.5/2017_11_26_00_world.sql b/sql/updates/world/3.3.5/2017_11_26_00_world.sql new file mode 100644 
index 00000000000..d3c9f7b192d --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_26_00_world.sql @@ -0,0 +1,4 @@ +-- +DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=24464 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(24464,0,2,0,11,0,100,0,0,0,0,0,8,0,0,0,0,0,0,1,0,0,0,0,0,0,0,'Scourging Crystal - On Respawn - Set Reactstate Passive'); diff --git a/sql/updates/world/3.3.5/2017_11_28_00_world.sql b/sql/updates/world/3.3.5/2017_11_28_00_world.sql new file mode 100644 index 00000000000..d1f855e6323 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_00_world.sql @@ -0,0 +1,169 @@ +SET @CGUID:=144950; +SET @EVENT1:=6; -- New Year's Eve +SET @EVENT2:=72; -- Fireworks Spectacular + +DELETE FROM `creature_template_addon` WHERE `entry` = 15724; +INSERT INTO `creature_template_addon` (`entry`, `auras`) VALUES +(15724, '26115'); + +DELETE FROM `creature` WHERE `guid` BETWEEN @CGUID+0 AND @CGUID+40; +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@CGUID+0 , 15724, 0, 0, 0, 1, 1, 0, 0, -14303.2, 442.825, 28.34243, 4.363323, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+1 , 15724, 0, 0, 0, 1, 1, 0, 0, -14304.99, 433.2847, 29.36503, 1.396263, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+2 , 15724, 0, 0, 0, 1, 1, 0, 0, -14272.64, 421.7396, 36.33361, 2.032897, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+3 , 15724, 0, 0, 0, 1, 1, 0, 0, -14314.71, 474.2673, 18.35473, 1.095548, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+4 , 15724, 0, 0, 0, 1, 1, 0, 0, -14280.88, 424.5142, 35.37117, 2.526357, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+5 , 15724, 0, 0, 0, 1, 1, 0, 0, -14278.35, 553.1927, 8.983554, 3.909538, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+6 , 15724, 0, 0, 0, 1, 1, 0, 0, -14302.29, 511.1232, 8.829531, 3.860822, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+7 , 15724, 0, 0, 0, 1, 1, 0, 0, -14286.46, 540.7466, 9.004538, 1.972222, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+8 , 15724, 0, 0, 0, 1, 1, 0, 0, -14293.2, 557.6228, 8.855279, 5.218534, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+9 , 15724, 0, 0, 0, 1, 1, 0, 0, -14343.7, 450.156, 7.633924, 5.585053, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1578) (Auras: 26115 - 26115) +(@CGUID+10, 15724, 0, 0, 0, 1, 1, 0, 0, -14366.24, 426.8889, 7.443278, 1.780236, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+11, 15724, 0, 0, 0, 1, 1, 0, 0, -14415.8, 518.922, 
5.104904, 3.176499, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+12, 15724, 0, 0, 0, 1, 1, 0, 0, -14390.88, 414.8611, 22.82784, 2.740167, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+13, 15724, 0, 0, 0, 1, 1, 0, 0, -14427.25, 456.5104, 15.42189, 4.485496, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+14, 15724, 0, 0, 0, 1, 1, 0, 0, -14400.63, 413.0382, 7.934666, 0.7126135, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+15, 15724, 0, 0, 0, 1, 1, 0, 0, -14437.55, 462.809, 3.976888, 3.560472, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+16, 15724, 0, 0, 0, 1, 1, 0, 0, -14432.76, 509.2965, 7.009702, 4.28674, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+17, 15724, 0, 0, 0, 1, 1, 0, 0, -14405.37, 410.5955, 27.81997, 0.8901179, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+18, 15724, 0, 0, 0, 1, 1, 0, 0, -14386, 399.288, 6.645936, 2.408554, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+19, 15724, 0, 0, 0, 1, 1, 0, 0, -14426.78, 523.7899, 5.075222, 5.72468, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 35) (Auras: 26115 - 26115) +(@CGUID+20, 15724, 0, 0, 0, 1, 1, 0, 0, -14441.94, 481.1354, 15.273, 4.572762, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+21, 15724, 0, 0, 0, 1, 1, 0, 0, -14423.59, 424.6684, 21.85322, 3.719135, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+22, 15724, 0, 0, 0, 1, 1, 0, 0, -14445.73, 519.0208, 26.399, 5.340707, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+23, 15724, 0, 0, 0, 1, 1, 0, 0, -14442.65, 454.5469, 3.817722, 0.5235988, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+24, 15724, 0, 0, 0, 1, 1, 0, 0, -14438.98, 436.3958, 20.50878, 3.071779, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+25, 15724, 0, 0, 0, 1, 1, 0, 0, -14450.99, 462.4686, 15.43489, 3.682765, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+26, 15724, 0, 0, 0, 1, 1, 0, 0, -14448.31, 448.5817, 20.47261, 5.001486, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+27, 15724, 0, 0, 0, 1, 1, 0, 0, -14451.06, 454.7153, 20.54233, 0.7504916, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+28, 15724, 0, 0, 0, 1, 1, 0, 0, -14457.22, 494.8703, 26.35823, 1.029744, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+29, 15724, 0, 0, 0, 1, 1, 0, 0, -14458.1, 488.625, 15.20773, 1.937315, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+30, 15724, 0, 0, 0, 1, 1, 0, 0, -14449.02, 436.2082, 3.889718, 1.064651, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+31, 15724, 0, 0, 0, 1, 1, 0, 0, -14376.58, 386.1563, 22.67363, 4.852015, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+32, 15724, 0, 0, 0, 1, 1, 0, 0, -14461.74, 478.2934, 26.94364, 3.508112, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+33, 15724, 0, 0, 0, 1, 1, 
0, 0, -14434.82, 416.8488, 8.818702, 0.6616975, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+34, 15724, 0, 0, 0, 1, 1, 0, 0, -14465.08, 474.9278, 15.03919, 6.038839, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+35, 15724, 0, 0, 0, 1, 1, 0, 0, -14460.96, 502.9427, 15.2055, 4.764749, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+36, 15724, 0, 0, 0, 1, 1, 0, 0, -14463.32, 502.349, 26.35558, 5.707227, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+37, 15724, 0, 0, 0, 1, 1, 0, 0, -14448.88, 429.5278, 15.10494, 3.735005, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+38, 15724, 0, 0, 0, 1, 1, 0, 0, -14471.41, 468.4601, 36.53928, 4.29351, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+39, 15724, 0, 0, 0, 1, 1, 0, 0, -14472.04, 483.9948, 20.465, 0.6283185, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0), -- 15724 (Area: 1759) (Auras: 26115 - 26115) +(@CGUID+40, 15724, 0, 0, 0, 1, 1, 0, 0, -14472.41, 486.0102, 26.77704, 5.547335, 120, 0, 0, 0, 0, 0, 0, 0, 0, '', 0); -- 15724 (Area: 1759) (Auras: 26115 - 26115) + +DELETE FROM `game_event_creature` WHERE `guid` BETWEEN @CGUID+0 AND @CGUID+40 AND `eventEntry`=@EVENT1; +INSERT INTO `game_event_creature` SELECT @EVENT1, creature.guid FROM `creature` WHERE creature.guid BETWEEN @CGUID+0 AND @CGUID+40; + +DELETE FROM `game_event_creature` WHERE `guid` BETWEEN @CGUID+0 AND @CGUID+40 AND `eventEntry`=@EVENT2; +INSERT INTO `game_event_creature` SELECT @EVENT2, creature.guid FROM `creature` WHERE creature.guid BETWEEN @CGUID+0 AND @CGUID+40; + +-- 6 New Year's Eve & 72 Fireworks Spectacular +-- Despawn Booty Bay Bruiser but not 718, 680, 653 +DELETE FROM `game_event_creature` WHERE `guid` IN +(76,77,160,161,162,163,164,165,178,182,589,590,591,592,593,594,595,596,597,598,599,649,650,651,652,654,655,656,657,679,681,682,683,686,687,688,689,692,693, +694,695,696,697,698,699,700,715,716,717,719,2164,2167) AND `eventEntry` IN (-@EVENT1, -@EVENT2); + +INSERT INTO `game_event_creature` (`eventEntry`, `guid`) VALUES +(-@EVENT1, 76), +(-@EVENT1, 77), +(-@EVENT1, 160), +(-@EVENT1, 161), +(-@EVENT1, 162), +(-@EVENT1, 163), +(-@EVENT1, 164), +(-@EVENT1, 165), +(-@EVENT1, 178), +(-@EVENT1, 182), +(-@EVENT1, 589), +(-@EVENT1, 590), +(-@EVENT1, 591), +(-@EVENT1, 592), +(-@EVENT1, 593), +(-@EVENT1, 594), +(-@EVENT1, 595), +(-@EVENT1, 596), +(-@EVENT1, 597), +(-@EVENT1, 598), +(-@EVENT1, 599), +(-@EVENT1, 649), +(-@EVENT1, 650), +(-@EVENT1, 651), +(-@EVENT1, 652), +(-@EVENT1, 654), +(-@EVENT1, 655), +(-@EVENT1, 656), +(-@EVENT1, 657), +(-@EVENT1, 679), +(-@EVENT1, 681), +(-@EVENT1, 682), +(-@EVENT1, 683), +(-@EVENT1, 686), +(-@EVENT1, 687), +(-@EVENT1, 688), +(-@EVENT1, 689), +(-@EVENT1, 692), +(-@EVENT1, 693), +(-@EVENT1, 694), +(-@EVENT1, 695), +(-@EVENT1, 696), +(-@EVENT1, 697), +(-@EVENT1, 698), +(-@EVENT1, 699), +(-@EVENT1, 700), +(-@EVENT1, 715), +(-@EVENT1, 716), +(-@EVENT1, 717), +(-@EVENT1, 719), +(-@EVENT1, 2164), +(-@EVENT1, 2167), +(-@EVENT2, 76), +(-@EVENT2, 77), +(-@EVENT2, 160), +(-@EVENT2, 161), +(-@EVENT2, 162), +(-@EVENT2, 163), +(-@EVENT2, 164), +(-@EVENT2, 165), +(-@EVENT2, 178), +(-@EVENT2, 182), +(-@EVENT2, 589), +(-@EVENT2, 590), +(-@EVENT2, 591), +(-@EVENT2, 592), +(-@EVENT2, 593), +(-@EVENT2, 594), +(-@EVENT2, 595), +(-@EVENT2, 596), +(-@EVENT2, 597), +(-@EVENT2, 598), +(-@EVENT2, 599), +(-@EVENT2, 
649), +(-@EVENT2, 650), +(-@EVENT2, 651), +(-@EVENT2, 652), +(-@EVENT2, 654), +(-@EVENT2, 655), +(-@EVENT2, 656), +(-@EVENT2, 657), +(-@EVENT2, 679), +(-@EVENT2, 681), +(-@EVENT2, 682), +(-@EVENT2, 683), +(-@EVENT2, 686), +(-@EVENT2, 687), +(-@EVENT2, 688), +(-@EVENT2, 689), +(-@EVENT2, 692), +(-@EVENT2, 693), +(-@EVENT2, 694), +(-@EVENT2, 695), +(-@EVENT2, 696), +(-@EVENT2, 697), +(-@EVENT2, 698), +(-@EVENT2, 699), +(-@EVENT2, 700), +(-@EVENT2, 715), +(-@EVENT2, 716), +(-@EVENT2, 717), +(-@EVENT2, 719), +(-@EVENT2, 2164), +(-@EVENT2, 2167); diff --git a/sql/updates/world/3.3.5/2017_11_28_01_world_335.sql b/sql/updates/world/3.3.5/2017_11_28_01_world_335.sql new file mode 100644 index 00000000000..6169bd23f8a --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_01_world_335.sql @@ -0,0 +1,2 @@ +-- +UPDATE `creature` SET `position_x`=-8850.944336, `position_y`=716.976990, `position_z`=97.425034, `orientation`=1.968332, `MovementType`=0, `spawndist`=0 WHERE `guid`=120682 AND `id`=68; diff --git a/sql/updates/world/3.3.5/2017_11_28_02_world.sql b/sql/updates/world/3.3.5/2017_11_28_02_world.sql new file mode 100644 index 00000000000..524a82a691a --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_02_world.sql @@ -0,0 +1,13 @@ +-- Naias +UPDATE `event_scripts` SET `x`= -12132.446289, `y`=964.982422, `z`= 5.194236, `o`= 5.130559 WHERE `id`=10554; +SET @ENTRY := 17207; +UPDATE `creature_template` SET `AIName`='SmartAI' WHERE `entry`=@ENTRY; +DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=@ENTRY; +INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES +(@ENTRY,0,0,0,0,0,100,0,5000,5000,12000,13000,11,37054,0,0,0,0,0,2,0,0,0,0,0,0,0,'Naias - IC - cast Water Bolt'), +(@ENTRY,0,1,0,0,0,100,0,2000,6000,10000,14000,11,34828,0,0,0,0,0,2,0,0,0,0,0,0,0,'Naias - IC - cast Water Shield'), +(@ENTRY,0,2,0,54,0,100,0,0,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,'Naias - On Just Summoned - Say Line 0'); + +DELETE FROM `creature_text` WHERE `CreatureID`=17207; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(17207, 0, 0, 'Who challenges Naias? Puny $r, you are little better than those mindless trolls I have played against each other like so many pieces on a game board!', 12, 0, 100, 0, 0, 0, 13560, 0, 'Naias'); diff --git a/sql/updates/world/3.3.5/2017_11_28_03_world.sql b/sql/updates/world/3.3.5/2017_11_28_03_world.sql new file mode 100644 index 00000000000..7dbfe96e263 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_03_world.sql @@ -0,0 +1,5 @@ +-- "Stinky" Ignatz +DELETE FROM `creature_text` WHERE `CreatureID`=4880 AND `GroupID`=10 AND `ID` IN (2,3); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4880,10,2,"Look out! The $n attacks!",12,0,100,0,0,0,1628,0,"\"Stinky\" Ignatz"), +(4880,10,3,"I'm glad you're here! 
Because I need your help!!",12,0,100,0,0,0,1631,0,"\"Stinky\" Ignatz"); diff --git a/sql/updates/world/3.3.5/2017_11_28_04_world.sql b/sql/updates/world/3.3.5/2017_11_28_04_world.sql new file mode 100644 index 00000000000..f3e27cadbbe --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_04_world.sql @@ -0,0 +1,5 @@ +-- Grenka Bloodscreech +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4490; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4490 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4490,0,0,0,0,0,100,0,3000,5000,12000,15000,11,3589,0,0,0,0,0,1,0,0,0,0,0,0,0,"Grenka Bloodscreech - In Combat - Cast Deafening Screech"); diff --git a/sql/updates/world/3.3.5/2017_11_28_05_world.sql b/sql/updates/world/3.3.5/2017_11_28_05_world.sql new file mode 100644 index 00000000000..55274e9631a --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_05_world.sql @@ -0,0 +1,19 @@ +-- Kin'weelay +DELETE FROM `creature_text` WHERE `CreatureID`=2519 AND `GroupID` IN (2,3,4); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(2519,2,0,"I am eager to hear what so angry an orc has to say...",12,0,100,1,0,0,1664,0,"Kin'weelay"), +(2519,3,0,"%s places Marg's head in the cauldron.",16,0,100,0,0,0,1665,0,"Kin'weelay"), +(2519,4,0,"Peer into the cauldron, $n. 
Marg wishes to speak...",12,0,100,1,0,0,1666,0,"Kin'weelay"); + +DELETE FROM `smart_scripts` WHERE `entryorguid`=2519 AND `source_type`=0 AND `id`=1; +DELETE FROM `smart_scripts` WHERE `entryorguid`=251901 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2519,0,1,0,20,0,100,0,1240,0,0,0,80,251901,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Quest 'The Troll Witchdoctor' Finished - Run Script"), +(251901,9,0,0,0,0,100,0,0,0,0,0,105,16,0,0,0,0,0,14,6783,2076,0,0,0,0,0,"Kin'weelay - On Script - Add Gameobject Flag"), +(251901,9,1,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Remove Npc Flag Questgiver"), +(251901,9,2,0,0,0,100,0,1000,1000,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Say Line 2"), +(251901,9,3,0,0,0,100,0,3000,3000,0,0,1,3,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Say Line 3"), +(251901,9,4,0,0,0,100,0,2000,2000,0,0,11,3644,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Cast Speak with Heads"), +(251901,9,5,0,0,0,100,0,6000,6000,0,0,1,4,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Say Line 4"), +(251901,9,6,0,0,0,100,0,0,0,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kin'weelay - On Script - Add Npc Flag Questgiver"), +(251901,9,7,0,0,0,100,0,0,0,0,0,106,16,0,0,0,0,0,14,6783,2076,0,0,0,0,0,"Kin'weelay - On Script - Remove Gameobject Flag"); diff --git a/sql/updates/world/3.3.5/2017_11_28_06_world.sql b/sql/updates/world/3.3.5/2017_11_28_06_world.sql new file mode 100644 index 00000000000..ac84289e082 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_06_world.sql @@ -0,0 +1,17 @@ +-- Kravel Koalbeard +DELETE FROM `creature_text` WHERE `CreatureID`=4452 AND `GroupID` IN (0,1,2); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4452,0,0,"%s places the crate of parts on the ground.",16,0,100,0,0,0,1479,0,"Kravel Koalbeard"), +(4452,1,0,"%s grabs a part and puts it in his pocket...",16,0,100,0,0,0,1477,0,"Kravel Koalbeard"), +(4452,2,0,"There, that should do it...",12,0,100,0,0,0,1478,0,"Kravel Koalbeard"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4452; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4452 AND `source_type`=0 AND `id`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=445200 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4452,0,0,0,20,0,100,0,1112,0,0,0,80,445200,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Quest 'Parts for Kravel' Finished - Run Script"), +(445200,9,0,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Remove Npc Flag Questgiver"), 
+(445200,9,1,0,0,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Say Line 0"), +(445200,9,2,0,0,0,100,0,4000,4000,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Say Line 1"), +(445200,9,3,0,0,0,100,0,2000,2000,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Say Line 2"), +(445200,9,4,0,0,0,100,0,2000,2000,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Add Npc Flag Questgiver"); diff --git a/sql/updates/world/3.3.5/2017_11_28_07_world.sql b/sql/updates/world/3.3.5/2017_11_28_07_world.sql new file mode 100644 index 00000000000..721eb0c9f8b --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_07_world.sql @@ -0,0 +1,35 @@ +-- Martek the Exiled +DELETE FROM `creature_text` WHERE `CreatureID`=4618; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4618,0,0,"%s takes the indurium flakes to his forge.",16,0,100,0,0,0,1500,0,"Martek the Exiled"), +(4618,1,0,"Now let's heat up these flakes...",12,0,100,0,0,0,1501,0,"Martek the Exiled"), +(4618,2,0,"So far they're holding. Let's turn up the heat...",12,0,100,1,0,0,1502,0,"Martek the Exiled"), +(4618,3,0,"By Orgrim! This indurium can withstand massive heat!",12,0,100,5,0,0,1503,0,"Martek the Exiled"), +(4618,4,0,"A successful test! $n, this indurium is some amazing stuff.",12,0,100,1,0,0,1504,0,"Martek the Exiled"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4618; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4618 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid` IN (461800,461801) AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4618,0,0,0,20,0,100,0,1108,0,0,0,80,461800,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Quest 'Indurium' Finished - Run Script"), +(4618,0,1,0,40,0,100,0,1,4618,0,0,80,461801,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Waypoint 1 Reached - Run Script"), +(4618,0,2,0,40,0,100,0,2,4618,0,0,66,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Waypoint 2 Reached - Set Orientation"), +(461800,9,0,0,0,0,100,0,0,0,0,0,83,3,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Remove Npc Flag Questgiver+Gossip"), +(461800,9,1,0,0,0,100,0,1000,1000,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Say Line 0"), +(461800,9,2,0,0,0,100,0,0,0,0,0,53,0,4618,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Start Waypoint"), +(461801,9,0,0,0,0,100,0,0,0,0,0,54,19000,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Pause Waypoint"), +(461801,9,1,0,0,0,100,0,0,0,0,0,66,0,0,0,0,0,0,8,0,0,0,0,0,0,1.24047,"Martek the Exiled - On Script - Set Orientation"), +(461801,9,2,0,0,0,100,0,1000,1000,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Say Line 1"), +(461801,9,3,0,0,0,100,0,0,0,0,0,17,69,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Set Emote State 69"), +(461801,9,4,0,0,0,100,0,3000,3000,0,0,17,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Set Emote State 
0"), +(461801,9,5,0,0,0,100,0,1000,1000,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Say Line 2"), +(461801,9,6,0,0,0,100,0,3000,3000,0,0,17,69,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Set Emote State 69"), +(461801,9,7,0,0,0,100,0,3000,3000,0,0,17,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Set Emote State 0"), +(461801,9,8,0,0,0,100,0,1000,1000,0,0,1,3,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Say Line 3"), +(461801,9,9,0,0,0,100,0,5000,5000,0,0,1,4,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Say Line 4"), +(461801,9,10,0,0,0,100,0,5000,5000,0,0,82,3,0,0,0,0,0,1,0,0,0,0,0,0,0,"Martek the Exiled - On Script - Add Npc Flag Questgiver+Gossip"); + +DELETE FROM `waypoints` WHERE `entry`=4618; +INSERT INTO `waypoints` (`entry`, `pointid`, `position_x`, `position_y`, `position_z`, `point_comment`) VALUES +(4618,1,-6764.09,-3126.58,241.756,""), +(4618,2,-6763.39,-3129.3,241.384,""); diff --git a/sql/updates/world/3.3.5/2017_11_28_08_world.sql b/sql/updates/world/3.3.5/2017_11_28_08_world.sql new file mode 100644 index 00000000000..ef6cf8dba8e --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_08_world.sql @@ -0,0 +1,15 @@ +-- Fizzle Brassbolts +DELETE FROM `creature_text` WHERE `CreatureID`=4454; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4454,0,0,"%s listens as $n tells him of $ghis:her; adventures in the Badlands.",16,0,100,0,0,0,1495,0,"Fizzle Brassbolts"), +(4454,1,0,"Astounding! So then indurium is the key!",12,0,100,4,0,0,1496,0,"Fizzle Brassbolts"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4454; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4454 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=445400 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4454,0,0,0,20,0,100,0,1137,0,0,0,80,445400,0,0,0,0,0,1,0,0,0,0,0,0,0,"Fizzle Brassbolts - On Quest 'News for Fizzle' Finished - Run Script"), +(445400,9,0,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Fizzle Brassbolts - On Script - Remove Npc Flag Questgiver"), +(445400,9,1,0,0,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Fizzle Brassbolts - On Script - Say Line 0"), +(445400,9,2,0,0,0,100,0,3000,3000,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Fizzle Brassbolts - On Script - Say Line 1"), +(445400,9,3,0,0,0,100,0,2000,2000,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Fizzle Brassbolts - On Script - Add Npc Flag Questgiver"); diff --git a/sql/updates/world/3.3.5/2017_11_28_09_world.sql b/sql/updates/world/3.3.5/2017_11_28_09_world.sql new file mode 100644 index 00000000000..4b751b3c19c --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_09_world.sql @@ -0,0 +1,20 @@ +DELETE FROM `creature_text` WHERE `CreatureID`=773; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(773,0,0,"%s snorts a speck of dream dust up his 
nose...",16,0,100,0,0,0,1548,0,"Krazek"), +(773,1,0,"%s is dazed...",16,0,100,0,0,0,1549,0,"Krazek"), +(773,2,0,"ACHOO!",12,0,100,33,0,0,1550,0,"Krazek"), +(773,3,0,"Wow! $n, that dream dust is powerful stuff!",12,0,100,5,0,0,1551,0,"Krazek"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=773; +DELETE FROM `smart_scripts` WHERE `entryorguid`=773 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=77300 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(773,0,0,0,20,0,100,0,1116,0,0,0,80,77300,0,0,0,0,0,1,0,0,0,0,0,0,0,"Krazek - On Quest 'Dream Dust in the Swamp' Finished - Run Script"), +(77300,9,0,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Krazek - On Script - Remove Npc Flag Questgiver"), +(77300,9,1,0,0,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Krazek - On Script - Say Line 0"), +(77300,9,2,0,0,0,100,0,2000,2000,0,0,11,6903,0,0,0,0,0,1,0,0,0,0,0,0,0,"Krazek - On Script - Cast Krazek's Drug"), +(77300,9,3,0,0,0,100,0,0,0,0,0,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Krazek - On Script - Say Line 1"), +(77300,9,4,0,0,0,100,0,11000,11000,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Krazek - On Script - Say Line 2"), +(77300,9,5,0,0,0,100,0,2000,2000,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Krazek - On Script - Say Line 2"), +(77300,9,6,0,0,0,100,0,3000,3000,0,0,1,3,0,0,0,0,0,7,0,0,0,0,0,0,0,"Krazek - On Script - Say Line 3"), +(77300,9,7,0,0,0,100,0,2000,2000,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Krazek - On Script - Add Npc Flag Questgiver"); diff --git a/sql/updates/world/3.3.5/2017_11_28_10_world.sql b/sql/updates/world/3.3.5/2017_11_28_10_world.sql new file mode 100644 index 00000000000..64708e7a29e --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_10_world.sql @@ -0,0 +1,16 @@ +-- Kravel Koalbeard +DELETE FROM `creature_text` WHERE `CreatureID`=4452 AND `GroupID` IN (3,4,5); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4452,3,0,"This stout sure smells strong!",12,0,100,0,0,0,1489,0,"Kravel Koalbeard"), +(4452,4,0,"Let's have just a small taste...",12,0,100,7,0,0,1491,0,"Kravel Koalbeard"), +(4452,5,0,"Wait! What am I thinking! 
We have a job to do with this stuff.",12,0,100,5,0,0,1492,0,"Kravel Koalbeard"); + +DELETE FROM `smart_scripts` WHERE `entryorguid`=4452 AND `source_type`=0 AND `id`=1; +DELETE FROM `smart_scripts` WHERE `entryorguid`=445201 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4452,0,1,0,20,0,100,0,1119,0,0,0,80,445201,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Quest 'Zanzil's Mixture and a Fool's Stout' Finished - Run Script"), +(445201,9,0,0,0,0,100,0,0,0,0,0,83,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Remove Npc Flag Questgiver"), +(445201,9,1,0,0,0,100,0,1000,1000,0,0,1,3,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Say Line 3"), +(445201,9,2,0,0,0,100,0,4000,4000,0,0,1,4,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Say Line 4"), +(445201,9,3,0,0,0,100,0,3000,3000,0,0,1,5,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Say Line 5"), +(445201,9,4,0,0,0,100,0,2000,2000,0,0,82,2,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kravel Koalbeard - On Script - Add Npc Flag Questgiver"); diff --git a/sql/updates/world/3.3.5/2017_11_28_11_world_335.sql b/sql/updates/world/3.3.5/2017_11_28_11_world_335.sql new file mode 100644 index 00000000000..e727fa42e01 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_11_world_335.sql @@ -0,0 +1,11 @@ +DELETE FROM `quest_greeting` WHERE `ID` IN (10537,4046,4452,2216,2706,5412,5641,4498,2817); +INSERT INTO `quest_greeting` (`ID`, `Type`, `GreetEmoteType`, `GreetEmoteDelay`, `Greeting`, `VerifiedBuild`) VALUES +(10537,0,0,0,"We cannot take care of all the threats in this area alone. We could use another fighting hand, $n.",0), -- Cliffwatcher Longhorn +(2216,0,0,0,"We are but so close to developing the New Plague that our Dark Lady desires with such fervor.",0), -- Apothecary Lydon +(4452,0,0,0,"Come a little closer. We have important matters to discuss, you and I.$B$BAnd some of them we don't want everyone to hear...",0), -- Kravel Koalbeard +(4046,0,1,0,"You must listen, young $c. Listen to the whisperings in the darkness, for they offer guidance in these troubled times.",0), -- Magatha Grimtotem +(2706,0,0,0,"Thanks to the Warchief, even here in the ruins of our former prison some hope remains, and the Horde rises anew.",0), -- Tor'gan +(5412,0,0,0,"The centaur clans rule the wastes of Desolace. If united, they would be a terrible force. It is then good that the centaur clans are not united but instead bicker and war amongst themselves.",0), -- Gurda Wildmane +(5641,0,1,0,"The main threat Thrall wishes dealt with is the Burning Blade---members of the Horde that have given their loyalty to the demons. They seek to practice their dark magic and care little for Thrall's visions of the Horde's future here in Kalimdor.",0), -- Takata Steelblade +(4498,0,0,0,"Greetings, $c.",0), -- Maurin Bonesplitter +(2817,0,0,0,"You must be hard up to be wandering this Badlands, $c. A hard up like me.$B$BOr maybe you're here because you're crazy. 
Crazy, like me.",0); -- Rigglefuzz diff --git a/sql/updates/world/3.3.5/2017_11_28_12_world.sql b/sql/updates/world/3.3.5/2017_11_28_12_world.sql new file mode 100644 index 00000000000..0c667dfc5bd --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_12_world.sql @@ -0,0 +1,5 @@ +-- +DELETE FROM `creature_text` WHERE `CreatureID`=35476 AND `GroupID`=0 AND `ID` IN (2,3); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(35476,0,2,'It won''t be long before Fizzlebang is a household word, used to scare children into bed! "Beware, or Fizzlebang will summon a mighty demon to eat you!"',12,0,100,1,0,0,35817,0,"Wilfred Fizzlebang"), +(35476,0,3,'I, Wilfred Fizzlebang, have been invited to this quaint tournament to make use of my extensive knowledge of the summoning arts. What better choice?',12,0,100,1,0,0,35815,0,"Wilfred Fizzlebang"); diff --git a/sql/updates/world/3.3.5/2017_11_28_13_world.sql b/sql/updates/world/3.3.5/2017_11_28_13_world.sql new file mode 100644 index 00000000000..fcff9e52d6d --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_13_world.sql @@ -0,0 +1,11 @@ +-- Gnome Pit Boss & Goblin Pit Boss +DELETE FROM `creature_text` WHERE `CreatureID` IN (4495,4496); +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4495,0,0,"Look at what $n brought us! Let's drink up!",12,0,100,22,0,0,8242,0,"Gnome Pit Boss"), +(4496,0,0,"$n brought us booze! Let's party!",12,0,100,22,0,0,8244,0,"Goblin Pit Boss"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry` IN (4495,4496); +DELETE FROM `smart_scripts` WHERE `entryorguid` IN (4495,4496) AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4495,0,0,0,20,0,100,0,1120,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Gnome Pit Boss - On Quest 'Get the Gnomes Drunk' Finished - Say Line 0"), +(4496,0,0,0,20,0,100,0,1121,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Goblin Pit Boss - On Quest 'Get the Goblins Drunk' Finished - Say Line 0"); diff --git a/sql/updates/world/3.3.5/2017_11_28_14_world_335.sql b/sql/updates/world/3.3.5/2017_11_28_14_world_335.sql new file mode 100644 index 00000000000..a4825888552 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_14_world_335.sql @@ -0,0 +1,225 @@ +-- Forsaken Raider +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=17108; +DELETE FROM `smart_scripts` WHERE `entryorguid`=17108 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES 
+(17108,0,0,0,0,0,100,0,3000,7000,11000,15000,11,7992,32,0,0,0,0,2,0,0,0,0,0,0,0,"Forsaken Raider - In Combat - Cast Slowing Poison"), +(17108,0,1,0,25,0,100,0,0,0,0,0,11,6718,0,0,0,0,0,1,0,0,0,0,0,0,0,"Forsaken Raider - On Reset - Cast Phasing Stealth"); + +-- Forest Moss Creeper +DELETE FROM `smart_scripts` WHERE `entryorguid`=2350 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2350,0,0,0,0,0,100,0,3000,6000,31000,36000,11,3396,0,0,0,0,0,2,0,0,0,0,0,0,0,"Forest Moss Creeper - In Combat - Cast Corrosive Poison"); + +-- Giant Moss Creeper +DELETE FROM `smart_scripts` WHERE `entryorguid`=2349 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2349,0,0,0,0,0,100,0,3000,6000,31000,36000,11,3396,0,0,0,0,0,2,0,0,0,0,0,0,0,"Giant Moss Creeper - In Combat - Cast Corrosive Poison"); + +-- Mudsnout Shaman +DELETE FROM `smart_scripts` WHERE `entryorguid`=2373 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2373,0,1,0,14,0,100,0,400,40,14000,19000,11,11014,0,0,0,0,0,7,0,0,0,0,0,0,0,"Mudsnout Shaman - Friendly At 400 Health - Cast Healing Wave"); + +-- Mudsnout Gnoll +DELETE FROM `smart_scripts` WHERE `entryorguid`=2372 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2372,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Mudsnout Gnoll - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Hillsbrad Councilman +UPDATE `smart_scripts` SET `event_param2`=8, `comment`="Hillsbrad Councilman - Within 0-8 Range - Cast 'Frost Nova'" WHERE `entryorguid`=2387 AND `source_type`=0 AND `id`=2; + +-- Magistrate Burnside +UPDATE `smart_scripts` SET `action_param2`=2 WHERE `entryorguid`=2335 AND `source_type`=0 AND `id` IN (1,2); + +-- Syndicate Shadow Mage +UPDATE `smart_scripts` SET `event_param3`=3400, `event_param4`=4800 WHERE `entryorguid`=2244 AND `source_type`=0 AND `id`=0; + +-- Syndicate Watchman 
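+-- The two Syndicate scripts below reuse the flee pattern applied to the Mudsnout
+-- Gnoll above: event_type 2 (health pct) with params 0/15 fires between 0% and
+-- 15% health, event_flags 1 makes the event non-repeatable, and action_type 25
+-- (flee for assist) with action_param1=1 flees with the emote text. A sketch of
+-- that shared row, with <entry> as a placeholder (illustrative, matching the rows below):
+-- (<entry>,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"... - Between 0-15% Health - Flee For Assist (No Repeat)")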
+DELETE FROM `smart_scripts` WHERE `entryorguid`=2261 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2261,0,0,0,25,0,100,0,0,0,0,0,11,3582,0,0,0,0,0,1,0,0,0,0,0,0,0,"Syndicate Watchman - On Reset - Cast Torch Burst"), +(2261,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Syndicate Watchman - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Syndicate Rogue +DELETE FROM `smart_scripts` WHERE `entryorguid`=2260 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2260,0,0,0,67,0,100,0,6000,8000,0,0,11,37685,0,0,0,0,0,2,0,0,0,0,0,0,0,"Syndicate Rogue - On Behind Target - Cast Backstab"), +(2260,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Syndicate Rogue - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Dun Garok Priest +DELETE FROM `smart_scripts` WHERE `entryorguid`=2346 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2346,0,1,0,14,0,100,0,250,40,15000,21000,11,11642,0,0,0,0,0,7,0,0,0,0,0,0,0,"Dun Garok Priest - Friendly At 250 Health - Cast Heal"); + +-- Remove wrong texts from Jailor Marlgen +DELETE FROM `creature_text` WHERE `CreatureID`=2428; +DELETE FROM `smart_scripts` WHERE `entryorguid`=2428 AND `source_type`=0 AND `id`=0; + +-- Random movement for some creatures +UPDATE `creature` SET `spawndist`=5, `MovementType`=1 WHERE `guid` IN (15959,15954,15962,15956,16025,16027); +UPDATE `creature` SET `spawndist`=15, `MovementType`=1 WHERE `guid` IN (15839,15974,90785); + +-- Add emote for Hillsbrad Apprentice Blacksmith +UPDATE `creature_addon` SET `emote`=233 WHERE `guid`=15958; + +-- Fix bytes2 for Dun Garok Riflemen +UPDATE `creature_addon` SET `bytes2`=2 WHERE `guid` IN (15686,15858,15869,15874,16105,16113,16167,16179,16180,15699,16183); + +-- Pooling for Creepthess +SET @GUID := 40492; +DELETE FROM `creature` WHERE `guid` IN (@GUID, @GUID+1); +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,14279,0,0,0,1,1,0,0,-747.954,278.299,48.0277,6.23466,72000,15,0,665,0,1,0,0,0,"",0), 
+(@GUID+1,14279,0,0,0,1,1,0,0,-729.424,-202.325,39.7752,3.78422,72000,15,0,665,0,1,0,0,0,"",0); + +DELETE FROM `pool_template` WHERE `entry`=1111; +INSERT INTO `pool_template` (`entry`, `max_limit`, `description`) VALUES +(1111,1,"Creepthess"); + +DELETE FROM `pool_creature` WHERE `pool_entry`=1111; +INSERT INTO `pool_creature` (`guid`, `pool_entry`, `chance`, `description`) VALUES +(@GUID,1111,0,"Creepthess (14279) - Spawn 1"), +(@GUID+1,1111,0,"Creepthess (14279) - Spawn 2"), +(90785,1111,0,"Creepthess (14279) - Spawn 3"); + +-- Pooling for Big Samras +SET @GUID := 40494; +DELETE FROM `creature` WHERE `guid` IN (@GUID, @GUID+1, @GUID+2); +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,14280,0,0,0,1,1,0,0,-636.669,-1651.57,60.7494,0.490619,43200,15,0,665,0,1,0,0,0,"",0), +(@GUID+1,14280,0,0,0,1,1,0,0,-281.772,-1664.94,95.4076,3.78422,43200,15,0,665,0,1,0,0,0,"",0), +(@GUID+2,14280,0,0,0,1,1,0,0,-206.604,-1252.77,114.217,1.35927,43200,15,0,665,0,1,0,0,0,"",0); + +DELETE FROM `pool_template` WHERE `entry`=1112; +INSERT INTO `pool_template` (`entry`, `max_limit`, `description`) VALUES +(1112,1,"Big Samras"); + +DELETE FROM `pool_creature` WHERE `pool_entry`=1112; +INSERT INTO `pool_creature` (`guid`, `pool_entry`, `chance`, `description`) VALUES +(@GUID,1112,0,"Big Samras (14280) - Spawn 1"), +(@GUID+1,1112,0,"Big Samras (14280) - Spawn 2"), +(@GUID+2,1112,0,"Big Samras (14280) - Spawn 3"), +(15839,1112,0,"Big Samras (14280) - Spawn 4"); + +-- Pathing for Lady Zephris +UPDATE `creature` SET `position_x`=-1276.08, `position_y`=-976, `position_z`=-0.561754, `spawndist`=0, `MovementType`=2 WHERE `guid`=16047; +DELETE FROM `creature_addon` WHERE `guid`=16047; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(16047,160470,0,0,1,0,""); + +DELETE FROM `waypoint_data` WHERE `id`=160470; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +(160470,1,-1276.08,-976,-0.561754,0,0,0,0,100,0), +(160470,2,-1258.03,-950.293,1.1057,0,0,0,0,100,0), +(160470,3,-1245.88,-925.964,0.299947,0,0,0,0,100,0), +(160470,4,-1241.09,-902.986,0.176827,0,0,0,0,100,0), +(160470,5,-1216.54,-879.045,-0.012203,0,0,0,0,100,0), +(160470,6,-1204.1,-862.604,-0.022923,0,0,0,0,100,0), +(160470,7,-1177.1,-836.439,0.405907,0,0,0,0,100,0), +(160470,8,-1155.64,-807.945,-0.769681,0,0,0,0,100,0), +(160470,9,-1134.61,-785.771,-0.684184,0,0,0,0,100,0), +(160470,10,-1111.14,-792.039,-0.047394,0,0,0,0,100,0), +(160470,11,-1120.56,-803.641,2.45509,0,0,0,0,100,0), +(160470,12,-1149.99,-814.86,1.4159,0,0,0,0,100,0), +(160470,13,-1170.68,-831.741,0.905636,0,0,0,0,100,0), +(160470,14,-1186.74,-842.143,-0.424433,0,0,0,0,100,0), +(160470,15,-1205.23,-862.842,-0.181762,0,0,0,0,100,0), +(160470,16,-1221.71,-885.192,-0.091555,0,0,0,0,100,0), +(160470,17,-1244.15,-916.463,0.404305,0,0,0,0,100,0), +(160470,18,-1261.29,-949.49,0.631858,0,0,0,0,100,0), +(160470,19,-1277.74,-975.288,-0.7961,0,0,0,0,100,0), +(160470,20,-1290.77,-995.08,-0.895832,0,0,0,0,100,0), +(160470,21,-1313.06,-1004.87,-1.52388,0,0,0,0,100,0), +(160470,22,-1344.6,-1026.51,0.392506,0,0,0,0,100,0), 
+(160470,23,-1366.78,-1036.92,2.69835,0,0,0,0,100,0), +(160470,24,-1392.12,-1048.81,3.73084,0,0,0,0,100,0), +(160470,25,-1414.89,-1059.5,4.62124,0,0,0,0,100,0), +(160470,26,-1433.06,-1083.33,8.08795,0,0,0,0,100,0), +(160470,27,-1441.77,-1091.69,9.71514,0,0,0,0,100,0), +(160470,28,-1457.46,-1095.82,3.53264,0,0,0,0,100,0), +(160470,29,-1468.13,-1097.07,-1.56115,0,0,0,0,100,0), +(160470,30,-1472.68,-1080.05,-1.56115,0,0,0,0,100,0), +(160470,31,-1458.45,-1062.96,-1.56115,0,0,0,0,100,0), +(160470,32,-1451.28,-1056.44,-1.56115,0,0,0,0,100,0), +(160470,33,-1432.58,-1051.24,-0.703612,0,0,0,0,100,0), +(160470,34,-1409.8,-1040.14,-0.755762,0,0,0,0,100,0), +(160470,35,-1385.61,-1038.06,0.275001,0,0,0,0,100,0), +(160470,36,-1357.3,-1024.25,-0.809856,0,0,0,0,100,0), +(160470,37,-1326.17,-1014.01,-1.29519,0,0,0,0,100,0), +(160470,38,-1303.49,-997.97,-1.23017,0,0,0,0,100,0); + +-- Pathing for Scargil +UPDATE `creature` SET `position_x`=-1172.77, `position_y`=123.663, `position_z`=0.157823, `spawndist`=0, `MovementType`=2 WHERE `guid`=15970; +DELETE FROM `creature_addon` WHERE `guid`=15970; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(15970,159700,0,0,1,0,""); + +DELETE FROM `waypoint_data` WHERE `id`=159700; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +(159700,1,-1172.77,123.663,0.157823,0,0,0,0,100,0), +(159700,2,-1172.87,118.873,0.322706,0,0,0,0,100,0), +(159700,3,-1164.9,111.492,3.19687,0,0,0,0,100,0), +(159700,4,-1159.58,90.6803,1.9422,0,0,0,0,100,0), +(159700,5,-1152.25,56.6911,0.424014,0,0,0,0,100,0), +(159700,6,-1151.73,33.0373,-0.172392,0,0,0,0,100,0), +(159700,7,-1151.24,14.382,-0.125738,0,0,0,0,100,0), +(159700,8,-1144.4,0.226702,-0.029557,0,0,0,0,100,0), +(159700,9,-1133.86,-10.8596,-0.189161,0,0,0,0,100,0), +(159700,10,-1104.63,-13.0749,0.554654,0,0,0,0,100,0), +(159700,11,-1081.96,-16.5233,1.19604,0,0,0,0,100,0), +(159700,12,-1073.98,-31.1868,0.77585,0,0,0,0,100,0), +(159700,13,-1089.35,-31.3077,-1.07762,0,0,0,0,100,0), +(159700,14,-1115.1,-22.2406,-1.09916,0,0,0,0,100,0), +(159700,15,-1138.02,-7.81281,-0.156224,0,0,0,0,100,0), +(159700,16,-1150.51,10.8034,-0.153825,0,0,0,0,100,0), +(159700,17,-1154.11,26.389,-0.769621,0,0,0,0,100,0), +(159700,18,-1154.74,64.912,0.486252,0,0,0,0,100,0), +(159700,19,-1150.34,95.8842,3.87744,0,0,0,0,100,0), +(159700,20,-1143.77,109.315,7.45656,0,0,0,0,100,0), +(159700,21,-1146.89,127.139,5.36332,0,0,0,0,100,0), +(159700,22,-1163.18,154.262,0.071613,0,0,0,0,100,0), +(159700,23,-1151.92,184.312,0.078821,0,0,0,0,100,0), +(159700,24,-1138.85,210.151,0.64674,0,0,0,0,100,0), +(159700,25,-1120.27,230.815,2.71415,0,0,0,0,100,0), +(159700,26,-1103.25,257.063,3.6444,0,0,0,0,100,0), +(159700,27,-1088.66,273.3,2.29494,0,0,0,0,100,0), +(159700,28,-1067.18,270.992,2.63842,0,0,0,0,100,0), +(159700,29,-1033.01,278.171,0.676006,0,0,0,0,100,0), +(159700,30,-1007.7,280.798,1.6107,0,0,0,0,100,0), +(159700,31,-989.369,301.059,0.846009,0,0,0,0,100,0), +(159700,32,-981.966,323.582,-1.07191,0,0,0,0,100,0), +(159700,33,-975.038,334.123,-1.3281,0,0,0,0,100,0), +(159700,34,-963.251,334.046,2.5684,0,0,0,0,100,0), +(159700,35,-960.338,322.978,6.63507,0,0,0,0,100,0), +(159700,36,-967.984,309.869,5.90922,0,0,0,0,100,0), +(159700,37,-992.889,300.466,0.063592,0,0,0,0,100,0), +(159700,38,-1009.43,291.063,-1.3482,0,0,0,0,100,0), +(159700,39,-1032.87,284.953,-1.52593,0,0,0,0,100,0), 
+(159700,40,-1049.37,280.662,-0.629822,0,0,0,0,100,0), +(159700,41,-1078.16,282.367,-0.724659,0,0,0,0,100,0), +(159700,42,-1095.26,287.094,-1.10337,0,0,0,0,100,0), +(159700,43,-1105.95,283.475,-0.136028,0,0,0,0,100,0), +(159700,44,-1117.02,277.427,-0.169369,0,0,0,0,100,0), +(159700,45,-1121.42,258.795,0.527346,0,0,0,0,100,0), +(159700,46,-1130.18,237.408,-0.735995,0,0,0,0,100,0), +(159700,47,-1142.05,208.455,-0.004859,0,0,0,0,100,0), +(159700,48,-1155.78,174.963,-0.17143,0,0,0,0,100,0), +(159700,49,-1163.85,152.053,0.127761,0,0,0,0,100,0), +(159700,50,-1169.17,136.967,0.156267,0,0,0,0,100,0); + +-- Pathing for Tamra Stormpike +UPDATE `creature` SET `position_x`=-1316.82, `position_y`=-1212.59, `position_z`=49.9032, `spawndist`=0, `MovementType`=2 WHERE `guid`=90783; +DELETE FROM `creature_addon` WHERE `guid`=90783; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(90783,907830,0,0,1,0,""); + +DELETE FROM `waypoint_data` WHERE `id`=907830; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +(907830,1,-1316.82,-1212.59,49.9032,0,0,0,0,100,0), +(907830,2,-1353.08,-1242.71,49.9048,0,0,0,0,100,0), +(907830,3,-1357.06,-1248.49,49.9033,0,0,0,0,100,0), +(907830,4,-1329.89,-1278.96,49.9017,0,0,0,0,100,0), +(907830,5,-1289.34,-1243.49,49.9032,0,0,0,0,100,0), +(907830,6,-1331.07,-1278.83,49.9017,0,0,0,0,100,0), +(907830,7,-1357.24,-1246.72,49.9025,0,0,0,0,100,0), +(907830,8,-1334.4,-1227.01,49.9047,0,0,0,0,100,0); diff --git a/sql/updates/world/3.3.5/2017_11_28_15_world_335.sql b/sql/updates/world/3.3.5/2017_11_28_15_world_335.sql new file mode 100644 index 00000000000..21be378429c --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_15_world_335.sql @@ -0,0 +1,230 @@ +-- Demon Spirit +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=11876; +DELETE FROM `smart_scripts` WHERE `entryorguid`=11876 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(11876,0,0,0,11,0,100,0,0,0,0,0,11,7741,0,0,0,0,0,1,0,0,0,0,0,0,0,"Demon Spirit - On Respawn - Cast Summoned Demon"), +(11876,0,1,0,11,0,100,0,0,0,0,0,49,0,0,0,0,0,0,21,20,0,0,0,0,0,0,"Demon Spirit - On Respawn - Start Attacking"); + +-- Khan Dez'hepah +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5600; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5600 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5600,0,0,0,4,0,100,0,0,0,0,0,11,7165,0,0,0,0,0,1,0,0,0,0,0,0,0,"Khan Dez'hepah - On Aggro - Cast Battle Stance"), +(5600,0,1,0,0,0,100,0,5000,7000,6000,10000,11,25710,0,0,0,0,0,2,0,0,0,0,0,0,0,"Khan Dez'hepah - In Combat - 
Cast Heroic Strike"), +(5600,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Khan Dez'hepah - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Khan Hratha +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5402; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5402 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5402,0,0,0,0,0,100,0,1000,5000,12000,18000,11,9128,0,0,0,0,0,1,0,0,0,0,0,0,0,"Khan Hratha - In Combat - Cast Battle Shout"), +(5402,0,1,0,0,0,100,0,5000,7000,7000,11000,11,15496,0,0,0,0,0,2,0,0,0,0,0,0,0,"Khan Hratha - In Combat - Cast Cleave"); + +-- Khan Shaka +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=5602; +DELETE FROM `smart_scripts` WHERE `entryorguid`=5602 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(5602,0,0,0,4,0,100,0,0,0,0,0,11,7165,0,0,0,0,0,1,0,0,0,0,0,0,0,"Khan Shaka - On Aggro - Cast Battle Stance"), +(5602,0,1,0,0,0,100,0,5000,7000,12000,15000,11,9080,0,0,0,0,0,2,0,0,0,0,0,0,0,"Khan Shaka - In Combat - Cast Hamstring"), +(5602,0,2,0,0,0,100,0,6000,9000,18000,21000,11,11977,0,0,0,0,0,2,0,0,0,0,0,0,0,"Khan Shaka - In Combat - Cast Rend"); + +-- Kolkar Centaur +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4632; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4632 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4632,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Centaur - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Kolkar Destroyer +DELETE FROM `smart_scripts` WHERE `entryorguid`=4637 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4637,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Destroyer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Kolkar Mauler +DELETE FROM `smart_scripts` WHERE `entryorguid`=4634 AND `source_type`=0 AND `id`=1; 
+INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4634,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Mauler - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Kolkar Scout +DELETE FROM `smart_scripts` WHERE `entryorguid`=4633 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4633,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Scout - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Kolkar Windchaser +DELETE FROM `smart_scripts` WHERE `entryorguid`=4635 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4635,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Windchaser - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Kolkar Battle Lord +DELETE FROM `smart_scripts` WHERE `entryorguid`=4636 AND `source_type`=0 AND `id`=3; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4636,0,3,0,2,0,100,1,0,20,0,0,39,30,1,0,0,0,0,1,0,0,0,0,0,0,0,"Kolkar Battle Lord - Between 0-20% Health - Call For Help (No Repeat)"); + +-- Magram Outrunner +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4639; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4639 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4639,0,0,0,2,0,100,1,0,20,0,0,39,30,1,0,0,0,0,1,0,0,0,0,0,0,0,"Magram Outrunner - Between 0-20% Health - Call For Help (No Repeat)"); + +-- Magram Wrangler +DELETE FROM `smart_scripts` WHERE `entryorguid`=4640 AND `source_type`=0 AND `id`=1; +INSERT INTO 
`smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4640,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magram Wrangler - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Magram Scout +DELETE FROM `smart_scripts` WHERE `entryorguid`=4638 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4638,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magram Scout - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Magram Mauler +DELETE FROM `smart_scripts` WHERE `entryorguid`=4645 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4645,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magram Mauler - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Magram Stormer +DELETE FROM `smart_scripts` WHERE `entryorguid`=4642 AND `source_type`=0 AND `id`=3; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4642,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magram Stormer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Magram Windchaser +DELETE FROM `smart_scripts` WHERE `entryorguid`=4641 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4641,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magram Windchaser - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Magram Marauder +DELETE FROM `smart_scripts` WHERE `entryorguid`=4644 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, 
`event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4644,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Magram Marauder - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Gelkis Mauler +DELETE FROM `smart_scripts` WHERE `entryorguid`=4652 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4652,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gelkis Mauler - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Gelkis Earthcaller +DELETE FROM `smart_scripts` WHERE `entryorguid`=4651 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4651,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gelkis Earthcaller - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Gelkis Scout +DELETE FROM `smart_scripts` WHERE `entryorguid`=4647 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4647,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gelkis Scout - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Gelkis Stamper +DELETE FROM `smart_scripts` WHERE `entryorguid`=4648 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4648,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gelkis Stamper - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Gelkis Windchaser +DELETE FROM `smart_scripts` WHERE `entryorguid`=4649 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, 
`event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4649,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gelkis Windchaser - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Gelkis Marauder +DELETE FROM `smart_scripts` WHERE `entryorguid`=4653 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4653,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Gelkis Marauder - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Gelkis Outrunner +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=4646; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4646 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4646,0,0,0,2,0,100,1,0,20,0,0,39,30,1,0,0,0,0,1,0,0,0,0,0,0,0,"Gelkis Outrunner - Between 0-20% Health - Call For Help (No Repeat)"); + +-- Slitherblade Naga +DELETE FROM `smart_scripts` WHERE `entryorguid`=4711 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4711,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Slitherblade Naga - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Slitherblade Sorceress +DELETE FROM `smart_scripts` WHERE `entryorguid`=4712 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4712,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Slitherblade Sorceress - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Slitherblade Myrmidon +DELETE FROM `smart_scripts` WHERE `entryorguid`=4714 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, 
`event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4714,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Slitherblade Myrmidon - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Slitherblade Razortail +DELETE FROM `smart_scripts` WHERE `entryorguid`=4715 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4715,0,1,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Slitherblade Razortail - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Slitherblade Oracle +DELETE FROM `smart_scripts` WHERE `entryorguid`=4718 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4718,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Slitherblade Oracle - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Slitherblade Sea Witch +DELETE FROM `smart_scripts` WHERE `entryorguid`=4719 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4719,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Slitherblade Sea Witch - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Add equipment to some creatures +DELETE FROM `creature_equip_template` WHERE `CreatureID` IN (4641,4645); +INSERT INTO `creature_equip_template` (`CreatureID`, `ID`, `ItemID1`, `ItemID2`, `ItemID3`, `VerifiedBuild`) VALUES +(4641,1,5303,0,0,0), +(4645,1,5281,0,0,0); + +-- Pathing for Centaur Pariah +UPDATE `creature` SET `position_x`=-2145.71, `position_y`=1966.42, `position_z`=84.4919, `spawndist`=0, `MovementType`=2 WHERE `guid`=29069; + +DELETE FROM `creature_addon` WHERE `guid`=29069; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(29069,290690,0,0,1,0,""); + +DELETE FROM `waypoint_data` WHERE `id`=290690; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +(290690,1,-2145.71,1966.42,84.4919,0,0,0,0,100,0), +(290690,2,-2148.35,1973.16,84.0701,0,0,0,0,100,0), 
+(290690,3,-2156.93,1972.24,80.9217,0,0,0,0,100,0), +(290690,4,-2158.67,1959.92,78.2252,0,0,0,0,100,0), +(290690,5,-2161.1,1953.68,76.6696,0,0,0,0,100,0), +(290690,6,-2159.54,1948.01,74.4792,0,0,0,0,100,0), +(290690,7,-2166.39,1942.92,69.9358,0,0,0,0,100,0), +(290690,8,-2165.94,1938.13,66.2389,0,0,0,0,100,0), +(290690,9,-2167.71,1935.14,63.539,0,0,0,0,100,0), +(290690,10,-2173.82,1935.16,61.1357,0,0,0,0,100,0), +(290690,11,-2185.89,1953.4,61.0883,0,0,0,0,100,0), +(290690,12,-2181.04,1971.95,63.0648,0,0,0,0,100,0), +(290690,13,-2185.23,1996.62,64.0418,0,0,0,0,100,0), +(290690,14,-2184.85,2019.55,64.0418,0,0,0,0,100,0), +(290690,15,-2182.58,2034.38,64.3299,0,0,0,0,100,0), +(290690,16,-2187.89,2045.21,65.1992,0,0,0,0,100,0), +(290690,17,-2175.83,2065.84,63.6989,0,0,0,0,100,0), +(290690,18,-2165.2,2087.16,64.4523,0,0,0,0,100,0), +(290690,19,-2156.65,2104.11,61.6077,0,0,0,0,100,0), +(290690,20,-2151.67,2118.24,60.7861,0,0,0,0,100,0), +(290690,21,-2148.89,2129.81,63.6364,0,0,0,0,100,0), +(290690,22,-2144.39,2148.28,65.8843,0,0,0,0,100,0), +(290690,23,-2141.23,2166.66,66.9008,0,0,0,0,100,0), +(290690,24,-2139.35,2184.03,66.5107,0,0,0,0,100,0), +(290690,25,-2134.91,2204.03,65.2096,0,0,0,0,100,0), +(290690,26,-2131.41,2219.61,64.1073,0,0,0,0,100,0), +(290690,27,-2128.85,2237.09,64.7163,0,0,0,0,100,0), +(290690,28,-2128.27,2222.75,63.9388,0,0,0,0,100,0), +(290690,29,-2133.74,2204.02,65.2261,0,0,0,0,100,0), +(290690,30,-2137.84,2186.67,66.0619,0,0,0,0,100,0), +(290690,31,-2141.6,2170.8,67.1661,0,0,0,0,100,0), +(290690,32,-2147.93,2144.09,66.0683,0,0,0,0,100,0), +(290690,33,-2152.54,2124.61,62.9209,0,0,0,0,100,0), +(290690,34,-2154.5,2116.3,61.1993,0,0,0,0,100,0), +(290690,35,-2160.27,2091.94,63.4087,0,0,0,0,100,0), +(290690,36,-2171.46,2076.47,64.0607,0,0,0,0,100,0), +(290690,37,-2180,2055.74,63.8618,0,0,0,0,100,0), +(290690,38,-2187.17,2043.48,65.118,0,0,0,0,100,0), +(290690,39,-2182.03,2025.33,64.3237,0,0,0,0,100,0), +(290690,40,-2185.91,2006.41,64.0427,0,0,0,0,100,0), +(290690,41,-2184.03,1990.34,64.0427,0,0,0,0,100,0), +(290690,42,-2182.62,1974.82,63.3536,0,0,0,0,100,0), +(290690,43,-2185.55,1962.21,62.0913,0,0,0,0,100,0), +(290690,44,-2182.01,1945.01,60.5147,0,0,0,0,100,0), +(290690,45,-2174.77,1934.49,61.1065,0,0,0,0,100,0), +(290690,46,-2165.66,1935.02,64.5662,0,0,0,0,100,0), +(290690,47,-2166.1,1945.12,71.6517,0,0,0,0,100,0), +(290690,48,-2159.56,1948.89,74.686,0,0,0,0,100,0), +(290690,49,-2159.53,1958.33,77.9476,0,0,0,0,100,0), +(290690,50,-2156.65,1975.9,81.5158,0,0,0,0,100,0), +(290690,51,-2145.71,1971.54,84.3149,0,0,0,0,100,0), +(290690,52,-2143.66,1964.77,84.0694,0,0,0,0,100,0); diff --git a/sql/updates/world/3.3.5/2017_11_28_17_world_335.sql b/sql/updates/world/3.3.5/2017_11_28_17_world_335.sql new file mode 100644 index 00000000000..32cf1046279 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_17_world_335.sql @@ -0,0 +1,142 @@ +-- Molok the Crusher +SET @GUID := 43465; +DELETE FROM `creature` WHERE `guid`=@GUID; +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,2604,0,0,0,1,1,0,1,-2056.97,-2785.61,68.571,5.47225,54000,0,0,1678,0,0,0,0,0,"",0); + +-- Foulbelly +SET @GUID := 84200; +DELETE FROM `creature` WHERE `guid`=@GUID; +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, 
`areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,2601,0,0,0,1,1,0,1,-1777.47,-1568.15,53.0089,3.82291,172800,0,0,6605,1381,0,0,0,0,"",0); + +-- Ruul Onestone +SET @GUID := 84227; +DELETE FROM `creature` WHERE `guid`=@GUID; +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,2602,0,0,0,1,1,0,1,-1797.35,-1507.83,99.3938,6.12074,180000,0,0,3540,3708,0,0,0,0,"",0); + +UPDATE `creature_template` SET `ManaModifier`=3 WHERE `entry`=2602; + +-- Darbel Montrose +SET @GUID := 84258; +DELETE FROM `creature` WHERE `guid` BETWEEN @GUID AND @GUID+3; +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,2598,0,0,0,1,1,0,1,-1590.25,-1889.45,68.5119,1.52227,57600,0,0,2360,2472,0,0,0,0,"",0), +(@GUID+1,2598,0,0,0,1,1,0,1,-1631.82,-1875.26,81.3837,2.99489,57600,5,0,2360,2472,1,0,0,0,"",0), +(@GUID+2,2598,0,0,0,1,1,0,1,-1622.46,-1775.05,81.3642,3.78814,57600,0,0,2360,2472,0,0,0,0,"",0), +(@GUID+3,2598,0,0,0,1,1,0,1,-1683.51,-1853.98,86,3.05772,57600,0,0,2360,2472,0,0,0,0,"",0); + +DELETE FROM `pool_template` WHERE `entry`=1114; +INSERT INTO `pool_template` (`entry`, `max_limit`, `description`) VALUES +(1114,1,"Darbel Montrose"); + +DELETE FROM `pool_creature` WHERE `pool_entry`=1114; +INSERT INTO `pool_creature` (`guid`, `pool_entry`, `chance`, `description`) VALUES +(@GUID,1114,0,"Darbel Montrose (2598) - Spawn 1"), +(@GUID+1,1114,0,"Darbel Montrose (2598) - Spawn 2"), +(@GUID+2,1114,0,"Darbel Montrose (2598) - Spawn 3"), +(@GUID+3,1114,0,"Darbel Montrose (2598) - Spawn 4"); + +-- Kovork +SET @GUID := 84230; +DELETE FROM `creature` WHERE `guid` IN (@GUID, @GUID+1); +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,2603,0,0,0,1,1,0,1,-1190.7,-2055.29,42.9287,3.88181,27000,0,0,1469,0,0,0,0,0,"",0), +(@GUID+1,2603,0,0,0,1,1,0,1,-1185.5,-1959.2,24.088,2.65659,27000,3,0,1469,0,1,0,0,0,"",0); + +DELETE FROM `pool_template` WHERE `entry`=1115; +INSERT INTO `pool_template` (`entry`, `max_limit`, `description`) VALUES +(1115,1,"Kovork"); + +DELETE FROM `pool_creature` WHERE `pool_entry`=1115; +INSERT INTO `pool_creature` (`guid`, `pool_entry`, `chance`, `description`) VALUES +(@GUID,1115,0,"Kovork (2603) - Spawn 1"), +(@GUID+1,1115,0,"Kovork (2603) - Spawn 2"); + +-- Zalas Witherbark +SET @GUID := 84293; +DELETE FROM `creature` WHERE `guid` BETWEEN @GUID AND @GUID+3; +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, 
`position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,2605,0,0,0,1,1,0,0,-2019.19,-3306.55,54.216,5.47616,252000,0,0,1410,2566,0,0,0,0,"",0), +(@GUID+1,2605,0,0,0,1,1,0,0,-2057.33,-3274,49.8653,4.51998,252000,0,0,1410,2566,2,0,0,0,"",0), +(@GUID+2,2605,0,0,0,1,1,0,0,-2042.3,-3364.3,60.2045,0.712735,252000,0,0,1410,2566,0,0,0,0,"",0), +(@GUID+3,2605,0,0,0,1,1,0,0,-2072.1,-3297.35,67.0706,1.54131,252000,0,0,1410,2566,0,0,0,0,"",0); + +DELETE FROM `pool_template` WHERE `entry`=1116; +INSERT INTO `pool_template` (`entry`, `max_limit`, `description`) VALUES +(1116,1,"Zalas Witherbark"); + +DELETE FROM `pool_creature` WHERE `pool_entry`=1116; +INSERT INTO `pool_creature` (`guid`, `pool_entry`, `chance`, `description`) VALUES +(@GUID,1116,0,"Zalas Witherbark (2605) - Spawn 1"), +(@GUID+1,1116,0,"Zalas Witherbark (2605) - Spawn 2"), +(@GUID+2,1116,0,"Zalas Witherbark (2605) - Spawn 3"), +(@GUID+3,1116,0,"Zalas Witherbark (2605) - Spawn 4"); + +DELETE FROM `creature_template_addon` WHERE `entry`=2605; +INSERT INTO `creature_template_addon` (`entry`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(2605,0,0,8,1,0,""); -- Standstate Kneel for Zalas Witherbark + +DELETE FROM `creature_addon` WHERE `guid`=@GUID+1; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(@GUID+1,(@GUID+1)*10,0,0,1,0,""); -- Pathing for Zalas Witherbark (Spawn 2) + +DELETE FROM `waypoint_data` WHERE `id`=(@GUID+1)*10; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +((@GUID+1)*10,1,-2056.85,-3271.49,49.4426,0,0,0,0,100,0), +((@GUID+1)*10,2,-2061.19,-3271.54,50.2861,0,0,0,0,100,0), +((@GUID+1)*10,3,-2083.62,-3276.01,51.4356,0,0,0,0,100,0), +((@GUID+1)*10,4,-2098.75,-3286.29,51.7022,0,0,0,0,100,0), +((@GUID+1)*10,5,-2106.52,-3322.71,56.8964,0,0,0,0,100,0), +((@GUID+1)*10,6,-2115.63,-3344.91,58.7511,0,0,0,0,100,0), +((@GUID+1)*10,7,-2109.39,-3369.78,61.0127,0,0,0,0,100,0), +((@GUID+1)*10,8,-2091.81,-3381.89,59.4361,0,0,0,0,100,0), +((@GUID+1)*10,9,-2095.79,-3366.71,59.3,0,0,0,0,100,0), +((@GUID+1)*10,10,-2093.88,-3364.27,59.1176,0,0,0,0,100,0), +((@GUID+1)*10,11,-2099.47,-3360.35,59.2716,0,0,0,0,100,0), +((@GUID+1)*10,12,-2102.05,-3333.13,58.104,0,0,0,0,100,0), +((@GUID+1)*10,13,-2095.42,-3295.27,52.4655,0,0,0,0,100,0), +((@GUID+1)*10,14,-2078.4,-3272.51,51.7336,0,0,0,0,100,0), +((@GUID+1)*10,15,-2055.61,-3271.22,49.3672,0,0,0,0,100,0); + +-- Nimar the Slayer +SET @GUID := 84402; +DELETE FROM `creature` WHERE `guid` BETWEEN @GUID AND @GUID+3; +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `ScriptName`, `VerifiedBuild`) VALUES +(@GUID,2606,0,0,0,1,1,0,0,-1902.16,-3193.09,56.5808,5.63212,27000,0,0,1537,0,0,0,0,0,"",0), +(@GUID+1,2606,0,0,0,1,1,0,0,-1672.56,-3246.93,25.7297,4.51998,27000,0,0,1537,0,2,0,0,0,"",0), +(@GUID+2,2606,0,0,0,1,1,0,0,-1701.21,-3509.16,60.2556,6.26748,27000,0,0,1537,0,0,0,0,0,"",0), +(@GUID+3,2606,0,0,0,1,1,0,0,-1810.14,-3417.2,45.4232,0.985671,27000,0,0,1537,0,0,0,0,0,"",0); + +DELETE FROM 
`pool_template` WHERE `entry`=1113; +INSERT INTO `pool_template` (`entry`, `max_limit`, `description`) VALUES +(1113,1,"Nimar the Slayer"); + +DELETE FROM `pool_creature` WHERE `pool_entry`=1113; +INSERT INTO `pool_creature` (`guid`, `pool_entry`, `chance`, `description`) VALUES +(@GUID,1113,0,"Nimar the Slayer (2606) - Spawn 1"), +(@GUID+1,1113,0,"Nimar the Slayer (2606) - Spawn 2"), +(@GUID+2,1113,0,"Nimar the Slayer (2606) - Spawn 3"), +(@GUID+3,1113,0,"Nimar the Slayer (2606) - Spawn 4"); + +DELETE FROM `creature_addon` WHERE `guid`=@GUID+1; +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(@GUID+1,(@GUID+1)*10,0,0,1,0,""); -- Pathing for Nimar the Slayer (Spawn 2) + +DELETE FROM `waypoint_data` WHERE `id`=(@GUID+1)*10; +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `orientation`, `delay`, `move_type`, `action`, `action_chance`, `wpguid`) VALUES +((@GUID+1)*10,1,-1672.56,-3246.93,25.7297,0,0,0,0,100,213867), +((@GUID+1)*10,2,-1673.33,-3252.4,25.7306,0,60000,0,0,100,213868), +((@GUID+1)*10,3,-1679.78,-3257.49,25.5371,0,0,0,0,100,213869), +((@GUID+1)*10,4,-1678.29,-3264.48,24.3447,0,0,0,0,100,213870), +((@GUID+1)*10,5,-1650.41,-3263.6,26.8072,0,0,0,0,100,213871), +((@GUID+1)*10,6,-1642.67,-3252.82,31.5431,0,0,0,0,100,213872), +((@GUID+1)*10,7,-1645.48,-3241.07,33.4071,0,0,0,0,100,213873), +((@GUID+1)*10,8,-1654.84,-3232.66,33.5165,0,0,0,0,100,213874), +((@GUID+1)*10,9,-1670.32,-3228.45,34.6906,0,0,0,0,100,213875), +((@GUID+1)*10,10,-1693.22,-3231.79,29.2882,0,0,0,0,100,213876), +((@GUID+1)*10,11,-1693.78,-3246.29,26.4924,0,0,0,0,100,213877), +((@GUID+1)*10,12,-1672.47,-3263.53,25.0571,0,0,0,0,100,213878), +((@GUID+1)*10,13,-1668.16,-3255.58,25.73,0,0,0,0,100,213879), +((@GUID+1)*10,14,-1673.27,-3253.71,25.7309,0,0,0,0,100,213880); diff --git a/sql/updates/world/3.3.5/2017_11_28_18_world_335.sql b/sql/updates/world/3.3.5/2017_11_28_18_world_335.sql new file mode 100644 index 00000000000..674d49e038b --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_28_18_world_335.sql @@ -0,0 +1,289 @@ +-- Highland Thrasher +DELETE FROM `smart_scripts` WHERE `entryorguid`=2560 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2560,0,0,0,25,0,100,0,0,0,0,0,11,8876,0,0,0,0,0,1,0,0,0,0,0,0,0,"Highland Thrasher - On Reset - Cast Thrash"); + +-- Mesa Buzzard +DELETE FROM `smart_scripts` WHERE `entryorguid`=2579 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2579,0,0,0,0,0,100,0,4000,12000,10000,18000,11,8139,32,0,0,0,0,2,0,0,0,0,0,0,0,"Mesa Buzzard - In Combat - Cast Fevered Fatigue"); + +-- Elder Mesa Buzzard +DELETE FROM `smart_scripts` WHERE `entryorguid`=2580 AND 
`source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2580,0,0,0,0,0,100,0,4000,12000,10000,18000,11,8139,32,0,0,0,0,2,0,0,0,0,0,0,0,"Elder Mesa Buzzard - In Combat - Cast Fevered Fatigue"); + +-- Plains Creeper +DELETE FROM `smart_scripts` WHERE `entryorguid`=2563 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2563,0,0,0,25,0,100,0,0,0,0,0,11,3616,0,0,0,0,0,1,0,0,0,0,0,0,0,"Plains Creeper - On Reset - Cast Poison Proc"), +(2563,0,1,0,0,0,100,0,5000,14000,16000,23000,11,4962,0,0,0,0,0,2,0,0,0,0,0,0,0,"Plains Creeper - In Combat - Cast Encasing Webs"); + +-- Giant Plains Creeper +DELETE FROM `smart_scripts` WHERE `entryorguid`=2565 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2565,0,0,0,25,0,100,0,0,0,0,0,11,3616,0,0,0,0,0,1,0,0,0,0,0,0,0,"Giant Plains Creeper - On Reset - Cast Poison Proc"), +(2565,0,1,0,0,0,100,0,5000,14000,16000,23000,11,4962,0,0,0,0,0,2,0,0,0,0,0,0,0,"Giant Plains Creeper - In Combat - Cast Encasing Webs"); + +-- Boulderfist Ogre +DELETE FROM `smart_scripts` WHERE `entryorguid`=2562 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2562,0,0,0,4,0,15,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,"Boulderfist Ogre - On Aggro - Say Line 0"), +(2562,0,1,0,2,0,100,0,0,30,16000,22000,11,4955,2,0,0,0,0,1,0,0,0,0,0,0,0,"Boulderfist Ogre - Between 0-30% Health - Cast Fist of Stone"); + +-- Boulderfist Enforcer +DELETE FROM `smart_scripts` WHERE `entryorguid`=2564 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, 
`comment`) VALUES +(2564,0,0,0,4,0,15,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,"Boulderfist Enforcer - On Aggro - Say Line 0"), +(2564,0,1,0,2,0,100,0,0,30,16000,22000,11,4955,2,0,0,0,0,1,0,0,0,0,0,0,0,"Boulderfist Enforcer - Between 0-30% Health - Cast Fist of Stone"), +(2564,0,2,0,0,0,100,0,5000,11000,30000,35000,11,13730,0,0,0,0,0,1,0,0,0,0,0,0,0,"Boulderfist Enforcer - In Combat - Cast Demoralizing Shout"); + +-- Boulderfist Brute +DELETE FROM `smart_scripts` WHERE `entryorguid`=2566 AND `source_type`=0 AND `id` IN (0,2); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2566,0,0,0,4,0,15,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,"Boulderfist Brute - On Aggro - Say Line 0"), +(2566,0,2,0,2,0,100,0,0,30,16000,22000,11,4955,2,0,0,0,0,1,0,0,0,0,0,0,0,"Boulderfist Brute - Between 0-30% Health - Cast Fist of Stone"); + +-- Boulderfist Mauler +DELETE FROM `smart_scripts` WHERE `entryorguid`=2569 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2569,0,0,0,4,0,15,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,"Boulderfist Mauler - On Aggro - Say Line 0"), +(2569,0,1,0,0,0,100,0,6000,14000,16000,24000,11,4955,2,0,0,0,0,1,0,0,0,0,0,0,0,"Boulderfist Mauler - In Combat - Cast Fist of Stone"); + +-- Boulderfist Magus +DELETE FROM `smart_scripts` WHERE `entryorguid`=2567 AND `source_type`=0 AND `id`=3; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2567,0,3,0,9,0,100,0,0,8,15000,23000,11,11831,1,0,0,0,0,1,0,0,0,0,0,0,0,"Boulderfist Magus - Within 0-8 Range - Cast Frost Nova"); + +-- Boulderfist Shaman +DELETE FROM `smart_scripts` WHERE `entryorguid`=2570 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2570,0,2,0,0,0,100,0,8000,12000,41000,45000,11,6364,0,0,0,0,0,1,0,0,0,0,0,0,0,"Boulderfist Shaman - In Combat - Cast Searing Totem"); + +-- Boulderfist Lord +DELETE FROM `smart_scripts` WHERE `entryorguid`=2571 AND `source_type`=0; +INSERT INTO `smart_scripts` 
(`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2571,0,0,0,4,0,15,0,0,0,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,"Boulderfist Lord - On Aggro - Say Line 0"), +(2571,0,1,0,4,0,100,0,0,0,0,0,11,8258,0,0,0,0,0,1,0,0,0,0,0,0,0,"Boulderfist Lord - On Aggro - Cast Devotion Aura"), +(2571,0,2,0,0,0,100,0,6000,14000,16000,24000,11,4955,2,0,0,0,0,1,0,0,0,0,0,0,0,"Boulderfist Lord - In Combat - Cast Fist of Stone"); + +DELETE FROM `creature_text` WHERE `CreatureID`=2571; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(2571,0,0,"Raaar!!! Me smash $r!",12,0,100,0,0,0,1925,0,"Boulderfist Lord"), +(2571,0,1,"Me smash! You die!",12,0,100,0,0,0,1926,0,"Boulderfist Lord"), +(2571,0,2,"I'll crush you!",12,0,100,0,0,0,1927,0,"Boulderfist Lord"); + +-- Witherbark Troll +DELETE FROM `smart_scripts` WHERE `entryorguid`=2552 AND `source_type`=0 AND `id`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2552,0,0,0,0,0,100,0,4000,12000,8000,16000,11,4974,32,0,0,0,0,2,0,0,0,0,0,0,0,"Witherbark Troll - In Combat - Cast Wither Touch"); + +-- Witherbark Shadowcaster +UPDATE `smart_scripts` SET `event_chance`=100 WHERE `entryorguid`=2553 AND `source_type`=0 AND `id`=0; + +-- Witherbark Axe Thrower +DELETE FROM `smart_scripts` WHERE `entryorguid`=2554 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2554,0,1,0,0,0,100,0,4000,12000,8000,16000,11,4974,32,0,0,0,0,2,0,0,0,0,0,0,0,"Witherbark Axe Thrower - In Combat - Cast Wither Touch"); + +-- Witherbark Headhunter +DELETE FROM `smart_scripts` WHERE `entryorguid`=2556 AND `source_type`=0 AND `id` IN (0,1); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2556,0,0,0,0,0,100,0,6000,10000,18000,24000,11,6533,0,0,0,0,0,2,0,0,0,0,0,0,0,"Witherbark Headhunter - In Combat - Cast Net"), 
+(2556,0,1,0,0,0,100,0,4000,12000,8000,16000,11,4974,32,0,0,0,0,2,0,0,0,0,0,0,0,"Witherbark Headhunter - In Combat - Cast Wither Touch"); + +-- Witherbark Witch Doctor +DELETE FROM `smart_scripts` WHERE `entryorguid`=2555 AND `source_type`=0 AND `id` IN (0,1); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2555,0,0,0,0,0,100,0,5000,7000,19000,22000,11,8190,0,0,0,0,0,1,0,0,0,0,0,0,0,"Witherbark Witch Doctor - In Combat - Cast Magma Totem"), +(2555,0,1,0,0,0,100,0,12000,14000,32000,35000,11,5605,0,0,0,0,0,1,0,0,0,0,0,0,0,"Witherbark Witch Doctor - In Combat - Cast Healing Ward"); + +-- Witherbark Shadow Hunter +DELETE FROM `smart_scripts` WHERE `entryorguid`=2557 AND `source_type`=0 AND `id` IN (0,1,2); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2557,0,0,0,0,0,100,0,2000,6000,19000,23000,11,992,0,0,0,0,0,5,0,0,0,0,0,0,0,"Witherbark Shadow Hunter - In Combat - Cast Shadow Word: Pain"), +(2557,0,1,0,0,0,100,0,4000,8000,28000,35000,11,7646,32,0,0,0,0,5,0,0,0,0,0,0,0,"Witherbark Shadow Hunter - In Combat - Cast Curse of Weakness"), +(2557,0,2,0,0,0,100,0,9000,14000,31000,36000,11,6726,0,0,0,0,0,2,0,0,0,0,0,0,0,"Witherbark Shadow Hunter - In Combat - Cast Silence"); + +-- Witherbark Berserker +DELETE FROM `smart_scripts` WHERE `entryorguid`=2558 AND `source_type`=0 AND `id`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2558,0,0,0,0,0,100,0,4000,12000,8000,16000,11,4974,32,0,0,0,0,2,0,0,0,0,0,0,0,"Witherbark Berserker - In Combat - Cast Wither Touch"); + +-- Dabyrie Laborer +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=2582; +DELETE FROM `smart_scripts` WHERE `entryorguid`=2582 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2582,0,0,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Dabyrie Laborer - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Fardel Dabyrie +DELETE FROM `smart_scripts` WHERE `entryorguid`=4479 AND `source_type`=0 AND 
`id`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4479,0,0,0,25,0,100,0,0,0,0,0,11,12787,0,0,0,0,0,1,0,0,0,0,0,0,0,"Fardel Dabyrie - On Reset - Cast Thrash"); + +-- Drywhisker Digger +DELETE FROM `smart_scripts` WHERE `entryorguid`=2574 AND `source_type`=0 AND `id`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2574,0,0,0,0,0,100,0,1000,3000,180000,180000,11,7164,0,0,0,0,0,1,0,0,0,0,0,0,0,"Drywhisker Digger - In Combat - Cast Defensive Stance"); + +-- Drywhisker Surveyor +DELETE FROM `smart_scripts` WHERE `entryorguid`=2573 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2573,0,2,0,9,0,100,0,0,8,15000,22000,11,865,1,0,0,0,0,1,0,0,0,0,0,0,0,"Drywhisker Surveyor - Within 0-8 Range - Cast Frost Nova"); + +-- Thundering Exile +DELETE FROM `smart_scripts` WHERE `entryorguid`=2762 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2762,0,0,0,0,0,100,0,0,0,3300,6600,11,9532,64,0,0,0,0,2,0,0,0,0,0,0,0,"Thundering Exile - In Combat - Cast Lightning Bolt"), +(2762,0,1,0,0,0,100,0,4000,8000,16000,24000,11,11824,0,0,0,0,0,2,0,0,0,0,0,0,0,"Thundering Exile - In Combat - Cast Shock"), +(2762,0,2,0,8,0,100,512,4132,0,0,0,41,500,0,0,0,0,0,1,0,0,0,0,0,0,0,"Thundering Exile - On Spellhit - Despawn"); + +-- Rumbling Exile +DELETE FROM `smart_scripts` WHERE `entryorguid`=2592 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2592,0,0,0,0,0,100,0,3000,12000,12000,21000,11,6524,0,0,0,0,0,1,0,0,0,0,0,0,0,"Rumbling Exile - In Combat - Cast 
Ground Tremor"); + +-- Cresting Exile +DELETE FROM `smart_scripts` WHERE `entryorguid`=2761 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2761,0,1,0,0,0,100,0,4000,9000,16000,22000,11,865,0,0,0,0,0,1,0,0,0,0,0,0,0,"Cresting Exile - In Combat - Cast Frost Nova"); + +-- Syndicate Thief +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=24477; +DELETE FROM `smart_scripts` WHERE `entryorguid`=24477 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(24477,0,0,0,67,0,100,0,4000,6000,0,0,11,7159,0,0,0,0,0,2,0,0,0,0,0,0,0,"Syndicate Thief - On Behind Target - Cast Backstab"), +(24477,0,1,0,0,0,100,0,5000,9000,18000,24000,11,6713,0,0,0,0,0,2,0,0,0,0,0,0,0,"Syndicate Thief - In Combat - Cast Disarm"), +(24477,0,2,0,25,0,100,0,0,0,0,0,11,3616,0,0,0,0,0,1,0,0,0,0,0,0,0,"Syndicate Thief - On Reset - Cast Poison Proc"), +(24477,0,3,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Syndicate Thief - Between 0-15% Health - Flee For Assist (No Repeat)"); + +-- Syndicate Prowler +DELETE FROM `smart_scripts` WHERE `entryorguid`=2588 AND `source_type`=0 AND `id` IN (2,3); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2588,0,2,0,0,0,100,0,4000,10000,16000,22000,11,14873,0,0,0,0,0,2,0,0,0,0,0,0,0,"Syndicate Prowler - In Combat - Cast Sinister Strike"), +(2588,0,3,0,0,0,100,0,8000,14000,22000,28000,11,6713,0,0,0,0,0,2,0,0,0,0,0,0,0,"Syndicate Prowler - In Combat - Cast Disarm"); + +-- Otto +DELETE FROM `smart_scripts` WHERE `entryorguid`=2599 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2599,0,0,0,0,0,100,0,9000,15000,12000,20000,11,6253,0,0,0,0,0,2,0,0,0,0,0,0,0,"Otto - In Combat - Cast Backhand"), +(2599,0,1,0,0,0,100,0,6000,11000,14000,19000,11,12555,0,0,0,0,0,2,0,0,0,0,0,0,0,"Otto - In Combat - Cast Pummel"), +(2599,0,2,0,2,0,100,1,0,15,0,0,25,1,0,0,0,0,0,0,0,0,0,0,0,0,0,"Otto - Between 0-15% Health - Flee For Assist"); + 
+-- Lord Falconcrest +DELETE FROM `smart_scripts` WHERE `entryorguid`=2597 AND `source_type`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=259700 AND `source_type`=9; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2597,0,0,0,1,0,100,0,5000,15000,150000,180000,80,259700,0,0,0,0,0,1,0,0,0,0,0,0,0,"Lord Falconcrest - Out of Combat - Run Script"), +(2597,0,1,0,0,0,100,0,2000,5000,6000,9000,11,32064,32,0,0,0,0,1,0,0,0,0,0,0,0,"Lord Falconcrest - In Combat - Cast Battle Shout"), +(2597,0,2,0,0,0,100,0,5000,10000,16000,26000,11,6713,0,0,0,0,0,2,0,0,0,0,0,0,0,"Lord Falconcrest - In Combat - Cast Disarm"), +(259700,9,0,0,0,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Lord Falconcrest - On Script - Say Line 0"), +(259700,9,1,0,0,0,100,0,8000,8000,0,0,1,0,0,0,0,0,0,19,2599,0,0,0,0,0,0,"Lord Falconcrest - On Script - Say Line 0 (Otto)"); + +-- Stromgarde Troll Hunter +DELETE FROM `smart_scripts` WHERE `entryorguid`=2583 AND `source_type`=0 AND `id`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2583,0,0,0,0,0,100,0,2000,5000,19000,23000,11,2767,0,0,0,0,0,2,0,0,0,0,0,0,0,"Stromgarde Troll Hunter - In Combat - Cast Shadow Word: Pain"); + +-- Feeboz +UPDATE `smart_scripts` SET `target_type`=1 WHERE `entryorguid`=4063 AND `source_type`=0 AND `id`=0; +DELETE FROM `smart_scripts` WHERE `entryorguid`=4063 AND `source_type`=0 AND `id`=3; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(4063,0,3,0,1,0,100,0,5000,15000,65000,75000,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Feeboz - Out of Combat - Say Line 1"); + +DELETE FROM `creature_text` WHERE `CreatureID`=4063 AND `GroupID`=0 AND `ID`>2; +DELETE FROM `creature_text` WHERE `CreatureID`=4063 AND `GroupID`=1; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(4063,0,3,"Aren't you a little short for your kind?",12,0,100,0,0,0,809,0,"Feeboz"), +(4063,0,4,"My life means nothing! Our sacred task is all!",12,0,100,0,0,0,810,0,"Feeboz"), +(4063,0,5,"If you mean to free Myzrael, you will be sorry!",12,0,100,0,0,0,811,0,"Feeboz"), +(4063,0,6,"Stand firm, brothers. And don't worry! Size is on our side!",12,0,100,0,0,0,812,0,"Feeboz"), +(4063,0,7,"Take that! The Drywhiskers will prevail!",12,0,100,0,0,0,814,0,"Feeboz"),
+(4063,1,0,"May I bask in your shadow forever!",12,0,100,0,0,0,799,0,"Feeboz"), +(4063,1,1,"Master, your wisdom is matched only by your hugeness!",12,0,100,0,0,0,800,0,"Feeboz"), +(4063,1,2,"If I had a copper for each of your great deeds, I'd be a rich kobold!",12,0,100,0,0,0,801,0,"Feeboz"), +(4063,1,3,"You will keep us safe from the evil mistress, yes!",12,0,100,0,0,0,802,0,"Feeboz"), +(4063,1,4,"Oh, praise! Your vigilance keeps Myzrael deep in the earth!",12,0,100,0,0,0,803,0,"Feeboz"), +(4063,1,5,"My only wish is to serve you, great one, until I die by your foot!",12,0,100,0,0,0,804,0,"Feeboz"), +(4063,1,6,"Your strength is of the mountain. Your grace is of the winds!",12,0,100,0,0,0,805,0,"Feeboz"), +(4063,1,7,"My feet hurt...",12,0,100,0,0,0,806,0,"Feeboz"); + +-- Znort +DELETE FROM `smart_scripts` WHERE `entryorguid`=2765 AND `source_type`=0 AND `id` IN (1,3); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2765,0,1,0,0,0,100,0,5000,7000,7000,10000,11,845,0,0,0,0,0,2,0,0,0,0,0,0,0,"Znort - In Combat - Cast Cleave"), +(2765,0,3,0,1,0,100,0,25000,35000,65000,75000,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Znort - Out of Combat - Say Line 1"); + +DELETE FROM `creature_text` WHERE `CreatureID`=2765 AND `GroupID`=0 AND `ID`>2; +DELETE FROM `creature_text` WHERE `CreatureID`=2765 AND `GroupID`=1; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(2765,0,3,"Aren't you a little short for your kind?",12,0,100,0,0,0,809,0,"Znort"), +(2765,0,4,"My life means nothing! Our sacred task is all!",12,0,100,0,0,0,810,0,"Znort"), +(2765,0,5,"If you mean to free Myzrael, you will be sorry!",12,0,100,0,0,0,811,0,"Znort"), +(2765,0,6,"Die! You will not free Myzrael!",12,0,100,0,0,0,807,0,"Znort"), +(2765,0,7,"Take that! The Drywhiskers will prevail!",12,0,100,0,0,0,814,0,"Znort"), +(2765,1,0,"May I bask in your shadow forever!",12,0,100,0,0,0,799,0,"Znort"), +(2765,1,1,"Master, your wisdom is matched only by your hugeness!",12,0,100,0,0,0,800,0,"Znort"), +(2765,1,2,"If I had a copper for each of your great deeds, I'd be a rich kobold!",12,0,100,0,0,0,801,0,"Znort"), +(2765,1,3,"You will keep us safe from the evil mistress, yes!",12,0,100,0,0,0,802,0,"Znort"), +(2765,1,4,"Oh, praise! Your vigilance keeps Myzrael deep in the earth!",12,0,100,0,0,0,803,0,"Znort"), +(2765,1,5,"My only wish is to serve you, great one, until I die by your foot!",12,0,100,0,0,0,804,0,"Znort"), +(2765,1,6,"Your strength is of the mountain. Your grace is of the winds!",12,0,100,0,0,0,805,0,"Znort"),
+(2765,1,7,"My feet hurt...",12,0,100,0,0,0,806,0,"Znort"); + +-- Sleeby +DELETE FROM `smart_scripts` WHERE `entryorguid`=2764 AND `source_type`=0 AND `id` IN (4); +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2764,0,4,0,1,0,100,0,45000,55000,65000,75000,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,"Sleeby - Out of Combat - Say Line 1"); + +DELETE FROM `creature_text` WHERE `CreatureID`=2764 AND `GroupID`=0 AND `ID`>1; +DELETE FROM `creature_text` WHERE `CreatureID`=2764 AND `GroupID`=1; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(2764,0,2,"The Great One will smash you!",12,0,100,0,0,0,808,0,"Sleeby"), +(2764,0,3,"Aren't you a little short for your kind?",12,0,100,0,0,0,809,0,"Sleeby"), +(2764,0,4,"My life means nothing! Our sacred task is all!",12,0,100,0,0,0,810,0,"Sleeby"), +(2764,0,5,"If you mean to free Myzrael, you will be sorry!",12,0,100,0,0,0,811,0,"Sleeby"), +(2764,0,6,"Die! You will not free Myzrael!",12,0,100,0,0,0,807,0,"Sleeby"), +(2764,0,7,"No! Leave us! We must not fail our task!",12,0,100,0,0,0,815,0,"Sleeby"), +(2764,1,0,"May I bask in your shadow forever!",12,0,100,0,0,0,799,0,"Sleeby"), +(2764,1,1,"Master, your wisdom is matched only by your hugeness!",12,0,100,0,0,0,800,0,"Sleeby"), +(2764,1,2,"If I had a copper for each of your great deeds, I'd be a rich kobold!",12,0,100,0,0,0,801,0,"Sleeby"), +(2764,1,3,"You will keep us safe from the evil mistress, yes!",12,0,100,0,0,0,802,0,"Sleeby"), +(2764,1,4,"Oh, praise! Your vigilance keeps Myzrael deep in the earth!",12,0,100,0,0,0,803,0,"Sleeby"), +(2764,1,5,"My only wish is to serve you, great one, until I die by your foot!",12,0,100,0,0,0,804,0,"Sleeby"), +(2764,1,6,"Your strength is of the mountain. Your grace is of the winds!",12,0,100,0,0,0,805,0,"Sleeby"),
+(2764,1,7,"My feet hurt...",12,0,100,0,0,0,806,0,"Sleeby"); + +-- Thenan +DELETE FROM `smart_scripts` WHERE `entryorguid`=2763 AND `source_type`=0 AND `id`=1; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2763,0,1,0,11,0,100,0,0,0,0,0,49,0,0,0,0,0,0,21,50,0,0,0,0,0,0,"Thenan - On Respawn - Start Attacking"); + +-- Darbel Montrose +DELETE FROM `smart_scripts` WHERE `entryorguid`=2598 AND `source_type`=0 AND `id`=2; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2598,0,2,0,0,0,100,0,4000,7000,18000,21000,11,12741,32,0,0,0,0,2,0,0,0,0,0,0,0,"Darbel Montrose - In Combat - Cast Curse of Weakness"); + +-- Molok the Crusher +DELETE FROM `smart_scripts` WHERE `entryorguid`=2604 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2604,0,0,0,0,0,100,0,7000,11000,9000,13000,11,6253,0,0,0,0,0,2,0,0,0,0,0,0,0,"Molok the Crusher - In Combat - Cast Backhand"); + +-- Zalas Witherbark +DELETE FROM `smart_scripts` WHERE `entryorguid`=2605 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2605,0,0,0,0,0,100,0,3000,5000,23000,27000,11,512,0,0,0,0,0,2,0,0,0,0,0,0,0,"Zalas Witherbark - In Combat - Cast Chains of Ice"), +(2605,0,1,0,0,0,100,0,3000,5000,15000,18000,11,851,2,0,0,0,0,6,0,0,0,0,0,0,0,"Zalas Witherbark - In Combat - Cast Polymorph: Sheep"), +(2605,0,2,0,0,0,100,0,5000,8000,8000,15000,11,9081,0,0,0,0,0,2,0,0,0,0,0,0,0,"Zalas Witherbark - In Combat - Cast Shadow Bolt Volley"), +(2605,0,3,0,0,0,100,0,2000,3000,13000,15000,11,4974,32,0,0,0,0,5,0,0,0,0,0,0,0,"Zalas Witherbark - In Combat - Cast Wither Touch"); + +-- Ruul Onestone +DELETE FROM `smart_scripts` WHERE `entryorguid`=2602 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, 
`action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2602,0,0,0,0,0,100,0,4000,6000,12000,14000,11,6219,0,0,0,0,0,2,0,0,0,0,0,0,0,"Ruul Onestone - In Combat - Cast Rain of Fire"), +(2602,0,1,0,0,0,100,0,1000,1000,31000,33000,11,6742,0,0,0,0,0,1,0,0,0,0,0,0,0,"Ruul Onestone - In Combat - Cast Bloodlust"), +(2602,0,2,0,0,0,100,0,2000,3000,3000,5000,11,9532,64,0,0,0,0,2,0,0,0,0,0,0,0,"Ruul Onestone - In Combat - Cast Lightning Bolt"); + +-- Fix movement for some creatures +UPDATE `creature` SET `spawndist`=5, `MovementType`=1 WHERE `guid`=12046; +UPDATE `creature` SET `spawndist`=0, `MovementType`=0 WHERE `guid`=14538; diff --git a/sql/updates/world/3.3.5/2017_11_29_00_world.sql b/sql/updates/world/3.3.5/2017_11_29_00_world.sql new file mode 100644 index 00000000000..434ebf3d040 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_29_00_world.sql @@ -0,0 +1,140 @@ +-- Spawns that req. phaseMask +UPDATE `creature` SET `phaseMask` = 2 WHERE `id` IN (28559,28560, 28660,28662); -- Citizen of Havenshire and Citizen of New Avalon +UPDATE `creature` SET `phaseMask` = 2 WHERE `guid` = 128752; -- High General Abbendis +UPDATE `creature` SET `phaseMask` = 2 WHERE `guid` IN (128796,129321); -- High Abbot Landgren +UPDATE `creature` SET `phaseMask` = 2 WHERE `id` IN (28530, 28594); -- Scarlet Commander Scarlet Preacher + +-- Havenshire Colt -- Havenshire Mare -- Havenshire Stallion +DELETE FROM `creature` WHERE `guid` IN (84229,84234,84235,84239,84240,84257,84264,84265,84277,84279,84280,84281,84297,84315,84316,84317,84322,84336,84341,84342,84346,84347,84348,84349,84392,84393,84406); +INSERT INTO `creature` (`guid`, `id`, `map`, `spawnMask`, `phaseMask`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `curhealth`, `curmana`, `MovementType`) VALUES +(84229, 28605, 609, 1, 2, 2177.534, -5806.438, 101.4173, 3.543018, 120, 0, 2453, 0, 0), +(84234, 28605, 609, 1, 2, 2221.63, -5881.24, 101.211, 4.74729, 120, 0, 2453, 0, 0), +(84235, 28605, 609, 1, 2, 2206.62, -5840.55, 101.48, 1.43117, 120, 0, 2453, 0, 0), +(84239, 28605, 609, 1, 2, 2209.9, -5840.27, 101.428, 2.35619, 120, 0, 2453, 0, 0), +(84240, 28605, 609, 1, 2, 2204.79, -5840.43, 101.576, 1.13446, 120, 0, 2453, 0, 0), +(84257, 28605, 609, 1, 2, 2199.98, -5871.52, 101.408, 4.34587, 120, 0, 2453, 0, 0), +(84264, 28605, 609, 1, 2, 2228.12, -5818.03, 101.445, 1.43117, 120, 0, 2453, 0, 0), +(84265, 28605, 609, 1, 2, 2232.95, -5814.65, 101.368, 2.35619, 120, 0, 2453, 0, 0), +(84277, 28606, 609, 1, 2, 2172.505, -5810.831, 101.4043, 1.082104, 120, 0, 2453, 0, 0), +(84279, 28606, 609, 1, 2, 2237.33, -5860.84, 101.292, 5.42797, 120, 0, 2453, 0, 0), +(84280, 28606, 609, 1, 2, 2235.99, -5863.17, 101.269, 0.017453, 120, 0, 2453, 0, 0), +(84281, 28606, 609, 1, 2, 2221.94, -5888.14, 101.022, 1.69297, 120, 0, 2453, 0, 0), +(84297, 28606, 609, 1, 2, 2218.83, -5888.31, 100.995, 1.58825, 120, 0, 2453, 0, 0), +(84315, 28606, 609, 1, 2, 2203.25, -5833.98, 101.44, 5.70723, 120, 0, 2453, 0, 0), +(84316, 28606, 609, 1, 2, 2206.26, -5833.58, 101.435, 5.044, 120, 0, 2453, 0, 0), +(84317, 28606, 609, 1, 2, 2185.22, -5867.83, 101.395, 4.85202, 120, 0, 2453, 0, 0), +(84322, 28606, 609, 1, 2, 2197.14, -5870.83, 101.403, 4.57276, 120, 0, 2453, 0, 0), +(84336, 28606, 609, 1, 2, 2225.69, -5815.68, 101.445, 0.488692, 120, 0, 2453, 0, 0), +(84341, 28607, 609, 
1, 2, 2212.25, -5881.55, 101.11, 4.59022, 120, 0, 2062, 0, 0), +(84342, 28607, 609, 1, 2, 2176, -5810.381, 101.4176, 2.094395, 120, 0, 2062, 0, 0), +(84346, 28607, 609, 1, 2, 2208.6, -5833.48, 101.428, 4.7822, 120, 0, 2062, 0, 0), +(84347, 28607, 609, 1, 2, 2202.68, -5838.43, 101.498, 0.069813, 120, 0, 2062, 0, 0), +(84348, 28607, 609, 1, 2, 2186.6, -5868.27, 101.39, 4.90438, 120, 0, 2062, 0, 0), +(84349, 28607, 609, 1, 2, 2189.17, -5870.81, 101.378, 2.00713, 120, 0, 2062, 0, 0), +(84392, 28607, 609, 1, 2, 2187.83, -5868.67, 101.387, 4.03171, 120, 0, 2062, 0, 0), +(84393, 28607, 609, 1, 2, 2224.77, -5811.89, 101.434, 0.820305, 120, 0, 2062, 0, 0), +(84406, 28607, 609, 1, 2, 2232.4, -5818.69, 101.392, 2.26893, 120, 0, 2062, 0, 0); + +-- Scarlet Preacher +UPDATE `creature` SET `position_x`=1390.558, `position_y`=-5703.662, `position_z`=136.3209, `Orientation`=2.282122, `MovementType`=0, `spawndist`=0 WHERE `guid`=129171 AND `id`=28594; +UPDATE `creature` SET `position_x`=1390.572, `position_y`=-5703.678, `position_z`=136.3188, `Orientation`=5.372028, `MovementType`=2, `spawndist`=0 WHERE `guid`=129170 AND `id`=28594; +UPDATE `creature` SET `position_x`=1360.449, `position_y`=-5683.808, `position_z`=138.7448, `Orientation`=5.587841, `MovementType`=0, `spawndist`=0 WHERE `guid`=129169 AND `id`=28594; +UPDATE `creature` SET `position_x`=1488.340, `position_y`=-5813.331, `position_z`=131.3350, `Orientation`=1.720976, `MovementType`=2, `spawndist`=0 WHERE `guid`=129165 AND `id`=28594; +UPDATE `creature` SET `position_x`=1484.236, `position_y`=-5850.959, `position_z`=131.3351, `Orientation`=3.808080, `MovementType`=2, `spawndist`=0 WHERE `guid`=129168 AND `id`=28594; + +DELETE FROM `creature_addon` WHERE `guid` IN (129170,129165,129168); +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(129170, 1291700,0,0,1, 0,''), +(129165, 1291650,0,0,1, 0,''), +(129168, 1291680,0,0,1, 0,''); + +DELETE FROM `creature_formations` WHERE `leaderGUID` IN (128736,129170); +INSERT INTO `creature_formations` (`leaderGUID`, `memberGUID`, `dist`, `angle`, `groupAI`, `point_1`, `point_2`) VALUES +(128736, 128736, 0, 0, 515, 0, 0), +(128736, 128709, 6, 280, 515, 0, 0), +(128736, 128712, 6, 300, 515, 0, 0), +(128736, 128710, 6, 320, 515, 0, 0), +(128736, 128713, 6, 340, 515, 0, 0), +(128736, 128705, 6, 360, 515, 0, 0), +(128736, 128707, 6, 20, 515, 0, 0), +(128736, 128711, 6, 40, 515, 0, 0), +(128736, 128708, 6, 60, 515, 0, 0), +(128736, 128706, 6, 80, 515, 0, 0), +(129170, 129170, 0, 0, 515, 0, 0), +(129170, 129171, 3, 270, 515, 0, 0); + +-- waypoints +DELETE FROM `waypoint_data` WHERE `id` IN (1291700,1291650,1291680,1287360); +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `delay`, `action`, `orientation`, `action_chance`, `move_type`) VALUES +-- Scarlet Preacher +(1291700,1,1402.94,-5716.94,133.387,0,0,5.27582,100,0), +(1291700,2,1423.3,-5747.93,131.208,0,0,5.27415,100,0), +(1291700,3,1402.68,-5716.63,133.475,0,0,2.18361,100,0), +(1291700,4,1397.68,-5710.01,135.097,0,0,2.44907,100,0), +(1291700,5,1389.76,-5704.78,136.089,0,0,2.54489,100,0), +(1291700,6,1386.06,-5702.27,138.002,0,0,2.54489,100,0), +(1291700,7,1366.41,-5688.04,137.968,3000,0,2.54489,100,0), +(1291700,8,1386.36,-5702.1,138.001,0,0,5.66684,100,0), +(1291700,9,1390.1,-5704.74,136.054,0,0,5.66684,100,0), +(1291650,1,1484.85,-5802.34,131.228,0,0,2.09093,100,0), +(1291650,2,1474.38,-5788.07,131.232,0,0,2.24565,100,0), 
+(1291650,3,1451.5,-5759.41,131.232,0,0,2.40901,100,0), +(1291650,4,1438.17,-5750.76,131.21,0,0,2.77606,100,0), +(1291650,5,1425.59,-5747.92,131.21,0,0,3.33919,100,0), +(1291650,6,1416.48,-5752.81,131.263,0,0,3.84342,100,0), +(1291650,7,1410.85,-5760.42,131.447,0,0,4.38253,100,0), +(1291650,8,1397.58,-5797.64,131.217,0,0,4.52391,100,0), +(1291650,9,1398.15,-5805.79,131.242,0,0,5.00299,100,0), +(1291650,10,1402.93,-5815.12,131.215,0,0,5.33678,100,0), +(1291650,11,1431.32,-5846.87,131.209,0,0,5.47815,100,0), +(1291650,12,1439.07,-5853.76,131.21,0,0,5.82529,100,0), +(1291650,13,1449.41,-5855.99,131.22,0,0,0.0101976,100,0), +(1291650,14,1478.43,-5856.03,131.237,0,0,0.0438187,100,0), +(1291650,15,1487.95,-5854.48,131.214,0,0,0.642292,100,0), +(1291650,16,1491.59,-5851.32,131.214,0,0,1.09625,100,0), +(1291650,17,1493.68,-5843.79,131.213,0,0,1.60511,100,0), +(1291650,18,1488.45,-5813.26,131.21,0,0,1.81953,100,0), +(1291680,1,1451.43,-5852.32,131.316,0,0,2.97292,100,0), +(1291680,2,1441.34,-5848.45,131.214,0,0,2.31868,100,0), +(1291680,3,1415.71,-5822.71,131.208,0,0,2.34224,100,0), +(1291680,4,1402.84,-5805.23,131.217,0,0,1.9087,100,0), +(1291680,5,1403.41,-5795.93,131.212,0,0,1.30316,100,0), +(1291680,6,1414.64,-5764.5,131.301,0,0,1.2741,100,0), +(1291680,7,1421.97,-5754.72,131.213,0,0,0.747881,100,0), +(1291680,8,1431.54,-5753.59,131.21,0,0,6.13021,100,0), +(1291680,9,1440.86,-5758.25,131.21,0,0,5.51839,100,0), +(1291680,10,1465.74,-5787.2,131.213,0,0,5.4532,100,0), +(1291680,11,1479.13,-5805.14,131.211,0,0,5.30005,100,0), +(1291680,12,1485.4,-5822.49,131.219,0,0,4.99375,100,0), +(1291680,13,1487.73,-5844.53,131.216,0,0,4.72279,100,0), +(1291680,14,1486.62,-5849.06,131.214,0,0,4.1039,100,0), +(1291680,15,1482.41,-5851.55,131.217,0,0,3.45988,100,0), +(1287360,1,1554.09, -5787.14, 119.562,0,0,3.1587,100,0), -- Scarlet Commander +(1287360,2,1525.3,-5788.32,127.109,0,0,3.34719,100,0), +(1287360,3,1501.55,-5795.64,131.149,0,0,3.56318,100,0), +(1287360,4,1488.25,-5802.89,131.248,0,0,4.43889,100,0), +(1287360,5,1486.45,-5811.1,131.213,0,0,4.81274,100,0), +(1287360,6,1491.52,-5862.06,131.213,0,0,5.16067,100,0), +(1287360,7,1504.88,-5884.58,131.182,0,0,5.78506,100,0), +(1287360,8,1535.07,-5893.31,129.066,0,0,6.08743,100,0), +(1287360,9,1601.17,-5910.01,116.303,0,0,6.00342,100,0), +(1287360,10,1643.02,-5910.12,116.266,0,0,0.520555,100,0), +(1287360,11,1656.36,-5899.57,116.124,0,0,0.587314,100,0), +(1287360,12,1692.02,-5884.95,116.142,0,0,6.01834,100,0), +(1287360,13,1702.86,-5887.94,116.138,0,0,5.62172,100,0), +(1287360,14,1736.57,-5907.02,116.123,0,0,5.80629,100,0), +(1287360,15,1798.79,-5933.78,115.968,0,0,6.01049,100,0), +(1287360,16,1809.37,-5933.87,115.297,0,0,0.343049,100,0), +(1287360,17,1864.98,-5914.6,104.226,0,0,0.331268,100,0), +(1287360,18,1885.84,-5907.43,102.989,0,0,0.778945,100,0), +(1287360,19,1894.42,-5891.76,102.687,0,0,1.25411,100,0), +(1287360,20,1895.85,-5875.9,101.466,0,0,1.51329,100,0), +(1287360,21,1881.66,-5850.79,102.244,0,0,2.27513,100,0), +(1287360,22,1852.78,-5819.59,99.8482,0,0,3.03697,100,0), +(1287360,23,1815.8,-5818.1,105.145,0,0,3.12336,100,0), +(1287360,24,1749.11,-5822.59,116.12,0,0,3.13514,100,0), +(1287360,25,1731.1,-5820.91,116.122,0,0,2.228,100,0), +(1287360,26,1707.71, -5788.65, 114.503,0,0,2.55002,100,0), +(1287360,27,1688.41, -5781.75, 114.617,0,0,2.96235,100,0), +(1287360,28,1652.1, -5780.44, 116.135,0,0,3.13514,100,0), +(1287360,29,1604.93, -5783.24, 116.183,0,0,3.20583,100,0); diff --git a/sql/updates/world/3.3.5/2017_11_30_00_world.sql 
b/sql/updates/world/3.3.5/2017_11_30_00_world.sql new file mode 100644 index 00000000000..bce851e9581 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_30_00_world.sql @@ -0,0 +1,2 @@ +-- +DELETE FROM `creature` WHERE `id` IN(28369); diff --git a/sql/updates/world/3.3.5/2017_11_30_01_world.sql b/sql/updates/world/3.3.5/2017_11_30_01_world.sql new file mode 100644 index 00000000000..bc418f88ea7 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_11_30_01_world.sql @@ -0,0 +1,106 @@ +-- Tower of the Damned revamp +SET @CGUID := 85740; +SET @POOLENTRY := 1117; + +-- Move wrong spawns +UPDATE `creature` SET `position_x`=6455.98, `position_y`=-6347.05, `position_z`=55.178, `orientation`=1.3107, `spawndist`=0, `MovementType`=0 WHERE `guid`=82854; +UPDATE `creature` SET `position_x`=6440.71, `position_y`=-6404.61, `position_z`=48.678, `orientation`=1.2039, `spawndist`=0, `MovementType`=2 WHERE `guid`=82937; +UPDATE `creature` SET `position_x`=6437.28, `position_y`=-6360.66, `position_z`=41.276, `orientation`=1.2629, `spawndist`=0, `MovementType`=2 WHERE `guid`=82897; +UPDATE `creature` SET `position_x`=6448.90, `position_y`=-6382.36, `position_z`=41.282, `orientation`=2.7828 WHERE `guid`=82871; + +-- Delete wrong spawns +DELETE FROM `creature` WHERE `guid` IN (82879, 82899, 82870, 82841, 82823, 82864, 82915); +DELETE FROM `creature_addon` WHERE `guid` IN (82879, 82899, 82870, 82841, 82823, 82864, 82915); +DELETE FROM `spawn_group` WHERE `groupId`=2 AND `spawnType`=0 AND `spawnId`=82870; + +-- Add 3 Deatholme Necromancer and 3 Eyes of Dar'Khan +DELETE FROM `creature` where `guid` BETWEEN @CGUID AND @CGUID + 5; +INSERT INTO `creature` (`guid`, `id`, `map`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `curhealth`, `curmana`, `MovementType`) VALUES +(@CGUID , 16317, 530, 1, 6461.42, -6345.30, 41.233, 2.8408, 300, 377, 408, 0), +(@CGUID + 1, 16317, 530, 1, 6476.68, -6362.99, 27.201, 3.8813, 300, 377, 408, 0), +(@CGUID + 2, 16317, 530, 1, 6470.83, -6380.51, 27.202, 1.8361, 300, 377, 408, 0), +(@CGUID + 3, 16320, 530, 0, 6461.81, -6349.33, 55.178, 1.2652, 300, 404, 456, 0), +(@CGUID + 4, 16320, 530, 0, 6437.28, -6360.66, 41.276, 1.2629, 300, 404, 456, 2), +(@CGUID + 5, 16320, 530, 0, 6440.71, -6404.61, 48.678, 1.2039, 300, 404, 456, 2); + +-- Add pathing of two Wailers and two Eyes of Dar'Khan +SET @PATHID_1 := 829370; +SET @PATHID_2 := 828970; +SET @PATHID_3 := (@CGUID + 4) * 10; +SET @PATHID_4 := (@CGUID + 5) * 10; + +DELETE FROM `creature_addon` WHERE `guid` IN (82937, 82897, @CGUID + 4, @CGUID + 5); +INSERT INTO `creature_addon` (`guid`, `path_id`, `bytes2`) VALUES +(82937, @PATHID_1, 1), +(82897, @PATHID_2, 1), +(@CGUID + 4, @PATHID_3, 1), +(@CGUID + 5, @PATHID_4, 1); + +DELETE FROM `waypoint_data` WHERE `id` IN (@PATHID_1, @PATHID_2, @PATHID_3, @PATHID_4); +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`) VALUES +(@PATHID_1, 1, 6445.87, -6388.29, 55.178), +(@PATHID_1, 2, 6462.24, -6385.64, 55.178), +(@PATHID_1, 3, 6477.23, -6373.64, 55.178), +(@PATHID_1, 4, 6472.75, -6355.57, 55.178), +(@PATHID_1, 5, 6460.20, -6343.13, 55.178), +(@PATHID_1, 6, 6465.87, -6326.58, 48.679), +(@PATHID_1, 7, 6460.20, -6343.13, 55.178), +(@PATHID_1, 8, 6443.27, -6346.17, 55.179), +(@PATHID_1, 9, 6430.45, -6358.61, 55.179), +(@PATHID_1, 10, 6433.99, -6376.20, 55.178), +(@PATHID_1, 11, 6445.87, -6388.29, 55.178), +(@PATHID_1, 12, 6440.46, -6404.84, 48.678), +(@PATHID_2, 1, 6443.78, -6340.43, 41.291), +(@PATHID_2, 2, 6459.56, -6345.86, 
41.208), +(@PATHID_2, 3, 6453.16, -6365.24, 33.735), +(@PATHID_2, 4, 6446.68, -6384.37, 41.203), +(@PATHID_2, 5, 6430.96, -6379.17, 41.291), +(@PATHID_2, 6, 6437.28, -6360.66, 41.276), +(@PATHID_3, 1, 6443.78, -6340.43, 41.291), +(@PATHID_3, 2, 6459.56, -6345.86, 41.208), +(@PATHID_3, 3, 6453.16, -6365.24, 33.735), +(@PATHID_3, 4, 6446.68, -6384.37, 41.203), +(@PATHID_3, 5, 6430.96, -6379.17, 41.291), +(@PATHID_3, 6, 6437.28, -6360.66, 41.276), +(@PATHID_4, 1, 6445.87, -6388.29, 55.178), +(@PATHID_4, 2, 6462.24, -6385.64, 55.178), +(@PATHID_4, 3, 6477.23, -6373.64, 55.178), +(@PATHID_4, 4, 6472.75, -6355.57, 55.178), +(@PATHID_4, 5, 6460.20, -6343.13, 55.178), +(@PATHID_4, 6, 6465.87, -6326.58, 48.679), +(@PATHID_4, 7, 6460.20, -6343.13, 55.178), +(@PATHID_4, 8, 6443.27, -6346.17, 55.179), +(@PATHID_4, 9, 6430.45, -6358.61, 55.179), +(@PATHID_4, 10, 6433.99, -6376.20, 55.178), +(@PATHID_4, 11, 6445.87, -6388.29, 55.178), +(@PATHID_4, 12, 6440.46, -6404.84, 48.678); + +-- Eyes of Dar'Khan/Wailers shared spawns +SET @POOL_1 := @POOLENTRY; +SET @POOL_2 := @POOLENTRY + 1; + +DELETE FROM `pool_template` WHERE `entry` IN (@POOL_1, @POOL_2); +INSERT INTO `pool_template` VALUES +(@POOL_1, 1, "Eye of DarKhan/Wailer Spawn 1"), +(@POOL_2, 1, "Eye of DarKhan/Wailer Spawn 2"); + +DELETE FROM `pool_creature` WHERE `guid` IN (82897, 82937, @CGUID + 4, @CGUID + 5); +INSERT INTO `pool_creature` VALUES +(82897, @POOL_1, 0, "Wailer Spawn 1"), +(82937, @POOL_2, 0, "Wailer Spawn 2"), +(@CGUID + 4, @POOL_1, 0, "Eye of DarKhan Spawn 1"), +(@CGUID + 5, @POOL_2, 0, "Eye of DarKhan Spawn 2"); + +-- Rescript Dar'Khan Drathir and fix speech +-- by Teppic +SET @ENTRY := 16329; +DELETE FROM `smart_scripts` WHERE `entryorguid`=@ENTRY AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES +(@ENTRY,0,0,0,4,0,100,0,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,"Dar'Khan Drathir - On Aggro - Say Line 0"), +(@ENTRY,0,1,0,0,0,100,0,0,0,6000,12000,11,20791,0,0,0,0,0,2,0,0,0,0,0,0,0,"Dar'Khan Drathir - In Combat CMC - Cast 'Shadow Bolt'"), +(@ENTRY,0,2,0,0,0,100,0,7000,9000,15000,15000,11,21068,0,0,0,0,0,5,0,0,0,0,0,0,0,"Dar'Khan Drathir - In Combat CMC - Cast 'Corruption'"), +(@ENTRY,0,3,0,0,0,100,0,15000,20000,16000,20000,11,38660,0,0,0,0,0,5,0,0,0,0,0,0,0,"Dar'Khan Drathir - In Combat CMC - Cast 'Fear'"); + +DELETE FROM `creature_text` WHERE `CreatureID`=@ENTRY; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(@ENTRY, 0, 0, 'Mortal fools! 
The ghouls of Deatholme will feast on your remains!', 12, 0, 100, 0, 0, 0, 12224, 0, "Dar'Khan Drathir"); diff --git a/sql/updates/world/3.3.5/2017_12_02_00_world.sql b/sql/updates/world/3.3.5/2017_12_02_00_world.sql new file mode 100644 index 00000000000..cbcf0331531 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_12_02_00_world.sql @@ -0,0 +1 @@ +DELETE FROM `spell_dbc` WHERE `Effect1` >= 165 OR `Effect2` >= 165 OR `Effect3` >= 165 OR `EffectApplyAuraName1` >= 317 OR `EffectApplyAuraName2` >= 317 OR `EffectApplyAuraName3` >= 317 OR `EffectImplicitTargetA1` >= 111 OR `EffectImplicitTargetA2` >= 111 OR `EffectImplicitTargetA3` >= 111 OR `EffectImplicitTargetB1` >= 111 OR `EffectImplicitTargetB2` >= 111 OR `EffectImplicitTargetB3` >= 111; diff --git a/sql/updates/world/3.3.5/2017_12_04_00_world_335.sql b/sql/updates/world/3.3.5/2017_12_04_00_world_335.sql new file mode 100644 index 00000000000..2084a4aa2e7 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_12_04_00_world_335.sql @@ -0,0 +1,2 @@ +-- Replace two Riverpaw Scouts with Riverpaw Shamans +UPDATE `creature` SET `id`=1065, `modelid`=204, `curhealth`=230, `curmana`=249 WHERE `guid` IN (86657, 86662); diff --git a/sql/updates/world/3.3.5/2017_12_04_01_world.sql b/sql/updates/world/3.3.5/2017_12_04_01_world.sql new file mode 100644 index 00000000000..258db741209 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_12_04_01_world.sql @@ -0,0 +1,8 @@ +-- Add two Draenei Artificers +DELETE FROM `creature` WHERE `guid` IN (82823,82841); +INSERT INTO `creature` (`guid`, `id`, `map`, `zoneId`, `areaId`, `spawnMask`, `phaseMask`, `modelid`, `equipment_id`, `position_x`, `position_y`, `position_z`, `orientation`, `spawntimesecs`, `spawndist`, `currentwaypoint`, `curhealth`, `curmana`, `MovementType`, `npcflag`, `unit_flags`, `dynamicflags`, `VerifiedBuild`) VALUES +(82823, 17228, 530, 0, 0, 1, 1, 0, 1, -4191.44, -12464.07, 45.340, 3.7988, 300, 0, 0, 98, 115, 2, 0, 0, 0, 0), +(82841, 17228, 530, 0, 0, 1, 1, 0, 1, -4213.64, -12467.37, 45.464, 0.9833, 300, 0, 0, 98, 115, 0, 0, 0, 0, 0); + +-- Move script of Draenei Artificer from ID to specific GUID +UPDATE `smart_scripts` SET `entryorguid`=-61962 WHERE `entryorguid`=17228 AND `source_type`=0; diff --git a/sql/updates/world/3.3.5/2017_12_04_02_world_335.sql b/sql/updates/world/3.3.5/2017_12_04_02_world_335.sql new file mode 100644 index 00000000000..6cf61b9b98d --- /dev/null +++ b/sql/updates/world/3.3.5/2017_12_04_02_world_335.sql @@ -0,0 +1,9 @@ +-- Rigglefuzz +DELETE FROM `creature_text` WHERE `CreatureID`=2817; +INSERT INTO `creature_text` (`CreatureID`, `GroupID`, `ID`, `Text`, `Type`, `Language`, `Probability`, `Emote`, `Duration`, `Sound`, `BroadcastTextId`, `TextRange`, `comment`) VALUES +(2817,0,0,"%s cooks up a batch of spicy hot buzzard wings for $n.",16,0,100,0,0,0,887,0,"Rigglefuzz"); + +UPDATE `creature_template` SET `AIName`="SmartAI" WHERE `entry`=2817; +DELETE FROM `smart_scripts` WHERE `entryorguid`=2817 AND `source_type`=0; +INSERT INTO `smart_scripts` (`entryorguid`, `source_type`, `id`, `link`, `event_type`, `event_phase_mask`, `event_chance`, `event_flags`, `event_param1`, `event_param2`, `event_param3`, `event_param4`, `action_type`, `action_param1`, `action_param2`, `action_param3`, `action_param4`, `action_param5`, `action_param6`, `target_type`, `target_param1`, `target_param2`, `target_param3`, `target_x`, `target_y`, `target_z`, `target_o`, `comment`) VALUES +(2817,0,0,0,20,0,100,0,703,0,0,0,1,0,0,0,0,0,0,7,0,0,0,0,0,0,0,"Rigglefuzz - On Quest 'Barbecued Buzzard Wings' 
Rewarded - Say Line 0"); diff --git a/sql/updates/world/3.3.5/2017_12_04_03_world_335.sql b/sql/updates/world/3.3.5/2017_12_04_03_world_335.sql new file mode 100644 index 00000000000..351b1daba61 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_12_04_03_world_335.sql @@ -0,0 +1,2 @@ +-- Parqual Fintallas +UPDATE `smart_scripts` SET `event_param1`=4764 WHERE `entryorguid`=4488 AND `source_type`=0 AND `id`=10; diff --git a/sql/updates/world/3.3.5/2017_12_04_04_world_335.sql b/sql/updates/world/3.3.5/2017_12_04_04_world_335.sql new file mode 100644 index 00000000000..939fe265bda --- /dev/null +++ b/sql/updates/world/3.3.5/2017_12_04_04_world_335.sql @@ -0,0 +1,2 @@ +-- +UPDATE `creature` SET `MovementType`=0 WHERE `guid`=82823; diff --git a/sql/updates/world/3.3.5/2017_12_05_00_world.sql b/sql/updates/world/3.3.5/2017_12_05_00_world.sql new file mode 100644 index 00000000000..3707ef18f3c --- /dev/null +++ b/sql/updates/world/3.3.5/2017_12_05_00_world.sql @@ -0,0 +1,319 @@ +-- DK Starting Zone - phase 64 +-- Duplicates removed +DELETE FROM `creature` WHERE `guid` IN (130751,130657,130620,130670,130808,130674,130569,130599,130719,130565,130609,130566,130717,130741,130799,130821,130668,130667,130800,130708,130664,130750,130666,130807,130817,130814,130681,130822,130816,130673,130680,130672,130678,130684,130823); +DELETE FROM `creature_addon` WHERE `guid` IN (130751,130657,130620,130670,130808,130674,130569,130599,130719,130565,130609,130566,130717,130741,130799,130821,130668,130667,130800,130708,130664,130750,130666,130807,130817,130814,130681,130822,130816,130673,130680,130672,130678,130684,130823); +-- Rampaging Abomination +UPDATE `creature` SET `spawntimesecs` = 5, `spawndist` = 20, `movementType` = 1 WHERE `id` = 29115; +-- Volatile Ghoul +UPDATE `creature` SET `spawntimesecs` = 5 WHERE `id` = 29136; +UPDATE `creature` SET `position_x` = 2198.392090, `position_y` = -5828.402832, `position_z` = 101.502411 WHERE `guid` = 130923; +UPDATE `creature` SET `position_x` = 2175.717285, `position_y` = -5816.032715, `position_z` = 101.343269 WHERE `guid` = 130951; +UPDATE `creature` SET `position_x` = 2089.767090, `position_y` = -5782.844727, `position_z` = 99.5929110 WHERE `guid` = 130932; +UPDATE `creature` SET `position_x` = 2156.671875, `position_y` = -5757.208984, `position_z` = 100.663902 WHERE `guid` = 130949; +UPDATE `creature` SET `position_x` = 2238.655518, `position_y` = -5845.669434, `position_z` = 101.297417 WHERE `guid` = 130926; +UPDATE `creature` SET `position_x` = 2223.981934, `position_y` = -5862.010742, `position_z` = 101.434555 WHERE `guid` = 130937; +-- Crusaders +UPDATE `creature` SET `Spawndist` = 0, `MovementType` = 0 WHERE `guid` = 130616; +UPDATE `creature` SET `position_x` = 2171.041504, `position_y` = -5746.948242, `position_z` = 101.453552, `orientation` = 4.915133, `Spawndist` = 10, `MovementType` = 1 WHERE `guid` = 130665; +UPDATE `creature` SET `position_x` = 2021.283081, `position_y` = -5767.769043, `position_z` = 102.069725, `orientation` = 0.455777 WHERE `guid` = 130663; +UPDATE `creature` SET `position_x` = 1938.300537, `position_y` = -5806.917480, `position_z` = 100.326469, `orientation` = 0.036730 WHERE `guid` = 130669; +UPDATE `creature` SET `position_x` = 1822.585205, `position_y` = -5806.501465, `position_z` = 103.471199, `orientation` = 6.123573 WHERE `guid` = 130676; +UPDATE `creature` SET `position_x` = 1886.295898, `position_y` = -5815.069824, `position_z` = 102.595604, `orientation` = 0.085570 WHERE `guid` = 130803; +UPDATE `creature` SET `position_x` = 
1919.796875, `position_y` = -5810.199707, `position_z` = 100.092346, `orientation` = 0.194942 WHERE `guid` = 130671; +UPDATE `creature` SET `position_x` = 2062.375000, `position_y` = -5827.616211, `position_z` = 101.632080, `orientation` = 6.049894 WHERE `guid` = 130789; +UPDATE `creature` SET `position_x` = 2062.264404, `position_y` = -5863.437988, `position_z` = 103.230919, `orientation` = 0.077718 WHERE `guid` = 130662; +UPDATE `creature` SET `position_x` = 1847.384644, `position_y` = -5861.911621, `position_z` = 102.175438, `orientation` = 6.141752 WHERE `guid` = 130679; +UPDATE `creature` SET `position_x` = 1872.653320, `position_y` = -5855.841309, `position_z` = 102.853523, `orientation` = 0.010887 WHERE `guid` = 130811; +UPDATE `creature` SET `position_x` = 1887.613525, `position_y` = -5854.142578, `position_z` = 102.406212, `orientation` = 0.058871 WHERE `guid` = 130809; +UPDATE `creature` SET `position_x` = 1897.007813, `position_y` = -5852.924805, `position_z` = 101.451141, `orientation` = 0.009392 WHERE `guid` = 130805; +UPDATE `creature` SET `position_x` = 1919.654297, `position_y` = -5846.907715, `position_z` = 100.235977, `orientation` = 0.124753 WHERE `guid` = 130804; +UPDATE `creature` SET `position_x` = 1935.207397, `position_y` = -5843.156250, `position_z` = 100.510971, `orientation` = 0.203284 WHERE `guid` = 130810; +UPDATE `creature` SET `position_x` = 2105.251221, `position_y` = -5900.707031, `position_z` = 104.638618, `orientation` = 6.202802 WHERE `guid` = 130754; +UPDATE `creature` SET `position_x` = 1982.372559, `position_y` = -5917.364258, `position_z` = 104.190681, `orientation` = 0.246334 WHERE `guid` = 130801; +UPDATE `creature` SET `position_x` = 1928.129761, `position_y` = -5933.635742, `position_z` = 103.063942, `orientation` = 6.162736 WHERE `guid` = 130677; +UPDATE `creature` SET `position_x` = 1888.227783, `position_y` = -5932.001465, `position_z` = 104.198326, `orientation` = 0.181147 WHERE `guid` = 130802; +UPDATE `creature` SET `position_x` = 1887.905151, `position_y` = -5911.649414, `position_z` = 102.921516, `orientation` = 0.015411 WHERE `guid` = 130813; +UPDATE `creature` SET `position_x` = 1846.163696, `position_y` = -5901.961426, `position_z` = 104.819687, `orientation` = 0.024871 WHERE `guid` = 130675; +UPDATE `creature` SET `position_x` = 1860.970825, `position_y` = -5927.291504, `position_z` = 104.586868, `orientation` = 0.181141 WHERE `guid` = 130825; +-- Waypoints +UPDATE `creature` SET `spawntimesecs` = 30, `Spawndist` = 0, `MovementType` = 2 WHERE `guid` IN (130615,130653,130788,130791,130820,130663,130669,130819,130676,130812,130803,130806,130671,130789,130662,130679,130811,130809,130805,130804,130810,130754,130801,130677,130802,130813,130675,130825); +DELETE FROM `creature_addon` WHERE `guid` IN (130615,130653,130788,130791,130820,130663,130669,130819,130676,130812,130803,130806,130671,130789,130662,130679,130811,130809,130805,130804,130810,130754,130801,130677,130802,130813,130675,130825); +INSERT INTO `creature_addon` (`guid`, `path_id`, `mount`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(130615,1306150,0,0,1, 0,''), +(130653,1306530,0,0,1, 0,''), +(130788,1307880,0,0,1, 0,''), +(130791,1307910,0,0,1, 0,''), +(130820,1308200,0,0,1, 0,''), +(130663,1306630,0,0,1, 0,''), +(130669,1306690,0,0,1, 0,''), +(130819,1308190,0,0,1, 0,''), +(130676,1306760,0,0,1, 0,''), +(130812,1308120,0,0,1, 0,''), +(130803,1308030,0,0,1, 0,''), +(130806,1308060,0,0,1, 0,''), +(130671,1306710,0,0,1, 0,''), +(130789,1307890,0,0,1, 0,''), 
+(130662,1306620,0,0,1, 0,''), +(130679,1306790,0,0,1, 0,''), +(130811,1308110,0,0,1, 0,''), +(130809,1308090,0,0,1, 0,''), +(130805,1308050,0,0,1, 0,''), +(130804,1308040,0,0,1, 0,''), +(130810,1308100,0,0,1, 0,''), +(130754,1307540,0,0,1, 0,''), +(130801,1308010,0,0,1, 0,''), +(130677,1306770,0,0,1, 0,''), +(130802,1308020,0,0,1, 0,''), +(130813,1308130,0,0,1, 0,''), +(130675,1306750,0,0,1, 0,''), +(130825,1308250,0,0,1, 0,''); +DELETE FROM `waypoint_data` WHERE `id` IN (1306150,1306530,1307880,1307910,1308200,1306630,1306690,1308190,1306760,1308120,1308030,1308060,1306710,1307890,1306620,1306790,1308110,1308090,1308050,1308040,1308100,1307540,1308010,1306770,1308020,1308130,1306750,1308250); +INSERT INTO `waypoint_data` (`id`, `point`, `position_x`, `position_y`, `position_z`, `delay`, `action`, `orientation`, `action_chance`, `move_type`) VALUES +(1308200,1,2110.63,-5730.45,100.176,1500,0,4.8397,100,1), +(1308200,2,2122.33,-5722.27,100.328,0,0,0.410476,100,1), +(1308200,3,2139.85,-5717.74,100.963,0,0,0.558131,100,1), +(1308200,4,2169.78,-5698.14,106.245,0,0,0.644525,100,1), +(1308200,5,2201.61,-5670.41,116.544,0,0,0.691649,100,1), +(1308200,6,2221.2,-5654.19,126.493,0,0,0.49687,100,1), +(1308200,7,2247.14,-5647.54,134.152,0,0,0.100244,100,1), +(1308200,8,2281.82,-5649.43,144.037,10000,1188,6.15959,100,1), +(1306150,1,2082.57,-5729.92,100.968,1500,0,-3.0862,100,1), +(1306150,2,2120.29,-5721.2,100.326,0,0,0.206275,100,1), +(1306150,3,2144.6,-5714.86,101.333,0,0,0.611541,100,1), +(1306150,4,2164.79,-5698.69,105.338,0,0,0.701862,100,1), +(1306150,5,2185.95,-5676.26,112.802,0,0,0.862868,100,1), +(1306150,6,2205.62,-5656.75,121.586,0,0,0.752912,100,1), +(1306150,7,2235.14,-5646.35,131.467,0,0,0.214915,100,1), +(1306150,8,2282.81,-5638.38,145.358,10000,1188,0.210988,100,1), +(1307880,1,2076.79,-5733.08,100.771,1500,0,0.948046,100,1), +(1307880,2,2105.55,-5727.9,100.32,0,0,0.301308,100,1), +(1307880,3,2146.32,-5717.57,101.241,0,0,0.577768,100,1), +(1307880,4,2176.44,-5699.26,106.766,0,0,0.485091,100,1), +(1307880,5,2197.89,-5686.52,111.027,0,0,0.593476,100,1), +(1307880,6,2213.47,-5677.43,117.313,0,0,0.628819,100,1), +(1307880,7,2227.61,-5672.47,125.733,0,0,0.150511,100,1), +(1307880,8,2241.09,-5668.24,133.771,0,0,0.150512,100,1), +(1307880,9,2251.19,-5671.61,140.211,0,0,6.05592,100,1), +(1307880,10,2260.7,-5667.87,139.602,0,0,0.602901,100,1), +(1307880,11,2279.49,-5654.93,143.935,10000,1188,0.602901,100,1), +(1307910,1,2050.23,-5752.25,98.5459,1500,0,0.39066,100,1), +(1307910,2,2084.27,-5739.38,100.087,0,0,0.382198,100,1), +(1307910,3,2124.82,-5722.72,100.35,0,0,0.360207,100,1), +(1307910,4,2161.06,-5706.96,103.656,0,0,0.712851,100,1), +(1307910,5,2179.11,-5688.84,109.467,0,0,0.791391,100,1), +(1307910,6,2204.98,-5664.79,119.164,0,0,0.720705,100,1), +(1307910,7,2239.17,-5651.26,131.423,0,0,0.225904,100,1), +(1307910,8,2280.17,-5649.69,143.613,10000,1188,6.26169,100,1), +(1306530,1,2047.67,-5750.46,98.8524,1500,0,0.017199,100,1), +(1306530,2,2082.62,-5736.98,100.434,0,0,0.682447,100,1), +(1306530,3,2136.05,-5719.88,100.712,0,0,0.382425,100,1), +(1306530,4,2169.42,-5696.4,106.528,0,0,0.739781,100,1), +(1306530,5,2191.5,-5675.34,113.472,0,0,0.77748,100,1), +(1306530,6,2207.61,-5659.48,121.455,0,0,0.77748,100,1), +(1306530,7,2233.29,-5643.46,131.681,0,0,0.520655,100,1), +(1306530,8,2280.33,-5637.12,145.183,10000,1188,0.0706218,100,1), +(1306630,1,2021.283081,-5767.769043,102.069725,1500,0,0.455777,100,1), +(1306630,2,2057,-5753.29,98.0162,0,0,0.389378,100,1), 
+(1306630,3,2087.16,-5739.97,99.9644,0,0,0.440429,100,1), +(1306630,4,2123.28,-5722.97,100.322,0,0,0.45221,100,1), +(1306630,5,2151.2,-5708.97,102.328,0,0,0.602221,100,1), +(1306630,6,2178.39,-5680.06,111.376,0,0,0.821347,100,1), +(1306630,7,2201.81,-5659.44,119.913,0,0,0.711391,100,1), +(1306630,8,2228.17,-5646.67,129.653,0,0,0.414511,100,1), +(1306630,9,2278.72,-5651.94,143.384,10000,1188,5.70338,100,1), +(1306690,1,1938.300537,-5806.917480,100.326469,1500,0,0.036730,100,1), +(1306690,2,1983.64,-5790.51,100.901,0,0,0.378378,100,1), +(1306690,3,2022.91,-5769.81,102.141,0,0,0.539385,100,1), +(1306690,4,2045.72,-5758.77,98.3942,0,0,0.386232,100,1), +(1306690,5,2076.32,-5743.14,99.4291,0,0,0.456918,100,1), +(1306690,6,2109.68,-5727.88,100.259,0,0,0.421575,100,1), +(1306690,7,2143.97,-5714.37,101.331,0,0,0.496973,100,1), +(1306690,8,2175.38,-5689.75,108.822,0,0,0.680756,100,1), +(1306690,9,2199.47,-5669.2,116.494,0,0,0.716099,100,1), +(1306690,10,2222.41,-5653.69,126.852,0,0,0.567659,100,1), +(1306690,11,2252.28,-5644.34,136.182,0,0,0.200093,100,1), +(1306690,12,2282.95,-5638.85,145.296,10000,1188,0.273134,100,1), +(1308190,1,1848.26,-5817.4,100.084,1500,0,0.031333,100,1), +(1308190,2,1885.95,-5813.1,102.329,0,0,0.127844,100,1), +(1308190,3,1939.43,-5807.45,100.38,0,0,0.151406,100,1), +(1308190,4,1987.9,-5788.39,100.894,0,0,0.383098,100,1), +(1308190,5,2042.2,-5757.65,98.5925,0,0,0.44593,100,1), +(1308190,6,2089.15,-5735.17,100.139,0,0,0.438076,100,1), +(1308190,7,2128.01,-5720.92,100.474,0,0,0.28885,100,1), +(1308190,8,2170.04,-5695.56,106.79,0,0,0.650134,100,1), +(1308190,9,2196.58,-5672.94,114.801,0,0,0.712966,100,1), +(1308190,10,2235.65,-5650.01,130.788,0,0,0.461638,100,1), +(1308190,11,2287.21,-5648.74,145.223,10000,1188,6.18012,100,1), +(1306760,1,1822.585205,-5806.501465,103.471199,1500,0,6.123573,100,1), +(1306760,2,1860.7,-5814.45,100.235,0,0,6.13614,100,1), +(1306760,3,1902.01,-5807.95,100.908,0,0,0.00610447,100,1), +(1306760,4,1944.01,-5807.69,100.583,0,0,0.00610447,100,1), +(1306760,5,1989.78,-5789.95,100.913,0,0,0.482841,100,1), +(1306760,6,2025.41,-5769.5,102.127,0,0,0.593583,100,1), +(1306760,7,2037.01,-5761.67,99.3255,0,0,0.593583,100,1), +(1306760,8,2081.03,-5738.27,100.278,0,0,0.389379,100,1), +(1306760,9,2121.93,-5722.76,100.309,0,0,0.354036,100,1), +(1306760,10,2158.34,-5704.48,103.737,0,0,0.683903,100,1), +(1306760,11,2184.91,-5680.44,111.729,0,0,0.759302,100,1), +(1306760,12,2207.65,-5658.66,121.698,0,0,0.767156,100,1), +(1306760,13,2232.85,-5646.12,130.964,0,0,0.399589,100,1), +(1306760,14,2281.53,-5657.82,144.979,10000,1188,5.88874,100,1), +(1308120,1,1871.52,-5814.89,100.977,1500,0,0.149239,100,1), +(1308120,2,1911.67,-5811.62,100.215,0,0,0.166323,100,1), +(1308120,3,1978.48,-5793.12,100.877,0,0,0.434929,100,1), +(1308120,4,2036.66,-5758.3,98.9885,0,0,0.44671,100,1), +(1308120,5,2100.58,-5730.91,100.349,0,0,0.399586,100,1), +(1308120,6,2155.04,-5707.06,102.968,0,0,0.509542,100,1), +(1308120,7,2201.83,-5665.23,118.262,0,0,0.737308,100,1), +(1308120,8,2256.58,-5647.82,136.801,0,0,0.266069,100,1), +(1308120,9,2287.16,-5648.41,145.2,10000,1188,6.24295,100,1), +(1308030,1,1886.295898,-5815.069824,102.595604,1500,0,0.085570,100,1), +(1308030,2,1940.44,-5807.54,100.42,0,0,0.329044,100,1), +(1308030,3,1996.59,-5784.59,100.768,0,0,0.441355,100,1), +(1308030,4,2037.36,-5758.5,98.9918,0,0,0.517539,100,1), +(1308030,5,2095.88,-5728.01,100.327,0,0,0.156255,100,1), +(1308030,6,2142.68,-5713.79,101.296,0,0,0.442926,100,1), +(1308030,7,2192.1,-5676.26,113.266,0,0,0.717815,100,1), 
+(1308030,8,2229.92,-5650.07,129.412,0,0,0.521466,100,1), +(1308030,9,2286.4,-5645.88,144.972,10000,1188,0.0266647,100,1), +(1308060,1,1901.66,-5809.61,100.974,1500,0,0.342551,100,1), +(1308060,2,1941.71,-5807.93,100.478,0,0,0.0728286,100,1), +(1308060,3,1998.5,-5780,100.674,0,0,0.41055,100,1), +(1308060,4,2043,-5758.39,98.5521,0,0,0.430185,100,1), +(1308060,5,2100.37,-5732.46,100.333,0,0,0.426258,100,1), +(1308060,6,2154.27,-5707.19,102.864,0,0,0.481236,100,1), +(1308060,7,2193.28,-5674.47,113.916,0,0,0.650096,100,1), +(1308060,8,2232.26,-5647.25,130.574,0,0,0.204775,100,1), +(1308060,9,2297.9,-5651.41,147.427,10000,1188,6.19931,100,1), +(1306710,1,1919.796875,-5810.199707,100.092346,1500,0,0.194942,100,1), +(1306710,2,1984.03,-5789.59,100.889,0,0,0.472187,100,1), +(1306710,3,2039.46,-5758.65,98.8531,0,0,0.511457,100,1), +(1306710,4,2097.4,-5732.33,100.316,0,0,0.319035,100,1), +(1306710,5,2151.62,-5712.35,101.993,0,0,0.57429,100,1), +(1306710,6,2176.82,-5720.66,102.775,10000,1188,5.69509,100,1), +(1307890,1,2062.375,-5827.616211,101.632080,1500,0,6.049894,100,1), +(1307890,2,2080.54,-5829.92,102.423,0,0,5.93994,100,1), +(1307890,3,2115.34,-5840.62,102.029,0,0,5.92109,100,1), +(1307890,4,2158.14,-5843.73,102.082,0,0,6.26274,100,1), +(1307890,5,2229.69,-5844.69,101.44,10000,1188,0.0463068,100,1), +(1306620,1,2062.264404,-5863.437988,103.230919,1500,0,0.077718,100,1), +(1306620,2,2086.83,-5858.89,102.861,0,0,0.160184,100,1), +(1306620,3,2132.76,-5855.41,101.544,0,0,6.21011,100,1), +(1306620,4,2168.02,-5845.59,101.368,0,0,0.292129,100,1), +(1306620,5,2251.48,-5838.18,101.258,10000,1188,0.190027,100,1), +(1306790,1,1847.384644,-5861.911621,102.175438,1500,0,6.141752,100,1), +(1306790,2,1891.38,-5854.81,102.046,0,0,0.11303,100,1), +(1306790,3,1949.22,-5847.57,100.357,0,0,0.0109272,100,1), +(1306790,4,1995.6,-5848.39,100.826,0,0,6.18809,100,1), +(1306790,5,2053.46,-5857.73,103.066,0,0,0.124811,100,1), +(1306790,6,2094.38,-5854.87,103.519,0,0,0.128738,100,1), +(1306790,7,2155.67,-5844.05,102.032,0,0,0.0384166,100,1), +(1306790,8,2220.64,-5849.55,101.569,10000,1188,6.14489,100,1), +(1308110,1,1872.653320,-5855.841309,102.853523,1500,0,0.010887,100,1), +(1308110,2,1945.15,-5846.42,100.348,0,0,0.152258,100,1), +(1308110,3,2009.83,-5844.98,100.555,0,0,0.0108862,100,1), +(1308110,4,2081.97,-5842.18,103.485,0,0,0.0462298,100,1), +(1308110,5,2156.12,-5816.6,101.968,0,0,0.305411,100,1), +(1308110,6,2206.36,-5808.57,101.34,10000,1188,0.219017,100,1), +(1308090,1,1887.613525,-5854.142578,102.406212,1500,0,0.058871,100,1), +(1308090,2,1946.51,-5847.73,100.335,0,0,0.180608,100,1), +(1308090,3,2008.43,-5844.67,100.563,0,0,0.0313824,100,1), +(1308090,4,2073.83,-5841.62,103.163,0,0,0.0235284,100,1), +(1308090,5,2143.41,-5819.68,100.946,0,0,0.28271,100,1), +(1308090,6,2205.36,-5821.85,101.321,10000,1188,6.10251,100,1), +(1308050,1,1897.007813,-5852.924805,101.451141,1500,0,0.009392,100,1), +(1308050,2,1946.35,-5847.82,100.335,0,0,0.107488,100,1), +(1308050,3,2034.81,-5850.42,100.401,0,0,6.17862,100,1), +(1308050,4,2104.6,-5857.17,102.44,0,0,0.00538468,100,1), +(1308050,5,2156.86,-5859.74,101.353,0,0,6.03332,100,1), +(1308050,6,2179.67,-5864.91,101.337,0,0,0.00302753,100,1), +(1308050,7,2226.17,-5848.94,101.294,10000,1188,0.372165,100,1), +(1308040,1,1919.654297,-5846.907715,100.235977,1500,0,0.124753,100,1), +(1308040,2,1989.29,-5843.83,100.667,0,0,0.0422862,100,1), +(1308040,3,2053.05,-5844.47,101.489,0,0,6.28227,100,1), +(1308040,4,2115.32,-5842.73,102.499,0,0,0.085484,100,1), 
+(1308040,5,2173.43,-5843.51,101.35,0,0,6.24301,100,1), +(1308040,6,2218.12,-5839.82,101.329,10000,1188,0.101191,100,1), +(1308100,1,1935.207397,-5843.156250,100.510971,1500,0,0.203284,100,1), +(1308100,2,1981.56,-5842.15,100.582,0,0,6.24299,100,1), +(1308100,3,2016.35,-5842.54,100.545,0,0,6.25556,100,1), +(1308100,4,2079.2,-5845.73,102.93,0,0,6.22021,100,1), +(1308100,5,2161.35,-5845.08,101.875,0,0,0.0312731,100,1), +(1308100,6,2223.11,-5844.07,101.298,10000,1188,6.23199,100,1), +(1307540,1,2105.251221,-5900.707031,104.638618,1500,0,6.202802,100,1), +(1307540,2,2147.59,-5901.54,101.074,0,0,0.0138609,100,1), +(1307540,3,2207.9,-5900.44,100.831,0,0,0.0138609,100,1), +(1307540,4,2248.68,-5894.8,101.004,10000,1188,0.135598,100,1), +(1308010,1,1982.372559,-5917.364258,104.190681,1500,0,0.246334,100,1), +(1308010,2,2060.33,-5911.86,106.458,0,0,0.0177885,100,1), +(1308010,3,2129.89,-5903.91,102.994,0,0,0.139525,100,1), +(1308010,4,2190.71,-5899.29,101.074,0,0,6.28134,100,1), +(1308010,5,2244.32,-5898.24,100.877,10000,1188,0.0609853,100,1), +(1306770,1,1928.129761,-5933.635742,103.063942,1500,0,6.162736,100,1), +(1306770,2,1981.24,-5929.33,103.63,0,0,0.115955,100,1), +(1306770,3,2046.15,-5921.03,106.025,0,0,0.22591,100,1), +(1306770,4,2119.48,-5903.34,104.28,0,0,0.0491957,100,1), +(1306770,5,2173.87,-5897.81,100.759,0,0,0.025634,100,1), +(1306770,6,2216.33,-5899.95,100.666,0,0,6.21065,100,1), +(1306770,7,2243.88,-5903.52,100.662,10000,1188,6.15567,100,1), +(1308020,1,1888.227783,-5932.001465,104.198326,1500,0,0.181147,100,1), +(1308020,2,1957.72,-5914.99,102.575,0,0,0.224344,100,1), +(1308020,3,2025.95,-5907.58,104.953,0,0,0.0633372,100,1), +(1308020,4,2078.47,-5904.05,105.728,0,0,0.0358482,100,1), +(1308020,5,2141.54,-5898.15,102.3,0,0,0.0162122,100,1), +(1308020,6,2192.28,-5899.28,101.138,0,0,6.18552,100,1), +(1308020,7,2245.25,-5899.67,100.817,10000,1188,6.26406,100,1), +(1308130,1,1887.905151,-5911.649414,102.921516,1500,0,0.015411,100,1), +(1308130,2,1950.41,-5906.33,101.542,0,0,0.125367,100,1), +(1308130,3,2024.78,-5907.01,104.901,0,0,6.27111,100,1), +(1308130,4,2070.91,-5897.83,104.986,0,0,6.1965,100,1), +(1308130,5,2126.33,-5901.08,103.446,0,0,0.0193386,100,1), +(1308130,6,2249.12,-5903.34,100.523,10000,1188,6.22398,100,1), +(1306750,1,1846.163696,-5901.961426,104.819687,1500,0,0.024871,100,1), +(1306750,2,1894.25,-5906.52,102.022,0,0,6.17061,100,1), +(1306750,3,1961.34,-5907.78,102.362,0,0,6.257,100,1), +(1306750,4,2031.98,-5908.53,105.049,0,0,0.0405757,100,1), +(1306750,5,2098.74,-5905.52,105.628,0,0,6.28056,100,1), +(1306750,6,2160.55,-5915.42,100.01,0,0,6.0528,100,1), +(1306750,7,2196.82,-5919.73,101.013,0,0,0.0829842,100,1), +(1306750,8,2250.08,-5905.68,100.325,10000,1188,0.42149,100,1), +(1308250,1,1860.970825,-5927.291504,104.586868,1500,0,0.181141,100,1), +(1308250,2,1919.96,-5923.4,102.163,0,0,0.137944,100,1), +(1308250,3,1994.91,-5908.86,103.971,0,0,0.102601,100,1), +(1308250,4,2050.26,-5906.36,105.613,0,0,0.122236,100,1), +(1308250,5,2112.05,-5908.49,104.778,0,0,0.00835337,100,1), +(1308250,6,2173.17,-5899.52,100.583,0,0,0.0829662,100,1), +(1308250,7,2251.93,-5902.93,100.478,10000,1188,6.20122,100,1); +-- Ballista defenders +DELETE FROM `creature_addon` WHERE `guid` IN (130630,130625,130759,130629,130631,130628,130627,130626,130760,130710,130725,130699,130583,130575,130643,130633,130638,130764,130767,130611,130639,130766,130774); +INSERT INTO `creature_addon` (`guid`,`path_id`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(130630,0,0,1,333,""), +(130625,0,0,1,333,""),
+(130759,0,0,1,333,""), +(130629,0,0,1,333,""), +(130631,0,0,1,333,""), +(130628,0,0,1,333,""), +(130627,0,0,1,333,""), +(130626,0,0,1,333,""), +(130760,0,0,1,333,""), +(130710,0,0,1,333,""), +(130725,0,0,1,333,""), +(130699,0,0,1,333,""), +(130583,0,0,1,333,""), +(130575,0,0,1,333,""), +(130643,0,0,1,333,""), +(130633,0,0,1,333,""), +(130638,0,0,1,333,""), +(130764,0,0,1,333,""), +(130767,0,0,1,333,""), +(130611,0,0,1,333,""), +(130639,0,0,1,333,""), +(130766,0,0,1,333,""), +(130774,0,0,1,333,""); +-- Citizen of Havenshire -- Phase 1 +DELETE FROM `creature_addon` WHERE `guid` IN (SELECT `guid` FROM `creature` WHERE `id` = 28576); +DELETE FROM `creature_addon` WHERE `guid` IN (SELECT `guid` FROM `creature` WHERE `id` = 28577); +DELETE FROM `creature_template_addon` WHERE `entry` IN (28576,28577); +INSERT INTO `creature_template_addon` (`entry`, `path_id`, `bytes1`, `bytes2`, `emote`, `auras`) VALUES +(28576,0,0,1,0,""), +(28577,0,0,1,0,""); diff --git a/sql/updates/world/3.3.5/2017_12_05_01_world.sql b/sql/updates/world/3.3.5/2017_12_05_01_world.sql new file mode 100644 index 00000000000..e990dd8e848 --- /dev/null +++ b/sql/updates/world/3.3.5/2017_12_05_01_world.sql @@ -0,0 +1,3 @@ +-- +DELETE FROM `smart_scripts` WHERE `entryorguid`=22091 AND `source_type`=0; +UPDATE `creature_template` SET `AIName`="" WHERE `entry`=22091; diff --git a/sql/updates/world/3.3.5/2017_12_05_02_world.sql b/sql/updates/world/3.3.5/2017_12_05_02_world.sql new file mode 100644 index 00000000000..0a278bd33eb --- /dev/null +++ b/sql/updates/world/3.3.5/2017_12_05_02_world.sql @@ -0,0 +1,2 @@ +-- +UPDATE `creature_template` SET `InhabitType`=3 WHERE `entry`=18877; diff --git a/src/common/Collision/Management/MMapFactory.cpp b/src/common/Collision/Management/MMapFactory.cpp index 02295c2b2db..6834756dca6 100644 --- a/src/common/Collision/Management/MMapFactory.cpp +++ b/src/common/Collision/Management/MMapFactory.cpp @@ -41,4 +41,4 @@ namespace MMAP g_MMapManager = nullptr; } } -}
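
Before moving into the core changes below, a note on the waypoint SQL above: each `waypoint_data` row is one node of a scripted path, and the `creature_addon` rows pair creature guid N with path id N*10 (e.g. guid 130662 with path 1306620). A rough C++ sketch of what one row carries; the struct is illustrative only, with field names mirroring the SQL columns, not the project's actual definition:

    #include <cstdint>

    // Illustrative stand-in for a waypoint node; mirrors the column order of
    // the INSERT above: id, point, position_x/y/z, delay, action,
    // orientation, action_chance, move_type.
    struct WaypointNodeSketch
    {
        uint32_t pathId;        // e.g. 1308200 (guid * 10 by the convention above)
        uint32_t point;         // ordinal within the path
        float x, y, z;          // position_x / position_y / position_z
        uint32_t delayMs;       // pause at the node (1500 at spawn, 10000 at the end)
        uint32_t action;        // script action on arrival (1188 at each final node)
        float orientation;
        uint8_t actionChance;   // 100 = always fire the action
        uint8_t moveType;       // 1 = run
    };

    // First node of path 1308200 from the INSERT above:
    WaypointNodeSketch const firstNode =
        { 1308200, 1, 2110.63f, -5730.45f, 100.176f, 1500, 0, 4.8397f, 100, 1 };
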
\ No newline at end of file +} diff --git a/src/common/Collision/Management/MMapFactory.h b/src/common/Collision/Management/MMapFactory.h index 4aa39c6ffcc..770cec1bf1a 100644 --- a/src/common/Collision/Management/MMapFactory.h +++ b/src/common/Collision/Management/MMapFactory.h @@ -47,4 +47,3 @@ namespace MMAP } #endif - diff --git a/src/common/Collision/Management/MMapManager.h b/src/common/Collision/Management/MMapManager.h index e55a9a3a9d0..296eb813206 100644 --- a/src/common/Collision/Management/MMapManager.h +++ b/src/common/Collision/Management/MMapManager.h @@ -86,4 +86,4 @@ namespace MMAP }; } -#endif
\ No newline at end of file +#endif diff --git a/src/common/Collision/Maps/MapDefines.h b/src/common/Collision/Maps/MapDefines.h index 3dd19735ff9..ad7bdd5fddc 100644 --- a/src/common/Collision/Maps/MapDefines.h +++ b/src/common/Collision/Maps/MapDefines.h @@ -22,7 +22,7 @@ #include "DetourNavMesh.h" const uint32 MMAP_MAGIC = 0x4d4d4150; // 'MMAP' -#define MMAP_VERSION 6 +#define MMAP_VERSION 7 struct MmapTileHeader { diff --git a/src/common/Collision/VMapDefinitions.h b/src/common/Collision/VMapDefinitions.h index d5beac48a3c..5af52770dd3 100644 --- a/src/common/Collision/VMapDefinitions.h +++ b/src/common/Collision/VMapDefinitions.h @@ -25,8 +25,8 @@ namespace VMAP { - const char VMAP_MAGIC[] = "VMAP_4.3"; - const char RAW_VMAP_MAGIC[] = "VMAP043"; // used in extracted vmap files with raw data + const char VMAP_MAGIC[] = "VMAP_4.4"; + const char RAW_VMAP_MAGIC[] = "VMAP044"; // used in extracted vmap files with raw data const char GAMEOBJECT_MODELS[] = "GameObjectModels.dtree"; // defined in TileAssembler.cpp currently... diff --git a/src/common/Common.cpp b/src/common/Common.cpp index 9e361c35e11..a41cf23fadb 100644 --- a/src/common/Common.cpp +++ b/src/common/Common.cpp @@ -39,4 +39,3 @@ LocaleConstant GetLocaleByName(const std::string& name) return LOCALE_enUS; // including enGB case } - diff --git a/src/common/Cryptography/Authentication/AuthCrypt.cpp b/src/common/Cryptography/Authentication/AuthCrypt.cpp index 64d29782424..2b6eb3a5229 100644 --- a/src/common/Cryptography/Authentication/AuthCrypt.cpp +++ b/src/common/Cryptography/Authentication/AuthCrypt.cpp @@ -72,4 +72,3 @@ void AuthCrypt::EncryptSend(uint8 *data, size_t len) _serverEncrypt.UpdateData(len, data); } - diff --git a/src/common/Cryptography/BigNumber.cpp b/src/common/Cryptography/BigNumber.cpp index c67e8c8fbbc..93463fedbd7 100644 --- a/src/common/Cryptography/BigNumber.cpp +++ b/src/common/Cryptography/BigNumber.cpp @@ -209,4 +209,3 @@ std::string BigNumber::AsDecStr() const OPENSSL_free(ch); return ret; } - diff --git a/src/common/Cryptography/BigNumber.h b/src/common/Cryptography/BigNumber.h index 8936ffffe53..0e68a760497 100644 --- a/src/common/Cryptography/BigNumber.h +++ b/src/common/Cryptography/BigNumber.h @@ -99,4 +99,3 @@ class TC_COMMON_API BigNumber }; #endif - diff --git a/src/common/Cryptography/HMACSHA1.h b/src/common/Cryptography/HMACSHA1.h index 972c9b02012..6b66a2a78d9 100644 --- a/src/common/Cryptography/HMACSHA1.h +++ b/src/common/Cryptography/HMACSHA1.h @@ -44,4 +44,3 @@ class TC_COMMON_API HmacHash uint8 m_digest[SHA_DIGEST_LENGTH]; }; #endif - diff --git a/src/common/Debugging/WheatyExceptionReport.cpp b/src/common/Debugging/WheatyExceptionReport.cpp index e818df4e8fb..f541efcc001 100644 --- a/src/common/Debugging/WheatyExceptionReport.cpp +++ b/src/common/Debugging/WheatyExceptionReport.cpp @@ -1028,7 +1028,7 @@ bool logChildren) ULONG64 length; SymGetTypeInfo(m_hProcess, modBase, innerTypeID, TI_GET_LENGTH, &length); char buffer2[50]; - FormatOutputValue(buffer2, basicType, length, (PVOID)address, sizeof(buffer)); + FormatOutputValue(buffer2, basicType, length, (PVOID)address, sizeof(buffer2)); symbolDetails.top().Value = buffer2; } bHandled = true; diff --git a/src/common/Debugging/WheatyExceptionReport.h b/src/common/Debugging/WheatyExceptionReport.h index 2fee14708f2..6a3560f1993 100644 --- a/src/common/Debugging/WheatyExceptionReport.h +++ b/src/common/Debugging/WheatyExceptionReport.h @@ -212,4 +212,3 @@ class WheatyExceptionReport extern WheatyExceptionReport g_WheatyExceptionReport; // 
global instance of class #endif // _WIN32 #endif // _WHEATYEXCEPTIONREPORT_ - diff --git a/src/common/Metric/Metric.cpp b/src/common/Metric/Metric.cpp index 10f0d13b688..e208cdc4aec 100644 --- a/src/common/Metric/Metric.cpp +++ b/src/common/Metric/Metric.cpp @@ -217,7 +217,12 @@ void Metric::ForceSend() { // Send what's queued only if io_service is stopped (so only on shutdown) if (_enabled && _batchTimer->get_io_service().stopped()) + { + _enabled = false; SendBatch(); + _batchTimer->cancel(); + _overallStatusTimer->cancel(); + } } void Metric::ScheduleOverallStatusLog() diff --git a/src/common/Utilities/ByteConverter.h b/src/common/Utilities/ByteConverter.h index 29d7497f242..18047d0d3a5 100644 --- a/src/common/Utilities/ByteConverter.h +++ b/src/common/Utilities/ByteConverter.h @@ -65,4 +65,3 @@ inline void EndianConvertReverse(uint8&) { } inline void EndianConvertReverse( int8&) { } #endif - diff --git a/src/common/Utilities/EventProcessor.cpp b/src/common/Utilities/EventProcessor.cpp index 0149f222da8..eda5c5ed5ac 100644 --- a/src/common/Utilities/EventProcessor.cpp +++ b/src/common/Utilities/EventProcessor.cpp @@ -111,29 +111,24 @@ void EventProcessor::KillAllEvents(bool force) m_events.clear(); } -void EventProcessor::AddEvent(BasicEvent* Event, uint64 e_time, bool set_addtime) +void EventProcessor::AddEvent(BasicEvent* event, uint64 e_time, bool set_addtime) { if (set_addtime) - Event->m_addTime = m_time; - Event->m_execTime = e_time; - m_events.insert(std::pair<uint64, BasicEvent*>(e_time, Event)); + event->m_addTime = m_time; + event->m_execTime = e_time; + m_events.insert(std::pair<uint64, BasicEvent*>(e_time, event)); } -void EventProcessor::ModifyEventTime(BasicEvent* Event, uint64 newTime) +void EventProcessor::ModifyEventTime(BasicEvent* event, uint64 newTime) { for (auto itr = m_events.begin(); itr != m_events.end(); ++itr) { - if (itr->second != Event) + if (itr->second != event) continue; - Event->m_execTime = newTime; + event->m_execTime = newTime; m_events.erase(itr); - m_events.insert(std::pair<uint64, BasicEvent*>(newTime, Event)); + m_events.insert(std::pair<uint64, BasicEvent*>(newTime, event)); break; } } - -uint64 EventProcessor::CalculateTime(uint64 t_offset) const -{ - return(m_time + t_offset); -} diff --git a/src/common/Utilities/EventProcessor.h b/src/common/Utilities/EventProcessor.h index 9a356b0e3f5..488d4a98649 100644 --- a/src/common/Utilities/EventProcessor.h +++ b/src/common/Utilities/EventProcessor.h @@ -20,6 +20,7 @@ #define __EVENTPROCESSOR_H #include "Define.h" +#include "Duration.h" #include <map> class EventProcessor; @@ -76,9 +77,10 @@ class TC_COMMON_API EventProcessor void Update(uint32 p_time); void KillAllEvents(bool force); - void AddEvent(BasicEvent* Event, uint64 e_time, bool set_addtime = true); - void ModifyEventTime(BasicEvent* Event, uint64 newTime); - uint64 CalculateTime(uint64 t_offset) const; + void AddEvent(BasicEvent* event, uint64 e_time, bool set_addtime = true); + void AddEventAtOffset(BasicEvent* event, Milliseconds const& offset) { AddEvent(event, CalculateTime(offset.count())); } + void ModifyEventTime(BasicEvent* event, uint64 newTime); + uint64 CalculateTime(uint64 t_offset) const { return m_time + t_offset; } protected: uint64 m_time; diff --git a/src/server/database/Database/Field.h b/src/server/database/Database/Field.h index f7f8397b761..d0df441b608 100644 --- a/src/server/database/Database/Field.h +++ b/src/server/database/Database/Field.h @@ -143,4 +143,3 @@ class TC_DATABASE_API Field }; #endif - diff --git 
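
A note on the EventProcessor hunk above: the out-of-line CalculateTime definition moves inline into the header, and AddEventAtOffset adds a duration-based entry point so call sites can pass a std::chrono::milliseconds offset instead of computing absolute times by hand. A minimal self-contained sketch of the pattern, with stand-in types rather than the real BasicEvent:

    #include <chrono>
    #include <cstdint>
    #include <map>
    #include <utility>

    struct EventSketch { };  // stand-in for BasicEvent

    struct EventProcessorSketch
    {
        uint64_t m_time = 0;  // advanced by Update(diff) in the real class
        std::multimap<uint64_t, EventSketch*> m_events;

        uint64_t CalculateTime(uint64_t offset) const { return m_time + offset; }

        void AddEvent(EventSketch* event, uint64_t execTime)
        {
            m_events.insert(std::make_pair(execTime, event));
        }

        // The new convenience overload: absolute execution time derived
        // from a relative duration.
        void AddEventAtOffset(EventSketch* event, std::chrono::milliseconds offset)
        {
            AddEvent(event, CalculateTime(uint64_t(offset.count())));
        }
    };
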
a/src/server/database/Database/Implementation/CharacterDatabase.cpp b/src/server/database/Database/Implementation/CharacterDatabase.cpp index 398973a7e85..33b691efaf7 100644 --- a/src/server/database/Database/Implementation/CharacterDatabase.cpp +++ b/src/server/database/Database/Implementation/CharacterDatabase.cpp @@ -127,6 +127,9 @@ void CharacterDatabaseConnection::DoPrepareStatements() PrepareStatement(CHAR_SEL_AUCTIONS, "SELECT id, houseid, itemguid, itemEntry, count, itemowner, buyoutprice, time, buyguid, lastbid, startbid, deposit FROM auctionhouse ah INNER JOIN item_instance ii ON ii.guid = ah.itemguid", CONNECTION_SYNCH); PrepareStatement(CHAR_INS_AUCTION, "INSERT INTO auctionhouse (id, houseid, itemguid, itemowner, buyoutprice, time, buyguid, lastbid, startbid, deposit) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", CONNECTION_ASYNC); PrepareStatement(CHAR_DEL_AUCTION, "DELETE FROM auctionhouse WHERE id = ?", CONNECTION_ASYNC); + PrepareStatement(CHAR_SEL_AUCTION_BIDDERS, "SELECT id, bidderguid FROM auctionbidders", CONNECTION_SYNCH); + PrepareStatement(CHAR_INS_AUCTION_BIDDERS, "INSERT IGNORE INTO auctionbidders (id, bidderguid) VALUES (?, ?)", CONNECTION_ASYNC); + PrepareStatement(CHAR_DEL_AUCTION_BIDDERS, "DELETE FROM auctionbidders WHERE id = ?", CONNECTION_ASYNC); PrepareStatement(CHAR_UPD_AUCTION_BID, "UPDATE auctionhouse SET buyguid = ?, lastbid = ? WHERE id = ?", CONNECTION_ASYNC); PrepareStatement(CHAR_INS_MAIL, "INSERT INTO mail(id, messageType, stationery, mailTemplateId, sender, receiver, subject, body, has_items, expire_time, deliver_time, money, cod, checked) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", CONNECTION_ASYNC); PrepareStatement(CHAR_DEL_MAIL_BY_ID, "DELETE FROM mail WHERE id = ?", CONNECTION_ASYNC); diff --git a/src/server/database/Database/Implementation/CharacterDatabase.h b/src/server/database/Database/Implementation/CharacterDatabase.h index 8f59059abc0..8803ab3c6b3 100644 --- a/src/server/database/Database/Implementation/CharacterDatabase.h +++ b/src/server/database/Database/Implementation/CharacterDatabase.h @@ -113,6 +113,9 @@ enum CharacterDatabaseStatements : uint32 CHAR_DEL_AUCTION, CHAR_UPD_AUCTION_BID, CHAR_SEL_AUCTIONS, + CHAR_SEL_AUCTION_BIDDERS, + CHAR_INS_AUCTION_BIDDERS, + CHAR_DEL_AUCTION_BIDDERS, CHAR_INS_MAIL, CHAR_DEL_MAIL_BY_ID, CHAR_INS_MAIL_ITEM, diff --git a/src/server/game/AI/CoreAI/GuardAI.cpp b/src/server/game/AI/CoreAI/GuardAI.cpp index 7f65af41d63..acc2e69ee06 100644 --- a/src/server/game/AI/CoreAI/GuardAI.cpp +++ b/src/server/game/AI/CoreAI/GuardAI.cpp @@ -67,9 +67,7 @@ void GuardAI::EnterEvadeMode(EvadeReason /*why*/) me->GetThreatManager().ClearAllThreat(); me->CombatStop(true); - // Remove ChaseMovementGenerator from MotionMaster stack list, and add HomeMovementGenerator instead - if (me->GetMotionMaster()->GetCurrentMovementGeneratorType() == CHASE_MOTION_TYPE) - me->GetMotionMaster()->MoveTargetedHome(); + me->GetMotionMaster()->MoveTargetedHome(); } void GuardAI::JustDied(Unit* killer) diff --git a/src/server/game/AI/CoreAI/GuardAI.h b/src/server/game/AI/CoreAI/GuardAI.h index 77b83340c91..ebb65f3e5ea 100644 --- a/src/server/game/AI/CoreAI/GuardAI.h +++ b/src/server/game/AI/CoreAI/GuardAI.h @@ -36,4 +36,3 @@ class TC_GAME_API GuardAI : public ScriptedAI void JustDied(Unit* killer) override; }; #endif - diff --git a/src/server/game/AI/CoreAI/PassiveAI.h b/src/server/game/AI/CoreAI/PassiveAI.h index 277d52f2ec9..687434559af 100644 --- a/src/server/game/AI/CoreAI/PassiveAI.h +++ b/src/server/game/AI/CoreAI/PassiveAI.h @@ 
-86,4 +86,3 @@ class TC_GAME_API TriggerAI : public NullCreatureAI }; #endif - diff --git a/src/server/game/AI/CoreAI/PetAI.h b/src/server/game/AI/CoreAI/PetAI.h index 7dda6a38644..bf62aff3015 100644 --- a/src/server/game/AI/CoreAI/PetAI.h +++ b/src/server/game/AI/CoreAI/PetAI.h @@ -71,4 +71,3 @@ class TC_GAME_API PetAI : public CreatureAI void ClearCharmInfoFlags(); }; #endif - diff --git a/src/server/game/AI/CoreAI/ReactorAI.h b/src/server/game/AI/CoreAI/ReactorAI.h index db1f60fc263..570e74372bd 100644 --- a/src/server/game/AI/CoreAI/ReactorAI.h +++ b/src/server/game/AI/CoreAI/ReactorAI.h @@ -33,4 +33,3 @@ class TC_GAME_API ReactorAI : public CreatureAI static int32 Permissible(Creature const* creature); }; #endif - diff --git a/src/server/game/AI/CoreAI/TotemAI.h b/src/server/game/AI/CoreAI/TotemAI.h index 31fb68c6f60..60f27f7721b 100644 --- a/src/server/game/AI/CoreAI/TotemAI.h +++ b/src/server/game/AI/CoreAI/TotemAI.h @@ -42,4 +42,3 @@ class TC_GAME_API TotemAI : public CreatureAI ObjectGuid i_victimGuid; }; #endif - diff --git a/src/server/game/AI/CreatureAIRegistry.cpp b/src/server/game/AI/CreatureAIRegistry.cpp index a89c410a93b..4e638a3bed1 100644 --- a/src/server/game/AI/CreatureAIRegistry.cpp +++ b/src/server/game/AI/CreatureAIRegistry.cpp @@ -59,4 +59,3 @@ namespace AIRegistry (new MovementGeneratorFactory<WaypointMovementGenerator<Creature>>(WAYPOINT_MOTION_TYPE))->RegisterSelf(); } } - diff --git a/src/server/game/AI/CreatureAIRegistry.h b/src/server/game/AI/CreatureAIRegistry.h index 0917099890b..1a62699af63 100644 --- a/src/server/game/AI/CreatureAIRegistry.h +++ b/src/server/game/AI/CreatureAIRegistry.h @@ -24,4 +24,3 @@ namespace AIRegistry void Initialize(void); } #endif - diff --git a/src/server/game/AI/CreatureAISelector.h b/src/server/game/AI/CreatureAISelector.h index aeeb60fdca0..501a39eaa87 100644 --- a/src/server/game/AI/CreatureAISelector.h +++ b/src/server/game/AI/CreatureAISelector.h @@ -33,4 +33,3 @@ namespace FactorySelector TC_GAME_API GameObjectAI* SelectGameObjectAI(GameObject* go); } #endif - diff --git a/src/server/game/AuctionHouse/AuctionHouseMgr.cpp b/src/server/game/AuctionHouse/AuctionHouseMgr.cpp index 1dabff8a76e..d4faa188493 100644 --- a/src/server/game/AuctionHouse/AuctionHouseMgr.cpp +++ b/src/server/game/AuctionHouse/AuctionHouseMgr.cpp @@ -355,21 +355,37 @@ void AuctionHouseMgr::LoadAuctions() uint32 oldMSTime = getMSTime(); PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_SEL_AUCTIONS); - PreparedQueryResult result = CharacterDatabase.Query(stmt); + PreparedQueryResult resultAuctions = CharacterDatabase.Query(stmt); - if (!result) + if (!resultAuctions) { TC_LOG_INFO("server.loading", ">> Loaded 0 auctions. 
DB table `auctionhouse` is empty."); return; } - uint32 count = 0; + // parse bidder list + std::unordered_map<uint32, std::unordered_set<ObjectGuid>> biddersByAuction; + PreparedStatement* stmt2 = CharacterDatabase.GetPreparedStatement(CHAR_SEL_AUCTION_BIDDERS); + uint32 countBidders = 0; + if (PreparedQueryResult resultBidders = CharacterDatabase.Query(stmt2)) + { + do + { + Field* fields = resultBidders->Fetch(); + biddersByAuction[fields[0].GetUInt32()].insert(ObjectGuid::Create<HighGuid::Player>(fields[1].GetUInt32())); + ++countBidders; + } + while (resultBidders->NextRow()); + } + + // parse auctions from db + uint32 countAuctions = 0; SQLTransaction trans = CharacterDatabase.BeginTransaction(); do { - Field* fields = result->Fetch(); + Field* fields = resultAuctions->Fetch(); AuctionEntry* aItem = new AuctionEntry(); if (!aItem->LoadFromDB(fields)) @@ -379,14 +395,17 @@ void AuctionHouseMgr::LoadAuctions() continue; } + auto it = biddersByAuction.find(aItem->Id); + if (it != biddersByAuction.end()) + aItem->bidders = std::move(it->second); + GetAuctionsMapByHouseId(aItem->houseId)->AddAuction(aItem); - ++count; - } while (result->NextRow()); + ++countAuctions; + } while (resultAuctions->NextRow()); CharacterDatabase.CommitTransaction(trans); - TC_LOG_INFO("server.loading", ">> Loaded %u auctions in %u ms", count, GetMSTimeDiffToNow(oldMSTime)); - + TC_LOG_INFO("server.loading", ">> Loaded %u auctions with %u bidders in %u ms", countAuctions, countBidders, GetMSTimeDiffToNow(oldMSTime)); } void AuctionHouseMgr::AddAItem(Item* it) @@ -656,7 +675,7 @@ void AuctionHouseObject::BuildListBidderItems(WorldPacket& data, Player* player, for (AuctionEntryMap::const_iterator itr = AuctionsMap.begin(); itr != AuctionsMap.end(); ++itr) { AuctionEntry* Aentry = itr->second; - if (Aentry && Aentry->bidder == player->GetGUID().GetCounter()) + if (Aentry && Aentry->bidders.find(player->GetGUID()) != Aentry->bidders.end()) { if (itr->second->BuildAuctionInfo(data)) ++count; @@ -863,7 +882,13 @@ uint32 AuctionEntry::GetAuctionOutBid() const void AuctionEntry::DeleteFromDB(SQLTransaction& trans) const { - PreparedStatement* stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_AUCTION); + PreparedStatement* stmt; + + stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_AUCTION); + stmt->setUInt32(0, Id); + trans->Append(stmt); + + stmt = CharacterDatabase.GetPreparedStatement(CHAR_DEL_AUCTION_BIDDERS); stmt->setUInt32(0, Id); trans->Append(stmt); } @@ -913,6 +938,7 @@ bool AuctionEntry::LoadFromDB(Field* fields) TC_LOG_ERROR("misc", "Auction %u has not a existing item : %u", Id, itemGUIDLow); return false; } + return true; } std::string AuctionEntry::BuildAuctionMailSubject(MailAuctionAnswers response) const diff --git a/src/server/game/AuctionHouse/AuctionHouseMgr.h b/src/server/game/AuctionHouse/AuctionHouseMgr.h index 2fad4264d15..3c2080b1ad5 100644 --- a/src/server/game/AuctionHouse/AuctionHouseMgr.h +++ b/src/server/game/AuctionHouse/AuctionHouseMgr.h @@ -88,6 +88,7 @@ struct TC_GAME_API AuctionEntry ObjectGuid::LowType bidder; uint32 deposit; //deposit can be calculated only when creating auction uint32 etime; + std::unordered_set<ObjectGuid> bidders; AuctionHouseEntry const* auctionHouseEntry; // in AuctionHouse.dbc // helpers @@ -164,7 +165,6 @@ class TC_GAME_API AuctionHouseMgr AuctionHouseObject* GetAuctionsMap(uint32 factionTemplateId); AuctionHouseObject* GetAuctionsMapByHouseId(uint8 auctionHouseId); - AuctionHouseObject* GetBidsMap(uint32 factionTemplateId); Item* 
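
The auction loading above now runs two synchronous queries: one over `auctionhouse` and one over the new `auctionbidders` table, with the bidder rows folded into a map keyed by auction id so each AuctionEntry can take its whole set with a single lookup and a move. A compact sketch of that grouping step, with ObjectGuid reduced to a raw uint64_t for illustration:

    #include <cstdint>
    #include <unordered_map>
    #include <unordered_set>
    #include <utility>
    #include <vector>

    using BidderMap = std::unordered_map<uint32_t, std::unordered_set<uint64_t>>;

    // rows: the (id, bidderguid) pairs returned by CHAR_SEL_AUCTION_BIDDERS.
    BidderMap GroupBidders(std::vector<std::pair<uint32_t, uint64_t>> const& rows)
    {
        BidderMap byAuction;
        for (auto const& row : rows)
            byAuction[row.first].insert(row.second);  // duplicate bidders collapse here
        return byAuction;
    }

    // Per loaded auction, as in LoadAuctions() above:
    //   auto it = byAuction.find(auctionId);
    //   if (it != byAuction.end()) bidders = std::move(it->second);
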
GetAItem(ObjectGuid::LowType id) { diff --git a/src/server/game/Battlegrounds/ArenaTeam.h b/src/server/game/Battlegrounds/ArenaTeam.h index 38f2b7daaaa..7b3087813b2 100644 --- a/src/server/game/Battlegrounds/ArenaTeam.h +++ b/src/server/game/Battlegrounds/ArenaTeam.h @@ -200,4 +200,3 @@ class TC_GAME_API ArenaTeam ArenaTeamStats Stats; }; #endif - diff --git a/src/server/game/Battlegrounds/BattlegroundMgr.cpp b/src/server/game/Battlegrounds/BattlegroundMgr.cpp index 76e3d7ea08c..e22a2ee38ee 100644 --- a/src/server/game/Battlegrounds/BattlegroundMgr.cpp +++ b/src/server/game/Battlegrounds/BattlegroundMgr.cpp @@ -1026,4 +1026,3 @@ void BattlegroundMgr::RemoveBattleground(BattlegroundTypeId bgTypeId, uint32 ins { bgDataStore[bgTypeId].m_Battlegrounds.erase(instanceId); } - diff --git a/src/server/game/Battlegrounds/Zones/BattlegroundAV.h b/src/server/game/Battlegrounds/Zones/BattlegroundAV.h index 9adaea025d3..7336caaf0d7 100644 --- a/src/server/game/Battlegrounds/Zones/BattlegroundAV.h +++ b/src/server/game/Battlegrounds/Zones/BattlegroundAV.h @@ -987,19 +987,19 @@ Position const BG_AV_CreaturePos[AV_CPLACE_MAX] = enum BG_AV_CreatureIds { - AV_NPC_A_TOWERDEFENSE = 0, // stormpike bowman - AV_NPC_A_GRAVEDEFENSE0 = 1, // stormpike Defender - AV_NPC_A_GRAVEDEFENSE1 = 2, // seasoned defender - AV_NPC_A_GRAVEDEFENSE2 = 3, // veteran defender - AV_NPC_A_GRAVEDEFENSE3 = 4, // champion defender + AV_NPC_A_GRAVEDEFENSE0 = 0, // stormpike Defender + AV_NPC_A_GRAVEDEFENSE1 = 1, // seasoned defender + AV_NPC_A_GRAVEDEFENSE2 = 2, // veteran defender + AV_NPC_A_GRAVEDEFENSE3 = 3, // champion defender + AV_NPC_A_TOWERDEFENSE = 4, // stormpike bowman AV_NPC_A_CAPTAIN = 5, // balinda AV_NPC_A_BOSS = 6, // vanndar - AV_NPC_H_TOWERDEFENSE = 7, // frostwolf bowman - AV_NPC_H_GRAVEDEFENSE0 = 8, // frostwolf guardian - AV_NPC_H_GRAVEDEFENSE1 = 9, // seasoned guardian - AV_NPC_H_GRAVEDEFENSE2 = 10, // veteran guardian - AV_NPC_H_GRAVEDEFENSE3 = 11, // champion guardian + AV_NPC_H_GRAVEDEFENSE0 = 7, // frostwolf guardian + AV_NPC_H_GRAVEDEFENSE1 = 8, // seasoned guardian + AV_NPC_H_GRAVEDEFENSE2 = 9, // veteran guardian + AV_NPC_H_GRAVEDEFENSE3 = 10, // champion guardian + AV_NPC_H_TOWERDEFENSE = 11, // frostwolf bowman AV_NPC_H_CAPTAIN = 12, // galvangar AV_NPC_H_BOSS = 13, // drek thar diff --git a/src/server/game/Battlegrounds/Zones/BattlegroundEY.h b/src/server/game/Battlegrounds/Zones/BattlegroundEY.h index 2555bc40a29..8451253dec9 100644 --- a/src/server/game/Battlegrounds/Zones/BattlegroundEY.h +++ b/src/server/game/Battlegrounds/Zones/BattlegroundEY.h @@ -432,4 +432,3 @@ class BattlegroundEY : public Battleground uint32 m_HonorTics; }; #endif - diff --git a/src/server/game/Chat/Channels/Channel.h b/src/server/game/Chat/Channels/Channel.h index 364bec73a5f..ac3fe7b747e 100644 --- a/src/server/game/Chat/Channels/Channel.h +++ b/src/server/game/Chat/Channels/Channel.h @@ -253,4 +253,3 @@ class TC_GAME_API Channel AreaTableEntry const* _zoneEntry; }; #endif - diff --git a/src/server/game/DataStores/DBCStructure.h b/src/server/game/DataStores/DBCStructure.h index 409f1ec25f7..195d51b02cc 100644 --- a/src/server/game/DataStores/DBCStructure.h +++ b/src/server/game/DataStores/DBCStructure.h @@ -1895,4 +1895,3 @@ typedef std::vector<TaxiPathNodeEntry const*> TaxiPathNodeList; typedef std::vector<TaxiPathNodeList> TaxiPathNodesByPath; #endif - diff --git a/src/server/game/DataStores/M2Stores.h b/src/server/game/DataStores/M2Stores.h index b37bff395d5..ab05aa9a29f 100644 --- a/src/server/game/DataStores/M2Stores.h 
+++ b/src/server/game/DataStores/M2Stores.h @@ -32,4 +32,4 @@ TC_GAME_API void LoadM2Cameras(std::string const& dataPath); TC_GAME_API std::vector<FlyByCamera> const* GetFlyByCameras(uint32 cinematicCameraId); -#endif
\ No newline at end of file +#endif diff --git a/src/server/game/DataStores/M2Structure.h b/src/server/game/DataStores/M2Structure.h index 332563df388..81d69b77e4b 100644 --- a/src/server/game/DataStores/M2Structure.h +++ b/src/server/game/DataStores/M2Structure.h @@ -130,4 +130,4 @@ struct M2Camera }; #pragma pack(pop) -#endif
\ No newline at end of file +#endif diff --git a/src/server/game/Entities/Creature/Creature.cpp b/src/server/game/Entities/Creature/Creature.cpp index 1501e2f2c5b..cc1188ede3e 100644 --- a/src/server/game/Entities/Creature/Creature.cpp +++ b/src/server/game/Entities/Creature/Creature.cpp @@ -513,6 +513,8 @@ bool Creature::InitEntry(uint32 entry, CreatureData const* data /*= nullptr*/) SetFloatValue(UNIT_FIELD_HOVERHEIGHT, cinfo->HoverHeight); + SetCanDualWield(cinfo->flags_extra & CREATURE_FLAG_EXTRA_USE_OFFHAND_ATTACK); + // checked at loading m_defaultMovementType = MovementGeneratorType(data ? data->movementType : cinfo->MovementType); if (!m_respawnradius && m_defaultMovementType == RANDOM_MOTION_TYPE) @@ -557,6 +559,8 @@ bool Creature::UpdateEntry(uint32 entry, CreatureData const* data /*= nullptr*/, SetUInt32Value(UNIT_DYNAMIC_FLAGS, dynamicflags); + SetCanDualWield(cInfo->flags_extra & CREATURE_FLAG_EXTRA_USE_OFFHAND_ATTACK); + SetAttackTime(BASE_ATTACK, cInfo->BaseAttackTime); SetAttackTime(OFF_ATTACK, cInfo->BaseAttackTime); SetAttackTime(RANGED_ATTACK, cInfo->RangeAttackTime); @@ -2098,101 +2102,6 @@ bool Creature::isWorldBoss() const return (GetCreatureTemplate()->type_flags & CREATURE_TYPE_FLAG_BOSS_MOB) != 0; } -SpellInfo const* Creature::reachWithSpellAttack(Unit* victim) -{ - if (!victim) - return nullptr; - - for (uint32 i=0; i < MAX_CREATURE_SPELLS; ++i) - { - if (!m_spells[i]) - continue; - SpellInfo const* spellInfo = sSpellMgr->GetSpellInfo(m_spells[i]); - if (!spellInfo) - { - TC_LOG_ERROR("entities.unit", "WORLD: unknown spell id %i", m_spells[i]); - continue; - } - - bool bcontinue = true; - for (uint32 j = 0; j < MAX_SPELL_EFFECTS; j++) - { - if ((spellInfo->Effects[j].Effect == SPELL_EFFECT_SCHOOL_DAMAGE) || - (spellInfo->Effects[j].Effect == SPELL_EFFECT_INSTAKILL) || - (spellInfo->Effects[j].Effect == SPELL_EFFECT_ENVIRONMENTAL_DAMAGE) || - (spellInfo->Effects[j].Effect == SPELL_EFFECT_HEALTH_LEECH) - ) - { - bcontinue = false; - break; - } - } - if (bcontinue) - continue; - - if (spellInfo->ManaCost > GetPower(POWER_MANA)) - continue; - float range = spellInfo->GetMaxRange(false); - float minrange = spellInfo->GetMinRange(false); - float dist = GetDistance(victim); - if (dist > range || dist < minrange) - continue; - if (spellInfo->PreventionType == SPELL_PREVENTION_TYPE_SILENCE && HasFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_SILENCED)) - continue; - if (spellInfo->PreventionType == SPELL_PREVENTION_TYPE_PACIFY && HasFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_PACIFIED)) - continue; - return spellInfo; - } - return nullptr; -} - -SpellInfo const* Creature::reachWithSpellCure(Unit* victim) -{ - if (!victim) - return nullptr; - - for (uint32 i=0; i < MAX_CREATURE_SPELLS; ++i) - { - if (!m_spells[i]) - continue; - SpellInfo const* spellInfo = sSpellMgr->GetSpellInfo(m_spells[i]); - if (!spellInfo) - { - TC_LOG_ERROR("entities.unit", "WORLD: unknown spell id %i", m_spells[i]); - continue; - } - - bool bcontinue = true; - for (uint32 j = 0; j < MAX_SPELL_EFFECTS; j++) - { - if ((spellInfo->Effects[j].Effect == SPELL_EFFECT_HEAL)) - { - bcontinue = false; - break; - } - } - if (bcontinue) - continue; - - if (spellInfo->ManaCost > GetPower(POWER_MANA)) - continue; - - float range = spellInfo->GetMaxRange(true); - float minrange = spellInfo->GetMinRange(true); - float dist = GetDistance(victim); - //if (!isInFront(victim, range) && spellInfo->AttributesEx) - // continue; - if (dist > range || dist < minrange) - continue; - if (spellInfo->PreventionType == SPELL_PREVENTION_TYPE_SILENCE && 
HasFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_SILENCED)) - continue; - if (spellInfo->PreventionType == SPELL_PREVENTION_TYPE_PACIFY && HasFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_PACIFIED)) - continue; - return spellInfo; - } - return nullptr; -} - // select nearest hostile unit within the given distance (regardless of threat list). Unit* Creature::SelectNearestTarget(float dist, bool playerOnly /* = false */) const { @@ -2372,7 +2281,7 @@ void Creature::SaveRespawnTime(uint32 forceDelay, bool savetodb) return; } - uint32 thisRespawnTime = forceDelay ? time(NULL) + forceDelay : m_respawnTime; + time_t thisRespawnTime = forceDelay ? time(NULL) + forceDelay : m_respawnTime; GetMap()->SaveRespawnTime(SPAWN_TYPE_CREATURE, m_spawnId, GetEntry(), thisRespawnTime, GetMap()->GetZoneId(GetHomePosition()), Trinity::ComputeGridCoord(GetHomePosition().GetPositionX(), GetHomePosition().GetPositionY()).GetId(), savetodb && m_creatureData && m_creatureData->dbData); } @@ -2768,7 +2677,7 @@ uint32 Creature::GetPetAutoSpellOnPos(uint8 pos) const float Creature::GetPetChaseDistance() const { - float range = MELEE_RANGE; + float range = 0.f; for (uint8 i = 0; i < GetPetAutoSpellSize(); ++i) { @@ -2778,10 +2687,8 @@ float Creature::GetPetChaseDistance() const if (SpellInfo const* spellInfo = sSpellMgr->GetSpellInfo(spellID)) { - if (spellInfo->GetRecoveryTime() == 0 && // No cooldown - spellInfo->RangeEntry->ID != 1 /*Self*/ && spellInfo->RangeEntry->ID != 2 /*Combat Range*/ && - spellInfo->GetMinRange() > range) - range = spellInfo->GetMinRange(); + if (spellInfo->GetRecoveryTime() == 0 && spellInfo->RangeEntry->ID != 1 /*Self*/ && spellInfo->RangeEntry->ID != 2 /*Combat Range*/ && spellInfo->GetMaxRange() > range) + range = spellInfo->GetMaxRange(); } } diff --git a/src/server/game/Entities/Creature/Creature.h b/src/server/game/Entities/Creature/Creature.h index 4a7c3362f30..051d0bf6f22 100644 --- a/src/server/game/Entities/Creature/Creature.h +++ b/src/server/game/Entities/Creature/Creature.h @@ -203,9 +203,6 @@ class TC_GAME_API Creature : public Unit, public GridObject<Creature>, public Ma void RemoveLootMode(uint16 lootMode) { m_LootMode &= ~lootMode; } void ResetLootMode() { m_LootMode = LOOT_MODE_DEFAULT; } - SpellInfo const* reachWithSpellAttack(Unit* victim); - SpellInfo const* reachWithSpellCure(Unit* victim); - uint32 m_spells[MAX_CREATURE_SPELLS]; bool CanStartAttack(Unit const* u, bool force) const; diff --git a/src/server/game/Entities/Creature/CreatureData.h b/src/server/game/Entities/Creature/CreatureData.h index de169c97136..f9ae6f24850 100644 --- a/src/server/game/Entities/Creature/CreatureData.h +++ b/src/server/game/Entities/Creature/CreatureData.h @@ -43,7 +43,7 @@ enum CreatureFlagsExtra : uint32 CREATURE_FLAG_EXTRA_NO_TAUNT = 0x00000100, // creature is immune to taunt auras and effect attack me CREATURE_FLAG_EXTRA_NO_MOVE_FLAGS_UPDATE = 0x00000200, // creature won't update movement flags CREATURE_FLAG_EXTRA_GHOST_VISIBILITY = 0x00000400, // creature will be only visible for dead players - CREATURE_FLAG_EXTRA_UNUSED_11 = 0x00000800, + CREATURE_FLAG_EXTRA_USE_OFFHAND_ATTACK = 0x00000800, // creature will use offhand attacks CREATURE_FLAG_EXTRA_UNUSED_12 = 0x00001000, CREATURE_FLAG_EXTRA_UNUSED_13 = 0x00002000, CREATURE_FLAG_EXTRA_WORLDEVENT = 0x00004000, // custom flag for world event creatures (left room for merging) @@ -66,10 +66,10 @@ enum CreatureFlagsExtra : uint32 CREATURE_FLAG_EXTRA_UNUSED_31 = 0x80000000, // Masks - CREATURE_FLAG_EXTRA_UNUSED = (CREATURE_FLAG_EXTRA_UNUSED_11 | 
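
Two behavioural notes on the creature hunks above. First, the previously unused extra-flag 0x00000800 becomes CREATURE_FLAG_EXTRA_USE_OFFHAND_ATTACK and is applied in both InitEntry and UpdateEntry via SetCanDualWield. Second, GetPetChaseDistance was inverted: it now seeds the range with 0 instead of melee range and keeps the largest maximum spell range rather than the largest minimum. A sketch of the corrected selection logic, with plain stand-in data instead of SpellInfo:

    #include <algorithm>
    #include <vector>

    struct AutoSpellSketch
    {
        bool onCooldown;          // GetRecoveryTime() != 0
        bool selfOrCombatRange;   // RangeEntry ID 1 (self) or 2 (combat range)
        float maxRange;           // GetMaxRange()
    };

    float PetChaseDistance(std::vector<AutoSpellSketch> const& spells)
    {
        float range = 0.f;  // 0 => no usable ranged auto-spell, chase into melee
        for (AutoSpellSketch const& spell : spells)
            if (!spell.onCooldown && !spell.selfOrCombatRange)
                range = std::max(range, spell.maxRange);
        return range;
    }
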
CREATURE_FLAG_EXTRA_UNUSED_12 | CREATURE_FLAG_EXTRA_UNUSED_13 | - CREATURE_FLAG_EXTRA_UNUSED_16 | CREATURE_FLAG_EXTRA_UNUSED_22 | CREATURE_FLAG_EXTRA_UNUSED_23 | - CREATURE_FLAG_EXTRA_UNUSED_24 | CREATURE_FLAG_EXTRA_UNUSED_25 | CREATURE_FLAG_EXTRA_UNUSED_26 | - CREATURE_FLAG_EXTRA_UNUSED_27 | CREATURE_FLAG_EXTRA_UNUSED_31), + CREATURE_FLAG_EXTRA_UNUSED = (CREATURE_FLAG_EXTRA_UNUSED_12 | CREATURE_FLAG_EXTRA_UNUSED_13 | CREATURE_FLAG_EXTRA_UNUSED_16 | + CREATURE_FLAG_EXTRA_UNUSED_22 | CREATURE_FLAG_EXTRA_UNUSED_23 | CREATURE_FLAG_EXTRA_UNUSED_24 | + CREATURE_FLAG_EXTRA_UNUSED_25 | CREATURE_FLAG_EXTRA_UNUSED_26 | CREATURE_FLAG_EXTRA_UNUSED_27 | + CREATURE_FLAG_EXTRA_UNUSED_31), CREATURE_FLAG_EXTRA_DB_ALLOWED = (0xFFFFFFFF & ~(CREATURE_FLAG_EXTRA_UNUSED | CREATURE_FLAG_EXTRA_DUNGEON_BOSS)) }; diff --git a/src/server/game/Entities/GameObject/GameObject.cpp b/src/server/game/Entities/GameObject/GameObject.cpp index ce9658ef37e..f7e3bc965d8 100644 --- a/src/server/game/Entities/GameObject/GameObject.cpp +++ b/src/server/game/Entities/GameObject/GameObject.cpp @@ -2513,7 +2513,7 @@ public: virtual G3D::Vector3 GetPosition() const override { return G3D::Vector3(_owner->GetPositionX(), _owner->GetPositionY(), _owner->GetPositionZ()); } virtual float GetOrientation() const override { return _owner->GetOrientation(); } virtual float GetScale() const override { return _owner->GetObjectScale(); } - virtual void DebugVisualizeCorner(G3D::Vector3 const& corner) const override { _owner->SummonCreature(1, corner.x, corner.y, corner.z, 0, TEMPSUMMON_MANUAL_DESPAWN); } + virtual void DebugVisualizeCorner(G3D::Vector3 const& corner) const override { const_cast<GameObject*>(_owner)->SummonCreature(1, corner.x, corner.y, corner.z, 0, TEMPSUMMON_MANUAL_DESPAWN); } private: GameObject const* _owner; diff --git a/src/server/game/Entities/Item/Container/Bag.cpp b/src/server/game/Entities/Item/Container/Bag.cpp index 847142f2ad4..dd24100c279 100644 --- a/src/server/game/Entities/Item/Container/Bag.cpp +++ b/src/server/game/Entities/Item/Container/Bag.cpp @@ -245,4 +245,3 @@ Item* Bag::GetItemByPos(uint8 slot) const return nullptr; } - diff --git a/src/server/game/Entities/Item/Container/Bag.h b/src/server/game/Entities/Item/Container/Bag.h index e39a7ed6b66..b9e814788de 100644 --- a/src/server/game/Entities/Item/Container/Bag.h +++ b/src/server/game/Entities/Item/Container/Bag.h @@ -70,4 +70,3 @@ inline Item* NewItemOrBag(ItemTemplate const* proto) return (proto->InventoryType == INVTYPE_BAG) ? 
new Bag : new Item; } #endif - diff --git a/src/server/game/Entities/Item/ItemEnchantmentMgr.cpp b/src/server/game/Entities/Item/ItemEnchantmentMgr.cpp index 1468d168ffa..09a2e171f33 100644 --- a/src/server/game/Entities/Item/ItemEnchantmentMgr.cpp +++ b/src/server/game/Entities/Item/ItemEnchantmentMgr.cpp @@ -244,4 +244,3 @@ uint32 GenerateEnchSuffixFactor(uint32 item_id) } return 0; } - diff --git a/src/server/game/Entities/Item/ItemEnchantmentMgr.h b/src/server/game/Entities/Item/ItemEnchantmentMgr.h index 9692a491c89..8cec5b25168 100644 --- a/src/server/game/Entities/Item/ItemEnchantmentMgr.h +++ b/src/server/game/Entities/Item/ItemEnchantmentMgr.h @@ -27,4 +27,3 @@ TC_GAME_API uint32 GetItemEnchantMod(int32 entry); TC_GAME_API uint32 GenerateEnchSuffixFactor(uint32 item_id); #endif - diff --git a/src/server/game/Entities/Object/Object.cpp b/src/server/game/Entities/Object/Object.cpp index 9e2546f2df7..1a94b420b4c 100644 --- a/src/server/game/Entities/Object/Object.cpp +++ b/src/server/game/Entities/Object/Object.cpp @@ -1969,13 +1969,13 @@ void WorldObject::ClearZoneScript() m_zoneScript = nullptr; } -TempSummon* WorldObject::SummonCreature(uint32 entry, Position const& pos, TempSummonType spwtype /*= TEMPSUMMON_MANUAL_DESPAWN*/, uint32 duration /*= 0*/, uint32 /*vehId = 0*/) const +TempSummon* WorldObject::SummonCreature(uint32 entry, Position const& pos, TempSummonType despawnType /*= TEMPSUMMON_MANUAL_DESPAWN*/, uint32 despawnTime /*= 0*/, uint32 /*vehId = 0*/) { if (Map* map = FindMap()) { - if (TempSummon* summon = map->SummonCreature(entry, pos, nullptr, duration, isType(TYPEMASK_UNIT) ? (Unit*)this : nullptr)) + if (TempSummon* summon = map->SummonCreature(entry, pos, nullptr, despawnTime, ToUnit())) { - summon->SetTempSummonType(spwtype); + summon->SetTempSummonType(despawnType); return summon; } } @@ -1983,17 +1983,13 @@ TempSummon* WorldObject::SummonCreature(uint32 entry, Position const& pos, TempS return nullptr; } -TempSummon* WorldObject::SummonCreature(uint32 id, float x, float y, float z, float ang /*= 0*/, TempSummonType spwtype /*= TEMPSUMMON_MANUAL_DESPAWN*/, uint32 despwtime /*= 0*/) const +TempSummon* WorldObject::SummonCreature(uint32 id, float x, float y, float z, float o /*= 0*/, TempSummonType despawnType /*= TEMPSUMMON_MANUAL_DESPAWN*/, uint32 despawnTime /*= 0*/) { if (!x && !y && !z) - { GetClosePoint(x, y, z, GetCombatReach()); - ang = GetOrientation(); - } - - Position pos; - pos.Relocate(x, y, z, ang); - return SummonCreature(id, pos, spwtype, despwtime, 0); + if (!o) + o = GetOrientation(); + return SummonCreature(id, { x,y,z,o }, despawnType, despawnTime); } GameObject* WorldObject::SummonGameObject(uint32 entry, Position const& pos, QuaternionData const& rot, uint32 respawnTime) diff --git a/src/server/game/Entities/Object/Object.h b/src/server/game/Entities/Object/Object.h index b6cee547126..2d94496e20b 100644 --- a/src/server/game/Entities/Object/Object.h +++ b/src/server/game/Entities/Object/Object.h @@ -20,6 +20,7 @@ #define _OBJECT_H #include "Common.h" +#include "Duration.h" #include "GridReference.h" #include "GridRefManager.h" #include "ModelIgnoreFlags.h" @@ -372,8 +373,9 @@ class TC_GAME_API WorldObject : public Object, public WorldLocation void ClearZoneScript(); ZoneScript* GetZoneScript() const { return m_zoneScript; } - TempSummon* SummonCreature(uint32 id, Position const& pos, TempSummonType spwtype = TEMPSUMMON_MANUAL_DESPAWN, uint32 despwtime = 0, uint32 vehId = 0) const; - TempSummon* SummonCreature(uint32 id, float x, float y, float 
z, float ang = 0, TempSummonType spwtype = TEMPSUMMON_MANUAL_DESPAWN, uint32 despwtime = 0) const; + TempSummon* SummonCreature(uint32 entry, Position const& pos, TempSummonType despawnType = TEMPSUMMON_MANUAL_DESPAWN, uint32 despawnTime = 0, uint32 vehId = 0); + TempSummon* SummonCreature(uint32 entry, Position const& pos, TempSummonType despawnType, Milliseconds const& despawnTime, uint32 vehId = 0) { return SummonCreature(entry, pos, despawnType, uint32(despawnTime.count()), vehId); } + TempSummon* SummonCreature(uint32 entry, float x, float y, float z, float o = 0, TempSummonType despawnType = TEMPSUMMON_MANUAL_DESPAWN, uint32 despawnTime = 0); GameObject* SummonGameObject(uint32 entry, Position const& pos, QuaternionData const& rot, uint32 respawnTime /* s */); GameObject* SummonGameObject(uint32 entry, float x, float y, float z, float ang, QuaternionData const& rot, uint32 respawnTime /* s */); Creature* SummonTrigger(float x, float y, float z, float ang, uint32 dur, CreatureAI* (*GetAI)(Creature*) = nullptr); diff --git a/src/server/game/Entities/Object/Updates/UpdateData.cpp b/src/server/game/Entities/Object/Updates/UpdateData.cpp index cb8ecf45e2f..1003562c3af 100644 --- a/src/server/game/Entities/Object/Updates/UpdateData.cpp +++ b/src/server/game/Entities/Object/Updates/UpdateData.cpp @@ -147,4 +147,3 @@ void UpdateData::Clear() m_outOfRangeGUIDs.clear(); m_blockCount = 0; } - diff --git a/src/server/game/Entities/Object/Updates/UpdateData.h b/src/server/game/Entities/Object/Updates/UpdateData.h index e3437595c0b..b9a4894aa6d 100644 --- a/src/server/game/Entities/Object/Updates/UpdateData.h +++ b/src/server/game/Entities/Object/Updates/UpdateData.h @@ -81,4 +81,3 @@ class UpdateData UpdateData& operator=(UpdateData const& right) = delete; }; #endif - diff --git a/src/server/game/Entities/Object/Updates/UpdateMask.h b/src/server/game/Entities/Object/Updates/UpdateMask.h index 21fe8d700f7..442314d5e25 100644 --- a/src/server/game/Entities/Object/Updates/UpdateMask.h +++ b/src/server/game/Entities/Object/Updates/UpdateMask.h @@ -122,4 +122,3 @@ class UpdateMask }; #endif - diff --git a/src/server/game/Entities/Player/CinematicMgr.h b/src/server/game/Entities/Player/CinematicMgr.h index f2c0970fb81..e9967a6df28 100644 --- a/src/server/game/Entities/Player/CinematicMgr.h +++ b/src/server/game/Entities/Player/CinematicMgr.h @@ -57,4 +57,4 @@ protected: TempSummon* m_CinematicObject; }; -#endif
\ No newline at end of file +#endif diff --git a/src/server/game/Entities/Player/Player.cpp b/src/server/game/Entities/Player/Player.cpp index 7bf4e185efd..55473c2922a 100644 --- a/src/server/game/Entities/Player/Player.cpp +++ b/src/server/game/Entities/Player/Player.cpp @@ -15824,6 +15824,7 @@ void Player::RemoveRewardedQuest(uint32 questId, bool update /*= true*/) // Remove seasonal quest also Quest const* qInfo = sObjectMgr->GetQuestTemplate(questId); + ASSERT(qInfo); if (qInfo->IsSeasonal()) { uint16 eventId = qInfo->GetEventIdForQuest(); diff --git a/src/server/game/Entities/Unit/Unit.cpp b/src/server/game/Entities/Unit/Unit.cpp index bd4c71c06e6..636a386b3b3 100644 --- a/src/server/game/Entities/Unit/Unit.cpp +++ b/src/server/game/Entities/Unit/Unit.cpp @@ -5889,9 +5889,9 @@ bool Unit::Attack(Unit* victim, bool meleeAttack) SetUInt32Value(UNIT_NPC_EMOTESTATE, EMOTE_ONESHOT_NONE); } - // delay offhand weapon attack to next attack time + // delay offhand weapon attack by 50% of the base attack time if (haveOffhandWeapon() && GetTypeId() != TYPEID_PLAYER) - resetAttackTimer(OFF_ATTACK); + setAttackTimer(OFF_ATTACK, std::max(getAttackTimer(OFF_ATTACK), getAttackTimer(BASE_ATTACK) + uint32(CalculatePct(GetFloatValue(UNIT_FIELD_BASEATTACKTIME), 50)))); if (meleeAttack) SendMeleeAttackStart(victim); diff --git a/src/server/game/Events/GameEventMgr.cpp b/src/server/game/Events/GameEventMgr.cpp index 14fdccce85e..39cb88c670b 100644 --- a/src/server/game/Events/GameEventMgr.cpp +++ b/src/server/game/Events/GameEventMgr.cpp @@ -1774,6 +1774,8 @@ void GameEventMgr::SetHolidayEventTime(GameEventData& event) timeInfo.tm_hour = (date >> 6) & 0x1F; timeInfo.tm_min = date & 0x3F; timeInfo.tm_sec = 0; + timeInfo.tm_wday = 0; + timeInfo.tm_yday = 0; timeInfo.tm_isdst = -1; tm tmCopy = timeInfo; diff --git a/src/server/game/Events/GameEventMgr.h b/src/server/game/Events/GameEventMgr.h index 4d1ce718422..0c2bb6e689c 100644 --- a/src/server/game/Events/GameEventMgr.h +++ b/src/server/game/Events/GameEventMgr.h @@ -193,4 +193,3 @@ TC_GAME_API bool IsHolidayActive(HolidayIds id); TC_GAME_API bool IsEventActive(uint16 event_id); #endif - diff --git a/src/server/game/Globals/ObjectAccessor.h b/src/server/game/Globals/ObjectAccessor.h index bc4062eed77..cd108b333d8 100644 --- a/src/server/game/Globals/ObjectAccessor.h +++ b/src/server/game/Globals/ObjectAccessor.h @@ -111,4 +111,3 @@ namespace ObjectAccessor }; #endif - diff --git a/src/server/game/Globals/ObjectMgr.cpp b/src/server/game/Globals/ObjectMgr.cpp index 2f1e4fa48f3..ddcc9e7cae4 100644 --- a/src/server/game/Globals/ObjectMgr.cpp +++ b/src/server/game/Globals/ObjectMgr.cpp @@ -808,6 +808,12 @@ void ObjectMgr::CheckCreatureTemplate(CreatureTemplate const* cInfo) ok = true; } + if (cInfo->AIName == "TotemAI") + { + TC_LOG_ERROR("sql.sql", "Creature (Entry: %u) has not-allowed `AIName` '%s' set, removing", cInfo->Entry, cInfo->AIName.c_str()); + const_cast<CreatureTemplate*>(cInfo)->AIName.clear(); + } + if (!cInfo->AIName.empty() && !sCreatureAIRegistry->HasItem(cInfo->AIName)) { TC_LOG_ERROR("sql.sql", "Creature (Entry: %u) has non-registered `AIName` '%s' set, removing", cInfo->Entry, cInfo->AIName.c_str()); @@ -5992,7 +5998,7 @@ QuestGreeting const* ObjectMgr::GetQuestGreeting(ObjectGuid guid) const if (questItr == itr->second.end()) return nullptr; - return questItr->second; + return &questItr->second; } void ObjectMgr::LoadQuestGreetings() @@ -6020,7 +6026,7 @@ void ObjectMgr::LoadQuestGreetings() uint32 id = fields[0].GetUInt32(); uint8 type = 
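
The Unit::Attack change above replaces the full off-hand reset with a stagger: the off-hand timer is pushed to at least the main-hand timer plus half of the base attack time, so the two swings interleave instead of landing together. The arithmetic as a stand-alone sketch; accessor names are stand-ins for the Unit attack-timer API:

    #include <algorithm>
    #include <cstdint>

    // offTimer / baseTimer: remaining ms on the off-hand and main-hand timers;
    // baseAttackTime: UNIT_FIELD_BASEATTACKTIME in ms.
    uint32_t StaggeredOffhandTimer(uint32_t offTimer, uint32_t baseTimer, float baseAttackTime)
    {
        uint32_t const halfSwing = uint32_t(baseAttackTime * 0.5f);  // CalculatePct(..., 50)
        return std::max(offTimer, baseTimer + halfSwing);
    }
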
fields[1].GetUInt8(); // overwrite - switch (type) + switch (type) { case 0: // Creature type = TYPEID_UNIT; @@ -6054,7 +6060,7 @@ void ObjectMgr::LoadQuestGreetings() uint32 greetEmoteDelay = fields[3].GetUInt32(); std::string greeting = fields[4].GetString(); - _questGreetingStore[type][id] = new QuestGreeting(greetEmoteType, greetEmoteDelay, greeting); + _questGreetingStore[type][id] = QuestGreeting(greetEmoteType, greetEmoteDelay, greeting); ++count; } @@ -7734,7 +7740,7 @@ void ObjectMgr::LoadQuestPOI() _questPOIStore.at(questId).POIData.QuestPOIBlobDataStats.push_back(POI); } else - TC_LOG_ERROR("server.loading", "Table quest_poi references unknown quest points for quest %u POI id %u", questId, id); + TC_LOG_ERROR("sql.sql", "Table quest_poi references unknown quest points for quest %u POI id %u", questId, id); ++count; } while (result->NextRow()); diff --git a/src/server/game/Globals/ObjectMgr.h b/src/server/game/Globals/ObjectMgr.h index 8a07723a34e..80d120a13ea 100644 --- a/src/server/game/Globals/ObjectMgr.h +++ b/src/server/game/Globals/ObjectMgr.h @@ -811,7 +811,7 @@ struct QuestGreeting : greetEmoteType(_greetEmoteType), greetEmoteDelay(_greetEmoteDelay), greeting(_greeting) { } }; -typedef std::unordered_map<uint8, std::unordered_map<uint32, QuestGreeting const*>> QuestGreetingContainer; +typedef std::unordered_map<uint8, std::unordered_map<uint32, QuestGreeting>> QuestGreetingContainer; struct GraveyardData { diff --git a/src/server/game/Grids/Cells/Cell.h b/src/server/game/Grids/Cells/Cell.h index 56aa7f794af..b238e8cf29d 100644 --- a/src/server/game/Grids/Cells/Cell.h +++ b/src/server/game/Grids/Cells/Cell.h @@ -125,4 +125,3 @@ private: }; #endif - diff --git a/src/server/game/Grids/Cells/CellImpl.h b/src/server/game/Grids/Cells/CellImpl.h index 66f1dcae78c..548d00409d3 100644 --- a/src/server/game/Grids/Cells/CellImpl.h +++ b/src/server/game/Grids/Cells/CellImpl.h @@ -255,4 +255,3 @@ inline void Cell::VisitAllObjects(float x, float y, Map* map, T& visitor, float } #endif - diff --git a/src/server/game/Grids/Dynamic/TypeContainerFunctions.h b/src/server/game/Grids/Dynamic/TypeContainerFunctions.h index 97d20922a05..e1edd28f23a 100644 --- a/src/server/game/Grids/Dynamic/TypeContainerFunctions.h +++ b/src/server/game/Grids/Dynamic/TypeContainerFunctions.h @@ -214,4 +214,3 @@ namespace Trinity //} } #endif - diff --git a/src/server/game/Grids/Dynamic/TypeContainerVisitor.h b/src/server/game/Grids/Dynamic/TypeContainerVisitor.h index 2d08da778e9..0ad3e173322 100644 --- a/src/server/game/Grids/Dynamic/TypeContainerVisitor.h +++ b/src/server/game/Grids/Dynamic/TypeContainerVisitor.h @@ -101,4 +101,3 @@ class TypeContainerVisitor VISITOR &i_visitor; }; #endif - diff --git a/src/server/game/Grids/Grid.h b/src/server/game/Grids/Grid.h index cd358603466..9bdaedc0fba 100644 --- a/src/server/game/Grids/Grid.h +++ b/src/server/game/Grids/Grid.h @@ -141,4 +141,3 @@ class Grid //ActiveGridObjects m_activeGridObjects; }; #endif - diff --git a/src/server/game/Grids/GridLoader.h b/src/server/game/Grids/GridLoader.h index cbf870fedeb..06bda25d423 100644 --- a/src/server/game/Grids/GridLoader.h +++ b/src/server/game/Grids/GridLoader.h @@ -75,4 +75,3 @@ class GridLoader }; */ #endif - diff --git a/src/server/game/Grids/GridRefManager.h b/src/server/game/Grids/GridRefManager.h index 89d6cc9e7e6..739eacc3da7 100644 --- a/src/server/game/Grids/GridRefManager.h +++ b/src/server/game/Grids/GridRefManager.h @@ -37,4 +37,3 @@ class GridRefManager : public RefManager<GridRefManager<OBJECT>, OBJECT> 
iterator end() { return iterator(nullptr); } }; #endif - diff --git a/src/server/game/Grids/GridReference.h b/src/server/game/Grids/GridReference.h index a85051a8398..69d17b4d8ce 100644 --- a/src/server/game/Grids/GridReference.h +++ b/src/server/game/Grids/GridReference.h @@ -50,4 +50,3 @@ class GridReference : public Reference<GridRefManager<OBJECT>, OBJECT> GridReference* next() { return (GridReference*)Reference<GridRefManager<OBJECT>, OBJECT>::next(); } }; #endif - diff --git a/src/server/game/Grids/GridStates.cpp b/src/server/game/Grids/GridStates.cpp index 4e3b11154af..61a8553e640 100644 --- a/src/server/game/Grids/GridStates.cpp +++ b/src/server/game/Grids/GridStates.cpp @@ -64,4 +64,3 @@ void RemovalState::Update(Map& map, NGridType& grid, GridInfo& info, uint32 diff } } } - diff --git a/src/server/game/Grids/NGrid.h b/src/server/game/Grids/NGrid.h index dc6f2561e4f..a057da26b42 100644 --- a/src/server/game/Grids/NGrid.h +++ b/src/server/game/Grids/NGrid.h @@ -187,4 +187,3 @@ class NGrid bool i_GridObjectDataLoaded; }; #endif - diff --git a/src/server/game/Groups/Group.cpp b/src/server/game/Groups/Group.cpp index 79df88b61a5..e82520d4e16 100644 --- a/src/server/game/Groups/Group.cpp +++ b/src/server/game/Groups/Group.cpp @@ -1501,6 +1501,7 @@ void Group::CountTheRoll(Rolls::iterator rollI, Map* allowedMap) roll->getLoot()->NotifyItemRemoved(roll->itemSlot); roll->getLoot()->unlootedCount--; ItemTemplate const* pProto = sObjectMgr->GetItemTemplate(roll->itemid); + ASSERT(pProto); player->UpdateAchievementCriteria(ACHIEVEMENT_CRITERIA_TYPE_CAST_SPELL, 13262); // Disenchant ItemPosCountVec dest; @@ -2552,4 +2553,3 @@ void Group::ToggleGroupMemberFlag(member_witerator slot, uint8 flag, bool apply) else slot->flags &= ~flag; } - diff --git a/src/server/game/Groups/GroupRefManager.h b/src/server/game/Groups/GroupRefManager.h index d433206bd69..abab0c19688 100644 --- a/src/server/game/Groups/GroupRefManager.h +++ b/src/server/game/Groups/GroupRefManager.h @@ -32,4 +32,3 @@ class GroupRefManager : public RefManager<Group, Player> GroupReference const* getFirst() const { return ((GroupReference const*)RefManager<Group, Player>::getFirst()); } }; #endif - diff --git a/src/server/game/Groups/GroupReference.cpp b/src/server/game/Groups/GroupReference.cpp index f0cfc4ff689..b56354d3977 100644 --- a/src/server/game/Groups/GroupReference.cpp +++ b/src/server/game/Groups/GroupReference.cpp @@ -36,4 +36,3 @@ void GroupReference::sourceObjectDestroyLink() // called from invalidate() //getTarget()->DelinkMember(this); } - diff --git a/src/server/game/Groups/GroupReference.h b/src/server/game/Groups/GroupReference.h index 47d8418c5cb..649f229b086 100644 --- a/src/server/game/Groups/GroupReference.h +++ b/src/server/game/Groups/GroupReference.h @@ -40,4 +40,3 @@ class TC_GAME_API GroupReference : public Reference<Group, Player> void setSubGroup(uint8 pSubGroup) { iSubGroup = pSubGroup; } }; #endif - diff --git a/src/server/game/Guilds/Guild.cpp b/src/server/game/Guilds/Guild.cpp index 54d802ef546..98d066996fd 100644 --- a/src/server/game/Guilds/Guild.cpp +++ b/src/server/game/Guilds/Guild.cpp @@ -1709,6 +1709,7 @@ void Guild::HandleUpdateMemberRank(WorldSession* session, std::string const& nam } Member const* memberMe = GetMember(player->GetGUID()); + ASSERT(memberMe); uint8 rankId = memberMe->GetRankId(); if (demote) { diff --git a/src/server/game/Handlers/AddonHandler.h b/src/server/game/Handlers/AddonHandler.h index ac98cd0a877..f75958182fa 100644 --- a/src/server/game/Handlers/AddonHandler.h +++ 
b/src/server/game/Handlers/AddonHandler.h @@ -36,4 +36,3 @@ class AddonHandler }; #define sAddOnHandler AddonHandler::instance() #endif - diff --git a/src/server/game/Handlers/AuctionHouseHandler.cpp b/src/server/game/Handlers/AuctionHouseHandler.cpp index a0d6ae3bdd2..89db6ee6e69 100644 --- a/src/server/game/Handlers/AuctionHouseHandler.cpp +++ b/src/server/game/Handlers/AuctionHouseHandler.cpp @@ -515,6 +515,16 @@ void WorldSession::HandleAuctionPlaceBid(WorldPacket& recvData) stmt->setUInt32(2, auction->Id); trans->Append(stmt); + if (auction->bidders.find(player->GetGUID()) == auction->bidders.end()) + { + // save new bidder in list, and save record to db + auction->bidders.insert(player->GetGUID()); + stmt = CharacterDatabase.GetPreparedStatement(CHAR_INS_AUCTION_BIDDERS); + stmt->setUInt32(0, auction->Id); + stmt->setUInt32(1, auction->bidder); + trans->Append(stmt); + } + SendAuctionCommandResult(auction->Id, AUCTION_PLACE_BID, ERR_AUCTION_OK, 0); } else diff --git a/src/server/game/Handlers/MailHandler.cpp b/src/server/game/Handlers/MailHandler.cpp index fc283e1d3df..3d1b995cd1e 100644 --- a/src/server/game/Handlers/MailHandler.cpp +++ b/src/server/game/Handlers/MailHandler.cpp @@ -298,6 +298,7 @@ void WorldSession::HandleSendMail(WorldPacket& recvData) item->DeleteFromInventoryDB(trans); // deletes item from character's inventory item->SetOwnerGUID(receiverGuid); + item->SetState(ITEM_CHANGED); item->SaveToDB(trans); // recursive and not have transaction guard into self, item not in inventory and can be save standalone draft.AddItem(item); @@ -693,7 +694,7 @@ void WorldSession::HandleGetMailList(WorldPacket& recvData) // durability data << uint32((item ? item->GetUInt32Value(ITEM_FIELD_DURABILITY) : 0)); // unknown wotlk - data << uint8(0); + data << uint8((item && !item->IsLocked() ? 
1 : 0)); } ++realCount; diff --git a/src/server/game/Handlers/NPCHandler.cpp b/src/server/game/Handlers/NPCHandler.cpp index 59344509283..1ddb36c3b56 100644 --- a/src/server/game/Handlers/NPCHandler.cpp +++ b/src/server/game/Handlers/NPCHandler.cpp @@ -851,4 +851,3 @@ void WorldSession::HandleRepairItemOpcode(WorldPacket& recvData) _player->DurabilityRepairAll(true, discountMod, guildBank != 0); } } - diff --git a/src/server/game/Handlers/NPCHandler.h b/src/server/game/Handlers/NPCHandler.h index 61db8bfb04c..bb8c500c1f8 100644 --- a/src/server/game/Handlers/NPCHandler.h +++ b/src/server/game/Handlers/NPCHandler.h @@ -57,4 +57,3 @@ struct NpcTextLocale std::vector<std::string> Text_1[MAX_GOSSIP_TEXT_OPTIONS]; }; #endif - diff --git a/src/server/game/Handlers/VoiceChatHandler.cpp b/src/server/game/Handlers/VoiceChatHandler.cpp index e67124b0316..8c0aceee3c9 100644 --- a/src/server/game/Handlers/VoiceChatHandler.cpp +++ b/src/server/game/Handlers/VoiceChatHandler.cpp @@ -41,4 +41,3 @@ void WorldSession::HandleSetActiveVoiceChannel(WorldPacket& recvData) recvData.read_skip<uint32>(); recvData.read_skip<char*>(); } - diff --git a/src/server/game/Maps/AreaBoundary.cpp b/src/server/game/Maps/AreaBoundary.cpp index 952d82668bb..6ed21ec50f6 100644 --- a/src/server/game/Maps/AreaBoundary.cpp +++ b/src/server/game/Maps/AreaBoundary.cpp @@ -103,6 +103,11 @@ BoundaryUnionBoundary::BoundaryUnionBoundary(AreaBoundary const* b1, AreaBoundar { ASSERT(b1 && b2); } +BoundaryUnionBoundary::~BoundaryUnionBoundary() +{ + delete _b1; + delete _b2; +} bool BoundaryUnionBoundary::IsWithinBoundaryArea(Position const* pos) const { return (_b1->IsWithinBoundary(pos) || _b2->IsWithinBoundary(pos)); diff --git a/src/server/game/Maps/AreaBoundary.h b/src/server/game/Maps/AreaBoundary.h index 1e8f22e9cd4..dab73e9b51b 100644 --- a/src/server/game/Maps/AreaBoundary.h +++ b/src/server/game/Maps/AreaBoundary.h @@ -157,6 +157,7 @@ class TC_GAME_API BoundaryUnionBoundary : public AreaBoundary BoundaryUnionBoundary(AreaBoundary const* b1, AreaBoundary const* b2, bool isInverted = false); protected: + virtual ~BoundaryUnionBoundary(); bool IsWithinBoundaryArea(Position const* pos) const override; private: diff --git a/src/server/game/Maps/Map.cpp b/src/server/game/Maps/Map.cpp index ee20c63b7e1..f1af14e5b8d 100644 --- a/src/server/game/Maps/Map.cpp +++ b/src/server/game/Maps/Map.cpp @@ -2680,8 +2680,10 @@ void Map::GetFullTerrainStatusForPosition(float x, float y, float z, PositionFul if (vmapData.areaInfo) data.areaInfo = boost::in_place(vmapData.areaInfo->adtId, vmapData.areaInfo->rootId, vmapData.areaInfo->groupId, vmapData.areaInfo->mogpFlags); + float mapHeight = VMAP_INVALID_HEIGHT; GridMap* gmap = const_cast<Map*>(this)->GetGrid(x, y); - float mapHeight = gmap->getHeight(x, y); + if (gmap) + mapHeight = gmap->getHeight(x, y); // area lookup AreaTableEntry const* areaEntry = nullptr; @@ -2689,6 +2691,8 @@ void Map::GetFullTerrainStatusForPosition(float x, float y, float z, PositionFul if (WMOAreaTableEntry const* wmoEntry = GetWMOAreaTableEntryByTripple(vmapData.areaInfo->rootId, vmapData.areaInfo->adtId, vmapData.areaInfo->groupId)) areaEntry = sAreaTableStore.LookupEntry(wmoEntry->areaId); + data.areaId = 0; + if (areaEntry) { data.floorZ = vmapData.floorZ; @@ -2697,7 +2701,8 @@ void Map::GetFullTerrainStatusForPosition(float x, float y, float z, PositionFul else { data.floorZ = mapHeight; - data.areaId = gmap->getArea(x, y); + if (gmap) + data.areaId = gmap->getArea(x, y); if (!data.areaId) data.areaId = 
i_mapEntry->linked_zone; @@ -2972,7 +2977,9 @@ bool Map::CheckRespawn(RespawnInfo* info) { time_t now = time(NULL); time_t respawnTime; - if (sObjectMgr->GetLinkedRespawnGuid(thisGUID) == thisGUID) // never respawn, save "something" in DB + if (linkedTime == std::numeric_limits<time_t>::max()) + respawnTime = linkedTime; + else if (sObjectMgr->GetLinkedRespawnGuid(thisGUID) == thisGUID) // never respawn, save "something" in DB respawnTime = now + WEEK; else // set us to check again shortly after linked unit respawnTime = std::max<time_t>(now, linkedTime) + urand(5, 15); @@ -3257,7 +3264,7 @@ bool Map::SpawnGroupSpawn(uint32 groupId, bool ignoreRespawn, bool force, std::v TC_LOG_ERROR("maps", "Tried to spawn non-existing (or system) spawn group %u on map %u. Blocked.", groupId, GetId()); return false; } - + for (auto& pair : sObjectMgr->GetSpawnDataForGroup(groupId)) { SpawnData const* data = pair.second; diff --git a/src/server/game/Maps/MapRefManager.h b/src/server/game/Maps/MapRefManager.h index 4b4a53d78fa..52f7bddfcb6 100644 --- a/src/server/game/Maps/MapRefManager.h +++ b/src/server/game/Maps/MapRefManager.h @@ -39,4 +39,3 @@ class MapRefManager : public RefManager<Map, Player> const_iterator end() const { return const_iterator(nullptr); } }; #endif - diff --git a/src/server/game/Maps/MapReference.h b/src/server/game/Maps/MapReference.h index 73b0635390e..ee95fd4ba9a 100644 --- a/src/server/game/Maps/MapReference.h +++ b/src/server/game/Maps/MapReference.h @@ -39,4 +39,3 @@ class MapReference : public Reference<Map, Player> MapReference const* nocheck_prev() const { return (MapReference const*)Reference<Map, Player>::nocheck_prev(); } }; #endif - diff --git a/src/server/game/Movement/MovementGenerators/WaypointMovementGenerator.cpp b/src/server/game/Movement/MovementGenerators/WaypointMovementGenerator.cpp index 3fba420059e..74a8588163c 100755 --- a/src/server/game/Movement/MovementGenerators/WaypointMovementGenerator.cpp +++ b/src/server/game/Movement/MovementGenerators/WaypointMovementGenerator.cpp @@ -252,16 +252,15 @@ bool WaypointMovementGenerator<Creature>::DoUpdate(Creature* creature, uint32 di if (!_nextMoveTime.Passed()) { - _nextMoveTime.Update(diff); - if (_nextMoveTime.Passed()) - return StartMoveNow(creature); + if (creature->movespline->Finalized()) + { + _nextMoveTime.Update(diff); + if (_nextMoveTime.Passed()) + return StartMoveNow(creature); + } } else { - // Set home position at place on waypoint movement. - if (!creature->HasUnitMovementFlag(MOVEMENTFLAG_ONTRANSPORT) || creature->GetTransGUID().IsEmpty()) - creature->SetHomePosition(creature->GetPosition()); - if (creature->movespline->Finalized()) { OnArrived(creature); @@ -270,14 +269,21 @@ bool WaypointMovementGenerator<Creature>::DoUpdate(Creature* creature, uint32 di if (_nextMoveTime.Passed()) return StartMove(creature); } - else if (_recalculateSpeed) + else { - if (_nextMoveTime.Passed()) - StartMove(creature); + // Set home position at place on waypoint movement. 
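// [editor note] The WaypointMovementGenerator::DoUpdate rework in this hunk only
// ticks the pause timer once movespline->Finalized() is true, so a per-waypoint
// delay starts counting when the creature has actually stopped rather than while
// it is still travelling. A stripped-down sketch of that gating, under invented names:
#include <cstdint>

struct PauseGate
{
    bool splineFinalized = false;   // set by the movement backend on arrival
    int32_t pauseLeftMs = 2000;     // per-waypoint delay

    // Returns true when the next waypoint move may start.
    bool Update(uint32_t diffMs)
    {
        if (!splineFinalized)
            return false;                        // still moving: do not tick the pause
        pauseLeftMs -= static_cast<int32_t>(diffMs);
        return pauseLeftMs <= 0;                 // pause elapsed: start the next leg
    }
};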
+ if (!creature->HasUnitMovementFlag(MOVEMENTFLAG_ONTRANSPORT) || creature->GetTransGUID().IsEmpty()) + creature->SetHomePosition(creature->GetPosition()); + + if (_recalculateSpeed) + { + if (_nextMoveTime.Passed()) + StartMove(creature); + } } } return true; - } +} void WaypointMovementGenerator<Creature>::MovementInform(Creature* creature) { diff --git a/src/server/game/Pools/PoolMgr.cpp b/src/server/game/Pools/PoolMgr.cpp index 7ab6c43d410..25b9bd5f363 100644 --- a/src/server/game/Pools/PoolMgr.cpp +++ b/src/server/game/Pools/PoolMgr.cpp @@ -153,34 +153,6 @@ bool PoolGroup<T>::CheckPool() const return true; } -template <class T> -PoolObject* PoolGroup<T>::RollOne(ActivePoolData& spawns, uint32 triggerFrom) -{ - if (!ExplicitlyChanced.empty()) - { - float roll = (float)rand_chance(); - - for (uint32 i = 0; i < ExplicitlyChanced.size(); ++i) - { - roll -= ExplicitlyChanced[i].chance; - // Triggering object is marked as spawned at this time and can be also rolled (respawn case) - // so this need explicit check for this case - if (roll < 0 && (ExplicitlyChanced[i].guid == triggerFrom || !spawns.IsActiveObject<T>(ExplicitlyChanced[i].guid))) - return &ExplicitlyChanced[i]; - } - } - if (!EqualChanced.empty()) - { - uint32 index = urand(0, EqualChanced.size()-1); - // Triggering object is marked as spawned at this time and can be also rolled (respawn case) - // so this need explicit check for this case - if (EqualChanced[index].guid == triggerFrom || !spawns.IsActiveObject<T>(EqualChanced[index].guid)) - return &EqualChanced[index]; - } - - return nullptr; -} - // Main method to despawn a creature or gameobject in a pool // If no guid is passed, the pool is just removed (event end case) // If guid is filled, cache will be used and no removal will occur, it just fill the cache @@ -340,7 +312,6 @@ void PoolGroup<Pool>::RemoveOneRelation(uint32 child_pool_id) template <class T> void PoolGroup<T>::SpawnObject(ActivePoolData& spawns, uint32 limit, uint32 triggerFrom) { - uint32 lastDespawned = 0; int count = limit - spawns.GetActiveObjectCount(poolId); // If triggered from some object respawn this object is still marked as spawned @@ -349,32 +320,70 @@ void PoolGroup<T>::SpawnObject(ActivePoolData& spawns, uint32 limit, uint32 trig if (triggerFrom) ++count; - // This will try to spawn the rest of pool, not guaranteed - for (int i = 0; i < count; ++i) + if (count > 0) { - PoolObject* obj = RollOne(spawns, triggerFrom); - if (!obj) - continue; - if (obj->guid == lastDespawned) - continue; + PoolObjectList rolledObjects; + rolledObjects.reserve(count); - if (obj->guid == triggerFrom) + // roll objects to be spawned + if (!ExplicitlyChanced.empty()) { - ReSpawn1Object(obj); - triggerFrom = 0; - continue; + while (count && ExplicitlyChanced.size() > rolledObjects.size()) + { + --count; + float roll = (float)rand_chance(); + + for (PoolObject& obj : ExplicitlyChanced) + { + roll -= obj.chance; + // Triggering object is marked as spawned at this time and can be also rolled (respawn case) + // so this need explicit check for this case + if (roll < 0 && (obj.guid == triggerFrom || !spawns.IsActiveObject<T>(obj.guid))) + { + rolledObjects.push_back(obj); + break; + } + } + } + } + else if (!EqualChanced.empty()) + { + rolledObjects = EqualChanced; + + for (auto itr = rolledObjects.begin(); itr != rolledObjects.end();) + { + // remove most of the active objects so there is higher chance inactive ones are spawned + if (spawns.IsActiveObject<T>(itr->guid) && urand(1, 4) != 1) + itr = rolledObjects.erase(itr); 
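// [editor note] The PoolGroup::SpawnObject rework in this hunk replaces the removed
// RollOne() single-object loop with a batch roll: copy the equal-chance candidates,
// keep each already-active entry only on a 1-in-4 roll so inactive spawns dominate,
// then shrink the list to the remaining spawn budget via
// Trinity::Containers::RandomResize. A self-contained sketch of that selection,
// with std::shuffle standing in for RandomResize and all names invented:
#include <algorithm>
#include <cstddef>
#include <functional>
#include <random>
#include <vector>

std::vector<int> RollBatch(std::vector<int> candidates, std::size_t budget,
                           std::mt19937& rng, std::function<bool(int)> const& isActive)
{
    std::uniform_int_distribution<int> oneInFour(1, 4);
    // Thin out entries that are already spawned (each kept only ~25% of the time).
    candidates.erase(std::remove_if(candidates.begin(), candidates.end(),
                         [&](int guid) { return isActive(guid) && oneInFour(rng) != 1; }),
                     candidates.end());
    // RandomResize equivalent: a random subset no larger than the budget.
    std::shuffle(candidates.begin(), candidates.end(), rng);
    if (candidates.size() > budget)
        candidates.resize(budget);
    return candidates;
}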
+ else + ++itr; + } + + Trinity::Containers::RandomResize(rolledObjects, count); } - spawns.ActivateObject<T>(obj->guid, poolId); - Spawn1Object(obj); - if (triggerFrom) + // try to spawn rolled objects + for (PoolObject& obj : rolledObjects) { - // One spawn one despawn no count increase - DespawnObject(spawns, triggerFrom); - lastDespawned = triggerFrom; - triggerFrom = 0; + if (spawns.IsActiveObject<T>(obj.guid)) + continue; + + if (obj.guid == triggerFrom) + { + ReSpawn1Object(&obj); + triggerFrom = 0; + } + else + { + spawns.ActivateObject<T>(obj.guid, poolId); + Spawn1Object(&obj); + } } } + + // One spawn one despawn no count increase + if (triggerFrom) + DespawnObject(spawns, triggerFrom); } // Method that is actualy doing the spawn job on 1 creature diff --git a/src/server/game/Pools/PoolMgr.h b/src/server/game/Pools/PoolMgr.h index c36e7bd29ef..6de34c05fb4 100644 --- a/src/server/game/Pools/PoolMgr.h +++ b/src/server/game/Pools/PoolMgr.h @@ -77,7 +77,6 @@ class TC_GAME_API PoolGroup bool isEmpty() const { return ExplicitlyChanced.empty() && EqualChanced.empty(); } void AddEntry(PoolObject& poolitem, uint32 maxentries); bool CheckPool() const; - PoolObject* RollOne(ActivePoolData& spawns, uint32 triggerFrom); void DespawnObject(ActivePoolData& spawns, ObjectGuid::LowType guid=0); void Despawn1Object(ObjectGuid::LowType guid); void SpawnObject(ActivePoolData& spawns, uint32 limit, uint32 triggerFrom); diff --git a/src/server/game/Reputation/ReputationMgr.cpp b/src/server/game/Reputation/ReputationMgr.cpp index b688a212464..3ccec22e7c4 100644 --- a/src/server/game/Reputation/ReputationMgr.cpp +++ b/src/server/game/Reputation/ReputationMgr.cpp @@ -463,8 +463,8 @@ void ReputationMgr::SetAtWar(RepListID repListID, bool on) void ReputationMgr::SetAtWar(FactionState* faction, bool atWar) const { - // not allow declare war to own faction - if (atWar && (faction->Flags & FACTION_FLAG_PEACE_FORCED)) + // Do not allow to declare war to our own faction. But allow for rival factions (eg Aldor vs Scryer). + if (atWar && (faction->Flags & FACTION_FLAG_PEACE_FORCED) && !(faction->Flags & FACTION_FLAG_RIVAL)) return; // already set diff --git a/src/server/game/Server/WorldSession.cpp b/src/server/game/Server/WorldSession.cpp index a5a4f9139b2..58f9bbda64d 100644 --- a/src/server/game/Server/WorldSession.cpp +++ b/src/server/game/Server/WorldSession.cpp @@ -28,6 +28,7 @@ #include "Common.h" #include "DatabaseEnv.h" #include "DBCStructure.h" +#include "GameTime.h" #include "Group.h" #include "Guild.h" #include "GuildMgr.h" @@ -267,9 +268,6 @@ void WorldSession::LogUnprocessedTail(WorldPacket* packet) /// Update the WorldSession (triggered by World update) bool WorldSession::Update(uint32 diff, PacketFilter& updater) { - /// Update Timeout timer. 
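// [editor note] The WorldSession change beginning here swaps the per-tick countdown
// (UpdateTimeOutTime subtracting `diff` on every world update) for an absolute
// deadline: ResetTimeOutTime now stores `now + timeout` and IsConnectionIdle is a
// single comparison against the clock, so nothing has to be decremented each tick.
// A minimal sketch of the pattern, using plain std::time_t in place of the
// GameTime helper:
#include <ctime>

struct IdleDeadline
{
    std::time_t deadline = 0;

    void Reset(std::time_t timeoutSeconds)
    {
        deadline = std::time(nullptr) + timeoutSeconds; // absolute expiry time
    }

    bool IsIdle() const
    {
        return deadline < std::time(nullptr); // expired: client sent nothing in time
    }
};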
- UpdateTimeOutTime(diff); - ///- Before we process anything: /// If necessary, kick the player because the client didn't send anything for too long /// (or they've been idling in character select) @@ -637,9 +635,14 @@ char const* WorldSession::GetTrinityString(uint32 entry) const void WorldSession::ResetTimeOutTime(bool onlyActive) { if (GetPlayer()) - m_timeOutTime = int32(sWorld->getIntConfig(CONFIG_SOCKET_TIMEOUTTIME_ACTIVE)); + m_timeOutTime = GameTime::GetGameTime() + time_t(sWorld->getIntConfig(CONFIG_SOCKET_TIMEOUTTIME_ACTIVE)); else if (!onlyActive) - m_timeOutTime = int32(sWorld->getIntConfig(CONFIG_SOCKET_TIMEOUTTIME)); + m_timeOutTime = GameTime::GetGameTime() + time_t(sWorld->getIntConfig(CONFIG_SOCKET_TIMEOUTTIME)); +} + +bool WorldSession::IsConnectionIdle() const +{ + return m_timeOutTime < GameTime::GetGameTime() && !m_inQueue; } void WorldSession::Handle_NULL(WorldPacket& null) diff --git a/src/server/game/Server/WorldSession.h b/src/server/game/Server/WorldSession.h index 237fa991456..2caba989384 100644 --- a/src/server/game/Server/WorldSession.h +++ b/src/server/game/Server/WorldSession.h @@ -432,19 +432,11 @@ class TC_GAME_API WorldSession void SetLatency(uint32 latency) { m_latency = latency; } void ResetClientTimeDelay() { m_clientTimeDelay = 0; } - std::atomic<int32> m_timeOutTime; - - void UpdateTimeOutTime(uint32 diff) - { - m_timeOutTime -= int32(diff); - } + std::atomic<time_t> m_timeOutTime; void ResetTimeOutTime(bool onlyActive); - bool IsConnectionIdle() const - { - return m_timeOutTime <= 0 && !m_inQueue; - } + bool IsConnectionIdle() const; // Recruit-A-Friend Handling uint32 GetRecruiterId() const { return recruiterId; } diff --git a/src/server/game/Spells/Auras/SpellAuras.cpp b/src/server/game/Spells/Auras/SpellAuras.cpp index cff25bc8e2c..7c1cdc56de0 100644 --- a/src/server/game/Spells/Auras/SpellAuras.cpp +++ b/src/server/game/Spells/Auras/SpellAuras.cpp @@ -947,6 +947,9 @@ bool Aura::CanBeSaved() const if (IsPassive()) return false; + if (GetSpellInfo()->IsChanneled()) + return false; + // Check if aura is single target, not only spell info if (GetCasterGUID() != GetOwner()->GetGUID()) if (GetSpellInfo()->IsSingleTarget() || IsSingleTarget()) diff --git a/src/server/game/Spells/SpellEffects.cpp b/src/server/game/Spells/SpellEffects.cpp index 75c97b005af..12c437884a6 100644 --- a/src/server/game/Spells/SpellEffects.cpp +++ b/src/server/game/Spells/SpellEffects.cpp @@ -1964,7 +1964,10 @@ void Spell::EffectOpenLock(SpellEffIndex effIndex) if (gameObjTarget) SendLoot(guid, LOOT_SKINNING); else if (itemTarget) + { itemTarget->SetFlag(ITEM_FIELD_FLAGS, ITEM_FIELD_FLAG_UNLOCKED); + itemTarget->SetState(ITEM_CHANGED, itemTarget->GetOwner()); + } // not allow use skill grow at item base open if (!m_CastItem && skillId != SKILL_NONE) @@ -3410,6 +3413,7 @@ void Spell::EffectInterruptCast(SpellEffIndex effIndex) { int32 duration = m_spellInfo->GetDuration(); unitTarget->GetSpellHistory()->LockSpellSchool(curSpellInfo->GetSchoolMask(), unitTarget->ModSpellDuration(m_spellInfo, unitTarget, duration, false, 1 << effIndex)); + m_originalCaster->ProcSkillsAndAuras(unitTarget, PROC_FLAG_DONE_SPELL_MAGIC_DMG_CLASS_NEG, PROC_FLAG_TAKEN_SPELL_MAGIC_DMG_CLASS_NEG, PROC_SPELL_TYPE_MASK_ALL, PROC_SPELL_PHASE_HIT, PROC_HIT_INTERRUPT, nullptr, nullptr, nullptr); } ExecuteLogEffectInterruptCast(effIndex, unitTarget, curSpellInfo->Id); unitTarget->InterruptSpell(CurrentSpellTypes(i), false); diff --git a/src/server/game/Spells/SpellMgr.cpp b/src/server/game/Spells/SpellMgr.cpp 
b/src/server/game/Spells/SpellMgr.cpp index 22b16a29dde..26638ca7c2d 100644 --- a/src/server/game/Spells/SpellMgr.cpp +++ b/src/server/game/Spells/SpellMgr.cpp @@ -2344,9 +2344,11 @@ void SpellMgr::LoadSpellAreas() uint32 oldMSTime = getMSTime(); mSpellAreaMap.clear(); // need for reload case + mSpellAreaForAreaMap.clear(); mSpellAreaForQuestMap.clear(); mSpellAreaForQuestEndMap.clear(); mSpellAreaForAuraMap.clear(); + mSpellAreaForQuestAreaMap.clear(); // 0 1 2 3 4 5 6 7 8 9 QueryResult result = WorldDatabase.Query("SELECT spell, area, quest_start, quest_start_status, quest_end_status, quest_end, aura_spell, racemask, gender, autocast FROM spell_area"); @@ -2542,6 +2544,22 @@ void SpellMgr::LoadSpellInfoStore() for (SpellEntry const* spellEntry : sSpellStore) mSpellInfoMap[spellEntry->Id] = new SpellInfo(spellEntry); + for (uint32 spellIndex = 0; spellIndex < GetSpellInfoStoreSize(); ++spellIndex) + { + if (!mSpellInfoMap[spellIndex]) + continue; + + for (auto const& effect : mSpellInfoMap[spellIndex]->Effects) + { + //ASSERT(effect.EffectIndex < MAX_SPELL_EFFECTS, "MAX_SPELL_EFFECTS must be at least %u", effect.EffectIndex + 1); + ASSERT(effect.Effect < TOTAL_SPELL_EFFECTS, "TOTAL_SPELL_EFFECTS must be at least %u", effect.Effect + 1); + ASSERT(effect.ApplyAuraName < TOTAL_AURAS, "TOTAL_AURAS must be at least %u", effect.ApplyAuraName + 1); + ASSERT(effect.TargetA.GetTarget() < TOTAL_SPELL_TARGETS, "TOTAL_SPELL_TARGETS must be at least %u", effect.TargetA.GetTarget() + 1); + ASSERT(effect.TargetB.GetTarget() < TOTAL_SPELL_TARGETS, "TOTAL_SPELL_TARGETS must be at least %u", effect.TargetB.GetTarget() + 1); + } + } + + TC_LOG_INFO("server.loading", ">> Loaded SpellInfo store in %u ms", GetMSTimeDiffToNow(oldMSTime)); } @@ -3256,11 +3274,6 @@ void SpellMgr::LoadSpellInfoCorrections() spellInfo->Effects[EFFECT_0].SpellClassMask[0] |= 0x800; }); - // The Eye of Acherus (no spawn in phase 2 in db) - ApplySpellFix({ 51852 }, [](SpellInfo* spellInfo) - { - spellInfo->Effects[EFFECT_0].MiscValue |= 1; - }); // Crafty's Ultra-Advanced Proto-Typical Shortening Blaster ApplySpellFix({ 51912 }, [](SpellInfo* spellInfo) @@ -3598,7 +3611,7 @@ void SpellMgr::LoadSpellInfoCorrections() 47134 // Quest Complete }, [](SpellInfo* spellInfo) { - //! HACK: This spell break quest complete for alliance and on retail not used °_O + //!
HACK: This spell breaks quest completion for Alliance and is not used on retail spellInfo->Effects[EFFECT_0].Effect = 0; }); diff --git a/src/server/game/Spells/SpellMgr.h b/src/server/game/Spells/SpellMgr.h index c9fb69a8eb2..51c84f18d42 100644 --- a/src/server/game/Spells/SpellMgr.h +++ b/src/server/game/Spells/SpellMgr.h @@ -229,7 +229,7 @@ enum ProcFlagsHit PROC_HIT_DEFLECT = 0x0000200, PROC_HIT_ABSORB = 0x0000400, // partial or full absorb PROC_HIT_REFLECT = 0x0000800, - PROC_HIT_INTERRUPT = 0x0001000, // (not used atm) + PROC_HIT_INTERRUPT = 0x0001000, PROC_HIT_FULL_BLOCK = 0x0002000, PROC_HIT_MASK_ALL = 0x0002FFF }; diff --git a/src/server/game/Spells/SpellScript.cpp b/src/server/game/Spells/SpellScript.cpp index 3750cae08ef..e9431d216a3 100644 --- a/src/server/game/Spells/SpellScript.cpp +++ b/src/server/game/Spells/SpellScript.cpp @@ -1200,4 +1200,3 @@ AuraApplication const* AuraScript::GetTargetApplication() const { return m_auraApplication; } - diff --git a/src/server/game/Tools/CharacterDatabaseCleaner.cpp b/src/server/game/Tools/CharacterDatabaseCleaner.cpp index 030f1d170d8..8894f9986e1 100644 --- a/src/server/game/Tools/CharacterDatabaseCleaner.cpp +++ b/src/server/game/Tools/CharacterDatabaseCleaner.cpp @@ -156,4 +156,3 @@ void CharacterDatabaseCleaner::CleanCharacterQuestStatus() { CharacterDatabase.DirectExecute("DELETE FROM character_queststatus WHERE status = 0"); } - diff --git a/src/server/game/Weather/Weather.cpp b/src/server/game/Weather/Weather.cpp index fb26ae61a12..e6c53ca88d3 100644 --- a/src/server/game/Weather/Weather.cpp +++ b/src/server/game/Weather/Weather.cpp @@ -321,4 +321,3 @@ WeatherState Weather::GetWeatherState() const return WEATHER_STATE_FINE; } } - diff --git a/src/server/game/World/World.cpp b/src/server/game/World/World.cpp index 03081a113de..9e4a1ca3a45 100644 --- a/src/server/game/World/World.cpp +++ b/src/server/game/World/World.cpp @@ -721,8 +721,10 @@ void World::LoadConfigSettings(bool reload) else m_int_configs[CONFIG_PORT_WORLD] = sConfigMgr->GetIntDefault("WorldServerPort", 8085); - m_int_configs[CONFIG_SOCKET_TIMEOUTTIME] = sConfigMgr->GetIntDefault("SocketTimeOutTime", 900000); - m_int_configs[CONFIG_SOCKET_TIMEOUTTIME_ACTIVE] = sConfigMgr->GetIntDefault("SocketTimeOutTimeActive", 60000); + // Config values are given in milliseconds, but SocketTimeOut is handled in seconds, so divide by 1000 + m_int_configs[CONFIG_SOCKET_TIMEOUTTIME] = sConfigMgr->GetIntDefault("SocketTimeOutTime", 900000) / 1000; + m_int_configs[CONFIG_SOCKET_TIMEOUTTIME_ACTIVE] = sConfigMgr->GetIntDefault("SocketTimeOutTimeActive", 60000) / 1000; + m_int_configs[CONFIG_SESSION_ADD_DELAY] = sConfigMgr->GetIntDefault("SessionAddDelay", 10000); m_float_configs[CONFIG_GROUP_XP_DISTANCE] = sConfigMgr->GetFloatDefault("MaxGroupXPDistance", 74.0f); diff --git a/src/server/scripts/Commands/cs_modify.cpp b/src/server/scripts/Commands/cs_modify.cpp index af8e5c12268..99b229f31d2 100644 --- a/src/server/scripts/Commands/cs_modify.cpp +++ b/src/server/scripts/Commands/cs_modify.cpp @@ -1021,7 +1021,7 @@ public: if (!*args) return false; - uint16 display_id = (uint16)atoi((char*)args); + uint32 display_id = (uint32)atoi((char*)args); Unit* target = handler->getSelectedUnit(); if (!target) diff --git a/src/server/scripts/Commands/cs_npc.cpp b/src/server/scripts/Commands/cs_npc.cpp index 54c48cd1205..3cb7adef2a0 100644 --- a/src/server/scripts/Commands/cs_npc.cpp +++ b/src/server/scripts/Commands/cs_npc.cpp @@ -50,7 +50,7 @@ struct EnumName #define CREATE_NAMED_ENUM(VALUE) { VALUE,
STRINGIZE(VALUE) } #define NPCFLAG_COUNT 24 -#define FLAGS_EXTRA_COUNT 20 +#define FLAGS_EXTRA_COUNT 21 EnumName<NPCFlags, int32> const npcFlagTexts[NPCFLAG_COUNT] = { @@ -173,7 +173,8 @@ EnumName<CreatureFlagsExtra> const flagsExtra[FLAGS_EXTRA_COUNT] = CREATE_NAMED_ENUM(CREATURE_FLAG_EXTRA_NO_PLAYER_DAMAGE_REQ), CREATE_NAMED_ENUM(CREATURE_FLAG_EXTRA_DUNGEON_BOSS), CREATE_NAMED_ENUM(CREATURE_FLAG_EXTRA_IGNORE_PATHFINDING), - CREATE_NAMED_ENUM(CREATURE_FLAG_EXTRA_IMMUNITY_KNOCKBACK) + CREATE_NAMED_ENUM(CREATURE_FLAG_EXTRA_IMMUNITY_KNOCKBACK), + CREATE_NAMED_ENUM(CREATURE_FLAG_EXTRA_USE_OFFHAND_ATTACK) }; bool HandleNpcSpawnGroup(ChatHandler* handler, char const* args) diff --git a/src/server/scripts/EasternKingdoms/AlteracValley/boss_drekthar.cpp b/src/server/scripts/EasternKingdoms/AlteracValley/boss_drekthar.cpp index ed97d7670a1..3247dea994b 100644 --- a/src/server/scripts/EasternKingdoms/AlteracValley/boss_drekthar.cpp +++ b/src/server/scripts/EasternKingdoms/AlteracValley/boss_drekthar.cpp @@ -147,4 +147,4 @@ public: void AddSC_boss_drekthar() { new boss_drekthar; -}
\ No newline at end of file +} diff --git a/src/server/scripts/EasternKingdoms/AlteracValley/boss_vanndar.cpp b/src/server/scripts/EasternKingdoms/AlteracValley/boss_vanndar.cpp index 372ee7ba4a1..f1cbd631d18 100644 --- a/src/server/scripts/EasternKingdoms/AlteracValley/boss_vanndar.cpp +++ b/src/server/scripts/EasternKingdoms/AlteracValley/boss_vanndar.cpp @@ -125,4 +125,4 @@ public: void AddSC_boss_vanndar() { new boss_vanndar; -}
\ No newline at end of file +} diff --git a/src/server/scripts/EasternKingdoms/MagistersTerrace/boss_selin_fireheart.cpp b/src/server/scripts/EasternKingdoms/MagistersTerrace/boss_selin_fireheart.cpp index 93f10b1a81e..c0b2a3c9b4c 100644 --- a/src/server/scripts/EasternKingdoms/MagistersTerrace/boss_selin_fireheart.cpp +++ b/src/server/scripts/EasternKingdoms/MagistersTerrace/boss_selin_fireheart.cpp @@ -112,7 +112,7 @@ class boss_selin_fireheart : public CreatureScript DoCast(crystal, SPELL_FEL_CRYSTAL_DUMMY); CrystalGUID = crystal->GetGUID(); - + float x, y, z; crystal->GetClosePoint(x, y, z, me->GetCombatReach(), CONTACT_DISTANCE); diff --git a/src/server/scripts/EasternKingdoms/Uldaman/boss_archaedas.cpp b/src/server/scripts/EasternKingdoms/Uldaman/boss_archaedas.cpp index c96d86aff57..b125184103b 100644 --- a/src/server/scripts/EasternKingdoms/Uldaman/boss_archaedas.cpp +++ b/src/server/scripts/EasternKingdoms/Uldaman/boss_archaedas.cpp @@ -431,4 +431,3 @@ void AddSC_boss_archaedas() new npc_stonekeepers(); new go_altar_of_archaedas(); } - diff --git a/src/server/scripts/EasternKingdoms/Uldaman/uldaman.cpp b/src/server/scripts/EasternKingdoms/Uldaman/uldaman.cpp index 6db29b9bc6a..c1c5b726713 100644 --- a/src/server/scripts/EasternKingdoms/Uldaman/uldaman.cpp +++ b/src/server/scripts/EasternKingdoms/Uldaman/uldaman.cpp @@ -168,4 +168,3 @@ void AddSC_uldaman() new go_keystone_chamber(); new AreaTrigger_at_map_chamber(); } - diff --git a/src/server/scripts/EasternKingdoms/ZulAman/boss_akilzon.cpp b/src/server/scripts/EasternKingdoms/ZulAman/boss_akilzon.cpp index 2983ae15477..42c9dd9d766 100644 --- a/src/server/scripts/EasternKingdoms/ZulAman/boss_akilzon.cpp +++ b/src/server/scripts/EasternKingdoms/ZulAman/boss_akilzon.cpp @@ -463,4 +463,3 @@ void AddSC_boss_akilzon() new boss_akilzon(); new npc_akilzon_eagle(); } - diff --git a/src/server/scripts/EasternKingdoms/ZulAman/boss_hexlord.cpp b/src/server/scripts/EasternKingdoms/ZulAman/boss_hexlord.cpp index 6bced5b64d0..fa617532e00 100644 --- a/src/server/scripts/EasternKingdoms/ZulAman/boss_hexlord.cpp +++ b/src/server/scripts/EasternKingdoms/ZulAman/boss_hexlord.cpp @@ -1052,4 +1052,3 @@ void AddSC_boss_hex_lord_malacrass() new boss_alyson_antille(); new spell_hexlord_unstable_affliction(); } - diff --git a/src/server/scripts/EasternKingdoms/ZulAman/boss_janalai.cpp b/src/server/scripts/EasternKingdoms/ZulAman/boss_janalai.cpp index 90c0516da52..606f6652a89 100644 --- a/src/server/scripts/EasternKingdoms/ZulAman/boss_janalai.cpp +++ b/src/server/scripts/EasternKingdoms/ZulAman/boss_janalai.cpp @@ -692,4 +692,3 @@ void AddSC_boss_janalai() new npc_janalai_hatchling(); new npc_janalai_egg(); } - diff --git a/src/server/scripts/EasternKingdoms/ZulAman/boss_zuljin.cpp b/src/server/scripts/EasternKingdoms/ZulAman/boss_zuljin.cpp index 02313eab281..6ea6388bc3c 100644 --- a/src/server/scripts/EasternKingdoms/ZulAman/boss_zuljin.cpp +++ b/src/server/scripts/EasternKingdoms/ZulAman/boss_zuljin.cpp @@ -608,4 +608,3 @@ void AddSC_boss_zuljin() new boss_zuljin(); new npc_zuljin_vortex(); } - diff --git a/src/server/scripts/EasternKingdoms/ZulGurub/boss_grilek.cpp b/src/server/scripts/EasternKingdoms/ZulGurub/boss_grilek.cpp index 4561a2dd924..f9e4ac50c59 100644 --- a/src/server/scripts/EasternKingdoms/ZulGurub/boss_grilek.cpp +++ b/src/server/scripts/EasternKingdoms/ZulGurub/boss_grilek.cpp @@ -117,4 +117,3 @@ void AddSC_boss_grilek() { new boss_grilek(); } - diff --git a/src/server/scripts/EasternKingdoms/ZulGurub/boss_hakkar.cpp 
b/src/server/scripts/EasternKingdoms/ZulGurub/boss_hakkar.cpp index 16c8154af87..b59c2650e77 100644 --- a/src/server/scripts/EasternKingdoms/ZulGurub/boss_hakkar.cpp +++ b/src/server/scripts/EasternKingdoms/ZulGurub/boss_hakkar.cpp @@ -182,4 +182,3 @@ void AddSC_boss_hakkar() { new boss_hakkar(); } - diff --git a/src/server/scripts/EasternKingdoms/ZulGurub/boss_renataki.cpp b/src/server/scripts/EasternKingdoms/ZulGurub/boss_renataki.cpp index 972229a1a8f..2fd84c07889 100644 --- a/src/server/scripts/EasternKingdoms/ZulGurub/boss_renataki.cpp +++ b/src/server/scripts/EasternKingdoms/ZulGurub/boss_renataki.cpp @@ -173,4 +173,3 @@ void AddSC_boss_renataki() { new boss_renataki(); } - diff --git a/src/server/scripts/EasternKingdoms/ZulGurub/boss_wushoolay.cpp b/src/server/scripts/EasternKingdoms/ZulGurub/boss_wushoolay.cpp index 44500347ca2..b937a5102f9 100644 --- a/src/server/scripts/EasternKingdoms/ZulGurub/boss_wushoolay.cpp +++ b/src/server/scripts/EasternKingdoms/ZulGurub/boss_wushoolay.cpp @@ -109,4 +109,3 @@ void AddSC_boss_wushoolay() { new boss_wushoolay(); } - diff --git a/src/server/scripts/Kalimdor/CavernsOfTime/BattleForMountHyjal/hyjalAI.h b/src/server/scripts/Kalimdor/CavernsOfTime/BattleForMountHyjal/hyjalAI.h index 9a0c1f78b92..cbba1013573 100644 --- a/src/server/scripts/Kalimdor/CavernsOfTime/BattleForMountHyjal/hyjalAI.h +++ b/src/server/scripts/Kalimdor/CavernsOfTime/BattleForMountHyjal/hyjalAI.h @@ -206,4 +206,3 @@ struct hyjalAI : public EscortAI uint32 SpellTimer[3]; }; #endif - diff --git a/src/server/scripts/Kalimdor/zone_orgrimmar.cpp b/src/server/scripts/Kalimdor/zone_orgrimmar.cpp index fc0d5120015..d893974a3c6 100644 --- a/src/server/scripts/Kalimdor/zone_orgrimmar.cpp +++ b/src/server/scripts/Kalimdor/zone_orgrimmar.cpp @@ -202,7 +202,7 @@ public: Initialize(); } - void EnterCombat(Unit* /*who*/) override + void EnterCombat(Unit* /*who*/) override { DoPlaySoundToSet(me, SOUND_AGGRO); } diff --git a/src/server/scripts/Northrend/ChamberOfAspects/ObsidianSanctum/obsidian_sanctum.cpp b/src/server/scripts/Northrend/ChamberOfAspects/ObsidianSanctum/obsidian_sanctum.cpp index 144d2ab2312..b42470c3bac 100644 --- a/src/server/scripts/Northrend/ChamberOfAspects/ObsidianSanctum/obsidian_sanctum.cpp +++ b/src/server/scripts/Northrend/ChamberOfAspects/ObsidianSanctum/obsidian_sanctum.cpp @@ -1067,4 +1067,3 @@ void AddSC_obsidian_sanctum() new achievement_twilight_duo(); new achievement_twilight_zone(); } - diff --git a/src/server/scripts/Northrend/ChamberOfAspects/RubySanctum/boss_baltharus_the_warborn.cpp b/src/server/scripts/Northrend/ChamberOfAspects/RubySanctum/boss_baltharus_the_warborn.cpp index 24f35115e04..2f9201ec473 100644 --- a/src/server/scripts/Northrend/ChamberOfAspects/RubySanctum/boss_baltharus_the_warborn.cpp +++ b/src/server/scripts/Northrend/ChamberOfAspects/RubySanctum/boss_baltharus_the_warborn.cpp @@ -334,16 +334,22 @@ class spell_baltharus_enervating_brand_trigger : public SpellScriptLoader { PrepareSpellScript(spell_baltharus_enervating_brand_trigger_SpellScript); - void CheckDistance() + bool Validate(SpellInfo const* /*spell*/) override { - Unit* caster = GetCaster(); - Unit* target = GetHitUnit(); - target->CastSpell(caster, SPELL_SIPHONED_MIGHT, true); + return ValidateSpellInfo({ SPELL_SIPHONED_MIGHT }); + } + + void HandleSiphonedMight() + { + if (SpellInfo const* spellInfo = GetTriggeringSpell()) + if (Aura* triggerAura = GetCaster()->GetAura(spellInfo->Id)) + if (Unit* caster = triggerAura->GetCaster()) + GetHitUnit()->CastSpell(caster, 
SPELL_SIPHONED_MIGHT, true); } void Register() override { - OnHit += SpellHitFn(spell_baltharus_enervating_brand_trigger_SpellScript::CheckDistance); + OnHit += SpellHitFn(spell_baltharus_enervating_brand_trigger_SpellScript::HandleSiphonedMight); } }; diff --git a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_anubarak_trial.cpp b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_anubarak_trial.cpp index 72ac4f0e499..4d3f243a4a7 100644 --- a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_anubarak_trial.cpp +++ b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_anubarak_trial.cpp @@ -168,7 +168,7 @@ class boss_anubarak_trial : public CreatureScript struct boss_anubarak_trialAI : public BossAI { - boss_anubarak_trialAI(Creature* creature) : BossAI(creature, BOSS_ANUBARAK) + boss_anubarak_trialAI(Creature* creature) : BossAI(creature, DATA_ANUBARAK) { Initialize(); } @@ -223,7 +223,7 @@ class boss_anubarak_trial : public CreatureScript void JustReachedHome() override { - instance->SetBossState(BOSS_ANUBARAK, FAIL); + instance->SetBossState(DATA_ANUBARAK, FAIL); //Summon Scarab Swarms neutral at random places for (int i = 0; i < 10; i++) if (Creature* scarab = me->SummonCreature(NPC_SCARAB, AnubarakLoc[1].GetPositionX()+urand(0, 50)-25, AnubarakLoc[1].GetPositionY()+urand(0, 50)-25, AnubarakLoc[1].GetPositionZ())) @@ -461,8 +461,8 @@ class npc_swarm_scarab : public CreatureScript DoCast(me, SPELL_ACID_MANDIBLE); me->SetInCombatWithZone(); if (me->IsInCombat()) - if (Creature* Anubarak = ObjectAccessor::GetCreature(*me, _instance->GetGuidData(NPC_ANUBARAK))) - Anubarak->AI()->JustSummoned(me); + if (Creature* anubarak = _instance->GetCreature(DATA_ANUBARAK)) + anubarak->AI()->JustSummoned(me); } void DoAction(int32 actionId) override @@ -485,7 +485,7 @@ class npc_swarm_scarab : public CreatureScript void UpdateAI(uint32 diff) override { - if (_instance->GetBossState(BOSS_ANUBARAK) != IN_PROGRESS) + if (_instance->GetBossState(DATA_ANUBARAK) != IN_PROGRESS) me->DisappearAndDie(); if (!UpdateVictim()) @@ -541,8 +541,8 @@ class npc_nerubian_burrower : public CreatureScript DoCast(me, SPELL_AWAKENED); me->SetInCombatWithZone(); if (me->IsInCombat()) - if (Creature* Anubarak = ObjectAccessor::GetCreature(*me, _instance->GetGuidData(NPC_ANUBARAK))) - Anubarak->AI()->JustSummoned(me); + if (Creature* anubarak = _instance->GetCreature(DATA_ANUBARAK)) + anubarak->AI()->JustSummoned(me); } void DoAction(int32 actionId) override @@ -561,7 +561,7 @@ class npc_nerubian_burrower : public CreatureScript void UpdateAI(uint32 diff) override { - if (_instance->GetBossState(BOSS_ANUBARAK) != IN_PROGRESS) + if (_instance->GetBossState(DATA_ANUBARAK) != IN_PROGRESS) me->DisappearAndDie(); if (!UpdateVictim() && !me->HasAura(SPELL_SUBMERGE_EFFECT)) diff --git a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_faction_champions.cpp b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_faction_champions.cpp index fc2d5c466cc..be5772f191f 100644 --- a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_faction_champions.cpp +++ b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_faction_champions.cpp @@ -375,12 +375,11 @@ class boss_toc_champion_controller : public CreatureScript public: boss_toc_champion_controller() : CreatureScript("boss_toc_champion_controller") { } - struct boss_toc_champion_controllerAI : public ScriptedAI + struct 
boss_toc_champion_controllerAI : public BossAI { - boss_toc_champion_controllerAI(Creature* creature) : ScriptedAI(creature), _summons(me) + boss_toc_champion_controllerAI(Creature* creature) : BossAI(creature, DATA_FACTION_CRUSADERS) { Initialize(); - _instance = creature->GetInstanceScript(); } void Initialize() @@ -396,6 +395,8 @@ class boss_toc_champion_controller : public CreatureScript Initialize(); } + void JustSummoned(Creature* /*summon*/) override { } + std::vector<uint32> SelectChampions(Team playerTeam) { std::vector<uint32> vHealersEntries; @@ -415,7 +416,7 @@ class boss_toc_champion_controller : public CreatureScript vOtherEntries.push_back(playerTeam == ALLIANCE ? NPC_HORDE_WARRIOR : NPC_ALLIANCE_WARRIOR); uint8 healersSubtracted = 2; - if (_instance->instance->GetSpawnMode() == RAID_DIFFICULTY_25MAN_NORMAL || _instance->instance->GetSpawnMode() == RAID_DIFFICULTY_25MAN_HEROIC) + if (instance->instance->GetSpawnMode() == RAID_DIFFICULTY_25MAN_NORMAL || instance->instance->GetSpawnMode() == RAID_DIFFICULTY_25MAN_HEROIC) healersSubtracted = 1; for (uint8 i = 0; i < healersSubtracted; ++i) { @@ -452,7 +453,7 @@ class boss_toc_champion_controller : public CreatureScript vHealersEntries.erase(vHealersEntries.begin() + pos); } - if (_instance->instance->GetSpawnMode() == RAID_DIFFICULTY_10MAN_NORMAL || _instance->instance->GetSpawnMode() == RAID_DIFFICULTY_10MAN_HEROIC) + if (instance->instance->GetSpawnMode() == RAID_DIFFICULTY_10MAN_NORMAL || instance->instance->GetSpawnMode() == RAID_DIFFICULTY_10MAN_HEROIC) for (uint8 i = 0; i < 4; ++i) vOtherEntries.erase(vOtherEntries.begin() + urand(0, vOtherEntries.size() - 1)); @@ -486,7 +487,7 @@ class boss_toc_champion_controller : public CreatureScript uint8 pos = urand(0, vChampionJumpTarget.size()-1); if (Creature* champion = me->SummonCreature(vChampionEntries[i], vChampionJumpOrigin[urand(0, vChampionJumpOrigin.size()-1)], TEMPSUMMON_MANUAL_DESPAWN)) { - _summons.Summon(champion); + summons.Summon(champion); champion->SetReactState(REACT_PASSIVE); champion->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); champion->SetImmuneToPC(false); @@ -515,7 +516,7 @@ class boss_toc_champion_controller : public CreatureScript SummonChampions((Team)uiData); break; case 1: - for (SummonList::iterator i = _summons.begin(); i != _summons.end(); ++i) + for (SummonList::iterator i = summons.begin(); i != summons.end(); ++i) { if (Creature* summon = ObjectAccessor::GetCreature(*me, *i)) { @@ -530,10 +531,10 @@ class boss_toc_champion_controller : public CreatureScript { case FAIL: _championsFailed++; - if (_championsFailed + _championsKilled >= _summons.size()) + if (_championsFailed + _championsKilled >= summons.size()) { - _instance->SetBossState(BOSS_CRUSADERS, FAIL); - _summons.DespawnAll(); + instance->SetBossState(DATA_FACTION_CRUSADERS, FAIL); + summons.DespawnAll(); me->DespawnOrUnsummon(); } break; @@ -544,21 +545,23 @@ class boss_toc_champion_controller : public CreatureScript _championsFailed = 0; _championsKilled = 0; _inProgress = true; - _summons.DoZoneInCombat(); - _instance->SetBossState(BOSS_CRUSADERS, IN_PROGRESS); + summons.DoZoneInCombat(); + instance->SetBossState(DATA_FACTION_CRUSADERS, IN_PROGRESS); } break; case DONE: + { _championsKilled++; if (_championsKilled == 1) - _instance->SetBossState(BOSS_CRUSADERS, SPECIAL); - else if (_championsKilled >= _summons.size()) + instance->SetBossState(DATA_FACTION_CRUSADERS, SPECIAL); + else if (_championsKilled >= summons.size()) { - _instance->SetBossState(BOSS_CRUSADERS, DONE); - 
_summons.DespawnAll(); + instance->SetBossState(DATA_FACTION_CRUSADERS, DONE); + summons.DespawnAll(); me->DespawnOrUnsummon(); } break; + } default: break; } @@ -568,8 +571,6 @@ class boss_toc_champion_controller : public CreatureScript } } private: - InstanceScript* _instance; - SummonList _summons; uint32 _championsNotStarted; uint32 _championsFailed; uint32 _championsKilled; @@ -584,9 +585,10 @@ class boss_toc_champion_controller : public CreatureScript struct boss_faction_championsAI : public BossAI { - boss_faction_championsAI(Creature* creature, uint32 aitype) : BossAI(creature, BOSS_CRUSADERS) + boss_faction_championsAI(Creature* creature, uint32 aitype) : BossAI(creature, DATA_FACTION_CHAMPIONS) { _aiType = aitype; + SetBoundary(instance->GetBossBoundary(DATA_FACTION_CRUSADERS)); } void Reset() override @@ -598,7 +600,7 @@ struct boss_faction_championsAI : public BossAI void JustReachedHome() override { - if (Creature* pChampionController = ObjectAccessor::GetCreature((*me), instance->GetGuidData(NPC_CHAMPIONS_CONTROLLER))) + if (Creature* pChampionController = instance->GetCreature(DATA_FACTION_CRUSADERS)) pChampionController->AI()->SetData(2, FAIL); me->DespawnOrUnsummon(); } @@ -637,15 +639,17 @@ struct boss_faction_championsAI : public BossAI void JustDied(Unit* /*killer*/) override { if (_aiType != AI_PET) - if (Creature* pChampionController = ObjectAccessor::GetCreature((*me), instance->GetGuidData(NPC_CHAMPIONS_CONTROLLER))) + if (Creature* pChampionController = instance->GetCreature(DATA_FACTION_CRUSADERS)) pChampionController->AI()->SetData(2, DONE); } void EnterCombat(Unit* /*who*/) override { DoCast(me, SPELL_ANTI_AOE, true); - _EnterCombat(); - if (Creature* pChampionController = ObjectAccessor::GetCreature((*me), instance->GetGuidData(NPC_CHAMPIONS_CONTROLLER))) + me->SetCombatPulseDelay(5); + me->setActive(true); + DoZoneInCombat(); + if (Creature* pChampionController = instance->GetCreature(DATA_FACTION_CRUSADERS)) pChampionController->AI()->SetData(2, IN_PROGRESS); } @@ -662,11 +666,11 @@ struct boss_faction_championsAI : public BossAI if (TeamInInstance == ALLIANCE) { - if (Creature* varian = ObjectAccessor::GetCreature(*me, instance->GetGuidData(NPC_VARIAN))) + if (Creature* varian = instance->GetCreature(DATA_VARIAN)) varian->AI()->Talk(SAY_KILL_PLAYER); } else - if (Creature* garrosh = ObjectAccessor::GetCreature(*me, instance->GetGuidData(NPC_GARROSH))) + if (Creature* garrosh = instance->GetCreature(DATA_GARROSH)) garrosh->AI()->Talk(SAY_KILL_PLAYER); } diff --git a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_lord_jaraxxus.cpp b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_lord_jaraxxus.cpp index 8f50ed0e365..8b32acdadb5 100644 --- a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_lord_jaraxxus.cpp +++ b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_lord_jaraxxus.cpp @@ -102,7 +102,7 @@ class boss_jaraxxus : public CreatureScript struct boss_jaraxxusAI : public BossAI { - boss_jaraxxusAI(Creature* creature) : BossAI(creature, BOSS_JARAXXUS) { } + boss_jaraxxusAI(Creature* creature) : BossAI(creature, DATA_JARAXXUS) { } void Reset() override { @@ -119,7 +119,7 @@ class boss_jaraxxus : public CreatureScript void JustReachedHome() override { _JustReachedHome(); - instance->SetBossState(BOSS_JARAXXUS, FAIL); + instance->SetBossState(DATA_JARAXXUS, FAIL); DoCast(me, SPELL_JARAXXUS_CHAINS); me->SetFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); 
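// [editor note] The Trial of the Crusader hunks in this range all follow one
// pattern: creature lookups move from entry-keyed GetGuidData(NPC_*) plus
// ObjectAccessor to data-keyed InstanceScript::GetCreature(DATA_*), backed by
// the ObjectData tables registered via LoadObjectData further below. A toy
// sketch of such a registry; the types and names are invented for illustration:
#include <cstdint>
#include <unordered_map>

using Guid = std::uint64_t;

class BossRegistry
{
public:
    // Called once at setup with { entry -> dataId } rows, mirroring the
    // ObjectData arrays terminated by a { 0, 0 } sentinel.
    void Register(std::uint32_t entry, std::uint32_t dataId) { _entryToData[entry] = dataId; }

    // OnCreatureCreate analogue: record the spawned GUID under its data id.
    void OnCreate(std::uint32_t entry, Guid guid)
    {
        auto it = _entryToData.find(entry);
        if (it != _entryToData.end())
            _dataToGuid[it->second] = guid;
    }

    // GetCreature(DATA_X) analogue: one stable key per boss, independent of
    // which concrete entry actually spawned.
    Guid Get(std::uint32_t dataId) const
    {
        auto it = _dataToGuid.find(dataId);
        return it != _dataToGuid.end() ? it->second : 0;
    }

private:
    std::unordered_map<std::uint32_t, std::uint32_t> _entryToData;
    std::unordered_map<std::uint32_t, Guid> _dataToGuid;
};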
me->SetImmuneToPC(true); @@ -238,7 +238,7 @@ class npc_legion_flame : public CreatureScript void UpdateAI(uint32 /*diff*/) override { UpdateVictim(); - if (_instance->GetBossState(BOSS_JARAXXUS) != IN_PROGRESS) + if (_instance->GetBossState(DATA_JARAXXUS) != IN_PROGRESS) me->DespawnOrUnsummon(); } private: @@ -330,7 +330,7 @@ class npc_fel_infernal : public CreatureScript void UpdateAI(uint32 diff) override { - if (_instance->GetBossState(BOSS_JARAXXUS) != IN_PROGRESS) + if (_instance->GetBossState(DATA_JARAXXUS) != IN_PROGRESS) { me->DespawnOrUnsummon(); return; @@ -438,7 +438,7 @@ class npc_mistress_of_pain : public CreatureScript void UpdateAI(uint32 diff) override { - if (_instance->GetBossState(BOSS_JARAXXUS) != IN_PROGRESS) + if (_instance->GetBossState(DATA_JARAXXUS) != IN_PROGRESS) { me->DespawnOrUnsummon(); return; diff --git a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_northrend_beasts.cpp b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_northrend_beasts.cpp index 17ce9458905..772edda63c6 100644 --- a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_northrend_beasts.cpp +++ b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_northrend_beasts.cpp @@ -179,7 +179,10 @@ class boss_gormok : public CreatureScript struct boss_gormokAI : public BossAI { - boss_gormokAI(Creature* creature) : BossAI(creature, BOSS_BEASTS) { } + boss_gormokAI(Creature* creature) : BossAI(creature, DATA_GORMOK_THE_IMPALER) + { + SetBoundary(instance->GetBossBoundary(DATA_NORTHREND_BEASTS)); + } void Reset() override { @@ -192,7 +195,7 @@ class boss_gormok : public CreatureScript void EnterEvadeMode(EvadeReason why) override { - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); ScriptedAI::EnterEvadeMode(why); } @@ -204,7 +207,7 @@ class boss_gormok : public CreatureScript switch (pointId) { case 0: - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE | UNIT_FLAG_NOT_SELECTABLE); me->SetImmuneToPC(false); me->SetReactState(REACT_AGGRESSIVE); @@ -222,7 +225,7 @@ class boss_gormok : public CreatureScript void JustReachedHome() override { - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); instance->SetData(TYPE_NORTHREND_BEASTS, FAIL); me->DespawnOrUnsummon(); @@ -230,8 +233,11 @@ class boss_gormok : public CreatureScript void EnterCombat(Unit* /*who*/) override { - _EnterCombat(); + me->SetCombatPulseDelay(5); + me->setActive(true); + //DoZoneInCombat(); instance->SetData(TYPE_NORTHREND_BEASTS, GORMOK_IN_PROGRESS); + instance->SetBossState(DATA_NORTHREND_BEASTS, IN_PROGRESS); } void DamageTaken(Unit* /*who*/, uint32& damage) override @@ -387,7 +393,7 @@ class npc_snobold_vassal : public CreatureScript void MountOnBoss() { - Unit* gormok = ObjectAccessor::GetCreature(*me, _instance->GetGuidData(NPC_GORMOK)); + Unit* gormok = _instance->GetCreature(DATA_GORMOK_THE_IMPALER); if (gormok && gormok->IsAlive()) { me->AttackStop(); @@ -511,7 +517,7 @@ class npc_firebomb : public CreatureScript struct boss_jormungarAI : public BossAI { - boss_jormungarAI(Creature* creature) : BossAI(creature, BOSS_BEASTS) + boss_jormungarAI(Creature* creature, uint32 bossId) : BossAI(creature, bossId) { OtherWormEntry 
= 0; ModelStationary = 0; @@ -525,6 +531,7 @@ struct boss_jormungarAI : public BossAI Phase = PHASE_MOBILE; Enraged = false; WasMobile = false; + SetBoundary(instance->GetBossBoundary(DATA_NORTHREND_BEASTS)); } void Reset() override @@ -539,9 +546,14 @@ struct boss_jormungarAI : public BossAI events.ScheduleEvent(EVENT_SLIME_POOL, 15*IN_MILLISECONDS, 0, PHASE_MOBILE); } + uint32 GetOtherWormData(uint32 wormEntry) + { + return wormEntry == NPC_ACIDMAW ? DATA_ACIDMAW : DATA_DREADSCALE; + } + void JustDied(Unit* /*killer*/) override { - if (Creature* otherWorm = ObjectAccessor::GetCreature(*me, instance->GetGuidData(OtherWormEntry))) + if (Creature* otherWorm = instance->GetCreature(GetOtherWormData(OtherWormEntry))) { if (!otherWorm->IsAlive()) { @@ -714,7 +726,7 @@ class boss_acidmaw : public CreatureScript struct boss_acidmawAI : public boss_jormungarAI { - boss_acidmawAI(Creature* creature) : boss_jormungarAI(creature) { } + boss_acidmawAI(Creature* creature) : boss_jormungarAI(creature, DATA_ACIDMAW) { } void Reset() override { @@ -745,7 +757,7 @@ class boss_dreadscale : public CreatureScript struct boss_dreadscaleAI : public boss_jormungarAI { - boss_dreadscaleAI(Creature* creature) : boss_jormungarAI(creature) { } + boss_dreadscaleAI(Creature* creature) : boss_jormungarAI(creature, DATA_DREADSCALE) { } void Reset() override { @@ -772,7 +784,7 @@ class boss_dreadscale : public CreatureScript switch (pointId) { case 0: - instance->DoCloseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoCloseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE | UNIT_FLAG_NOT_SELECTABLE); me->SetImmuneToPC(false); me->SetReactState(REACT_AGGRESSIVE); @@ -785,13 +797,13 @@ class boss_dreadscale : public CreatureScript void EnterEvadeMode(EvadeReason why) override { - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); boss_jormungarAI::EnterEvadeMode(why); } void JustReachedHome() override { - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); boss_jormungarAI::JustReachedHome(); } @@ -887,9 +899,10 @@ class boss_icehowl : public CreatureScript struct boss_icehowlAI : public BossAI { - boss_icehowlAI(Creature* creature) : BossAI(creature, BOSS_BEASTS) + boss_icehowlAI(Creature* creature) : BossAI(creature, DATA_ICEHOWL) { Initialize(); + SetBoundary(instance->GetBossBoundary(DATA_NORTHREND_BEASTS)); } void Initialize() @@ -946,7 +959,7 @@ class boss_icehowl : public CreatureScript _movementFinish = true; break; case 2: - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); me->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE | UNIT_FLAG_NOT_SELECTABLE); me->SetImmuneToPC(false); me->SetReactState(REACT_AGGRESSIVE); @@ -959,13 +972,13 @@ class boss_icehowl : public CreatureScript void EnterEvadeMode(EvadeReason why) override { - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); ScriptedAI::EnterEvadeMode(why); } void JustReachedHome() override { - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); instance->SetData(TYPE_NORTHREND_BEASTS, FAIL); me->DespawnOrUnsummon(); } diff --git 
a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_twin_valkyr.cpp b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_twin_valkyr.cpp index 49bd28f7e56..7050f20c1c9 100644 --- a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_twin_valkyr.cpp +++ b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/boss_twin_valkyr.cpp @@ -158,9 +158,14 @@ class OrbsDespawner : public BasicEvent Creature* _creature; }; +static uint32 GetSisterData(uint32 sisterEntry) +{ + return sisterEntry == NPC_FJOLA_LIGHTBANE ? DATA_FJOLA_LIGHTBANE : DATA_EYDIS_DARKBANE; +} + struct boss_twin_baseAI : public BossAI { - boss_twin_baseAI(Creature* creature) : BossAI(creature, BOSS_VALKIRIES) + boss_twin_baseAI(Creature* creature, uint32 bossId) : BossAI(creature, bossId) { AuraState = AURA_STATE_NONE; Weapon = 0; @@ -173,6 +178,7 @@ struct boss_twin_baseAI : public BossAI TwinPactSpellId = 0; SpikeSpellId = 0; TouchSpellId = 0; + SetBoundary(instance->GetBossBoundary(DATA_TWIN_VALKIRIES)); } void Reset() override @@ -189,7 +195,7 @@ struct boss_twin_baseAI : public BossAI void JustReachedHome() override { - instance->SetBossState(BOSS_VALKIRIES, FAIL); + instance->SetBossState(DATA_TWIN_VALKIRIES, FAIL); summons.DespawnAll(); me->DespawnOrUnsummon(); @@ -248,12 +254,14 @@ struct boss_twin_baseAI : public BossAI { me->SetFlag(UNIT_DYNAMIC_FLAGS, UNIT_DYNFLAG_LOOTABLE); pSister->SetFlag(UNIT_DYNAMIC_FLAGS, UNIT_DYNFLAG_LOOTABLE); - _JustDied(); + events.Reset(); + summons.DespawnAll(); + instance->SetBossState(DATA_TWIN_VALKIRIES, DONE); } else { me->RemoveFlag(UNIT_DYNAMIC_FLAGS, UNIT_DYNFLAG_LOOTABLE); - instance->SetBossState(BOSS_VALKIRIES, SPECIAL); + instance->SetBossState(DATA_TWIN_VALKIRIES, SPECIAL); } } summons.DespawnAll(); @@ -262,7 +270,7 @@ struct boss_twin_baseAI : public BossAI // Called when sister pointer needed Creature* GetSister() { - return ObjectAccessor::GetCreature((*me), instance->GetGuidData(SisterNpcId)); + return instance->GetCreature(GetSisterData(SisterNpcId)); } void EnterCombat(Unit* /*who*/) override @@ -273,10 +281,12 @@ struct boss_twin_baseAI : public BossAI me->AddAura(MyEmphatySpellId, pSister); pSister->SetInCombatWithZone(); } - instance->SetBossState(BOSS_VALKIRIES, IN_PROGRESS); + instance->SetBossState(DATA_TWIN_VALKIRIES, IN_PROGRESS); Talk(SAY_AGGRO); DoCast(me, SurgeSpellId); + me->SetCombatPulseDelay(5); + me->setActive(true); events.ScheduleEvent(EVENT_TWIN_SPIKE, 20 * IN_MILLISECONDS); events.ScheduleEvent(EVENT_BERSERK, IsHeroic() ? 
6 * MINUTE*IN_MILLISECONDS : 10 * MINUTE*IN_MILLISECONDS); @@ -354,7 +364,7 @@ class boss_fjola : public CreatureScript struct boss_fjolaAI : public boss_twin_baseAI { - boss_fjolaAI(Creature* creature) : boss_twin_baseAI(creature) + boss_fjolaAI(Creature* creature) : boss_twin_baseAI(creature, DATA_FJOLA_LIGHTBANE) { GenerateStageSequence(); } @@ -364,7 +374,7 @@ class boss_fjola : public CreatureScript SetEquipmentSlots(false, EQUIP_MAIN_1, EQUIP_UNEQUIP, EQUIP_NO_CHANGE); Weapon = EQUIP_MAIN_1; AuraState = AURA_STATE_UNKNOWN22; - SisterNpcId = NPC_DARKBANE; + SisterNpcId = NPC_EYDIS_DARKBANE; MyEmphatySpellId = SPELL_TWIN_EMPATHY_DARK; OtherEssenceSpellId = SPELL_DARK_ESSENCE_HELPER; SurgeSpellId = SPELL_LIGHT_SURGE; @@ -421,13 +431,13 @@ class boss_fjola : public CreatureScript void EnterEvadeMode(EvadeReason why) override { - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); boss_twin_baseAI::EnterEvadeMode(why); } void JustReachedHome() override { - instance->DoUseDoorOrButton(instance->GetGuidData(GO_MAIN_GATE_DOOR)); + instance->DoUseDoorOrButton(instance->GetGuidData(DATA_MAIN_GATE)); boss_twin_baseAI::JustReachedHome(); } @@ -465,14 +475,14 @@ class boss_eydis : public CreatureScript struct boss_eydisAI : public boss_twin_baseAI { - boss_eydisAI(Creature* creature) : boss_twin_baseAI(creature) { } + boss_eydisAI(Creature* creature) : boss_twin_baseAI(creature, DATA_EYDIS_DARKBANE) { } void Reset() override { SetEquipmentSlots(false, EQUIP_MAIN_2, EQUIP_UNEQUIP, EQUIP_NO_CHANGE); Weapon = EQUIP_MAIN_2; AuraState = AURA_STATE_UNKNOWN19; - SisterNpcId = NPC_LIGHTBANE; + SisterNpcId = NPC_FJOLA_LIGHTBANE; MyEmphatySpellId = SPELL_TWIN_EMPATHY_LIGHT; OtherEssenceSpellId = SPELL_LIGHT_ESSENCE_HELPER; SurgeSpellId = SPELL_DARK_SURGE; @@ -860,8 +870,8 @@ class spell_power_of_the_twins : public SpellScriptLoader { if (InstanceScript* instance = GetCaster()->GetInstanceScript()) { - if (Creature* Valk = ObjectAccessor::GetCreature(*GetCaster(), instance->GetGuidData(GetCaster()->GetEntry()))) - ENSURE_AI(boss_twin_baseAI, Valk->AI())->EnableDualWield(true); + if (Creature* valk = instance->GetCreature(GetSisterData(GetCaster()->GetEntry()))) + ENSURE_AI(boss_twin_baseAI, valk->AI())->EnableDualWield(true); } } @@ -869,8 +879,8 @@ class spell_power_of_the_twins : public SpellScriptLoader { if (InstanceScript* instance = GetCaster()->GetInstanceScript()) { - if (Creature* Valk = ObjectAccessor::GetCreature(*GetCaster(), instance->GetGuidData(GetCaster()->GetEntry()))) - ENSURE_AI(boss_twin_baseAI, Valk->AI())->EnableDualWield(false); + if (Creature* valk = instance->GetCreature(GetSisterData(GetCaster()->GetEntry()))) + ENSURE_AI(boss_twin_baseAI, valk->AI())->EnableDualWield(false); } } diff --git a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/instance_trial_of_the_crusader.cpp b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/instance_trial_of_the_crusader.cpp index 506f076be33..28891678aef 100644 --- a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/instance_trial_of_the_crusader.cpp +++ b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/instance_trial_of_the_crusader.cpp @@ -29,11 +29,53 @@ BossBoundaryData const boundaries = { - { BOSS_BEASTS, new CircleBoundary(Position(563.26f, 139.6f), 75.0) }, - { BOSS_JARAXXUS, new CircleBoundary(Position(563.26f, 139.6f), 75.0) }, - { BOSS_CRUSADERS, new 
CircleBoundary(Position(563.26f, 139.6f), 75.0) }, - { BOSS_VALKIRIES, new CircleBoundary(Position(563.26f, 139.6f), 75.0) }, - { BOSS_ANUBARAK, new EllipseBoundary(Position(746.0f, 135.0f), 100.0, 75.0) } + { DATA_NORTHREND_BEASTS, new CircleBoundary(Position(563.26f, 139.6f), 75.0) }, + { DATA_JARAXXUS, new CircleBoundary(Position(563.26f, 139.6f), 75.0) }, + { DATA_FACTION_CRUSADERS, new CircleBoundary(Position(563.26f, 139.6f), 75.0) }, + { DATA_TWIN_VALKIRIES, new CircleBoundary(Position(563.26f, 139.6f), 75.0) }, + { DATA_ANUBARAK, new EllipseBoundary(Position(746.0f, 135.0f), 100.0, 75.0) } +}; + +ObjectData const creatureData[] = +{ + { NPC_GORMOK, DATA_GORMOK_THE_IMPALER }, + { NPC_ACIDMAW, DATA_ACIDMAW }, + { NPC_DREADSCALE, DATA_DREADSCALE }, + { NPC_ICEHOWL, DATA_ICEHOWL }, + { NPC_JARAXXUS, DATA_JARAXXUS }, + { NPC_CHAMPIONS_CONTROLLER, DATA_FACTION_CRUSADERS }, + { NPC_FJOLA_LIGHTBANE, DATA_FJOLA_LIGHTBANE }, + { NPC_EYDIS_DARKBANE, DATA_EYDIS_DARKBANE }, + { NPC_LICH_KING, DATA_LICH_KING }, + { NPC_ANUBARAK, DATA_ANUBARAK }, + { NPC_BARRET_RAMSEY, DATA_BARRET_RAMSEY }, + { NPC_TIRION_FORDRING, DATA_FORDRING }, + { NPC_TIRION_FORDRING_ANUBARAK, DATA_FORDRING_ANUBARAK }, + { NPC_VARIAN, DATA_VARIAN }, + { NPC_GARROSH, DATA_GARROSH }, + { NPC_FIZZLEBANG, DATA_FIZZLEBANG }, + { 0, 0 } // END +}; + +ObjectData const gameObjectData[] = +{ + { GO_CRUSADERS_CACHE_10, DATA_CRUSADERS_CHEST }, + { GO_CRUSADERS_CACHE_25, DATA_CRUSADERS_CHEST }, + { GO_CRUSADERS_CACHE_10_H, DATA_CRUSADERS_CHEST }, + { GO_CRUSADERS_CACHE_25_H, DATA_CRUSADERS_CHEST }, + { GO_ARGENT_COLISEUM_FLOOR, DATA_COLISEUM_FLOOR }, + { GO_MAIN_GATE_DOOR, DATA_MAIN_GATE }, + { GO_EAST_PORTCULLIS, DATA_EAST_PORTCULLIS }, + { GO_WEB_DOOR, DATA_WEB_DOOR }, + { GO_TRIBUTE_CHEST_10H_25, DATA_TRIBUTE_CHEST }, + { GO_TRIBUTE_CHEST_10H_45, DATA_TRIBUTE_CHEST }, + { GO_TRIBUTE_CHEST_10H_50, DATA_TRIBUTE_CHEST }, + { GO_TRIBUTE_CHEST_10H_99, DATA_TRIBUTE_CHEST }, + { GO_TRIBUTE_CHEST_25H_25, DATA_TRIBUTE_CHEST }, + { GO_TRIBUTE_CHEST_25H_45, DATA_TRIBUTE_CHEST }, + { GO_TRIBUTE_CHEST_25H_50, DATA_TRIBUTE_CHEST }, + { GO_TRIBUTE_CHEST_25H_99, DATA_TRIBUTE_CHEST }, + { 0, 0 } // END }; class instance_trial_of_the_crusader : public InstanceMapScript @@ -46,8 +88,9 @@ class instance_trial_of_the_crusader : public InstanceMapScript instance_trial_of_the_crusader_InstanceMapScript(Map* map) : InstanceScript(map) { SetHeaders(DataHeader); - SetBossNumber(MAX_ENCOUNTERS); + SetBossNumber(EncounterCount); LoadBossBoundaries(boundaries); + LoadObjectData(creatureData, gameObjectData); TrialCounter = 50; EventStage = 0; NorthrendBeasts = NOT_STARTED; @@ -62,12 +105,12 @@ class instance_trial_of_the_crusader : public InstanceMapScript bool IsEncounterInProgress() const override { - for (uint8 i = 0; i < MAX_ENCOUNTERS; ++i) + for (uint8 i = 0; i < EncounterCount; ++i) if (GetBossState(i) == IN_PROGRESS) return true; // Special state is set at Faction Champions after first champ dead, encounter is still in combat - if (GetBossState(BOSS_CRUSADERS) == SPECIAL) + if (GetBossState(DATA_FACTION_CRUSADERS) == SPECIAL) return true; return false; @@ -84,8 +127,8 @@ class instance_trial_of_the_crusader : public InstanceMapScript player->SendUpdateWorldState(UPDATE_STATE_UI_SHOW, 0); // make sure Anub'arak isnt missing - if (GetBossState(BOSS_LICH_KING) == DONE && TrialCounter && GetBossState(BOSS_ANUBARAK) != DONE) - if (!ObjectAccessor::GetCreature(*player, GetGuidData(NPC_ANUBARAK))) + if (GetBossState(DATA_LICH_KING) == DONE && TrialCounter && 
GetBossState(DATA_ANUBARAK) != DONE) + if (!GetCreature(DATA_ANUBARAK)) player->SummonCreature(NPC_ANUBARAK, AnubarakLoc[0], TEMPSUMMON_CORPSE_TIMED_DESPAWN, DESPAWN_TIME); } @@ -109,100 +152,18 @@ class instance_trial_of_the_crusader : public InstanceMapScript void OnCreatureCreate(Creature* creature) override { - switch (creature->GetEntry()) - { - case NPC_BARRENT: - BarrentGUID = creature->GetGUID(); - if (!TrialCounter) - creature->DespawnOrUnsummon(); - break; - case NPC_TIRION: - TirionGUID = creature->GetGUID(); - break; - case NPC_TIRION_FORDRING: - TirionFordringGUID = creature->GetGUID(); - break; - case NPC_FIZZLEBANG: - FizzlebangGUID = creature->GetGUID(); - break; - case NPC_GARROSH: - GarroshGUID = creature->GetGUID(); - break; - case NPC_VARIAN: - VarianGUID = creature->GetGUID(); - break; - - case NPC_GORMOK: - GormokGUID = creature->GetGUID(); - break; - case NPC_ACIDMAW: - AcidmawGUID = creature->GetGUID(); - break; - case NPC_DREADSCALE: - DreadscaleGUID = creature->GetGUID(); - break; - case NPC_ICEHOWL: - IcehowlGUID = creature->GetGUID(); - break; - case NPC_JARAXXUS: - JaraxxusGUID = creature->GetGUID(); - break; - case NPC_CHAMPIONS_CONTROLLER: - ChampionsControllerGUID = creature->GetGUID(); - break; - case NPC_DARKBANE: - DarkbaneGUID = creature->GetGUID(); - break; - case NPC_LIGHTBANE: - LightbaneGUID = creature->GetGUID(); - break; - case NPC_ANUBARAK: - AnubarakGUID = creature->GetGUID(); - creature->SetRespawnDelay(7 * DAY); - break; - default: - break; - } + InstanceScript::OnCreatureCreate(creature); + if (creature->GetEntry() == NPC_BARRET_RAMSEY) + if (!TrialCounter) + creature->DespawnOrUnsummon(); } void OnGameObjectCreate(GameObject* go) override { - switch (go->GetEntry()) - { - case GO_CRUSADERS_CACHE_10: - case GO_CRUSADERS_CACHE_25: - case GO_CRUSADERS_CACHE_10_H: - case GO_CRUSADERS_CACHE_25_H: - CrusadersCacheGUID = go->GetGUID(); - break; - case GO_ARGENT_COLISEUM_FLOOR: - FloorGUID = go->GetGUID(); - if (GetBossState(BOSS_LICH_KING) == DONE) - go->SetDestructibleState(GO_DESTRUCTIBLE_DAMAGED); - break; - case GO_MAIN_GATE_DOOR: - MainGateDoorGUID = go->GetGUID(); - break; - case GO_EAST_PORTCULLIS: - EastPortcullisGUID = go->GetGUID(); - break; - case GO_WEB_DOOR: - WebDoorGUID = go->GetGUID(); - break; - - case GO_TRIBUTE_CHEST_10H_25: - case GO_TRIBUTE_CHEST_10H_45: - case GO_TRIBUTE_CHEST_10H_50: - case GO_TRIBUTE_CHEST_10H_99: - case GO_TRIBUTE_CHEST_25H_25: - case GO_TRIBUTE_CHEST_25H_45: - case GO_TRIBUTE_CHEST_25H_50: - case GO_TRIBUTE_CHEST_25H_99: - TributeChestGUID = go->GetGUID(); - break; - default: - break; - } + InstanceScript::OnGameObjectCreate(go); + if (go->GetEntry() == GO_ARGENT_COLISEUM_FLOOR) + if (GetBossState(DATA_LICH_KING) == DONE) + go->SetDestructibleState(GO_DESTRUCTIBLE_DAMAGED); } void OnUnitDeath(Unit* unit) override @@ -219,20 +180,20 @@ class instance_trial_of_the_crusader : public InstanceMapScript switch (type) { - case BOSS_BEASTS: + case DATA_NORTHREND_BEASTS: break; - case BOSS_JARAXXUS: + case DATA_JARAXXUS: // Cleanup Icehowl - if (Creature* icehowl = instance->GetCreature(IcehowlGUID)) + if (Creature* icehowl = GetCreature(DATA_ICEHOWL)) icehowl->DespawnOrUnsummon(); if (state == DONE) EventStage = 2000; break; - case BOSS_CRUSADERS: + case DATA_FACTION_CRUSADERS: // Cleanup Jaraxxus - if (Creature* jaraxxus = instance->GetCreature(JaraxxusGUID)) + if (Creature* jaraxxus = GetCreature(DATA_JARAXXUS)) jaraxxus->DespawnOrUnsummon(); - if (Creature* fizzlebang = instance->GetCreature(FizzlebangGUID)) + 
if (Creature* fizzlebang = GetCreature(DATA_FIZZLEBANG)) fizzlebang->DespawnOrUnsummon(); switch (state) { @@ -247,8 +208,8 @@ class instance_trial_of_the_crusader : public InstanceMapScript DoUpdateAchievementCriteria(ACHIEVEMENT_CRITERIA_TYPE_BE_SPELL_TARGET, SPELL_DEFEAT_FACTION_CHAMPIONS); if (ResilienceWillFixItTimer > 0) DoUpdateAchievementCriteria(ACHIEVEMENT_CRITERIA_TYPE_BE_SPELL_TARGET, SPELL_CHAMPIONS_KILLED_IN_MINUTE); - DoRespawnGameObject(CrusadersCacheGUID, 7*DAY); - if (GameObject* cache = instance->GetGameObject(CrusadersCacheGUID)) + DoRespawnGameObject(GetGuidData(DATA_CRUSADERS_CHEST), 7*DAY); + if (GameObject* cache = GetGameObject(DATA_CRUSADERS_CHEST)) cache->RemoveFlag(GAMEOBJECT_FLAGS, GO_FLAG_NOT_SELECTABLE); EventStage = 3100; break; @@ -256,27 +217,27 @@ class instance_trial_of_the_crusader : public InstanceMapScript break; } break; - case BOSS_VALKIRIES: + case DATA_TWIN_VALKIRIES: // Cleanup chest - if (GameObject* cache = instance->GetGameObject(CrusadersCacheGUID)) + if (GameObject* cache = GetGameObject(DATA_CRUSADERS_CHEST)) cache->Delete(); switch (state) { case FAIL: - if (GetBossState(BOSS_VALKIRIES) == NOT_STARTED) + if (GetBossState(DATA_TWIN_VALKIRIES) == NOT_STARTED) state = NOT_STARTED; break; case SPECIAL: - if (GetBossState(BOSS_VALKIRIES) == SPECIAL) + if (GetBossState(DATA_TWIN_VALKIRIES) == SPECIAL) state = DONE; break; default: break; } break; - case BOSS_LICH_KING: + case DATA_LICH_KING: break; - case BOSS_ANUBARAK: + case DATA_ANUBARAK: switch (state) { case DONE: @@ -319,7 +280,7 @@ class instance_trial_of_the_crusader : public InstanceMapScript } if (tributeChest) - if (Creature* tirion = instance->GetCreature(TirionGUID)) + if (Creature* tirion = GetCreature(DATA_FORDRING)) if (GameObject* chest = tirion->SummonGameObject(tributeChest, 805.62f, 134.87f, 142.16f, 3.27f, QuaternionData(), WEEK)) chest->SetRespawnTime(chest->GetRespawnDelay()); break; @@ -334,16 +295,16 @@ class instance_trial_of_the_crusader : public InstanceMapScript if (IsEncounterInProgress()) { - CloseDoor(GetGuidData(GO_EAST_PORTCULLIS)); - CloseDoor(GetGuidData(GO_WEB_DOOR)); + CloseDoor(GetGuidData(DATA_EAST_PORTCULLIS)); + CloseDoor(GetGuidData(DATA_WEB_DOOR)); } else { - OpenDoor(GetGuidData(GO_EAST_PORTCULLIS)); - OpenDoor(GetGuidData(GO_WEB_DOOR)); + OpenDoor(GetGuidData(DATA_EAST_PORTCULLIS)); + OpenDoor(GetGuidData(DATA_WEB_DOOR)); } - if (type < MAX_ENCOUNTERS) + if (type < EncounterCount) { TC_LOG_DEBUG("scripts", "[ToCr] BossState(type %u) %u = state %u;", type, GetBossState(type), state); if (state == FAIL) @@ -360,21 +321,21 @@ class instance_trial_of_the_crusader : public InstanceMapScript // if there are no more attempts allowed if (!TrialCounter) { - if (Unit* announcer = instance->GetCreature(GetGuidData(NPC_BARRENT))) + if (Unit* announcer = GetCreature(DATA_BARRET_RAMSEY)) announcer->ToCreature()->DespawnOrUnsummon(); - if (Creature* anubArak = instance->GetCreature(GetGuidData(NPC_ANUBARAK))) - anubArak->DespawnOrUnsummon(); + if (Creature* anubarak = GetCreature(DATA_ANUBARAK)) + anubarak->DespawnOrUnsummon(); } } NeedSave = true; - EventStage = (type == BOSS_BEASTS ? 666 : 0); + EventStage = (type == DATA_NORTHREND_BEASTS ? 
666 : 0); state = NOT_STARTED; } if (state == DONE || NeedSave) { - if (Unit* announcer = instance->GetCreature(GetGuidData(NPC_BARRENT))) + if (Unit* announcer = GetCreature(DATA_BARRET_RAMSEY)) announcer->SetFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); Save(); } @@ -421,10 +382,10 @@ class instance_trial_of_the_crusader : public InstanceMapScript case ICEHOWL_DONE: EventStage = 400; SetData(TYPE_NORTHREND_BEASTS, DONE); - SetBossState(BOSS_BEASTS, DONE); + SetBossState(DATA_NORTHREND_BEASTS, DONE); break; case FAIL: - SetBossState(BOSS_BEASTS, FAIL); + SetBossState(DATA_NORTHREND_BEASTS, FAIL); break; default: break; @@ -448,57 +409,6 @@ class instance_trial_of_the_crusader : public InstanceMapScript } } - ObjectGuid GetGuidData(uint32 type) const override - { - switch (type) - { - case NPC_BARRENT: - return BarrentGUID; - case NPC_TIRION: - return TirionGUID; - case NPC_TIRION_FORDRING: - return TirionFordringGUID; - case NPC_FIZZLEBANG: - return FizzlebangGUID; - case NPC_GARROSH: - return GarroshGUID; - case NPC_VARIAN: - return VarianGUID; - - case NPC_GORMOK: - return GormokGUID; - case NPC_ACIDMAW: - return AcidmawGUID; - case NPC_DREADSCALE: - return DreadscaleGUID; - case NPC_ICEHOWL: - return IcehowlGUID; - case NPC_JARAXXUS: - return JaraxxusGUID; - case NPC_CHAMPIONS_CONTROLLER: - return ChampionsControllerGUID; - case NPC_DARKBANE: - return DarkbaneGUID; - case NPC_LIGHTBANE: - return LightbaneGUID; - case NPC_ANUBARAK: - return AnubarakGUID; - - case GO_ARGENT_COLISEUM_FLOOR: - return FloorGUID; - case GO_MAIN_GATE_DOOR: - return MainGateDoorGUID; - case GO_EAST_PORTCULLIS: - return EastPortcullisGUID; - case GO_WEB_DOOR: - return WebDoorGUID; - default: - break; - } - - return ObjectGuid::Empty; - } - uint32 GetData(uint32 type) const override { switch (type) @@ -553,7 +463,7 @@ class instance_trial_of_the_crusader : public InstanceMapScript case 6000: case 6005: case 6010: - return NPC_TIRION; + return NPC_TIRION_FORDRING; break; case 5010: case 5030: @@ -595,7 +505,7 @@ class instance_trial_of_the_crusader : public InstanceMapScript return NPC_FIZZLEBANG; break; default: - return NPC_TIRION; + return NPC_TIRION_FORDRING; break; }; default: @@ -615,7 +525,7 @@ class instance_trial_of_the_crusader : public InstanceMapScript NotOneButTwoJormungarsTimer -= diff; } - if (GetBossState(BOSS_CRUSADERS) == SPECIAL && ResilienceWillFixItTimer) + if (GetBossState(DATA_FACTION_CRUSADERS) == SPECIAL && ResilienceWillFixItTimer) { if (ResilienceWillFixItTimer <= diff) ResilienceWillFixItTimer = 0; @@ -630,7 +540,7 @@ class instance_trial_of_the_crusader : public InstanceMapScript std::ostringstream saveStream; - for (uint8 i = 0; i < MAX_ENCOUNTERS; ++i) + for (uint8 i = 0; i < EncounterCount; ++i) saveStream << GetBossState(i) << ' '; saveStream << TrialCounter; @@ -658,7 +568,7 @@ class instance_trial_of_the_crusader : public InstanceMapScript std::istringstream loadStream(strIn); - for (uint8 i = 0; i < MAX_ENCOUNTERS; ++i) + for (uint8 i = 0; i < EncounterCount; ++i) { uint32 tmpState; loadStream >> tmpState; @@ -718,30 +628,6 @@ class instance_trial_of_the_crusader : public InstanceMapScript bool NeedSave; std::string SaveDataBuffer; - ObjectGuid BarrentGUID; - ObjectGuid TirionGUID; - ObjectGuid TirionFordringGUID; - ObjectGuid FizzlebangGUID; - ObjectGuid GarroshGUID; - ObjectGuid VarianGUID; - - ObjectGuid GormokGUID; - ObjectGuid AcidmawGUID; - ObjectGuid DreadscaleGUID; - ObjectGuid IcehowlGUID; - ObjectGuid JaraxxusGUID; - ObjectGuid ChampionsControllerGUID; - ObjectGuid 
DarkbaneGUID; - ObjectGuid LightbaneGUID; - ObjectGuid AnubarakGUID; - - ObjectGuid CrusadersCacheGUID; - ObjectGuid FloorGUID; - ObjectGuid TributeChestGUID; - ObjectGuid MainGateDoorGUID; - ObjectGuid EastPortcullisGUID; - ObjectGuid WebDoorGUID; - // Achievement stuff uint32 NotOneButTwoJormungarsTimer; uint32 ResilienceWillFixItTimer; diff --git a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/trial_of_the_crusader.cpp b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/trial_of_the_crusader.cpp index fdc93f56c39..de24ab4a352 100644 --- a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/trial_of_the_crusader.cpp +++ b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/trial_of_the_crusader.cpp @@ -164,12 +164,12 @@ struct _Messages static _Messages _GossipMessage[]= { - {MSG_BEASTS, GOSSIP_ACTION_INFO_DEF + 1, false, BOSS_BEASTS}, - {MSG_JARAXXUS, GOSSIP_ACTION_INFO_DEF + 2, false, BOSS_JARAXXUS}, - {MSG_CRUSADERS, GOSSIP_ACTION_INFO_DEF + 3, false, BOSS_CRUSADERS}, - {MSG_VALKIRIES, GOSSIP_ACTION_INFO_DEF + 4, false, BOSS_VALKIRIES}, - {MSG_LICH_KING, GOSSIP_ACTION_INFO_DEF + 5, false, BOSS_ANUBARAK}, - {MSG_ANUBARAK, GOSSIP_ACTION_INFO_DEF + 6, true, BOSS_ANUBARAK} + {MSG_BEASTS, GOSSIP_ACTION_INFO_DEF + 1, false, DATA_NORTHREND_BEASTS}, + {MSG_JARAXXUS, GOSSIP_ACTION_INFO_DEF + 2, false, DATA_JARAXXUS}, + {MSG_CRUSADERS, GOSSIP_ACTION_INFO_DEF + 3, false, DATA_FACTION_CRUSADERS}, + {MSG_VALKIRIES, GOSSIP_ACTION_INFO_DEF + 4, false, DATA_TWIN_VALKIRIES}, + {MSG_LICH_KING, GOSSIP_ACTION_INFO_DEF + 5, false, DATA_ANUBARAK}, + {MSG_ANUBARAK, GOSSIP_ACTION_INFO_DEF + 6, true, DATA_ANUBARAK} }; enum Messages @@ -229,16 +229,16 @@ class npc_announcer_toc10 : public CreatureScript ClearGossipMenuFor(player); CloseGossipMenuFor(player); - if (instance->GetBossState(BOSS_BEASTS) != DONE) + if (instance->GetBossState(DATA_NORTHREND_BEASTS) != DONE) { instance->SetData(TYPE_EVENT, 110); instance->SetData(TYPE_NORTHREND_BEASTS, NOT_STARTED); - instance->SetBossState(BOSS_BEASTS, NOT_STARTED); + instance->SetBossState(DATA_NORTHREND_BEASTS, NOT_STARTED); } - else if (instance->GetBossState(BOSS_JARAXXUS) != DONE) + else if (instance->GetBossState(DATA_JARAXXUS) != DONE) { // if Jaraxxus is spawned, but the raid wiped - if (Creature* jaraxxus = ObjectAccessor::GetCreature(*player, instance->GetGuidData(NPC_JARAXXUS))) + if (Creature* jaraxxus = instance->GetCreature(DATA_JARAXXUS)) { jaraxxus->RemoveAurasDueToSpell(SPELL_JARAXXUS_CHAINS); jaraxxus->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); @@ -249,29 +249,29 @@ class npc_announcer_toc10 : public CreatureScript else { instance->SetData(TYPE_EVENT, 1010); - instance->SetBossState(BOSS_JARAXXUS, NOT_STARTED); + instance->SetBossState(DATA_JARAXXUS, NOT_STARTED); } } - else if (instance->GetBossState(BOSS_CRUSADERS) != DONE) + else if (instance->GetBossState(DATA_FACTION_CRUSADERS) != DONE) { if (player->GetTeam() == ALLIANCE) instance->SetData(TYPE_EVENT, 3000); else instance->SetData(TYPE_EVENT, 3001); - instance->SetBossState(BOSS_CRUSADERS, NOT_STARTED); + instance->SetBossState(DATA_FACTION_CRUSADERS, NOT_STARTED); } - else if (instance->GetBossState(BOSS_VALKIRIES) != DONE) + else if (instance->GetBossState(DATA_TWIN_VALKIRIES) != DONE) { instance->SetData(TYPE_EVENT, 4000); - instance->SetBossState(BOSS_VALKIRIES, NOT_STARTED); + instance->SetBossState(DATA_TWIN_VALKIRIES, NOT_STARTED); } - else if (instance->GetBossState(BOSS_LICH_KING) != DONE) + else if 
(instance->GetBossState(DATA_LICH_KING) != DONE) { if (me->GetMap()->GetPlayers().getFirst()->GetSource()->GetTeam() == ALLIANCE) instance->SetData(TYPE_EVENT, 4020); else instance->SetData(TYPE_EVENT, 4030); - instance->SetBossState(BOSS_LICH_KING, NOT_STARTED); + instance->SetBossState(DATA_LICH_KING, NOT_STARTED); } me->RemoveFlag(UNIT_NPC_FLAGS, UNIT_NPC_FLAG_GOSSIP); return true; @@ -305,7 +305,7 @@ class boss_lich_king_toc : public CreatureScript summoned->SetDisplayId(summoned->GetCreatureTemplate()->Modelid2); } - _instance->SetBossState(BOSS_LICH_KING, IN_PROGRESS); + _instance->SetBossState(DATA_LICH_KING, IN_PROGRESS); me->SetWalk(true); } @@ -376,15 +376,15 @@ class boss_lich_king_toc : public CreatureScript break; case 5080: { - if (GameObject* go = ObjectAccessor::GetGameObject(*me, _instance->GetGuidData(GO_ARGENT_COLISEUM_FLOOR))) + if (GameObject* go = _instance->GetGameObject(DATA_COLISEUM_FLOOR)) go->SetDestructibleState(GO_DESTRUCTIBLE_DAMAGED); me->CastSpell(me, SPELL_CORPSE_TELEPORT, false); me->CastSpell(me, SPELL_DESTROY_FLOOR_KNOCKUP, false); - _instance->SetBossState(BOSS_LICH_KING, DONE); + _instance->SetBossState(DATA_LICH_KING, DONE); - if (!ObjectAccessor::GetCreature(*me, _instance->GetGuidData(NPC_ANUBARAK))) + if (!_instance->GetCreature(DATA_ANUBARAK)) me->SummonCreature(NPC_ANUBARAK, AnubarakLoc[0], TEMPSUMMON_CORPSE_TIMED_DESPAWN, DESPAWN_TIME); _instance->SetData(TYPE_EVENT, 0); @@ -429,7 +429,7 @@ class npc_fizzlebang_toc : public CreatureScript { Talk(SAY_STAGE_1_06, killer); _instance->SetData(TYPE_EVENT, 1180); - if (Creature* jaraxxus = ObjectAccessor::GetCreature(*me, _instance->GetGuidData(NPC_JARAXXUS))) + if (Creature* jaraxxus = _instance->GetCreature(DATA_JARAXXUS)) { jaraxxus->RemoveFlag(UNIT_FIELD_FLAGS, UNIT_FLAG_NON_ATTACKABLE); jaraxxus->SetImmuneToPC(false); @@ -454,7 +454,7 @@ class npc_fizzlebang_toc : public CreatureScript { case 1: me->SetWalk(false); - _instance->DoUseDoorOrButton(_instance->GetGuidData(GO_MAIN_GATE_DOOR)); + _instance->DoUseDoorOrButton(_instance->GetGuidData(DATA_MAIN_GATE)); _instance->SetData(TYPE_EVENT, 1120); _instance->SetData(TYPE_EVENT_TIMER, 1*IN_MILLISECONDS); break; @@ -538,7 +538,7 @@ class npc_fizzlebang_toc : public CreatureScript _updateTimer = 5*IN_MILLISECONDS; break; case 1142: - if (Creature* jaraxxus = ObjectAccessor::GetCreature(*me, _instance->GetGuidData(NPC_JARAXXUS))) + if (Creature* jaraxxus = _instance->GetCreature(DATA_JARAXXUS)) jaraxxus->SetTarget(me->GetGUID()); if (Creature* pTrigger = ObjectAccessor::GetCreature(*me, _triggerGUID)) pTrigger->DespawnOrUnsummon(); @@ -548,13 +548,13 @@ class npc_fizzlebang_toc : public CreatureScript _updateTimer = 10*IN_MILLISECONDS; break; case 1144: - if (Creature* jaraxxus = ObjectAccessor::GetCreature(*me, _instance->GetGuidData(NPC_JARAXXUS))) + if (Creature* jaraxxus = _instance->GetCreature(DATA_JARAXXUS)) jaraxxus->AI()->Talk(SAY_STAGE_1_05); _instance->SetData(TYPE_EVENT, 1150); _updateTimer = 5*IN_MILLISECONDS; break; case 1150: - if (Creature* jaraxxus = ObjectAccessor::GetCreature(*me, _instance->GetGuidData(NPC_JARAXXUS))) + if (Creature* jaraxxus = _instance->GetCreature(DATA_JARAXXUS)) { //1-shot Fizzlebang jaraxxus->CastSpell(me, 67888, false); // 67888 - Fel Lightning @@ -605,7 +605,7 @@ class npc_tirion_toc : public CreatureScript if (!_instance) return; - if (_instance->GetData(TYPE_EVENT_NPC) != NPC_TIRION) + if (_instance->GetData(TYPE_EVENT_NPC) != NPC_TIRION_FORDRING) return; uint32 _updateTimer = 
_instance->GetData(TYPE_EVENT_TIMER); @@ -627,9 +627,9 @@ class npc_tirion_toc : public CreatureScript break; case 150: me->SetUInt32Value(UNIT_NPC_EMOTESTATE, EMOTE_STATE_NONE); - if (_instance->GetBossState(BOSS_BEASTS) != DONE) + if (_instance->GetBossState(DATA_NORTHREND_BEASTS) != DONE) { - _instance->DoUseDoorOrButton(_instance->GetGuidData(GO_MAIN_GATE_DOOR)); + _instance->DoUseDoorOrButton(_instance->GetGuidData(DATA_MAIN_GATE)); if (Creature* gormok = me->SummonCreature(NPC_GORMOK, ToCSpawnLoc[0].GetPositionX(), ToCSpawnLoc[0].GetPositionY(), ToCSpawnLoc[0].GetPositionZ(), 5, TEMPSUMMON_CORPSE_TIMED_DESPAWN, 30*IN_MILLISECONDS)) { @@ -649,9 +649,9 @@ class npc_tirion_toc : public CreatureScript break; case 200: Talk(SAY_STAGE_0_04); - if (_instance->GetBossState(BOSS_BEASTS) != DONE) + if (_instance->GetBossState(DATA_NORTHREND_BEASTS) != DONE) { - _instance->DoUseDoorOrButton(_instance->GetGuidData(GO_MAIN_GATE_DOOR)); + _instance->DoUseDoorOrButton(_instance->GetGuidData(DATA_MAIN_GATE)); if (Creature* dreadscale = me->SummonCreature(NPC_DREADSCALE, ToCSpawnLoc[1].GetPositionX(), ToCSpawnLoc[1].GetPositionY(), ToCSpawnLoc[1].GetPositionZ(), 5, TEMPSUMMON_MANUAL_DESPAWN)) { dreadscale->GetMotionMaster()->MovePoint(0, ToCCommonLoc[5].GetPositionX(), ToCCommonLoc[5].GetPositionY(), ToCCommonLoc[5].GetPositionZ()); @@ -667,9 +667,9 @@ class npc_tirion_toc : public CreatureScript break; case 300: Talk(SAY_STAGE_0_05); - if (_instance->GetBossState(BOSS_BEASTS) != DONE) + if (_instance->GetBossState(DATA_NORTHREND_BEASTS) != DONE) { - _instance->DoUseDoorOrButton(_instance->GetGuidData(GO_MAIN_GATE_DOOR)); + _instance->DoUseDoorOrButton(_instance->GetGuidData(DATA_MAIN_GATE)); if (Creature* icehowl = me->SummonCreature(NPC_ICEHOWL, ToCSpawnLoc[0].GetPositionX(), ToCSpawnLoc[0].GetPositionY(), ToCSpawnLoc[0].GetPositionZ(), 5, TEMPSUMMON_DEAD_DESPAWN)) { icehowl->GetMotionMaster()->MovePoint(2, ToCCommonLoc[5].GetPositionX(), ToCCommonLoc[5].GetPositionY(), ToCCommonLoc[5].GetPositionZ()); @@ -697,7 +697,7 @@ class npc_tirion_toc : public CreatureScript case 1010: Talk(SAY_STAGE_1_01); _updateTimer = 7*IN_MILLISECONDS; - _instance->DoUseDoorOrButton(_instance->GetGuidData(GO_MAIN_GATE_DOOR)); + _instance->DoUseDoorOrButton(_instance->GetGuidData(DATA_MAIN_GATE)); me->SummonCreature(NPC_FIZZLEBANG, ToCSpawnLoc[0].GetPositionX(), ToCSpawnLoc[0].GetPositionY(), ToCSpawnLoc[0].GetPositionZ(), 2, TEMPSUMMON_CORPSE_TIMED_DESPAWN, DESPAWN_TIME); _instance->SetData(TYPE_EVENT, 0); break; @@ -751,7 +751,7 @@ class npc_tirion_toc : public CreatureScript _instance->SetData(TYPE_EVENT, 3092); break; case 3092: - if (Creature* pChampionController = ObjectAccessor::GetCreature((*me), _instance->GetGuidData(NPC_CHAMPIONS_CONTROLLER))) + if (Creature* pChampionController = _instance->GetCreature(DATA_FACTION_CRUSADERS)) pChampionController->AI()->SetData(1, NOT_STARTED); _instance->SetData(TYPE_EVENT, 3095); break; @@ -768,14 +768,14 @@ class npc_tirion_toc : public CreatureScript break; case 4010: Talk(SAY_STAGE_3_02); - if (Creature* lightbane = me->SummonCreature(NPC_LIGHTBANE, ToCSpawnLoc[1].GetPositionX(), ToCSpawnLoc[1].GetPositionY(), ToCSpawnLoc[1].GetPositionZ(), 5, TEMPSUMMON_CORPSE_TIMED_DESPAWN, DESPAWN_TIME)) + if (Creature* lightbane = me->SummonCreature(NPC_FJOLA_LIGHTBANE, ToCSpawnLoc[1].GetPositionX(), ToCSpawnLoc[1].GetPositionY(), ToCSpawnLoc[1].GetPositionZ(), 5, TEMPSUMMON_CORPSE_TIMED_DESPAWN, DESPAWN_TIME)) { lightbane->SetVisible(false); lightbane->SetReactState(REACT_PASSIVE); 
lightbane->SummonCreature(NPC_LIGHT_ESSENCE, TwinValkyrsLoc[0].GetPositionX(), TwinValkyrsLoc[0].GetPositionY(), TwinValkyrsLoc[0].GetPositionZ()); lightbane->SummonCreature(NPC_LIGHT_ESSENCE, TwinValkyrsLoc[1].GetPositionX(), TwinValkyrsLoc[1].GetPositionY(), TwinValkyrsLoc[1].GetPositionZ()); } - if (Creature* darkbane = me->SummonCreature(NPC_DARKBANE, ToCSpawnLoc[2].GetPositionX(), ToCSpawnLoc[2].GetPositionY(), ToCSpawnLoc[2].GetPositionZ(), 5, TEMPSUMMON_CORPSE_TIMED_DESPAWN, DESPAWN_TIME)) + if (Creature* darkbane = me->SummonCreature(NPC_EYDIS_DARKBANE, ToCSpawnLoc[2].GetPositionX(), ToCSpawnLoc[2].GetPositionY(), ToCSpawnLoc[2].GetPositionZ(), 5, TEMPSUMMON_CORPSE_TIMED_DESPAWN, DESPAWN_TIME)) { darkbane->SetVisible(false); darkbane->SetReactState(REACT_PASSIVE); @@ -786,13 +786,13 @@ class npc_tirion_toc : public CreatureScript _instance->SetData(TYPE_EVENT, 4015); break; case 4015: - _instance->DoUseDoorOrButton(_instance->GetGuidData(GO_MAIN_GATE_DOOR)); - if (Creature* lightbane = ObjectAccessor::GetCreature((*me), _instance->GetGuidData(NPC_LIGHTBANE))) + _instance->DoUseDoorOrButton(_instance->GetGuidData(DATA_MAIN_GATE)); + if (Creature* lightbane = _instance->GetCreature(DATA_FJOLA_LIGHTBANE)) { lightbane->GetMotionMaster()->MovePoint(1, ToCCommonLoc[8].GetPositionX(), ToCCommonLoc[8].GetPositionY(), ToCCommonLoc[8].GetPositionZ()); lightbane->SetVisible(true); } - if (Creature* darkbane = ObjectAccessor::GetCreature((*me), _instance->GetGuidData(NPC_DARKBANE))) + if (Creature* darkbane = _instance->GetCreature(DATA_EYDIS_DARKBANE)) { darkbane->GetMotionMaster()->MovePoint(1, ToCCommonLoc[9].GetPositionX(), ToCCommonLoc[9].GetPositionY(), ToCCommonLoc[9].GetPositionZ()); darkbane->SetVisible(true); @@ -801,7 +801,7 @@ class npc_tirion_toc : public CreatureScript _instance->SetData(TYPE_EVENT, 4016); break; case 4016: - _instance->DoUseDoorOrButton(_instance->GetGuidData(GO_MAIN_GATE_DOOR)); + _instance->DoUseDoorOrButton(_instance->GetGuidData(DATA_MAIN_GATE)); _instance->SetData(TYPE_EVENT, 4017); break; case 4040: @@ -824,14 +824,14 @@ class npc_tirion_toc : public CreatureScript _instance->SetData(TYPE_EVENT, 0); break; case 6000: - me->SummonCreature(NPC_TIRION_FORDRING, EndSpawnLoc[0]); + me->SummonCreature(NPC_TIRION_FORDRING_ANUBARAK, EndSpawnLoc[0]); me->SummonCreature(NPC_ARGENT_MAGE, EndSpawnLoc[1]); me->SummonGameObject(GO_PORTAL_TO_DALARAN, EndSpawnLoc[2], QuaternionData(), 0); _updateTimer = 20*IN_MILLISECONDS; _instance->SetData(TYPE_EVENT, 6005); break; case 6005: - if (Creature* tirionFordring = ObjectAccessor::GetCreature((*me), _instance->GetGuidData(NPC_TIRION_FORDRING))) + if (Creature* tirionFordring = _instance->GetCreature(DATA_FORDRING_ANUBARAK)) tirionFordring->AI()->Talk(SAY_STAGE_4_06); _updateTimer = 20*IN_MILLISECONDS; _instance->SetData(TYPE_EVENT, 6010); @@ -839,10 +839,10 @@ class npc_tirion_toc : public CreatureScript case 6010: if (IsHeroic()) { - if (Creature* tirionFordring = ObjectAccessor::GetCreature((*me), _instance->GetGuidData(NPC_TIRION_FORDRING))) + if (Creature* tirionFordring = _instance->GetCreature(DATA_FORDRING_ANUBARAK)) tirionFordring->AI()->Talk(SAY_STAGE_4_07); _updateTimer = 1*MINUTE*IN_MILLISECONDS; - _instance->SetBossState(BOSS_ANUBARAK, SPECIAL); + _instance->SetBossState(DATA_ANUBARAK, SPECIAL); _instance->SetData(TYPE_EVENT, 6020); } else diff --git a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/trial_of_the_crusader.h 
b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/trial_of_the_crusader.h index d744c7e1cd2..d3565fe27eb 100644 --- a/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/trial_of_the_crusader.h +++ b/src/server/scripts/Northrend/CrusadersColiseum/TrialOfTheCrusader/trial_of_the_crusader.h @@ -25,19 +25,42 @@ #define DataHeader "TCR" struct Position; +uint32 const EncounterCount = 6; enum TCRDataTypes { - BOSS_BEASTS = 0, - BOSS_JARAXXUS = 1, - BOSS_CRUSADERS = 2, - BOSS_VALKIRIES = 3, - BOSS_LICH_KING = 4, // not really a boss but oh well - BOSS_ANUBARAK = 5, - MAX_ENCOUNTERS = 6, - - TYPE_COUNTER = 8, - TYPE_EVENT = 9, + // Encounter States + DATA_NORTHREND_BEASTS = 0, + DATA_JARAXXUS = 1, + DATA_FACTION_CRUSADERS = 2, + DATA_TWIN_VALKIRIES = 3, + DATA_LICH_KING = 4, + DATA_ANUBARAK = 5, + + // Additional Data + DATA_GORMOK_THE_IMPALER = 5, + DATA_ACIDMAW = 6, + DATA_DREADSCALE = 7, + DATA_ICEHOWL = 8, + DATA_FJOLA_LIGHTBANE = 9, + DATA_EYDIS_DARKBANE = 10, + DATA_BARRET_RAMSEY = 11, + DATA_FORDRING = 12, + DATA_FORDRING_ANUBARAK = 13, + DATA_VARIAN = 14, + DATA_GARROSH = 15, + DATA_FIZZLEBANG = 16, + DATA_FACTION_CHAMPIONS = 17, + + DATA_CRUSADERS_CHEST = 18, + DATA_COLISEUM_FLOOR = 19, + DATA_MAIN_GATE = 20, + DATA_EAST_PORTCULLIS = 21, + DATA_WEB_DOOR = 22, + DATA_TRIBUTE_CHEST = 23, + + TYPE_COUNTER = 24, + TYPE_EVENT = 25, TYPE_EVENT_TIMER = 101, TYPE_EVENT_NPC = 102, @@ -60,7 +83,7 @@ enum TCRSpellIds enum TCRMiscData { - DESPAWN_TIME = 1200000 + DESPAWN_TIME = 1200000 }; extern Position const ToCCommonLoc[]; @@ -68,121 +91,121 @@ extern Position const AnubarakLoc[]; enum TCRWorldStateIds { - UPDATE_STATE_UI_SHOW = 4390, - UPDATE_STATE_UI_COUNT = 4389 + UPDATE_STATE_UI_SHOW = 4390, + UPDATE_STATE_UI_COUNT = 4389 }; enum NorthrendBeasts { - GORMOK_IN_PROGRESS = 1000, - GORMOK_DONE = 1001, - SNAKES_IN_PROGRESS = 2000, - DREADSCALE_SUBMERGED = 2001, - ACIDMAW_SUBMERGED = 2002, - SNAKES_SPECIAL = 2003, - SNAKES_DONE = 2004, - ICEHOWL_IN_PROGRESS = 3000, - ICEHOWL_DONE = 3001 + GORMOK_IN_PROGRESS = 1000, + GORMOK_DONE = 1001, + SNAKES_IN_PROGRESS = 2000, + DREADSCALE_SUBMERGED = 2001, + ACIDMAW_SUBMERGED = 2002, + SNAKES_SPECIAL = 2003, + SNAKES_DONE = 2004, + ICEHOWL_IN_PROGRESS = 3000, + ICEHOWL_DONE = 3001 }; enum AnnouncerMessages { - MSG_BEASTS = 724001, - MSG_JARAXXUS = 724002, - MSG_CRUSADERS = 724003, - MSG_VALKIRIES = 724004, - MSG_LICH_KING = 724005, - MSG_ANUBARAK = 724006 + MSG_BEASTS = 724001, + MSG_JARAXXUS = 724002, + MSG_CRUSADERS = 724003, + MSG_VALKIRIES = 724004, + MSG_LICH_KING = 724005, + MSG_ANUBARAK = 724006 }; enum TCRCreatureIds { - NPC_BARRENT = 34816, - NPC_TIRION = 34996, - NPC_TIRION_FORDRING = 36095, - NPC_ARGENT_MAGE = 36097, - NPC_FIZZLEBANG = 35458, - NPC_GARROSH = 34995, - NPC_VARIAN = 34990, - NPC_LICH_KING = 35877, - - NPC_THRALL = 34994, - NPC_PROUDMOORE = 34992, - NPC_WILFRED_PORTAL = 17965, - NPC_TRIGGER = 35651, - - NPC_ICEHOWL = 34797, - NPC_GORMOK = 34796, - NPC_DREADSCALE = 34799, - NPC_ACIDMAW = 35144, - - NPC_JARAXXUS = 34780, - - NPC_CHAMPIONS_CONTROLLER = 34781, - - NPC_ALLIANCE_DEATH_KNIGHT = 34461, - NPC_ALLIANCE_DRUID_BALANCE = 34460, - NPC_ALLIANCE_DRUID_RESTORATION = 34469, - NPC_ALLIANCE_HUNTER = 34467, - NPC_ALLIANCE_MAGE = 34468, - NPC_ALLIANCE_PALADIN_HOLY = 34465, - NPC_ALLIANCE_PALADIN_RETRIBUTION = 34471, - NPC_ALLIANCE_PRIEST_DISCIPLINE = 34466, - NPC_ALLIANCE_PRIEST_SHADOW = 34473, - NPC_ALLIANCE_ROGUE = 34472, - NPC_ALLIANCE_SHAMAN_ENHANCEMENT = 34463, - NPC_ALLIANCE_SHAMAN_RESTORATION = 34470, 
- NPC_ALLIANCE_WARLOCK = 34474, - NPC_ALLIANCE_WARRIOR = 34475, - - NPC_HORDE_DEATH_KNIGHT = 34458, - NPC_HORDE_DRUID_BALANCE = 34451, - NPC_HORDE_DRUID_RESTORATION = 34459, - NPC_HORDE_HUNTER = 34448, - NPC_HORDE_MAGE = 34449, - NPC_HORDE_PALADIN_HOLY = 34445, - NPC_HORDE_PALADIN_RETRIBUTION = 34456, - NPC_HORDE_PRIEST_DISCIPLINE = 34447, - NPC_HORDE_PRIEST_SHADOW = 34441, - NPC_HORDE_ROGUE = 34454, - NPC_HORDE_SHAMAN_ENHANCEMENT = 34455, - NPC_HORDE_SHAMAN_RESTORATION = 34444, - NPC_HORDE_WARLOCK = 34450, - NPC_HORDE_WARRIOR = 34453, - - NPC_LIGHTBANE = 34497, - NPC_DARKBANE = 34496, - - NPC_DARK_ESSENCE = 34567, - NPC_LIGHT_ESSENCE = 34568, - - NPC_ANUBARAK = 34564 + NPC_BARRET_RAMSEY = 34816, + NPC_TIRION_FORDRING = 34996, + NPC_TIRION_FORDRING_ANUBARAK = 36095, + NPC_ARGENT_MAGE = 36097, + NPC_FIZZLEBANG = 35458, + NPC_GARROSH = 34995, + NPC_VARIAN = 34990, + NPC_LICH_KING = 35877, + + NPC_THRALL = 34994, + NPC_PROUDMOORE = 34992, + NPC_WILFRED_PORTAL = 17965, + NPC_TRIGGER = 35651, + + NPC_ICEHOWL = 34797, + NPC_GORMOK = 34796, + NPC_DREADSCALE = 34799, + NPC_ACIDMAW = 35144, + + NPC_JARAXXUS = 34780, + + NPC_CHAMPIONS_CONTROLLER = 34781, + + NPC_ALLIANCE_DEATH_KNIGHT = 34461, + NPC_ALLIANCE_DRUID_BALANCE = 34460, + NPC_ALLIANCE_DRUID_RESTORATION = 34469, + NPC_ALLIANCE_HUNTER = 34467, + NPC_ALLIANCE_MAGE = 34468, + NPC_ALLIANCE_PALADIN_HOLY = 34465, + NPC_ALLIANCE_PALADIN_RETRIBUTION = 34471, + NPC_ALLIANCE_PRIEST_DISCIPLINE = 34466, + NPC_ALLIANCE_PRIEST_SHADOW = 34473, + NPC_ALLIANCE_ROGUE = 34472, + NPC_ALLIANCE_SHAMAN_ENHANCEMENT = 34463, + NPC_ALLIANCE_SHAMAN_RESTORATION = 34470, + NPC_ALLIANCE_WARLOCK = 34474, + NPC_ALLIANCE_WARRIOR = 34475, + + NPC_HORDE_DEATH_KNIGHT = 34458, + NPC_HORDE_DRUID_BALANCE = 34451, + NPC_HORDE_DRUID_RESTORATION = 34459, + NPC_HORDE_HUNTER = 34448, + NPC_HORDE_MAGE = 34449, + NPC_HORDE_PALADIN_HOLY = 34445, + NPC_HORDE_PALADIN_RETRIBUTION = 34456, + NPC_HORDE_PRIEST_DISCIPLINE = 34447, + NPC_HORDE_PRIEST_SHADOW = 34441, + NPC_HORDE_ROGUE = 34454, + NPC_HORDE_SHAMAN_ENHANCEMENT = 34455, + NPC_HORDE_SHAMAN_RESTORATION = 34444, + NPC_HORDE_WARLOCK = 34450, + NPC_HORDE_WARRIOR = 34453, + + NPC_FJOLA_LIGHTBANE = 34497, + NPC_EYDIS_DARKBANE = 34496, + + NPC_DARK_ESSENCE = 34567, + NPC_LIGHT_ESSENCE = 34568, + + NPC_ANUBARAK = 34564 }; enum TCRGameObjectIds { - GO_CRUSADERS_CACHE_10 = 195631, - GO_CRUSADERS_CACHE_25 = 195632, - GO_CRUSADERS_CACHE_10_H = 195633, - GO_CRUSADERS_CACHE_25_H = 195635, + GO_CRUSADERS_CACHE_10 = 195631, + GO_CRUSADERS_CACHE_25 = 195632, + GO_CRUSADERS_CACHE_10_H = 195633, + GO_CRUSADERS_CACHE_25_H = 195635, // Tribute Chest (heroic) // 10-man modes - GO_TRIBUTE_CHEST_10H_25 = 195668, // 10man 01-24 attempts - GO_TRIBUTE_CHEST_10H_45 = 195667, // 10man 25-44 attempts - GO_TRIBUTE_CHEST_10H_50 = 195666, // 10man 45-49 attempts - GO_TRIBUTE_CHEST_10H_99 = 195665, // 10man 50 attempts + GO_TRIBUTE_CHEST_10H_25 = 195668, // 10man 01-24 attempts + GO_TRIBUTE_CHEST_10H_45 = 195667, // 10man 25-44 attempts + GO_TRIBUTE_CHEST_10H_50 = 195666, // 10man 45-49 attempts + GO_TRIBUTE_CHEST_10H_99 = 195665, // 10man 50 attempts // 25-man modes - GO_TRIBUTE_CHEST_25H_25 = 195672, // 25man 01-24 attempts - GO_TRIBUTE_CHEST_25H_45 = 195671, // 25man 25-44 attempts - GO_TRIBUTE_CHEST_25H_50 = 195670, // 25man 45-49 attempts - GO_TRIBUTE_CHEST_25H_99 = 195669, // 25man 50 attempts - - GO_ARGENT_COLISEUM_FLOOR = 195527, //20943 - GO_MAIN_GATE_DOOR = 195647, - GO_EAST_PORTCULLIS = 195648, - GO_WEB_DOOR = 195485, - GO_PORTAL_TO_DALARAN = 195682 + 
GO_TRIBUTE_CHEST_25H_25 = 195672, // 25man 01-24 attempts + GO_TRIBUTE_CHEST_25H_45 = 195671, // 25man 25-44 attempts + GO_TRIBUTE_CHEST_25H_50 = 195670, // 25man 45-49 attempts + GO_TRIBUTE_CHEST_25H_99 = 195669, // 25man 50 attempts + + GO_ARGENT_COLISEUM_FLOOR = 195527, //20943 + GO_MAIN_GATE_DOOR = 195647, + GO_EAST_PORTCULLIS = 195648, + GO_WEB_DOOR = 195485, + GO_PORTAL_TO_DALARAN = 195682 }; enum TCRAchievementData diff --git a/src/server/scripts/Northrend/Ulduar/Ulduar/boss_flame_leviathan.cpp b/src/server/scripts/Northrend/Ulduar/Ulduar/boss_flame_leviathan.cpp index 3925fd3b042..09f80db1f36 100644 --- a/src/server/scripts/Northrend/Ulduar/Ulduar/boss_flame_leviathan.cpp +++ b/src/server/scripts/Northrend/Ulduar/Ulduar/boss_flame_leviathan.cpp @@ -1626,7 +1626,7 @@ class FlameLeviathanPursuedTargetSelector }; public: - explicit FlameLeviathanPursuedTargetSelector(Unit* unit) : _me(unit) { }; + explicit FlameLeviathanPursuedTargetSelector() { }; bool operator()(WorldObject* target) const { @@ -1656,9 +1656,6 @@ class FlameLeviathanPursuedTargetSelector return !playerFound; } - - private: - Unit const* _me; }; class spell_pursue : public SpellScriptLoader @@ -1679,7 +1676,7 @@ class spell_pursue : public SpellScriptLoader private: void FilterTargets(std::list<WorldObject*>& targets) { - targets.remove_if(FlameLeviathanPursuedTargetSelector(GetCaster())); + targets.remove_if(FlameLeviathanPursuedTargetSelector()); if (!targets.empty()) { //! In the end, only one target should be selected diff --git a/src/server/scripts/Northrend/Ulduar/Ulduar/boss_razorscale.cpp b/src/server/scripts/Northrend/Ulduar/Ulduar/boss_razorscale.cpp index 41985059aae..2b5e11c0d50 100644 --- a/src/server/scripts/Northrend/Ulduar/Ulduar/boss_razorscale.cpp +++ b/src/server/scripts/Northrend/Ulduar/Ulduar/boss_razorscale.cpp @@ -1469,8 +1469,12 @@ struct npc_razorscale_devouring_flame : public ScriptedAI void Reset() override { - DoCastSelf(DEVOURING_FLAME_GROUND); + me->SetReactState(REACT_PASSIVE); + DoCastSelf(DEVOURING_FLAME_GROUND, true); } + + // Evade caused by Spell::SummonGuardian. 
The creature doesn't need to evade at all; it is despawned when Razorscale enters evade mode + void EnterEvadeMode(EvadeReason /*why*/) override { } }; class go_razorscale_harpoon : public GameObjectScript diff --git a/src/server/scripts/Outland/Auchindoun/SethekkHalls/boss_talon_king_ikiss.cpp b/src/server/scripts/Outland/Auchindoun/SethekkHalls/boss_talon_king_ikiss.cpp index a6340227270..1573fb3bcf7 100644 --- a/src/server/scripts/Outland/Auchindoun/SethekkHalls/boss_talon_king_ikiss.cpp +++ b/src/server/scripts/Outland/Auchindoun/SethekkHalls/boss_talon_king_ikiss.cpp @@ -178,6 +178,9 @@ class spell_talon_king_ikiss_blink : public SpellScriptLoader void FilterTargets(std::list<WorldObject*>& targets) { + if (targets.empty()) + return; + WorldObject* target = Trinity::Containers::SelectRandomContainerElement(targets); targets.clear(); targets.push_back(target); diff --git a/src/server/scripts/Outland/BlackTemple/boss_illidan.cpp b/src/server/scripts/Outland/BlackTemple/boss_illidan.cpp index 3c2eb05a083..181a01f6745 100644 --- a/src/server/scripts/Outland/BlackTemple/boss_illidan.cpp +++ b/src/server/scripts/Outland/BlackTemple/boss_illidan.cpp @@ -407,6 +407,12 @@ Position const IllidanDBTargetPoints[4] = { 660.3492f, 345.5749f, 353.2961f } }; +Position const BladesPositions[2] = +{ + { 676.226013f, 325.230988f }, + { 678.059998f, 285.220001f } +}; + uint32 const SummonCageTrapSpells[8] = { SPELL_SUMMON_CAGE_TRAP_1, @@ -438,15 +444,14 @@ private: class ChargeTargetSelector : public std::unary_function<Unit*, bool> { public: - ChargeTargetSelector(Unit const* unit) : _me(unit) { } + ChargeTargetSelector() { } bool operator()(Unit* unit) const { - return unit->GetTypeId() == TYPEID_PLAYER && _me->GetDistance2d(unit) > 25.0f; + return unit->GetTypeId() == TYPEID_PLAYER + && unit->GetDistance2d(BladesPositions[0].GetPositionX(), BladesPositions[0].GetPositionY()) > 25.0f + && unit->GetDistance2d(BladesPositions[1].GetPositionX(), BladesPositions[1].GetPositionY()) > 25.0f; } - -private: - Unit const* _me; }; struct boss_illidan_stormrage : public BossAI @@ -1497,7 +1502,7 @@ struct npc_flame_of_azzinoth : public ScriptedAI _events.ScheduleEvent(EVENT_FLAME_CHARGE, Seconds(5)); break; case EVENT_FLAME_CHARGE: - if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, ChargeTargetSelector(me))) + if (Unit* target = SelectTarget(SELECT_TARGET_RANDOM, 0, ChargeTargetSelector())) { DoCast(target, SPELL_CHARGE); _events.Repeat(Seconds(5)); diff --git a/src/server/scripts/Outland/HellfireCitadel/BloodFurnace/boss_kelidan_the_breaker.cpp b/src/server/scripts/Outland/HellfireCitadel/BloodFurnace/boss_kelidan_the_breaker.cpp index 059613f9450..f49b02949eb 100644 --- a/src/server/scripts/Outland/HellfireCitadel/BloodFurnace/boss_kelidan_the_breaker.cpp +++ b/src/server/scripts/Outland/HellfireCitadel/BloodFurnace/boss_kelidan_the_breaker.cpp @@ -371,4 +371,3 @@ void AddSC_boss_kelidan_the_breaker() new boss_kelidan_the_breaker(); new npc_shadowmoon_channeler(); } - diff --git a/src/server/scripts/Outland/HellfireCitadel/BloodFurnace/instance_blood_furnace.cpp b/src/server/scripts/Outland/HellfireCitadel/BloodFurnace/instance_blood_furnace.cpp index 6eec5664855..cc68806ecf6 100644 --- a/src/server/scripts/Outland/HellfireCitadel/BloodFurnace/instance_blood_furnace.cpp +++ b/src/server/scripts/Outland/HellfireCitadel/BloodFurnace/instance_blood_furnace.cpp @@ -322,4 +322,3 @@ void AddSC_instance_blood_furnace() { new instance_blood_furnace(); } - diff --git 
a/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_omor_the_unscarred.cpp b/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_omor_the_unscarred.cpp index a8914c31b96..6da7007d42c 100644 --- a/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_omor_the_unscarred.cpp +++ b/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_omor_the_unscarred.cpp @@ -239,4 +239,3 @@ void AddSC_boss_omor_the_unscarred() { new boss_omor_the_unscarred(); } - diff --git a/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_vazruden_the_herald.cpp b/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_vazruden_the_herald.cpp index 88cf3e61bc7..6a9d6a020f1 100644 --- a/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_vazruden_the_herald.cpp +++ b/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_vazruden_the_herald.cpp @@ -524,4 +524,3 @@ void AddSC_boss_vazruden_the_herald() new boss_nazan(); new npc_hellfire_sentry(); } - diff --git a/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_watchkeeper_gargolmar.cpp b/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_watchkeeper_gargolmar.cpp index dd2e7700d1c..a00c492a807 100644 --- a/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_watchkeeper_gargolmar.cpp +++ b/src/server/scripts/Outland/HellfireCitadel/HellfireRamparts/boss_watchkeeper_gargolmar.cpp @@ -183,4 +183,3 @@ void AddSC_boss_watchkeeper_gargolmar() { new boss_watchkeeper_gargolmar(); } - diff --git a/src/server/scripts/Outland/HellfireCitadel/MagtheridonsLair/instance_magtheridons_lair.cpp b/src/server/scripts/Outland/HellfireCitadel/MagtheridonsLair/instance_magtheridons_lair.cpp index 087343a83f4..692c688ad8e 100644 --- a/src/server/scripts/Outland/HellfireCitadel/MagtheridonsLair/instance_magtheridons_lair.cpp +++ b/src/server/scripts/Outland/HellfireCitadel/MagtheridonsLair/instance_magtheridons_lair.cpp @@ -145,4 +145,3 @@ void AddSC_instance_magtheridons_lair() { new instance_magtheridons_lair(); } - diff --git a/src/server/scripts/Outland/TempestKeep/Eye/boss_astromancer.cpp b/src/server/scripts/Outland/TempestKeep/Eye/boss_astromancer.cpp index 813910f5bc5..12cbff7fc25 100644 --- a/src/server/scripts/Outland/TempestKeep/Eye/boss_astromancer.cpp +++ b/src/server/scripts/Outland/TempestKeep/Eye/boss_astromancer.cpp @@ -546,4 +546,3 @@ void AddSC_boss_high_astromancer_solarian() new npc_solarium_priest(); new spell_astromancer_wrath_of_the_astromancer(); } - diff --git a/src/server/scripts/Outland/TempestKeep/Eye/boss_void_reaver.cpp b/src/server/scripts/Outland/TempestKeep/Eye/boss_void_reaver.cpp index 6c2f7500055..f2720b4ffe9 100644 --- a/src/server/scripts/Outland/TempestKeep/Eye/boss_void_reaver.cpp +++ b/src/server/scripts/Outland/TempestKeep/Eye/boss_void_reaver.cpp @@ -171,4 +171,3 @@ void AddSC_boss_void_reaver() { new boss_void_reaver(); } - diff --git a/src/server/scripts/Outland/TempestKeep/Eye/instance_the_eye.cpp b/src/server/scripts/Outland/TempestKeep/Eye/instance_the_eye.cpp index 71471909f6b..87a291ec19e 100644 --- a/src/server/scripts/Outland/TempestKeep/Eye/instance_the_eye.cpp +++ b/src/server/scripts/Outland/TempestKeep/Eye/instance_the_eye.cpp @@ -129,4 +129,3 @@ void AddSC_instance_the_eye() { new instance_the_eye; } - diff --git a/src/server/scripts/Outland/TempestKeep/Eye/the_eye.cpp b/src/server/scripts/Outland/TempestKeep/Eye/the_eye.cpp index fb2361f2837..5aa2d7bfbc8 100644 --- 
a/src/server/scripts/Outland/TempestKeep/Eye/the_eye.cpp +++ b/src/server/scripts/Outland/TempestKeep/Eye/the_eye.cpp @@ -112,4 +112,3 @@ void AddSC_the_eye() { new npc_crystalcore_devastator(); } - diff --git a/src/server/scripts/Outland/TempestKeep/Mechanar/boss_gatewatcher_ironhand.cpp b/src/server/scripts/Outland/TempestKeep/Mechanar/boss_gatewatcher_ironhand.cpp index 0b060d35771..6fa78e6ff6d 100644 --- a/src/server/scripts/Outland/TempestKeep/Mechanar/boss_gatewatcher_ironhand.cpp +++ b/src/server/scripts/Outland/TempestKeep/Mechanar/boss_gatewatcher_ironhand.cpp @@ -135,4 +135,3 @@ void AddSC_boss_gatewatcher_iron_hand() { new boss_gatewatcher_iron_hand(); } - diff --git a/src/server/scripts/Outland/TempestKeep/Mechanar/boss_pathaleon_the_calculator.cpp b/src/server/scripts/Outland/TempestKeep/Mechanar/boss_pathaleon_the_calculator.cpp index 25bbfe0392a..6f3547f722e 100644 --- a/src/server/scripts/Outland/TempestKeep/Mechanar/boss_pathaleon_the_calculator.cpp +++ b/src/server/scripts/Outland/TempestKeep/Mechanar/boss_pathaleon_the_calculator.cpp @@ -253,4 +253,3 @@ void AddSC_boss_pathaleon_the_calculator() new boss_pathaleon_the_calculator(); new npc_nether_wraith(); } - diff --git a/src/server/scripts/Outland/TempestKeep/arcatraz/boss_harbinger_skyriss.cpp b/src/server/scripts/Outland/TempestKeep/arcatraz/boss_harbinger_skyriss.cpp index bbe26ac1bb2..e0f8577169a 100644 --- a/src/server/scripts/Outland/TempestKeep/arcatraz/boss_harbinger_skyriss.cpp +++ b/src/server/scripts/Outland/TempestKeep/arcatraz/boss_harbinger_skyriss.cpp @@ -304,4 +304,3 @@ void AddSC_boss_harbinger_skyriss() new boss_harbinger_skyriss(); new boss_harbinger_skyriss_illusion(); } - diff --git a/src/server/scripts/Outland/TempestKeep/arcatraz/instance_arcatraz.cpp b/src/server/scripts/Outland/TempestKeep/arcatraz/instance_arcatraz.cpp index ba5b7b7e0a5..b5405d4e86c 100644 --- a/src/server/scripts/Outland/TempestKeep/arcatraz/instance_arcatraz.cpp +++ b/src/server/scripts/Outland/TempestKeep/arcatraz/instance_arcatraz.cpp @@ -195,4 +195,3 @@ void AddSC_instance_arcatraz() { new instance_arcatraz(); } - diff --git a/src/server/scripts/Outland/TempestKeep/botanica/boss_high_botanist_freywinn.cpp b/src/server/scripts/Outland/TempestKeep/botanica/boss_high_botanist_freywinn.cpp index 204a52609b0..35c2ea74c7b 100644 --- a/src/server/scripts/Outland/TempestKeep/botanica/boss_high_botanist_freywinn.cpp +++ b/src/server/scripts/Outland/TempestKeep/botanica/boss_high_botanist_freywinn.cpp @@ -220,4 +220,3 @@ void AddSC_boss_high_botanist_freywinn() { new boss_high_botanist_freywinn(); } - diff --git a/src/server/scripts/Outland/TempestKeep/botanica/boss_laj.cpp b/src/server/scripts/Outland/TempestKeep/botanica/boss_laj.cpp index 31241a050dd..1e747509069 100644 --- a/src/server/scripts/Outland/TempestKeep/botanica/boss_laj.cpp +++ b/src/server/scripts/Outland/TempestKeep/botanica/boss_laj.cpp @@ -231,4 +231,3 @@ void AddSC_boss_laj() { new boss_laj(); } - diff --git a/src/server/scripts/Outland/TempestKeep/botanica/boss_warp_splinter.cpp b/src/server/scripts/Outland/TempestKeep/botanica/boss_warp_splinter.cpp index 291be813dc1..49f7ffde633 100644 --- a/src/server/scripts/Outland/TempestKeep/botanica/boss_warp_splinter.cpp +++ b/src/server/scripts/Outland/TempestKeep/botanica/boss_warp_splinter.cpp @@ -250,4 +250,3 @@ void AddSC_boss_warp_splinter() new boss_warp_splinter(); new npc_warp_splinter_treant(); } - diff --git a/src/server/scripts/Outland/zone_shadowmoon_valley.cpp 
b/src/server/scripts/Outland/zone_shadowmoon_valley.cpp index 582272d90a4..26f29a08a13 100644 --- a/src/server/scripts/Outland/zone_shadowmoon_valley.cpp +++ b/src/server/scripts/Outland/zone_shadowmoon_valley.cpp @@ -1405,7 +1405,7 @@ enum Enraged_Dpirits // ENRAGED EARTH SPIRIT SPELLS SPELL_FIERY_BOULDER = 38498, SPELL_SUMMON_ENRAGED_EARTH_SHARD = 38365, - + // SOULS NPC_EARTHEN_SOUL = 21073, NPC_FIERY_SOUL = 21097, diff --git a/src/server/scripts/Outland/zone_terokkar_forest.cpp b/src/server/scripts/Outland/zone_terokkar_forest.cpp index ba39e16e4d7..a8b91b913e5 100644 --- a/src/server/scripts/Outland/zone_terokkar_forest.cpp +++ b/src/server/scripts/Outland/zone_terokkar_forest.cpp @@ -19,26 +19,22 @@ /* ScriptData SDName: Terokkar_Forest SD%Complete: 85 -SDComment: Quest support: 9889, 10009, 10873, 10896, 10898, 11096, 10052, 10051. Skettis->Ogri'la Flight +SDComment: Quest support: 9889, 10898, 10052, 10051. SDCategory: Terokkar Forest EndScriptData */ /* ContentData npc_unkor_the_ruthless -npc_infested_root_walker -npc_rotting_forest_rager -npc_floon npc_isla_starmane -npc_slim +npc_skywing +npc_akuno EndContentData */ #include "ScriptMgr.h" #include "GameObject.h" -#include "GameObjectAI.h" #include "Group.h" #include "Player.h" #include "ScriptedEscortAI.h" -#include "ScriptedGossip.h" #include "WorldSession.h" /*###### @@ -166,43 +162,6 @@ public: }; /*###### -## npc_infested_root_walker -######*/ - -enum InfestedRootWalker -{ - SPELL_SUMMON_WOOD_MITES = 39130 -}; - -class npc_infested_root_walker : public CreatureScript -{ -public: - npc_infested_root_walker() : CreatureScript("npc_infested_root_walker") { } - - CreatureAI* GetAI(Creature* creature) const override - { - return new npc_infested_root_walkerAI(creature); - } - - struct npc_infested_root_walkerAI : public ScriptedAI - { - npc_infested_root_walkerAI(Creature* creature) : ScriptedAI(creature) { } - - void Reset() override { } - void EnterCombat(Unit* /*who*/) override { } - - void DamageTaken(Unit* done_by, uint32 &damage) override - { - if (done_by && done_by->GetTypeId() == TYPEID_PLAYER) - if (me->GetHealth() <= damage) - if (rand32() % 100 < 75) - //Summon Wood Mites - DoCast(me, SPELL_SUMMON_WOOD_MITES, true); - } - }; -}; - -/*###### ## npc_skywing ######*/ @@ -264,159 +223,6 @@ public: }; /*###### -## npc_rotting_forest_rager -######*/ - -enum RottingForestRager -{ - SPELL_SUMMON_LOTS_OF_WOOD_MITES = 39134 -}; - -class npc_rotting_forest_rager : public CreatureScript -{ -public: - npc_rotting_forest_rager() : CreatureScript("npc_rotting_forest_rager") { } - - CreatureAI* GetAI(Creature* creature) const override - { - return new npc_rotting_forest_ragerAI(creature); - } - - struct npc_rotting_forest_ragerAI : public ScriptedAI - { - npc_rotting_forest_ragerAI(Creature* creature) : ScriptedAI(creature) { } - - void Reset() override { } - void EnterCombat(Unit* /*who*/) override { } - - void DamageTaken(Unit* done_by, uint32 &damage) override - { - if (done_by->GetTypeId() == TYPEID_PLAYER) - if (me->GetHealth() <= damage) - if (rand32() % 100 < 75) - //Summon Lots of Wood Mites - DoCast(me, SPELL_SUMMON_LOTS_OF_WOOD_MITES, true); - } - }; -}; - -/*###### -## npc_floon -######*/ - -enum Floon -{ - SAY_FLOON_ATTACK = 0, - OPTION_ID_PAY_UP_OR_DIE = 0, - OPTION_ID_COLLECT_A_DEBT = 0, - MENU_ID_PAY_UP_OR_DIE = 7731, - MENU_ID_COLLECT_A_DEBT = 7732, - GOSSIP_FLOON_STRANGE_SOUNDS = 9442, - GOSSIP_HE_ALREADY_KILLED_ME = 9443, - - SPELL_SILENCE = 6726, - SPELL_FROSTBOLT = 9672, - SPELL_FROST_NOVA = 11831, - - 
QUEST_CRACKIN_SOME_SKULLS = 10009 -}; - -class npc_floon : public CreatureScript -{ -public: - npc_floon() : CreatureScript("npc_floon") { } - - struct npc_floonAI : public ScriptedAI - { - npc_floonAI(Creature* creature) : ScriptedAI(creature) - { - Initialize(); - m_uiNormFaction = creature->GetFaction(); - } - - void Initialize() - { - Silence_Timer = 2000; - Frostbolt_Timer = 4000; - FrostNova_Timer = 9000; - } - - uint32 m_uiNormFaction; - uint32 Silence_Timer; - uint32 Frostbolt_Timer; - uint32 FrostNova_Timer; - - void Reset() override - { - Initialize(); - if (me->GetFaction() != m_uiNormFaction) - me->SetFaction(m_uiNormFaction); - } - - void EnterCombat(Unit* /*who*/) override { } - - void UpdateAI(uint32 diff) override - { - if (!UpdateVictim()) - return; - - if (Silence_Timer <= diff) - { - DoCastVictim(SPELL_SILENCE); - Silence_Timer = 30000; - } else Silence_Timer -= diff; - - if (FrostNova_Timer <= diff) - { - DoCast(me, SPELL_FROST_NOVA); - FrostNova_Timer = 20000; - } else FrostNova_Timer -= diff; - - if (Frostbolt_Timer <= diff) - { - DoCastVictim(SPELL_FROSTBOLT); - Frostbolt_Timer = 5000; - } else Frostbolt_Timer -= diff; - - DoMeleeAttackIfReady(); - } - - bool GossipSelect(Player* player, uint32 /*menuId*/, uint32 gossipListId) override - { - uint32 const action = player->PlayerTalkClass->GetGossipOptionAction(gossipListId); - ClearGossipMenuFor(player); - if (action == GOSSIP_ACTION_INFO_DEF) - { - AddGossipItemFor(player, MENU_ID_PAY_UP_OR_DIE, OPTION_ID_PAY_UP_OR_DIE, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF + 1); - SendGossipMenuFor(player, GOSSIP_HE_ALREADY_KILLED_ME, me->GetGUID()); - } - if (action == GOSSIP_ACTION_INFO_DEF + 1) - { - CloseGossipMenuFor(player); - me->SetFaction(FACTION_ARAKKOA); - Talk(SAY_FLOON_ATTACK, player); - AttackStart(player); - } - return true; - } - - bool GossipHello(Player* player) override - { - if (player->GetQuestStatus(QUEST_CRACKIN_SOME_SKULLS) == QUEST_STATUS_INCOMPLETE) - AddGossipItemFor(player, MENU_ID_COLLECT_A_DEBT, OPTION_ID_COLLECT_A_DEBT, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF); - - SendGossipMenuFor(player, GOSSIP_FLOON_STRANGE_SOUNDS, me->GetGUID()); - return true; - } - }; - - CreatureAI* GetAI(Creature* creature) const override - { - return new npc_floonAI(creature); - } -}; - -/*###### ## npc_isla_starmane ######*/ enum IslaStarmaneData @@ -512,135 +318,6 @@ public: } }; -/*###### -## go_skull_pile -######*/ - -enum SkullPile -{ - OPTION_ID_GEZZARAK_THE_HUNTRESS = 0, - OPTION_ID_DARKSCREECHER_AKKARAI = 1, - OPTION_ID_KARROG = 2, - OPTION_ID_VAKKIZ_THE_WINDRAGER = 3, - GOSSIP_MENU_ID_SKULL_PILE = 8660, - ADVERSARIAL_BLOOD = 11885, - SUMMON_GEZZARAK_THE_HUNTRESS = 40632, - SUMMON_KARROG = 40640, - SUMMON_DARKSCREECHER_AKKARAI = 40642, - SUMMON_VAKKIZ_THE_WINDRAGER = 40644 -}; - -class go_skull_pile : public GameObjectScript -{ -public: - go_skull_pile() : GameObjectScript("go_skull_pile") { } - - struct go_skull_pileAI : public GameObjectAI - { - go_skull_pileAI(GameObject* go) : GameObjectAI(go) { } - - bool GossipSelect(Player* player, uint32 /*menuId*/, uint32 gossipListId) override - { - uint32 const sender = player->PlayerTalkClass->GetGossipOptionSender(gossipListId); - uint32 const action = player->PlayerTalkClass->GetGossipOptionAction(gossipListId); - ClearGossipMenuFor(player); - switch (sender) - { - case GOSSIP_SENDER_MAIN: SendActionMenu(player, action); break; - } - return true; - } - - bool GossipHello(Player* player) override - { - if ((player->GetQuestStatus(ADVERSARIAL_BLOOD) == 
QUEST_STATUS_INCOMPLETE) || player->GetQuestRewardStatus(ADVERSARIAL_BLOOD)) - { - AddGossipItemFor(player, GOSSIP_MENU_ID_SKULL_PILE, OPTION_ID_GEZZARAK_THE_HUNTRESS, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF + 1); - AddGossipItemFor(player, GOSSIP_MENU_ID_SKULL_PILE, OPTION_ID_DARKSCREECHER_AKKARAI, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF + 2); - AddGossipItemFor(player, GOSSIP_MENU_ID_SKULL_PILE, OPTION_ID_KARROG, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF + 3); - AddGossipItemFor(player, GOSSIP_MENU_ID_SKULL_PILE, OPTION_ID_VAKKIZ_THE_WINDRAGER, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INFO_DEF + 4); - } - - SendGossipMenuFor(player, me->GetGOInfo()->questgiver.gossipID, me->GetGUID()); - return true; - } - - void SendActionMenu(Player* player, uint32 action) - { - switch (action) - { - case GOSSIP_ACTION_INFO_DEF + 1: - player->CastSpell(player, SUMMON_GEZZARAK_THE_HUNTRESS, false); - break; - case GOSSIP_ACTION_INFO_DEF + 2: - player->CastSpell(player, SUMMON_DARKSCREECHER_AKKARAI, false); - break; - case GOSSIP_ACTION_INFO_DEF + 3: - player->CastSpell(player, SUMMON_KARROG, false); - break; - case GOSSIP_ACTION_INFO_DEF + 4: - player->CastSpell(player, SUMMON_VAKKIZ_THE_WINDRAGER, false); - break; - } - } - }; - - GameObjectAI* GetAI(GameObject* go) const override - { - return new go_skull_pileAI(go); - } -}; - -/*###### -## npc_slim -######*/ - -enum Slim -{ - FACTION_CONSORTIUM = 933, - NPC_TEXT_NEITHER_SLIM_NOR_SHADY = 9895, - NPC_TEXT_I_SEE_YOU_ARE_A_FRIEND = 9896 -}; - -class npc_slim : public CreatureScript -{ -public: - npc_slim() : CreatureScript("npc_slim") { } - - struct npc_slimAI : public ScriptedAI - { - npc_slimAI(Creature* creature) : ScriptedAI(creature) { } - - bool GossipSelect(Player* player, uint32 /*menuId*/, uint32 gossipListId) override - { - uint32 const action = player->PlayerTalkClass->GetGossipOptionAction(gossipListId); - ClearGossipMenuFor(player); - if (action == GOSSIP_ACTION_TRADE) - player->GetSession()->SendListInventory(me->GetGUID()); - - return true; - } - - bool GossipHello(Player* player) override - { - if (me->IsVendor() && player->GetReputationRank(FACTION_CONSORTIUM) >= REP_FRIENDLY) - { - AddGossipItemFor(player, GOSSIP_ICON_VENDOR, GOSSIP_TEXT_BROWSE_GOODS, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_TRADE); - SendGossipMenuFor(player, NPC_TEXT_I_SEE_YOU_ARE_A_FRIEND, me->GetGUID()); - } - else - SendGossipMenuFor(player, NPC_TEXT_NEITHER_SLIM_NOR_SHADY, me->GetGUID()); - - return true; - } - }; - - CreatureAI* GetAI(Creature* creature) const override - { - return new npc_slimAI(creature); - } -}; - /*######## ####npc_akuno #####*/ @@ -707,12 +384,7 @@ public: void AddSC_terokkar_forest() { new npc_unkor_the_ruthless(); - new npc_infested_root_walker(); - new npc_rotting_forest_rager(); - new npc_floon(); new npc_isla_starmane(); - new go_skull_pile(); new npc_skywing(); - new npc_slim(); new npc_akuno(); } diff --git a/src/server/scripts/Spells/spell_holiday.cpp b/src/server/scripts/Spells/spell_holiday.cpp index f211f3248ae..8c395077c96 100644 --- a/src/server/scripts/Spells/spell_holiday.cpp +++ b/src/server/scripts/Spells/spell_holiday.cpp @@ -1489,117 +1489,175 @@ enum TorchSpells SPELL_TORCH_TOSSING_PRACTICE = 46630, SPELL_TORCH_TOSSING_TRAINING_SUCCESS_ALLIANCE = 45719, SPELL_TORCH_TOSSING_TRAINING_SUCCESS_HORDE = 46651, + SPELL_TARGET_INDICATOR_COSMETIC = 46901, + SPELL_TARGET_INDICATOR = 45723, SPELL_BRAZIERS_HIT = 45724 }; // 45724 - Braziers Hit! 
-class spell_midsummer_braziers_hit : public SpellScriptLoader +class spell_midsummer_braziers_hit : public AuraScript { - public: - spell_midsummer_braziers_hit() : SpellScriptLoader("spell_midsummer_braziers_hit") { } + PrepareAuraScript(spell_midsummer_braziers_hit); - class spell_midsummer_braziers_hit_AuraScript : public AuraScript + bool Validate(SpellInfo const* /*spellInfo*/) override + { + return ValidateSpellInfo( { - PrepareAuraScript(spell_midsummer_braziers_hit_AuraScript); + SPELL_TORCH_TOSSING_TRAINING, + SPELL_TORCH_TOSSING_PRACTICE, + SPELL_TORCH_TOSSING_TRAINING_SUCCESS_ALLIANCE, + SPELL_TORCH_TOSSING_TRAINING_SUCCESS_HORDE + }); + } - bool Validate(SpellInfo const* /*spellInfo*/) override - { - return ValidateSpellInfo({ SPELL_TORCH_TOSSING_TRAINING, SPELL_TORCH_TOSSING_PRACTICE }); - } + void HandleEffectApply(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) + { + Player* player = GetTarget()->ToPlayer(); + if (!player) + return; - void HandleEffectApply(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) - { - Player* player = GetTarget()->ToPlayer(); - if (!player) - return; + if ((player->HasAura(SPELL_TORCH_TOSSING_TRAINING) && GetStackAmount() == 8) || (player->HasAura(SPELL_TORCH_TOSSING_PRACTICE) && GetStackAmount() == 20)) + { + if (player->GetTeam() == ALLIANCE) + player->CastSpell(player, SPELL_TORCH_TOSSING_TRAINING_SUCCESS_ALLIANCE, true); + else if (player->GetTeam() == HORDE) + player->CastSpell(player, SPELL_TORCH_TOSSING_TRAINING_SUCCESS_HORDE, true); + Remove(); + } + } - if ((player->HasAura(SPELL_TORCH_TOSSING_TRAINING) && GetStackAmount() == 8) || (player->HasAura(SPELL_TORCH_TOSSING_PRACTICE) && GetStackAmount() == 20)) - { - if (player->GetTeam() == ALLIANCE) - player->CastSpell(player, SPELL_TORCH_TOSSING_TRAINING_SUCCESS_ALLIANCE, true); - else if (player->GetTeam() == HORDE) - player->CastSpell(player, SPELL_TORCH_TOSSING_TRAINING_SUCCESS_HORDE, true); - Remove(); - } - } + void Register() override + { + AfterEffectApply += AuraEffectApplyFn(spell_midsummer_braziers_hit::HandleEffectApply, EFFECT_0, SPELL_AURA_DUMMY, AURA_EFFECT_HANDLE_REAPPLY); + } +}; - void Register() override - { - AfterEffectApply += AuraEffectApplyFn(spell_midsummer_braziers_hit_AuraScript::HandleEffectApply, EFFECT_0, SPELL_AURA_DUMMY, AuraEffectHandleModes(AURA_EFFECT_HANDLE_REAPPLY)); - } - }; +// 45907 - Torch Target Picker +class spell_midsummer_torch_target_picker : public SpellScript +{ + PrepareSpellScript(spell_midsummer_torch_target_picker); - AuraScript* GetAuraScript() const override - { - return new spell_midsummer_braziers_hit_AuraScript(); - } + bool Validate(SpellInfo const* /*spellInfo*/) override + { + return ValidateSpellInfo({ SPELL_TARGET_INDICATOR_COSMETIC, SPELL_TARGET_INDICATOR }); + } + + void HandleScript(SpellEffIndex /*effIndex*/) + { + Unit* target = GetHitUnit(); + target->CastSpell(target, SPELL_TARGET_INDICATOR_COSMETIC, true); + target->CastSpell(target, SPELL_TARGET_INDICATOR, true); + } + + void Register() override + { + OnEffectHitTarget += SpellEffectFn(spell_midsummer_torch_target_picker::HandleScript, EFFECT_0, SPELL_EFFECT_DUMMY); + } +}; + +// 46054 - Torch Toss (land) +class spell_midsummer_torch_toss_land : public SpellScript +{ + PrepareSpellScript(spell_midsummer_torch_toss_land); + + bool Validate(SpellInfo const* /*spellInfo*/) override + { + return ValidateSpellInfo({ SPELL_BRAZIERS_HIT }); + } + + void HandleScript(SpellEffIndex /*effIndex*/) + { + GetHitUnit()->CastSpell(GetCaster(), 
SPELL_BRAZIERS_HIT, true); + } + + void Register() override + { + OnEffectHitTarget += SpellEffectFn(spell_midsummer_torch_toss_land::HandleScript, EFFECT_0, SPELL_EFFECT_SCRIPT_EFFECT); + } }; enum RibbonPoleData { - SPELL_HAS_FULL_MIDSUMMER_SET = 58933, - SPELL_BURNING_HOT_POLE_DANCE = 58934, - SPELL_RIBBON_DANCE_COSMETIC = 29726, - SPELL_RIBBON_DANCE = 29175, - GO_RIBBON_POLE = 181605, + SPELL_HAS_FULL_MIDSUMMER_SET = 58933, + SPELL_BURNING_HOT_POLE_DANCE = 58934, + SPELL_RIBBON_POLE_PERIODIC_VISUAL = 45406, + SPELL_RIBBON_DANCE = 29175, + SPELL_TEST_RIBBON_POLE_1 = 29705, + SPELL_TEST_RIBBON_POLE_2 = 29726, + SPELL_TEST_RIBBON_POLE_3 = 29727 }; -class spell_gen_ribbon_pole_dancer_check : public SpellScriptLoader +// 29705, 29726, 29727 - Test Ribbon Pole Channel +class spell_midsummer_test_ribbon_pole_channel : public AuraScript { - public: - spell_gen_ribbon_pole_dancer_check() : SpellScriptLoader("spell_gen_ribbon_pole_dancer_check") { } + PrepareAuraScript(spell_midsummer_test_ribbon_pole_channel); - class spell_gen_ribbon_pole_dancer_check_AuraScript : public AuraScript + bool Validate(SpellInfo const* /*spellInfo*/) override + { + return ValidateSpellInfo( { - PrepareAuraScript(spell_gen_ribbon_pole_dancer_check_AuraScript); + SPELL_RIBBON_POLE_PERIODIC_VISUAL, + SPELL_BURNING_HOT_POLE_DANCE, + SPELL_HAS_FULL_MIDSUMMER_SET, + SPELL_RIBBON_DANCE + }); + } - bool Validate(SpellInfo const* /*spellInfo*/) override - { - return ValidateSpellInfo( - { - SPELL_HAS_FULL_MIDSUMMER_SET, - SPELL_RIBBON_DANCE, - SPELL_BURNING_HOT_POLE_DANCE - }); - } + void HandleRemove(AuraEffect const* /*aurEff*/, AuraEffectHandleModes /*mode*/) + { + GetTarget()->RemoveAurasDueToSpell(SPELL_RIBBON_POLE_PERIODIC_VISUAL); + } - void PeriodicTick(AuraEffect const* /*aurEff*/) - { - Unit* target = GetTarget(); + void PeriodicTick(AuraEffect const* /*aurEff*/) + { + Unit* target = GetTarget(); + target->CastSpell(target, SPELL_RIBBON_POLE_PERIODIC_VISUAL, true); - // check if aura needs to be removed - if (!target->FindNearestGameObject(GO_RIBBON_POLE, 8.0f) || !target->HasUnitState(UNIT_STATE_CASTING)) - { - target->InterruptNonMeleeSpells(false); - target->RemoveAurasDueToSpell(GetId()); - target->RemoveAura(SPELL_RIBBON_DANCE_COSMETIC); - return; - } + if (Aura* aur = target->GetAura(SPELL_RIBBON_DANCE)) + { + aur->SetMaxDuration(std::min(3600000, aur->GetMaxDuration() + 180000)); + aur->RefreshDuration(); - // set xp buff duration - if (Aura* aur = target->GetAura(SPELL_RIBBON_DANCE)) - { - aur->SetMaxDuration(std::min(3600000, aur->GetMaxDuration() + 180000)); - aur->RefreshDuration(); + if (aur->GetMaxDuration() == 3600000 && target->HasAura(SPELL_HAS_FULL_MIDSUMMER_SET)) + target->CastSpell(target, SPELL_BURNING_HOT_POLE_DANCE, true); + } + else + target->CastSpell(target, SPELL_RIBBON_DANCE, true); + } - // reward achievement criteria - if (aur->GetMaxDuration() == 3600000 && target->HasAura(SPELL_HAS_FULL_MIDSUMMER_SET)) - target->CastSpell(target, SPELL_BURNING_HOT_POLE_DANCE, true); - } - else - target->AddAura(SPELL_RIBBON_DANCE, target); - } + void Register() override + { + AfterEffectRemove += AuraEffectRemoveFn(spell_midsummer_test_ribbon_pole_channel::HandleRemove, EFFECT_1, SPELL_AURA_PERIODIC_TRIGGER_SPELL, AURA_EFFECT_HANDLE_REAL); + OnEffectPeriodic += AuraEffectPeriodicFn(spell_midsummer_test_ribbon_pole_channel::PeriodicTick, EFFECT_1, SPELL_AURA_PERIODIC_TRIGGER_SPELL); + } +}; - void Register() override - { - OnEffectPeriodic += 
AuraEffectPeriodicFn(spell_gen_ribbon_pole_dancer_check_AuraScript::PeriodicTick, EFFECT_0, SPELL_AURA_PERIODIC_DUMMY); - } - }; +// 45406 - Holiday - Midsummer, Ribbon Pole Periodic Visual +class spell_midsummer_ribbon_pole_periodic_visual : public AuraScript +{ + PrepareAuraScript(spell_midsummer_ribbon_pole_periodic_visual); - AuraScript* GetAuraScript() const override + bool Validate(SpellInfo const* /*spellInfo*/) override + { + return ValidateSpellInfo( { - return new spell_gen_ribbon_pole_dancer_check_AuraScript(); - } + SPELL_TEST_RIBBON_POLE_1, + SPELL_TEST_RIBBON_POLE_2, + SPELL_TEST_RIBBON_POLE_3 + }); + } + + void PeriodicTick(AuraEffect const* /*aurEff*/) + { + Unit* target = GetTarget(); + if (!target->HasAura(SPELL_TEST_RIBBON_POLE_1) && !target->HasAura(SPELL_TEST_RIBBON_POLE_2) && !target->HasAura(SPELL_TEST_RIBBON_POLE_3)) + Remove(); + } + + void Register() override + { + OnEffectPeriodic += AuraEffectPeriodicFn(spell_midsummer_ribbon_pole_periodic_visual::PeriodicTick, EFFECT_0, SPELL_AURA_PERIODIC_DUMMY); + } }; void AddSC_holiday_spell_scripts() @@ -1650,6 +1708,9 @@ void AddSC_holiday_spell_scripts() new spell_brewfest_dismount_ram(); new spell_brewfest_barker_bunny(); // Midsummer Fire Festival - new spell_midsummer_braziers_hit(); - new spell_gen_ribbon_pole_dancer_check(); + RegisterAuraScript(spell_midsummer_braziers_hit); + RegisterSpellScript(spell_midsummer_torch_target_picker); + RegisterSpellScript(spell_midsummer_torch_toss_land); + RegisterAuraScript(spell_midsummer_test_ribbon_pole_channel); + RegisterAuraScript(spell_midsummer_ribbon_pole_periodic_visual); } diff --git a/src/server/scripts/Spells/spell_quest.cpp b/src/server/scripts/Spells/spell_quest.cpp index 7cc32340665..874fb45638c 100644 --- a/src/server/scripts/Spells/spell_quest.cpp +++ b/src/server/scripts/Spells/spell_quest.cpp @@ -317,7 +317,7 @@ class spell_q11396_11399_scourging_crystal_controller : public SpellScriptLoader void HandleDummy(SpellEffIndex /*effIndex*/) { - if (Unit* target = GetExplTargetUnit()) + if (Unit* target = GetHitUnit()) if (target->GetTypeId() == TYPEID_UNIT && target->HasAura(SPELL_FORCE_SHIELD_ARCANE_PURPLE_X3)) // Make sure nobody else is channeling the same target if (!target->HasAura(SPELL_SCOURGING_CRYSTAL_CONTROLLER)) diff --git a/src/server/scripts/World/duel_reset.cpp b/src/server/scripts/World/duel_reset.cpp index 32c391a5ef6..eb57ee50d81 100644 --- a/src/server/scripts/World/duel_reset.cpp +++ b/src/server/scripts/World/duel_reset.cpp @@ -137,4 +137,3 @@ void AddSC_duel_reset() { new DuelResetScript(); } - diff --git a/src/server/scripts/World/go_scripts.cpp b/src/server/scripts/World/go_scripts.cpp index 6bc225bdb27..91b7bf30ad7 100644 --- a/src/server/scripts/World/go_scripts.cpp +++ b/src/server/scripts/World/go_scripts.cpp @@ -37,7 +37,6 @@ go_soulwell go_bashir_crystalforge go_soulwell go_dragonflayer_cage -go_tadpole_cage go_amberpine_outhouse go_hive_pod go_veil_skith_cage @@ -1232,49 +1231,6 @@ public: }; /*###### -## Quest 11560: Oh Noes, the Tadpoles! 
-## go_tadpole_cage -######*/ - -enum Tadpoles -{ - QUEST_OH_NOES_THE_TADPOLES = 11560, - NPC_WINTERFIN_TADPOLE = 25201 -}; - -class go_tadpole_cage : public GameObjectScript -{ -public: - go_tadpole_cage() : GameObjectScript("go_tadpole_cage") { } - - struct go_tadpole_cageAI : public GameObjectAI - { - go_tadpole_cageAI(GameObject* go) : GameObjectAI(go) { } - - bool GossipHello(Player* player) override - { - me->UseDoorOrButton(); - if (player->GetQuestStatus(QUEST_OH_NOES_THE_TADPOLES) == QUEST_STATUS_INCOMPLETE) - { - Creature* pTadpole = me->FindNearestCreature(NPC_WINTERFIN_TADPOLE, 1.0f); - if (pTadpole) - { - pTadpole->DisappearAndDie(); - player->KilledMonsterCredit(NPC_WINTERFIN_TADPOLE); - //FIX: Summon minion tadpole - } - } - return true; - } - }; - - GameObjectAI* GetAI(GameObject* go) const override - { - return new go_tadpole_cageAI(go); - } -}; - -/*###### ## go_amberpine_outhouse ######*/ @@ -1528,10 +1484,18 @@ public: enum MidsummerPoleRibbon { - SPELL_POLE_DANCE = 29726, - SPELL_BLUE_FIRE_RING = 46842, - NPC_POLE_RIBBON_BUNNY = 17066, - ACTION_COSMETIC_FIRES = 0 + SPELL_TEST_RIBBON_POLE_1 = 29705, + SPELL_TEST_RIBBON_POLE_2 = 29726, + SPELL_TEST_RIBBON_POLE_3 = 29727, + NPC_POLE_RIBBON_BUNNY = 17066, + ACTION_COSMETIC_FIRES = 0 +}; + +uint32 const RibbonPoleSpells[3] = +{ + SPELL_TEST_RIBBON_POLE_1, + SPELL_TEST_RIBBON_POLE_2, + SPELL_TEST_RIBBON_POLE_3 }; class go_midsummer_ribbon_pole : public GameObjectScript @@ -1548,7 +1512,7 @@ public: if (Creature* creature = me->FindNearestCreature(NPC_POLE_RIBBON_BUNNY, 10.0f)) { creature->GetAI()->DoAction(ACTION_COSMETIC_FIRES); - player->CastSpell(creature, SPELL_POLE_DANCE, true); + player->CastSpell(player, RibbonPoleSpells[urand(0, 2)], true); } return true; } @@ -2101,7 +2065,6 @@ void AddSC_go_scripts() new go_table_theka(); new go_inconspicuous_landmark(); new go_soulwell(); - new go_tadpole_cage(); new go_dragonflayer_cage(); new go_amberpine_outhouse(); new go_hive_pod(); diff --git a/src/server/scripts/World/guards.cpp b/src/server/scripts/World/guards.cpp deleted file mode 100644 index 638e53c45c5..00000000000 --- a/src/server/scripts/World/guards.cpp +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/> - * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along - * with this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -/* ScriptData -SDName: Guards -SD%Complete: 100 -SDComment: -SDCategory: Guards -EndScriptData */ - -/* ContentData -guard_generic -guard_shattrath_aldor -guard_shattrath_scryer -EndContentData */ - -#include "ScriptMgr.h" -#include "GuardAI.h" -#include "MotionMaster.h" -#include "ObjectAccessor.h" -#include "Player.h" -#include "SpellInfo.h" - -enum GuardGeneric -{ - GENERIC_CREATURE_COOLDOWN = 5000, - - SAY_GUARD_SIL_AGGRO = 0, - - NPC_CENARION_HOLD_INFANTRY = 15184, - NPC_STORMWIND_CITY_GUARD = 68, - NPC_STORMWIND_CITY_PATROLLER = 1976, - NPC_ORGRIMMAR_GRUNT = 3296 -}; - -class guard_generic : public CreatureScript -{ -public: - guard_generic() : CreatureScript("guard_generic") { } - - struct guard_genericAI : public GuardAI - { - guard_genericAI(Creature* creature) : GuardAI(creature) - { - Initialize(); - } - - void Initialize() - { - globalCooldown = 0; - buffTimer = 0; - } - - void Reset() override - { - Initialize(); - } - - void EnterCombat(Unit* who) override - { - if (me->GetEntry() == NPC_CENARION_HOLD_INFANTRY) - Talk(SAY_GUARD_SIL_AGGRO, who); - if (SpellInfo const* spell = me->reachWithSpellAttack(who)) - DoCast(who, spell->Id); - } - - void UpdateAI(uint32 diff) override - { - //Always decrease our global cooldown first - if (globalCooldown > diff) - globalCooldown -= diff; - else - globalCooldown = 0; - - //Buff timer (only buff when we are alive and not in combat - if (me->IsAlive() && !me->IsInCombat()) - { - if (buffTimer <= diff) - { - //Find a spell that targets friendly and applies an aura (these are generally buffs) - SpellInfo const* info = SelectSpell(me, 0, 0, SELECT_TARGET_ANY_FRIEND, 0, 0, 0, 0, SELECT_EFFECT_AURA); - - if (info && !globalCooldown) - { - //Cast the buff spell - DoCast(me, info->Id); - - //Set our global cooldown - globalCooldown = GENERIC_CREATURE_COOLDOWN; - - //Set our timer to 10 minutes before rebuff - buffTimer = 600000; - } //Try again in 30 seconds - else buffTimer = 30000; - } else buffTimer -= diff; - } - - //Return since we have no target - if (!UpdateVictim()) - return; - - // Make sure our attack is ready and we arn't currently casting - if (me->isAttackReady() && !me->IsNonMeleeSpellCast(false)) - { - //If we are within range melee the target - if (me->IsWithinMeleeRange(me->GetVictim())) - { - bool healing = false; - SpellInfo const* info = nullptr; - - //Select a healing spell if less than 30% hp - if (me->HealthBelowPct(30)) - info = SelectSpell(me, 0, 0, SELECT_TARGET_ANY_FRIEND, 0, 0, 0, 0, SELECT_EFFECT_HEALING); - - //No healing spell available, select a hostile spell - if (info) - healing = true; - else - info = SelectSpell(me->GetVictim(), 0, 0, SELECT_TARGET_ANY_ENEMY, 0, 0, 0, 0, SELECT_EFFECT_DONTCARE); - - //20% chance to replace our white hit with a spell - if (info && urand(0, 99) < 20 && !globalCooldown) - { - //Cast the spell - if (healing) - DoCast(me, info->Id); - else - DoCastVictim(info->Id); - - //Set our global cooldown - globalCooldown = GENERIC_CREATURE_COOLDOWN; - } - else - me->AttackerStateUpdate(me->GetVictim()); - - me->resetAttackTimer(); - } - } - else - { - //Only run this code if we arn't already casting - if (!me->IsNonMeleeSpellCast(false)) - { - bool healing = false; - SpellInfo const* info = nullptr; - - //Select a healing spell if less than 30% hp ONLY 33% of the time - if (me->HealthBelowPct(30) && 33 > urand(0, 99)) - info = SelectSpell(me, 0, 0, SELECT_TARGET_ANY_FRIEND, 0, 0, 0, 0, SELECT_EFFECT_HEALING); - - //No healing spell available, See if we can cast a ranged spell (Range 
must be greater than ATTACK_DISTANCE) - if (info) - healing = true; - else - info = SelectSpell(me->GetVictim(), 0, 0, SELECT_TARGET_ANY_ENEMY, 0, 0, NOMINAL_MELEE_RANGE, 0, SELECT_EFFECT_DONTCARE); - - //Found a spell, check if we arn't on cooldown - if (info && !globalCooldown) - { - //If we are currently moving stop us and set the movement generator - if (me->GetMotionMaster()->GetCurrentMovementGeneratorType() != IDLE_MOTION_TYPE) - { - me->GetMotionMaster()->Clear(false); - me->GetMotionMaster()->MoveIdle(); - } - - //Cast spell - if (healing) - DoCast(me, info->Id); - else - DoCastVictim(info->Id); - - //Set our global cooldown - globalCooldown = GENERIC_CREATURE_COOLDOWN; - } //If no spells available and we arn't moving run to target - else if (me->GetMotionMaster()->GetCurrentMovementGeneratorType() != CHASE_MOTION_TYPE) - { - //Cancel our current spell and then mutate new movement generator - me->InterruptNonMeleeSpells(false); - me->GetMotionMaster()->Clear(false); - me->GetMotionMaster()->MoveChase(me->GetVictim()); - } - } - } - - DoMeleeAttackIfReady(); - } - - void DoReplyToTextEmote(uint32 emote) - { - switch (emote) - { - case TEXT_EMOTE_KISS: - me->HandleEmoteCommand(EMOTE_ONESHOT_BOW); - break; - - case TEXT_EMOTE_WAVE: - me->HandleEmoteCommand(EMOTE_ONESHOT_WAVE); - break; - - case TEXT_EMOTE_SALUTE: - me->HandleEmoteCommand(EMOTE_ONESHOT_SALUTE); - break; - - case TEXT_EMOTE_SHY: - me->HandleEmoteCommand(EMOTE_ONESHOT_FLEX); - break; - - case TEXT_EMOTE_RUDE: - case TEXT_EMOTE_CHICKEN: - me->HandleEmoteCommand(EMOTE_ONESHOT_POINT); - break; - } - } - - void ReceiveEmote(Player* player, uint32 textEmote) override - { - switch (me->GetEntry()) - { - case NPC_STORMWIND_CITY_GUARD: - case NPC_STORMWIND_CITY_PATROLLER: - case NPC_ORGRIMMAR_GRUNT: - break; - default: - return; - } - - if (!me->IsFriendlyTo(player)) - return; - - DoReplyToTextEmote(textEmote); - } - - private: - uint32 globalCooldown; - uint32 buffTimer; - }; - - CreatureAI* GetAI(Creature* creature) const override - { - return new guard_genericAI(creature); - } -}; - -enum GuardShattrath -{ - SPELL_BANISHED_SHATTRATH_A = 36642, - SPELL_BANISHED_SHATTRATH_S = 36671, - SPELL_BANISH_TELEPORT = 36643, - SPELL_EXILE = 39533 -}; - -class guard_shattrath_scryer : public CreatureScript -{ -public: - guard_shattrath_scryer() : CreatureScript("guard_shattrath_scryer") { } - - struct guard_shattrath_scryerAI : public GuardAI - { - guard_shattrath_scryerAI(Creature* creature) : GuardAI(creature) - { - Initialize(); - } - - void Initialize() - { - banishTimer = 5000; - exileTimer = 8500; - playerGUID.Clear(); - canTeleport = false; - } - - void Reset() override - { - Initialize(); - } - - void UpdateAI(uint32 diff) override - { - if (!UpdateVictim()) - return; - - if (canTeleport) - { - if (exileTimer <= diff) - { - if (Unit* temp = ObjectAccessor::GetUnit(*me, playerGUID)) - { - temp->CastSpell(temp, SPELL_EXILE, true); - temp->CastSpell(temp, SPELL_BANISH_TELEPORT, true); - } - playerGUID.Clear(); - exileTimer = 8500; - canTeleport = false; - } else exileTimer -= diff; - } - else if (banishTimer <= diff) - { - Unit* temp = me->GetVictim(); - if (temp && temp->GetTypeId() == TYPEID_PLAYER) - { - DoCast(temp, SPELL_BANISHED_SHATTRATH_A); - banishTimer = 9000; - playerGUID = temp->GetGUID(); - if (playerGUID) - canTeleport = true; - } - } else banishTimer -= diff; - - DoMeleeAttackIfReady(); - } - - private: - uint32 exileTimer; - uint32 banishTimer; - ObjectGuid playerGUID; - bool canTeleport; - }; - - CreatureAI* 
GetAI(Creature* creature) const override - { - return new guard_shattrath_scryerAI(creature); - } -}; - -class guard_shattrath_aldor : public CreatureScript -{ -public: - guard_shattrath_aldor() : CreatureScript("guard_shattrath_aldor") { } - - struct guard_shattrath_aldorAI : public GuardAI - { - guard_shattrath_aldorAI(Creature* creature) : GuardAI(creature) - { - Initialize(); - } - - void Initialize() - { - banishTimer = 5000; - exileTimer = 8500; - playerGUID.Clear(); - canTeleport = false; - } - - void Reset() override - { - Initialize(); - } - - void UpdateAI(uint32 diff) override - { - if (!UpdateVictim()) - return; - - if (canTeleport) - { - if (exileTimer <= diff) - { - if (Unit* temp = ObjectAccessor::GetUnit(*me, playerGUID)) - { - temp->CastSpell(temp, SPELL_EXILE, true); - temp->CastSpell(temp, SPELL_BANISH_TELEPORT, true); - } - playerGUID.Clear(); - exileTimer = 8500; - canTeleport = false; - } else exileTimer -= diff; - } - else if (banishTimer <= diff) - { - Unit* temp = me->GetVictim(); - if (temp && temp->GetTypeId() == TYPEID_PLAYER) - { - DoCast(temp, SPELL_BANISHED_SHATTRATH_S); - banishTimer = 9000; - playerGUID = temp->GetGUID(); - if (playerGUID) - canTeleport = true; - } - } else banishTimer -= diff; - - DoMeleeAttackIfReady(); - } - private: - uint32 exileTimer; - uint32 banishTimer; - ObjectGuid playerGUID; - bool canTeleport; - }; - - CreatureAI* GetAI(Creature* creature) const override - { - return new guard_shattrath_aldorAI(creature); - } -}; - -void AddSC_guards() -{ - new guard_generic(); - new guard_shattrath_aldor(); - new guard_shattrath_scryer(); -} diff --git a/src/server/scripts/World/npc_guard.cpp b/src/server/scripts/World/npc_guard.cpp new file mode 100644 index 00000000000..53bfddf56b6 --- /dev/null +++ b/src/server/scripts/World/npc_guard.cpp @@ -0,0 +1,246 @@ +/* + * Copyright (C) 2008-2017 TrinityCore <http://www.trinitycore.org/> + * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "GuardAI.h" +#include "ObjectAccessor.h" +#include "Player.h" +#include "Random.h" +#include "ScriptMgr.h" +#include "SpellInfo.h" + +enum GuardMisc +{ + SAY_GUARD_SIL_AGGRO = 0, + + NPC_CENARION_HOLD_INFANTRY = 15184, + NPC_STORMWIND_CITY_GUARD = 68, + NPC_STORMWIND_CITY_PATROLLER = 1976, + NPC_ORGRIMMAR_GRUNT = 3296, + NPC_ALDOR_VINDICATOR = 18549, + + SPELL_BANISHED_SHATTRATH_A = 36642, + SPELL_BANISHED_SHATTRATH_S = 36671, + SPELL_BANISH_TELEPORT = 36643, + SPELL_EXILE = 39533, +}; + +struct npc_guard_generic : public GuardAI +{ + npc_guard_generic(Creature* creature) : GuardAI(creature) + { + _scheduler.SetValidator([this] + { + return !me->HasUnitState(UNIT_STATE_CASTING) && !me->IsInEvadeMode() && me->IsAlive(); + }); + _combatScheduler.SetValidator([this] + { + return !me->HasUnitState(UNIT_STATE_CASTING); + }); + } + + void Reset() override + { + _scheduler.CancelAll(); + _combatScheduler.CancelAll(); + _scheduler.Schedule(Seconds(1), [this](TaskContext context) + { + // Find a spell that targets friendly and applies an aura (these are generally buffs) + if (SpellInfo const* spellInfo = SelectSpell(me, 0, 0, SELECT_TARGET_ANY_FRIEND, 0, 0, 0, 0, SELECT_EFFECT_AURA)) + DoCast(me, spellInfo->Id); + + context.Repeat(Minutes(10)); + }); + } + + void DoReplyToTextEmote(uint32 emote) + { + switch (emote) + { + case TEXT_EMOTE_KISS: + me->HandleEmoteCommand(EMOTE_ONESHOT_BOW); + break; + case TEXT_EMOTE_WAVE: + me->HandleEmoteCommand(EMOTE_ONESHOT_WAVE); + break; + case TEXT_EMOTE_SALUTE: + me->HandleEmoteCommand(EMOTE_ONESHOT_SALUTE); + break; + case TEXT_EMOTE_SHY: + me->HandleEmoteCommand(EMOTE_ONESHOT_FLEX); + break; + case TEXT_EMOTE_RUDE: + case TEXT_EMOTE_CHICKEN: + me->HandleEmoteCommand(EMOTE_ONESHOT_POINT); + break; + default: + break; + } + } + + void ReceiveEmote(Player* player, uint32 textEmote) override + { + switch (me->GetEntry()) + { + case NPC_STORMWIND_CITY_GUARD: + case NPC_STORMWIND_CITY_PATROLLER: + case NPC_ORGRIMMAR_GRUNT: + break; + default: + return; + } + + if (!me->IsFriendlyTo(player)) + return; + + DoReplyToTextEmote(textEmote); + } + + void EnterCombat(Unit* who) override + { + if (me->GetEntry() == NPC_CENARION_HOLD_INFANTRY) + Talk(SAY_GUARD_SIL_AGGRO, who); + + _combatScheduler.Schedule(Seconds(1), [this](TaskContext meleeContext) + { + Unit* victim = me->GetVictim(); + if (!me->isAttackReady() || !me->IsWithinMeleeRange(victim)) + { + meleeContext.Repeat(); + return; + } + if (roll_chance_i(20)) + { + if (SpellInfo const* spellInfo = SelectSpell(me->GetVictim(), 0, 0, SELECT_TARGET_ANY_ENEMY, 0, 0, 0, NOMINAL_MELEE_RANGE, SELECT_EFFECT_DONTCARE)) + { + me->resetAttackTimer(); + DoCastVictim(spellInfo->Id); + meleeContext.Repeat(); + return; + } + } + if (ShouldSparWith(victim)) + me->FakeAttackerStateUpdate(victim); + else + me->AttackerStateUpdate(victim); + me->resetAttackTimer(); + meleeContext.Repeat(); + }).Schedule(Seconds(5), [this](TaskContext spellContext) + { + bool healing = false; + SpellInfo const* spellInfo = nullptr; + + // Select a healing spell if less than 30% hp and ONLY 33% of the time + if (me->HealthBelowPct(30) && roll_chance_i(33)) + spellInfo = SelectSpell(me, 0, 0, SELECT_TARGET_ANY_FRIEND, 0, 0, 0, 0, SELECT_EFFECT_HEALING); + + // No healing spell available, check if we can cast a ranged spell + if (spellInfo) + healing = true; + else + spellInfo = SelectSpell(me->GetVictim(), 0, 0, SELECT_TARGET_ANY_ENEMY, 0, 0, NOMINAL_MELEE_RANGE, 0, SELECT_EFFECT_DONTCARE); + + // Found a spell + if (spellInfo) + { + if 
(healing) + DoCast(me, spellInfo->Id); + else + DoCastVictim(spellInfo->Id); + spellContext.Repeat(Seconds(5)); + } + else + spellContext.Repeat(Seconds(1)); + }); + } + + void UpdateAI(uint32 diff) override + { + _scheduler.Update(diff); + + if (!UpdateVictim()) + return; + + _combatScheduler.Update(diff); + } + +private: + TaskScheduler _scheduler; + TaskScheduler _combatScheduler; +}; + +struct npc_guard_shattrath_faction : public GuardAI +{ + npc_guard_shattrath_faction(Creature* creature) : GuardAI(creature) + { + _scheduler.SetValidator([this] + { + return !me->HasUnitState(UNIT_STATE_CASTING); + }); + } + + void Reset() override + { + _scheduler.CancelAll(); + } + + void EnterCombat(Unit* /*who*/) override + { + ScheduleVanish(); + } + + void UpdateAI(uint32 diff) override + { + if (!UpdateVictim()) + return; + + _scheduler.Update(diff, std::bind(&GuardAI::DoMeleeAttackIfReady, this)); + } + + void ScheduleVanish() + { + _scheduler.Schedule(Seconds(5), [this](TaskContext banishContext) + { + Unit* temp = me->GetVictim(); + if (temp && temp->GetTypeId() == TYPEID_PLAYER) + { + DoCast(temp, me->GetEntry() == NPC_ALDOR_VINDICATOR ? SPELL_BANISHED_SHATTRATH_S : SPELL_BANISHED_SHATTRATH_A); + ObjectGuid playerGUID = temp->GetGUID(); + banishContext.Schedule(Seconds(9), [this, playerGUID](TaskContext /*exileContext*/) + { + if (Unit* temp = ObjectAccessor::GetUnit(*me, playerGUID)) + { + temp->CastSpell(temp, SPELL_EXILE, true); + temp->CastSpell(temp, SPELL_BANISH_TELEPORT, true); + } + ScheduleVanish(); + }); + } + else + banishContext.Repeat(); + }); + } + +private: + TaskScheduler _scheduler; +}; + +void AddSC_npc_guard() +{ + RegisterCreatureAI(npc_guard_generic); + RegisterCreatureAI(npc_guard_shattrath_faction); +} diff --git a/src/server/scripts/World/npc_innkeeper.cpp b/src/server/scripts/World/npc_innkeeper.cpp index 7db733c7f78..ad579f2aa8d 100644 --- a/src/server/scripts/World/npc_innkeeper.cpp +++ b/src/server/scripts/World/npc_innkeeper.cpp @@ -42,7 +42,14 @@ enum Spells #define LOCALE_TRICK_OR_TREAT_6 "¡Truco o trato!" #define LOCALE_INNKEEPER_0 "Make this inn my home." +#define LOCALE_INNKEEPER_2 "Faites de cette auberge votre foyer." #define LOCALE_INNKEEPER_3 "Ich möchte dieses Gasthaus zu meinem Heimatort machen." +#define LOCALE_INNKEEPER_6 "Fija tu hogar en esta taberna." + +#define LOCALE_VENDOR_0 "I want to browse your goods." +#define LOCALE_VENDOR_2 "Je voudrais regarder vos articles." +#define LOCALE_VENDOR_3 "Ich sehe mich nur mal um." +#define LOCALE_VENDOR_6 "Quiero ver tus mercancías." 
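// The gossip hunk below repeats the same GetSessionDbcLocale() switch for the vendor and
// innkeeper entries. A hypothetical helper (not part of this change, shown only to
// illustrate the lookup pattern; LocaleConstant and the LOCALE_* values come from Common.h):
inline char const* GetLocalizedGossipEntry(LocaleConstant locale, char const* enUS, char const* frFR, char const* deDE, char const* esES)
{
    switch (locale)
    {
        case LOCALE_frFR: return frFR; // French client
        case LOCALE_deDE: return deDE; // German client
        case LOCALE_esES: return esES; // Spanish client
        case LOCALE_enUS:
        default:          return enUS; // English and any unhandled locale
    }
}
// Example call, matching the vendor branch added below:
// AddGossipItemFor(player, GOSSIP_ICON_VENDOR,
//     GetLocalizedGossipEntry(player->GetSession()->GetSessionDbcLocale(),
//         LOCALE_VENDOR_0, LOCALE_VENDOR_2, LOCALE_VENDOR_3, LOCALE_VENDOR_6),
//     GOSSIP_SENDER_MAIN, GOSSIP_ACTION_TRADE);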
class npc_innkeeper : public CreatureScript { @@ -72,14 +79,26 @@ public: player->PrepareQuestMenu(me->GetGUID()); if (me->IsVendor()) - AddGossipItemFor(player, GOSSIP_ICON_VENDOR, GOSSIP_TEXT_BROWSE_GOODS, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_TRADE); + { + char const* localizedEntry; + switch (player->GetSession()->GetSessionDbcLocale()) + { + case LOCALE_frFR: localizedEntry = LOCALE_VENDOR_2; break; + case LOCALE_deDE: localizedEntry = LOCALE_VENDOR_3; break; + case LOCALE_esES: localizedEntry = LOCALE_VENDOR_6; break; + case LOCALE_enUS: default: localizedEntry = LOCALE_VENDOR_0; + } + AddGossipItemFor(player, GOSSIP_ICON_VENDOR, localizedEntry, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_TRADE); + } if (me->IsInnkeeper()) { char const* localizedEntry; switch (player->GetSession()->GetSessionDbcLocale()) { + case LOCALE_frFR: localizedEntry = LOCALE_INNKEEPER_2; break; case LOCALE_deDE: localizedEntry = LOCALE_INNKEEPER_3; break; + case LOCALE_esES: localizedEntry = LOCALE_INNKEEPER_6; break; case LOCALE_enUS: default: localizedEntry = LOCALE_INNKEEPER_0; } AddGossipItemFor(player, GOSSIP_ICON_INTERACT_1, localizedEntry, GOSSIP_SENDER_MAIN, GOSSIP_ACTION_INN); @@ -147,4 +166,3 @@ void AddSC_npc_innkeeper() { new npc_innkeeper(); } - diff --git a/src/server/scripts/World/npcs_special.cpp b/src/server/scripts/World/npcs_special.cpp index 4ca666a55bb..f7b6173ea22 100644 --- a/src/server/scripts/World/npcs_special.cpp +++ b/src/server/scripts/World/npcs_special.cpp @@ -457,8 +457,7 @@ public: enum TorchTossingTarget { - NPC_TORCH_TOSSING_TARGET_BUNNY = 25535, - SPELL_TARGET_INDICATOR = 45723 + SPELL_TORCH_TARGET_PICKER = 45907 }; class npc_torch_tossing_target_bunny_controller : public CreatureScript @@ -468,42 +467,28 @@ public: struct npc_torch_tossing_target_bunny_controllerAI : public ScriptedAI { - npc_torch_tossing_target_bunny_controllerAI(Creature* creature) : ScriptedAI(creature) - { - _targetTimer = 3000; - } + npc_torch_tossing_target_bunny_controllerAI(Creature* creature) : ScriptedAI(creature) { } - ObjectGuid DoSearchForTargets(ObjectGuid lastTargetGUID) + void Reset() override { - std::list<Creature*> targets; - me->GetCreatureListWithEntryInGrid(targets, NPC_TORCH_TOSSING_TARGET_BUNNY, 60.0f); - targets.remove_if([lastTargetGUID](Creature* creature) { return creature->GetGUID() == lastTargetGUID; }); - - if (!targets.empty()) + _scheduler.Schedule(Seconds(2), [this](TaskContext context) { - _lastTargetGUID = Trinity::Containers::SelectRandomContainerElement(targets)->GetGUID(); - - return _lastTargetGUID; - } - return ObjectGuid::Empty; + me->CastCustomSpell(SPELL_TORCH_TARGET_PICKER, SPELLVALUE_MAX_TARGETS, 1); + _scheduler.Schedule(Seconds(3), [this](TaskContext /*context*/) + { + me->CastCustomSpell(SPELL_TORCH_TARGET_PICKER, SPELLVALUE_MAX_TARGETS, 1); + }); + context.Repeat(Seconds(5)); + }); } void UpdateAI(uint32 diff) override { - if (_targetTimer < diff) - { - if (Unit* target = ObjectAccessor::GetUnit(*me, DoSearchForTargets(_lastTargetGUID))) - target->CastSpell(target, SPELL_TARGET_INDICATOR, true); - - _targetTimer = 3000; - } - else - _targetTimer -= diff; + _scheduler.Update(diff); } private: - uint32 _targetTimer; - ObjectGuid _lastTargetGUID; + TaskScheduler _scheduler; }; CreatureAI* GetAI(Creature* creature) const override @@ -2893,7 +2878,7 @@ class CastFoodSpell : public BasicEvent bool Execute(uint64 /*execTime*/, uint32 /*diff*/) override { _owner->CastSpell(_owner, _spellId, true); - return false; + return true; } private: diff --git 
a/src/server/scripts/World/world_script_loader.cpp b/src/server/scripts/World/world_script_loader.cpp index 086bd8700f2..43ff1b7c9de 100644 --- a/src/server/scripts/World/world_script_loader.cpp +++ b/src/server/scripts/World/world_script_loader.cpp @@ -23,7 +23,7 @@ void AddSC_areatrigger_scripts(); void AddSC_emerald_dragons(); void AddSC_generic_creature(); void AddSC_go_scripts(); -void AddSC_guards(); +void AddSC_npc_guard(); void AddSC_item_scripts(); void AddSC_npc_professions(); void AddSC_npc_innkeeper(); @@ -43,7 +43,7 @@ void AddWorldScripts() AddSC_emerald_dragons(); AddSC_generic_creature(); AddSC_go_scripts(); - AddSC_guards(); + AddSC_npc_guard(); AddSC_item_scripts(); AddSC_npc_professions(); AddSC_npc_innkeeper(); diff --git a/src/server/shared/Dynamic/FactoryHolder.h b/src/server/shared/Dynamic/FactoryHolder.h index 9e67fe11467..09fa2816faf 100644 --- a/src/server/shared/Dynamic/FactoryHolder.h +++ b/src/server/shared/Dynamic/FactoryHolder.h @@ -54,4 +54,3 @@ class Permissible virtual int32 Permit(T const*) const = 0; }; #endif - diff --git a/src/server/shared/Dynamic/LinkedReference/RefManager.h b/src/server/shared/Dynamic/LinkedReference/RefManager.h index ad6bf1d79ad..0dbf1ebf2a6 100644 --- a/src/server/shared/Dynamic/LinkedReference/RefManager.h +++ b/src/server/shared/Dynamic/LinkedReference/RefManager.h @@ -51,4 +51,3 @@ class RefManager : public LinkedListHead //===================================================== #endif - diff --git a/src/server/shared/Dynamic/TypeList.h b/src/server/shared/Dynamic/TypeList.h index a8dcea05bcc..5afdcef5751 100644 --- a/src/server/shared/Dynamic/TypeList.h +++ b/src/server/shared/Dynamic/TypeList.h @@ -42,4 +42,3 @@ struct TypeList #define TYPELIST_5(T1, T2, T3, T4, T5) TypeList<T1, TYPELIST_4(T2, T3, T4, T5) > #define TYPELIST_6(T1, T2, T3, T4, T5, T6) TypeList<T1, TYPELIST_5(T2, T3, T4, T5, T6) > #endif - diff --git a/src/server/shared/Memory.h b/src/server/shared/Memory.h index e41e4baf025..b251efa34b6 100644 --- a/src/server/shared/Memory.h +++ b/src/server/shared/Memory.h @@ -31,4 +31,4 @@ inline void dtCustomFree(void* ptr) delete [] (unsigned char*)ptr; } -#endif
\ No newline at end of file +#endif diff --git a/src/tools/map_extractor/dbcfile.cpp b/src/tools/map_extractor/dbcfile.cpp index 25ded3e6c60..68cf7047fa9 100644 --- a/src/tools/map_extractor/dbcfile.cpp +++ b/src/tools/map_extractor/dbcfile.cpp @@ -98,4 +98,3 @@ DBCFile::Iterator DBCFile::end() assert(data); return Iterator(*this, stringTable); } - diff --git a/src/tools/map_extractor/dbcfile.h b/src/tools/map_extractor/dbcfile.h index 5130469b5f3..5b3aaeeb1d4 100644 --- a/src/tools/map_extractor/dbcfile.h +++ b/src/tools/map_extractor/dbcfile.h @@ -138,4 +138,3 @@ private: }; #endif - diff --git a/src/tools/mmaps_generator/MapBuilder.cpp b/src/tools/mmaps_generator/MapBuilder.cpp index df9c1607bd3..bfaa4211569 100644 --- a/src/tools/mmaps_generator/MapBuilder.cpp +++ b/src/tools/mmaps_generator/MapBuilder.cpp @@ -26,7 +26,7 @@ #include <limits.h> #define MMAP_MAGIC 0x4d4d4150 // 'MMAP' -#define MMAP_VERSION 6 +#define MMAP_VERSION 7 struct MmapTileHeader { @@ -598,8 +598,8 @@ namespace MMAP config.minRegionArea = rcSqr(60); config.mergeRegionArea = rcSqr(50); config.maxSimplificationError = 1.8f; // eliminates most jagged edges (tiny polygons) - config.detailSampleDist = config.cs * 64; - config.detailSampleMaxError = config.ch * 2; + config.detailSampleDist = config.cs * 16; + config.detailSampleMaxError = config.ch * 1; // this sets the dimensions of the heightfield - should maybe happen before border padding rcCalcGridSize(config.bmin, config.bmax, config.cs, &config.width, &config.height); diff --git a/src/tools/mmaps_generator/TerrainBuilder.h b/src/tools/mmaps_generator/TerrainBuilder.h index 4dcca5f15fe..06347f0471a 100644 --- a/src/tools/mmaps_generator/TerrainBuilder.h +++ b/src/tools/mmaps_generator/TerrainBuilder.h @@ -124,4 +124,3 @@ namespace MMAP } #endif - diff --git a/src/tools/vmap4_extractor/vmapexport.cpp b/src/tools/vmap4_extractor/vmapexport.cpp index aeac69b6751..e6340bde553 100644 --- a/src/tools/vmap4_extractor/vmapexport.cpp +++ b/src/tools/vmap4_extractor/vmapexport.cpp @@ -25,7 +25,7 @@ #include "Banner.h" #include <sys/stat.h> -#ifdef WIN32 +#ifdef _WIN32 #include <direct.h> #define mkdir _mkdir #endif @@ -65,7 +65,7 @@ bool preciseVectorData = false; //static const char * szWorkDirMaps = ".\\Maps"; char const* szWorkDirWmo = "./Buildings"; -char const* szRawVMAPMagic = "VMAP043"; +char const* szRawVMAPMagic = "VMAP044"; // Local testing functions diff --git a/src/tools/vmap4_extractor/wmo.cpp b/src/tools/vmap4_extractor/wmo.cpp index 441fe281c04..d98e1f9112d 100644 --- a/src/tools/vmap4_extractor/wmo.cpp +++ b/src/tools/vmap4_extractor/wmo.cpp @@ -349,10 +349,8 @@ int WMOGroup::ConvertToVMAPGroupWmo(FILE *output, WMORoot *rootWMO, bool precise { // Skip no collision triangles bool isRenderFace = (MOPY[2 * i] & WMO_MATERIAL_RENDER) && !(MOPY[2 * i] & WMO_MATERIAL_DETAIL); - bool isDetail = (MOPY[2 * i] & WMO_MATERIAL_DETAIL) != 0; - bool isCollision = (MOPY[2 * i] & WMO_MATERIAL_COLLISION) != 0; - - if (!isRenderFace && !isDetail && !isCollision) + bool isCollision = MOPY[2 * i] & WMO_MATERIAL_COLLISION || isRenderFace; + if (!isCollision) continue; // Use this triangle |
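// The wmo.cpp hunk above collapses the old three-flag test: a triangle is now exported
// only when it carries WMO_MATERIAL_COLLISION or is a renderable, non-detail face.
// Restated as a standalone predicate -- a sketch for illustration (the helper name is
// invented; wmo.cpp keeps the logic inline in ConvertToVMAPGroupWmo):
static bool KeepTriangleForVmap(uint8 materialFlags)
{
    bool isRenderFace = (materialFlags & WMO_MATERIAL_RENDER) && !(materialFlags & WMO_MATERIAL_DETAIL);
    return (materialFlags & WMO_MATERIAL_COLLISION) != 0 || isRenderFace;
}
// Because the extracted geometry changes, the on-disk formats are invalidated in the same
// commit: szRawVMAPMagic moves from VMAP043 to VMAP044 and MMAP_VERSION from 6 to 7, so
// previously generated vmaps and mmaps must be re-extracted.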
