author    | megamage <none@none> | 2009-02-18 11:36:19 -0600
committer | megamage <none@none> | 2009-02-18 11:36:19 -0600
commit    | 31637d4aac6ae2081ccb3db976cab17936019f1a (patch)
tree      | a0e098d4afafaddcb901486d00da1563ecd628b9 /dep/src
parent    | 017f309b5b5b981b6edfe0ae071971d513828a20 (diff)
parent    | 7bff1c1d60b78ba78f2ab0a9c67bdde80427ea30 (diff)
*Merge.
--HG--
branch : trunk
Diffstat (limited to 'dep/src')
117 files changed, 765 insertions(+), 681 deletions(-)
diff --git a/dep/src/g3dlite/AABox.cpp b/dep/src/g3dlite/AABox.cpp index 0baa1a07419..f2f2b70535c 100644 --- a/dep/src/g3dlite/AABox.cpp +++ b/dep/src/g3dlite/AABox.cpp @@ -297,3 +297,4 @@ bool AABox::intersects(const class Sphere& sphere) const { } // namespace + diff --git a/dep/src/g3dlite/Box.cpp b/dep/src/g3dlite/Box.cpp index b99104ff28b..c30211fd5b9 100644 --- a/dep/src/g3dlite/Box.cpp +++ b/dep/src/g3dlite/Box.cpp @@ -346,3 +346,4 @@ void Box::getBounds(class AABox& aabb) const { } // namespace + diff --git a/dep/src/g3dlite/Crypto.cpp b/dep/src/g3dlite/Crypto.cpp index 763c2fefd4b..bff3a5a966c 100644 --- a/dep/src/g3dlite/Crypto.cpp +++ b/dep/src/g3dlite/Crypto.cpp @@ -144,3 +144,4 @@ uint32 Crypto::crc32(const void* byte, size_t numBytes) { } } // G3D + diff --git a/dep/src/g3dlite/Matrix3.cpp b/dep/src/g3dlite/Matrix3.cpp index c6fc9729b34..630c1883c0b 100644 --- a/dep/src/g3dlite/Matrix3.cpp +++ b/dep/src/g3dlite/Matrix3.cpp @@ -1684,3 +1684,4 @@ std::string Matrix3::toString() const { } // namespace + diff --git a/dep/src/g3dlite/Plane.cpp b/dep/src/g3dlite/Plane.cpp index 5265ef8f670..10ee7ff0f0c 100644 --- a/dep/src/g3dlite/Plane.cpp +++ b/dep/src/g3dlite/Plane.cpp @@ -129,3 +129,4 @@ std::string Plane::toString() const { } } + diff --git a/dep/src/g3dlite/System.cpp b/dep/src/g3dlite/System.cpp index f28c9d62a9a..cb7169a4c4d 100644 --- a/dep/src/g3dlite/System.cpp +++ b/dep/src/g3dlite/System.cpp @@ -664,3 +664,4 @@ void System::alignedFree(void* _ptr) { } // namespace + diff --git a/dep/src/g3dlite/Triangle.cpp b/dep/src/g3dlite/Triangle.cpp index d3fa835c7c8..bb5b8f94635 100644 --- a/dep/src/g3dlite/Triangle.cpp +++ b/dep/src/g3dlite/Triangle.cpp @@ -110,3 +110,4 @@ void Triangle::getBounds(AABox& out) const { } } // G3D + diff --git a/dep/src/g3dlite/Vector3.cpp b/dep/src/g3dlite/Vector3.cpp index 274769a1e0b..09c3003888b 100644 --- a/dep/src/g3dlite/Vector3.cpp +++ b/dep/src/g3dlite/Vector3.cpp @@ -441,3 +441,4 @@ Vector4 Vector3::zzzz() const { return Vector4 (z, z, z, z); } } // namespace + diff --git a/dep/src/g3dlite/Vector4.cpp b/dep/src/g3dlite/Vector4.cpp index 2a597df65ed..91f916d1040 100644 --- a/dep/src/g3dlite/Vector4.cpp +++ b/dep/src/g3dlite/Vector4.cpp @@ -436,3 +436,4 @@ Vector4 Vector4::wwww() const { return Vector4 (w, w, w, w); } }; // namespace + diff --git a/dep/src/g3dlite/format.cpp b/dep/src/g3dlite/format.cpp index 8737128a0f8..13f01e26e6b 100644 --- a/dep/src/g3dlite/format.cpp +++ b/dep/src/g3dlite/format.cpp @@ -169,3 +169,4 @@ std::string vformat(const char* fmt, va_list argPtr) { #endif #undef NEWLINE + diff --git a/dep/src/sockets/Base64.cpp b/dep/src/sockets/Base64.cpp index 95b91555beb..cb66b93af6f 100644 --- a/dep/src/sockets/Base64.cpp +++ b/dep/src/sockets/Base64.cpp @@ -270,3 +270,4 @@ size_t Base64::decode_length(const std::string& str64) #endif + diff --git a/dep/src/sockets/Exception.cpp b/dep/src/sockets/Exception.cpp index b3844982c0f..33a7b4a133c 100644 --- a/dep/src/sockets/Exception.cpp +++ b/dep/src/sockets/Exception.cpp @@ -46,3 +46,4 @@ const std::string Exception::ToString() const } // namespace SOCKETS_NAMESPACE { #endif + diff --git a/dep/src/sockets/Ipv4Address.cpp b/dep/src/sockets/Ipv4Address.cpp index 88a7d33fb63..b89cc2a449b 100644 --- a/dep/src/sockets/Ipv4Address.cpp +++ b/dep/src/sockets/Ipv4Address.cpp @@ -212,3 +212,4 @@ std::string Ipv4Address::Reverse() } // namespace SOCKETS_NAMESPACE { #endif + diff --git a/dep/src/sockets/Ipv6Address.cpp b/dep/src/sockets/Ipv6Address.cpp index c6f1443af2e..6c92de24871 
100644 --- a/dep/src/sockets/Ipv6Address.cpp +++ b/dep/src/sockets/Ipv6Address.cpp @@ -268,3 +268,4 @@ std::string Ipv6Address::Reverse() #endif // IPPROTO_IPV6 #endif // ENABLE_IPV6 + diff --git a/dep/src/sockets/Lock.cpp b/dep/src/sockets/Lock.cpp index 3f7ad87b2ba..597ca30afc6 100644 --- a/dep/src/sockets/Lock.cpp +++ b/dep/src/sockets/Lock.cpp @@ -53,3 +53,4 @@ Lock::~Lock() } #endif + diff --git a/dep/src/sockets/Mutex.cpp b/dep/src/sockets/Mutex.cpp index 8284fefacd8..3bfe64d04c7 100644 --- a/dep/src/sockets/Mutex.cpp +++ b/dep/src/sockets/Mutex.cpp @@ -79,3 +79,4 @@ void Mutex::Unlock() } #endif + diff --git a/dep/src/sockets/Parse.cpp b/dep/src/sockets/Parse.cpp index 4d4dd99cd98..ccbd553b0e6 100644 --- a/dep/src/sockets/Parse.cpp +++ b/dep/src/sockets/Parse.cpp @@ -319,3 +319,4 @@ void Parse::getline(std::string&s) #endif + diff --git a/dep/src/sockets/ResolvServer.cpp b/dep/src/sockets/ResolvServer.cpp index 3923df33079..492d6705080 100644 --- a/dep/src/sockets/ResolvServer.cpp +++ b/dep/src/sockets/ResolvServer.cpp @@ -95,3 +95,4 @@ bool ResolvServer::Ready() #endif // ENABLE_RESOLVER + diff --git a/dep/src/sockets/ResolvSocket.cpp b/dep/src/sockets/ResolvSocket.cpp index 6096dc89e37..236e93f9704 100644 --- a/dep/src/sockets/ResolvSocket.cpp +++ b/dep/src/sockets/ResolvSocket.cpp @@ -434,3 +434,4 @@ DEB(fprintf(stderr, " *** Update cache for [%s][%s] = '%s'\n", m_query.c_str(), #endif // ENABLE_RESOLVER + diff --git a/dep/src/sockets/Socket.cpp b/dep/src/sockets/Socket.cpp index 5e739ec0c71..bf1a73abf6e 100644 --- a/dep/src/sockets/Socket.cpp +++ b/dep/src/sockets/Socket.cpp @@ -1896,3 +1896,4 @@ std::string Socket::GetSockAddress6() } #endif + diff --git a/dep/src/sockets/SocketHandler.cpp b/dep/src/sockets/SocketHandler.cpp index 8a7307f4192..7ade1e99eb6 100644 --- a/dep/src/sockets/SocketHandler.cpp +++ b/dep/src/sockets/SocketHandler.cpp @@ -1420,3 +1420,4 @@ void SocketHandler::Trigger(int id, Socket::TriggerData& data, bool erase) } #endif + diff --git a/dep/src/sockets/StdoutLog.cpp b/dep/src/sockets/StdoutLog.cpp index 93ae535e6d3..054b8489bb3 100644 --- a/dep/src/sockets/StdoutLog.cpp +++ b/dep/src/sockets/StdoutLog.cpp @@ -94,3 +94,4 @@ void StdoutLog::error(ISocketHandler *,Socket *sock,const std::string& call,int #endif + diff --git a/dep/src/sockets/StreamSocket.cpp b/dep/src/sockets/StreamSocket.cpp index 4412d498536..c8a3c32e496 100644 --- a/dep/src/sockets/StreamSocket.cpp +++ b/dep/src/sockets/StreamSocket.cpp @@ -167,3 +167,4 @@ int StreamSocket::GetShutdown() } // namespace SOCKETS_NAMESPACE { #endif + diff --git a/dep/src/sockets/TcpSocket.cpp b/dep/src/sockets/TcpSocket.cpp index 1dc54dd3fac..9dab541eebe 100644 --- a/dep/src/sockets/TcpSocket.cpp +++ b/dep/src/sockets/TcpSocket.cpp @@ -1743,3 +1743,4 @@ void TcpSocket::OnTransferLimit() } #endif + diff --git a/dep/src/sockets/Thread.cpp b/dep/src/sockets/Thread.cpp index e1834ab4a87..a387a7e3824 100644 --- a/dep/src/sockets/Thread.cpp +++ b/dep/src/sockets/Thread.cpp @@ -164,3 +164,4 @@ bool Thread::IsDestructor() #endif + diff --git a/dep/src/sockets/UdpSocket.cpp b/dep/src/sockets/UdpSocket.cpp index 229a26414e5..f4c3d2f9657 100644 --- a/dep/src/sockets/UdpSocket.cpp +++ b/dep/src/sockets/UdpSocket.cpp @@ -850,3 +850,4 @@ void UdpSocket::SetTimestamp(bool x) #endif + diff --git a/dep/src/sockets/Utility.cpp b/dep/src/sockets/Utility.cpp index e552da3b6e7..6473f0a2564 100644 --- a/dep/src/sockets/Utility.cpp +++ b/dep/src/sockets/Utility.cpp @@ -997,3 +997,4 @@ unsigned long Utility::Rng::Get() } #endif + 
diff --git a/dep/src/sockets/socket_include.cpp b/dep/src/sockets/socket_include.cpp index 6f08e8e16ab..133ff745d91 100644 --- a/dep/src/sockets/socket_include.cpp +++ b/dep/src/sockets/socket_include.cpp @@ -88,3 +88,4 @@ static char tmp[100]; + diff --git a/dep/src/zlib/crc32.h b/dep/src/zlib/crc32.h index 8053b6117c0..dfd1deaa522 100644 --- a/dep/src/zlib/crc32.h +++ b/dep/src/zlib/crc32.h @@ -439,3 +439,4 @@ local const unsigned long FAR crc_table[TBLS][256] = #endif } }; + diff --git a/dep/src/zlib/deflate.h b/dep/src/zlib/deflate.h index 05a5ab3a2c1..d56e97db4cb 100644 --- a/dep/src/zlib/deflate.h +++ b/dep/src/zlib/deflate.h @@ -329,3 +329,4 @@ void _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, #endif #endif /* DEFLATE_H */ + diff --git a/dep/src/zlib/inffast.h b/dep/src/zlib/inffast.h index 1e88d2d97b5..c66fae6da2c 100644 --- a/dep/src/zlib/inffast.h +++ b/dep/src/zlib/inffast.h @@ -9,3 +9,4 @@ */ void inflate_fast OF((z_streamp strm, unsigned start)); + diff --git a/dep/src/zlib/inffixed.h b/dep/src/zlib/inffixed.h index 75ed4b5978d..3757019557f 100644 --- a/dep/src/zlib/inffixed.h +++ b/dep/src/zlib/inffixed.h @@ -92,3 +92,4 @@ {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073}, {22,5,193},{64,5,0} }; + diff --git a/dep/src/zlib/inflate.h b/dep/src/zlib/inflate.h index 07bd3e78a7c..7c05e08fcba 100644 --- a/dep/src/zlib/inflate.h +++ b/dep/src/zlib/inflate.h @@ -113,3 +113,4 @@ struct inflate_state { unsigned short work[288]; /* work area for code table building */ code codes[ENOUGH]; /* space for code tables */ }; + diff --git a/dep/src/zlib/inftrees.h b/dep/src/zlib/inftrees.h index b1104c87e76..977ee2f78f5 100644 --- a/dep/src/zlib/inftrees.h +++ b/dep/src/zlib/inftrees.h @@ -53,3 +53,4 @@ typedef enum { extern int inflate_table OF((codetype type, unsigned short FAR *lens, unsigned codes, code FAR * FAR *table, unsigned FAR *bits, unsigned short FAR *work)); + diff --git a/dep/src/zlib/trees.h b/dep/src/zlib/trees.h index 72facf900f7..ba8cd78357b 100644 --- a/dep/src/zlib/trees.h +++ b/dep/src/zlib/trees.h @@ -126,3 +126,4 @@ local const int base_dist[D_CODES] = { 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576 }; + diff --git a/dep/src/zlib/zconf.h b/dep/src/zlib/zconf.h index 03a9431c8be..719855a60d4 100644 --- a/dep/src/zlib/zconf.h +++ b/dep/src/zlib/zconf.h @@ -330,3 +330,4 @@ typedef uLong FAR uLongf; #endif #endif /* ZCONF_H */ + diff --git a/dep/src/zlib/zlib.h b/dep/src/zlib/zlib.h index 022817927ce..2ad74617098 100644 --- a/dep/src/zlib/zlib.h +++ b/dep/src/zlib/zlib.h @@ -1355,3 +1355,4 @@ ZEXTERN const uLongf * ZEXPORT get_crc_table OF((void)); #endif #endif /* ZLIB_H */ + diff --git a/dep/src/zlib/zutil.h b/dep/src/zlib/zutil.h index b7d5eff81b6..b8a722d904d 100644 --- a/dep/src/zlib/zutil.h +++ b/dep/src/zlib/zutil.h @@ -267,3 +267,4 @@ void zcfree OF((voidpf opaque, voidpf ptr)); #define TRY_FREE(s, p) {if (p) ZFREE(s, p);} #endif /* ZUTIL_H */ + diff --git a/dep/src/zthread/ConcurrentExecutor.cxx b/dep/src/zthread/ConcurrentExecutor.cxx index a65e9c5e909..c5076d135ce 100644 --- a/dep/src/zthread/ConcurrentExecutor.cxx +++ b/dep/src/zthread/ConcurrentExecutor.cxx @@ -24,7 +24,7 @@ namespace ZThread { - ConcurrentExecutor::ConcurrentExecutor() + ConcurrentExecutor::ConcurrentExecutor() : _executor(1) {} void ConcurrentExecutor::interrupt() { @@ -34,21 +34,21 @@ namespace ZThread { void ConcurrentExecutor::execute(const Task& task) { _executor.execute(task); } - + void ConcurrentExecutor::cancel() { 
_executor.cancel(); } - + bool ConcurrentExecutor::isCanceled() { - return _executor.isCanceled(); + return _executor.isCanceled(); } - + void ConcurrentExecutor::wait() { _executor.wait(); } - + bool ConcurrentExecutor::wait(unsigned long timeout) { - return _executor.wait(timeout); + return _executor.wait(timeout); } } diff --git a/dep/src/zthread/Condition.cxx b/dep/src/zthread/Condition.cxx index 39485fb5ca4..f1ea13f35e8 100644 --- a/dep/src/zthread/Condition.cxx +++ b/dep/src/zthread/Condition.cxx @@ -32,14 +32,14 @@ namespace ZThread { }; Condition::Condition(Lockable& lock) { - + _impl = new FifoConditionImpl(lock); } Condition::~Condition() { - + if(_impl != 0) delete _impl; @@ -64,7 +64,7 @@ namespace ZThread { void Condition::signal() { - + _impl->signal(); } diff --git a/dep/src/zthread/ConditionImpl.h b/dep/src/zthread/ConditionImpl.h index ad1503ef197..923bf2bc39d 100644 --- a/dep/src/zthread/ConditionImpl.h +++ b/dep/src/zthread/ConditionImpl.h @@ -375,3 +375,4 @@ bool ConditionImpl<List>::wait(unsigned long timeout) { } // namespace ZThread #endif // __ZTCONDITIONIMPL_H__ + diff --git a/dep/src/zthread/CountingSemaphore.cxx b/dep/src/zthread/CountingSemaphore.cxx index 43e8b8cfba7..1aa0ff79c77 100644 --- a/dep/src/zthread/CountingSemaphore.cxx +++ b/dep/src/zthread/CountingSemaphore.cxx @@ -29,9 +29,9 @@ namespace ZThread { CountingSemaphore::CountingSemaphore(int initialCount) { - + _impl = new FifoSemaphoreImpl(initialCount, 0 , false); - + } diff --git a/dep/src/zthread/Debug.h b/dep/src/zthread/Debug.h index 1762de22fe3..10cc741834f 100644 --- a/dep/src/zthread/Debug.h +++ b/dep/src/zthread/Debug.h @@ -30,3 +30,4 @@ #endif #endif + diff --git a/dep/src/zthread/DeferredInterruptionScope.h b/dep/src/zthread/DeferredInterruptionScope.h index 5205c80ca58..58050da6d8c 100644 --- a/dep/src/zthread/DeferredInterruptionScope.h +++ b/dep/src/zthread/DeferredInterruptionScope.h @@ -61,3 +61,4 @@ class DeferredInterruptionScope { } #endif // __ZTDEFERREDINTERRUPTIONSCOPE_H__ + diff --git a/dep/src/zthread/FastLock.h b/dep/src/zthread/FastLock.h index 4d7f34a086d..8a955bf7b7d 100644 --- a/dep/src/zthread/FastLock.h +++ b/dep/src/zthread/FastLock.h @@ -69,3 +69,4 @@ #endif #endif // __ZTFASTLOCKSELECT_H__ + diff --git a/dep/src/zthread/FastMutex.cxx b/dep/src/zthread/FastMutex.cxx index 464dd83e5e0..fce54a127a2 100644 --- a/dep/src/zthread/FastMutex.cxx +++ b/dep/src/zthread/FastMutex.cxx @@ -28,7 +28,7 @@ namespace ZThread { FastMutex::FastMutex() : _lock(new FastLock) { } FastMutex::~FastMutex() { - delete _lock; + delete _lock; } @@ -39,7 +39,7 @@ namespace ZThread { } bool FastMutex::tryAcquire(unsigned long timeout) { - + return _lock->tryAcquire(timeout); } diff --git a/dep/src/zthread/FastRecursiveLock.h b/dep/src/zthread/FastRecursiveLock.h index 96b66ebe675..b26f5f231d5 100644 --- a/dep/src/zthread/FastRecursiveLock.h +++ b/dep/src/zthread/FastRecursiveLock.h @@ -72,3 +72,4 @@ #endif #endif // __ZTFASTRECURSIVELOCKSELECT_H__ + diff --git a/dep/src/zthread/FastRecursiveMutex.cxx b/dep/src/zthread/FastRecursiveMutex.cxx index 5ca677a654d..f84e207192f 100644 --- a/dep/src/zthread/FastRecursiveMutex.cxx +++ b/dep/src/zthread/FastRecursiveMutex.cxx @@ -25,10 +25,10 @@ namespace ZThread { - FastRecursiveMutex::FastRecursiveMutex() + FastRecursiveMutex::FastRecursiveMutex() : _lock(new FastRecursiveLock) { } - FastRecursiveMutex::~FastRecursiveMutex() + FastRecursiveMutex::~FastRecursiveMutex() { delete _lock; } @@ -39,7 +39,7 @@ namespace ZThread { } bool 
FastRecursiveMutex::tryAcquire(unsigned long timeout) { - + return _lock->tryAcquire(timeout); } diff --git a/dep/src/zthread/IntrusivePtr.h b/dep/src/zthread/IntrusivePtr.h index 189ebaf8ad7..818f41ea698 100644 --- a/dep/src/zthread/IntrusivePtr.h +++ b/dep/src/zthread/IntrusivePtr.h @@ -97,3 +97,4 @@ public: }; #endif + diff --git a/dep/src/zthread/Monitor.cxx b/dep/src/zthread/Monitor.cxx index 9a578e796ed..2afb6162a22 100644 --- a/dep/src/zthread/Monitor.cxx +++ b/dep/src/zthread/Monitor.cxx @@ -26,10 +26,10 @@ #include "Monitor.h" // This file will select an implementation for a Monitor based on -// what Monitor.h selects. This method is for selecting the +// what Monitor.h selects. This method is for selecting the // source files, to improve portability. Currently, the project is // based on the autoconf tool-set, which doesn't support conditional -// compilation well. Additionally, this should make the library +// compilation well. Additionally, this should make the library // easier to port since its working around conditional compilation // by using C++ features and people won't have to fiddle around with // their make tool as much to compile the source diff --git a/dep/src/zthread/Monitor.h b/dep/src/zthread/Monitor.h index 1170982292e..d6d4aafc7d0 100644 --- a/dep/src/zthread/Monitor.h +++ b/dep/src/zthread/Monitor.h @@ -58,3 +58,4 @@ #endif #endif // __ZTMONITORSELECT_H__ + diff --git a/dep/src/zthread/Mutex.cxx b/dep/src/zthread/Mutex.cxx index eca38ba89c6..a98897d090d 100644 --- a/dep/src/zthread/Mutex.cxx +++ b/dep/src/zthread/Mutex.cxx @@ -64,5 +64,5 @@ namespace ZThread { -} // namespace ZThread +} // namespace ZThread diff --git a/dep/src/zthread/MutexImpl.h b/dep/src/zthread/MutexImpl.h index 21338ba289c..83b32d47a98 100644 --- a/dep/src/zthread/MutexImpl.h +++ b/dep/src/zthread/MutexImpl.h @@ -375,3 +375,4 @@ void MutexImpl<List, Behavior>::release() { + diff --git a/dep/src/zthread/PoolExecutor.cxx b/dep/src/zthread/PoolExecutor.cxx index cf84e145453..82ace996035 100644 --- a/dep/src/zthread/PoolExecutor.cxx +++ b/dep/src/zthread/PoolExecutor.cxx @@ -42,7 +42,7 @@ namespace ZThread { class WaiterQueue { typedef std::deque<ThreadImpl*> ThreadList; - + typedef struct group_t { size_t id; size_t count; @@ -68,14 +68,14 @@ namespace ZThread { void operator()(const Group& grp) { count += grp.count; } operator size_t() { return count; } }; - + FastMutex _lock; GroupList _list; size_t _id; size_t _generation; public: - + WaiterQueue() : _id(0), _generation(0) { // At least one empty-group exists _list.push_back( Group(_id++) ); @@ -89,7 +89,7 @@ namespace ZThread { */ bool wait(unsigned long timeout) { - ThreadImpl* current = ThreadImpl::current(); + ThreadImpl* current = ThreadImpl::current(); Monitor& m = current->getMonitor(); Monitor::STATE state; @@ -103,15 +103,15 @@ namespace ZThread { if((size_t)std::for_each(_list.begin(), _list.end(), counter()) < 1) return true; - // Update the waiter list for the active group + // Update the waiter list for the active group _list.back().waiters.push_back(current); size_t n = _list.back().id; m.acquire(); - + { - Guard<FastMutex, UnlockedScope> g2(g1); + Guard<FastMutex, UnlockedScope> g2(g1); state = timeout == 0 ? 
m.wait() : m.wait(timeout); } @@ -142,22 +142,22 @@ namespace ZThread { throw Interrupted_Exception(); default: throw Synchronization_Exception(); - } - + } + return true; } - + /** * Increment the active group count * * @pre at least 1 empty group exists - * @post at least 1 non-empty group exists + * @post at least 1 non-empty group exists */ std::pair<size_t, size_t> increment() { - + Guard<FastMutex> g(_lock); - + // At least one empty-group exists assert(!_list.empty()); @@ -176,7 +176,7 @@ namespace ZThread { // When the active group is being incremented, insert a new active group // to replace it if there were waiting threads - if(i == --_list.end() && !i->waiters.empty()) + if(i == --_list.end() && !i->waiters.empty()) _list.push_back(Group(_id++)); // At least 1 non-empty group exists @@ -185,27 +185,27 @@ namespace ZThread { return std::make_pair(n, _generation); } - + /** * Decrease the count for the group with the given id. * * @param n group id - * + * * @pre At least 1 non-empty group exists * @post At least 1 empty group exists */ void decrement(size_t n) { - + Guard<FastMutex> g1(_lock); - + // At least 1 non-empty group exists assert((size_t)std::for_each(_list.begin(), _list.end(), counter()) > 0); // Find the requested group GroupList::iterator i = std::find_if(_list.begin(), _list.end(), by_id(n)); if(i == _list.end()) { - + // A group should never have been removed until // the final task in that group completed assert(0); @@ -214,11 +214,11 @@ namespace ZThread { // Decrease the count for tasks in this group, if(--i->count == 0 && i == _list.begin()) { - - do { + + do { // When the first group completes, wake all waiters for every - // group, starting from the first until a group that is not + // group, starting from the first until a group that is not // complete is reached /* @@ -226,29 +226,29 @@ namespace ZThread { if(i == --_list.end() && i->waiters.empty()) break; */ - + if( awaken(*i) ) { - - // If all waiters were awakened, remove the group + + // If all waiters were awakened, remove the group i = _list.erase(i); - + } else { - + { // Otherwise, unlock and yield allowing the waiter // lists to be updated if other threads are busy Guard<FastLock, UnlockedScope> g2(g1); ThreadImpl::yield(); - + } i = _list.begin(); } - - } while(i != _list.end() && i->count == 0); - + + } while(i != _list.end() && i->count == 0); + // Ensure that an active group exists if(_list.empty()) _list.push_back( Group(++_id) ); @@ -257,7 +257,7 @@ namespace ZThread { // At least one group exists assert(!_list.empty()); - + } /** @@ -268,40 +268,40 @@ namespace ZThread { return next ? _generation++ : _generation; } - + private: - + /** * Awaken all the waiters remaining in the given group - * + * * @return * - true if all the waiting threads were successfully awakened. * - false if there were one or more threads that could not be awakened. */ bool awaken(Group& grp) { - // Go through the waiter list in the given group; + // Go through the waiter list in the given group; for(ThreadList::iterator i = grp.waiters.begin(); i != grp.waiters.end();) { - + ThreadImpl* impl = *i; Monitor& m = impl->getMonitor(); - + // Try the monitor lock, if it cant be locked skip to the next waiter if(m.tryAcquire()) { - + // Notify the monitor & remove from the waiter list so time isn't // wasted checking it again. i = grp.waiters.erase(i); - + // Try to wake the waiter, it doesn't matter if this is successful - // or not (only fails when the monitor is already going to stop waiting). 
+ // or not (only fails when the monitor is already going to stop waiting). m.notify(); m.release(); - + } else ++i; - + } - + return grp.waiters.empty(); } @@ -310,13 +310,13 @@ namespace ZThread { /** * @class GroupedRunnable - * - * Wrap a task with group and generation information. + * + * Wrap a task with group and generation information. * * - 'group' allows tasks to be grouped together so that lists of waiting * threads can be managed. * - * - 'generation' allows tasks to be interrupted + * - 'generation' allows tasks to be interrupted */ class GroupedRunnable : public Runnable { @@ -329,10 +329,10 @@ namespace ZThread { public: GroupedRunnable(const Task& task, WaiterQueue& queue) - : _task(task), _queue(queue) { - + : _task(task), _queue(queue) { + std::pair<size_t, size_t> pr( _queue.increment() ); - + _group = pr.first; _generation = pr.second; @@ -368,10 +368,10 @@ namespace ZThread { * */ class ExecutorImpl { - + typedef MonitoredQueue<ExecutorTask, FastMutex> TaskQueue; typedef std::deque<ThreadImpl*> ThreadList; - + TaskQueue _taskQueue; WaiterQueue _waitingQueue; @@ -380,19 +380,19 @@ namespace ZThread { public: - + ExecutorImpl() : _size(0) {} void registerThread() { - + Guard<TaskQueue> g(_taskQueue); ThreadImpl* impl = ThreadImpl::current(); _threads.push_back(impl); // current cancel if too many threads are being created - if(_threads.size() > _size) + if(_threads.size() > _size) impl->cancel(); } @@ -408,14 +408,14 @@ namespace ZThread { // Wrap the task with a grouped task GroupedRunnable* runnable = new GroupedRunnable(task, _waitingQueue); - + try { - + _taskQueue.add( ExecutorTask(runnable) ); } catch(...) { - // Incase the queue is canceled between the time the WaiterQueue is + // Incase the queue is canceled between the time the WaiterQueue is // updated and the task is added to the TaskQueue _waitingQueue.decrement( runnable->group() ); throw; @@ -430,41 +430,41 @@ namespace ZThread { _waitingQueue.generation(true); Guard<TaskQueue> g(_taskQueue); - + // Interrupt all threads currently running, thier tasks would be // from an older generation for(ThreadList::iterator i = _threads.begin(); i != _threads.end(); ++i) (*i)->interrupt(); - + } //! Adjust the number of desired workers and return the number of Threads needed size_t workers(size_t n) { - + Guard<TaskQueue> g(_taskQueue); size_t m = (_size < n) ? (n - _size) : 0; _size = n; - + return m; } - + size_t workers() { - + Guard<TaskQueue> g(_taskQueue); return _size; - + } - + ExecutorTask next() { - + ExecutorTask task; - + // Draw the task from the queue for(;;) { - try { + try { task = _taskQueue.next(); break; @@ -473,13 +473,13 @@ namespace ZThread { // Ignore interruption here, it can only come from // another thread interrupt()ing the executor. The - // thread was interrupted in the hopes it was busy + // thread was interrupted in the hopes it was busy // with a task } } - + // Interrupt the thread running the tasks when the generation // does not match the current generation if( task->generation() != _waitingQueue.generation() ) @@ -512,31 +512,31 @@ namespace ZThread { class Worker : public Runnable { CountedPtr< ExecutorImpl > _impl; - + public: - + //! Create a Worker that draws upon the given Queue - Worker(const CountedPtr< ExecutorImpl >& impl) + Worker(const CountedPtr< ExecutorImpl >& impl) : _impl(impl) { } - + //! 
Run until Thread or Queue are canceled - void run() { - + void run() { + _impl->registerThread(); - + // Run until the Queue is canceled while(!Thread::canceled()) { - + // Draw tasks from the queue ExecutorTask task( _impl->next() ); task->run(); - - } - + + } + _impl->unregisterThread(); - + } - + }; /* Worker */ @@ -547,10 +547,10 @@ namespace ZThread { public: - Shutdown(const CountedPtr< ExecutorImpl >& impl) + Shutdown(const CountedPtr< ExecutorImpl >& impl) : _impl(impl) { } - - void run() { + + void run() { _impl->cancel(); } @@ -560,35 +560,35 @@ namespace ZThread { PoolExecutor::PoolExecutor(size_t n) : _impl( new ExecutorImpl() ), _shutdown( new Shutdown(_impl) ) { - + size(n); - + // Request cancelation when main() exits ThreadQueue::instance()->insertShutdownTask(_shutdown); } - PoolExecutor::~PoolExecutor() { + PoolExecutor::~PoolExecutor() { try { - + /** * If the shutdown task for this executor has not already been * selected to run, then run it locally */ - if(ThreadQueue::instance()->removeShutdownTask(_shutdown)) + if(ThreadQueue::instance()->removeShutdownTask(_shutdown)) _shutdown->run(); - + } catch(...) { } - } + } void PoolExecutor::interrupt() { _impl->interrupt(); } void PoolExecutor::size(size_t n) { - + if(n < 1) throw InvalidOp_Exception(); @@ -604,26 +604,26 @@ namespace ZThread { void PoolExecutor::execute(const Task& task) { - // Enqueue the task, the Queue will reject it with a + // Enqueue the task, the Queue will reject it with a // Cancelation_Exception if the Executor has been canceled - _impl->execute(task); + _impl->execute(task); } void PoolExecutor::cancel() { - _impl->cancel(); + _impl->cancel(); } bool PoolExecutor::isCanceled() { - return _impl->isCanceled(); + return _impl->isCanceled(); } - - void PoolExecutor::wait() { + + void PoolExecutor::wait() { _impl->wait(0); } bool PoolExecutor::wait(unsigned long timeout) { - return _impl->wait(timeout == 0 ? 1 : timeout); + return _impl->wait(timeout == 0 ? 
1 : timeout); } } diff --git a/dep/src/zthread/PriorityCondition.cxx b/dep/src/zthread/PriorityCondition.cxx index c43953ff73b..70258563ee4 100644 --- a/dep/src/zthread/PriorityCondition.cxx +++ b/dep/src/zthread/PriorityCondition.cxx @@ -32,14 +32,14 @@ namespace ZThread { }; PriorityCondition::PriorityCondition(Lockable& lock) { - + _impl = new PriorityConditionImpl(lock); } PriorityCondition::~PriorityCondition() { - + if(_impl != 0) delete _impl; diff --git a/dep/src/zthread/PriorityInheritanceMutex.cxx b/dep/src/zthread/PriorityInheritanceMutex.cxx index 108e4a74370..6735d15d422 100644 --- a/dep/src/zthread/PriorityInheritanceMutex.cxx +++ b/dep/src/zthread/PriorityInheritanceMutex.cxx @@ -28,28 +28,28 @@ namespace ZThread { class InheritPriorityBehavior : public NullBehavior { - + ThreadImpl* owner; Priority p; protected: - // Temporarily raise the effective priority of the owner - inline void waiterArrived(ThreadImpl* impl) { - + // Temporarily raise the effective priority of the owner + inline void waiterArrived(ThreadImpl* impl) { + Priority q = impl->getPriority(); if((int)q > (int)p) { ThreadOps::setPriority(impl, p); p = q; - + } } // Note the owners priority - inline void ownerAcquired(ThreadImpl* impl) { + inline void ownerAcquired(ThreadImpl* impl) { p = impl->getPriority(); owner = impl; @@ -57,7 +57,7 @@ namespace ZThread { } // Restore its original priority - inline void ownerReleased(ThreadImpl* impl) { + inline void ownerReleased(ThreadImpl* impl) { if(p > owner->getPriority()) ThreadOps::setPriority(impl, impl->getPriority()); @@ -66,18 +66,18 @@ namespace ZThread { }; - class PriorityInheritanceMutexImpl : + class PriorityInheritanceMutexImpl : public MutexImpl<priority_list, InheritPriorityBehavior> { }; PriorityInheritanceMutex::PriorityInheritanceMutex() { - + _impl = new PriorityInheritanceMutexImpl(); - + } PriorityInheritanceMutex::~PriorityInheritanceMutex() { - if(_impl != 0) + if(_impl != 0) delete _impl; } @@ -85,7 +85,7 @@ namespace ZThread { // P void PriorityInheritanceMutex::acquire() { - _impl->acquire(); + _impl->acquire(); } @@ -93,14 +93,14 @@ namespace ZThread { // P bool PriorityInheritanceMutex::tryAcquire(unsigned long ms) { - return _impl->tryAcquire(ms); + return _impl->tryAcquire(ms); } // V void PriorityInheritanceMutex::release() { - _impl->release(); + _impl->release(); } diff --git a/dep/src/zthread/PriorityMutex.cxx b/dep/src/zthread/PriorityMutex.cxx index c25eaebc46c..1823c82f40d 100644 --- a/dep/src/zthread/PriorityMutex.cxx +++ b/dep/src/zthread/PriorityMutex.cxx @@ -29,15 +29,15 @@ namespace ZThread { class PriorityMutexImpl : public MutexImpl<priority_list, NullBehavior> { }; - PriorityMutex::PriorityMutex() { - + PriorityMutex::PriorityMutex() { + _impl = new PriorityMutexImpl(); - + } PriorityMutex::~PriorityMutex() { - if(_impl != 0) + if(_impl != 0) delete _impl; } @@ -45,7 +45,7 @@ namespace ZThread { // P void PriorityMutex::acquire() { - _impl->acquire(); + _impl->acquire(); } @@ -53,14 +53,14 @@ namespace ZThread { // P bool PriorityMutex::tryAcquire(unsigned long ms) { - return _impl->tryAcquire(ms); + return _impl->tryAcquire(ms); } // V void PriorityMutex::release() { - _impl->release(); + _impl->release(); } diff --git a/dep/src/zthread/PrioritySemaphore.cxx b/dep/src/zthread/PrioritySemaphore.cxx index 15138b5f426..cdefe3a4aed 100644 --- a/dep/src/zthread/PrioritySemaphore.cxx +++ b/dep/src/zthread/PrioritySemaphore.cxx @@ -26,10 +26,10 @@ namespace ZThread { - class PrioritySemaphoreImpl : public 
SemaphoreImpl<priority_list> { + class PrioritySemaphoreImpl : public SemaphoreImpl<priority_list> { public: - PrioritySemaphoreImpl(int count, unsigned int maxCount) + PrioritySemaphoreImpl(int count, unsigned int maxCount) : SemaphoreImpl<priority_list>(count, maxCount, true) { } }; @@ -41,9 +41,9 @@ namespace ZThread { * @param maxCount maximum size of the semaphore count */ PrioritySemaphore::PrioritySemaphore(int count, unsigned int maxCount) { - + _impl = new PrioritySemaphoreImpl(count, maxCount); - + } PrioritySemaphore::~PrioritySemaphore() { diff --git a/dep/src/zthread/RecursiveMutex.cxx b/dep/src/zthread/RecursiveMutex.cxx index 57994f55b81..ad029488725 100644 --- a/dep/src/zthread/RecursiveMutex.cxx +++ b/dep/src/zthread/RecursiveMutex.cxx @@ -26,14 +26,14 @@ namespace ZThread { RecursiveMutex::RecursiveMutex() { - + _impl = new RecursiveMutexImpl(); - + } RecursiveMutex::~RecursiveMutex() { - if(_impl != (RecursiveMutexImpl*)0 ) + if(_impl != (RecursiveMutexImpl*)0 ) delete _impl; } @@ -41,20 +41,20 @@ namespace ZThread { void RecursiveMutex::acquire() { - _impl->acquire(); + _impl->acquire(); } bool RecursiveMutex::tryAcquire(unsigned long ms) { - return _impl->tryAcquire(ms); + return _impl->tryAcquire(ms); } void RecursiveMutex::release() { - _impl->release(); + _impl->release(); } diff --git a/dep/src/zthread/RecursiveMutexImpl.cxx b/dep/src/zthread/RecursiveMutexImpl.cxx index 41ca03547f8..adee8004278 100644 --- a/dep/src/zthread/RecursiveMutexImpl.cxx +++ b/dep/src/zthread/RecursiveMutexImpl.cxx @@ -39,9 +39,9 @@ namespace ZThread { * @exception Initialization_Exception thrown if resources could not be * properly allocated */ - RecursiveMutexImpl::RecursiveMutexImpl() + RecursiveMutexImpl::RecursiveMutexImpl() : _owner(0), _count(0) { - + } /** @@ -52,14 +52,14 @@ namespace ZThread { #ifndef NDEBUG // It is an error to destroy a mutex that has not been released - if(_owner != 0) { + if(_owner != 0) { ZTDEBUG("** You are destroying a mutex which was never released. **\n"); assert(0); // Destroyed mutex while in use } - if(!_waiters.empty()) { + if(!_waiters.empty()) { ZTDEBUG("** You are destroying a mutex which is blocking %d threads. **\n", _waiters.size()); assert(0); // Destroyed mutex while in use @@ -78,10 +78,10 @@ namespace ZThread { Monitor::STATE state; Guard<FastLock> g1(_lock); - - // If there is an entry count and the current thread is + + // If there is an entry count and the current thread is // the owner, increment the count and continue. - if(_owner == &m) + if(_owner == &m) _count++; else { @@ -91,11 +91,11 @@ namespace ZThread { assert(_count == 0); - _owner = &m; + _owner = &m; _count++; } else { // Otherwise, wait() - + _waiters.push_back(&m); m.acquire(); @@ -108,7 +108,7 @@ namespace ZThread { } m.release(); - + // Remove from waiter list, regarless of weather release() is called or // not. The monitor is sticky, so its possible a state 'stuck' from a // previous operation and will leave the wait() w/o release() having @@ -117,39 +117,39 @@ namespace ZThread { if(i != _waiters.end()) _waiters.erase(i); - // If awoke due to a notify(), take ownership. + // If awoke due to a notify(), take ownership. 
switch(state) { case Monitor::SIGNALED: - + assert(_owner == 0); assert(_count == 0); _owner = &m; _count++; - + break; case Monitor::INTERRUPTED: throw Interrupted_Exception(); - + default: throw Synchronization_Exception(); - } - + } + } - + } } bool RecursiveMutexImpl::tryAcquire(unsigned long timeout) { - + // Get the monitor for the current thread Monitor& m = ThreadImpl::current()->getMonitor(); Guard<FastLock> g1(_lock); - - // If there is an entry count and the current thread is + + // If there is an entry count and the current thread is // the owner, increment the count and continue. if(_owner == &m) _count++; @@ -176,14 +176,14 @@ namespace ZThread { m.acquire(); { - + Guard<FastLock, UnlockedScope> g2(g1); state = m.wait(timeout); - + } m.release(); - + } // Remove from waiter list, regarless of weather release() is called or @@ -194,7 +194,7 @@ namespace ZThread { if(i != _waiters.end()) _waiters.erase(i); - // If awoke due to a notify(), take ownership. + // If awoke due to a notify(), take ownership. switch(state) { case Monitor::SIGNALED: @@ -203,21 +203,21 @@ namespace ZThread { _owner = &m; _count++; - + break; case Monitor::INTERRUPTED: throw Interrupted_Exception(); - + case Monitor::TIMEDOUT: return false; default: throw Synchronization_Exception(); - } - + } + } - + } return true; @@ -237,7 +237,7 @@ namespace ZThread { // Update the count, if it has reached 0, wake another waiter. if(--_count == 0) { - + _owner = 0; // Try to find a waiter with a backoff & retry scheme @@ -245,24 +245,24 @@ namespace ZThread { // Go through the list, attempt to notify() a waiter. for(List::iterator i = _waiters.begin(); i != _waiters.end();) { - + // Try the monitor lock, if it cant be locked skip to the next waiter Monitor* n = *i; if(n->tryAcquire()) { - - // If notify() is not sucessful, it is because the wait() has already + + // If notify() is not sucessful, it is because the wait() has already // been ended (killed/interrupted/notify'd) bool woke = n->notify(); n->release(); - + // Once notify() succeeds, return if(woke) return; - + } else ++i; - + } - + if(_waiters.empty()) return; @@ -276,7 +276,7 @@ namespace ZThread { } } - + } } // namespace ZThread diff --git a/dep/src/zthread/RecursiveMutexImpl.h b/dep/src/zthread/RecursiveMutexImpl.h index 1e477136f52..0c0e20020cc 100644 --- a/dep/src/zthread/RecursiveMutexImpl.h +++ b/dep/src/zthread/RecursiveMutexImpl.h @@ -76,3 +76,4 @@ namespace ZThread { }; #endif + diff --git a/dep/src/zthread/Scheduling.h b/dep/src/zthread/Scheduling.h index 00061341d62..c9df80cbe39 100644 --- a/dep/src/zthread/Scheduling.h +++ b/dep/src/zthread/Scheduling.h @@ -94,3 +94,4 @@ namespace ZThread { } // namespace ZThread #endif // __ZTSCHEDULING_H__ + diff --git a/dep/src/zthread/Semaphore.cxx b/dep/src/zthread/Semaphore.cxx index b9fb8d0f613..f15254eb7ee 100644 --- a/dep/src/zthread/Semaphore.cxx +++ b/dep/src/zthread/Semaphore.cxx @@ -32,9 +32,9 @@ namespace ZThread { * @param maxCount maximum size of the semaphore count */ Semaphore::Semaphore(int count, unsigned int maxCount) { - + _impl = new FifoSemaphoreImpl(count, maxCount, true); - + } Semaphore::~Semaphore() { diff --git a/dep/src/zthread/SemaphoreImpl.h b/dep/src/zthread/SemaphoreImpl.h index 7f1b866c387..d5a8df30872 100644 --- a/dep/src/zthread/SemaphoreImpl.h +++ b/dep/src/zthread/SemaphoreImpl.h @@ -346,3 +346,4 @@ namespace ZThread { } // namespace ZThread #endif // __ZTSEMAPHOREIMPL_H__ + diff --git a/dep/src/zthread/State.h b/dep/src/zthread/State.h index 96697c9d4bd..6250f31599c 
100644 --- a/dep/src/zthread/State.h +++ b/dep/src/zthread/State.h @@ -149,3 +149,4 @@ class State { }; #endif + diff --git a/dep/src/zthread/Status.h b/dep/src/zthread/Status.h index a56d505a8f4..cdc050430c0 100644 --- a/dep/src/zthread/Status.h +++ b/dep/src/zthread/Status.h @@ -177,3 +177,4 @@ namespace ZThread { }; // namespace ZThread #endif + diff --git a/dep/src/zthread/SynchronousExecutor.cxx b/dep/src/zthread/SynchronousExecutor.cxx index 0dc75b5f676..da49797ee94 100644 --- a/dep/src/zthread/SynchronousExecutor.cxx +++ b/dep/src/zthread/SynchronousExecutor.cxx @@ -24,12 +24,12 @@ namespace ZThread { - SynchronousExecutor::SynchronousExecutor() + SynchronousExecutor::SynchronousExecutor() : _canceled(false) {} SynchronousExecutor::~SynchronousExecutor() { } - + void SynchronousExecutor::cancel() { Guard<Mutex> g(_lock); @@ -38,41 +38,41 @@ namespace ZThread { } bool SynchronousExecutor::isCanceled() { - + Guard<Mutex> g(_lock); return _canceled; - + } void SynchronousExecutor::interrupt() { } void SynchronousExecutor::execute(const Task& task) { - - // Canceled Executors will not accept new tasks, quick + + // Canceled Executors will not accept new tasks, quick // check to avoid excessive locking in the canceled state - if(_canceled) + if(_canceled) throw Cancellation_Exception(); - + Guard<Mutex> g(_lock); - + if(_canceled) // Double check throw Cancellation_Exception(); - + // Run the task. Task(task)->run(); - - } + + } void SynchronousExecutor::wait() { - + if(Thread::interrupted()) throw Interrupted_Exception(); - + Guard<Mutex> g(_lock); - + } - + /** * @see Executor::wait(unsigned long) */ @@ -80,11 +80,11 @@ namespace ZThread { if(Thread::interrupted()) throw Interrupted_Exception(); - + Guard<Mutex> g(_lock); return true; - + } - - + + } diff --git a/dep/src/zthread/TSS.h b/dep/src/zthread/TSS.h index ed29230ec57..a81d5b5e128 100644 --- a/dep/src/zthread/TSS.h +++ b/dep/src/zthread/TSS.h @@ -50,3 +50,4 @@ #endif #endif // __ZTTSSSELECT_H__ + diff --git a/dep/src/zthread/Thread.cxx b/dep/src/zthread/Thread.cxx index 25cde79969c..5ed7fc3e998 100644 --- a/dep/src/zthread/Thread.cxx +++ b/dep/src/zthread/Thread.cxx @@ -27,20 +27,20 @@ namespace ZThread { - Thread::Thread() - : _impl( ThreadImpl::current() ) { + Thread::Thread() + : _impl( ThreadImpl::current() ) { - // ThreadImpl's start out life with a reference count + // ThreadImpl's start out life with a reference count // of one, and the they are added to the ThreadQueue. 
_impl->addReference(); - + } Thread::Thread(const Task& task, bool autoCancel) - : _impl( new ThreadImpl(task, autoCancel) ) { - + : _impl( new ThreadImpl(task, autoCancel) ) { + _impl->addReference(); - + } bool Thread::operator==(const Thread& t) const { @@ -94,7 +94,7 @@ namespace ZThread { return _impl->interrupt(); } - + void Thread::cancel() { if(ThreadImpl::current() == _impl) @@ -102,7 +102,7 @@ namespace ZThread { _impl->cancel(); - } + } bool Thread::isCanceled() { @@ -124,4 +124,4 @@ namespace ZThread { } -} // namespace ZThread +} // namespace ZThread diff --git a/dep/src/zthread/ThreadImpl.cxx b/dep/src/zthread/ThreadImpl.cxx index c7c22883b5e..fa2fd9e36c5 100644 --- a/dep/src/zthread/ThreadImpl.cxx +++ b/dep/src/zthread/ThreadImpl.cxx @@ -34,88 +34,88 @@ namespace ZThread { TSS<ThreadImpl*> ThreadImpl::_threadMap; namespace { - + class Launcher : public Runnable { - + ThreadImpl* x; ThreadImpl* y; Task z; - + public: - + Launcher(ThreadImpl* a, ThreadImpl* b, const Task& c) : x(a), y(b), z(c) {} - - void run() { - y->dispatch(x,y,z); + + void run() { + y->dispatch(x,y,z); } - + }; - + } - ThreadImpl::ThreadImpl() + ThreadImpl::ThreadImpl() : _state(State::REFERENCE), _priority(Medium), _autoCancel(false) { - + ZTDEBUG("Reference thread created.\n"); - + } - ThreadImpl::ThreadImpl(const Task& task, bool autoCancel) + ThreadImpl::ThreadImpl(const Task& task, bool autoCancel) : _state(State::IDLE), _priority(Medium), _autoCancel(autoCancel) { - + ZTDEBUG("User thread created.\n"); start(task); } - - + + ThreadImpl::~ThreadImpl() { - + _tls.clear(); if(isActive()) { - + ZTDEBUG("You are destroying an executing thread!\n"); abort(); - + } - + ZTDEBUG("Thread destroyed.\n"); - + } - + Monitor& ThreadImpl::getMonitor() { return _monitor; } - + void ThreadImpl::cancel(bool autoCancel) { - if(!autoCancel || _autoCancel) - _monitor.cancel(); + if(!autoCancel || _autoCancel) + _monitor.cancel(); } - - bool ThreadImpl::interrupt() { - return _monitor.interrupt(); + + bool ThreadImpl::interrupt() { + return _monitor.interrupt(); } - + bool ThreadImpl::isInterrupted() { return _monitor.isInterrupted(); } - + bool ThreadImpl::isCanceled() { return _monitor.isCanceled(); } Priority ThreadImpl::getPriority() const { - return _priority; + return _priority; } - - + + bool ThreadImpl::isReference() { return _state.isReference(); } - + /** * Join the thread, blocking the caller until it is interrupted or until * the thread represented by this object exits. @@ -124,27 +124,27 @@ namespace ZThread { * joined. */ bool ThreadImpl::join(unsigned long timeout) { - + // Serial access to this ThreadImpl's state Guard<Monitor> g1(_monitor); - + // Make sure a thread is not trying to join() itself. if(ThreadOps::isCurrent(this)) throw Deadlock_Exception("Cannot join self."); - - // Reference threads can't be joined. + + // Reference threads can't be joined. if(_state.isReference()) throw InvalidOp_Exception("Can not join this thread."); - - /* - - TODO: Insert cyclic join check. - + + /* + + TODO: Insert cyclic join check. 
+ */ - + // If the task has not completed yet, wait for completion if(!_state.isJoined()) { - + // Add the current thread to the joiner list ThreadImpl* impl = current(); _joiners.push_back(impl); @@ -153,49 +153,49 @@ namespace ZThread { { // Release this ThreadImpl's lock while the joiner sleeps - _monitor.release(); + _monitor.release(); Guard<Monitor> g3(impl->getMonitor()); - + result = impl->_monitor.wait(timeout); - + _monitor.acquire(); - + } - + // Update the joiner list List::iterator i = std::find(_joiners.begin(), _joiners.end(), impl); if(i != _joiners.end()) _joiners.erase(i); - - + + switch(result) { - + case Monitor::TIMEDOUT: return false; - + case Monitor::INTERRUPTED: throw Interrupted_Exception(); - + default: break; - + } - + } return true; - + } - + /** * Translate the priority into a pthread value, and update the thread priority. * - * This is not available on all platforms, and probably works differently - * the platforms that do support it. Pthreads does not have very portable + * This is not available on all platforms, and probably works differently + * the platforms that do support it. Pthreads does not have very portable * priority support as far I am aware. * - * If SCHED_OTHER is not supported priority values are still set but + * If SCHED_OTHER is not supported priority values are still set but * dont not actually in affect anything. * * @param prio PRIORITY value @@ -204,18 +204,18 @@ namespace ZThread { * @exception InvalidOp_Exception thrown by IDLE, JOINING or JOINED threads. */ void ThreadImpl::setPriority(Priority p) { - + Guard<Monitor> g(_monitor); - + // Only set the native priority when the thread is running if(_state.isRunning()) ThreadOps::setPriority(this, p); - + _priority = p; - + } - - + + /** * Test the state Monitor of this thread to determine if the thread * is an active thread created by zthreads. @@ -223,13 +223,13 @@ namespace ZThread { * @return bool indicating the activity of the thread. */ bool ThreadImpl::isActive() { - + Guard<Monitor> g(_monitor); return _state.isRunning(); - + } - - + + /** * Get a reference to an implmenetation that maps to the current thread. * Accomplished by checking the TLS map. This will always return a valid @@ -237,98 +237,98 @@ namespace ZThread { * * @return ThreadImpl* current implementation that maps to the * executing thread. - */ + */ ThreadImpl* ThreadImpl::current() { - + // Get the ThreadImpl previously mapped onto the executing thread. ThreadImpl* impl = _threadMap.get(); - + // Create a reference thread for any threads that have been 'discovered' // because they are not created by ZThreads. - if(impl == 0) { - - // Create a ThreadImpl to represent this thread. + if(impl == 0) { + + // Create a ThreadImpl to represent this thread. impl = new ThreadImpl(); impl->_state.setReference(); - + ThreadOps::activate(impl); - + // Map a reference thread and insert it into the queue _threadMap.set(impl); - + ThreadQueue::instance()->insertReferenceThread(impl); - + } - + assert(impl != 0); return impl; - + } - + /** * Make current thread sleep for the given number of milliseconds. * This sleep can be interrupt()ed. - * + * * @param ms timeout for the sleep. * * @post the calling thread is blocked by waiting on the internal condition * variable. This can be signaled in the monitor of an interrupt */ void ThreadImpl::sleep(unsigned long ms) { - + // Make sleep()ing for 0 milliseconds equivalent to a yield. 
if(ms == 0) { - + yield(); return; - + } - + // Get the monitor for the current thread Monitor& monitor = current()->getMonitor(); - + // Acquire that threads Monitor with a Guard Guard<Monitor> g(monitor); - + for(;;) { - + switch(monitor.wait(ms)) { - + case Monitor::INTERRUPTED: throw Interrupted_Exception(); - + default: return; - + } - + } - + } - - + + /** - * Yield the current timeslice to another thread. - * If sched_yield() is available it is used. + * Yield the current timeslice to another thread. + * If sched_yield() is available it is used. * Otherwise, the state Monitor for this thread is used to simiulate a - * yield by blocking for 1 millisecond, which should give the + * yield by blocking for 1 millisecond, which should give the * scheduler a chance to schedule another thread. */ void ThreadImpl::yield() { - - // Try to yield with the native operation. If it fails, then + + // Try to yield with the native operation. If it fails, then // simulate with a short wait() on the monitor. if(!ThreadOps::yield()) { - + // Get the monitor for the current thread Monitor& monitor = current()->getMonitor(); - + // Attempt a wait(). - Guard<Monitor> g(monitor); + Guard<Monitor> g(monitor); monitor.wait(1); - + } - + } void ThreadImpl::start(const Task& task) { @@ -338,9 +338,9 @@ namespace ZThread { // A Thread must be idle in order to be eligable to run a task. if(!_state.isIdle()) throw InvalidOp_Exception("Thread is not idle."); - + _state.setRunning(); - + // Spawn a new thread, blocking the parent (current) thread until // the child starts. @@ -353,14 +353,14 @@ namespace ZThread { if(!spawn(&launch)) { // Return to the idle state & report the error if it doesn't work out. - _state.setIdle(); - throw Synchronization_Exception(); + _state.setIdle(); + throw Synchronization_Exception(); } // Wait, uninterruptably, for the child's signal. The parent thread - // still can be interrupted and killed; it just won't take effect + // still can be interrupted and killed; it just won't take effect // until the child has started. Guard<Monitor, DeferredInterruptionScope> g3(parent->_monitor); @@ -368,7 +368,7 @@ namespace ZThread { if(parent->_monitor.wait() != Monitor::SIGNALED) { assert(0); } - + } @@ -378,23 +378,23 @@ namespace ZThread { // Map the implementation object onto the running thread. _threadMap.set(impl); - // Update the reference count on a ThreadImpl before the 'Thread' + // Update the reference count on a ThreadImpl before the 'Thread' // that owns it can go out of scope (by signaling the parent) impl->addReference(); // Update the priority of the thread - if(parent->_state.isReference()) - ThreadOps::setPriority(impl, + if(parent->_state.isReference()) + ThreadOps::setPriority(impl, parent->_state.isReference() ? impl->_priority : parent->_priority); // Inherit ThreadLocal values from the parent typedef ThreadLocalMap::const_iterator It; - for(It i = parent->getThreadLocalMap().begin(); i != parent->getThreadLocalMap().end(); ++i) + for(It i = parent->getThreadLocalMap().begin(); i != parent->getThreadLocalMap().end(); ++i) if( (i->second)->isInheritable() ) impl->getThreadLocalMap()[ i->first ] = (i->second)->clone(); - - // Insert a user-thread mapping + + // Insert a user-thread mapping ThreadQueue::instance()->insertUserThread(impl); // Wake the parent once the thread is setup parent->_monitor.notify(); @@ -403,65 +403,65 @@ namespace ZThread { // not catch exceptions, let program terminate //try { - + task->run(); //} catch(...) 
{ // Result of running a task that threw an exception. // ZTDEBUG("The task has thrown an unhandled exception\n"); - //assert(0); // UQ1: Go to debugger... - + //assert(0); // UQ1: Go to debugger... + //} - ZTDEBUG("Thread joining...\n"); - + ZTDEBUG("Thread joining...\n"); + { // Update the state of the thread - + Guard<Monitor> g(impl->_monitor); impl->_state.setJoined(); - + // Wake the joiners that will be easy to join first for(List::iterator i = impl->_joiners.begin(); i != impl->_joiners.end();) { - + ThreadImpl* joiner = *i; Monitor& m = joiner->getMonitor(); if(m.tryAcquire()) { - + m.notify(); - m.release(); - + m.release(); + i = impl->_joiners.erase(i); } else ++i; - } + } // Wake the joiners that might take a while next for(List::iterator i = impl->_joiners.begin(); i != impl->_joiners.end(); ++i) { - + ThreadImpl* joiner = *i; Monitor& m = joiner->getMonitor(); m.acquire(); m.notify(); - m.release(); - + m.release(); + } - + } - ZTDEBUG("Thread exiting...\n"); + ZTDEBUG("Thread exiting...\n"); - // Insert a pending-thread mapping, allowing the resources to be reclaimed + // Insert a pending-thread mapping, allowing the resources to be reclaimed ThreadQueue::instance()->insertPendingThread(impl); // Cleanup ThreadLocal values impl->getThreadLocalMap().clear(); - // Update the reference count allowing it to be destroyed + // Update the reference count allowing it to be destroyed impl->delReference(); } diff --git a/dep/src/zthread/ThreadImpl.h b/dep/src/zthread/ThreadImpl.h index f464242918c..78b414b63e7 100644 --- a/dep/src/zthread/ThreadImpl.h +++ b/dep/src/zthread/ThreadImpl.h @@ -120,3 +120,4 @@ class ThreadImpl : public IntrusivePtr<ThreadImpl, FastLock>, public ThreadOps { } // namespace ZThread #endif // __ZTTHREADIMPL_H__ + diff --git a/dep/src/zthread/ThreadLocalImpl.cxx b/dep/src/zthread/ThreadLocalImpl.cxx index 25682e66325..502a70734dc 100644 --- a/dep/src/zthread/ThreadLocalImpl.cxx +++ b/dep/src/zthread/ThreadLocalImpl.cxx @@ -27,13 +27,13 @@ namespace ZThread { ThreadLocalImpl::ThreadLocalImpl() {} - ThreadLocalImpl::~ThreadLocalImpl() {} - + ThreadLocalImpl::~ThreadLocalImpl() {} + void ThreadLocalImpl::clearAll() { typedef ThreadImpl::ThreadLocalMap Map; Map& m = ThreadImpl::current()->getThreadLocalMap(); - + m.clear(); } @@ -42,25 +42,25 @@ namespace ZThread { typedef ThreadImpl::ThreadLocalMap Map; Map& m = ThreadImpl::current()->getThreadLocalMap(); - + Map::iterator i = m.find(this); - if(i != m.end()) + if(i != m.end()) m.erase(i); - } + } ThreadLocalImpl::ValuePtr ThreadLocalImpl::value( ValuePtr(*pfn)() ) const { - + typedef ThreadImpl::ThreadLocalMap Map; Map& m = ThreadImpl::current()->getThreadLocalMap(); - + Map::iterator i = m.find(this); - if(i != m.end()) + if(i != m.end()) return i->second; - + m[ this ] = ValuePtr( pfn() ); return m[ this ]; } -} // namespace ZThread +} // namespace ZThread diff --git a/dep/src/zthread/ThreadOps.cxx b/dep/src/zthread/ThreadOps.cxx index 53a3e4457bc..5917891028b 100644 --- a/dep/src/zthread/ThreadOps.cxx +++ b/dep/src/zthread/ThreadOps.cxx @@ -26,10 +26,10 @@ #include "ThreadOps.h" // This file will select an implementation for a ThreadOps based on -// what ThreadOps.h selects. This method is for selecting the +// what ThreadOps.h selects. This method is for selecting the // source files, to improve portability. Currently, the project is // based on the autoconf tool-set, which doesn't support conditional -// compilation well. Additionally, this should make the library +// compilation well. 
Additionally, this should make the library // easier to port since its working around conditional compilation // by using C++ features and people won't have to fiddle around with // their make tool as much to compile the source diff --git a/dep/src/zthread/ThreadOps.h b/dep/src/zthread/ThreadOps.h index d8737158e3e..86b7e1301ca 100644 --- a/dep/src/zthread/ThreadOps.h +++ b/dep/src/zthread/ThreadOps.h @@ -65,3 +65,4 @@ #endif #endif // __ZTTHREADOPSSELECT_H__ + diff --git a/dep/src/zthread/ThreadQueue.cxx b/dep/src/zthread/ThreadQueue.cxx index 02349504641..d0493a5eabf 100644 --- a/dep/src/zthread/ThreadQueue.cxx +++ b/dep/src/zthread/ThreadQueue.cxx @@ -29,9 +29,9 @@ #include <deque> namespace ZThread { - - ThreadQueue::ThreadQueue() - : _waiter(0) { + + ThreadQueue::ThreadQueue() + : _waiter(0) { ZTDEBUG("ThreadQueue created\n"); @@ -52,36 +52,36 @@ namespace ZThread { TaskList shutdownTasks; { // Check the queue to for pending user threads - + Guard<FastLock> g(_lock); - + waitRequired = (_waiter != (ThreadImpl*)1); _waiter = impl; - + threadsWaiting = !_userThreads.empty() || !_pendingThreads.empty(); - + //ZTDEBUG("Wait required: %d\n", waitRequired); //ZTDEBUG("Threads waiting: %d\n", threadsWaiting); - + // Auto-cancel any active threads at the time main() goes out of scope - // "force" a gentle exit from the executing tasks; eventually the user- + // "force" a gentle exit from the executing tasks; eventually the user- // threads will transition into pending-threads pollUserThreads(); - + // Remove all the tasks about to be run from the task list so an indication // can be given to threads calling removeShutdownTask() too late. - std::remove_copy(_shutdownTasks.begin(), - _shutdownTasks.end(), - std::back_inserter(shutdownTasks), - Task((Runnable*)0)); - + std::remove_copy(_shutdownTasks.begin(), + _shutdownTasks.end(), + std::back_inserter(shutdownTasks), + Task((Runnable*)0)); + //ZTDEBUG("Threads waiting: %d\n", threadsWaiting); - + } // Execute the shutdown tasks for(TaskList::iterator i = shutdownTasks.begin(); i != shutdownTasks.end(); ++i) { - try { + try { (*i)->run(); } catch(...) { } } @@ -93,20 +93,20 @@ namespace ZThread { Monitor& m = _waiter->getMonitor(); - + // Defer interruption while this thread waits for a signal from // the last pending user thread Guard<Monitor, CompoundScope<DeferredInterruptionScope, LockedScope> > g(m); //ZTDEBUG("Threads waiting: %d %d\n", _userThreads.size(), _pendingThreads.size()); // Avoid race-condition where the last threads are done with thier tasks, but - // only begin the final part of the clean up phase after this destructor begins + // only begin the final part of the clean up phase after this destructor begins // to run. Takes advantage of the fact that if all remaining threads have transitioned // into a pending state by the time execution reaches this point, then there is no // need to wait. waitRequired = waitRequired && !(_userThreads.empty() && !_pendingThreads.empty()); - // Reference threads can't be interrupted or otherwise + // Reference threads can't be interrupted or otherwise // manipulated. The only signal this monitor will receive // at this point will be from the last pending thread. 
if(waitRequired && m.wait() != Monitor::SIGNALED) { @@ -115,16 +115,16 @@ namespace ZThread { // Join those pending threads pollPendingThreads(); - + } - + // Clean up the reference threads - pollReferenceThreads(); - + pollReferenceThreads(); + ZTDEBUG("ThreadQueue destroyed\n"); } - + void ThreadQueue::insertPendingThread(ThreadImpl* impl) { ZTDEBUG("insertPendingThread()\n"); @@ -136,7 +136,7 @@ namespace ZThread { _userThreads.erase(i); _pendingThreads.push_back(impl); - + // Wake the main thread,if its waiting, when the last pending-thread becomes available; // Otherwise, take note that no wait for pending threads to finish is needed if(_userThreads.empty()) @@ -171,7 +171,7 @@ namespace ZThread { impl->cancel(true); ZTDEBUG("1 user-thread added.\n"); - + } @@ -183,9 +183,9 @@ namespace ZThread { ThreadImpl* impl = (ThreadImpl*)*i; ThreadOps::join(impl); - + impl->delReference(); - + i = _pendingThreads.erase(i); ZTDEBUG("1 pending-thread reclaimed.\n"); @@ -199,14 +199,14 @@ namespace ZThread { ZTDEBUG("pollReferenceThreads()\n"); for(ThreadList::iterator i = _referenceThreads.begin(); i != _referenceThreads.end(); ++i) { - + ThreadImpl* impl = (ThreadImpl*)*i; impl->delReference(); - + ZTDEBUG("1 reference-thread reclaimed.\n"); } - + } void ThreadQueue::pollUserThreads() { @@ -219,11 +219,11 @@ namespace ZThread { impl->cancel(true); ZTDEBUG("1 user-thread reclaimed.\n"); - + } } - + void ThreadQueue::insertShutdownTask(Task& task) { bool hasWaiter = false; @@ -231,34 +231,34 @@ namespace ZThread { { Guard<FastLock> g(_lock); - - // Execute later when the ThreadQueue is destroyed + + // Execute later when the ThreadQueue is destroyed if( !(hasWaiter = (_waiter != 0)) ) { _shutdownTasks.push_back(task); //ZTDEBUG("1 shutdown task added. %d\n", _shutdownTasks.size()); - + } } - + // Execute immediately if things are shutting down if(hasWaiter) task->run(); - + } - + bool ThreadQueue::removeShutdownTask(const Task& task) { - + Guard<FastLock> g(_lock); - + TaskList::iterator i = std::find(_shutdownTasks.begin(), _shutdownTasks.end(), task); bool removed = (i != _shutdownTasks.end()); if(removed) _shutdownTasks.erase(i); //ZTDEBUG("1 shutdown task removed (%d)-%d\n", removed, _shutdownTasks.size()); - + return removed; } diff --git a/dep/src/zthread/ThreadQueue.h b/dep/src/zthread/ThreadQueue.h index 72049fda3b2..52761a408c6 100644 --- a/dep/src/zthread/ThreadQueue.h +++ b/dep/src/zthread/ThreadQueue.h @@ -132,3 +132,4 @@ namespace ZThread { #endif // __ZTTHREADQUEUE_H__ + diff --git a/dep/src/zthread/ThreadedExecutor.cxx b/dep/src/zthread/ThreadedExecutor.cxx index 44a213e8daa..e33dda4ab5e 100644 --- a/dep/src/zthread/ThreadedExecutor.cxx +++ b/dep/src/zthread/ThreadedExecutor.cxx @@ -31,11 +31,11 @@ namespace ZThread { namespace { - //! + //! 
class WaiterQueue { typedef std::deque<ThreadImpl*> ThreadList; - + typedef struct group_t { size_t id; size_t count; @@ -61,14 +61,14 @@ namespace ZThread { void operator()(const Group& grp) { count += grp.count; } operator size_t() { return count; } }; - + FastMutex _lock; GroupList _list; size_t _id; size_t _generation; public: - + WaiterQueue() : _id(0), _generation(0) { // At least one empty-group exists _list.push_back( Group(_id++) ); @@ -96,15 +96,15 @@ namespace ZThread { if((size_t)std::for_each(_list.begin(), _list.end(), counter()) < 1) return true; - // Update the waiter list for the active group + // Update the waiter list for the active group _list.back().waiters.push_back(self); size_t n = _list.back().id; m.acquire(); - + { - - Guard<Lockable, UnlockedScope> g2(g1); + + Guard<Lockable, UnlockedScope> g2(g1); state = timeout == 0 ? m.wait() : m.wait(timeout); } @@ -135,22 +135,22 @@ namespace ZThread { throw Interrupted_Exception(); default: throw Synchronization_Exception(); - } - + } + return true; } - + /** * Increment the active group count * * @pre at least 1 empty group exists - * @post at least 1 non-empty group exists + * @post at least 1 non-empty group exists */ std::pair<size_t, size_t> increment() { - + Guard<FastMutex> g(_lock); - + // At least one empty-group exists assert(!_list.empty()); @@ -169,7 +169,7 @@ namespace ZThread { // When the active group is being incremented, insert a new active group // to replace it if there were waiting threads - if(i == --_list.end() && !i->waiters.empty()) + if(i == --_list.end() && !i->waiters.empty()) _list.push_back(Group(_id++)); // At least 1 non-empty group exists @@ -178,13 +178,13 @@ namespace ZThread { return std::make_pair(n, _generation); } - + /** * Decrease the count for the group with the given id. * * @param n group id - * + * * @pre At least 1 non-empty group exists * @post At least 1 empty group exists */ @@ -198,7 +198,7 @@ namespace ZThread { // Find the requested group GroupList::iterator i = std::find_if(_list.begin(), _list.end(), by_id(n)); if(i == _list.end()) { - + // A group should never have been removed until // the final task in that group completed assert(0); @@ -207,11 +207,11 @@ namespace ZThread { // Decrease the count for tasks in this group, if(--i->count == 0 && i == _list.begin()) { - - do { + + do { // When the first group completes, wake all waiters for every - // group, starting from the first until a group that is not + // group, starting from the first until a group that is not // complete is reached /* @@ -219,29 +219,29 @@ namespace ZThread { if(i == --_list.end() && i->waiters.empty()) break; */ - + if( awaken(*i) ) { - - // If all waiters were awakened, remove the group + + // If all waiters were awakened, remove the group i = _list.erase(i); - + } else { - + { // Otherwise, unlock and yield allowing the waiter // lists to be updated if other threads are busy Guard<FastLock, UnlockedScope> g2(g1); ThreadImpl::yield(); - + } i = _list.begin(); } - - } while(i != _list.end() && i->count == 0); - + + } while(i != _list.end() && i->count == 0); + // Ensure that an active group exists if(_list.empty()) _list.push_back( Group(++_id) ); @@ -250,7 +250,7 @@ namespace ZThread { // At least one group exists assert(!_list.empty()); - + } /** @@ -261,74 +261,74 @@ namespace ZThread { return next ? _generation++ : _generation; } - + private: - + /** * Awaken all the waiters remaining in the given group - * + * * @return * - true if all the waiting threads were successfully awakened. 
* - false if there were one or more threads that could not be awakened. */ bool awaken(Group& grp) { - // Go through the waiter list in the given group; + // Go through the waiter list in the given group; for(ThreadList::iterator i = grp.waiters.begin(); i != grp.waiters.end();) { - + ThreadImpl* impl = *i; Monitor& m = impl->getMonitor(); - + // Try the monitor lock, if it cant be locked skip to the next waiter if(m.tryAcquire()) { - + // Notify the monitor & remove from the waiter list so time isn't // wasted checking it again. i = grp.waiters.erase(i); - + // Try to wake the waiter, it doesn't matter if this is successful - // or not (only fails when the monitor is already going to stop waiting). + // or not (only fails when the monitor is already going to stop waiting). m.notify(); m.release(); - + } else ++i; - + } - + return grp.waiters.empty(); } }; - //! Synchronization point for the Executor + //! Synchronization point for the Executor class ExecutorImpl { typedef std::deque<ThreadImpl*> ThreadList; bool _canceled; - FastMutex _lock; + FastMutex _lock; //! Worker threads ThreadList _threads; - + WaiterQueue _queue; public: ExecutorImpl() : _canceled(false) {} - WaiterQueue& getWaiterQueue() { + WaiterQueue& getWaiterQueue() { return _queue; } void registerThread(size_t generation) { - + // Interrupt slow starting threads if(getWaiterQueue().generation() != generation) ThreadImpl::current()->interrupt(); - // Enqueue for possible future interrupt() + // Enqueue for possible future interrupt() else { Guard<FastMutex> g(_lock); @@ -339,7 +339,7 @@ namespace ZThread { } void unregisterThread() { - + Guard<FastMutex> g(_lock); std::remove(_threads.begin(), _threads.end(), ThreadImpl::current() ); @@ -369,11 +369,11 @@ namespace ZThread { // Interrupt all the registered threads for(ThreadList::iterator i = _threads.begin(); i != _threads.end(); ++i) (*i)->interrupt(); - + // Bump the generation up, ensuring slow starting threads get this interrupt getWaiterQueue().generation( true ); - } + } }; /* ExecutorImpl */ @@ -392,7 +392,7 @@ namespace ZThread { : _impl(impl), _task(task) { std::pair<size_t, size_t> pr( _impl->getWaiterQueue().increment() ); - + _group = pr.first; _generation = pr.second; @@ -405,20 +405,20 @@ namespace ZThread { size_t generation() const { return _generation; } - + void run() { - + // Register this thread once its begun; the generation is used to ensure // threads that are slow starting are properly interrupted _impl->registerThread( generation() ); - + try { - _task->run(); + _task->run(); } catch(...) { /* consume the exceptions the work propogates */ } - + _impl->getWaiterQueue().decrement( group() ); // Unregister this thread @@ -434,30 +434,30 @@ namespace ZThread { ThreadedExecutor::ThreadedExecutor() : _impl(new ExecutorImpl) {} ThreadedExecutor::~ThreadedExecutor() {} - + void ThreadedExecutor::execute(const Task& task) { - + Thread t( new Worker(_impl, task) ); - } + } void ThreadedExecutor::interrupt() { _impl->interrupt(); } void ThreadedExecutor::cancel() { - _impl->cancel(); + _impl->cancel(); } - + bool ThreadedExecutor::isCanceled() { return _impl->isCanceled(); } - + void ThreadedExecutor::wait() { _impl->getWaiterQueue().wait(0); } - bool ThreadedExecutor::wait(unsigned long timeout) { + bool ThreadedExecutor::wait(unsigned long timeout) { return _impl->getWaiterQueue().wait(timeout == 0 ? 
1 : timeout); } diff --git a/dep/src/zthread/Time.cxx b/dep/src/zthread/Time.cxx index 2409d93cb79..ee2b84a4e8a 100644 --- a/dep/src/zthread/Time.cxx +++ b/dep/src/zthread/Time.cxx @@ -27,7 +27,7 @@ using namespace ZThread; Time::Time() { - + // System startup time static TimeStrategy firstHelper; TimeStrategy helper; @@ -37,7 +37,7 @@ Time::Time() { now -= then; - _seconds = now.seconds(); + _seconds = now.seconds(); _milliseconds = now.milliseconds(); } diff --git a/dep/src/zthread/TimeStrategy.h b/dep/src/zthread/TimeStrategy.h index 249d8ab5a64..b82c06f05e2 100644 --- a/dep/src/zthread/TimeStrategy.h +++ b/dep/src/zthread/TimeStrategy.h @@ -84,3 +84,4 @@ #endif #endif // __ZTTIMESELECT_H__ + diff --git a/dep/src/zthread/config.h b/dep/src/zthread/config.h index 4ef2ca213d1..41deb93c0f6 100644 --- a/dep/src/zthread/config.h +++ b/dep/src/zthread/config.h @@ -93,3 +93,4 @@ /* No OS priority support */ /* #undef ZTHREAD_DISABLE_PRIORITY */ + diff --git a/dep/src/zthread/linux/AtomicCount.cxx b/dep/src/zthread/linux/AtomicCount.cxx index 28c2381c3b4..18b31e3a8ce 100644 --- a/dep/src/zthread/linux/AtomicCount.cxx +++ b/dep/src/zthread/linux/AtomicCount.cxx @@ -47,7 +47,7 @@ typedef struct atomic_count_t { AtomicCount::AtomicCount() { _value = reinterpret_cast<void*>(new ATOMIC_COUNT); - + } AtomicCount::~AtomicCount() { @@ -55,19 +55,19 @@ AtomicCount::~AtomicCount() { delete reinterpret_cast<ATOMIC_COUNT*>(_value); } - + void AtomicCount::increment() { atomic_inc(&reinterpret_cast<ATOMIC_COUNT*>(_value)->count); - + } - + bool AtomicCount::decrement() { return atomic_dec_and_test(&reinterpret_cast<ATOMIC_COUNT*>(_value)->count); - + } - + }; #endif // __ZTATOMICCOUNTIMPL_H__ diff --git a/dep/src/zthread/linux/AtomicFastLock.h b/dep/src/zthread/linux/AtomicFastLock.h index a416fef67cb..e5851c96d17 100644 --- a/dep/src/zthread/linux/AtomicFastLock.h +++ b/dep/src/zthread/linux/AtomicFastLock.h @@ -115,3 +115,4 @@ public: } // namespace ZThread #endif + diff --git a/dep/src/zthread/linux/FastRecursiveLock.h b/dep/src/zthread/linux/FastRecursiveLock.h index a9a74d21521..6ba392145ec 100644 --- a/dep/src/zthread/linux/FastRecursiveLock.h +++ b/dep/src/zthread/linux/FastRecursiveLock.h @@ -81,3 +81,4 @@ public: } // namespace ZThread #endif + diff --git a/dep/src/zthread/macos/FastLock.h b/dep/src/zthread/macos/FastLock.h index 0bd40e1f2fd..012843f2e3e 100644 --- a/dep/src/zthread/macos/FastLock.h +++ b/dep/src/zthread/macos/FastLock.h @@ -137,3 +137,4 @@ class FastLock : private NonCopyable { + diff --git a/dep/src/zthread/macos/Monitor.cxx b/dep/src/zthread/macos/Monitor.cxx index ab7806b13df..a6a2f58cb37 100644 --- a/dep/src/zthread/macos/Monitor.cxx +++ b/dep/src/zthread/macos/Monitor.cxx @@ -29,20 +29,20 @@ using namespace ZThread; Monitor::Monitor() : _owner(0), _waiting(false), _pending(false) { - + if(MPCreateSemaphore(1, 0, &_sema) != noErr) { assert(0); throw Initialization_Exception(); } } - + Monitor::~Monitor() throw() { assert(!_waiting); OSStatus status = MPDeleteSemaphore(_sema); - if(status != noErr) + if(status != noErr) assert(false); } @@ -53,9 +53,9 @@ Monitor::STATE Monitor::wait(unsigned long timeout) { // http://developer.apple.com/techpubs/macosx/Carbon/oss/MultiPServices/Multiprocessing_Services/index.html?http://developer.apple.com/techpubs/macosx/Carbon/oss/MultiPServices/Multiprocessing_Services/Functions/Creating_and_ssage_Queues.html AbsoluteTime tTarget; - Duration waitDuration = - (timeout == 0) ? 
kDurationForever : (kDurationMillisecond * timeout); - + Duration waitDuration = + (timeout == 0) ? kDurationForever : (kDurationMillisecond * timeout); + if(waitDuration != kDurationForever) tTarget = AddDurationToAbsolute(waitDuration, UpTime()); @@ -66,13 +66,13 @@ Monitor::STATE Monitor::wait(unsigned long timeout) { _owner = MPCurrentTaskID(); STATE state(INVALID); - + // Serialize access to the state of the Monitor // and test the state to determine if a wait is needed. _waitLock.acquire(); if(pending(ANYTHING)) { - + // Return without waiting when possible state = next(); @@ -80,57 +80,57 @@ Monitor::STATE Monitor::wait(unsigned long timeout) { return state; } - // Unlock the external lock if a wait() is probably needed. + // Unlock the external lock if a wait() is probably needed. // Access to the state is still serial. _lock.release(); // Wait for a transition in the state that is of interest, this - // allows waits to exclude certain flags (e.g. INTERRUPTED) + // allows waits to exclude certain flags (e.g. INTERRUPTED) // for a single wait() w/o actually discarding those flags - // they will remain set until a wait interested in those flags // occurs. // Wait, ignoring signals _waiting = true; - + _waitLock.release(); // Update the wait time - if(waitDuration != kDurationForever) + if(waitDuration != kDurationForever) waitDuration = AbsoluteDeltaToDuration(tTarget, UpTime()); - // Sleep until a signal arrives or a timeout occurs + // Sleep until a signal arrives or a timeout occurs OSStatus status = MPWaitOnSemaphore(_sema, waitDuration); // Reacquire serialized access to the state _waitLock.acquire(); - + // Awaken only when the event is set or the timeout expired assert(status == kMPTimeoutErr || status == noErr); - + if(status == kMPTimeoutErr) push(TIMEDOUT); // Get the next available STATE - state = next(); + state = next(); - _waiting = false; + _waiting = false; - // Its possible that a timeout will wake the thread before a signal is + // Its possible that a timeout will wake the thread before a signal is // delivered. Absorb that leftover so the next wait isn't aborted right away if(status == kMPTimeoutErr && _pending) { - + status = MPWaitOnSemaphore(_sema, kDurationForever); assert(status == noErr); } _pending = false; - + // Acquire the internal lock & release the external lock _waitLock.release(); - // Reaquire the external lock, keep from deadlocking threads calling + // Reaquire the external lock, keep from deadlocking threads calling // notify(), interrupt(), etc. 
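The macOS Monitor::wait() above follows a fixed sequence: check for already-pending state, release the external lock, sleep on the MP semaphore with an optional timeout, record TIMEDOUT when the deadline passes, absorb any leftover signal, then reacquire the locks. A much-reduced sketch of the same "why did I wake up" contract, using std::condition_variable in place of MPWaitOnSemaphore (this Monitor is illustrative and skips ZThread's interrupt/cancel states entirely):

#include <chrono>
#include <condition_variable>
#include <mutex>

enum class WakeReason { Signaled, TimedOut };

// Illustrative only: a wait that reports whether it was signaled or timed out.
class SimpleMonitor {
    std::mutex lock_;
    std::condition_variable cond_;
    bool signaled_ = false;
public:
    WakeReason wait(unsigned long ms) {
        std::unique_lock<std::mutex> g(lock_);
        bool ok;
        if (ms == 0) {                                 // 0 means wait forever
            cond_.wait(g, [this] { return signaled_; });
            ok = true;
        } else {
            ok = cond_.wait_for(g, std::chrono::milliseconds(ms),
                                [this] { return signaled_; });
        }
        signaled_ = false;                             // consume the signal
        return ok ? WakeReason::Signaled : WakeReason::TimedOut;
    }
    void notify() {
        std::lock_guard<std::mutex> g(lock_);
        signaled_ = true;
        cond_.notify_one();
    }
};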
_lock.acquire(); @@ -143,7 +143,7 @@ bool Monitor::interrupt() { // Serialize access to the state _waitLock.acquire(); - + bool wasInterruptable = !pending(INTERRUPTED); bool hasWaiter = false; @@ -151,7 +151,7 @@ bool Monitor::interrupt() { if(wasInterruptable) { push(INTERRUPTED); - + wasInterruptable = false; if(_waiting && !_pending) { @@ -159,7 +159,7 @@ bool Monitor::interrupt() { _pending = true; hasWaiter = true; - } else + } else wasInterruptable = !(_owner == MPCurrentTaskID()); } @@ -180,7 +180,7 @@ bool Monitor::isInterrupted() { bool wasInterrupted = pending(INTERRUPTED); clear(INTERRUPTED); - + _waitLock.release(); return wasInterrupted; @@ -227,12 +227,12 @@ bool Monitor::cancel() { bool wasInterrupted = !pending(INTERRUPTED); bool hasWaiter = false; - + push(CANCELED); // Update the state if theres a waiter if(wasInterrupted) { - + push(INTERRUPTED); if(_waiting && !_pending) { @@ -243,9 +243,9 @@ bool Monitor::cancel() { } } - + _waitLock.release(); - + if(hasWaiter && !masked(Monitor::INTERRUPTED)) MPSignalSemaphore(_sema); @@ -254,12 +254,12 @@ bool Monitor::cancel() { } bool Monitor::isCanceled() { - + // Serialize access to the state _waitLock.acquire(); - + bool wasCanceled = Status::examine(CANCELED); - + if(_owner == MPCurrentTaskID()) clear(INTERRUPTED); diff --git a/dep/src/zthread/macos/Monitor.h b/dep/src/zthread/macos/Monitor.h index 9fa3c9a5aa2..fa5b2ec7760 100644 --- a/dep/src/zthread/macos/Monitor.h +++ b/dep/src/zthread/macos/Monitor.h @@ -154,3 +154,4 @@ class Monitor : public Status, private NonCopyable { }; #endif + diff --git a/dep/src/zthread/macos/TSS.h b/dep/src/zthread/macos/TSS.h index 1be90c5f278..e73050c4612 100644 --- a/dep/src/zthread/macos/TSS.h +++ b/dep/src/zthread/macos/TSS.h @@ -118,3 +118,4 @@ namespace ZThread { #endif + diff --git a/dep/src/zthread/macos/ThreadOps.cxx b/dep/src/zthread/macos/ThreadOps.cxx index 6a1a4106877..ddb380992b0 100644 --- a/dep/src/zthread/macos/ThreadOps.cxx +++ b/dep/src/zthread/macos/ThreadOps.cxx @@ -27,25 +27,25 @@ namespace ZThread { -const ThreadOps ThreadOps::INVALID(0); +const ThreadOps ThreadOps::INVALID(0); + +ThreadOps::ThreadOps() : _queue(0), _tid(0) { -ThreadOps::ThreadOps() : _queue(0), _tid(0) { - if(MPCreateQueue(&_queue) != noErr) throw Initialization_Exception(); } ThreadOps::~ThreadOps() throw() { - + if(_queue != 0) { OSStatus status = MPDeleteQueue(_queue); - if(status != noErr) + if(status != noErr) assert(0); } - + } bool ThreadOps::join(ThreadOps* ops) { @@ -56,7 +56,7 @@ bool ThreadOps::join(ThreadOps* ops) { OSStatus status = MPWaitOnQueue(ops->_queue, NULL, NULL, NULL, kDurationForever); return status == noErr; - + } bool ThreadOps::yield() { @@ -91,13 +91,13 @@ OSStatus ThreadOps::_dispatch(void *arg) { // Run the task from the correct context task->run(); - + // Exit the thread MPExit(noErr); return noErr; } - + } // namespace ZThread diff --git a/dep/src/zthread/macos/ThreadOps.h b/dep/src/zthread/macos/ThreadOps.h index 5c4eb044e73..c0417f0bb2f 100644 --- a/dep/src/zthread/macos/ThreadOps.h +++ b/dep/src/zthread/macos/ThreadOps.h @@ -165,3 +165,4 @@ protected: #endif // __ZTTHREADOPS_H__ + diff --git a/dep/src/zthread/macos/UpTimeStrategy.h b/dep/src/zthread/macos/UpTimeStrategy.h index 0f7962d7eca..4d94a48f3cd 100644 --- a/dep/src/zthread/macos/UpTimeStrategy.h +++ b/dep/src/zthread/macos/UpTimeStrategy.h @@ -84,3 +84,4 @@ class TimeStrategy { }; #endif // __ZTTIMESTRATEGY_H__ + diff --git a/dep/src/zthread/posix/ConditionRecursiveLock.h 
b/dep/src/zthread/posix/ConditionRecursiveLock.h index 2be55c13797..bbf77da833b 100644 --- a/dep/src/zthread/posix/ConditionRecursiveLock.h +++ b/dep/src/zthread/posix/ConditionRecursiveLock.h @@ -144,3 +144,4 @@ public: } // namespace ZThread #endif + diff --git a/dep/src/zthread/posix/FastLock.h b/dep/src/zthread/posix/FastLock.h index 261fd7c893a..eee3c6545db 100644 --- a/dep/src/zthread/posix/FastLock.h +++ b/dep/src/zthread/posix/FastLock.h @@ -119,3 +119,4 @@ class FastLock : private NonCopyable { }; #endif + diff --git a/dep/src/zthread/posix/FtimeStrategy.h b/dep/src/zthread/posix/FtimeStrategy.h index 5e8d33b6c9b..a1adc61068c 100644 --- a/dep/src/zthread/posix/FtimeStrategy.h +++ b/dep/src/zthread/posix/FtimeStrategy.h @@ -82,3 +82,4 @@ public: }; #endif // __ZTTIMESTRATEGY_H__ + diff --git a/dep/src/zthread/posix/GetTimeOfDayStrategy.h b/dep/src/zthread/posix/GetTimeOfDayStrategy.h index 8588807f4f7..781162adeaa 100644 --- a/dep/src/zthread/posix/GetTimeOfDayStrategy.h +++ b/dep/src/zthread/posix/GetTimeOfDayStrategy.h @@ -73,3 +73,4 @@ public: }; #endif // __ZTTIMESTRATEGY_H__ + diff --git a/dep/src/zthread/posix/Monitor.cxx b/dep/src/zthread/posix/Monitor.cxx index bb157dae0dc..5e4b34c54cf 100644 --- a/dep/src/zthread/posix/Monitor.cxx +++ b/dep/src/zthread/posix/Monitor.cxx @@ -31,19 +31,19 @@ namespace ZThread { Monitor::Monitor() : _owner(0), _waiting(false) { - + pthread_cond_init(&_waitCond, 0); pthread_mutex_init(&_waitLock, 0); } - + Monitor::~Monitor() { - + assert(!_waiting); pthread_cond_destroy(&_waitCond); pthread_mutex_destroy(&_waitLock); - + } Monitor::STATE Monitor::wait(unsigned long ms) { @@ -55,14 +55,14 @@ Monitor::STATE Monitor::wait(unsigned long ms) { _owner = pthread_self(); STATE state(INVALID); - + // Serialize access to the state of the Monitor // and test the state to determine if a wait is needed. - + pthread_mutex_lock(&_waitLock); - + if(pending(ANYTHING)) { - + // Return without waiting when possible state = next(); @@ -70,13 +70,13 @@ Monitor::STATE Monitor::wait(unsigned long ms) { return state; } - - // Unlock the external lock if a wait() is probably needed. + + // Unlock the external lock if a wait() is probably needed. // Access to the state is still serial. _lock.release(); - + // Wait for a transition in the state that is of interest, this - // allows waits to exclude certain flags (e.g. INTERRUPTED) + // allows waits to exclude certain flags (e.g. INTERRUPTED) // for a single wait() w/o actually discarding those flags - // they will remain set until a wait interested in those flags // occurs. 
@@ -85,20 +85,20 @@ Monitor::STATE Monitor::wait(unsigned long ms) { // Wait, ignoring signals _waiting = true; int status = 0; - - if(ms == 0) { // Wait forever - - do { // ignore signals unless the state is interesting + + if(ms == 0) { // Wait forever + + do { // ignore signals unless the state is interesting status = pthread_cond_wait(&_waitCond, &_waitLock); } while(status == EINTR && !pending(ANYTHING)); - + // Akwaken only when a state is pending assert(status == 0); - + } else { - + // Find the target time - TimeStrategy t; + TimeStrategy t; ms += t.milliseconds(); @@ -106,34 +106,34 @@ Monitor::STATE Monitor::wait(unsigned long ms) { ms %= 1000; // Convert to a timespec - struct ::timespec timeout; - - timeout.tv_sec = s; + struct ::timespec timeout; + + timeout.tv_sec = s; timeout.tv_nsec = ms*1000000; - - // Wait ignoring signals until the state is interesting - do { - + + // Wait ignoring signals until the state is interesting + do { + // When a timeout occurs, update the state to reflect that. status = pthread_cond_timedwait(&_waitCond, &_waitLock, &timeout); - + } while(status == EINTR && !pending(ANYTHING)); - + // Akwaken only when a state is pending or when the timeout expired assert(status == 0 || status == ETIMEDOUT); - + if(status == ETIMEDOUT) push(TIMEDOUT); } - + // Get the next available STATE - state = next(); - _waiting = false; - + state = next(); + _waiting = false; + pthread_mutex_unlock(&_waitLock); - - // Reaquire the external lock, keep from deadlocking threads calling + + // Reaquire the external lock, keep from deadlocking threads calling // notify(), interrupt(), etc. _lock.acquire(); @@ -147,17 +147,17 @@ bool Monitor::interrupt() { // Serialize access to the state pthread_mutex_lock(&_waitLock); - + bool wasInterruptable = !pending(INTERRUPTED); bool hadWaiter = _waiting; - + if(wasInterruptable) { - + // Update the state & wake the waiter if there is one push(INTERRUPTED); wasInterruptable = false; - + if(hadWaiter && !masked(Monitor::INTERRUPTED)) pthread_cond_signal(&_waitCond); else @@ -180,7 +180,7 @@ bool Monitor::isInterrupted() { bool wasInterrupted = pending(INTERRUPTED); clear(INTERRUPTED); - + pthread_mutex_unlock(&_waitLock); return wasInterrupted; @@ -193,7 +193,7 @@ bool Monitor::isCanceled() { pthread_mutex_lock(&_waitLock); bool wasCanceled = examine(CANCELED); - + if(pthread_equal(_owner, pthread_self())) clear(INTERRUPTED); @@ -210,17 +210,17 @@ bool Monitor::cancel() { bool wasInterrupted = !pending(INTERRUPTED); bool hadWaiter = _waiting; - + push(CANCELED); if(wasInterrupted) { - + // Update the state & wake the waiter if there is one push(INTERRUPTED); - + if(hadWaiter && !masked(Monitor::INTERRUPTED)) pthread_cond_signal(&_waitCond); - + } pthread_mutex_unlock(&_waitLock); @@ -235,16 +235,16 @@ bool Monitor::notify() { pthread_mutex_lock(&_waitLock); bool wasNotifyable = !pending(INTERRUPTED); - + if(wasNotifyable) { - + // Set the flag and wake the waiter if there // is one push(SIGNALED); - - if(_waiting) + + if(_waiting) pthread_cond_signal(&_waitCond); - + } pthread_mutex_unlock(&_waitLock); diff --git a/dep/src/zthread/posix/Monitor.h b/dep/src/zthread/posix/Monitor.h index fe6c8b559e3..380a420b571 100644 --- a/dep/src/zthread/posix/Monitor.h +++ b/dep/src/zthread/posix/Monitor.h @@ -151,3 +151,4 @@ class Monitor : public Status, private NonCopyable { }; #endif + diff --git a/dep/src/zthread/posix/PriorityOps.h b/dep/src/zthread/posix/PriorityOps.h index 3a7f822711d..df6bfced282 100644 --- 
a/dep/src/zthread/posix/PriorityOps.h +++ b/dep/src/zthread/posix/PriorityOps.h @@ -49,3 +49,4 @@ public: } // namespace ZThread #endif // __ZTPRIORITYOPS_H__ + diff --git a/dep/src/zthread/posix/TSS.h b/dep/src/zthread/posix/TSS.h index c9c9c0e352a..13f564d1558 100644 --- a/dep/src/zthread/posix/TSS.h +++ b/dep/src/zthread/posix/TSS.h @@ -102,3 +102,4 @@ namespace ZThread { #endif + diff --git a/dep/src/zthread/posix/ThreadOps.cxx b/dep/src/zthread/posix/ThreadOps.cxx index e72ef78ada3..994b5903b28 100644 --- a/dep/src/zthread/posix/ThreadOps.cxx +++ b/dep/src/zthread/posix/ThreadOps.cxx @@ -31,7 +31,7 @@ namespace ZThread { -const ThreadOps ThreadOps::INVALID(0); +const ThreadOps ThreadOps::INVALID(0); bool ThreadOps::join(ThreadOps* ops) { @@ -57,7 +57,7 @@ bool ThreadOps::yield() { #if defined(HAVE_SCHED_YIELD) result = sched_yield() == 0; #endif - + return result; } @@ -67,11 +67,11 @@ bool ThreadOps::setPriority(ThreadOps* impl, Priority p) { assert(impl); bool result = true; - + #if !defined(ZTHREAD_DISABLE_PRIORITY) - + struct sched_param param; - + switch(p) { case Low: param.sched_priority = 0; @@ -83,7 +83,7 @@ bool ThreadOps::setPriority(ThreadOps* impl, Priority p) { default: param.sched_priority = 5; } - + result = pthread_setschedparam(impl->_tid, SCHED_OTHER, ¶m) == 0; #endif @@ -97,14 +97,14 @@ bool ThreadOps::getPriority(ThreadOps* impl, Priority& p) { assert(impl); bool result = true; - + #if !defined(ZTHREAD_DISABLE_PRIORITY) struct sched_param param; int policy = SCHED_OTHER; - + if(result = (pthread_getschedparam(impl->_tid, &policy, ¶m) == 0)) { - + // Convert to one of the PRIORITY values if(param.sched_priority < 10) p = Low; @@ -112,9 +112,9 @@ bool ThreadOps::getPriority(ThreadOps* impl, Priority& p) { p = Medium; else p = High; - + } - + #endif return result; @@ -132,16 +132,16 @@ extern "C" void *_dispatch(void *arg) { Runnable* task = reinterpret_cast<Runnable*>(arg); assert(task); - + // Run the task from the correct context task->run(); - + // Exit the thread pthread_exit((void**)0); return (void*)0; - + } - + } // namespace ZThread diff --git a/dep/src/zthread/posix/ThreadOps.h b/dep/src/zthread/posix/ThreadOps.h index 8fef43bb3fd..7176da5b1c9 100644 --- a/dep/src/zthread/posix/ThreadOps.h +++ b/dep/src/zthread/posix/ThreadOps.h @@ -152,3 +152,4 @@ protected: } #endif // __ZTTHREADOPS_H__ + diff --git a/dep/src/zthread/solaris/FastRecursiveLock.h b/dep/src/zthread/solaris/FastRecursiveLock.h index 2de0f8cd022..d743f9a9b55 100644 --- a/dep/src/zthread/solaris/FastRecursiveLock.h +++ b/dep/src/zthread/solaris/FastRecursiveLock.h @@ -120,3 +120,4 @@ public: } // namespace ZThread #endif + diff --git a/dep/src/zthread/vanilla/DualMutexRecursiveLock.h b/dep/src/zthread/vanilla/DualMutexRecursiveLock.h index 63214133c02..22f10b9bc04 100644 --- a/dep/src/zthread/vanilla/DualMutexRecursiveLock.h +++ b/dep/src/zthread/vanilla/DualMutexRecursiveLock.h @@ -171,3 +171,4 @@ class FastRecursiveLock : private NonCopyable { } // namespace ZThread #endif + diff --git a/dep/src/zthread/vanilla/SimpleAtomicCount.cxx b/dep/src/zthread/vanilla/SimpleAtomicCount.cxx index fc63d141d6a..67f50d546dc 100644 --- a/dep/src/zthread/vanilla/SimpleAtomicCount.cxx +++ b/dep/src/zthread/vanilla/SimpleAtomicCount.cxx @@ -34,7 +34,7 @@ typedef struct atomic_count_t { FastLock lock; unsigned long count; - + atomic_count_t() : count(0) {} } ATOMIC_COUNT; @@ -43,7 +43,7 @@ AtomicCount::AtomicCount() { ATOMIC_COUNT* c = new ATOMIC_COUNT; _value = reinterpret_cast<void*>(c); - + } 
AtomicCount::~AtomicCount() { @@ -54,22 +54,22 @@ AtomicCount::~AtomicCount() { delete c; } - + //! Postfix decrement and return the current value size_t AtomicCount::operator--(int) { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - + Guard<FastLock> g(c->lock); return c->count--; } - + //! Postfix increment and return the current value size_t AtomicCount::operator++(int) { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - + Guard<FastLock> g(c->lock); return c->count++; @@ -79,17 +79,17 @@ size_t AtomicCount::operator++(int) { size_t AtomicCount::operator--() { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - + Guard<FastLock> g(c->lock); return --c->count; } - + //! Prefix increment and return the current value size_t AtomicCount::operator++() { - + ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - + Guard<FastLock> g(c->lock); return ++c->count; diff --git a/dep/src/zthread/vanilla/SimpleRecursiveLock.h b/dep/src/zthread/vanilla/SimpleRecursiveLock.h index 6adf4154fc4..ecd1a403ce8 100644 --- a/dep/src/zthread/vanilla/SimpleRecursiveLock.h +++ b/dep/src/zthread/vanilla/SimpleRecursiveLock.h @@ -128,3 +128,4 @@ public: } // namespace ZThread #endif + diff --git a/dep/src/zthread/win32/AtomicCount.cxx b/dep/src/zthread/win32/AtomicCount.cxx index 84cbf8c3ddc..7fe63b0da0c 100644 --- a/dep/src/zthread/win32/AtomicCount.cxx +++ b/dep/src/zthread/win32/AtomicCount.cxx @@ -32,7 +32,7 @@ namespace ZThread { AtomicCount::AtomicCount() { _value = reinterpret_cast<void*>(new LONG(0)); - + } AtomicCount::~AtomicCount() { @@ -41,20 +41,20 @@ AtomicCount::~AtomicCount() { delete reinterpret_cast<LPLONG>(_value); } - + void AtomicCount::increment() { ::InterlockedIncrement(reinterpret_cast<LPLONG>(_value)); - + } - + bool AtomicCount::decrement() { LONG v = ::InterlockedDecrement(reinterpret_cast<LPLONG>(_value)); return static_cast<unsigned long>(v) == 0; - + } - + }; #endif // __ZTATOMICCOUNTIMPL_H__ diff --git a/dep/src/zthread/win32/AtomicFastLock.h b/dep/src/zthread/win32/AtomicFastLock.h index 8d09f132a35..17a38871ae7 100644 --- a/dep/src/zthread/win32/AtomicFastLock.h +++ b/dep/src/zthread/win32/AtomicFastLock.h @@ -120,3 +120,4 @@ class FastLock : private NonCopyable { }; #endif + diff --git a/dep/src/zthread/win32/AtomicFastRecursiveLock.h b/dep/src/zthread/win32/AtomicFastRecursiveLock.h index a4dc3755265..0018a299b17 100644 --- a/dep/src/zthread/win32/AtomicFastRecursiveLock.h +++ b/dep/src/zthread/win32/AtomicFastRecursiveLock.h @@ -156,3 +156,4 @@ class FastRecursiveLock : private NonCopyable { }; #endif + diff --git a/dep/src/zthread/win32/FastLock.h b/dep/src/zthread/win32/FastLock.h index 3b212e1d891..7f056413b96 100644 --- a/dep/src/zthread/win32/FastLock.h +++ b/dep/src/zthread/win32/FastLock.h @@ -144,3 +144,4 @@ namespace ZThread { } // namespace ZThread #endif + diff --git a/dep/src/zthread/win32/FastRecursiveLock.h b/dep/src/zthread/win32/FastRecursiveLock.h index de556ac4aa8..4f32dccf933 100644 --- a/dep/src/zthread/win32/FastRecursiveLock.h +++ b/dep/src/zthread/win32/FastRecursiveLock.h @@ -107,3 +107,4 @@ class FastRecursiveLock : private NonCopyable { } // namespace ZThread #endif + diff --git a/dep/src/zthread/win32/Monitor.cxx b/dep/src/zthread/win32/Monitor.cxx index 6e69487c054..fff056ffaf1 100644 --- a/dep/src/zthread/win32/Monitor.cxx +++ b/dep/src/zthread/win32/Monitor.cxx @@ -29,24 +29,24 @@ using namespace ZThread; Monitor::Monitor() : _owner(0), _waiting(false) { - - _handle = ::CreateEvent(0, TRUE, FALSE, 0); + + 
_handle = ::CreateEvent(0, TRUE, FALSE, 0); if(_handle == NULL) { assert(0); } } - + Monitor::~Monitor() { - + assert(!_waiting); - ::CloseHandle(_handle); + ::CloseHandle(_handle); } Monitor::STATE Monitor::wait(unsigned long ms) { - + // Update the owner on first use. The owner will not change, each // thread waits only on a single Monitor and a Monitor is never // shared @@ -54,13 +54,13 @@ Monitor::STATE Monitor::wait(unsigned long ms) { _owner = ::GetCurrentThreadId(); STATE state; //(INVALID); - + // Serialize access to the state of the Monitor // and test the state to determine if a wait is needed. _waitLock.acquire(); if(pending(ANYTHING)) { - + // Return without waiting when possible state = next(); @@ -68,12 +68,12 @@ Monitor::STATE Monitor::wait(unsigned long ms) { return state; } - // Unlock the external lock if a wait() is probably needed. + // Unlock the external lock if a wait() is probably needed. // Access to the state is still serial. _lock.release(); // Wait for a transition in the state that is of interest, this - // allows waits to exclude certain flags (e.g. INTERRUPTED) + // allows waits to exclude certain flags (e.g. INTERRUPTED) // for a single wait() w/o actually discarding those flags - // they will remain set until a wait interested in those flags // occurs. @@ -82,13 +82,13 @@ Monitor::STATE Monitor::wait(unsigned long ms) { // Wait, ignoring signals _waiting = true; - // Block until the event is set. + // Block until the event is set. _waitLock.release(); // The event is manual reset so this lack of atmoicity will not // be an issue - DWORD dwResult = + DWORD dwResult = ::WaitForSingleObject(_handle, ((ms == 0) ? INFINITE : (DWORD)ms)); // Reacquire serialized access to the state @@ -99,20 +99,20 @@ Monitor::STATE Monitor::wait(unsigned long ms) { if(dwResult == WAIT_TIMEOUT) push(TIMEDOUT); - + // Get the next available STATE - state = next(); - _waiting = false; + state = next(); + _waiting = false; ::ResetEvent(_handle); // Acquire the internal lock & release the external lock _waitLock.release(); - - // Reaquire the external lock, keep from deadlocking threads calling + + // Reaquire the external lock, keep from deadlocking threads calling // notify(), interrupt(), etc. _lock.acquire(); - + return state; } @@ -122,12 +122,12 @@ bool Monitor::interrupt() { // Serialize access to the state _waitLock.acquire(); - + bool wasInterruptable = !pending(INTERRUPTED); bool hadWaiter = _waiting; - + if(wasInterruptable) { - + // Update the state & wake the waiter if there is one push(INTERRUPTED); @@ -140,9 +140,9 @@ bool Monitor::interrupt() { assert(0); } - } else + } else wasInterruptable = !(_owner == ::GetCurrentThreadId()); - + } _waitLock.release(); @@ -159,7 +159,7 @@ bool Monitor::isInterrupted() { bool wasInterrupted = pending(INTERRUPTED); clear(INTERRUPTED); - + _waitLock.release(); return wasInterrupted; @@ -173,15 +173,15 @@ bool Monitor::notify() { _waitLock.acquire(); bool wasNotifyable = !pending(INTERRUPTED); - + if(wasNotifyable) { - + // Set the flag and wake the waiter if there // is one push(SIGNALED); - + // If there is a waiter then send the signal. - if(_waiting) + if(_waiting) if(::SetEvent(_handle) == FALSE) { assert(0); } @@ -202,20 +202,20 @@ bool Monitor::cancel() { bool wasInterrupted = !pending(INTERRUPTED); bool hadWaiter = _waiting; - + push(CANCELED); if(wasInterrupted) { - + // Update the state & wake the waiter if there is one push(INTERRUPTED); - + // If there is a waiter then send the signal. 
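The win32 Monitor above builds its wait/notify pair on a manual-reset event: the waiter marks itself as waiting, blocks in WaitForSingleObject, and resets the event afterwards, while the signaling side only calls SetEvent when a waiter is actually present. The sketch below shows just that event handshake; it deliberately omits the pending-state bookkeeping (SIGNALED/INTERRUPTED/CANCELED) the real class layers on top, and the names are illustrative:

#include <windows.h>
#include <cassert>

// Illustrative only: manual-reset-event handshake between one waiter and
// one notifier, guarded by a critical section.
class EventSignal {
    HANDLE handle_;
    CRITICAL_SECTION lock_;
    bool waiting_ = false;
public:
    EventSignal() {
        handle_ = ::CreateEvent(0, TRUE, FALSE, 0);    // manual reset, unsignaled
        assert(handle_ != NULL);
        ::InitializeCriticalSection(&lock_);
    }
    ~EventSignal() {
        ::DeleteCriticalSection(&lock_);
        ::CloseHandle(handle_);
    }
    bool wait(DWORD ms) {                              // true when signaled
        ::EnterCriticalSection(&lock_);
        waiting_ = true;
        ::LeaveCriticalSection(&lock_);
        DWORD r = ::WaitForSingleObject(handle_, ms == 0 ? INFINITE : ms);
        ::EnterCriticalSection(&lock_);
        waiting_ = false;
        ::ResetEvent(handle_);                         // manual-reset: clear it by hand
        ::LeaveCriticalSection(&lock_);
        return r == WAIT_OBJECT_0;
    }
    void notify() {
        ::EnterCriticalSection(&lock_);
        bool hadWaiter = waiting_;
        ::LeaveCriticalSection(&lock_);
        if (hadWaiter)
            ::SetEvent(handle_);                       // only signal an actual waiter
    }
};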
- if(hadWaiter && !masked(Monitor::INTERRUPTED)) + if(hadWaiter && !masked(Monitor::INTERRUPTED)) if(::SetEvent(_handle) == FALSE) { assert(0); } - + } _waitLock.release(); @@ -230,7 +230,7 @@ bool Monitor::isCanceled() { _waitLock.acquire(); bool wasCanceled = examine(CANCELED); - + if(_owner == ::GetCurrentThreadId()) clear(INTERRUPTED); diff --git a/dep/src/zthread/win32/Monitor.h b/dep/src/zthread/win32/Monitor.h index a527b8afff6..7fb57a33972 100644 --- a/dep/src/zthread/win32/Monitor.h +++ b/dep/src/zthread/win32/Monitor.h @@ -151,3 +151,4 @@ class Monitor : public Status, private NonCopyable { }; #endif + diff --git a/dep/src/zthread/win32/PerformanceCounterStrategy.h b/dep/src/zthread/win32/PerformanceCounterStrategy.h index 4c657af0c8a..79f042cd194 100644 --- a/dep/src/zthread/win32/PerformanceCounterStrategy.h +++ b/dep/src/zthread/win32/PerformanceCounterStrategy.h @@ -106,3 +106,4 @@ private: }; #endif // __ZTTIMESTRATEGY_H__ + diff --git a/dep/src/zthread/win32/TSS.h b/dep/src/zthread/win32/TSS.h index d1307a51e2a..2f8dfd6c847 100644 --- a/dep/src/zthread/win32/TSS.h +++ b/dep/src/zthread/win32/TSS.h @@ -106,3 +106,4 @@ namespace ZThread { #endif + diff --git a/dep/src/zthread/win32/ThreadOps.cxx b/dep/src/zthread/win32/ThreadOps.cxx index 6e8fb8d3b71..8812b9996b1 100644 --- a/dep/src/zthread/win32/ThreadOps.cxx +++ b/dep/src/zthread/win32/ThreadOps.cxx @@ -26,7 +26,7 @@ namespace ZThread { -const ThreadOps ThreadOps::INVALID(0); +const ThreadOps ThreadOps::INVALID(0); /** * Detect OS at runtime and attempt to locate the SwitchToThread @@ -46,10 +46,10 @@ public: v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); if(::GetVersionEx(&v) && (v.dwPlatformId == VER_PLATFORM_WIN32_NT)) { - + // Uses GetModuleHandle() so the reference count on the dll is // not affected. There is a warning about race conditions involving - // this function being called as FreeLibrary() completes; however + // this function being called as FreeLibrary() completes; however // nearly all win32 applications load this particular and will keep it // in memory until the process exits. 
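The win32 ThreadOps hunk above resolves SwitchToThread() at runtime so the same binary can still yield on platforms where the export is missing, falling back to Sleep(0). A condensed sketch of that lookup — the OS-version check from the original is omitted here, and the helper names are illustrative:

#include <windows.h>

typedef BOOL (WINAPI *YieldFn)();

// GetModuleHandle does not bump the DLL reference count; kernel32 stays
// loaded for the life of the process, so the returned pointer remains valid.
static YieldFn locateYield() {
    HMODULE kernel32 = ::GetModuleHandleA("Kernel32.dll");
    if (kernel32 == NULL)
        return NULL;
    return reinterpret_cast<YieldFn>(
        ::GetProcAddress(kernel32, "SwitchToThread"));
}

static void yieldThread() {
    static YieldFn fn = locateYield();
    if (fn == NULL || !fn())       // no API, or no other thread was runnable
        ::Sleep(0);                // fall back to giving up the timeslice
}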
HINSTANCE hInst = ::GetModuleHandle("Kernel32.dll"); @@ -62,10 +62,10 @@ public: } bool operator()() { - + // Attempt to yield using the best function available - if(!_fnYield || !_fnYield()) - ::Sleep(0); + if(!_fnYield || !_fnYield()) + ::Sleep(0); return true; @@ -78,8 +78,8 @@ bool ThreadOps::join(ThreadOps* ops) { assert(ops); assert(ops->_tid != 0); assert(ops->_hThread != NULL); - - if(::WaitForSingleObjectEx(ops->_hThread, INFINITE, FALSE) != WAIT_OBJECT_0) + + if(::WaitForSingleObjectEx(ops->_hThread, INFINITE, FALSE) != WAIT_OBJECT_0) return false; ::CloseHandle(ops->_hThread); @@ -102,11 +102,11 @@ bool ThreadOps::yield() { bool ThreadOps::setPriority(ThreadOps* impl, Priority p) { assert(impl); - + #if !defined(ZTHREAD_DISABLE_PRIORITY) bool result; - + // Convert int n; switch(p) { @@ -121,7 +121,7 @@ bool ThreadOps::setPriority(ThreadOps* impl, Priority p) { n = THREAD_PRIORITY_NORMAL; } - + result = (::SetThreadPriority(impl->_hThread, n) != THREAD_PRIORITY_ERROR_RETURN); return result; @@ -137,7 +137,7 @@ bool ThreadOps::getPriority(ThreadOps* impl, Priority& p) { assert(impl); bool result = true; - + #if !defined(ZTHREAD_DISABLE_PRIORITY) // Convert to one of the PRIORITY values @@ -154,7 +154,7 @@ bool ThreadOps::getPriority(ThreadOps* impl, Priority& p) { default: p = Medium; } - + #endif return result; @@ -191,7 +191,7 @@ unsigned int __stdcall ThreadOps::_dispatch(void *arg) { #endif return 0; - + } } diff --git a/dep/src/zthread/win32/ThreadOps.h b/dep/src/zthread/win32/ThreadOps.h index 1b3a0bfa174..3352e524e58 100644 --- a/dep/src/zthread/win32/ThreadOps.h +++ b/dep/src/zthread/win32/ThreadOps.h @@ -150,3 +150,4 @@ protected: } #endif + diff --git a/dep/src/zthread/win9x/AtomicCount.cxx b/dep/src/zthread/win9x/AtomicCount.cxx index 2bf07dcd2e7..c569af1a8a0 100644 --- a/dep/src/zthread/win9x/AtomicCount.cxx +++ b/dep/src/zthread/win9x/AtomicCount.cxx @@ -29,12 +29,12 @@ namespace ZThread { typedef struct atomic_count_t { - + CRITICAL_SECTION cs; size_t count; atomic_count_t() : count(0) {} - + } ATOMIC_COUNT; AtomicCount::AtomicCount() { @@ -42,13 +42,13 @@ AtomicCount::AtomicCount() { ATOMIC_COUNT* c = new ATOMIC_COUNT; _value = reinterpret_cast<void*>(c); ::InitializeCriticalSection(&c->cs); - + } AtomicCount::~AtomicCount() { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - assert(c->count == 0); + assert(c->count == 0); ::DeleteCriticalSection(&c->cs); delete c; @@ -58,8 +58,8 @@ AtomicCount::~AtomicCount() { size_t AtomicCount::operator--(int) { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - size_t value; - + size_t value; + ::EnterCriticalSection(&c->cs); value = c->count--; ::LeaveCriticalSection(&c->cs); @@ -67,13 +67,13 @@ size_t AtomicCount::operator--(int) { return value; } - + //! Postfix increment and return the current value size_t AtomicCount::operator++(int) { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - size_t value; - + size_t value; + ::EnterCriticalSection(&c->cs); value = c->count++; ::LeaveCriticalSection(&c->cs); @@ -86,8 +86,8 @@ size_t AtomicCount::operator++(int) { size_t AtomicCount::operator--() { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - size_t value; - + size_t value; + ::EnterCriticalSection(&c->cs); value = --c->count; ::LeaveCriticalSection(&c->cs); @@ -95,13 +95,13 @@ size_t AtomicCount::operator--() { return value; } - + //! 
Prefix increment and return the current value size_t AtomicCount::operator++() { - + ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - size_t value; - + size_t value; + ::EnterCriticalSection(&c->cs); value = ++c->count; ::LeaveCriticalSection(&c->cs); diff --git a/dep/src/zthread/win9x/AtomicFastLock.h b/dep/src/zthread/win9x/AtomicFastLock.h index 7723b7077d8..0736267233a 100644 --- a/dep/src/zthread/win9x/AtomicFastLock.h +++ b/dep/src/zthread/win9x/AtomicFastLock.h @@ -128,3 +128,4 @@ class FastLock : private NonCopyable { } // namespace ZThread #endif +
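Both AtomicCount back ends shown above implement the same small contract — increment, plus a decrement that reports when the count reaches zero — one with the Interlocked* intrinsics and one with a critical section for Win9x. For comparison, the same contract expressed with std::atomic; the class name is illustrative:

#include <atomic>

// Illustrative only: a reference count whose decrement reports "hit zero".
class RefCount {
    std::atomic<long> value_{0};
public:
    void increment() { value_.fetch_add(1, std::memory_order_relaxed); }
    bool decrement() {                       // true when the count reaches zero
        return value_.fetch_sub(1, std::memory_order_acq_rel) == 1;
    }
    long current() const { return value_.load(std::memory_order_relaxed); }
};

fetch_sub returns the previous value, so a result of 1 means this caller released the last reference, mirroring the InterlockedDecrement-returns-zero check in the win32 variant.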