38 files changed, 702 insertions, 702 deletions
diff --git a/dep/src/zthread/ConcurrentExecutor.cxx b/dep/src/zthread/ConcurrentExecutor.cxx index a65e9c5e909..c5076d135ce 100644 --- a/dep/src/zthread/ConcurrentExecutor.cxx +++ b/dep/src/zthread/ConcurrentExecutor.cxx @@ -24,7 +24,7 @@ namespace ZThread { - ConcurrentExecutor::ConcurrentExecutor() + ConcurrentExecutor::ConcurrentExecutor() : _executor(1) {} void ConcurrentExecutor::interrupt() { @@ -34,21 +34,21 @@ namespace ZThread { void ConcurrentExecutor::execute(const Task& task) { _executor.execute(task); } - + void ConcurrentExecutor::cancel() { _executor.cancel(); } - + bool ConcurrentExecutor::isCanceled() { - return _executor.isCanceled(); + return _executor.isCanceled(); } - + void ConcurrentExecutor::wait() { _executor.wait(); } - + bool ConcurrentExecutor::wait(unsigned long timeout) { - return _executor.wait(timeout); + return _executor.wait(timeout); } } diff --git a/dep/src/zthread/Condition.cxx b/dep/src/zthread/Condition.cxx index 39485fb5ca4..f1ea13f35e8 100644 --- a/dep/src/zthread/Condition.cxx +++ b/dep/src/zthread/Condition.cxx @@ -32,14 +32,14 @@ namespace ZThread { }; Condition::Condition(Lockable& lock) { - + _impl = new FifoConditionImpl(lock); } Condition::~Condition() { - + if(_impl != 0) delete _impl; @@ -64,7 +64,7 @@ namespace ZThread { void Condition::signal() { - + _impl->signal(); } diff --git a/dep/src/zthread/CountingSemaphore.cxx b/dep/src/zthread/CountingSemaphore.cxx index 43e8b8cfba7..1aa0ff79c77 100644 --- a/dep/src/zthread/CountingSemaphore.cxx +++ b/dep/src/zthread/CountingSemaphore.cxx @@ -29,9 +29,9 @@ namespace ZThread { CountingSemaphore::CountingSemaphore(int initialCount) { - + _impl = new FifoSemaphoreImpl(initialCount, 0 , false); - + } diff --git a/dep/src/zthread/FastMutex.cxx b/dep/src/zthread/FastMutex.cxx index 464dd83e5e0..fce54a127a2 100644 --- a/dep/src/zthread/FastMutex.cxx +++ b/dep/src/zthread/FastMutex.cxx @@ -28,7 +28,7 @@ namespace ZThread { FastMutex::FastMutex() : _lock(new FastLock) { } FastMutex::~FastMutex() { - delete _lock; + delete _lock; } @@ -39,7 +39,7 @@ namespace ZThread { } bool FastMutex::tryAcquire(unsigned long timeout) { - + return _lock->tryAcquire(timeout); } diff --git a/dep/src/zthread/FastRecursiveMutex.cxx b/dep/src/zthread/FastRecursiveMutex.cxx index 5ca677a654d..f84e207192f 100644 --- a/dep/src/zthread/FastRecursiveMutex.cxx +++ b/dep/src/zthread/FastRecursiveMutex.cxx @@ -25,10 +25,10 @@ namespace ZThread { - FastRecursiveMutex::FastRecursiveMutex() + FastRecursiveMutex::FastRecursiveMutex() : _lock(new FastRecursiveLock) { } - FastRecursiveMutex::~FastRecursiveMutex() + FastRecursiveMutex::~FastRecursiveMutex() { delete _lock; } @@ -39,7 +39,7 @@ namespace ZThread { } bool FastRecursiveMutex::tryAcquire(unsigned long timeout) { - + return _lock->tryAcquire(timeout); } diff --git a/dep/src/zthread/Monitor.cxx b/dep/src/zthread/Monitor.cxx index 9a578e796ed..2afb6162a22 100644 --- a/dep/src/zthread/Monitor.cxx +++ b/dep/src/zthread/Monitor.cxx @@ -26,10 +26,10 @@ #include "Monitor.h" // This file will select an implementation for a Monitor based on -// what Monitor.h selects. This method is for selecting the +// what Monitor.h selects. This method is for selecting the // source files, to improve portability. Currently, the project is // based on the autoconf tool-set, which doesn't support conditional -// compilation well. Additionally, this should make the library +// compilation well. 
Additionally, this should make the library // easier to port since its working around conditional compilation // by using C++ features and people won't have to fiddle around with // their make tool as much to compile the source diff --git a/dep/src/zthread/Mutex.cxx b/dep/src/zthread/Mutex.cxx index eca38ba89c6..a98897d090d 100644 --- a/dep/src/zthread/Mutex.cxx +++ b/dep/src/zthread/Mutex.cxx @@ -64,5 +64,5 @@ namespace ZThread { -} // namespace ZThread +} // namespace ZThread diff --git a/dep/src/zthread/PoolExecutor.cxx b/dep/src/zthread/PoolExecutor.cxx index cf84e145453..82ace996035 100644 --- a/dep/src/zthread/PoolExecutor.cxx +++ b/dep/src/zthread/PoolExecutor.cxx @@ -42,7 +42,7 @@ namespace ZThread { class WaiterQueue { typedef std::deque<ThreadImpl*> ThreadList; - + typedef struct group_t { size_t id; size_t count; @@ -68,14 +68,14 @@ namespace ZThread { void operator()(const Group& grp) { count += grp.count; } operator size_t() { return count; } }; - + FastMutex _lock; GroupList _list; size_t _id; size_t _generation; public: - + WaiterQueue() : _id(0), _generation(0) { // At least one empty-group exists _list.push_back( Group(_id++) ); @@ -89,7 +89,7 @@ namespace ZThread { */ bool wait(unsigned long timeout) { - ThreadImpl* current = ThreadImpl::current(); + ThreadImpl* current = ThreadImpl::current(); Monitor& m = current->getMonitor(); Monitor::STATE state; @@ -103,15 +103,15 @@ namespace ZThread { if((size_t)std::for_each(_list.begin(), _list.end(), counter()) < 1) return true; - // Update the waiter list for the active group + // Update the waiter list for the active group _list.back().waiters.push_back(current); size_t n = _list.back().id; m.acquire(); - + { - Guard<FastMutex, UnlockedScope> g2(g1); + Guard<FastMutex, UnlockedScope> g2(g1); state = timeout == 0 ? m.wait() : m.wait(timeout); } @@ -142,22 +142,22 @@ namespace ZThread { throw Interrupted_Exception(); default: throw Synchronization_Exception(); - } - + } + return true; } - + /** * Increment the active group count * * @pre at least 1 empty group exists - * @post at least 1 non-empty group exists + * @post at least 1 non-empty group exists */ std::pair<size_t, size_t> increment() { - + Guard<FastMutex> g(_lock); - + // At least one empty-group exists assert(!_list.empty()); @@ -176,7 +176,7 @@ namespace ZThread { // When the active group is being incremented, insert a new active group // to replace it if there were waiting threads - if(i == --_list.end() && !i->waiters.empty()) + if(i == --_list.end() && !i->waiters.empty()) _list.push_back(Group(_id++)); // At least 1 non-empty group exists @@ -185,27 +185,27 @@ namespace ZThread { return std::make_pair(n, _generation); } - + /** * Decrease the count for the group with the given id. 
* * @param n group id - * + * * @pre At least 1 non-empty group exists * @post At least 1 empty group exists */ void decrement(size_t n) { - + Guard<FastMutex> g1(_lock); - + // At least 1 non-empty group exists assert((size_t)std::for_each(_list.begin(), _list.end(), counter()) > 0); // Find the requested group GroupList::iterator i = std::find_if(_list.begin(), _list.end(), by_id(n)); if(i == _list.end()) { - + // A group should never have been removed until // the final task in that group completed assert(0); @@ -214,11 +214,11 @@ namespace ZThread { // Decrease the count for tasks in this group, if(--i->count == 0 && i == _list.begin()) { - - do { + + do { // When the first group completes, wake all waiters for every - // group, starting from the first until a group that is not + // group, starting from the first until a group that is not // complete is reached /* @@ -226,29 +226,29 @@ namespace ZThread { if(i == --_list.end() && i->waiters.empty()) break; */ - + if( awaken(*i) ) { - - // If all waiters were awakened, remove the group + + // If all waiters were awakened, remove the group i = _list.erase(i); - + } else { - + { // Otherwise, unlock and yield allowing the waiter // lists to be updated if other threads are busy Guard<FastLock, UnlockedScope> g2(g1); ThreadImpl::yield(); - + } i = _list.begin(); } - - } while(i != _list.end() && i->count == 0); - + + } while(i != _list.end() && i->count == 0); + // Ensure that an active group exists if(_list.empty()) _list.push_back( Group(++_id) ); @@ -257,7 +257,7 @@ namespace ZThread { // At least one group exists assert(!_list.empty()); - + } /** @@ -268,40 +268,40 @@ namespace ZThread { return next ? _generation++ : _generation; } - + private: - + /** * Awaken all the waiters remaining in the given group - * + * * @return * - true if all the waiting threads were successfully awakened. * - false if there were one or more threads that could not be awakened. */ bool awaken(Group& grp) { - // Go through the waiter list in the given group; + // Go through the waiter list in the given group; for(ThreadList::iterator i = grp.waiters.begin(); i != grp.waiters.end();) { - + ThreadImpl* impl = *i; Monitor& m = impl->getMonitor(); - + // Try the monitor lock, if it cant be locked skip to the next waiter if(m.tryAcquire()) { - + // Notify the monitor & remove from the waiter list so time isn't // wasted checking it again. i = grp.waiters.erase(i); - + // Try to wake the waiter, it doesn't matter if this is successful - // or not (only fails when the monitor is already going to stop waiting). + // or not (only fails when the monitor is already going to stop waiting). m.notify(); m.release(); - + } else ++i; - + } - + return grp.waiters.empty(); } @@ -310,13 +310,13 @@ namespace ZThread { /** * @class GroupedRunnable - * - * Wrap a task with group and generation information. + * + * Wrap a task with group and generation information. * * - 'group' allows tasks to be grouped together so that lists of waiting * threads can be managed. 
* - * - 'generation' allows tasks to be interrupted + * - 'generation' allows tasks to be interrupted */ class GroupedRunnable : public Runnable { @@ -329,10 +329,10 @@ namespace ZThread { public: GroupedRunnable(const Task& task, WaiterQueue& queue) - : _task(task), _queue(queue) { - + : _task(task), _queue(queue) { + std::pair<size_t, size_t> pr( _queue.increment() ); - + _group = pr.first; _generation = pr.second; @@ -368,10 +368,10 @@ namespace ZThread { * */ class ExecutorImpl { - + typedef MonitoredQueue<ExecutorTask, FastMutex> TaskQueue; typedef std::deque<ThreadImpl*> ThreadList; - + TaskQueue _taskQueue; WaiterQueue _waitingQueue; @@ -380,19 +380,19 @@ namespace ZThread { public: - + ExecutorImpl() : _size(0) {} void registerThread() { - + Guard<TaskQueue> g(_taskQueue); ThreadImpl* impl = ThreadImpl::current(); _threads.push_back(impl); // current cancel if too many threads are being created - if(_threads.size() > _size) + if(_threads.size() > _size) impl->cancel(); } @@ -408,14 +408,14 @@ namespace ZThread { // Wrap the task with a grouped task GroupedRunnable* runnable = new GroupedRunnable(task, _waitingQueue); - + try { - + _taskQueue.add( ExecutorTask(runnable) ); } catch(...) { - // Incase the queue is canceled between the time the WaiterQueue is + // Incase the queue is canceled between the time the WaiterQueue is // updated and the task is added to the TaskQueue _waitingQueue.decrement( runnable->group() ); throw; @@ -430,41 +430,41 @@ namespace ZThread { _waitingQueue.generation(true); Guard<TaskQueue> g(_taskQueue); - + // Interrupt all threads currently running, thier tasks would be // from an older generation for(ThreadList::iterator i = _threads.begin(); i != _threads.end(); ++i) (*i)->interrupt(); - + } //! Adjust the number of desired workers and return the number of Threads needed size_t workers(size_t n) { - + Guard<TaskQueue> g(_taskQueue); size_t m = (_size < n) ? (n - _size) : 0; _size = n; - + return m; } - + size_t workers() { - + Guard<TaskQueue> g(_taskQueue); return _size; - + } - + ExecutorTask next() { - + ExecutorTask task; - + // Draw the task from the queue for(;;) { - try { + try { task = _taskQueue.next(); break; @@ -473,13 +473,13 @@ namespace ZThread { // Ignore interruption here, it can only come from // another thread interrupt()ing the executor. The - // thread was interrupted in the hopes it was busy + // thread was interrupted in the hopes it was busy // with a task } } - + // Interrupt the thread running the tasks when the generation // does not match the current generation if( task->generation() != _waitingQueue.generation() ) @@ -512,31 +512,31 @@ namespace ZThread { class Worker : public Runnable { CountedPtr< ExecutorImpl > _impl; - + public: - + //! Create a Worker that draws upon the given Queue - Worker(const CountedPtr< ExecutorImpl >& impl) + Worker(const CountedPtr< ExecutorImpl >& impl) : _impl(impl) { } - + //! 
Run until Thread or Queue are canceled - void run() { - + void run() { + _impl->registerThread(); - + // Run until the Queue is canceled while(!Thread::canceled()) { - + // Draw tasks from the queue ExecutorTask task( _impl->next() ); task->run(); - - } - + + } + _impl->unregisterThread(); - + } - + }; /* Worker */ @@ -547,10 +547,10 @@ namespace ZThread { public: - Shutdown(const CountedPtr< ExecutorImpl >& impl) + Shutdown(const CountedPtr< ExecutorImpl >& impl) : _impl(impl) { } - - void run() { + + void run() { _impl->cancel(); } @@ -560,35 +560,35 @@ namespace ZThread { PoolExecutor::PoolExecutor(size_t n) : _impl( new ExecutorImpl() ), _shutdown( new Shutdown(_impl) ) { - + size(n); - + // Request cancelation when main() exits ThreadQueue::instance()->insertShutdownTask(_shutdown); } - PoolExecutor::~PoolExecutor() { + PoolExecutor::~PoolExecutor() { try { - + /** * If the shutdown task for this executor has not already been * selected to run, then run it locally */ - if(ThreadQueue::instance()->removeShutdownTask(_shutdown)) + if(ThreadQueue::instance()->removeShutdownTask(_shutdown)) _shutdown->run(); - + } catch(...) { } - } + } void PoolExecutor::interrupt() { _impl->interrupt(); } void PoolExecutor::size(size_t n) { - + if(n < 1) throw InvalidOp_Exception(); @@ -604,26 +604,26 @@ namespace ZThread { void PoolExecutor::execute(const Task& task) { - // Enqueue the task, the Queue will reject it with a + // Enqueue the task, the Queue will reject it with a // Cancelation_Exception if the Executor has been canceled - _impl->execute(task); + _impl->execute(task); } void PoolExecutor::cancel() { - _impl->cancel(); + _impl->cancel(); } bool PoolExecutor::isCanceled() { - return _impl->isCanceled(); + return _impl->isCanceled(); } - - void PoolExecutor::wait() { + + void PoolExecutor::wait() { _impl->wait(0); } bool PoolExecutor::wait(unsigned long timeout) { - return _impl->wait(timeout == 0 ? 1 : timeout); + return _impl->wait(timeout == 0 ? 
1 : timeout); } } diff --git a/dep/src/zthread/PriorityCondition.cxx b/dep/src/zthread/PriorityCondition.cxx index c43953ff73b..70258563ee4 100644 --- a/dep/src/zthread/PriorityCondition.cxx +++ b/dep/src/zthread/PriorityCondition.cxx @@ -32,14 +32,14 @@ namespace ZThread { }; PriorityCondition::PriorityCondition(Lockable& lock) { - + _impl = new PriorityConditionImpl(lock); } PriorityCondition::~PriorityCondition() { - + if(_impl != 0) delete _impl; diff --git a/dep/src/zthread/PriorityInheritanceMutex.cxx b/dep/src/zthread/PriorityInheritanceMutex.cxx index 108e4a74370..6735d15d422 100644 --- a/dep/src/zthread/PriorityInheritanceMutex.cxx +++ b/dep/src/zthread/PriorityInheritanceMutex.cxx @@ -28,28 +28,28 @@ namespace ZThread { class InheritPriorityBehavior : public NullBehavior { - + ThreadImpl* owner; Priority p; protected: - // Temporarily raise the effective priority of the owner - inline void waiterArrived(ThreadImpl* impl) { - + // Temporarily raise the effective priority of the owner + inline void waiterArrived(ThreadImpl* impl) { + Priority q = impl->getPriority(); if((int)q > (int)p) { ThreadOps::setPriority(impl, p); p = q; - + } } // Note the owners priority - inline void ownerAcquired(ThreadImpl* impl) { + inline void ownerAcquired(ThreadImpl* impl) { p = impl->getPriority(); owner = impl; @@ -57,7 +57,7 @@ namespace ZThread { } // Restore its original priority - inline void ownerReleased(ThreadImpl* impl) { + inline void ownerReleased(ThreadImpl* impl) { if(p > owner->getPriority()) ThreadOps::setPriority(impl, impl->getPriority()); @@ -66,18 +66,18 @@ namespace ZThread { }; - class PriorityInheritanceMutexImpl : + class PriorityInheritanceMutexImpl : public MutexImpl<priority_list, InheritPriorityBehavior> { }; PriorityInheritanceMutex::PriorityInheritanceMutex() { - + _impl = new PriorityInheritanceMutexImpl(); - + } PriorityInheritanceMutex::~PriorityInheritanceMutex() { - if(_impl != 0) + if(_impl != 0) delete _impl; } @@ -85,7 +85,7 @@ namespace ZThread { // P void PriorityInheritanceMutex::acquire() { - _impl->acquire(); + _impl->acquire(); } @@ -93,14 +93,14 @@ namespace ZThread { // P bool PriorityInheritanceMutex::tryAcquire(unsigned long ms) { - return _impl->tryAcquire(ms); + return _impl->tryAcquire(ms); } // V void PriorityInheritanceMutex::release() { - _impl->release(); + _impl->release(); } diff --git a/dep/src/zthread/PriorityMutex.cxx b/dep/src/zthread/PriorityMutex.cxx index c25eaebc46c..1823c82f40d 100644 --- a/dep/src/zthread/PriorityMutex.cxx +++ b/dep/src/zthread/PriorityMutex.cxx @@ -29,15 +29,15 @@ namespace ZThread { class PriorityMutexImpl : public MutexImpl<priority_list, NullBehavior> { }; - PriorityMutex::PriorityMutex() { - + PriorityMutex::PriorityMutex() { + _impl = new PriorityMutexImpl(); - + } PriorityMutex::~PriorityMutex() { - if(_impl != 0) + if(_impl != 0) delete _impl; } @@ -45,7 +45,7 @@ namespace ZThread { // P void PriorityMutex::acquire() { - _impl->acquire(); + _impl->acquire(); } @@ -53,14 +53,14 @@ namespace ZThread { // P bool PriorityMutex::tryAcquire(unsigned long ms) { - return _impl->tryAcquire(ms); + return _impl->tryAcquire(ms); } // V void PriorityMutex::release() { - _impl->release(); + _impl->release(); } diff --git a/dep/src/zthread/PrioritySemaphore.cxx b/dep/src/zthread/PrioritySemaphore.cxx index 15138b5f426..cdefe3a4aed 100644 --- a/dep/src/zthread/PrioritySemaphore.cxx +++ b/dep/src/zthread/PrioritySemaphore.cxx @@ -26,10 +26,10 @@ namespace ZThread { - class PrioritySemaphoreImpl : public 
SemaphoreImpl<priority_list> { + class PrioritySemaphoreImpl : public SemaphoreImpl<priority_list> { public: - PrioritySemaphoreImpl(int count, unsigned int maxCount) + PrioritySemaphoreImpl(int count, unsigned int maxCount) : SemaphoreImpl<priority_list>(count, maxCount, true) { } }; @@ -41,9 +41,9 @@ namespace ZThread { * @param maxCount maximum size of the semaphore count */ PrioritySemaphore::PrioritySemaphore(int count, unsigned int maxCount) { - + _impl = new PrioritySemaphoreImpl(count, maxCount); - + } PrioritySemaphore::~PrioritySemaphore() { diff --git a/dep/src/zthread/RecursiveMutex.cxx b/dep/src/zthread/RecursiveMutex.cxx index 57994f55b81..ad029488725 100644 --- a/dep/src/zthread/RecursiveMutex.cxx +++ b/dep/src/zthread/RecursiveMutex.cxx @@ -26,14 +26,14 @@ namespace ZThread { RecursiveMutex::RecursiveMutex() { - + _impl = new RecursiveMutexImpl(); - + } RecursiveMutex::~RecursiveMutex() { - if(_impl != (RecursiveMutexImpl*)0 ) + if(_impl != (RecursiveMutexImpl*)0 ) delete _impl; } @@ -41,20 +41,20 @@ namespace ZThread { void RecursiveMutex::acquire() { - _impl->acquire(); + _impl->acquire(); } bool RecursiveMutex::tryAcquire(unsigned long ms) { - return _impl->tryAcquire(ms); + return _impl->tryAcquire(ms); } void RecursiveMutex::release() { - _impl->release(); + _impl->release(); } diff --git a/dep/src/zthread/RecursiveMutexImpl.cxx b/dep/src/zthread/RecursiveMutexImpl.cxx index 41ca03547f8..adee8004278 100644 --- a/dep/src/zthread/RecursiveMutexImpl.cxx +++ b/dep/src/zthread/RecursiveMutexImpl.cxx @@ -39,9 +39,9 @@ namespace ZThread { * @exception Initialization_Exception thrown if resources could not be * properly allocated */ - RecursiveMutexImpl::RecursiveMutexImpl() + RecursiveMutexImpl::RecursiveMutexImpl() : _owner(0), _count(0) { - + } /** @@ -52,14 +52,14 @@ namespace ZThread { #ifndef NDEBUG // It is an error to destroy a mutex that has not been released - if(_owner != 0) { + if(_owner != 0) { ZTDEBUG("** You are destroying a mutex which was never released. **\n"); assert(0); // Destroyed mutex while in use } - if(!_waiters.empty()) { + if(!_waiters.empty()) { ZTDEBUG("** You are destroying a mutex which is blocking %d threads. **\n", _waiters.size()); assert(0); // Destroyed mutex while in use @@ -78,10 +78,10 @@ namespace ZThread { Monitor::STATE state; Guard<FastLock> g1(_lock); - - // If there is an entry count and the current thread is + + // If there is an entry count and the current thread is // the owner, increment the count and continue. - if(_owner == &m) + if(_owner == &m) _count++; else { @@ -91,11 +91,11 @@ namespace ZThread { assert(_count == 0); - _owner = &m; + _owner = &m; _count++; } else { // Otherwise, wait() - + _waiters.push_back(&m); m.acquire(); @@ -108,7 +108,7 @@ namespace ZThread { } m.release(); - + // Remove from waiter list, regarless of weather release() is called or // not. The monitor is sticky, so its possible a state 'stuck' from a // previous operation and will leave the wait() w/o release() having @@ -117,39 +117,39 @@ namespace ZThread { if(i != _waiters.end()) _waiters.erase(i); - // If awoke due to a notify(), take ownership. + // If awoke due to a notify(), take ownership. 
switch(state) { case Monitor::SIGNALED: - + assert(_owner == 0); assert(_count == 0); _owner = &m; _count++; - + break; case Monitor::INTERRUPTED: throw Interrupted_Exception(); - + default: throw Synchronization_Exception(); - } - + } + } - + } } bool RecursiveMutexImpl::tryAcquire(unsigned long timeout) { - + // Get the monitor for the current thread Monitor& m = ThreadImpl::current()->getMonitor(); Guard<FastLock> g1(_lock); - - // If there is an entry count and the current thread is + + // If there is an entry count and the current thread is // the owner, increment the count and continue. if(_owner == &m) _count++; @@ -176,14 +176,14 @@ namespace ZThread { m.acquire(); { - + Guard<FastLock, UnlockedScope> g2(g1); state = m.wait(timeout); - + } m.release(); - + } // Remove from waiter list, regarless of weather release() is called or @@ -194,7 +194,7 @@ namespace ZThread { if(i != _waiters.end()) _waiters.erase(i); - // If awoke due to a notify(), take ownership. + // If awoke due to a notify(), take ownership. switch(state) { case Monitor::SIGNALED: @@ -203,21 +203,21 @@ namespace ZThread { _owner = &m; _count++; - + break; case Monitor::INTERRUPTED: throw Interrupted_Exception(); - + case Monitor::TIMEDOUT: return false; default: throw Synchronization_Exception(); - } - + } + } - + } return true; @@ -237,7 +237,7 @@ namespace ZThread { // Update the count, if it has reached 0, wake another waiter. if(--_count == 0) { - + _owner = 0; // Try to find a waiter with a backoff & retry scheme @@ -245,24 +245,24 @@ namespace ZThread { // Go through the list, attempt to notify() a waiter. for(List::iterator i = _waiters.begin(); i != _waiters.end();) { - + // Try the monitor lock, if it cant be locked skip to the next waiter Monitor* n = *i; if(n->tryAcquire()) { - - // If notify() is not sucessful, it is because the wait() has already + + // If notify() is not sucessful, it is because the wait() has already // been ended (killed/interrupted/notify'd) bool woke = n->notify(); n->release(); - + // Once notify() succeeds, return if(woke) return; - + } else ++i; - + } - + if(_waiters.empty()) return; @@ -276,7 +276,7 @@ namespace ZThread { } } - + } } // namespace ZThread diff --git a/dep/src/zthread/Semaphore.cxx b/dep/src/zthread/Semaphore.cxx index b9fb8d0f613..f15254eb7ee 100644 --- a/dep/src/zthread/Semaphore.cxx +++ b/dep/src/zthread/Semaphore.cxx @@ -32,9 +32,9 @@ namespace ZThread { * @param maxCount maximum size of the semaphore count */ Semaphore::Semaphore(int count, unsigned int maxCount) { - + _impl = new FifoSemaphoreImpl(count, maxCount, true); - + } Semaphore::~Semaphore() { diff --git a/dep/src/zthread/SynchronousExecutor.cxx b/dep/src/zthread/SynchronousExecutor.cxx index 0dc75b5f676..da49797ee94 100644 --- a/dep/src/zthread/SynchronousExecutor.cxx +++ b/dep/src/zthread/SynchronousExecutor.cxx @@ -24,12 +24,12 @@ namespace ZThread { - SynchronousExecutor::SynchronousExecutor() + SynchronousExecutor::SynchronousExecutor() : _canceled(false) {} SynchronousExecutor::~SynchronousExecutor() { } - + void SynchronousExecutor::cancel() { Guard<Mutex> g(_lock); @@ -38,41 +38,41 @@ namespace ZThread { } bool SynchronousExecutor::isCanceled() { - + Guard<Mutex> g(_lock); return _canceled; - + } void SynchronousExecutor::interrupt() { } void SynchronousExecutor::execute(const Task& task) { - - // Canceled Executors will not accept new tasks, quick + + // Canceled Executors will not accept new tasks, quick // check to avoid excessive locking in the canceled state - if(_canceled) + 
if(_canceled) throw Cancellation_Exception(); - + Guard<Mutex> g(_lock); - + if(_canceled) // Double check throw Cancellation_Exception(); - + // Run the task. Task(task)->run(); - - } + + } void SynchronousExecutor::wait() { - + if(Thread::interrupted()) throw Interrupted_Exception(); - + Guard<Mutex> g(_lock); - + } - + /** * @see Executor::wait(unsigned long) */ @@ -80,11 +80,11 @@ namespace ZThread { if(Thread::interrupted()) throw Interrupted_Exception(); - + Guard<Mutex> g(_lock); return true; - + } - - + + } diff --git a/dep/src/zthread/Thread.cxx b/dep/src/zthread/Thread.cxx index 25cde79969c..5ed7fc3e998 100644 --- a/dep/src/zthread/Thread.cxx +++ b/dep/src/zthread/Thread.cxx @@ -27,20 +27,20 @@ namespace ZThread { - Thread::Thread() - : _impl( ThreadImpl::current() ) { + Thread::Thread() + : _impl( ThreadImpl::current() ) { - // ThreadImpl's start out life with a reference count + // ThreadImpl's start out life with a reference count // of one, and the they are added to the ThreadQueue. _impl->addReference(); - + } Thread::Thread(const Task& task, bool autoCancel) - : _impl( new ThreadImpl(task, autoCancel) ) { - + : _impl( new ThreadImpl(task, autoCancel) ) { + _impl->addReference(); - + } bool Thread::operator==(const Thread& t) const { @@ -94,7 +94,7 @@ namespace ZThread { return _impl->interrupt(); } - + void Thread::cancel() { if(ThreadImpl::current() == _impl) @@ -102,7 +102,7 @@ namespace ZThread { _impl->cancel(); - } + } bool Thread::isCanceled() { @@ -124,4 +124,4 @@ namespace ZThread { } -} // namespace ZThread +} // namespace ZThread diff --git a/dep/src/zthread/ThreadImpl.cxx b/dep/src/zthread/ThreadImpl.cxx index c7c22883b5e..fa2fd9e36c5 100644 --- a/dep/src/zthread/ThreadImpl.cxx +++ b/dep/src/zthread/ThreadImpl.cxx @@ -34,88 +34,88 @@ namespace ZThread { TSS<ThreadImpl*> ThreadImpl::_threadMap; namespace { - + class Launcher : public Runnable { - + ThreadImpl* x; ThreadImpl* y; Task z; - + public: - + Launcher(ThreadImpl* a, ThreadImpl* b, const Task& c) : x(a), y(b), z(c) {} - - void run() { - y->dispatch(x,y,z); + + void run() { + y->dispatch(x,y,z); } - + }; - + } - ThreadImpl::ThreadImpl() + ThreadImpl::ThreadImpl() : _state(State::REFERENCE), _priority(Medium), _autoCancel(false) { - + ZTDEBUG("Reference thread created.\n"); - + } - ThreadImpl::ThreadImpl(const Task& task, bool autoCancel) + ThreadImpl::ThreadImpl(const Task& task, bool autoCancel) : _state(State::IDLE), _priority(Medium), _autoCancel(autoCancel) { - + ZTDEBUG("User thread created.\n"); start(task); } - - + + ThreadImpl::~ThreadImpl() { - + _tls.clear(); if(isActive()) { - + ZTDEBUG("You are destroying an executing thread!\n"); abort(); - + } - + ZTDEBUG("Thread destroyed.\n"); - + } - + Monitor& ThreadImpl::getMonitor() { return _monitor; } - + void ThreadImpl::cancel(bool autoCancel) { - if(!autoCancel || _autoCancel) - _monitor.cancel(); + if(!autoCancel || _autoCancel) + _monitor.cancel(); } - - bool ThreadImpl::interrupt() { - return _monitor.interrupt(); + + bool ThreadImpl::interrupt() { + return _monitor.interrupt(); } - + bool ThreadImpl::isInterrupted() { return _monitor.isInterrupted(); } - + bool ThreadImpl::isCanceled() { return _monitor.isCanceled(); } Priority ThreadImpl::getPriority() const { - return _priority; + return _priority; } - - + + bool ThreadImpl::isReference() { return _state.isReference(); } - + /** * Join the thread, blocking the caller until it is interrupted or until * the thread represented by this object exits. 
@@ -124,27 +124,27 @@ namespace ZThread { * joined. */ bool ThreadImpl::join(unsigned long timeout) { - + // Serial access to this ThreadImpl's state Guard<Monitor> g1(_monitor); - + // Make sure a thread is not trying to join() itself. if(ThreadOps::isCurrent(this)) throw Deadlock_Exception("Cannot join self."); - - // Reference threads can't be joined. + + // Reference threads can't be joined. if(_state.isReference()) throw InvalidOp_Exception("Can not join this thread."); - - /* - - TODO: Insert cyclic join check. - + + /* + + TODO: Insert cyclic join check. + */ - + // If the task has not completed yet, wait for completion if(!_state.isJoined()) { - + // Add the current thread to the joiner list ThreadImpl* impl = current(); _joiners.push_back(impl); @@ -153,49 +153,49 @@ namespace ZThread { { // Release this ThreadImpl's lock while the joiner sleeps - _monitor.release(); + _monitor.release(); Guard<Monitor> g3(impl->getMonitor()); - + result = impl->_monitor.wait(timeout); - + _monitor.acquire(); - + } - + // Update the joiner list List::iterator i = std::find(_joiners.begin(), _joiners.end(), impl); if(i != _joiners.end()) _joiners.erase(i); - - + + switch(result) { - + case Monitor::TIMEDOUT: return false; - + case Monitor::INTERRUPTED: throw Interrupted_Exception(); - + default: break; - + } - + } return true; - + } - + /** * Translate the priority into a pthread value, and update the thread priority. * - * This is not available on all platforms, and probably works differently - * the platforms that do support it. Pthreads does not have very portable + * This is not available on all platforms, and probably works differently + * the platforms that do support it. Pthreads does not have very portable * priority support as far I am aware. * - * If SCHED_OTHER is not supported priority values are still set but + * If SCHED_OTHER is not supported priority values are still set but * dont not actually in affect anything. * * @param prio PRIORITY value @@ -204,18 +204,18 @@ namespace ZThread { * @exception InvalidOp_Exception thrown by IDLE, JOINING or JOINED threads. */ void ThreadImpl::setPriority(Priority p) { - + Guard<Monitor> g(_monitor); - + // Only set the native priority when the thread is running if(_state.isRunning()) ThreadOps::setPriority(this, p); - + _priority = p; - + } - - + + /** * Test the state Monitor of this thread to determine if the thread * is an active thread created by zthreads. @@ -223,13 +223,13 @@ namespace ZThread { * @return bool indicating the activity of the thread. */ bool ThreadImpl::isActive() { - + Guard<Monitor> g(_monitor); return _state.isRunning(); - + } - - + + /** * Get a reference to an implmenetation that maps to the current thread. * Accomplished by checking the TLS map. This will always return a valid @@ -237,98 +237,98 @@ namespace ZThread { * * @return ThreadImpl* current implementation that maps to the * executing thread. - */ + */ ThreadImpl* ThreadImpl::current() { - + // Get the ThreadImpl previously mapped onto the executing thread. ThreadImpl* impl = _threadMap.get(); - + // Create a reference thread for any threads that have been 'discovered' // because they are not created by ZThreads. - if(impl == 0) { - - // Create a ThreadImpl to represent this thread. + if(impl == 0) { + + // Create a ThreadImpl to represent this thread. 
impl = new ThreadImpl(); impl->_state.setReference(); - + ThreadOps::activate(impl); - + // Map a reference thread and insert it into the queue _threadMap.set(impl); - + ThreadQueue::instance()->insertReferenceThread(impl); - + } - + assert(impl != 0); return impl; - + } - + /** * Make current thread sleep for the given number of milliseconds. * This sleep can be interrupt()ed. - * + * * @param ms timeout for the sleep. * * @post the calling thread is blocked by waiting on the internal condition * variable. This can be signaled in the monitor of an interrupt */ void ThreadImpl::sleep(unsigned long ms) { - + // Make sleep()ing for 0 milliseconds equivalent to a yield. if(ms == 0) { - + yield(); return; - + } - + // Get the monitor for the current thread Monitor& monitor = current()->getMonitor(); - + // Acquire that threads Monitor with a Guard Guard<Monitor> g(monitor); - + for(;;) { - + switch(monitor.wait(ms)) { - + case Monitor::INTERRUPTED: throw Interrupted_Exception(); - + default: return; - + } - + } - + } - - + + /** - * Yield the current timeslice to another thread. - * If sched_yield() is available it is used. + * Yield the current timeslice to another thread. + * If sched_yield() is available it is used. * Otherwise, the state Monitor for this thread is used to simiulate a - * yield by blocking for 1 millisecond, which should give the + * yield by blocking for 1 millisecond, which should give the * scheduler a chance to schedule another thread. */ void ThreadImpl::yield() { - - // Try to yield with the native operation. If it fails, then + + // Try to yield with the native operation. If it fails, then // simulate with a short wait() on the monitor. if(!ThreadOps::yield()) { - + // Get the monitor for the current thread Monitor& monitor = current()->getMonitor(); - + // Attempt a wait(). - Guard<Monitor> g(monitor); + Guard<Monitor> g(monitor); monitor.wait(1); - + } - + } void ThreadImpl::start(const Task& task) { @@ -338,9 +338,9 @@ namespace ZThread { // A Thread must be idle in order to be eligable to run a task. if(!_state.isIdle()) throw InvalidOp_Exception("Thread is not idle."); - + _state.setRunning(); - + // Spawn a new thread, blocking the parent (current) thread until // the child starts. @@ -353,14 +353,14 @@ namespace ZThread { if(!spawn(&launch)) { // Return to the idle state & report the error if it doesn't work out. - _state.setIdle(); - throw Synchronization_Exception(); + _state.setIdle(); + throw Synchronization_Exception(); } // Wait, uninterruptably, for the child's signal. The parent thread - // still can be interrupted and killed; it just won't take effect + // still can be interrupted and killed; it just won't take effect // until the child has started. Guard<Monitor, DeferredInterruptionScope> g3(parent->_monitor); @@ -368,7 +368,7 @@ namespace ZThread { if(parent->_monitor.wait() != Monitor::SIGNALED) { assert(0); } - + } @@ -378,23 +378,23 @@ namespace ZThread { // Map the implementation object onto the running thread. _threadMap.set(impl); - // Update the reference count on a ThreadImpl before the 'Thread' + // Update the reference count on a ThreadImpl before the 'Thread' // that owns it can go out of scope (by signaling the parent) impl->addReference(); // Update the priority of the thread - if(parent->_state.isReference()) - ThreadOps::setPriority(impl, + if(parent->_state.isReference()) + ThreadOps::setPriority(impl, parent->_state.isReference() ? 
impl->_priority : parent->_priority); // Inherit ThreadLocal values from the parent typedef ThreadLocalMap::const_iterator It; - for(It i = parent->getThreadLocalMap().begin(); i != parent->getThreadLocalMap().end(); ++i) + for(It i = parent->getThreadLocalMap().begin(); i != parent->getThreadLocalMap().end(); ++i) if( (i->second)->isInheritable() ) impl->getThreadLocalMap()[ i->first ] = (i->second)->clone(); - - // Insert a user-thread mapping + + // Insert a user-thread mapping ThreadQueue::instance()->insertUserThread(impl); // Wake the parent once the thread is setup parent->_monitor.notify(); @@ -403,65 +403,65 @@ namespace ZThread { // not catch exceptions, let program terminate //try { - + task->run(); //} catch(...) { // Result of running a task that threw an exception. // ZTDEBUG("The task has thrown an unhandled exception\n"); - //assert(0); // UQ1: Go to debugger... - + //assert(0); // UQ1: Go to debugger... + //} - ZTDEBUG("Thread joining...\n"); - + ZTDEBUG("Thread joining...\n"); + { // Update the state of the thread - + Guard<Monitor> g(impl->_monitor); impl->_state.setJoined(); - + // Wake the joiners that will be easy to join first for(List::iterator i = impl->_joiners.begin(); i != impl->_joiners.end();) { - + ThreadImpl* joiner = *i; Monitor& m = joiner->getMonitor(); if(m.tryAcquire()) { - + m.notify(); - m.release(); - + m.release(); + i = impl->_joiners.erase(i); } else ++i; - } + } // Wake the joiners that might take a while next for(List::iterator i = impl->_joiners.begin(); i != impl->_joiners.end(); ++i) { - + ThreadImpl* joiner = *i; Monitor& m = joiner->getMonitor(); m.acquire(); m.notify(); - m.release(); - + m.release(); + } - + } - ZTDEBUG("Thread exiting...\n"); + ZTDEBUG("Thread exiting...\n"); - // Insert a pending-thread mapping, allowing the resources to be reclaimed + // Insert a pending-thread mapping, allowing the resources to be reclaimed ThreadQueue::instance()->insertPendingThread(impl); // Cleanup ThreadLocal values impl->getThreadLocalMap().clear(); - // Update the reference count allowing it to be destroyed + // Update the reference count allowing it to be destroyed impl->delReference(); } diff --git a/dep/src/zthread/ThreadLocalImpl.cxx b/dep/src/zthread/ThreadLocalImpl.cxx index 25682e66325..502a70734dc 100644 --- a/dep/src/zthread/ThreadLocalImpl.cxx +++ b/dep/src/zthread/ThreadLocalImpl.cxx @@ -27,13 +27,13 @@ namespace ZThread { ThreadLocalImpl::ThreadLocalImpl() {} - ThreadLocalImpl::~ThreadLocalImpl() {} - + ThreadLocalImpl::~ThreadLocalImpl() {} + void ThreadLocalImpl::clearAll() { typedef ThreadImpl::ThreadLocalMap Map; Map& m = ThreadImpl::current()->getThreadLocalMap(); - + m.clear(); } @@ -42,25 +42,25 @@ namespace ZThread { typedef ThreadImpl::ThreadLocalMap Map; Map& m = ThreadImpl::current()->getThreadLocalMap(); - + Map::iterator i = m.find(this); - if(i != m.end()) + if(i != m.end()) m.erase(i); - } + } ThreadLocalImpl::ValuePtr ThreadLocalImpl::value( ValuePtr(*pfn)() ) const { - + typedef ThreadImpl::ThreadLocalMap Map; Map& m = ThreadImpl::current()->getThreadLocalMap(); - + Map::iterator i = m.find(this); - if(i != m.end()) + if(i != m.end()) return i->second; - + m[ this ] = ValuePtr( pfn() ); return m[ this ]; } -} // namespace ZThread +} // namespace ZThread diff --git a/dep/src/zthread/ThreadOps.cxx b/dep/src/zthread/ThreadOps.cxx index 53a3e4457bc..5917891028b 100644 --- a/dep/src/zthread/ThreadOps.cxx +++ b/dep/src/zthread/ThreadOps.cxx @@ -26,10 +26,10 @@ #include "ThreadOps.h" // This file will select an 
implementation for a ThreadOps based on -// what ThreadOps.h selects. This method is for selecting the +// what ThreadOps.h selects. This method is for selecting the // source files, to improve portability. Currently, the project is // based on the autoconf tool-set, which doesn't support conditional -// compilation well. Additionally, this should make the library +// compilation well. Additionally, this should make the library // easier to port since its working around conditional compilation // by using C++ features and people won't have to fiddle around with // their make tool as much to compile the source diff --git a/dep/src/zthread/ThreadQueue.cxx b/dep/src/zthread/ThreadQueue.cxx index 02349504641..d0493a5eabf 100644 --- a/dep/src/zthread/ThreadQueue.cxx +++ b/dep/src/zthread/ThreadQueue.cxx @@ -29,9 +29,9 @@ #include <deque> namespace ZThread { - - ThreadQueue::ThreadQueue() - : _waiter(0) { + + ThreadQueue::ThreadQueue() + : _waiter(0) { ZTDEBUG("ThreadQueue created\n"); @@ -52,36 +52,36 @@ namespace ZThread { TaskList shutdownTasks; { // Check the queue to for pending user threads - + Guard<FastLock> g(_lock); - + waitRequired = (_waiter != (ThreadImpl*)1); _waiter = impl; - + threadsWaiting = !_userThreads.empty() || !_pendingThreads.empty(); - + //ZTDEBUG("Wait required: %d\n", waitRequired); //ZTDEBUG("Threads waiting: %d\n", threadsWaiting); - + // Auto-cancel any active threads at the time main() goes out of scope - // "force" a gentle exit from the executing tasks; eventually the user- + // "force" a gentle exit from the executing tasks; eventually the user- // threads will transition into pending-threads pollUserThreads(); - + // Remove all the tasks about to be run from the task list so an indication // can be given to threads calling removeShutdownTask() too late. - std::remove_copy(_shutdownTasks.begin(), - _shutdownTasks.end(), - std::back_inserter(shutdownTasks), - Task((Runnable*)0)); - + std::remove_copy(_shutdownTasks.begin(), + _shutdownTasks.end(), + std::back_inserter(shutdownTasks), + Task((Runnable*)0)); + //ZTDEBUG("Threads waiting: %d\n", threadsWaiting); - + } // Execute the shutdown tasks for(TaskList::iterator i = shutdownTasks.begin(); i != shutdownTasks.end(); ++i) { - try { + try { (*i)->run(); } catch(...) { } } @@ -93,20 +93,20 @@ namespace ZThread { Monitor& m = _waiter->getMonitor(); - + // Defer interruption while this thread waits for a signal from // the last pending user thread Guard<Monitor, CompoundScope<DeferredInterruptionScope, LockedScope> > g(m); //ZTDEBUG("Threads waiting: %d %d\n", _userThreads.size(), _pendingThreads.size()); // Avoid race-condition where the last threads are done with thier tasks, but - // only begin the final part of the clean up phase after this destructor begins + // only begin the final part of the clean up phase after this destructor begins // to run. Takes advantage of the fact that if all remaining threads have transitioned // into a pending state by the time execution reaches this point, then there is no // need to wait. waitRequired = waitRequired && !(_userThreads.empty() && !_pendingThreads.empty()); - // Reference threads can't be interrupted or otherwise + // Reference threads can't be interrupted or otherwise // manipulated. The only signal this monitor will receive // at this point will be from the last pending thread. 
if(waitRequired && m.wait() != Monitor::SIGNALED) { @@ -115,16 +115,16 @@ namespace ZThread { // Join those pending threads pollPendingThreads(); - + } - + // Clean up the reference threads - pollReferenceThreads(); - + pollReferenceThreads(); + ZTDEBUG("ThreadQueue destroyed\n"); } - + void ThreadQueue::insertPendingThread(ThreadImpl* impl) { ZTDEBUG("insertPendingThread()\n"); @@ -136,7 +136,7 @@ namespace ZThread { _userThreads.erase(i); _pendingThreads.push_back(impl); - + // Wake the main thread,if its waiting, when the last pending-thread becomes available; // Otherwise, take note that no wait for pending threads to finish is needed if(_userThreads.empty()) @@ -171,7 +171,7 @@ namespace ZThread { impl->cancel(true); ZTDEBUG("1 user-thread added.\n"); - + } @@ -183,9 +183,9 @@ namespace ZThread { ThreadImpl* impl = (ThreadImpl*)*i; ThreadOps::join(impl); - + impl->delReference(); - + i = _pendingThreads.erase(i); ZTDEBUG("1 pending-thread reclaimed.\n"); @@ -199,14 +199,14 @@ namespace ZThread { ZTDEBUG("pollReferenceThreads()\n"); for(ThreadList::iterator i = _referenceThreads.begin(); i != _referenceThreads.end(); ++i) { - + ThreadImpl* impl = (ThreadImpl*)*i; impl->delReference(); - + ZTDEBUG("1 reference-thread reclaimed.\n"); } - + } void ThreadQueue::pollUserThreads() { @@ -219,11 +219,11 @@ namespace ZThread { impl->cancel(true); ZTDEBUG("1 user-thread reclaimed.\n"); - + } } - + void ThreadQueue::insertShutdownTask(Task& task) { bool hasWaiter = false; @@ -231,34 +231,34 @@ namespace ZThread { { Guard<FastLock> g(_lock); - - // Execute later when the ThreadQueue is destroyed + + // Execute later when the ThreadQueue is destroyed if( !(hasWaiter = (_waiter != 0)) ) { _shutdownTasks.push_back(task); //ZTDEBUG("1 shutdown task added. %d\n", _shutdownTasks.size()); - + } } - + // Execute immediately if things are shutting down if(hasWaiter) task->run(); - + } - + bool ThreadQueue::removeShutdownTask(const Task& task) { - + Guard<FastLock> g(_lock); - + TaskList::iterator i = std::find(_shutdownTasks.begin(), _shutdownTasks.end(), task); bool removed = (i != _shutdownTasks.end()); if(removed) _shutdownTasks.erase(i); //ZTDEBUG("1 shutdown task removed (%d)-%d\n", removed, _shutdownTasks.size()); - + return removed; } diff --git a/dep/src/zthread/ThreadedExecutor.cxx b/dep/src/zthread/ThreadedExecutor.cxx index 44a213e8daa..e33dda4ab5e 100644 --- a/dep/src/zthread/ThreadedExecutor.cxx +++ b/dep/src/zthread/ThreadedExecutor.cxx @@ -31,11 +31,11 @@ namespace ZThread { namespace { - //! + //! class WaiterQueue { typedef std::deque<ThreadImpl*> ThreadList; - + typedef struct group_t { size_t id; size_t count; @@ -61,14 +61,14 @@ namespace ZThread { void operator()(const Group& grp) { count += grp.count; } operator size_t() { return count; } }; - + FastMutex _lock; GroupList _list; size_t _id; size_t _generation; public: - + WaiterQueue() : _id(0), _generation(0) { // At least one empty-group exists _list.push_back( Group(_id++) ); @@ -96,15 +96,15 @@ namespace ZThread { if((size_t)std::for_each(_list.begin(), _list.end(), counter()) < 1) return true; - // Update the waiter list for the active group + // Update the waiter list for the active group _list.back().waiters.push_back(self); size_t n = _list.back().id; m.acquire(); - + { - - Guard<Lockable, UnlockedScope> g2(g1); + + Guard<Lockable, UnlockedScope> g2(g1); state = timeout == 0 ? 
m.wait() : m.wait(timeout); } @@ -135,22 +135,22 @@ namespace ZThread { throw Interrupted_Exception(); default: throw Synchronization_Exception(); - } - + } + return true; } - + /** * Increment the active group count * * @pre at least 1 empty group exists - * @post at least 1 non-empty group exists + * @post at least 1 non-empty group exists */ std::pair<size_t, size_t> increment() { - + Guard<FastMutex> g(_lock); - + // At least one empty-group exists assert(!_list.empty()); @@ -169,7 +169,7 @@ namespace ZThread { // When the active group is being incremented, insert a new active group // to replace it if there were waiting threads - if(i == --_list.end() && !i->waiters.empty()) + if(i == --_list.end() && !i->waiters.empty()) _list.push_back(Group(_id++)); // At least 1 non-empty group exists @@ -178,13 +178,13 @@ namespace ZThread { return std::make_pair(n, _generation); } - + /** * Decrease the count for the group with the given id. * * @param n group id - * + * * @pre At least 1 non-empty group exists * @post At least 1 empty group exists */ @@ -198,7 +198,7 @@ namespace ZThread { // Find the requested group GroupList::iterator i = std::find_if(_list.begin(), _list.end(), by_id(n)); if(i == _list.end()) { - + // A group should never have been removed until // the final task in that group completed assert(0); @@ -207,11 +207,11 @@ namespace ZThread { // Decrease the count for tasks in this group, if(--i->count == 0 && i == _list.begin()) { - - do { + + do { // When the first group completes, wake all waiters for every - // group, starting from the first until a group that is not + // group, starting from the first until a group that is not // complete is reached /* @@ -219,29 +219,29 @@ namespace ZThread { if(i == --_list.end() && i->waiters.empty()) break; */ - + if( awaken(*i) ) { - - // If all waiters were awakened, remove the group + + // If all waiters were awakened, remove the group i = _list.erase(i); - + } else { - + { // Otherwise, unlock and yield allowing the waiter // lists to be updated if other threads are busy Guard<FastLock, UnlockedScope> g2(g1); ThreadImpl::yield(); - + } i = _list.begin(); } - - } while(i != _list.end() && i->count == 0); - + + } while(i != _list.end() && i->count == 0); + // Ensure that an active group exists if(_list.empty()) _list.push_back( Group(++_id) ); @@ -250,7 +250,7 @@ namespace ZThread { // At least one group exists assert(!_list.empty()); - + } /** @@ -261,74 +261,74 @@ namespace ZThread { return next ? _generation++ : _generation; } - + private: - + /** * Awaken all the waiters remaining in the given group - * + * * @return * - true if all the waiting threads were successfully awakened. * - false if there were one or more threads that could not be awakened. */ bool awaken(Group& grp) { - // Go through the waiter list in the given group; + // Go through the waiter list in the given group; for(ThreadList::iterator i = grp.waiters.begin(); i != grp.waiters.end();) { - + ThreadImpl* impl = *i; Monitor& m = impl->getMonitor(); - + // Try the monitor lock, if it cant be locked skip to the next waiter if(m.tryAcquire()) { - + // Notify the monitor & remove from the waiter list so time isn't // wasted checking it again. i = grp.waiters.erase(i); - + // Try to wake the waiter, it doesn't matter if this is successful - // or not (only fails when the monitor is already going to stop waiting). + // or not (only fails when the monitor is already going to stop waiting). 
m.notify(); m.release(); - + } else ++i; - + } - + return grp.waiters.empty(); } }; - //! Synchronization point for the Executor + //! Synchronization point for the Executor class ExecutorImpl { typedef std::deque<ThreadImpl*> ThreadList; bool _canceled; - FastMutex _lock; + FastMutex _lock; //! Worker threads ThreadList _threads; - + WaiterQueue _queue; public: ExecutorImpl() : _canceled(false) {} - WaiterQueue& getWaiterQueue() { + WaiterQueue& getWaiterQueue() { return _queue; } void registerThread(size_t generation) { - + // Interrupt slow starting threads if(getWaiterQueue().generation() != generation) ThreadImpl::current()->interrupt(); - // Enqueue for possible future interrupt() + // Enqueue for possible future interrupt() else { Guard<FastMutex> g(_lock); @@ -339,7 +339,7 @@ namespace ZThread { } void unregisterThread() { - + Guard<FastMutex> g(_lock); std::remove(_threads.begin(), _threads.end(), ThreadImpl::current() ); @@ -369,11 +369,11 @@ namespace ZThread { // Interrupt all the registered threads for(ThreadList::iterator i = _threads.begin(); i != _threads.end(); ++i) (*i)->interrupt(); - + // Bump the generation up, ensuring slow starting threads get this interrupt getWaiterQueue().generation( true ); - } + } }; /* ExecutorImpl */ @@ -392,7 +392,7 @@ namespace ZThread { : _impl(impl), _task(task) { std::pair<size_t, size_t> pr( _impl->getWaiterQueue().increment() ); - + _group = pr.first; _generation = pr.second; @@ -405,20 +405,20 @@ namespace ZThread { size_t generation() const { return _generation; } - + void run() { - + // Register this thread once its begun; the generation is used to ensure // threads that are slow starting are properly interrupted _impl->registerThread( generation() ); - + try { - _task->run(); + _task->run(); } catch(...) { /* consume the exceptions the work propogates */ } - + _impl->getWaiterQueue().decrement( group() ); // Unregister this thread @@ -434,30 +434,30 @@ namespace ZThread { ThreadedExecutor::ThreadedExecutor() : _impl(new ExecutorImpl) {} ThreadedExecutor::~ThreadedExecutor() {} - + void ThreadedExecutor::execute(const Task& task) { - + Thread t( new Worker(_impl, task) ); - } + } void ThreadedExecutor::interrupt() { _impl->interrupt(); } void ThreadedExecutor::cancel() { - _impl->cancel(); + _impl->cancel(); } - + bool ThreadedExecutor::isCanceled() { return _impl->isCanceled(); } - + void ThreadedExecutor::wait() { _impl->getWaiterQueue().wait(0); } - bool ThreadedExecutor::wait(unsigned long timeout) { + bool ThreadedExecutor::wait(unsigned long timeout) { return _impl->getWaiterQueue().wait(timeout == 0 ? 
1 : timeout); } diff --git a/dep/src/zthread/Time.cxx b/dep/src/zthread/Time.cxx index 2409d93cb79..ee2b84a4e8a 100644 --- a/dep/src/zthread/Time.cxx +++ b/dep/src/zthread/Time.cxx @@ -27,7 +27,7 @@ using namespace ZThread; Time::Time() { - + // System startup time static TimeStrategy firstHelper; TimeStrategy helper; @@ -37,7 +37,7 @@ Time::Time() { now -= then; - _seconds = now.seconds(); + _seconds = now.seconds(); _milliseconds = now.milliseconds(); } diff --git a/dep/src/zthread/linux/AtomicCount.cxx b/dep/src/zthread/linux/AtomicCount.cxx index 28c2381c3b4..18b31e3a8ce 100644 --- a/dep/src/zthread/linux/AtomicCount.cxx +++ b/dep/src/zthread/linux/AtomicCount.cxx @@ -47,7 +47,7 @@ typedef struct atomic_count_t { AtomicCount::AtomicCount() { _value = reinterpret_cast<void*>(new ATOMIC_COUNT); - + } AtomicCount::~AtomicCount() { @@ -55,19 +55,19 @@ AtomicCount::~AtomicCount() { delete reinterpret_cast<ATOMIC_COUNT*>(_value); } - + void AtomicCount::increment() { atomic_inc(&reinterpret_cast<ATOMIC_COUNT*>(_value)->count); - + } - + bool AtomicCount::decrement() { return atomic_dec_and_test(&reinterpret_cast<ATOMIC_COUNT*>(_value)->count); - + } - + }; #endif // __ZTATOMICCOUNTIMPL_H__ diff --git a/dep/src/zthread/macos/Monitor.cxx b/dep/src/zthread/macos/Monitor.cxx index ab7806b13df..a6a2f58cb37 100644 --- a/dep/src/zthread/macos/Monitor.cxx +++ b/dep/src/zthread/macos/Monitor.cxx @@ -29,20 +29,20 @@ using namespace ZThread; Monitor::Monitor() : _owner(0), _waiting(false), _pending(false) { - + if(MPCreateSemaphore(1, 0, &_sema) != noErr) { assert(0); throw Initialization_Exception(); } } - + Monitor::~Monitor() throw() { assert(!_waiting); OSStatus status = MPDeleteSemaphore(_sema); - if(status != noErr) + if(status != noErr) assert(false); } @@ -53,9 +53,9 @@ Monitor::STATE Monitor::wait(unsigned long timeout) { // http://developer.apple.com/techpubs/macosx/Carbon/oss/MultiPServices/Multiprocessing_Services/index.html?http://developer.apple.com/techpubs/macosx/Carbon/oss/MultiPServices/Multiprocessing_Services/Functions/Creating_and_ssage_Queues.html AbsoluteTime tTarget; - Duration waitDuration = - (timeout == 0) ? kDurationForever : (kDurationMillisecond * timeout); - + Duration waitDuration = + (timeout == 0) ? kDurationForever : (kDurationMillisecond * timeout); + if(waitDuration != kDurationForever) tTarget = AddDurationToAbsolute(waitDuration, UpTime()); @@ -66,13 +66,13 @@ Monitor::STATE Monitor::wait(unsigned long timeout) { _owner = MPCurrentTaskID(); STATE state(INVALID); - + // Serialize access to the state of the Monitor // and test the state to determine if a wait is needed. _waitLock.acquire(); if(pending(ANYTHING)) { - + // Return without waiting when possible state = next(); @@ -80,57 +80,57 @@ Monitor::STATE Monitor::wait(unsigned long timeout) { return state; } - // Unlock the external lock if a wait() is probably needed. + // Unlock the external lock if a wait() is probably needed. // Access to the state is still serial. _lock.release(); // Wait for a transition in the state that is of interest, this - // allows waits to exclude certain flags (e.g. INTERRUPTED) + // allows waits to exclude certain flags (e.g. INTERRUPTED) // for a single wait() w/o actually discarding those flags - // they will remain set until a wait interested in those flags // occurs. 
// Wait, ignoring signals _waiting = true; - + _waitLock.release(); // Update the wait time - if(waitDuration != kDurationForever) + if(waitDuration != kDurationForever) waitDuration = AbsoluteDeltaToDuration(tTarget, UpTime()); - // Sleep until a signal arrives or a timeout occurs + // Sleep until a signal arrives or a timeout occurs OSStatus status = MPWaitOnSemaphore(_sema, waitDuration); // Reacquire serialized access to the state _waitLock.acquire(); - + // Awaken only when the event is set or the timeout expired assert(status == kMPTimeoutErr || status == noErr); - + if(status == kMPTimeoutErr) push(TIMEDOUT); // Get the next available STATE - state = next(); + state = next(); - _waiting = false; + _waiting = false; - // Its possible that a timeout will wake the thread before a signal is + // Its possible that a timeout will wake the thread before a signal is // delivered. Absorb that leftover so the next wait isn't aborted right away if(status == kMPTimeoutErr && _pending) { - + status = MPWaitOnSemaphore(_sema, kDurationForever); assert(status == noErr); } _pending = false; - + // Acquire the internal lock & release the external lock _waitLock.release(); - // Reaquire the external lock, keep from deadlocking threads calling + // Reaquire the external lock, keep from deadlocking threads calling // notify(), interrupt(), etc. _lock.acquire(); @@ -143,7 +143,7 @@ bool Monitor::interrupt() { // Serialize access to the state _waitLock.acquire(); - + bool wasInterruptable = !pending(INTERRUPTED); bool hasWaiter = false; @@ -151,7 +151,7 @@ bool Monitor::interrupt() { if(wasInterruptable) { push(INTERRUPTED); - + wasInterruptable = false; if(_waiting && !_pending) { @@ -159,7 +159,7 @@ bool Monitor::interrupt() { _pending = true; hasWaiter = true; - } else + } else wasInterruptable = !(_owner == MPCurrentTaskID()); } @@ -180,7 +180,7 @@ bool Monitor::isInterrupted() { bool wasInterrupted = pending(INTERRUPTED); clear(INTERRUPTED); - + _waitLock.release(); return wasInterrupted; @@ -227,12 +227,12 @@ bool Monitor::cancel() { bool wasInterrupted = !pending(INTERRUPTED); bool hasWaiter = false; - + push(CANCELED); // Update the state if theres a waiter if(wasInterrupted) { - + push(INTERRUPTED); if(_waiting && !_pending) { @@ -243,9 +243,9 @@ bool Monitor::cancel() { } } - + _waitLock.release(); - + if(hasWaiter && !masked(Monitor::INTERRUPTED)) MPSignalSemaphore(_sema); @@ -254,12 +254,12 @@ bool Monitor::cancel() { } bool Monitor::isCanceled() { - + // Serialize access to the state _waitLock.acquire(); - + bool wasCanceled = Status::examine(CANCELED); - + if(_owner == MPCurrentTaskID()) clear(INTERRUPTED); diff --git a/dep/src/zthread/macos/ThreadOps.cxx b/dep/src/zthread/macos/ThreadOps.cxx index 6a1a4106877..ddb380992b0 100644 --- a/dep/src/zthread/macos/ThreadOps.cxx +++ b/dep/src/zthread/macos/ThreadOps.cxx @@ -27,25 +27,25 @@ namespace ZThread { -const ThreadOps ThreadOps::INVALID(0); +const ThreadOps ThreadOps::INVALID(0); + +ThreadOps::ThreadOps() : _queue(0), _tid(0) { -ThreadOps::ThreadOps() : _queue(0), _tid(0) { - if(MPCreateQueue(&_queue) != noErr) throw Initialization_Exception(); } ThreadOps::~ThreadOps() throw() { - + if(_queue != 0) { OSStatus status = MPDeleteQueue(_queue); - if(status != noErr) + if(status != noErr) assert(0); } - + } bool ThreadOps::join(ThreadOps* ops) { @@ -56,7 +56,7 @@ bool ThreadOps::join(ThreadOps* ops) { OSStatus status = MPWaitOnQueue(ops->_queue, NULL, NULL, NULL, kDurationForever); return status == noErr; - + } bool ThreadOps::yield() { @@ 
-91,13 +91,13 @@ OSStatus ThreadOps::_dispatch(void *arg) { // Run the task from the correct context task->run(); - + // Exit the thread MPExit(noErr); return noErr; } - + } // namespace ZThread diff --git a/dep/src/zthread/posix/Monitor.cxx b/dep/src/zthread/posix/Monitor.cxx index bb157dae0dc..5e4b34c54cf 100644 --- a/dep/src/zthread/posix/Monitor.cxx +++ b/dep/src/zthread/posix/Monitor.cxx @@ -31,19 +31,19 @@ namespace ZThread { Monitor::Monitor() : _owner(0), _waiting(false) { - + pthread_cond_init(&_waitCond, 0); pthread_mutex_init(&_waitLock, 0); } - + Monitor::~Monitor() { - + assert(!_waiting); pthread_cond_destroy(&_waitCond); pthread_mutex_destroy(&_waitLock); - + } Monitor::STATE Monitor::wait(unsigned long ms) { @@ -55,14 +55,14 @@ Monitor::STATE Monitor::wait(unsigned long ms) { _owner = pthread_self(); STATE state(INVALID); - + // Serialize access to the state of the Monitor // and test the state to determine if a wait is needed. - + pthread_mutex_lock(&_waitLock); - + if(pending(ANYTHING)) { - + // Return without waiting when possible state = next(); @@ -70,13 +70,13 @@ Monitor::STATE Monitor::wait(unsigned long ms) { return state; } - - // Unlock the external lock if a wait() is probably needed. + + // Unlock the external lock if a wait() is probably needed. // Access to the state is still serial. _lock.release(); - + // Wait for a transition in the state that is of interest, this - // allows waits to exclude certain flags (e.g. INTERRUPTED) + // allows waits to exclude certain flags (e.g. INTERRUPTED) // for a single wait() w/o actually discarding those flags - // they will remain set until a wait interested in those flags // occurs. @@ -85,20 +85,20 @@ Monitor::STATE Monitor::wait(unsigned long ms) { // Wait, ignoring signals _waiting = true; int status = 0; - - if(ms == 0) { // Wait forever - - do { // ignore signals unless the state is interesting + + if(ms == 0) { // Wait forever + + do { // ignore signals unless the state is interesting status = pthread_cond_wait(&_waitCond, &_waitLock); } while(status == EINTR && !pending(ANYTHING)); - + // Akwaken only when a state is pending assert(status == 0); - + } else { - + // Find the target time - TimeStrategy t; + TimeStrategy t; ms += t.milliseconds(); @@ -106,34 +106,34 @@ Monitor::STATE Monitor::wait(unsigned long ms) { ms %= 1000; // Convert to a timespec - struct ::timespec timeout; - - timeout.tv_sec = s; + struct ::timespec timeout; + + timeout.tv_sec = s; timeout.tv_nsec = ms*1000000; - - // Wait ignoring signals until the state is interesting - do { - + + // Wait ignoring signals until the state is interesting + do { + // When a timeout occurs, update the state to reflect that. status = pthread_cond_timedwait(&_waitCond, &_waitLock, &timeout); - + } while(status == EINTR && !pending(ANYTHING)); - + // Akwaken only when a state is pending or when the timeout expired assert(status == 0 || status == ETIMEDOUT); - + if(status == ETIMEDOUT) push(TIMEDOUT); } - + // Get the next available STATE - state = next(); - _waiting = false; - + state = next(); + _waiting = false; + pthread_mutex_unlock(&_waitLock); - - // Reaquire the external lock, keep from deadlocking threads calling + + // Reaquire the external lock, keep from deadlocking threads calling // notify(), interrupt(), etc. 
_lock.acquire(); @@ -147,17 +147,17 @@ bool Monitor::interrupt() { // Serialize access to the state pthread_mutex_lock(&_waitLock); - + bool wasInterruptable = !pending(INTERRUPTED); bool hadWaiter = _waiting; - + if(wasInterruptable) { - + // Update the state & wake the waiter if there is one push(INTERRUPTED); wasInterruptable = false; - + if(hadWaiter && !masked(Monitor::INTERRUPTED)) pthread_cond_signal(&_waitCond); else @@ -180,7 +180,7 @@ bool Monitor::isInterrupted() { bool wasInterrupted = pending(INTERRUPTED); clear(INTERRUPTED); - + pthread_mutex_unlock(&_waitLock); return wasInterrupted; @@ -193,7 +193,7 @@ bool Monitor::isCanceled() { pthread_mutex_lock(&_waitLock); bool wasCanceled = examine(CANCELED); - + if(pthread_equal(_owner, pthread_self())) clear(INTERRUPTED); @@ -210,17 +210,17 @@ bool Monitor::cancel() { bool wasInterrupted = !pending(INTERRUPTED); bool hadWaiter = _waiting; - + push(CANCELED); if(wasInterrupted) { - + // Update the state & wake the waiter if there is one push(INTERRUPTED); - + if(hadWaiter && !masked(Monitor::INTERRUPTED)) pthread_cond_signal(&_waitCond); - + } pthread_mutex_unlock(&_waitLock); @@ -235,16 +235,16 @@ bool Monitor::notify() { pthread_mutex_lock(&_waitLock); bool wasNotifyable = !pending(INTERRUPTED); - + if(wasNotifyable) { - + // Set the flag and wake the waiter if there // is one push(SIGNALED); - - if(_waiting) + + if(_waiting) pthread_cond_signal(&_waitCond); - + } pthread_mutex_unlock(&_waitLock); diff --git a/dep/src/zthread/posix/ThreadOps.cxx b/dep/src/zthread/posix/ThreadOps.cxx index e72ef78ada3..994b5903b28 100644 --- a/dep/src/zthread/posix/ThreadOps.cxx +++ b/dep/src/zthread/posix/ThreadOps.cxx @@ -31,7 +31,7 @@ namespace ZThread { -const ThreadOps ThreadOps::INVALID(0); +const ThreadOps ThreadOps::INVALID(0); bool ThreadOps::join(ThreadOps* ops) { @@ -57,7 +57,7 @@ bool ThreadOps::yield() { #if defined(HAVE_SCHED_YIELD) result = sched_yield() == 0; #endif - + return result; } @@ -67,11 +67,11 @@ bool ThreadOps::setPriority(ThreadOps* impl, Priority p) { assert(impl); bool result = true; - + #if !defined(ZTHREAD_DISABLE_PRIORITY) - + struct sched_param param; - + switch(p) { case Low: param.sched_priority = 0; @@ -83,7 +83,7 @@ bool ThreadOps::setPriority(ThreadOps* impl, Priority p) { default: param.sched_priority = 5; } - + result = pthread_setschedparam(impl->_tid, SCHED_OTHER, ¶m) == 0; #endif @@ -97,14 +97,14 @@ bool ThreadOps::getPriority(ThreadOps* impl, Priority& p) { assert(impl); bool result = true; - + #if !defined(ZTHREAD_DISABLE_PRIORITY) struct sched_param param; int policy = SCHED_OTHER; - + if(result = (pthread_getschedparam(impl->_tid, &policy, ¶m) == 0)) { - + // Convert to one of the PRIORITY values if(param.sched_priority < 10) p = Low; @@ -112,9 +112,9 @@ bool ThreadOps::getPriority(ThreadOps* impl, Priority& p) { p = Medium; else p = High; - + } - + #endif return result; @@ -132,16 +132,16 @@ extern "C" void *_dispatch(void *arg) { Runnable* task = reinterpret_cast<Runnable*>(arg); assert(task); - + // Run the task from the correct context task->run(); - + // Exit the thread pthread_exit((void**)0); return (void*)0; - + } - + } // namespace ZThread diff --git a/dep/src/zthread/vanilla/SimpleAtomicCount.cxx b/dep/src/zthread/vanilla/SimpleAtomicCount.cxx index fc63d141d6a..67f50d546dc 100644 --- a/dep/src/zthread/vanilla/SimpleAtomicCount.cxx +++ b/dep/src/zthread/vanilla/SimpleAtomicCount.cxx @@ -34,7 +34,7 @@ typedef struct atomic_count_t { FastLock lock; unsigned long count; - + atomic_count_t() : 
count(0) {} } ATOMIC_COUNT; @@ -43,7 +43,7 @@ AtomicCount::AtomicCount() { ATOMIC_COUNT* c = new ATOMIC_COUNT; _value = reinterpret_cast<void*>(c); - + } AtomicCount::~AtomicCount() { @@ -54,22 +54,22 @@ AtomicCount::~AtomicCount() { delete c; } - + //! Postfix decrement and return the current value size_t AtomicCount::operator--(int) { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - + Guard<FastLock> g(c->lock); return c->count--; } - + //! Postfix increment and return the current value size_t AtomicCount::operator++(int) { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - + Guard<FastLock> g(c->lock); return c->count++; @@ -79,17 +79,17 @@ size_t AtomicCount::operator++(int) { size_t AtomicCount::operator--() { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - + Guard<FastLock> g(c->lock); return --c->count; } - + //! Prefix increment and return the current value size_t AtomicCount::operator++() { - + ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - + Guard<FastLock> g(c->lock); return ++c->count; diff --git a/dep/src/zthread/win32/AtomicCount.cxx b/dep/src/zthread/win32/AtomicCount.cxx index 84cbf8c3ddc..7fe63b0da0c 100644 --- a/dep/src/zthread/win32/AtomicCount.cxx +++ b/dep/src/zthread/win32/AtomicCount.cxx @@ -32,7 +32,7 @@ namespace ZThread { AtomicCount::AtomicCount() { _value = reinterpret_cast<void*>(new LONG(0)); - + } AtomicCount::~AtomicCount() { @@ -41,20 +41,20 @@ AtomicCount::~AtomicCount() { delete reinterpret_cast<LPLONG>(_value); } - + void AtomicCount::increment() { ::InterlockedIncrement(reinterpret_cast<LPLONG>(_value)); - + } - + bool AtomicCount::decrement() { LONG v = ::InterlockedDecrement(reinterpret_cast<LPLONG>(_value)); return static_cast<unsigned long>(v) == 0; - + } - + }; #endif // __ZTATOMICCOUNTIMPL_H__ diff --git a/dep/src/zthread/win32/Monitor.cxx b/dep/src/zthread/win32/Monitor.cxx index 6e69487c054..fff056ffaf1 100644 --- a/dep/src/zthread/win32/Monitor.cxx +++ b/dep/src/zthread/win32/Monitor.cxx @@ -29,24 +29,24 @@ using namespace ZThread; Monitor::Monitor() : _owner(0), _waiting(false) { - - _handle = ::CreateEvent(0, TRUE, FALSE, 0); + + _handle = ::CreateEvent(0, TRUE, FALSE, 0); if(_handle == NULL) { assert(0); } } - + Monitor::~Monitor() { - + assert(!_waiting); - ::CloseHandle(_handle); + ::CloseHandle(_handle); } Monitor::STATE Monitor::wait(unsigned long ms) { - + // Update the owner on first use. The owner will not change, each // thread waits only on a single Monitor and a Monitor is never // shared @@ -54,13 +54,13 @@ Monitor::STATE Monitor::wait(unsigned long ms) { _owner = ::GetCurrentThreadId(); STATE state; //(INVALID); - + // Serialize access to the state of the Monitor // and test the state to determine if a wait is needed. _waitLock.acquire(); if(pending(ANYTHING)) { - + // Return without waiting when possible state = next(); @@ -68,12 +68,12 @@ Monitor::STATE Monitor::wait(unsigned long ms) { return state; } - // Unlock the external lock if a wait() is probably needed. + // Unlock the external lock if a wait() is probably needed. // Access to the state is still serial. _lock.release(); // Wait for a transition in the state that is of interest, this - // allows waits to exclude certain flags (e.g. INTERRUPTED) + // allows waits to exclude certain flags (e.g. INTERRUPTED) // for a single wait() w/o actually discarding those flags - // they will remain set until a wait interested in those flags // occurs. 
@@ -82,13 +82,13 @@ Monitor::STATE Monitor::wait(unsigned long ms) { // Wait, ignoring signals _waiting = true; - // Block until the event is set. + // Block until the event is set. _waitLock.release(); // The event is manual reset so this lack of atmoicity will not // be an issue - DWORD dwResult = + DWORD dwResult = ::WaitForSingleObject(_handle, ((ms == 0) ? INFINITE : (DWORD)ms)); // Reacquire serialized access to the state @@ -99,20 +99,20 @@ Monitor::STATE Monitor::wait(unsigned long ms) { if(dwResult == WAIT_TIMEOUT) push(TIMEDOUT); - + // Get the next available STATE - state = next(); - _waiting = false; + state = next(); + _waiting = false; ::ResetEvent(_handle); // Acquire the internal lock & release the external lock _waitLock.release(); - - // Reaquire the external lock, keep from deadlocking threads calling + + // Reaquire the external lock, keep from deadlocking threads calling // notify(), interrupt(), etc. _lock.acquire(); - + return state; } @@ -122,12 +122,12 @@ bool Monitor::interrupt() { // Serialize access to the state _waitLock.acquire(); - + bool wasInterruptable = !pending(INTERRUPTED); bool hadWaiter = _waiting; - + if(wasInterruptable) { - + // Update the state & wake the waiter if there is one push(INTERRUPTED); @@ -140,9 +140,9 @@ bool Monitor::interrupt() { assert(0); } - } else + } else wasInterruptable = !(_owner == ::GetCurrentThreadId()); - + } _waitLock.release(); @@ -159,7 +159,7 @@ bool Monitor::isInterrupted() { bool wasInterrupted = pending(INTERRUPTED); clear(INTERRUPTED); - + _waitLock.release(); return wasInterrupted; @@ -173,15 +173,15 @@ bool Monitor::notify() { _waitLock.acquire(); bool wasNotifyable = !pending(INTERRUPTED); - + if(wasNotifyable) { - + // Set the flag and wake the waiter if there // is one push(SIGNALED); - + // If there is a waiter then send the signal. - if(_waiting) + if(_waiting) if(::SetEvent(_handle) == FALSE) { assert(0); } @@ -202,20 +202,20 @@ bool Monitor::cancel() { bool wasInterrupted = !pending(INTERRUPTED); bool hadWaiter = _waiting; - + push(CANCELED); if(wasInterrupted) { - + // Update the state & wake the waiter if there is one push(INTERRUPTED); - + // If there is a waiter then send the signal. - if(hadWaiter && !masked(Monitor::INTERRUPTED)) + if(hadWaiter && !masked(Monitor::INTERRUPTED)) if(::SetEvent(_handle) == FALSE) { assert(0); } - + } _waitLock.release(); @@ -230,7 +230,7 @@ bool Monitor::isCanceled() { _waitLock.acquire(); bool wasCanceled = examine(CANCELED); - + if(_owner == ::GetCurrentThreadId()) clear(INTERRUPTED); diff --git a/dep/src/zthread/win32/ThreadOps.cxx b/dep/src/zthread/win32/ThreadOps.cxx index 6e8fb8d3b71..8812b9996b1 100644 --- a/dep/src/zthread/win32/ThreadOps.cxx +++ b/dep/src/zthread/win32/ThreadOps.cxx @@ -26,7 +26,7 @@ namespace ZThread { -const ThreadOps ThreadOps::INVALID(0); +const ThreadOps ThreadOps::INVALID(0); /** * Detect OS at runtime and attempt to locate the SwitchToThread @@ -46,10 +46,10 @@ public: v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); if(::GetVersionEx(&v) && (v.dwPlatformId == VER_PLATFORM_WIN32_NT)) { - + // Uses GetModuleHandle() so the reference count on the dll is // not affected. There is a warning about race conditions involving - // this function being called as FreeLibrary() completes; however + // this function being called as FreeLibrary() completes; however // nearly all win32 applications load this particular and will keep it // in memory until the process exits. 
HINSTANCE hInst = ::GetModuleHandle("Kernel32.dll"); @@ -62,10 +62,10 @@ public: } bool operator()() { - + // Attempt to yield using the best function available - if(!_fnYield || !_fnYield()) - ::Sleep(0); + if(!_fnYield || !_fnYield()) + ::Sleep(0); return true; @@ -78,8 +78,8 @@ bool ThreadOps::join(ThreadOps* ops) { assert(ops); assert(ops->_tid != 0); assert(ops->_hThread != NULL); - - if(::WaitForSingleObjectEx(ops->_hThread, INFINITE, FALSE) != WAIT_OBJECT_0) + + if(::WaitForSingleObjectEx(ops->_hThread, INFINITE, FALSE) != WAIT_OBJECT_0) return false; ::CloseHandle(ops->_hThread); @@ -102,11 +102,11 @@ bool ThreadOps::yield() { bool ThreadOps::setPriority(ThreadOps* impl, Priority p) { assert(impl); - + #if !defined(ZTHREAD_DISABLE_PRIORITY) bool result; - + // Convert int n; switch(p) { @@ -121,7 +121,7 @@ bool ThreadOps::setPriority(ThreadOps* impl, Priority p) { n = THREAD_PRIORITY_NORMAL; } - + result = (::SetThreadPriority(impl->_hThread, n) != THREAD_PRIORITY_ERROR_RETURN); return result; @@ -137,7 +137,7 @@ bool ThreadOps::getPriority(ThreadOps* impl, Priority& p) { assert(impl); bool result = true; - + #if !defined(ZTHREAD_DISABLE_PRIORITY) // Convert to one of the PRIORITY values @@ -154,7 +154,7 @@ bool ThreadOps::getPriority(ThreadOps* impl, Priority& p) { default: p = Medium; } - + #endif return result; @@ -191,7 +191,7 @@ unsigned int __stdcall ThreadOps::_dispatch(void *arg) { #endif return 0; - + } } diff --git a/dep/src/zthread/win9x/AtomicCount.cxx b/dep/src/zthread/win9x/AtomicCount.cxx index 2bf07dcd2e7..c569af1a8a0 100644 --- a/dep/src/zthread/win9x/AtomicCount.cxx +++ b/dep/src/zthread/win9x/AtomicCount.cxx @@ -29,12 +29,12 @@ namespace ZThread { typedef struct atomic_count_t { - + CRITICAL_SECTION cs; size_t count; atomic_count_t() : count(0) {} - + } ATOMIC_COUNT; AtomicCount::AtomicCount() { @@ -42,13 +42,13 @@ AtomicCount::AtomicCount() { ATOMIC_COUNT* c = new ATOMIC_COUNT; _value = reinterpret_cast<void*>(c); ::InitializeCriticalSection(&c->cs); - + } AtomicCount::~AtomicCount() { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - assert(c->count == 0); + assert(c->count == 0); ::DeleteCriticalSection(&c->cs); delete c; @@ -58,8 +58,8 @@ AtomicCount::~AtomicCount() { size_t AtomicCount::operator--(int) { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - size_t value; - + size_t value; + ::EnterCriticalSection(&c->cs); value = c->count--; ::LeaveCriticalSection(&c->cs); @@ -67,13 +67,13 @@ size_t AtomicCount::operator--(int) { return value; } - + //! Postfix increment and return the current value size_t AtomicCount::operator++(int) { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - size_t value; - + size_t value; + ::EnterCriticalSection(&c->cs); value = c->count++; ::LeaveCriticalSection(&c->cs); @@ -86,8 +86,8 @@ size_t AtomicCount::operator++(int) { size_t AtomicCount::operator--() { ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - size_t value; - + size_t value; + ::EnterCriticalSection(&c->cs); value = --c->count; ::LeaveCriticalSection(&c->cs); @@ -95,13 +95,13 @@ size_t AtomicCount::operator--() { return value; } - + //! 
Prefix increment and return the current value size_t AtomicCount::operator++() { - + ATOMIC_COUNT* c = reinterpret_cast<ATOMIC_COUNT*>(_value); - size_t value; - + size_t value; + ::EnterCriticalSection(&c->cs); value = ++c->count; ::LeaveCriticalSection(&c->cs); diff --git a/src/bindings/scripts/scripts/zone/naxxramas/boss_sapphiron.cpp b/src/bindings/scripts/scripts/zone/naxxramas/boss_sapphiron.cpp index 80b4fa49782..333d13e91f9 100644 --- a/src/bindings/scripts/scripts/zone/naxxramas/boss_sapphiron.cpp +++ b/src/bindings/scripts/scripts/zone/naxxramas/boss_sapphiron.cpp @@ -108,7 +108,7 @@ struct TRINITY_DLL_DECL boss_sapphironAI : public ScriptedAI m_creature->HandleEmoteCommand(EMOTE_ONESHOT_LIFTOFF); m_creature->AddUnitMovementFlag(MOVEMENTFLAG_LEVITATING + MOVEMENTFLAG_ONTRANSPORT); m_creature->GetMotionMaster()->Clear(false); - m_creature->GetMotionMaster()->MoveIdle(); + m_creature->GetMotionMaster()->MoveIdle(); m_creature->SetHover(true); Icebolt_Timer = 4000; Icebolt_Count = 0; @@ -132,16 +132,16 @@ struct TRINITY_DLL_DECL boss_sapphironAI : public ScriptedAI }else Icebolt_Timer -= diff; if(Icebolt_Count == 5 && IsInFly && FrostBreath_Timer < diff ) - { + { DoScriptText(EMOTE_BREATH, m_creature); DoCast(m_creature->getVictim(),SPELL_FROST_BREATH); land_Timer = 2000; IsInFly = false; FrostBreath_Timer = 6000; - }else FrostBreath_Timer -= diff; + }else FrostBreath_Timer -= diff; if(!IsInFly && land_Timer < diff) - { + { phase = 1; m_creature->HandleEmoteCommand(EMOTE_ONESHOT_LAND); m_creature->RemoveUnitMovementFlag(MOVEMENTFLAG_LEVITATING + MOVEMENTFLAG_ONTRANSPORT); diff --git a/src/bindings/scripts/scripts/zone/sunwell_plateau/boss_kalecgos.cpp b/src/bindings/scripts/scripts/zone/sunwell_plateau/boss_kalecgos.cpp index 763317d1457..539a3db3ab4 100644 --- a/src/bindings/scripts/scripts/zone/sunwell_plateau/boss_kalecgos.cpp +++ b/src/bindings/scripts/scripts/zone/sunwell_plateau/boss_kalecgos.cpp @@ -147,7 +147,7 @@ struct TRINITY_DLL_DECL boss_kalecgosAI : public ScriptedAI TalkSequence = 0; isFriendly = false; isEnraged = false; - isBanished = false; + isBanished = false; } void DamageTaken(Unit *done_by, uint32 &damage) @@ -191,7 +191,7 @@ struct TRINITY_DLL_DECL boss_kalecgosAI : public ScriptedAI switch(TalkSequence) { case 1: - m_creature->setFaction(35); + m_creature->setFaction(35); TalkTimer = 1000; break; case 2: @@ -276,7 +276,7 @@ struct TRINITY_DLL_DECL boss_sathrovarrAI : public ScriptedAI CheckTimer = 1000; ResetThreat = 1000; isEnraged = false; - isBanished = false; + isBanished = false; } void Aggro(Unit* who) @@ -339,7 +339,7 @@ struct TRINITY_DLL_DECL boss_sathrovarrAI : public ScriptedAI if(Player* i_pl = i->getSource()) if(i_pl->HasAura(AURA_SPECTRAL_REALM,0)) i_pl->RemoveAurasDueToSpell(AURA_SPECTRAL_REALM); - } + } void UpdateAI(const uint32 diff) { @@ -471,7 +471,7 @@ struct TRINITY_DLL_DECL boss_kalecAI : public ScriptedAI void UpdateAI(const uint32 diff) { if (!UpdateVictim()) - return; + return; if(YellTimer < diff) { @@ -559,7 +559,7 @@ void boss_kalecgosAI::UpdateAI(const uint32 diff) DoCast(m_creature, SPELL_ENRAGE, true); isEnraged = true; } - + if(!isBanished && (m_creature->GetHealth()*100)/m_creature->GetMaxHealth() < 1) { if(Unit *Sath = Unit::GetUnit(*m_creature, SathGUID)) diff --git a/src/bindings/scripts/scripts/zone/tempest_keep/the_eye/boss_astromancer.cpp b/src/bindings/scripts/scripts/zone/tempest_keep/the_eye/boss_astromancer.cpp index e86c7565628..fa177b7b240 100644 --- 
a/src/bindings/scripts/scripts/zone/tempest_keep/the_eye/boss_astromancer.cpp +++ b/src/bindings/scripts/scripts/zone/tempest_keep/the_eye/boss_astromancer.cpp @@ -41,7 +41,7 @@ EndScriptData */ #define SPELL_FEAR 29321 #define SPELL_VOID_BOLT 39329 #define SPELL_SPOTLIGHT 25824 - + #define CENTER_X 432.909f #define CENTER_Y -373.424f #define CENTER_Z 17.9608f @@ -100,7 +100,7 @@ struct TRINITY_DLL_DECL boss_high_astromancer_solarianAI : public ScriptedAI float Portals[3][3]; bool AppearDelay; - bool BlindingLight; + bool BlindingLight; void Reset() { @@ -134,7 +134,7 @@ struct TRINITY_DLL_DECL boss_high_astromancer_solarianAI : public ScriptedAI void StartEvent() { DoScriptText(SAY_AGGRO, m_creature); - + if(pInstance) pInstance->SetData(DATA_HIGHASTROMANCERSOLARIANEVENT, IN_PROGRESS); } @@ -171,7 +171,7 @@ struct TRINITY_DLL_DECL boss_high_astromancer_solarianAI : public ScriptedAI { if (Unit* target = SelectUnit(SELECT_TARGET_RANDOM, 0)) Summoned->AI()->AttackStart(target); - + Summons.Summon(Summoned); } } @@ -215,7 +215,7 @@ struct TRINITY_DLL_DECL boss_high_astromancer_solarianAI : public ScriptedAI } AppearDelay_Timer = 2000; }else AppearDelay_Timer -= diff; - } + } if (Phase == 1) { @@ -236,12 +236,12 @@ struct TRINITY_DLL_DECL boss_high_astromancer_solarianAI : public ScriptedAI if(!m_creature->HasInArc(2.5f, target)) target = m_creature->getVictim(); - if(target) + if(target) DoCast(target, SPELL_ARCANE_MISSILES); } ArcaneMissiles_Timer = 3000; }else ArcaneMissiles_Timer -= diff; - + if (MarkOfTheSolarian_Timer < diff) { DoCast(m_creature->getVictim(), MARK_OF_SOLARIAN); diff --git a/src/game/SpellAuras.cpp b/src/game/SpellAuras.cpp index 06b092cf8c4..40edea80ce9 100644 --- a/src/game/SpellAuras.cpp +++ b/src/game/SpellAuras.cpp @@ -5680,7 +5680,7 @@ void Aura::PeriodicTick() int32 gain = pCaster->ModifyPower(power,gain_amount); m_target->AddThreat(pCaster, float(gain) * 0.5f, GetSpellSchoolMask(GetSpellProto()), GetSpellProto()); } - + // Mark of Kaz'rogal if(GetId() == 31447 && m_target->GetPower(power) == 0) { diff --git a/src/game/SpellEffects.cpp b/src/game/SpellEffects.cpp index b92202375dc..db4df9a5862 100644 --- a/src/game/SpellEffects.cpp +++ b/src/game/SpellEffects.cpp @@ -653,7 +653,7 @@ void Spell::EffectDummy(uint32 i) if(ihit->effectMask & (1<<i)) { Unit* casttarget = Unit::GetUnit((*unitTarget), ihit->targetGUID); - if(casttarget) + if(casttarget) m_caster->DealDamage(casttarget, damage, NULL, SPELL_DIRECT_DAMAGE, SPELL_SCHOOL_MASK_ARCANE, spellInfo, false); } } @@ -4978,7 +4978,7 @@ void Spell::EffectScriptEffect(uint32 effIndex) } case 48025: // Headless Horseman's Mount { - if(!unitTarget) + if(!unitTarget) return; if(unitTarget) @@ -4987,7 +4987,7 @@ void Spell::EffectScriptEffect(uint32 effIndex) { case 75: unitTarget->CastSpell(unitTarget, 51621, true); break;; case 150: unitTarget->CastSpell(unitTarget, 48024, true); break; - case 225: unitTarget->CastSpell(unitTarget, 51617, true); break; + case 225: unitTarget->CastSpell(unitTarget, 51617, true); break; case 300: unitTarget->CastSpell(unitTarget, 48023, true); break; default: break; } |
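The bulk of the whitespace-only hunks above touch the three platform Monitor backends (macos, posix, win32), which all follow the same handshake: serialize access to a small pending-state word, release the caller's external lock, block on a native primitive until a signal or a timeout arrives, then consume the pending flag and reacquire the external lock. As a rough standalone sketch of that wait/notify core only — plain C++11 with std::condition_variable standing in for MPWaitOnSemaphore, pthread_cond_wait/pthread_cond_timedwait and WaitForSingleObject; the INTERRUPTED/CANCELED flags, the external-lock release, and the real ZThread class layout are omitted, so none of this is the library's actual API:

#include <chrono>
#include <condition_variable>
#include <mutex>

// Hypothetical stand-in for the wait/notify core of ZThread's per-platform Monitor.
class MiniMonitor {
public:
    enum State { SIGNALED, TIMEDOUT };

    // Block until notify() runs or `ms` milliseconds elapse (ms == 0 means wait forever,
    // mirroring kDurationForever / INFINITE in the macos and win32 backends).
    State wait(unsigned long ms) {
        std::unique_lock<std::mutex> g(_waitLock);   // serialize access to the pending flag
        bool woke = true;
        if (ms == 0)
            _waitCond.wait(g, [this] { return _signaled; });
        else
            woke = _waitCond.wait_for(g, std::chrono::milliseconds(ms),
                                      [this] { return _signaled; });
        _signaled = false;                           // consume the pending signal, like next()/clear()
        return woke ? SIGNALED : TIMEDOUT;
    }

    // Set the pending flag, then wake the waiter if there is one.
    void notify() {
        std::lock_guard<std::mutex> g(_waitLock);
        _signaled = true;
        _waitCond.notify_one();
    }

private:
    std::mutex _waitLock;
    std::condition_variable _waitCond;
    bool _signaled = false;
};

The predicate form of wait/wait_for re-checks the flag after every wakeup, which plays the same role as the `while(status == EINTR && !pending(ANYTHING))` retry loops written out by hand in the posix backend.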