[asterisk-scf-commits] asterisk-scf/integration/ice-util-cpp.git branch "workqueue" updated.

Commits to the Asterisk SCF project code repositories asterisk-scf-commits at lists.digium.com
Wed Apr 6 17:10:32 CDT 2011


branch "workqueue" has been updated
       via  a823445114cf5e0b548c7467e7b8e4a43a4a6e92 (commit)
       via  4e10e0ba17d37f03ab442712a714628d31408bf0 (commit)
      from  86abba83aaa0ab18fcf39fb9de17064f5bdce030 (commit)

Summary of changes:
 ThreadPool/src/ThreadPool.cpp      |   72 ++++---
 ThreadPool/test/TestThreadPool.cpp |  389 +++++++++++++++++++++++++++++++++++-
 2 files changed, 426 insertions(+), 35 deletions(-)


- Log -----------------------------------------------------------------
commit a823445114cf5e0b548c7467e7b8e4a43a4a6e92
Author: Mark Michelson <mmichelson at digium.com>
Date:   Wed Apr 6 17:09:16 2011 -0500

    Add more tests to the ThreadPool testsuite.
    
    The last test currently fails due to invalid use of an iterator
    in the ThreadPoolPriv's shrink() method. Hint: Don't use
    vector::erase inside a for loop and expect the iterator to
    still be usable.

diff --git a/ThreadPool/src/ThreadPool.cpp b/ThreadPool/src/ThreadPool.cpp
index 8b60ac4..20ad0bd 100644
--- a/ThreadPool/src/ThreadPool.cpp
+++ b/ThreadPool/src/ThreadPool.cpp
@@ -175,14 +175,16 @@ public:
     
         // If we've made it here, then it means that there weren't enough idle
         // threads to kill. We'll need to zombify some active threads then.
+        
+        std::cout << "Not enough idle threads to kill. About to zombify " << threadsToKill << " threads" << std::endl;
         for (std::vector<WorkerThread*>::iterator i = mActiveThreads.begin();
                 i != mActiveThreads.end(); ++i)
         {
             //Active threads, on the other hand, need to at least temporarily be
             //pushed into the zombie container.
             mZombieThreads.push_back(*i);
-            mActiveThreads.erase(i);
             (*i)->poke(Zombie);
+            mActiveThreads.erase(i);
     
             if (--threadsToKill == 0)
             {
diff --git a/ThreadPool/test/TestThreadPool.cpp b/ThreadPool/test/TestThreadPool.cpp
index 0f7e35e..55ba29b 100644
--- a/ThreadPool/test/TestThreadPool.cpp
+++ b/ThreadPool/test/TestThreadPool.cpp
@@ -78,17 +78,53 @@ public:
     SimpleTask() : taskExecuted(false) { }
     void execute()
     {
+        boost::unique_lock<boost::mutex> lock(mLock);
         taskExecuted = true;
+        mCond.notify_one();
     }
     bool taskExecuted;
+
+    boost::mutex mLock;
+    boost::condition_variable mCond;
 };
 
 typedef IceUtil::Handle<SimpleTask> SimpleTaskPtr;
 
+class ComplexTask : public Work
+{
+public:
+    ComplexTask() : mContinue(false), taskExecuted(false) { }
+
+    void execute()
+    {
+        //Complex tasks will start their execution
+        //but then halt until they are poked.
+        boost::unique_lock<boost::mutex> lock(mLock);
+        while (!mContinue)
+        {
+            mStall.wait(lock);
+        }
+        //We've been poked, so let's finish up.
+        taskExecuted = true;
+        mDone.notify_one();
+    }
+
+    boost::mutex mLock;
+    boost::condition_variable mStall;
+    boost::condition_variable mDone;
+
+    bool mContinue;
+    bool taskExecuted;
+};
+
+typedef IceUtil::Handle<ComplexTask> ComplexTaskPtr;
+
 BOOST_AUTO_TEST_SUITE(ThreadPoolTest)
 
 BOOST_AUTO_TEST_CASE(addWork)
 {
+    BOOST_TEST_MESSAGE("Running addWork test");
+
     TestListenerPtr listener(new TestListener);
     QueuePtr queue(new WorkQueue());
     SimpleTaskPtr work(new SimpleTask());
@@ -106,6 +142,8 @@ BOOST_AUTO_TEST_CASE(addWork)
 
 BOOST_AUTO_TEST_CASE(threadCreation)
 {
+    BOOST_TEST_MESSAGE("Running threadCreation test");
+
     TestListenerPtr listener(new TestListener);
     QueuePtr queue(new WorkQueue());
     PoolPtr pool(new ThreadPool(listener, queue));
@@ -130,6 +168,8 @@ BOOST_AUTO_TEST_CASE(threadCreation)
 
 BOOST_AUTO_TEST_CASE(threadDestruction)
 {
+    BOOST_TEST_MESSAGE("Running threadDestruction test");
+
     TestListenerPtr listener(new TestListener);
     QueuePtr queue(new WorkQueue());
     PoolPtr pool(new ThreadPool(listener, queue));
@@ -167,4 +207,283 @@ BOOST_AUTO_TEST_CASE(threadDestruction)
     BOOST_CHECK(listener->mZombie == 0);
 }
 
+BOOST_AUTO_TEST_CASE(oneTaskOneThread)
+{
+    BOOST_TEST_MESSAGE("Running oneTaskOneThread test");
+
+    TestListenerPtr listener(new TestListener);
+    QueuePtr queue(new WorkQueue());
+    SimpleTaskPtr work(new SimpleTask());
+    PoolPtr pool(new ThreadPool(listener, queue));
+
+    queue->enqueueWork(work);
+    pool->setSize(1);
+
+    //The thread should execute the work and then
+    //become idle.
+    {
+        boost::unique_lock<boost::mutex> lock(work->mLock);
+        while (!work->taskExecuted)
+        {
+            work->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(work->taskExecuted == true);
+    BOOST_CHECK(listener->mEmptyNotice == true);
+
+    //The thread should be idle now. Let's make sure
+    //that's happening.
+    {
+        boost::unique_lock<boost::mutex> lock(listener->mLock);
+        while (listener->mIdle == 0)
+        {
+            listener->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(listener->mIdle == 1);
+    BOOST_CHECK(listener->mActive == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+}
+
+BOOST_AUTO_TEST_CASE(oneThreadOneTask)
+{
+    BOOST_TEST_MESSAGE("Running oneThreadOneTask test");
+
+    TestListenerPtr listener(new TestListener);
+    QueuePtr queue(new WorkQueue());
+    PoolPtr pool(new ThreadPool(listener, queue));
+    SimpleTaskPtr work(new SimpleTask());
+
+    pool->setSize(1);
+
+    {
+        boost::unique_lock<boost::mutex> lock(listener->mLock);
+        while (listener->mIdle == 0)
+        {
+            listener->mCond.wait(lock);
+        }
+    }
+
+    //The thread is idle now. When we queue work, it should
+    //become active and execute the work.
+    queue->enqueueWork(work);
+
+    {
+        boost::unique_lock<boost::mutex> lock(work->mLock);
+        while (!work->taskExecuted)
+        {
+            work->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(work->taskExecuted == true);
+    BOOST_CHECK(listener->mEmptyNotice == true);
+
+    //And of course, the thread should become idle once work is done
+    {
+        boost::unique_lock<boost::mutex> lock(listener->mLock);
+        while (listener->mIdle == 0)
+        {
+            listener->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(listener->mIdle == 1);
+    BOOST_CHECK(listener->mActive == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+}
+
+BOOST_AUTO_TEST_CASE(oneThreadMultipleTasks)
+{
+    BOOST_TEST_MESSAGE("Running oneThreadMultipleTasks test");
+
+    TestListenerPtr listener(new TestListener);
+    QueuePtr queue(new WorkQueue());
+    PoolPtr pool(new ThreadPool(listener, queue));
+    SimpleTaskPtr work1(new SimpleTask());
+    SimpleTaskPtr work2(new SimpleTask());
+    SimpleTaskPtr work3(new SimpleTask());
+    WorkSeq works;
+
+    works.push_back(work1);
+    works.push_back(work2);
+    works.push_back(work3);
+
+    queue->enqueueWorkSeq(works);
+
+    pool->setSize(1);
+    //The single thread should execute all three tests. We've
+    //ensured in our queue tests that execution happens in the
+    //correct order, so we just need to wait for the third task
+    //to be complete.
+    {
+        boost::unique_lock<boost::mutex> lock(work3->mLock);
+        while (!work3->taskExecuted)
+        {
+            work3->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(work1->taskExecuted == true);
+    BOOST_CHECK(work2->taskExecuted == true);
+    BOOST_CHECK(work3->taskExecuted == true);
+
+    //And of course, the thread should become idle once work is done
+    {
+        boost::unique_lock<boost::mutex> lock(listener->mLock);
+        while (listener->mIdle == 0)
+        {
+            listener->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(listener->mIdle == 1);
+    BOOST_CHECK(listener->mActive == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+}
+
+BOOST_AUTO_TEST_CASE(taskDistribution)
+{
+    BOOST_TEST_MESSAGE("Running taskDistribution test");
+
+    TestListenerPtr listener(new TestListener);
+    QueuePtr queue(new WorkQueue());
+    PoolPtr pool(new ThreadPool(listener, queue));
+    ComplexTaskPtr work1(new ComplexTask());
+    ComplexTaskPtr work2(new ComplexTask());
+    WorkSeq works;
+
+    works.push_back(work1);
+    works.push_back(work2);
+
+    queue->enqueueWorkSeq(works);
+
+    pool->setSize(2);
+    
+    //Since these tasks halt until they are poked,
+    //the two tasks should be evenly divided amongst
+    //the threads.
+    BOOST_CHECK(listener->mActive == 2);
+    BOOST_CHECK(listener->mIdle == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+
+    //Cool, so let's give those threads a poke
+    {
+        boost::unique_lock<boost::mutex> lock1(work1->mLock);
+        boost::unique_lock<boost::mutex> lock2(work2->mLock);
+        work1->mContinue = true;
+        work2->mContinue = true;
+        work1->mStall.notify_one();
+        work2->mStall.notify_one();
+    }
+
+    //Now be sure the tasks finish
+    {
+        boost::unique_lock<boost::mutex> lock1(work1->mLock);
+        boost::unique_lock<boost::mutex> lock2(work2->mLock);
+        while (!work1->taskExecuted)
+        {
+            work1->mDone.wait(lock1);
+        }
+        while (!work2->taskExecuted)
+        {
+            work2->mDone.wait(lock2);
+        }
+    }
+
+    BOOST_CHECK(work1->taskExecuted == true);
+    BOOST_CHECK(work2->taskExecuted == true);
+
+    //And of course, the threads should become idle once work is done
+    {
+        boost::unique_lock<boost::mutex> lock(listener->mLock);
+        while (listener->mIdle < 2)
+        {
+            listener->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(listener->mIdle == 2);
+    BOOST_CHECK(listener->mActive == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+}
+
+BOOST_AUTO_TEST_CASE(zombies)
+{
+    BOOST_TEST_MESSAGE("Running zombies test");
+
+    TestListenerPtr listener(new TestListener);
+    QueuePtr queue(new WorkQueue());
+    PoolPtr pool(new ThreadPool(listener, queue));
+    ComplexTaskPtr work1(new ComplexTask());
+    ComplexTaskPtr work2(new ComplexTask());
+    WorkSeq works;
+
+    works.push_back(work1);
+    works.push_back(work2);
+
+    queue->enqueueWorkSeq(works);
+
+    pool->setSize(2);
+    
+    //Since these tasks halt until they are poked,
+    //the two tasks should be evenly divided amongst
+    //the threads.
+    BOOST_CHECK(listener->mActive == 2);
+    BOOST_CHECK(listener->mIdle == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+
+    //Now we'll set the size down to 0. This should
+    //result in the active threads immediately becoming
+    //zombies.
+    pool->setSize(0);
+    BOOST_CHECK(listener->mActive == 0);
+    BOOST_CHECK(listener->mIdle == 0);
+    BOOST_CHECK(listener->mZombie == 2);
+
+    //Now we should still be able to poke the work and
+    //have it complete executing.
+    {
+        boost::unique_lock<boost::mutex> lock1(work1->mLock);
+        boost::unique_lock<boost::mutex> lock2(work2->mLock);
+        work1->mContinue = true;
+        work2->mContinue = true;
+        work1->mStall.notify_one();
+        work2->mStall.notify_one();
+    }
+
+    //Now be sure the tasks finish
+    {
+        boost::unique_lock<boost::mutex> lock1(work1->mLock);
+        boost::unique_lock<boost::mutex> lock2(work2->mLock);
+        while (!work1->taskExecuted)
+        {
+            work1->mDone.wait(lock1);
+        }
+        while (!work2->taskExecuted)
+        {
+            work2->mDone.wait(lock2);
+        }
+    }
+
+    BOOST_CHECK(work1->taskExecuted == true);
+    BOOST_CHECK(work2->taskExecuted == true);
+
+    //Since the tasks finished executing, the zombie
+    //threads should die.
+    {
+        boost::unique_lock<boost::mutex> lock(listener->mLock);
+        while (listener->mZombie > 0)
+        {
+            listener->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(listener->mActive == 0);
+    BOOST_CHECK(listener->mIdle == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+}
+
 BOOST_AUTO_TEST_SUITE_END()

commit 4e10e0ba17d37f03ab442712a714628d31408bf0
Author: Mark Michelson <mmichelson at digium.com>
Date:   Wed Apr 6 15:38:39 2011 -0500

    Fix up some issues I saw while testing the ThreadPool
    
    * Don't use shared pointers if we don't have to. There were a few
      issues fixed here. First, this gets rid of circular reference
      issues that I had feared. Second, make sure not to write code
      that results in independent shared_ptrs to the same object. The
      object will get deleted at an unexpected time.
    
      There may actually be another place where bare pointers will
      work well too. We shall see.
    
    * I referenced the wrong vector in zombieThreadDead
    
    * Now calling stateChanged() functions with the ThreadPoolPriv's
      lock held. This corrected an issue where state changes were
      sometimes sent out of order. I really don't like doing this
      and would like to formulate a fix.
    
    * Added some cout debug messages to aid in testing. I may remove
      these or I may add logger integration if it turns out these
      sorts of messages may be helpful.

diff --git a/ThreadPool/src/ThreadPool.cpp b/ThreadPool/src/ThreadPool.cpp
index 5babc9f..8b60ac4 100644
--- a/ThreadPool/src/ThreadPool.cpp
+++ b/ThreadPool/src/ThreadPool.cpp
@@ -27,7 +27,7 @@ namespace ThreadPool
 using namespace AsteriskSCF::System::ThreadPool::V1;
 using namespace AsteriskSCF::System::WorkQueue::V1;
 
-WorkerThread::WorkerThread(const QueuePtr& workQueue, const boost::shared_ptr<WorkerThreadListener>& listener)
+WorkerThread::WorkerThread(const QueuePtr& workQueue, WorkerThreadListener *listener)
     : mState(Active), mListener(listener), mQueue(workQueue), mThread(boost::bind(&WorkerThread::active, this)) { }
 
 void WorkerThread::active()
@@ -49,7 +49,7 @@ void WorkerThread::active()
     // vector of zombie threads.
     if (mState == Zombie)
     {
-        mListener->zombieThreadDead(boost::shared_ptr<WorkerThread>(this));
+        mListener->zombieThreadDead(this);
     }
 }
 
@@ -69,7 +69,7 @@ void WorkerThread::idle()
         mState = Idle;
     }
 
-    mListener->activeThreadIdle(boost::shared_ptr<WorkerThread>(this));
+    mListener->activeThreadIdle(this);
 
     {
         boost::unique_lock<boost::mutex> lock(mLock);
@@ -99,14 +99,14 @@ public:
         mQueue->setListener(new ThreadQueueListener(boost::shared_ptr<ThreadPoolPriv> (this), mPool));
     }
 
-    void activeThreadIdle(const boost::shared_ptr<WorkerThread>& thread)
+    void activeThreadIdle(WorkerThread *thread)
     {
         int activeSize;
         int idleSize;
         int zombieSize;
         {
             boost::unique_lock<boost::mutex> lock(mLock);
-            std::vector<boost::shared_ptr<WorkerThread> >::iterator iter =
+            std::vector<WorkerThread*>::iterator iter =
                 std::find(mActiveThreads.begin(), mActiveThreads.end(), thread);
     
             if (iter != mActiveThreads.end())
@@ -117,52 +117,55 @@ public:
             activeSize = mActiveThreads.size();
             idleSize = mIdleThreads.size();
             zombieSize = mZombieThreads.size();
+            std::cout << "Active Thread has become idle. Idle: " << idleSize << ". Active: " << activeSize << ". Zombie: " << zombieSize << std::endl;
+            //XXX I don't like calling listener operations without a lock held since it makes
+            //it much more difficult to call pool operations from the listener. The problem is
+            //that by NOT calling with the lock held, state changes can arrive out of order...believe
+            //me, I've seen my tests fail as a result of this.
+            mListener->stateChanged(mPool, activeSize, idleSize, zombieSize);
         }
-        mListener->stateChanged(mPool, activeSize, idleSize, zombieSize);
     }
     
-    void zombieThreadDead(const boost::shared_ptr<WorkerThread>& thread)
+    void zombieThreadDead(WorkerThread *thread)
     {
         int activeSize;
         int idleSize;
         int zombieSize;
         {
             boost::unique_lock<boost::mutex> lock(mLock);
-            mZombieThreads.erase(std::find(mActiveThreads.begin(), mActiveThreads.end(), thread));
+            std::vector<WorkerThread*>::iterator i = std::find(mZombieThreads.begin(), mZombieThreads.end(), thread);
+            if (i != mZombieThreads.end())
+            {
+                mZombieThreads.erase(i);
+                delete *i;
+            }
             activeSize = mActiveThreads.size();
             idleSize = mIdleThreads.size();
             zombieSize = mZombieThreads.size();
+            std::cout << "Endeadened a Zombie thread. Idle: " << idleSize << ". Active: " << activeSize << ". Zombie: " << zombieSize << std::endl;
+            mListener->stateChanged(mPool, activeSize, idleSize, zombieSize);
         }
-        mListener->stateChanged(mPool, activeSize, idleSize, zombieSize);
     }
     
     void grow(int numNewThreads)
     {
-        int activeSize;
-        int idleSize;
-        int zombieSize;
-        {
-            boost::unique_lock<boost::mutex> lock(mLock);
-            for (int i = 0; i < numNewThreads; ++i)
-            {
-                boost::shared_ptr<WorkerThread> newThread(new WorkerThread(mQueue, boost::shared_ptr<ThreadPoolPriv>(this)));
-                mActiveThreads.push_back(newThread);
-            }
-            activeSize = mActiveThreads.size();
-            idleSize = mIdleThreads.size();
-            zombieSize = mZombieThreads.size();
-        }
-        mListener->stateChanged(mPool, activeSize, idleSize, zombieSize);
+        std::cout << "Growing...adding " << numNewThreads << " threads" << std::endl;
+       for (int i = 0; i < numNewThreads; ++i)
+       {
+           WorkerThread *newThread(new WorkerThread(mQueue, this));
+           mActiveThreads.push_back(newThread);
+       }
     }
     
     void shrink(int threadsToKill)
     {
-        boost::unique_lock<boost::mutex> lock(mLock);
-        for (std::vector<boost::shared_ptr<WorkerThread> >::iterator i = mIdleThreads.begin();
+        std::cout << "Shrinking...removing " << threadsToKill << " threads" << std::endl;
+        for (std::vector<WorkerThread*>::iterator i = mIdleThreads.begin();
                 i != mIdleThreads.end(); ++i)
         {
             mIdleThreads.erase(i);
             (*i)->poke(Dead);
+            delete *i;
     
             if (--threadsToKill == 0)
             {
@@ -172,7 +175,7 @@ public:
     
         // If we've made it here, then it means that there weren't enough idle
         // threads to kill. We'll need to zombify some active threads then.
-        for (std::vector<boost::shared_ptr<WorkerThread> >::iterator i = mActiveThreads.begin();
+        for (std::vector<WorkerThread*>::iterator i = mActiveThreads.begin();
                 i != mActiveThreads.end(); ++i)
         {
             //Active threads, on the other hand, need to at least temporarily be
@@ -214,17 +217,18 @@ public:
             activeSize = mActiveThreads.size();
             idleSize = mIdleThreads.size();
             zombieSize = mZombieThreads.size();
+            std::cout << "Finished resizing. Idle: " << idleSize << ". Active: " << activeSize << ". Zombie: " << zombieSize << std::endl;
+            mListener->stateChanged(mPool, activeSize, idleSize, zombieSize);
         }
-        mListener->stateChanged(mPool, activeSize, idleSize, zombieSize);
     }
 
     AsteriskSCF::System::ThreadPool::V1::PoolListenerPtr mListener;
     AsteriskSCF::System::WorkQueue::V1::QueuePtr mQueue;
     AsteriskSCF::System::ThreadPool::V1::PoolPtr mPool;
     
-    std::vector<boost::shared_ptr<WorkerThread> > mActiveThreads;
-    std::vector<boost::shared_ptr<WorkerThread> > mIdleThreads;
-    std::vector<boost::shared_ptr<WorkerThread> > mZombieThreads;
+    std::vector<WorkerThread*> mActiveThreads;
+    std::vector<WorkerThread*> mIdleThreads;
+    std::vector<WorkerThread*> mZombieThreads;
 
     boost::mutex mLock;
 };
@@ -250,7 +254,7 @@ void ThreadQueueListener::workAdded(bool wasEmpty)
     //
     // A potential alternative would be to poke a number of idle threads equal to the
     // new work count.
-    for (std::vector<boost::shared_ptr<WorkerThread> >::iterator i = mThreadPoolPriv->mIdleThreads.begin();
+    for (std::vector<WorkerThread*>::iterator i = mThreadPoolPriv->mIdleThreads.begin();
             i != mThreadPoolPriv->mIdleThreads.end(); ++i)
     {
         (*i)->poke(Active);
diff --git a/ThreadPool/test/TestThreadPool.cpp b/ThreadPool/test/TestThreadPool.cpp
index d6e6c54..0f7e35e 100644
--- a/ThreadPool/test/TestThreadPool.cpp
+++ b/ThreadPool/test/TestThreadPool.cpp
@@ -15,8 +15,8 @@
  */
 
 #include <boost/test/unit_test.hpp>
+#include <boost/thread.hpp>
 #include <boost/thread/locks.hpp>
-#include <boost/thread/mutex.hpp>
 
 #include <AsteriskSCF/WorkQueue.h>
 #include <AsteriskSCF/ThreadPool.h>
@@ -35,10 +35,12 @@ public:
     
     void stateChanged(const PoolPtr& pool, int active, int idle, int zombie)
     {
-        boost::lock_guard<boost::mutex> lock(mLock);
+        boost::unique_lock<boost::mutex> lock(mLock);
         mActive = active;
         mIdle = idle;
         mZombie = zombie;
+        std::cout << "Got stateChanged message: idle: " << mIdle << ". active: " << mActive << ". zombie: " << mZombie << std::endl;
+        mCond.notify_one();
     }
 
     void queueWorkAdded(const PoolPtr& pool, int count, bool wasEmpty)
@@ -65,6 +67,7 @@ public:
     bool mEmptyNotice;
 
     boost::mutex mLock;
+    boost::condition_variable mCond;
 };
 
 typedef IceUtil::Handle<TestListener> TestListenerPtr;
@@ -101,4 +104,67 @@ BOOST_AUTO_TEST_CASE(addWork)
     BOOST_CHECK(listener->mZombie == 0);
 }
 
+BOOST_AUTO_TEST_CASE(threadCreation)
+{
+    TestListenerPtr listener(new TestListener);
+    QueuePtr queue(new WorkQueue());
+    PoolPtr pool(new ThreadPool(listener, queue));
+
+    pool->setSize(1);
+
+    //The thread will initially be active but will
+    //turn idle nearly immediately since there is no
+    //work to do.
+    {
+        boost::unique_lock<boost::mutex> lock(listener->mLock);
+        while (listener->mIdle == 0)
+        {
+            listener->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(listener->mIdle == 1);
+    BOOST_CHECK(listener->mActive == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+}
+
+BOOST_AUTO_TEST_CASE(threadDestruction)
+{
+    TestListenerPtr listener(new TestListener);
+    QueuePtr queue(new WorkQueue());
+    PoolPtr pool(new ThreadPool(listener, queue));
+    
+    std::cout << "Initializing 3 threads for thread pool" << std::endl;
+
+    pool->setSize(3);
+
+    {
+        boost::unique_lock<boost::mutex> lock(listener->mLock);
+        while (listener->mIdle != 3)
+        {
+            listener->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(listener->mIdle == 3);
+    BOOST_CHECK(listener->mActive == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+
+    std::cout << "Shrinking thread pool to 2 threads" << std::endl;
+
+    pool->setSize(2);
+
+    {
+        boost::unique_lock<boost::mutex> lock(listener->mLock);
+        while (listener->mIdle != 2)
+        {
+            listener->mCond.wait(lock);
+        }
+    }
+
+    BOOST_CHECK(listener->mIdle == 2);
+    BOOST_CHECK(listener->mActive == 0);
+    BOOST_CHECK(listener->mZombie == 0);
+}
+
 BOOST_AUTO_TEST_SUITE_END()

-----------------------------------------------------------------------


-- 
asterisk-scf/integration/ice-util-cpp.git



More information about the asterisk-scf-commits mailing list