Use full/empty conditions rather than just a single condition for the server and encoder thread queues.
authorCarl Hetherington <cth@carlh.net>
Thu, 26 Jun 2014 10:04:02 +0000 (11:04 +0100)
committerCarl Hetherington <cth@carlh.net>
Thu, 26 Jun 2014 10:04:02 +0000 (11:04 +0100)
ChangeLog
src/lib/encoder.cc
src/lib/encoder.h
src/lib/server.cc
src/lib/server.h

index 6be780b4ba072b057a18cc3247f5ff6d5ed1c73c..6b004770ca8e28513717b18a34cd48329c9e3c1c 100644 (file)
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,8 @@
 2014-06-26  Carl Hetherington  <cth@carlh.net>
 
+       * Optimisation of uncertain effect to encoder and server
+       thread handling.
+
        * Version 1.70.0 released.
 
 2014-06-25  Carl Hetherington  <cth@carlh.net>
index e83ac70f52a379cef6688f00e0c72a284544abf5..02a2710290f5c8ef6656fd3a49497dc65dbc45bc 100644 (file)
@@ -108,8 +108,8 @@ Encoder::process_end ()
 
        /* Keep waking workers until the queue is empty */
        while (!_queue.empty ()) {
-               _condition.notify_all ();
-               _condition.wait (lock);
+               _empty_condition.notify_all ();
+               _full_condition.wait (lock);
        }
 
        lock.unlock ();
@@ -194,7 +194,7 @@ Encoder::process_video (shared_ptr<PlayerVideoFrame> pvf, bool same)
        /* Wait until the queue has gone down a bit */
        while (_queue.size() >= _threads.size() * 2 && !_terminate) {
                LOG_TIMING ("decoder sleeps with queue of %1", _queue.size());
-               _condition.wait (lock);
+               _full_condition.wait (lock);
                LOG_TIMING ("decoder wakes with queue of %1", _queue.size());
        }
 
@@ -226,8 +226,11 @@ Encoder::process_video (shared_ptr<PlayerVideoFrame> pvf, bool same)
                                                  _film->j2k_bandwidth(), _film->resolution(), _film->log()
                                                  )
                                          ));
-               
-               _condition.notify_all ();
+
+               /* The queue might not be empty any more, so notify anything which is
+                  waiting on that.
+               */
+               _empty_condition.notify_all ();
                _have_a_real_frame[pvf->eyes()] = true;
        }
 
@@ -248,7 +251,8 @@ Encoder::terminate_threads ()
        {
                boost::mutex::scoped_lock lock (_mutex);
                _terminate = true;
-               _condition.notify_all ();
+               _full_condition.notify_all ();
+               _empty_condition.notify_all ();
        }
 
        for (list<boost::thread *>::iterator i = _threads.begin(); i != _threads.end(); ++i) {
@@ -271,12 +275,12 @@ try
        */
        int remote_backoff = 0;
        
-       while (1) {
+       while (true) {
 
                LOG_TIMING ("[%1] encoder thread sleeps", boost::this_thread::get_id());
                boost::mutex::scoped_lock lock (_mutex);
                while (_queue.empty () && !_terminate) {
-                       _condition.wait (lock);
+                       _empty_condition.wait (lock);
                }
 
                if (_terminate) {
@@ -338,8 +342,9 @@ try
                        dcpomatic_sleep (remote_backoff);
                }
 
+               /* The queue might not be full any more, so notify anything that is waiting on that */
                lock.lock ();
-               _condition.notify_all ();
+               _full_condition.notify_all ();
        }
 }
 catch (...)
index a8ee220aaac8cdea58ec3f0cd7c493dddeff93c0..8d5aa2c405a0c54843b49a30ae24d1bf42ea15aa 100644 (file)
@@ -111,7 +111,10 @@ private:
        std::list<boost::shared_ptr<DCPVideoFrame> > _queue;
        std::list<boost::thread *> _threads;
        mutable boost::mutex _mutex;
-       boost::condition _condition;
+       /** condition to manage thread wakeups when we have nothing to do */
+       boost::condition _empty_condition;
+       /** condition to manage thread wakeups when we have too much to do */
+       boost::condition _full_condition;
 
        boost::shared_ptr<Writer> _writer;
        Waker _waker;
index ed7fb6145c6a4b4498bbf6e3ff552a8fd3b8e01b..7450fd12e476324c190b9b93aa9e68b1e0ac4c35 100644 (file)
@@ -118,7 +118,7 @@ Server::worker_thread ()
        while (1) {
                boost::mutex::scoped_lock lock (_worker_mutex);
                while (_queue.empty ()) {
-                       _worker_condition.wait (lock);
+                       _empty_condition.wait (lock);
                }
 
                shared_ptr<Socket> socket = _queue.front ();
@@ -169,7 +169,7 @@ Server::worker_thread ()
                        LOG_GENERAL_NC (message.str ());
                }
                
-               _worker_condition.notify_all ();
+               _full_condition.notify_all ();
        }
 }
 
@@ -202,11 +202,11 @@ Server::run (int num_threads)
                
                /* Wait until the queue has gone down a bit */
                while (int (_queue.size()) >= num_threads * 2) {
-                       _worker_condition.wait (lock);
+                       _full_condition.wait (lock);
                }
                
                _queue.push_back (socket);
-               _worker_condition.notify_all ();
+               _empty_condition.notify_all ();
        }
 }
 
index a9b4b1c1c8982ed9d57c615c9cee6fcfd9df0324..b925031eb2df667cfff90f64a71eacdf3ecb6d7f 100644 (file)
@@ -102,7 +102,8 @@ private:
        std::vector<boost::thread *> _worker_threads;
        std::list<boost::shared_ptr<Socket> > _queue;
        boost::mutex _worker_mutex;
-       boost::condition _worker_condition;
+       boost::condition _full_condition;
+       boost::condition _empty_condition;
        boost::shared_ptr<Log> _log;
        bool _verbose;