tweak transport bar spacing
[ardour.git] / libs / pbd / pbd / abstract_ui.cc
index e5319c21079e6d258a6ba7a13b8b857bf9bc45e4..4c13ec1b09f6a05b5c62e6251e1b05e8e22681ef 100644 (file)
@@ -1,4 +1,5 @@
 #include <unistd.h>
+#include <iostream>
 
 #include "pbd/stacktrace.h"
 #include "pbd/abstract_ui.h"
 
 using namespace std;
 
+template<typename R>
+Glib::StaticPrivate<typename AbstractUI<R>::RequestBuffer> AbstractUI<R>::per_thread_request_buffer;
+
+template<typename RequestBuffer> void 
+cleanup_request_buffer (void* ptr)
+{
+        RequestBuffer* rb = (RequestBuffer*) ptr;
+
+        {
+                Glib::Mutex::Lock lm (rb->ui.request_buffer_map_lock);
+                rb->dead = true;
+        }
+}
+
 template <typename RequestObject>
 AbstractUI<RequestObject>::AbstractUI (const string& name)
        : BaseUI (name)
 {
-       PBD::ThreadCreatedWithRequestSize.connect (mem_fun (*this, &AbstractUI<RequestObject>::register_thread));
+       void (AbstractUI<RequestObject>::*pmf)(string,pthread_t,string,uint32_t) = &AbstractUI<RequestObject>::register_thread;
+
+       /* better to make this connect a handler that runs in the UI event loop but the syntax seems hard, and 
+          register_thread() is thread safe anyway.
+       */
+
+       PBD::ThreadCreatedWithRequestSize.connect_same_thread (new_thread_connection, boost::bind (pmf, this, _1, _2, _3, _4));
 }
 
 template <typename RequestObject> void
-AbstractUI<RequestObject>::register_thread (string target_gui, pthread_t thread_id, string thread_name, uint32_t num_requests)
+AbstractUI<RequestObject>::register_thread (string target_gui, pthread_t thread_id, string /*thread name*/, uint32_t num_requests)
 {
        if (target_gui != name()) {
                return;
        }
 
-       RequestBuffer* b = new RequestBuffer (num_requests);
+       RequestBuffer* b = per_thread_request_buffer.get();
+
+        if (b) {
+                /* thread already registered with this UI
+                 */
+                return;
+        }
+
+        b = new RequestBuffer (num_requests, *this);
 
        {
                Glib::Mutex::Lock lm (request_buffer_map_lock);
                request_buffers[thread_id] = b;
        }
 
-       per_thread_request_buffer.set (b);
+       per_thread_request_buffer.set (b, cleanup_request_buffer<RequestBuffer>);
 }
 
 template <typename RequestObject> RequestObject*
@@ -49,11 +78,13 @@ AbstractUI<RequestObject>::get_request (RequestType rt)
                }
 
                vec.buf[0]->type = rt;
+                vec.buf[0]->valid = true;
                return vec.buf[0];
        }
 
        RequestObject* req = new RequestObject;
        req->type = rt;
+
        return req;
 }
 
@@ -69,30 +100,49 @@ AbstractUI<RequestObject>::handle_ui_requests ()
 
        for (i = request_buffers.begin(); i != request_buffers.end(); ++i) {
 
-               while (true) {
-
-                       /* we must process requests 1 by 1 because
-                          the request may run a recursive main
-                          event loop that will itself call
-                          handle_ui_requests. when we return
-                          from the request handler, we cannot
-                          expect that the state of queued requests
-                          is even remotely consistent with
-                          the condition before we called it.
-                       */
-
-                       i->second->get_read_vector (&vec);
-
-                       if (vec.len[0] == 0) {
-                               break;
-                       } else {
-                               request_buffer_map_lock.unlock ();
-                               do_request (vec.buf[0]);
-                               request_buffer_map_lock.lock ();
-                               i->second->increment_read_ptr (1);
-                       } 
-               }
-       }
+                while (true) {
+                        
+                        /* we must process requests 1 by 1 because
+                           the request may run a recursive main
+                           event loop that will itself call
+                           handle_ui_requests. when we return
+                           from the request handler, we cannot
+                           expect that the state of queued requests
+                           is even remotely consistent with
+                           the condition before we called it.
+                        */
+                        
+                        i->second->get_read_vector (&vec);
+                        
+                        if (vec.len[0] == 0) {
+                                break;
+                        } else {
+                                if (vec.buf[0]->valid) {
+                                        request_buffer_map_lock.unlock ();
+                                        do_request (vec.buf[0]);
+                                        request_buffer_map_lock.lock ();
+                                        if (vec.buf[0]->invalidation) {
+                                                vec.buf[0]->invalidation->requests.remove (vec.buf[0]);
+                                        }
+                                        i->second->increment_read_ptr (1);
+                                }
+                        } 
+                }
+        }
+
+        /* clean up any dead request buffers (their thread has exited) */
+
+       for (i = request_buffers.begin(); i != request_buffers.end(); ) {
+             if ((*i).second->dead) {
+                     delete (*i).second;
+                     RequestBufferMapIterator tmp = i;
+                     ++tmp;
+                     request_buffers.erase (i);
+                     i = tmp;
+             } else {          
+                     ++i;
+             }
+        }
 
        request_buffer_map_lock.unlock ();
 
@@ -103,6 +153,30 @@ AbstractUI<RequestObject>::handle_ui_requests ()
        while (!request_list.empty()) {
                RequestObject* req = request_list.front ();
                request_list.pop_front ();
+
+                /* We need to use this lock, because it's the one
+                   returned by slot_invalidation_mutex() and protects
+                   against request invalidation.
+                */
+
+                request_buffer_map_lock.lock ();
+                if (!req->valid) {
+                        delete req;
+                        request_buffer_map_lock.unlock ();
+                        continue;
+                }
+
+                /* we're about to execute this request, so it's
+                   too late for any invalidation. mark
+                   the request as "done" before we start.
+                */
+
+                if (req->invalidation) {
+                        req->invalidation->requests.remove (req);
+                }
+
+                request_buffer_map_lock.unlock ();
+
                lm.release ();
 
                do_request (req);
@@ -140,7 +214,7 @@ AbstractUI<RequestObject>::send_request (RequestObject *req)
 }
 
 template<typename RequestObject> void
-AbstractUI<RequestObject>::call_slot (const boost::function<void()>& f)
+AbstractUI<RequestObject>::call_slot (InvalidationRecord* invalidation, const boost::function<void()>& f)
 {
        if (caller_is_self()) {
                f ();
@@ -154,6 +228,13 @@ AbstractUI<RequestObject>::call_slot (const boost::function<void()>& f)
        }
 
        req->the_slot = f;
+        req->invalidation = invalidation;
+
+        if (invalidation) {
+                invalidation->requests.push_back (req);
+                invalidation->event_loop = this;
+        }
+
        send_request (req);
 }