X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;f=libs%2Fpbd%2Fpbd%2Fabstract_ui.cc;h=d2f8840b907ad1f65b2964a047bef764ecc12775;hb=4dc63966f0872efe768dad61eb9b8785d06b92d1;hp=6c3f0bac7f110ac8fa1e480be15413dff1de6a2e;hpb=bfe5a07204c402bdd2f651a5d5cbce6db5f52be7;p=ardour.git diff --git a/libs/pbd/pbd/abstract_ui.cc b/libs/pbd/pbd/abstract_ui.cc index 6c3f0bac7f..d2f8840b90 100644 --- a/libs/pbd/pbd/abstract_ui.cc +++ b/libs/pbd/pbd/abstract_ui.cc @@ -1,147 +1,405 @@ +/* + Copyright (C) 2012 Paul Davis + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + +*/ + #include +#include -#include -#include -#include +#include "pbd/stacktrace.h" +#include "pbd/abstract_ui.h" +#include "pbd/pthread_utils.h" +#include "pbd/failed_constructor.h" +#include "pbd/debug.h" #include "i18n.h" +#ifdef COMPILER_MSVC +#include // Needed for 'DECLARE_DEFAULT_COMPARISONS'. Objects in an STL container can be + // searched and sorted. Thus, when instantiating the container, MSVC complains + // if the type of object being contained has no appropriate comparison operators + // defined (specifically, if operators '<' and '==' are undefined). This seems + // to be the case with ptw32 'pthread_t' which is a simple struct. +DECLARE_DEFAULT_COMPARISONS(ptw32_handle_t) +#endif + using namespace std; -template -AbstractUI::AbstractUI (string name, bool with_signal_pipes) - : BaseUI (name, with_signal_pipes) +template void +cleanup_request_buffer (void* ptr) { - if (pthread_key_create (&thread_request_buffer_key, 0)) { - cerr << _("cannot create thread request buffer key") << endl; - throw failed_constructor(); - } - - PBD::ThreadCreated.connect (mem_fun (*this, &AbstractUI::register_thread)); - PBD::ThreadCreatedWithRequestSize.connect (mem_fun (*this, &AbstractUI::register_thread_with_request_count)); + RequestBuffer* rb = (RequestBuffer*) ptr; + + /* this is called when the thread for which this request buffer was + * allocated dies. That could be before or after the end of the UI + * event loop for which this request buffer provides communication. + * + * We are not modifying the UI's thread/buffer map, just marking it + * dead. If the UI is currently processing the buffers and misses + * this "dead" signal, it will find it the next time it receives + * a request. If the UI has finished processing requests, then + * we will leak this buffer object. 
+ */
+
+	rb->dead = true;
 }
 
-template <typename RequestObject> void
-AbstractUI<RequestObject>::register_thread (pthread_t thread_id, string name)
+template <typename RequestObject>
+Glib::Threads::Private<typename AbstractUI<RequestObject>::RequestBuffer> AbstractUI<RequestObject>::per_thread_request_buffer (cleanup_request_buffer<typename AbstractUI<RequestObject>::RequestBuffer>);
+
+template <typename RequestObject>
+AbstractUI<RequestObject>::AbstractUI (const string& name)
+	: BaseUI (name)
 {
-	register_thread_with_request_count (thread_id, name, 256);
+	void (AbstractUI<RequestObject>::*pmf)(string,pthread_t,string,uint32_t) = &AbstractUI<RequestObject>::register_thread;
+
+	/* better to make this connect a handler that runs in the UI event loop, but the syntax seems hard, and
+	   register_thread() is thread-safe anyway.
+	*/
+
+	PBD::ThreadCreatedWithRequestSize.connect_same_thread (new_thread_connection, boost::bind (pmf, this, _1, _2, _3, _4));
 }
 
 template <typename RequestObject> void
-AbstractUI<RequestObject>::register_thread_with_request_count (pthread_t thread_id, string thread_name, uint32_t num_requests)
+AbstractUI<RequestObject>::register_thread (string target_gui, pthread_t thread_id, string /*thread name*/, uint32_t num_requests)
 {
-	RequestBuffer* b = new RequestBuffer (num_requests);
+	/* the calling thread wants to register with the thread that runs this
+	 * UI's event loop, so that it will have its own per-thread queue of
+	 * requests. this means that when it makes a request to this UI it can
+	 * do so in a realtime-safe manner (no locks).
+	 */
+
+	if (target_gui != name()) {
+		/* this UI is not the UI that the calling thread is trying to
+		   register with
+		*/
+		return;
+	}
+
+	/* the per_thread_request_buffer is a thread-private variable.
+	   See the pthreads documentation for more on these, but the key
+	   thing is that it is a variable that has a unique value for
+	   each thread, guaranteed.
+	*/
+
+	RequestBuffer* b = per_thread_request_buffer.get();
+
+	if (b) {
+		/* thread already registered with this UI
+		 */
+		return;
+	}
+
+	/* create a new request queue/ringbuffer */
+
+	b = new RequestBuffer (num_requests, *this);
 
 	{
-		Glib::Mutex::Lock lm (request_buffer_map_lock);
+		/* add the new request queue (ringbuffer) to our map
+		   so that we can iterate over it when the time is right.
+		   This step is not RT-safe, but is assumed to be called
+		   only at thread initialization time, not repeatedly,
+		   and so this is of little consequence.
+		*/
+		Glib::Threads::Mutex::Lock lm (request_buffer_map_lock);
 		request_buffers[thread_id] = b;
 	}
 
-	pthread_setspecific (thread_request_buffer_key, b);
+	/* set this thread's per_thread_request_buffer to this new
+	   queue/ringbuffer. remember that only this thread will
+	   get this queue when it calls per_thread_request_buffer.get()
+
+	   the second argument is a function that will be called
+	   when the thread exits, and ensures that the buffer is marked
+	   dead.
it will then be deleted during a call to handle_ui_requests() + */ + + per_thread_request_buffer.set (b); } template RequestObject* AbstractUI::get_request (RequestType rt) { - RequestBuffer* rbuf = static_cast(pthread_getspecific (thread_request_buffer_key)); - - if (rbuf == 0) { - /* Cannot happen, but if it does we can't use the error reporting mechanism */ - cerr << _("programming error: ") - << string_compose ("no %1-UI request buffer found for thread %2", name(), pthread_name()) - << endl; - abort (); - } - + RequestBuffer* rbuf = per_thread_request_buffer.get (); RequestBufferVector vec; - - rbuf->get_write_vector (&vec); - if (vec.len[0] == 0) { - if (vec.len[1] == 0) { - cerr << string_compose ("no space in %1-UI request buffer for thread %2", name(), pthread_name()) - << endl; + /* see comments in ::register_thread() above for an explanation of + the per_thread_request_buffer variable + */ + + if (rbuf != 0) { + + /* the calling thread has registered with this UI and therefore + * we have a per-thread request queue/ringbuffer. use it. this + * "allocation" of a request is RT-safe. + */ + + rbuf->get_write_vector (&vec); + + if (vec.len[0] == 0) { + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: no space in per thread pool for request of type %2\n", name(), rt)); return 0; - } else { - vec.buf[1]->type = rt; - return vec.buf[1]; } - } else { + + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated per-thread request of type %2, caller %3\n", name(), rt, pthread_name())); + vec.buf[0]->type = rt; + vec.buf[0]->valid = true; return vec.buf[0]; } + + /* calling thread has not registered, so just allocate a new request on + * the heap. the lack of registration implies that realtime constraints + * are not at work. + */ + + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated normal heap request of type %2, caller %3\n", name(), rt, pthread_name())); + + RequestObject* req = new RequestObject; + req->type = rt; + + return req; } template void AbstractUI::handle_ui_requests () { RequestBufferMapIterator i; + RequestBufferVector vec; + + /* check all registered per-thread buffers first */ request_buffer_map_lock.lock (); for (i = request_buffers.begin(); i != request_buffers.end(); ++i) { - RequestBufferVector vec; + while (true) { - while (true) { + /* we must process requests 1 by 1 because + the request may run a recursive main + event loop that will itself call + handle_ui_requests. when we return + from the request handler, we cannot + expect that the state of queued requests + is even remotely consistent with + the condition before we called it. + */ - /* we must process requests 1 by 1 because - the request may run a recursive main - event loop that will itself call - handle_ui_requests. when we return - from the request handler, we cannot - expect that the state of queued requests - is even remotely consistent with - the condition before we called it. 
-			*/
 
-			i->second->get_read_vector (&vec);
+			i->second->get_read_vector (&vec);
 
-			if (vec.len[0] == 0) {
-				break;
-			} else {
-				request_buffer_map_lock.unlock ();
-				do_request (vec.buf[0]);
-				request_buffer_map_lock.lock ();
-				i->second->increment_read_ptr (1);
-			}
-		}
-	}
+			if (vec.len[0] == 0) {
+				break;
+			} else {
+				if (vec.buf[0]->valid) {
+					request_buffer_map_lock.unlock ();
+					do_request (vec.buf[0]);
+					request_buffer_map_lock.lock ();
+					if (vec.buf[0]->invalidation) {
+						vec.buf[0]->invalidation->requests.remove (vec.buf[0]);
+					}
+					delete vec.buf[0];
+					i->second->increment_read_ptr (1);
+				}
+			}
+		}
+	}
 
+	/* clean up any dead request buffers (their thread has exited) */
+
+	for (i = request_buffers.begin(); i != request_buffers.end(); ) {
+		if ((*i).second->dead) {
+			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 deleting dead per-thread request buffer for %3 @ %4\n",
+			                                                     name(), pthread_name(), i->second));
+			delete (*i).second;
+			RequestBufferMapIterator tmp = i;
+			++tmp;
+			request_buffers.erase (i);
+			i = tmp;
+		} else {
+			++i;
+		}
+	}
 
 	request_buffer_map_lock.unlock ();
+
+	/* and now, the generic request buffer. same rules as above apply */
+
+	Glib::Threads::Mutex::Lock lm (request_list_lock);
+
+	while (!request_list.empty()) {
+		RequestObject* req = request_list.front ();
+		request_list.pop_front ();
+
+		/* We need to use this lock, because it's the one
+		   returned by slot_invalidation_mutex() and protects
+		   against request invalidation.
+		*/
+
+		request_buffer_map_lock.lock ();
+		if (!req->valid) {
+			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 handling invalid heap request, type %3, deleting\n", name(), pthread_name(), req->type));
+			delete req;
+			request_buffer_map_lock.unlock ();
+			continue;
+		}
+
+		/* we're about to execute this request, so it's
+		   too late for any invalidation. mark
+		   the request as "done" before we start.
+		*/
+
+		if (req->invalidation) {
+			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 remove request from its invalidation list\n", name(), pthread_name()));
+
+			/* after this call, if the object referenced by the
+			 * invalidation record is deleted, it will no longer
+			 * try to mark the request as invalid.
+			 */
+
+			req->invalidation->requests.remove (req);
+		}
+
+		/* at this point, an object involved in a functor could be
+		 * deleted before we actually execute the functor. so there is
+		 * a race condition that makes the invalidation architecture
+		 * somewhat pointless.
+		 *
+		 * really, we should only allow functors containing shared_ptr
+		 * references to objects to enter into the request queue.
+		 */
+
+		request_buffer_map_lock.unlock ();
+
+		/* unlock the request lock while we execute the request, so
+		 * that we don't needlessly block other threads (note: not RT
+		 * threads, since they have their own queue) from making requests.
+		 */
+
+		lm.release ();
+
+		DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 execute request type %3\n", name(), pthread_name(), req->type));
+
+		/* and let's do it ... this is a virtual call so that each
+		 * specific type of UI can have its own set of requests without
+		 * some kind of central request type registration logic
+		 */
+
+		do_request (req);
+
+		DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 delete heap request type %3\n", name(), pthread_name(), req->type));
+		delete req;
+
+		/* re-acquire the list lock so that we check again */
+
+		lm.acquire();
+	}
 }
 
 template <typename RequestObject> void
 AbstractUI<RequestObject>::send_request (RequestObject *req)
 {
+	/* This is called to ask a given UI to carry out a request. It may be
+	 * called from the same thread that runs the UI's event loop (see the
+	 * caller_is_self() case below), or from any other thread.
+	 */
+
 	if (base_instance() == 0) {
 		return; /* XXX is this the right thing to do ? */
 	}
-
-	if (caller_is_ui_thread()) {
-		// cerr << "GUI thread sent request " << req << " type = " << req->type << endl;
+
+	if (caller_is_self ()) {
+		/* the thread that runs this UI's event loop is sending itself
+		   a request: we dispatch it immediately and inline.
+		*/
+		DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of request type %3\n", name(), pthread_name(), req->type));
 		do_request (req);
+		delete req;
 	} else {
-		RequestBuffer* rbuf = static_cast<RequestBuffer*> (pthread_getspecific (thread_request_buffer_key));
-		if (rbuf == 0) {
-			/* can't use the error system to report this, because this
-			   thread isn't registered!
-			*/
-			cerr << _("programming error: ")
-			     << string_compose ("AbstractUI::send_request() called from %1 (%2), but no request buffer exists for that thread", name(), pthread_name())
-			     << endl;
-			abort ();
-		}
-
-		// cerr << "thread " << pthread_self() << " sent request " << req << " type = " << req->type << endl;
-		rbuf->increment_write_ptr (1);
+		/* If called from a different thread, we first check to see if
+		 * the calling thread is registered with this UI. If so, there
+		 * is a per-thread ringbuffer of requests that ::get_request()
+		 * just set up a new request in. In that case, all we need do
+		 * here is to advance the write ptr in that ringbuffer so that
+		 * the next request by this calling thread will use the next
+		 * slot in the ringbuffer. The ringbuffer has
+		 * single-reader/single-writer semantics because the calling
+		 * thread is the only writer, and the UI event loop is the only
+		 * reader.
+		 */
+
+		RequestBuffer* rbuf = per_thread_request_buffer.get ();
+
+		if (rbuf != 0) {
+			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send per-thread request type %3\n", name(), pthread_name(), req->type));
+			rbuf->increment_write_ptr (1);
+		} else {
+			/* no per-thread buffer, so just use a list with a lock so that it
+			   keeps single-reader/single-writer semantics
+			*/
+			DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send heap request type %3\n", name(), pthread_name(), req->type));
+			Glib::Threads::Mutex::Lock lm (request_list_lock);
+			request_list.push_back (req);
+		}
 
+		/* send the UI event loop thread a wakeup so that it will look
+		   at the per-thread and generic request lists.
+		*/
 
-		if (signal_pipe[1] >= 0) {
-			const char c = 0;
-			write (signal_pipe[1], &c, 1);
-		}
+		signal_new_request ();
 	}
 }
 
+template <typename RequestObject> void
+AbstractUI<RequestObject>::call_slot (InvalidationRecord* invalidation, const boost::function<void()>& f)
+{
+	if (caller_is_self()) {
+		DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of call slot via functor @ %3, invalidation %4\n", name(), pthread_name(), &f, invalidation));
+		f ();
+		return;
+	}
+
+	RequestObject *req = get_request (BaseUI::CallSlot);
+
+	if (req == 0) {
+		return;
+	}
+
+	DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 queue call-slot using functor @ %3, invalidation %4\n", name(), pthread_name(), &f, invalidation));
+
+	/* copy semantics: copy the functor into the request object */
+
+	req->the_slot = f;
+
+	/* the invalidation record is an object which will carry out
+	 * invalidation of any requests associated with it when it is
+	 * destroyed. it can be null. if it's not null, associate this
+	 * request with the invalidation record. this allows us to
+	 * "cancel" requests submitted to the UI because they involved
+	 * a functor that uses an object that is being deleted.
+	 */
+
+	req->invalidation = invalidation;
+
+	if (invalidation) {
+		invalidation->requests.push_back (req);
+		invalidation->event_loop = this;
+	}
+
+	send_request (req);
+}
+
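
The request-routing scheme this diff implements can be summarised in a small, self-contained sketch. This is an illustration only, not the PBD classes: the names (MiniUI, Request, register_thread, call_slot, handle_requests) are hypothetical, and the per-thread path is approximated with a mutex-protected deque, whereas the real code uses a lock-free single-reader/single-writer ringbuffer per registered thread and invalidation records to cancel queued requests.

/* Illustration only: hypothetical MiniUI/Request names, not the PBD classes.
 * The per-thread path is approximated with a mutex-protected deque; the real
 * code uses a lock-free single-reader/single-writer ringbuffer per thread.
 */
#include <deque>
#include <functional>
#include <iostream>
#include <map>
#include <mutex>
#include <thread>
#include <vector>

struct Request {
	std::function<void()> slot;
	bool valid;
};

class MiniUI {
public:
	/* a worker thread calls this once so that it gets its own queue */
	void register_thread () {
		std::lock_guard<std::mutex> lm (lock);
		buffers[std::this_thread::get_id()]; /* create the per-thread queue */
	}

	/* any thread calls this to ask the event-loop thread to run f later */
	void call_slot (std::function<void()> f) {
		std::lock_guard<std::mutex> lm (lock);
		BufferMap::iterator i = buffers.find (std::this_thread::get_id());
		if (i != buffers.end()) {
			i->second.push_back (Request { std::move (f), true }); /* registered: per-thread queue */
		} else {
			generic.push_back (Request { std::move (f), true });   /* unregistered: generic list */
		}
	}

	/* run by the thread that owns the event loop */
	void handle_requests () {
		std::vector<Request> todo;
		{
			std::lock_guard<std::mutex> lm (lock);
			for (BufferMap::iterator i = buffers.begin(); i != buffers.end(); ++i) {
				todo.insert (todo.end(), i->second.begin(), i->second.end());
				i->second.clear ();
			}
			todo.insert (todo.end(), generic.begin(), generic.end());
			generic.clear ();
		}
		/* execute with the lock released so a handler may queue new requests;
		 * the real code goes one request at a time because a handler may run
		 * a recursive event loop, and it skips requests marked invalid.
		 */
		for (std::vector<Request>::iterator r = todo.begin(); r != todo.end(); ++r) {
			if (r->valid) {
				r->slot ();
			}
		}
	}

private:
	typedef std::map<std::thread::id, std::deque<Request> > BufferMap;
	std::mutex lock;
	BufferMap buffers;
	std::deque<Request> generic;
};

int main ()
{
	MiniUI ui;
	std::thread worker ([&ui] {
			ui.register_thread ();
			ui.call_slot ([] { std::cout << "ran by the event-loop thread\n"; });
		});
	worker.join ();
	ui.handle_requests (); /* the event-loop thread drains both queues */
	return 0;
}

The property being modelled is the single-reader/single-writer split described in the comments above: a registered thread is the only writer to its own queue and the UI event loop is the only reader, which is what lets the real implementation avoid taking any lock on that path.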