X-Git-Url: https://main.carlh.net/gitweb/?a=blobdiff_plain;ds=sidebyside;f=libs%2Fpbd%2Fpbd%2Fabstract_ui.cc;h=bb44b279fa7c4240cdb4546ae73d922e35d098ce;hb=3c7dea43af9d9e5b2563aee81ac5253fb2ee7858;hp=9c405745ac3fc6f4b7d693f244c5cfc840763871;hpb=69a8fc43e2552d9d3c72f9117131ca6b9392093b;p=ardour.git diff --git a/libs/pbd/pbd/abstract_ui.cc b/libs/pbd/pbd/abstract_ui.cc index 9c405745ac..bb44b279fa 100644 --- a/libs/pbd/pbd/abstract_ui.cc +++ b/libs/pbd/pbd/abstract_ui.cc @@ -1,5 +1,5 @@ /* - Copyright (C) 2012 Paul Davis + Copyright (C) 2012 Paul Davis This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -14,11 +14,11 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - */ #include #include +#include #include "pbd/stacktrace.h" #include "pbd/abstract_ui.h" @@ -26,7 +26,7 @@ #include "pbd/failed_constructor.h" #include "pbd/debug.h" -#include "i18n.h" +#include "pbd/i18n.h" #ifdef COMPILER_MSVC #include // Needed for 'DECLARE_DEFAULT_COMPARISONS'. Objects in an STL container can be @@ -34,28 +34,28 @@ // if the type of object being contained has no appropriate comparison operators // defined (specifically, if operators '<' and '==' are undefined). This seems // to be the case with ptw32 'pthread_t' which is a simple struct. -DECLARE_DEFAULT_COMPARISONS(pthread_t) +DECLARE_DEFAULT_COMPARISONS(ptw32_handle_t) #endif using namespace std; -template void +template void cleanup_request_buffer (void* ptr) { - RequestBuffer* rb = (RequestBuffer*) ptr; - - /* there is the question of why we don't simply erase the request - * buffer and delete it right here, since we have to take the lock - * anyway. + RequestBuffer* rb = (RequestBuffer*) ptr; + + /* this is called when the thread for which this request buffer was + * allocated dies. That could be before or after the end of the UI + * event loop for which this request buffer provides communication. * - * as of april 24th 2012, i don't have a good answer to that. + * We are not modifying the UI's thread/buffer map, just marking it + * dead. If the UI is currently processing the buffers and misses + * this "dead" signal, it will find it the next time it receives + * a request. If the UI has finished processing requests, then + * we will leak this buffer object. */ - - - { - Glib::Threads::Mutex::Lock lm (rb->ui.request_buffer_map_lock); - rb->dead = true; - } + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("thread \"%1\" exits: marking request buffer as dead @ %2\n", pthread_name(), rb)); + rb->dead = true; } template @@ -65,17 +65,39 @@ template AbstractUI::AbstractUI (const string& name) : BaseUI (name) { - void (AbstractUI::*pmf)(string,pthread_t,string,uint32_t) = &AbstractUI::register_thread; + void (AbstractUI::*pmf)(pthread_t,string,uint32_t) = &AbstractUI::register_thread; - /* better to make this connect a handler that runs in the UI event loop but the syntax seems hard, and + /* better to make this connect a handler that runs in the UI event loop but the syntax seems hard, and register_thread() is thread safe anyway. 
*/ - PBD::ThreadCreatedWithRequestSize.connect_same_thread (new_thread_connection, boost::bind (pmf, this, _1, _2, _3, _4)); + PBD::ThreadCreatedWithRequestSize.connect_same_thread (new_thread_connection, boost::bind (pmf, this, _1, _2, _3)); + + /* find pre-registerer threads */ + + vector tbm = EventLoop::get_request_buffers_for_target_thread (event_loop_name()); + + { + Glib::Threads::Mutex::Lock rbml (request_buffer_map_lock); + for (vector::iterator t = tbm.begin(); t != tbm.end(); ++t) { + request_buffers[t->emitting_thread] = static_cast (t->request_buffer); + } + } +} + +template +AbstractUI::~AbstractUI () +{ + for (RequestBufferMapIterator i = request_buffers.begin(); i != request_buffers.end(); ++i) { + if ((*i).second->dead) { + EventLoop::remove_request_buffer_from_map ((*i).second); + delete (*i).second; + } + } } template void -AbstractUI::register_thread (string target_gui, pthread_t thread_id, string /*thread name*/, uint32_t num_requests) +AbstractUI::register_thread (pthread_t thread_id, string thread_name, uint32_t num_requests) { /* the calling thread wants to register with the thread that runs this * UI's event loop, so that it will have its own per-thread queue of @@ -83,52 +105,51 @@ AbstractUI::register_thread (string target_gui, pthread_t thread_ * do so in a realtime-safe manner (no locks). */ - if (target_gui != name()) { - /* this UI is not the UI that the calling thread is trying to - register with - */ - return; - } + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("in %1 (thread name %4), %2 (%5) wants to register with UIs\n", event_loop_name(), thread_name, pthread_name(), DEBUG_THREAD_SELF)); /* the per_thread_request_buffer is a thread-private variable. See pthreads documentation for more on these, but the key thing is that it is a variable that as unique value for - each thread, guaranteed. + each thread, guaranteed. Note that the thread in question + is the caller of this function, which is assumed to be the + thread from which signals will be emitted that this UI's + event loop will catch. */ RequestBuffer* b = per_thread_request_buffer.get(); - if (b) { - /* thread already registered with this UI - */ - return; - } + if (!b) { + + /* create a new request queue/ringbuffer */ + + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("create new request buffer for %1 in %2\n", thread_name, event_loop_name())); - /* create a new request queue/ringbuffer */ + b = new RequestBuffer (num_requests); // XXX leaks + /* set this thread's per_thread_request_buffer to this new + queue/ringbuffer. remember that only this thread will + get this queue when it calls per_thread_request_buffer.get() - b = new RequestBuffer (num_requests, *this); + the second argument is a function that will be called + when the thread exits, and ensures that the buffer is marked + dead. it will then be deleted during a call to handle_ui_requests() + */ + + per_thread_request_buffer.set (b); + } else { + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1 : %2 is already registered\n", event_loop_name(), thread_name)); + } { /* add the new request queue (ringbuffer) to our map so that we can iterate over it when the time is right. This step is not RT-safe, but is assumed to be called - only at thread initialization time, not repeatedly, + only at thread initialization time, not repeatedly, and so this is of little consequence. 
*/ - Glib::Threads::Mutex::Lock lm (request_buffer_map_lock); + Glib::Threads::Mutex::Lock rbml (request_buffer_map_lock); request_buffers[thread_id] = b; } - /* set this thread's per_thread_request_buffer to this new - queue/ringbuffer. remember that only this thread will - get this queue when it calls per_thread_request_buffer.get() - - the second argument is a function that will be called - when the thread exits, and ensures that the buffer is marked - dead. it will then be deleted during a call to handle_ui_requests() - */ - - per_thread_request_buffer.set (b); } template RequestObject* @@ -151,14 +172,13 @@ AbstractUI::get_request (RequestType rt) rbuf->get_write_vector (&vec); if (vec.len[0] == 0) { - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: no space in per thread pool for request of type %2\n", name(), rt)); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: no space in per thread pool for request of type %2\n", event_loop_name(), rt)); return 0; } - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated per-thread request of type %2, caller %3\n", name(), rt, pthread_name())); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated per-thread request of type %2, caller %3\n", event_loop_name(), rt, pthread_name())); vec.buf[0]->type = rt; - vec.buf[0]->valid = true; return vec.buf[0]; } @@ -167,7 +187,7 @@ AbstractUI::get_request (RequestType rt) * are not at work. */ - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated normal heap request of type %2, caller %3\n", name(), rt, pthread_name())); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated normal heap request of type %2, caller %3\n", event_loop_name(), rt, pthread_name())); RequestObject* req = new RequestObject; req->type = rt; @@ -182,115 +202,150 @@ AbstractUI::handle_ui_requests () RequestBufferVector vec; /* check all registered per-thread buffers first */ + Glib::Threads::Mutex::Lock rbml (request_buffer_map_lock); + + /* clean up any dead invalidation records (object was deleted) */ + trash.sort(); + trash.unique(); + for (std::list::iterator r = trash.begin(); r != trash.end();) { + if (!(*r)->in_use ()) { + assert (!(*r)->valid ()); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1 drop invalidation trash %2\n", event_loop_name(), *r)); + std::list::iterator tmp = r; + ++tmp; + delete *r; + trash.erase (r); + r = tmp; + } else { + ++r; + } + } +#ifndef NDEBUG + if (trash.size() > 0) { + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1 items in trash: %2\n", event_loop_name(), trash.size())); + } +#endif - request_buffer_map_lock.lock (); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1 check %2 request buffers for requests\n", event_loop_name(), request_buffers.size())); for (i = request_buffers.begin(); i != request_buffers.end(); ++i) { - while (true) { - - /* we must process requests 1 by 1 because - the request may run a recursive main - event loop that will itself call - handle_ui_requests. when we return - from the request handler, we cannot - expect that the state of queued requests - is even remotely consistent with - the condition before we called it. 
- */ - - i->second->get_read_vector (&vec); - - if (vec.len[0] == 0) { - break; - } else { - if (vec.buf[0]->valid) { - request_buffer_map_lock.unlock (); - do_request (vec.buf[0]); - request_buffer_map_lock.lock (); - if (vec.buf[0]->invalidation) { - vec.buf[0]->invalidation->requests.remove (vec.buf[0]); - } - i->second->increment_read_ptr (1); - } - } - } - } - - /* clean up any dead request buffers (their thread has exited) */ + while (!(*i).second->dead) { + /* we must process requests 1 by 1 because + * the request may run a recursive main + * event loop that will itself call + * handle_ui_requests. when we return + * from the request handler, we cannot + * expect that the state of queued requests + * is even remotely consistent with + * the condition before we called it. + */ + + i->second->get_read_vector (&vec); + + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1 reading requests from RB[%2] @ %5, requests = %3 + %4\n", + event_loop_name(), std::distance (request_buffers.begin(), i), vec.len[0], vec.len[1], i->second)); + + if (vec.len[0] == 0) { + break; + } else { + if (vec.buf[0]->invalidation && !vec.buf[0]->invalidation->valid ()) { + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: skipping invalidated request\n", event_loop_name())); + rbml.release (); + } else { + + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: valid request, unlocking before calling\n", event_loop_name())); + rbml.release (); + + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: valid request, calling ::do_request()\n", event_loop_name())); + do_request (vec.buf[0]); + } + + /* if the request was CallSlot, then we need to ensure that we reset the functor in the request, in case it + * held a shared_ptr<>. Failure to do so can lead to dangling references to objects passed to PBD::Signals. + * + * Note that this method (::handle_ui_requests()) is by definition called from the event loop thread, so + * caller_is_self() is true, which means that the execution of the functor has definitely happened after + * do_request() returns and we no longer need the functor for any reason. + */ + + if (vec.buf[0]->type == CallSlot) { + vec.buf[0]->the_slot = 0; + } + + rbml.acquire (); + if (vec.buf[0]->invalidation) { + vec.buf[0]->invalidation->unref (); + } + vec.buf[0]->invalidation = NULL; + i->second->increment_read_ptr (1); + } + } + } + + assert (rbml.locked ()); for (i = request_buffers.begin(); i != request_buffers.end(); ) { - if ((*i).second->dead) { - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 deleting dead per-thread request buffer for %3 @ %4\n", - name(), pthread_name(), i->second)); - delete (*i).second; - RequestBufferMapIterator tmp = i; - ++tmp; - request_buffers.erase (i); - i = tmp; - } else { - ++i; - } - } - - request_buffer_map_lock.unlock (); + if ((*i).second->dead) { + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 deleting dead per-thread request buffer for %3 @ %4 (%5 requests)\n", event_loop_name(), pthread_name(), i->second, (*i).second->read_space())); + RequestBufferMapIterator tmp = i; + ++tmp; + /* remove it from the EventLoop static map of all request buffers */ + EventLoop::remove_request_buffer_from_map ((*i).second); + /* delete it + * + * Deleting the ringbuffer destroys all RequestObjects + * and thereby drops any InvalidationRecord references of + * requests that have not been processed. 
+ */ + delete (*i).second; + /* remove it from this thread's list of request buffers */ + request_buffers.erase (i); + i = tmp; + } else { + ++i; + } + } /* and now, the generic request buffer. same rules as above apply */ - Glib::Threads::Mutex::Lock lm (request_list_lock); - while (!request_list.empty()) { + assert (rbml.locked ()); RequestObject* req = request_list.front (); request_list.pop_front (); - /* We need to use this lock, because its the one - returned by slot_invalidation_mutex() and protects - against request invalidation. - */ - - request_buffer_map_lock.lock (); - if (!req->valid) { - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 handling invalid heap request, type %3, deleting\n", name(), pthread_name(), req->type)); - delete req; - request_buffer_map_lock.unlock (); - continue; - } - - /* we're about to execute this request, so its - too late for any invalidation. mark - the request as "done" before we start. - */ - - if (req->invalidation) { - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 remove request from its invalidation list\n", name(), pthread_name())); - - /* after this call, if the object referenced by the - * invalidation record is deleted, it will no longer - * try to mark the request as invalid. - */ + /* we're about to execute this request, so its + * too late for any invalidation. mark + * the request as "done" before we start. + */ - req->invalidation->requests.remove (req); - } + if (req->invalidation && !req->invalidation->valid()) { + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 handling invalid heap request, type %3, deleting\n", event_loop_name(), pthread_name(), req->type)); + delete req; + continue; + } /* at this point, an object involved in a functor could be * deleted before we actually execute the functor. so there is * a race condition that makes the invalidation architecture - * somewhat pointless. + * somewhat pointless. * * really, we should only allow functors containing shared_ptr * references to objects to enter into the request queue. */ - request_buffer_map_lock.unlock (); - /* unlock the request lock while we execute the request, so - * that we don't needlessly block other threads (note: not RT + * that we don't needlessly block other threads (note: not RT * threads since they have their own queue) from making requests. */ - lm.release (); + /* also the request may destroy the object itself resulting in a direct + * path to EventLoop::invalidate_request () from here + * which takes the lock */ - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 execute request type %3\n", name(), pthread_name(), req->type)); + rbml.release (); + + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 execute request type %3\n", event_loop_name(), pthread_name(), req->type)); /* and lets do it ... 
this is a virtual call so that each * specific type of UI can have its own set of requests without @@ -299,13 +354,15 @@ AbstractUI::handle_ui_requests () do_request (req); - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 delete heap request type %3\n", name(), pthread_name(), req->type)); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 delete heap request type %3\n", event_loop_name(), pthread_name(), req->type)); delete req; /* re-acquire the list lock so that we check again */ - lm.acquire(); + rbml.acquire(); } + + rbml.release (); } template void @@ -317,6 +374,7 @@ AbstractUI::send_request (RequestObject *req) */ if (base_instance() == 0) { + delete req; return; /* XXX is this the right thing to do ? */ } @@ -324,9 +382,10 @@ AbstractUI::send_request (RequestObject *req) /* the thread that runs this UI's event loop is sending itself a request: we dispatch it immediately and inline. */ - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of request type %3\n", name(), pthread_name(), req->type)); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of request type %3\n", event_loop_name(), pthread_name(), req->type)); do_request (req); - } else { + delete req; + } else { /* If called from a different thread, we first check to see if * the calling thread is registered with this UI. If so, there @@ -343,14 +402,14 @@ AbstractUI::send_request (RequestObject *req) RequestBuffer* rbuf = per_thread_request_buffer.get (); if (rbuf != 0) { - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send per-thread request type %3\n", name(), pthread_name(), req->type)); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send per-thread request type %3 using ringbuffer @ %4 IR: %5\n", event_loop_name(), pthread_name(), req->type, rbuf, req->invalidation)); rbuf->increment_write_ptr (1); } else { /* no per-thread buffer, so just use a list with a lock so that it remains - single-reader/single-writer semantics - */ - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send heap request type %3\n", name(), pthread_name(), req->type)); - Glib::Threads::Mutex::Lock lm (request_list_lock); + * single-reader/single-writer semantics + */ + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send heap request type %3 IR %4\n", event_loop_name(), pthread_name(), req->type, req->invalidation)); + Glib::Threads::Mutex::Lock lm (request_buffer_map_lock); request_list.push_back (req); } @@ -366,38 +425,62 @@ template void AbstractUI::call_slot (InvalidationRecord* invalidation, const boost::function& f) { if (caller_is_self()) { - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of call slot via functor @ %3, invalidation %4\n", name(), pthread_name(), &f, invalidation)); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of call slot via functor @ %3, invalidation %4\n", event_loop_name(), pthread_name(), &f, invalidation)); f (); return; } + /* object destruction may race with realtime signal emission. + * + * There may be a concurrent event-loop in progress of deleting + * the slot-object. That's perfectly fine. But we need to mark + * the invalidation record itself as being used by the request. + * + * The IR needs to be kept around until the last signal using + * it is disconnected and then it can be deleted in the event-loop + * (GUI thread). 
+ */ + if (invalidation) { + if (!invalidation->valid()) { + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 ignoring call-slot using functor @ %3, dead invalidation %4\n", event_loop_name(), pthread_name(), &f, invalidation)); + return; + } + invalidation->ref (); + invalidation->event_loop = this; + } + RequestObject *req = get_request (BaseUI::CallSlot); - + if (req == 0) { + if (invalidation) { + invalidation->unref (); + } return; } - DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 queue call-slot using functor @ %3, invalidation %4\n", name(), pthread_name(), &f, invalidation)); + DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 queue call-slot using functor @ %3, invalidation %4\n", event_loop_name(), pthread_name(), &f, invalidation)); /* copy semantics: copy the functor into the request object */ req->the_slot = f; - + /* the invalidation record is an object which will carry out - * invalidation of any requests associated with it when it is + * invalidation of any requests associated with it when it is * destroyed. it can be null. if its not null, associate this * request with the invalidation record. this allows us to * "cancel" requests submitted to the UI because they involved * a functor that uses an object that is being deleted. */ - req->invalidation = invalidation; - - if (invalidation) { - invalidation->requests.push_back (req); - invalidation->event_loop = this; - } + req->invalidation = invalidation; send_request (req); -} +} +template void* +AbstractUI::request_buffer_factory (uint32_t num_requests) +{ + RequestBuffer* mcr = new RequestBuffer (num_requests); // XXX leaks + per_thread_request_buffer.set (mcr); + return mcr; +}