/*
 * Copyright (C) 2012 Paul Davis
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
23 #include "pbd/stacktrace.h"
24 #include "pbd/abstract_ui.h"
25 #include "pbd/pthread_utils.h"
26 #include "pbd/failed_constructor.h"
27 #include "pbd/debug.h"
32 #include <ardourext/misc.h> // Needed for 'DECLARE_DEFAULT_COMPARISONS'. Objects in an STL container can be
33 // searched and sorted. Thus, when instantiating the container, MSVC complains
34 // if the type of object being contained has no appropriate comparison operators
35 // defined (specifically, if operators '<' and '==' are undefined). This seems
36 // to be the case with ptw32 'pthread_t' which is a simple struct.
37 DECLARE_DEFAULT_COMPARISONS(pthread_t)
/* NOTE(review): throughout this chunk, each line begins with its original
 * file line number and those numbers are non-contiguous — braces and some
 * statements have been elided from this view. Code is left byte-identical;
 * only comments are added.
 *
 * cleanup_request_buffer: thread-exit destructor callback installed on the
 * Glib::Threads::Private per_thread_request_buffer (see definition below).
 * It runs when a registered thread exits. It does NOT delete the buffer
 * here; it takes the owning UI's request_buffer_map_lock and — presumably,
 * on an elided line — marks the buffer dead so the UI event loop can reap
 * it later in handle_ui_requests(). TODO confirm against the full source.
 */
42 template<typename RequestBuffer> void
43 cleanup_request_buffer (void* ptr)
/* the Private<> destructor callback receives an opaque void*; recover the
 * concrete per-thread buffer type */
45 RequestBuffer* rb = (RequestBuffer*) ptr;
47 /* there is the question of why we don't simply erase the request
48 * buffer and delete it right here, since we have to take the lock
51 * as of april 24th 2012, i don't have a good answer to that.
/* lock scope: the dead-marking must be serialized against the UI thread
 * iterating request_buffers in handle_ui_requests() */
56 Glib::Threads::Mutex::Lock lm (rb->ui.request_buffer_map_lock);
/* Definition of the per-thread (thread-local) RequestBuffer pointer for
 * AbstractUI<R>. Each thread that registers with this UI gets its own
 * buffer via per_thread_request_buffer.set()/get(); the
 * cleanup_request_buffer<> callback passed here runs at thread exit.
 * NOTE(review): the `template <typename R>` header line for this
 * definition appears to be elided from this view.
 */
62 Glib::Threads::Private<typename AbstractUI<R>::RequestBuffer> AbstractUI<R>::per_thread_request_buffer (cleanup_request_buffer<AbstractUI<R>::RequestBuffer>);
/* AbstractUI constructor.
 *
 * @param name  name of this UI/event loop (member-initializer list appears
 *              to be on an elided line — TODO confirm).
 *
 * Wires the global PBD::ThreadCreatedWithRequestSize signal to this UI's
 * register_thread(), so that any thread announced through that signal gets
 * the chance to set up a per-thread request ringbuffer with this UI.
 */
64 template <typename RequestObject>
65 AbstractUI<RequestObject>::AbstractUI (const string& name)
/* take the member-function pointer explicitly so boost::bind below binds
 * the intended register_thread overload/signature */
68 void (AbstractUI<RequestObject>::*pmf)(string,pthread_t,string,uint32_t) = &AbstractUI<RequestObject>::register_thread;
70 /* better to make this connect a handler that runs in the UI event loop but the syntax seems hard, and
71 register_thread() is thread safe anyway.
/* connect_same_thread: register_thread() will run in whichever thread emits
 * the signal, not necessarily this UI's event-loop thread */
74 PBD::ThreadCreatedWithRequestSize.connect_same_thread (new_thread_connection, boost::bind (pmf, this, _1, _2, _3, _4));
/* register_thread: give the calling thread its own lock-free request
 * ringbuffer for talking to this UI's event loop.
 *
 * @param target_gui    name of the UI the caller wants to register with;
 *                      ignored (early return, on an elided line — TODO
 *                      confirm) unless it matches this UI's name().
 * @param thread_id     calling thread's pthread_t, used as the key in
 *                      request_buffers.
 * @param num_requests  capacity of the new request ringbuffer.
 */
77 template <typename RequestObject> void
78 AbstractUI<RequestObject>::register_thread (string target_gui, pthread_t thread_id, string /*thread name*/, uint32_t num_requests)
80 /* the calling thread wants to register with the thread that runs this
81 * UI's event loop, so that it will have its own per-thread queue of
82 * requests. this means that when it makes a request to this UI it can
83 * do so in a realtime-safe manner (no locks).
86 if (target_gui != name()) {
87 /* this UI is not the UI that the calling thread is trying to
93 /* the per_thread_request_buffer is a thread-private variable.
94 See pthreads documentation for more on these, but the key
95 thing is that it is a variable that as unique value for
96 each thread, guaranteed.
99 RequestBuffer* b = per_thread_request_buffer.get();
/* NOTE(review): a non-null `b` presumably triggers an early return on an
 * elided line — "thread already registered" below is the start of that
 * branch's comment. */
102 /* thread already registered with this UI
107 /* create a new request queue/ringbuffer */
109 b = new RequestBuffer (num_requests, *this);
112 /* add the new request queue (ringbuffer) to our map
113 so that we can iterate over it when the time is right.
114 This step is not RT-safe, but is assumed to be called
115 only at thread initialization time, not repeatedly,
116 and so this is of little consequence.
118 Glib::Threads::Mutex::Lock lm (request_buffer_map_lock);
119 request_buffers[thread_id] = b;
122 /* set this thread's per_thread_request_buffer to this new
123 queue/ringbuffer. remember that only this thread will
124 get this queue when it calls per_thread_request_buffer.get()
126 the second argument is a function that will be called
127 when the thread exits, and ensures that the buffer is marked
128 dead. it will then be deleted during a call to handle_ui_requests()
131 per_thread_request_buffer.set (b);
/* get_request: obtain a request object of type `rt` for the calling thread.
 *
 * Two paths: if the caller registered via register_thread(), a slot is
 * claimed in its per-thread ringbuffer (RT-safe, no locks, no allocation);
 * otherwise a RequestObject is heap-allocated. The guard on rbuf, the
 * "ringbuffer full" return, and the success returns all appear to be on
 * elided lines — TODO confirm against the full source.
 *
 * @param rt  request type tag stored into the request.
 * @return    pointer to a usable request, presumably 0/heap object per path.
 */
134 template <typename RequestObject> RequestObject*
135 AbstractUI<RequestObject>::get_request (RequestType rt)
137 RequestBuffer* rbuf = per_thread_request_buffer.get ();
138 RequestBufferVector vec;
140 /* see comments in ::register_thread() above for an explanation of
141 the per_thread_request_buffer variable
146 /* the calling thread has registered with this UI and therefore
147 * we have a per-thread request queue/ringbuffer. use it. this
148 * "allocation" of a request is RT-safe.
/* ask the ringbuffer for writable slots; vec.len[0] == 0 means full */
151 rbuf->get_write_vector (&vec);
153 if (vec.len[0] == 0) {
154 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: no space in per thread pool for request of type %2\n", name(), rt));
158 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated per-thread request of type %2, caller %3\n", name(), rt, pthread_name()));
/* initialize the claimed slot in place; the write pointer is only advanced
 * later, by send_request() */
160 vec.buf[0]->type = rt;
161 vec.buf[0]->valid = true;
165 /* calling thread has not registered, so just allocate a new request on
166 * the heap. the lack of registration implies that realtime constraints
170 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated normal heap request of type %2, caller %3\n", name(), rt, pthread_name()));
172 RequestObject* req = new RequestObject;
/* handle_ui_requests: drain and execute pending requests. Runs in the UI
 * event-loop thread. Order of work visible here:
 *   1. each registered per-thread ringbuffer, one request at a time;
 *   2. reap buffers whose owning thread has exited (dead flag);
 *   3. the generic heap-allocated request_list.
 * NOTE(review): loop-exit `break`s, `delete` of requests/buffers, and
 * several closing braces are on elided lines — TODO confirm.
 */
178 template <typename RequestObject> void
179 AbstractUI<RequestObject>::handle_ui_requests ()
181 RequestBufferMapIterator i;
182 RequestBufferVector vec;
184 /* check all registered per-thread buffers first */
/* manual lock()/unlock() rather than a scoped lock: the lock is dropped
 * around do_request() below so handlers may themselves make requests */
186 request_buffer_map_lock.lock ();
188 for (i = request_buffers.begin(); i != request_buffers.end(); ++i) {
192 /* we must process requests 1 by 1 because
193 the request may run a recursive main
194 event loop that will itself call
195 handle_ui_requests. when we return
196 from the request handler, we cannot
197 expect that the state of queued requests
198 is even remotely consistent with
199 the condition before we called it.
202 i->second->get_read_vector (&vec);
204 if (vec.len[0] == 0) {
207 if (vec.buf[0]->valid) {
/* release the map lock while executing: do_request() may re-enter this
 * function or take other locks */
208 request_buffer_map_lock.unlock ();
209 do_request (vec.buf[0]);
210 request_buffer_map_lock.lock ();
/* executed requests are detached from their invalidation record so a later
 * object deletion does not touch a consumed slot */
211 if (vec.buf[0]->invalidation) {
212 vec.buf[0]->invalidation->requests.remove (vec.buf[0]);
214 i->second->increment_read_ptr (1);
220 /* clean up any dead request buffers (their thread has exited) */
222 for (i = request_buffers.begin(); i != request_buffers.end(); ) {
223 if ((*i).second->dead) {
/* NOTE(review): the format string has four placeholders (%1-%4) but only
 * three arguments are visible — an argument line may be elided here. */
224 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 deleting dead per-thread request buffer for %3 @ %4\n",
225 name(), pthread_name(), i->second));
/* keep a copy of the iterator so erase() cannot invalidate our loop
 * position; deletion/advance presumably happens on elided lines */
227 RequestBufferMapIterator tmp = i;
229 request_buffers.erase (i);
236 request_buffer_map_lock.unlock ();
238 /* and now, the generic request buffer. same rules as above apply */
240 Glib::Threads::Mutex::Lock lm (request_list_lock);
242 while (!request_list.empty()) {
243 RequestObject* req = request_list.front ();
244 request_list.pop_front ();
246 /* We need to use this lock, because its the one
247 returned by slot_invalidation_mutex() and protects
248 against request invalidation.
251 request_buffer_map_lock.lock ();
/* NOTE(review): this trace presumably sits inside an elided
 * `if (!req->valid)` branch that deletes the request and continues. */
253 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 handling invalid heap request, type %3, deleting\n", name(), pthread_name(), req->type));
255 request_buffer_map_lock.unlock ();
259 /* we're about to execute this request, so its
260 too late for any invalidation. mark
261 the request as "done" before we start.
264 if (req->invalidation) {
265 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 remove request from its invalidation list\n", name(), pthread_name()));
267 /* after this call, if the object referenced by the
268 * invalidation record is deleted, it will no longer
269 * try to mark the request as invalid.
272 req->invalidation->requests.remove (req);
275 /* at this point, an object involved in a functor could be
276 * deleted before we actually execute the functor. so there is
277 * a race condition that makes the invalidation architecture
278 * somewhat pointless.
280 * really, we should only allow functors containing shared_ptr
281 * references to objects to enter into the request queue.
284 request_buffer_map_lock.unlock ();
286 /* unlock the request lock while we execute the request, so
287 * that we don't needlessly block other threads (note: not RT
288 * threads since they have their own queue) from making requests.
293 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 execute request type %3\n", name(), pthread_name(), req->type));
295 /* and lets do it ... this is a virtual call so that each
296 * specific type of UI can have its own set of requests without
297 * some kind of central request type registration logic
/* the do_request(req) call and `delete req` presumably sit on elided lines
 * between here and the trace below — TODO confirm */
302 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 delete heap request type %3\n", name(), pthread_name(), req->type));
305 /* re-acquire the list lock so that we check again */
/* send_request: hand a request (obtained from get_request()) to this UI.
 *
 * Three paths: no-op if the UI has no running instance; immediate inline
 * dispatch when the caller IS the event-loop thread; otherwise publish via
 * the caller's per-thread ringbuffer (RT-safe) or, unregistered, via the
 * locked generic request_list — then wake the event loop.
 * NOTE(review): several branch braces/returns are on elided lines.
 *
 * @param req  request to deliver; ownership semantics follow the queue it
 *             entered (heap requests are deleted by handle_ui_requests()).
 */
311 template <typename RequestObject> void
312 AbstractUI<RequestObject>::send_request (RequestObject *req)
314 /* This is called to ask a given UI to carry out a request. It may be
315 * called from the same thread that runs the UI's event loop (see the
316 * caller_is_self() case below), or from any other thread.
319 if (base_instance() == 0) {
320 return; /* XXX is this the right thing to do ? */
323 if (caller_is_self ()) {
324 /* the thread that runs this UI's event loop is sending itself
325 a request: we dispatch it immediately and inline.
327 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of request type %3\n", name(), pthread_name(), req->type));
/* the do_request(req)/delete presumably follow on elided lines */
331 /* If called from a different thread, we first check to see if
332 * the calling thread is registered with this UI. If so, there
333 * is a per-thread ringbuffer of requests that ::get_request()
334 * just set up a new request in. If so, all we need do here is
335 * to advance the write ptr in that ringbuffer so that the next
336 * request by this calling thread will use the next slot in
337 * the ringbuffer. The ringbuffer has
338 * single-reader/single-writer semantics because the calling
339 * thread is the only writer, and the UI event loop is the only
343 RequestBuffer* rbuf = per_thread_request_buffer.get ();
/* NOTE(review): the `if (rbuf != 0)` guard appears to be elided here */
346 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send per-thread request type %3\n", name(), pthread_name(), req->type));
/* publishing = advancing the write pointer; the slot itself was filled by
 * get_request() */
347 rbuf->increment_write_ptr (1);
349 /* no per-thread buffer, so just use a list with a lock so that it remains
350 single-reader/single-writer semantics
352 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send heap request type %3\n", name(), pthread_name(), req->type));
353 Glib::Threads::Mutex::Lock lm (request_list_lock);
354 request_list.push_back (req);
357 /* send the UI event loop thread a wakeup so that it will look
358 at the per-thread and generic request lists.
361 signal_new_request ();
/* call_slot: arrange for functor `f` to run in this UI's event-loop thread.
 * Direct inline invocation when called from the event loop itself;
 * otherwise packaged into a CallSlot request and queued (presumably via
 * send_request() on lines past the end of this chunk — the definition is
 * truncated here, TODO confirm).
 *
 * @param invalidation  optional record that cancels this request if the
 *                      objects captured by `f` are destroyed first; may be 0.
 * @param f             functor to execute; copied into the request.
 */
365 template<typename RequestObject> void
366 AbstractUI<RequestObject>::call_slot (InvalidationRecord* invalidation, const boost::function<void()>& f)
368 if (caller_is_self()) {
369 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of call slot via functor @ %3, invalidation %4\n", name(), pthread_name(), &f, invalidation));
/* NOTE(review): the f() invocation and return presumably sit on elided
 * lines here */
374 RequestObject *req = get_request (BaseUI::CallSlot);
/* NOTE(review): a null-check on req appears to be elided */
380 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 queue call-slot using functor @ %3, invalidation %4\n", name(), pthread_name(), &f, invalidation));
382 /* copy semantics: copy the functor into the request object */
386 /* the invalidation record is an object which will carry out
387 * invalidation of any requests associated with it when it is
388 * destroyed. it can be null. if its not null, associate this
389 * request with the invalidation record. this allows us to
390 * "cancel" requests submitted to the UI because they involved
391 * a functor that uses an object that is being deleted.
394 req->invalidation = invalidation;
397 invalidation->requests.push_back (req);
398 invalidation->event_loop = this;
/* definition continues past the end of this chunk */