/*
 * Copyright (C) 2012 Paul Davis
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
22 #include "pbd/stacktrace.h"
23 #include "pbd/abstract_ui.h"
24 #include "pbd/pthread_utils.h"
25 #include "pbd/failed_constructor.h"
26 #include "pbd/debug.h"
#ifdef COMPILER_MSVC
#include <ardourext/misc.h> // Needed for 'DECLARE_DEFAULT_COMPARISONS'. Objects in an STL container can be
                            // searched and sorted. Thus, when instantiating the container, MSVC complains
                            // if the type of object being contained has no appropriate comparison operators
                            // defined (specifically, if operators '<' and '==' are undefined). This seems
                            // to be the case with ptw32 'pthread_t' which is a simple struct.
DECLARE_DEFAULT_COMPARISONS(ptw32_handle_t)
#endif
/** Thread-exit hook installed on the per-thread request buffer.
 *
 *  @param ptr opaque pointer to the thread's RequestBuffer, as stored in
 *  the thread-private variable. Never null when the hook runs.
 */
template<typename RequestBuffer> void
cleanup_request_buffer (void* ptr)
{
	RequestBuffer* rb = (RequestBuffer*) ptr;

	/* this is called when the thread for which this request buffer was
	 * allocated dies. That could be before or after the end of the UI
	 * event loop for which this request buffer provides communication.
	 *
	 * We are not modifying the UI's thread/buffer map, just marking it
	 * dead. If the UI is currently processing the buffers and misses
	 * this "dead" signal, it will find it the next time it receives
	 * a request. If the UI has finished processing requests, then
	 * we will leak this buffer object.
	 */

	rb->dead = true;
}
61 Glib::Threads::Private<typename AbstractUI<R>::RequestBuffer> AbstractUI<R>::per_thread_request_buffer (cleanup_request_buffer<AbstractUI<R>::RequestBuffer>);
63 template <typename RequestObject>
64 AbstractUI<RequestObject>::AbstractUI (const string& name)
67 void (AbstractUI<RequestObject>::*pmf)(pthread_t,string,uint32_t) = &AbstractUI<RequestObject>::register_thread;
69 /* better to make this connect a handler that runs in the UI event loop but the syntax seems hard, and
70 register_thread() is thread safe anyway.
73 PBD::ThreadCreatedWithRequestSize.connect_same_thread (new_thread_connection, boost::bind (pmf, this, _1, _2, _3));
75 /* find pre-registerer threads */
77 vector<EventLoop::ThreadBufferMapping> tbm = EventLoop::get_request_buffers_for_target_thread (event_loop_name());
80 Glib::Threads::Mutex::Lock lm (request_buffer_map_lock);
81 for (vector<EventLoop::ThreadBufferMapping>::iterator t = tbm.begin(); t != tbm.end(); ++t) {
82 request_buffers[t->emitting_thread] = static_cast<RequestBuffer*> (t->request_buffer);
87 template <typename RequestObject> void
88 AbstractUI<RequestObject>::register_thread (pthread_t thread_id, string thread_name, uint32_t num_requests)
90 /* the calling thread wants to register with the thread that runs this
91 * UI's event loop, so that it will have its own per-thread queue of
92 * requests. this means that when it makes a request to this UI it can
93 * do so in a realtime-safe manner (no locks).
96 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("in %1 (thread name %4), %2 (%5) wants to register with UIs\n", event_loop_name(), thread_name, pthread_name(), DEBUG_THREAD_SELF));
98 /* the per_thread_request_buffer is a thread-private variable.
99 See pthreads documentation for more on these, but the key
100 thing is that it is a variable that as unique value for
101 each thread, guaranteed. Note that the thread in question
102 is the caller of this function, which is assumed to be the
103 thread from which signals will be emitted that this UI's
104 event loop will catch.
107 RequestBuffer* b = per_thread_request_buffer.get();
111 /* create a new request queue/ringbuffer */
113 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("create new request buffer for %1 in %2\n", thread_name, event_loop_name()));
115 b = new RequestBuffer (num_requests);
116 /* set this thread's per_thread_request_buffer to this new
117 queue/ringbuffer. remember that only this thread will
118 get this queue when it calls per_thread_request_buffer.get()
120 the second argument is a function that will be called
121 when the thread exits, and ensures that the buffer is marked
122 dead. it will then be deleted during a call to handle_ui_requests()
125 per_thread_request_buffer.set (b);
127 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1 : %2 is already registered\n", event_loop_name(), thread_name));
131 /* add the new request queue (ringbuffer) to our map
132 so that we can iterate over it when the time is right.
133 This step is not RT-safe, but is assumed to be called
134 only at thread initialization time, not repeatedly,
135 and so this is of little consequence.
137 Glib::Threads::Mutex::Lock lm (request_buffer_map_lock);
138 request_buffers[thread_id] = b;
143 template <typename RequestObject> RequestObject*
144 AbstractUI<RequestObject>::get_request (RequestType rt)
146 RequestBuffer* rbuf = per_thread_request_buffer.get ();
147 RequestBufferVector vec;
149 /* see comments in ::register_thread() above for an explanation of
150 the per_thread_request_buffer variable
155 /* the calling thread has registered with this UI and therefore
156 * we have a per-thread request queue/ringbuffer. use it. this
157 * "allocation" of a request is RT-safe.
160 rbuf->get_write_vector (&vec);
162 if (vec.len[0] == 0) {
163 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: no space in per thread pool for request of type %2\n", event_loop_name(), rt));
167 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated per-thread request of type %2, caller %3\n", event_loop_name(), rt, pthread_name()));
169 vec.buf[0]->type = rt;
170 vec.buf[0]->valid = true;
174 /* calling thread has not registered, so just allocate a new request on
175 * the heap. the lack of registration implies that realtime constraints
179 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1: allocated normal heap request of type %2, caller %3\n", event_loop_name(), rt, pthread_name()));
181 RequestObject* req = new RequestObject;
187 template <typename RequestObject> void
188 AbstractUI<RequestObject>::handle_ui_requests ()
190 RequestBufferMapIterator i;
191 RequestBufferVector vec;
193 /* check all registered per-thread buffers first */
195 request_buffer_map_lock.lock ();
197 for (i = request_buffers.begin(); i != request_buffers.end(); ++i) {
201 /* we must process requests 1 by 1 because
202 * the request may run a recursive main
203 * event loop that will itself call
204 * handle_ui_requests. when we return
205 * from the request handler, we cannot
206 * expect that the state of queued requests
207 * is even remotely consistent with
208 * the condition before we called it.
211 i->second->get_read_vector (&vec);
213 if (vec.len[0] == 0) {
216 if (vec.buf[0]->valid) {
217 request_buffer_map_lock.unlock ();
218 do_request (vec.buf[0]);
219 request_buffer_map_lock.lock ();
220 if (vec.buf[0]->invalidation) {
221 vec.buf[0]->invalidation->requests.remove (vec.buf[0]);
224 i->second->increment_read_ptr (1);
229 /* clean up any dead request buffers (their thread has exited) */
231 for (i = request_buffers.begin(); i != request_buffers.end(); ) {
232 if ((*i).second->dead) {
233 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 deleting dead per-thread request buffer for %3 @ %4\n",
234 event_loop_name(), pthread_name(), i->second));
235 cerr << event_loop_name() << " noticed that a buffer was dead\n";
237 RequestBufferMapIterator tmp = i;
239 request_buffers.erase (i);
246 request_buffer_map_lock.unlock ();
248 /* and now, the generic request buffer. same rules as above apply */
250 Glib::Threads::Mutex::Lock lm (request_list_lock);
252 while (!request_list.empty()) {
253 RequestObject* req = request_list.front ();
254 request_list.pop_front ();
256 /* We need to use this lock, because its the one
257 * returned by slot_invalidation_mutex() and protects
258 * against request invalidation.
261 request_buffer_map_lock.lock ();
263 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 handling invalid heap request, type %3, deleting\n", event_loop_name(), pthread_name(), req->type));
265 request_buffer_map_lock.unlock ();
269 /* we're about to execute this request, so its
270 * too late for any invalidation. mark
271 * the request as "done" before we start.
274 if (req->invalidation) {
275 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 remove request from its invalidation list\n", event_loop_name(), pthread_name()));
277 /* after this call, if the object referenced by the
278 * invalidation record is deleted, it will no longer
279 * try to mark the request as invalid.
282 req->invalidation->requests.remove (req);
285 /* at this point, an object involved in a functor could be
286 * deleted before we actually execute the functor. so there is
287 * a race condition that makes the invalidation architecture
288 * somewhat pointless.
290 * really, we should only allow functors containing shared_ptr
291 * references to objects to enter into the request queue.
294 request_buffer_map_lock.unlock ();
296 /* unlock the request lock while we execute the request, so
297 * that we don't needlessly block other threads (note: not RT
298 * threads since they have their own queue) from making requests.
303 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 execute request type %3\n", event_loop_name(), pthread_name(), req->type));
305 /* and lets do it ... this is a virtual call so that each
306 * specific type of UI can have its own set of requests without
307 * some kind of central request type registration logic
312 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 delete heap request type %3\n", event_loop_name(), pthread_name(), req->type));
315 /* re-acquire the list lock so that we check again */
321 template <typename RequestObject> void
322 AbstractUI<RequestObject>::send_request (RequestObject *req)
324 /* This is called to ask a given UI to carry out a request. It may be
325 * called from the same thread that runs the UI's event loop (see the
326 * caller_is_self() case below), or from any other thread.
329 if (base_instance() == 0) {
330 return; /* XXX is this the right thing to do ? */
333 if (caller_is_self ()) {
334 /* the thread that runs this UI's event loop is sending itself
335 a request: we dispatch it immediately and inline.
337 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of request type %3\n", event_loop_name(), pthread_name(), req->type));
342 /* If called from a different thread, we first check to see if
343 * the calling thread is registered with this UI. If so, there
344 * is a per-thread ringbuffer of requests that ::get_request()
345 * just set up a new request in. If so, all we need do here is
346 * to advance the write ptr in that ringbuffer so that the next
347 * request by this calling thread will use the next slot in
348 * the ringbuffer. The ringbuffer has
349 * single-reader/single-writer semantics because the calling
350 * thread is the only writer, and the UI event loop is the only
354 RequestBuffer* rbuf = per_thread_request_buffer.get ();
357 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send per-thread request type %3\n", event_loop_name(), pthread_name(), req->type));
358 rbuf->increment_write_ptr (1);
360 /* no per-thread buffer, so just use a list with a lock so that it remains
361 single-reader/single-writer semantics
363 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 send heap request type %3\n", event_loop_name(), pthread_name(), req->type));
364 cerr << "Send request to " << event_loop_name() << " via LIST from " << pthread_name() << endl;
365 Glib::Threads::Mutex::Lock lm (request_list_lock);
366 request_list.push_back (req);
369 /* send the UI event loop thread a wakeup so that it will look
370 at the per-thread and generic request lists.
373 signal_new_request ();
377 template<typename RequestObject> void
378 AbstractUI<RequestObject>::call_slot (InvalidationRecord* invalidation, const boost::function<void()>& f)
380 if (caller_is_self()) {
381 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 direct dispatch of call slot via functor @ %3, invalidation %4\n", event_loop_name(), pthread_name(), &f, invalidation));
386 RequestObject *req = get_request (BaseUI::CallSlot);
392 DEBUG_TRACE (PBD::DEBUG::AbstractUI, string_compose ("%1/%2 queue call-slot using functor @ %3, invalidation %4\n", event_loop_name(), pthread_name(), &f, invalidation));
394 /* copy semantics: copy the functor into the request object */
398 /* the invalidation record is an object which will carry out
399 * invalidation of any requests associated with it when it is
400 * destroyed. it can be null. if its not null, associate this
401 * request with the invalidation record. this allows us to
402 * "cancel" requests submitted to the UI because they involved
403 * a functor that uses an object that is being deleted.
406 req->invalidation = invalidation;
409 invalidation->requests.push_back (req);
410 invalidation->event_loop = this;
416 template<typename RequestObject> void*
417 AbstractUI<RequestObject>::request_buffer_factory (uint32_t num_requests)
419 RequestBuffer* mcr = new RequestBuffer (num_requests);
420 per_thread_request_buffer.set (mcr);