4 #include "pbd/stacktrace.h"
5 #include "pbd/abstract_ui.h"
6 #include "pbd/pthread_utils.h"
7 #include "pbd/failed_constructor.h"
/* Thread-local storage slot holding each thread's private RequestBuffer for
 * this UI type (one instantiation per template argument R).
 * NOTE(review): this view of the file is elided; the template<> header that
 * must precede this definition is not visible here. */
14 Glib::StaticPrivate<typename AbstractUI<R>::RequestBuffer> AbstractUI<R>::per_thread_request_buffer;
/* TLS destructor hook: installed via per_thread_request_buffer.set() in
 * register_thread(), so it runs when a registered thread exits, with that
 * thread's RequestBuffer passed as 'ptr'.
 * NOTE(review): several lines of this function are elided in this view. */
16 template<typename RequestBuffer> void
17 cleanup_request_buffer (void* ptr)
19 RequestBuffer* rb = (RequestBuffer*) ptr;
/* Take the owning event loop's buffer-map lock before touching shared state.
 * Presumably the elided code marks the buffer 'dead' rather than deleting it
 * here, since handle_ui_requests() erases buffers whose 'dead' flag is set
 * — TODO confirm against the full source. */
22 Glib::Mutex::Lock lm (rb->ui.request_buffer_map_lock);
/* Constructor: subscribes to PBD::ThreadCreatedWithRequestSize so that every
 * thread announced after this event loop exists gets register_thread()'d
 * with it (and thereby receives a per-thread request FIFO).
 * NOTE(review): parts of the constructor body are elided in this view. */
27 template <typename RequestObject>
28 AbstractUI<RequestObject>::AbstractUI (const string& name)
/* A named pointer-to-member-function variable is used so boost::bind can
 * bind register_thread() with an explicit, fully-specified signature. */
31 void (AbstractUI<RequestObject>::*pmf)(string,pthread_t,string,uint32_t) = &AbstractUI<RequestObject>::register_thread;
33 /* better to make this connect a handler that runs in the UI event loop but the syntax seems hard, and
34 register_thread() is thread safe anyway.
/* connect_same_thread: the handler runs in whichever thread emits the
 * signal, which is safe per the comment above. */
37 PBD::ThreadCreatedWithRequestSize.connect_same_thread (new_thread_connection, boost::bind (pmf, this, _1, _2, _3, _4));
/* Give the calling thread a lock-free RequestBuffer for sending requests to
 * this event loop.  Invoked (via the constructor's signal connection) from
 * the newly created thread itself; announcements addressed to a different
 * UI (by name) are ignored.
 * NOTE(review): several branches/braces are elided in this view. */
40 template <typename RequestObject> void
41 AbstractUI<RequestObject>::register_thread (string target_gui, pthread_t thread_id, string /*thread name*/, uint32_t num_requests)
/* Only act on announcements targeted at this UI; presumably the elided body
 * of this branch is a bare 'return' — TODO confirm. */
43 if (target_gui != name()) {
/* Reuse an existing buffer if this thread already registered with this UI
 * (the TLS slot is per-thread, per-UI-type). */
47 RequestBuffer* b = per_thread_request_buffer.get();
50 /* thread already registered with this UI
/* First registration: allocate a FIFO sized to the caller's request and
 * record it in the map under the registering thread's id. */
55 b = new RequestBuffer (num_requests, *this);
58 Glib::Mutex::Lock lm (request_buffer_map_lock);
59 request_buffers[thread_id] = b;
/* Store in TLS with cleanup_request_buffer as the thread-exit destructor,
 * so the buffer is flagged for reaping when the thread dies. */
62 per_thread_request_buffer.set (b, cleanup_request_buffer<RequestBuffer>);
/* Obtain a request object of type 'rt' for the calling thread.  Registered
 * threads get a slot carved out of their lock-free per-thread FIFO;
 * unregistered threads fall back to a heap allocation (delivered later via
 * the locked request_list in send_request()).
 * NOTE(review): the return statements on both paths are elided in this view. */
65 template <typename RequestObject> RequestObject*
66 AbstractUI<RequestObject>::get_request (RequestType rt)
68 RequestBuffer* rbuf = per_thread_request_buffer.get ();
69 RequestBufferVector vec;
72 /* we have a per-thread FIFO, use it */
74 rbuf->get_write_vector (&vec);
/* len[0] == 0 means the FIFO has no writable slot (full); presumably the
 * elided code returns 0 here — TODO confirm. */
76 if (vec.len[0] == 0) {
/* Initialize the slot in place; it is only published to the reader when
 * send_request() advances the write pointer. */
80 vec.buf[0]->type = rt;
81 vec.buf[0]->valid = true;
/* Fallback path for threads without a per-thread buffer. */
85 RequestObject* req = new RequestObject;
/* Drain all pending requests for this event loop: first each registered
 * thread's private FIFO, then the shared lock-protected request_list.
 * Runs in the UI thread.  The lock/unlock sequencing below is deliberate
 * and fragile: do_request() may run a recursive main loop that re-enters
 * this function, so the map lock must never be held across it.
 * NOTE(review): this view is elided (braces, loop-exit paths and some
 * statements are missing); comments describe only the visible lines. */
91 template <typename RequestObject> void
92 AbstractUI<RequestObject>::handle_ui_requests ()
94 RequestBufferMapIterator i;
95 RequestBufferVector vec;
97 /* per-thread buffers first */
/* Hold the map lock while walking request_buffers; it is dropped around
 * each do_request() call (see below). */
99 request_buffer_map_lock.lock ();
101 for (i = request_buffers.begin(); i != request_buffers.end(); ++i) {
105 /* we must process requests 1 by 1 because
106 the request may run a recursive main
107 event loop that will itself call
108 handle_ui_requests. when we return
109 from the request handler, we cannot
110 expect that the state of queued requests
111 is even remotely consistent with
112 the condition before we called it.
115 i->second->get_read_vector (&vec);
/* Empty FIFO for this thread; presumably the elided code moves on to the
 * next buffer here — TODO confirm. */
117 if (vec.len[0] == 0) {
/* Only dispatch requests still marked valid; invalidated ones are skipped
 * (but still consumed via increment_read_ptr below). */
120 if (vec.buf[0]->valid) {
/* Drop the lock for the duration of the handler to avoid self-deadlock on
 * re-entry, then retake it before touching shared state again. */
121 request_buffer_map_lock.unlock ();
122 do_request (vec.buf[0]);
123 request_buffer_map_lock.lock ();
/* The request has run: detach it from its invalidation record so a later
 * invalidation does not touch a consumed slot. */
124 if (vec.buf[0]->invalidation) {
125 vec.buf[0]->invalidation->requests.remove (vec.buf[0]);
/* Consume exactly one slot per iteration (see the 1-by-1 comment above). */
127 i->second->increment_read_ptr (1);
133 /* clean up any dead request buffers (their thread has exited) */
135 for (i = request_buffers.begin(); i != request_buffers.end(); ) {
136 if ((*i).second->dead) {
/* Copy the iterator before erase so iteration can continue safely;
 * NOTE(review): the iterator-advance (and presumably the delete of the
 * buffer itself) are elided here — TODO confirm. */
138 RequestBufferMapIterator tmp = i;
140 request_buffers.erase (i);
147 request_buffer_map_lock.unlock ();
149 /* and now, the generic request buffer. same rules as above apply */
/* request_list_lock guards the shared list used by threads that have no
 * per-thread FIFO (see send_request()). */
151 Glib::Mutex::Lock lm (request_list_lock);
153 while (!request_list.empty()) {
154 RequestObject* req = request_list.front ();
155 request_list.pop_front ();
157 /* We need to use this lock, because its the one
158 returned by slot_invalidation_mutex() and protects
159 against request invalidation.
162 request_buffer_map_lock.lock ();
/* NOTE(review): the code between these lock/unlock pairs (presumably an
 * invalidation check on 'req') is elided in this view. */
165 request_buffer_map_lock.unlock ();
169 /* we're about to execute this request, so its
170 too late for any invalidation. mark
171 the request as "done" before we start.
174 if (req->invalidation) {
175 req->invalidation->requests.remove (req);
178 request_buffer_map_lock.unlock ();
/* Deliver a request previously obtained from get_request() to this event
 * loop and wake it up.  Publication path depends on the caller: per-thread
 * FIFO (just advance the write pointer — the slot was filled in
 * get_request()), or the locked request_list for unregistered threads.
 * NOTE(review): parts of this function (including the caller_is_self()
 * branch body) are elided in this view. */
190 template <typename RequestObject> void
191 AbstractUI<RequestObject>::send_request (RequestObject *req)
/* Nothing to deliver to before the UI's base instance exists. */
193 if (base_instance() == 0) {
194 return; /* XXX is this the right thing to do ? */
/* Caller is the event-loop thread itself; presumably the elided branch body
 * executes the request directly — TODO confirm. */
197 if (caller_is_self ()) {
200 RequestBuffer* rbuf = per_thread_request_buffer.get ();
/* Publishing is a single write-pointer increment: the FIFO is single-
 * reader/single-writer, so no lock is needed on this path. */
203 rbuf->increment_write_ptr (1);
205 /* no per-thread buffer, so just use a list with a lock so that it remains
206 single-reader/single-writer semantics
208 Glib::Mutex::Lock lm (request_list_lock);
209 request_list.push_back (req);
/* Poke the event loop so handle_ui_requests() runs soon. */
212 request_channel.wakeup ();
/* Arrange for functor 'f' to execute in this event loop's thread, optionally
 * tracked by an InvalidationRecord so the call can be cancelled if its
 * target object is destroyed first.
 * NOTE(review): this function continues past the end of the visible source;
 * comments cover only the lines shown. */
216 template<typename RequestObject> void
217 AbstractUI<RequestObject>::call_slot (InvalidationRecord* invalidation, const boost::function<void()>& f)
/* Caller is already the event-loop thread; presumably the elided branch
 * invokes f() directly — TODO confirm. */
219 if (caller_is_self()) {
224 RequestObject *req = get_request (BaseUI::CallSlot);
/* Cross-link request and invalidation record: the record's request list is
 * what handle_ui_requests() prunes after dispatch, and event_loop lets the
 * record's owner find this loop.  NOTE(review): the null-check on
 * 'invalidation' that must guard lines 234/235 is elided in this view. */
231 req->invalidation = invalidation;
234 invalidation->requests.push_back (req);
235 invalidation->event_loop = this;