/* libs/pbd/pbd/abstract_ui.cc (ardour.git) — gitweb page header: "tweak transport bar spacing" */
1 #include <unistd.h>
2 #include <iostream>
3
4 #include "pbd/stacktrace.h"
5 #include "pbd/abstract_ui.h"
6 #include "pbd/pthread_utils.h"
7 #include "pbd/failed_constructor.h"
8
9 #include "i18n.h"
10
11 using namespace std;
12
/* One RequestBuffer per (UI, thread) pair, held in thread-local storage so a
   sending thread can find its own FIFO for this UI without taking any lock. */
template<typename R>
Glib::StaticPrivate<typename AbstractUI<R>::RequestBuffer> AbstractUI<R>::per_thread_request_buffer;
15
16 template<typename RequestBuffer> void 
17 cleanup_request_buffer (void* ptr)
18 {
19         RequestBuffer* rb = (RequestBuffer*) ptr;
20
21         {
22                 Glib::Mutex::Lock lm (rb->ui.request_buffer_map_lock);
23                 rb->dead = true;
24         }
25 }
26
/* Construct an event loop named @a name.
 *
 * Subscribes to PBD::ThreadCreatedWithRequestSize so that any thread which
 * announces itself (with a request-queue size) gets a request FIFO registered
 * with this UI via register_thread().  The handler runs in the emitting
 * thread, not the UI thread (connect_same_thread).
 */
template <typename RequestObject>
AbstractUI<RequestObject>::AbstractUI (const string& name)
        : BaseUI (name)
{
        /* take the member-function address explicitly so boost::bind can bind
           the signal's four arguments to it */
        void (AbstractUI<RequestObject>::*pmf)(string,pthread_t,string,uint32_t) = &AbstractUI<RequestObject>::register_thread;

        /* better to make this connect a handler that runs in the UI event loop but the syntax seems hard, and 
           register_thread() is thread safe anyway.
        */

        PBD::ThreadCreatedWithRequestSize.connect_same_thread (new_thread_connection, boost::bind (pmf, this, _1, _2, _3, _4));
}
39
40 template <typename RequestObject> void
41 AbstractUI<RequestObject>::register_thread (string target_gui, pthread_t thread_id, string /*thread name*/, uint32_t num_requests)
42 {
43         if (target_gui != name()) {
44                 return;
45         }
46
47         RequestBuffer* b = per_thread_request_buffer.get();
48
49         if (b) {
50                 /* thread already registered with this UI
51                  */
52                 return;
53         }
54
55         b = new RequestBuffer (num_requests, *this);
56
57         {
58                 Glib::Mutex::Lock lm (request_buffer_map_lock);
59                 request_buffers[thread_id] = b;
60         }
61
62         per_thread_request_buffer.set (b, cleanup_request_buffer<RequestBuffer>);
63 }
64
/* Obtain a request object of type @a rt for the calling thread.
 *
 * If the caller has a per-thread FIFO registered with this UI, the returned
 * pointer refers to a slot in the FIFO's write vector; it is only published
 * to the reader when send_request() advances the write pointer.  Returns 0
 * when that FIFO is full — callers must handle this.
 *
 * Threads without a FIFO get a heap-allocated request, which
 * handle_ui_requests() deletes after execution.
 */
template <typename RequestObject> RequestObject*
AbstractUI<RequestObject>::get_request (RequestType rt)
{
        RequestBuffer* rbuf = per_thread_request_buffer.get ();
        RequestBufferVector vec;

        if (rbuf != 0) {
                /* we have a per-thread FIFO, use it */

                rbuf->get_write_vector (&vec);

                if (vec.len[0] == 0) {
                        /* no space left in the FIFO */
                        return 0;
                }

                vec.buf[0]->type = rt;
                vec.buf[0]->valid = true;
                return vec.buf[0];
        }

        RequestObject* req = new RequestObject;
        req->type = rt;

        return req;
}
90
91 template <typename RequestObject> void
92 AbstractUI<RequestObject>::handle_ui_requests ()
93 {
94         RequestBufferMapIterator i;
95         RequestBufferVector vec;
96
97         /* per-thread buffers first */
98
99         request_buffer_map_lock.lock ();
100
101         for (i = request_buffers.begin(); i != request_buffers.end(); ++i) {
102
103                 while (true) {
104                         
105                         /* we must process requests 1 by 1 because
106                            the request may run a recursive main
107                            event loop that will itself call
108                            handle_ui_requests. when we return
109                            from the request handler, we cannot
110                            expect that the state of queued requests
111                            is even remotely consistent with
112                            the condition before we called it.
113                         */
114                         
115                         i->second->get_read_vector (&vec);
116                         
117                         if (vec.len[0] == 0) {
118                                 break;
119                         } else {
120                                 if (vec.buf[0]->valid) {
121                                         request_buffer_map_lock.unlock ();
122                                         do_request (vec.buf[0]);
123                                         request_buffer_map_lock.lock ();
124                                         if (vec.buf[0]->invalidation) {
125                                                 vec.buf[0]->invalidation->requests.remove (vec.buf[0]);
126                                         }
127                                         i->second->increment_read_ptr (1);
128                                 }
129                         } 
130                 }
131         }
132
133         /* clean up any dead request buffers (their thread has exited) */
134
135         for (i = request_buffers.begin(); i != request_buffers.end(); ) {
136              if ((*i).second->dead) {
137                      delete (*i).second;
138                      RequestBufferMapIterator tmp = i;
139                      ++tmp;
140                      request_buffers.erase (i);
141                      i = tmp;
142              } else {           
143                      ++i;
144              }
145         }
146
147         request_buffer_map_lock.unlock ();
148
149         /* and now, the generic request buffer. same rules as above apply */
150
151         Glib::Mutex::Lock lm (request_list_lock);
152
153         while (!request_list.empty()) {
154                 RequestObject* req = request_list.front ();
155                 request_list.pop_front ();
156
157                 /* We need to use this lock, because its the one
158                    returned by slot_invalidation_mutex() and protects
159                    against request invalidation.
160                 */
161
162                 request_buffer_map_lock.lock ();
163                 if (!req->valid) {
164                         delete req;
165                         request_buffer_map_lock.unlock ();
166                         continue;
167                 }
168
169                 /* we're about to execute this request, so its
170                    too late for any invalidation. mark
171                    the request as "done" before we start.
172                 */
173
174                 if (req->invalidation) {
175                         req->invalidation->requests.remove (req);
176                 }
177
178                 request_buffer_map_lock.unlock ();
179
180                 lm.release ();
181
182                 do_request (req);
183
184                 delete req;
185
186                 lm.acquire();
187         }
188 }
189
/* Deliver @a req (obtained from get_request()) to this event loop.
 *
 * From the UI thread itself the request is executed synchronously.  From
 * other threads it is queued — via the caller's per-thread FIFO when one
 * exists, otherwise via the locked generic list — and the UI thread is
 * woken through request_channel.
 */
template <typename RequestObject> void
AbstractUI<RequestObject>::send_request (RequestObject *req)
{
        /* NOTE(review): on this early return the request is neither executed
           nor freed — looks like a leak for heap-allocated requests; confirm
           intent. */
        if (base_instance() == 0) {
                return; /* XXX is this the right thing to do ? */
        }

        if (caller_is_self ()) {
                /* caller is the UI thread: no need to queue, run it now */
                do_request (req);
        } else {
                RequestBuffer* rbuf = per_thread_request_buffer.get ();

                if (rbuf != 0) {
                        /* req already sits in this FIFO's write vector (see
                           get_request()); advancing the write pointer is
                           what publishes it to the reader. */
                        rbuf->increment_write_ptr (1);
                } else {
                        /* no per-thread buffer, so just use a list with a lock so that it remains
                           single-reader/single-writer semantics
                        */
                        Glib::Mutex::Lock lm (request_list_lock);
                        request_list.push_back (req);
                }

                /* poke the UI thread awake so it drains the queue */
                request_channel.wakeup ();
        }
}
215
216 template<typename RequestObject> void
217 AbstractUI<RequestObject>::call_slot (InvalidationRecord* invalidation, const boost::function<void()>& f)
218 {
219         if (caller_is_self()) {
220                 f ();
221                 return;
222         }
223
224         RequestObject *req = get_request (BaseUI::CallSlot);
225         
226         if (req == 0) {
227                 return;
228         }
229
230         req->the_slot = f;
231         req->invalidation = invalidation;
232
233         if (invalidation) {
234                 invalidation->requests.push_back (req);
235                 invalidation->event_loop = this;
236         }
237
238         send_request (req);
239 }       
240