Merged fix from cocoa branch -r 831.
[ardour.git] / libs / pbd / pbd / abstract_ui.cc
1 #include <unistd.h>
2
3 #include <pbd/abstract_ui.h>
4 #include <pbd/pthread_utils.h>
5 #include <pbd/failed_constructor.h>
6
7 #include "i18n.h"
8
9 using namespace std;
10
11 template <typename RequestObject>
12 AbstractUI<RequestObject>::AbstractUI (string name, bool with_signal_pipes)
13         : BaseUI (name, with_signal_pipes)
14 {
15         if (pthread_key_create (&thread_request_buffer_key, 0)) {
16                 cerr << _("cannot create thread request buffer key") << endl;
17                 throw failed_constructor();
18         }
19
20         PBD::ThreadCreated.connect (mem_fun (*this, &AbstractUI<RequestObject>::register_thread));
21         PBD::ThreadCreatedWithRequestSize.connect (mem_fun (*this, &AbstractUI<RequestObject>::register_thread_with_request_count));
22 }
23
24 template <typename RequestObject> void
25 AbstractUI<RequestObject>::register_thread (pthread_t thread_id, string name)
26 {
27         register_thread_with_request_count (thread_id, name, 256);
28 }
29
30 template <typename RequestObject> void
31 AbstractUI<RequestObject>::register_thread_with_request_count (pthread_t thread_id, string thread_name, uint32_t num_requests)
32 {
33         RequestBuffer* b = new RequestBuffer (num_requests);
34
35         {
36         Glib::Mutex::Lock lm (request_buffer_map_lock);
37                 request_buffers[thread_id] = b;
38         }
39
40         pthread_setspecific (thread_request_buffer_key, b);
41 }
42
43 template <typename RequestObject> RequestObject*
44 AbstractUI<RequestObject>::get_request (RequestType rt)
45 {
46         RequestBuffer* rbuf = static_cast<RequestBuffer*>(pthread_getspecific (thread_request_buffer_key));
47         
48         if (rbuf == 0) {
49                 /* Cannot happen, but if it does we can't use the error reporting mechanism */
50                 cerr << _("programming error: ")
51                      << string_compose ("no %1-UI request buffer found for thread %2", name(), pthread_name())
52                      << endl;
53                 abort ();
54         }
55         
56         RequestBufferVector vec;
57         
58         rbuf->get_write_vector (&vec);
59
60         if (vec.len[0] == 0) {
61                 if (vec.len[1] == 0) {
62                         cerr << string_compose ("no space in %1-UI request buffer for thread %2", name(), pthread_name())
63                              << endl;
64                         return 0;
65                 } else {
66                         vec.buf[1]->type = rt;
67                         return vec.buf[1];
68                 }
69         } else {
70                 vec.buf[0]->type = rt;
71                 return vec.buf[0];
72         }
73 }
74
template <typename RequestObject> void
AbstractUI<RequestObject>::handle_ui_requests ()
{
	/* Drain every registered thread's request buffer, dispatching each
	   queued request through do_request(). Intended to run in the UI
	   thread (presumably in response to the signal pipe — confirm against
	   BaseUI).
	*/
	RequestBufferMapIterator i;

	request_buffer_map_lock.lock ();

	/* NOTE(review): the map lock is released around each do_request()
	   call below. If another thread registers itself (inserting into
	   request_buffers) during that window, iterator `i` validity depends
	   on the map type's invalidation rules — TODO confirm this is safe.
	   Also note the lock is manually paired, not RAII: an exception from
	   do_request() would leave it held.
	*/
	for (i = request_buffers.begin(); i != request_buffers.end(); ++i) {

		RequestBufferVector vec;

		while (true) {

			/* we must process requests 1 by 1 because
			   the request may run a recursive main
			   event loop that will itself call
			   handle_ui_requests. when we return
			   from the request handler, we cannot
			   expect that the state of queued requests
			   is even remotely consistent with
			   the condition before we called it.
			*/

			/* re-fetch the read vector on every iteration for the
			   same reason: its state may have changed underneath us.
			*/
			i->second->get_read_vector (&vec);

			if (vec.len[0] == 0) {
				/* nothing (more) queued for this thread */
				break;
			} else {
				/* request_factory/copy constructor does a deep
				   copy of the Request object,
				   unlike Ringbuffer::read()
				*/

				RequestObject req (*vec.buf[0]);
				/* consume the slot before dispatching, so a
				   recursive handle_ui_requests() does not see
				   this request again
				*/
				i->second->increment_read_ptr (1);
				/* drop the lock while running the handler: it
				   may block, re-enter this function, or try to
				   register a new thread
				*/
				request_buffer_map_lock.unlock ();
				do_request (&req);
				request_buffer_map_lock.lock ();
			} 
		}
	}

	request_buffer_map_lock.unlock ();
}
119
120 template <typename RequestObject> void
121 AbstractUI<RequestObject>::send_request (RequestObject *req)
122 {
123         if (base_instance() == 0) {
124                 return; /* XXX is this the right thing to do ? */
125         }
126         
127         if (caller_is_ui_thread()) {
128                 // cerr << "GUI thread sent request " << req << " type = " << req->type << endl;
129                 do_request (req);
130         } else {        
131                 RequestBuffer* rbuf = static_cast<RequestBuffer*> (pthread_getspecific (thread_request_buffer_key));
132
133                 if (rbuf == 0) {
134                         /* can't use the error system to report this, because this
135                            thread isn't registered!
136                         */
137                         cerr << _("programming error: ")
138                              << string_compose ("AbstractUI::send_request() called from %1 (%2), but no request buffer exists for that thread", name(), pthread_name())
139                              << endl;
140                         abort ();
141                 }
142                 
143                 // cerr << "thread " << pthread_self() << " sent request " << req << " type = " << req->type << endl;
144
145                 rbuf->increment_write_ptr (1);
146
147                 if (signal_pipe[1] >= 0) {
148                         const char c = 0;
149                         write (signal_pipe[1], &c, 1);
150                 }
151         }
152 }
153