1 /************************************************************************/
\r
3 \brief Realtime audio i/o C++ classes.
\r
5 RtAudio provides a common API (Application Programming Interface)
\r
6 for realtime audio input/output across Linux (native ALSA, Jack,
\r
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
\r
8 (DirectSound, ASIO and WASAPI) operating systems.
\r
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
\r
12 RtAudio: realtime audio i/o C++ classes
\r
13 Copyright (c) 2001-2014 Gary P. Scavone
\r
15 Permission is hereby granted, free of charge, to any person
\r
16 obtaining a copy of this software and associated documentation files
\r
17 (the "Software"), to deal in the Software without restriction,
\r
18 including without limitation the rights to use, copy, modify, merge,
\r
19 publish, distribute, sublicense, and/or sell copies of the Software,
\r
20 and to permit persons to whom the Software is furnished to do so,
\r
21 subject to the following conditions:
\r
23 The above copyright notice and this permission notice shall be
\r
24 included in all copies or substantial portions of the Software.
\r
26 Any person wishing to distribute modifications to the Software is
\r
27 asked to send the modifications to the original developer so that
\r
28 they can be incorporated into the canonical version. This is,
\r
29 however, not a binding provision of this license.
\r
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
\r
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
\r
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
\r
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
\r
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
\r
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
\r
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\r
39 /************************************************************************/
\r
41 // RtAudio: Version 4.1.1
\r
43 #include "RtAudio.h"
\r
49 // Static variable definitions.
\r
50 const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
\r
51 const unsigned int RtApi::SAMPLE_RATES[] = {
\r
52 4000, 5512, 8000, 9600, 11025, 16000, 22050,
\r
53 32000, 44100, 48000, 88200, 96000, 176400, 192000
\r
56 #if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
\r
57 #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
\r
58 #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
\r
59 #define MUTEX_LOCK(A) EnterCriticalSection(A)
\r
60 #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
\r
64 static std::string convertCharPointerToStdString(const char *text)
\r
66 return std::string(text);
\r
69 static std::string convertCharPointerToStdString(const wchar_t *text)
\r
71 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
\r
72 std::string s( length-1, '\0' );
\r
73 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
\r
77 #elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
\r
79 #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
\r
80 #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
\r
81 #define MUTEX_LOCK(A) pthread_mutex_lock(A)
\r
82 #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
\r
84 #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
\r
85 #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
\r
88 // *************************************************** //
\r
90 // RtAudio definitions.
\r
92 // *************************************************** //
\r
94 std::string RtAudio :: getVersion( void ) throw()
\r
96 return RTAUDIO_VERSION;
\r
99 void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis ) throw()
\r
103 // The order here will control the order of RtAudio's API search in
\r
104 // the constructor.
\r
105 #if defined(__UNIX_JACK__)
\r
106 apis.push_back( UNIX_JACK );
\r
108 #if defined(__LINUX_ALSA__)
\r
109 apis.push_back( LINUX_ALSA );
\r
111 #if defined(__LINUX_PULSE__)
\r
112 apis.push_back( LINUX_PULSE );
\r
114 #if defined(__LINUX_OSS__)
\r
115 apis.push_back( LINUX_OSS );
\r
117 #if defined(__WINDOWS_ASIO__)
\r
118 apis.push_back( WINDOWS_ASIO );
\r
120 #if defined(__WINDOWS_WASAPI__)
\r
121 apis.push_back( WINDOWS_WASAPI );
\r
123 #if defined(__WINDOWS_DS__)
\r
124 apis.push_back( WINDOWS_DS );
\r
126 #if defined(__MACOSX_CORE__)
\r
127 apis.push_back( MACOSX_CORE );
\r
129 #if defined(__RTAUDIO_DUMMY__)
\r
130 apis.push_back( RTAUDIO_DUMMY );
\r
134 void RtAudio :: openRtApi( RtAudio::Api api )
\r
140 #if defined(__UNIX_JACK__)
\r
141 if ( api == UNIX_JACK )
\r
142 rtapi_ = new RtApiJack();
\r
144 #if defined(__LINUX_ALSA__)
\r
145 if ( api == LINUX_ALSA )
\r
146 rtapi_ = new RtApiAlsa();
\r
148 #if defined(__LINUX_PULSE__)
\r
149 if ( api == LINUX_PULSE )
\r
150 rtapi_ = new RtApiPulse();
\r
152 #if defined(__LINUX_OSS__)
\r
153 if ( api == LINUX_OSS )
\r
154 rtapi_ = new RtApiOss();
\r
156 #if defined(__WINDOWS_ASIO__)
\r
157 if ( api == WINDOWS_ASIO )
\r
158 rtapi_ = new RtApiAsio();
\r
160 #if defined(__WINDOWS_WASAPI__)
\r
161 if ( api == WINDOWS_WASAPI )
\r
162 rtapi_ = new RtApiWasapi();
\r
164 #if defined(__WINDOWS_DS__)
\r
165 if ( api == WINDOWS_DS )
\r
166 rtapi_ = new RtApiDs();
\r
168 #if defined(__MACOSX_CORE__)
\r
169 if ( api == MACOSX_CORE )
\r
170 rtapi_ = new RtApiCore();
\r
172 #if defined(__RTAUDIO_DUMMY__)
\r
173 if ( api == RTAUDIO_DUMMY )
\r
174 rtapi_ = new RtApiDummy();
\r
178 RtAudio :: RtAudio( RtAudio::Api api )
\r
182 if ( api != UNSPECIFIED ) {
\r
183 // Attempt to open the specified API.
\r
185 if ( rtapi_ ) return;
\r
187 // No compiled support for specified API value. Issue a debug
\r
188 // warning and continue as if no API was specified.
\r
189 std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;
\r
192 // Iterate through the compiled APIs and return as soon as we find
\r
193 // one with at least one device or we reach the end of the list.
\r
194 std::vector< RtAudio::Api > apis;
\r
195 getCompiledApi( apis );
\r
196 for ( unsigned int i=0; i<apis.size(); i++ ) {
\r
197 openRtApi( apis[i] );
\r
198 if ( rtapi_->getDeviceCount() ) break;
\r
201 if ( rtapi_ ) return;
\r
203 // It should not be possible to get here because the preprocessor
\r
204 // definition __RTAUDIO_DUMMY__ is automatically defined if no
\r
205 // API-specific definitions are passed to the compiler. But just in
\r
206 // case something weird happens, we'll thow an error.
\r
207 std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
\r
208 throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
\r
211 RtAudio :: ~RtAudio() throw()
\r
217 void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
\r
218 RtAudio::StreamParameters *inputParameters,
\r
219 RtAudioFormat format, unsigned int sampleRate,
\r
220 unsigned int *bufferFrames,
\r
221 RtAudioCallback callback, void *userData,
\r
222 RtAudio::StreamOptions *options,
\r
223 RtAudioErrorCallback errorCallback )
\r
225 return rtapi_->openStream( outputParameters, inputParameters, format,
\r
226 sampleRate, bufferFrames, callback,
\r
227 userData, options, errorCallback );
\r
230 // *************************************************** //
\r
232 // Public RtApi definitions (see end of file for
\r
233 // private or protected utility functions).
\r
235 // *************************************************** //
\r
239 stream_.state = STREAM_CLOSED;
\r
240 stream_.mode = UNINITIALIZED;
\r
241 stream_.apiHandle = 0;
\r
242 stream_.userBuffer[0] = 0;
\r
243 stream_.userBuffer[1] = 0;
\r
244 MUTEX_INITIALIZE( &stream_.mutex );
\r
245 showWarnings_ = true;
\r
246 firstErrorOccurred_ = false;
\r
251 MUTEX_DESTROY( &stream_.mutex );
\r
254 void RtApi :: openStream( RtAudio::StreamParameters *oParams,
\r
255 RtAudio::StreamParameters *iParams,
\r
256 RtAudioFormat format, unsigned int sampleRate,
\r
257 unsigned int *bufferFrames,
\r
258 RtAudioCallback callback, void *userData,
\r
259 RtAudio::StreamOptions *options,
\r
260 RtAudioErrorCallback errorCallback )
\r
262 if ( stream_.state != STREAM_CLOSED ) {
\r
263 errorText_ = "RtApi::openStream: a stream is already open!";
\r
264 error( RtAudioError::INVALID_USE );
\r
268 // Clear stream information potentially left from a previously open stream.
\r
271 if ( oParams && oParams->nChannels < 1 ) {
\r
272 errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
\r
273 error( RtAudioError::INVALID_USE );
\r
277 if ( iParams && iParams->nChannels < 1 ) {
\r
278 errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
\r
279 error( RtAudioError::INVALID_USE );
\r
283 if ( oParams == NULL && iParams == NULL ) {
\r
284 errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
\r
285 error( RtAudioError::INVALID_USE );
\r
289 if ( formatBytes(format) == 0 ) {
\r
290 errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
\r
291 error( RtAudioError::INVALID_USE );
\r
295 unsigned int nDevices = getDeviceCount();
\r
296 unsigned int oChannels = 0;
\r
298 oChannels = oParams->nChannels;
\r
299 if ( oParams->deviceId >= nDevices ) {
\r
300 errorText_ = "RtApi::openStream: output device parameter value is invalid.";
\r
301 error( RtAudioError::INVALID_USE );
\r
306 unsigned int iChannels = 0;
\r
308 iChannels = iParams->nChannels;
\r
309 if ( iParams->deviceId >= nDevices ) {
\r
310 errorText_ = "RtApi::openStream: input device parameter value is invalid.";
\r
311 error( RtAudioError::INVALID_USE );
\r
318 if ( oChannels > 0 ) {
\r
320 result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
\r
321 sampleRate, format, bufferFrames, options );
\r
322 if ( result == false ) {
\r
323 error( RtAudioError::SYSTEM_ERROR );
\r
328 if ( iChannels > 0 ) {
\r
330 result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
\r
331 sampleRate, format, bufferFrames, options );
\r
332 if ( result == false ) {
\r
333 if ( oChannels > 0 ) closeStream();
\r
334 error( RtAudioError::SYSTEM_ERROR );
\r
339 stream_.callbackInfo.callback = (void *) callback;
\r
340 stream_.callbackInfo.userData = userData;
\r
341 stream_.callbackInfo.errorCallback = (void *) errorCallback;
\r
343 if ( options ) options->numberOfBuffers = stream_.nBuffers;
\r
344 stream_.state = STREAM_STOPPED;
\r
347 unsigned int RtApi :: getDefaultInputDevice( void )
\r
349 // Should be implemented in subclasses if possible.
\r
353 unsigned int RtApi :: getDefaultOutputDevice( void )
\r
355 // Should be implemented in subclasses if possible.
\r
359 void RtApi :: closeStream( void )
\r
361 // MUST be implemented in subclasses!
\r
365 bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
\r
366 unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
\r
367 RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
\r
368 RtAudio::StreamOptions * /*options*/ )
\r
370 // MUST be implemented in subclasses!
\r
374 void RtApi :: tickStreamTime( void )
\r
376 // Subclasses that do not provide their own implementation of
\r
377 // getStreamTime should call this function once per buffer I/O to
\r
378 // provide basic stream time support.
\r
380 stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );
\r
382 #if defined( HAVE_GETTIMEOFDAY )
\r
383 gettimeofday( &stream_.lastTickTimestamp, NULL );
\r
387 long RtApi :: getStreamLatency( void )
\r
391 long totalLatency = 0;
\r
392 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
393 totalLatency = stream_.latency[0];
\r
394 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
395 totalLatency += stream_.latency[1];
\r
397 return totalLatency;
\r
400 double RtApi :: getStreamTime( void )
\r
404 #if defined( HAVE_GETTIMEOFDAY )
\r
405 // Return a very accurate estimate of the stream time by
\r
406 // adding in the elapsed time since the last tick.
\r
407 struct timeval then;
\r
408 struct timeval now;
\r
410 if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
\r
411 return stream_.streamTime;
\r
413 gettimeofday( &now, NULL );
\r
414 then = stream_.lastTickTimestamp;
\r
415 return stream_.streamTime +
\r
416 ((now.tv_sec + 0.000001 * now.tv_usec) -
\r
417 (then.tv_sec + 0.000001 * then.tv_usec));
\r
419 return stream_.streamTime;
\r
423 void RtApi :: setStreamTime( double time )
\r
428 stream_.streamTime = time;
\r
431 unsigned int RtApi :: getStreamSampleRate( void )
\r
435 return stream_.sampleRate;
\r
439 // *************************************************** //
\r
441 // OS/API-specific methods.
\r
443 // *************************************************** //
\r
445 #if defined(__MACOSX_CORE__)
\r
447 // The OS X CoreAudio API is designed to use a separate callback
\r
448 // procedure for each of its audio devices. A single RtAudio duplex
\r
449 // stream using two different devices is supported here, though it
\r
450 // cannot be guaranteed to always behave correctly because we cannot
\r
451 // synchronize these two callbacks.
\r
453 // A property listener is installed for over/underrun information.
\r
454 // However, no functionality is currently provided to allow property
\r
455 // listeners to trigger user handlers because it is unclear what could
\r
456 // be done if a critical stream parameter (buffer size, sample rate,
\r
457 // device disconnect) notification arrived. The listeners entail
\r
458 // quite a bit of extra code and most likely, a user program wouldn't
\r
459 // be prepared for the result anyway. However, we do provide a flag
\r
460 // to the client callback function to inform of an over/underrun.
\r
462 // A structure to hold various information related to the CoreAudio API
\r
464 struct CoreHandle {
\r
465 AudioDeviceID id[2]; // device ids
\r
466 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
467 AudioDeviceIOProcID procId[2];
\r
469 UInt32 iStream[2]; // device stream index (or first if using multiple)
\r
470 UInt32 nStreams[2]; // number of streams to use
\r
472 char *deviceBuffer;
\r
473 pthread_cond_t condition;
\r
474 int drainCounter; // Tracks callback counts when draining
\r
475 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
478 :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
481 RtApiCore:: RtApiCore()
\r
483 #if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
\r
484 // This is a largely undocumented but absolutely necessary
\r
485 // requirement starting with OS-X 10.6. If not called, queries and
\r
486 // updates to various audio device properties are not handled
\r
488 CFRunLoopRef theRunLoop = NULL;
\r
489 AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
\r
490 kAudioObjectPropertyScopeGlobal,
\r
491 kAudioObjectPropertyElementMaster };
\r
492 OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
\r
493 if ( result != noErr ) {
\r
494 errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
\r
495 error( RtAudioError::WARNING );
\r
500 RtApiCore :: ~RtApiCore()
\r
502 // The subclass destructor gets called before the base class
\r
503 // destructor, so close an existing stream before deallocating
\r
504 // apiDeviceId memory.
\r
505 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
508 unsigned int RtApiCore :: getDeviceCount( void )
\r
510 // Find out how many audio devices there are, if any.
\r
512 AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
513 OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
\r
514 if ( result != noErr ) {
\r
515 errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
\r
516 error( RtAudioError::WARNING );
\r
520 return dataSize / sizeof( AudioDeviceID );
\r
523 unsigned int RtApiCore :: getDefaultInputDevice( void )
\r
525 unsigned int nDevices = getDeviceCount();
\r
526 if ( nDevices <= 1 ) return 0;
\r
529 UInt32 dataSize = sizeof( AudioDeviceID );
\r
530 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
531 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
532 if ( result != noErr ) {
\r
533 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
\r
534 error( RtAudioError::WARNING );
\r
538 dataSize *= nDevices;
\r
539 AudioDeviceID deviceList[ nDevices ];
\r
540 property.mSelector = kAudioHardwarePropertyDevices;
\r
541 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
542 if ( result != noErr ) {
\r
543 errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
\r
544 error( RtAudioError::WARNING );
\r
548 for ( unsigned int i=0; i<nDevices; i++ )
\r
549 if ( id == deviceList[i] ) return i;
\r
551 errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
\r
552 error( RtAudioError::WARNING );
\r
556 unsigned int RtApiCore :: getDefaultOutputDevice( void )
\r
558 unsigned int nDevices = getDeviceCount();
\r
559 if ( nDevices <= 1 ) return 0;
\r
562 UInt32 dataSize = sizeof( AudioDeviceID );
\r
563 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
564 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
\r
565 if ( result != noErr ) {
\r
566 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
\r
567 error( RtAudioError::WARNING );
\r
571 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
572 AudioDeviceID deviceList[ nDevices ];
\r
573 property.mSelector = kAudioHardwarePropertyDevices;
\r
574 result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
\r
575 if ( result != noErr ) {
\r
576 errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
\r
577 error( RtAudioError::WARNING );
\r
581 for ( unsigned int i=0; i<nDevices; i++ )
\r
582 if ( id == deviceList[i] ) return i;
\r
584 errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
\r
585 error( RtAudioError::WARNING );
\r
589 RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
\r
591 RtAudio::DeviceInfo info;
\r
592 info.probed = false;
\r
595 unsigned int nDevices = getDeviceCount();
\r
596 if ( nDevices == 0 ) {
\r
597 errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
\r
598 error( RtAudioError::INVALID_USE );
\r
602 if ( device >= nDevices ) {
\r
603 errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
\r
604 error( RtAudioError::INVALID_USE );
\r
608 AudioDeviceID deviceList[ nDevices ];
\r
609 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
610 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
611 kAudioObjectPropertyScopeGlobal,
\r
612 kAudioObjectPropertyElementMaster };
\r
613 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
614 0, NULL, &dataSize, (void *) &deviceList );
\r
615 if ( result != noErr ) {
\r
616 errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
\r
617 error( RtAudioError::WARNING );
\r
621 AudioDeviceID id = deviceList[ device ];
\r
623 // Get the device name.
\r
625 CFStringRef cfname;
\r
626 dataSize = sizeof( CFStringRef );
\r
627 property.mSelector = kAudioObjectPropertyManufacturer;
\r
628 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
629 if ( result != noErr ) {
\r
630 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
\r
631 errorText_ = errorStream_.str();
\r
632 error( RtAudioError::WARNING );
\r
636 //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
637 int length = CFStringGetLength(cfname);
\r
638 char *mname = (char *)malloc(length * 3 + 1);
\r
639 #if defined( UNICODE ) || defined( _UNICODE )
\r
640 CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
\r
642 CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
\r
644 info.name.append( (const char *)mname, strlen(mname) );
\r
645 info.name.append( ": " );
\r
646 CFRelease( cfname );
\r
649 property.mSelector = kAudioObjectPropertyName;
\r
650 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
\r
651 if ( result != noErr ) {
\r
652 errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
\r
653 errorText_ = errorStream_.str();
\r
654 error( RtAudioError::WARNING );
\r
658 //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
\r
659 length = CFStringGetLength(cfname);
\r
660 char *name = (char *)malloc(length * 3 + 1);
\r
661 #if defined( UNICODE ) || defined( _UNICODE )
\r
662 CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
\r
664 CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
\r
666 info.name.append( (const char *)name, strlen(name) );
\r
667 CFRelease( cfname );
\r
670 // Get the output stream "configuration".
\r
671 AudioBufferList *bufferList = nil;
\r
672 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
673 property.mScope = kAudioDevicePropertyScopeOutput;
\r
674 // property.mElement = kAudioObjectPropertyElementWildcard;
\r
676 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
677 if ( result != noErr || dataSize == 0 ) {
\r
678 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
\r
679 errorText_ = errorStream_.str();
\r
680 error( RtAudioError::WARNING );
\r
684 // Allocate the AudioBufferList.
\r
685 bufferList = (AudioBufferList *) malloc( dataSize );
\r
686 if ( bufferList == NULL ) {
\r
687 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
\r
688 error( RtAudioError::WARNING );
\r
692 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
693 if ( result != noErr || dataSize == 0 ) {
\r
694 free( bufferList );
\r
695 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
\r
696 errorText_ = errorStream_.str();
\r
697 error( RtAudioError::WARNING );
\r
701 // Get output channel information.
\r
702 unsigned int i, nStreams = bufferList->mNumberBuffers;
\r
703 for ( i=0; i<nStreams; i++ )
\r
704 info.outputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
705 free( bufferList );
\r
707 // Get the input stream "configuration".
\r
708 property.mScope = kAudioDevicePropertyScopeInput;
\r
709 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
710 if ( result != noErr || dataSize == 0 ) {
\r
711 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
\r
712 errorText_ = errorStream_.str();
\r
713 error( RtAudioError::WARNING );
\r
717 // Allocate the AudioBufferList.
\r
718 bufferList = (AudioBufferList *) malloc( dataSize );
\r
719 if ( bufferList == NULL ) {
\r
720 errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
\r
721 error( RtAudioError::WARNING );
\r
725 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
726 if (result != noErr || dataSize == 0) {
\r
727 free( bufferList );
\r
728 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
\r
729 errorText_ = errorStream_.str();
\r
730 error( RtAudioError::WARNING );
\r
734 // Get input channel information.
\r
735 nStreams = bufferList->mNumberBuffers;
\r
736 for ( i=0; i<nStreams; i++ )
\r
737 info.inputChannels += bufferList->mBuffers[i].mNumberChannels;
\r
738 free( bufferList );
\r
740 // If device opens for both playback and capture, we determine the channels.
\r
741 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
742 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
744 // Probe the device sample rates.
\r
745 bool isInput = false;
\r
746 if ( info.outputChannels == 0 ) isInput = true;
\r
748 // Determine the supported sample rates.
\r
749 property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
\r
750 if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
\r
751 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
752 if ( result != kAudioHardwareNoError || dataSize == 0 ) {
\r
753 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
\r
754 errorText_ = errorStream_.str();
\r
755 error( RtAudioError::WARNING );
\r
759 UInt32 nRanges = dataSize / sizeof( AudioValueRange );
\r
760 AudioValueRange rangeList[ nRanges ];
\r
761 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
\r
762 if ( result != kAudioHardwareNoError ) {
\r
763 errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
\r
764 errorText_ = errorStream_.str();
\r
765 error( RtAudioError::WARNING );
\r
769 // The sample rate reporting mechanism is a bit of a mystery. It
\r
770 // seems that it can either return individual rates or a range of
\r
771 // rates. I assume that if the min / max range values are the same,
\r
772 // then that represents a single supported rate and if the min / max
\r
773 // range values are different, the device supports an arbitrary
\r
774 // range of values (though there might be multiple ranges, so we'll
\r
775 // use the most conservative range).
\r
776 Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
\r
777 bool haveValueRange = false;
\r
778 info.sampleRates.clear();
\r
779 for ( UInt32 i=0; i<nRanges; i++ ) {
\r
780 if ( rangeList[i].mMinimum == rangeList[i].mMaximum )
\r
781 info.sampleRates.push_back( (unsigned int) rangeList[i].mMinimum );
\r
783 haveValueRange = true;
\r
784 if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
\r
785 if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;
\r
789 if ( haveValueRange ) {
\r
790 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
791 if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate )
\r
792 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
796 // Sort and remove any redundant values
\r
797 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
798 info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );
\r
800 if ( info.sampleRates.size() == 0 ) {
\r
801 errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
\r
802 errorText_ = errorStream_.str();
\r
803 error( RtAudioError::WARNING );
\r
807 // CoreAudio always uses 32-bit floating point data for PCM streams.
\r
808 // Thus, any other "physical" formats supported by the device are of
\r
809 // no interest to the client.
\r
810 info.nativeFormats = RTAUDIO_FLOAT32;
\r
812 if ( info.outputChannels > 0 )
\r
813 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
814 if ( info.inputChannels > 0 )
\r
815 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
817 info.probed = true;
\r
821 static OSStatus callbackHandler( AudioDeviceID inDevice,
\r
822 const AudioTimeStamp* /*inNow*/,
\r
823 const AudioBufferList* inInputData,
\r
824 const AudioTimeStamp* /*inInputTime*/,
\r
825 AudioBufferList* outOutputData,
\r
826 const AudioTimeStamp* /*inOutputTime*/,
\r
827 void* infoPointer )
\r
829 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
831 RtApiCore *object = (RtApiCore *) info->object;
\r
832 if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
\r
833 return kAudioHardwareUnspecifiedError;
\r
835 return kAudioHardwareNoError;
\r
838 static OSStatus xrunListener( AudioObjectID /*inDevice*/,
\r
840 const AudioObjectPropertyAddress properties[],
\r
841 void* handlePointer )
\r
843 CoreHandle *handle = (CoreHandle *) handlePointer;
\r
844 for ( UInt32 i=0; i<nAddresses; i++ ) {
\r
845 if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
\r
846 if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
\r
847 handle->xrun[1] = true;
\r
849 handle->xrun[0] = true;
\r
853 return kAudioHardwareNoError;
\r
856 static OSStatus rateListener( AudioObjectID inDevice,
\r
857 UInt32 /*nAddresses*/,
\r
858 const AudioObjectPropertyAddress /*properties*/[],
\r
859 void* ratePointer )
\r
861 Float64 *rate = (Float64 *) ratePointer;
\r
862 UInt32 dataSize = sizeof( Float64 );
\r
863 AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
\r
864 kAudioObjectPropertyScopeGlobal,
\r
865 kAudioObjectPropertyElementMaster };
\r
866 AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
\r
867 return kAudioHardwareNoError;
\r
870 bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
871 unsigned int firstChannel, unsigned int sampleRate,
\r
872 RtAudioFormat format, unsigned int *bufferSize,
\r
873 RtAudio::StreamOptions *options )
\r
876 unsigned int nDevices = getDeviceCount();
\r
877 if ( nDevices == 0 ) {
\r
878 // This should not happen because a check is made before this function is called.
\r
879 errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";
\r
883 if ( device >= nDevices ) {
\r
884 // This should not happen because a check is made before this function is called.
\r
885 errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";
\r
889 AudioDeviceID deviceList[ nDevices ];
\r
890 UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
\r
891 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
\r
892 kAudioObjectPropertyScopeGlobal,
\r
893 kAudioObjectPropertyElementMaster };
\r
894 OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
\r
895 0, NULL, &dataSize, (void *) &deviceList );
\r
896 if ( result != noErr ) {
\r
897 errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";
\r
901 AudioDeviceID id = deviceList[ device ];
\r
903 // Setup for stream mode.
\r
904 bool isInput = false;
\r
905 if ( mode == INPUT ) {
\r
907 property.mScope = kAudioDevicePropertyScopeInput;
\r
910 property.mScope = kAudioDevicePropertyScopeOutput;
\r
912 // Get the stream "configuration".
\r
913 AudioBufferList *bufferList = nil;
\r
915 property.mSelector = kAudioDevicePropertyStreamConfiguration;
\r
916 result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
\r
917 if ( result != noErr || dataSize == 0 ) {
\r
918 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
\r
919 errorText_ = errorStream_.str();
\r
923 // Allocate the AudioBufferList.
\r
924 bufferList = (AudioBufferList *) malloc( dataSize );
\r
925 if ( bufferList == NULL ) {
\r
926 errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";
\r
930 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
\r
931 if (result != noErr || dataSize == 0) {
\r
932 free( bufferList );
\r
933 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
\r
934 errorText_ = errorStream_.str();
\r
938 // Search for one or more streams that contain the desired number of
\r
939 // channels. CoreAudio devices can have an arbitrary number of
\r
940 // streams and each stream can have an arbitrary number of channels.
\r
941 // For each stream, a single buffer of interleaved samples is
\r
942 // provided. RtAudio prefers the use of one stream of interleaved
\r
943 // data or multiple consecutive single-channel streams. However, we
\r
944 // now support multiple consecutive multi-channel streams of
\r
945 // interleaved data as well.
\r
946 UInt32 iStream, offsetCounter = firstChannel;
\r
947 UInt32 nStreams = bufferList->mNumberBuffers;
\r
948 bool monoMode = false;
\r
949 bool foundStream = false;
\r
951 // First check that the device supports the requested number of
\r
953 UInt32 deviceChannels = 0;
\r
954 for ( iStream=0; iStream<nStreams; iStream++ )
\r
955 deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;
\r
957 if ( deviceChannels < ( channels + firstChannel ) ) {
\r
958 free( bufferList );
\r
959 errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
\r
960 errorText_ = errorStream_.str();
\r
964 // Look for a single stream meeting our needs.
\r
965 UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
\r
966 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
967 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
968 if ( streamChannels >= channels + offsetCounter ) {
\r
969 firstStream = iStream;
\r
970 channelOffset = offsetCounter;
\r
971 foundStream = true;
\r
974 if ( streamChannels > offsetCounter ) break;
\r
975 offsetCounter -= streamChannels;
\r
978 // If we didn't find a single stream above, then we should be able
\r
979 // to meet the channel specification with multiple streams.
\r
980 if ( foundStream == false ) {
\r
982 offsetCounter = firstChannel;
\r
983 for ( iStream=0; iStream<nStreams; iStream++ ) {
\r
984 streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
\r
985 if ( streamChannels > offsetCounter ) break;
\r
986 offsetCounter -= streamChannels;
\r
989 firstStream = iStream;
\r
990 channelOffset = offsetCounter;
\r
991 Int32 channelCounter = channels + offsetCounter - streamChannels;
\r
993 if ( streamChannels > 1 ) monoMode = false;
\r
994 while ( channelCounter > 0 ) {
\r
995 streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
\r
996 if ( streamChannels > 1 ) monoMode = false;
\r
997 channelCounter -= streamChannels;
\r
1002 free( bufferList );
\r
1004 // Determine the buffer size.
\r
1005 AudioValueRange bufferRange;
\r
1006 dataSize = sizeof( AudioValueRange );
\r
1007 property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
\r
1008 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );
\r
1010 if ( result != noErr ) {
\r
1011 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
\r
1012 errorText_ = errorStream_.str();
\r
1016 if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1017 else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
\r
1018 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
\r
1020 // Set the buffer size. For multiple streams, I'm assuming we only
\r
1021 // need to make this setting for the master channel.
\r
1022 UInt32 theSize = (UInt32) *bufferSize;
\r
1023 dataSize = sizeof( UInt32 );
\r
1024 property.mSelector = kAudioDevicePropertyBufferFrameSize;
\r
1025 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
\r
1027 if ( result != noErr ) {
\r
1028 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
\r
1029 errorText_ = errorStream_.str();
\r
1033 // If attempting to setup a duplex stream, the bufferSize parameter
\r
1034 // MUST be the same in both directions!
\r
1035 *bufferSize = theSize;
\r
1036 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
1037 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
\r
1038 errorText_ = errorStream_.str();
\r
1042 stream_.bufferSize = *bufferSize;
\r
1043 stream_.nBuffers = 1;
\r
1045 // Try to set "hog" mode ... it's not clear to me this is working.
\r
1046 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
\r
1048 dataSize = sizeof( hog_pid );
\r
1049 property.mSelector = kAudioDevicePropertyHogMode;
\r
1050 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
\r
1051 if ( result != noErr ) {
\r
1052 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
\r
1053 errorText_ = errorStream_.str();
\r
1057 if ( hog_pid != getpid() ) {
\r
1058 hog_pid = getpid();
\r
1059 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
\r
1060 if ( result != noErr ) {
\r
1061 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
\r
1062 errorText_ = errorStream_.str();
\r
1068 // Check and if necessary, change the sample rate for the device.
\r
1069 Float64 nominalRate;
\r
1070 dataSize = sizeof( Float64 );
\r
1071 property.mSelector = kAudioDevicePropertyNominalSampleRate;
\r
1072 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
\r
1073 if ( result != noErr ) {
\r
1074 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
\r
1075 errorText_ = errorStream_.str();
\r
1079 // Only change the sample rate if off by more than 1 Hz.
\r
1080 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
\r
1082 // Set a property listener for the sample rate change
\r
1083 Float64 reportedRate = 0.0;
\r
1084 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
\r
1085 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1086 if ( result != noErr ) {
\r
1087 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
\r
1088 errorText_ = errorStream_.str();
\r
1092 nominalRate = (Float64) sampleRate;
\r
1093 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
\r
1094 if ( result != noErr ) {
\r
1095 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1096 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
\r
1097 errorText_ = errorStream_.str();
\r
1101 // Now wait until the reported nominal rate is what we just set.
\r
1102 UInt32 microCounter = 0;
\r
1103 while ( reportedRate != nominalRate ) {
\r
1104 microCounter += 5000;
\r
1105 if ( microCounter > 5000000 ) break;
\r
1109 // Remove the property listener.
\r
1110 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
\r
1112 if ( microCounter > 5000000 ) {
\r
1113 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
\r
1114 errorText_ = errorStream_.str();
\r
1119 // Now set the stream format for all streams. Also, check the
\r
1120 // physical format of the device and change that if necessary.
\r
1121 AudioStreamBasicDescription description;
\r
1122 dataSize = sizeof( AudioStreamBasicDescription );
\r
1123 property.mSelector = kAudioStreamPropertyVirtualFormat;
\r
1124 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1125 if ( result != noErr ) {
\r
1126 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
\r
1127 errorText_ = errorStream_.str();
\r
1131 // Set the sample rate and data format id. However, only make the
\r
1132 // change if the sample rate is not within 1.0 of the desired
\r
1133 // rate and the format is not linear pcm.
\r
1134 bool updateFormat = false;
\r
1135 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
\r
1136 description.mSampleRate = (Float64) sampleRate;
\r
1137 updateFormat = true;
\r
1140 if ( description.mFormatID != kAudioFormatLinearPCM ) {
\r
1141 description.mFormatID = kAudioFormatLinearPCM;
\r
1142 updateFormat = true;
\r
1145 if ( updateFormat ) {
\r
1146 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
\r
1147 if ( result != noErr ) {
\r
1148 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
\r
1149 errorText_ = errorStream_.str();
\r
1154 // Now check the physical format.
\r
1155 property.mSelector = kAudioStreamPropertyPhysicalFormat;
\r
1156 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
\r
1157 if ( result != noErr ) {
\r
1158 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
\r
1159 errorText_ = errorStream_.str();
\r
1163 //std::cout << "Current physical stream format:" << std::endl;
\r
1164 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
\r
1165 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1166 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
\r
1167 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
\r
1169 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
\r
1170 description.mFormatID = kAudioFormatLinearPCM;
\r
1171 //description.mSampleRate = (Float64) sampleRate;
\r
1172 AudioStreamBasicDescription testDescription = description;
\r
1173 UInt32 formatFlags;
\r
1175 // We'll try higher bit rates first and then work our way down.
\r
1176 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
\r
1177 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
\r
1178 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1179 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1180 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
\r
1181 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
\r
1182 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
\r
1183 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
\r
1184 formatFlags |= kAudioFormatFlagIsAlignedHigh;
\r
1185 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
\r
1186 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
\r
1187 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
\r
1188 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
\r
1190 bool setPhysicalFormat = false;
\r
1191 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
\r
1192 testDescription = description;
\r
1193 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
\r
1194 testDescription.mFormatFlags = physicalFormats[i].second;
\r
1195 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
\r
1196 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
\r
1198 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
\r
1199 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
\r
1200 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
\r
1201 if ( result == noErr ) {
\r
1202 setPhysicalFormat = true;
\r
1203 //std::cout << "Updated physical stream format:" << std::endl;
\r
1204 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
\r
1205 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
\r
1206 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
\r
1207 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
\r
1212 if ( !setPhysicalFormat ) {
\r
1213 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
\r
1214 errorText_ = errorStream_.str();
\r
1217 } // done setting virtual/physical formats.
\r
1219 // Get the stream / device latency.
\r
1221 dataSize = sizeof( UInt32 );
\r
1222 property.mSelector = kAudioDevicePropertyLatency;
\r
1223 if ( AudioObjectHasProperty( id, &property ) == true ) {
\r
1224 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
\r
1225 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
\r
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
\r
1228 errorText_ = errorStream_.str();
\r
1229 error( RtAudioError::WARNING );
\r
1233 // Byte-swapping: According to AudioHardware.h, the stream data will
\r
1234 // always be presented in native-endian format, so we should never
\r
1235 // need to byte swap.
\r
1236 stream_.doByteSwap[mode] = false;
\r
1238 // From the CoreAudio documentation, PCM data must be supplied as
\r
1240 stream_.userFormat = format;
\r
1241 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
1243 if ( streamCount == 1 )
\r
1244 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
\r
1245 else // multiple streams
\r
1246 stream_.nDeviceChannels[mode] = channels;
\r
1247 stream_.nUserChannels[mode] = channels;
\r
1248 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
\r
1249 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
1250 else stream_.userInterleaved = true;
\r
1251 stream_.deviceInterleaved[mode] = true;
\r
1252 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
\r
1254 // Set flags for buffer conversion.
\r
1255 stream_.doConvertBuffer[mode] = false;
\r
1256 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
1257 stream_.doConvertBuffer[mode] = true;
\r
1258 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
1259 stream_.doConvertBuffer[mode] = true;
\r
1260 if ( streamCount == 1 ) {
\r
1261 if ( stream_.nUserChannels[mode] > 1 &&
\r
1262 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
\r
1263 stream_.doConvertBuffer[mode] = true;
\r
1265 else if ( monoMode && stream_.userInterleaved )
\r
1266 stream_.doConvertBuffer[mode] = true;
\r
1268 // Allocate our CoreHandle structure for the stream.
\r
1269 CoreHandle *handle = 0;
\r
1270 if ( stream_.apiHandle == 0 ) {
\r
1272 handle = new CoreHandle;
\r
1274 catch ( std::bad_alloc& ) {
\r
1275 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
\r
1279 if ( pthread_cond_init( &handle->condition, NULL ) ) {
\r
1280 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
\r
1283 stream_.apiHandle = (void *) handle;
\r
1286 handle = (CoreHandle *) stream_.apiHandle;
\r
1287 handle->iStream[mode] = firstStream;
\r
1288 handle->nStreams[mode] = streamCount;
\r
1289 handle->id[mode] = id;
\r
1291 // Allocate necessary internal buffers.
\r
1292 unsigned long bufferBytes;
\r
1293 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
1294 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
1295 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
\r
1296 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
\r
1297 if ( stream_.userBuffer[mode] == NULL ) {
\r
1298 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
\r
1302 // If possible, we will make use of the CoreAudio stream buffers as
\r
1303 // "device buffers". However, we can't do this if using multiple
\r
1305 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
\r
1307 bool makeBuffer = true;
\r
1308 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
1309 if ( mode == INPUT ) {
\r
1310 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
1311 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
1312 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
1316 if ( makeBuffer ) {
\r
1317 bufferBytes *= *bufferSize;
\r
1318 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
1319 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
1320 if ( stream_.deviceBuffer == NULL ) {
\r
1321 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
\r
1327 stream_.sampleRate = sampleRate;
\r
1328 stream_.device[mode] = device;
\r
1329 stream_.state = STREAM_STOPPED;
\r
1330 stream_.callbackInfo.object = (void *) this;
\r
1332 // Setup the buffer conversion information structure.
\r
1333 if ( stream_.doConvertBuffer[mode] ) {
\r
1334 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
\r
1335 else setConvertInfo( mode, channelOffset );
\r
1338 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
\r
1339 // Only one callback procedure per device.
\r
1340 stream_.mode = DUPLEX;
\r
1342 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
\r
1343 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
\r
1345 // deprecated in favor of AudioDeviceCreateIOProcID()
\r
1346 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
\r
1348 if ( result != noErr ) {
\r
1349 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
\r
1350 errorText_ = errorStream_.str();
\r
1353 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
1354 stream_.mode = DUPLEX;
\r
1356 stream_.mode = mode;
\r
1359 // Setup the device property listener for over/underload.
\r
1360 property.mSelector = kAudioDeviceProcessorOverload;
\r
1361 property.mScope = kAudioObjectPropertyScopeGlobal;
\r
1362 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
\r
1368 pthread_cond_destroy( &handle->condition );
\r
1370 stream_.apiHandle = 0;
\r
1373 for ( int i=0; i<2; i++ ) {
\r
1374 if ( stream_.userBuffer[i] ) {
\r
1375 free( stream_.userBuffer[i] );
\r
1376 stream_.userBuffer[i] = 0;
\r
1380 if ( stream_.deviceBuffer ) {
\r
1381 free( stream_.deviceBuffer );
\r
1382 stream_.deviceBuffer = 0;
\r
1385 stream_.state = STREAM_CLOSED;
\r
// RtApiCore::closeStream -- stops the device callback(s) if the stream is
// running, destroys/removes the IOProc for each direction (output handle
// id[0]; input handle id[1] when it is a distinct device), frees the user
// and device buffers, destroys the pthread condition variable, and resets
// the stream bookkeeping to UNINITIALIZED / STREAM_CLOSED.
//
// NOTE(review): lossy extraction -- gaps in the embedded original line
// numbers indicate missing `return;`, `#else`/`#endif`, and closing braces.
// The gap at original line 1434 (between pthread_cond_destroy and
// apiHandle = 0) presumably deleted the CoreHandle -- confirm upstream.
1389 void RtApiCore :: closeStream( void )

1391 if ( stream_.state == STREAM_CLOSED ) {

1392 errorText_ = "RtApiCore::closeStream(): no open stream to close!";

1393 error( RtAudioError::WARNING );

1397 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1398 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1399 if ( stream_.state == STREAM_RUNNING )

1400 AudioDeviceStop( handle->id[0], callbackHandler );

1401 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1402 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );

1404 // deprecated in favor of AudioDeviceDestroyIOProcID()

1405 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );

1409 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1410 if ( stream_.state == STREAM_RUNNING )

1411 AudioDeviceStop( handle->id[1], callbackHandler );

1412 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )

1413 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );

1415 // deprecated in favor of AudioDeviceDestroyIOProcID()

1416 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );

1420 for ( int i=0; i<2; i++ ) {

1421 if ( stream_.userBuffer[i] ) {

1422 free( stream_.userBuffer[i] );

1423 stream_.userBuffer[i] = 0;

1427 if ( stream_.deviceBuffer ) {

1428 free( stream_.deviceBuffer );

1429 stream_.deviceBuffer = 0;

1432 // Destroy pthread condition variable.

1433 pthread_cond_destroy( &handle->condition );

1435 stream_.apiHandle = 0;

1437 stream_.mode = UNINITIALIZED;

1438 stream_.state = STREAM_CLOSED;
\r
// RtApiCore::startStream -- starts the CoreAudio IOProc on the output
// device (handle->id[0]) and, for INPUT or split-device DUPLEX streams, on
// the input device (handle->id[1]); resets the drain bookkeeping and marks
// the stream STREAM_RUNNING.  On any CoreAudio error it falls through to
// error( RtAudioError::SYSTEM_ERROR ).
//
// NOTE(review): lossy extraction -- the `return;` after the warning and the
// `unlock:`-style branch lines implied by the trailing
// `if ( result == noErr ) return;` are missing from this chunk.
1441 void RtApiCore :: startStream( void )

1444 if ( stream_.state == STREAM_RUNNING ) {

1445 errorText_ = "RtApiCore::startStream(): the stream is already running!";

1446 error( RtAudioError::WARNING );

1450 OSStatus result = noErr;

1451 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1452 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1454 result = AudioDeviceStart( handle->id[0], callbackHandler );

1455 if ( result != noErr ) {

1456 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";

1457 errorText_ = errorStream_.str();

1462 if ( stream_.mode == INPUT ||

1463 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1465 result = AudioDeviceStart( handle->id[1], callbackHandler );

1466 if ( result != noErr ) {

1467 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";

1468 errorText_ = errorStream_.str();

1473 handle->drainCounter = 0;

1474 handle->internalDrain = false;

1475 stream_.state = STREAM_RUNNING;

1478 if ( result == noErr ) return;

1479 error( RtAudioError::SYSTEM_ERROR );
\r
// RtApiCore::stopStream -- gracefully stops a running stream.  For output,
// if no drain is already in progress (drainCounter == 0) it requests one
// (drainCounter = 2) and blocks on the condition variable until the
// callback signals that the output has been zero-filled/drained, then stops
// the output IOProc; the input IOProc is stopped for INPUT or split-device
// DUPLEX streams.  Finally marks the stream STREAM_STOPPED, or reports a
// SYSTEM_ERROR if any AudioDeviceStop call failed.
//
// NOTE(review): lossy extraction -- `return;` after the warning and several
// closing braces are missing per the gaps in the embedded line numbers.
// pthread_cond_wait requires stream_.mutex to be held by this thread at the
// call -- the locking lines are not visible in this chunk; confirm upstream.
1482 void RtApiCore :: stopStream( void )

1485 if ( stream_.state == STREAM_STOPPED ) {

1486 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";

1487 error( RtAudioError::WARNING );

1491 OSStatus result = noErr;

1492 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1493 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

1495 if ( handle->drainCounter == 0 ) {

1496 handle->drainCounter = 2;

1497 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled

1500 result = AudioDeviceStop( handle->id[0], callbackHandler );

1501 if ( result != noErr ) {

1502 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";

1503 errorText_ = errorStream_.str();

1508 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {

1510 result = AudioDeviceStop( handle->id[1], callbackHandler );

1511 if ( result != noErr ) {

1512 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";

1513 errorText_ = errorStream_.str();

1518 stream_.state = STREAM_STOPPED;

1521 if ( result == noErr ) return;

1522 error( RtAudioError::SYSTEM_ERROR );
\r
// RtApiCore::abortStream -- immediate (non-draining) stop: setting
// drainCounter = 2 up front makes the callback skip the drain handshake so
// the stream can be stopped without waiting for output to play out.
//
// NOTE(review): lossy extraction -- the lines after drainCounter = 2
// (original ~1536-1538) are missing; they presumably invoked stopStream()
// to perform the actual device stop -- confirm upstream.
1525 void RtApiCore :: abortStream( void )

1528 if ( stream_.state == STREAM_STOPPED ) {

1529 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";

1530 error( RtAudioError::WARNING );

1534 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;

1535 handle->drainCounter = 2;
\r
1540 // This function will be called by a spawned thread when the user
\r
1541 // callback function signals that the stream should be stopped or
\r
1542 // aborted. It is better to handle it this way because the
\r
1543 // callbackEvent() function probably should return before the AudioDeviceStop()
\r
1544 // function is called.
\r
// coreStopStream -- pthread entry point spawned by callbackEvent(): unpacks
// the CallbackInfo to recover the RtApiCore instance, calls its
// stopStream(), and exits the thread.  Running the stop on a separate
// thread lets the audio callback return before AudioDeviceStop is invoked
// (see the comment block above).
1545 static void *coreStopStream( void *ptr )

1547 CallbackInfo *info = (CallbackInfo *) ptr;

1548 RtApiCore *object = (RtApiCore *) info->object;

1550 object->stopStream();

1551 pthread_exit( NULL );
\r
1554 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
\r
1555 const AudioBufferList *inBufferList,
\r
1556 const AudioBufferList *outBufferList )
\r
1558 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
1559 if ( stream_.state == STREAM_CLOSED ) {
\r
1560 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
1561 error( RtAudioError::WARNING );
\r
1565 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
1566 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
\r
1568 // Check if we were draining the stream and signal is finished.
\r
1569 if ( handle->drainCounter > 3 ) {
\r
1570 ThreadHandle threadId;
\r
1572 stream_.state = STREAM_STOPPING;
\r
1573 if ( handle->internalDrain == true )
\r
1574 pthread_create( &threadId, NULL, coreStopStream, info );
\r
1575 else // external call to stopStream()
\r
1576 pthread_cond_signal( &handle->condition );
\r
1580 AudioDeviceID outputDevice = handle->id[0];
\r
1582 // Invoke user callback to get fresh output data UNLESS we are
\r
1583 // draining stream or duplex mode AND the input/output devices are
\r
1584 // different AND this function is called for the input device.
\r
1585 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
\r
1586 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
1587 double streamTime = getStreamTime();
\r
1588 RtAudioStreamStatus status = 0;
\r
1589 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
1590 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
1591 handle->xrun[0] = false;
\r
1593 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
1594 status |= RTAUDIO_INPUT_OVERFLOW;
\r
1595 handle->xrun[1] = false;
\r
1598 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
1599 stream_.bufferSize, streamTime, status, info->userData );
\r
1600 if ( cbReturnValue == 2 ) {
\r
1601 stream_.state = STREAM_STOPPING;
\r
1602 handle->drainCounter = 2;
\r
1606 else if ( cbReturnValue == 1 ) {
\r
1607 handle->drainCounter = 1;
\r
1608 handle->internalDrain = true;
\r
1612 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
\r
1614 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
1616 if ( handle->nStreams[0] == 1 ) {
\r
1617 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1619 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1621 else { // fill multiple streams with zeros
\r
1622 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1623 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1625 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
\r
1629 else if ( handle->nStreams[0] == 1 ) {
\r
1630 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
\r
1631 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1632 stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1634 else { // copy from user buffer
\r
1635 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
\r
1636 stream_.userBuffer[0],
\r
1637 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
\r
1640 else { // fill multiple streams
\r
1641 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
\r
1642 if ( stream_.doConvertBuffer[0] ) {
\r
1643 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
1644 inBuffer = (Float32 *) stream_.deviceBuffer;
\r
1647 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
\r
1648 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
\r
1649 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
1650 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
\r
1651 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
\r
1654 else { // fill multiple multi-channel streams with interleaved data
\r
1655 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
\r
1656 Float32 *out, *in;
\r
1658 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1659 UInt32 inChannels = stream_.nUserChannels[0];
\r
1660 if ( stream_.doConvertBuffer[0] ) {
\r
1661 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1662 inChannels = stream_.nDeviceChannels[0];
\r
1665 if ( inInterleaved ) inOffset = 1;
\r
1666 else inOffset = stream_.bufferSize;
\r
1668 channelsLeft = inChannels;
\r
1669 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
\r
1671 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
\r
1672 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
\r
1675 // Account for possible channel offset in first stream
\r
1676 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
\r
1677 streamChannels -= stream_.channelOffset[0];
\r
1678 outJump = stream_.channelOffset[0];
\r
1682 // Account for possible unfilled channels at end of the last stream
\r
1683 if ( streamChannels > channelsLeft ) {
\r
1684 outJump = streamChannels - channelsLeft;
\r
1685 streamChannels = channelsLeft;
\r
1688 // Determine input buffer offsets and skips
\r
1689 if ( inInterleaved ) {
\r
1690 inJump = inChannels;
\r
1691 in += inChannels - channelsLeft;
\r
1695 in += (inChannels - channelsLeft) * inOffset;
\r
1698 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1699 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1700 *out++ = in[j*inOffset];
\r
1705 channelsLeft -= streamChannels;
\r
1711 // Don't bother draining input
\r
1712 if ( handle->drainCounter ) {
\r
1713 handle->drainCounter++;
\r
1717 AudioDeviceID inputDevice;
\r
1718 inputDevice = handle->id[1];
\r
1719 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
\r
1721 if ( handle->nStreams[1] == 1 ) {
\r
1722 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
\r
1723 convertBuffer( stream_.userBuffer[1],
\r
1724 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1725 stream_.convertInfo[1] );
\r
1727 else { // copy to user buffer
\r
1728 memcpy( stream_.userBuffer[1],
\r
1729 inBufferList->mBuffers[handle->iStream[1]].mData,
\r
1730 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
\r
1733 else { // read from multiple streams
\r
1734 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
\r
1735 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
\r
1737 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
\r
1738 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
\r
1739 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
1740 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
\r
1741 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
\r
1744 else { // read from multiple multi-channel streams
\r
1745 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
\r
1746 Float32 *out, *in;
\r
1748 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
\r
1749 UInt32 outChannels = stream_.nUserChannels[1];
\r
1750 if ( stream_.doConvertBuffer[1] ) {
\r
1751 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
\r
1752 outChannels = stream_.nDeviceChannels[1];
\r
1755 if ( outInterleaved ) outOffset = 1;
\r
1756 else outOffset = stream_.bufferSize;
\r
1758 channelsLeft = outChannels;
\r
1759 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
\r
1761 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
\r
1762 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
\r
1765 // Account for possible channel offset in first stream
\r
1766 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
\r
1767 streamChannels -= stream_.channelOffset[1];
\r
1768 inJump = stream_.channelOffset[1];
\r
1772 // Account for possible unread channels at end of the last stream
\r
1773 if ( streamChannels > channelsLeft ) {
\r
1774 inJump = streamChannels - channelsLeft;
\r
1775 streamChannels = channelsLeft;
\r
1778 // Determine output buffer offsets and skips
\r
1779 if ( outInterleaved ) {
\r
1780 outJump = outChannels;
\r
1781 out += outChannels - channelsLeft;
\r
1785 out += (outChannels - channelsLeft) * outOffset;
\r
1788 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
\r
1789 for ( unsigned int j=0; j<streamChannels; j++ ) {
\r
1790 out[j*outOffset] = *in++;
\r
1795 channelsLeft -= streamChannels;
\r
1799 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
\r
1800 convertBuffer( stream_.userBuffer[1],
\r
1801 stream_.deviceBuffer,
\r
1802 stream_.convertInfo[1] );
\r
1808 //MUTEX_UNLOCK( &stream_.mutex );
\r
1810 RtApi::tickStreamTime();
\r
1814 const char* RtApiCore :: getErrorCode( OSStatus code )
\r
1818 case kAudioHardwareNotRunningError:
\r
1819 return "kAudioHardwareNotRunningError";
\r
1821 case kAudioHardwareUnspecifiedError:
\r
1822 return "kAudioHardwareUnspecifiedError";
\r
1824 case kAudioHardwareUnknownPropertyError:
\r
1825 return "kAudioHardwareUnknownPropertyError";
\r
1827 case kAudioHardwareBadPropertySizeError:
\r
1828 return "kAudioHardwareBadPropertySizeError";
\r
1830 case kAudioHardwareIllegalOperationError:
\r
1831 return "kAudioHardwareIllegalOperationError";
\r
1833 case kAudioHardwareBadObjectError:
\r
1834 return "kAudioHardwareBadObjectError";
\r
1836 case kAudioHardwareBadDeviceError:
\r
1837 return "kAudioHardwareBadDeviceError";
\r
1839 case kAudioHardwareBadStreamError:
\r
1840 return "kAudioHardwareBadStreamError";
\r
1842 case kAudioHardwareUnsupportedOperationError:
\r
1843 return "kAudioHardwareUnsupportedOperationError";
\r
1845 case kAudioDeviceUnsupportedFormatError:
\r
1846 return "kAudioDeviceUnsupportedFormatError";
\r
1848 case kAudioDevicePermissionsError:
\r
1849 return "kAudioDevicePermissionsError";
\r
1852 return "CoreAudio unknown error";
\r
1856 //******************** End of __MACOSX_CORE__ *********************//
\r
1859 #if defined(__UNIX_JACK__)
\r
// JACK is a low-latency audio server, originally written for the
// GNU/Linux operating system and now also ported to OS-X.  It can
// connect a number of different applications to an audio device, as
// well as allowing them to share audio between themselves.
//
// When using JACK with RtAudio, "devices" refer to JACK clients that
// have ports connected to the server.  The JACK server is typically
// started in a terminal as follows:
//
//    jackd -d alsa -d hw:0
//
// or through an interface program such as qjackctl.  Many of the
// parameters normally set for a stream are fixed by the JACK server
// and can be specified when the JACK server is started.  In
// particular,
//
//    jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
//
// specifies a sample rate of 44100 Hz, a buffer size of 512 sample
// frames, and number of buffers = 4.  Once the server is running, it
// is not possible to override these values.  If the values are not
// specified in the command-line, the JACK server uses default values.
//
// The JACK server does not have to be running when an instance of
// RtApiJack is created, though the function getDeviceCount() will
// report 0 devices found until JACK has been started.  When no
// devices are available (i.e., the JACK server is not running), a
// stream cannot be opened.
\r
1890 #include <jack/jack.h>
\r
1891 #include <unistd.h>
\r
1894 // A structure to hold various information related to the Jack API
\r
1895 // implementation.
\r
1896 struct JackHandle {
\r
1897 jack_client_t *client;
\r
1898 jack_port_t **ports[2];
\r
1899 std::string deviceName[2];
\r
1901 pthread_cond_t condition;
\r
1902 int drainCounter; // Tracks callback counts when draining
\r
1903 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
1906 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
\r
// No-op error handler installed via jack_set_error_function() to
// suppress JACK's internal error printing in non-debug builds.
static void jackSilentError( const char * ) {}
\r
1911 RtApiJack :: RtApiJack()
\r
1913 // Nothing to do here.
\r
1914 #if !defined(__RTAUDIO_DEBUG__)
\r
1915 // Turn off Jack's internal error reporting.
\r
1916 jack_set_error_function( &jackSilentError );
\r
1920 RtApiJack :: ~RtApiJack()
\r
1922 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
1925 unsigned int RtApiJack :: getDeviceCount( void )
\r
1927 // See if we can become a jack client.
\r
1928 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
1929 jack_status_t *status = NULL;
\r
1930 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
\r
1931 if ( client == 0 ) return 0;
\r
1933 const char **ports;
\r
1934 std::string port, previousPort;
\r
1935 unsigned int nChannels = 0, nDevices = 0;
\r
1936 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1938 // Parse the port names up to the first colon (:).
\r
1939 size_t iColon = 0;
\r
1941 port = (char *) ports[ nChannels ];
\r
1942 iColon = port.find(":");
\r
1943 if ( iColon != std::string::npos ) {
\r
1944 port = port.substr( 0, iColon + 1 );
\r
1945 if ( port != previousPort ) {
\r
1947 previousPort = port;
\r
1950 } while ( ports[++nChannels] );
\r
1954 jack_client_close( client );
\r
1958 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
\r
1960 RtAudio::DeviceInfo info;
\r
1961 info.probed = false;
\r
1963 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
\r
1964 jack_status_t *status = NULL;
\r
1965 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
\r
1966 if ( client == 0 ) {
\r
1967 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
\r
1968 error( RtAudioError::WARNING );
\r
1972 const char **ports;
\r
1973 std::string port, previousPort;
\r
1974 unsigned int nPorts = 0, nDevices = 0;
\r
1975 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
1977 // Parse the port names up to the first colon (:).
\r
1978 size_t iColon = 0;
\r
1980 port = (char *) ports[ nPorts ];
\r
1981 iColon = port.find(":");
\r
1982 if ( iColon != std::string::npos ) {
\r
1983 port = port.substr( 0, iColon );
\r
1984 if ( port != previousPort ) {
\r
1985 if ( nDevices == device ) info.name = port;
\r
1987 previousPort = port;
\r
1990 } while ( ports[++nPorts] );
\r
1994 if ( device >= nDevices ) {
\r
1995 jack_client_close( client );
\r
1996 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
\r
1997 error( RtAudioError::INVALID_USE );
\r
2001 // Get the current jack server sample rate.
\r
2002 info.sampleRates.clear();
\r
2003 info.sampleRates.push_back( jack_get_sample_rate( client ) );
\r
2005 // Count the available ports containing the client name as device
\r
2006 // channels. Jack "input ports" equal RtAudio output channels.
\r
2007 unsigned int nChannels = 0;
\r
2008 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
\r
2010 while ( ports[ nChannels ] ) nChannels++;
\r
2012 info.outputChannels = nChannels;
\r
2015 // Jack "output ports" equal RtAudio input channels.
\r
2017 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
\r
2019 while ( ports[ nChannels ] ) nChannels++;
\r
2021 info.inputChannels = nChannels;
\r
2024 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
\r
2025 jack_client_close(client);
\r
2026 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
\r
2027 error( RtAudioError::WARNING );
\r
2031 // If device opens for both playback and capture, we determine the channels.
\r
2032 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2033 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2035 // Jack always uses 32-bit floats.
\r
2036 info.nativeFormats = RTAUDIO_FLOAT32;
\r
2038 // Jack doesn't provide default devices so we'll use the first available one.
\r
2039 if ( device == 0 && info.outputChannels > 0 )
\r
2040 info.isDefaultOutput = true;
\r
2041 if ( device == 0 && info.inputChannels > 0 )
\r
2042 info.isDefaultInput = true;
\r
2044 jack_client_close(client);
\r
2045 info.probed = true;
\r
2049 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
\r
2051 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2053 RtApiJack *object = (RtApiJack *) info->object;
\r
2054 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
\r
2059 // This function will be called by a spawned thread when the Jack
\r
2060 // server signals that it is shutting down. It is necessary to handle
\r
2061 // it this way because the jackShutdown() function must return before
\r
2062 // the jack_deactivate() function (in closeStream()) will return.
\r
2063 static void *jackCloseStream( void *ptr )
\r
2065 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2066 RtApiJack *object = (RtApiJack *) info->object;
\r
2068 object->closeStream();
\r
2070 pthread_exit( NULL );
\r
2072 static void jackShutdown( void *infoPointer )
\r
2074 CallbackInfo *info = (CallbackInfo *) infoPointer;
\r
2075 RtApiJack *object = (RtApiJack *) info->object;
\r
2077 // Check current stream state. If stopped, then we'll assume this
\r
2078 // was called as a result of a call to RtApiJack::stopStream (the
\r
2079 // deactivation of a client handle causes this function to be called).
\r
2080 // If not, we'll assume the Jack server is shutting down or some
\r
2081 // other problem occurred and we should close the stream.
\r
2082 if ( object->isStreamRunning() == false ) return;
\r
2084 ThreadHandle threadId;
\r
2085 pthread_create( &threadId, NULL, jackCloseStream, info );
\r
2086 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
\r
2089 static int jackXrun( void *infoPointer )
\r
2091 JackHandle *handle = (JackHandle *) infoPointer;
\r
2093 if ( handle->ports[0] ) handle->xrun[0] = true;
\r
2094 if ( handle->ports[1] ) handle->xrun[1] = true;
\r
2099 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2100 unsigned int firstChannel, unsigned int sampleRate,
\r
2101 RtAudioFormat format, unsigned int *bufferSize,
\r
2102 RtAudio::StreamOptions *options )
\r
2104 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2106 // Look for jack server and try to become a client (only do once per stream).
\r
2107 jack_client_t *client = 0;
\r
2108 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
\r
2109 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
\r
2110 jack_status_t *status = NULL;
\r
2111 if ( options && !options->streamName.empty() )
\r
2112 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
\r
2114 client = jack_client_open( "RtApiJack", jackoptions, status );
\r
2115 if ( client == 0 ) {
\r
2116 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
\r
2117 error( RtAudioError::WARNING );
\r
2122 // The handle must have been created on an earlier pass.
\r
2123 client = handle->client;
\r
2126 const char **ports;
\r
2127 std::string port, previousPort, deviceName;
\r
2128 unsigned int nPorts = 0, nDevices = 0;
\r
2129 ports = jack_get_ports( client, NULL, NULL, 0 );
\r
2131 // Parse the port names up to the first colon (:).
\r
2132 size_t iColon = 0;
\r
2134 port = (char *) ports[ nPorts ];
\r
2135 iColon = port.find(":");
\r
2136 if ( iColon != std::string::npos ) {
\r
2137 port = port.substr( 0, iColon );
\r
2138 if ( port != previousPort ) {
\r
2139 if ( nDevices == device ) deviceName = port;
\r
2141 previousPort = port;
\r
2144 } while ( ports[++nPorts] );
\r
2148 if ( device >= nDevices ) {
\r
2149 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
\r
2153 // Count the available ports containing the client name as device
\r
2154 // channels. Jack "input ports" equal RtAudio output channels.
\r
2155 unsigned int nChannels = 0;
\r
2156 unsigned long flag = JackPortIsInput;
\r
2157 if ( mode == INPUT ) flag = JackPortIsOutput;
\r
2158 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2160 while ( ports[ nChannels ] ) nChannels++;
\r
2164 // Compare the jack ports for specified client to the requested number of channels.
\r
2165 if ( nChannels < (channels + firstChannel) ) {
\r
2166 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
\r
2167 errorText_ = errorStream_.str();
\r
2171 // Check the jack server sample rate.
\r
2172 unsigned int jackRate = jack_get_sample_rate( client );
\r
2173 if ( sampleRate != jackRate ) {
\r
2174 jack_client_close( client );
\r
2175 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
\r
2176 errorText_ = errorStream_.str();
\r
2179 stream_.sampleRate = jackRate;
\r
2181 // Get the latency of the JACK port.
\r
2182 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
\r
2183 if ( ports[ firstChannel ] ) {
\r
2184 // Added by Ge Wang
\r
2185 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
\r
2186 // the range (usually the min and max are equal)
\r
2187 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
\r
2188 // get the latency range
\r
2189 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
\r
2190 // be optimistic, use the min!
\r
2191 stream_.latency[mode] = latrange.min;
\r
2192 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
\r
2196 // The jack server always uses 32-bit floating-point data.
\r
2197 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2198 stream_.userFormat = format;
\r
2200 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
2201 else stream_.userInterleaved = true;
\r
2203 // Jack always uses non-interleaved buffers.
\r
2204 stream_.deviceInterleaved[mode] = false;
\r
2206 // Jack always provides host byte-ordered data.
\r
2207 stream_.doByteSwap[mode] = false;
\r
2209 // Get the buffer size. The buffer size and number of buffers
\r
2210 // (periods) is set when the jack server is started.
\r
2211 stream_.bufferSize = (int) jack_get_buffer_size( client );
\r
2212 *bufferSize = stream_.bufferSize;
\r
2214 stream_.nDeviceChannels[mode] = channels;
\r
2215 stream_.nUserChannels[mode] = channels;
\r
2217 // Set flags for buffer conversion.
\r
2218 stream_.doConvertBuffer[mode] = false;
\r
2219 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
2220 stream_.doConvertBuffer[mode] = true;
\r
2221 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
2222 stream_.nUserChannels[mode] > 1 )
\r
2223 stream_.doConvertBuffer[mode] = true;
\r
2225 // Allocate our JackHandle structure for the stream.
\r
2226 if ( handle == 0 ) {
\r
2228 handle = new JackHandle;
\r
2230 catch ( std::bad_alloc& ) {
\r
2231 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
\r
2235 if ( pthread_cond_init(&handle->condition, NULL) ) {
\r
2236 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
\r
2239 stream_.apiHandle = (void *) handle;
\r
2240 handle->client = client;
\r
2242 handle->deviceName[mode] = deviceName;
\r
2244 // Allocate necessary internal buffers.
\r
2245 unsigned long bufferBytes;
\r
2246 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
2247 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
2248 if ( stream_.userBuffer[mode] == NULL ) {
\r
2249 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
\r
2253 if ( stream_.doConvertBuffer[mode] ) {
\r
2255 bool makeBuffer = true;
\r
2256 if ( mode == OUTPUT )
\r
2257 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
2258 else { // mode == INPUT
\r
2259 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
\r
2260 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
2261 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
\r
2262 if ( bufferBytes < bytesOut ) makeBuffer = false;
\r
2266 if ( makeBuffer ) {
\r
2267 bufferBytes *= *bufferSize;
\r
2268 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
2269 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
2270 if ( stream_.deviceBuffer == NULL ) {
\r
2271 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
\r
2277 // Allocate memory for the Jack ports (channels) identifiers.
\r
2278 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
\r
2279 if ( handle->ports[mode] == NULL ) {
\r
2280 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
\r
2284 stream_.device[mode] = device;
\r
2285 stream_.channelOffset[mode] = firstChannel;
\r
2286 stream_.state = STREAM_STOPPED;
\r
2287 stream_.callbackInfo.object = (void *) this;
\r
2289 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
2290 // We had already set up the stream for output.
\r
2291 stream_.mode = DUPLEX;
\r
2293 stream_.mode = mode;
\r
2294 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
\r
2295 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
\r
2296 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
\r
2299 // Register our ports.
\r
2301 if ( mode == OUTPUT ) {
\r
2302 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2303 snprintf( label, 64, "outport %d", i );
\r
2304 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
\r
2305 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
\r
2309 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2310 snprintf( label, 64, "inport %d", i );
\r
2311 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
\r
2312 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
\r
2316 // Setup the buffer conversion information structure. We don't use
\r
2317 // buffers to do channel offsets, so we override that parameter
\r
2319 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
2325 pthread_cond_destroy( &handle->condition );
\r
2326 jack_client_close( handle->client );
\r
2328 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2329 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2332 stream_.apiHandle = 0;
\r
2335 for ( int i=0; i<2; i++ ) {
\r
2336 if ( stream_.userBuffer[i] ) {
\r
2337 free( stream_.userBuffer[i] );
\r
2338 stream_.userBuffer[i] = 0;
\r
2342 if ( stream_.deviceBuffer ) {
\r
2343 free( stream_.deviceBuffer );
\r
2344 stream_.deviceBuffer = 0;
\r
2350 void RtApiJack :: closeStream( void )
\r
2352 if ( stream_.state == STREAM_CLOSED ) {
\r
2353 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
\r
2354 error( RtAudioError::WARNING );
\r
2358 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2361 if ( stream_.state == STREAM_RUNNING )
\r
2362 jack_deactivate( handle->client );
\r
2364 jack_client_close( handle->client );
\r
2368 if ( handle->ports[0] ) free( handle->ports[0] );
\r
2369 if ( handle->ports[1] ) free( handle->ports[1] );
\r
2370 pthread_cond_destroy( &handle->condition );
\r
2372 stream_.apiHandle = 0;
\r
2375 for ( int i=0; i<2; i++ ) {
\r
2376 if ( stream_.userBuffer[i] ) {
\r
2377 free( stream_.userBuffer[i] );
\r
2378 stream_.userBuffer[i] = 0;
\r
2382 if ( stream_.deviceBuffer ) {
\r
2383 free( stream_.deviceBuffer );
\r
2384 stream_.deviceBuffer = 0;
\r
2387 stream_.mode = UNINITIALIZED;
\r
2388 stream_.state = STREAM_CLOSED;
\r
2391 void RtApiJack :: startStream( void )
\r
2394 if ( stream_.state == STREAM_RUNNING ) {
\r
2395 errorText_ = "RtApiJack::startStream(): the stream is already running!";
\r
2396 error( RtAudioError::WARNING );
\r
2400 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2401 int result = jack_activate( handle->client );
\r
2403 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
\r
2407 const char **ports;
\r
2409 // Get the list of available ports.
\r
2410 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2412 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
\r
2413 if ( ports == NULL) {
\r
2414 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
\r
2418 // Now make the port connections. Since RtAudio wasn't designed to
\r
2419 // allow the user to select particular channels of a device, we'll
\r
2420 // just open the first "nChannels" ports with offset.
\r
2421 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2423 if ( ports[ stream_.channelOffset[0] + i ] )
\r
2424 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
\r
2427 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
\r
2434 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2436 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
\r
2437 if ( ports == NULL) {
\r
2438 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
\r
2442 // Now make the port connections. See note above.
\r
2443 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2445 if ( ports[ stream_.channelOffset[1] + i ] )
\r
2446 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
\r
2449 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
\r
2456 handle->drainCounter = 0;
\r
2457 handle->internalDrain = false;
\r
2458 stream_.state = STREAM_RUNNING;
\r
2461 if ( result == 0 ) return;
\r
2462 error( RtAudioError::SYSTEM_ERROR );
\r
2465 void RtApiJack :: stopStream( void )
\r
2468 if ( stream_.state == STREAM_STOPPED ) {
\r
2469 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
\r
2470 error( RtAudioError::WARNING );
\r
2474 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2475 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2477 if ( handle->drainCounter == 0 ) {
\r
2478 handle->drainCounter = 2;
\r
2479 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
\r
2483 jack_deactivate( handle->client );
\r
2484 stream_.state = STREAM_STOPPED;
\r
2487 void RtApiJack :: abortStream( void )
\r
2490 if ( stream_.state == STREAM_STOPPED ) {
\r
2491 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
\r
2492 error( RtAudioError::WARNING );
\r
2496 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2497 handle->drainCounter = 2;
\r
2502 // This function will be called by a spawned thread when the user
\r
2503 // callback function signals that the stream should be stopped or
\r
2504 // aborted. It is necessary to handle it this way because the
\r
2505 // callbackEvent() function must return before the jack_deactivate()
\r
2506 // function will return.
\r
2507 static void *jackStopStream( void *ptr )
\r
2509 CallbackInfo *info = (CallbackInfo *) ptr;
\r
2510 RtApiJack *object = (RtApiJack *) info->object;
\r
2512 object->stopStream();
\r
2513 pthread_exit( NULL );
\r
2516 bool RtApiJack :: callbackEvent( unsigned long nframes )
\r
2518 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
2519 if ( stream_.state == STREAM_CLOSED ) {
\r
2520 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
2521 error( RtAudioError::WARNING );
\r
2524 if ( stream_.bufferSize != nframes ) {
\r
2525 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
\r
2526 error( RtAudioError::WARNING );
\r
2530 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
2531 JackHandle *handle = (JackHandle *) stream_.apiHandle;
\r
2533 // Check if we were draining the stream and signal is finished.
\r
2534 if ( handle->drainCounter > 3 ) {
\r
2535 ThreadHandle threadId;
\r
2537 stream_.state = STREAM_STOPPING;
\r
2538 if ( handle->internalDrain == true )
\r
2539 pthread_create( &threadId, NULL, jackStopStream, info );
\r
2541 pthread_cond_signal( &handle->condition );
\r
2545 // Invoke user callback first, to get fresh output data.
\r
2546 if ( handle->drainCounter == 0 ) {
\r
2547 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
2548 double streamTime = getStreamTime();
\r
2549 RtAudioStreamStatus status = 0;
\r
2550 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
2551 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
2552 handle->xrun[0] = false;
\r
2554 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
2555 status |= RTAUDIO_INPUT_OVERFLOW;
\r
2556 handle->xrun[1] = false;
\r
2558 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
2559 stream_.bufferSize, streamTime, status, info->userData );
\r
2560 if ( cbReturnValue == 2 ) {
\r
2561 stream_.state = STREAM_STOPPING;
\r
2562 handle->drainCounter = 2;
\r
2564 pthread_create( &id, NULL, jackStopStream, info );
\r
2567 else if ( cbReturnValue == 1 ) {
\r
2568 handle->drainCounter = 1;
\r
2569 handle->internalDrain = true;
\r
2573 jack_default_audio_sample_t *jackbuffer;
\r
2574 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
\r
2575 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
2577 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
2579 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2580 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2581 memset( jackbuffer, 0, bufferBytes );
\r
2585 else if ( stream_.doConvertBuffer[0] ) {
\r
2587 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
2589 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
\r
2590 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2591 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
\r
2594 else { // no buffer conversion
\r
2595 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
\r
2596 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
\r
2597 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
\r
2602 // Don't bother draining input
\r
2603 if ( handle->drainCounter ) {
\r
2604 handle->drainCounter++;
\r
2608 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
2610 if ( stream_.doConvertBuffer[1] ) {
\r
2611 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
\r
2612 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2613 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
\r
2615 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
2617 else { // no buffer conversion
\r
2618 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
\r
2619 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
\r
2620 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
\r
2626 RtApi::tickStreamTime();
\r
2629 //******************** End of __UNIX_JACK__ *********************//
\r
2632 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
\r
// The ASIO API is designed around a callback scheme, so this
// implementation is similar to that used for OS-X CoreAudio and Linux
// Jack.  The primary constraint with ASIO is that it only allows
// access to a single driver at a time.  Thus, it is not possible to
// have more than one simultaneous RtAudio stream.
//
// This implementation also requires a number of external ASIO files
// and a few global variables.  The ASIO callback scheme does not
// allow for the passing of user data, so we must create a global
// pointer to our callbackInfo structure.
//
// On unix systems, we make use of a pthread condition variable.
// Since there is no equivalent in Windows, I hacked something based
// on information found in
// http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
\r
2650 #include "asiosys.h"
\r
2652 #include "iasiothiscallresolver.h"
\r
2653 #include "asiodrivers.h"
\r
2656 static AsioDrivers drivers;
\r
2657 static ASIOCallbacks asioCallbacks;
\r
2658 static ASIODriverInfo driverInfo;
\r
2659 static CallbackInfo *asioCallbackInfo;
\r
2660 static bool asioXRun;
\r
2662 struct AsioHandle {
\r
2663 int drainCounter; // Tracks callback counts when draining
\r
2664 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
2665 ASIOBufferInfo *bufferInfos;
\r
2669 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
\r
2672 // Function declarations (definitions at end of section)
\r
2673 static const char* getAsioErrorString( ASIOError result );
\r
2674 static void sampleRateChanged( ASIOSampleRate sRate );
\r
2675 static long asioMessages( long selector, long value, void* message, double* opt );
\r
2677 RtApiAsio :: RtApiAsio()
\r
2679 // ASIO cannot run on a multi-threaded appartment. You can call
\r
2680 // CoInitialize beforehand, but it must be for appartment threading
\r
2681 // (in which case, CoInitilialize will return S_FALSE here).
\r
2682 coInitialized_ = false;
\r
2683 HRESULT hr = CoInitialize( NULL );
\r
2684 if ( FAILED(hr) ) {
\r
2685 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
\r
2686 error( RtAudioError::WARNING );
\r
2688 coInitialized_ = true;
\r
2690 drivers.removeCurrentDriver();
\r
2691 driverInfo.asioVersion = 2;
\r
2693 // See note in DirectSound implementation about GetDesktopWindow().
\r
2694 driverInfo.sysRef = GetForegroundWindow();
\r
2697 RtApiAsio :: ~RtApiAsio()
\r
2699 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
2700 if ( coInitialized_ ) CoUninitialize();
\r
2703 unsigned int RtApiAsio :: getDeviceCount( void )
\r
2705 return (unsigned int) drivers.asioGetNumDev();
\r
2708 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
\r
2710 RtAudio::DeviceInfo info;
\r
2711 info.probed = false;
\r
2714 unsigned int nDevices = getDeviceCount();
\r
2715 if ( nDevices == 0 ) {
\r
2716 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
\r
2717 error( RtAudioError::INVALID_USE );
\r
2721 if ( device >= nDevices ) {
\r
2722 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
\r
2723 error( RtAudioError::INVALID_USE );
\r
2727 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
\r
2728 if ( stream_.state != STREAM_CLOSED ) {
\r
2729 if ( device >= devices_.size() ) {
\r
2730 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
\r
2731 error( RtAudioError::WARNING );
\r
2734 return devices_[ device ];
\r
2737 char driverName[32];
\r
2738 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2739 if ( result != ASE_OK ) {
\r
2740 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2741 errorText_ = errorStream_.str();
\r
2742 error( RtAudioError::WARNING );
\r
2746 info.name = driverName;
\r
2748 if ( !drivers.loadDriver( driverName ) ) {
\r
2749 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
\r
2750 errorText_ = errorStream_.str();
\r
2751 error( RtAudioError::WARNING );
\r
2755 result = ASIOInit( &driverInfo );
\r
2756 if ( result != ASE_OK ) {
\r
2757 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2758 errorText_ = errorStream_.str();
\r
2759 error( RtAudioError::WARNING );
\r
2763 // Determine the device channel information.
\r
2764 long inputChannels, outputChannels;
\r
2765 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2766 if ( result != ASE_OK ) {
\r
2767 drivers.removeCurrentDriver();
\r
2768 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2769 errorText_ = errorStream_.str();
\r
2770 error( RtAudioError::WARNING );
\r
2774 info.outputChannels = outputChannels;
\r
2775 info.inputChannels = inputChannels;
\r
2776 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
2777 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
2779 // Determine the supported sample rates.
\r
2780 info.sampleRates.clear();
\r
2781 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
2782 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
\r
2783 if ( result == ASE_OK )
\r
2784 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
2787 // Determine supported data types ... just check first channel and assume rest are the same.
\r
2788 ASIOChannelInfo channelInfo;
\r
2789 channelInfo.channel = 0;
\r
2790 channelInfo.isInput = true;
\r
2791 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
\r
2792 result = ASIOGetChannelInfo( &channelInfo );
\r
2793 if ( result != ASE_OK ) {
\r
2794 drivers.removeCurrentDriver();
\r
2795 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
\r
2796 errorText_ = errorStream_.str();
\r
2797 error( RtAudioError::WARNING );
\r
2801 info.nativeFormats = 0;
\r
2802 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
\r
2803 info.nativeFormats |= RTAUDIO_SINT16;
\r
2804 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
\r
2805 info.nativeFormats |= RTAUDIO_SINT32;
\r
2806 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
\r
2807 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
2808 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
\r
2809 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
2810 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
\r
2811 info.nativeFormats |= RTAUDIO_SINT24;
\r
2813 if ( info.outputChannels > 0 )
\r
2814 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
\r
2815 if ( info.inputChannels > 0 )
\r
2816 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
\r
2818 info.probed = true;
\r
2819 drivers.removeCurrentDriver();
\r
2823 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
\r
2825 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
\r
2826 object->callbackEvent( index );
\r
2829 void RtApiAsio :: saveDeviceInfo( void )
\r
2833 unsigned int nDevices = getDeviceCount();
\r
2834 devices_.resize( nDevices );
\r
2835 for ( unsigned int i=0; i<nDevices; i++ )
\r
2836 devices_[i] = getDeviceInfo( i );
\r
2839 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
2840 unsigned int firstChannel, unsigned int sampleRate,
\r
2841 RtAudioFormat format, unsigned int *bufferSize,
\r
2842 RtAudio::StreamOptions *options )
\r
2844 // For ASIO, a duplex stream MUST use the same driver.
\r
2845 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] != device ) {
\r
2846 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
\r
2850 char driverName[32];
\r
2851 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
\r
2852 if ( result != ASE_OK ) {
\r
2853 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
\r
2854 errorText_ = errorStream_.str();
\r
2858 // Only load the driver once for duplex stream.
\r
2859 if ( mode != INPUT || stream_.mode != OUTPUT ) {
\r
2860 // The getDeviceInfo() function will not work when a stream is open
\r
2861 // because ASIO does not allow multiple devices to run at the same
\r
2862 // time. Thus, we'll probe the system before opening a stream and
\r
2863 // save the results for use by getDeviceInfo().
\r
2864 this->saveDeviceInfo();
\r
2866 if ( !drivers.loadDriver( driverName ) ) {
\r
2867 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
\r
2868 errorText_ = errorStream_.str();
\r
2872 result = ASIOInit( &driverInfo );
\r
2873 if ( result != ASE_OK ) {
\r
2874 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
\r
2875 errorText_ = errorStream_.str();
\r
2880 // Check the device channel count.
\r
2881 long inputChannels, outputChannels;
\r
2882 result = ASIOGetChannels( &inputChannels, &outputChannels );
\r
2883 if ( result != ASE_OK ) {
\r
2884 drivers.removeCurrentDriver();
\r
2885 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
\r
2886 errorText_ = errorStream_.str();
\r
2890 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
\r
2891 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
\r
2892 drivers.removeCurrentDriver();
\r
2893 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
\r
2894 errorText_ = errorStream_.str();
\r
2897 stream_.nDeviceChannels[mode] = channels;
\r
2898 stream_.nUserChannels[mode] = channels;
\r
2899 stream_.channelOffset[mode] = firstChannel;
\r
2901 // Verify the sample rate is supported.
\r
2902 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
\r
2903 if ( result != ASE_OK ) {
\r
2904 drivers.removeCurrentDriver();
\r
2905 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
\r
2906 errorText_ = errorStream_.str();
\r
2910 // Get the current sample rate
\r
2911 ASIOSampleRate currentRate;
\r
2912 result = ASIOGetSampleRate( ¤tRate );
\r
2913 if ( result != ASE_OK ) {
\r
2914 drivers.removeCurrentDriver();
\r
2915 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
\r
2916 errorText_ = errorStream_.str();
\r
2920 // Set the sample rate only if necessary
\r
2921 if ( currentRate != sampleRate ) {
\r
2922 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
\r
2923 if ( result != ASE_OK ) {
\r
2924 drivers.removeCurrentDriver();
\r
2925 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
\r
2926 errorText_ = errorStream_.str();
\r
2931 // Determine the driver data type.
\r
2932 ASIOChannelInfo channelInfo;
\r
2933 channelInfo.channel = 0;
\r
2934 if ( mode == OUTPUT ) channelInfo.isInput = false;
\r
2935 else channelInfo.isInput = true;
\r
2936 result = ASIOGetChannelInfo( &channelInfo );
\r
2937 if ( result != ASE_OK ) {
\r
2938 drivers.removeCurrentDriver();
\r
2939 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
\r
2940 errorText_ = errorStream_.str();
\r
2944 // Assuming WINDOWS host is always little-endian.
\r
2945 stream_.doByteSwap[mode] = false;
\r
2946 stream_.userFormat = format;
\r
2947 stream_.deviceFormat[mode] = 0;
\r
2948 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
\r
2949 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
2950 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
\r
2952 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
\r
2953 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
2954 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
\r
2956 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
\r
2957 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
2958 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
\r
2960 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
\r
2961 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
2962 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
\r
2964 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
\r
2965 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
2966 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
\r
2969 if ( stream_.deviceFormat[mode] == 0 ) {
\r
2970 drivers.removeCurrentDriver();
\r
2971 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
\r
2972 errorText_ = errorStream_.str();
\r
2976 // Set the buffer size. For a duplex stream, this will end up
\r
2977 // setting the buffer size based on the input constraints, which
\r
2979 long minSize, maxSize, preferSize, granularity;
\r
2980 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
\r
2981 if ( result != ASE_OK ) {
\r
2982 drivers.removeCurrentDriver();
\r
2983 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
\r
2984 errorText_ = errorStream_.str();
\r
2988 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
2989 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
2990 else if ( granularity == -1 ) {
\r
2991 // Make sure bufferSize is a power of two.
\r
2992 int log2_of_min_size = 0;
\r
2993 int log2_of_max_size = 0;
\r
2995 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
\r
2996 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
\r
2997 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
\r
3000 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
\r
3001 int min_delta_num = log2_of_min_size;
\r
3003 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
\r
3004 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
\r
3005 if (current_delta < min_delta) {
\r
3006 min_delta = current_delta;
\r
3007 min_delta_num = i;
\r
3011 *bufferSize = ( (unsigned int)1 << min_delta_num );
\r
3012 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
\r
3013 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
\r
3015 else if ( granularity != 0 ) {
\r
3016 // Set to an even multiple of granularity, rounding up.
\r
3017 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
\r
3020 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.bufferSize != *bufferSize ) {
\r
3021 drivers.removeCurrentDriver();
\r
3022 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
\r
3026 stream_.bufferSize = *bufferSize;
\r
3027 stream_.nBuffers = 2;
\r
3029 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
3030 else stream_.userInterleaved = true;
\r
3032 // ASIO always uses non-interleaved buffers.
\r
3033 stream_.deviceInterleaved[mode] = false;
\r
3035 // Allocate, if necessary, our AsioHandle structure for the stream.
\r
3036 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3037 if ( handle == 0 ) {
\r
3039 handle = new AsioHandle;
\r
3041 catch ( std::bad_alloc& ) {
\r
3042 //if ( handle == NULL ) {
\r
3043 drivers.removeCurrentDriver();
\r
3044 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
\r
3047 handle->bufferInfos = 0;
\r
3049 // Create a manual-reset event.
\r
3050 handle->condition = CreateEvent( NULL, // no security
\r
3051 TRUE, // manual-reset
\r
3052 FALSE, // non-signaled initially
\r
3053 NULL ); // unnamed
\r
3054 stream_.apiHandle = (void *) handle;
\r
3057 // Create the ASIO internal buffers. Since RtAudio sets up input
\r
3058 // and output separately, we'll have to dispose of previously
\r
3059 // created output buffers for a duplex stream.
\r
3060 long inputLatency, outputLatency;
\r
3061 if ( mode == INPUT && stream_.mode == OUTPUT ) {
\r
3062 ASIODisposeBuffers();
\r
3063 if ( handle->bufferInfos ) free( handle->bufferInfos );
\r
3066 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
\r
3067 bool buffersAllocated = false;
\r
3068 unsigned int i, nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3069 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
\r
3070 if ( handle->bufferInfos == NULL ) {
\r
3071 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
\r
3072 errorText_ = errorStream_.str();
\r
3076 ASIOBufferInfo *infos;
\r
3077 infos = handle->bufferInfos;
\r
3078 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
\r
3079 infos->isInput = ASIOFalse;
\r
3080 infos->channelNum = i + stream_.channelOffset[0];
\r
3081 infos->buffers[0] = infos->buffers[1] = 0;
\r
3083 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
\r
3084 infos->isInput = ASIOTrue;
\r
3085 infos->channelNum = i + stream_.channelOffset[1];
\r
3086 infos->buffers[0] = infos->buffers[1] = 0;
\r
3089 // prepare for callbacks
\r
3090 stream_.sampleRate = sampleRate;
\r
3091 stream_.device[mode] = device;
\r
3092 if ( stream_.mode == OUTPUT && mode == INPUT )
\r
3093 // We had already set up an output stream.
\r
3094 stream_.mode = DUPLEX;
\r
3096 stream_.mode = mode;
\r
3098 // store this class instance before registering callbacks, that are going to use it
\r
3099 asioCallbackInfo = &stream_.callbackInfo;
\r
3100 stream_.callbackInfo.object = (void *) this;
\r
3102 // Set up the ASIO callback structure and create the ASIO data buffers.
\r
3103 asioCallbacks.bufferSwitch = &bufferSwitch;
\r
3104 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
\r
3105 asioCallbacks.asioMessage = &asioMessages;
\r
3106 asioCallbacks.bufferSwitchTimeInfo = NULL;
\r
3107 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3108 if ( result != ASE_OK ) {
\r
3109 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
\r
3110 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
\r
3111 // in that case, let's be naïve and try that instead
\r
3112 *bufferSize = preferSize;
\r
3113 stream_.bufferSize = *bufferSize;
\r
3114 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
\r
3117 if ( result != ASE_OK ) {
\r
3118 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
\r
3119 errorText_ = errorStream_.str();
\r
3122 buffersAllocated = true;
\r
3123 stream_.state = STREAM_STOPPED;
\r
3125 // Set flags for buffer conversion.
\r
3126 stream_.doConvertBuffer[mode] = false;
\r
3127 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
3128 stream_.doConvertBuffer[mode] = true;
\r
3129 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
3130 stream_.nUserChannels[mode] > 1 )
\r
3131 stream_.doConvertBuffer[mode] = true;
\r
3133 // Allocate necessary internal buffers
\r
3134 unsigned long bufferBytes;
\r
3135 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
3136 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
3137 if ( stream_.userBuffer[mode] == NULL ) {
\r
3138 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
\r
3142 if ( stream_.doConvertBuffer[mode] ) {
\r
3144 bool makeBuffer = true;
\r
3145 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
3146 if ( stream_.mode == DUPLEX && stream_.deviceBuffer ) {
\r
3147 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
3148 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
3151 if ( makeBuffer ) {
\r
3152 bufferBytes *= *bufferSize;
\r
3153 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
3154 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
3155 if ( stream_.deviceBuffer == NULL ) {
\r
3156 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
\r
3162 // Determine device latencies
\r
3163 result = ASIOGetLatencies( &inputLatency, &outputLatency );
\r
3164 if ( result != ASE_OK ) {
\r
3165 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
\r
3166 errorText_ = errorStream_.str();
\r
3167 error( RtAudioError::WARNING); // warn but don't fail
\r
3170 stream_.latency[0] = outputLatency;
\r
3171 stream_.latency[1] = inputLatency;
\r
3174 // Setup the buffer conversion information structure. We don't use
\r
3175 // buffers to do channel offsets, so we override that parameter
\r
3177 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
\r
3182 if ( buffersAllocated )
\r
3183 ASIODisposeBuffers();
\r
3184 drivers.removeCurrentDriver();
\r
3187 CloseHandle( handle->condition );
\r
3188 if ( handle->bufferInfos )
\r
3189 free( handle->bufferInfos );
\r
3191 stream_.apiHandle = 0;
\r
3194 for ( int i=0; i<2; i++ ) {
\r
3195 if ( stream_.userBuffer[i] ) {
\r
3196 free( stream_.userBuffer[i] );
\r
3197 stream_.userBuffer[i] = 0;
\r
3201 if ( stream_.deviceBuffer ) {
\r
3202 free( stream_.deviceBuffer );
\r
3203 stream_.deviceBuffer = 0;
\r
3209 void RtApiAsio :: closeStream()
\r
3211 if ( stream_.state == STREAM_CLOSED ) {
\r
3212 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
\r
3213 error( RtAudioError::WARNING );
\r
3217 if ( stream_.state == STREAM_RUNNING ) {
\r
3218 stream_.state = STREAM_STOPPED;
\r
3221 ASIODisposeBuffers();
\r
3222 drivers.removeCurrentDriver();
\r
3224 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3226 CloseHandle( handle->condition );
\r
3227 if ( handle->bufferInfos )
\r
3228 free( handle->bufferInfos );
\r
3230 stream_.apiHandle = 0;
\r
3233 for ( int i=0; i<2; i++ ) {
\r
3234 if ( stream_.userBuffer[i] ) {
\r
3235 free( stream_.userBuffer[i] );
\r
3236 stream_.userBuffer[i] = 0;
\r
3240 if ( stream_.deviceBuffer ) {
\r
3241 free( stream_.deviceBuffer );
\r
3242 stream_.deviceBuffer = 0;
\r
3245 stream_.mode = UNINITIALIZED;
\r
3246 stream_.state = STREAM_CLOSED;
\r
// Cleared by startStream(); presumably guards against launching the
// asioStopStream() helper thread twice — TODO(review): confirm against
// the full file, its other uses are outside this section.
bool stopThreadCalled = false;
\r
3251 void RtApiAsio :: startStream()
\r
3254 if ( stream_.state == STREAM_RUNNING ) {
\r
3255 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
\r
3256 error( RtAudioError::WARNING );
\r
3260 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3261 ASIOError result = ASIOStart();
\r
3262 if ( result != ASE_OK ) {
\r
3263 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
\r
3264 errorText_ = errorStream_.str();
\r
3268 handle->drainCounter = 0;
\r
3269 handle->internalDrain = false;
\r
3270 ResetEvent( handle->condition );
\r
3271 stream_.state = STREAM_RUNNING;
\r
3275 stopThreadCalled = false;
\r
3277 if ( result == ASE_OK ) return;
\r
3278 error( RtAudioError::SYSTEM_ERROR );
\r
3281 void RtApiAsio :: stopStream()
\r
3284 if ( stream_.state == STREAM_STOPPED ) {
\r
3285 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
\r
3286 error( RtAudioError::WARNING );
\r
3290 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3291 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3292 if ( handle->drainCounter == 0 ) {
\r
3293 handle->drainCounter = 2;
\r
3294 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
\r
3298 stream_.state = STREAM_STOPPED;
\r
3300 ASIOError result = ASIOStop();
\r
3301 if ( result != ASE_OK ) {
\r
3302 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
\r
3303 errorText_ = errorStream_.str();
\r
3306 if ( result == ASE_OK ) return;
\r
3307 error( RtAudioError::SYSTEM_ERROR );
\r
3310 void RtApiAsio :: abortStream()
\r
3313 if ( stream_.state == STREAM_STOPPED ) {
\r
3314 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
\r
3315 error( RtAudioError::WARNING );
\r
3319 // The following lines were commented-out because some behavior was
\r
3320 // noted where the device buffers need to be zeroed to avoid
\r
3321 // continuing sound, even when the device buffers are completely
\r
3322 // disposed. So now, calling abort is the same as calling stop.
\r
3323 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3324 // handle->drainCounter = 2;
\r
3328 // This function will be called by a spawned thread when the user
\r
3329 // callback function signals that the stream should be stopped or
\r
3330 // aborted. It is necessary to handle it this way because the
\r
3331 // callbackEvent() function must return before the ASIOStop()
\r
3332 // function will return.
\r
3333 static unsigned __stdcall asioStopStream( void *ptr )
\r
3335 CallbackInfo *info = (CallbackInfo *) ptr;
\r
3336 RtApiAsio *object = (RtApiAsio *) info->object;
\r
3338 object->stopStream();
\r
3339 _endthreadex( 0 );
\r
3343 bool RtApiAsio :: callbackEvent( long bufferIndex )
\r
3345 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
\r
3346 if ( stream_.state == STREAM_CLOSED ) {
\r
3347 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
3348 error( RtAudioError::WARNING );
\r
3352 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
3353 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
\r
3355 // Check if we were draining the stream and signal if finished.
\r
3356 if ( handle->drainCounter > 3 ) {
\r
3358 stream_.state = STREAM_STOPPING;
\r
3359 if ( handle->internalDrain == false )
\r
3360 SetEvent( handle->condition );
\r
3361 else { // spawn a thread to stop the stream
\r
3362 unsigned threadId;
\r
3363 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3364 &stream_.callbackInfo, 0, &threadId );
\r
3369 // Invoke user callback to get fresh output data UNLESS we are
\r
3370 // draining stream.
\r
3371 if ( handle->drainCounter == 0 ) {
\r
3372 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
3373 double streamTime = getStreamTime();
\r
3374 RtAudioStreamStatus status = 0;
\r
3375 if ( stream_.mode != INPUT && asioXRun == true ) {
\r
3376 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
3379 if ( stream_.mode != OUTPUT && asioXRun == true ) {
\r
3380 status |= RTAUDIO_INPUT_OVERFLOW;
\r
3383 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
3384 stream_.bufferSize, streamTime, status, info->userData );
\r
3385 if ( cbReturnValue == 2 ) {
\r
3386 stream_.state = STREAM_STOPPING;
\r
3387 handle->drainCounter = 2;
\r
3388 unsigned threadId;
\r
3389 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
\r
3390 &stream_.callbackInfo, 0, &threadId );
\r
3393 else if ( cbReturnValue == 1 ) {
\r
3394 handle->drainCounter = 1;
\r
3395 handle->internalDrain = true;
\r
3399 unsigned int nChannels, bufferBytes, i, j;
\r
3400 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
\r
3401 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
3403 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
\r
3405 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
3407 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3408 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3409 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
\r
3413 else if ( stream_.doConvertBuffer[0] ) {
\r
3415 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
3416 if ( stream_.doByteSwap[0] )
\r
3417 byteSwapBuffer( stream_.deviceBuffer,
\r
3418 stream_.bufferSize * stream_.nDeviceChannels[0],
\r
3419 stream_.deviceFormat[0] );
\r
3421 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3422 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3423 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3424 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
\r
3430 if ( stream_.doByteSwap[0] )
\r
3431 byteSwapBuffer( stream_.userBuffer[0],
\r
3432 stream_.bufferSize * stream_.nUserChannels[0],
\r
3433 stream_.userFormat );
\r
3435 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3436 if ( handle->bufferInfos[i].isInput != ASIOTrue )
\r
3437 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
\r
3438 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
\r
3444 // Don't bother draining input
\r
3445 if ( handle->drainCounter ) {
\r
3446 handle->drainCounter++;
\r
3450 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
3452 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
\r
3454 if (stream_.doConvertBuffer[1]) {
\r
3456 // Always interleave ASIO input data.
\r
3457 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3458 if ( handle->bufferInfos[i].isInput == ASIOTrue )
\r
3459 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
\r
3460 handle->bufferInfos[i].buffers[bufferIndex],
\r
3464 if ( stream_.doByteSwap[1] )
\r
3465 byteSwapBuffer( stream_.deviceBuffer,
\r
3466 stream_.bufferSize * stream_.nDeviceChannels[1],
\r
3467 stream_.deviceFormat[1] );
\r
3468 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
3472 for ( i=0, j=0; i<nChannels; i++ ) {
\r
3473 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
\r
3474 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
\r
3475 handle->bufferInfos[i].buffers[bufferIndex],
\r
3480 if ( stream_.doByteSwap[1] )
\r
3481 byteSwapBuffer( stream_.userBuffer[1],
\r
3482 stream_.bufferSize * stream_.nUserChannels[1],
\r
3483 stream_.userFormat );
\r
3488 // The following call was suggested by Malte Clasen. While the API
\r
3489 // documentation indicates it should not be required, some device
\r
3490 // drivers apparently do not function correctly without it.
\r
3491 ASIOOutputReady();
\r
3493 RtApi::tickStreamTime();
\r
3497 static void sampleRateChanged( ASIOSampleRate sRate )
\r
3499 // The ASIO documentation says that this usually only happens during
\r
3500 // external sync. Audio processing is not stopped by the driver,
\r
3501 // actual sample rate might not have even changed, maybe only the
\r
3502 // sample rate status of an AES/EBU or S/PDIF digital input at the
\r
3505 RtApi *object = (RtApi *) asioCallbackInfo->object;
\r
3507 object->stopStream();
\r
3509 catch ( RtAudioError &exception ) {
\r
3510 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
\r
3514 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
\r
3517 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
\r
3521 switch( selector ) {
\r
3522 case kAsioSelectorSupported:
\r
3523 if ( value == kAsioResetRequest
\r
3524 || value == kAsioEngineVersion
\r
3525 || value == kAsioResyncRequest
\r
3526 || value == kAsioLatenciesChanged
\r
3527 // The following three were added for ASIO 2.0, you don't
\r
3528 // necessarily have to support them.
\r
3529 || value == kAsioSupportsTimeInfo
\r
3530 || value == kAsioSupportsTimeCode
\r
3531 || value == kAsioSupportsInputMonitor)
\r
3534 case kAsioResetRequest:
\r
3535 // Defer the task and perform the reset of the driver during the
\r
3536 // next "safe" situation. You cannot reset the driver right now,
\r
3537 // as this code is called from the driver. Reset the driver is
\r
3538 // done by completely destruct is. I.e. ASIOStop(),
\r
3539 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
\r
3541 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
\r
3544 case kAsioResyncRequest:
\r
3545 // This informs the application that the driver encountered some
\r
3546 // non-fatal data loss. It is used for synchronization purposes
\r
3547 // of different media. Added mainly to work around the Win16Mutex
\r
3548 // problems in Windows 95/98 with the Windows Multimedia system,
\r
3549 // which could lose data because the Mutex was held too long by
\r
3550 // another thread. However a driver can issue it in other
\r
3551 // situations, too.
\r
3552 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
\r
3556 case kAsioLatenciesChanged:
\r
3557 // This will inform the host application that the drivers were
\r
3558 // latencies changed. Beware, it this does not mean that the
\r
3559 // buffer sizes have changed! You might need to update internal
\r
3561 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
\r
3564 case kAsioEngineVersion:
\r
3565 // Return the supported ASIO version of the host application. If
\r
3566 // a host application does not implement this selector, ASIO 1.0
\r
3567 // is assumed by the driver.
\r
3570 case kAsioSupportsTimeInfo:
\r
3571 // Informs the driver whether the
\r
3572 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
\r
3573 // For compatibility with ASIO 1.0 drivers the host application
\r
3574 // should always support the "old" bufferSwitch method, too.
\r
3577 case kAsioSupportsTimeCode:
\r
3578 // Informs the driver whether application is interested in time
\r
3579 // code info. If an application does not need to know about time
\r
3580 // code, the driver has less work to do.
\r
3587 static const char* getAsioErrorString( ASIOError result )
\r
3592 const char*message;
\r
3595 static const Messages m[] =
\r
3597 { ASE_NotPresent, "Hardware input or output is not present or available." },
\r
3598 { ASE_HWMalfunction, "Hardware is malfunctioning." },
\r
3599 { ASE_InvalidParameter, "Invalid input parameter." },
\r
3600 { ASE_InvalidMode, "Invalid mode." },
\r
3601 { ASE_SPNotAdvancing, "Sample position not advancing." },
\r
3602 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
\r
3603 { ASE_NoMemory, "Not enough memory to complete the request." }
\r
3606 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
\r
3607 if ( m[i].value == result ) return m[i].message;
\r
3609 return "Unknown error.";
\r
3612 //******************** End of __WINDOWS_ASIO__ *********************//
\r
3616 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
\r
3618 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
\r
3619 // - Introduces support for the Windows WASAPI API
\r
3620 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
\r
3621 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
\r
3622 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
\r
3627 #include <audioclient.h>
\r
3629 #include <mmdeviceapi.h>
\r
3630 #include <functiondiscoverykeys_devpkey.h>
\r
3632 //=============================================================================
\r
// Release a COM interface pointer and reset it to NULL.
// The NULL check makes the macro safe to invoke on pointers that were
// never acquired (calling Release() through NULL would crash).
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
\r
3641 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
\r
3643 //-----------------------------------------------------------------------------
\r
3645 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
\r
3646 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
\r
3647 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
\r
3648 // provide intermediate storage for read / write synchronization.
\r
3649 class WasapiBuffer
\r
3653 : buffer_( NULL ),
\r
3662 // sets the length of the internal ring buffer
\r
3663 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
\r
3666 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
\r
3668 bufferSize_ = bufferSize;
\r
3673 // attempt to push a buffer into the ring buffer at the current "in" index
\r
3674 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3676 if ( !buffer || // incoming buffer is NULL
\r
3677 bufferSize == 0 || // incoming buffer has no data
\r
3678 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3683 unsigned int relOutIndex = outIndex_;
\r
3684 unsigned int inIndexEnd = inIndex_ + bufferSize;
\r
3685 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
\r
3686 relOutIndex += bufferSize_;
\r
3689 // "in" index can end on the "out" index but cannot begin at it
\r
3690 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
\r
3691 return false; // not enough space between "in" index and "out" index
\r
3694 // copy buffer from external to internal
\r
3695 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
\r
3696 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3697 int fromInSize = bufferSize - fromZeroSize;
\r
3701 case RTAUDIO_SINT8:
\r
3702 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
\r
3703 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
\r
3705 case RTAUDIO_SINT16:
\r
3706 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
\r
3707 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
\r
3709 case RTAUDIO_SINT24:
\r
3710 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
\r
3711 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
\r
3713 case RTAUDIO_SINT32:
\r
3714 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
\r
3715 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
\r
3717 case RTAUDIO_FLOAT32:
\r
3718 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
\r
3719 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
\r
3721 case RTAUDIO_FLOAT64:
\r
3722 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
\r
3723 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
\r
3727 // update "in" index
\r
3728 inIndex_ += bufferSize;
\r
3729 inIndex_ %= bufferSize_;
\r
3734 // attempt to pull a buffer from the ring buffer from the current "out" index
\r
3735 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
\r
3737 if ( !buffer || // incoming buffer is NULL
\r
3738 bufferSize == 0 || // incoming buffer has no data
\r
3739 bufferSize > bufferSize_ ) // incoming buffer too large
\r
3744 unsigned int relInIndex = inIndex_;
\r
3745 unsigned int outIndexEnd = outIndex_ + bufferSize;
\r
3746 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
\r
3747 relInIndex += bufferSize_;
\r
3750 // "out" index can begin at and end on the "in" index
\r
3751 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
\r
3752 return false; // not enough space between "out" index and "in" index
\r
3755 // copy buffer from internal to external
\r
3756 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
\r
3757 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
\r
3758 int fromOutSize = bufferSize - fromZeroSize;
\r
3762 case RTAUDIO_SINT8:
\r
3763 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
\r
3764 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
\r
3766 case RTAUDIO_SINT16:
\r
3767 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
\r
3768 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
\r
3770 case RTAUDIO_SINT24:
\r
3771 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
\r
3772 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
\r
3774 case RTAUDIO_SINT32:
\r
3775 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
\r
3776 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
\r
3778 case RTAUDIO_FLOAT32:
\r
3779 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
\r
3780 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
\r
3782 case RTAUDIO_FLOAT64:
\r
3783 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
\r
3784 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
\r
3788 // update "out" index
\r
3789 outIndex_ += bufferSize;
\r
3790 outIndex_ %= bufferSize_;
\r
3797 unsigned int bufferSize_;
\r
3798 unsigned int inIndex_;
\r
3799 unsigned int outIndex_;
\r
3802 //-----------------------------------------------------------------------------
\r
3804 // In order to satisfy WASAPI's buffer requirements, we need a means of converting sample rate
\r
3805 // between HW and the user. The convertBufferWasapi function is used to perform this conversion
\r
3806 // between HwIn->UserIn and UserOut->HwOut during the stream callback loop.
\r
3807 // This sample rate converter favors speed over quality, and works best with conversions between
\r
3808 // one rate and its multiple.
\r
3809 void convertBufferWasapi( char* outBuffer,
\r
3810 const char* inBuffer,
\r
3811 const unsigned int& channelCount,
\r
3812 const unsigned int& inSampleRate,
\r
3813 const unsigned int& outSampleRate,
\r
3814 const unsigned int& inSampleCount,
\r
3815 unsigned int& outSampleCount,
\r
3816 const RtAudioFormat& format )
\r
3818 // calculate the new outSampleCount and relative sampleStep
\r
3819 float sampleRatio = ( float ) outSampleRate / inSampleRate;
\r
3820 float sampleStep = 1.0f / sampleRatio;
\r
3821 float inSampleFraction = 0.0f;
\r
3823 outSampleCount = ( unsigned int ) roundf( inSampleCount * sampleRatio );
\r
3825 // frame-by-frame, copy each relative input sample into it's corresponding output sample
\r
3826 for ( unsigned int outSample = 0; outSample < outSampleCount; outSample++ )
\r
3828 unsigned int inSample = ( unsigned int ) inSampleFraction;
\r
3832 case RTAUDIO_SINT8:
\r
3833 memcpy( &( ( char* ) outBuffer )[ outSample * channelCount ], &( ( char* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( char ) );
\r
3835 case RTAUDIO_SINT16:
\r
3836 memcpy( &( ( short* ) outBuffer )[ outSample * channelCount ], &( ( short* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( short ) );
\r
3838 case RTAUDIO_SINT24:
\r
3839 memcpy( &( ( S24* ) outBuffer )[ outSample * channelCount ], &( ( S24* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( S24 ) );
\r
3841 case RTAUDIO_SINT32:
\r
3842 memcpy( &( ( int* ) outBuffer )[ outSample * channelCount ], &( ( int* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( int ) );
\r
3844 case RTAUDIO_FLOAT32:
\r
3845 memcpy( &( ( float* ) outBuffer )[ outSample * channelCount ], &( ( float* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( float ) );
\r
3847 case RTAUDIO_FLOAT64:
\r
3848 memcpy( &( ( double* ) outBuffer )[ outSample * channelCount ], &( ( double* ) inBuffer )[ inSample * channelCount ], channelCount * sizeof( double ) );
\r
3852 // jump to next in sample
\r
3853 inSampleFraction += sampleStep;
\r
3857 //-----------------------------------------------------------------------------
\r
3859 // A structure to hold various information related to the WASAPI implementation.
\r
3860 struct WasapiHandle
\r
3862 IAudioClient* captureAudioClient;
\r
3863 IAudioClient* renderAudioClient;
\r
3864 IAudioCaptureClient* captureClient;
\r
3865 IAudioRenderClient* renderClient;
\r
3866 HANDLE captureEvent;
\r
3867 HANDLE renderEvent;
\r
3870 : captureAudioClient( NULL ),
\r
3871 renderAudioClient( NULL ),
\r
3872 captureClient( NULL ),
\r
3873 renderClient( NULL ),
\r
3874 captureEvent( NULL ),
\r
3875 renderEvent( NULL ) {}
\r
3878 //=============================================================================
\r
3880 RtApiWasapi::RtApiWasapi()
\r
3881 : coInitialized_( false ), deviceEnumerator_( NULL )
\r
3883 // WASAPI can run either apartment or multi-threaded
\r
3884 HRESULT hr = CoInitialize( NULL );
\r
3885 if ( !FAILED( hr ) )
\r
3886 coInitialized_ = true;
\r
3888 // Instantiate device enumerator
\r
3889 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
\r
3890 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
\r
3891 ( void** ) &deviceEnumerator_ );
\r
3893 if ( FAILED( hr ) ) {
\r
3894 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
\r
3895 error( RtAudioError::DRIVER_ERROR );
\r
3899 //-----------------------------------------------------------------------------
\r
3901 RtApiWasapi::~RtApiWasapi()
\r
3903 if ( stream_.state != STREAM_CLOSED )
\r
3906 SAFE_RELEASE( deviceEnumerator_ );
\r
3908 // If this object previously called CoInitialize()
\r
3909 if ( coInitialized_ )
\r
3913 //=============================================================================
\r
3915 unsigned int RtApiWasapi::getDeviceCount( void )
\r
3917 unsigned int captureDeviceCount = 0;
\r
3918 unsigned int renderDeviceCount = 0;
\r
3920 IMMDeviceCollection* captureDevices = NULL;
\r
3921 IMMDeviceCollection* renderDevices = NULL;
\r
3923 // Count capture devices
\r
3924 errorText_.clear();
\r
3925 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3926 if ( FAILED( hr ) ) {
\r
3927 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
\r
3931 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3932 if ( FAILED( hr ) ) {
\r
3933 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
\r
3937 // Count render devices
\r
3938 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
3939 if ( FAILED( hr ) ) {
\r
3940 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
\r
3944 hr = renderDevices->GetCount( &renderDeviceCount );
\r
3945 if ( FAILED( hr ) ) {
\r
3946 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
\r
3951 // release all references
\r
3952 SAFE_RELEASE( captureDevices );
\r
3953 SAFE_RELEASE( renderDevices );
\r
3955 if ( errorText_.empty() )
\r
3956 return captureDeviceCount + renderDeviceCount;
\r
3958 error( RtAudioError::DRIVER_ERROR );
\r
3962 //-----------------------------------------------------------------------------
\r
3964 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
\r
3966 RtAudio::DeviceInfo info;
\r
3967 unsigned int captureDeviceCount = 0;
\r
3968 unsigned int renderDeviceCount = 0;
\r
3969 std::string defaultDeviceName;
\r
3970 bool isCaptureDevice = false;
\r
3972 PROPVARIANT deviceNameProp;
\r
3973 PROPVARIANT defaultDeviceNameProp;
\r
3975 IMMDeviceCollection* captureDevices = NULL;
\r
3976 IMMDeviceCollection* renderDevices = NULL;
\r
3977 IMMDevice* devicePtr = NULL;
\r
3978 IMMDevice* defaultDevicePtr = NULL;
\r
3979 IAudioClient* audioClient = NULL;
\r
3980 IPropertyStore* devicePropStore = NULL;
\r
3981 IPropertyStore* defaultDevicePropStore = NULL;
\r
3983 WAVEFORMATEX* deviceFormat = NULL;
\r
3984 WAVEFORMATEX* closestMatchFormat = NULL;
\r
3987 info.probed = false;
\r
3989 // Count capture devices
\r
3990 errorText_.clear();
\r
3991 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
3992 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
3993 if ( FAILED( hr ) ) {
\r
3994 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
\r
3998 hr = captureDevices->GetCount( &captureDeviceCount );
\r
3999 if ( FAILED( hr ) ) {
\r
4000 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
\r
4004 // Count render devices
\r
4005 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4006 if ( FAILED( hr ) ) {
\r
4007 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
\r
4011 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4012 if ( FAILED( hr ) ) {
\r
4013 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
\r
4017 // validate device index
\r
4018 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4019 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
\r
4020 errorType = RtAudioError::INVALID_USE;
\r
4024 // determine whether index falls within capture or render devices
\r
4025 if ( device >= renderDeviceCount ) {
\r
4026 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4027 if ( FAILED( hr ) ) {
\r
4028 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
\r
4031 isCaptureDevice = true;
\r
4034 hr = renderDevices->Item( device, &devicePtr );
\r
4035 if ( FAILED( hr ) ) {
\r
4036 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
\r
4039 isCaptureDevice = false;
\r
4042 // get default device name
\r
4043 if ( isCaptureDevice ) {
\r
4044 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
\r
4045 if ( FAILED( hr ) ) {
\r
4046 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
\r
4051 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
\r
4052 if ( FAILED( hr ) ) {
\r
4053 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
\r
4058 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
\r
4059 if ( FAILED( hr ) ) {
\r
4060 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
\r
4063 PropVariantInit( &defaultDeviceNameProp );
\r
4065 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
\r
4066 if ( FAILED( hr ) ) {
\r
4067 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
\r
4071 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
\r
4074 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
\r
4075 if ( FAILED( hr ) ) {
\r
4076 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
\r
4080 PropVariantInit( &deviceNameProp );
\r
4082 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
\r
4083 if ( FAILED( hr ) ) {
\r
4084 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
\r
4088 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
\r
4091 if ( isCaptureDevice ) {
\r
4092 info.isDefaultInput = info.name == defaultDeviceName;
\r
4093 info.isDefaultOutput = false;
\r
4096 info.isDefaultInput = false;
\r
4097 info.isDefaultOutput = info.name == defaultDeviceName;
\r
4101 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
\r
4102 if ( FAILED( hr ) ) {
\r
4103 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
\r
4107 hr = audioClient->GetMixFormat( &deviceFormat );
\r
4108 if ( FAILED( hr ) ) {
\r
4109 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
\r
4113 if ( isCaptureDevice ) {
\r
4114 info.inputChannels = deviceFormat->nChannels;
\r
4115 info.outputChannels = 0;
\r
4116 info.duplexChannels = 0;
\r
4119 info.inputChannels = 0;
\r
4120 info.outputChannels = deviceFormat->nChannels;
\r
4121 info.duplexChannels = 0;
\r
4125 info.sampleRates.clear();
\r
4127 // allow support for all sample rates as we have a built-in sample rate converter
\r
4128 for ( unsigned int i = 0; i < MAX_SAMPLE_RATES; i++ ) {
\r
4129 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
4133 info.nativeFormats = 0;
\r
4135 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
\r
4136 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4137 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
\r
4139 if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4140 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
4142 else if ( deviceFormat->wBitsPerSample == 64 ) {
\r
4143 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
4146 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
\r
4147 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
\r
4148 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
\r
4150 if ( deviceFormat->wBitsPerSample == 8 ) {
\r
4151 info.nativeFormats |= RTAUDIO_SINT8;
\r
4153 else if ( deviceFormat->wBitsPerSample == 16 ) {
\r
4154 info.nativeFormats |= RTAUDIO_SINT16;
\r
4156 else if ( deviceFormat->wBitsPerSample == 24 ) {
\r
4157 info.nativeFormats |= RTAUDIO_SINT24;
\r
4159 else if ( deviceFormat->wBitsPerSample == 32 ) {
\r
4160 info.nativeFormats |= RTAUDIO_SINT32;
\r
4165 info.probed = true;
\r
4168 // release all references
\r
4169 PropVariantClear( &deviceNameProp );
\r
4170 PropVariantClear( &defaultDeviceNameProp );
\r
4172 SAFE_RELEASE( captureDevices );
\r
4173 SAFE_RELEASE( renderDevices );
\r
4174 SAFE_RELEASE( devicePtr );
\r
4175 SAFE_RELEASE( defaultDevicePtr );
\r
4176 SAFE_RELEASE( audioClient );
\r
4177 SAFE_RELEASE( devicePropStore );
\r
4178 SAFE_RELEASE( defaultDevicePropStore );
\r
4180 CoTaskMemFree( deviceFormat );
\r
4181 CoTaskMemFree( closestMatchFormat );
\r
4183 if ( !errorText_.empty() )
\r
4184 error( errorType );
\r
4188 //-----------------------------------------------------------------------------
\r
4190 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
\r
4192 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4193 if ( getDeviceInfo( i ).isDefaultOutput ) {
\r
4201 //-----------------------------------------------------------------------------
\r
4203 unsigned int RtApiWasapi::getDefaultInputDevice( void )
\r
4205 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
\r
4206 if ( getDeviceInfo( i ).isDefaultInput ) {
\r
4214 //-----------------------------------------------------------------------------
\r
4216 void RtApiWasapi::closeStream( void )
\r
4218 if ( stream_.state == STREAM_CLOSED ) {
\r
4219 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
\r
4220 error( RtAudioError::WARNING );
\r
4224 if ( stream_.state != STREAM_STOPPED )
\r
4227 // clean up stream memory
\r
4228 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
\r
4229 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
\r
4231 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
\r
4232 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
\r
4234 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
\r
4235 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
\r
4237 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
\r
4238 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
\r
4240 delete ( WasapiHandle* ) stream_.apiHandle;
\r
4241 stream_.apiHandle = NULL;
\r
4243 for ( int i = 0; i < 2; i++ ) {
\r
4244 if ( stream_.userBuffer[i] ) {
\r
4245 free( stream_.userBuffer[i] );
\r
4246 stream_.userBuffer[i] = 0;
\r
4250 if ( stream_.deviceBuffer ) {
\r
4251 free( stream_.deviceBuffer );
\r
4252 stream_.deviceBuffer = 0;
\r
4255 // update stream state
\r
4256 stream_.state = STREAM_CLOSED;
\r
4259 //-----------------------------------------------------------------------------
\r
4261 void RtApiWasapi::startStream( void )
\r
4265 if ( stream_.state == STREAM_RUNNING ) {
\r
4266 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
\r
4267 error( RtAudioError::WARNING );
\r
4271 // update stream state
\r
4272 stream_.state = STREAM_RUNNING;
\r
4274 // create WASAPI stream thread
\r
4275 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
\r
4277 if ( !stream_.callbackInfo.thread ) {
\r
4278 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
\r
4279 error( RtAudioError::THREAD_ERROR );
\r
4282 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
\r
4283 ResumeThread( ( void* ) stream_.callbackInfo.thread );
\r
4287 //-----------------------------------------------------------------------------
\r
4289 void RtApiWasapi::stopStream( void )
\r
4293 if ( stream_.state == STREAM_STOPPED ) {
\r
4294 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
\r
4295 error( RtAudioError::WARNING );
\r
4299 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4300 stream_.state = STREAM_STOPPING;
\r
4302 // wait until stream thread is stopped
\r
4303 while( stream_.state != STREAM_STOPPED ) {
\r
4307 // Wait for the last buffer to play before stopping.
\r
4308 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
\r
4310 // stop capture client if applicable
\r
4311 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4312 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4313 if ( FAILED( hr ) ) {
\r
4314 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
\r
4315 error( RtAudioError::DRIVER_ERROR );
\r
4320 // stop render client if applicable
\r
4321 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4322 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4323 if ( FAILED( hr ) ) {
\r
4324 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
\r
4325 error( RtAudioError::DRIVER_ERROR );
\r
4330 // close thread handle
\r
4331 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4332 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
\r
4333 error( RtAudioError::THREAD_ERROR );
\r
4337 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4340 //-----------------------------------------------------------------------------
\r
4342 void RtApiWasapi::abortStream( void )
\r
4346 if ( stream_.state == STREAM_STOPPED ) {
\r
4347 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
\r
4348 error( RtAudioError::WARNING );
\r
4352 // inform stream thread by setting stream state to STREAM_STOPPING
\r
4353 stream_.state = STREAM_STOPPING;
\r
4355 // wait until stream thread is stopped
\r
4356 while ( stream_.state != STREAM_STOPPED ) {
\r
4360 // stop capture client if applicable
\r
4361 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
\r
4362 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
\r
4363 if ( FAILED( hr ) ) {
\r
4364 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
\r
4365 error( RtAudioError::DRIVER_ERROR );
\r
4370 // stop render client if applicable
\r
4371 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
\r
4372 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
\r
4373 if ( FAILED( hr ) ) {
\r
4374 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
\r
4375 error( RtAudioError::DRIVER_ERROR );
\r
4380 // close thread handle
\r
4381 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
\r
4382 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
\r
4383 error( RtAudioError::THREAD_ERROR );
\r
4387 stream_.callbackInfo.thread = (ThreadHandle) NULL;
\r
4390 //-----------------------------------------------------------------------------
\r
4392 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
4393 unsigned int firstChannel, unsigned int sampleRate,
\r
4394 RtAudioFormat format, unsigned int* bufferSize,
\r
4395 RtAudio::StreamOptions* options )
\r
4397 bool methodResult = FAILURE;
\r
4398 unsigned int captureDeviceCount = 0;
\r
4399 unsigned int renderDeviceCount = 0;
\r
4401 IMMDeviceCollection* captureDevices = NULL;
\r
4402 IMMDeviceCollection* renderDevices = NULL;
\r
4403 IMMDevice* devicePtr = NULL;
\r
4404 WAVEFORMATEX* deviceFormat = NULL;
\r
4405 unsigned int bufferBytes;
\r
4406 stream_.state = STREAM_STOPPED;
\r
4408 // create API Handle if not already created
\r
4409 if ( !stream_.apiHandle )
\r
4410 stream_.apiHandle = ( void* ) new WasapiHandle();
\r
4412 // Count capture devices
\r
4413 errorText_.clear();
\r
4414 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4415 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
\r
4416 if ( FAILED( hr ) ) {
\r
4417 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
\r
4421 hr = captureDevices->GetCount( &captureDeviceCount );
\r
4422 if ( FAILED( hr ) ) {
\r
4423 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
\r
4427 // Count render devices
\r
4428 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
\r
4429 if ( FAILED( hr ) ) {
\r
4430 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
\r
4434 hr = renderDevices->GetCount( &renderDeviceCount );
\r
4435 if ( FAILED( hr ) ) {
\r
4436 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
\r
4440 // validate device index
\r
4441 if ( device >= captureDeviceCount + renderDeviceCount ) {
\r
4442 errorType = RtAudioError::INVALID_USE;
\r
4443 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
\r
4447 // determine whether index falls within capture or render devices
\r
4448 if ( device >= renderDeviceCount ) {
\r
4449 if ( mode != INPUT ) {
\r
4450 errorType = RtAudioError::INVALID_USE;
\r
4451 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
\r
4455 // retrieve captureAudioClient from devicePtr
\r
4456 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4458 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
\r
4459 if ( FAILED( hr ) ) {
\r
4460 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
\r
4464 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4465 NULL, ( void** ) &captureAudioClient );
\r
4466 if ( FAILED( hr ) ) {
\r
4467 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4471 hr = captureAudioClient->GetMixFormat( &deviceFormat );
\r
4472 if ( FAILED( hr ) ) {
\r
4473 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4477 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4478 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4481 if ( mode != OUTPUT ) {
\r
4482 errorType = RtAudioError::INVALID_USE;
\r
4483 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
\r
4487 // retrieve renderAudioClient from devicePtr
\r
4488 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4490 hr = renderDevices->Item( device, &devicePtr );
\r
4491 if ( FAILED( hr ) ) {
\r
4492 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
\r
4496 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
\r
4497 NULL, ( void** ) &renderAudioClient );
\r
4498 if ( FAILED( hr ) ) {
\r
4499 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
\r
4503 hr = renderAudioClient->GetMixFormat( &deviceFormat );
\r
4504 if ( FAILED( hr ) ) {
\r
4505 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
\r
4509 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
\r
4510 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
\r
4513 // fill stream data
\r
4514 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
\r
4515 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
\r
4516 stream_.mode = DUPLEX;
\r
4519 stream_.mode = mode;
\r
4522 stream_.device[mode] = device;
\r
4523 stream_.doByteSwap[mode] = false;
\r
4524 stream_.sampleRate = sampleRate;
\r
4525 stream_.bufferSize = *bufferSize;
\r
4526 stream_.nBuffers = 1;
\r
4527 stream_.nUserChannels[mode] = channels;
\r
4528 stream_.channelOffset[mode] = firstChannel;
\r
4529 stream_.userFormat = format;
\r
4530 stream_.deviceFormat[mode] = getDeviceInfo( device ).nativeFormats;
\r
4532 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
4533 stream_.userInterleaved = false;
\r
4535 stream_.userInterleaved = true;
\r
4536 stream_.deviceInterleaved[mode] = true;
\r
4538 // Set flags for buffer conversion.
\r
4539 stream_.doConvertBuffer[mode] = false;
\r
4540 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
\r
4541 stream_.nUserChannels != stream_.nDeviceChannels )
\r
4542 stream_.doConvertBuffer[mode] = true;
\r
4543 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
4544 stream_.nUserChannels[mode] > 1 )
\r
4545 stream_.doConvertBuffer[mode] = true;
\r
4547 if ( stream_.doConvertBuffer[mode] )
\r
4548 setConvertInfo( mode, 0 );
\r
4550 // Allocate necessary internal buffers
\r
4551 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
\r
4553 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
\r
4554 if ( !stream_.userBuffer[mode] ) {
\r
4555 errorType = RtAudioError::MEMORY_ERROR;
\r
4556 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
\r
4560 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
\r
4561 stream_.callbackInfo.priority = 15;
\r
4563 stream_.callbackInfo.priority = 0;
\r
4565 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
\r
4566 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
\r
4568 methodResult = SUCCESS;
\r
4572 SAFE_RELEASE( captureDevices );
\r
4573 SAFE_RELEASE( renderDevices );
\r
4574 SAFE_RELEASE( devicePtr );
\r
4575 CoTaskMemFree( deviceFormat );
\r
4577 // if method failed, close the stream
\r
4578 if ( methodResult == FAILURE )
\r
4581 if ( !errorText_.empty() )
\r
4582 error( errorType );
\r
4583 return methodResult;
\r
4586 //=============================================================================
\r
// Static thread entry point passed to CreateThread: unpack the RtApiWasapi
// instance pointer and run its blocking stream-processing loop on this thread.
// NOTE(review): this listing is decimated (embedded line numbers jump
// 4588 -> 4591); the function's braces and `return 0;` are not visible here.
4588 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
\r
4591 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
\r
// Static thread entry point: requests a graceful stop of the stream owned by
// the given RtApiWasapi instance. Spawned from wasapiThread() when the user
// callback returns 1, so the stop does not deadlock the audio thread itself.
// NOTE(review): braces / return statement missing from this decimated listing.
4596 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
\r
4599 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
\r
// Static thread entry point: aborts (immediately stops) the stream owned by
// the given RtApiWasapi instance. Spawned from wasapiThread() when the user
// callback returns 2, mirroring stopWasapiThread above.
// NOTE(review): braces / return statement missing from this decimated listing.
4604 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
\r
4607 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
\r
4612 //-----------------------------------------------------------------------------
\r
// Core WASAPI stream thread. Runs until stream_.state == STREAM_STOPPING.
// Per iteration it: (1) pulls captured audio from the intermediate
// captureBuffer, resamples/converts it to the user format; (2) invokes the
// user callback; (3) converts/resamples the callback's output and pushes it
// into renderBuffer; (4) services the WASAPI capture and render endpoints
// (GetBuffer/ReleaseBuffer), blocking on the endpoint events when no work is
// ready. On first entry it also lazily creates the capture/render clients.
// NOTE(review): this listing is decimated — the embedded original line
// numbers skip (e.g. 4667 -> 4672), so the error-path `goto Exit;`
// statements, the `Exit:` label, closing braces, and `else` lines of the
// original are not visible here. Comments below describe only what is shown.
4614 void RtApiWasapi::wasapiThread()
\r
4616 // as this is a new thread, we must CoInitialize it
\r
4617 CoInitialize( NULL );
\r
// Fetch per-stream COM interfaces and events cached in the WasapiHandle.
4621 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
\r
4622 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
\r
4623 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
\r
4624 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
\r
4625 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
\r
4626 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
\r
4628 WAVEFORMATEX* captureFormat = NULL;
\r
4629 WAVEFORMATEX* renderFormat = NULL;
\r
// Ratios of device sample rate to user sample rate, used to size buffers
// and drive convertBufferWasapi() resampling below.
4630 float captureSrRatio = 0.0f;
\r
4631 float renderSrRatio = 0.0f;
\r
4632 WasapiBuffer captureBuffer;
\r
4633 WasapiBuffer renderBuffer;
\r
4635 // declare local stream variables
\r
4636 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
\r
4637 BYTE* streamBuffer = NULL;
\r
4638 unsigned long captureFlags = 0;
\r
4639 unsigned int bufferFrameCount = 0;
\r
4640 unsigned int numFramesPadding = 0;
\r
4641 unsigned int convBufferSize = 0;
\r
4642 bool callbackPushed = false;
\r
4643 bool callbackPulled = false;
\r
4644 bool callbackStopped = false;
\r
4645 int callbackResult = 0;
\r
4647 // convBuffer is used to store converted buffers between WASAPI and the user
\r
4648 char* convBuffer = NULL;
\r
4649 unsigned int convBuffSize = 0;
\r
4650 unsigned int deviceBuffSize = 0;
\r
4652 errorText_.clear();
\r
4653 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
\r
4655 // Attempt to assign "Pro Audio" characteristic to thread
\r
// NOTE(review): neither the LoadLibrary result nor the GetProcAddress result
// is null-checked in the visible lines before being used — confirm the
// missing lines (4657, etc.) guard these, else this can crash on systems
// without AVRT.dll.
4656 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
\r
4658 DWORD taskIndex = 0;
\r
4659 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
\r
4660 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
\r
4661 FreeLibrary( AvrtDll );
\r
4664 // start capture stream if applicable
\r
4665 if ( captureAudioClient ) {
\r
4666 hr = captureAudioClient->GetMixFormat( &captureFormat );
\r
4667 if ( FAILED( hr ) ) {
\r
4668 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4672 captureSrRatio = ( ( float ) captureFormat->nSamplesPerSec / stream_.sampleRate );
\r
4674 // initialize capture stream according to desire buffer size
\r
4675 float desiredBufferSize = stream_.bufferSize * captureSrRatio;
\r
// REFERENCE_TIME is in 100-ns units, hence the factor of 10,000,000 per second.
4676 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / captureFormat->nSamplesPerSec );
\r
// Lazily create the capture client on the first pass through this thread.
4678 if ( !captureClient ) {
\r
4679 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4680 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4681 desiredBufferPeriod,
\r
4682 desiredBufferPeriod,
\r
4685 if ( FAILED( hr ) ) {
\r
4686 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
\r
4690 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
\r
4691 ( void** ) &captureClient );
\r
4692 if ( FAILED( hr ) ) {
\r
4693 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
\r
4697 // configure captureEvent to trigger on every available capture buffer
\r
4698 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4699 if ( !captureEvent ) {
\r
4700 errorType = RtAudioError::SYSTEM_ERROR;
\r
4701 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
\r
4705 hr = captureAudioClient->SetEventHandle( captureEvent );
\r
4706 if ( FAILED( hr ) ) {
\r
4707 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
\r
// Cache the newly created client/event back into the shared WasapiHandle so
// stopStream()/closeStream() can release them.
4711 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
\r
4712 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
\r
4715 unsigned int inBufferSize = 0;
\r
4716 hr = captureAudioClient->GetBufferSize( &inBufferSize );
\r
4717 if ( FAILED( hr ) ) {
\r
4718 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
\r
4722 // scale outBufferSize according to stream->user sample rate ratio
\r
4723 unsigned int outBufferSize = ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT];
\r
4724 inBufferSize *= stream_.nDeviceChannels[INPUT];
\r
4726 // set captureBuffer size
\r
4727 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
\r
4729 // reset the capture stream
\r
4730 hr = captureAudioClient->Reset();
\r
4731 if ( FAILED( hr ) ) {
\r
4732 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
\r
4736 // start the capture stream
\r
4737 hr = captureAudioClient->Start();
\r
4738 if ( FAILED( hr ) ) {
\r
4739 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
\r
4744 // start render stream if applicable
\r
// This mirrors the capture setup above, applied to the render endpoint.
4745 if ( renderAudioClient ) {
\r
4746 hr = renderAudioClient->GetMixFormat( &renderFormat );
\r
4747 if ( FAILED( hr ) ) {
\r
4748 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
\r
4752 renderSrRatio = ( ( float ) renderFormat->nSamplesPerSec / stream_.sampleRate );
\r
4754 // initialize render stream according to desire buffer size
\r
4755 float desiredBufferSize = stream_.bufferSize * renderSrRatio;
\r
4756 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) desiredBufferSize * 10000000 / renderFormat->nSamplesPerSec );
\r
4758 if ( !renderClient ) {
\r
4759 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
\r
4760 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
\r
4761 desiredBufferPeriod,
\r
4762 desiredBufferPeriod,
\r
4765 if ( FAILED( hr ) ) {
\r
4766 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
\r
4770 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
\r
4771 ( void** ) &renderClient );
\r
4772 if ( FAILED( hr ) ) {
\r
4773 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
\r
4777 // configure renderEvent to trigger on every available render buffer
\r
4778 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
\r
4779 if ( !renderEvent ) {
\r
4780 errorType = RtAudioError::SYSTEM_ERROR;
\r
4781 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
\r
4785 hr = renderAudioClient->SetEventHandle( renderEvent );
\r
4786 if ( FAILED( hr ) ) {
\r
4787 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
\r
4791 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
\r
4792 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
\r
4795 unsigned int outBufferSize = 0;
\r
4796 hr = renderAudioClient->GetBufferSize( &outBufferSize );
\r
4797 if ( FAILED( hr ) ) {
\r
4798 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
\r
4802 // scale inBufferSize according to user->stream sample rate ratio
\r
4803 unsigned int inBufferSize = ( unsigned int ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT];
\r
4804 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
\r
4806 // set renderBuffer size
\r
4807 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4809 // reset the render stream
\r
4810 hr = renderAudioClient->Reset();
\r
4811 if ( FAILED( hr ) ) {
\r
4812 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
\r
4816 // start the render stream
\r
4817 hr = renderAudioClient->Start();
\r
4818 if ( FAILED( hr ) ) {
\r
4819 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
\r
// Size the conversion and device buffers for the active stream direction(s);
// DUPLEX takes the max of the input and output requirements so one buffer
// serves both directions.
4824 if ( stream_.mode == INPUT ) {
\r
4825 convBuffSize = ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4826 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
\r
4828 else if ( stream_.mode == OUTPUT ) {
\r
4829 convBuffSize = ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4830 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
\r
4832 else if ( stream_.mode == DUPLEX ) {
\r
4833 convBuffSize = std::max( ( size_t ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4834 ( size_t ) ( stream_.bufferSize * renderSrRatio ) * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4835 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
\r
4836 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
\r
4839 convBuffer = ( char* ) malloc( convBuffSize );
\r
4840 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
\r
4841 if ( !convBuffer || !stream_.deviceBuffer ) {
\r
4842 errorType = RtAudioError::MEMORY_ERROR;
\r
4843 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
\r
4847 // stream process loop
\r
4848 while ( stream_.state != STREAM_STOPPING ) {
\r
// Callback Input phase: try to pull one user-sized buffer of captured audio.
4849 if ( !callbackPulled ) {
\r
4852 // 1. Pull callback buffer from inputBuffer
\r
4853 // 2. If 1. was successful: Convert callback buffer to user sample rate and channel count
\r
4854 // Convert callback buffer to user format
\r
4856 if ( captureAudioClient ) {
\r
4857 // Pull callback buffer from inputBuffer
\r
4858 callbackPulled = captureBuffer.pullBuffer( convBuffer,
\r
4859 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ) * stream_.nDeviceChannels[INPUT],
\r
4860 stream_.deviceFormat[INPUT] );
\r
4862 if ( callbackPulled ) {
\r
4863 // Convert callback buffer to user sample rate
\r
4864 convertBufferWasapi( stream_.deviceBuffer,
\r
4866 stream_.nDeviceChannels[INPUT],
\r
4867 captureFormat->nSamplesPerSec,
\r
4868 stream_.sampleRate,
\r
4869 ( unsigned int ) ( stream_.bufferSize * captureSrRatio ),
\r
4871 stream_.deviceFormat[INPUT] );
\r
4873 if ( stream_.doConvertBuffer[INPUT] ) {
\r
4874 // Convert callback buffer to user format
\r
4875 convertBuffer( stream_.userBuffer[INPUT],
\r
4876 stream_.deviceBuffer,
\r
4877 stream_.convertInfo[INPUT] );
\r
4880 // no further conversion, simple copy deviceBuffer to userBuffer
\r
4881 memcpy( stream_.userBuffer[INPUT],
\r
4882 stream_.deviceBuffer,
\r
4883 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
\r
4888 // if there is no capture stream, set callbackPulled flag
\r
4889 callbackPulled = true;
\r
4892 // Execute Callback
\r
4893 // ================
\r
4894 // 1. Execute user callback method
\r
4895 // 2. Handle return value from callback
\r
4897 // if callback has not requested the stream to stop
\r
4898 if ( callbackPulled && !callbackStopped ) {
\r
4899 // Execute user callback method
\r
4900 callbackResult = callback( stream_.userBuffer[OUTPUT],
\r
4901 stream_.userBuffer[INPUT],
\r
4902 stream_.bufferSize,
\r
4904 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
\r
4905 stream_.callbackInfo.userData );
\r
4907 // Handle return value from callback
\r
// Return value 1 = drain and stop; 2 = abort immediately. In both cases a
// helper thread is spawned so this audio thread is not stopped from within.
4908 if ( callbackResult == 1 ) {
\r
4909 // instantiate a thread to stop this thread
\r
4910 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
\r
4911 if ( !threadHandle ) {
\r
4912 errorType = RtAudioError::THREAD_ERROR;
\r
4913 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
\r
4916 else if ( !CloseHandle( threadHandle ) ) {
\r
4917 errorType = RtAudioError::THREAD_ERROR;
\r
4918 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
\r
4922 callbackStopped = true;
\r
4924 else if ( callbackResult == 2 ) {
\r
4925 // instantiate a thread to stop this thread
\r
4926 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
\r
4927 if ( !threadHandle ) {
\r
4928 errorType = RtAudioError::THREAD_ERROR;
\r
4929 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
\r
4932 else if ( !CloseHandle( threadHandle ) ) {
\r
4933 errorType = RtAudioError::THREAD_ERROR;
\r
4934 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
\r
4938 callbackStopped = true;
\r
4943 // Callback Output
\r
4944 // ===============
\r
4945 // 1. Convert callback buffer to stream format
\r
4946 // 2. Convert callback buffer to stream sample rate and channel count
\r
4947 // 3. Push callback buffer into outputBuffer
\r
4949 if ( renderAudioClient && callbackPulled ) {
\r
4950 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
4951 // Convert callback buffer to stream format
\r
4952 convertBuffer( stream_.deviceBuffer,
\r
4953 stream_.userBuffer[OUTPUT],
\r
4954 stream_.convertInfo[OUTPUT] );
\r
4958 // Convert callback buffer to stream sample rate
\r
4959 convertBufferWasapi( convBuffer,
\r
4960 stream_.deviceBuffer,
\r
4961 stream_.nDeviceChannels[OUTPUT],
\r
4962 stream_.sampleRate,
\r
4963 renderFormat->nSamplesPerSec,
\r
4964 stream_.bufferSize,
\r
4966 stream_.deviceFormat[OUTPUT] );
\r
4968 // Push callback buffer into outputBuffer
\r
4969 callbackPushed = renderBuffer.pushBuffer( convBuffer,
\r
4970 convBufferSize * stream_.nDeviceChannels[OUTPUT],
\r
4971 stream_.deviceFormat[OUTPUT] );
\r
4974 // if there is no render stream, set callbackPushed flag
\r
4975 callbackPushed = true;
\r
// Stream Capture phase: drain the WASAPI endpoint into captureBuffer.
4980 // 1. Get capture buffer from stream
\r
4981 // 2. Push capture buffer into inputBuffer
\r
4982 // 3. If 2. was successful: Release capture buffer
\r
4984 if ( captureAudioClient ) {
\r
4985 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
\r
4986 if ( !callbackPulled ) {
\r
4987 WaitForSingleObject( captureEvent, INFINITE );
\r
4990 // Get capture buffer from stream
\r
4991 hr = captureClient->GetBuffer( &streamBuffer,
\r
4992 &bufferFrameCount,
\r
4993 &captureFlags, NULL, NULL );
\r
4994 if ( FAILED( hr ) ) {
\r
4995 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
\r
4999 if ( bufferFrameCount != 0 ) {
\r
5000 // Push capture buffer into inputBuffer
\r
5001 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
\r
5002 bufferFrameCount * stream_.nDeviceChannels[INPUT],
\r
5003 stream_.deviceFormat[INPUT] ) )
\r
5005 // Release capture buffer
\r
5006 hr = captureClient->ReleaseBuffer( bufferFrameCount );
\r
5007 if ( FAILED( hr ) ) {
\r
5008 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5014 // Inform WASAPI that capture was unsuccessful
\r
5015 hr = captureClient->ReleaseBuffer( 0 );
\r
5016 if ( FAILED( hr ) ) {
\r
5017 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
5024 // Inform WASAPI that capture was unsuccessful
\r
5025 hr = captureClient->ReleaseBuffer( 0 );
\r
5026 if ( FAILED( hr ) ) {
\r
5027 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
\r
// Stream Render phase: feed renderBuffer contents to the WASAPI endpoint.
5035 // 1. Get render buffer from stream
\r
5036 // 2. Pull next buffer from outputBuffer
\r
5037 // 3. If 2. was successful: Fill render buffer with next buffer
\r
5038 // Release render buffer
\r
5040 if ( renderAudioClient ) {
\r
5041 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
\r
5042 if ( callbackPulled && !callbackPushed ) {
\r
5043 WaitForSingleObject( renderEvent, INFINITE );
\r
5046 // Get render buffer from stream
\r
5047 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
\r
5048 if ( FAILED( hr ) ) {
\r
5049 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
\r
5053 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
\r
5054 if ( FAILED( hr ) ) {
\r
5055 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
\r
// Writable frames = total endpoint buffer minus frames still queued.
5059 bufferFrameCount -= numFramesPadding;
\r
5061 if ( bufferFrameCount != 0 ) {
\r
5062 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
\r
5063 if ( FAILED( hr ) ) {
\r
5064 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
\r
5068 // Pull next buffer from outputBuffer
\r
5069 // Fill render buffer with next buffer
\r
5070 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
\r
5071 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
\r
5072 stream_.deviceFormat[OUTPUT] ) )
\r
5074 // Release render buffer
\r
5075 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
\r
5076 if ( FAILED( hr ) ) {
\r
5077 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5083 // Inform WASAPI that render was unsuccessful
\r
5084 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5085 if ( FAILED( hr ) ) {
\r
5086 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5093 // Inform WASAPI that render was unsuccessful
\r
5094 hr = renderClient->ReleaseBuffer( 0, 0 );
\r
5095 if ( FAILED( hr ) ) {
\r
5096 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
\r
5102 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
\r
5103 if ( callbackPushed ) {
\r
5104 callbackPulled = false;
\r
5107 // tick stream time
\r
5108 RtApi::tickStreamTime();
\r
// Exit/cleanup path: release COM-allocated mix formats and the conversion
// buffer, then mark the stream stopped and report any queued error.
// NOTE(review): the `Exit:` label, CoUninitialize() call, and the
// empty-errorText_ return branch of the original are among the lines missing
// from this decimated listing.
5113 CoTaskMemFree( captureFormat );
\r
5114 CoTaskMemFree( renderFormat );
\r
5116 free ( convBuffer );
\r
5120 // update stream state
\r
5121 stream_.state = STREAM_STOPPED;
\r
5123 if ( errorText_.empty() )
\r
5126 error( errorType );
\r
5129 //******************** End of __WINDOWS_WASAPI__ *********************//
\r
5133 #if defined(__WINDOWS_DS__) // Windows DirectSound API
\r
5135 // Modified by Robin Davies, October 2005
\r
5136 // - Improvements to DirectX pointer chasing.
\r
5137 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
\r
5138 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
\r
5139 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
\r
5140 // Changed device query structure for RtAudio 4.0.7, January 2010
\r
5142 #include <dsound.h>
\r
5143 #include <assert.h>
\r
5144 #include <algorithm>
\r
5146 #if defined(__MINGW32__)
\r
5147 // missing from latest mingw winapi
\r
5148 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
\r
5149 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
\r
5150 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
\r
5151 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
\r
5154 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
\r
5156 #ifdef _MSC_VER // if Microsoft Visual C++
\r
5157 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
\r
5160 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
\r
5162 if ( pointer > bufferSize ) pointer -= bufferSize;
\r
5163 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
\r
5164 if ( pointer < earlierPointer ) pointer += bufferSize;
\r
5165 return pointer >= earlierPointer && pointer < laterPointer;
\r
5168 // A structure to hold various information related to the DirectSound
\r
5169 // API implementation.
\r
5171 unsigned int drainCounter; // Tracks callback counts when draining
\r
5172 bool internalDrain; // Indicates if stop is initiated from callback or not.
\r
5176 UINT bufferPointer[2];
\r
5177 DWORD dsBufferSize[2];
\r
5178 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
\r
5182 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
\r
5185 // Declarations for utility functions, callbacks, and structures
\r
5186 // specific to the DirectSound implementation.
\r
5187 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
5188 LPCTSTR description,
\r
5190 LPVOID lpContext );
\r
5192 static const char* getErrorString( int code );
\r
5194 static unsigned __stdcall callbackHandler( void *ptr );
\r
5203 : found(false) { validId[0] = false; validId[1] = false; }
\r
5206 struct DsProbeData {
\r
5208 std::vector<struct DsDevice>* dsDevices;
\r
// Constructor: initialize COM for this thread (DirectSound requires it) and
// remember whether we own the CoInitialize so the destructor can balance it.
// NOTE(review): the surrounding braces are missing from this decimated listing.
5211 RtApiDs :: RtApiDs()
\r
5213 // Dsound will run both-threaded. If CoInitialize fails, then just
\r
5214 // accept whatever the mainline chose for a threading model.
\r
5215 coInitialized_ = false;
\r
5216 HRESULT hr = CoInitialize( NULL );
\r
5217 if ( !FAILED( hr ) ) coInitialized_ = true;
\r
// Destructor: balance the constructor's CoInitialize (only if it succeeded
// there) and make sure an open stream is closed before teardown.
// NOTE(review): the surrounding braces are missing from this decimated listing.
5220 RtApiDs :: ~RtApiDs()
\r
5222 if ( coInitialized_ ) CoUninitialize(); // balanced call.
\r
5223 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
5226 // The DirectSound default output is always the first device.
\r
5227 unsigned int RtApiDs :: getDefaultOutputDevice( void )
\r
5232 // The DirectSound default input is always the first input device,
\r
5233 // which is the first capture device enumerated.
\r
5234 unsigned int RtApiDs :: getDefaultInputDevice( void )
\r
// Re-enumerate DirectSound output and capture devices into the dsDevices
// cache (via deviceQueryCallback), drop devices that have disappeared since
// the last query, and return the number of devices currently present.
// Enumeration failures are reported as warnings, not fatal errors.
// NOTE(review): braces and some lines are missing from this decimated listing.
5239 unsigned int RtApiDs :: getDeviceCount( void )
\r
5241 // Set query flag for previously found devices to false, so that we
\r
5242 // can check for any devices that have disappeared.
\r
5243 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5244 dsDevices[i].found = false;
\r
5246 // Query DirectSound devices.
\r
5247 struct DsProbeData probeInfo;
\r
5248 probeInfo.isInput = false;
\r
5249 probeInfo.dsDevices = &dsDevices;
\r
5250 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5251 if ( FAILED( result ) ) {
\r
5252 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
\r
5253 errorText_ = errorStream_.str();
\r
5254 error( RtAudioError::WARNING );
\r
5257 // Query DirectSoundCapture devices.
\r
5258 probeInfo.isInput = true;
\r
5259 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
\r
5260 if ( FAILED( result ) ) {
\r
5261 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
\r
5262 errorText_ = errorStream_.str();
\r
5263 error( RtAudioError::WARNING );
\r
5266 // Clean out any devices that may have disappeared.
\r
5267 std::vector< int > indices;
\r
5268 for ( unsigned int i=0; i<dsDevices.size(); i++ )
\r
5269 if ( dsDevices[i].found == false ) indices.push_back( i );
\r
5270 //unsigned int nErased = 0;
\r
// NOTE(review): erasing by ascending index shifts every later element left,
// so after the first erase the remaining indices[i] no longer point at the
// intended devices when more than one device vanished. Erasing in reverse
// index order would be correct — confirm against the canonical upstream fix.
5271 for ( unsigned int i=0; i<indices.size(); i++ )
\r
5272 dsDevices.erase( dsDevices.begin()+indices[i] );
\r
5273 //dsDevices.erase( dsDevices.begin()-nErased++ );
\r
5275 return static_cast<unsigned int>(dsDevices.size());
\r
// Probe one cached DirectSound device and fill an RtAudio::DeviceInfo:
// output side via DirectSoundCreate/GetCaps (channels, secondary sample-rate
// range, 8/16-bit formats), then input side via DirectSoundCaptureCreate/
// GetCaps (channels plus per-rate/per-format WAVE_FORMAT_* capability bits).
// Failures are reported as warnings and leave info.probed == false.
// NOTE(review): this decimated listing is missing braces, `goto` targets
// (e.g. the `probeInput:` label referenced at original line 5300), the
// declarations of `result`, `outCaps`, `inCaps`, `found`, and several
// control-flow lines — the comments below describe only what is visible.
5278 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
\r
5280 RtAudio::DeviceInfo info;
\r
5281 info.probed = false;
\r
5283 if ( dsDevices.size() == 0 ) {
\r
5284 // Force a query of all devices
\r
5286 if ( dsDevices.size() == 0 ) {
\r
5287 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
\r
5288 error( RtAudioError::INVALID_USE );
\r
5293 if ( device >= dsDevices.size() ) {
\r
5294 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
\r
5295 error( RtAudioError::INVALID_USE );
\r
// Skip the output probe entirely when the device has no output GUID.
5300 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
\r
5302 LPDIRECTSOUND output;
\r
5304 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
\r
5305 if ( FAILED( result ) ) {
\r
5306 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
\r
5307 errorText_ = errorStream_.str();
\r
5308 error( RtAudioError::WARNING );
\r
5312 outCaps.dwSize = sizeof( outCaps );
\r
5313 result = output->GetCaps( &outCaps );
\r
5314 if ( FAILED( result ) ) {
\r
5315 output->Release();
\r
5316 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
\r
5317 errorText_ = errorStream_.str();
\r
5318 error( RtAudioError::WARNING );
\r
5322 // Get output channel information.
\r
5323 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
\r
5325 // Get sample rate information.
\r
5326 info.sampleRates.clear();
\r
// Keep only the globally supported SAMPLE_RATES that fall inside the
// device's secondary-buffer rate range.
5327 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
5328 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
\r
5329 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate )
\r
5330 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
5333 // Get format information.
\r
5334 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5335 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5337 output->Release();
\r
5339 if ( getDefaultOutputDevice() == device )
\r
5340 info.isDefaultOutput = true;
\r
// Output-only device: finished once the name is copied.
5342 if ( dsDevices[ device ].validId[1] == false ) {
\r
5343 info.name = dsDevices[ device ].name;
\r
5344 info.probed = true;
\r
5350 LPDIRECTSOUNDCAPTURE input;
\r
5351 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
\r
5352 if ( FAILED( result ) ) {
\r
5353 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
\r
5354 errorText_ = errorStream_.str();
\r
5355 error( RtAudioError::WARNING );
\r
5360 inCaps.dwSize = sizeof( inCaps );
\r
5361 result = input->GetCaps( &inCaps );
\r
5362 if ( FAILED( result ) ) {
\r
5364 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
\r
5365 errorText_ = errorStream_.str();
\r
5366 error( RtAudioError::WARNING );
\r
5370 // Get input channel information.
\r
5371 info.inputChannels = inCaps.dwChannels;
\r
5373 // Get sample rate and format information.
\r
// dwFormats is a bitmask of fixed rate/channel/width combinations; the
// stereo (>=2 channel) and mono cases are decoded separately below.
5374 std::vector<unsigned int> rates;
\r
5375 if ( inCaps.dwChannels >= 2 ) {
\r
5376 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5377 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5378 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5379 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5380 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5381 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5382 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5383 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
// Prefer 16-bit rates when available; fall back to 8-bit rates.
5385 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5386 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
\r
5387 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
\r
5388 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
\r
5389 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
\r
5391 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5392 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
\r
5393 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
\r
5394 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
\r
5395 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
\r
5398 else if ( inCaps.dwChannels == 1 ) {
\r
5399 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5400 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5401 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5402 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
\r
5403 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5404 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5405 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5406 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
\r
5408 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
\r
5409 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
\r
5410 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
\r
5411 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
\r
5412 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
\r
5414 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
\r
5415 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
\r
5416 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
\r
5417 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
\r
5418 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
\r
5421 else info.inputChannels = 0; // technically, this would be an error
\r
5425 if ( info.inputChannels == 0 ) return info;
\r
5427 // Copy the supported rates to the info structure but avoid duplication.
\r
5429 for ( unsigned int i=0; i<rates.size(); i++ ) {
\r
5431 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
\r
5432 if ( rates[i] == info.sampleRates[j] ) {
\r
5437 if ( found == false ) info.sampleRates.push_back( rates[i] );
\r
5439 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
\r
5441 // If device opens for both playback and capture, we determine the channels.
\r
5442 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
5443 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
5445 if ( device == 0 ) info.isDefaultInput = true;
\r
5447 // Copy name and return.
\r
5448 info.name = dsDevices[ device ].name;
\r
5449 info.probed = true;
\r
// Open DirectSound device `device` for the given mode (OUTPUT or INPUT):
// validates the request, creates the IDirectSound / IDirectSoundCapture
// object, configures the primary + secondary (or capture) buffers, allocates
// the user/device conversion buffers and the shared DsHandle, and starts the
// callback thread the first time a stream is opened. Returns SUCCESS/FAILURE.
// NOTE(review): this listing is lossy -- the embedded line numbers jump in
// places (e.g. 5459->5463), so the error-return statements and cleanup labels
// between those jumps are not visible here. Verify against the canonical
// RtAudio source before modifying this function.
5453 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,

5454 unsigned int firstChannel, unsigned int sampleRate,

5455 RtAudioFormat format, unsigned int *bufferSize,

5456 RtAudio::StreamOptions *options )

// DirectSound supports at most two channels (stereo) per device.
5458 if ( channels + firstChannel > 2 ) {

5459 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";

5463 size_t nDevices = dsDevices.size();

5464 if ( nDevices == 0 ) {

5465 // This should not happen because a check is made before this function is called.

5466 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";

5470 if ( device >= nDevices ) {

5471 // This should not happen because a check is made before this function is called.

5472 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";

// validId[0] marks playback capability, validId[1] marks capture capability.
5476 if ( mode == OUTPUT ) {

5477 if ( dsDevices[ device ].validId[0] == false ) {

5478 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";

5479 errorText_ = errorStream_.str();

5483 else { // mode == INPUT

5484 if ( dsDevices[ device ].validId[1] == false ) {

5485 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";

5486 errorText_ = errorStream_.str();

5491 // According to a note in PortAudio, using GetDesktopWindow()

5492 // instead of GetForegroundWindow() is supposed to avoid problems

5493 // that occur when the application's window is not the foreground

5494 // window. Also, if the application window closes before the

5495 // DirectSound buffer, DirectSound can crash. In the past, I had

5496 // problems when using GetDesktopWindow() but it seems fine now

5497 // (January 2010). I'll leave it commented here.

5498 // HWND hWnd = GetForegroundWindow();

5499 HWND hWnd = GetDesktopWindow();

5501 // Check the numberOfBuffers parameter and limit the lowest value to

5502 // two. This is a judgement call and a value of two is probably too

5503 // low for capture, but it should work for playback.

5505 if ( options ) nBuffers = options->numberOfBuffers;

5506 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;

5507 if ( nBuffers < 2 ) nBuffers = 3;

5509 // Check the lower range of the user-specified buffer size and set

5510 // (arbitrarily) to a lower bound of 32.

5511 if ( *bufferSize < 32 ) *bufferSize = 32;

5513 // Create the wave format structure. The data format setting will

5514 // be determined later.

5515 WAVEFORMATEX waveFormat;

5516 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );

5517 waveFormat.wFormatTag = WAVE_FORMAT_PCM;

5518 waveFormat.nChannels = channels + firstChannel;

5519 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;

5521 // Determine the device buffer size. By default, we'll use the value

5522 // defined above (32K), but we will grow it to make allowances for

5523 // very large software buffer sizes.

5524 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;

5525 DWORD dsPointerLeadTime = 0;

// ohandle/bhandle carry the created DS object + buffer into the DsHandle below.
5527 void *ohandle = 0, *bhandle = 0;

5529 if ( mode == OUTPUT ) {

5531 LPDIRECTSOUND output;

5532 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );

5533 if ( FAILED( result ) ) {

5534 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";

5535 errorText_ = errorStream_.str();

5540 outCaps.dwSize = sizeof( outCaps );

5541 result = output->GetCaps( &outCaps );

5542 if ( FAILED( result ) ) {

5543 output->Release();

5544 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";

5545 errorText_ = errorStream_.str();

5549 // Check channel information.

5550 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {

5551 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";

5552 errorText_ = errorStream_.str();

5556 // Check format information. Use 16-bit format unless not

5557 // supported or user requests 8-bit.

5558 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&

5559 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {

5560 waveFormat.wBitsPerSample = 16;

5561 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5564 waveFormat.wBitsPerSample = 8;

5565 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5567 stream_.userFormat = format;

5569 // Update wave format structure and buffer information.

5570 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5571 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

// Lead time in bytes: how far ahead of the safe-write pointer we write.
5572 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5574 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5575 while ( dsPointerLeadTime * 2U > dsBufferSize )

5576 dsBufferSize *= 2;

5578 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.

5579 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );

5580 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.

5581 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );

5582 if ( FAILED( result ) ) {

5583 output->Release();

5584 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";

5585 errorText_ = errorStream_.str();

5589 // Even though we will write to the secondary buffer, we need to

5590 // access the primary buffer to set the correct output format

5591 // (since the default is 8-bit, 22 kHz!). Setup the DS primary

5592 // buffer description.

5593 DSBUFFERDESC bufferDescription;

5594 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5595 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5596 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;

5598 // Obtain the primary buffer

5599 LPDIRECTSOUNDBUFFER buffer;

5600 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5601 if ( FAILED( result ) ) {

5602 output->Release();

5603 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";

5604 errorText_ = errorStream_.str();

5608 // Set the primary DS buffer sound format.

5609 result = buffer->SetFormat( &waveFormat );

5610 if ( FAILED( result ) ) {

5611 output->Release();

5612 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";

5613 errorText_ = errorStream_.str();

5617 // Setup the secondary DS buffer description.

5618 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );

5619 bufferDescription.dwSize = sizeof( DSBUFFERDESC );

5620 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5621 DSBCAPS_GLOBALFOCUS |

5622 DSBCAPS_GETCURRENTPOSITION2 |

5623 DSBCAPS_LOCHARDWARE ); // Force hardware mixing

5624 bufferDescription.dwBufferBytes = dsBufferSize;

5625 bufferDescription.lpwfxFormat = &waveFormat;

5627 // Try to create the secondary DS buffer. If that doesn't work,

5628 // try to use software mixing. Otherwise, there's a problem.

5629 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5630 if ( FAILED( result ) ) {

5631 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |

5632 DSBCAPS_GLOBALFOCUS |

5633 DSBCAPS_GETCURRENTPOSITION2 |

5634 DSBCAPS_LOCSOFTWARE ); // Force software mixing

5635 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );

5636 if ( FAILED( result ) ) {

5637 output->Release();

5638 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";

5639 errorText_ = errorStream_.str();

5644 // Get the buffer size ... might be different from what we specified.

5646 dsbcaps.dwSize = sizeof( DSBCAPS );

5647 result = buffer->GetCaps( &dsbcaps );

5648 if ( FAILED( result ) ) {

5649 output->Release();

5650 buffer->Release();

5651 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5652 errorText_ = errorStream_.str();

5656 dsBufferSize = dsbcaps.dwBufferBytes;

5658 // Lock the DS buffer

5661 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5662 if ( FAILED( result ) ) {

5663 output->Release();

5664 buffer->Release();

5665 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";

5666 errorText_ = errorStream_.str();

5670 // Zero the DS buffer

5671 ZeroMemory( audioPtr, dataLen );

5673 // Unlock the DS buffer

5674 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5675 if ( FAILED( result ) ) {

5676 output->Release();

5677 buffer->Release();

5678 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";

5679 errorText_ = errorStream_.str();

5683 ohandle = (void *) output;

5684 bhandle = (void *) buffer;

5687 if ( mode == INPUT ) {

5689 LPDIRECTSOUNDCAPTURE input;

5690 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );

5691 if ( FAILED( result ) ) {

5692 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";

5693 errorText_ = errorStream_.str();

5698 inCaps.dwSize = sizeof( inCaps );

5699 result = input->GetCaps( &inCaps );

5700 if ( FAILED( result ) ) {

5702 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";

5703 errorText_ = errorStream_.str();

5707 // Check channel information.

5708 if ( inCaps.dwChannels < channels + firstChannel ) {

5709 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";

5713 // Check format information. Use 16-bit format unless user

5714 // requests 8-bit.

5715 DWORD deviceFormats;

5716 if ( channels + firstChannel == 2 ) {

5717 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;

5718 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5719 waveFormat.wBitsPerSample = 8;

5720 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5722 else { // assume 16-bit is supported

5723 waveFormat.wBitsPerSample = 16;

5724 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5727 else { // channel == 1

5728 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;

5729 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {

5730 waveFormat.wBitsPerSample = 8;

5731 stream_.deviceFormat[mode] = RTAUDIO_SINT8;

5733 else { // assume 16-bit is supported

5734 waveFormat.wBitsPerSample = 16;

5735 stream_.deviceFormat[mode] = RTAUDIO_SINT16;

5738 stream_.userFormat = format;

5740 // Update wave format structure and buffer information.

5741 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;

5742 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;

5743 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;

5745 // If the user wants an even bigger buffer, increase the device buffer size accordingly.

5746 while ( dsPointerLeadTime * 2U > dsBufferSize )

5747 dsBufferSize *= 2;

5749 // Setup the secondary DS buffer description.

5750 DSCBUFFERDESC bufferDescription;

5751 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );

5752 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );

5753 bufferDescription.dwFlags = 0;

5754 bufferDescription.dwReserved = 0;

5755 bufferDescription.dwBufferBytes = dsBufferSize;

5756 bufferDescription.lpwfxFormat = &waveFormat;

5758 // Create the capture buffer.

5759 LPDIRECTSOUNDCAPTUREBUFFER buffer;

5760 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );

5761 if ( FAILED( result ) ) {

5763 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";

5764 errorText_ = errorStream_.str();

5768 // Get the buffer size ... might be different from what we specified.

5769 DSCBCAPS dscbcaps;

5770 dscbcaps.dwSize = sizeof( DSCBCAPS );

5771 result = buffer->GetCaps( &dscbcaps );

5772 if ( FAILED( result ) ) {

5774 buffer->Release();

5775 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";

5776 errorText_ = errorStream_.str();

5780 dsBufferSize = dscbcaps.dwBufferBytes;

5782 // NOTE: We could have a problem here if this is a duplex stream

5783 // and the play and capture hardware buffer sizes are different

5784 // (I'm actually not sure if that is a problem or not).

5785 // Currently, we are not verifying that.

5787 // Lock the capture buffer

5790 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );

5791 if ( FAILED( result ) ) {

5793 buffer->Release();

5794 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";

5795 errorText_ = errorStream_.str();

5799 // Zero the buffer

5800 ZeroMemory( audioPtr, dataLen );

5802 // Unlock the buffer

5803 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

5804 if ( FAILED( result ) ) {

5806 buffer->Release();

5807 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";

5808 errorText_ = errorStream_.str();

5812 ohandle = (void *) input;

5813 bhandle = (void *) buffer;

5816 // Set various stream parameters

5817 DsHandle *handle = 0;

5818 stream_.nDeviceChannels[mode] = channels + firstChannel;

5819 stream_.nUserChannels[mode] = channels;

5820 stream_.bufferSize = *bufferSize;

5821 stream_.channelOffset[mode] = firstChannel;

5822 stream_.deviceInterleaved[mode] = true;

5823 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;

5824 else stream_.userInterleaved = true;

5826 // Set flag for buffer conversion

// Conversion is needed if channel count, sample format, or interleaving
// differs between the user-facing and device-facing sides.
5827 stream_.doConvertBuffer[mode] = false;

5828 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])

5829 stream_.doConvertBuffer[mode] = true;

5830 if (stream_.userFormat != stream_.deviceFormat[mode])

5831 stream_.doConvertBuffer[mode] = true;

5832 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&

5833 stream_.nUserChannels[mode] > 1 )

5834 stream_.doConvertBuffer[mode] = true;

5836 // Allocate necessary internal buffers

5837 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );

5838 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );

5839 if ( stream_.userBuffer[mode] == NULL ) {

5840 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";

5844 if ( stream_.doConvertBuffer[mode] ) {

5846 bool makeBuffer = true;

5847 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );

// For duplex streams an existing (output-side) device buffer is reused when
// it is already large enough for the input side.
5848 if ( mode == INPUT ) {

5849 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {

5850 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );

5851 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;

5855 if ( makeBuffer ) {

5856 bufferBytes *= *bufferSize;

5857 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );

5858 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );

5859 if ( stream_.deviceBuffer == NULL ) {

5860 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";

5866 // Allocate our DsHandle structures for the stream.

5867 if ( stream_.apiHandle == 0 ) {

5869 handle = new DsHandle;

5871 catch ( std::bad_alloc& ) {

5872 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";

5876 // Create a manual-reset event.

// Signaled by the callback thread when a drain completes; see stopStream().
5877 handle->condition = CreateEvent( NULL, // no security

5878 TRUE, // manual-reset

5879 FALSE, // non-signaled initially

5880 NULL ); // unnamed

5881 stream_.apiHandle = (void *) handle;

5884 handle = (DsHandle *) stream_.apiHandle;

5885 handle->id[mode] = ohandle;

5886 handle->buffer[mode] = bhandle;

5887 handle->dsBufferSize[mode] = dsBufferSize;

5888 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;

5890 stream_.device[mode] = device;

5891 stream_.state = STREAM_STOPPED;

5892 if ( stream_.mode == OUTPUT && mode == INPUT )

5893 // We had already set up an output stream.

5894 stream_.mode = DUPLEX;

5896 stream_.mode = mode;

5897 stream_.nBuffers = nBuffers;

5898 stream_.sampleRate = sampleRate;

5900 // Setup the buffer conversion information structure.

5901 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );

5903 // Setup the callback thread.

5904 if ( stream_.callbackInfo.isRunning == false ) {

5905 unsigned threadId;

5906 stream_.callbackInfo.isRunning = true;

5907 stream_.callbackInfo.object = (void *) this;

5908 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,

5909 &stream_.callbackInfo, 0, &threadId );

5910 if ( stream_.callbackInfo.thread == 0 ) {

5911 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";

5915 // Boost DS thread priority

5916 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );

// Error-unwind path (presumably reached via goto from the failures above --
// the label itself is not visible in this listing): release any DS objects
// and buffers created so far, then free the stream's allocations.
5922 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5923 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5924 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5925 if ( buffer ) buffer->Release();

5926 object->Release();

5928 if ( handle->buffer[1] ) {

5929 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5930 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5931 if ( buffer ) buffer->Release();

5932 object->Release();

5934 CloseHandle( handle->condition );

5936 stream_.apiHandle = 0;

5939 for ( int i=0; i<2; i++ ) {

5940 if ( stream_.userBuffer[i] ) {

5941 free( stream_.userBuffer[i] );

5942 stream_.userBuffer[i] = 0;

5946 if ( stream_.deviceBuffer ) {

5947 free( stream_.deviceBuffer );

5948 stream_.deviceBuffer = 0;

5951 stream_.state = STREAM_CLOSED;
\r
// Close the open stream: stop the callback thread, release the DirectSound
// playback/capture objects and their buffers, close the condition event, and
// free the user/device buffers. Resets mode to UNINITIALIZED and state to
// STREAM_CLOSED. Warns (and presumably returns early -- the return is not
// visible in this lossy listing) if no stream is open.
5955 void RtApiDs :: closeStream()

5957 if ( stream_.state == STREAM_CLOSED ) {

5958 errorText_ = "RtApiDs::closeStream(): no open stream to close!";

5959 error( RtAudioError::WARNING );

5963 // Stop the callback thread.

// Clearing isRunning signals the thread loop to exit; we then join it.
5964 stream_.callbackInfo.isRunning = false;

5965 WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );

5966 CloseHandle( (HANDLE) stream_.callbackInfo.thread );

5968 DsHandle *handle = (DsHandle *) stream_.apiHandle;

// Index 0 = playback side, index 1 = capture side of the DsHandle.
5970 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid

5971 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];

5972 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

5975 buffer->Release();

5977 object->Release();

5979 if ( handle->buffer[1] ) {

5980 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];

5981 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

5984 buffer->Release();

5986 object->Release();

5988 CloseHandle( handle->condition );

5990 stream_.apiHandle = 0;

5993 for ( int i=0; i<2; i++ ) {

5994 if ( stream_.userBuffer[i] ) {

5995 free( stream_.userBuffer[i] );

5996 stream_.userBuffer[i] = 0;

6000 if ( stream_.deviceBuffer ) {

6001 free( stream_.deviceBuffer );

6002 stream_.deviceBuffer = 0;

6005 stream_.mode = UNINITIALIZED;

6006 stream_.state = STREAM_CLOSED;
\r
// Start a stopped stream: raise the multimedia timer resolution, compute the
// duplex preroll, start the DS playback buffer (Play) and/or capture buffer
// (Start) in looping mode, reset the drain bookkeeping, and mark the stream
// RUNNING. Reports SYSTEM_ERROR if any DirectSound call failed.
6009 void RtApiDs :: startStream()

6012 if ( stream_.state == STREAM_RUNNING ) {

6013 errorText_ = "RtApiDs::startStream(): the stream is already running!";

6014 error( RtAudioError::WARNING );

6018 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6020 // Increase scheduler frequency on lesser windows (a side-effect of

6021 // increasing timer accuracy). On greater windows (Win2K or later),

6022 // this is already in effect.

// Balanced by timeEndPeriod( 1 ) in stopStream().
6023 timeBeginPeriod( 1 );

6025 buffersRolling = false;

6026 duplexPrerollBytes = 0;

6028 if ( stream_.mode == DUPLEX ) {

6029 // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.

6030 duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );

6033 HRESULT result = 0;

6034 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

6036 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

// Looping playback: the callback thread continually refills this ring buffer.
6037 result = buffer->Play( 0, 0, DSBPLAY_LOOPING );

6038 if ( FAILED( result ) ) {

6039 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";

6040 errorText_ = errorStream_.str();

6045 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6047 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6048 result = buffer->Start( DSCBSTART_LOOPING );

6049 if ( FAILED( result ) ) {

6050 errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";

6051 errorText_ = errorStream_.str();

6056 handle->drainCounter = 0;

6057 handle->internalDrain = false;

6058 ResetEvent( handle->condition );

6059 stream_.state = STREAM_RUNNING;

6062 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Stop a running stream gracefully: for output, wait for the callback thread
// to drain pending audio (via the manual-reset condition event), then Stop
// each DS buffer, zero its contents (so a restart does not replay stale
// audio), and rewind the software buffer pointers. Restores the normal timer
// resolution and reports SYSTEM_ERROR if any DirectSound call failed.
// NOTE(review): this listing is lossy (embedded numbering jumps, e.g.
// 6081->6084), so some intermediate lines are not visible here.
6065 void RtApiDs :: stopStream()

6068 if ( stream_.state == STREAM_STOPPED ) {

6069 errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";

6070 error( RtAudioError::WARNING );

6074 HRESULT result = 0;

6077 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6078 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {

// drainCounter == 0 means no drain is in progress yet: request one (2) and
// block until the callback thread signals the condition event.
6079 if ( handle->drainCounter == 0 ) {

6080 handle->drainCounter = 2;

6081 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled

6084 stream_.state = STREAM_STOPPED;

6086 MUTEX_LOCK( &stream_.mutex );

6088 // Stop the buffer and clear memory

6089 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];

6090 result = buffer->Stop();

6091 if ( FAILED( result ) ) {

6092 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";

6093 errorText_ = errorStream_.str();

6097 // Lock the buffer and clear it so that if we start to play again,

6098 // we won't have old data playing.

6099 result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );

6100 if ( FAILED( result ) ) {

6101 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";

6102 errorText_ = errorStream_.str();

6106 // Zero the DS buffer

6107 ZeroMemory( audioPtr, dataLen );

6109 // Unlock the DS buffer

6110 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6111 if ( FAILED( result ) ) {

6112 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";

6113 errorText_ = errorStream_.str();

6117 // If we start playing again, we must begin at beginning of buffer.

6118 handle->bufferPointer[0] = 0;

6121 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {

6122 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];

6126 stream_.state = STREAM_STOPPED;

// In DUPLEX mode the mutex was already taken by the output branch above.
6128 if ( stream_.mode != DUPLEX )

6129 MUTEX_LOCK( &stream_.mutex );

6131 result = buffer->Stop();

6132 if ( FAILED( result ) ) {

6133 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";

6134 errorText_ = errorStream_.str();

6138 // Lock the buffer and clear it so that if we start to play again,

6139 // we won't have old data playing.

6140 result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );

6141 if ( FAILED( result ) ) {

6142 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";

6143 errorText_ = errorStream_.str();

6147 // Zero the DS buffer

6148 ZeroMemory( audioPtr, dataLen );

6150 // Unlock the DS buffer

6151 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );

6152 if ( FAILED( result ) ) {

6153 errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";

6154 errorText_ = errorStream_.str();

6158 // If we start recording again, we must begin at beginning of buffer.

6159 handle->bufferPointer[1] = 0;

6163 timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.

6164 MUTEX_UNLOCK( &stream_.mutex );

6166 if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
\r
// Abort a running stream: setting drainCounter to 2 tells the callback thread
// to stop immediately instead of playing out remaining audio. (The lines after
// the assignment -- presumably the stopStream() call that completes the abort
// -- are not visible in this lossy listing; confirm against the canonical
// RtAudio source.)
6169 void RtApiDs :: abortStream()

6172 if ( stream_.state == STREAM_STOPPED ) {

6173 errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";

6174 error( RtAudioError::WARNING );

6178 DsHandle *handle = (DsHandle *) stream_.apiHandle;

6179 handle->drainCounter = 2;
\r
6184 void RtApiDs :: callbackEvent()
\r
6186 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
\r
6187 Sleep( 50 ); // sleep 50 milliseconds
\r
6191 if ( stream_.state == STREAM_CLOSED ) {
\r
6192 errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
6193 error( RtAudioError::WARNING );
\r
6197 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
\r
6198 DsHandle *handle = (DsHandle *) stream_.apiHandle;
\r
6200 // Check if we were draining the stream and signal is finished.
\r
6201 if ( handle->drainCounter > stream_.nBuffers + 2 ) {
\r
6203 stream_.state = STREAM_STOPPING;
\r
6204 if ( handle->internalDrain == false )
\r
6205 SetEvent( handle->condition );
\r
6211 // Invoke user callback to get fresh output data UNLESS we are
\r
6212 // draining stream.
\r
6213 if ( handle->drainCounter == 0 ) {
\r
6214 RtAudioCallback callback = (RtAudioCallback) info->callback;
\r
6215 double streamTime = getStreamTime();
\r
6216 RtAudioStreamStatus status = 0;
\r
6217 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
6218 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
6219 handle->xrun[0] = false;
\r
6221 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
6222 status |= RTAUDIO_INPUT_OVERFLOW;
\r
6223 handle->xrun[1] = false;
\r
6225 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
6226 stream_.bufferSize, streamTime, status, info->userData );
\r
6227 if ( cbReturnValue == 2 ) {
\r
6228 stream_.state = STREAM_STOPPING;
\r
6229 handle->drainCounter = 2;
\r
6233 else if ( cbReturnValue == 1 ) {
\r
6234 handle->drainCounter = 1;
\r
6235 handle->internalDrain = true;
\r
6240 DWORD currentWritePointer, safeWritePointer;
\r
6241 DWORD currentReadPointer, safeReadPointer;
\r
6242 UINT nextWritePointer;
\r
6244 LPVOID buffer1 = NULL;
\r
6245 LPVOID buffer2 = NULL;
\r
6246 DWORD bufferSize1 = 0;
\r
6247 DWORD bufferSize2 = 0;
\r
6252 MUTEX_LOCK( &stream_.mutex );
\r
6253 if ( stream_.state == STREAM_STOPPED ) {
\r
6254 MUTEX_UNLOCK( &stream_.mutex );
\r
6258 if ( buffersRolling == false ) {
\r
6259 if ( stream_.mode == DUPLEX ) {
\r
6260 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6262 // It takes a while for the devices to get rolling. As a result,
\r
6263 // there's no guarantee that the capture and write device pointers
\r
6264 // will move in lockstep. Wait here for both devices to start
\r
6265 // rolling, and then set our buffer pointers accordingly.
\r
6266 // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
\r
6267 // bytes later than the write buffer.
\r
6269 // Stub: a serious risk of having a pre-emptive scheduling round
\r
6270 // take place between the two GetCurrentPosition calls... but I'm
\r
6271 // really not sure how to solve the problem. Temporarily boost to
\r
6272 // Realtime priority, maybe; but I'm not sure what priority the
\r
6273 // DirectSound service threads run at. We *should* be roughly
\r
6274 // within a ms or so of correct.
\r
6276 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6277 LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6279 DWORD startSafeWritePointer, startSafeReadPointer;
\r
6281 result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
\r
6282 if ( FAILED( result ) ) {
\r
6283 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6284 errorText_ = errorStream_.str();
\r
6285 error( RtAudioError::SYSTEM_ERROR );
\r
6288 result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
\r
6289 if ( FAILED( result ) ) {
\r
6290 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6291 errorText_ = errorStream_.str();
\r
6292 error( RtAudioError::SYSTEM_ERROR );
\r
6296 result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
\r
6297 if ( FAILED( result ) ) {
\r
6298 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6299 errorText_ = errorStream_.str();
\r
6300 error( RtAudioError::SYSTEM_ERROR );
\r
6303 result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
\r
6304 if ( FAILED( result ) ) {
\r
6305 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6306 errorText_ = errorStream_.str();
\r
6307 error( RtAudioError::SYSTEM_ERROR );
\r
6310 if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
\r
6314 //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
\r
6316 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6317 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6318 handle->bufferPointer[1] = safeReadPointer;
\r
6320 else if ( stream_.mode == OUTPUT ) {
\r
6322 // Set the proper nextWritePosition after initial startup.
\r
6323 LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6324 result = dsWriteBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6325 if ( FAILED( result ) ) {
\r
6326 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6327 errorText_ = errorStream_.str();
\r
6328 error( RtAudioError::SYSTEM_ERROR );
\r
6331 handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6332 if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
\r
6335 buffersRolling = true;
\r
6338 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
6340 LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
\r
6342 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
\r
6343 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6344 bufferBytes *= formatBytes( stream_.userFormat );
\r
6345 memset( stream_.userBuffer[0], 0, bufferBytes );
\r
6348 // Setup parameters and do buffer conversion if necessary.
\r
6349 if ( stream_.doConvertBuffer[0] ) {
\r
6350 buffer = stream_.deviceBuffer;
\r
6351 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
6352 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
6353 bufferBytes *= formatBytes( stream_.deviceFormat[0] );
\r
6356 buffer = stream_.userBuffer[0];
\r
6357 bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
\r
6358 bufferBytes *= formatBytes( stream_.userFormat );
\r
6361 // No byte swapping necessary in DirectSound implementation.
\r
6363 // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
\r
6364 // unsigned. So, we need to convert our signed 8-bit data here to
\r
6366 if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
\r
6367 for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
\r
6369 DWORD dsBufferSize = handle->dsBufferSize[0];
\r
6370 nextWritePointer = handle->bufferPointer[0];
\r
6372 DWORD endWrite, leadPointer;
\r
6374 // Find out where the read and "safe write" pointers are.
\r
6375 result = dsBuffer->GetCurrentPosition( ¤tWritePointer, &safeWritePointer );
\r
6376 if ( FAILED( result ) ) {
\r
6377 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
\r
6378 errorText_ = errorStream_.str();
\r
6379 error( RtAudioError::SYSTEM_ERROR );
\r
6383 // We will copy our output buffer into the region between
\r
6384 // safeWritePointer and leadPointer. If leadPointer is not
\r
6385 // beyond the next endWrite position, wait until it is.
\r
6386 leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
\r
6387 //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
\r
6388 if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
\r
6389 if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
\r
6390 endWrite = nextWritePointer + bufferBytes;
\r
6392 // Check whether the entire write region is behind the play pointer.
\r
6393 if ( leadPointer >= endWrite ) break;
\r
6395 // If we are here, then we must wait until the leadPointer advances
\r
6396 // beyond the end of our next write region. We use the
\r
6397 // Sleep() function to suspend operation until that happens.
\r
6398 double millis = ( endWrite - leadPointer ) * 1000.0;
\r
6399 millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
\r
6400 if ( millis < 1.0 ) millis = 1.0;
\r
6401 Sleep( (DWORD) millis );
\r
6404 if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
\r
6405 || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
\r
6406 // We've strayed into the forbidden zone ... resync the read pointer.
\r
6407 handle->xrun[0] = true;
\r
6408 nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
\r
6409 if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
\r
6410 handle->bufferPointer[0] = nextWritePointer;
\r
6411 endWrite = nextWritePointer + bufferBytes;
\r
6414 // Lock free space in the buffer
\r
6415 result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
\r
6416 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6417 if ( FAILED( result ) ) {
\r
6418 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
\r
6419 errorText_ = errorStream_.str();
\r
6420 error( RtAudioError::SYSTEM_ERROR );
\r
6424 // Copy our buffer into the DS buffer
\r
6425 CopyMemory( buffer1, buffer, bufferSize1 );
\r
6426 if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
\r
6428 // Update our buffer offset and unlock sound buffer
\r
6429 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6430 if ( FAILED( result ) ) {
\r
6431 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
\r
6432 errorText_ = errorStream_.str();
\r
6433 error( RtAudioError::SYSTEM_ERROR );
\r
6436 nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6437 handle->bufferPointer[0] = nextWritePointer;
\r
6440 // Don't bother draining input
\r
6441 if ( handle->drainCounter ) {
\r
6442 handle->drainCounter++;
\r
6446 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
6448 // Setup parameters.
\r
6449 if ( stream_.doConvertBuffer[1] ) {
\r
6450 buffer = stream_.deviceBuffer;
\r
6451 bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
6452 bufferBytes *= formatBytes( stream_.deviceFormat[1] );
\r
6455 buffer = stream_.userBuffer[1];
\r
6456 bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
\r
6457 bufferBytes *= formatBytes( stream_.userFormat );
\r
6460 LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
\r
6461 long nextReadPointer = handle->bufferPointer[1];
\r
6462 DWORD dsBufferSize = handle->dsBufferSize[1];
\r
6464 // Find out where the write and "safe read" pointers are.
\r
6465 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6466 if ( FAILED( result ) ) {
\r
6467 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6468 errorText_ = errorStream_.str();
\r
6469 error( RtAudioError::SYSTEM_ERROR );
\r
6473 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6474 DWORD endRead = nextReadPointer + bufferBytes;
\r
6476 // Handling depends on whether we are INPUT or DUPLEX.
\r
6477 // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
\r
6478 // then a wait here will drag the write pointers into the forbidden zone.
\r
6480 // In DUPLEX mode, rather than wait, we will back off the read pointer until
\r
6481 // it's in a safe position. This causes dropouts, but it seems to be the only
\r
6482 // practical way to sync up the read and write pointers reliably, given the
\r
6483 // the very complex relationship between phase and increment of the read and write
\r
6486 // In order to minimize audible dropouts in DUPLEX mode, we will
\r
6487 // provide a pre-roll period of 0.5 seconds in which we return
\r
6488 // zeros from the read buffer while the pointers sync up.
\r
6490 if ( stream_.mode == DUPLEX ) {
\r
6491 if ( safeReadPointer < endRead ) {
\r
6492 if ( duplexPrerollBytes <= 0 ) {
\r
6493 // Pre-roll time over. Be more agressive.
\r
6494 int adjustment = endRead-safeReadPointer;
\r
6496 handle->xrun[1] = true;
\r
6498 // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
\r
6499 // and perform fine adjustments later.
\r
6500 // - small adjustments: back off by twice as much.
\r
6501 if ( adjustment >= 2*bufferBytes )
\r
6502 nextReadPointer = safeReadPointer-2*bufferBytes;
\r
6504 nextReadPointer = safeReadPointer-bufferBytes-adjustment;
\r
6506 if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6510 // In pre=roll time. Just do it.
\r
6511 nextReadPointer = safeReadPointer - bufferBytes;
\r
6512 while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
\r
6514 endRead = nextReadPointer + bufferBytes;
\r
6517 else { // mode == INPUT
\r
6518 while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
\r
6519 // See comments for playback.
\r
6520 double millis = (endRead - safeReadPointer) * 1000.0;
\r
6521 millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
\r
6522 if ( millis < 1.0 ) millis = 1.0;
\r
6523 Sleep( (DWORD) millis );
\r
6525 // Wake up and find out where we are now.
\r
6526 result = dsBuffer->GetCurrentPosition( ¤tReadPointer, &safeReadPointer );
\r
6527 if ( FAILED( result ) ) {
\r
6528 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
\r
6529 errorText_ = errorStream_.str();
\r
6530 error( RtAudioError::SYSTEM_ERROR );
\r
6534 if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
\r
6538 // Lock free space in the buffer
\r
6539 result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
\r
6540 &bufferSize1, &buffer2, &bufferSize2, 0 );
\r
6541 if ( FAILED( result ) ) {
\r
6542 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
\r
6543 errorText_ = errorStream_.str();
\r
6544 error( RtAudioError::SYSTEM_ERROR );
\r
6548 if ( duplexPrerollBytes <= 0 ) {
\r
6549 // Copy our buffer into the DS buffer
\r
6550 CopyMemory( buffer, buffer1, bufferSize1 );
\r
6551 if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
\r
6554 memset( buffer, 0, bufferSize1 );
\r
6555 if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
\r
6556 duplexPrerollBytes -= bufferSize1 + bufferSize2;
\r
6559 // Update our buffer offset and unlock sound buffer
\r
6560 nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
\r
6561 dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
\r
6562 if ( FAILED( result ) ) {
\r
6563 errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
\r
6564 errorText_ = errorStream_.str();
\r
6565 error( RtAudioError::SYSTEM_ERROR );
\r
6568 handle->bufferPointer[1] = nextReadPointer;
\r
6570 // No byte swapping necessary in DirectSound implementation.
\r
6572 // If necessary, convert 8-bit data from unsigned to signed.
\r
6573 if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
\r
6574 for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
\r
6576 // Do buffer conversion if necessary.
\r
6577 if ( stream_.doConvertBuffer[1] )
\r
6578 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
6582 MUTEX_UNLOCK( &stream_.mutex );
\r
6583 RtApi::tickStreamTime();
\r
6586 // Definitions for utility functions and callbacks
\r
6587 // specific to the DirectSound implementation.
\r
6589 static unsigned __stdcall callbackHandler( void *ptr )
\r
6591 CallbackInfo *info = (CallbackInfo *) ptr;
\r
6592 RtApiDs *object = (RtApiDs *) info->object;
\r
6593 bool* isRunning = &info->isRunning;
\r
6595 while ( *isRunning == true ) {
\r
6596 object->callbackEvent();
\r
6599 _endthreadex( 0 );
\r
6603 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
\r
6604 LPCTSTR description,
\r
6605 LPCTSTR /*module*/,
\r
6606 LPVOID lpContext )
\r
6608 struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
\r
6609 std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
\r
6612 bool validDevice = false;
\r
6613 if ( probeInfo.isInput == true ) {
\r
6615 LPDIRECTSOUNDCAPTURE object;
\r
6617 hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
\r
6618 if ( hr != DS_OK ) return TRUE;
\r
6620 caps.dwSize = sizeof(caps);
\r
6621 hr = object->GetCaps( &caps );
\r
6622 if ( hr == DS_OK ) {
\r
6623 if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
\r
6624 validDevice = true;
\r
6626 object->Release();
\r
6630 LPDIRECTSOUND object;
\r
6631 hr = DirectSoundCreate( lpguid, &object, NULL );
\r
6632 if ( hr != DS_OK ) return TRUE;
\r
6634 caps.dwSize = sizeof(caps);
\r
6635 hr = object->GetCaps( &caps );
\r
6636 if ( hr == DS_OK ) {
\r
6637 if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
\r
6638 validDevice = true;
\r
6640 object->Release();
\r
6643 // If good device, then save its name and guid.
\r
6644 std::string name = convertCharPointerToStdString( description );
\r
6645 //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
\r
6646 if ( lpguid == NULL )
\r
6647 name = "Default Device";
\r
6648 if ( validDevice ) {
\r
6649 for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
\r
6650 if ( dsDevices[i].name == name ) {
\r
6651 dsDevices[i].found = true;
\r
6652 if ( probeInfo.isInput ) {
\r
6653 dsDevices[i].id[1] = lpguid;
\r
6654 dsDevices[i].validId[1] = true;
\r
6657 dsDevices[i].id[0] = lpguid;
\r
6658 dsDevices[i].validId[0] = true;
\r
6665 device.name = name;
\r
6666 device.found = true;
\r
6667 if ( probeInfo.isInput ) {
\r
6668 device.id[1] = lpguid;
\r
6669 device.validId[1] = true;
\r
6672 device.id[0] = lpguid;
\r
6673 device.validId[0] = true;
\r
6675 dsDevices.push_back( device );
\r
6681 static const char* getErrorString( int code )
\r
6685 case DSERR_ALLOCATED:
\r
6686 return "Already allocated";
\r
6688 case DSERR_CONTROLUNAVAIL:
\r
6689 return "Control unavailable";
\r
6691 case DSERR_INVALIDPARAM:
\r
6692 return "Invalid parameter";
\r
6694 case DSERR_INVALIDCALL:
\r
6695 return "Invalid call";
\r
6697 case DSERR_GENERIC:
\r
6698 return "Generic error";
\r
6700 case DSERR_PRIOLEVELNEEDED:
\r
6701 return "Priority level needed";
\r
6703 case DSERR_OUTOFMEMORY:
\r
6704 return "Out of memory";
\r
6706 case DSERR_BADFORMAT:
\r
6707 return "The sample rate or the channel format is not supported";
\r
6709 case DSERR_UNSUPPORTED:
\r
6710 return "Not supported";
\r
6712 case DSERR_NODRIVER:
\r
6713 return "No driver";
\r
6715 case DSERR_ALREADYINITIALIZED:
\r
6716 return "Already initialized";
\r
6718 case DSERR_NOAGGREGATION:
\r
6719 return "No aggregation";
\r
6721 case DSERR_BUFFERLOST:
\r
6722 return "Buffer lost";
\r
6724 case DSERR_OTHERAPPHASPRIO:
\r
6725 return "Another application already has priority";
\r
6727 case DSERR_UNINITIALIZED:
\r
6728 return "Uninitialized";
\r
6731 return "DirectSound unknown error";
\r
6734 //******************** End of __WINDOWS_DS__ *********************//
\r
6738 #if defined(__LINUX_ALSA__)
\r
6740 #include <alsa/asoundlib.h>
\r
6741 #include <unistd.h>
\r
6743 // A structure to hold various information related to the ALSA API
\r
6744 // implementation.
\r
6745 struct AlsaHandle {
\r
6746 snd_pcm_t *handles[2];
\r
6747 bool synchronized;
\r
6749 pthread_cond_t runnable_cv;
\r
6753 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
\r
6756 static void *alsaCallbackHandler( void * ptr );
\r
6758 RtApiAlsa :: RtApiAlsa()
\r
6760 // Nothing to do here.
\r
6763 RtApiAlsa :: ~RtApiAlsa()
\r
6765 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
6768 unsigned int RtApiAlsa :: getDeviceCount( void )
\r
6770 unsigned nDevices = 0;
\r
6771 int result, subdevice, card;
\r
6773 snd_ctl_t *handle;
\r
6775 // Count cards and devices
\r
6777 snd_card_next( &card );
\r
6778 while ( card >= 0 ) {
\r
6779 sprintf( name, "hw:%d", card );
\r
6780 result = snd_ctl_open( &handle, name, 0 );
\r
6781 if ( result < 0 ) {
\r
6782 errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6783 errorText_ = errorStream_.str();
\r
6784 error( RtAudioError::WARNING );
\r
6789 result = snd_ctl_pcm_next_device( handle, &subdevice );
\r
6790 if ( result < 0 ) {
\r
6791 errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6792 errorText_ = errorStream_.str();
\r
6793 error( RtAudioError::WARNING );
\r
6796 if ( subdevice < 0 )
\r
6801 snd_ctl_close( handle );
\r
6802 snd_card_next( &card );
\r
6805 result = snd_ctl_open( &handle, "default", 0 );
\r
6806 if (result == 0) {
\r
6808 snd_ctl_close( handle );
\r
6814 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
\r
6816 RtAudio::DeviceInfo info;
\r
6817 info.probed = false;
\r
6819 unsigned nDevices = 0;
\r
6820 int result, subdevice, card;
\r
6822 snd_ctl_t *chandle;
\r
6824 // Count cards and devices
\r
6826 snd_card_next( &card );
\r
6827 while ( card >= 0 ) {
\r
6828 sprintf( name, "hw:%d", card );
\r
6829 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
6830 if ( result < 0 ) {
\r
6831 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6832 errorText_ = errorStream_.str();
\r
6833 error( RtAudioError::WARNING );
\r
6838 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
6839 if ( result < 0 ) {
\r
6840 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
\r
6841 errorText_ = errorStream_.str();
\r
6842 error( RtAudioError::WARNING );
\r
6845 if ( subdevice < 0 ) break;
\r
6846 if ( nDevices == device ) {
\r
6847 sprintf( name, "hw:%d,%d", card, subdevice );
\r
6853 snd_ctl_close( chandle );
\r
6854 snd_card_next( &card );
\r
6857 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
6858 if ( result == 0 ) {
\r
6859 if ( nDevices == device ) {
\r
6860 strcpy( name, "default" );
\r
6866 if ( nDevices == 0 ) {
\r
6867 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
\r
6868 error( RtAudioError::INVALID_USE );
\r
6872 if ( device >= nDevices ) {
\r
6873 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
\r
6874 error( RtAudioError::INVALID_USE );
\r
6880 // If a stream is already open, we cannot probe the stream devices.
\r
6881 // Thus, use the saved results.
\r
6882 if ( stream_.state != STREAM_CLOSED &&
\r
6883 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
\r
6884 snd_ctl_close( chandle );
\r
6885 if ( device >= devices_.size() ) {
\r
6886 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
\r
6887 error( RtAudioError::WARNING );
\r
6890 return devices_[ device ];
\r
6893 int openMode = SND_PCM_ASYNC;
\r
6894 snd_pcm_stream_t stream;
\r
6895 snd_pcm_info_t *pcminfo;
\r
6896 snd_pcm_info_alloca( &pcminfo );
\r
6897 snd_pcm_t *phandle;
\r
6898 snd_pcm_hw_params_t *params;
\r
6899 snd_pcm_hw_params_alloca( ¶ms );
\r
6901 // First try for playback unless default device (which has subdev -1)
\r
6902 stream = SND_PCM_STREAM_PLAYBACK;
\r
6903 snd_pcm_info_set_stream( pcminfo, stream );
\r
6904 if ( subdevice != -1 ) {
\r
6905 snd_pcm_info_set_device( pcminfo, subdevice );
\r
6906 snd_pcm_info_set_subdevice( pcminfo, 0 );
\r
6908 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6909 if ( result < 0 ) {
\r
6910 // Device probably doesn't support playback.
\r
6911 goto captureProbe;
\r
6915 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
\r
6916 if ( result < 0 ) {
\r
6917 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6918 errorText_ = errorStream_.str();
\r
6919 error( RtAudioError::WARNING );
\r
6920 goto captureProbe;
\r
6923 // The device is open ... fill the parameter structure.
\r
6924 result = snd_pcm_hw_params_any( phandle, params );
\r
6925 if ( result < 0 ) {
\r
6926 snd_pcm_close( phandle );
\r
6927 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6928 errorText_ = errorStream_.str();
\r
6929 error( RtAudioError::WARNING );
\r
6930 goto captureProbe;
\r
6933 // Get output channel information.
\r
6934 unsigned int value;
\r
6935 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6936 if ( result < 0 ) {
\r
6937 snd_pcm_close( phandle );
\r
6938 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
\r
6939 errorText_ = errorStream_.str();
\r
6940 error( RtAudioError::WARNING );
\r
6941 goto captureProbe;
\r
6943 info.outputChannels = value;
\r
6944 snd_pcm_close( phandle );
\r
6947 stream = SND_PCM_STREAM_CAPTURE;
\r
6948 snd_pcm_info_set_stream( pcminfo, stream );
\r
6950 // Now try for capture unless default device (with subdev = -1)
\r
6951 if ( subdevice != -1 ) {
\r
6952 result = snd_ctl_pcm_info( chandle, pcminfo );
\r
6953 snd_ctl_close( chandle );
\r
6954 if ( result < 0 ) {
\r
6955 // Device probably doesn't support capture.
\r
6956 if ( info.outputChannels == 0 ) return info;
\r
6957 goto probeParameters;
\r
6961 snd_ctl_close( chandle );
\r
6963 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
6964 if ( result < 0 ) {
\r
6965 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6966 errorText_ = errorStream_.str();
\r
6967 error( RtAudioError::WARNING );
\r
6968 if ( info.outputChannels == 0 ) return info;
\r
6969 goto probeParameters;
\r
6972 // The device is open ... fill the parameter structure.
\r
6973 result = snd_pcm_hw_params_any( phandle, params );
\r
6974 if ( result < 0 ) {
\r
6975 snd_pcm_close( phandle );
\r
6976 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
6977 errorText_ = errorStream_.str();
\r
6978 error( RtAudioError::WARNING );
\r
6979 if ( info.outputChannels == 0 ) return info;
\r
6980 goto probeParameters;
\r
6983 result = snd_pcm_hw_params_get_channels_max( params, &value );
\r
6984 if ( result < 0 ) {
\r
6985 snd_pcm_close( phandle );
\r
6986 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
\r
6987 errorText_ = errorStream_.str();
\r
6988 error( RtAudioError::WARNING );
\r
6989 if ( info.outputChannels == 0 ) return info;
\r
6990 goto probeParameters;
\r
6992 info.inputChannels = value;
\r
6993 snd_pcm_close( phandle );
\r
6995 // If device opens for both playback and capture, we determine the channels.
\r
6996 if ( info.outputChannels > 0 && info.inputChannels > 0 )
\r
6997 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
6999 // ALSA doesn't provide default devices so we'll use the first available one.
\r
7000 if ( device == 0 && info.outputChannels > 0 )
\r
7001 info.isDefaultOutput = true;
\r
7002 if ( device == 0 && info.inputChannels > 0 )
\r
7003 info.isDefaultInput = true;
\r
7006 // At this point, we just need to figure out the supported data
\r
7007 // formats and sample rates. We'll proceed by opening the device in
\r
7008 // the direction with the maximum number of channels, or playback if
\r
7009 // they are equal. This might limit our sample rate options, but so
\r
7012 if ( info.outputChannels >= info.inputChannels )
\r
7013 stream = SND_PCM_STREAM_PLAYBACK;
\r
7015 stream = SND_PCM_STREAM_CAPTURE;
\r
7016 snd_pcm_info_set_stream( pcminfo, stream );
\r
7018 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
\r
7019 if ( result < 0 ) {
\r
7020 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7021 errorText_ = errorStream_.str();
\r
7022 error( RtAudioError::WARNING );
\r
7026 // The device is open ... fill the parameter structure.
\r
7027 result = snd_pcm_hw_params_any( phandle, params );
\r
7028 if ( result < 0 ) {
\r
7029 snd_pcm_close( phandle );
\r
7030 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7031 errorText_ = errorStream_.str();
\r
7032 error( RtAudioError::WARNING );
\r
7036 // Test our discrete set of sample rate values.
\r
7037 info.sampleRates.clear();
\r
7038 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
\r
7039 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 )
\r
7040 info.sampleRates.push_back( SAMPLE_RATES[i] );
\r
7042 if ( info.sampleRates.size() == 0 ) {
\r
7043 snd_pcm_close( phandle );
\r
7044 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
\r
7045 errorText_ = errorStream_.str();
\r
7046 error( RtAudioError::WARNING );
\r
7050 // Probe the supported data formats ... we don't care about endian-ness just yet
\r
7051 snd_pcm_format_t format;
\r
7052 info.nativeFormats = 0;
\r
7053 format = SND_PCM_FORMAT_S8;
\r
7054 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7055 info.nativeFormats |= RTAUDIO_SINT8;
\r
7056 format = SND_PCM_FORMAT_S16;
\r
7057 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7058 info.nativeFormats |= RTAUDIO_SINT16;
\r
7059 format = SND_PCM_FORMAT_S24;
\r
7060 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7061 info.nativeFormats |= RTAUDIO_SINT24;
\r
7062 format = SND_PCM_FORMAT_S32;
\r
7063 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7064 info.nativeFormats |= RTAUDIO_SINT32;
\r
7065 format = SND_PCM_FORMAT_FLOAT;
\r
7066 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7067 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
7068 format = SND_PCM_FORMAT_FLOAT64;
\r
7069 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
\r
7070 info.nativeFormats |= RTAUDIO_FLOAT64;
\r
7072 // Check that we have at least one supported format
\r
7073 if ( info.nativeFormats == 0 ) {
\r
7074 snd_pcm_close( phandle );
\r
7075 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
\r
7076 errorText_ = errorStream_.str();
\r
7077 error( RtAudioError::WARNING );
\r
7081 // Get the device name
\r
7083 result = snd_card_get_name( card, &cardname );
\r
7084 if ( result >= 0 ) {
\r
7085 sprintf( name, "hw:%s,%d", cardname, subdevice );
\r
7090 // That's all ... close the device and return
\r
7091 snd_pcm_close( phandle );
\r
7092 info.probed = true;
\r
7096 void RtApiAlsa :: saveDeviceInfo( void )
\r
7100 unsigned int nDevices = getDeviceCount();
\r
7101 devices_.resize( nDevices );
\r
7102 for ( unsigned int i=0; i<nDevices; i++ )
\r
7103 devices_[i] = getDeviceInfo( i );
\r
7106 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
7107 unsigned int firstChannel, unsigned int sampleRate,
\r
7108 RtAudioFormat format, unsigned int *bufferSize,
\r
7109 RtAudio::StreamOptions *options )
\r
7112 #if defined(__RTAUDIO_DEBUG__)
\r
7113 snd_output_t *out;
\r
7114 snd_output_stdio_attach(&out, stderr, 0);
\r
7117 // I'm not using the "plug" interface ... too much inconsistent behavior.
\r
7119 unsigned nDevices = 0;
\r
7120 int result, subdevice, card;
\r
7122 snd_ctl_t *chandle;
\r
7124 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
\r
7125 snprintf(name, sizeof(name), "%s", "default");
\r
7127 // Count cards and devices
\r
7129 snd_card_next( &card );
\r
7130 while ( card >= 0 ) {
\r
7131 sprintf( name, "hw:%d", card );
\r
7132 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
\r
7133 if ( result < 0 ) {
\r
7134 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
\r
7135 errorText_ = errorStream_.str();
\r
7140 result = snd_ctl_pcm_next_device( chandle, &subdevice );
\r
7141 if ( result < 0 ) break;
\r
7142 if ( subdevice < 0 ) break;
\r
7143 if ( nDevices == device ) {
\r
7144 sprintf( name, "hw:%d,%d", card, subdevice );
\r
7145 snd_ctl_close( chandle );
\r
7150 snd_ctl_close( chandle );
\r
7151 snd_card_next( &card );
\r
7154 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
\r
7155 if ( result == 0 ) {
\r
7156 if ( nDevices == device ) {
\r
7157 strcpy( name, "default" );
\r
7163 if ( nDevices == 0 ) {
\r
7164 // This should not happen because a check is made before this function is called.
\r
7165 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
\r
7169 if ( device >= nDevices ) {
\r
7170 // This should not happen because a check is made before this function is called.
\r
7171 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
\r
7178 // The getDeviceInfo() function will not work for a device that is
\r
7179 // already open. Thus, we'll probe the system before opening a
\r
7180 // stream and save the results for use by getDeviceInfo().
\r
7181 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
\r
7182 this->saveDeviceInfo();
\r
7184 snd_pcm_stream_t stream;
\r
7185 if ( mode == OUTPUT )
\r
7186 stream = SND_PCM_STREAM_PLAYBACK;
\r
7188 stream = SND_PCM_STREAM_CAPTURE;
\r
7190 snd_pcm_t *phandle;
\r
7191 int openMode = SND_PCM_ASYNC;
\r
7192 result = snd_pcm_open( &phandle, name, stream, openMode );
\r
7193 if ( result < 0 ) {
\r
7194 if ( mode == OUTPUT )
\r
7195 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
\r
7197 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
\r
7198 errorText_ = errorStream_.str();
\r
7202 // Fill the parameter structure.
\r
7203 snd_pcm_hw_params_t *hw_params;
\r
7204 snd_pcm_hw_params_alloca( &hw_params );
\r
7205 result = snd_pcm_hw_params_any( phandle, hw_params );
\r
7206 if ( result < 0 ) {
\r
7207 snd_pcm_close( phandle );
\r
7208 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
\r
7209 errorText_ = errorStream_.str();
\r
7213 #if defined(__RTAUDIO_DEBUG__)
\r
7214 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
\r
7215 snd_pcm_hw_params_dump( hw_params, out );
\r
7218 // Set access ... check user preference.
\r
7219 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
\r
7220 stream_.userInterleaved = false;
\r
7221 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7222 if ( result < 0 ) {
\r
7223 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7224 stream_.deviceInterleaved[mode] = true;
\r
7227 stream_.deviceInterleaved[mode] = false;
\r
7230 stream_.userInterleaved = true;
\r
7231 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
\r
7232 if ( result < 0 ) {
\r
7233 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
\r
7234 stream_.deviceInterleaved[mode] = false;
\r
7237 stream_.deviceInterleaved[mode] = true;
\r
7240 if ( result < 0 ) {
\r
7241 snd_pcm_close( phandle );
\r
7242 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
\r
7243 errorText_ = errorStream_.str();
\r
7247 // Determine how to set the device format.
\r
7248 stream_.userFormat = format;
\r
7249 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
\r
7251 if ( format == RTAUDIO_SINT8 )
\r
7252 deviceFormat = SND_PCM_FORMAT_S8;
\r
7253 else if ( format == RTAUDIO_SINT16 )
\r
7254 deviceFormat = SND_PCM_FORMAT_S16;
\r
7255 else if ( format == RTAUDIO_SINT24 )
\r
7256 deviceFormat = SND_PCM_FORMAT_S24;
\r
7257 else if ( format == RTAUDIO_SINT32 )
\r
7258 deviceFormat = SND_PCM_FORMAT_S32;
\r
7259 else if ( format == RTAUDIO_FLOAT32 )
\r
7260 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7261 else if ( format == RTAUDIO_FLOAT64 )
\r
7262 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7264 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
\r
7265 stream_.deviceFormat[mode] = format;
\r
7269 // The user requested format is not natively supported by the device.
\r
7270 deviceFormat = SND_PCM_FORMAT_FLOAT64;
\r
7271 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
\r
7272 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
\r
7276 deviceFormat = SND_PCM_FORMAT_FLOAT;
\r
7277 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7278 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
7282 deviceFormat = SND_PCM_FORMAT_S32;
\r
7283 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7284 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
7288 deviceFormat = SND_PCM_FORMAT_S24;
\r
7289 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7290 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
7294 deviceFormat = SND_PCM_FORMAT_S16;
\r
7295 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7296 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
7300 deviceFormat = SND_PCM_FORMAT_S8;
\r
7301 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
\r
7302 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
7306 // If we get here, no supported format was found.
\r
7307 snd_pcm_close( phandle );
\r
7308 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
\r
7309 errorText_ = errorStream_.str();
\r
7313 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
\r
7314 if ( result < 0 ) {
\r
7315 snd_pcm_close( phandle );
\r
7316 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
\r
7317 errorText_ = errorStream_.str();
\r
7321 // Determine whether byte-swaping is necessary.
\r
7322 stream_.doByteSwap[mode] = false;
\r
7323 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
\r
7324 result = snd_pcm_format_cpu_endian( deviceFormat );
\r
7325 if ( result == 0 )
\r
7326 stream_.doByteSwap[mode] = true;
\r
7327 else if (result < 0) {
\r
7328 snd_pcm_close( phandle );
\r
7329 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
\r
7330 errorText_ = errorStream_.str();
\r
7335 // Set the sample rate.
\r
7336 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
\r
7337 if ( result < 0 ) {
\r
7338 snd_pcm_close( phandle );
\r
7339 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7340 errorText_ = errorStream_.str();
\r
7344 // Determine the number of channels for this device. We support a possible
\r
7345 // minimum device channel number > than the value requested by the user.
\r
7346 stream_.nUserChannels[mode] = channels;
\r
7347 unsigned int value;
\r
7348 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
\r
7349 unsigned int deviceChannels = value;
\r
7350 if ( result < 0 || deviceChannels < channels + firstChannel ) {
\r
7351 snd_pcm_close( phandle );
\r
7352 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
\r
7353 errorText_ = errorStream_.str();
\r
7357 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
\r
7358 if ( result < 0 ) {
\r
7359 snd_pcm_close( phandle );
\r
7360 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7361 errorText_ = errorStream_.str();
\r
7364 deviceChannels = value;
\r
7365 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
\r
7366 stream_.nDeviceChannels[mode] = deviceChannels;
\r
7368 // Set the device channels.
\r
7369 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
\r
7370 if ( result < 0 ) {
\r
7371 snd_pcm_close( phandle );
\r
7372 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7373 errorText_ = errorStream_.str();
\r
7377 // Set the buffer (or period) size.
\r
7379 snd_pcm_uframes_t periodSize = *bufferSize;
\r
7380 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
\r
7381 if ( result < 0 ) {
\r
7382 snd_pcm_close( phandle );
\r
7383 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7384 errorText_ = errorStream_.str();
\r
7387 *bufferSize = periodSize;
\r
7389 // Set the buffer number, which in ALSA is referred to as the "period".
\r
7390 unsigned int periods = 0;
\r
7391 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
\r
7392 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
\r
7393 if ( periods < 2 ) periods = 4; // a fairly safe default value
\r
7394 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
\r
7395 if ( result < 0 ) {
\r
7396 snd_pcm_close( phandle );
\r
7397 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
\r
7398 errorText_ = errorStream_.str();
\r
7402 // If attempting to setup a duplex stream, the bufferSize parameter
\r
7403 // MUST be the same in both directions!
\r
7404 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
\r
7405 snd_pcm_close( phandle );
\r
7406 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
\r
7407 errorText_ = errorStream_.str();
\r
7411 stream_.bufferSize = *bufferSize;
\r
7413 // Install the hardware configuration
\r
7414 result = snd_pcm_hw_params( phandle, hw_params );
\r
7415 if ( result < 0 ) {
\r
7416 snd_pcm_close( phandle );
\r
7417 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7418 errorText_ = errorStream_.str();
\r
7422 #if defined(__RTAUDIO_DEBUG__)
\r
7423 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
\r
7424 snd_pcm_hw_params_dump( hw_params, out );
\r
7427 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
\r
7428 snd_pcm_sw_params_t *sw_params = NULL;
\r
7429 snd_pcm_sw_params_alloca( &sw_params );
\r
7430 snd_pcm_sw_params_current( phandle, sw_params );
\r
7431 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
\r
7432 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
\r
7433 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
\r
7435 // The following two settings were suggested by Theo Veenker
\r
7436 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
\r
7437 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
\r
7439 // here are two options for a fix
\r
7440 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
\r
7441 snd_pcm_uframes_t val;
\r
7442 snd_pcm_sw_params_get_boundary( sw_params, &val );
\r
7443 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
\r
7445 result = snd_pcm_sw_params( phandle, sw_params );
\r
7446 if ( result < 0 ) {
\r
7447 snd_pcm_close( phandle );
\r
7448 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
\r
7449 errorText_ = errorStream_.str();
\r
7453 #if defined(__RTAUDIO_DEBUG__)
\r
7454 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
\r
7455 snd_pcm_sw_params_dump( sw_params, out );
\r
7458 // Set flags for buffer conversion
\r
7459 stream_.doConvertBuffer[mode] = false;
\r
7460 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
7461 stream_.doConvertBuffer[mode] = true;
\r
7462 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
7463 stream_.doConvertBuffer[mode] = true;
\r
7464 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
7465 stream_.nUserChannels[mode] > 1 )
\r
7466 stream_.doConvertBuffer[mode] = true;
\r
7468 // Allocate the ApiHandle if necessary and then save.
\r
7469 AlsaHandle *apiInfo = 0;
\r
7470 if ( stream_.apiHandle == 0 ) {
\r
7472 apiInfo = (AlsaHandle *) new AlsaHandle;
\r
7474 catch ( std::bad_alloc& ) {
\r
7475 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
\r
7479 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
\r
7480 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
\r
7484 stream_.apiHandle = (void *) apiInfo;
\r
7485 apiInfo->handles[0] = 0;
\r
7486 apiInfo->handles[1] = 0;
\r
7489 apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7491 apiInfo->handles[mode] = phandle;
\r
7494 // Allocate necessary internal buffers.
\r
7495 unsigned long bufferBytes;
\r
7496 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
7497 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
7498 if ( stream_.userBuffer[mode] == NULL ) {
\r
7499 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
\r
7503 if ( stream_.doConvertBuffer[mode] ) {
\r
7505 bool makeBuffer = true;
\r
7506 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
7507 if ( mode == INPUT ) {
\r
7508 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
7509 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
7510 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
7514 if ( makeBuffer ) {
\r
7515 bufferBytes *= *bufferSize;
\r
7516 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
7517 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
7518 if ( stream_.deviceBuffer == NULL ) {
\r
7519 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
\r
7525 stream_.sampleRate = sampleRate;
\r
7526 stream_.nBuffers = periods;
\r
7527 stream_.device[mode] = device;
\r
7528 stream_.state = STREAM_STOPPED;
\r
7530 // Setup the buffer conversion information structure.
\r
7531 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
7533 // Setup thread if necessary.
\r
7534 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
7535 // We had already set up an output stream.
\r
7536 stream_.mode = DUPLEX;
\r
7537 // Link the streams if possible.
\r
7538 apiInfo->synchronized = false;
\r
7539 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
\r
7540 apiInfo->synchronized = true;
\r
7542 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
\r
7543 error( RtAudioError::WARNING );
\r
7547 stream_.mode = mode;
\r
7549 // Setup callback thread.
\r
7550 stream_.callbackInfo.object = (void *) this;
\r
7552 // Set the thread attributes for joinable and realtime scheduling
\r
7553 // priority (optional). The higher priority will only take affect
\r
7554 // if the program is run as root or suid. Note, under Linux
\r
7555 // processes with CAP_SYS_NICE privilege, a user can change
\r
7556 // scheduling policy and priority (thus need not be root). See
\r
7557 // POSIX "capabilities".
\r
7558 pthread_attr_t attr;
\r
7559 pthread_attr_init( &attr );
\r
7560 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
7562 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7563 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
7564 // We previously attempted to increase the audio callback priority
\r
7565 // to SCHED_RR here via the attributes. However, while no errors
\r
7566 // were reported in doing so, it did not work. So, now this is
\r
7567 // done in the alsaCallbackHandler function.
\r
7568 stream_.callbackInfo.doRealtime = true;
\r
7569 int priority = options->priority;
\r
7570 int min = sched_get_priority_min( SCHED_RR );
\r
7571 int max = sched_get_priority_max( SCHED_RR );
\r
7572 if ( priority < min ) priority = min;
\r
7573 else if ( priority > max ) priority = max;
\r
7574 stream_.callbackInfo.priority = priority;
\r
7578 stream_.callbackInfo.isRunning = true;
\r
7579 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
\r
7580 pthread_attr_destroy( &attr );
\r
7582 stream_.callbackInfo.isRunning = false;
\r
7583 errorText_ = "RtApiAlsa::error creating callback thread!";
\r
7592 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7593 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7594 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7596 stream_.apiHandle = 0;
\r
7599 if ( phandle) snd_pcm_close( phandle );
\r
7601 for ( int i=0; i<2; i++ ) {
\r
7602 if ( stream_.userBuffer[i] ) {
\r
7603 free( stream_.userBuffer[i] );
\r
7604 stream_.userBuffer[i] = 0;
\r
7608 if ( stream_.deviceBuffer ) {
\r
7609 free( stream_.deviceBuffer );
\r
7610 stream_.deviceBuffer = 0;
\r
7613 stream_.state = STREAM_CLOSED;
\r
7617 void RtApiAlsa :: closeStream()
\r
7619 if ( stream_.state == STREAM_CLOSED ) {
\r
7620 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
\r
7621 error( RtAudioError::WARNING );
\r
7625 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7626 stream_.callbackInfo.isRunning = false;
\r
7627 MUTEX_LOCK( &stream_.mutex );
\r
7628 if ( stream_.state == STREAM_STOPPED ) {
\r
7629 apiInfo->runnable = true;
\r
7630 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7632 MUTEX_UNLOCK( &stream_.mutex );
\r
7633 pthread_join( stream_.callbackInfo.thread, NULL );
\r
7635 if ( stream_.state == STREAM_RUNNING ) {
\r
7636 stream_.state = STREAM_STOPPED;
\r
7637 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
7638 snd_pcm_drop( apiInfo->handles[0] );
\r
7639 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
\r
7640 snd_pcm_drop( apiInfo->handles[1] );
\r
7644 pthread_cond_destroy( &apiInfo->runnable_cv );
\r
7645 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
\r
7646 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
\r
7648 stream_.apiHandle = 0;
\r
7651 for ( int i=0; i<2; i++ ) {
\r
7652 if ( stream_.userBuffer[i] ) {
\r
7653 free( stream_.userBuffer[i] );
\r
7654 stream_.userBuffer[i] = 0;
\r
7658 if ( stream_.deviceBuffer ) {
\r
7659 free( stream_.deviceBuffer );
\r
7660 stream_.deviceBuffer = 0;
\r
7663 stream_.mode = UNINITIALIZED;
\r
7664 stream_.state = STREAM_CLOSED;
\r
7667 void RtApiAlsa :: startStream()
\r
7669 // This method calls snd_pcm_prepare if the device isn't already in that state.
\r
7672 if ( stream_.state == STREAM_RUNNING ) {
\r
7673 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
\r
7674 error( RtAudioError::WARNING );
\r
7678 MUTEX_LOCK( &stream_.mutex );
\r
7681 snd_pcm_state_t state;
\r
7682 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7683 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7684 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7685 state = snd_pcm_state( handle[0] );
\r
7686 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7687 result = snd_pcm_prepare( handle[0] );
\r
7688 if ( result < 0 ) {
\r
7689 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
\r
7690 errorText_ = errorStream_.str();
\r
7696 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7697 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
\r
7698 state = snd_pcm_state( handle[1] );
\r
7699 if ( state != SND_PCM_STATE_PREPARED ) {
\r
7700 result = snd_pcm_prepare( handle[1] );
\r
7701 if ( result < 0 ) {
\r
7702 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
\r
7703 errorText_ = errorStream_.str();
\r
7709 stream_.state = STREAM_RUNNING;
\r
7712 apiInfo->runnable = true;
\r
7713 pthread_cond_signal( &apiInfo->runnable_cv );
\r
7714 MUTEX_UNLOCK( &stream_.mutex );
\r
7716 if ( result >= 0 ) return;
\r
7717 error( RtAudioError::SYSTEM_ERROR );
\r
void RtApiAlsa :: stopStream()
{
  // Gracefully stop the stream: drain output (play out queued frames)
  // unless the handles are linked, in which case drop both sides.
  verifyStream();
  if ( stream_.state == STREAM_STOPPED ) {
    errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
    error( RtAudioError::WARNING );
    return;
  }

  stream_.state = STREAM_STOPPED;
  MUTEX_LOCK( &stream_.mutex );

  int result = 0;
  AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
  snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
    if ( apiInfo->synchronized )
      result = snd_pcm_drop( handle[0] );
    else
      result = snd_pcm_drain( handle[0] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

  if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
    result = snd_pcm_drop( handle[1] );
    if ( result < 0 ) {
      errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
      errorText_ = errorStream_.str();
      goto unlock;
    }
  }

 unlock:
  apiInfo->runnable = false; // fixes high CPU usage when stopped
  MUTEX_UNLOCK( &stream_.mutex );

  if ( result >= 0 ) return;
  error( RtAudioError::SYSTEM_ERROR );
}
\r
7764 void RtApiAlsa :: abortStream()
\r
7767 if ( stream_.state == STREAM_STOPPED ) {
\r
7768 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
\r
7769 error( RtAudioError::WARNING );
\r
7773 stream_.state = STREAM_STOPPED;
\r
7774 MUTEX_LOCK( &stream_.mutex );
\r
7777 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7778 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
\r
7779 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7780 result = snd_pcm_drop( handle[0] );
\r
7781 if ( result < 0 ) {
\r
7782 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
\r
7783 errorText_ = errorStream_.str();
\r
7788 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
\r
7789 result = snd_pcm_drop( handle[1] );
\r
7790 if ( result < 0 ) {
\r
7791 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
\r
7792 errorText_ = errorStream_.str();
\r
7798 apiInfo->runnable = false; // fixes high CPU usage when stopped
\r
7799 MUTEX_UNLOCK( &stream_.mutex );
\r
7801 if ( result >= 0 ) return;
\r
7802 error( RtAudioError::SYSTEM_ERROR );
\r
7805 void RtApiAlsa :: callbackEvent()
\r
7807 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
\r
7808 if ( stream_.state == STREAM_STOPPED ) {
\r
7809 MUTEX_LOCK( &stream_.mutex );
\r
7810 while ( !apiInfo->runnable )
\r
7811 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
\r
7813 if ( stream_.state != STREAM_RUNNING ) {
\r
7814 MUTEX_UNLOCK( &stream_.mutex );
\r
7817 MUTEX_UNLOCK( &stream_.mutex );
\r
7820 if ( stream_.state == STREAM_CLOSED ) {
\r
7821 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
7822 error( RtAudioError::WARNING );
\r
7826 int doStopStream = 0;
\r
7827 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
7828 double streamTime = getStreamTime();
\r
7829 RtAudioStreamStatus status = 0;
\r
7830 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
\r
7831 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
7832 apiInfo->xrun[0] = false;
\r
7834 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
\r
7835 status |= RTAUDIO_INPUT_OVERFLOW;
\r
7836 apiInfo->xrun[1] = false;
\r
7838 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
7839 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
7841 if ( doStopStream == 2 ) {
\r
7846 MUTEX_LOCK( &stream_.mutex );
\r
7848 // The state might change while waiting on a mutex.
\r
7849 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
7854 snd_pcm_t **handle;
\r
7855 snd_pcm_sframes_t frames;
\r
7856 RtAudioFormat format;
\r
7857 handle = (snd_pcm_t **) apiInfo->handles;
\r
7859 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
7861 // Setup parameters.
\r
7862 if ( stream_.doConvertBuffer[1] ) {
\r
7863 buffer = stream_.deviceBuffer;
\r
7864 channels = stream_.nDeviceChannels[1];
\r
7865 format = stream_.deviceFormat[1];
\r
7868 buffer = stream_.userBuffer[1];
\r
7869 channels = stream_.nUserChannels[1];
\r
7870 format = stream_.userFormat;
\r
7873 // Read samples from device in interleaved/non-interleaved format.
\r
7874 if ( stream_.deviceInterleaved[1] )
\r
7875 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
\r
7877 void *bufs[channels];
\r
7878 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7879 for ( int i=0; i<channels; i++ )
\r
7880 bufs[i] = (void *) (buffer + (i * offset));
\r
7881 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
\r
7884 if ( result < (int) stream_.bufferSize ) {
\r
7885 // Either an error or overrun occured.
\r
7886 if ( result == -EPIPE ) {
\r
7887 snd_pcm_state_t state = snd_pcm_state( handle[1] );
\r
7888 if ( state == SND_PCM_STATE_XRUN ) {
\r
7889 apiInfo->xrun[1] = true;
\r
7890 result = snd_pcm_prepare( handle[1] );
\r
7891 if ( result < 0 ) {
\r
7892 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
\r
7893 errorText_ = errorStream_.str();
\r
7897 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7898 errorText_ = errorStream_.str();
\r
7902 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
\r
7903 errorText_ = errorStream_.str();
\r
7905 error( RtAudioError::WARNING );
\r
7909 // Do byte swapping if necessary.
\r
7910 if ( stream_.doByteSwap[1] )
\r
7911 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
\r
7913 // Do buffer conversion if necessary.
\r
7914 if ( stream_.doConvertBuffer[1] )
\r
7915 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
7917 // Check stream latency
\r
7918 result = snd_pcm_delay( handle[1], &frames );
\r
7919 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
\r
7924 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
7926 // Setup parameters and do buffer conversion if necessary.
\r
7927 if ( stream_.doConvertBuffer[0] ) {
\r
7928 buffer = stream_.deviceBuffer;
\r
7929 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
7930 channels = stream_.nDeviceChannels[0];
\r
7931 format = stream_.deviceFormat[0];
\r
7934 buffer = stream_.userBuffer[0];
\r
7935 channels = stream_.nUserChannels[0];
\r
7936 format = stream_.userFormat;
\r
7939 // Do byte swapping if necessary.
\r
7940 if ( stream_.doByteSwap[0] )
\r
7941 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
\r
7943 // Write samples to device in interleaved/non-interleaved format.
\r
7944 if ( stream_.deviceInterleaved[0] )
\r
7945 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
\r
7947 void *bufs[channels];
\r
7948 size_t offset = stream_.bufferSize * formatBytes( format );
\r
7949 for ( int i=0; i<channels; i++ )
\r
7950 bufs[i] = (void *) (buffer + (i * offset));
\r
7951 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
\r
7954 if ( result < (int) stream_.bufferSize ) {
\r
7955 // Either an error or underrun occured.
\r
7956 if ( result == -EPIPE ) {
\r
7957 snd_pcm_state_t state = snd_pcm_state( handle[0] );
\r
7958 if ( state == SND_PCM_STATE_XRUN ) {
\r
7959 apiInfo->xrun[0] = true;
\r
7960 result = snd_pcm_prepare( handle[0] );
\r
7961 if ( result < 0 ) {
\r
7962 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
\r
7963 errorText_ = errorStream_.str();
\r
7967 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
\r
7968 errorText_ = errorStream_.str();
\r
7972 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
\r
7973 errorText_ = errorStream_.str();
\r
7975 error( RtAudioError::WARNING );
\r
7979 // Check stream latency
\r
7980 result = snd_pcm_delay( handle[0], &frames );
\r
7981 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
\r
7985 MUTEX_UNLOCK( &stream_.mutex );
\r
7987 RtApi::tickStreamTime();
\r
7988 if ( doStopStream == 1 ) this->stopStream();
\r
7991 static void *alsaCallbackHandler( void *ptr )
\r
7993 CallbackInfo *info = (CallbackInfo *) ptr;
\r
7994 RtApiAlsa *object = (RtApiAlsa *) info->object;
\r
7995 bool *isRunning = &info->isRunning;
\r
7997 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
7998 if ( &info->doRealtime ) {
\r
7999 pthread_t tID = pthread_self(); // ID of this thread
\r
8000 sched_param prio = { info->priority }; // scheduling priority of thread
\r
8001 pthread_setschedparam( tID, SCHED_RR, &prio );
\r
8005 while ( *isRunning == true ) {
\r
8006 pthread_testcancel();
\r
8007 object->callbackEvent();
\r
8010 pthread_exit( NULL );
\r
8013 //******************** End of __LINUX_ALSA__ *********************//
\r
8016 #if defined(__LINUX_PULSE__)
\r
8018 // Code written by Peter Meerwald, pmeerw@pmeerw.net
\r
8019 // and Tristan Matthews.
\r
8021 #include <pulse/error.h>
\r
8022 #include <pulse/simple.h>
\r
// Sample rates advertised by the PulseAudio backend (zero-terminated list).
static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
                                                      44100, 48000, 96000, 0};
\r
8028 struct rtaudio_pa_format_mapping_t {
\r
8029 RtAudioFormat rtaudio_format;
\r
8030 pa_sample_format_t pa_format;
\r
8033 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
\r
8034 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
\r
8035 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
\r
8036 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
\r
8037 {0, PA_SAMPLE_INVALID}};
\r
8039 struct PulseAudioHandle {
\r
8040 pa_simple *s_play;
\r
8043 pthread_cond_t runnable_cv;
\r
8045 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
\r
8048 RtApiPulse::~RtApiPulse()
\r
8050 if ( stream_.state != STREAM_CLOSED )
\r
8054 unsigned int RtApiPulse::getDeviceCount( void )
\r
8059 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
\r
8061 RtAudio::DeviceInfo info;
\r
8062 info.probed = true;
\r
8063 info.name = "PulseAudio";
\r
8064 info.outputChannels = 2;
\r
8065 info.inputChannels = 2;
\r
8066 info.duplexChannels = 2;
\r
8067 info.isDefaultOutput = true;
\r
8068 info.isDefaultInput = true;
\r
8070 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
\r
8071 info.sampleRates.push_back( *sr );
\r
8073 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
\r
8078 static void *pulseaudio_callback( void * user )
\r
8080 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
\r
8081 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
\r
8082 volatile bool *isRunning = &cbi->isRunning;
\r
8084 while ( *isRunning ) {
\r
8085 pthread_testcancel();
\r
8086 context->callbackEvent();
\r
8089 pthread_exit( NULL );
\r
8092 void RtApiPulse::closeStream( void )
\r
8094 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8096 stream_.callbackInfo.isRunning = false;
\r
8098 MUTEX_LOCK( &stream_.mutex );
\r
8099 if ( stream_.state == STREAM_STOPPED ) {
\r
8100 pah->runnable = true;
\r
8101 pthread_cond_signal( &pah->runnable_cv );
\r
8103 MUTEX_UNLOCK( &stream_.mutex );
\r
8105 pthread_join( pah->thread, 0 );
\r
8106 if ( pah->s_play ) {
\r
8107 pa_simple_flush( pah->s_play, NULL );
\r
8108 pa_simple_free( pah->s_play );
\r
8111 pa_simple_free( pah->s_rec );
\r
8113 pthread_cond_destroy( &pah->runnable_cv );
\r
8115 stream_.apiHandle = 0;
\r
8118 if ( stream_.userBuffer[0] ) {
\r
8119 free( stream_.userBuffer[0] );
\r
8120 stream_.userBuffer[0] = 0;
\r
8122 if ( stream_.userBuffer[1] ) {
\r
8123 free( stream_.userBuffer[1] );
\r
8124 stream_.userBuffer[1] = 0;
\r
8127 stream_.state = STREAM_CLOSED;
\r
8128 stream_.mode = UNINITIALIZED;
\r
8131 void RtApiPulse::callbackEvent( void )
\r
8133 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8135 if ( stream_.state == STREAM_STOPPED ) {
\r
8136 MUTEX_LOCK( &stream_.mutex );
\r
8137 while ( !pah->runnable )
\r
8138 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
\r
8140 if ( stream_.state != STREAM_RUNNING ) {
\r
8141 MUTEX_UNLOCK( &stream_.mutex );
\r
8144 MUTEX_UNLOCK( &stream_.mutex );
\r
8147 if ( stream_.state == STREAM_CLOSED ) {
\r
8148 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
\r
8149 "this shouldn't happen!";
\r
8150 error( RtAudioError::WARNING );
\r
8154 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
8155 double streamTime = getStreamTime();
\r
8156 RtAudioStreamStatus status = 0;
\r
8157 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
\r
8158 stream_.bufferSize, streamTime, status,
\r
8159 stream_.callbackInfo.userData );
\r
8161 if ( doStopStream == 2 ) {
\r
8166 MUTEX_LOCK( &stream_.mutex );
\r
8167 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
\r
8168 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
\r
8170 if ( stream_.state != STREAM_RUNNING )
\r
8175 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
8176 if ( stream_.doConvertBuffer[OUTPUT] ) {
\r
8177 convertBuffer( stream_.deviceBuffer,
\r
8178 stream_.userBuffer[OUTPUT],
\r
8179 stream_.convertInfo[OUTPUT] );
\r
8180 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
\r
8181 formatBytes( stream_.deviceFormat[OUTPUT] );
\r
8183 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
\r
8184 formatBytes( stream_.userFormat );
\r
8186 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
\r
8187 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
\r
8188 pa_strerror( pa_error ) << ".";
\r
8189 errorText_ = errorStream_.str();
\r
8190 error( RtAudioError::WARNING );
\r
8194 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
\r
8195 if ( stream_.doConvertBuffer[INPUT] )
\r
8196 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
\r
8197 formatBytes( stream_.deviceFormat[INPUT] );
\r
8199 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
\r
8200 formatBytes( stream_.userFormat );
\r
8202 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
\r
8203 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
\r
8204 pa_strerror( pa_error ) << ".";
\r
8205 errorText_ = errorStream_.str();
\r
8206 error( RtAudioError::WARNING );
\r
8208 if ( stream_.doConvertBuffer[INPUT] ) {
\r
8209 convertBuffer( stream_.userBuffer[INPUT],
\r
8210 stream_.deviceBuffer,
\r
8211 stream_.convertInfo[INPUT] );
\r
8216 MUTEX_UNLOCK( &stream_.mutex );
\r
8217 RtApi::tickStreamTime();
\r
8219 if ( doStopStream == 1 )
\r
8223 void RtApiPulse::startStream( void )
\r
8225 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8227 if ( stream_.state == STREAM_CLOSED ) {
\r
8228 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
\r
8229 error( RtAudioError::INVALID_USE );
\r
8232 if ( stream_.state == STREAM_RUNNING ) {
\r
8233 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
\r
8234 error( RtAudioError::WARNING );
\r
8238 MUTEX_LOCK( &stream_.mutex );
\r
8240 stream_.state = STREAM_RUNNING;
\r
8242 pah->runnable = true;
\r
8243 pthread_cond_signal( &pah->runnable_cv );
\r
8244 MUTEX_UNLOCK( &stream_.mutex );
\r
8247 void RtApiPulse::stopStream( void )
\r
8249 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8251 if ( stream_.state == STREAM_CLOSED ) {
\r
8252 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
\r
8253 error( RtAudioError::INVALID_USE );
\r
8256 if ( stream_.state == STREAM_STOPPED ) {
\r
8257 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
\r
8258 error( RtAudioError::WARNING );
\r
8262 stream_.state = STREAM_STOPPED;
\r
8263 MUTEX_LOCK( &stream_.mutex );
\r
8265 if ( pah && pah->s_play ) {
\r
8267 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
\r
8268 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
\r
8269 pa_strerror( pa_error ) << ".";
\r
8270 errorText_ = errorStream_.str();
\r
8271 MUTEX_UNLOCK( &stream_.mutex );
\r
8272 error( RtAudioError::SYSTEM_ERROR );
\r
8277 stream_.state = STREAM_STOPPED;
\r
8278 MUTEX_UNLOCK( &stream_.mutex );
\r
8281 void RtApiPulse::abortStream( void )
\r
8283 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
\r
8285 if ( stream_.state == STREAM_CLOSED ) {
\r
8286 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
\r
8287 error( RtAudioError::INVALID_USE );
\r
8290 if ( stream_.state == STREAM_STOPPED ) {
\r
8291 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
\r
8292 error( RtAudioError::WARNING );
\r
8296 stream_.state = STREAM_STOPPED;
\r
8297 MUTEX_LOCK( &stream_.mutex );
\r
8299 if ( pah && pah->s_play ) {
\r
8301 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
\r
8302 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
\r
8303 pa_strerror( pa_error ) << ".";
\r
8304 errorText_ = errorStream_.str();
\r
8305 MUTEX_UNLOCK( &stream_.mutex );
\r
8306 error( RtAudioError::SYSTEM_ERROR );
\r
8311 stream_.state = STREAM_STOPPED;
\r
8312 MUTEX_UNLOCK( &stream_.mutex );
\r
8315 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
\r
8316 unsigned int channels, unsigned int firstChannel,
\r
8317 unsigned int sampleRate, RtAudioFormat format,
\r
8318 unsigned int *bufferSize, RtAudio::StreamOptions *options )
\r
8320 PulseAudioHandle *pah = 0;
\r
8321 unsigned long bufferBytes = 0;
\r
8322 pa_sample_spec ss;
\r
8324 if ( device != 0 ) return false;
\r
8325 if ( mode != INPUT && mode != OUTPUT ) return false;
\r
8326 if ( channels != 1 && channels != 2 ) {
\r
8327 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
\r
8330 ss.channels = channels;
\r
8332 if ( firstChannel != 0 ) return false;
\r
8334 bool sr_found = false;
\r
8335 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
\r
8336 if ( sampleRate == *sr ) {
\r
8338 stream_.sampleRate = sampleRate;
\r
8339 ss.rate = sampleRate;
\r
8343 if ( !sr_found ) {
\r
8344 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
\r
8348 bool sf_found = 0;
\r
8349 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
\r
8350 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
\r
8351 if ( format == sf->rtaudio_format ) {
\r
8353 stream_.userFormat = sf->rtaudio_format;
\r
8354 stream_.deviceFormat[mode] = stream_.userFormat;
\r
8355 ss.format = sf->pa_format;
\r
8359 if ( !sf_found ) { // Use internal data format conversion.
\r
8360 stream_.userFormat = format;
\r
8361 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
\r
8362 ss.format = PA_SAMPLE_FLOAT32LE;
\r
8365 // Set other stream parameters.
\r
8366 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
\r
8367 else stream_.userInterleaved = true;
\r
8368 stream_.deviceInterleaved[mode] = true;
\r
8369 stream_.nBuffers = 1;
\r
8370 stream_.doByteSwap[mode] = false;
\r
8371 stream_.nUserChannels[mode] = channels;
\r
8372 stream_.nDeviceChannels[mode] = channels + firstChannel;
\r
8373 stream_.channelOffset[mode] = 0;
\r
8374 std::string streamName = "RtAudio";
\r
8376 // Set flags for buffer conversion.
\r
8377 stream_.doConvertBuffer[mode] = false;
\r
8378 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8379 stream_.doConvertBuffer[mode] = true;
\r
8380 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8381 stream_.doConvertBuffer[mode] = true;
\r
8383 // Allocate necessary internal buffers.
\r
8384 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8385 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8386 if ( stream_.userBuffer[mode] == NULL ) {
\r
8387 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
\r
8390 stream_.bufferSize = *bufferSize;
\r
8392 if ( stream_.doConvertBuffer[mode] ) {
\r
8394 bool makeBuffer = true;
\r
8395 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
8396 if ( mode == INPUT ) {
\r
8397 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
8398 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
8399 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
8403 if ( makeBuffer ) {
\r
8404 bufferBytes *= *bufferSize;
\r
8405 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
8406 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
8407 if ( stream_.deviceBuffer == NULL ) {
\r
8408 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
\r
8414 stream_.device[mode] = device;
\r
8416 // Setup the buffer conversion information structure.
\r
8417 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
8419 if ( !stream_.apiHandle ) {
\r
8420 PulseAudioHandle *pah = new PulseAudioHandle;
\r
8422 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
\r
8426 stream_.apiHandle = pah;
\r
8427 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
\r
8428 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
\r
8432 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
\r
8435 if ( !options->streamName.empty() ) streamName = options->streamName;
\r
8438 pa_buffer_attr buffer_attr;
\r
8439 buffer_attr.fragsize = bufferBytes;
\r
8440 buffer_attr.maxlength = -1;
\r
8442 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
\r
8443 if ( !pah->s_rec ) {
\r
8444 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
\r
8449 pah->s_play = pa_simple_new( NULL, "RtAudio", PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
\r
8450 if ( !pah->s_play ) {
\r
8451 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
\r
8459 if ( stream_.mode == UNINITIALIZED )
\r
8460 stream_.mode = mode;
\r
8461 else if ( stream_.mode == mode )
\r
8464 stream_.mode = DUPLEX;
\r
8466 if ( !stream_.callbackInfo.isRunning ) {
\r
8467 stream_.callbackInfo.object = this;
\r
8468 stream_.callbackInfo.isRunning = true;
\r
8469 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
\r
8470 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
\r
8475 stream_.state = STREAM_STOPPED;
\r
8479 if ( pah && stream_.callbackInfo.isRunning ) {
\r
8480 pthread_cond_destroy( &pah->runnable_cv );
\r
8482 stream_.apiHandle = 0;
\r
8485 for ( int i=0; i<2; i++ ) {
\r
8486 if ( stream_.userBuffer[i] ) {
\r
8487 free( stream_.userBuffer[i] );
\r
8488 stream_.userBuffer[i] = 0;
\r
8492 if ( stream_.deviceBuffer ) {
\r
8493 free( stream_.deviceBuffer );
\r
8494 stream_.deviceBuffer = 0;
\r
8500 //******************** End of __LINUX_PULSE__ *********************//
\r
8503 #if defined(__LINUX_OSS__)
\r
8505 #include <unistd.h>
\r
8506 #include <sys/ioctl.h>
\r
8507 #include <unistd.h>
\r
8508 #include <fcntl.h>
\r
8509 #include <sys/soundcard.h>
\r
8510 #include <errno.h>
\r
8513 static void *ossCallbackHandler(void * ptr);
\r
8515 // A structure to hold various information related to the OSS API
\r
8516 // implementation.
\r
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];               // device ids (0 = playback, 1 = capture)
  bool xrun[2];            // over/underrun flags per direction
  bool triggered;          // output trigger state
  pthread_cond_t runnable; // signals the callback thread to run

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
\r
8527 RtApiOss :: RtApiOss()
\r
8529 // Nothing to do here.
\r
8532 RtApiOss :: ~RtApiOss()
\r
8534 if ( stream_.state != STREAM_CLOSED ) closeStream();
\r
8537 unsigned int RtApiOss :: getDeviceCount( void )
\r
8539 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8540 if ( mixerfd == -1 ) {
\r
8541 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
\r
8542 error( RtAudioError::WARNING );
\r
8546 oss_sysinfo sysinfo;
\r
8547 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
\r
8549 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8550 error( RtAudioError::WARNING );
\r
8555 return sysinfo.numaudios;
\r
8558 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
\r
8560 RtAudio::DeviceInfo info;
\r
8561 info.probed = false;
\r
8563 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8564 if ( mixerfd == -1 ) {
\r
8565 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
\r
8566 error( RtAudioError::WARNING );
\r
8570 oss_sysinfo sysinfo;
\r
8571 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8572 if ( result == -1 ) {
\r
8574 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8575 error( RtAudioError::WARNING );
\r
8579 unsigned nDevices = sysinfo.numaudios;
\r
8580 if ( nDevices == 0 ) {
\r
8582 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
\r
8583 error( RtAudioError::INVALID_USE );
\r
8587 if ( device >= nDevices ) {
\r
8589 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
\r
8590 error( RtAudioError::INVALID_USE );
\r
8594 oss_audioinfo ainfo;
\r
8595 ainfo.dev = device;
\r
8596 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8598 if ( result == -1 ) {
\r
8599 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8600 errorText_ = errorStream_.str();
\r
8601 error( RtAudioError::WARNING );
\r
8606 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
\r
8607 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
\r
8608 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
\r
8609 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
\r
8610 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
\r
8613 // Probe data formats ... do for input
\r
8614 unsigned long mask = ainfo.iformats;
\r
8615 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
\r
8616 info.nativeFormats |= RTAUDIO_SINT16;
\r
8617 if ( mask & AFMT_S8 )
\r
8618 info.nativeFormats |= RTAUDIO_SINT8;
\r
8619 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
\r
8620 info.nativeFormats |= RTAUDIO_SINT32;
\r
8621 if ( mask & AFMT_FLOAT )
\r
8622 info.nativeFormats |= RTAUDIO_FLOAT32;
\r
8623 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
\r
8624 info.nativeFormats |= RTAUDIO_SINT24;
\r
8626 // Check that we have at least one supported format
\r
8627 if ( info.nativeFormats == 0 ) {
\r
8628 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8629 errorText_ = errorStream_.str();
\r
8630 error( RtAudioError::WARNING );
\r
8634 // Probe the supported sample rates.
\r
8635 info.sampleRates.clear();
\r
8636 if ( ainfo.nrates ) {
\r
8637 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
\r
8638 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8639 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
\r
8640 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8647 // Check min and max rate values;
\r
8648 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
\r
8649 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] )
\r
8650 info.sampleRates.push_back( SAMPLE_RATES[k] );
\r
8654 if ( info.sampleRates.size() == 0 ) {
\r
8655 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
\r
8656 errorText_ = errorStream_.str();
\r
8657 error( RtAudioError::WARNING );
\r
8660 info.probed = true;
\r
8661 info.name = ainfo.name;
\r
8668 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
\r
8669 unsigned int firstChannel, unsigned int sampleRate,
\r
8670 RtAudioFormat format, unsigned int *bufferSize,
\r
8671 RtAudio::StreamOptions *options )
\r
8673 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
\r
8674 if ( mixerfd == -1 ) {
\r
8675 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
\r
8679 oss_sysinfo sysinfo;
\r
8680 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
\r
8681 if ( result == -1 ) {
\r
8683 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
\r
8687 unsigned nDevices = sysinfo.numaudios;
\r
8688 if ( nDevices == 0 ) {
\r
8689 // This should not happen because a check is made before this function is called.
\r
8691 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
\r
8695 if ( device >= nDevices ) {
\r
8696 // This should not happen because a check is made before this function is called.
\r
8698 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
\r
8702 oss_audioinfo ainfo;
\r
8703 ainfo.dev = device;
\r
8704 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
\r
8706 if ( result == -1 ) {
\r
8707 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
\r
8708 errorText_ = errorStream_.str();
\r
8712 // Check if device supports input or output
\r
8713 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
\r
8714 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
\r
8715 if ( mode == OUTPUT )
\r
8716 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
\r
8718 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
\r
8719 errorText_ = errorStream_.str();
\r
8724 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
8725 if ( mode == OUTPUT )
\r
8726 flags |= O_WRONLY;
\r
8727 else { // mode == INPUT
\r
8728 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8729 // We just set the same device for playback ... close and reopen for duplex (OSS only).
\r
8730 close( handle->id[0] );
\r
8731 handle->id[0] = 0;
\r
8732 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
\r
8733 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
\r
8734 errorText_ = errorStream_.str();
\r
8737 // Check that the number previously set channels is the same.
\r
8738 if ( stream_.nUserChannels[0] != channels ) {
\r
8739 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
\r
8740 errorText_ = errorStream_.str();
\r
8746 flags |= O_RDONLY;
\r
8749 // Set exclusive access if specified.
\r
8750 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
\r
8752 // Try to open the device.
\r
8754 fd = open( ainfo.devnode, flags, 0 );
\r
8756 if ( errno == EBUSY )
\r
8757 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
\r
8759 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
\r
8760 errorText_ = errorStream_.str();
\r
8764 // For duplex operation, specifically set this mode (this doesn't seem to work).
\r
8766 if ( flags | O_RDWR ) {
\r
8767 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
\r
8768 if ( result == -1) {
\r
8769 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
\r
8770 errorText_ = errorStream_.str();
\r
8776 // Check the device channel support.
\r
8777 stream_.nUserChannels[mode] = channels;
\r
8778 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
\r
8780 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
\r
8781 errorText_ = errorStream_.str();
\r
8785 // Set the number of channels.
\r
8786 int deviceChannels = channels + firstChannel;
\r
8787 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
\r
8788 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
\r
8790 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
\r
8791 errorText_ = errorStream_.str();
\r
8794 stream_.nDeviceChannels[mode] = deviceChannels;
\r
8796 // Get the data format mask
\r
8798 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
\r
8799 if ( result == -1 ) {
\r
8801 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
\r
8802 errorText_ = errorStream_.str();
\r
8806 // Determine how to set the device format.
\r
8807 stream_.userFormat = format;
\r
8808 int deviceFormat = -1;
\r
8809 stream_.doByteSwap[mode] = false;
\r
8810 if ( format == RTAUDIO_SINT8 ) {
\r
8811 if ( mask & AFMT_S8 ) {
\r
8812 deviceFormat = AFMT_S8;
\r
8813 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8816 else if ( format == RTAUDIO_SINT16 ) {
\r
8817 if ( mask & AFMT_S16_NE ) {
\r
8818 deviceFormat = AFMT_S16_NE;
\r
8819 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8821 else if ( mask & AFMT_S16_OE ) {
\r
8822 deviceFormat = AFMT_S16_OE;
\r
8823 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8824 stream_.doByteSwap[mode] = true;
\r
8827 else if ( format == RTAUDIO_SINT24 ) {
\r
8828 if ( mask & AFMT_S24_NE ) {
\r
8829 deviceFormat = AFMT_S24_NE;
\r
8830 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8832 else if ( mask & AFMT_S24_OE ) {
\r
8833 deviceFormat = AFMT_S24_OE;
\r
8834 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8835 stream_.doByteSwap[mode] = true;
\r
8838 else if ( format == RTAUDIO_SINT32 ) {
\r
8839 if ( mask & AFMT_S32_NE ) {
\r
8840 deviceFormat = AFMT_S32_NE;
\r
8841 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8843 else if ( mask & AFMT_S32_OE ) {
\r
8844 deviceFormat = AFMT_S32_OE;
\r
8845 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8846 stream_.doByteSwap[mode] = true;
\r
8850 if ( deviceFormat == -1 ) {
\r
8851 // The user requested format is not natively supported by the device.
\r
8852 if ( mask & AFMT_S16_NE ) {
\r
8853 deviceFormat = AFMT_S16_NE;
\r
8854 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8856 else if ( mask & AFMT_S32_NE ) {
\r
8857 deviceFormat = AFMT_S32_NE;
\r
8858 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8860 else if ( mask & AFMT_S24_NE ) {
\r
8861 deviceFormat = AFMT_S24_NE;
\r
8862 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8864 else if ( mask & AFMT_S16_OE ) {
\r
8865 deviceFormat = AFMT_S16_OE;
\r
8866 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
\r
8867 stream_.doByteSwap[mode] = true;
\r
8869 else if ( mask & AFMT_S32_OE ) {
\r
8870 deviceFormat = AFMT_S32_OE;
\r
8871 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
\r
8872 stream_.doByteSwap[mode] = true;
\r
8874 else if ( mask & AFMT_S24_OE ) {
\r
8875 deviceFormat = AFMT_S24_OE;
\r
8876 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
\r
8877 stream_.doByteSwap[mode] = true;
\r
8879 else if ( mask & AFMT_S8) {
\r
8880 deviceFormat = AFMT_S8;
\r
8881 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
\r
8885 if ( stream_.deviceFormat[mode] == 0 ) {
\r
8886 // This really shouldn't happen ...
\r
8888 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
\r
8889 errorText_ = errorStream_.str();
\r
8893 // Set the data format.
\r
8894 int temp = deviceFormat;
\r
8895 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
\r
8896 if ( result == -1 || deviceFormat != temp ) {
\r
8898 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
\r
8899 errorText_ = errorStream_.str();
\r
8903 // Attempt to set the buffer size. According to OSS, the minimum
\r
8904 // number of buffers is two. The supposed minimum buffer size is 16
\r
8905 // bytes, so that will be our lower bound. The argument to this
\r
8906 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
\r
8907 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
\r
8908 // We'll check the actual value used near the end of the setup
\r
8910 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
\r
8911 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
\r
8913 if ( options ) buffers = options->numberOfBuffers;
\r
8914 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
\r
8915 if ( buffers < 2 ) buffers = 3;
\r
8916 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
\r
8917 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
\r
8918 if ( result == -1 ) {
\r
8920 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
\r
8921 errorText_ = errorStream_.str();
\r
8924 stream_.nBuffers = buffers;
\r
8926 // Save buffer size (in sample frames).
\r
8927 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
\r
8928 stream_.bufferSize = *bufferSize;
\r
8930 // Set the sample rate.
\r
8931 int srate = sampleRate;
\r
8932 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
\r
8933 if ( result == -1 ) {
\r
8935 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
\r
8936 errorText_ = errorStream_.str();
\r
8940 // Verify the sample rate setup worked.
\r
8941 if ( abs( srate - sampleRate ) > 100 ) {
\r
8943 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
\r
8944 errorText_ = errorStream_.str();
\r
8947 stream_.sampleRate = sampleRate;
\r
8949 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
\r
8950 // We're doing duplex setup here.
\r
8951 stream_.deviceFormat[0] = stream_.deviceFormat[1];
\r
8952 stream_.nDeviceChannels[0] = deviceChannels;
\r
8955 // Set interleaving parameters.
\r
8956 stream_.userInterleaved = true;
\r
8957 stream_.deviceInterleaved[mode] = true;
\r
8958 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
\r
8959 stream_.userInterleaved = false;
\r
8961 // Set flags for buffer conversion
\r
8962 stream_.doConvertBuffer[mode] = false;
\r
8963 if ( stream_.userFormat != stream_.deviceFormat[mode] )
\r
8964 stream_.doConvertBuffer[mode] = true;
\r
8965 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
\r
8966 stream_.doConvertBuffer[mode] = true;
\r
8967 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
\r
8968 stream_.nUserChannels[mode] > 1 )
\r
8969 stream_.doConvertBuffer[mode] = true;
\r
8971 // Allocate the stream handles if necessary and then save.
\r
8972 if ( stream_.apiHandle == 0 ) {
\r
8974 handle = new OssHandle;
\r
8976 catch ( std::bad_alloc& ) {
\r
8977 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
\r
8981 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
\r
8982 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
\r
8986 stream_.apiHandle = (void *) handle;
\r
8989 handle = (OssHandle *) stream_.apiHandle;
\r
8991 handle->id[mode] = fd;
\r
8993 // Allocate necessary internal buffers.
\r
8994 unsigned long bufferBytes;
\r
8995 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
\r
8996 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
\r
8997 if ( stream_.userBuffer[mode] == NULL ) {
\r
8998 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
\r
9002 if ( stream_.doConvertBuffer[mode] ) {
\r
9004 bool makeBuffer = true;
\r
9005 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
\r
9006 if ( mode == INPUT ) {
\r
9007 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
\r
9008 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
\r
9009 if ( bufferBytes <= bytesOut ) makeBuffer = false;
\r
9013 if ( makeBuffer ) {
\r
9014 bufferBytes *= *bufferSize;
\r
9015 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
\r
9016 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
\r
9017 if ( stream_.deviceBuffer == NULL ) {
\r
9018 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
\r
9024 stream_.device[mode] = device;
\r
9025 stream_.state = STREAM_STOPPED;
\r
9027 // Setup the buffer conversion information structure.
\r
9028 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
\r
9030 // Setup thread if necessary.
\r
9031 if ( stream_.mode == OUTPUT && mode == INPUT ) {
\r
9032 // We had already set up an output stream.
\r
9033 stream_.mode = DUPLEX;
\r
9034 if ( stream_.device[0] == device ) handle->id[0] = fd;
\r
9037 stream_.mode = mode;
\r
9039 // Setup callback thread.
\r
9040 stream_.callbackInfo.object = (void *) this;
\r
9042 // Set the thread attributes for joinable and realtime scheduling
\r
9043 // priority. The higher priority will only take affect if the
\r
9044 // program is run as root or suid.
\r
9045 pthread_attr_t attr;
\r
9046 pthread_attr_init( &attr );
\r
9047 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
\r
9048 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
\r
9049 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
\r
9050 struct sched_param param;
\r
9051 int priority = options->priority;
\r
9052 int min = sched_get_priority_min( SCHED_RR );
\r
9053 int max = sched_get_priority_max( SCHED_RR );
\r
9054 if ( priority < min ) priority = min;
\r
9055 else if ( priority > max ) priority = max;
\r
9056 param.sched_priority = priority;
\r
9057 pthread_attr_setschedparam( &attr, ¶m );
\r
9058 pthread_attr_setschedpolicy( &attr, SCHED_RR );
\r
9061 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9063 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
\r
9066 stream_.callbackInfo.isRunning = true;
\r
9067 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
\r
9068 pthread_attr_destroy( &attr );
\r
9070 stream_.callbackInfo.isRunning = false;
\r
9071 errorText_ = "RtApiOss::error creating callback thread!";
\r
9080 pthread_cond_destroy( &handle->runnable );
\r
9081 if ( handle->id[0] ) close( handle->id[0] );
\r
9082 if ( handle->id[1] ) close( handle->id[1] );
\r
9084 stream_.apiHandle = 0;
\r
9087 for ( int i=0; i<2; i++ ) {
\r
9088 if ( stream_.userBuffer[i] ) {
\r
9089 free( stream_.userBuffer[i] );
\r
9090 stream_.userBuffer[i] = 0;
\r
9094 if ( stream_.deviceBuffer ) {
\r
9095 free( stream_.deviceBuffer );
\r
9096 stream_.deviceBuffer = 0;
\r
9102 void RtApiOss :: closeStream()
\r
9104 if ( stream_.state == STREAM_CLOSED ) {
\r
9105 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
\r
9106 error( RtAudioError::WARNING );
\r
9110 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9111 stream_.callbackInfo.isRunning = false;
\r
9112 MUTEX_LOCK( &stream_.mutex );
\r
9113 if ( stream_.state == STREAM_STOPPED )
\r
9114 pthread_cond_signal( &handle->runnable );
\r
9115 MUTEX_UNLOCK( &stream_.mutex );
\r
9116 pthread_join( stream_.callbackInfo.thread, NULL );
\r
9118 if ( stream_.state == STREAM_RUNNING ) {
\r
9119 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
\r
9120 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9122 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9123 stream_.state = STREAM_STOPPED;
\r
9127 pthread_cond_destroy( &handle->runnable );
\r
9128 if ( handle->id[0] ) close( handle->id[0] );
\r
9129 if ( handle->id[1] ) close( handle->id[1] );
\r
9131 stream_.apiHandle = 0;
\r
9134 for ( int i=0; i<2; i++ ) {
\r
9135 if ( stream_.userBuffer[i] ) {
\r
9136 free( stream_.userBuffer[i] );
\r
9137 stream_.userBuffer[i] = 0;
\r
9141 if ( stream_.deviceBuffer ) {
\r
9142 free( stream_.deviceBuffer );
\r
9143 stream_.deviceBuffer = 0;
\r
9146 stream_.mode = UNINITIALIZED;
\r
9147 stream_.state = STREAM_CLOSED;
\r
9150 void RtApiOss :: startStream()
\r
9153 if ( stream_.state == STREAM_RUNNING ) {
\r
9154 errorText_ = "RtApiOss::startStream(): the stream is already running!";
\r
9155 error( RtAudioError::WARNING );
\r
9159 MUTEX_LOCK( &stream_.mutex );
\r
9161 stream_.state = STREAM_RUNNING;
\r
9163 // No need to do anything else here ... OSS automatically starts
\r
9164 // when fed samples.
\r
9166 MUTEX_UNLOCK( &stream_.mutex );
\r
9168 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9169 pthread_cond_signal( &handle->runnable );
\r
9172 void RtApiOss :: stopStream()
\r
9175 if ( stream_.state == STREAM_STOPPED ) {
\r
9176 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
\r
9177 error( RtAudioError::WARNING );
\r
9181 MUTEX_LOCK( &stream_.mutex );
\r
9183 // The state might change while waiting on a mutex.
\r
9184 if ( stream_.state == STREAM_STOPPED ) {
\r
9185 MUTEX_UNLOCK( &stream_.mutex );
\r
9190 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9191 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9193 // Flush the output with zeros a few times.
\r
9196 RtAudioFormat format;
\r
9198 if ( stream_.doConvertBuffer[0] ) {
\r
9199 buffer = stream_.deviceBuffer;
\r
9200 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9201 format = stream_.deviceFormat[0];
\r
9204 buffer = stream_.userBuffer[0];
\r
9205 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9206 format = stream_.userFormat;
\r
9209 memset( buffer, 0, samples * formatBytes(format) );
\r
9210 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
\r
9211 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9212 if ( result == -1 ) {
\r
9213 errorText_ = "RtApiOss::stopStream: audio write error.";
\r
9214 error( RtAudioError::WARNING );
\r
9218 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9219 if ( result == -1 ) {
\r
9220 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9221 errorText_ = errorStream_.str();
\r
9224 handle->triggered = false;
\r
9227 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9228 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9229 if ( result == -1 ) {
\r
9230 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9231 errorText_ = errorStream_.str();
\r
9237 stream_.state = STREAM_STOPPED;
\r
9238 MUTEX_UNLOCK( &stream_.mutex );
\r
9240 if ( result != -1 ) return;
\r
9241 error( RtAudioError::SYSTEM_ERROR );
\r
9244 void RtApiOss :: abortStream()
\r
9247 if ( stream_.state == STREAM_STOPPED ) {
\r
9248 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
\r
9249 error( RtAudioError::WARNING );
\r
9253 MUTEX_LOCK( &stream_.mutex );
\r
9255 // The state might change while waiting on a mutex.
\r
9256 if ( stream_.state == STREAM_STOPPED ) {
\r
9257 MUTEX_UNLOCK( &stream_.mutex );
\r
9262 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9263 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9264 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
\r
9265 if ( result == -1 ) {
\r
9266 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
\r
9267 errorText_ = errorStream_.str();
\r
9270 handle->triggered = false;
\r
9273 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
\r
9274 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
\r
9275 if ( result == -1 ) {
\r
9276 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
\r
9277 errorText_ = errorStream_.str();
\r
9283 stream_.state = STREAM_STOPPED;
\r
9284 MUTEX_UNLOCK( &stream_.mutex );
\r
9286 if ( result != -1 ) return;
\r
9287 error( RtAudioError::SYSTEM_ERROR );
\r
9290 void RtApiOss :: callbackEvent()
\r
9292 OssHandle *handle = (OssHandle *) stream_.apiHandle;
\r
9293 if ( stream_.state == STREAM_STOPPED ) {
\r
9294 MUTEX_LOCK( &stream_.mutex );
\r
9295 pthread_cond_wait( &handle->runnable, &stream_.mutex );
\r
9296 if ( stream_.state != STREAM_RUNNING ) {
\r
9297 MUTEX_UNLOCK( &stream_.mutex );
\r
9300 MUTEX_UNLOCK( &stream_.mutex );
\r
9303 if ( stream_.state == STREAM_CLOSED ) {
\r
9304 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
\r
9305 error( RtAudioError::WARNING );
\r
9309 // Invoke user callback to get fresh output data.
\r
9310 int doStopStream = 0;
\r
9311 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
\r
9312 double streamTime = getStreamTime();
\r
9313 RtAudioStreamStatus status = 0;
\r
9314 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
\r
9315 status |= RTAUDIO_OUTPUT_UNDERFLOW;
\r
9316 handle->xrun[0] = false;
\r
9318 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
\r
9319 status |= RTAUDIO_INPUT_OVERFLOW;
\r
9320 handle->xrun[1] = false;
\r
9322 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
\r
9323 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
\r
9324 if ( doStopStream == 2 ) {
\r
9325 this->abortStream();
\r
9329 MUTEX_LOCK( &stream_.mutex );
\r
9331 // The state might change while waiting on a mutex.
\r
9332 if ( stream_.state == STREAM_STOPPED ) goto unlock;
\r
9337 RtAudioFormat format;
\r
9339 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
\r
9341 // Setup parameters and do buffer conversion if necessary.
\r
9342 if ( stream_.doConvertBuffer[0] ) {
\r
9343 buffer = stream_.deviceBuffer;
\r
9344 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
\r
9345 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
\r
9346 format = stream_.deviceFormat[0];
\r
9349 buffer = stream_.userBuffer[0];
\r
9350 samples = stream_.bufferSize * stream_.nUserChannels[0];
\r
9351 format = stream_.userFormat;
\r
9354 // Do byte swapping if necessary.
\r
9355 if ( stream_.doByteSwap[0] )
\r
9356 byteSwapBuffer( buffer, samples, format );
\r
9358 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
\r
9360 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9361 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9362 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
\r
9363 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
\r
9364 handle->triggered = true;
\r
9367 // Write samples to device.
\r
9368 result = write( handle->id[0], buffer, samples * formatBytes(format) );
\r
9370 if ( result == -1 ) {
\r
9371 // We'll assume this is an underrun, though there isn't a
\r
9372 // specific means for determining that.
\r
9373 handle->xrun[0] = true;
\r
9374 errorText_ = "RtApiOss::callbackEvent: audio write error.";
\r
9375 error( RtAudioError::WARNING );
\r
9376 // Continue on to input section.
\r
9380 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
\r
9382 // Setup parameters.
\r
9383 if ( stream_.doConvertBuffer[1] ) {
\r
9384 buffer = stream_.deviceBuffer;
\r
9385 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
\r
9386 format = stream_.deviceFormat[1];
\r
9389 buffer = stream_.userBuffer[1];
\r
9390 samples = stream_.bufferSize * stream_.nUserChannels[1];
\r
9391 format = stream_.userFormat;
\r
9394 // Read samples from device.
\r
9395 result = read( handle->id[1], buffer, samples * formatBytes(format) );
\r
9397 if ( result == -1 ) {
\r
9398 // We'll assume this is an overrun, though there isn't a
\r
9399 // specific means for determining that.
\r
9400 handle->xrun[1] = true;
\r
9401 errorText_ = "RtApiOss::callbackEvent: audio read error.";
\r
9402 error( RtAudioError::WARNING );
\r
9406 // Do byte swapping if necessary.
\r
9407 if ( stream_.doByteSwap[1] )
\r
9408 byteSwapBuffer( buffer, samples, format );
\r
9410 // Do buffer conversion if necessary.
\r
9411 if ( stream_.doConvertBuffer[1] )
\r
9412 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
\r
9416 MUTEX_UNLOCK( &stream_.mutex );
\r
9418 RtApi::tickStreamTime();
\r
9419 if ( doStopStream == 1 ) this->stopStream();
\r
9422 static void *ossCallbackHandler( void *ptr )
\r
9424 CallbackInfo *info = (CallbackInfo *) ptr;
\r
9425 RtApiOss *object = (RtApiOss *) info->object;
\r
9426 bool *isRunning = &info->isRunning;
\r
9428 while ( *isRunning == true ) {
\r
9429 pthread_testcancel();
\r
9430 object->callbackEvent();
\r
9433 pthread_exit( NULL );
\r
9436 //******************** End of __LINUX_OSS__ *********************//
\r
9440 // *************************************************** //
\r
9442 // Protected common (OS-independent) RtAudio methods.
\r
9444 // *************************************************** //
\r
9446 // This method can be modified to control the behavior of error
\r
9447 // message printing.
\r
9448 void RtApi :: error( RtAudioError::Type type )
\r
9450 errorStream_.str(""); // clear the ostringstream
\r
9452 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
\r
9453 if ( errorCallback ) {
\r
9454 // abortStream() can generate new error messages. Ignore them. Just keep original one.
\r
9456 if ( firstErrorOccurred_ )
\r
9459 firstErrorOccurred_ = true;
\r
9460 const std::string errorMessage = errorText_;
\r
9462 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
\r
9463 stream_.callbackInfo.isRunning = false; // exit from the thread
\r
9467 errorCallback( type, errorMessage );
\r
9468 firstErrorOccurred_ = false;
\r
9472 if ( type == RtAudioError::WARNING && showWarnings_ == true )
\r
9473 std::cerr << '\n' << errorText_ << "\n\n";
\r
9474 else if ( type != RtAudioError::WARNING )
\r
9475 throw( RtAudioError( errorText_, type ) );
\r
9478 void RtApi :: verifyStream()
\r
9480 if ( stream_.state == STREAM_CLOSED ) {
\r
9481 errorText_ = "RtApi:: a stream is not open!";
\r
9482 error( RtAudioError::INVALID_USE );
\r
9486 void RtApi :: clearStreamInfo()
\r
9488 stream_.mode = UNINITIALIZED;
\r
9489 stream_.state = STREAM_CLOSED;
\r
9490 stream_.sampleRate = 0;
\r
9491 stream_.bufferSize = 0;
\r
9492 stream_.nBuffers = 0;
\r
9493 stream_.userFormat = 0;
\r
9494 stream_.userInterleaved = true;
\r
9495 stream_.streamTime = 0.0;
\r
9496 stream_.apiHandle = 0;
\r
9497 stream_.deviceBuffer = 0;
\r
9498 stream_.callbackInfo.callback = 0;
\r
9499 stream_.callbackInfo.userData = 0;
\r
9500 stream_.callbackInfo.isRunning = false;
\r
9501 stream_.callbackInfo.errorCallback = 0;
\r
9502 for ( int i=0; i<2; i++ ) {
\r
9503 stream_.device[i] = 11111;
\r
9504 stream_.doConvertBuffer[i] = false;
\r
9505 stream_.deviceInterleaved[i] = true;
\r
9506 stream_.doByteSwap[i] = false;
\r
9507 stream_.nUserChannels[i] = 0;
\r
9508 stream_.nDeviceChannels[i] = 0;
\r
9509 stream_.channelOffset[i] = 0;
\r
9510 stream_.deviceFormat[i] = 0;
\r
9511 stream_.latency[i] = 0;
\r
9512 stream_.userBuffer[i] = 0;
\r
9513 stream_.convertInfo[i].channels = 0;
\r
9514 stream_.convertInfo[i].inJump = 0;
\r
9515 stream_.convertInfo[i].outJump = 0;
\r
9516 stream_.convertInfo[i].inFormat = 0;
\r
9517 stream_.convertInfo[i].outFormat = 0;
\r
9518 stream_.convertInfo[i].inOffset.clear();
\r
9519 stream_.convertInfo[i].outOffset.clear();
\r
9523 unsigned int RtApi :: formatBytes( RtAudioFormat format )
\r
9525 if ( format == RTAUDIO_SINT16 )
\r
9527 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
\r
9529 else if ( format == RTAUDIO_FLOAT64 )
\r
9531 else if ( format == RTAUDIO_SINT24 )
\r
9533 else if ( format == RTAUDIO_SINT8 )
\r
9536 errorText_ = "RtApi::formatBytes: undefined format.";
\r
9537 error( RtAudioError::WARNING );
\r
9542 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
\r
9544 if ( mode == INPUT ) { // convert device to user buffer
\r
9545 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
\r
9546 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
\r
9547 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
\r
9548 stream_.convertInfo[mode].outFormat = stream_.userFormat;
\r
9550 else { // convert user to device buffer
\r
9551 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
\r
9552 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
\r
9553 stream_.convertInfo[mode].inFormat = stream_.userFormat;
\r
9554 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
\r
9557 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
\r
9558 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
\r
9560 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
\r
9562 // Set up the interleave/deinterleave offsets.
\r
9563 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
\r
9564 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
\r
9565 ( mode == INPUT && stream_.userInterleaved ) ) {
\r
9566 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9567 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9568 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9569 stream_.convertInfo[mode].inJump = 1;
\r
9573 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9574 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9575 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9576 stream_.convertInfo[mode].outJump = 1;
\r
9580 else { // no (de)interleaving
\r
9581 if ( stream_.userInterleaved ) {
\r
9582 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9583 stream_.convertInfo[mode].inOffset.push_back( k );
\r
9584 stream_.convertInfo[mode].outOffset.push_back( k );
\r
9588 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
\r
9589 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
\r
9590 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
\r
9591 stream_.convertInfo[mode].inJump = 1;
\r
9592 stream_.convertInfo[mode].outJump = 1;
\r
9597 // Add channel offset.
\r
9598 if ( firstChannel > 0 ) {
\r
9599 if ( stream_.deviceInterleaved[mode] ) {
\r
9600 if ( mode == OUTPUT ) {
\r
9601 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9602 stream_.convertInfo[mode].outOffset[k] += firstChannel;
\r
9605 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9606 stream_.convertInfo[mode].inOffset[k] += firstChannel;
\r
9610 if ( mode == OUTPUT ) {
\r
9611 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9612 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9615 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
\r
9616 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
\r
9622 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
\r
9624 // This function does format conversion, input/output channel compensation, and
\r
9625 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
\r
9626 // the lower three bytes of a 32-bit integer.
\r
9628 // Clear our device buffer when in/out duplex device channels are different
\r
9629 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
\r
9630 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
\r
9631 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
\r
9634 if (info.outFormat == RTAUDIO_FLOAT64) {
\r
9636 Float64 *out = (Float64 *)outBuffer;
\r
9638 if (info.inFormat == RTAUDIO_SINT8) {
\r
9639 signed char *in = (signed char *)inBuffer;
\r
9640 scale = 1.0 / 127.5;
\r
9641 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9642 for (j=0; j<info.channels; j++) {
\r
9643 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9644 out[info.outOffset[j]] += 0.5;
\r
9645 out[info.outOffset[j]] *= scale;
\r
9647 in += info.inJump;
\r
9648 out += info.outJump;
\r
9651 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9652 Int16 *in = (Int16 *)inBuffer;
\r
9653 scale = 1.0 / 32767.5;
\r
9654 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9655 for (j=0; j<info.channels; j++) {
\r
9656 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9657 out[info.outOffset[j]] += 0.5;
\r
9658 out[info.outOffset[j]] *= scale;
\r
9660 in += info.inJump;
\r
9661 out += info.outJump;
\r
9664 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9665 Int24 *in = (Int24 *)inBuffer;
\r
9666 scale = 1.0 / 8388607.5;
\r
9667 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9668 for (j=0; j<info.channels; j++) {
\r
9669 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
\r
9670 out[info.outOffset[j]] += 0.5;
\r
9671 out[info.outOffset[j]] *= scale;
\r
9673 in += info.inJump;
\r
9674 out += info.outJump;
\r
9677 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9678 Int32 *in = (Int32 *)inBuffer;
\r
9679 scale = 1.0 / 2147483647.5;
\r
9680 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9681 for (j=0; j<info.channels; j++) {
\r
9682 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9683 out[info.outOffset[j]] += 0.5;
\r
9684 out[info.outOffset[j]] *= scale;
\r
9686 in += info.inJump;
\r
9687 out += info.outJump;
\r
9690 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9691 Float32 *in = (Float32 *)inBuffer;
\r
9692 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9693 for (j=0; j<info.channels; j++) {
\r
9694 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
\r
9696 in += info.inJump;
\r
9697 out += info.outJump;
\r
9700 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9701 // Channel compensation and/or (de)interleaving only.
\r
9702 Float64 *in = (Float64 *)inBuffer;
\r
9703 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9704 for (j=0; j<info.channels; j++) {
\r
9705 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9707 in += info.inJump;
\r
9708 out += info.outJump;
\r
9712 else if (info.outFormat == RTAUDIO_FLOAT32) {
\r
9714 Float32 *out = (Float32 *)outBuffer;
\r
9716 if (info.inFormat == RTAUDIO_SINT8) {
\r
9717 signed char *in = (signed char *)inBuffer;
\r
9718 scale = (Float32) ( 1.0 / 127.5 );
\r
9719 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9720 for (j=0; j<info.channels; j++) {
\r
9721 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9722 out[info.outOffset[j]] += 0.5;
\r
9723 out[info.outOffset[j]] *= scale;
\r
9725 in += info.inJump;
\r
9726 out += info.outJump;
\r
9729 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9730 Int16 *in = (Int16 *)inBuffer;
\r
9731 scale = (Float32) ( 1.0 / 32767.5 );
\r
9732 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9733 for (j=0; j<info.channels; j++) {
\r
9734 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9735 out[info.outOffset[j]] += 0.5;
\r
9736 out[info.outOffset[j]] *= scale;
\r
9738 in += info.inJump;
\r
9739 out += info.outJump;
\r
9742 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9743 Int24 *in = (Int24 *)inBuffer;
\r
9744 scale = (Float32) ( 1.0 / 8388607.5 );
\r
9745 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9746 for (j=0; j<info.channels; j++) {
\r
9747 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
\r
9748 out[info.outOffset[j]] += 0.5;
\r
9749 out[info.outOffset[j]] *= scale;
\r
9751 in += info.inJump;
\r
9752 out += info.outJump;
\r
9755 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9756 Int32 *in = (Int32 *)inBuffer;
\r
9757 scale = (Float32) ( 1.0 / 2147483647.5 );
\r
9758 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9759 for (j=0; j<info.channels; j++) {
\r
9760 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9761 out[info.outOffset[j]] += 0.5;
\r
9762 out[info.outOffset[j]] *= scale;
\r
9764 in += info.inJump;
\r
9765 out += info.outJump;
\r
9768 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9769 // Channel compensation and/or (de)interleaving only.
\r
9770 Float32 *in = (Float32 *)inBuffer;
\r
9771 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9772 for (j=0; j<info.channels; j++) {
\r
9773 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9775 in += info.inJump;
\r
9776 out += info.outJump;
\r
9779 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9780 Float64 *in = (Float64 *)inBuffer;
\r
9781 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9782 for (j=0; j<info.channels; j++) {
\r
9783 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
\r
9785 in += info.inJump;
\r
9786 out += info.outJump;
\r
9790 else if (info.outFormat == RTAUDIO_SINT32) {
\r
9791 Int32 *out = (Int32 *)outBuffer;
\r
9792 if (info.inFormat == RTAUDIO_SINT8) {
\r
9793 signed char *in = (signed char *)inBuffer;
\r
9794 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9795 for (j=0; j<info.channels; j++) {
\r
9796 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9797 out[info.outOffset[j]] <<= 24;
\r
9799 in += info.inJump;
\r
9800 out += info.outJump;
\r
9803 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9804 Int16 *in = (Int16 *)inBuffer;
\r
9805 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9806 for (j=0; j<info.channels; j++) {
\r
9807 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
\r
9808 out[info.outOffset[j]] <<= 16;
\r
9810 in += info.inJump;
\r
9811 out += info.outJump;
\r
9814 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9815 Int24 *in = (Int24 *)inBuffer;
\r
9816 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9817 for (j=0; j<info.channels; j++) {
\r
9818 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
\r
9819 out[info.outOffset[j]] <<= 8;
\r
9821 in += info.inJump;
\r
9822 out += info.outJump;
\r
9825 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9826 // Channel compensation and/or (de)interleaving only.
\r
9827 Int32 *in = (Int32 *)inBuffer;
\r
9828 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9829 for (j=0; j<info.channels; j++) {
\r
9830 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9832 in += info.inJump;
\r
9833 out += info.outJump;
\r
9836 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9837 Float32 *in = (Float32 *)inBuffer;
\r
9838 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9839 for (j=0; j<info.channels; j++) {
\r
9840 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9842 in += info.inJump;
\r
9843 out += info.outJump;
\r
9846 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9847 Float64 *in = (Float64 *)inBuffer;
\r
9848 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9849 for (j=0; j<info.channels; j++) {
\r
9850 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
\r
9852 in += info.inJump;
\r
9853 out += info.outJump;
\r
9857 else if (info.outFormat == RTAUDIO_SINT24) {
\r
9858 Int24 *out = (Int24 *)outBuffer;
\r
9859 if (info.inFormat == RTAUDIO_SINT8) {
\r
9860 signed char *in = (signed char *)inBuffer;
\r
9861 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9862 for (j=0; j<info.channels; j++) {
\r
9863 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
\r
9864 //out[info.outOffset[j]] <<= 16;
\r
9866 in += info.inJump;
\r
9867 out += info.outJump;
\r
9870 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9871 Int16 *in = (Int16 *)inBuffer;
\r
9872 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9873 for (j=0; j<info.channels; j++) {
\r
9874 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
\r
9875 //out[info.outOffset[j]] <<= 8;
\r
9877 in += info.inJump;
\r
9878 out += info.outJump;
\r
9881 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9882 // Channel compensation and/or (de)interleaving only.
\r
9883 Int24 *in = (Int24 *)inBuffer;
\r
9884 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9885 for (j=0; j<info.channels; j++) {
\r
9886 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9888 in += info.inJump;
\r
9889 out += info.outJump;
\r
9892 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9893 Int32 *in = (Int32 *)inBuffer;
\r
9894 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9895 for (j=0; j<info.channels; j++) {
\r
9896 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
\r
9897 //out[info.outOffset[j]] >>= 8;
\r
9899 in += info.inJump;
\r
9900 out += info.outJump;
\r
9903 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9904 Float32 *in = (Float32 *)inBuffer;
\r
9905 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9906 for (j=0; j<info.channels; j++) {
\r
9907 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9909 in += info.inJump;
\r
9910 out += info.outJump;
\r
9913 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9914 Float64 *in = (Float64 *)inBuffer;
\r
9915 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9916 for (j=0; j<info.channels; j++) {
\r
9917 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
\r
9919 in += info.inJump;
\r
9920 out += info.outJump;
\r
9924 else if (info.outFormat == RTAUDIO_SINT16) {
\r
9925 Int16 *out = (Int16 *)outBuffer;
\r
9926 if (info.inFormat == RTAUDIO_SINT8) {
\r
9927 signed char *in = (signed char *)inBuffer;
\r
9928 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9929 for (j=0; j<info.channels; j++) {
\r
9930 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
\r
9931 out[info.outOffset[j]] <<= 8;
\r
9933 in += info.inJump;
\r
9934 out += info.outJump;
\r
9937 else if (info.inFormat == RTAUDIO_SINT16) {
\r
9938 // Channel compensation and/or (de)interleaving only.
\r
9939 Int16 *in = (Int16 *)inBuffer;
\r
9940 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9941 for (j=0; j<info.channels; j++) {
\r
9942 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9944 in += info.inJump;
\r
9945 out += info.outJump;
\r
9948 else if (info.inFormat == RTAUDIO_SINT24) {
\r
9949 Int24 *in = (Int24 *)inBuffer;
\r
9950 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9951 for (j=0; j<info.channels; j++) {
\r
9952 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
\r
9954 in += info.inJump;
\r
9955 out += info.outJump;
\r
9958 else if (info.inFormat == RTAUDIO_SINT32) {
\r
9959 Int32 *in = (Int32 *)inBuffer;
\r
9960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9961 for (j=0; j<info.channels; j++) {
\r
9962 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
\r
9964 in += info.inJump;
\r
9965 out += info.outJump;
\r
9968 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
9969 Float32 *in = (Float32 *)inBuffer;
\r
9970 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9971 for (j=0; j<info.channels; j++) {
\r
9972 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9974 in += info.inJump;
\r
9975 out += info.outJump;
\r
9978 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
9979 Float64 *in = (Float64 *)inBuffer;
\r
9980 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9981 for (j=0; j<info.channels; j++) {
\r
9982 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
\r
9984 in += info.inJump;
\r
9985 out += info.outJump;
\r
9989 else if (info.outFormat == RTAUDIO_SINT8) {
\r
9990 signed char *out = (signed char *)outBuffer;
\r
9991 if (info.inFormat == RTAUDIO_SINT8) {
\r
9992 // Channel compensation and/or (de)interleaving only.
\r
9993 signed char *in = (signed char *)inBuffer;
\r
9994 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
9995 for (j=0; j<info.channels; j++) {
\r
9996 out[info.outOffset[j]] = in[info.inOffset[j]];
\r
9998 in += info.inJump;
\r
9999 out += info.outJump;
\r
10002 if (info.inFormat == RTAUDIO_SINT16) {
\r
10003 Int16 *in = (Int16 *)inBuffer;
\r
10004 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10005 for (j=0; j<info.channels; j++) {
\r
10006 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
\r
10008 in += info.inJump;
\r
10009 out += info.outJump;
\r
10012 else if (info.inFormat == RTAUDIO_SINT24) {
\r
10013 Int24 *in = (Int24 *)inBuffer;
\r
10014 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10015 for (j=0; j<info.channels; j++) {
\r
10016 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
\r
10018 in += info.inJump;
\r
10019 out += info.outJump;
\r
10022 else if (info.inFormat == RTAUDIO_SINT32) {
\r
10023 Int32 *in = (Int32 *)inBuffer;
\r
10024 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10025 for (j=0; j<info.channels; j++) {
\r
10026 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
\r
10028 in += info.inJump;
\r
10029 out += info.outJump;
\r
10032 else if (info.inFormat == RTAUDIO_FLOAT32) {
\r
10033 Float32 *in = (Float32 *)inBuffer;
\r
10034 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10035 for (j=0; j<info.channels; j++) {
\r
10036 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10038 in += info.inJump;
\r
10039 out += info.outJump;
\r
10042 else if (info.inFormat == RTAUDIO_FLOAT64) {
\r
10043 Float64 *in = (Float64 *)inBuffer;
\r
10044 for (unsigned int i=0; i<stream_.bufferSize; i++) {
\r
10045 for (j=0; j<info.channels; j++) {
\r
10046 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
\r
10048 in += info.inJump;
\r
10049 out += info.outJump;
\r
10055 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
\r
10056 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
\r
10057 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
\r
10059 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
\r
10061 register char val;
\r
10062 register char *ptr;
\r
10065 if ( format == RTAUDIO_SINT16 ) {
\r
10066 for ( unsigned int i=0; i<samples; i++ ) {
\r
10067 // Swap 1st and 2nd bytes.
\r
10069 *(ptr) = *(ptr+1);
\r
10072 // Increment 2 bytes.
\r
10076 else if ( format == RTAUDIO_SINT32 ||
\r
10077 format == RTAUDIO_FLOAT32 ) {
\r
10078 for ( unsigned int i=0; i<samples; i++ ) {
\r
10079 // Swap 1st and 4th bytes.
\r
10081 *(ptr) = *(ptr+3);
\r
10084 // Swap 2nd and 3rd bytes.
\r
10087 *(ptr) = *(ptr+1);
\r
10090 // Increment 3 more bytes.
\r
10094 else if ( format == RTAUDIO_SINT24 ) {
\r
10095 for ( unsigned int i=0; i<samples; i++ ) {
\r
10096 // Swap 1st and 3rd bytes.
\r
10098 *(ptr) = *(ptr+2);
\r
10101 // Increment 2 more bytes.
\r
10105 else if ( format == RTAUDIO_FLOAT64 ) {
\r
10106 for ( unsigned int i=0; i<samples; i++ ) {
\r
10107 // Swap 1st and 8th bytes
\r
10109 *(ptr) = *(ptr+7);
\r
10112 // Swap 2nd and 7th bytes
\r
10115 *(ptr) = *(ptr+5);
\r
10118 // Swap 3rd and 6th bytes
\r
10121 *(ptr) = *(ptr+3);
\r
10124 // Swap 4th and 5th bytes
\r
10127 *(ptr) = *(ptr+1);
\r
10130 // Increment 5 more bytes.
\r
10136 // Indentation settings for Vim and Emacs
\r
10138 // Local Variables:
\r
10139 // c-basic-offset: 2
\r
10140 // indent-tabs-mode: nil
\r
10143 // vim: et sts=2 sw=2
\r