1 /************************************************************************/
3 \brief Realtime audio i/o C++ classes.
5 RtAudio provides a common API (Application Programming Interface)
6 for realtime audio input/output across Linux (native ALSA, Jack,
7 and OSS), Macintosh OS X (CoreAudio and Jack), and Windows
8 (DirectSound, ASIO and WASAPI) operating systems.
10 RtAudio WWW site: http://www.music.mcgill.ca/~gary/rtaudio/
12 RtAudio: realtime audio i/o C++ classes
13 Copyright (c) 2001-2017 Gary P. Scavone
15 Permission is hereby granted, free of charge, to any person
16 obtaining a copy of this software and associated documentation files
17 (the "Software"), to deal in the Software without restriction,
18 including without limitation the rights to use, copy, modify, merge,
19 publish, distribute, sublicense, and/or sell copies of the Software,
20 and to permit persons to whom the Software is furnished to do so,
21 subject to the following conditions:
23 The above copyright notice and this permission notice shall be
24 included in all copies or substantial portions of the Software.
26 Any person wishing to distribute modifications to the Software is
27 asked to send the modifications to the original developer so that
28 they can be incorporated into the canonical version. This is,
29 however, not a binding provision of this license.
31 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
34 IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
35 ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
36 CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
39 /************************************************************************/
41 // RtAudio: Version 5.0.0
// Static variable definitions.
// Canonical list of sample rates that RtAudio probes when a device
// reports a continuous min/max range instead of discrete rates
// (see the haveValueRange handling in getDeviceInfo()).
const unsigned int RtApi::MAX_SAMPLE_RATES = 14;
const unsigned int RtApi::SAMPLE_RATES[] = {
  4000, 5512, 8000, 9600, 11025, 16000, 22050,
  32000, 44100, 48000, 88200, 96000, 176400, 192000
// Platform mutex abstraction: on Windows builds the stream mutex is a
// Win32 CRITICAL_SECTION.
#if defined(__WINDOWS_DS__) || defined(__WINDOWS_ASIO__) || defined(__WINDOWS_WASAPI__)
  #define MUTEX_INITIALIZE(A) InitializeCriticalSection(A)
  #define MUTEX_DESTROY(A) DeleteCriticalSection(A)
  #define MUTEX_LOCK(A) EnterCriticalSection(A)
  #define MUTEX_UNLOCK(A) LeaveCriticalSection(A)
//! Convert a NUL-terminated narrow C string to a std::string.
/*!
  \param text narrow C string; may be NULL.
  \return a copy of \a text, or an empty string when \a text is NULL.
*/
static std::string convertCharPointerToStdString(const char *text)
{
  // Guard against a NULL pointer: constructing std::string from a null
  // char pointer is undefined behavior.
  if ( !text ) return std::string();
  return std::string(text);
}
71 static std::string convertCharPointerToStdString(const wchar_t *text)
73 int length = WideCharToMultiByte(CP_UTF8, 0, text, -1, NULL, 0, NULL, NULL);
74 std::string s( length-1, '\0' );
75 WideCharToMultiByte(CP_UTF8, 0, text, -1, &s[0], length, NULL, NULL);
#elif defined(__LINUX_ALSA__) || defined(__LINUX_PULSE__) || defined(__UNIX_JACK__) || defined(__LINUX_OSS__) || defined(__MACOSX_CORE__)
  // Unix-family builds protect the stream with a pthread mutex.
  #define MUTEX_INITIALIZE(A) pthread_mutex_init(A, NULL)
  #define MUTEX_DESTROY(A) pthread_mutex_destroy(A)
  #define MUTEX_LOCK(A) pthread_mutex_lock(A)
  #define MUTEX_UNLOCK(A) pthread_mutex_unlock(A)
  // Fallback expansions used when no real-time audio API is compiled in;
  // they expand to a harmless expression so call sites still compile.
  #define MUTEX_INITIALIZE(A) abs(*A) // dummy definitions
  #define MUTEX_DESTROY(A) abs(*A) // dummy definitions
90 // *************************************************** //
92 // RtAudio definitions.
94 // *************************************************** //
//! Return the RtAudio library version string (value of the RTAUDIO_VERSION macro).
std::string RtAudio :: getVersion( void )
  return RTAUDIO_VERSION;
//! Fill \a apis with the identifiers of every audio API compiled into
//! this build, in priority order.
void RtAudio :: getCompiledApi( std::vector<RtAudio::Api> &apis )
  // The order here will control the order of RtAudio's API search in
  // the constructor (first API with at least one device wins).
#if defined(__UNIX_JACK__)
  apis.push_back( UNIX_JACK );
#if defined(__LINUX_ALSA__)
  apis.push_back( LINUX_ALSA );
#if defined(__LINUX_PULSE__)
  apis.push_back( LINUX_PULSE );
#if defined(__LINUX_OSS__)
  apis.push_back( LINUX_OSS );
#if defined(__WINDOWS_ASIO__)
  apis.push_back( WINDOWS_ASIO );
#if defined(__WINDOWS_WASAPI__)
  apis.push_back( WINDOWS_WASAPI );
#if defined(__WINDOWS_DS__)
  apis.push_back( WINDOWS_DS );
#if defined(__MACOSX_CORE__)
  apis.push_back( MACOSX_CORE );
#if defined(__RTAUDIO_DUMMY__)
  apis.push_back( RTAUDIO_DUMMY );
//! Instantiate the RtApi subclass for \a api, storing it in rtapi_.
/*!
  Only APIs enabled at compile time are candidates; if \a api has no
  compiled support, rtapi_ is left unset and the caller handles it.
*/
void RtAudio :: openRtApi( RtAudio::Api api )
#if defined(__UNIX_JACK__)
  if ( api == UNIX_JACK )
    rtapi_ = new RtApiJack();
#if defined(__LINUX_ALSA__)
  if ( api == LINUX_ALSA )
    rtapi_ = new RtApiAlsa();
#if defined(__LINUX_PULSE__)
  if ( api == LINUX_PULSE )
    rtapi_ = new RtApiPulse();
#if defined(__LINUX_OSS__)
  if ( api == LINUX_OSS )
    rtapi_ = new RtApiOss();
#if defined(__WINDOWS_ASIO__)
  if ( api == WINDOWS_ASIO )
    rtapi_ = new RtApiAsio();
#if defined(__WINDOWS_WASAPI__)
  if ( api == WINDOWS_WASAPI )
    rtapi_ = new RtApiWasapi();
#if defined(__WINDOWS_DS__)
  if ( api == WINDOWS_DS )
    rtapi_ = new RtApiDs();
#if defined(__MACOSX_CORE__)
  if ( api == MACOSX_CORE )
    rtapi_ = new RtApiCore();
#if defined(__RTAUDIO_DUMMY__)
  if ( api == RTAUDIO_DUMMY )
    rtapi_ = new RtApiDummy();
//! Construct an RtAudio instance for \a api, or auto-select an API.
/*!
  If \a api is UNSPECIFIED (or has no compiled support), the compiled
  APIs are tried in getCompiledApi() order and the first one that
  reports at least one device is kept.

  \throws RtAudioError (UNSPECIFIED) if no compiled API support exists.
*/
RtAudio :: RtAudio( RtAudio::Api api )
  if ( api != UNSPECIFIED ) {
    // Attempt to open the specified API.
    if ( rtapi_ ) return;

    // No compiled support for specified API value. Issue a debug
    // warning and continue as if no API was specified.
    std::cerr << "\nRtAudio: no compiled support for specified API argument!\n" << std::endl;

  // Iterate through the compiled APIs and return as soon as we find
  // one with at least one device or we reach the end of the list.
  std::vector< RtAudio::Api > apis;
  getCompiledApi( apis );
  for ( unsigned int i=0; i<apis.size(); i++ ) {
    openRtApi( apis[i] );
    if ( rtapi_ && rtapi_->getDeviceCount() ) break;

  if ( rtapi_ ) return;

  // It should not be possible to get here because the preprocessor
  // definition __RTAUDIO_DUMMY__ is automatically defined if no
  // API-specific definitions are passed to the compiler. But just in
  // case something weird happens, we'll throw an error.
  std::string errorText = "\nRtAudio: no compiled API support found ... critical error!!\n\n";
  throw( RtAudioError( errorText, RtAudioError::UNSPECIFIED ) );
//! Destructor: releases the API instance created by the constructor.
RtAudio :: ~RtAudio()
//! Open a stream: thin forwarding wrapper around RtApi::openStream().
/*!
  All argument validation and device probing is performed by the
  API-specific rtapi_ object.
*/
void RtAudio :: openStream( RtAudio::StreamParameters *outputParameters,
                            RtAudio::StreamParameters *inputParameters,
                            RtAudioFormat format, unsigned int sampleRate,
                            unsigned int *bufferFrames,
                            RtAudioCallback callback, void *userData,
                            RtAudio::StreamOptions *options,
                            RtAudioErrorCallback errorCallback )
  return rtapi_->openStream( outputParameters, inputParameters, format,
                             sampleRate, bufferFrames, callback,
                             userData, options, errorCallback );
232 // *************************************************** //
234 // Public RtApi definitions (see end of file for
235 // private or protected utility functions).
237 // *************************************************** //
  // RtApi constructor body: establish a known "closed" stream state and
  // create the mutex that serializes stream operations.
  stream_.state = STREAM_CLOSED;   // no stream open yet
  stream_.mode = UNINITIALIZED;    // neither input, output nor duplex
  stream_.apiHandle = 0;           // API-specific handle, allocated at open time
  stream_.userBuffer[0] = 0;       // user conversion buffers, allocated at open time
  stream_.userBuffer[1] = 0;
  MUTEX_INITIALIZE( &stream_.mutex );
  showWarnings_ = true;            // warning messages are printed by default
  firstErrorOccurred_ = false;
  // RtApi destructor body: release the mutex created in the constructor.
  MUTEX_DESTROY( &stream_.mutex );
//! Validate arguments and open a stream via probeDeviceOpen().
/*!
  \param oParams output parameters, or NULL for an input-only stream.
  \param iParams input parameters, or NULL for an output-only stream.
  \param format sample format; must be one known to formatBytes().
  \param sampleRate requested sample rate in Hz.
  \param bufferFrames in/out: requested, then actual, buffer size in frames.
  \param callback user audio callback.
  \param userData opaque pointer passed back to the callback.
  \param options optional stream options; numberOfBuffers is updated on return.
  \param errorCallback optional user error callback.

  Errors are routed through error(): INVALID_USE for bad arguments,
  SYSTEM_ERROR when device probing/opening fails.
*/
void RtApi :: openStream( RtAudio::StreamParameters *oParams,
                          RtAudio::StreamParameters *iParams,
                          RtAudioFormat format, unsigned int sampleRate,
                          unsigned int *bufferFrames,
                          RtAudioCallback callback, void *userData,
                          RtAudio::StreamOptions *options,
                          RtAudioErrorCallback errorCallback )
  // Only one stream may be open per RtApi instance.
  if ( stream_.state != STREAM_CLOSED ) {
    errorText_ = "RtApi::openStream: a stream is already open!";
    error( RtAudioError::INVALID_USE );

  // Clear stream information potentially left from a previously open stream.

  // Parameter sanity checks: non-NULL StreamParameters need >= 1 channel,
  // and at least one of output/input must be supplied.
  if ( oParams && oParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL output StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );

  if ( iParams && iParams->nChannels < 1 ) {
    errorText_ = "RtApi::openStream: a non-NULL input StreamParameters structure cannot have an nChannels value less than one.";
    error( RtAudioError::INVALID_USE );

  if ( oParams == NULL && iParams == NULL ) {
    errorText_ = "RtApi::openStream: input and output StreamParameters structures are both NULL!";
    error( RtAudioError::INVALID_USE );

  // formatBytes() returns 0 for unrecognized formats.
  if ( formatBytes(format) == 0 ) {
    errorText_ = "RtApi::openStream: 'format' parameter value is undefined.";
    error( RtAudioError::INVALID_USE );

  // Device ids must be within the currently available device count.
  unsigned int nDevices = getDeviceCount();
  unsigned int oChannels = 0;
    oChannels = oParams->nChannels;
    if ( oParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: output device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );

  unsigned int iChannels = 0;
    iChannels = iParams->nChannels;
    if ( iParams->deviceId >= nDevices ) {
      errorText_ = "RtApi::openStream: input device parameter value is invalid.";
      error( RtAudioError::INVALID_USE );

  // Probe/open the output side first, then the input side.
  if ( oChannels > 0 ) {

    result = probeDeviceOpen( oParams->deviceId, OUTPUT, oChannels, oParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      error( RtAudioError::SYSTEM_ERROR );

  if ( iChannels > 0 ) {

    result = probeDeviceOpen( iParams->deviceId, INPUT, iChannels, iParams->firstChannel,
                              sampleRate, format, bufferFrames, options );
    if ( result == false ) {
      // Undo the already-opened output side before reporting failure.
      if ( oChannels > 0 ) closeStream();
      error( RtAudioError::SYSTEM_ERROR );

  // Record the user callback information and publish the stream state.
  stream_.callbackInfo.callback = (void *) callback;
  stream_.callbackInfo.userData = userData;
  stream_.callbackInfo.errorCallback = (void *) errorCallback;

  if ( options ) options->numberOfBuffers = stream_.nBuffers;
  stream_.state = STREAM_STOPPED;
//! Base-class default input device: index 0 unless a subclass overrides.
unsigned int RtApi :: getDefaultInputDevice( void )
  // Should be implemented in subclasses if possible.
//! Base-class default output device: index 0 unless a subclass overrides.
unsigned int RtApi :: getDefaultOutputDevice( void )
  // Should be implemented in subclasses if possible.
//! Close an open stream; every API subclass must override this.
void RtApi :: closeStream( void )
  // MUST be implemented in subclasses!
//! Probe and open one direction of a stream; every API subclass must override this.
/*!
  The base-class version is a stub (parameters intentionally unused).
*/
bool RtApi :: probeDeviceOpen( unsigned int /*device*/, StreamMode /*mode*/, unsigned int /*channels*/,
                               unsigned int /*firstChannel*/, unsigned int /*sampleRate*/,
                               RtAudioFormat /*format*/, unsigned int * /*bufferSize*/,
                               RtAudio::StreamOptions * /*options*/ )
  // MUST be implemented in subclasses!
//! Advance the stream-time estimate by one buffer's duration.
void RtApi :: tickStreamTime( void )
  // Subclasses that do not provide their own implementation of
  // getStreamTime should call this function once per buffer I/O to
  // provide basic stream time support.

  stream_.streamTime += ( stream_.bufferSize * 1.0 / stream_.sampleRate );

#if defined( HAVE_GETTIMEOFDAY )
  // Record the wall-clock time of this tick so getStreamTime() can
  // interpolate between callbacks.
  gettimeofday( &stream_.lastTickTimestamp, NULL );
//! Return total stream latency in frames (output + input sides for duplex).
long RtApi :: getStreamLatency( void )
  long totalLatency = 0;
  // latency[0] holds the output-side latency, latency[1] the input side.
  if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
    totalLatency = stream_.latency[0];
  if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
    totalLatency += stream_.latency[1];
//! Return the elapsed stream time in seconds.
/*!
  When gettimeofday() is available, the tick-based stream time is
  refined with the wall-clock time elapsed since the last tick.
*/
double RtApi :: getStreamTime( void )
#if defined( HAVE_GETTIMEOFDAY )
  // Return a very accurate estimate of the stream time by
  // adding in the elapsed time since the last tick.

  // No interpolation when the stream isn't running or hasn't ticked yet.
  if ( stream_.state != STREAM_RUNNING || stream_.streamTime == 0.0 )
    return stream_.streamTime;

  gettimeofday( &now, NULL );
  then = stream_.lastTickTimestamp;
  return stream_.streamTime +
    ((now.tv_sec + 0.000001 * now.tv_usec) -
     (then.tv_sec + 0.000001 * then.tv_usec));

  return stream_.streamTime;
//! Reset the stream time base to \a time (and the tick timestamp, when available).
void RtApi :: setStreamTime( double time )
    stream_.streamTime = time;
#if defined( HAVE_GETTIMEOFDAY )
  gettimeofday( &stream_.lastTickTimestamp, NULL );
//! Return the sample rate of the currently open stream.
unsigned int RtApi :: getStreamSampleRate( void )
 return stream_.sampleRate;
444 // *************************************************** //
446 // OS/API-specific methods.
448 // *************************************************** //
450 #if defined(__MACOSX_CORE__)
452 // The OS X CoreAudio API is designed to use a separate callback
453 // procedure for each of its audio devices. A single RtAudio duplex
454 // stream using two different devices is supported here, though it
455 // cannot be guaranteed to always behave correctly because we cannot
456 // synchronize these two callbacks.
458 // A property listener is installed for over/underrun information.
459 // However, no functionality is currently provided to allow property
460 // listeners to trigger user handlers because it is unclear what could
461 // be done if a critical stream parameter (buffer size, sample rate,
462 // device disconnect) notification arrived. The listeners entail
463 // quite a bit of extra code and most likely, a user program wouldn't
464 // be prepared for the result anyway. However, we do provide a flag
465 // to the client callback function to inform of an over/underrun.
// A structure to hold various information related to the CoreAudio API
// implementation.  Two-element arrays are indexed per direction
// (index 0 = playback, index 1 = record — matching the xrun[] use in
// xrunListener() below).
  AudioDeviceID id[2];    // device ids
#if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
  AudioDeviceIOProcID procId[2];
  UInt32 iStream[2];      // device stream index (or first if using multiple)
  UInt32 nStreams[2];     // number of streams to use
  pthread_cond_t condition;
  int drainCounter;       // Tracks callback counts when draining
  bool internalDrain;     // Indicates if stop is initiated from callback or not.

  // Default constructor: one stream per direction, no device ids, no xruns.
    :deviceBuffer(0), drainCounter(0), internalDrain(false) { nStreams[0] = 1; nStreams[1] = 1; id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
//! CoreAudio API constructor.
RtApiCore:: RtApiCore()
{
#if defined( AVAILABLE_MAC_OS_X_VERSION_10_6_AND_LATER )
  // This is a largely undocumented but absolutely necessary
  // requirement starting with OS-X 10.6. If not called, queries and
  // updates to various audio device properties are not handled
  // correctly.  Setting a NULL run loop makes CoreAudio dispatch
  // property notifications on its own internal thread.
  CFRunLoopRef theRunLoop = NULL;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyRunLoop,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectSetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, sizeof(CFRunLoopRef), &theRunLoop);
  if ( result != noErr ) {
    // Non-fatal: report as a warning and continue.
    errorText_ = "RtApiCore::RtApiCore: error setting run loop property!";
    error( RtAudioError::WARNING );
//! CoreAudio API destructor: ensures any open stream is closed first.
RtApiCore :: ~RtApiCore()
  // The subclass destructor gets called before the base class
  // destructor, so close an existing stream before deallocating
  // apiDeviceId memory.
  if ( stream_.state != STREAM_CLOSED ) closeStream();
//! Return the number of CoreAudio devices (0 on query failure).
unsigned int RtApiCore :: getDeviceCount( void )
  // Find out how many audio devices there are, if any.
  AudioObjectPropertyAddress propertyAddress = { kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyDataSize( kAudioObjectSystemObject, &propertyAddress, 0, NULL, &dataSize );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceCount: OS-X error getting device info!";
    error( RtAudioError::WARNING );

  // The property size is the full device-id array; divide by the id size.
  return dataSize / sizeof( AudioDeviceID );
//! Return the RtAudio index of the system default input device (0 on failure).
unsigned int RtApiCore :: getDefaultInputDevice( void )
  unsigned int nDevices = getDeviceCount();
  // With zero or one device, index 0 is the only possible answer.
  if ( nDevices <= 1 ) return 0;

  // Ask CoreAudio for the default input device id.
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device-id list and find the default id's index.
  dataSize *= nDevices;
  // NOTE(review): variable-length array is a compiler extension, not
  // standard C++ — consider std::vector<AudioDeviceID> here.
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultInputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultInputDevice: No default device found!";
  error( RtAudioError::WARNING );
//! Return the RtAudio index of the system default output device (0 on failure).
unsigned int RtApiCore :: getDefaultOutputDevice( void )
  unsigned int nDevices = getDeviceCount();
  // With zero or one device, index 0 is the only possible answer.
  if ( nDevices <= 1 ) return 0;

  // Ask CoreAudio for the default output device id.
  UInt32 dataSize = sizeof( AudioDeviceID );
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, &id );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device.";
    error( RtAudioError::WARNING );

  // Fetch the full device-id list and find the default id's index.
  dataSize = sizeof( AudioDeviceID ) * nDevices;
  // NOTE(review): variable-length array is a compiler extension, not
  // standard C++ — consider std::vector<AudioDeviceID> here.
  AudioDeviceID deviceList[ nDevices ];
  property.mSelector = kAudioHardwarePropertyDevices;
  result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property, 0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDefaultOutputDevice: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  for ( unsigned int i=0; i<nDevices; i++ )
    if ( id == deviceList[i] ) return i;

  errorText_ = "RtApiCore::getDefaultOutputDevice: No default device found!";
  error( RtAudioError::WARNING );
//! Probe and return information about CoreAudio device \a device.
/*!
  Fills name ("manufacturer: device"), channel counts per direction,
  supported sample rates, preferred rate, native format and default
  device flags.  Failures are reported through error() with WARNING
  (probe problems) or INVALID_USE (bad \a device index).
*/
RtAudio::DeviceInfo RtApiCore :: getDeviceInfo( unsigned int device )
  RtAudio::DeviceInfo info;

  // Validate the device index against the current device list.
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    errorText_ = "RtApiCore::getDeviceInfo: no devices found!";
    error( RtAudioError::INVALID_USE );

  if ( device >= nDevices ) {
    errorText_ = "RtApiCore::getDeviceInfo: device ID is invalid!";
    error( RtAudioError::INVALID_USE );

  // Map the RtAudio index to a CoreAudio AudioDeviceID.
  // NOTE(review): variable-length array is a compiler extension, not
  // standard C++ — consider std::vector<AudioDeviceID> here.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::getDeviceInfo: OS-X system error getting device IDs.";
    error( RtAudioError::WARNING );

  AudioDeviceID id = deviceList[ device ];

  // Get the device name.  The name is built as "<manufacturer>: <device name>".
  dataSize = sizeof( CFStringRef );
  property.mSelector = kAudioObjectPropertyManufacturer;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device manufacturer.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *mname = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  // Worst-case UTF-8 expansion of a CFString is 3 bytes per character.
  int length = CFStringGetLength(cfname);
  char *mname = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, mname, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, mname, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)mname, strlen(mname) );
  info.name.append( ": " );

  // Append the device name itself.
  property.mSelector = kAudioObjectPropertyName;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &cfname );
  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: system error (" << getErrorCode( result ) << ") getting device name.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  //const char *name = CFStringGetCStringPtr( cfname, CFStringGetSystemEncoding() );
  length = CFStringGetLength(cfname);
  char *name = (char *)malloc(length * 3 + 1);
#if defined( UNICODE ) || defined( _UNICODE )
  CFStringGetCString(cfname, name, length * 3 + 1, kCFStringEncodingUTF8);
  CFStringGetCString(cfname, name, length * 3 + 1, CFStringGetSystemEncoding());
  info.name.append( (const char *)name, strlen(name) );

  // Get the output stream "configuration".
  AudioBufferList *bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  property.mScope = kAudioDevicePropertyScopeOutput;
  //  property.mElement = kAudioObjectPropertyElementWildcard;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating output AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting output stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get output channel information: sum channels over all output streams.
  unsigned int i, nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.outputChannels += bufferList->mBuffers[i].mNumberChannels;

  // Get the input stream "configuration" (same dance, input scope).
  property.mScope = kAudioDevicePropertyScopeInput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::getDeviceInfo: memory error allocating input AudioBufferList.";
    error( RtAudioError::WARNING );

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting input stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // Get input channel information.
  nStreams = bufferList->mNumberBuffers;
  for ( i=0; i<nStreams; i++ )
    info.inputChannels += bufferList->mBuffers[i].mNumberChannels;

  // If device opens for both playback and capture, we determine the channels.
  if ( info.outputChannels > 0 && info.inputChannels > 0 )
    info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;

  // Probe the device sample rates.
  bool isInput = false;
  if ( info.outputChannels == 0 ) isInput = true;

  // Determine the supported sample rates.
  property.mSelector = kAudioDevicePropertyAvailableNominalSampleRates;
  if ( isInput == false ) property.mScope = kAudioDevicePropertyScopeOutput;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != kAudioHardwareNoError || dataSize == 0 ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rate info.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  UInt32 nRanges = dataSize / sizeof( AudioValueRange );
  AudioValueRange rangeList[ nRanges ];
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &rangeList );
  if ( result != kAudioHardwareNoError ) {
    errorStream_ << "RtApiCore::getDeviceInfo: system error (" << getErrorCode( result ) << ") getting sample rates.";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // The sample rate reporting mechanism is a bit of a mystery. It
  // seems that it can either return individual rates or a range of
  // rates. I assume that if the min / max range values are the same,
  // then that represents a single supported rate and if the min / max
  // range values are different, the device supports an arbitrary
  // range of values (though there might be multiple ranges, so we'll
  // use the most conservative range).
  Float64 minimumRate = 1.0, maximumRate = 10000000000.0;
  bool haveValueRange = false;
  info.sampleRates.clear();
  for ( UInt32 i=0; i<nRanges; i++ ) {
    if ( rangeList[i].mMinimum == rangeList[i].mMaximum ) {
      // Discrete rate: record it directly.
      unsigned int tmpSr = (unsigned int) rangeList[i].mMinimum;
      info.sampleRates.push_back( tmpSr );

      // Prefer the largest rate not exceeding 48 kHz.
      if ( !info.preferredSampleRate || ( tmpSr <= 48000 && tmpSr > info.preferredSampleRate ) )
        info.preferredSampleRate = tmpSr;

      // Range of rates: intersect all reported ranges conservatively.
      haveValueRange = true;
      if ( rangeList[i].mMinimum > minimumRate ) minimumRate = rangeList[i].mMinimum;
      if ( rangeList[i].mMaximum < maximumRate ) maximumRate = rangeList[i].mMaximum;

  if ( haveValueRange ) {
    // Expand the range into the canonical SAMPLE_RATES table entries.
    for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
      if ( SAMPLE_RATES[k] >= (unsigned int) minimumRate && SAMPLE_RATES[k] <= (unsigned int) maximumRate ) {
        info.sampleRates.push_back( SAMPLE_RATES[k] );

        if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
          info.preferredSampleRate = SAMPLE_RATES[k];

  // Sort and remove any redundant values
  std::sort( info.sampleRates.begin(), info.sampleRates.end() );
  info.sampleRates.erase( unique( info.sampleRates.begin(), info.sampleRates.end() ), info.sampleRates.end() );

  if ( info.sampleRates.size() == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceInfo: No supported sample rates found for device (" << device << ").";
    errorText_ = errorStream_.str();
    error( RtAudioError::WARNING );

  // CoreAudio always uses 32-bit floating point data for PCM streams.
  // Thus, any other "physical" formats supported by the device are of
  // no interest to the client.
  info.nativeFormats = RTAUDIO_FLOAT32;

  if ( info.outputChannels > 0 )
    if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
  if ( info.inputChannels > 0 )
    if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
// CoreAudio IOProc: bridges the HAL's per-device callback to
// RtApiCore::callbackEvent().  A false return from callbackEvent maps
// to kAudioHardwareUnspecifiedError.
static OSStatus callbackHandler( AudioDeviceID inDevice,
                                 const AudioTimeStamp* /*inNow*/,
                                 const AudioBufferList* inInputData,
                                 const AudioTimeStamp* /*inInputTime*/,
                                 AudioBufferList* outOutputData,
                                 const AudioTimeStamp* /*inOutputTime*/,
  // The client data pointer carries the CallbackInfo registered at open time.
  CallbackInfo *info = (CallbackInfo *) infoPointer;

  RtApiCore *object = (RtApiCore *) info->object;
  if ( object->callbackEvent( inDevice, inInputData, outOutputData ) == false )
    return kAudioHardwareUnspecifiedError;
    return kAudioHardwareNoError;
// Property listener that records over/underrun (processor overload)
// notifications in the CoreHandle xrun flags: index 1 for the input
// scope, index 0 otherwise (output).
static OSStatus xrunListener( AudioObjectID /*inDevice*/,
                              const AudioObjectPropertyAddress properties[],
                              void* handlePointer )
  CoreHandle *handle = (CoreHandle *) handlePointer;
  for ( UInt32 i=0; i<nAddresses; i++ ) {
    if ( properties[i].mSelector == kAudioDeviceProcessorOverload ) {
      if ( properties[i].mScope == kAudioDevicePropertyScopeInput )
        handle->xrun[1] = true;
        handle->xrun[0] = true;

  return kAudioHardwareNoError;
// Property listener used while waiting for a sample-rate change: reads
// the device's current nominal sample rate into *ratePointer.
static OSStatus rateListener( AudioObjectID inDevice,
                              UInt32 /*nAddresses*/,
                              const AudioObjectPropertyAddress /*properties*/[],
  Float64 *rate = (Float64 *) ratePointer;
  UInt32 dataSize = sizeof( Float64 );
  AudioObjectPropertyAddress property = { kAudioDevicePropertyNominalSampleRate,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  AudioObjectGetPropertyData( inDevice, &property, 0, NULL, &dataSize, rate );
  return kAudioHardwareNoError;
//! Probe and open one direction of a CoreAudio stream (continues beyond
//! this excerpt).
/*!
  Locates the device, finds the stream(s) covering the requested
  channels (supporting interleaved multi-channel streams and runs of
  mono streams), then negotiates the buffer size against the device's
  allowed range.
*/
bool RtApiCore :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
                                   unsigned int firstChannel, unsigned int sampleRate,
                                   RtAudioFormat format, unsigned int *bufferSize,
                                   RtAudio::StreamOptions *options )
  unsigned int nDevices = getDeviceCount();
  if ( nDevices == 0 ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: no devices found!";

  if ( device >= nDevices ) {
    // This should not happen because a check is made before this function is called.
    errorText_ = "RtApiCore::probeDeviceOpen: device ID is invalid!";

  // Map the RtAudio index to a CoreAudio AudioDeviceID.
  // NOTE(review): variable-length array is a compiler extension, not
  // standard C++ — consider std::vector<AudioDeviceID> here.
  AudioDeviceID deviceList[ nDevices ];
  UInt32 dataSize = sizeof( AudioDeviceID ) * nDevices;
  AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
                                          kAudioObjectPropertyScopeGlobal,
                                          kAudioObjectPropertyElementMaster };
  OSStatus result = AudioObjectGetPropertyData( kAudioObjectSystemObject, &property,
                                                0, NULL, &dataSize, (void *) &deviceList );
  if ( result != noErr ) {
    errorText_ = "RtApiCore::probeDeviceOpen: OS-X system error getting device IDs.";

  AudioDeviceID id = deviceList[ device ];

  // Setup for stream mode.
  bool isInput = false;
  if ( mode == INPUT ) {
    property.mScope = kAudioDevicePropertyScopeInput;
    property.mScope = kAudioDevicePropertyScopeOutput;

  // Get the stream "configuration".
  AudioBufferList	*bufferList = nil;
  property.mSelector = kAudioDevicePropertyStreamConfiguration;
  result = AudioObjectGetPropertyDataSize( id, &property, 0, NULL, &dataSize );
  if ( result != noErr || dataSize == 0 ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration info for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Allocate the AudioBufferList.
  bufferList = (AudioBufferList *) malloc( dataSize );
  if ( bufferList == NULL ) {
    errorText_ = "RtApiCore::probeDeviceOpen: memory error allocating AudioBufferList.";

  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, bufferList );
  if (result != noErr || dataSize == 0) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream configuration for device (" << device << ").";
    errorText_ = errorStream_.str();

  // Search for one or more streams that contain the desired number of
  // channels. CoreAudio devices can have an arbitrary number of
  // streams and each stream can have an arbitrary number of channels.
  // For each stream, a single buffer of interleaved samples is
  // provided.  RtAudio prefers the use of one stream of interleaved
  // data or multiple consecutive single-channel streams.  However, we
  // now support multiple consecutive multi-channel streams of
  // interleaved data as well.
  UInt32 iStream, offsetCounter = firstChannel;
  UInt32 nStreams = bufferList->mNumberBuffers;
  bool monoMode = false;
  bool foundStream = false;

  // First check that the device supports the requested number of
  // channels (starting at firstChannel).
  UInt32 deviceChannels = 0;
  for ( iStream=0; iStream<nStreams; iStream++ )
    deviceChannels += bufferList->mBuffers[iStream].mNumberChannels;

  if ( deviceChannels < ( channels + firstChannel ) ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: the device (" << device << ") does not support the requested channel count.";
    errorText_ = errorStream_.str();

  // Look for a single stream meeting our needs.
  UInt32 firstStream, streamCount = 1, streamChannels = 0, channelOffset = 0;
  for ( iStream=0; iStream<nStreams; iStream++ ) {
    streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
    if ( streamChannels >= channels + offsetCounter ) {
      // This stream alone can carry all requested channels.
      firstStream = iStream;
      channelOffset = offsetCounter;

    // Requested channels start inside this stream but spill past it.
    if ( streamChannels > offsetCounter ) break;
    offsetCounter -= streamChannels;

  // If we didn't find a single stream above, then we should be able
  // to meet the channel specification with multiple streams.
  if ( foundStream == false ) {
    // Skip whole streams until we reach the one containing firstChannel.
    offsetCounter = firstChannel;
    for ( iStream=0; iStream<nStreams; iStream++ ) {
      streamChannels = bufferList->mBuffers[iStream].mNumberChannels;
      if ( streamChannels > offsetCounter ) break;
      offsetCounter -= streamChannels;

    firstStream = iStream;
    channelOffset = offsetCounter;
    // Count how many further channels (and streams) are still needed.
    Int32 channelCounter = channels + offsetCounter - streamChannels;

    if ( streamChannels > 1 ) monoMode = false;
    while ( channelCounter > 0 ) {
      streamChannels = bufferList->mBuffers[++iStream].mNumberChannels;
      if ( streamChannels > 1 ) monoMode = false;
      channelCounter -= streamChannels;

  // Determine the buffer size: clamp the requested size to the
  // device's supported range.
  AudioValueRange	bufferRange;
  dataSize = sizeof( AudioValueRange );
  property.mSelector = kAudioDevicePropertyBufferFrameSizeRange;
  result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &bufferRange );

  if ( result != noErr ) {
    errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting buffer size range for device (" << device << ").";
    errorText_ = errorStream_.str();

  if ( bufferRange.mMinimum > *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMinimum;
  else if ( bufferRange.mMaximum < *bufferSize ) *bufferSize = (unsigned long) bufferRange.mMaximum;
  if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) *bufferSize = (unsigned long) bufferRange.mMinimum;
1034 // Set the buffer size. For multiple streams, I'm assuming we only
1035 // need to make this setting for the master channel.
1036 UInt32 theSize = (UInt32) *bufferSize;
1037 dataSize = sizeof( UInt32 );
1038 property.mSelector = kAudioDevicePropertyBufferFrameSize;
1039 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &theSize );
1041 if ( result != noErr ) {
1042 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting the buffer size for device (" << device << ").";
1043 errorText_ = errorStream_.str();
1047 // If attempting to setup a duplex stream, the bufferSize parameter
1048 // MUST be the same in both directions!
1049 *bufferSize = theSize;
1050 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
1051 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << device << ").";
1052 errorText_ = errorStream_.str();
1056 stream_.bufferSize = *bufferSize;
1057 stream_.nBuffers = 1;
1059 // Try to set "hog" mode ... it's not clear to me this is working.
1060 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) {
1062 dataSize = sizeof( hog_pid );
1063 property.mSelector = kAudioDevicePropertyHogMode;
1064 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &hog_pid );
1065 if ( result != noErr ) {
1066 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting 'hog' state!";
1067 errorText_ = errorStream_.str();
1071 if ( hog_pid != getpid() ) {
1073 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &hog_pid );
1074 if ( result != noErr ) {
1075 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting 'hog' state!";
1076 errorText_ = errorStream_.str();
1082 // Check and if necessary, change the sample rate for the device.
1083 Float64 nominalRate;
1084 dataSize = sizeof( Float64 );
1085 property.mSelector = kAudioDevicePropertyNominalSampleRate;
1086 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &nominalRate );
1087 if ( result != noErr ) {
1088 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting current sample rate.";
1089 errorText_ = errorStream_.str();
1093 // Only change the sample rate if off by more than 1 Hz.
1094 if ( fabs( nominalRate - (double)sampleRate ) > 1.0 ) {
1096 // Set a property listener for the sample rate change
1097 Float64 reportedRate = 0.0;
1098 AudioObjectPropertyAddress tmp = { kAudioDevicePropertyNominalSampleRate, kAudioObjectPropertyScopeGlobal, kAudioObjectPropertyElementMaster };
1099 result = AudioObjectAddPropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1100 if ( result != noErr ) {
1101 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate property listener for device (" << device << ").";
1102 errorText_ = errorStream_.str();
1106 nominalRate = (Float64) sampleRate;
1107 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &nominalRate );
1108 if ( result != noErr ) {
1109 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1110 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate for device (" << device << ").";
1111 errorText_ = errorStream_.str();
1115 // Now wait until the reported nominal rate is what we just set.
1116 UInt32 microCounter = 0;
1117 while ( reportedRate != nominalRate ) {
1118 microCounter += 5000;
1119 if ( microCounter > 5000000 ) break;
1123 // Remove the property listener.
1124 AudioObjectRemovePropertyListener( id, &tmp, rateListener, (void *) &reportedRate );
1126 if ( microCounter > 5000000 ) {
1127 errorStream_ << "RtApiCore::probeDeviceOpen: timeout waiting for sample rate update for device (" << device << ").";
1128 errorText_ = errorStream_.str();
1133 // Now set the stream format for all streams. Also, check the
1134 // physical format of the device and change that if necessary.
1135 AudioStreamBasicDescription description;
1136 dataSize = sizeof( AudioStreamBasicDescription );
1137 property.mSelector = kAudioStreamPropertyVirtualFormat;
1138 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1139 if ( result != noErr ) {
1140 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream format for device (" << device << ").";
1141 errorText_ = errorStream_.str();
1145 // Set the sample rate and data format id. However, only make the
1146 // change if the sample rate is not within 1.0 of the desired
1147 // rate and the format is not linear pcm.
1148 bool updateFormat = false;
1149 if ( fabs( description.mSampleRate - (Float64)sampleRate ) > 1.0 ) {
1150 description.mSampleRate = (Float64) sampleRate;
1151 updateFormat = true;
1154 if ( description.mFormatID != kAudioFormatLinearPCM ) {
1155 description.mFormatID = kAudioFormatLinearPCM;
1156 updateFormat = true;
1159 if ( updateFormat ) {
1160 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &description );
1161 if ( result != noErr ) {
1162 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting sample rate or data format for device (" << device << ").";
1163 errorText_ = errorStream_.str();
1168 // Now check the physical format.
1169 property.mSelector = kAudioStreamPropertyPhysicalFormat;
1170 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &description );
1171 if ( result != noErr ) {
1172 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting stream physical format for device (" << device << ").";
1173 errorText_ = errorStream_.str();
1177 //std::cout << "Current physical stream format:" << std::endl;
1178 //std::cout << " mBitsPerChan = " << description.mBitsPerChannel << std::endl;
1179 //std::cout << " aligned high = " << (description.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (description.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1180 //std::cout << " bytesPerFrame = " << description.mBytesPerFrame << std::endl;
1181 //std::cout << " sample rate = " << description.mSampleRate << std::endl;
1183 if ( description.mFormatID != kAudioFormatLinearPCM || description.mBitsPerChannel < 16 ) {
1184 description.mFormatID = kAudioFormatLinearPCM;
1185 //description.mSampleRate = (Float64) sampleRate;
1186 AudioStreamBasicDescription testDescription = description;
1189 // We'll try higher bit rates first and then work our way down.
1190 std::vector< std::pair<UInt32, UInt32> > physicalFormats;
1191 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsFloat) & ~kLinearPCMFormatFlagIsSignedInteger;
1192 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1193 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1194 physicalFormats.push_back( std::pair<Float32, UInt32>( 32, formatFlags ) );
1195 physicalFormats.push_back( std::pair<Float32, UInt32>( 24, formatFlags ) ); // 24-bit packed
1196 formatFlags &= ~( kAudioFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh );
1197 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.2, formatFlags ) ); // 24-bit in 4 bytes, aligned low
1198 formatFlags |= kAudioFormatFlagIsAlignedHigh;
1199 physicalFormats.push_back( std::pair<Float32, UInt32>( 24.4, formatFlags ) ); // 24-bit in 4 bytes, aligned high
1200 formatFlags = (description.mFormatFlags | kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked) & ~kLinearPCMFormatFlagIsFloat;
1201 physicalFormats.push_back( std::pair<Float32, UInt32>( 16, formatFlags ) );
1202 physicalFormats.push_back( std::pair<Float32, UInt32>( 8, formatFlags ) );
1204 bool setPhysicalFormat = false;
1205 for( unsigned int i=0; i<physicalFormats.size(); i++ ) {
1206 testDescription = description;
1207 testDescription.mBitsPerChannel = (UInt32) physicalFormats[i].first;
1208 testDescription.mFormatFlags = physicalFormats[i].second;
1209 if ( (24 == (UInt32)physicalFormats[i].first) && ~( physicalFormats[i].second & kAudioFormatFlagIsPacked ) )
1210 testDescription.mBytesPerFrame = 4 * testDescription.mChannelsPerFrame;
1212 testDescription.mBytesPerFrame = testDescription.mBitsPerChannel/8 * testDescription.mChannelsPerFrame;
1213 testDescription.mBytesPerPacket = testDescription.mBytesPerFrame * testDescription.mFramesPerPacket;
1214 result = AudioObjectSetPropertyData( id, &property, 0, NULL, dataSize, &testDescription );
1215 if ( result == noErr ) {
1216 setPhysicalFormat = true;
1217 //std::cout << "Updated physical stream format:" << std::endl;
1218 //std::cout << " mBitsPerChan = " << testDescription.mBitsPerChannel << std::endl;
1219 //std::cout << " aligned high = " << (testDescription.mFormatFlags & kAudioFormatFlagIsAlignedHigh) << ", isPacked = " << (testDescription.mFormatFlags & kAudioFormatFlagIsPacked) << std::endl;
1220 //std::cout << " bytesPerFrame = " << testDescription.mBytesPerFrame << std::endl;
1221 //std::cout << " sample rate = " << testDescription.mSampleRate << std::endl;
1226 if ( !setPhysicalFormat ) {
1227 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") setting physical data format for device (" << device << ").";
1228 errorText_ = errorStream_.str();
1231 } // done setting virtual/physical formats.
1233 // Get the stream / device latency.
1235 dataSize = sizeof( UInt32 );
1236 property.mSelector = kAudioDevicePropertyLatency;
1237 if ( AudioObjectHasProperty( id, &property ) == true ) {
1238 result = AudioObjectGetPropertyData( id, &property, 0, NULL, &dataSize, &latency );
1239 if ( result == kAudioHardwareNoError ) stream_.latency[ mode ] = latency;
1241 errorStream_ << "RtApiCore::probeDeviceOpen: system error (" << getErrorCode( result ) << ") getting device latency for device (" << device << ").";
1242 errorText_ = errorStream_.str();
1243 error( RtAudioError::WARNING );
1247 // Byte-swapping: According to AudioHardware.h, the stream data will
1248 // always be presented in native-endian format, so we should never
1249 // need to byte swap.
1250 stream_.doByteSwap[mode] = false;
1252 // From the CoreAudio documentation, PCM data must be supplied as
1254 stream_.userFormat = format;
1255 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
1257 if ( streamCount == 1 )
1258 stream_.nDeviceChannels[mode] = description.mChannelsPerFrame;
1259 else // multiple streams
1260 stream_.nDeviceChannels[mode] = channels;
1261 stream_.nUserChannels[mode] = channels;
1262 stream_.channelOffset[mode] = channelOffset; // offset within a CoreAudio stream
1263 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
1264 else stream_.userInterleaved = true;
1265 stream_.deviceInterleaved[mode] = true;
1266 if ( monoMode == true ) stream_.deviceInterleaved[mode] = false;
1268 // Set flags for buffer conversion.
1269 stream_.doConvertBuffer[mode] = false;
1270 if ( stream_.userFormat != stream_.deviceFormat[mode] )
1271 stream_.doConvertBuffer[mode] = true;
1272 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
1273 stream_.doConvertBuffer[mode] = true;
1274 if ( streamCount == 1 ) {
1275 if ( stream_.nUserChannels[mode] > 1 &&
1276 stream_.userInterleaved != stream_.deviceInterleaved[mode] )
1277 stream_.doConvertBuffer[mode] = true;
1279 else if ( monoMode && stream_.userInterleaved )
1280 stream_.doConvertBuffer[mode] = true;
1282 // Allocate our CoreHandle structure for the stream.
1283 CoreHandle *handle = 0;
1284 if ( stream_.apiHandle == 0 ) {
1286 handle = new CoreHandle;
1288 catch ( std::bad_alloc& ) {
1289 errorText_ = "RtApiCore::probeDeviceOpen: error allocating CoreHandle memory.";
1293 if ( pthread_cond_init( &handle->condition, NULL ) ) {
1294 errorText_ = "RtApiCore::probeDeviceOpen: error initializing pthread condition variable.";
1297 stream_.apiHandle = (void *) handle;
1300 handle = (CoreHandle *) stream_.apiHandle;
1301 handle->iStream[mode] = firstStream;
1302 handle->nStreams[mode] = streamCount;
1303 handle->id[mode] = id;
1305 // Allocate necessary internal buffers.
1306 unsigned long bufferBytes;
1307 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
1308 // stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
1309 stream_.userBuffer[mode] = (char *) malloc( bufferBytes * sizeof(char) );
1310 memset( stream_.userBuffer[mode], 0, bufferBytes * sizeof(char) );
1311 if ( stream_.userBuffer[mode] == NULL ) {
1312 errorText_ = "RtApiCore::probeDeviceOpen: error allocating user buffer memory.";
1316 // If possible, we will make use of the CoreAudio stream buffers as
1317 // "device buffers". However, we can't do this if using multiple
1319 if ( stream_.doConvertBuffer[mode] && handle->nStreams[mode] > 1 ) {
1321 bool makeBuffer = true;
1322 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
1323 if ( mode == INPUT ) {
1324 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
1325 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
1326 if ( bufferBytes <= bytesOut ) makeBuffer = false;
1331 bufferBytes *= *bufferSize;
1332 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
1333 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
1334 if ( stream_.deviceBuffer == NULL ) {
1335 errorText_ = "RtApiCore::probeDeviceOpen: error allocating device buffer memory.";
1341 stream_.sampleRate = sampleRate;
1342 stream_.device[mode] = device;
1343 stream_.state = STREAM_STOPPED;
1344 stream_.callbackInfo.object = (void *) this;
1346 // Setup the buffer conversion information structure.
1347 if ( stream_.doConvertBuffer[mode] ) {
1348 if ( streamCount > 1 ) setConvertInfo( mode, 0 );
1349 else setConvertInfo( mode, channelOffset );
1352 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device )
1353 // Only one callback procedure per device.
1354 stream_.mode = DUPLEX;
1356 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1357 result = AudioDeviceCreateIOProcID( id, callbackHandler, (void *) &stream_.callbackInfo, &handle->procId[mode] );
1359 // deprecated in favor of AudioDeviceCreateIOProcID()
1360 result = AudioDeviceAddIOProc( id, callbackHandler, (void *) &stream_.callbackInfo );
1362 if ( result != noErr ) {
1363 errorStream_ << "RtApiCore::probeDeviceOpen: system error setting callback for device (" << device << ").";
1364 errorText_ = errorStream_.str();
1367 if ( stream_.mode == OUTPUT && mode == INPUT )
1368 stream_.mode = DUPLEX;
1370 stream_.mode = mode;
1373 // Setup the device property listener for over/underload.
1374 property.mSelector = kAudioDeviceProcessorOverload;
1375 property.mScope = kAudioObjectPropertyScopeGlobal;
1376 result = AudioObjectAddPropertyListener( id, &property, xrunListener, (void *) handle );
1382 pthread_cond_destroy( &handle->condition );
1384 stream_.apiHandle = 0;
1387 for ( int i=0; i<2; i++ ) {
1388 if ( stream_.userBuffer[i] ) {
1389 free( stream_.userBuffer[i] );
1390 stream_.userBuffer[i] = 0;
1394 if ( stream_.deviceBuffer ) {
1395 free( stream_.deviceBuffer );
1396 stream_.deviceBuffer = 0;
1399 stream_.state = STREAM_CLOSED;
// Shut down and release an open CoreAudio stream.  Emits a WARNING if
// no stream is open.  For each active direction (output = index 0,
// input = index 1): removes the processor-overload ("xrun") property
// listener, stops the device callback if the stream is still running,
// and destroys (10.5+) or removes (pre-10.5) the IOProc.  Finally
// frees the user and device buffers, destroys the pthread condition
// variable, releases the CoreHandle, and marks the stream CLOSED.
// NOTE(review): this extraction elides some structural lines (braces /
// return statements); comments below annotate only the visible code.
1403 void RtApiCore :: closeStream( void )
1405 if ( stream_.state == STREAM_CLOSED ) {
1406 errorText_ = "RtApiCore::closeStream(): no open stream to close!";
1407 error( RtAudioError::WARNING );
1411 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// --- Output device teardown (also covers the output half of DUPLEX) ---
1412 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1414 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1415 kAudioObjectPropertyScopeGlobal,
1416 kAudioObjectPropertyElementMaster };
// Re-target the address at the overload (xrun) notification we registered.
1418 property.mSelector = kAudioDeviceProcessorOverload;
1419 property.mScope = kAudioObjectPropertyScopeGlobal;
1420 if (AudioObjectRemovePropertyListener( handle->id[0], &property, xrunListener, (void *) handle ) != noErr) {
1421 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1422 error( RtAudioError::WARNING );
// Stop the device before destroying its IOProc if we are still running.
1425 if ( stream_.state == STREAM_RUNNING )
1426 AudioDeviceStop( handle->id[0], callbackHandler );
1427 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1428 AudioDeviceDestroyIOProcID( handle->id[0], handle->procId[0] );
1430 // deprecated in favor of AudioDeviceDestroyIOProcID()
1431 AudioDeviceRemoveIOProc( handle->id[0], callbackHandler );
// --- Input device teardown (skipped when DUPLEX uses one device for both) ---
1435 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1437 AudioObjectPropertyAddress property = { kAudioHardwarePropertyDevices,
1438 kAudioObjectPropertyScopeGlobal,
1439 kAudioObjectPropertyElementMaster };
1441 property.mSelector = kAudioDeviceProcessorOverload;
1442 property.mScope = kAudioObjectPropertyScopeGlobal;
1443 if (AudioObjectRemovePropertyListener( handle->id[1], &property, xrunListener, (void *) handle ) != noErr) {
1444 errorText_ = "RtApiCore::closeStream(): error removing property listener!";
1445 error( RtAudioError::WARNING );
1448 if ( stream_.state == STREAM_RUNNING )
1449 AudioDeviceStop( handle->id[1], callbackHandler );
1450 #if defined( MAC_OS_X_VERSION_10_5 ) && ( MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_5 )
1451 AudioDeviceDestroyIOProcID( handle->id[1], handle->procId[1] );
1453 // deprecated in favor of AudioDeviceDestroyIOProcID()
1454 AudioDeviceRemoveIOProc( handle->id[1], callbackHandler );
// --- Release heap buffers for both directions ---
1458 for ( int i=0; i<2; i++ ) {
1459 if ( stream_.userBuffer[i] ) {
1460 free( stream_.userBuffer[i] );
1461 stream_.userBuffer[i] = 0;
1465 if ( stream_.deviceBuffer ) {
1466 free( stream_.deviceBuffer );
1467 stream_.deviceBuffer = 0;
1470 // Destroy pthread condition variable.
1471 pthread_cond_destroy( &handle->condition );
// Drop the CoreHandle and reset bookkeeping to the closed state.
1473 stream_.apiHandle = 0;
1475 stream_.mode = UNINITIALIZED;
1476 stream_.state = STREAM_CLOSED;
// Start the open stream.  Emits a WARNING if already running.  Starts
// the CoreAudio device callback on the output device (handle->id[0])
// and/or the input device (handle->id[1]) as the stream mode requires,
// resets the drain state, and marks the stream RUNNING.  If any
// AudioDeviceStart() call failed, error( SYSTEM_ERROR ) is raised at
// the end; structural lines (braces/returns) are elided in this view.
1479 void RtApiCore :: startStream( void )
1482 if ( stream_.state == STREAM_RUNNING ) {
1483 errorText_ = "RtApiCore::startStream(): the stream is already running!";
1484 error( RtAudioError::WARNING );
1488 OSStatus result = noErr;
1489 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
// Start the output device callback (also the output half of DUPLEX).
1490 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
1492 result = AudioDeviceStart( handle->id[0], callbackHandler );
1493 if ( result != noErr ) {
1494 errorStream_ << "RtApiCore::startStream: system error (" << getErrorCode( result ) << ") starting callback procedure on device (" << stream_.device[0] << ").";
1495 errorText_ = errorStream_.str();
// Start the input device callback; for DUPLEX only when it is a
// physically different device from the output.
1500 if ( stream_.mode == INPUT ||
1501 ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1503 result = AudioDeviceStart( handle->id[1], callbackHandler );
1504 if ( result != noErr ) {
1505 errorStream_ << "RtApiCore::startStream: system error starting input callback procedure on device (" << stream_.device[1] << ").";
1506 errorText_ = errorStream_.str();
// Reset drain bookkeeping so the callback delivers live data.
1511 handle->drainCounter = 0;
1512 handle->internalDrain = false;
1513 stream_.state = STREAM_RUNNING;
// Report an accumulated CoreAudio failure, if any, as a system error.
1516 if ( result == noErr ) return;
1517 error( RtAudioError::SYSTEM_ERROR );
// Stop the stream after letting the output drain.  Emits a WARNING if
// already stopped.  When output is active and no drain is in progress,
// requests a two-cycle drain (drainCounter = 2) and blocks on the
// condition variable until the audio callback signals completion, then
// stops the output device; the input device is stopped as well when it
// is distinct.  Marks the stream STOPPED and raises SYSTEM_ERROR if a
// CoreAudio call failed.  Structural lines are elided in this view.
1520 void RtApiCore :: stopStream( void )
1523 if ( stream_.state == STREAM_STOPPED ) {
1524 errorText_ = "RtApiCore::stopStream(): the stream is already stopped!";
1525 error( RtAudioError::WARNING );
1529 OSStatus result = noErr;
1530 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1531 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// Ask the callback to drain (write zeros) and wait for its signal;
// pthread_cond_wait releases stream_.mutex while blocked.
1533 if ( handle->drainCounter == 0 ) {
1534 handle->drainCounter = 2;
1535 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
1538 result = AudioDeviceStop( handle->id[0], callbackHandler );
1539 if ( result != noErr ) {
1540 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping callback procedure on device (" << stream_.device[0] << ").";
1541 errorText_ = errorStream_.str();
// Stop the input device when it differs from the output device.
1546 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && stream_.device[0] != stream_.device[1] ) ) {
1548 result = AudioDeviceStop( handle->id[1], callbackHandler );
1549 if ( result != noErr ) {
1550 errorStream_ << "RtApiCore::stopStream: system error (" << getErrorCode( result ) << ") stopping input callback procedure on device (" << stream_.device[1] << ").";
1551 errorText_ = errorStream_.str();
1556 stream_.state = STREAM_STOPPED;
1559 if ( result == noErr ) return;
1560 error( RtAudioError::SYSTEM_ERROR );
// Abort the stream (stop without a full output drain).  Emits a
// WARNING if already stopped, then sets drainCounter to 2 so the audio
// callback switches to writing zeros immediately.  The tail of this
// function is elided in this extraction; presumably it then delegates
// to stopStream() — confirm against the full source.
1563 void RtApiCore :: abortStream( void )
1566 if ( stream_.state == STREAM_STOPPED ) {
1567 errorText_ = "RtApiCore::abortStream(): the stream is already stopped!";
1568 error( RtAudioError::WARNING );
1572 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1573 handle->drainCounter = 2;
1578 // This function will be called by a spawned thread when the user
1579 // callback function signals that the stream should be stopped or
1580 // aborted. It is better to handle it this way because the
1581 // callbackEvent() function probably should return before the AudioDeviceStop()
1582 // function is called.
1583 static void *coreStopStream( void *ptr )
1585 CallbackInfo *info = (CallbackInfo *) ptr;
1586 RtApiCore *object = (RtApiCore *) info->object;
1588 object->stopStream();
1589 pthread_exit( NULL );
// Core render/capture handler, invoked once per IOProc cycle for
// deviceId.  Pulls fresh data from the user callback (unless
// draining), distributes it into the CoreAudio output stream buffers
// (with conversion / de-interleaving as configured at open time), and
// gathers input stream buffers back into the user buffer.  Returns
// SUCCESS when the stream is stopped/stopping; the final return at the
// end of the function is elided in this extraction, as are various
// braces/else lines — comments annotate only the visible code.
1592 bool RtApiCore :: callbackEvent( AudioDeviceID deviceId,
1593 const AudioBufferList *inBufferList,
1594 const AudioBufferList *outBufferList )
1596 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
1597 if ( stream_.state == STREAM_CLOSED ) {
1598 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
1599 error( RtAudioError::WARNING );
1603 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
1604 CoreHandle *handle = (CoreHandle *) stream_.apiHandle;
1606 // Check if we were draining the stream and signal that it is finished.
1607 if ( handle->drainCounter > 3 ) {
1608 ThreadHandle threadId;
1610 stream_.state = STREAM_STOPPING;
// Internal drain (callback returned 1): spawn a thread to call
// stopStream(); external drain: wake the stopStream() caller blocked
// on the condition variable.
1611 if ( handle->internalDrain == true )
1612 pthread_create( &threadId, NULL, coreStopStream, info );
1613 else // external call to stopStream()
1614 pthread_cond_signal( &handle->condition );
1618 AudioDeviceID outputDevice = handle->id[0];
1620 // Invoke user callback to get fresh output data UNLESS we are
1621 // draining stream or duplex mode AND the input/output devices are
1622 // different AND this function is called for the input device.
1623 if ( handle->drainCounter == 0 && ( stream_.mode != DUPLEX || deviceId == outputDevice ) ) {
1624 RtAudioCallback callback = (RtAudioCallback) info->callback;
1625 double streamTime = getStreamTime();
1626 RtAudioStreamStatus status = 0;
// Report and clear any pending xrun flags (set by xrunListener).
1627 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
1628 status |= RTAUDIO_OUTPUT_UNDERFLOW;
1629 handle->xrun[0] = false;
1631 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
1632 status |= RTAUDIO_INPUT_OVERFLOW;
1633 handle->xrun[1] = false;
// Callback return codes: 2 = abort now, 1 = drain then stop, 0 = continue.
1636 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
1637 stream_.bufferSize, streamTime, status, info->userData );
1638 if ( cbReturnValue == 2 ) {
1639 stream_.state = STREAM_STOPPING;
1640 handle->drainCounter = 2;
1644 else if ( cbReturnValue == 1 ) {
1645 handle->drainCounter = 1;
1646 handle->internalDrain = true;
// ----- Output side: fill the CoreAudio output buffers -----
1650 if ( stream_.mode == OUTPUT || ( stream_.mode == DUPLEX && deviceId == outputDevice ) ) {
1652 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
1654 if ( handle->nStreams[0] == 1 ) {
1655 memset( outBufferList->mBuffers[handle->iStream[0]].mData,
1657 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1659 else { // fill multiple streams with zeros
1660 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1661 memset( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1663 outBufferList->mBuffers[handle->iStream[0]+i].mDataByteSize );
// Single output stream: convert or memcpy straight from the user buffer.
1667 else if ( handle->nStreams[0] == 1 ) {
1668 if ( stream_.doConvertBuffer[0] ) { // convert directly to CoreAudio stream buffer
1669 convertBuffer( (char *) outBufferList->mBuffers[handle->iStream[0]].mData,
1670 stream_.userBuffer[0], stream_.convertInfo[0] );
1672 else { // copy from user buffer
1673 memcpy( outBufferList->mBuffers[handle->iStream[0]].mData,
1674 stream_.userBuffer[0],
1675 outBufferList->mBuffers[handle->iStream[0]].mDataByteSize );
1678 else { // fill multiple streams
1679 Float32 *inBuffer = (Float32 *) stream_.userBuffer[0];
1680 if ( stream_.doConvertBuffer[0] ) {
1681 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
1682 inBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: one single-channel stream per user channel; copy each
// non-interleaved channel block to its own stream buffer.
1685 if ( stream_.deviceInterleaved[0] == false ) { // mono mode
1686 UInt32 bufferBytes = outBufferList->mBuffers[handle->iStream[0]].mDataByteSize;
1687 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
1688 memcpy( outBufferList->mBuffers[handle->iStream[0]+i].mData,
1689 (void *)&inBuffer[i*stream_.bufferSize], bufferBytes );
1692 else { // fill multiple multi-channel streams with interleaved data
1693 UInt32 streamChannels, channelsLeft, inJump, outJump, inOffset;
1696 bool inInterleaved = ( stream_.userInterleaved ) ? true : false;
1697 UInt32 inChannels = stream_.nUserChannels[0];
1698 if ( stream_.doConvertBuffer[0] ) {
1699 inInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1700 inChannels = stream_.nDeviceChannels[0];
// inOffset = distance between consecutive channels of one frame in
// the source buffer (1 if interleaved, bufferSize if planar).
1703 if ( inInterleaved ) inOffset = 1;
1704 else inOffset = stream_.bufferSize;
1706 channelsLeft = inChannels;
1707 for ( unsigned int i=0; i<handle->nStreams[0]; i++ ) {
1709 out = (Float32 *) outBufferList->mBuffers[handle->iStream[0]+i].mData;
1710 streamChannels = outBufferList->mBuffers[handle->iStream[0]+i].mNumberChannels;
1713 // Account for possible channel offset in first stream
1714 if ( i == 0 && stream_.channelOffset[0] > 0 ) {
1715 streamChannels -= stream_.channelOffset[0];
1716 outJump = stream_.channelOffset[0];
1720 // Account for possible unfilled channels at end of the last stream
1721 if ( streamChannels > channelsLeft ) {
1722 outJump = streamChannels - channelsLeft;
1723 streamChannels = channelsLeft;
1726 // Determine input buffer offsets and skips
1727 if ( inInterleaved ) {
1728 inJump = inChannels;
1729 in += inChannels - channelsLeft;
1733 in += (inChannels - channelsLeft) * inOffset;
// Frame-by-frame scatter of this stream's channels into its buffer.
1736 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1737 for ( unsigned int j=0; j<streamChannels; j++ ) {
1738 *out++ = in[j*inOffset];
1743 channelsLeft -= streamChannels;
1749 // Don't bother draining input
1750 if ( handle->drainCounter ) {
1751 handle->drainCounter++;
// ----- Input side: gather the CoreAudio input buffers -----
1755 AudioDeviceID inputDevice;
1756 inputDevice = handle->id[1];
1757 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && deviceId == inputDevice ) ) {
1759 if ( handle->nStreams[1] == 1 ) {
1760 if ( stream_.doConvertBuffer[1] ) { // convert directly from CoreAudio stream buffer
1761 convertBuffer( stream_.userBuffer[1],
1762 (char *) inBufferList->mBuffers[handle->iStream[1]].mData,
1763 stream_.convertInfo[1] );
1765 else { // copy to user buffer
1766 memcpy( stream_.userBuffer[1],
1767 inBufferList->mBuffers[handle->iStream[1]].mData,
1768 inBufferList->mBuffers[handle->iStream[1]].mDataByteSize );
1771 else { // read from multiple streams
1772 Float32 *outBuffer = (Float32 *) stream_.userBuffer[1];
1773 if ( stream_.doConvertBuffer[1] ) outBuffer = (Float32 *) stream_.deviceBuffer;
// Mono mode: each single-channel stream copies into its own planar block.
1775 if ( stream_.deviceInterleaved[1] == false ) { // mono mode
1776 UInt32 bufferBytes = inBufferList->mBuffers[handle->iStream[1]].mDataByteSize;
1777 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
1778 memcpy( (void *)&outBuffer[i*stream_.bufferSize],
1779 inBufferList->mBuffers[handle->iStream[1]+i].mData, bufferBytes );
1782 else { // read from multiple multi-channel streams
1783 UInt32 streamChannels, channelsLeft, inJump, outJump, outOffset;
1786 bool outInterleaved = ( stream_.userInterleaved ) ? true : false;
1787 UInt32 outChannels = stream_.nUserChannels[1];
1788 if ( stream_.doConvertBuffer[1] ) {
1789 outInterleaved = true; // device buffer will always be interleaved for nStreams > 1 and not mono mode
1790 outChannels = stream_.nDeviceChannels[1];
// outOffset mirrors inOffset above, but for the destination buffer.
1793 if ( outInterleaved ) outOffset = 1;
1794 else outOffset = stream_.bufferSize;
1796 channelsLeft = outChannels;
1797 for ( unsigned int i=0; i<handle->nStreams[1]; i++ ) {
1799 in = (Float32 *) inBufferList->mBuffers[handle->iStream[1]+i].mData;
1800 streamChannels = inBufferList->mBuffers[handle->iStream[1]+i].mNumberChannels;
1803 // Account for possible channel offset in first stream
1804 if ( i == 0 && stream_.channelOffset[1] > 0 ) {
1805 streamChannels -= stream_.channelOffset[1];
1806 inJump = stream_.channelOffset[1];
1810 // Account for possible unread channels at end of the last stream
1811 if ( streamChannels > channelsLeft ) {
1812 inJump = streamChannels - channelsLeft;
1813 streamChannels = channelsLeft;
1816 // Determine output buffer offsets and skips
1817 if ( outInterleaved ) {
1818 outJump = outChannels;
1819 out += outChannels - channelsLeft;
1823 out += (outChannels - channelsLeft) * outOffset;
// Frame-by-frame gather of this stream's channels into the user/device buffer.
1826 for ( unsigned int i=0; i<stream_.bufferSize; i++ ) {
1827 for ( unsigned int j=0; j<streamChannels; j++ ) {
1828 out[j*outOffset] = *in++;
1833 channelsLeft -= streamChannels;
// After gathering into the internal device buffer, convert to the
// user's format/layout.
1837 if ( stream_.doConvertBuffer[1] ) { // convert from our internal "device" buffer
1838 convertBuffer( stream_.userBuffer[1],
1839 stream_.deviceBuffer,
1840 stream_.convertInfo[1] );
1846 //MUTEX_UNLOCK( &stream_.mutex );
// Advance the stream time by one buffer's worth of frames.
1848 RtApi::tickStreamTime();
1852 const char* RtApiCore :: getErrorCode( OSStatus code )
1856 case kAudioHardwareNotRunningError:
1857 return "kAudioHardwareNotRunningError";
1859 case kAudioHardwareUnspecifiedError:
1860 return "kAudioHardwareUnspecifiedError";
1862 case kAudioHardwareUnknownPropertyError:
1863 return "kAudioHardwareUnknownPropertyError";
1865 case kAudioHardwareBadPropertySizeError:
1866 return "kAudioHardwareBadPropertySizeError";
1868 case kAudioHardwareIllegalOperationError:
1869 return "kAudioHardwareIllegalOperationError";
1871 case kAudioHardwareBadObjectError:
1872 return "kAudioHardwareBadObjectError";
1874 case kAudioHardwareBadDeviceError:
1875 return "kAudioHardwareBadDeviceError";
1877 case kAudioHardwareBadStreamError:
1878 return "kAudioHardwareBadStreamError";
1880 case kAudioHardwareUnsupportedOperationError:
1881 return "kAudioHardwareUnsupportedOperationError";
1883 case kAudioDeviceUnsupportedFormatError:
1884 return "kAudioDeviceUnsupportedFormatError";
1886 case kAudioDevicePermissionsError:
1887 return "kAudioDevicePermissionsError";
1890 return "CoreAudio unknown error";
1894 //******************** End of __MACOSX_CORE__ *********************//
1897 #if defined(__UNIX_JACK__)
1899 // JACK is a low-latency audio server, originally written for the
1900 // GNU/Linux operating system and now also ported to OS-X. It can
1901 // connect a number of different applications to an audio device, as
1902 // well as allowing them to share audio between themselves.
1904 // When using JACK with RtAudio, "devices" refer to JACK clients that
1905 // have ports connected to the server. The JACK server is typically
1906 // started in a terminal as follows:
// jackd -d alsa -d hw:0
1910 // or through an interface program such as qjackctl. Many of the
1911 // parameters normally set for a stream are fixed by the JACK server
1912 // and can be specified when the JACK server is started. In
// jackd -d alsa -d hw:0 -r 44100 -p 512 -n 4
1917 // specifies a sample rate of 44100 Hz, a buffer size of 512 sample
1918 // frames, and number of buffers = 4. Once the server is running, it
1919 // is not possible to override these values. If the values are not
1920 // specified in the command-line, the JACK server uses default values.
1922 // The JACK server does not have to be running when an instance of
1923 // RtApiJack is created, though the function getDeviceCount() will
1924 // report 0 devices found until JACK has been started. When no
1925 // devices are available (i.e., the JACK server is not running), a
1926 // stream cannot be opened.
1928 #include <jack/jack.h>
1932 // A structure to hold various information related to the Jack API
1935 jack_client_t *client;
1936 jack_port_t **ports[2];
1937 std::string deviceName[2];
1939 pthread_cond_t condition;
1940 int drainCounter; // Tracks callback counts when draining
1941 bool internalDrain; // Indicates if stop is initiated from callback or not.
1944 :client(0), drainCounter(0), internalDrain(false) { ports[0] = 0; ports[1] = 0; xrun[0] = false; xrun[1] = false; }
#if !defined(__RTAUDIO_DEBUG__)
// No-op error hook handed to jack_set_error_function() in RtApiJack()
// to suppress the Jack library's default reporting to stderr.
// Fixes: removed stray ';' after the function body (an empty
// declaration) and terminated the preprocessor conditional, which was
// left open in the visible text.
static void jackSilentError( const char * ) {}
#endif
1951 RtApiJack :: RtApiJack()
1952 :shouldAutoconnect_(true) {
1953 // Nothing to do here.
1954 #if !defined(__RTAUDIO_DEBUG__)
1955 // Turn off Jack's internal error reporting.
1956 jack_set_error_function( &jackSilentError );
1960 RtApiJack :: ~RtApiJack()
1962 if ( stream_.state != STREAM_CLOSED ) closeStream();
1965 unsigned int RtApiJack :: getDeviceCount( void )
1967 // See if we can become a jack client.
1968 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
1969 jack_status_t *status = NULL;
1970 jack_client_t *client = jack_client_open( "RtApiJackCount", options, status );
1971 if ( client == 0 ) return 0;
1974 std::string port, previousPort;
1975 unsigned int nChannels = 0, nDevices = 0;
1976 ports = jack_get_ports( client, NULL, NULL, 0 );
1978 // Parse the port names up to the first colon (:).
1981 port = (char *) ports[ nChannels ];
1982 iColon = port.find(":");
1983 if ( iColon != std::string::npos ) {
1984 port = port.substr( 0, iColon + 1 );
1985 if ( port != previousPort ) {
1987 previousPort = port;
1990 } while ( ports[++nChannels] );
1994 jack_client_close( client );
1998 RtAudio::DeviceInfo RtApiJack :: getDeviceInfo( unsigned int device )
2000 RtAudio::DeviceInfo info;
2001 info.probed = false;
2003 jack_options_t options = (jack_options_t) ( JackNoStartServer ); //JackNullOption
2004 jack_status_t *status = NULL;
2005 jack_client_t *client = jack_client_open( "RtApiJackInfo", options, status );
2006 if ( client == 0 ) {
2007 errorText_ = "RtApiJack::getDeviceInfo: Jack server not found or connection error!";
2008 error( RtAudioError::WARNING );
2013 std::string port, previousPort;
2014 unsigned int nPorts = 0, nDevices = 0;
2015 ports = jack_get_ports( client, NULL, NULL, 0 );
2017 // Parse the port names up to the first colon (:).
2020 port = (char *) ports[ nPorts ];
2021 iColon = port.find(":");
2022 if ( iColon != std::string::npos ) {
2023 port = port.substr( 0, iColon );
2024 if ( port != previousPort ) {
2025 if ( nDevices == device ) info.name = port;
2027 previousPort = port;
2030 } while ( ports[++nPorts] );
2034 if ( device >= nDevices ) {
2035 jack_client_close( client );
2036 errorText_ = "RtApiJack::getDeviceInfo: device ID is invalid!";
2037 error( RtAudioError::INVALID_USE );
2041 // Get the current jack server sample rate.
2042 info.sampleRates.clear();
2044 info.preferredSampleRate = jack_get_sample_rate( client );
2045 info.sampleRates.push_back( info.preferredSampleRate );
2047 // Count the available ports containing the client name as device
2048 // channels. Jack "input ports" equal RtAudio output channels.
2049 unsigned int nChannels = 0;
2050 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsInput );
2052 while ( ports[ nChannels ] ) nChannels++;
2054 info.outputChannels = nChannels;
2057 // Jack "output ports" equal RtAudio input channels.
2059 ports = jack_get_ports( client, info.name.c_str(), NULL, JackPortIsOutput );
2061 while ( ports[ nChannels ] ) nChannels++;
2063 info.inputChannels = nChannels;
2066 if ( info.outputChannels == 0 && info.inputChannels == 0 ) {
2067 jack_client_close(client);
2068 errorText_ = "RtApiJack::getDeviceInfo: error determining Jack input/output channels!";
2069 error( RtAudioError::WARNING );
2073 // If device opens for both playback and capture, we determine the channels.
2074 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2075 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2077 // Jack always uses 32-bit floats.
2078 info.nativeFormats = RTAUDIO_FLOAT32;
2080 // Jack doesn't provide default devices so we'll use the first available one.
2081 if ( device == 0 && info.outputChannels > 0 )
2082 info.isDefaultOutput = true;
2083 if ( device == 0 && info.inputChannels > 0 )
2084 info.isDefaultInput = true;
2086 jack_client_close(client);
2091 static int jackCallbackHandler( jack_nframes_t nframes, void *infoPointer )
2093 CallbackInfo *info = (CallbackInfo *) infoPointer;
2095 RtApiJack *object = (RtApiJack *) info->object;
2096 if ( object->callbackEvent( (unsigned long) nframes ) == false ) return 1;
2101 // This function will be called by a spawned thread when the Jack
2102 // server signals that it is shutting down. It is necessary to handle
2103 // it this way because the jackShutdown() function must return before
2104 // the jack_deactivate() function (in closeStream()) will return.
2105 static void *jackCloseStream( void *ptr )
2107 CallbackInfo *info = (CallbackInfo *) ptr;
2108 RtApiJack *object = (RtApiJack *) info->object;
2110 object->closeStream();
2112 pthread_exit( NULL );
2114 static void jackShutdown( void *infoPointer )
2116 CallbackInfo *info = (CallbackInfo *) infoPointer;
2117 RtApiJack *object = (RtApiJack *) info->object;
2119 // Check current stream state. If stopped, then we'll assume this
2120 // was called as a result of a call to RtApiJack::stopStream (the
2121 // deactivation of a client handle causes this function to be called).
2122 // If not, we'll assume the Jack server is shutting down or some
2123 // other problem occurred and we should close the stream.
2124 if ( object->isStreamRunning() == false ) return;
2126 ThreadHandle threadId;
2127 pthread_create( &threadId, NULL, jackCloseStream, info );
2128 std::cerr << "\nRtApiJack: the Jack server is shutting down this client ... stream stopped and closed!!\n" << std::endl;
2131 static int jackXrun( void *infoPointer )
2133 JackHandle *handle = (JackHandle *) infoPointer;
2135 if ( handle->ports[0] ) handle->xrun[0] = true;
2136 if ( handle->ports[1] ) handle->xrun[1] = true;
2141 bool RtApiJack :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2142 unsigned int firstChannel, unsigned int sampleRate,
2143 RtAudioFormat format, unsigned int *bufferSize,
2144 RtAudio::StreamOptions *options )
2146 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2148 // Look for jack server and try to become a client (only do once per stream).
2149 jack_client_t *client = 0;
2150 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) {
2151 jack_options_t jackoptions = (jack_options_t) ( JackNoStartServer ); //JackNullOption;
2152 jack_status_t *status = NULL;
2153 if ( options && !options->streamName.empty() )
2154 client = jack_client_open( options->streamName.c_str(), jackoptions, status );
2156 client = jack_client_open( "RtApiJack", jackoptions, status );
2157 if ( client == 0 ) {
2158 errorText_ = "RtApiJack::probeDeviceOpen: Jack server not found or connection error!";
2159 error( RtAudioError::WARNING );
2164 // The handle must have been created on an earlier pass.
2165 client = handle->client;
2169 std::string port, previousPort, deviceName;
2170 unsigned int nPorts = 0, nDevices = 0;
2171 ports = jack_get_ports( client, NULL, NULL, 0 );
2173 // Parse the port names up to the first colon (:).
2176 port = (char *) ports[ nPorts ];
2177 iColon = port.find(":");
2178 if ( iColon != std::string::npos ) {
2179 port = port.substr( 0, iColon );
2180 if ( port != previousPort ) {
2181 if ( nDevices == device ) deviceName = port;
2183 previousPort = port;
2186 } while ( ports[++nPorts] );
2190 if ( device >= nDevices ) {
2191 errorText_ = "RtApiJack::probeDeviceOpen: device ID is invalid!";
2195 // Count the available ports containing the client name as device
2196 // channels. Jack "input ports" equal RtAudio output channels.
2197 unsigned int nChannels = 0;
2198 unsigned long flag = JackPortIsInput;
2199 if ( mode == INPUT ) flag = JackPortIsOutput;
2200 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2202 while ( ports[ nChannels ] ) nChannels++;
2206 // Compare the jack ports for specified client to the requested number of channels.
2207 if ( nChannels < (channels + firstChannel) ) {
2208 errorStream_ << "RtApiJack::probeDeviceOpen: requested number of channels (" << channels << ") + offset (" << firstChannel << ") not found for specified device (" << device << ":" << deviceName << ").";
2209 errorText_ = errorStream_.str();
2213 // Check the jack server sample rate.
2214 unsigned int jackRate = jack_get_sample_rate( client );
2215 if ( sampleRate != jackRate ) {
2216 jack_client_close( client );
2217 errorStream_ << "RtApiJack::probeDeviceOpen: the requested sample rate (" << sampleRate << ") is different than the JACK server rate (" << jackRate << ").";
2218 errorText_ = errorStream_.str();
2221 stream_.sampleRate = jackRate;
2223 // Get the latency of the JACK port.
2224 ports = jack_get_ports( client, deviceName.c_str(), NULL, flag );
2225 if ( ports[ firstChannel ] ) {
2227 jack_latency_callback_mode_t cbmode = (mode == INPUT ? JackCaptureLatency : JackPlaybackLatency);
2228 // the range (usually the min and max are equal)
2229 jack_latency_range_t latrange; latrange.min = latrange.max = 0;
2230 // get the latency range
2231 jack_port_get_latency_range( jack_port_by_name( client, ports[firstChannel] ), cbmode, &latrange );
2232 // be optimistic, use the min!
2233 stream_.latency[mode] = latrange.min;
2234 //stream_.latency[mode] = jack_port_get_latency( jack_port_by_name( client, ports[ firstChannel ] ) );
2238 // The jack server always uses 32-bit floating-point data.
2239 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
2240 stream_.userFormat = format;
2242 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
2243 else stream_.userInterleaved = true;
2245 // Jack always uses non-interleaved buffers.
2246 stream_.deviceInterleaved[mode] = false;
2248 // Jack always provides host byte-ordered data.
2249 stream_.doByteSwap[mode] = false;
2251 // Get the buffer size. The buffer size and number of buffers
2252 // (periods) is set when the jack server is started.
2253 stream_.bufferSize = (int) jack_get_buffer_size( client );
2254 *bufferSize = stream_.bufferSize;
2256 stream_.nDeviceChannels[mode] = channels;
2257 stream_.nUserChannels[mode] = channels;
2259 // Set flags for buffer conversion.
2260 stream_.doConvertBuffer[mode] = false;
2261 if ( stream_.userFormat != stream_.deviceFormat[mode] )
2262 stream_.doConvertBuffer[mode] = true;
2263 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
2264 stream_.nUserChannels[mode] > 1 )
2265 stream_.doConvertBuffer[mode] = true;
2267 // Allocate our JackHandle structure for the stream.
2268 if ( handle == 0 ) {
2270 handle = new JackHandle;
2272 catch ( std::bad_alloc& ) {
2273 errorText_ = "RtApiJack::probeDeviceOpen: error allocating JackHandle memory.";
2277 if ( pthread_cond_init(&handle->condition, NULL) ) {
2278 errorText_ = "RtApiJack::probeDeviceOpen: error initializing pthread condition variable.";
2281 stream_.apiHandle = (void *) handle;
2282 handle->client = client;
2284 handle->deviceName[mode] = deviceName;
2286 // Allocate necessary internal buffers.
2287 unsigned long bufferBytes;
2288 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
2289 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
2290 if ( stream_.userBuffer[mode] == NULL ) {
2291 errorText_ = "RtApiJack::probeDeviceOpen: error allocating user buffer memory.";
2295 if ( stream_.doConvertBuffer[mode] ) {
2297 bool makeBuffer = true;
2298 if ( mode == OUTPUT )
2299 bufferBytes = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
2300 else { // mode == INPUT
2301 bufferBytes = stream_.nDeviceChannels[1] * formatBytes( stream_.deviceFormat[1] );
2302 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
2303 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes(stream_.deviceFormat[0]);
2304 if ( bufferBytes < bytesOut ) makeBuffer = false;
2309 bufferBytes *= *bufferSize;
2310 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
2311 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
2312 if ( stream_.deviceBuffer == NULL ) {
2313 errorText_ = "RtApiJack::probeDeviceOpen: error allocating device buffer memory.";
2319 // Allocate memory for the Jack ports (channels) identifiers.
2320 handle->ports[mode] = (jack_port_t **) malloc ( sizeof (jack_port_t *) * channels );
2321 if ( handle->ports[mode] == NULL ) {
2322 errorText_ = "RtApiJack::probeDeviceOpen: error allocating port memory.";
2326 stream_.device[mode] = device;
2327 stream_.channelOffset[mode] = firstChannel;
2328 stream_.state = STREAM_STOPPED;
2329 stream_.callbackInfo.object = (void *) this;
2331 if ( stream_.mode == OUTPUT && mode == INPUT )
2332 // We had already set up the stream for output.
2333 stream_.mode = DUPLEX;
2335 stream_.mode = mode;
2336 jack_set_process_callback( handle->client, jackCallbackHandler, (void *) &stream_.callbackInfo );
2337 jack_set_xrun_callback( handle->client, jackXrun, (void *) &handle );
2338 jack_on_shutdown( handle->client, jackShutdown, (void *) &stream_.callbackInfo );
2341 // Register our ports.
2343 if ( mode == OUTPUT ) {
2344 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2345 snprintf( label, 64, "outport %d", i );
2346 handle->ports[0][i] = jack_port_register( handle->client, (const char *)label,
2347 JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput, 0 );
2351 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2352 snprintf( label, 64, "inport %d", i );
2353 handle->ports[1][i] = jack_port_register( handle->client, (const char *)label,
2354 JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput, 0 );
2358 // Setup the buffer conversion information structure. We don't use
2359 // buffers to do channel offsets, so we override that parameter
2361 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
2363 if ( options && options->flags & RTAUDIO_JACK_DONT_CONNECT ) shouldAutoconnect_ = false;
2369 pthread_cond_destroy( &handle->condition );
2370 jack_client_close( handle->client );
2372 if ( handle->ports[0] ) free( handle->ports[0] );
2373 if ( handle->ports[1] ) free( handle->ports[1] );
2376 stream_.apiHandle = 0;
2379 for ( int i=0; i<2; i++ ) {
2380 if ( stream_.userBuffer[i] ) {
2381 free( stream_.userBuffer[i] );
2382 stream_.userBuffer[i] = 0;
2386 if ( stream_.deviceBuffer ) {
2387 free( stream_.deviceBuffer );
2388 stream_.deviceBuffer = 0;
2394 void RtApiJack :: closeStream( void )
2396 if ( stream_.state == STREAM_CLOSED ) {
2397 errorText_ = "RtApiJack::closeStream(): no open stream to close!";
2398 error( RtAudioError::WARNING );
2402 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2405 if ( stream_.state == STREAM_RUNNING )
2406 jack_deactivate( handle->client );
2408 jack_client_close( handle->client );
2412 if ( handle->ports[0] ) free( handle->ports[0] );
2413 if ( handle->ports[1] ) free( handle->ports[1] );
2414 pthread_cond_destroy( &handle->condition );
2416 stream_.apiHandle = 0;
2419 for ( int i=0; i<2; i++ ) {
2420 if ( stream_.userBuffer[i] ) {
2421 free( stream_.userBuffer[i] );
2422 stream_.userBuffer[i] = 0;
2426 if ( stream_.deviceBuffer ) {
2427 free( stream_.deviceBuffer );
2428 stream_.deviceBuffer = 0;
2431 stream_.mode = UNINITIALIZED;
2432 stream_.state = STREAM_CLOSED;
2435 void RtApiJack :: startStream( void )
2438 if ( stream_.state == STREAM_RUNNING ) {
2439 errorText_ = "RtApiJack::startStream(): the stream is already running!";
2440 error( RtAudioError::WARNING );
2444 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2445 int result = jack_activate( handle->client );
2447 errorText_ = "RtApiJack::startStream(): unable to activate JACK client!";
2453 // Get the list of available ports.
2454 if ( shouldAutoconnect_ && (stream_.mode == OUTPUT || stream_.mode == DUPLEX) ) {
2456 ports = jack_get_ports( handle->client, handle->deviceName[0].c_str(), NULL, JackPortIsInput);
2457 if ( ports == NULL) {
2458 errorText_ = "RtApiJack::startStream(): error determining available JACK input ports!";
2462 // Now make the port connections. Since RtAudio wasn't designed to
2463 // allow the user to select particular channels of a device, we'll
2464 // just open the first "nChannels" ports with offset.
2465 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2467 if ( ports[ stream_.channelOffset[0] + i ] )
2468 result = jack_connect( handle->client, jack_port_name( handle->ports[0][i] ), ports[ stream_.channelOffset[0] + i ] );
2471 errorText_ = "RtApiJack::startStream(): error connecting output ports!";
2478 if ( shouldAutoconnect_ && (stream_.mode == INPUT || stream_.mode == DUPLEX) ) {
2480 ports = jack_get_ports( handle->client, handle->deviceName[1].c_str(), NULL, JackPortIsOutput );
2481 if ( ports == NULL) {
2482 errorText_ = "RtApiJack::startStream(): error determining available JACK output ports!";
2486 // Now make the port connections. See note above.
2487 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2489 if ( ports[ stream_.channelOffset[1] + i ] )
2490 result = jack_connect( handle->client, ports[ stream_.channelOffset[1] + i ], jack_port_name( handle->ports[1][i] ) );
2493 errorText_ = "RtApiJack::startStream(): error connecting input ports!";
2500 handle->drainCounter = 0;
2501 handle->internalDrain = false;
2502 stream_.state = STREAM_RUNNING;
2505 if ( result == 0 ) return;
2506 error( RtAudioError::SYSTEM_ERROR );
2509 void RtApiJack :: stopStream( void )
2512 if ( stream_.state == STREAM_STOPPED ) {
2513 errorText_ = "RtApiJack::stopStream(): the stream is already stopped!";
2514 error( RtAudioError::WARNING );
2518 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2519 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2521 if ( handle->drainCounter == 0 ) {
2522 handle->drainCounter = 2;
2523 pthread_cond_wait( &handle->condition, &stream_.mutex ); // block until signaled
2527 jack_deactivate( handle->client );
2528 stream_.state = STREAM_STOPPED;
2531 void RtApiJack :: abortStream( void )
2534 if ( stream_.state == STREAM_STOPPED ) {
2535 errorText_ = "RtApiJack::abortStream(): the stream is already stopped!";
2536 error( RtAudioError::WARNING );
2540 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2541 handle->drainCounter = 2;
2546 // This function will be called by a spawned thread when the user
2547 // callback function signals that the stream should be stopped or
2548 // aborted. It is necessary to handle it this way because the
2549 // callbackEvent() function must return before the jack_deactivate()
2550 // function will return.
2551 static void *jackStopStream( void *ptr )
2553 CallbackInfo *info = (CallbackInfo *) ptr;
2554 RtApiJack *object = (RtApiJack *) info->object;
2556 object->stopStream();
2557 pthread_exit( NULL );
2560 bool RtApiJack :: callbackEvent( unsigned long nframes )
2562 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
2563 if ( stream_.state == STREAM_CLOSED ) {
2564 errorText_ = "RtApiCore::callbackEvent(): the stream is closed ... this shouldn't happen!";
2565 error( RtAudioError::WARNING );
2568 if ( stream_.bufferSize != nframes ) {
2569 errorText_ = "RtApiCore::callbackEvent(): the JACK buffer size has changed ... cannot process!";
2570 error( RtAudioError::WARNING );
2574 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
2575 JackHandle *handle = (JackHandle *) stream_.apiHandle;
2577 // Check if we were draining the stream and signal is finished.
2578 if ( handle->drainCounter > 3 ) {
2579 ThreadHandle threadId;
2581 stream_.state = STREAM_STOPPING;
2582 if ( handle->internalDrain == true )
2583 pthread_create( &threadId, NULL, jackStopStream, info );
2585 pthread_cond_signal( &handle->condition );
2589 // Invoke user callback first, to get fresh output data.
2590 if ( handle->drainCounter == 0 ) {
2591 RtAudioCallback callback = (RtAudioCallback) info->callback;
2592 double streamTime = getStreamTime();
2593 RtAudioStreamStatus status = 0;
2594 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
2595 status |= RTAUDIO_OUTPUT_UNDERFLOW;
2596 handle->xrun[0] = false;
2598 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
2599 status |= RTAUDIO_INPUT_OVERFLOW;
2600 handle->xrun[1] = false;
2602 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
2603 stream_.bufferSize, streamTime, status, info->userData );
2604 if ( cbReturnValue == 2 ) {
2605 stream_.state = STREAM_STOPPING;
2606 handle->drainCounter = 2;
2608 pthread_create( &id, NULL, jackStopStream, info );
2611 else if ( cbReturnValue == 1 ) {
2612 handle->drainCounter = 1;
2613 handle->internalDrain = true;
2617 jack_default_audio_sample_t *jackbuffer;
2618 unsigned long bufferBytes = nframes * sizeof( jack_default_audio_sample_t );
2619 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
2621 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
2623 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2624 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2625 memset( jackbuffer, 0, bufferBytes );
2629 else if ( stream_.doConvertBuffer[0] ) {
2631 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
2633 for ( unsigned int i=0; i<stream_.nDeviceChannels[0]; i++ ) {
2634 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2635 memcpy( jackbuffer, &stream_.deviceBuffer[i*bufferBytes], bufferBytes );
2638 else { // no buffer conversion
2639 for ( unsigned int i=0; i<stream_.nUserChannels[0]; i++ ) {
2640 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[0][i], (jack_nframes_t) nframes );
2641 memcpy( jackbuffer, &stream_.userBuffer[0][i*bufferBytes], bufferBytes );
2646 // Don't bother draining input
2647 if ( handle->drainCounter ) {
2648 handle->drainCounter++;
2652 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
2654 if ( stream_.doConvertBuffer[1] ) {
2655 for ( unsigned int i=0; i<stream_.nDeviceChannels[1]; i++ ) {
2656 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2657 memcpy( &stream_.deviceBuffer[i*bufferBytes], jackbuffer, bufferBytes );
2659 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
2661 else { // no buffer conversion
2662 for ( unsigned int i=0; i<stream_.nUserChannels[1]; i++ ) {
2663 jackbuffer = (jack_default_audio_sample_t *) jack_port_get_buffer( handle->ports[1][i], (jack_nframes_t) nframes );
2664 memcpy( &stream_.userBuffer[1][i*bufferBytes], jackbuffer, bufferBytes );
2670 RtApi::tickStreamTime();
2673 //******************** End of __UNIX_JACK__ *********************//
2676 #if defined(__WINDOWS_ASIO__) // ASIO API on Windows
2678 // The ASIO API is designed around a callback scheme, so this
2679 // implementation is similar to that used for OS-X CoreAudio and Linux
2680 // Jack. The primary constraint with ASIO is that it only allows
2681 // access to a single driver at a time. Thus, it is not possible to
2682 // have more than one simultaneous RtAudio stream.
2684 // This implementation also requires a number of external ASIO files
2685 // and a few global variables. The ASIO callback scheme does not
2686 // allow for the passing of user data, so we must create a global
2687 // pointer to our callbackInfo structure.
2689 // On unix systems, we make use of a pthread condition variable.
2690 // Since there is no equivalent in Windows, I hacked something based
2691 // on information found in
2692 // http://www.cs.wustl.edu/~schmidt/win32-cv-1.html.
2694 #include "asiosys.h"
2696 #include "iasiothiscallresolver.h"
2697 #include "asiodrivers.h"
2700 static AsioDrivers drivers;
2701 static ASIOCallbacks asioCallbacks;
2702 static ASIODriverInfo driverInfo;
2703 static CallbackInfo *asioCallbackInfo;
2704 static bool asioXRun;
2707 int drainCounter; // Tracks callback counts when draining
2708 bool internalDrain; // Indicates if stop is initiated from callback or not.
2709 ASIOBufferInfo *bufferInfos;
2713 :drainCounter(0), internalDrain(false), bufferInfos(0) {}
2716 // Function declarations (definitions at end of section)
2717 static const char* getAsioErrorString( ASIOError result );
2718 static void sampleRateChanged( ASIOSampleRate sRate );
2719 static long asioMessages( long selector, long value, void* message, double* opt );
2721 RtApiAsio :: RtApiAsio()
2723 // ASIO cannot run on a multi-threaded appartment. You can call
2724 // CoInitialize beforehand, but it must be for appartment threading
2725 // (in which case, CoInitilialize will return S_FALSE here).
2726 coInitialized_ = false;
2727 HRESULT hr = CoInitialize( NULL );
2729 errorText_ = "RtApiAsio::ASIO requires a single-threaded appartment. Call CoInitializeEx(0,COINIT_APARTMENTTHREADED)";
2730 error( RtAudioError::WARNING );
2732 coInitialized_ = true;
2734 drivers.removeCurrentDriver();
2735 driverInfo.asioVersion = 2;
2737 // See note in DirectSound implementation about GetDesktopWindow().
2738 driverInfo.sysRef = GetForegroundWindow();
2741 RtApiAsio :: ~RtApiAsio()
2743 if ( stream_.state != STREAM_CLOSED ) closeStream();
2744 if ( coInitialized_ ) CoUninitialize();
2747 unsigned int RtApiAsio :: getDeviceCount( void )
2749 return (unsigned int) drivers.asioGetNumDev();
2752 RtAudio::DeviceInfo RtApiAsio :: getDeviceInfo( unsigned int device )
2754 RtAudio::DeviceInfo info;
2755 info.probed = false;
2758 unsigned int nDevices = getDeviceCount();
2759 if ( nDevices == 0 ) {
2760 errorText_ = "RtApiAsio::getDeviceInfo: no devices found!";
2761 error( RtAudioError::INVALID_USE );
2765 if ( device >= nDevices ) {
2766 errorText_ = "RtApiAsio::getDeviceInfo: device ID is invalid!";
2767 error( RtAudioError::INVALID_USE );
2771 // If a stream is already open, we cannot probe other devices. Thus, use the saved results.
2772 if ( stream_.state != STREAM_CLOSED ) {
2773 if ( device >= devices_.size() ) {
2774 errorText_ = "RtApiAsio::getDeviceInfo: device ID was not present before stream was opened.";
2775 error( RtAudioError::WARNING );
2778 return devices_[ device ];
2781 char driverName[32];
2782 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2783 if ( result != ASE_OK ) {
2784 errorStream_ << "RtApiAsio::getDeviceInfo: unable to get driver name (" << getAsioErrorString( result ) << ").";
2785 errorText_ = errorStream_.str();
2786 error( RtAudioError::WARNING );
2790 info.name = driverName;
2792 if ( !drivers.loadDriver( driverName ) ) {
2793 errorStream_ << "RtApiAsio::getDeviceInfo: unable to load driver (" << driverName << ").";
2794 errorText_ = errorStream_.str();
2795 error( RtAudioError::WARNING );
2799 result = ASIOInit( &driverInfo );
2800 if ( result != ASE_OK ) {
2801 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2802 errorText_ = errorStream_.str();
2803 error( RtAudioError::WARNING );
2807 // Determine the device channel information.
2808 long inputChannels, outputChannels;
2809 result = ASIOGetChannels( &inputChannels, &outputChannels );
2810 if ( result != ASE_OK ) {
2811 drivers.removeCurrentDriver();
2812 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2813 errorText_ = errorStream_.str();
2814 error( RtAudioError::WARNING );
2818 info.outputChannels = outputChannels;
2819 info.inputChannels = inputChannels;
2820 if ( info.outputChannels > 0 && info.inputChannels > 0 )
2821 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
2823 // Determine the supported sample rates.
2824 info.sampleRates.clear();
2825 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
2826 result = ASIOCanSampleRate( (ASIOSampleRate) SAMPLE_RATES[i] );
2827 if ( result == ASE_OK ) {
2828 info.sampleRates.push_back( SAMPLE_RATES[i] );
2830 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
2831 info.preferredSampleRate = SAMPLE_RATES[i];
2835 // Determine supported data types ... just check first channel and assume rest are the same.
2836 ASIOChannelInfo channelInfo;
2837 channelInfo.channel = 0;
2838 channelInfo.isInput = true;
2839 if ( info.inputChannels <= 0 ) channelInfo.isInput = false;
2840 result = ASIOGetChannelInfo( &channelInfo );
2841 if ( result != ASE_OK ) {
2842 drivers.removeCurrentDriver();
2843 errorStream_ << "RtApiAsio::getDeviceInfo: error (" << getAsioErrorString( result ) << ") getting driver channel info (" << driverName << ").";
2844 errorText_ = errorStream_.str();
2845 error( RtAudioError::WARNING );
2849 info.nativeFormats = 0;
2850 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB )
2851 info.nativeFormats |= RTAUDIO_SINT16;
2852 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB )
2853 info.nativeFormats |= RTAUDIO_SINT32;
2854 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB )
2855 info.nativeFormats |= RTAUDIO_FLOAT32;
2856 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB )
2857 info.nativeFormats |= RTAUDIO_FLOAT64;
2858 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB )
2859 info.nativeFormats |= RTAUDIO_SINT24;
2861 if ( info.outputChannels > 0 )
2862 if ( getDefaultOutputDevice() == device ) info.isDefaultOutput = true;
2863 if ( info.inputChannels > 0 )
2864 if ( getDefaultInputDevice() == device ) info.isDefaultInput = true;
2867 drivers.removeCurrentDriver();
2871 static void bufferSwitch( long index, ASIOBool /*processNow*/ )
2873 RtApiAsio *object = (RtApiAsio *) asioCallbackInfo->object;
2874 object->callbackEvent( index );
2877 void RtApiAsio :: saveDeviceInfo( void )
2881 unsigned int nDevices = getDeviceCount();
2882 devices_.resize( nDevices );
2883 for ( unsigned int i=0; i<nDevices; i++ )
2884 devices_[i] = getDeviceInfo( i );
2887 bool RtApiAsio :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
2888 unsigned int firstChannel, unsigned int sampleRate,
2889 RtAudioFormat format, unsigned int *bufferSize,
2890 RtAudio::StreamOptions *options )
2891 {////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
2893 bool isDuplexInput = mode == INPUT && stream_.mode == OUTPUT;
2895 // For ASIO, a duplex stream MUST use the same driver.
2896 if ( isDuplexInput && stream_.device[0] != device ) {
2897 errorText_ = "RtApiAsio::probeDeviceOpen: an ASIO duplex stream must use the same device for input and output!";
2901 char driverName[32];
2902 ASIOError result = drivers.asioGetDriverName( (int) device, driverName, 32 );
2903 if ( result != ASE_OK ) {
2904 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to get driver name (" << getAsioErrorString( result ) << ").";
2905 errorText_ = errorStream_.str();
2909 // Only load the driver once for duplex stream.
2910 if ( !isDuplexInput ) {
2911 // The getDeviceInfo() function will not work when a stream is open
2912 // because ASIO does not allow multiple devices to run at the same
2913 // time. Thus, we'll probe the system before opening a stream and
2914 // save the results for use by getDeviceInfo().
2915 this->saveDeviceInfo();
2917 if ( !drivers.loadDriver( driverName ) ) {
2918 errorStream_ << "RtApiAsio::probeDeviceOpen: unable to load driver (" << driverName << ").";
2919 errorText_ = errorStream_.str();
2923 result = ASIOInit( &driverInfo );
2924 if ( result != ASE_OK ) {
2925 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") initializing driver (" << driverName << ").";
2926 errorText_ = errorStream_.str();
2931 // keep them before any "goto error", they are used for error cleanup + goto device boundary checks
2932 bool buffersAllocated = false;
2933 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
2934 unsigned int nChannels;
2937 // Check the device channel count.
2938 long inputChannels, outputChannels;
2939 result = ASIOGetChannels( &inputChannels, &outputChannels );
2940 if ( result != ASE_OK ) {
2941 errorStream_ << "RtApiAsio::probeDeviceOpen: error (" << getAsioErrorString( result ) << ") getting channel count (" << driverName << ").";
2942 errorText_ = errorStream_.str();
2946 if ( ( mode == OUTPUT && (channels+firstChannel) > (unsigned int) outputChannels) ||
2947 ( mode == INPUT && (channels+firstChannel) > (unsigned int) inputChannels) ) {
2948 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested channel count (" << channels << ") + offset (" << firstChannel << ").";
2949 errorText_ = errorStream_.str();
2952 stream_.nDeviceChannels[mode] = channels;
2953 stream_.nUserChannels[mode] = channels;
2954 stream_.channelOffset[mode] = firstChannel;
2956 // Verify the sample rate is supported.
2957 result = ASIOCanSampleRate( (ASIOSampleRate) sampleRate );
2958 if ( result != ASE_OK ) {
2959 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") does not support requested sample rate (" << sampleRate << ").";
2960 errorText_ = errorStream_.str();
2964 // Get the current sample rate
2965 ASIOSampleRate currentRate;
2966 result = ASIOGetSampleRate( ¤tRate );
2967 if ( result != ASE_OK ) {
2968 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error getting sample rate.";
2969 errorText_ = errorStream_.str();
2973 // Set the sample rate only if necessary
2974 if ( currentRate != sampleRate ) {
2975 result = ASIOSetSampleRate( (ASIOSampleRate) sampleRate );
2976 if ( result != ASE_OK ) {
2977 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error setting sample rate (" << sampleRate << ").";
2978 errorText_ = errorStream_.str();
2983 // Determine the driver data type.
2984 ASIOChannelInfo channelInfo;
2985 channelInfo.channel = 0;
2986 if ( mode == OUTPUT ) channelInfo.isInput = false;
2987 else channelInfo.isInput = true;
2988 result = ASIOGetChannelInfo( &channelInfo );
2989 if ( result != ASE_OK ) {
2990 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting data format.";
2991 errorText_ = errorStream_.str();
2995 // Assuming WINDOWS host is always little-endian.
2996 stream_.doByteSwap[mode] = false;
2997 stream_.userFormat = format;
2998 stream_.deviceFormat[mode] = 0;
2999 if ( channelInfo.type == ASIOSTInt16MSB || channelInfo.type == ASIOSTInt16LSB ) {
3000 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
3001 if ( channelInfo.type == ASIOSTInt16MSB ) stream_.doByteSwap[mode] = true;
3003 else if ( channelInfo.type == ASIOSTInt32MSB || channelInfo.type == ASIOSTInt32LSB ) {
3004 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
3005 if ( channelInfo.type == ASIOSTInt32MSB ) stream_.doByteSwap[mode] = true;
3007 else if ( channelInfo.type == ASIOSTFloat32MSB || channelInfo.type == ASIOSTFloat32LSB ) {
3008 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
3009 if ( channelInfo.type == ASIOSTFloat32MSB ) stream_.doByteSwap[mode] = true;
3011 else if ( channelInfo.type == ASIOSTFloat64MSB || channelInfo.type == ASIOSTFloat64LSB ) {
3012 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
3013 if ( channelInfo.type == ASIOSTFloat64MSB ) stream_.doByteSwap[mode] = true;
3015 else if ( channelInfo.type == ASIOSTInt24MSB || channelInfo.type == ASIOSTInt24LSB ) {
3016 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
3017 if ( channelInfo.type == ASIOSTInt24MSB ) stream_.doByteSwap[mode] = true;
3020 if ( stream_.deviceFormat[mode] == 0 ) {
3021 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") data format not supported by RtAudio.";
3022 errorText_ = errorStream_.str();
3026 // Set the buffer size. For a duplex stream, this will end up
3027 // setting the buffer size based on the input constraints, which
3029 long minSize, maxSize, preferSize, granularity;
3030 result = ASIOGetBufferSize( &minSize, &maxSize, &preferSize, &granularity );
3031 if ( result != ASE_OK ) {
3032 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting buffer size.";
3033 errorText_ = errorStream_.str();
3037 if ( isDuplexInput ) {
3038 // When this is the duplex input (output was opened before), then we have to use the same
3039 // buffersize as the output, because it might use the preferred buffer size, which most
3040 // likely wasn't passed as input to this. The buffer sizes have to be identically anyway,
3041 // So instead of throwing an error, make them equal. The caller uses the reference
3042 // to the "bufferSize" param as usual to set up processing buffers.
3044 *bufferSize = stream_.bufferSize;
3047 if ( *bufferSize == 0 ) *bufferSize = preferSize;
3048 else if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3049 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3050 else if ( granularity == -1 ) {
3051 // Make sure bufferSize is a power of two.
3052 int log2_of_min_size = 0;
3053 int log2_of_max_size = 0;
3055 for ( unsigned int i = 0; i < sizeof(long) * 8; i++ ) {
3056 if ( minSize & ((long)1 << i) ) log2_of_min_size = i;
3057 if ( maxSize & ((long)1 << i) ) log2_of_max_size = i;
3060 long min_delta = std::abs( (long)*bufferSize - ((long)1 << log2_of_min_size) );
3061 int min_delta_num = log2_of_min_size;
3063 for (int i = log2_of_min_size + 1; i <= log2_of_max_size; i++) {
3064 long current_delta = std::abs( (long)*bufferSize - ((long)1 << i) );
3065 if (current_delta < min_delta) {
3066 min_delta = current_delta;
3071 *bufferSize = ( (unsigned int)1 << min_delta_num );
3072 if ( *bufferSize < (unsigned int) minSize ) *bufferSize = (unsigned int) minSize;
3073 else if ( *bufferSize > (unsigned int) maxSize ) *bufferSize = (unsigned int) maxSize;
3075 else if ( granularity != 0 ) {
3076 // Set to an even multiple of granularity, rounding up.
3077 *bufferSize = (*bufferSize + granularity-1) / granularity * granularity;
3082 // we don't use it anymore, see above!
3083 // Just left it here for the case...
3084 if ( isDuplexInput && stream_.bufferSize != *bufferSize ) {
3085 errorText_ = "RtApiAsio::probeDeviceOpen: input/output buffersize discrepancy!";
3090 stream_.bufferSize = *bufferSize;
3091 stream_.nBuffers = 2;
3093 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
3094 else stream_.userInterleaved = true;
3096 // ASIO always uses non-interleaved buffers.
3097 stream_.deviceInterleaved[mode] = false;
3099 // Allocate, if necessary, our AsioHandle structure for the stream.
3100 if ( handle == 0 ) {
3102 handle = new AsioHandle;
3104 catch ( std::bad_alloc& ) {
3105 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating AsioHandle memory.";
3108 handle->bufferInfos = 0;
3110 // Create a manual-reset event.
3111 handle->condition = CreateEvent( NULL, // no security
3112 TRUE, // manual-reset
3113 FALSE, // non-signaled initially
3115 stream_.apiHandle = (void *) handle;
3118 // Create the ASIO internal buffers. Since RtAudio sets up input
3119 // and output separately, we'll have to dispose of previously
3120 // created output buffers for a duplex stream.
3121 if ( mode == INPUT && stream_.mode == OUTPUT ) {
3122 ASIODisposeBuffers();
3123 if ( handle->bufferInfos ) free( handle->bufferInfos );
3126 // Allocate, initialize, and save the bufferInfos in our stream callbackInfo structure.
3128 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3129 handle->bufferInfos = (ASIOBufferInfo *) malloc( nChannels * sizeof(ASIOBufferInfo) );
3130 if ( handle->bufferInfos == NULL ) {
3131 errorStream_ << "RtApiAsio::probeDeviceOpen: error allocating bufferInfo memory for driver (" << driverName << ").";
3132 errorText_ = errorStream_.str();
3136 ASIOBufferInfo *infos;
3137 infos = handle->bufferInfos;
3138 for ( i=0; i<stream_.nDeviceChannels[0]; i++, infos++ ) {
3139 infos->isInput = ASIOFalse;
3140 infos->channelNum = i + stream_.channelOffset[0];
3141 infos->buffers[0] = infos->buffers[1] = 0;
3143 for ( i=0; i<stream_.nDeviceChannels[1]; i++, infos++ ) {
3144 infos->isInput = ASIOTrue;
3145 infos->channelNum = i + stream_.channelOffset[1];
3146 infos->buffers[0] = infos->buffers[1] = 0;
3149 // prepare for callbacks
3150 stream_.sampleRate = sampleRate;
3151 stream_.device[mode] = device;
3152 stream_.mode = isDuplexInput ? DUPLEX : mode;
3154 // store this class instance before registering callbacks, that are going to use it
3155 asioCallbackInfo = &stream_.callbackInfo;
3156 stream_.callbackInfo.object = (void *) this;
3158 // Set up the ASIO callback structure and create the ASIO data buffers.
3159 asioCallbacks.bufferSwitch = &bufferSwitch;
3160 asioCallbacks.sampleRateDidChange = &sampleRateChanged;
3161 asioCallbacks.asioMessage = &asioMessages;
3162 asioCallbacks.bufferSwitchTimeInfo = NULL;
3163 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3164 if ( result != ASE_OK ) {
3165 // Standard method failed. This can happen with strict/misbehaving drivers that return valid buffer size ranges
3166 // but only accept the preferred buffer size as parameter for ASIOCreateBuffers. eg. Creatives ASIO driver
3167 // in that case, let's be naïve and try that instead
3168 *bufferSize = preferSize;
3169 stream_.bufferSize = *bufferSize;
3170 result = ASIOCreateBuffers( handle->bufferInfos, nChannels, stream_.bufferSize, &asioCallbacks );
3173 if ( result != ASE_OK ) {
3174 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") creating buffers.";
3175 errorText_ = errorStream_.str();
3178 buffersAllocated = true;
3179 stream_.state = STREAM_STOPPED;
3181 // Set flags for buffer conversion.
3182 stream_.doConvertBuffer[mode] = false;
3183 if ( stream_.userFormat != stream_.deviceFormat[mode] )
3184 stream_.doConvertBuffer[mode] = true;
3185 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
3186 stream_.nUserChannels[mode] > 1 )
3187 stream_.doConvertBuffer[mode] = true;
3189 // Allocate necessary internal buffers
3190 unsigned long bufferBytes;
3191 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
3192 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
3193 if ( stream_.userBuffer[mode] == NULL ) {
3194 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating user buffer memory.";
3198 if ( stream_.doConvertBuffer[mode] ) {
3200 bool makeBuffer = true;
3201 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
3202 if ( isDuplexInput && stream_.deviceBuffer ) {
3203 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
3204 if ( bufferBytes <= bytesOut ) makeBuffer = false;
3208 bufferBytes *= *bufferSize;
3209 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
3210 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
3211 if ( stream_.deviceBuffer == NULL ) {
3212 errorText_ = "RtApiAsio::probeDeviceOpen: error allocating device buffer memory.";
3218 // Determine device latencies
3219 long inputLatency, outputLatency;
3220 result = ASIOGetLatencies( &inputLatency, &outputLatency );
3221 if ( result != ASE_OK ) {
3222 errorStream_ << "RtApiAsio::probeDeviceOpen: driver (" << driverName << ") error (" << getAsioErrorString( result ) << ") getting latency.";
3223 errorText_ = errorStream_.str();
3224 error( RtAudioError::WARNING); // warn but don't fail
3227 stream_.latency[0] = outputLatency;
3228 stream_.latency[1] = inputLatency;
3231 // Setup the buffer conversion information structure. We don't use
3232 // buffers to do channel offsets, so we override that parameter
3234 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, 0 );
3239 if ( !isDuplexInput ) {
3240 // the cleanup for error in the duplex input, is done by RtApi::openStream
3241 // So we clean up for single channel only
3243 if ( buffersAllocated )
3244 ASIODisposeBuffers();
3246 drivers.removeCurrentDriver();
3249 CloseHandle( handle->condition );
3250 if ( handle->bufferInfos )
3251 free( handle->bufferInfos );
3254 stream_.apiHandle = 0;
3258 if ( stream_.userBuffer[mode] ) {
3259 free( stream_.userBuffer[mode] );
3260 stream_.userBuffer[mode] = 0;
3263 if ( stream_.deviceBuffer ) {
3264 free( stream_.deviceBuffer );
3265 stream_.deviceBuffer = 0;
3270 }////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
3272 void RtApiAsio :: closeStream()
3274 if ( stream_.state == STREAM_CLOSED ) {
3275 errorText_ = "RtApiAsio::closeStream(): no open stream to close!";
3276 error( RtAudioError::WARNING );
3280 if ( stream_.state == STREAM_RUNNING ) {
3281 stream_.state = STREAM_STOPPED;
3284 ASIODisposeBuffers();
3285 drivers.removeCurrentDriver();
3287 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3289 CloseHandle( handle->condition );
3290 if ( handle->bufferInfos )
3291 free( handle->bufferInfos );
3293 stream_.apiHandle = 0;
3296 for ( int i=0; i<2; i++ ) {
3297 if ( stream_.userBuffer[i] ) {
3298 free( stream_.userBuffer[i] );
3299 stream_.userBuffer[i] = 0;
3303 if ( stream_.deviceBuffer ) {
3304 free( stream_.deviceBuffer );
3305 stream_.deviceBuffer = 0;
3308 stream_.mode = UNINITIALIZED;
3309 stream_.state = STREAM_CLOSED;
3312 bool stopThreadCalled = false;
3314 void RtApiAsio :: startStream()
3317 if ( stream_.state == STREAM_RUNNING ) {
3318 errorText_ = "RtApiAsio::startStream(): the stream is already running!";
3319 error( RtAudioError::WARNING );
3323 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3324 ASIOError result = ASIOStart();
3325 if ( result != ASE_OK ) {
3326 errorStream_ << "RtApiAsio::startStream: error (" << getAsioErrorString( result ) << ") starting device.";
3327 errorText_ = errorStream_.str();
3331 handle->drainCounter = 0;
3332 handle->internalDrain = false;
3333 ResetEvent( handle->condition );
3334 stream_.state = STREAM_RUNNING;
3338 stopThreadCalled = false;
3340 if ( result == ASE_OK ) return;
3341 error( RtAudioError::SYSTEM_ERROR );
3344 void RtApiAsio :: stopStream()
3347 if ( stream_.state == STREAM_STOPPED ) {
3348 errorText_ = "RtApiAsio::stopStream(): the stream is already stopped!";
3349 error( RtAudioError::WARNING );
3353 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3354 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3355 if ( handle->drainCounter == 0 ) {
3356 handle->drainCounter = 2;
3357 WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
3361 stream_.state = STREAM_STOPPED;
3363 ASIOError result = ASIOStop();
3364 if ( result != ASE_OK ) {
3365 errorStream_ << "RtApiAsio::stopStream: error (" << getAsioErrorString( result ) << ") stopping device.";
3366 errorText_ = errorStream_.str();
3369 if ( result == ASE_OK ) return;
3370 error( RtAudioError::SYSTEM_ERROR );
3373 void RtApiAsio :: abortStream()
3376 if ( stream_.state == STREAM_STOPPED ) {
3377 errorText_ = "RtApiAsio::abortStream(): the stream is already stopped!";
3378 error( RtAudioError::WARNING );
3382 // The following lines were commented-out because some behavior was
3383 // noted where the device buffers need to be zeroed to avoid
3384 // continuing sound, even when the device buffers are completely
3385 // disposed. So now, calling abort is the same as calling stop.
3386 // AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3387 // handle->drainCounter = 2;
3391 // This function will be called by a spawned thread when the user
3392 // callback function signals that the stream should be stopped or
3393 // aborted. It is necessary to handle it this way because the
3394 // callbackEvent() function must return before the ASIOStop()
3395 // function will return.
3396 static unsigned __stdcall asioStopStream( void *ptr )
3398 CallbackInfo *info = (CallbackInfo *) ptr;
3399 RtApiAsio *object = (RtApiAsio *) info->object;
3401 object->stopStream();
3406 bool RtApiAsio :: callbackEvent( long bufferIndex )
3408 if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) return SUCCESS;
3409 if ( stream_.state == STREAM_CLOSED ) {
3410 errorText_ = "RtApiAsio::callbackEvent(): the stream is closed ... this shouldn't happen!";
3411 error( RtAudioError::WARNING );
3415 CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
3416 AsioHandle *handle = (AsioHandle *) stream_.apiHandle;
3418 // Check if we were draining the stream and signal if finished.
3419 if ( handle->drainCounter > 3 ) {
3421 stream_.state = STREAM_STOPPING;
3422 if ( handle->internalDrain == false )
3423 SetEvent( handle->condition );
3424 else { // spawn a thread to stop the stream
3426 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3427 &stream_.callbackInfo, 0, &threadId );
3432 // Invoke user callback to get fresh output data UNLESS we are
3434 if ( handle->drainCounter == 0 ) {
3435 RtAudioCallback callback = (RtAudioCallback) info->callback;
3436 double streamTime = getStreamTime();
3437 RtAudioStreamStatus status = 0;
3438 if ( stream_.mode != INPUT && asioXRun == true ) {
3439 status |= RTAUDIO_OUTPUT_UNDERFLOW;
3442 if ( stream_.mode != OUTPUT && asioXRun == true ) {
3443 status |= RTAUDIO_INPUT_OVERFLOW;
3446 int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
3447 stream_.bufferSize, streamTime, status, info->userData );
3448 if ( cbReturnValue == 2 ) {
3449 stream_.state = STREAM_STOPPING;
3450 handle->drainCounter = 2;
3452 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &asioStopStream,
3453 &stream_.callbackInfo, 0, &threadId );
3456 else if ( cbReturnValue == 1 ) {
3457 handle->drainCounter = 1;
3458 handle->internalDrain = true;
3462 unsigned int nChannels, bufferBytes, i, j;
3463 nChannels = stream_.nDeviceChannels[0] + stream_.nDeviceChannels[1];
3464 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
3466 bufferBytes = stream_.bufferSize * formatBytes( stream_.deviceFormat[0] );
3468 if ( handle->drainCounter > 1 ) { // write zeros to the output stream
3470 for ( i=0, j=0; i<nChannels; i++ ) {
3471 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3472 memset( handle->bufferInfos[i].buffers[bufferIndex], 0, bufferBytes );
3476 else if ( stream_.doConvertBuffer[0] ) {
3478 convertBuffer( stream_.deviceBuffer, stream_.userBuffer[0], stream_.convertInfo[0] );
3479 if ( stream_.doByteSwap[0] )
3480 byteSwapBuffer( stream_.deviceBuffer,
3481 stream_.bufferSize * stream_.nDeviceChannels[0],
3482 stream_.deviceFormat[0] );
3484 for ( i=0, j=0; i<nChannels; i++ ) {
3485 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3486 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3487 &stream_.deviceBuffer[j++*bufferBytes], bufferBytes );
3493 if ( stream_.doByteSwap[0] )
3494 byteSwapBuffer( stream_.userBuffer[0],
3495 stream_.bufferSize * stream_.nUserChannels[0],
3496 stream_.userFormat );
3498 for ( i=0, j=0; i<nChannels; i++ ) {
3499 if ( handle->bufferInfos[i].isInput != ASIOTrue )
3500 memcpy( handle->bufferInfos[i].buffers[bufferIndex],
3501 &stream_.userBuffer[0][bufferBytes*j++], bufferBytes );
3507 // Don't bother draining input
3508 if ( handle->drainCounter ) {
3509 handle->drainCounter++;
3513 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
3515 bufferBytes = stream_.bufferSize * formatBytes(stream_.deviceFormat[1]);
3517 if (stream_.doConvertBuffer[1]) {
3519 // Always interleave ASIO input data.
3520 for ( i=0, j=0; i<nChannels; i++ ) {
3521 if ( handle->bufferInfos[i].isInput == ASIOTrue )
3522 memcpy( &stream_.deviceBuffer[j++*bufferBytes],
3523 handle->bufferInfos[i].buffers[bufferIndex],
3527 if ( stream_.doByteSwap[1] )
3528 byteSwapBuffer( stream_.deviceBuffer,
3529 stream_.bufferSize * stream_.nDeviceChannels[1],
3530 stream_.deviceFormat[1] );
3531 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
3535 for ( i=0, j=0; i<nChannels; i++ ) {
3536 if ( handle->bufferInfos[i].isInput == ASIOTrue ) {
3537 memcpy( &stream_.userBuffer[1][bufferBytes*j++],
3538 handle->bufferInfos[i].buffers[bufferIndex],
3543 if ( stream_.doByteSwap[1] )
3544 byteSwapBuffer( stream_.userBuffer[1],
3545 stream_.bufferSize * stream_.nUserChannels[1],
3546 stream_.userFormat );
3551 // The following call was suggested by Malte Clasen. While the API
3552 // documentation indicates it should not be required, some device
3553 // drivers apparently do not function correctly without it.
3556 RtApi::tickStreamTime();
3560 static void sampleRateChanged( ASIOSampleRate sRate )
3562 // The ASIO documentation says that this usually only happens during
3563 // external sync. Audio processing is not stopped by the driver,
3564 // actual sample rate might not have even changed, maybe only the
3565 // sample rate status of an AES/EBU or S/PDIF digital input at the
3568 RtApi *object = (RtApi *) asioCallbackInfo->object;
3570 object->stopStream();
3572 catch ( RtAudioError &exception ) {
3573 std::cerr << "\nRtApiAsio: sampleRateChanged() error (" << exception.getMessage() << ")!\n" << std::endl;
3577 std::cerr << "\nRtApiAsio: driver reports sample rate changed to " << sRate << " ... stream stopped!!!\n" << std::endl;
3580 static long asioMessages( long selector, long value, void* /*message*/, double* /*opt*/ )
3584 switch( selector ) {
3585 case kAsioSelectorSupported:
3586 if ( value == kAsioResetRequest
3587 || value == kAsioEngineVersion
3588 || value == kAsioResyncRequest
3589 || value == kAsioLatenciesChanged
3590 // The following three were added for ASIO 2.0, you don't
3591 // necessarily have to support them.
3592 || value == kAsioSupportsTimeInfo
3593 || value == kAsioSupportsTimeCode
3594 || value == kAsioSupportsInputMonitor)
3597 case kAsioResetRequest:
3598 // Defer the task and perform the reset of the driver during the
3599 // next "safe" situation. You cannot reset the driver right now,
3600 // as this code is called from the driver. Reset the driver is
3601 // done by completely destruct is. I.e. ASIOStop(),
3602 // ASIODisposeBuffers(), Destruction Afterwards you initialize the
3604 std::cerr << "\nRtApiAsio: driver reset requested!!!" << std::endl;
3607 case kAsioResyncRequest:
3608 // This informs the application that the driver encountered some
3609 // non-fatal data loss. It is used for synchronization purposes
3610 // of different media. Added mainly to work around the Win16Mutex
3611 // problems in Windows 95/98 with the Windows Multimedia system,
3612 // which could lose data because the Mutex was held too long by
3613 // another thread. However a driver can issue it in other
3615 // std::cerr << "\nRtApiAsio: driver resync requested!!!" << std::endl;
3619 case kAsioLatenciesChanged:
3620 // This will inform the host application that the drivers were
3621 // latencies changed. Beware, it this does not mean that the
3622 // buffer sizes have changed! You might need to update internal
3624 std::cerr << "\nRtApiAsio: driver latency may have changed!!!" << std::endl;
3627 case kAsioEngineVersion:
3628 // Return the supported ASIO version of the host application. If
3629 // a host application does not implement this selector, ASIO 1.0
3630 // is assumed by the driver.
3633 case kAsioSupportsTimeInfo:
3634 // Informs the driver whether the
3635 // asioCallbacks.bufferSwitchTimeInfo() callback is supported.
3636 // For compatibility with ASIO 1.0 drivers the host application
3637 // should always support the "old" bufferSwitch method, too.
3640 case kAsioSupportsTimeCode:
3641 // Informs the driver whether application is interested in time
3642 // code info. If an application does not need to know about time
3643 // code, the driver has less work to do.
3650 static const char* getAsioErrorString( ASIOError result )
3658 static const Messages m[] =
3660 { ASE_NotPresent, "Hardware input or output is not present or available." },
3661 { ASE_HWMalfunction, "Hardware is malfunctioning." },
3662 { ASE_InvalidParameter, "Invalid input parameter." },
3663 { ASE_InvalidMode, "Invalid mode." },
3664 { ASE_SPNotAdvancing, "Sample position not advancing." },
3665 { ASE_NoClock, "Sample clock or rate cannot be determined or is not present." },
3666 { ASE_NoMemory, "Not enough memory to complete the request." }
3669 for ( unsigned int i = 0; i < sizeof(m)/sizeof(m[0]); ++i )
3670 if ( m[i].value == result ) return m[i].message;
3672 return "Unknown error.";
3675 //******************** End of __WINDOWS_ASIO__ *********************//
3679 #if defined(__WINDOWS_WASAPI__) // Windows WASAPI API
3681 // Authored by Marcus Tomlinson <themarcustomlinson@gmail.com>, April 2014
3682 // - Introduces support for the Windows WASAPI API
3683 // - Aims to deliver bit streams to and from hardware at the lowest possible latency, via the absolute minimum buffer sizes required
3684 // - Provides flexible stream configuration to an otherwise strict and inflexible WASAPI interface
3685 // - Includes automatic internal conversion of sample rate and buffer size between hardware and the user
3690 #include <audioclient.h>
3692 #include <mmdeviceapi.h>
3693 #include <functiondiscoverykeys_devpkey.h>
3695 //=============================================================================
// Release a COM interface pointer and null it, guarding against NULL
// (the guard and the nulling lines were missing from the visible macro,
// leaving dangling '\' continuations).
#define SAFE_RELEASE( objectPtr )\
if ( objectPtr )\
{\
  objectPtr->Release();\
  objectPtr = NULL;\
}
3704 typedef HANDLE ( __stdcall *TAvSetMmThreadCharacteristicsPtr )( LPCWSTR TaskName, LPDWORD TaskIndex );
3706 //-----------------------------------------------------------------------------
3708 // WASAPI dictates stream sample rate, format, channel count, and in some cases, buffer size.
3709 // Therefore we must perform all necessary conversions to user buffers in order to satisfy these
3710 // requirements. WasapiBuffer ring buffers are used between HwIn->UserIn and UserOut->HwOut to
3711 // provide intermediate storage for read / write synchronization.
3725 // sets the length of the internal ring buffer
3726 void setBufferSize( unsigned int bufferSize, unsigned int formatBytes ) {
3729 buffer_ = ( char* ) calloc( bufferSize, formatBytes );
3731 bufferSize_ = bufferSize;
3736 // attempt to push a buffer into the ring buffer at the current "in" index
3737 bool pushBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3739 if ( !buffer || // incoming buffer is NULL
3740 bufferSize == 0 || // incoming buffer has no data
3741 bufferSize > bufferSize_ ) // incoming buffer too large
3746 unsigned int relOutIndex = outIndex_;
3747 unsigned int inIndexEnd = inIndex_ + bufferSize;
3748 if ( relOutIndex < inIndex_ && inIndexEnd >= bufferSize_ ) {
3749 relOutIndex += bufferSize_;
3752 // "in" index can end on the "out" index but cannot begin at it
3753 if ( inIndex_ <= relOutIndex && inIndexEnd > relOutIndex ) {
3754 return false; // not enough space between "in" index and "out" index
3757 // copy buffer from external to internal
3758 int fromZeroSize = inIndex_ + bufferSize - bufferSize_;
3759 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3760 int fromInSize = bufferSize - fromZeroSize;
3765 memcpy( &( ( char* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( char ) );
3766 memcpy( buffer_, &( ( char* ) buffer )[fromInSize], fromZeroSize * sizeof( char ) );
3768 case RTAUDIO_SINT16:
3769 memcpy( &( ( short* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( short ) );
3770 memcpy( buffer_, &( ( short* ) buffer )[fromInSize], fromZeroSize * sizeof( short ) );
3772 case RTAUDIO_SINT24:
3773 memcpy( &( ( S24* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( S24 ) );
3774 memcpy( buffer_, &( ( S24* ) buffer )[fromInSize], fromZeroSize * sizeof( S24 ) );
3776 case RTAUDIO_SINT32:
3777 memcpy( &( ( int* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( int ) );
3778 memcpy( buffer_, &( ( int* ) buffer )[fromInSize], fromZeroSize * sizeof( int ) );
3780 case RTAUDIO_FLOAT32:
3781 memcpy( &( ( float* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( float ) );
3782 memcpy( buffer_, &( ( float* ) buffer )[fromInSize], fromZeroSize * sizeof( float ) );
3784 case RTAUDIO_FLOAT64:
3785 memcpy( &( ( double* ) buffer_ )[inIndex_], buffer, fromInSize * sizeof( double ) );
3786 memcpy( buffer_, &( ( double* ) buffer )[fromInSize], fromZeroSize * sizeof( double ) );
3790 // update "in" index
3791 inIndex_ += bufferSize;
3792 inIndex_ %= bufferSize_;
3797 // attempt to pull a buffer from the ring buffer from the current "out" index
3798 bool pullBuffer( char* buffer, unsigned int bufferSize, RtAudioFormat format )
3800 if ( !buffer || // incoming buffer is NULL
3801 bufferSize == 0 || // incoming buffer has no data
3802 bufferSize > bufferSize_ ) // incoming buffer too large
3807 unsigned int relInIndex = inIndex_;
3808 unsigned int outIndexEnd = outIndex_ + bufferSize;
3809 if ( relInIndex < outIndex_ && outIndexEnd >= bufferSize_ ) {
3810 relInIndex += bufferSize_;
3813 // "out" index can begin at and end on the "in" index
3814 if ( outIndex_ < relInIndex && outIndexEnd > relInIndex ) {
3815 return false; // not enough space between "out" index and "in" index
3818 // copy buffer from internal to external
3819 int fromZeroSize = outIndex_ + bufferSize - bufferSize_;
3820 fromZeroSize = fromZeroSize < 0 ? 0 : fromZeroSize;
3821 int fromOutSize = bufferSize - fromZeroSize;
3826 memcpy( buffer, &( ( char* ) buffer_ )[outIndex_], fromOutSize * sizeof( char ) );
3827 memcpy( &( ( char* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( char ) );
3829 case RTAUDIO_SINT16:
3830 memcpy( buffer, &( ( short* ) buffer_ )[outIndex_], fromOutSize * sizeof( short ) );
3831 memcpy( &( ( short* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( short ) );
3833 case RTAUDIO_SINT24:
3834 memcpy( buffer, &( ( S24* ) buffer_ )[outIndex_], fromOutSize * sizeof( S24 ) );
3835 memcpy( &( ( S24* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( S24 ) );
3837 case RTAUDIO_SINT32:
3838 memcpy( buffer, &( ( int* ) buffer_ )[outIndex_], fromOutSize * sizeof( int ) );
3839 memcpy( &( ( int* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( int ) );
3841 case RTAUDIO_FLOAT32:
3842 memcpy( buffer, &( ( float* ) buffer_ )[outIndex_], fromOutSize * sizeof( float ) );
3843 memcpy( &( ( float* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( float ) );
3845 case RTAUDIO_FLOAT64:
3846 memcpy( buffer, &( ( double* ) buffer_ )[outIndex_], fromOutSize * sizeof( double ) );
3847 memcpy( &( ( double* ) buffer )[fromOutSize], buffer_, fromZeroSize * sizeof( double ) );
3851 // update "out" index
3852 outIndex_ += bufferSize;
3853 outIndex_ %= bufferSize_;
// Ring-buffer bookkeeping.  All three values count samples (elements of the
// stream's device format), not bytes: push/pullBuffer index typed arrays with
// them and wrap modulo bufferSize_.
// total capacity of the ring, in samples
3860 unsigned int bufferSize_;
// next write position ("in" index)
3861 unsigned int inIndex_;
// next read position ("out" index)
3862 unsigned int outIndex_;
3865 //-----------------------------------------------------------------------------
3867 // A structure to hold various information related to the WASAPI implementation.
// Per-stream WASAPI interface and event handles.  An instance is allocated in
// probeDeviceOpen(), stored in stream_.apiHandle, and the members are released
// (SAFE_RELEASE / CloseHandle) and the object deleted in closeStream().
3870 IAudioClient* captureAudioClient;
3871 IAudioClient* renderAudioClient;
3872 IAudioCaptureClient* captureClient;
3873 IAudioRenderClient* renderClient;
3874 HANDLE captureEvent;
// Default-construct with every handle NULL so teardown code can test each
// member before releasing it.
3878 : captureAudioClient( NULL ),
3879 renderAudioClient( NULL ),
3880 captureClient( NULL ),
3881 renderClient( NULL ),
3882 captureEvent( NULL ),
3883 renderEvent( NULL ) {}
3886 //=============================================================================
3888 RtApiWasapi::RtApiWasapi()
3889 : coInitialized_( false ), deviceEnumerator_( NULL )
3891 // WASAPI can run either apartment or multi-threaded
3892 HRESULT hr = CoInitialize( NULL );
3893 if ( !FAILED( hr ) )
3894 coInitialized_ = true;
3896 // Instantiate device enumerator
3897 hr = CoCreateInstance( __uuidof( MMDeviceEnumerator ), NULL,
3898 CLSCTX_ALL, __uuidof( IMMDeviceEnumerator ),
3899 ( void** ) &deviceEnumerator_ );
3901 if ( FAILED( hr ) ) {
3902 errorText_ = "RtApiWasapi::RtApiWasapi: Unable to instantiate device enumerator";
3903 error( RtAudioError::DRIVER_ERROR );
3907 //-----------------------------------------------------------------------------
// Destructor: tear down any still-open stream, release the device
// enumerator, and balance the constructor's CoInitialize() if it succeeded.
3909 RtApiWasapi::~RtApiWasapi()
// Close the stream first so its COM/device resources are released before
// the enumerator goes away.
3911 if ( stream_.state != STREAM_CLOSED )
3914 SAFE_RELEASE( deviceEnumerator_ );
3916 // If this object previously called CoInitialize()
3917 if ( coInitialized_ )
3921 //=============================================================================
3923 unsigned int RtApiWasapi::getDeviceCount( void )
3925 unsigned int captureDeviceCount = 0;
3926 unsigned int renderDeviceCount = 0;
3928 IMMDeviceCollection* captureDevices = NULL;
3929 IMMDeviceCollection* renderDevices = NULL;
3931 // Count capture devices
3933 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
3934 if ( FAILED( hr ) ) {
3935 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device collection.";
3939 hr = captureDevices->GetCount( &captureDeviceCount );
3940 if ( FAILED( hr ) ) {
3941 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve capture device count.";
3945 // Count render devices
3946 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
3947 if ( FAILED( hr ) ) {
3948 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device collection.";
3952 hr = renderDevices->GetCount( &renderDeviceCount );
3953 if ( FAILED( hr ) ) {
3954 errorText_ = "RtApiWasapi::getDeviceCount: Unable to retrieve render device count.";
3959 // release all references
3960 SAFE_RELEASE( captureDevices );
3961 SAFE_RELEASE( renderDevices );
3963 if ( errorText_.empty() )
3964 return captureDeviceCount + renderDeviceCount;
3966 error( RtAudioError::DRIVER_ERROR );
3970 //-----------------------------------------------------------------------------
// Probe one WASAPI endpoint and fill an RtAudio::DeviceInfo record.
// Device indices map render endpoints to the low indices [0, renderDeviceCount)
// and capture endpoints after them (see the Item() calls below).  On any COM
// failure, errorText_ is set and the partially-filled info is returned.
3972 RtAudio::DeviceInfo RtApiWasapi::getDeviceInfo( unsigned int device )
3974 RtAudio::DeviceInfo info;
3975 unsigned int captureDeviceCount = 0;
3976 unsigned int renderDeviceCount = 0;
3977 std::string defaultDeviceName;
3978 bool isCaptureDevice = false;
// PROPVARIANTs receive the friendly-name property values; both are cleared
// with PropVariantClear() in the cleanup section below.
3980 PROPVARIANT deviceNameProp;
3981 PROPVARIANT defaultDeviceNameProp;
3983 IMMDeviceCollection* captureDevices = NULL;
3984 IMMDeviceCollection* renderDevices = NULL;
3985 IMMDevice* devicePtr = NULL;
3986 IMMDevice* defaultDevicePtr = NULL;
3987 IAudioClient* audioClient = NULL;
3988 IPropertyStore* devicePropStore = NULL;
3989 IPropertyStore* defaultDevicePropStore = NULL;
3991 WAVEFORMATEX* deviceFormat = NULL;
3992 WAVEFORMATEX* closestMatchFormat = NULL;
// probed is preset to false; it only reflects a successful probe.
3995 info.probed = false;
3997 // Count capture devices
// errorType defaults to DRIVER_ERROR; validation failures override it below.
3999 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4000 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4001 if ( FAILED( hr ) ) {
4002 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device collection.";
4006 hr = captureDevices->GetCount( &captureDeviceCount );
4007 if ( FAILED( hr ) ) {
4008 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device count.";
4012 // Count render devices
4013 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4014 if ( FAILED( hr ) ) {
4015 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device collection.";
4019 hr = renderDevices->GetCount( &renderDeviceCount );
4020 if ( FAILED( hr ) ) {
4021 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device count.";
4025 // validate device index
4026 if ( device >= captureDeviceCount + renderDeviceCount ) {
4027 errorText_ = "RtApiWasapi::getDeviceInfo: Invalid device index.";
4028 errorType = RtAudioError::INVALID_USE;
4032 // determine whether index falls within capture or render devices
// Capture endpoints live at indices >= renderDeviceCount, offset accordingly.
4033 if ( device >= renderDeviceCount ) {
4034 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4035 if ( FAILED( hr ) ) {
4036 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve capture device handle.";
4039 isCaptureDevice = true;
4042 hr = renderDevices->Item( device, &devicePtr );
4043 if ( FAILED( hr ) ) {
4044 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve render device handle.";
4047 isCaptureDevice = false;
4050 // get default device name
// The default endpoint (eConsole role) is fetched only to compare its
// friendly name against this device's name further below.
4051 if ( isCaptureDevice ) {
4052 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eCapture, eConsole, &defaultDevicePtr );
4053 if ( FAILED( hr ) ) {
4054 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default capture device handle.";
4059 hr = deviceEnumerator_->GetDefaultAudioEndpoint( eRender, eConsole, &defaultDevicePtr );
4060 if ( FAILED( hr ) ) {
4061 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default render device handle.";
4066 hr = defaultDevicePtr->OpenPropertyStore( STGM_READ, &defaultDevicePropStore );
4067 if ( FAILED( hr ) ) {
4068 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open default device property store.";
4071 PropVariantInit( &defaultDeviceNameProp );
4073 hr = defaultDevicePropStore->GetValue( PKEY_Device_FriendlyName, &defaultDeviceNameProp );
4074 if ( FAILED( hr ) ) {
4075 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve default device property: PKEY_Device_FriendlyName.";
4079 defaultDeviceName = convertCharPointerToStdString(defaultDeviceNameProp.pwszVal);
4082 hr = devicePtr->OpenPropertyStore( STGM_READ, &devicePropStore );
4083 if ( FAILED( hr ) ) {
4084 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to open device property store.";
4088 PropVariantInit( &deviceNameProp );
4090 hr = devicePropStore->GetValue( PKEY_Device_FriendlyName, &deviceNameProp );
4091 if ( FAILED( hr ) ) {
4092 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device property: PKEY_Device_FriendlyName.";
4096 info.name =convertCharPointerToStdString(deviceNameProp.pwszVal);
// NOTE(review): default-device detection compares friendly names, which can
// misreport when two endpoints share the same friendly name.
4099 if ( isCaptureDevice ) {
4100 info.isDefaultInput = info.name == defaultDeviceName;
4101 info.isDefaultOutput = false;
4104 info.isDefaultInput = false;
4105 info.isDefaultOutput = info.name == defaultDeviceName;
4109 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL, NULL, ( void** ) &audioClient );
4110 if ( FAILED( hr ) ) {
4111 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device audio client.";
4115 hr = audioClient->GetMixFormat( &deviceFormat );
4116 if ( FAILED( hr ) ) {
4117 errorText_ = "RtApiWasapi::getDeviceInfo: Unable to retrieve device mix format.";
// A WASAPI endpoint is input-only or output-only; duplex is always 0 here.
4121 if ( isCaptureDevice ) {
4122 info.inputChannels = deviceFormat->nChannels;
4123 info.outputChannels = 0;
4124 info.duplexChannels = 0;
4127 info.inputChannels = 0;
4128 info.outputChannels = deviceFormat->nChannels;
4129 info.duplexChannels = 0;
4132 // sample rates (WASAPI only supports the one native sample rate)
4133 info.preferredSampleRate = deviceFormat->nSamplesPerSec;
4135 info.sampleRates.clear();
4136 info.sampleRates.push_back( deviceFormat->nSamplesPerSec );
// Translate the shared-mode mix format (plain or WAVE_FORMAT_EXTENSIBLE)
// into the RtAudio native-format bitmask.
4139 info.nativeFormats = 0;
4141 if ( deviceFormat->wFormatTag == WAVE_FORMAT_IEEE_FLOAT ||
4142 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4143 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT ) )
4145 if ( deviceFormat->wBitsPerSample == 32 ) {
4146 info.nativeFormats |= RTAUDIO_FLOAT32;
4148 else if ( deviceFormat->wBitsPerSample == 64 ) {
4149 info.nativeFormats |= RTAUDIO_FLOAT64;
4152 else if ( deviceFormat->wFormatTag == WAVE_FORMAT_PCM ||
4153 ( deviceFormat->wFormatTag == WAVE_FORMAT_EXTENSIBLE &&
4154 ( ( WAVEFORMATEXTENSIBLE* ) deviceFormat )->SubFormat == KSDATAFORMAT_SUBTYPE_PCM ) )
4156 if ( deviceFormat->wBitsPerSample == 8 ) {
4157 info.nativeFormats |= RTAUDIO_SINT8;
4159 else if ( deviceFormat->wBitsPerSample == 16 ) {
4160 info.nativeFormats |= RTAUDIO_SINT16;
4162 else if ( deviceFormat->wBitsPerSample == 24 ) {
4163 info.nativeFormats |= RTAUDIO_SINT24;
4165 else if ( deviceFormat->wBitsPerSample == 32 ) {
4166 info.nativeFormats |= RTAUDIO_SINT32;
// Common cleanup: clear PROPVARIANTs, release every COM reference, and free
// the CoTaskMem-allocated format blocks (CoTaskMemFree tolerates NULL).
4174 // release all references
4175 PropVariantClear( &deviceNameProp );
4176 PropVariantClear( &defaultDeviceNameProp );
4178 SAFE_RELEASE( captureDevices );
4179 SAFE_RELEASE( renderDevices );
4180 SAFE_RELEASE( devicePtr );
4181 SAFE_RELEASE( defaultDevicePtr );
4182 SAFE_RELEASE( audioClient );
4183 SAFE_RELEASE( devicePropStore );
4184 SAFE_RELEASE( defaultDevicePropStore );
4186 CoTaskMemFree( deviceFormat );
4187 CoTaskMemFree( closestMatchFormat );
4189 if ( !errorText_.empty() )
4194 //-----------------------------------------------------------------------------
4196 unsigned int RtApiWasapi::getDefaultOutputDevice( void )
4198 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4199 if ( getDeviceInfo( i ).isDefaultOutput ) {
4207 //-----------------------------------------------------------------------------
4209 unsigned int RtApiWasapi::getDefaultInputDevice( void )
4211 for ( unsigned int i = 0; i < getDeviceCount(); i++ ) {
4212 if ( getDeviceInfo( i ).isDefaultInput ) {
4220 //-----------------------------------------------------------------------------
// Close the open stream: stop it if still running, release every WASAPI
// interface/event held in the WasapiHandle, free the user and device
// buffers, and mark the stream CLOSED.  Calling with no open stream only
// raises a WARNING.
4222 void RtApiWasapi::closeStream( void )
4224 if ( stream_.state == STREAM_CLOSED ) {
4225 errorText_ = "RtApiWasapi::closeStream: No open stream to close.";
4226 error( RtAudioError::WARNING );
// A still-running stream must be stopped before its resources are torn down.
4230 if ( stream_.state != STREAM_STOPPED )
4233 // clean up stream memory
// SAFE_RELEASE is a macro; these invocations carry no trailing semicolon.
4234 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient )
4235 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient )
4237 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->captureClient )
4238 SAFE_RELEASE( ( ( WasapiHandle* ) stream_.apiHandle )->renderClient )
// Event handles are Win32 objects, closed with CloseHandle rather than
// COM Release.
4240 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent )
4241 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent );
4243 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent )
4244 CloseHandle( ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent );
4246 delete ( WasapiHandle* ) stream_.apiHandle;
4247 stream_.apiHandle = NULL;
// Free the per-direction user buffers (index 0/1 = output/input).
4249 for ( int i = 0; i < 2; i++ ) {
4250 if ( stream_.userBuffer[i] ) {
4251 free( stream_.userBuffer[i] );
4252 stream_.userBuffer[i] = 0;
4256 if ( stream_.deviceBuffer ) {
4257 free( stream_.deviceBuffer );
4258 stream_.deviceBuffer = 0;
4261 // update stream state
4262 stream_.state = STREAM_CLOSED;
4265 //-----------------------------------------------------------------------------
// Start the stream by spawning the WASAPI processing thread.  Calling on an
// already-running stream only raises a WARNING.
4267 void RtApiWasapi::startStream( void )
4271 if ( stream_.state == STREAM_RUNNING ) {
4272 errorText_ = "RtApiWasapi::startStream: The stream is already running.";
4273 error( RtAudioError::WARNING );
4277 // update stream state
// State is flipped to RUNNING before the thread starts so the thread's
// processing loop sees the correct state immediately.
4278 stream_.state = STREAM_RUNNING;
4280 // create WASAPI stream thread
// The thread is created suspended so its priority can be applied before the
// first instruction of runWasapiThread() executes.
4281 stream_.callbackInfo.thread = ( ThreadHandle ) CreateThread( NULL, 0, runWasapiThread, this, CREATE_SUSPENDED, NULL );
4283 if ( !stream_.callbackInfo.thread ) {
4284 errorText_ = "RtApiWasapi::startStream: Unable to instantiate callback thread.";
4285 error( RtAudioError::THREAD_ERROR );
4288 SetThreadPriority( ( void* ) stream_.callbackInfo.thread, stream_.callbackInfo.priority );
4289 ResumeThread( ( void* ) stream_.callbackInfo.thread );
4293 //-----------------------------------------------------------------------------
// Stop the stream gracefully: signal the processing thread via the
// STREAM_STOPPING state, wait for it to exit, let the final buffer play out,
// then stop the WASAPI clients and close the thread handle.
4295 void RtApiWasapi::stopStream( void )
4299 if ( stream_.state == STREAM_STOPPED ) {
4300 errorText_ = "RtApiWasapi::stopStream: The stream is already stopped.";
4301 error( RtAudioError::WARNING );
4305 // inform stream thread by setting stream state to STREAM_STOPPING
4306 stream_.state = STREAM_STOPPING;
4308 // wait until stream thread is stopped
// The processing thread sets the state to STREAM_STOPPED when it exits;
// this loop polls for that transition.
4309 while( stream_.state != STREAM_STOPPED ) {
4313 // Wait for the last buffer to play before stopping.
// Drain delay: one buffer's duration in milliseconds (1000 * frames / rate).
4314 Sleep( 1000 * stream_.bufferSize / stream_.sampleRate );
4316 // stop capture client if applicable
4317 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4318 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4319 if ( FAILED( hr ) ) {
4320 errorText_ = "RtApiWasapi::stopStream: Unable to stop capture stream.";
4321 error( RtAudioError::DRIVER_ERROR );
4326 // stop render client if applicable
4327 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4328 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4329 if ( FAILED( hr ) ) {
4330 errorText_ = "RtApiWasapi::stopStream: Unable to stop render stream.";
4331 error( RtAudioError::DRIVER_ERROR );
4336 // close thread handle
4337 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4338 errorText_ = "RtApiWasapi::stopStream: Unable to close callback thread.";
4339 error( RtAudioError::THREAD_ERROR );
4343 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4346 //-----------------------------------------------------------------------------
// Abort the stream: identical teardown to stopStream() except that no
// buffer-drain Sleep() precedes stopping the clients — pending audio is
// discarded rather than played out.
4348 void RtApiWasapi::abortStream( void )
4352 if ( stream_.state == STREAM_STOPPED ) {
4353 errorText_ = "RtApiWasapi::abortStream: The stream is already stopped.";
4354 error( RtAudioError::WARNING );
4358 // inform stream thread by setting stream state to STREAM_STOPPING
4359 stream_.state = STREAM_STOPPING;
4361 // wait until stream thread is stopped
// The processing thread sets the state to STREAM_STOPPED when it exits.
4362 while ( stream_.state != STREAM_STOPPED ) {
4366 // stop capture client if applicable
4367 if ( ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient ) {
4368 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient->Stop();
4369 if ( FAILED( hr ) ) {
4370 errorText_ = "RtApiWasapi::abortStream: Unable to stop capture stream.";
4371 error( RtAudioError::DRIVER_ERROR );
4376 // stop render client if applicable
4377 if ( ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient ) {
4378 HRESULT hr = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient->Stop();
4379 if ( FAILED( hr ) ) {
4380 errorText_ = "RtApiWasapi::abortStream: Unable to stop render stream.";
4381 error( RtAudioError::DRIVER_ERROR );
4386 // close thread handle
4387 if ( stream_.callbackInfo.thread && !CloseHandle( ( void* ) stream_.callbackInfo.thread ) ) {
4388 errorText_ = "RtApiWasapi::abortStream: Unable to close callback thread.";
4389 error( RtAudioError::THREAD_ERROR );
4393 stream_.callbackInfo.thread = (ThreadHandle) NULL;
4396 //-----------------------------------------------------------------------------
// Open one direction (INPUT or OUTPUT) of a stream on the given device.
// Validates the index/mode/sample rate, activates the device's IAudioClient
// into the WasapiHandle, records stream parameters, and allocates the user
// buffer.  Returns SUCCESS or FAILURE; on failure, errorText_/errorType
// describe the problem (errorType defaults to DRIVER_ERROR).
4398 bool RtApiWasapi::probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
4399 unsigned int firstChannel, unsigned int sampleRate,
4400 RtAudioFormat format, unsigned int* bufferSize,
4401 RtAudio::StreamOptions* options )
4403 bool methodResult = FAILURE;
4404 unsigned int captureDeviceCount = 0;
4405 unsigned int renderDeviceCount = 0;
4407 IMMDeviceCollection* captureDevices = NULL;
4408 IMMDeviceCollection* renderDevices = NULL;
4409 IMMDevice* devicePtr = NULL;
4410 WAVEFORMATEX* deviceFormat = NULL;
4411 unsigned int bufferBytes;
4412 stream_.state = STREAM_STOPPED;
4413 RtAudio::DeviceInfo deviceInfo;
4415 // create API Handle if not already created
// One WasapiHandle is shared by both directions of a duplex stream, so it is
// only allocated on the first probeDeviceOpen() call.
4416 if ( !stream_.apiHandle )
4417 stream_.apiHandle = ( void* ) new WasapiHandle();
4419 // Count capture devices
4421 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4422 HRESULT hr = deviceEnumerator_->EnumAudioEndpoints( eCapture, DEVICE_STATE_ACTIVE, &captureDevices );
4423 if ( FAILED( hr ) ) {
4424 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device collection.";
4428 hr = captureDevices->GetCount( &captureDeviceCount );
4429 if ( FAILED( hr ) ) {
4430 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device count.";
4434 // Count render devices
4435 hr = deviceEnumerator_->EnumAudioEndpoints( eRender, DEVICE_STATE_ACTIVE, &renderDevices );
4436 if ( FAILED( hr ) ) {
4437 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device collection.";
4441 hr = renderDevices->GetCount( &renderDeviceCount );
4442 if ( FAILED( hr ) ) {
4443 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device count.";
4447 // validate device index
4448 if ( device >= captureDeviceCount + renderDeviceCount ) {
4449 errorType = RtAudioError::INVALID_USE;
4450 errorText_ = "RtApiWasapi::probeDeviceOpen: Invalid device index.";
4454 deviceInfo = getDeviceInfo( device );
4456 // validate sample rate
// This version has no rate converter: shared-mode WASAPI runs only at the
// device's mix-format rate, so any other request is rejected.
4457 if ( sampleRate != deviceInfo.preferredSampleRate )
4459 errorType = RtAudioError::INVALID_USE;
4460 errorText_ = "RtApiWasapi::probeDeviceOpen: " + std::to_string( sampleRate ) + "Hz sample rate not supported. This device only supports " + std::to_string( deviceInfo.preferredSampleRate ) + "Hz.";
4464 // determine whether index falls within capture or render devices
// Indices >= renderDeviceCount are capture endpoints (render endpoints come
// first), matching the mapping used by getDeviceInfo().
4465 if ( device >= renderDeviceCount ) {
4466 if ( mode != INPUT ) {
4467 errorType = RtAudioError::INVALID_USE;
4468 errorText_ = "RtApiWasapi::probeDeviceOpen: Capture device selected as output device.";
4472 // retrieve captureAudioClient from devicePtr
4473 IAudioClient*& captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4475 hr = captureDevices->Item( device - renderDeviceCount, &devicePtr );
4476 if ( FAILED( hr ) ) {
4477 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve capture device handle.";
4481 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4482 NULL, ( void** ) &captureAudioClient );
4483 if ( FAILED( hr ) ) {
4484 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4488 hr = captureAudioClient->GetMixFormat( &deviceFormat );
4489 if ( FAILED( hr ) ) {
4490 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4494 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// NOTE(review): GetStreamLatency() writes a 64-bit REFERENCE_TIME through
// this cast pointer; if stream_.latency's element type is narrower than
// 64 bits this overruns into the adjacent element — confirm the declared
// type of stream_.latency.
4495 captureAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
4498 if ( mode != OUTPUT ) {
4499 errorType = RtAudioError::INVALID_USE;
4500 errorText_ = "RtApiWasapi::probeDeviceOpen: Render device selected as input device.";
4504 // retrieve renderAudioClient from devicePtr
4505 IAudioClient*& renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4507 hr = renderDevices->Item( device, &devicePtr );
4508 if ( FAILED( hr ) ) {
4509 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve render device handle.";
4513 hr = devicePtr->Activate( __uuidof( IAudioClient ), CLSCTX_ALL,
4514 NULL, ( void** ) &renderAudioClient );
4515 if ( FAILED( hr ) ) {
4516 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device audio client.";
4520 hr = renderAudioClient->GetMixFormat( &deviceFormat );
4521 if ( FAILED( hr ) ) {
4522 errorText_ = "RtApiWasapi::probeDeviceOpen: Unable to retrieve device mix format.";
4526 stream_.nDeviceChannels[mode] = deviceFormat->nChannels;
// NOTE(review): same 64-bit write-through-cast concern as the capture path.
4527 renderAudioClient->GetStreamLatency( ( long long* ) &stream_.latency[mode] );
// A second probeDeviceOpen() in the opposite direction upgrades the stream
// to DUPLEX.
4531 if ( ( stream_.mode == OUTPUT && mode == INPUT ) ||
4532 ( stream_.mode == INPUT && mode == OUTPUT ) ) {
4533 stream_.mode = DUPLEX;
4536 stream_.mode = mode;
// Record the negotiated stream parameters for this direction.
4539 stream_.device[mode] = device;
4540 stream_.doByteSwap[mode] = false;
4541 stream_.sampleRate = sampleRate;
4542 stream_.bufferSize = *bufferSize;
// WASAPI manages its own buffering; RtAudio tracks a single logical buffer.
4543 stream_.nBuffers = 1;
4544 stream_.nUserChannels[mode] = channels;
4545 stream_.channelOffset[mode] = firstChannel;
4546 stream_.userFormat = format;
4547 stream_.deviceFormat[mode] = deviceInfo.nativeFormats;
4549 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
4550 stream_.userInterleaved = false;
4552 stream_.userInterleaved = true;
4553 stream_.deviceInterleaved[mode] = true;
4555 // Set flags for buffer conversion.
4556 stream_.doConvertBuffer[mode] = false;
// NOTE(review): nUserChannels/nDeviceChannels are arrays (they are indexed
// with [mode] below), so this `!=` compares decayed pointers — always true
// for two distinct arrays, which forces doConvertBuffer on.  Likely intended
// as an element-wise comparison; confirm against upstream.
4557 if ( stream_.userFormat != stream_.deviceFormat[mode] ||
4558 stream_.nUserChannels != stream_.nDeviceChannels )
4559 stream_.doConvertBuffer[mode] = true;
4560 else if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
4561 stream_.nUserChannels[mode] > 1 )
4562 stream_.doConvertBuffer[mode] = true;
4564 if ( stream_.doConvertBuffer[mode] )
4565 setConvertInfo( mode, 0 );
4567 // Allocate necessary internal buffers
4568 bufferBytes = stream_.nUserChannels[mode] * stream_.bufferSize * formatBytes( stream_.userFormat );
// calloc zero-fills so the first callback never sees garbage samples.
4570 stream_.userBuffer[mode] = ( char* ) calloc( bufferBytes, 1 );
4571 if ( !stream_.userBuffer[mode] ) {
4572 errorType = RtAudioError::MEMORY_ERROR;
4573 errorText_ = "RtApiWasapi::probeDeviceOpen: Error allocating user buffer memory.";
4577 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME )
4578 stream_.callbackInfo.priority = 15;
4580 stream_.callbackInfo.priority = 0;
4582 ///! TODO: RTAUDIO_MINIMIZE_LATENCY // Provide stream buffers directly to callback
4583 ///! TODO: RTAUDIO_HOG_DEVICE // Exclusive mode
4585 methodResult = SUCCESS;
// Common cleanup: release enumeration references and the mix-format block.
4589 SAFE_RELEASE( captureDevices );
4590 SAFE_RELEASE( renderDevices );
4591 SAFE_RELEASE( devicePtr );
4592 CoTaskMemFree( deviceFormat );
4594 // if method failed, close the stream
4595 if ( methodResult == FAILURE )
4598 if ( !errorText_.empty() )
4600 return methodResult;
4603 //=============================================================================
4605 DWORD WINAPI RtApiWasapi::runWasapiThread( void* wasapiPtr )
4608 ( ( RtApiWasapi* ) wasapiPtr )->wasapiThread();
4613 DWORD WINAPI RtApiWasapi::stopWasapiThread( void* wasapiPtr )
4616 ( ( RtApiWasapi* ) wasapiPtr )->stopStream();
4621 DWORD WINAPI RtApiWasapi::abortWasapiThread( void* wasapiPtr )
4624 ( ( RtApiWasapi* ) wasapiPtr )->abortStream();
4629 //-----------------------------------------------------------------------------
4631 void RtApiWasapi::wasapiThread()
4633 // as this is a new thread, we must CoInitialize it
4634 CoInitialize( NULL );
4638 IAudioClient* captureAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureAudioClient;
4639 IAudioClient* renderAudioClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderAudioClient;
4640 IAudioCaptureClient* captureClient = ( ( WasapiHandle* ) stream_.apiHandle )->captureClient;
4641 IAudioRenderClient* renderClient = ( ( WasapiHandle* ) stream_.apiHandle )->renderClient;
4642 HANDLE captureEvent = ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent;
4643 HANDLE renderEvent = ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent;
4645 WAVEFORMATEX* captureFormat = NULL;
4646 WAVEFORMATEX* renderFormat = NULL;
4647 WasapiBuffer captureBuffer;
4648 WasapiBuffer renderBuffer;
4650 // declare local stream variables
4651 RtAudioCallback callback = ( RtAudioCallback ) stream_.callbackInfo.callback;
4652 BYTE* streamBuffer = NULL;
4653 unsigned long captureFlags = 0;
4654 unsigned int bufferFrameCount = 0;
4655 unsigned int numFramesPadding = 0;
4656 bool callbackPushed = false;
4657 bool callbackPulled = false;
4658 bool callbackStopped = false;
4659 int callbackResult = 0;
4661 unsigned int deviceBuffSize = 0;
4664 RtAudioError::Type errorType = RtAudioError::DRIVER_ERROR;
4666 // Attempt to assign "Pro Audio" characteristic to thread
4667 HMODULE AvrtDll = LoadLibrary( (LPCTSTR) "AVRT.dll" );
4669 DWORD taskIndex = 0;
4670 TAvSetMmThreadCharacteristicsPtr AvSetMmThreadCharacteristicsPtr = ( TAvSetMmThreadCharacteristicsPtr ) GetProcAddress( AvrtDll, "AvSetMmThreadCharacteristicsW" );
4671 AvSetMmThreadCharacteristicsPtr( L"Pro Audio", &taskIndex );
4672 FreeLibrary( AvrtDll );
4675 // start capture stream if applicable
4676 if ( captureAudioClient ) {
4677 hr = captureAudioClient->GetMixFormat( &captureFormat );
4678 if ( FAILED( hr ) ) {
4679 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4683 // initialize capture stream according to desire buffer size
4684 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / captureFormat->nSamplesPerSec );
4686 if ( !captureClient ) {
4687 hr = captureAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4688 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4689 desiredBufferPeriod,
4690 desiredBufferPeriod,
4693 if ( FAILED( hr ) ) {
4694 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize capture audio client.";
4698 hr = captureAudioClient->GetService( __uuidof( IAudioCaptureClient ),
4699 ( void** ) &captureClient );
4700 if ( FAILED( hr ) ) {
4701 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture client handle.";
4705 // configure captureEvent to trigger on every available capture buffer
4706 captureEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4707 if ( !captureEvent ) {
4708 errorType = RtAudioError::SYSTEM_ERROR;
4709 errorText_ = "RtApiWasapi::wasapiThread: Unable to create capture event.";
4713 hr = captureAudioClient->SetEventHandle( captureEvent );
4714 if ( FAILED( hr ) ) {
4715 errorText_ = "RtApiWasapi::wasapiThread: Unable to set capture event handle.";
4719 ( ( WasapiHandle* ) stream_.apiHandle )->captureClient = captureClient;
4720 ( ( WasapiHandle* ) stream_.apiHandle )->captureEvent = captureEvent;
4723 unsigned int inBufferSize = 0;
4724 hr = captureAudioClient->GetBufferSize( &inBufferSize );
4725 if ( FAILED( hr ) ) {
4726 errorText_ = "RtApiWasapi::wasapiThread: Unable to get capture buffer size.";
4730 // scale outBufferSize according to stream->user sample rate ratio
4731 unsigned int outBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT];
4732 inBufferSize *= stream_.nDeviceChannels[INPUT];
4734 // set captureBuffer size
4735 captureBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[INPUT] ) );
4737 // reset the capture stream
4738 hr = captureAudioClient->Reset();
4739 if ( FAILED( hr ) ) {
4740 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset capture stream.";
4744 // start the capture stream
4745 hr = captureAudioClient->Start();
4746 if ( FAILED( hr ) ) {
4747 errorText_ = "RtApiWasapi::wasapiThread: Unable to start capture stream.";
4752 // start render stream if applicable
4753 if ( renderAudioClient ) {
4754 hr = renderAudioClient->GetMixFormat( &renderFormat );
4755 if ( FAILED( hr ) ) {
4756 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve device mix format.";
4760 // initialize render stream according to desire buffer size
4761 REFERENCE_TIME desiredBufferPeriod = ( REFERENCE_TIME ) ( ( float ) stream_.bufferSize * 10000000 / renderFormat->nSamplesPerSec );
4763 if ( !renderClient ) {
4764 hr = renderAudioClient->Initialize( AUDCLNT_SHAREMODE_SHARED,
4765 AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
4766 desiredBufferPeriod,
4767 desiredBufferPeriod,
4770 if ( FAILED( hr ) ) {
4771 errorText_ = "RtApiWasapi::wasapiThread: Unable to initialize render audio client.";
4775 hr = renderAudioClient->GetService( __uuidof( IAudioRenderClient ),
4776 ( void** ) &renderClient );
4777 if ( FAILED( hr ) ) {
4778 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render client handle.";
4782 // configure renderEvent to trigger on every available render buffer
4783 renderEvent = CreateEvent( NULL, FALSE, FALSE, NULL );
4784 if ( !renderEvent ) {
4785 errorType = RtAudioError::SYSTEM_ERROR;
4786 errorText_ = "RtApiWasapi::wasapiThread: Unable to create render event.";
4790 hr = renderAudioClient->SetEventHandle( renderEvent );
4791 if ( FAILED( hr ) ) {
4792 errorText_ = "RtApiWasapi::wasapiThread: Unable to set render event handle.";
4796 ( ( WasapiHandle* ) stream_.apiHandle )->renderClient = renderClient;
4797 ( ( WasapiHandle* ) stream_.apiHandle )->renderEvent = renderEvent;
4800 unsigned int outBufferSize = 0;
4801 hr = renderAudioClient->GetBufferSize( &outBufferSize );
4802 if ( FAILED( hr ) ) {
4803 errorText_ = "RtApiWasapi::wasapiThread: Unable to get render buffer size.";
4807 // scale inBufferSize according to user->stream sample rate ratio
4808 unsigned int inBufferSize = ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[OUTPUT];
4809 outBufferSize *= stream_.nDeviceChannels[OUTPUT];
4811 // set renderBuffer size
4812 renderBuffer.setBufferSize( inBufferSize + outBufferSize, formatBytes( stream_.deviceFormat[OUTPUT] ) );
4814 // reset the render stream
4815 hr = renderAudioClient->Reset();
4816 if ( FAILED( hr ) ) {
4817 errorText_ = "RtApiWasapi::wasapiThread: Unable to reset render stream.";
4821 // start the render stream
4822 hr = renderAudioClient->Start();
4823 if ( FAILED( hr ) ) {
4824 errorText_ = "RtApiWasapi::wasapiThread: Unable to start render stream.";
4829 if ( stream_.mode == INPUT ) {
4830 using namespace std; // for roundf
4831 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] );
4833 else if ( stream_.mode == OUTPUT ) {
4834 deviceBuffSize = stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] );
4836 else if ( stream_.mode == DUPLEX ) {
4837 deviceBuffSize = std::max( stream_.bufferSize * stream_.nDeviceChannels[INPUT] * formatBytes( stream_.deviceFormat[INPUT] ),
4838 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT] * formatBytes( stream_.deviceFormat[OUTPUT] ) );
4841 stream_.deviceBuffer = ( char* ) malloc( deviceBuffSize );
4842 if ( !stream_.deviceBuffer ) {
4843 errorType = RtAudioError::MEMORY_ERROR;
4844 errorText_ = "RtApiWasapi::wasapiThread: Error allocating device buffer memory.";
4848 // stream process loop
4849 while ( stream_.state != STREAM_STOPPING ) {
4850 if ( !callbackPulled ) {
4853 // 1. Pull callback buffer from inputBuffer
4854 // 2. If 1. was successful: Convert callback buffer to user format
4856 if ( captureAudioClient ) {
4857 // Pull callback buffer from inputBuffer
4858 callbackPulled = captureBuffer.pullBuffer( stream_.deviceBuffer,
4859 ( unsigned int ) stream_.bufferSize * stream_.nDeviceChannels[INPUT],
4860 stream_.deviceFormat[INPUT] );
4862 if ( callbackPulled ) {
4863 if ( stream_.doConvertBuffer[INPUT] ) {
4864 // Convert callback buffer to user format
4865 convertBuffer( stream_.userBuffer[INPUT],
4866 stream_.deviceBuffer,
4867 stream_.convertInfo[INPUT] );
4870 // no further conversion, simple copy deviceBuffer to userBuffer
4871 memcpy( stream_.userBuffer[INPUT],
4872 stream_.deviceBuffer,
4873 stream_.bufferSize * stream_.nUserChannels[INPUT] * formatBytes( stream_.userFormat ) );
4878 // if there is no capture stream, set callbackPulled flag
4879 callbackPulled = true;
4884 // 1. Execute user callback method
4885 // 2. Handle return value from callback
4887 // if callback has not requested the stream to stop
4888 if ( callbackPulled && !callbackStopped ) {
4889 // Execute user callback method
4890 callbackResult = callback( stream_.userBuffer[OUTPUT],
4891 stream_.userBuffer[INPUT],
4894 captureFlags & AUDCLNT_BUFFERFLAGS_DATA_DISCONTINUITY ? RTAUDIO_INPUT_OVERFLOW : 0,
4895 stream_.callbackInfo.userData );
4897 // Handle return value from callback
4898 if ( callbackResult == 1 ) {
4899 // instantiate a thread to stop this thread
4900 HANDLE threadHandle = CreateThread( NULL, 0, stopWasapiThread, this, 0, NULL );
4901 if ( !threadHandle ) {
4902 errorType = RtAudioError::THREAD_ERROR;
4903 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream stop thread.";
4906 else if ( !CloseHandle( threadHandle ) ) {
4907 errorType = RtAudioError::THREAD_ERROR;
4908 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream stop thread handle.";
4912 callbackStopped = true;
4914 else if ( callbackResult == 2 ) {
4915 // instantiate a thread to stop this thread
4916 HANDLE threadHandle = CreateThread( NULL, 0, abortWasapiThread, this, 0, NULL );
4917 if ( !threadHandle ) {
4918 errorType = RtAudioError::THREAD_ERROR;
4919 errorText_ = "RtApiWasapi::wasapiThread: Unable to instantiate stream abort thread.";
4922 else if ( !CloseHandle( threadHandle ) ) {
4923 errorType = RtAudioError::THREAD_ERROR;
4924 errorText_ = "RtApiWasapi::wasapiThread: Unable to close stream abort thread handle.";
4928 callbackStopped = true;
4935 // 1. Convert callback buffer to stream format
4936 // 2. Push callback buffer into outputBuffer
4938 if ( renderAudioClient && callbackPulled ) {
4939 if ( stream_.doConvertBuffer[OUTPUT] ) {
4940 // Convert callback buffer to stream format
4941 convertBuffer( stream_.deviceBuffer,
4942 stream_.userBuffer[OUTPUT],
4943 stream_.convertInfo[OUTPUT] );
4947 // Push callback buffer into outputBuffer
4948 callbackPushed = renderBuffer.pushBuffer( stream_.deviceBuffer,
4949 stream_.bufferSize * stream_.nDeviceChannels[OUTPUT],
4950 stream_.deviceFormat[OUTPUT] );
4953 // if there is no render stream, set callbackPushed flag
4954 callbackPushed = true;
4959 // 1. Get capture buffer from stream
4960 // 2. Push capture buffer into inputBuffer
4961 // 3. If 2. was successful: Release capture buffer
4963 if ( captureAudioClient ) {
4964 // if the callback input buffer was not pulled from captureBuffer, wait for next capture event
4965 if ( !callbackPulled ) {
4966 WaitForSingleObject( captureEvent, INFINITE );
4969 // Get capture buffer from stream
4970 hr = captureClient->GetBuffer( &streamBuffer,
4972 &captureFlags, NULL, NULL );
4973 if ( FAILED( hr ) ) {
4974 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve capture buffer.";
4978 if ( bufferFrameCount != 0 ) {
4979 // Push capture buffer into inputBuffer
4980 if ( captureBuffer.pushBuffer( ( char* ) streamBuffer,
4981 bufferFrameCount * stream_.nDeviceChannels[INPUT],
4982 stream_.deviceFormat[INPUT] ) )
4984 // Release capture buffer
4985 hr = captureClient->ReleaseBuffer( bufferFrameCount );
4986 if ( FAILED( hr ) ) {
4987 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
4993 // Inform WASAPI that capture was unsuccessful
4994 hr = captureClient->ReleaseBuffer( 0 );
4995 if ( FAILED( hr ) ) {
4996 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5003 // Inform WASAPI that capture was unsuccessful
5004 hr = captureClient->ReleaseBuffer( 0 );
5005 if ( FAILED( hr ) ) {
5006 errorText_ = "RtApiWasapi::wasapiThread: Unable to release capture buffer.";
5014 // 1. Get render buffer from stream
5015 // 2. Pull next buffer from outputBuffer
5016 // 3. If 2. was successful: Fill render buffer with next buffer
5017 // Release render buffer
5019 if ( renderAudioClient ) {
5020 // if the callback output buffer was not pushed to renderBuffer, wait for next render event
5021 if ( callbackPulled && !callbackPushed ) {
5022 WaitForSingleObject( renderEvent, INFINITE );
5025 // Get render buffer from stream
5026 hr = renderAudioClient->GetBufferSize( &bufferFrameCount );
5027 if ( FAILED( hr ) ) {
5028 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer size.";
5032 hr = renderAudioClient->GetCurrentPadding( &numFramesPadding );
5033 if ( FAILED( hr ) ) {
5034 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer padding.";
5038 bufferFrameCount -= numFramesPadding;
5040 if ( bufferFrameCount != 0 ) {
5041 hr = renderClient->GetBuffer( bufferFrameCount, &streamBuffer );
5042 if ( FAILED( hr ) ) {
5043 errorText_ = "RtApiWasapi::wasapiThread: Unable to retrieve render buffer.";
5047 // Pull next buffer from outputBuffer
5048 // Fill render buffer with next buffer
5049 if ( renderBuffer.pullBuffer( ( char* ) streamBuffer,
5050 bufferFrameCount * stream_.nDeviceChannels[OUTPUT],
5051 stream_.deviceFormat[OUTPUT] ) )
5053 // Release render buffer
5054 hr = renderClient->ReleaseBuffer( bufferFrameCount, 0 );
5055 if ( FAILED( hr ) ) {
5056 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5062 // Inform WASAPI that render was unsuccessful
5063 hr = renderClient->ReleaseBuffer( 0, 0 );
5064 if ( FAILED( hr ) ) {
5065 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5072 // Inform WASAPI that render was unsuccessful
5073 hr = renderClient->ReleaseBuffer( 0, 0 );
5074 if ( FAILED( hr ) ) {
5075 errorText_ = "RtApiWasapi::wasapiThread: Unable to release render buffer.";
5081 // if the callback buffer was pushed renderBuffer reset callbackPulled flag
5082 if ( callbackPushed ) {
5083 callbackPulled = false;
5085 RtApi::tickStreamTime();
5092 CoTaskMemFree( captureFormat );
5093 CoTaskMemFree( renderFormat );
5097 // update stream state
5098 stream_.state = STREAM_STOPPED;
5100 if ( errorText_.empty() )
5106 //******************** End of __WINDOWS_WASAPI__ *********************//
5110 #if defined(__WINDOWS_DS__) // Windows DirectSound API
5112 // Modified by Robin Davies, October 2005
5113 // - Improvements to DirectX pointer chasing.
5114 // - Bug fix for non-power-of-two Asio granularity used by Edirol PCR-A30.
5115 // - Auto-call CoInitialize for DSOUND and ASIO platforms.
5116 // Various revisions for RtAudio 4.0 by Gary Scavone, April 2007
5117 // Changed device query structure for RtAudio 4.0.7, January 2010
5119 #include <mmsystem.h>
5123 #include <algorithm>
5125 #if defined(__MINGW32__)
5126 // missing from latest mingw winapi
5127 #define WAVE_FORMAT_96M08 0x00010000 /* 96 kHz, Mono, 8-bit */
5128 #define WAVE_FORMAT_96S08 0x00020000 /* 96 kHz, Stereo, 8-bit */
5129 #define WAVE_FORMAT_96M16 0x00040000 /* 96 kHz, Mono, 16-bit */
5130 #define WAVE_FORMAT_96S16 0x00080000 /* 96 kHz, Stereo, 16-bit */
5133 #define MINIMUM_DEVICE_BUFFER_SIZE 32768
5135 #ifdef _MSC_VER // if Microsoft Visual C++
5136 #pragma comment( lib, "winmm.lib" ) // then, auto-link winmm.lib. Otherwise, it has to be added manually.
5139 static inline DWORD dsPointerBetween( DWORD pointer, DWORD laterPointer, DWORD earlierPointer, DWORD bufferSize )
5141 if ( pointer > bufferSize ) pointer -= bufferSize;
5142 if ( laterPointer < earlierPointer ) laterPointer += bufferSize;
5143 if ( pointer < earlierPointer ) pointer += bufferSize;
5144 return pointer >= earlierPointer && pointer < laterPointer;
5147 // A structure to hold various information related to the DirectSound
5148 // API implementation.
// NOTE(review): the struct header and several members referenced by the
// constructor below (the id[], buffer[] and xrun[] arrays) are elided
// from this excerpt — confirm against the full DsHandle definition.
5150 unsigned int drainCounter; // Tracks callback counts when draining
5151 bool internalDrain; // Indicates if stop is initiated from callback or not.
// Two-element arrays — presumably index 0 = playback, 1 = capture, as
// with DsDevice::validId[]/id[] used later in this file; verify.
5155 UINT bufferPointer[2];
5156 DWORD dsBufferSize[2];
5157 DWORD dsPointerLeadTime[2]; // the number of bytes ahead of the safe pointer to lead by.
// Constructor initializer: zeroes the counters, flags and the
// per-direction bookkeeping arrays.
5161 :drainCounter(0), internalDrain(false) { id[0] = 0; id[1] = 0; buffer[0] = 0; buffer[1] = 0; xrun[0] = false; xrun[1] = false; bufferPointer[0] = 0; bufferPointer[1] = 0; }
5164 // Declarations for utility functions, callbacks, and structures
5165 // specific to the DirectSound implementation.
// Enumeration callback handed (cast to LPDSENUMCALLBACK) to
// DirectSoundEnumerate() / DirectSoundCaptureEnumerate() in
// getDeviceCount() below; trailing parameters are elided from this excerpt.
5166 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
5167 LPCTSTR description,
// Maps a DirectSound error code to a human-readable message string.
5171 static const char* getErrorString( int code );
// Audio-thread entry point; the unsigned __stdcall (void*) signature
// matches the _beginthreadex convention — presumably started there; verify.
5173 static unsigned __stdcall callbackHandler( void *ptr );
// DsDevice constructor fragment: validId[0]/validId[1] flag whether the
// device exposes a valid output (0) / input (1) identifier — matching
// the id[0]/id[1] usage in getDeviceInfo() below.
5182 : found(false) { validId[0] = false; validId[1] = false; }
// Bundles the enumeration direction with the shared device list so one
// callback can serve both the output and input device queries.
5185 struct DsProbeData {
5187 std::vector<struct DsDevice>* dsDevices;
5190 RtApiDs :: RtApiDs()
5192 // Dsound will run both-threaded. If CoInitialize fails, then just
5193 // accept whatever the mainline chose for a threading model.
5194 coInitialized_ = false;
5195 HRESULT hr = CoInitialize( NULL );
5196 if ( !FAILED( hr ) ) coInitialized_ = true;
5199 RtApiDs :: ~RtApiDs()
// Close any still-open stream before tearing down COM.
5201 if ( stream_.state != STREAM_CLOSED ) closeStream();
5202 if ( coInitialized_ ) CoUninitialize(); // balanced call.
5205 // The DirectSound default output is always the first device.
5206 unsigned int RtApiDs :: getDefaultOutputDevice( void )
5211 // The DirectSound default input is always the first input device,
5212 // which is the first capture device enumerated.
5213 unsigned int RtApiDs :: getDefaultInputDevice( void )
5218 unsigned int RtApiDs :: getDeviceCount( void )
// Re-enumerate both DirectSound render and capture devices, prune
// entries that have disappeared, and return the number now known.
5220 // Set query flag for previously found devices to false, so that we
5221 // can check for any devices that have disappeared.
5222 for ( unsigned int i=0; i<dsDevices.size(); i++ )
5223 dsDevices[i].found = false;
5225 // Query DirectSound devices.
5226 struct DsProbeData probeInfo;
5227 probeInfo.isInput = false;
5228 probeInfo.dsDevices = &dsDevices;
5229 HRESULT result = DirectSoundEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5230 if ( FAILED( result ) ) {
5231 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating output devices!";
5232 errorText_ = errorStream_.str();
// Enumeration failure is reported as a warning only; we fall through
// and still query the capture side.
5233 error( RtAudioError::WARNING );
5236 // Query DirectSoundCapture devices.
5237 probeInfo.isInput = true;
5238 result = DirectSoundCaptureEnumerate( (LPDSENUMCALLBACK) deviceQueryCallback, &probeInfo );
5239 if ( FAILED( result ) ) {
5240 errorStream_ << "RtApiDs::getDeviceCount: error (" << getErrorString( result ) << ") enumerating input devices!";
5241 errorText_ = errorStream_.str();
5242 error( RtAudioError::WARNING );
5245 // Clean out any devices that may have disappeared (code update submitted by Eli Zehngut).
// Note the empty increment clause: the erase path must leave the index
// in place so the next element shifts into slot i (the increment on the
// keep path is elided from this excerpt).
5246 for ( unsigned int i=0; i<dsDevices.size(); ) {
5247 if ( dsDevices[i].found == false ) dsDevices.erase( dsDevices.begin() + i );
5251 return static_cast<unsigned int>(dsDevices.size());
5254 RtAudio::DeviceInfo RtApiDs :: getDeviceInfo( unsigned int device )
// Probe the output (DirectSound) and input (DirectSoundCapture)
// capabilities of the device at the given index and fill a DeviceInfo.
5256 RtAudio::DeviceInfo info;
5257 info.probed = false;
5259 if ( dsDevices.size() == 0 ) {
5260 // Force a query of all devices
5262 if ( dsDevices.size() == 0 ) {
5263 errorText_ = "RtApiDs::getDeviceInfo: no devices found!";
5264 error( RtAudioError::INVALID_USE );
5269 if ( device >= dsDevices.size() ) {
5270 errorText_ = "RtApiDs::getDeviceInfo: device ID is invalid!";
5271 error( RtAudioError::INVALID_USE );
// Skip the output probe entirely when this device has no valid output
// id.  (The probeInput: label itself is elided from this excerpt.)
5276 if ( dsDevices[ device ].validId[0] == false ) goto probeInput;
5278 LPDIRECTSOUND output;
5280 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5281 if ( FAILED( result ) ) {
5282 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5283 errorText_ = errorStream_.str();
5284 error( RtAudioError::WARNING );
5288 outCaps.dwSize = sizeof( outCaps );
5289 result = output->GetCaps( &outCaps );
5290 if ( FAILED( result ) ) {
5292 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting capabilities!";
5293 errorText_ = errorStream_.str();
5294 error( RtAudioError::WARNING );
5298 // Get output channel information.
5299 info.outputChannels = ( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ? 2 : 1;
5301 // Get sample rate information.
5302 info.sampleRates.clear();
5303 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
5304 if ( SAMPLE_RATES[k] >= (unsigned int) outCaps.dwMinSecondarySampleRate &&
5305 SAMPLE_RATES[k] <= (unsigned int) outCaps.dwMaxSecondarySampleRate ) {
5306 info.sampleRates.push_back( SAMPLE_RATES[k] );
// Preferred rate: the highest supported standard rate at or below 48 kHz.
5308 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
5309 info.preferredSampleRate = SAMPLE_RATES[k];
5313 // Get format information.
5314 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT ) info.nativeFormats |= RTAUDIO_SINT16;
5315 if ( outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) info.nativeFormats |= RTAUDIO_SINT8;
5319 if ( getDefaultOutputDevice() == device )
5320 info.isDefaultOutput = true;
// No capture side for this device: record the name and finish (the
// early return is elided from this excerpt).
5322 if ( dsDevices[ device ].validId[1] == false ) {
5323 info.name = dsDevices[ device ].name;
5330 LPDIRECTSOUNDCAPTURE input;
5331 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5332 if ( FAILED( result ) ) {
5333 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5334 errorText_ = errorStream_.str();
5335 error( RtAudioError::WARNING );
5340 inCaps.dwSize = sizeof( inCaps );
5341 result = input->GetCaps( &inCaps );
5342 if ( FAILED( result ) ) {
5344 errorStream_ << "RtApiDs::getDeviceInfo: error (" << getErrorString( result ) << ") getting object capabilities (" << dsDevices[ device ].name << ")!";
5345 errorText_ = errorStream_.str();
5346 error( RtAudioError::WARNING );
5350 // Get input channel information.
5351 info.inputChannels = inCaps.dwChannels;
5353 // Get sample rate and format information.
5354 std::vector<unsigned int> rates;
// dwFormats is a bitmask of rate/channels/width combinations; the rates
// pushed below show the encoding (e.g. WAVE_FORMAT_1S16 = 11.025 kHz
// stereo 16-bit, WAVE_FORMAT_96S08 = 96 kHz stereo 8-bit).
5355 if ( inCaps.dwChannels >= 2 ) {
5356 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5357 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5358 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5359 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) info.nativeFormats |= RTAUDIO_SINT16;
5360 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5361 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5362 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) info.nativeFormats |= RTAUDIO_SINT8;
5363 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) info.nativeFormats |= RTAUDIO_SINT8;
// 16-bit support wins over 8-bit when collecting candidate rates.
5365 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5366 if ( inCaps.dwFormats & WAVE_FORMAT_1S16 ) rates.push_back( 11025 );
5367 if ( inCaps.dwFormats & WAVE_FORMAT_2S16 ) rates.push_back( 22050 );
5368 if ( inCaps.dwFormats & WAVE_FORMAT_4S16 ) rates.push_back( 44100 );
5369 if ( inCaps.dwFormats & WAVE_FORMAT_96S16 ) rates.push_back( 96000 );
5371 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5372 if ( inCaps.dwFormats & WAVE_FORMAT_1S08 ) rates.push_back( 11025 );
5373 if ( inCaps.dwFormats & WAVE_FORMAT_2S08 ) rates.push_back( 22050 );
5374 if ( inCaps.dwFormats & WAVE_FORMAT_4S08 ) rates.push_back( 44100 );
5375 if ( inCaps.dwFormats & WAVE_FORMAT_96S08 ) rates.push_back( 96000 );
// Mono path: same logic using the "M" (mono) format bits.
5378 else if ( inCaps.dwChannels == 1 ) {
5379 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5380 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5381 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5382 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) info.nativeFormats |= RTAUDIO_SINT16;
5383 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5384 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5385 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5386 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) info.nativeFormats |= RTAUDIO_SINT8;
5388 if ( info.nativeFormats & RTAUDIO_SINT16 ) {
5389 if ( inCaps.dwFormats & WAVE_FORMAT_1M16 ) rates.push_back( 11025 );
5390 if ( inCaps.dwFormats & WAVE_FORMAT_2M16 ) rates.push_back( 22050 );
5391 if ( inCaps.dwFormats & WAVE_FORMAT_4M16 ) rates.push_back( 44100 );
5392 if ( inCaps.dwFormats & WAVE_FORMAT_96M16 ) rates.push_back( 96000 );
5394 else if ( info.nativeFormats & RTAUDIO_SINT8 ) {
5395 if ( inCaps.dwFormats & WAVE_FORMAT_1M08 ) rates.push_back( 11025 );
5396 if ( inCaps.dwFormats & WAVE_FORMAT_2M08 ) rates.push_back( 22050 );
5397 if ( inCaps.dwFormats & WAVE_FORMAT_4M08 ) rates.push_back( 44100 );
5398 if ( inCaps.dwFormats & WAVE_FORMAT_96M08 ) rates.push_back( 96000 );
5401 else info.inputChannels = 0; // technically, this would be an error
5405 if ( info.inputChannels == 0 ) return info;
5407 // Copy the supported rates to the info structure but avoid duplication.
5409 for ( unsigned int i=0; i<rates.size(); i++ ) {
5411 for ( unsigned int j=0; j<info.sampleRates.size(); j++ ) {
5412 if ( rates[i] == info.sampleRates[j] ) {
5417 if ( found == false ) info.sampleRates.push_back( rates[i] );
5419 std::sort( info.sampleRates.begin(), info.sampleRates.end() );
5421 // If device opens for both playback and capture, we determine the channels.
5422 if ( info.outputChannels > 0 && info.inputChannels > 0 )
5423 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
// The first capture device enumerated is the DirectSound default input.
5425 if ( device == 0 ) info.isDefaultInput = true;
5427 // Copy name and return.
5428 info.name = dsDevices[ device ].name;
5433 bool RtApiDs :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
5434 unsigned int firstChannel, unsigned int sampleRate,
5435 RtAudioFormat format, unsigned int *bufferSize,
5436 RtAudio::StreamOptions *options )
5438 if ( channels + firstChannel > 2 ) {
5439 errorText_ = "RtApiDs::probeDeviceOpen: DirectSound does not support more than 2 channels per device.";
5443 size_t nDevices = dsDevices.size();
5444 if ( nDevices == 0 ) {
5445 // This should not happen because a check is made before this function is called.
5446 errorText_ = "RtApiDs::probeDeviceOpen: no devices found!";
5450 if ( device >= nDevices ) {
5451 // This should not happen because a check is made before this function is called.
5452 errorText_ = "RtApiDs::probeDeviceOpen: device ID is invalid!";
5456 if ( mode == OUTPUT ) {
5457 if ( dsDevices[ device ].validId[0] == false ) {
5458 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support output!";
5459 errorText_ = errorStream_.str();
5463 else { // mode == INPUT
5464 if ( dsDevices[ device ].validId[1] == false ) {
5465 errorStream_ << "RtApiDs::probeDeviceOpen: device (" << device << ") does not support input!";
5466 errorText_ = errorStream_.str();
5471 // According to a note in PortAudio, using GetDesktopWindow()
5472 // instead of GetForegroundWindow() is supposed to avoid problems
5473 // that occur when the application's window is not the foreground
5474 // window. Also, if the application window closes before the
5475 // DirectSound buffer, DirectSound can crash. In the past, I had
5476 // problems when using GetDesktopWindow() but it seems fine now
5477 // (January 2010). I'll leave it commented here.
5478 // HWND hWnd = GetForegroundWindow();
5479 HWND hWnd = GetDesktopWindow();
5481 // Check the numberOfBuffers parameter and limit the lowest value to
5482 // two. This is a judgement call and a value of two is probably too
5483 // low for capture, but it should work for playback.
5485 if ( options ) nBuffers = options->numberOfBuffers;
5486 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) nBuffers = 2;
5487 if ( nBuffers < 2 ) nBuffers = 3;
5489 // Check the lower range of the user-specified buffer size and set
5490 // (arbitrarily) to a lower bound of 32.
5491 if ( *bufferSize < 32 ) *bufferSize = 32;
5493 // Create the wave format structure. The data format setting will
5494 // be determined later.
5495 WAVEFORMATEX waveFormat;
5496 ZeroMemory( &waveFormat, sizeof(WAVEFORMATEX) );
5497 waveFormat.wFormatTag = WAVE_FORMAT_PCM;
5498 waveFormat.nChannels = channels + firstChannel;
5499 waveFormat.nSamplesPerSec = (unsigned long) sampleRate;
5501 // Determine the device buffer size. By default, we'll use the value
5502 // defined above (32K), but we will grow it to make allowances for
5503 // very large software buffer sizes.
5504 DWORD dsBufferSize = MINIMUM_DEVICE_BUFFER_SIZE;
5505 DWORD dsPointerLeadTime = 0;
5507 void *ohandle = 0, *bhandle = 0;
5509 if ( mode == OUTPUT ) {
5511 LPDIRECTSOUND output;
5512 result = DirectSoundCreate( dsDevices[ device ].id[0], &output, NULL );
5513 if ( FAILED( result ) ) {
5514 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening output device (" << dsDevices[ device ].name << ")!";
5515 errorText_ = errorStream_.str();
5520 outCaps.dwSize = sizeof( outCaps );
5521 result = output->GetCaps( &outCaps );
5522 if ( FAILED( result ) ) {
5524 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting capabilities (" << dsDevices[ device ].name << ")!";
5525 errorText_ = errorStream_.str();
5529 // Check channel information.
5530 if ( channels + firstChannel == 2 && !( outCaps.dwFlags & DSCAPS_PRIMARYSTEREO ) ) {
5531 errorStream_ << "RtApiDs::getDeviceInfo: the output device (" << dsDevices[ device ].name << ") does not support stereo playback.";
5532 errorText_ = errorStream_.str();
5536 // Check format information. Use 16-bit format unless not
5537 // supported or user requests 8-bit.
5538 if ( outCaps.dwFlags & DSCAPS_PRIMARY16BIT &&
5539 !( format == RTAUDIO_SINT8 && outCaps.dwFlags & DSCAPS_PRIMARY8BIT ) ) {
5540 waveFormat.wBitsPerSample = 16;
5541 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5544 waveFormat.wBitsPerSample = 8;
5545 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5547 stream_.userFormat = format;
5549 // Update wave format structure and buffer information.
5550 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5551 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5552 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5554 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5555 while ( dsPointerLeadTime * 2U > dsBufferSize )
5558 // Set cooperative level to DSSCL_EXCLUSIVE ... sound stops when window focus changes.
5559 // result = output->SetCooperativeLevel( hWnd, DSSCL_EXCLUSIVE );
5560 // Set cooperative level to DSSCL_PRIORITY ... sound remains when window focus changes.
5561 result = output->SetCooperativeLevel( hWnd, DSSCL_PRIORITY );
5562 if ( FAILED( result ) ) {
5564 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting cooperative level (" << dsDevices[ device ].name << ")!";
5565 errorText_ = errorStream_.str();
5569 // Even though we will write to the secondary buffer, we need to
5570 // access the primary buffer to set the correct output format
5571 // (since the default is 8-bit, 22 kHz!). Setup the DS primary
5572 // buffer description.
5573 DSBUFFERDESC bufferDescription;
5574 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5575 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5576 bufferDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
5578 // Obtain the primary buffer
5579 LPDIRECTSOUNDBUFFER buffer;
5580 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5581 if ( FAILED( result ) ) {
5583 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") accessing primary buffer (" << dsDevices[ device ].name << ")!";
5584 errorText_ = errorStream_.str();
5588 // Set the primary DS buffer sound format.
5589 result = buffer->SetFormat( &waveFormat );
5590 if ( FAILED( result ) ) {
5592 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") setting primary buffer format (" << dsDevices[ device ].name << ")!";
5593 errorText_ = errorStream_.str();
5597 // Setup the secondary DS buffer description.
5598 ZeroMemory( &bufferDescription, sizeof( DSBUFFERDESC ) );
5599 bufferDescription.dwSize = sizeof( DSBUFFERDESC );
5600 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5601 DSBCAPS_GLOBALFOCUS |
5602 DSBCAPS_GETCURRENTPOSITION2 |
5603 DSBCAPS_LOCHARDWARE ); // Force hardware mixing
5604 bufferDescription.dwBufferBytes = dsBufferSize;
5605 bufferDescription.lpwfxFormat = &waveFormat;
5607 // Try to create the secondary DS buffer. If that doesn't work,
5608 // try to use software mixing. Otherwise, there's a problem.
5609 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5610 if ( FAILED( result ) ) {
5611 bufferDescription.dwFlags = ( DSBCAPS_STICKYFOCUS |
5612 DSBCAPS_GLOBALFOCUS |
5613 DSBCAPS_GETCURRENTPOSITION2 |
5614 DSBCAPS_LOCSOFTWARE ); // Force software mixing
5615 result = output->CreateSoundBuffer( &bufferDescription, &buffer, NULL );
5616 if ( FAILED( result ) ) {
5618 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating secondary buffer (" << dsDevices[ device ].name << ")!";
5619 errorText_ = errorStream_.str();
5624 // Get the buffer size ... might be different from what we specified.
5626 dsbcaps.dwSize = sizeof( DSBCAPS );
5627 result = buffer->GetCaps( &dsbcaps );
5628 if ( FAILED( result ) ) {
5631 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5632 errorText_ = errorStream_.str();
5636 dsBufferSize = dsbcaps.dwBufferBytes;
5638 // Lock the DS buffer
5641 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5642 if ( FAILED( result ) ) {
5645 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking buffer (" << dsDevices[ device ].name << ")!";
5646 errorText_ = errorStream_.str();
5650 // Zero the DS buffer
5651 ZeroMemory( audioPtr, dataLen );
5653 // Unlock the DS buffer
5654 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5655 if ( FAILED( result ) ) {
5658 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking buffer (" << dsDevices[ device ].name << ")!";
5659 errorText_ = errorStream_.str();
5663 ohandle = (void *) output;
5664 bhandle = (void *) buffer;
5667 if ( mode == INPUT ) {
5669 LPDIRECTSOUNDCAPTURE input;
5670 result = DirectSoundCaptureCreate( dsDevices[ device ].id[1], &input, NULL );
5671 if ( FAILED( result ) ) {
5672 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") opening input device (" << dsDevices[ device ].name << ")!";
5673 errorText_ = errorStream_.str();
5678 inCaps.dwSize = sizeof( inCaps );
5679 result = input->GetCaps( &inCaps );
5680 if ( FAILED( result ) ) {
5682 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting input capabilities (" << dsDevices[ device ].name << ")!";
5683 errorText_ = errorStream_.str();
5687 // Check channel information.
5688 if ( inCaps.dwChannels < channels + firstChannel ) {
5689 errorText_ = "RtApiDs::getDeviceInfo: the input device does not support requested input channels.";
5693 // Check format information. Use 16-bit format unless user
5695 DWORD deviceFormats;
5696 if ( channels + firstChannel == 2 ) {
5697 deviceFormats = WAVE_FORMAT_1S08 | WAVE_FORMAT_2S08 | WAVE_FORMAT_4S08 | WAVE_FORMAT_96S08;
5698 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5699 waveFormat.wBitsPerSample = 8;
5700 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5702 else { // assume 16-bit is supported
5703 waveFormat.wBitsPerSample = 16;
5704 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5707 else { // channel == 1
5708 deviceFormats = WAVE_FORMAT_1M08 | WAVE_FORMAT_2M08 | WAVE_FORMAT_4M08 | WAVE_FORMAT_96M08;
5709 if ( format == RTAUDIO_SINT8 && inCaps.dwFormats & deviceFormats ) {
5710 waveFormat.wBitsPerSample = 8;
5711 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
5713 else { // assume 16-bit is supported
5714 waveFormat.wBitsPerSample = 16;
5715 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
5718 stream_.userFormat = format;
5720 // Update wave format structure and buffer information.
5721 waveFormat.nBlockAlign = waveFormat.nChannels * waveFormat.wBitsPerSample / 8;
5722 waveFormat.nAvgBytesPerSec = waveFormat.nSamplesPerSec * waveFormat.nBlockAlign;
5723 dsPointerLeadTime = nBuffers * (*bufferSize) * (waveFormat.wBitsPerSample / 8) * channels;
5725 // If the user wants an even bigger buffer, increase the device buffer size accordingly.
5726 while ( dsPointerLeadTime * 2U > dsBufferSize )
5729 // Setup the secondary DS buffer description.
5730 DSCBUFFERDESC bufferDescription;
5731 ZeroMemory( &bufferDescription, sizeof( DSCBUFFERDESC ) );
5732 bufferDescription.dwSize = sizeof( DSCBUFFERDESC );
5733 bufferDescription.dwFlags = 0;
5734 bufferDescription.dwReserved = 0;
5735 bufferDescription.dwBufferBytes = dsBufferSize;
5736 bufferDescription.lpwfxFormat = &waveFormat;
5738 // Create the capture buffer.
5739 LPDIRECTSOUNDCAPTUREBUFFER buffer;
5740 result = input->CreateCaptureBuffer( &bufferDescription, &buffer, NULL );
5741 if ( FAILED( result ) ) {
5743 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") creating input buffer (" << dsDevices[ device ].name << ")!";
5744 errorText_ = errorStream_.str();
5748 // Get the buffer size ... might be different from what we specified.
5750 dscbcaps.dwSize = sizeof( DSCBCAPS );
5751 result = buffer->GetCaps( &dscbcaps );
5752 if ( FAILED( result ) ) {
5755 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") getting buffer settings (" << dsDevices[ device ].name << ")!";
5756 errorText_ = errorStream_.str();
5760 dsBufferSize = dscbcaps.dwBufferBytes;
5762 // NOTE: We could have a problem here if this is a duplex stream
5763 // and the play and capture hardware buffer sizes are different
5764 // (I'm actually not sure if that is a problem or not).
5765 // Currently, we are not verifying that.
5767 // Lock the capture buffer
5770 result = buffer->Lock( 0, dsBufferSize, &audioPtr, &dataLen, NULL, NULL, 0 );
5771 if ( FAILED( result ) ) {
5774 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") locking input buffer (" << dsDevices[ device ].name << ")!";
5775 errorText_ = errorStream_.str();
5780 ZeroMemory( audioPtr, dataLen );
5782 // Unlock the buffer
5783 result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
5784 if ( FAILED( result ) ) {
5787 errorStream_ << "RtApiDs::probeDeviceOpen: error (" << getErrorString( result ) << ") unlocking input buffer (" << dsDevices[ device ].name << ")!";
5788 errorText_ = errorStream_.str();
5792 ohandle = (void *) input;
5793 bhandle = (void *) buffer;
5796 // Set various stream parameters
5797 DsHandle *handle = 0;
5798 stream_.nDeviceChannels[mode] = channels + firstChannel;
5799 stream_.nUserChannels[mode] = channels;
5800 stream_.bufferSize = *bufferSize;
5801 stream_.channelOffset[mode] = firstChannel;
5802 stream_.deviceInterleaved[mode] = true;
5803 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
5804 else stream_.userInterleaved = true;
5806 // Set flag for buffer conversion
5807 stream_.doConvertBuffer[mode] = false;
5808 if (stream_.nUserChannels[mode] != stream_.nDeviceChannels[mode])
5809 stream_.doConvertBuffer[mode] = true;
5810 if (stream_.userFormat != stream_.deviceFormat[mode])
5811 stream_.doConvertBuffer[mode] = true;
5812 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
5813 stream_.nUserChannels[mode] > 1 )
5814 stream_.doConvertBuffer[mode] = true;
5816 // Allocate necessary internal buffers
5817 long bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
5818 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
5819 if ( stream_.userBuffer[mode] == NULL ) {
5820 errorText_ = "RtApiDs::probeDeviceOpen: error allocating user buffer memory.";
5824 if ( stream_.doConvertBuffer[mode] ) {
5826 bool makeBuffer = true;
5827 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
5828 if ( mode == INPUT ) {
5829 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
5830 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
5831 if ( bufferBytes <= (long) bytesOut ) makeBuffer = false;
5836 bufferBytes *= *bufferSize;
5837 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
5838 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
5839 if ( stream_.deviceBuffer == NULL ) {
5840 errorText_ = "RtApiDs::probeDeviceOpen: error allocating device buffer memory.";
5846 // Allocate our DsHandle structures for the stream.
5847 if ( stream_.apiHandle == 0 ) {
5849 handle = new DsHandle;
5851 catch ( std::bad_alloc& ) {
5852 errorText_ = "RtApiDs::probeDeviceOpen: error allocating AsioHandle memory.";
5856 // Create a manual-reset event.
5857 handle->condition = CreateEvent( NULL, // no security
5858 TRUE, // manual-reset
5859 FALSE, // non-signaled initially
5861 stream_.apiHandle = (void *) handle;
5864 handle = (DsHandle *) stream_.apiHandle;
5865 handle->id[mode] = ohandle;
5866 handle->buffer[mode] = bhandle;
5867 handle->dsBufferSize[mode] = dsBufferSize;
5868 handle->dsPointerLeadTime[mode] = dsPointerLeadTime;
5870 stream_.device[mode] = device;
5871 stream_.state = STREAM_STOPPED;
5872 if ( stream_.mode == OUTPUT && mode == INPUT )
5873 // We had already set up an output stream.
5874 stream_.mode = DUPLEX;
5876 stream_.mode = mode;
5877 stream_.nBuffers = nBuffers;
5878 stream_.sampleRate = sampleRate;
5880 // Setup the buffer conversion information structure.
5881 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
5883 // Setup the callback thread.
5884 if ( stream_.callbackInfo.isRunning == false ) {
5886 stream_.callbackInfo.isRunning = true;
5887 stream_.callbackInfo.object = (void *) this;
5888 stream_.callbackInfo.thread = _beginthreadex( NULL, 0, &callbackHandler,
5889 &stream_.callbackInfo, 0, &threadId );
5890 if ( stream_.callbackInfo.thread == 0 ) {
5891 errorText_ = "RtApiDs::probeDeviceOpen: error creating callback thread!";
5895 // Boost DS thread priority
5896 SetThreadPriority( (HANDLE) stream_.callbackInfo.thread, THREAD_PRIORITY_HIGHEST );
5902 if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5903 LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5904 LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
5905 if ( buffer ) buffer->Release();
5908 if ( handle->buffer[1] ) {
5909 LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5910 LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
5911 if ( buffer ) buffer->Release();
5914 CloseHandle( handle->condition );
5916 stream_.apiHandle = 0;
5919 for ( int i=0; i<2; i++ ) {
5920 if ( stream_.userBuffer[i] ) {
5921 free( stream_.userBuffer[i] );
5922 stream_.userBuffer[i] = 0;
5926 if ( stream_.deviceBuffer ) {
5927 free( stream_.deviceBuffer );
5928 stream_.deviceBuffer = 0;
5931 stream_.state = STREAM_CLOSED;
5935 void RtApiDs :: closeStream()
// Tear down an open DirectSound stream: stop the callback thread,
// release the playback/capture buffer-object pairs, close the
// signalling event, and free all internal buffers. Only issues a
// WARNING (no throw) when no stream is open.
5937   if ( stream_.state == STREAM_CLOSED ) {
5938     errorText_ = "RtApiDs::closeStream(): no open stream to close!";
5939     error( RtAudioError::WARNING );
5943   // Stop the callback thread.
5944   stream_.callbackInfo.isRunning = false;
// Block until the callback thread observes isRunning == false and exits,
// then reclaim its handle.
5945   WaitForSingleObject( (HANDLE) stream_.callbackInfo.thread, INFINITE );
5946   CloseHandle( (HANDLE) stream_.callbackInfo.thread );
5948   DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Release the output (index 0) DirectSound buffer/object pair, if present.
5950     if ( handle->buffer[0] ) { // the object pointer can be NULL and valid
5951       LPDIRECTSOUND object = (LPDIRECTSOUND) handle->id[0];
5952       LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
// Release the input (index 1) capture buffer/object pair, if present.
5959     if ( handle->buffer[1] ) {
5960       LPDIRECTSOUNDCAPTURE object = (LPDIRECTSOUNDCAPTURE) handle->id[1];
5961       LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
// Close the manual-reset event created in probeDeviceOpen and drop the handle.
5968     CloseHandle( handle->condition );
5970   stream_.apiHandle = 0;
// Free the per-direction user buffers (0 = output, 1 = input).
5973   for ( int i=0; i<2; i++ ) {
5974     if ( stream_.userBuffer[i] ) {
5975       free( stream_.userBuffer[i] );
5976       stream_.userBuffer[i] = 0;
// Free the shared device-format conversion buffer, if one was allocated.
5980   if ( stream_.deviceBuffer ) {
5981     free( stream_.deviceBuffer );
5982     stream_.deviceBuffer = 0;
5985   stream_.mode = UNINITIALIZED;
5986   stream_.state = STREAM_CLOSED;
5989 void RtApiDs :: startStream()
// Start (or restart) a stopped stream: raises the Windows timer
// resolution, resets the duplex synchronization state, starts the
// DirectSound playback and/or capture buffers in looping mode, and
// marks the stream RUNNING. Warns (no-op) if already running.
5992   if ( stream_.state == STREAM_RUNNING ) {
5993     errorText_ = "RtApiDs::startStream(): the stream is already running!";
5994     error( RtAudioError::WARNING );
5998   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6000   // Increase scheduler frequency on lesser windows (a side-effect of
6001   // increasing timer accuracy). On greater windows (Win2K or later),
6002   // this is already in effect.
6003   timeBeginPeriod( 1 );
6005   buffersRolling = false;
6006   duplexPrerollBytes = 0;
6008   if ( stream_.mode == DUPLEX ) {
6009     // 0.5 seconds of silence in DUPLEX mode while the devices spin up and synchronize.
6010     duplexPrerollBytes = (int) ( 0.5 * stream_.sampleRate * formatBytes( stream_.deviceFormat[1] ) * stream_.nDeviceChannels[1] );
// Kick off the playback buffer (loops continuously until stopped).
6014   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6016     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6017     result = buffer->Play( 0, 0, DSBPLAY_LOOPING );
6018     if ( FAILED( result ) ) {
6019       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting output buffer!";
6020       errorText_ = errorStream_.str();
// Kick off the capture buffer (also looping).
6025   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6027     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6028     result = buffer->Start( DSCBSTART_LOOPING );
6029     if ( FAILED( result ) ) {
6030       errorStream_ << "RtApiDs::startStream: error (" << getErrorString( result ) << ") starting input buffer!";
6031       errorText_ = errorStream_.str();
// Clear drain state and re-arm the stop-signalling event before going live.
6036   handle->drainCounter = 0;
6037   handle->internalDrain = false;
6038   ResetEvent( handle->condition );
6039   stream_.state = STREAM_RUNNING;
6042   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6045 void RtApiDs :: stopStream()
// Stop a running stream after letting pending output drain: waits for
// the callback thread to signal drain completion, stops the DS
// buffers, zeros them so a restart does not replay stale audio, and
// resets the buffer pointers to the start. Warns if already stopped.
6048   if ( stream_.state == STREAM_STOPPED ) {
6049     errorText_ = "RtApiDs::stopStream(): the stream is already stopped!";
6050     error( RtAudioError::WARNING );
6057   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6058   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
// If no drain is in progress, request one (drainCounter = 2) and wait
// for callbackEvent() to set the condition event when output has drained.
6059     if ( handle->drainCounter == 0 ) {
6060       handle->drainCounter = 2;
6061       WaitForSingleObject( handle->condition, INFINITE ); // block until signaled
6064     stream_.state = STREAM_STOPPED;
6066     MUTEX_LOCK( &stream_.mutex );
6068     // Stop the buffer and clear memory
6069     LPDIRECTSOUNDBUFFER buffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6070     result = buffer->Stop();
6071     if ( FAILED( result ) ) {
6072       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping output buffer!";
6073       errorText_ = errorStream_.str();
6077     // Lock the buffer and clear it so that if we start to play again,
6078     // we won't have old data playing.
6079     result = buffer->Lock( 0, handle->dsBufferSize[0], &audioPtr, &dataLen, NULL, NULL, 0 );
6080     if ( FAILED( result ) ) {
6081       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking output buffer!";
6082       errorText_ = errorStream_.str();
6086     // Zero the DS buffer
6087     ZeroMemory( audioPtr, dataLen );
6089     // Unlock the DS buffer
6090     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6091     if ( FAILED( result ) ) {
6092       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking output buffer!";
6093       errorText_ = errorStream_.str();
6097     // If we start playing again, we must begin at beginning of buffer.
6098     handle->bufferPointer[0] = 0;
6101   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6102     LPDIRECTSOUNDCAPTUREBUFFER buffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6106     stream_.state = STREAM_STOPPED;
// In DUPLEX mode the mutex was already taken in the output branch above;
// only lock here for pure INPUT streams.
6108     if ( stream_.mode != DUPLEX )
6109       MUTEX_LOCK( &stream_.mutex );
6111     result = buffer->Stop();
6112     if ( FAILED( result ) ) {
6113       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") stopping input buffer!";
6114       errorText_ = errorStream_.str();
6118     // Lock the buffer and clear it so that if we start to play again,
6119     // we won't have old data playing.
6120     result = buffer->Lock( 0, handle->dsBufferSize[1], &audioPtr, &dataLen, NULL, NULL, 0 );
6121     if ( FAILED( result ) ) {
6122       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") locking input buffer!";
6123       errorText_ = errorStream_.str();
6127     // Zero the DS buffer
6128     ZeroMemory( audioPtr, dataLen );
6130     // Unlock the DS buffer
6131     result = buffer->Unlock( audioPtr, dataLen, NULL, 0 );
6132     if ( FAILED( result ) ) {
6133       errorStream_ << "RtApiDs::stopStream: error (" << getErrorString( result ) << ") unlocking input buffer!";
6134       errorText_ = errorStream_.str();
6138     // If we start recording again, we must begin at beginning of buffer.
6139     handle->bufferPointer[1] = 0;
6143   timeEndPeriod( 1 ); // revert to normal scheduler frequency on lesser windows.
6144   MUTEX_UNLOCK( &stream_.mutex );
6146   if ( FAILED( result ) ) error( RtAudioError::SYSTEM_ERROR );
6149 void RtApiDs :: abortStream()
// Abort a running stream immediately: sets drainCounter = 2 so the
// callback path treats the stream as draining without waiting for
// pending output. Warns (no-op) if the stream is already stopped.
6152   if ( stream_.state == STREAM_STOPPED ) {
6153     errorText_ = "RtApiDs::abortStream(): the stream is already stopped!";
6154     error( RtAudioError::WARNING );
6158   DsHandle *handle = (DsHandle *) stream_.apiHandle;
// Pre-set the drain counter so the subsequent stop path (not visible
// here) skips the normal output-drain wait.
6159   handle->drainCounter = 2;
6164 void RtApiDs :: callbackEvent()
// Per-iteration body of the DirectSound callback thread: invokes the
// user callback to produce/consume one buffer, then copies data
// to/from the circular DirectSound playback and capture buffers,
// handling drain, under/overrun detection and duplex synchronization.
//
// FIXES in this revision (no other code bytes changed):
//  * Four occurrences of the mojibake token "¤t…Pointer" (an HTML
//    entity corruption of "&current…Pointer") restored to valid
//    address-of expressions.
//  * Both Unlock() calls now assign their HRESULT to 'result'; the
//    following FAILED( result ) checks previously tested the stale
//    result of the preceding (already successful) Lock() call, so
//    Unlock failures were silently ignored.
6166   if ( stream_.state == STREAM_STOPPED || stream_.state == STREAM_STOPPING ) {
6167     Sleep( 50 ); // sleep 50 milliseconds
6171   if ( stream_.state == STREAM_CLOSED ) {
6172     errorText_ = "RtApiDs::callbackEvent(): the stream is closed ... this shouldn't happen!";
6173     error( RtAudioError::WARNING );
6177   CallbackInfo *info = (CallbackInfo *) &stream_.callbackInfo;
6178   DsHandle *handle = (DsHandle *) stream_.apiHandle;
6180   // Check if we were draining the stream and signal is finished.
6181   if ( handle->drainCounter > stream_.nBuffers + 2 ) {
6183     stream_.state = STREAM_STOPPING;
6184     if ( handle->internalDrain == false )
6185       SetEvent( handle->condition );
6191   // Invoke user callback to get fresh output data UNLESS we are
6193   if ( handle->drainCounter == 0 ) {
6194     RtAudioCallback callback = (RtAudioCallback) info->callback;
6195     double streamTime = getStreamTime();
6196     RtAudioStreamStatus status = 0;
6197     if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
6198       status |= RTAUDIO_OUTPUT_UNDERFLOW;
6199       handle->xrun[0] = false;
6201     if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
6202       status |= RTAUDIO_INPUT_OVERFLOW;
6203       handle->xrun[1] = false;
6205     int cbReturnValue = callback( stream_.userBuffer[0], stream_.userBuffer[1],
6206                                   stream_.bufferSize, streamTime, status, info->userData );
// Callback return 2 => abort now; 1 => drain remaining output then stop.
6207     if ( cbReturnValue == 2 ) {
6208       stream_.state = STREAM_STOPPING;
6209       handle->drainCounter = 2;
6213     else if ( cbReturnValue == 1 ) {
6214       handle->drainCounter = 1;
6215       handle->internalDrain = true;
6220   DWORD currentWritePointer, safeWritePointer;
6221   DWORD currentReadPointer, safeReadPointer;
6222   UINT nextWritePointer;
6224   LPVOID buffer1 = NULL;
6225   LPVOID buffer2 = NULL;
6226   DWORD bufferSize1 = 0;
6227   DWORD bufferSize2 = 0;
6232   MUTEX_LOCK( &stream_.mutex );
6233   if ( stream_.state == STREAM_STOPPED ) {
6234     MUTEX_UNLOCK( &stream_.mutex );
// One-time startup synchronization: establish initial read/write
// pointer positions once the device position counters begin moving.
6238   if ( buffersRolling == false ) {
6239     if ( stream_.mode == DUPLEX ) {
6240       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6242       // It takes a while for the devices to get rolling. As a result,
6243       // there's no guarantee that the capture and write device pointers
6244       // will move in lockstep. Wait here for both devices to start
6245       // rolling, and then set our buffer pointers accordingly.
6246       // e.g. Crystal Drivers: the capture buffer starts up 5700 to 9600
6247       // bytes later than the write buffer.
6249       // Stub: a serious risk of having a pre-emptive scheduling round
6250       // take place between the two GetCurrentPosition calls... but I'm
6251       // really not sure how to solve the problem. Temporarily boost to
6252       // Realtime priority, maybe; but I'm not sure what priority the
6253       // DirectSound service threads run at. We *should* be roughly
6254       // within a ms or so of correct.
6256       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6257       LPDIRECTSOUNDCAPTUREBUFFER dsCaptureBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6259       DWORD startSafeWritePointer, startSafeReadPointer;
6261       result = dsWriteBuffer->GetCurrentPosition( NULL, &startSafeWritePointer );
6262       if ( FAILED( result ) ) {
6263         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6264         errorText_ = errorStream_.str();
6265         MUTEX_UNLOCK( &stream_.mutex );
6266         error( RtAudioError::SYSTEM_ERROR );
6269       result = dsCaptureBuffer->GetCurrentPosition( NULL, &startSafeReadPointer );
6270       if ( FAILED( result ) ) {
6271         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6272         errorText_ = errorStream_.str();
6273         MUTEX_UNLOCK( &stream_.mutex );
6274         error( RtAudioError::SYSTEM_ERROR );
6278         result = dsWriteBuffer->GetCurrentPosition( NULL, &safeWritePointer );
6279         if ( FAILED( result ) ) {
6280           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6281           errorText_ = errorStream_.str();
6282           MUTEX_UNLOCK( &stream_.mutex );
6283           error( RtAudioError::SYSTEM_ERROR );
6286         result = dsCaptureBuffer->GetCurrentPosition( NULL, &safeReadPointer );
6287         if ( FAILED( result ) ) {
6288           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6289           errorText_ = errorStream_.str();
6290           MUTEX_UNLOCK( &stream_.mutex );
6291           error( RtAudioError::SYSTEM_ERROR );
6294         if ( safeWritePointer != startSafeWritePointer && safeReadPointer != startSafeReadPointer ) break;
6298       //assert( handle->dsBufferSize[0] == handle->dsBufferSize[1] );
6300       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6301       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6302       handle->bufferPointer[1] = safeReadPointer;
6304     else if ( stream_.mode == OUTPUT ) {
6306       // Set the proper nextWritePosition after initial startup.
6307       LPDIRECTSOUNDBUFFER dsWriteBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6308       result = dsWriteBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6309       if ( FAILED( result ) ) {
6310         errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6311         errorText_ = errorStream_.str();
6312         MUTEX_UNLOCK( &stream_.mutex );
6313         error( RtAudioError::SYSTEM_ERROR );
6316       handle->bufferPointer[0] = safeWritePointer + handle->dsPointerLeadTime[0];
6317       if ( handle->bufferPointer[0] >= handle->dsBufferSize[0] ) handle->bufferPointer[0] -= handle->dsBufferSize[0];
6320     buffersRolling = true;
// ---- Playback side: copy user/converted data into the DS ring buffer.
6323   if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
6325     LPDIRECTSOUNDBUFFER dsBuffer = (LPDIRECTSOUNDBUFFER) handle->buffer[0];
6327     if ( handle->drainCounter > 1 ) { // write zeros to the output stream
6328       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6329       bufferBytes *= formatBytes( stream_.userFormat );
6330       memset( stream_.userBuffer[0], 0, bufferBytes );
6333     // Setup parameters and do buffer conversion if necessary.
6334     if ( stream_.doConvertBuffer[0] ) {
6335       buffer = stream_.deviceBuffer;
6336       convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
6337       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[0];
6338       bufferBytes *= formatBytes( stream_.deviceFormat[0] );
6341       buffer = stream_.userBuffer[0];
6342       bufferBytes = stream_.bufferSize * stream_.nUserChannels[0];
6343       bufferBytes *= formatBytes( stream_.userFormat );
6346     // No byte swapping necessary in DirectSound implementation.
6348     // Ahhh ... windoze. 16-bit data is signed but 8-bit data is
6349     // unsigned. So, we need to convert our signed 8-bit data here to
6351     if ( stream_.deviceFormat[0] == RTAUDIO_SINT8 )
6352       for ( int i=0; i<bufferBytes; i++ ) buffer[i] = (unsigned char) ( buffer[i] + 128 );
6354     DWORD dsBufferSize = handle->dsBufferSize[0];
6355     nextWritePointer = handle->bufferPointer[0];
6357     DWORD endWrite, leadPointer;
6359     // Find out where the read and "safe write" pointers are.
6360     result = dsBuffer->GetCurrentPosition( &currentWritePointer, &safeWritePointer );
6361     if ( FAILED( result ) ) {
6362       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current write position!";
6363       errorText_ = errorStream_.str();
6364       MUTEX_UNLOCK( &stream_.mutex );
6365       error( RtAudioError::SYSTEM_ERROR );
6369     // We will copy our output buffer into the region between
6370     // safeWritePointer and leadPointer. If leadPointer is not
6371     // beyond the next endWrite position, wait until it is.
6372     leadPointer = safeWritePointer + handle->dsPointerLeadTime[0];
6373     //std::cout << "safeWritePointer = " << safeWritePointer << ", leadPointer = " << leadPointer << ", nextWritePointer = " << nextWritePointer << std::endl;
6374     if ( leadPointer > dsBufferSize ) leadPointer -= dsBufferSize;
6375     if ( leadPointer < nextWritePointer ) leadPointer += dsBufferSize; // unwrap offset
6376     endWrite = nextWritePointer + bufferBytes;
6378     // Check whether the entire write region is behind the play pointer.
6379     if ( leadPointer >= endWrite ) break;
6381     // If we are here, then we must wait until the leadPointer advances
6382     // beyond the end of our next write region. We use the
6383     // Sleep() function to suspend operation until that happens.
6384     double millis = ( endWrite - leadPointer ) * 1000.0;
6385     millis /= ( formatBytes( stream_.deviceFormat[0]) * stream_.nDeviceChannels[0] * stream_.sampleRate);
6386     if ( millis < 1.0 ) millis = 1.0;
6387     Sleep( (DWORD) millis );
6390     if ( dsPointerBetween( nextWritePointer, safeWritePointer, currentWritePointer, dsBufferSize )
6391          || dsPointerBetween( endWrite, safeWritePointer, currentWritePointer, dsBufferSize ) ) {
6392       // We've strayed into the forbidden zone ... resync the read pointer.
6393       handle->xrun[0] = true;
6394       nextWritePointer = safeWritePointer + handle->dsPointerLeadTime[0] - bufferBytes;
6395       if ( nextWritePointer >= dsBufferSize ) nextWritePointer -= dsBufferSize;
6396       handle->bufferPointer[0] = nextWritePointer;
6397       endWrite = nextWritePointer + bufferBytes;
6400     // Lock free space in the buffer
6401     result = dsBuffer->Lock( nextWritePointer, bufferBytes, &buffer1,
6402                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6403     if ( FAILED( result ) ) {
6404       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking buffer during playback!";
6405       errorText_ = errorStream_.str();
6406       MUTEX_UNLOCK( &stream_.mutex );
6407       error( RtAudioError::SYSTEM_ERROR );
6411     // Copy our buffer into the DS buffer
6412     CopyMemory( buffer1, buffer, bufferSize1 );
6413     if ( buffer2 != NULL ) CopyMemory( buffer2, buffer+bufferSize1, bufferSize2 );
6415     // Update our buffer offset and unlock sound buffer
// FIX: capture the Unlock HRESULT so the FAILED() test below is meaningful.
6416     result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6417     if ( FAILED( result ) ) {
6418       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking buffer during playback!";
6419       errorText_ = errorStream_.str();
6420       MUTEX_UNLOCK( &stream_.mutex );
6421       error( RtAudioError::SYSTEM_ERROR );
6424     nextWritePointer = ( nextWritePointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
6425     handle->bufferPointer[0] = nextWritePointer;
6428   // Don't bother draining input
6429   if ( handle->drainCounter ) {
6430     handle->drainCounter++;
// ---- Capture side: copy data out of the DS capture ring buffer.
6434   if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
6436     // Setup parameters.
6437     if ( stream_.doConvertBuffer[1] ) {
6438       buffer = stream_.deviceBuffer;
6439       bufferBytes = stream_.bufferSize * stream_.nDeviceChannels[1];
6440       bufferBytes *= formatBytes( stream_.deviceFormat[1] );
6443       buffer = stream_.userBuffer[1];
6444       bufferBytes = stream_.bufferSize * stream_.nUserChannels[1];
6445       bufferBytes *= formatBytes( stream_.userFormat );
6448     LPDIRECTSOUNDCAPTUREBUFFER dsBuffer = (LPDIRECTSOUNDCAPTUREBUFFER) handle->buffer[1];
6449     long nextReadPointer = handle->bufferPointer[1];
6450     DWORD dsBufferSize = handle->dsBufferSize[1];
6452     // Find out where the write and "safe read" pointers are.
6453     result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6454     if ( FAILED( result ) ) {
6455       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6456       errorText_ = errorStream_.str();
6457       MUTEX_UNLOCK( &stream_.mutex );
6458       error( RtAudioError::SYSTEM_ERROR );
6462     if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6463     DWORD endRead = nextReadPointer + bufferBytes;
6465     // Handling depends on whether we are INPUT or DUPLEX.
6466     // If we're in INPUT mode then waiting is a good thing. If we're in DUPLEX mode,
6467     // then a wait here will drag the write pointers into the forbidden zone.
6469     // In DUPLEX mode, rather than wait, we will back off the read pointer until
6470     // it's in a safe position. This causes dropouts, but it seems to be the only
6471     // practical way to sync up the read and write pointers reliably, given the
6472     // the very complex relationship between phase and increment of the read and write
6475     // In order to minimize audible dropouts in DUPLEX mode, we will
6476     // provide a pre-roll period of 0.5 seconds in which we return
6477     // zeros from the read buffer while the pointers sync up.
6479     if ( stream_.mode == DUPLEX ) {
6480       if ( safeReadPointer < endRead ) {
6481         if ( duplexPrerollBytes <= 0 ) {
6482           // Pre-roll time over. Be more agressive.
6483           int adjustment = endRead-safeReadPointer;
6485           handle->xrun[1] = true;
6487           // - large adjustments: we've probably run out of CPU cycles, so just resync exactly,
6488           //   and perform fine adjustments later.
6489           // - small adjustments: back off by twice as much.
6490           if ( adjustment >= 2*bufferBytes )
6491             nextReadPointer = safeReadPointer-2*bufferBytes;
6493             nextReadPointer = safeReadPointer-bufferBytes-adjustment;
6495           if ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6499           // In pre=roll time. Just do it.
6500           nextReadPointer = safeReadPointer - bufferBytes;
6501           while ( nextReadPointer < 0 ) nextReadPointer += dsBufferSize;
6503         endRead = nextReadPointer + bufferBytes;
6506     else { // mode == INPUT
6507       while ( safeReadPointer < endRead && stream_.callbackInfo.isRunning ) {
6508         // See comments for playback.
6509         double millis = (endRead - safeReadPointer) * 1000.0;
6510         millis /= ( formatBytes(stream_.deviceFormat[1]) * stream_.nDeviceChannels[1] * stream_.sampleRate);
6511         if ( millis < 1.0 ) millis = 1.0;
6512         Sleep( (DWORD) millis );
6514         // Wake up and find out where we are now.
6515         result = dsBuffer->GetCurrentPosition( &currentReadPointer, &safeReadPointer );
6516         if ( FAILED( result ) ) {
6517           errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") getting current read position!";
6518           errorText_ = errorStream_.str();
6519           MUTEX_UNLOCK( &stream_.mutex );
6520           error( RtAudioError::SYSTEM_ERROR );
6524         if ( safeReadPointer < (DWORD)nextReadPointer ) safeReadPointer += dsBufferSize; // unwrap offset
6528     // Lock free space in the buffer
6529     result = dsBuffer->Lock( nextReadPointer, bufferBytes, &buffer1,
6530                              &bufferSize1, &buffer2, &bufferSize2, 0 );
6531     if ( FAILED( result ) ) {
6532       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") locking capture buffer!";
6533       errorText_ = errorStream_.str();
6534       MUTEX_UNLOCK( &stream_.mutex );
6535       error( RtAudioError::SYSTEM_ERROR );
6539     if ( duplexPrerollBytes <= 0 ) {
6540       // Copy our buffer into the DS buffer
6541       CopyMemory( buffer, buffer1, bufferSize1 );
6542       if ( buffer2 != NULL ) CopyMemory( buffer+bufferSize1, buffer2, bufferSize2 );
6545       memset( buffer, 0, bufferSize1 );
6546       if ( buffer2 != NULL ) memset( buffer + bufferSize1, 0, bufferSize2 );
6547       duplexPrerollBytes -= bufferSize1 + bufferSize2;
6550     // Update our buffer offset and unlock sound buffer
6551     nextReadPointer = ( nextReadPointer + bufferSize1 + bufferSize2 ) % dsBufferSize;
// FIX: capture the Unlock HRESULT so the FAILED() test below is meaningful.
6552     result = dsBuffer->Unlock( buffer1, bufferSize1, buffer2, bufferSize2 );
6553     if ( FAILED( result ) ) {
6554       errorStream_ << "RtApiDs::callbackEvent: error (" << getErrorString( result ) << ") unlocking capture buffer!";
6555       errorText_ = errorStream_.str();
6556       MUTEX_UNLOCK( &stream_.mutex );
6557       error( RtAudioError::SYSTEM_ERROR );
6560     handle->bufferPointer[1] = nextReadPointer;
6562     // No byte swapping necessary in DirectSound implementation.
6564     // If necessary, convert 8-bit data from unsigned to signed.
6565     if ( stream_.deviceFormat[1] == RTAUDIO_SINT8 )
6566       for ( int j=0; j<bufferBytes; j++ ) buffer[j] = (signed char) ( buffer[j] - 128 );
6568     // Do buffer conversion if necessary.
6569     if ( stream_.doConvertBuffer[1] )
6570       convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
6574   MUTEX_UNLOCK( &stream_.mutex );
6575   RtApi::tickStreamTime();
6578 // Definitions for utility functions and callbacks
6579 // specific to the DirectSound implementation.
6581 static unsigned __stdcall callbackHandler( void *ptr )
// Entry point of the DirectSound callback thread created in
// probeDeviceOpen via _beginthreadex. Spins calling
// RtApiDs::callbackEvent() until CallbackInfo::isRunning is cleared
// (done by closeStream).
6583   CallbackInfo *info = (CallbackInfo *) ptr;
6584   RtApiDs *object = (RtApiDs *) info->object;
6585   bool* isRunning = &info->isRunning;
6587   while ( *isRunning == true ) {
6588     object->callbackEvent();
6595 static BOOL CALLBACK deviceQueryCallback( LPGUID lpguid,
6596                                           LPCTSTR description,
// DirectSound device-enumeration callback. For each enumerated device
// it verifies usability (by creating the DS object and querying caps),
// then records the device name/GUID into the shared dsDevices vector,
// merging input/output entries that share a name. Returning TRUE
// continues enumeration.
6600   struct DsProbeData& probeInfo = *(struct DsProbeData*) lpContext;
6601   std::vector<struct DsDevice>& dsDevices = *probeInfo.dsDevices;
6604   bool validDevice = false;
6605   if ( probeInfo.isInput == true ) {
// Capture path: device is valid if it reports at least one channel and format.
6607     LPDIRECTSOUNDCAPTURE object;
6609     hr = DirectSoundCaptureCreate( lpguid, &object, NULL );
6610     if ( hr != DS_OK ) return TRUE;
6612     caps.dwSize = sizeof(caps);
6613     hr = object->GetCaps( &caps );
6614     if ( hr == DS_OK ) {
6615       if ( caps.dwChannels > 0 && caps.dwFormats > 0 )
// Playback path: device is valid if its primary buffer supports mono or stereo.
6622     LPDIRECTSOUND object;
6623     hr = DirectSoundCreate( lpguid, &object, NULL );
6624     if ( hr != DS_OK ) return TRUE;
6626     caps.dwSize = sizeof(caps);
6627     hr = object->GetCaps( &caps );
6628     if ( hr == DS_OK ) {
6629       if ( caps.dwFlags & DSCAPS_PRIMARYMONO || caps.dwFlags & DSCAPS_PRIMARYSTEREO )
6635   // If good device, then save its name and guid.
6636   std::string name = convertCharPointerToStdString( description );
6637   //if ( name == "Primary Sound Driver" || name == "Primary Sound Capture Driver" )
// A NULL GUID denotes the system default device.
6638   if ( lpguid == NULL )
6639     name = "Default Device";
6640   if ( validDevice ) {
// If a device with this name was already recorded (e.g. as output),
// just attach the GUID for this direction (id[1] = input, id[0] = output).
6641     for ( unsigned int i=0; i<dsDevices.size(); i++ ) {
6642       if ( dsDevices[i].name == name ) {
6643         dsDevices[i].found = true;
6644         if ( probeInfo.isInput ) {
6645           dsDevices[i].id[1] = lpguid;
6646           dsDevices[i].validId[1] = true;
6649           dsDevices[i].id[0] = lpguid;
6650           dsDevices[i].validId[0] = true;
// Otherwise append a brand-new device record.
6658     device.found = true;
6659     if ( probeInfo.isInput ) {
6660       device.id[1] = lpguid;
6661       device.validId[1] = true;
6664       device.id[0] = lpguid;
6665       device.validId[0] = true;
6667     dsDevices.push_back( device );
6673 static const char* getErrorString( int code )
// Map a DirectSound HRESULT error code to a short human-readable
// string used in the errorStream_ messages above. Unknown codes fall
// through to a generic message.
6677   case DSERR_ALLOCATED:
6678     return "Already allocated";
6680   case DSERR_CONTROLUNAVAIL:
6681     return "Control unavailable";
6683   case DSERR_INVALIDPARAM:
6684     return "Invalid parameter";
6686   case DSERR_INVALIDCALL:
6687     return "Invalid call";
6690     return "Generic error";
6692   case DSERR_PRIOLEVELNEEDED:
6693     return "Priority level needed";
6695   case DSERR_OUTOFMEMORY:
6696     return "Out of memory";
6698   case DSERR_BADFORMAT:
6699     return "The sample rate or the channel format is not supported";
6701   case DSERR_UNSUPPORTED:
6702     return "Not supported";
6704   case DSERR_NODRIVER:
6707   case DSERR_ALREADYINITIALIZED:
6708     return "Already initialized";
6710   case DSERR_NOAGGREGATION:
6711     return "No aggregation";
6713   case DSERR_BUFFERLOST:
6714     return "Buffer lost";
6716   case DSERR_OTHERAPPHASPRIO:
6717     return "Another application already has priority";
6719   case DSERR_UNINITIALIZED:
6720     return "Uninitialized";
// Default: unrecognized HRESULT.
6723     return "DirectSound unknown error";
6726 //******************** End of __WINDOWS_DS__ *********************//
6730 #if defined(__LINUX_ALSA__)
6732 #include <alsa/asoundlib.h>
6735 // A structure to hold various information related to the ALSA API
6738 snd_pcm_t *handles[2];
6741 pthread_cond_t runnable_cv;
6745 :synchronized(false), runnable(false) { xrun[0] = false; xrun[1] = false; }
6748 static void *alsaCallbackHandler( void * ptr );
6750 RtApiAlsa :: RtApiAlsa()
// Default constructor — all state is set up lazily when a stream is opened.
6752   // Nothing to do here.
6755 RtApiAlsa :: ~RtApiAlsa()
// Destructor: close any stream still open so ALSA handles and buffers
// are released.
6757   if ( stream_.state != STREAM_CLOSED ) closeStream();
6760 unsigned int RtApiAlsa :: getDeviceCount( void )
// Count available ALSA PCM devices by walking every sound card
// ("hw:N") and each card's PCM subdevices via the control interface,
// then also checking for a "default" device. Probe failures are
// reported as WARNINGs and the card is skipped.
6762   unsigned nDevices = 0;
6763   int result, subdevice, card;
6767   // Count cards and devices
6769   snd_card_next( &card );
6770   while ( card >= 0 ) {
6771     sprintf( name, "hw:%d", card );
6772     result = snd_ctl_open( &handle, name, 0 );
6774       errorStream_ << "RtApiAlsa::getDeviceCount: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6775       errorText_ = errorStream_.str();
6776       error( RtAudioError::WARNING );
// Iterate this card's PCM devices; snd_ctl_pcm_next_device sets
// subdevice to -1 when there are no more.
6781       result = snd_ctl_pcm_next_device( handle, &subdevice );
6783         errorStream_ << "RtApiAlsa::getDeviceCount: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6784         errorText_ = errorStream_.str();
6785         error( RtAudioError::WARNING );
6788       if ( subdevice < 0 )
6793     snd_ctl_close( handle );
6794     snd_card_next( &card );
// Finally, account for the "default" virtual device if it can be opened.
6797   result = snd_ctl_open( &handle, "default", 0 );
6800     snd_ctl_close( handle );
// Probe a single ALSA device (by RtAudio index) and fill in an
// RtAudio::DeviceInfo: channel counts for playback/capture/duplex,
// supported sample rates, preferred sample rate, native data formats and
// the device name.  The device index maps onto the same enumeration order
// used by getDeviceCount() (hw:card,subdevice pairs, then "default").
// Probing failures are reported as warnings and return info with
// info.probed left false.
// NOTE(review): this extraction elides many lines (braces, error-branch
// guards, the probeParameters label, the final name assignment and
// info.probed = true) — confirm control flow against the canonical source.
6806 RtAudio::DeviceInfo RtApiAlsa :: getDeviceInfo( unsigned int device )
6808 RtAudio::DeviceInfo info;
6809 info.probed = false;
6811 unsigned nDevices = 0;
6812 int result, subdevice, card;
6816 // Count cards and devices
6819 snd_card_next( &card );
6820 while ( card >= 0 ) {
6821 sprintf( name, "hw:%d", card );
6822 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
6824 errorStream_ << "RtApiAlsa::getDeviceInfo: control open, card = " << card << ", " << snd_strerror( result ) << ".";
6825 errorText_ = errorStream_.str();
6826 error( RtAudioError::WARNING );
6831 result = snd_ctl_pcm_next_device( chandle, &subdevice );
6833 errorStream_ << "RtApiAlsa::getDeviceInfo: control next device, card = " << card << ", " << snd_strerror( result ) << ".";
6834 errorText_ = errorStream_.str();
6835 error( RtAudioError::WARNING );
6838 if ( subdevice < 0 ) break;
// Found the requested device: remember its "hw:card,subdevice" name.
6839 if ( nDevices == device ) {
6840 sprintf( name, "hw:%d,%d", card, subdevice );
6846 snd_ctl_close( chandle );
6847 snd_card_next( &card );
// The "default" device is enumerated last.
6850 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
6851 if ( result == 0 ) {
6852 if ( nDevices == device ) {
6853 strcpy( name, "default" );
6859 if ( nDevices == 0 ) {
6860 errorText_ = "RtApiAlsa::getDeviceInfo: no devices found!";
6861 error( RtAudioError::INVALID_USE );
6865 if ( device >= nDevices ) {
6866 errorText_ = "RtApiAlsa::getDeviceInfo: device ID is invalid!";
6867 error( RtAudioError::INVALID_USE );
6873 // If a stream is already open, we cannot probe the stream devices.
6874 // Thus, use the saved results.
6875 if ( stream_.state != STREAM_CLOSED &&
6876 ( stream_.device[0] == device || stream_.device[1] == device ) ) {
6877 snd_ctl_close( chandle );
6878 if ( device >= devices_.size() ) {
6879 errorText_ = "RtApiAlsa::getDeviceInfo: device ID was not present before stream was opened.";
6880 error( RtAudioError::WARNING );
6883 return devices_[ device ];
6886 int openMode = SND_PCM_ASYNC;
6887 snd_pcm_stream_t stream;
6888 snd_pcm_info_t *pcminfo;
6889 snd_pcm_info_alloca( &pcminfo );
6891 snd_pcm_hw_params_t *params;
// NOTE(review): "¶ms" below looks like mojibake for "&params"
// (the "&para;" HTML entity) — confirm and repair against the canonical
// source; as written this line will not compile.
6892 snd_pcm_hw_params_alloca( ¶ms );
6894 // First try for playback unless default device (which has subdev -1)
6895 stream = SND_PCM_STREAM_PLAYBACK;
6896 snd_pcm_info_set_stream( pcminfo, stream );
6897 if ( subdevice != -1 ) {
6898 snd_pcm_info_set_device( pcminfo, subdevice );
6899 snd_pcm_info_set_subdevice( pcminfo, 0 );
6901 result = snd_ctl_pcm_info( chandle, pcminfo );
6903 // Device probably doesn't support playback.
6908 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK );
6910 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6911 errorText_ = errorStream_.str();
6912 error( RtAudioError::WARNING );
6916 // The device is open ... fill the parameter structure.
6917 result = snd_pcm_hw_params_any( phandle, params );
6919 snd_pcm_close( phandle );
6920 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6921 errorText_ = errorStream_.str();
6922 error( RtAudioError::WARNING );
6926 // Get output channel information.
6928 result = snd_pcm_hw_params_get_channels_max( params, &value );
6930 snd_pcm_close( phandle );
6931 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") output channels, " << snd_strerror( result ) << ".";
6932 errorText_ = errorStream_.str();
6933 error( RtAudioError::WARNING );
6936 info.outputChannels = value;
6937 snd_pcm_close( phandle );
6940 stream = SND_PCM_STREAM_CAPTURE;
6941 snd_pcm_info_set_stream( pcminfo, stream );
6943 // Now try for capture unless default device (with subdev = -1)
6944 if ( subdevice != -1 ) {
6945 result = snd_ctl_pcm_info( chandle, pcminfo );
6946 snd_ctl_close( chandle );
6948 // Device probably doesn't support capture.
// If neither direction works there is nothing more to probe; otherwise
// skip ahead to rate/format probing with the output-only information.
6949 if ( info.outputChannels == 0 ) return info;
6950 goto probeParameters;
6954 snd_ctl_close( chandle );
6956 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
6958 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
6959 errorText_ = errorStream_.str();
6960 error( RtAudioError::WARNING );
6961 if ( info.outputChannels == 0 ) return info;
6962 goto probeParameters;
6965 // The device is open ... fill the parameter structure.
6966 result = snd_pcm_hw_params_any( phandle, params );
6968 snd_pcm_close( phandle );
6969 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
6970 errorText_ = errorStream_.str();
6971 error( RtAudioError::WARNING );
6972 if ( info.outputChannels == 0 ) return info;
6973 goto probeParameters;
6976 result = snd_pcm_hw_params_get_channels_max( params, &value );
6978 snd_pcm_close( phandle );
6979 errorStream_ << "RtApiAlsa::getDeviceInfo: error getting device (" << name << ") input channels, " << snd_strerror( result ) << ".";
6980 errorText_ = errorStream_.str();
6981 error( RtAudioError::WARNING );
6982 if ( info.outputChannels == 0 ) return info;
6983 goto probeParameters;
6985 info.inputChannels = value;
6986 snd_pcm_close( phandle );
6988 // If device opens for both playback and capture, we determine the channels.
6989 if ( info.outputChannels > 0 && info.inputChannels > 0 )
6990 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
6992 // ALSA doesn't provide default devices so we'll use the first available one.
6993 if ( device == 0 && info.outputChannels > 0 )
6994 info.isDefaultOutput = true;
6995 if ( device == 0 && info.inputChannels > 0 )
6996 info.isDefaultInput = true;
6999 // At this point, we just need to figure out the supported data
7000 // formats and sample rates. We'll proceed by opening the device in
7001 // the direction with the maximum number of channels, or playback if
7002 // they are equal. This might limit our sample rate options, but so
7005 if ( info.outputChannels >= info.inputChannels )
7006 stream = SND_PCM_STREAM_PLAYBACK;
7008 stream = SND_PCM_STREAM_CAPTURE;
7009 snd_pcm_info_set_stream( pcminfo, stream );
7011 result = snd_pcm_open( &phandle, name, stream, openMode | SND_PCM_NONBLOCK);
7013 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_open error for device (" << name << "), " << snd_strerror( result ) << ".";
7014 errorText_ = errorStream_.str();
7015 error( RtAudioError::WARNING );
7019 // The device is open ... fill the parameter structure.
7020 result = snd_pcm_hw_params_any( phandle, params );
7022 snd_pcm_close( phandle );
7023 errorStream_ << "RtApiAlsa::getDeviceInfo: snd_pcm_hw_params error for device (" << name << "), " << snd_strerror( result ) << ".";
7024 errorText_ = errorStream_.str();
7025 error( RtAudioError::WARNING );
7029 // Test our discrete set of sample rate values.
7030 info.sampleRates.clear();
7031 for ( unsigned int i=0; i<MAX_SAMPLE_RATES; i++ ) {
7032 if ( snd_pcm_hw_params_test_rate( phandle, params, SAMPLE_RATES[i], 0 ) == 0 ) {
7033 info.sampleRates.push_back( SAMPLE_RATES[i] );
// Preferred rate = highest supported rate that does not exceed 48 kHz
// (falling back to the first supported rate found).
7035 if ( !info.preferredSampleRate || ( SAMPLE_RATES[i] <= 48000 && SAMPLE_RATES[i] > info.preferredSampleRate ) )
7036 info.preferredSampleRate = SAMPLE_RATES[i];
7039 if ( info.sampleRates.size() == 0 ) {
7040 snd_pcm_close( phandle );
7041 errorStream_ << "RtApiAlsa::getDeviceInfo: no supported sample rates found for device (" << name << ").";
7042 errorText_ = errorStream_.str();
7043 error( RtAudioError::WARNING );
7047 // Probe the supported data formats ... we don't care about endian-ness just yet
7048 snd_pcm_format_t format;
7049 info.nativeFormats = 0;
7050 format = SND_PCM_FORMAT_S8;
7051 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7052 info.nativeFormats |= RTAUDIO_SINT8;
7053 format = SND_PCM_FORMAT_S16;
7054 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7055 info.nativeFormats |= RTAUDIO_SINT16;
7056 format = SND_PCM_FORMAT_S24;
7057 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7058 info.nativeFormats |= RTAUDIO_SINT24;
7059 format = SND_PCM_FORMAT_S32;
7060 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7061 info.nativeFormats |= RTAUDIO_SINT32;
7062 format = SND_PCM_FORMAT_FLOAT;
7063 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7064 info.nativeFormats |= RTAUDIO_FLOAT32;
7065 format = SND_PCM_FORMAT_FLOAT64;
7066 if ( snd_pcm_hw_params_test_format( phandle, params, format ) == 0 )
7067 info.nativeFormats |= RTAUDIO_FLOAT64;
7069 // Check that we have at least one supported format
7070 if ( info.nativeFormats == 0 ) {
7071 snd_pcm_close( phandle );
7072 errorStream_ << "RtApiAlsa::getDeviceInfo: pcm device (" << name << ") data format not supported by RtAudio.";
7073 errorText_ = errorStream_.str();
7074 error( RtAudioError::WARNING );
7078 // Get the device name
7080 result = snd_card_get_name( card, &cardname );
7081 if ( result >= 0 ) {
7082 sprintf( name, "hw:%s,%d", cardname, subdevice );
7087 // That's all ... close the device and return
7088 snd_pcm_close( phandle );
// Snapshot the info for every device into devices_.  Called before a
// stream is opened so that getDeviceInfo() can serve cached results for
// devices that cannot be re-probed while the stream holds them open.
7093 void RtApiAlsa :: saveDeviceInfo( void )
7097 unsigned int nDevices = getDeviceCount();
7098 devices_.resize( nDevices );
7099 for ( unsigned int i=0; i<nDevices; i++ )
7100 devices_[i] = getDeviceInfo( i );
// Open one direction (OUTPUT or INPUT) of an ALSA stream on the given
// device and configure it: access mode (interleaved or not), sample
// format (with fallback conversion), sample rate, channel count, period
// size (*bufferSize, updated in place to the value actually granted) and
// period count.  Allocates the AlsaHandle, user/device conversion
// buffers, and — on the first direction opened — spawns the callback
// thread.  Returns true on success; on failure, cleans up and returns
// through the (elided) error path.
//
// Parameters:
//   device       - RtAudio device index (same enumeration as getDeviceCount).
//   mode         - OUTPUT or INPUT; calling a second time with the other
//                  mode upgrades the stream to DUPLEX.
//   channels     - number of channels requested by the user.
//   firstChannel - channel offset within the device.
//   sampleRate   - requested rate (nearest supported is used).
//   format       - requested RtAudioFormat; converted if unsupported.
//   bufferSize   - in/out period size in frames.
//   options      - optional flags (RTAUDIO_ALSA_USE_DEFAULT,
//                  RTAUDIO_NONINTERLEAVED, RTAUDIO_MINIMIZE_LATENCY,
//                  RTAUDIO_SCHEDULE_REALTIME), buffer count, priority.
//
// NOTE(review): this extraction elides many lines (braces, error-branch
// guards, some declarations such as the 'dir' variable used with the
// period-size calls, and the goto error-cleanup labels) — confirm
// structure against the canonical RtAudio.cpp.
7103 bool RtApiAlsa :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
7104 unsigned int firstChannel, unsigned int sampleRate,
7105 RtAudioFormat format, unsigned int *bufferSize,
7106 RtAudio::StreamOptions *options )
7109 #if defined(__RTAUDIO_DEBUG__)
7111 snd_output_stdio_attach(&out, stderr, 0);
7114 // I'm not using the "plug" interface ... too much inconsistent behavior.
7116 unsigned nDevices = 0;
7117 int result, subdevice, card;
// Honor the user's request to bypass enumeration and open "default".
7121 if ( options && options->flags & RTAUDIO_ALSA_USE_DEFAULT )
7122 snprintf(name, sizeof(name), "%s", "default");
7124 // Count cards and devices
7126 snd_card_next( &card );
7127 while ( card >= 0 ) {
7128 sprintf( name, "hw:%d", card );
7129 result = snd_ctl_open( &chandle, name, SND_CTL_NONBLOCK );
7131 errorStream_ << "RtApiAlsa::probeDeviceOpen: control open, card = " << card << ", " << snd_strerror( result ) << ".";
7132 errorText_ = errorStream_.str();
7137 result = snd_ctl_pcm_next_device( chandle, &subdevice );
7138 if ( result < 0 ) break;
7139 if ( subdevice < 0 ) break;
7140 if ( nDevices == device ) {
7141 sprintf( name, "hw:%d,%d", card, subdevice );
7142 snd_ctl_close( chandle );
7147 snd_ctl_close( chandle );
7148 snd_card_next( &card );
7151 result = snd_ctl_open( &chandle, "default", SND_CTL_NONBLOCK );
7152 if ( result == 0 ) {
7153 if ( nDevices == device ) {
7154 strcpy( name, "default" );
7160 if ( nDevices == 0 ) {
7161 // This should not happen because a check is made before this function is called.
7162 errorText_ = "RtApiAlsa::probeDeviceOpen: no devices found!";
7166 if ( device >= nDevices ) {
7167 // This should not happen because a check is made before this function is called.
7168 errorText_ = "RtApiAlsa::probeDeviceOpen: device ID is invalid!";
7175 // The getDeviceInfo() function will not work for a device that is
7176 // already open. Thus, we'll probe the system before opening a
7177 // stream and save the results for use by getDeviceInfo().
7178 if ( mode == OUTPUT || ( mode == INPUT && stream_.mode != OUTPUT ) ) // only do once
7179 this->saveDeviceInfo();
7181 snd_pcm_stream_t stream;
7182 if ( mode == OUTPUT )
7183 stream = SND_PCM_STREAM_PLAYBACK;
7185 stream = SND_PCM_STREAM_CAPTURE;
7188 int openMode = SND_PCM_ASYNC;
7189 result = snd_pcm_open( &phandle, name, stream, openMode );
7191 if ( mode == OUTPUT )
7192 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for output.";
7194 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device (" << name << ") won't open for input.";
7195 errorText_ = errorStream_.str();
7199 // Fill the parameter structure.
7200 snd_pcm_hw_params_t *hw_params;
7201 snd_pcm_hw_params_alloca( &hw_params );
7202 result = snd_pcm_hw_params_any( phandle, hw_params );
7204 snd_pcm_close( phandle );
7205 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") parameters, " << snd_strerror( result ) << ".";
7206 errorText_ = errorStream_.str();
7210 #if defined(__RTAUDIO_DEBUG__)
7211 fprintf( stderr, "\nRtApiAlsa: dump hardware params just after device open:\n\n" );
7212 snd_pcm_hw_params_dump( hw_params, out );
7215 // Set access ... check user preference.
// If the user's preferred access mode fails, fall back to the other mode
// and record deviceInterleaved accordingly so conversion can bridge them.
7216 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) {
7217 stream_.userInterleaved = false;
7218 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7220 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7221 stream_.deviceInterleaved[mode] = true;
7224 stream_.deviceInterleaved[mode] = false;
7227 stream_.userInterleaved = true;
7228 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED );
7230 result = snd_pcm_hw_params_set_access( phandle, hw_params, SND_PCM_ACCESS_RW_NONINTERLEAVED );
7231 stream_.deviceInterleaved[mode] = false;
7234 stream_.deviceInterleaved[mode] = true;
7238 snd_pcm_close( phandle );
7239 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") access, " << snd_strerror( result ) << ".";
7240 errorText_ = errorStream_.str();
7244 // Determine how to set the device format.
7245 stream_.userFormat = format;
7246 snd_pcm_format_t deviceFormat = SND_PCM_FORMAT_UNKNOWN;
7248 if ( format == RTAUDIO_SINT8 )
7249 deviceFormat = SND_PCM_FORMAT_S8;
7250 else if ( format == RTAUDIO_SINT16 )
7251 deviceFormat = SND_PCM_FORMAT_S16;
7252 else if ( format == RTAUDIO_SINT24 )
7253 deviceFormat = SND_PCM_FORMAT_S24;
7254 else if ( format == RTAUDIO_SINT32 )
7255 deviceFormat = SND_PCM_FORMAT_S32;
7256 else if ( format == RTAUDIO_FLOAT32 )
7257 deviceFormat = SND_PCM_FORMAT_FLOAT;
7258 else if ( format == RTAUDIO_FLOAT64 )
7259 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7261 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat) == 0) {
7262 stream_.deviceFormat[mode] = format;
7266 // The user requested format is not natively supported by the device.
// Fallback order: try the widest formats first (FLOAT64 down to SINT8);
// RtAudio will convert between userFormat and deviceFormat.
7267 deviceFormat = SND_PCM_FORMAT_FLOAT64;
7268 if ( snd_pcm_hw_params_test_format( phandle, hw_params, deviceFormat ) == 0 ) {
7269 stream_.deviceFormat[mode] = RTAUDIO_FLOAT64;
7273 deviceFormat = SND_PCM_FORMAT_FLOAT;
7274 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7275 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
7279 deviceFormat = SND_PCM_FORMAT_S32;
7280 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7281 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
7285 deviceFormat = SND_PCM_FORMAT_S24;
7286 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7287 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
7291 deviceFormat = SND_PCM_FORMAT_S16;
7292 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7293 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
7297 deviceFormat = SND_PCM_FORMAT_S8;
7298 if ( snd_pcm_hw_params_test_format(phandle, hw_params, deviceFormat ) == 0 ) {
7299 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
7303 // If we get here, no supported format was found.
7304 snd_pcm_close( phandle );
7305 errorStream_ << "RtApiAlsa::probeDeviceOpen: pcm device " << device << " data format not supported by RtAudio.";
7306 errorText_ = errorStream_.str();
7310 result = snd_pcm_hw_params_set_format( phandle, hw_params, deviceFormat );
7312 snd_pcm_close( phandle );
7313 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting pcm device (" << name << ") data format, " << snd_strerror( result ) << ".";
7314 errorText_ = errorStream_.str();
7318 // Determine whether byte-swaping is necessary.
7319 stream_.doByteSwap[mode] = false;
7320 if ( deviceFormat != SND_PCM_FORMAT_S8 ) {
7321 result = snd_pcm_format_cpu_endian( deviceFormat );
7323 stream_.doByteSwap[mode] = true;
7324 else if (result < 0) {
7325 snd_pcm_close( phandle );
7326 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting pcm device (" << name << ") endian-ness, " << snd_strerror( result ) << ".";
7327 errorText_ = errorStream_.str();
7332 // Set the sample rate.
7333 result = snd_pcm_hw_params_set_rate_near( phandle, hw_params, (unsigned int*) &sampleRate, 0 );
7335 snd_pcm_close( phandle );
7336 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting sample rate on device (" << name << "), " << snd_strerror( result ) << ".";
7337 errorText_ = errorStream_.str();
7341 // Determine the number of channels for this device. We support a possible
7342 // minimum device channel number > than the value requested by the user.
7343 stream_.nUserChannels[mode] = channels;
7345 result = snd_pcm_hw_params_get_channels_max( hw_params, &value );
7346 unsigned int deviceChannels = value;
7347 if ( result < 0 || deviceChannels < channels + firstChannel ) {
7348 snd_pcm_close( phandle );
7349 errorStream_ << "RtApiAlsa::probeDeviceOpen: requested channel parameters not supported by device (" << name << "), " << snd_strerror( result ) << ".";
7350 errorText_ = errorStream_.str();
7354 result = snd_pcm_hw_params_get_channels_min( hw_params, &value );
7356 snd_pcm_close( phandle );
7357 errorStream_ << "RtApiAlsa::probeDeviceOpen: error getting minimum channels for device (" << name << "), " << snd_strerror( result ) << ".";
7358 errorText_ = errorStream_.str();
7361 deviceChannels = value;
7362 if ( deviceChannels < channels + firstChannel ) deviceChannels = channels + firstChannel;
7363 stream_.nDeviceChannels[mode] = deviceChannels;
7365 // Set the device channels.
7366 result = snd_pcm_hw_params_set_channels( phandle, hw_params, deviceChannels );
7368 snd_pcm_close( phandle );
7369 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting channels for device (" << name << "), " << snd_strerror( result ) << ".";
7370 errorText_ = errorStream_.str();
7374 // Set the buffer (or period) size.
7376 snd_pcm_uframes_t periodSize = *bufferSize;
7377 result = snd_pcm_hw_params_set_period_size_near( phandle, hw_params, &periodSize, &dir );
7379 snd_pcm_close( phandle );
7380 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting period size for device (" << name << "), " << snd_strerror( result ) << ".";
7381 errorText_ = errorStream_.str();
// Report back the period size actually granted by the hardware.
7384 *bufferSize = periodSize;
7386 // Set the buffer number, which in ALSA is referred to as the "period".
7387 unsigned int periods = 0;
7388 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) periods = 2;
7389 if ( options && options->numberOfBuffers > 0 ) periods = options->numberOfBuffers;
7390 if ( periods < 2 ) periods = 4; // a fairly safe default value
7391 result = snd_pcm_hw_params_set_periods_near( phandle, hw_params, &periods, &dir );
7393 snd_pcm_close( phandle );
7394 errorStream_ << "RtApiAlsa::probeDeviceOpen: error setting periods for device (" << name << "), " << snd_strerror( result ) << ".";
7395 errorText_ = errorStream_.str();
7399 // If attempting to setup a duplex stream, the bufferSize parameter
7400 // MUST be the same in both directions!
7401 if ( stream_.mode == OUTPUT && mode == INPUT && *bufferSize != stream_.bufferSize ) {
7402 snd_pcm_close( phandle );
7403 errorStream_ << "RtApiAlsa::probeDeviceOpen: system error setting buffer size for duplex stream on device (" << name << ").";
7404 errorText_ = errorStream_.str();
7408 stream_.bufferSize = *bufferSize;
7410 // Install the hardware configuration
7411 result = snd_pcm_hw_params( phandle, hw_params );
7413 snd_pcm_close( phandle );
7414 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing hardware configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7415 errorText_ = errorStream_.str();
7419 #if defined(__RTAUDIO_DEBUG__)
7420 fprintf(stderr, "\nRtApiAlsa: dump hardware params after installation:\n\n");
7421 snd_pcm_hw_params_dump( hw_params, out );
7424 // Set the software configuration to fill buffers with zeros and prevent device stopping on xruns.
7425 snd_pcm_sw_params_t *sw_params = NULL;
7426 snd_pcm_sw_params_alloca( &sw_params );
7427 snd_pcm_sw_params_current( phandle, sw_params );
7428 snd_pcm_sw_params_set_start_threshold( phandle, sw_params, *bufferSize );
7429 snd_pcm_sw_params_set_stop_threshold( phandle, sw_params, ULONG_MAX );
7430 snd_pcm_sw_params_set_silence_threshold( phandle, sw_params, 0 );
7432 // The following two settings were suggested by Theo Veenker
7433 //snd_pcm_sw_params_set_avail_min( phandle, sw_params, *bufferSize );
7434 //snd_pcm_sw_params_set_xfer_align( phandle, sw_params, 1 );
7436 // here are two options for a fix
7437 //snd_pcm_sw_params_set_silence_size( phandle, sw_params, ULONG_MAX );
7438 snd_pcm_uframes_t val;
// Silence the whole buffer on underrun: use the ring-buffer boundary as
// the silence size (ULONG_MAX is not valid for all ALSA versions).
7439 snd_pcm_sw_params_get_boundary( sw_params, &val );
7440 snd_pcm_sw_params_set_silence_size( phandle, sw_params, val );
7442 result = snd_pcm_sw_params( phandle, sw_params );
7444 snd_pcm_close( phandle );
7445 errorStream_ << "RtApiAlsa::probeDeviceOpen: error installing software configuration on device (" << name << "), " << snd_strerror( result ) << ".";
7446 errorText_ = errorStream_.str();
7450 #if defined(__RTAUDIO_DEBUG__)
7451 fprintf(stderr, "\nRtApiAlsa: dump software params after installation:\n\n");
7452 snd_pcm_sw_params_dump( sw_params, out );
7455 // Set flags for buffer conversion
// Conversion is needed when format, channel count, or interleaving differ
// between the user-facing and device-facing sides of the stream.
7456 stream_.doConvertBuffer[mode] = false;
7457 if ( stream_.userFormat != stream_.deviceFormat[mode] )
7458 stream_.doConvertBuffer[mode] = true;
7459 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
7460 stream_.doConvertBuffer[mode] = true;
7461 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
7462 stream_.nUserChannels[mode] > 1 )
7463 stream_.doConvertBuffer[mode] = true;
7465 // Allocate the ApiHandle if necessary and then save.
7466 AlsaHandle *apiInfo = 0;
7467 if ( stream_.apiHandle == 0 ) {
7469 apiInfo = (AlsaHandle *) new AlsaHandle;
7471 catch ( std::bad_alloc& ) {
7472 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating AlsaHandle memory.";
7476 if ( pthread_cond_init( &apiInfo->runnable_cv, NULL ) ) {
7477 errorText_ = "RtApiAlsa::probeDeviceOpen: error initializing pthread condition variable.";
7481 stream_.apiHandle = (void *) apiInfo;
7482 apiInfo->handles[0] = 0;
7483 apiInfo->handles[1] = 0;
7486 apiInfo = (AlsaHandle *) stream_.apiHandle;
7488 apiInfo->handles[mode] = phandle;
7491 // Allocate necessary internal buffers.
7492 unsigned long bufferBytes;
7493 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
7494 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
7495 if ( stream_.userBuffer[mode] == NULL ) {
7496 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating user buffer memory.";
7500 if ( stream_.doConvertBuffer[mode] ) {
7502 bool makeBuffer = true;
7503 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
// For duplex streams, reuse the output device buffer for input if it is
// already large enough.
7504 if ( mode == INPUT ) {
7505 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
7506 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
7507 if ( bufferBytes <= bytesOut ) makeBuffer = false;
7512 bufferBytes *= *bufferSize;
7513 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
7514 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
7515 if ( stream_.deviceBuffer == NULL ) {
7516 errorText_ = "RtApiAlsa::probeDeviceOpen: error allocating device buffer memory.";
7522 stream_.sampleRate = sampleRate;
7523 stream_.nBuffers = periods;
7524 stream_.device[mode] = device;
7525 stream_.state = STREAM_STOPPED;
7527 // Setup the buffer conversion information structure.
7528 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
7530 // Setup thread if necessary.
7531 if ( stream_.mode == OUTPUT && mode == INPUT ) {
7532 // We had already set up an output stream.
7533 stream_.mode = DUPLEX;
7534 // Link the streams if possible.
7535 apiInfo->synchronized = false;
7536 if ( snd_pcm_link( apiInfo->handles[0], apiInfo->handles[1] ) == 0 )
7537 apiInfo->synchronized = true;
7539 errorText_ = "RtApiAlsa::probeDeviceOpen: unable to synchronize input and output devices.";
7540 error( RtAudioError::WARNING );
7544 stream_.mode = mode;
7546 // Setup callback thread.
7547 stream_.callbackInfo.object = (void *) this;
7549 // Set the thread attributes for joinable and realtime scheduling
7550 // priority (optional). The higher priority will only take affect
7551 // if the program is run as root or suid. Note, under Linux
7552 // processes with CAP_SYS_NICE privilege, a user can change
7553 // scheduling policy and priority (thus need not be root). See
7554 // POSIX "capabilities".
7555 pthread_attr_t attr;
7556 pthread_attr_init( &attr );
7557 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
7559 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7560 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
7561 // We previously attempted to increase the audio callback priority
7562 // to SCHED_RR here via the attributes. However, while no errors
7563 // were reported in doing so, it did not work. So, now this is
7564 // done in the alsaCallbackHandler function.
7565 stream_.callbackInfo.doRealtime = true;
7566 int priority = options->priority;
7567 int min = sched_get_priority_min( SCHED_RR );
7568 int max = sched_get_priority_max( SCHED_RR );
// Clamp the requested priority into the valid SCHED_RR range.
7569 if ( priority < min ) priority = min;
7570 else if ( priority > max ) priority = max;
7571 stream_.callbackInfo.priority = priority;
7575 stream_.callbackInfo.isRunning = true;
7576 result = pthread_create( &stream_.callbackInfo.thread, &attr, alsaCallbackHandler, &stream_.callbackInfo );
7577 pthread_attr_destroy( &attr );
7579 stream_.callbackInfo.isRunning = false;
7580 errorText_ = "RtApiAlsa::error creating callback thread!";
// Error cleanup path: release the condition variable, any open PCM
// handles, the AlsaHandle, and all allocated buffers, then mark the
// stream closed.
7589 pthread_cond_destroy( &apiInfo->runnable_cv );
7590 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7591 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7593 stream_.apiHandle = 0;
7596 if ( phandle) snd_pcm_close( phandle );
7598 for ( int i=0; i<2; i++ ) {
7599 if ( stream_.userBuffer[i] ) {
7600 free( stream_.userBuffer[i] );
7601 stream_.userBuffer[i] = 0;
7605 if ( stream_.deviceBuffer ) {
7606 free( stream_.deviceBuffer );
7607 stream_.deviceBuffer = 0;
7610 stream_.state = STREAM_CLOSED;
// Close the stream: stop the callback thread (waking it first if the
// stream is stopped and the thread is blocked on the condition variable),
// drop any running PCM devices, then free the AlsaHandle, the condition
// variable, the PCM handles, and all user/device buffers.  Issues only a
// warning if called with no open stream.
7614 void RtApiAlsa :: closeStream()
7616 if ( stream_.state == STREAM_CLOSED ) {
7617 errorText_ = "RtApiAlsa::closeStream(): no open stream to close!";
7618 error( RtAudioError::WARNING );
7622 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
// Signal the callback thread to exit its wait loop before joining.
7623 stream_.callbackInfo.isRunning = false;
7624 MUTEX_LOCK( &stream_.mutex );
7625 if ( stream_.state == STREAM_STOPPED ) {
7626 apiInfo->runnable = true;
7627 pthread_cond_signal( &apiInfo->runnable_cv );
7629 MUTEX_UNLOCK( &stream_.mutex );
7630 pthread_join( stream_.callbackInfo.thread, NULL );
7632 if ( stream_.state == STREAM_RUNNING ) {
7633 stream_.state = STREAM_STOPPED;
7634 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
7635 snd_pcm_drop( apiInfo->handles[0] );
7636 if ( stream_.mode == INPUT || stream_.mode == DUPLEX )
7637 snd_pcm_drop( apiInfo->handles[1] );
7641 pthread_cond_destroy( &apiInfo->runnable_cv );
7642 if ( apiInfo->handles[0] ) snd_pcm_close( apiInfo->handles[0] );
7643 if ( apiInfo->handles[1] ) snd_pcm_close( apiInfo->handles[1] );
7645 stream_.apiHandle = 0;
7648 for ( int i=0; i<2; i++ ) {
7649 if ( stream_.userBuffer[i] ) {
7650 free( stream_.userBuffer[i] );
7651 stream_.userBuffer[i] = 0;
7655 if ( stream_.deviceBuffer ) {
7656 free( stream_.deviceBuffer );
7657 stream_.deviceBuffer = 0;
7660 stream_.mode = UNINITIALIZED;
7661 stream_.state = STREAM_CLOSED;
// Start a stopped stream: prepare the PCM device(s) if needed, mark the
// stream running, and signal the condition variable so the callback
// thread resumes.  Issues a warning if the stream is already running.
7664 void RtApiAlsa :: startStream()
7666 // This method calls snd_pcm_prepare if the device isn't already in that state.
7669 if ( stream_.state == STREAM_RUNNING ) {
7670 errorText_ = "RtApiAlsa::startStream(): the stream is already running!";
7671 error( RtAudioError::WARNING );
7675 MUTEX_LOCK( &stream_.mutex );
7678 snd_pcm_state_t state;
7679 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7680 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7681 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7682 state = snd_pcm_state( handle[0] );
7683 if ( state != SND_PCM_STATE_PREPARED ) {
7684 result = snd_pcm_prepare( handle[0] );
7686 errorStream_ << "RtApiAlsa::startStream: error preparing output pcm device, " << snd_strerror( result ) << ".";
7687 errorText_ = errorStream_.str();
// The input device only needs separate preparation when it is not
// link-synchronized with the output device.
7693 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7694 result = snd_pcm_drop(handle[1]); // fix to remove stale data received since device has been open
7695 state = snd_pcm_state( handle[1] );
7696 if ( state != SND_PCM_STATE_PREPARED ) {
7697 result = snd_pcm_prepare( handle[1] );
7699 errorStream_ << "RtApiAlsa::startStream: error preparing input pcm device, " << snd_strerror( result ) << ".";
7700 errorText_ = errorStream_.str();
7706 stream_.state = STREAM_RUNNING;
// Wake the callback thread waiting on runnable_cv.
7709 apiInfo->runnable = true;
7710 pthread_cond_signal( &apiInfo->runnable_cv );
7711 MUTEX_UNLOCK( &stream_.mutex );
7713 if ( result >= 0 ) return;
7714 error( RtAudioError::SYSTEM_ERROR );
// Stop a running stream gracefully.  For output: drop immediately when the
// devices are link-synchronized, otherwise drain (play out) pending
// samples.  For unsynchronized input: drop.  Clears the runnable flag so
// the callback thread parks on the condition variable instead of spinning.
7717 void RtApiAlsa :: stopStream()
7720 if ( stream_.state == STREAM_STOPPED ) {
7721 errorText_ = "RtApiAlsa::stopStream(): the stream is already stopped!";
7722 error( RtAudioError::WARNING );
// Flip the state before taking the mutex so the callback loop sees it.
7726 stream_.state = STREAM_STOPPED;
7727 MUTEX_LOCK( &stream_.mutex );
7730 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7731 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7732 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7733 if ( apiInfo->synchronized )
7734 result = snd_pcm_drop( handle[0] );
7736 result = snd_pcm_drain( handle[0] );
7738 errorStream_ << "RtApiAlsa::stopStream: error draining output pcm device, " << snd_strerror( result ) << ".";
7739 errorText_ = errorStream_.str();
7744 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7745 result = snd_pcm_drop( handle[1] );
7747 errorStream_ << "RtApiAlsa::stopStream: error stopping input pcm device, " << snd_strerror( result ) << ".";
7748 errorText_ = errorStream_.str();
7754 apiInfo->runnable = false; // fixes high CPU usage when stopped
7755 MUTEX_UNLOCK( &stream_.mutex );
7757 if ( result >= 0 ) return;
7758 error( RtAudioError::SYSTEM_ERROR );
// Abort a running stream immediately: snd_pcm_drop on both directions
// (no draining of pending output, unlike stopStream).  Clears the
// runnable flag so the callback thread parks on the condition variable.
7761 void RtApiAlsa :: abortStream()
7764 if ( stream_.state == STREAM_STOPPED ) {
7765 errorText_ = "RtApiAlsa::abortStream(): the stream is already stopped!";
7766 error( RtAudioError::WARNING );
7770 stream_.state = STREAM_STOPPED;
7771 MUTEX_LOCK( &stream_.mutex );
7774 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7775 snd_pcm_t **handle = (snd_pcm_t **) apiInfo->handles;
7776 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7777 result = snd_pcm_drop( handle[0] );
7779 errorStream_ << "RtApiAlsa::abortStream: error aborting output pcm device, " << snd_strerror( result ) << ".";
7780 errorText_ = errorStream_.str();
// A link-synchronized input device was already dropped along with the
// output handle above.
7785 if ( ( stream_.mode == INPUT || stream_.mode == DUPLEX ) && !apiInfo->synchronized ) {
7786 result = snd_pcm_drop( handle[1] );
7788 errorStream_ << "RtApiAlsa::abortStream: error aborting input pcm device, " << snd_strerror( result ) << ".";
7789 errorText_ = errorStream_.str();
7795 apiInfo->runnable = false; // fixes high CPU usage when stopped
7796 MUTEX_UNLOCK( &stream_.mutex );
7798 if ( result >= 0 ) return;
7799 error( RtAudioError::SYSTEM_ERROR );
7802 void RtApiAlsa :: callbackEvent()
7804 AlsaHandle *apiInfo = (AlsaHandle *) stream_.apiHandle;
7805 if ( stream_.state == STREAM_STOPPED ) {
7806 MUTEX_LOCK( &stream_.mutex );
7807 while ( !apiInfo->runnable )
7808 pthread_cond_wait( &apiInfo->runnable_cv, &stream_.mutex );
7810 if ( stream_.state != STREAM_RUNNING ) {
7811 MUTEX_UNLOCK( &stream_.mutex );
7814 MUTEX_UNLOCK( &stream_.mutex );
7817 if ( stream_.state == STREAM_CLOSED ) {
7818 errorText_ = "RtApiAlsa::callbackEvent(): the stream is closed ... this shouldn't happen!";
7819 error( RtAudioError::WARNING );
7823 int doStopStream = 0;
7824 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
7825 double streamTime = getStreamTime();
7826 RtAudioStreamStatus status = 0;
7827 if ( stream_.mode != INPUT && apiInfo->xrun[0] == true ) {
7828 status |= RTAUDIO_OUTPUT_UNDERFLOW;
7829 apiInfo->xrun[0] = false;
7831 if ( stream_.mode != OUTPUT && apiInfo->xrun[1] == true ) {
7832 status |= RTAUDIO_INPUT_OVERFLOW;
7833 apiInfo->xrun[1] = false;
7835 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
7836 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
7838 if ( doStopStream == 2 ) {
7843 MUTEX_LOCK( &stream_.mutex );
7845 // The state might change while waiting on a mutex.
7846 if ( stream_.state == STREAM_STOPPED ) goto unlock;
7852 snd_pcm_sframes_t frames;
7853 RtAudioFormat format;
7854 handle = (snd_pcm_t **) apiInfo->handles;
7856 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
7858 // Setup parameters.
7859 if ( stream_.doConvertBuffer[1] ) {
7860 buffer = stream_.deviceBuffer;
7861 channels = stream_.nDeviceChannels[1];
7862 format = stream_.deviceFormat[1];
7865 buffer = stream_.userBuffer[1];
7866 channels = stream_.nUserChannels[1];
7867 format = stream_.userFormat;
7870 // Read samples from device in interleaved/non-interleaved format.
7871 if ( stream_.deviceInterleaved[1] )
7872 result = snd_pcm_readi( handle[1], buffer, stream_.bufferSize );
7874 void *bufs[channels];
7875 size_t offset = stream_.bufferSize * formatBytes( format );
7876 for ( int i=0; i<channels; i++ )
7877 bufs[i] = (void *) (buffer + (i * offset));
7878 result = snd_pcm_readn( handle[1], bufs, stream_.bufferSize );
7881 if ( result < (int) stream_.bufferSize ) {
7882 // Either an error or overrun occured.
7883 if ( result == -EPIPE ) {
7884 snd_pcm_state_t state = snd_pcm_state( handle[1] );
7885 if ( state == SND_PCM_STATE_XRUN ) {
7886 apiInfo->xrun[1] = true;
7887 result = snd_pcm_prepare( handle[1] );
7889 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after overrun, " << snd_strerror( result ) << ".";
7890 errorText_ = errorStream_.str();
7894 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7895 errorText_ = errorStream_.str();
7899 errorStream_ << "RtApiAlsa::callbackEvent: audio read error, " << snd_strerror( result ) << ".";
7900 errorText_ = errorStream_.str();
7902 error( RtAudioError::WARNING );
7906 // Do byte swapping if necessary.
7907 if ( stream_.doByteSwap[1] )
7908 byteSwapBuffer( buffer, stream_.bufferSize * channels, format );
7910 // Do buffer conversion if necessary.
7911 if ( stream_.doConvertBuffer[1] )
7912 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
7914 // Check stream latency
7915 result = snd_pcm_delay( handle[1], &frames );
7916 if ( result == 0 && frames > 0 ) stream_.latency[1] = frames;
7921 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
7923 // Setup parameters and do buffer conversion if necessary.
7924 if ( stream_.doConvertBuffer[0] ) {
7925 buffer = stream_.deviceBuffer;
7926 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
7927 channels = stream_.nDeviceChannels[0];
7928 format = stream_.deviceFormat[0];
7931 buffer = stream_.userBuffer[0];
7932 channels = stream_.nUserChannels[0];
7933 format = stream_.userFormat;
7936 // Do byte swapping if necessary.
7937 if ( stream_.doByteSwap[0] )
7938 byteSwapBuffer(buffer, stream_.bufferSize * channels, format);
7940 // Write samples to device in interleaved/non-interleaved format.
7941 if ( stream_.deviceInterleaved[0] )
7942 result = snd_pcm_writei( handle[0], buffer, stream_.bufferSize );
7944 void *bufs[channels];
7945 size_t offset = stream_.bufferSize * formatBytes( format );
7946 for ( int i=0; i<channels; i++ )
7947 bufs[i] = (void *) (buffer + (i * offset));
7948 result = snd_pcm_writen( handle[0], bufs, stream_.bufferSize );
7951 if ( result < (int) stream_.bufferSize ) {
7952 // Either an error or underrun occured.
7953 if ( result == -EPIPE ) {
7954 snd_pcm_state_t state = snd_pcm_state( handle[0] );
7955 if ( state == SND_PCM_STATE_XRUN ) {
7956 apiInfo->xrun[0] = true;
7957 result = snd_pcm_prepare( handle[0] );
7959 errorStream_ << "RtApiAlsa::callbackEvent: error preparing device after underrun, " << snd_strerror( result ) << ".";
7960 errorText_ = errorStream_.str();
7963 errorText_ = "RtApiAlsa::callbackEvent: audio write error, underrun.";
7966 errorStream_ << "RtApiAlsa::callbackEvent: error, current state is " << snd_pcm_state_name( state ) << ", " << snd_strerror( result ) << ".";
7967 errorText_ = errorStream_.str();
7971 errorStream_ << "RtApiAlsa::callbackEvent: audio write error, " << snd_strerror( result ) << ".";
7972 errorText_ = errorStream_.str();
7974 error( RtAudioError::WARNING );
7978 // Check stream latency
7979 result = snd_pcm_delay( handle[0], &frames );
7980 if ( result == 0 && frames > 0 ) stream_.latency[0] = frames;
7984 MUTEX_UNLOCK( &stream_.mutex );
7986 RtApi::tickStreamTime();
7987 if ( doStopStream == 1 ) this->stopStream();
7990 static void *alsaCallbackHandler( void *ptr )
7992 CallbackInfo *info = (CallbackInfo *) ptr;
7993 RtApiAlsa *object = (RtApiAlsa *) info->object;
7994 bool *isRunning = &info->isRunning;
7996 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
7997 if ( info->doRealtime ) {
7998 pthread_t tID = pthread_self(); // ID of this thread
7999 sched_param prio = { info->priority }; // scheduling priority of thread
8000 pthread_setschedparam( tID, SCHED_RR, &prio );
8004 while ( *isRunning == true ) {
8005 pthread_testcancel();
8006 object->callbackEvent();
8009 pthread_exit( NULL );
8012 //******************** End of __LINUX_ALSA__ *********************//
8015 #if defined(__LINUX_PULSE__)
8017 // Code written by Peter Meerwald, pmeerw@pmeerw.net
8018 // and Tristan Matthews.
8020 #include <pulse/error.h>
8021 #include <pulse/simple.h>
8024 static const unsigned int SUPPORTED_SAMPLERATES[] = { 8000, 16000, 22050, 32000,
8025 44100, 48000, 96000, 0};
8027 struct rtaudio_pa_format_mapping_t {
8028 RtAudioFormat rtaudio_format;
8029 pa_sample_format_t pa_format;
8032 static const rtaudio_pa_format_mapping_t supported_sampleformats[] = {
8033 {RTAUDIO_SINT16, PA_SAMPLE_S16LE},
8034 {RTAUDIO_SINT32, PA_SAMPLE_S32LE},
8035 {RTAUDIO_FLOAT32, PA_SAMPLE_FLOAT32LE},
8036 {0, PA_SAMPLE_INVALID}};
8038 struct PulseAudioHandle {
8042 pthread_cond_t runnable_cv;
8044 PulseAudioHandle() : s_play(0), s_rec(0), runnable(false) { }
8047 RtApiPulse::~RtApiPulse()
8049 if ( stream_.state != STREAM_CLOSED )
8053 unsigned int RtApiPulse::getDeviceCount( void )
8058 RtAudio::DeviceInfo RtApiPulse::getDeviceInfo( unsigned int /*device*/ )
8060 RtAudio::DeviceInfo info;
8062 info.name = "PulseAudio";
8063 info.outputChannels = 2;
8064 info.inputChannels = 2;
8065 info.duplexChannels = 2;
8066 info.isDefaultOutput = true;
8067 info.isDefaultInput = true;
8069 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr )
8070 info.sampleRates.push_back( *sr );
8072 info.preferredSampleRate = 48000;
8073 info.nativeFormats = RTAUDIO_SINT16 | RTAUDIO_SINT32 | RTAUDIO_FLOAT32;
8078 static void *pulseaudio_callback( void * user )
8080 CallbackInfo *cbi = static_cast<CallbackInfo *>( user );
8081 RtApiPulse *context = static_cast<RtApiPulse *>( cbi->object );
8082 volatile bool *isRunning = &cbi->isRunning;
8084 while ( *isRunning ) {
8085 pthread_testcancel();
8086 context->callbackEvent();
8089 pthread_exit( NULL );
8092 void RtApiPulse::closeStream( void )
8094 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8096 stream_.callbackInfo.isRunning = false;
8098 MUTEX_LOCK( &stream_.mutex );
8099 if ( stream_.state == STREAM_STOPPED ) {
8100 pah->runnable = true;
8101 pthread_cond_signal( &pah->runnable_cv );
8103 MUTEX_UNLOCK( &stream_.mutex );
8105 pthread_join( pah->thread, 0 );
8106 if ( pah->s_play ) {
8107 pa_simple_flush( pah->s_play, NULL );
8108 pa_simple_free( pah->s_play );
8111 pa_simple_free( pah->s_rec );
8113 pthread_cond_destroy( &pah->runnable_cv );
8115 stream_.apiHandle = 0;
8118 if ( stream_.userBuffer[0] ) {
8119 free( stream_.userBuffer[0] );
8120 stream_.userBuffer[0] = 0;
8122 if ( stream_.userBuffer[1] ) {
8123 free( stream_.userBuffer[1] );
8124 stream_.userBuffer[1] = 0;
8127 stream_.state = STREAM_CLOSED;
8128 stream_.mode = UNINITIALIZED;
8131 void RtApiPulse::callbackEvent( void )
8133 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8135 if ( stream_.state == STREAM_STOPPED ) {
8136 MUTEX_LOCK( &stream_.mutex );
8137 while ( !pah->runnable )
8138 pthread_cond_wait( &pah->runnable_cv, &stream_.mutex );
8140 if ( stream_.state != STREAM_RUNNING ) {
8141 MUTEX_UNLOCK( &stream_.mutex );
8144 MUTEX_UNLOCK( &stream_.mutex );
8147 if ( stream_.state == STREAM_CLOSED ) {
8148 errorText_ = "RtApiPulse::callbackEvent(): the stream is closed ... "
8149 "this shouldn't happen!";
8150 error( RtAudioError::WARNING );
8154 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
8155 double streamTime = getStreamTime();
8156 RtAudioStreamStatus status = 0;
8157 int doStopStream = callback( stream_.userBuffer[OUTPUT], stream_.userBuffer[INPUT],
8158 stream_.bufferSize, streamTime, status,
8159 stream_.callbackInfo.userData );
8161 if ( doStopStream == 2 ) {
8166 MUTEX_LOCK( &stream_.mutex );
8167 void *pulse_in = stream_.doConvertBuffer[INPUT] ? stream_.deviceBuffer : stream_.userBuffer[INPUT];
8168 void *pulse_out = stream_.doConvertBuffer[OUTPUT] ? stream_.deviceBuffer : stream_.userBuffer[OUTPUT];
8170 if ( stream_.state != STREAM_RUNNING )
8175 if (stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
8176 if ( stream_.doConvertBuffer[OUTPUT] ) {
8177 convertBuffer( stream_.deviceBuffer,
8178 stream_.userBuffer[OUTPUT],
8179 stream_.convertInfo[OUTPUT] );
8180 bytes = stream_.nDeviceChannels[OUTPUT] * stream_.bufferSize *
8181 formatBytes( stream_.deviceFormat[OUTPUT] );
8183 bytes = stream_.nUserChannels[OUTPUT] * stream_.bufferSize *
8184 formatBytes( stream_.userFormat );
8186 if ( pa_simple_write( pah->s_play, pulse_out, bytes, &pa_error ) < 0 ) {
8187 errorStream_ << "RtApiPulse::callbackEvent: audio write error, " <<
8188 pa_strerror( pa_error ) << ".";
8189 errorText_ = errorStream_.str();
8190 error( RtAudioError::WARNING );
8194 if ( stream_.mode == INPUT || stream_.mode == DUPLEX) {
8195 if ( stream_.doConvertBuffer[INPUT] )
8196 bytes = stream_.nDeviceChannels[INPUT] * stream_.bufferSize *
8197 formatBytes( stream_.deviceFormat[INPUT] );
8199 bytes = stream_.nUserChannels[INPUT] * stream_.bufferSize *
8200 formatBytes( stream_.userFormat );
8202 if ( pa_simple_read( pah->s_rec, pulse_in, bytes, &pa_error ) < 0 ) {
8203 errorStream_ << "RtApiPulse::callbackEvent: audio read error, " <<
8204 pa_strerror( pa_error ) << ".";
8205 errorText_ = errorStream_.str();
8206 error( RtAudioError::WARNING );
8208 if ( stream_.doConvertBuffer[INPUT] ) {
8209 convertBuffer( stream_.userBuffer[INPUT],
8210 stream_.deviceBuffer,
8211 stream_.convertInfo[INPUT] );
8216 MUTEX_UNLOCK( &stream_.mutex );
8217 RtApi::tickStreamTime();
8219 if ( doStopStream == 1 )
8223 void RtApiPulse::startStream( void )
8225 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8227 if ( stream_.state == STREAM_CLOSED ) {
8228 errorText_ = "RtApiPulse::startStream(): the stream is not open!";
8229 error( RtAudioError::INVALID_USE );
8232 if ( stream_.state == STREAM_RUNNING ) {
8233 errorText_ = "RtApiPulse::startStream(): the stream is already running!";
8234 error( RtAudioError::WARNING );
8238 MUTEX_LOCK( &stream_.mutex );
8240 stream_.state = STREAM_RUNNING;
8242 pah->runnable = true;
8243 pthread_cond_signal( &pah->runnable_cv );
8244 MUTEX_UNLOCK( &stream_.mutex );
8247 void RtApiPulse::stopStream( void )
8249 PulseAudioHandle *pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8251 if ( stream_.state == STREAM_CLOSED ) {
8252 errorText_ = "RtApiPulse::stopStream(): the stream is not open!";
8253 error( RtAudioError::INVALID_USE );
8256 if ( stream_.state == STREAM_STOPPED ) {
8257 errorText_ = "RtApiPulse::stopStream(): the stream is already stopped!";
8258 error( RtAudioError::WARNING );
8262 stream_.state = STREAM_STOPPED;
8263 MUTEX_LOCK( &stream_.mutex );
8265 if ( pah && pah->s_play ) {
8267 if ( pa_simple_drain( pah->s_play, &pa_error ) < 0 ) {
8268 errorStream_ << "RtApiPulse::stopStream: error draining output device, " <<
8269 pa_strerror( pa_error ) << ".";
8270 errorText_ = errorStream_.str();
8271 MUTEX_UNLOCK( &stream_.mutex );
8272 error( RtAudioError::SYSTEM_ERROR );
8277 stream_.state = STREAM_STOPPED;
8278 MUTEX_UNLOCK( &stream_.mutex );
8281 void RtApiPulse::abortStream( void )
8283 PulseAudioHandle *pah = static_cast<PulseAudioHandle*>( stream_.apiHandle );
8285 if ( stream_.state == STREAM_CLOSED ) {
8286 errorText_ = "RtApiPulse::abortStream(): the stream is not open!";
8287 error( RtAudioError::INVALID_USE );
8290 if ( stream_.state == STREAM_STOPPED ) {
8291 errorText_ = "RtApiPulse::abortStream(): the stream is already stopped!";
8292 error( RtAudioError::WARNING );
8296 stream_.state = STREAM_STOPPED;
8297 MUTEX_LOCK( &stream_.mutex );
8299 if ( pah && pah->s_play ) {
8301 if ( pa_simple_flush( pah->s_play, &pa_error ) < 0 ) {
8302 errorStream_ << "RtApiPulse::abortStream: error flushing output device, " <<
8303 pa_strerror( pa_error ) << ".";
8304 errorText_ = errorStream_.str();
8305 MUTEX_UNLOCK( &stream_.mutex );
8306 error( RtAudioError::SYSTEM_ERROR );
8311 stream_.state = STREAM_STOPPED;
8312 MUTEX_UNLOCK( &stream_.mutex );
8315 bool RtApiPulse::probeDeviceOpen( unsigned int device, StreamMode mode,
8316 unsigned int channels, unsigned int firstChannel,
8317 unsigned int sampleRate, RtAudioFormat format,
8318 unsigned int *bufferSize, RtAudio::StreamOptions *options )
8320 PulseAudioHandle *pah = 0;
8321 unsigned long bufferBytes = 0;
8324 if ( device != 0 ) return false;
8325 if ( mode != INPUT && mode != OUTPUT ) return false;
8326 if ( channels != 1 && channels != 2 ) {
8327 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported number of channels.";
8330 ss.channels = channels;
8332 if ( firstChannel != 0 ) return false;
8334 bool sr_found = false;
8335 for ( const unsigned int *sr = SUPPORTED_SAMPLERATES; *sr; ++sr ) {
8336 if ( sampleRate == *sr ) {
8338 stream_.sampleRate = sampleRate;
8339 ss.rate = sampleRate;
8344 errorText_ = "RtApiPulse::probeDeviceOpen: unsupported sample rate.";
8349 for ( const rtaudio_pa_format_mapping_t *sf = supported_sampleformats;
8350 sf->rtaudio_format && sf->pa_format != PA_SAMPLE_INVALID; ++sf ) {
8351 if ( format == sf->rtaudio_format ) {
8353 stream_.userFormat = sf->rtaudio_format;
8354 stream_.deviceFormat[mode] = stream_.userFormat;
8355 ss.format = sf->pa_format;
8359 if ( !sf_found ) { // Use internal data format conversion.
8360 stream_.userFormat = format;
8361 stream_.deviceFormat[mode] = RTAUDIO_FLOAT32;
8362 ss.format = PA_SAMPLE_FLOAT32LE;
8365 // Set other stream parameters.
8366 if ( options && options->flags & RTAUDIO_NONINTERLEAVED ) stream_.userInterleaved = false;
8367 else stream_.userInterleaved = true;
8368 stream_.deviceInterleaved[mode] = true;
8369 stream_.nBuffers = 1;
8370 stream_.doByteSwap[mode] = false;
8371 stream_.nUserChannels[mode] = channels;
8372 stream_.nDeviceChannels[mode] = channels + firstChannel;
8373 stream_.channelOffset[mode] = 0;
8374 std::string streamName = "RtAudio";
8376 // Set flags for buffer conversion.
8377 stream_.doConvertBuffer[mode] = false;
8378 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8379 stream_.doConvertBuffer[mode] = true;
8380 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8381 stream_.doConvertBuffer[mode] = true;
8383 // Allocate necessary internal buffers.
8384 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
8385 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
8386 if ( stream_.userBuffer[mode] == NULL ) {
8387 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating user buffer memory.";
8390 stream_.bufferSize = *bufferSize;
8392 if ( stream_.doConvertBuffer[mode] ) {
8394 bool makeBuffer = true;
8395 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
8396 if ( mode == INPUT ) {
8397 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
8398 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
8399 if ( bufferBytes <= bytesOut ) makeBuffer = false;
8404 bufferBytes *= *bufferSize;
8405 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
8406 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
8407 if ( stream_.deviceBuffer == NULL ) {
8408 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating device buffer memory.";
8414 stream_.device[mode] = device;
8416 // Setup the buffer conversion information structure.
8417 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
8419 if ( !stream_.apiHandle ) {
8420 PulseAudioHandle *pah = new PulseAudioHandle;
8422 errorText_ = "RtApiPulse::probeDeviceOpen: error allocating memory for handle.";
8426 stream_.apiHandle = pah;
8427 if ( pthread_cond_init( &pah->runnable_cv, NULL ) != 0 ) {
8428 errorText_ = "RtApiPulse::probeDeviceOpen: error creating condition variable.";
8432 pah = static_cast<PulseAudioHandle *>( stream_.apiHandle );
8435 if ( options && !options->streamName.empty() ) streamName = options->streamName;
8438 pa_buffer_attr buffer_attr;
8439 buffer_attr.fragsize = bufferBytes;
8440 buffer_attr.maxlength = -1;
8442 pah->s_rec = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_RECORD, NULL, "Record", &ss, NULL, &buffer_attr, &error );
8443 if ( !pah->s_rec ) {
8444 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting input to PulseAudio server.";
8449 pah->s_play = pa_simple_new( NULL, streamName.c_str(), PA_STREAM_PLAYBACK, NULL, "Playback", &ss, NULL, NULL, &error );
8450 if ( !pah->s_play ) {
8451 errorText_ = "RtApiPulse::probeDeviceOpen: error connecting output to PulseAudio server.";
8459 if ( stream_.mode == UNINITIALIZED )
8460 stream_.mode = mode;
8461 else if ( stream_.mode == mode )
8464 stream_.mode = DUPLEX;
8466 if ( !stream_.callbackInfo.isRunning ) {
8467 stream_.callbackInfo.object = this;
8468 stream_.callbackInfo.isRunning = true;
8469 if ( pthread_create( &pah->thread, NULL, pulseaudio_callback, (void *)&stream_.callbackInfo) != 0 ) {
8470 errorText_ = "RtApiPulse::probeDeviceOpen: error creating thread.";
8475 stream_.state = STREAM_STOPPED;
8479 if ( pah && stream_.callbackInfo.isRunning ) {
8480 pthread_cond_destroy( &pah->runnable_cv );
8482 stream_.apiHandle = 0;
8485 for ( int i=0; i<2; i++ ) {
8486 if ( stream_.userBuffer[i] ) {
8487 free( stream_.userBuffer[i] );
8488 stream_.userBuffer[i] = 0;
8492 if ( stream_.deviceBuffer ) {
8493 free( stream_.deviceBuffer );
8494 stream_.deviceBuffer = 0;
8500 //******************** End of __LINUX_PULSE__ *********************//
8503 #if defined(__LINUX_OSS__)
8506 #include <sys/ioctl.h>
8509 #include <sys/soundcard.h>
8513 static void *ossCallbackHandler(void * ptr);
// A structure to hold various information related to the OSS API
// implementation.
struct OssHandle {
  int id[2];             // device ids
  bool xrun[2];          // per-direction over/underrun flags
  bool triggered;        // true once the duplex devices have been triggered
  pthread_cond_t runnable;

  OssHandle()
    :triggered(false) { id[0] = 0; id[1] = 0; xrun[0] = false; xrun[1] = false; }
};
8527 RtApiOss :: RtApiOss()
8529 // Nothing to do here.
8532 RtApiOss :: ~RtApiOss()
8534 if ( stream_.state != STREAM_CLOSED ) closeStream();
8537 unsigned int RtApiOss :: getDeviceCount( void )
8539 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8540 if ( mixerfd == -1 ) {
8541 errorText_ = "RtApiOss::getDeviceCount: error opening '/dev/mixer'.";
8542 error( RtAudioError::WARNING );
8546 oss_sysinfo sysinfo;
8547 if ( ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo ) == -1 ) {
8549 errorText_ = "RtApiOss::getDeviceCount: error getting sysinfo, OSS version >= 4.0 is required.";
8550 error( RtAudioError::WARNING );
8555 return sysinfo.numaudios;
8558 RtAudio::DeviceInfo RtApiOss :: getDeviceInfo( unsigned int device )
8560 RtAudio::DeviceInfo info;
8561 info.probed = false;
8563 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8564 if ( mixerfd == -1 ) {
8565 errorText_ = "RtApiOss::getDeviceInfo: error opening '/dev/mixer'.";
8566 error( RtAudioError::WARNING );
8570 oss_sysinfo sysinfo;
8571 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8572 if ( result == -1 ) {
8574 errorText_ = "RtApiOss::getDeviceInfo: error getting sysinfo, OSS version >= 4.0 is required.";
8575 error( RtAudioError::WARNING );
8579 unsigned nDevices = sysinfo.numaudios;
8580 if ( nDevices == 0 ) {
8582 errorText_ = "RtApiOss::getDeviceInfo: no devices found!";
8583 error( RtAudioError::INVALID_USE );
8587 if ( device >= nDevices ) {
8589 errorText_ = "RtApiOss::getDeviceInfo: device ID is invalid!";
8590 error( RtAudioError::INVALID_USE );
8594 oss_audioinfo ainfo;
8596 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8598 if ( result == -1 ) {
8599 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8600 errorText_ = errorStream_.str();
8601 error( RtAudioError::WARNING );
8606 if ( ainfo.caps & PCM_CAP_OUTPUT ) info.outputChannels = ainfo.max_channels;
8607 if ( ainfo.caps & PCM_CAP_INPUT ) info.inputChannels = ainfo.max_channels;
8608 if ( ainfo.caps & PCM_CAP_DUPLEX ) {
8609 if ( info.outputChannels > 0 && info.inputChannels > 0 && ainfo.caps & PCM_CAP_DUPLEX )
8610 info.duplexChannels = (info.outputChannels > info.inputChannels) ? info.inputChannels : info.outputChannels;
8613 // Probe data formats ... do for input
8614 unsigned long mask = ainfo.iformats;
8615 if ( mask & AFMT_S16_LE || mask & AFMT_S16_BE )
8616 info.nativeFormats |= RTAUDIO_SINT16;
8617 if ( mask & AFMT_S8 )
8618 info.nativeFormats |= RTAUDIO_SINT8;
8619 if ( mask & AFMT_S32_LE || mask & AFMT_S32_BE )
8620 info.nativeFormats |= RTAUDIO_SINT32;
8622 if ( mask & AFMT_FLOAT )
8623 info.nativeFormats |= RTAUDIO_FLOAT32;
8625 if ( mask & AFMT_S24_LE || mask & AFMT_S24_BE )
8626 info.nativeFormats |= RTAUDIO_SINT24;
8628 // Check that we have at least one supported format
8629 if ( info.nativeFormats == 0 ) {
8630 errorStream_ << "RtApiOss::getDeviceInfo: device (" << ainfo.name << ") data format not supported by RtAudio.";
8631 errorText_ = errorStream_.str();
8632 error( RtAudioError::WARNING );
8636 // Probe the supported sample rates.
8637 info.sampleRates.clear();
8638 if ( ainfo.nrates ) {
8639 for ( unsigned int i=0; i<ainfo.nrates; i++ ) {
8640 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8641 if ( ainfo.rates[i] == SAMPLE_RATES[k] ) {
8642 info.sampleRates.push_back( SAMPLE_RATES[k] );
8644 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8645 info.preferredSampleRate = SAMPLE_RATES[k];
8653 // Check min and max rate values;
8654 for ( unsigned int k=0; k<MAX_SAMPLE_RATES; k++ ) {
8655 if ( ainfo.min_rate <= (int) SAMPLE_RATES[k] && ainfo.max_rate >= (int) SAMPLE_RATES[k] ) {
8656 info.sampleRates.push_back( SAMPLE_RATES[k] );
8658 if ( !info.preferredSampleRate || ( SAMPLE_RATES[k] <= 48000 && SAMPLE_RATES[k] > info.preferredSampleRate ) )
8659 info.preferredSampleRate = SAMPLE_RATES[k];
8664 if ( info.sampleRates.size() == 0 ) {
8665 errorStream_ << "RtApiOss::getDeviceInfo: no supported sample rates found for device (" << ainfo.name << ").";
8666 errorText_ = errorStream_.str();
8667 error( RtAudioError::WARNING );
8671 info.name = ainfo.name;
8678 bool RtApiOss :: probeDeviceOpen( unsigned int device, StreamMode mode, unsigned int channels,
8679 unsigned int firstChannel, unsigned int sampleRate,
8680 RtAudioFormat format, unsigned int *bufferSize,
8681 RtAudio::StreamOptions *options )
8683 int mixerfd = open( "/dev/mixer", O_RDWR, 0 );
8684 if ( mixerfd == -1 ) {
8685 errorText_ = "RtApiOss::probeDeviceOpen: error opening '/dev/mixer'.";
8689 oss_sysinfo sysinfo;
8690 int result = ioctl( mixerfd, SNDCTL_SYSINFO, &sysinfo );
8691 if ( result == -1 ) {
8693 errorText_ = "RtApiOss::probeDeviceOpen: error getting sysinfo, OSS version >= 4.0 is required.";
8697 unsigned nDevices = sysinfo.numaudios;
8698 if ( nDevices == 0 ) {
8699 // This should not happen because a check is made before this function is called.
8701 errorText_ = "RtApiOss::probeDeviceOpen: no devices found!";
8705 if ( device >= nDevices ) {
8706 // This should not happen because a check is made before this function is called.
8708 errorText_ = "RtApiOss::probeDeviceOpen: device ID is invalid!";
8712 oss_audioinfo ainfo;
8714 result = ioctl( mixerfd, SNDCTL_AUDIOINFO, &ainfo );
8716 if ( result == -1 ) {
8717 errorStream_ << "RtApiOss::getDeviceInfo: error getting device (" << ainfo.name << ") info.";
8718 errorText_ = errorStream_.str();
8722 // Check if device supports input or output
8723 if ( ( mode == OUTPUT && !( ainfo.caps & PCM_CAP_OUTPUT ) ) ||
8724 ( mode == INPUT && !( ainfo.caps & PCM_CAP_INPUT ) ) ) {
8725 if ( mode == OUTPUT )
8726 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support output.";
8728 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support input.";
8729 errorText_ = errorStream_.str();
8734 OssHandle *handle = (OssHandle *) stream_.apiHandle;
8735 if ( mode == OUTPUT )
8737 else { // mode == INPUT
8738 if (stream_.mode == OUTPUT && stream_.device[0] == device) {
8739 // We just set the same device for playback ... close and reopen for duplex (OSS only).
8740 close( handle->id[0] );
8742 if ( !( ainfo.caps & PCM_CAP_DUPLEX ) ) {
8743 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support duplex mode.";
8744 errorText_ = errorStream_.str();
8747 // Check that the number previously set channels is the same.
8748 if ( stream_.nUserChannels[0] != channels ) {
8749 errorStream_ << "RtApiOss::probeDeviceOpen: input/output channels must be equal for OSS duplex device (" << ainfo.name << ").";
8750 errorText_ = errorStream_.str();
8759 // Set exclusive access if specified.
8760 if ( options && options->flags & RTAUDIO_HOG_DEVICE ) flags |= O_EXCL;
8762 // Try to open the device.
8764 fd = open( ainfo.devnode, flags, 0 );
8766 if ( errno == EBUSY )
8767 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") is busy.";
8769 errorStream_ << "RtApiOss::probeDeviceOpen: error opening device (" << ainfo.name << ").";
8770 errorText_ = errorStream_.str();
8774 // For duplex operation, specifically set this mode (this doesn't seem to work).
8776 if ( flags | O_RDWR ) {
8777 result = ioctl( fd, SNDCTL_DSP_SETDUPLEX, NULL );
8778 if ( result == -1) {
8779 errorStream_ << "RtApiOss::probeDeviceOpen: error setting duplex mode for device (" << ainfo.name << ").";
8780 errorText_ = errorStream_.str();
8786 // Check the device channel support.
8787 stream_.nUserChannels[mode] = channels;
8788 if ( ainfo.max_channels < (int)(channels + firstChannel) ) {
8790 errorStream_ << "RtApiOss::probeDeviceOpen: the device (" << ainfo.name << ") does not support requested channel parameters.";
8791 errorText_ = errorStream_.str();
8795 // Set the number of channels.
8796 int deviceChannels = channels + firstChannel;
8797 result = ioctl( fd, SNDCTL_DSP_CHANNELS, &deviceChannels );
8798 if ( result == -1 || deviceChannels < (int)(channels + firstChannel) ) {
8800 errorStream_ << "RtApiOss::probeDeviceOpen: error setting channel parameters on device (" << ainfo.name << ").";
8801 errorText_ = errorStream_.str();
8804 stream_.nDeviceChannels[mode] = deviceChannels;
8806 // Get the data format mask
8808 result = ioctl( fd, SNDCTL_DSP_GETFMTS, &mask );
8809 if ( result == -1 ) {
8811 errorStream_ << "RtApiOss::probeDeviceOpen: error getting device (" << ainfo.name << ") data formats.";
8812 errorText_ = errorStream_.str();
8816 // Determine how to set the device format.
8817 stream_.userFormat = format;
8818 int deviceFormat = -1;
8819 stream_.doByteSwap[mode] = false;
8820 if ( format == RTAUDIO_SINT8 ) {
8821 if ( mask & AFMT_S8 ) {
8822 deviceFormat = AFMT_S8;
8823 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8826 else if ( format == RTAUDIO_SINT16 ) {
8827 if ( mask & AFMT_S16_NE ) {
8828 deviceFormat = AFMT_S16_NE;
8829 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8831 else if ( mask & AFMT_S16_OE ) {
8832 deviceFormat = AFMT_S16_OE;
8833 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8834 stream_.doByteSwap[mode] = true;
8837 else if ( format == RTAUDIO_SINT24 ) {
8838 if ( mask & AFMT_S24_NE ) {
8839 deviceFormat = AFMT_S24_NE;
8840 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8842 else if ( mask & AFMT_S24_OE ) {
8843 deviceFormat = AFMT_S24_OE;
8844 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8845 stream_.doByteSwap[mode] = true;
8848 else if ( format == RTAUDIO_SINT32 ) {
8849 if ( mask & AFMT_S32_NE ) {
8850 deviceFormat = AFMT_S32_NE;
8851 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8853 else if ( mask & AFMT_S32_OE ) {
8854 deviceFormat = AFMT_S32_OE;
8855 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8856 stream_.doByteSwap[mode] = true;
8860 if ( deviceFormat == -1 ) {
8861 // The user requested format is not natively supported by the device.
8862 if ( mask & AFMT_S16_NE ) {
8863 deviceFormat = AFMT_S16_NE;
8864 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8866 else if ( mask & AFMT_S32_NE ) {
8867 deviceFormat = AFMT_S32_NE;
8868 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8870 else if ( mask & AFMT_S24_NE ) {
8871 deviceFormat = AFMT_S24_NE;
8872 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8874 else if ( mask & AFMT_S16_OE ) {
8875 deviceFormat = AFMT_S16_OE;
8876 stream_.deviceFormat[mode] = RTAUDIO_SINT16;
8877 stream_.doByteSwap[mode] = true;
8879 else if ( mask & AFMT_S32_OE ) {
8880 deviceFormat = AFMT_S32_OE;
8881 stream_.deviceFormat[mode] = RTAUDIO_SINT32;
8882 stream_.doByteSwap[mode] = true;
8884 else if ( mask & AFMT_S24_OE ) {
8885 deviceFormat = AFMT_S24_OE;
8886 stream_.deviceFormat[mode] = RTAUDIO_SINT24;
8887 stream_.doByteSwap[mode] = true;
8889 else if ( mask & AFMT_S8) {
8890 deviceFormat = AFMT_S8;
8891 stream_.deviceFormat[mode] = RTAUDIO_SINT8;
8895 if ( stream_.deviceFormat[mode] == 0 ) {
8896 // This really shouldn't happen ...
8898 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") data format not supported by RtAudio.";
8899 errorText_ = errorStream_.str();
8903 // Set the data format.
8904 int temp = deviceFormat;
8905 result = ioctl( fd, SNDCTL_DSP_SETFMT, &deviceFormat );
8906 if ( result == -1 || deviceFormat != temp ) {
8908 errorStream_ << "RtApiOss::probeDeviceOpen: error setting data format on device (" << ainfo.name << ").";
8909 errorText_ = errorStream_.str();
8913 // Attempt to set the buffer size. According to OSS, the minimum
8914 // number of buffers is two. The supposed minimum buffer size is 16
8915 // bytes, so that will be our lower bound. The argument to this
8916 // call is in the form 0xMMMMSSSS (hex), where the buffer size (in
8917 // bytes) is given as 2^SSSS and the number of buffers as 2^MMMM.
8918 // We'll check the actual value used near the end of the setup
8920 int ossBufferBytes = *bufferSize * formatBytes( stream_.deviceFormat[mode] ) * deviceChannels;
8921 if ( ossBufferBytes < 16 ) ossBufferBytes = 16;
8923 if ( options ) buffers = options->numberOfBuffers;
8924 if ( options && options->flags & RTAUDIO_MINIMIZE_LATENCY ) buffers = 2;
8925 if ( buffers < 2 ) buffers = 3;
8926 temp = ((int) buffers << 16) + (int)( log10( (double)ossBufferBytes ) / log10( 2.0 ) );
8927 result = ioctl( fd, SNDCTL_DSP_SETFRAGMENT, &temp );
8928 if ( result == -1 ) {
8930 errorStream_ << "RtApiOss::probeDeviceOpen: error setting buffer size on device (" << ainfo.name << ").";
8931 errorText_ = errorStream_.str();
8934 stream_.nBuffers = buffers;
8936 // Save buffer size (in sample frames).
8937 *bufferSize = ossBufferBytes / ( formatBytes(stream_.deviceFormat[mode]) * deviceChannels );
8938 stream_.bufferSize = *bufferSize;
8940 // Set the sample rate.
8941 int srate = sampleRate;
8942 result = ioctl( fd, SNDCTL_DSP_SPEED, &srate );
8943 if ( result == -1 ) {
8945 errorStream_ << "RtApiOss::probeDeviceOpen: error setting sample rate (" << sampleRate << ") on device (" << ainfo.name << ").";
8946 errorText_ = errorStream_.str();
8950 // Verify the sample rate setup worked.
8951 if ( abs( srate - (int)sampleRate ) > 100 ) {
8953 errorStream_ << "RtApiOss::probeDeviceOpen: device (" << ainfo.name << ") does not support sample rate (" << sampleRate << ").";
8954 errorText_ = errorStream_.str();
8957 stream_.sampleRate = sampleRate;
8959 if ( mode == INPUT && stream_.mode == OUTPUT && stream_.device[0] == device) {
8960 // We're doing duplex setup here.
8961 stream_.deviceFormat[0] = stream_.deviceFormat[1];
8962 stream_.nDeviceChannels[0] = deviceChannels;
8965 // Set interleaving parameters.
8966 stream_.userInterleaved = true;
8967 stream_.deviceInterleaved[mode] = true;
8968 if ( options && options->flags & RTAUDIO_NONINTERLEAVED )
8969 stream_.userInterleaved = false;
8971 // Set flags for buffer conversion
8972 stream_.doConvertBuffer[mode] = false;
8973 if ( stream_.userFormat != stream_.deviceFormat[mode] )
8974 stream_.doConvertBuffer[mode] = true;
8975 if ( stream_.nUserChannels[mode] < stream_.nDeviceChannels[mode] )
8976 stream_.doConvertBuffer[mode] = true;
8977 if ( stream_.userInterleaved != stream_.deviceInterleaved[mode] &&
8978 stream_.nUserChannels[mode] > 1 )
8979 stream_.doConvertBuffer[mode] = true;
8981 // Allocate the stream handles if necessary and then save.
8982 if ( stream_.apiHandle == 0 ) {
8984 handle = new OssHandle;
8986 catch ( std::bad_alloc& ) {
8987 errorText_ = "RtApiOss::probeDeviceOpen: error allocating OssHandle memory.";
8991 if ( pthread_cond_init( &handle->runnable, NULL ) ) {
8992 errorText_ = "RtApiOss::probeDeviceOpen: error initializing pthread condition variable.";
8996 stream_.apiHandle = (void *) handle;
8999 handle = (OssHandle *) stream_.apiHandle;
9001 handle->id[mode] = fd;
9003 // Allocate necessary internal buffers.
9004 unsigned long bufferBytes;
9005 bufferBytes = stream_.nUserChannels[mode] * *bufferSize * formatBytes( stream_.userFormat );
9006 stream_.userBuffer[mode] = (char *) calloc( bufferBytes, 1 );
9007 if ( stream_.userBuffer[mode] == NULL ) {
9008 errorText_ = "RtApiOss::probeDeviceOpen: error allocating user buffer memory.";
9012 if ( stream_.doConvertBuffer[mode] ) {
9014 bool makeBuffer = true;
9015 bufferBytes = stream_.nDeviceChannels[mode] * formatBytes( stream_.deviceFormat[mode] );
9016 if ( mode == INPUT ) {
9017 if ( stream_.mode == OUTPUT && stream_.deviceBuffer ) {
9018 unsigned long bytesOut = stream_.nDeviceChannels[0] * formatBytes( stream_.deviceFormat[0] );
9019 if ( bufferBytes <= bytesOut ) makeBuffer = false;
9024 bufferBytes *= *bufferSize;
9025 if ( stream_.deviceBuffer ) free( stream_.deviceBuffer );
9026 stream_.deviceBuffer = (char *) calloc( bufferBytes, 1 );
9027 if ( stream_.deviceBuffer == NULL ) {
9028 errorText_ = "RtApiOss::probeDeviceOpen: error allocating device buffer memory.";
9034 stream_.device[mode] = device;
9035 stream_.state = STREAM_STOPPED;
9037 // Setup the buffer conversion information structure.
9038 if ( stream_.doConvertBuffer[mode] ) setConvertInfo( mode, firstChannel );
9040 // Setup thread if necessary.
9041 if ( stream_.mode == OUTPUT && mode == INPUT ) {
9042 // We had already set up an output stream.
9043 stream_.mode = DUPLEX;
9044 if ( stream_.device[0] == device ) handle->id[0] = fd;
9047 stream_.mode = mode;
9049 // Setup callback thread.
9050 stream_.callbackInfo.object = (void *) this;
9052 // Set the thread attributes for joinable and realtime scheduling
9053 // priority. The higher priority will only take affect if the
9054 // program is run as root or suid.
9055 pthread_attr_t attr;
9056 pthread_attr_init( &attr );
9057 pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_JOINABLE );
9058 #ifdef SCHED_RR // Undefined with some OSes (eg: NetBSD 1.6.x with GNU Pthread)
9059 if ( options && options->flags & RTAUDIO_SCHEDULE_REALTIME ) {
9060 struct sched_param param;
9061 int priority = options->priority;
9062 int min = sched_get_priority_min( SCHED_RR );
9063 int max = sched_get_priority_max( SCHED_RR );
9064 if ( priority < min ) priority = min;
9065 else if ( priority > max ) priority = max;
9066 param.sched_priority = priority;
9067 pthread_attr_setschedparam( &attr, ¶m );
9068 pthread_attr_setschedpolicy( &attr, SCHED_RR );
9071 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9073 pthread_attr_setschedpolicy( &attr, SCHED_OTHER );
9076 stream_.callbackInfo.isRunning = true;
9077 result = pthread_create( &stream_.callbackInfo.thread, &attr, ossCallbackHandler, &stream_.callbackInfo );
9078 pthread_attr_destroy( &attr );
9080 stream_.callbackInfo.isRunning = false;
9081 errorText_ = "RtApiOss::error creating callback thread!";
9090 pthread_cond_destroy( &handle->runnable );
9091 if ( handle->id[0] ) close( handle->id[0] );
9092 if ( handle->id[1] ) close( handle->id[1] );
9094 stream_.apiHandle = 0;
9097 for ( int i=0; i<2; i++ ) {
9098 if ( stream_.userBuffer[i] ) {
9099 free( stream_.userBuffer[i] );
9100 stream_.userBuffer[i] = 0;
9104 if ( stream_.deviceBuffer ) {
9105 free( stream_.deviceBuffer );
9106 stream_.deviceBuffer = 0;
9112 void RtApiOss :: closeStream()
9114 if ( stream_.state == STREAM_CLOSED ) {
9115 errorText_ = "RtApiOss::closeStream(): no open stream to close!";
9116 error( RtAudioError::WARNING );
9120 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9121 stream_.callbackInfo.isRunning = false;
9122 MUTEX_LOCK( &stream_.mutex );
9123 if ( stream_.state == STREAM_STOPPED )
9124 pthread_cond_signal( &handle->runnable );
9125 MUTEX_UNLOCK( &stream_.mutex );
9126 pthread_join( stream_.callbackInfo.thread, NULL );
9128 if ( stream_.state == STREAM_RUNNING ) {
9129 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX )
9130 ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9132 ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9133 stream_.state = STREAM_STOPPED;
9137 pthread_cond_destroy( &handle->runnable );
9138 if ( handle->id[0] ) close( handle->id[0] );
9139 if ( handle->id[1] ) close( handle->id[1] );
9141 stream_.apiHandle = 0;
9144 for ( int i=0; i<2; i++ ) {
9145 if ( stream_.userBuffer[i] ) {
9146 free( stream_.userBuffer[i] );
9147 stream_.userBuffer[i] = 0;
9151 if ( stream_.deviceBuffer ) {
9152 free( stream_.deviceBuffer );
9153 stream_.deviceBuffer = 0;
9156 stream_.mode = UNINITIALIZED;
9157 stream_.state = STREAM_CLOSED;
9160 void RtApiOss :: startStream()
9163 if ( stream_.state == STREAM_RUNNING ) {
9164 errorText_ = "RtApiOss::startStream(): the stream is already running!";
9165 error( RtAudioError::WARNING );
9169 MUTEX_LOCK( &stream_.mutex );
9171 stream_.state = STREAM_RUNNING;
9173 // No need to do anything else here ... OSS automatically starts
9174 // when fed samples.
9176 MUTEX_UNLOCK( &stream_.mutex );
9178 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9179 pthread_cond_signal( &handle->runnable );
9182 void RtApiOss :: stopStream()
9185 if ( stream_.state == STREAM_STOPPED ) {
9186 errorText_ = "RtApiOss::stopStream(): the stream is already stopped!";
9187 error( RtAudioError::WARNING );
9191 MUTEX_LOCK( &stream_.mutex );
9193 // The state might change while waiting on a mutex.
9194 if ( stream_.state == STREAM_STOPPED ) {
9195 MUTEX_UNLOCK( &stream_.mutex );
9200 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9201 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9203 // Flush the output with zeros a few times.
9206 RtAudioFormat format;
9208 if ( stream_.doConvertBuffer[0] ) {
9209 buffer = stream_.deviceBuffer;
9210 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9211 format = stream_.deviceFormat[0];
9214 buffer = stream_.userBuffer[0];
9215 samples = stream_.bufferSize * stream_.nUserChannels[0];
9216 format = stream_.userFormat;
9219 memset( buffer, 0, samples * formatBytes(format) );
9220 for ( unsigned int i=0; i<stream_.nBuffers+1; i++ ) {
9221 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9222 if ( result == -1 ) {
9223 errorText_ = "RtApiOss::stopStream: audio write error.";
9224 error( RtAudioError::WARNING );
9228 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9229 if ( result == -1 ) {
9230 errorStream_ << "RtApiOss::stopStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9231 errorText_ = errorStream_.str();
9234 handle->triggered = false;
9237 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9238 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9239 if ( result == -1 ) {
9240 errorStream_ << "RtApiOss::stopStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9241 errorText_ = errorStream_.str();
9247 stream_.state = STREAM_STOPPED;
9248 MUTEX_UNLOCK( &stream_.mutex );
9250 if ( result != -1 ) return;
9251 error( RtAudioError::SYSTEM_ERROR );
9254 void RtApiOss :: abortStream()
9257 if ( stream_.state == STREAM_STOPPED ) {
9258 errorText_ = "RtApiOss::abortStream(): the stream is already stopped!";
9259 error( RtAudioError::WARNING );
9263 MUTEX_LOCK( &stream_.mutex );
9265 // The state might change while waiting on a mutex.
9266 if ( stream_.state == STREAM_STOPPED ) {
9267 MUTEX_UNLOCK( &stream_.mutex );
9272 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9273 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9274 result = ioctl( handle->id[0], SNDCTL_DSP_HALT, 0 );
9275 if ( result == -1 ) {
9276 errorStream_ << "RtApiOss::abortStream: system error stopping callback procedure on device (" << stream_.device[0] << ").";
9277 errorText_ = errorStream_.str();
9280 handle->triggered = false;
9283 if ( stream_.mode == INPUT || ( stream_.mode == DUPLEX && handle->id[0] != handle->id[1] ) ) {
9284 result = ioctl( handle->id[1], SNDCTL_DSP_HALT, 0 );
9285 if ( result == -1 ) {
9286 errorStream_ << "RtApiOss::abortStream: system error stopping input callback procedure on device (" << stream_.device[0] << ").";
9287 errorText_ = errorStream_.str();
9293 stream_.state = STREAM_STOPPED;
9294 MUTEX_UNLOCK( &stream_.mutex );
9296 if ( result != -1 ) return;
9297 error( RtAudioError::SYSTEM_ERROR );
9300 void RtApiOss :: callbackEvent()
9302 OssHandle *handle = (OssHandle *) stream_.apiHandle;
9303 if ( stream_.state == STREAM_STOPPED ) {
9304 MUTEX_LOCK( &stream_.mutex );
9305 pthread_cond_wait( &handle->runnable, &stream_.mutex );
9306 if ( stream_.state != STREAM_RUNNING ) {
9307 MUTEX_UNLOCK( &stream_.mutex );
9310 MUTEX_UNLOCK( &stream_.mutex );
9313 if ( stream_.state == STREAM_CLOSED ) {
9314 errorText_ = "RtApiOss::callbackEvent(): the stream is closed ... this shouldn't happen!";
9315 error( RtAudioError::WARNING );
9319 // Invoke user callback to get fresh output data.
9320 int doStopStream = 0;
9321 RtAudioCallback callback = (RtAudioCallback) stream_.callbackInfo.callback;
9322 double streamTime = getStreamTime();
9323 RtAudioStreamStatus status = 0;
9324 if ( stream_.mode != INPUT && handle->xrun[0] == true ) {
9325 status |= RTAUDIO_OUTPUT_UNDERFLOW;
9326 handle->xrun[0] = false;
9328 if ( stream_.mode != OUTPUT && handle->xrun[1] == true ) {
9329 status |= RTAUDIO_INPUT_OVERFLOW;
9330 handle->xrun[1] = false;
9332 doStopStream = callback( stream_.userBuffer[0], stream_.userBuffer[1],
9333 stream_.bufferSize, streamTime, status, stream_.callbackInfo.userData );
9334 if ( doStopStream == 2 ) {
9335 this->abortStream();
9339 MUTEX_LOCK( &stream_.mutex );
9341 // The state might change while waiting on a mutex.
9342 if ( stream_.state == STREAM_STOPPED ) goto unlock;
9347 RtAudioFormat format;
9349 if ( stream_.mode == OUTPUT || stream_.mode == DUPLEX ) {
9351 // Setup parameters and do buffer conversion if necessary.
9352 if ( stream_.doConvertBuffer[0] ) {
9353 buffer = stream_.deviceBuffer;
9354 convertBuffer( buffer, stream_.userBuffer[0], stream_.convertInfo[0] );
9355 samples = stream_.bufferSize * stream_.nDeviceChannels[0];
9356 format = stream_.deviceFormat[0];
9359 buffer = stream_.userBuffer[0];
9360 samples = stream_.bufferSize * stream_.nUserChannels[0];
9361 format = stream_.userFormat;
9364 // Do byte swapping if necessary.
9365 if ( stream_.doByteSwap[0] )
9366 byteSwapBuffer( buffer, samples, format );
9368 if ( stream_.mode == DUPLEX && handle->triggered == false ) {
9370 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9371 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9372 trig = PCM_ENABLE_INPUT|PCM_ENABLE_OUTPUT;
9373 ioctl( handle->id[0], SNDCTL_DSP_SETTRIGGER, &trig );
9374 handle->triggered = true;
9377 // Write samples to device.
9378 result = write( handle->id[0], buffer, samples * formatBytes(format) );
9380 if ( result == -1 ) {
9381 // We'll assume this is an underrun, though there isn't a
9382 // specific means for determining that.
9383 handle->xrun[0] = true;
9384 errorText_ = "RtApiOss::callbackEvent: audio write error.";
9385 error( RtAudioError::WARNING );
9386 // Continue on to input section.
9390 if ( stream_.mode == INPUT || stream_.mode == DUPLEX ) {
9392 // Setup parameters.
9393 if ( stream_.doConvertBuffer[1] ) {
9394 buffer = stream_.deviceBuffer;
9395 samples = stream_.bufferSize * stream_.nDeviceChannels[1];
9396 format = stream_.deviceFormat[1];
9399 buffer = stream_.userBuffer[1];
9400 samples = stream_.bufferSize * stream_.nUserChannels[1];
9401 format = stream_.userFormat;
9404 // Read samples from device.
9405 result = read( handle->id[1], buffer, samples * formatBytes(format) );
9407 if ( result == -1 ) {
9408 // We'll assume this is an overrun, though there isn't a
9409 // specific means for determining that.
9410 handle->xrun[1] = true;
9411 errorText_ = "RtApiOss::callbackEvent: audio read error.";
9412 error( RtAudioError::WARNING );
9416 // Do byte swapping if necessary.
9417 if ( stream_.doByteSwap[1] )
9418 byteSwapBuffer( buffer, samples, format );
9420 // Do buffer conversion if necessary.
9421 if ( stream_.doConvertBuffer[1] )
9422 convertBuffer( stream_.userBuffer[1], stream_.deviceBuffer, stream_.convertInfo[1] );
9426 MUTEX_UNLOCK( &stream_.mutex );
9428 RtApi::tickStreamTime();
9429 if ( doStopStream == 1 ) this->stopStream();
9432 static void *ossCallbackHandler( void *ptr )
9434 CallbackInfo *info = (CallbackInfo *) ptr;
9435 RtApiOss *object = (RtApiOss *) info->object;
9436 bool *isRunning = &info->isRunning;
9438 while ( *isRunning == true ) {
9439 pthread_testcancel();
9440 object->callbackEvent();
9443 pthread_exit( NULL );
9446 //******************** End of __LINUX_OSS__ *********************//
9450 // *************************************************** //
9452 // Protected common (OS-independent) RtAudio methods.
9454 // *************************************************** //
9456 // This method can be modified to control the behavior of error
9457 // message printing.
9458 void RtApi :: error( RtAudioError::Type type )
9460 errorStream_.str(""); // clear the ostringstream
9462 RtAudioErrorCallback errorCallback = (RtAudioErrorCallback) stream_.callbackInfo.errorCallback;
9463 if ( errorCallback ) {
9464 // abortStream() can generate new error messages. Ignore them. Just keep original one.
9466 if ( firstErrorOccurred_ )
9469 firstErrorOccurred_ = true;
9470 const std::string errorMessage = errorText_;
9472 if ( type != RtAudioError::WARNING && stream_.state != STREAM_STOPPED) {
9473 stream_.callbackInfo.isRunning = false; // exit from the thread
9477 errorCallback( type, errorMessage );
9478 firstErrorOccurred_ = false;
9482 if ( type == RtAudioError::WARNING && showWarnings_ == true )
9483 std::cerr << '\n' << errorText_ << "\n\n";
9484 else if ( type != RtAudioError::WARNING )
9485 throw( RtAudioError( errorText_, type ) );
9488 void RtApi :: verifyStream()
9490 if ( stream_.state == STREAM_CLOSED ) {
9491 errorText_ = "RtApi:: a stream is not open!";
9492 error( RtAudioError::INVALID_USE );
9496 void RtApi :: clearStreamInfo()
9498 stream_.mode = UNINITIALIZED;
9499 stream_.state = STREAM_CLOSED;
9500 stream_.sampleRate = 0;
9501 stream_.bufferSize = 0;
9502 stream_.nBuffers = 0;
9503 stream_.userFormat = 0;
9504 stream_.userInterleaved = true;
9505 stream_.streamTime = 0.0;
9506 stream_.apiHandle = 0;
9507 stream_.deviceBuffer = 0;
9508 stream_.callbackInfo.callback = 0;
9509 stream_.callbackInfo.userData = 0;
9510 stream_.callbackInfo.isRunning = false;
9511 stream_.callbackInfo.errorCallback = 0;
9512 for ( int i=0; i<2; i++ ) {
9513 stream_.device[i] = 11111;
9514 stream_.doConvertBuffer[i] = false;
9515 stream_.deviceInterleaved[i] = true;
9516 stream_.doByteSwap[i] = false;
9517 stream_.nUserChannels[i] = 0;
9518 stream_.nDeviceChannels[i] = 0;
9519 stream_.channelOffset[i] = 0;
9520 stream_.deviceFormat[i] = 0;
9521 stream_.latency[i] = 0;
9522 stream_.userBuffer[i] = 0;
9523 stream_.convertInfo[i].channels = 0;
9524 stream_.convertInfo[i].inJump = 0;
9525 stream_.convertInfo[i].outJump = 0;
9526 stream_.convertInfo[i].inFormat = 0;
9527 stream_.convertInfo[i].outFormat = 0;
9528 stream_.convertInfo[i].inOffset.clear();
9529 stream_.convertInfo[i].outOffset.clear();
9533 unsigned int RtApi :: formatBytes( RtAudioFormat format )
9535 if ( format == RTAUDIO_SINT16 )
9537 else if ( format == RTAUDIO_SINT32 || format == RTAUDIO_FLOAT32 )
9539 else if ( format == RTAUDIO_FLOAT64 )
9541 else if ( format == RTAUDIO_SINT24 )
9543 else if ( format == RTAUDIO_SINT8 )
9546 errorText_ = "RtApi::formatBytes: undefined format.";
9547 error( RtAudioError::WARNING );
9552 void RtApi :: setConvertInfo( StreamMode mode, unsigned int firstChannel )
9554 if ( mode == INPUT ) { // convert device to user buffer
9555 stream_.convertInfo[mode].inJump = stream_.nDeviceChannels[1];
9556 stream_.convertInfo[mode].outJump = stream_.nUserChannels[1];
9557 stream_.convertInfo[mode].inFormat = stream_.deviceFormat[1];
9558 stream_.convertInfo[mode].outFormat = stream_.userFormat;
9560 else { // convert user to device buffer
9561 stream_.convertInfo[mode].inJump = stream_.nUserChannels[0];
9562 stream_.convertInfo[mode].outJump = stream_.nDeviceChannels[0];
9563 stream_.convertInfo[mode].inFormat = stream_.userFormat;
9564 stream_.convertInfo[mode].outFormat = stream_.deviceFormat[0];
9567 if ( stream_.convertInfo[mode].inJump < stream_.convertInfo[mode].outJump )
9568 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].inJump;
9570 stream_.convertInfo[mode].channels = stream_.convertInfo[mode].outJump;
9572 // Set up the interleave/deinterleave offsets.
9573 if ( stream_.deviceInterleaved[mode] != stream_.userInterleaved ) {
9574 if ( ( mode == OUTPUT && stream_.deviceInterleaved[mode] ) ||
9575 ( mode == INPUT && stream_.userInterleaved ) ) {
9576 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9577 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9578 stream_.convertInfo[mode].outOffset.push_back( k );
9579 stream_.convertInfo[mode].inJump = 1;
9583 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9584 stream_.convertInfo[mode].inOffset.push_back( k );
9585 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9586 stream_.convertInfo[mode].outJump = 1;
9590 else { // no (de)interleaving
9591 if ( stream_.userInterleaved ) {
9592 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9593 stream_.convertInfo[mode].inOffset.push_back( k );
9594 stream_.convertInfo[mode].outOffset.push_back( k );
9598 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ ) {
9599 stream_.convertInfo[mode].inOffset.push_back( k * stream_.bufferSize );
9600 stream_.convertInfo[mode].outOffset.push_back( k * stream_.bufferSize );
9601 stream_.convertInfo[mode].inJump = 1;
9602 stream_.convertInfo[mode].outJump = 1;
9607 // Add channel offset.
9608 if ( firstChannel > 0 ) {
9609 if ( stream_.deviceInterleaved[mode] ) {
9610 if ( mode == OUTPUT ) {
9611 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9612 stream_.convertInfo[mode].outOffset[k] += firstChannel;
9615 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9616 stream_.convertInfo[mode].inOffset[k] += firstChannel;
9620 if ( mode == OUTPUT ) {
9621 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9622 stream_.convertInfo[mode].outOffset[k] += ( firstChannel * stream_.bufferSize );
9625 for ( int k=0; k<stream_.convertInfo[mode].channels; k++ )
9626 stream_.convertInfo[mode].inOffset[k] += ( firstChannel * stream_.bufferSize );
9632 void RtApi :: convertBuffer( char *outBuffer, char *inBuffer, ConvertInfo &info )
9634 // This function does format conversion, input/output channel compensation, and
9635 // data interleaving/deinterleaving. 24-bit integers are assumed to occupy
9636 // the lower three bytes of a 32-bit integer.
9638 // Clear our device buffer when in/out duplex device channels are different
9639 if ( outBuffer == stream_.deviceBuffer && stream_.mode == DUPLEX &&
9640 ( stream_.nDeviceChannels[0] < stream_.nDeviceChannels[1] ) )
9641 memset( outBuffer, 0, stream_.bufferSize * info.outJump * formatBytes( info.outFormat ) );
9644 if (info.outFormat == RTAUDIO_FLOAT64) {
9646 Float64 *out = (Float64 *)outBuffer;
9648 if (info.inFormat == RTAUDIO_SINT8) {
9649 signed char *in = (signed char *)inBuffer;
9650 scale = 1.0 / 127.5;
9651 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9652 for (j=0; j<info.channels; j++) {
9653 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9654 out[info.outOffset[j]] += 0.5;
9655 out[info.outOffset[j]] *= scale;
9658 out += info.outJump;
9661 else if (info.inFormat == RTAUDIO_SINT16) {
9662 Int16 *in = (Int16 *)inBuffer;
9663 scale = 1.0 / 32767.5;
9664 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9665 for (j=0; j<info.channels; j++) {
9666 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9667 out[info.outOffset[j]] += 0.5;
9668 out[info.outOffset[j]] *= scale;
9671 out += info.outJump;
9674 else if (info.inFormat == RTAUDIO_SINT24) {
9675 Int24 *in = (Int24 *)inBuffer;
9676 scale = 1.0 / 8388607.5;
9677 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9678 for (j=0; j<info.channels; j++) {
9679 out[info.outOffset[j]] = (Float64) (in[info.inOffset[j]].asInt());
9680 out[info.outOffset[j]] += 0.5;
9681 out[info.outOffset[j]] *= scale;
9684 out += info.outJump;
9687 else if (info.inFormat == RTAUDIO_SINT32) {
9688 Int32 *in = (Int32 *)inBuffer;
9689 scale = 1.0 / 2147483647.5;
9690 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9691 for (j=0; j<info.channels; j++) {
9692 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9693 out[info.outOffset[j]] += 0.5;
9694 out[info.outOffset[j]] *= scale;
9697 out += info.outJump;
9700 else if (info.inFormat == RTAUDIO_FLOAT32) {
9701 Float32 *in = (Float32 *)inBuffer;
9702 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9703 for (j=0; j<info.channels; j++) {
9704 out[info.outOffset[j]] = (Float64) in[info.inOffset[j]];
9707 out += info.outJump;
9710 else if (info.inFormat == RTAUDIO_FLOAT64) {
9711 // Channel compensation and/or (de)interleaving only.
9712 Float64 *in = (Float64 *)inBuffer;
9713 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9714 for (j=0; j<info.channels; j++) {
9715 out[info.outOffset[j]] = in[info.inOffset[j]];
9718 out += info.outJump;
9722 else if (info.outFormat == RTAUDIO_FLOAT32) {
9724 Float32 *out = (Float32 *)outBuffer;
9726 if (info.inFormat == RTAUDIO_SINT8) {
9727 signed char *in = (signed char *)inBuffer;
9728 scale = (Float32) ( 1.0 / 127.5 );
9729 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9730 for (j=0; j<info.channels; j++) {
9731 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9732 out[info.outOffset[j]] += 0.5;
9733 out[info.outOffset[j]] *= scale;
9736 out += info.outJump;
9739 else if (info.inFormat == RTAUDIO_SINT16) {
9740 Int16 *in = (Int16 *)inBuffer;
9741 scale = (Float32) ( 1.0 / 32767.5 );
9742 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9743 for (j=0; j<info.channels; j++) {
9744 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9745 out[info.outOffset[j]] += 0.5;
9746 out[info.outOffset[j]] *= scale;
9749 out += info.outJump;
9752 else if (info.inFormat == RTAUDIO_SINT24) {
9753 Int24 *in = (Int24 *)inBuffer;
9754 scale = (Float32) ( 1.0 / 8388607.5 );
9755 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9756 for (j=0; j<info.channels; j++) {
9757 out[info.outOffset[j]] = (Float32) (in[info.inOffset[j]].asInt());
9758 out[info.outOffset[j]] += 0.5;
9759 out[info.outOffset[j]] *= scale;
9762 out += info.outJump;
9765 else if (info.inFormat == RTAUDIO_SINT32) {
9766 Int32 *in = (Int32 *)inBuffer;
9767 scale = (Float32) ( 1.0 / 2147483647.5 );
9768 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9769 for (j=0; j<info.channels; j++) {
9770 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9771 out[info.outOffset[j]] += 0.5;
9772 out[info.outOffset[j]] *= scale;
9775 out += info.outJump;
9778 else if (info.inFormat == RTAUDIO_FLOAT32) {
9779 // Channel compensation and/or (de)interleaving only.
9780 Float32 *in = (Float32 *)inBuffer;
9781 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9782 for (j=0; j<info.channels; j++) {
9783 out[info.outOffset[j]] = in[info.inOffset[j]];
9786 out += info.outJump;
9789 else if (info.inFormat == RTAUDIO_FLOAT64) {
9790 Float64 *in = (Float64 *)inBuffer;
9791 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9792 for (j=0; j<info.channels; j++) {
9793 out[info.outOffset[j]] = (Float32) in[info.inOffset[j]];
9796 out += info.outJump;
9800 else if (info.outFormat == RTAUDIO_SINT32) {
9801 Int32 *out = (Int32 *)outBuffer;
9802 if (info.inFormat == RTAUDIO_SINT8) {
9803 signed char *in = (signed char *)inBuffer;
9804 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9805 for (j=0; j<info.channels; j++) {
9806 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9807 out[info.outOffset[j]] <<= 24;
9810 out += info.outJump;
9813 else if (info.inFormat == RTAUDIO_SINT16) {
9814 Int16 *in = (Int16 *)inBuffer;
9815 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9816 for (j=0; j<info.channels; j++) {
9817 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]];
9818 out[info.outOffset[j]] <<= 16;
9821 out += info.outJump;
9824 else if (info.inFormat == RTAUDIO_SINT24) {
9825 Int24 *in = (Int24 *)inBuffer;
9826 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9827 for (j=0; j<info.channels; j++) {
9828 out[info.outOffset[j]] = (Int32) in[info.inOffset[j]].asInt();
9829 out[info.outOffset[j]] <<= 8;
9832 out += info.outJump;
9835 else if (info.inFormat == RTAUDIO_SINT32) {
9836 // Channel compensation and/or (de)interleaving only.
9837 Int32 *in = (Int32 *)inBuffer;
9838 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9839 for (j=0; j<info.channels; j++) {
9840 out[info.outOffset[j]] = in[info.inOffset[j]];
9843 out += info.outJump;
9846 else if (info.inFormat == RTAUDIO_FLOAT32) {
9847 Float32 *in = (Float32 *)inBuffer;
9848 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9849 for (j=0; j<info.channels; j++) {
9850 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9853 out += info.outJump;
9856 else if (info.inFormat == RTAUDIO_FLOAT64) {
9857 Float64 *in = (Float64 *)inBuffer;
9858 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9859 for (j=0; j<info.channels; j++) {
9860 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 2147483647.5 - 0.5);
9863 out += info.outJump;
9867 else if (info.outFormat == RTAUDIO_SINT24) {
9868 Int24 *out = (Int24 *)outBuffer;
9869 if (info.inFormat == RTAUDIO_SINT8) {
9870 signed char *in = (signed char *)inBuffer;
9871 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9872 for (j=0; j<info.channels; j++) {
9873 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 16);
9874 //out[info.outOffset[j]] <<= 16;
9877 out += info.outJump;
9880 else if (info.inFormat == RTAUDIO_SINT16) {
9881 Int16 *in = (Int16 *)inBuffer;
9882 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9883 for (j=0; j<info.channels; j++) {
9884 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] << 8);
9885 //out[info.outOffset[j]] <<= 8;
9888 out += info.outJump;
9891 else if (info.inFormat == RTAUDIO_SINT24) {
9892 // Channel compensation and/or (de)interleaving only.
9893 Int24 *in = (Int24 *)inBuffer;
9894 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9895 for (j=0; j<info.channels; j++) {
9896 out[info.outOffset[j]] = in[info.inOffset[j]];
9899 out += info.outJump;
9902 else if (info.inFormat == RTAUDIO_SINT32) {
9903 Int32 *in = (Int32 *)inBuffer;
9904 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9905 for (j=0; j<info.channels; j++) {
9906 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] >> 8);
9907 //out[info.outOffset[j]] >>= 8;
9910 out += info.outJump;
9913 else if (info.inFormat == RTAUDIO_FLOAT32) {
9914 Float32 *in = (Float32 *)inBuffer;
9915 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9916 for (j=0; j<info.channels; j++) {
9917 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
9920 out += info.outJump;
9923 else if (info.inFormat == RTAUDIO_FLOAT64) {
9924 Float64 *in = (Float64 *)inBuffer;
9925 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9926 for (j=0; j<info.channels; j++) {
9927 out[info.outOffset[j]] = (Int32) (in[info.inOffset[j]] * 8388607.5 - 0.5);
9930 out += info.outJump;
9934 else if (info.outFormat == RTAUDIO_SINT16) {
9935 Int16 *out = (Int16 *)outBuffer;
9936 if (info.inFormat == RTAUDIO_SINT8) {
9937 signed char *in = (signed char *)inBuffer;
9938 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9939 for (j=0; j<info.channels; j++) {
9940 out[info.outOffset[j]] = (Int16) in[info.inOffset[j]];
9941 out[info.outOffset[j]] <<= 8;
9944 out += info.outJump;
9947 else if (info.inFormat == RTAUDIO_SINT16) {
9948 // Channel compensation and/or (de)interleaving only.
9949 Int16 *in = (Int16 *)inBuffer;
9950 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9951 for (j=0; j<info.channels; j++) {
9952 out[info.outOffset[j]] = in[info.inOffset[j]];
9955 out += info.outJump;
9958 else if (info.inFormat == RTAUDIO_SINT24) {
9959 Int24 *in = (Int24 *)inBuffer;
9960 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9961 for (j=0; j<info.channels; j++) {
9962 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]].asInt() >> 8);
9965 out += info.outJump;
9968 else if (info.inFormat == RTAUDIO_SINT32) {
9969 Int32 *in = (Int32 *)inBuffer;
9970 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9971 for (j=0; j<info.channels; j++) {
9972 out[info.outOffset[j]] = (Int16) ((in[info.inOffset[j]] >> 16) & 0x0000ffff);
9975 out += info.outJump;
9978 else if (info.inFormat == RTAUDIO_FLOAT32) {
9979 Float32 *in = (Float32 *)inBuffer;
9980 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9981 for (j=0; j<info.channels; j++) {
9982 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
9985 out += info.outJump;
9988 else if (info.inFormat == RTAUDIO_FLOAT64) {
9989 Float64 *in = (Float64 *)inBuffer;
9990 for (unsigned int i=0; i<stream_.bufferSize; i++) {
9991 for (j=0; j<info.channels; j++) {
9992 out[info.outOffset[j]] = (Int16) (in[info.inOffset[j]] * 32767.5 - 0.5);
9995 out += info.outJump;
9999 else if (info.outFormat == RTAUDIO_SINT8) {
10000 signed char *out = (signed char *)outBuffer;
10001 if (info.inFormat == RTAUDIO_SINT8) {
10002 // Channel compensation and/or (de)interleaving only.
10003 signed char *in = (signed char *)inBuffer;
10004 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10005 for (j=0; j<info.channels; j++) {
10006 out[info.outOffset[j]] = in[info.inOffset[j]];
10009 out += info.outJump;
10012 if (info.inFormat == RTAUDIO_SINT16) {
10013 Int16 *in = (Int16 *)inBuffer;
10014 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10015 for (j=0; j<info.channels; j++) {
10016 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 8) & 0x00ff);
10019 out += info.outJump;
10022 else if (info.inFormat == RTAUDIO_SINT24) {
10023 Int24 *in = (Int24 *)inBuffer;
10024 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10025 for (j=0; j<info.channels; j++) {
10026 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]].asInt() >> 16);
10029 out += info.outJump;
10032 else if (info.inFormat == RTAUDIO_SINT32) {
10033 Int32 *in = (Int32 *)inBuffer;
10034 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10035 for (j=0; j<info.channels; j++) {
10036 out[info.outOffset[j]] = (signed char) ((in[info.inOffset[j]] >> 24) & 0x000000ff);
10039 out += info.outJump;
10042 else if (info.inFormat == RTAUDIO_FLOAT32) {
10043 Float32 *in = (Float32 *)inBuffer;
10044 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10045 for (j=0; j<info.channels; j++) {
10046 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10049 out += info.outJump;
10052 else if (info.inFormat == RTAUDIO_FLOAT64) {
10053 Float64 *in = (Float64 *)inBuffer;
10054 for (unsigned int i=0; i<stream_.bufferSize; i++) {
10055 for (j=0; j<info.channels; j++) {
10056 out[info.outOffset[j]] = (signed char) (in[info.inOffset[j]] * 127.5 - 0.5);
10059 out += info.outJump;
10065 //static inline uint16_t bswap_16(uint16_t x) { return (x>>8) | (x<<8); }
10066 //static inline uint32_t bswap_32(uint32_t x) { return (bswap_16(x&0xffff)<<16) | (bswap_16(x>>16)); }
10067 //static inline uint64_t bswap_64(uint64_t x) { return (((unsigned long long)bswap_32(x&0xffffffffull))<<32) | (bswap_32(x>>32)); }
10069 void RtApi :: byteSwapBuffer( char *buffer, unsigned int samples, RtAudioFormat format )
10075 if ( format == RTAUDIO_SINT16 ) {
10076 for ( unsigned int i=0; i<samples; i++ ) {
10077 // Swap 1st and 2nd bytes.
10082 // Increment 2 bytes.
10086 else if ( format == RTAUDIO_SINT32 ||
10087 format == RTAUDIO_FLOAT32 ) {
10088 for ( unsigned int i=0; i<samples; i++ ) {
10089 // Swap 1st and 4th bytes.
10094 // Swap 2nd and 3rd bytes.
10100 // Increment 3 more bytes.
10104 else if ( format == RTAUDIO_SINT24 ) {
10105 for ( unsigned int i=0; i<samples; i++ ) {
10106 // Swap 1st and 3rd bytes.
10111 // Increment 2 more bytes.
10115 else if ( format == RTAUDIO_FLOAT64 ) {
10116 for ( unsigned int i=0; i<samples; i++ ) {
10117 // Swap 1st and 8th bytes
10122 // Swap 2nd and 7th bytes
10128 // Swap 3rd and 6th bytes
10134 // Swap 4th and 5th bytes
10140 // Increment 5 more bytes.
10146 // Indentation settings for Vim and Emacs
10148 // Local Variables:
10149 // c-basic-offset: 2
10150 // indent-tabs-mode: nil
10153 // vim: et sts=2 sw=2